code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9
values | license stringclasses 15
values | size int32 3 1.05M |
|---|---|---|---|---|---|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.iot.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* Specifies the MQTT context to use for the test authorizer request
* </p>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class MqttContext implements Serializable, Cloneable, StructuredPojo {
/**
* <p>
* The value of the <code>username</code> key in an MQTT authorization request.
* </p>
*/
private String username;
/**
* <p>
* The value of the <code>password</code> key in an MQTT authorization request.
* </p>
*/
private java.nio.ByteBuffer password;
/**
* <p>
* The value of the <code>clientId</code> key in an MQTT authorization request.
* </p>
*/
private String clientId;
/**
* <p>
* The value of the <code>username</code> key in an MQTT authorization request.
* </p>
*
* @param username
* The value of the <code>username</code> key in an MQTT authorization request.
*/
public void setUsername(String username) {
this.username = username;
}
/**
* <p>
* The value of the <code>username</code> key in an MQTT authorization request.
* </p>
*
* @return The value of the <code>username</code> key in an MQTT authorization request.
*/
public String getUsername() {
return this.username;
}
/**
* <p>
* The value of the <code>username</code> key in an MQTT authorization request.
* </p>
*
* @param username
* The value of the <code>username</code> key in an MQTT authorization request.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public MqttContext withUsername(String username) {
setUsername(username);
return this;
}
/**
* <p>
* The value of the <code>password</code> key in an MQTT authorization request.
* </p>
* <p>
* The AWS SDK for Java performs a Base64 encoding on this field before sending this request to the AWS service.
* Users of the SDK should not perform Base64 encoding on this field.
* </p>
* <p>
* Warning: ByteBuffers returned by the SDK are mutable. Changes to the content or position of the byte buffer will
* be seen by all objects that have a reference to this object. It is recommended to call ByteBuffer.duplicate() or
* ByteBuffer.asReadOnlyBuffer() before using or reading from the buffer. This behavior will be changed in a future
* major version of the SDK.
* </p>
*
* @param password
* The value of the <code>password</code> key in an MQTT authorization request.
*/
public void setPassword(java.nio.ByteBuffer password) {
this.password = password;
}
/**
* <p>
* The value of the <code>password</code> key in an MQTT authorization request.
* </p>
* <p>
* {@code ByteBuffer}s are stateful. Calling their {@code get} methods changes their {@code position}. We recommend
* using {@link java.nio.ByteBuffer#asReadOnlyBuffer()} to create a read-only view of the buffer with an independent
* {@code position}, and calling {@code get} methods on this rather than directly on the returned {@code ByteBuffer}.
* Doing so will ensure that anyone else using the {@code ByteBuffer} will not be affected by changes to the
* {@code position}.
* </p>
*
* @return The value of the <code>password</code> key in an MQTT authorization request.
*/
public java.nio.ByteBuffer getPassword() {
return this.password;
}
/**
* <p>
* The value of the <code>password</code> key in an MQTT authorization request.
* </p>
* <p>
* The AWS SDK for Java performs a Base64 encoding on this field before sending this request to the AWS service.
* Users of the SDK should not perform Base64 encoding on this field.
* </p>
* <p>
* Warning: ByteBuffers returned by the SDK are mutable. Changes to the content or position of the byte buffer will
* be seen by all objects that have a reference to this object. It is recommended to call ByteBuffer.duplicate() or
* ByteBuffer.asReadOnlyBuffer() before using or reading from the buffer. This behavior will be changed in a future
* major version of the SDK.
* </p>
*
* @param password
* The value of the <code>password</code> key in an MQTT authorization request.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public MqttContext withPassword(java.nio.ByteBuffer password) {
setPassword(password);
return this;
}
/**
* <p>
* The value of the <code>clientId</code> key in an MQTT authorization request.
* </p>
*
* @param clientId
* The value of the <code>clientId</code> key in an MQTT authorization request.
*/
public void setClientId(String clientId) {
this.clientId = clientId;
}
/**
* <p>
* The value of the <code>clientId</code> key in an MQTT authorization request.
* </p>
*
* @return The value of the <code>clientId</code> key in an MQTT authorization request.
*/
public String getClientId() {
return this.clientId;
}
/**
* <p>
* The value of the <code>clientId</code> key in an MQTT authorization request.
* </p>
*
* @param clientId
* The value of the <code>clientId</code> key in an MQTT authorization request.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public MqttContext withClientId(String clientId) {
setClientId(clientId);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getUsername() != null)
sb.append("Username: ").append(getUsername()).append(",");
if (getPassword() != null)
sb.append("Password: ").append(getPassword()).append(",");
if (getClientId() != null)
sb.append("ClientId: ").append(getClientId());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof MqttContext == false)
return false;
MqttContext other = (MqttContext) obj;
if (other.getUsername() == null ^ this.getUsername() == null)
return false;
if (other.getUsername() != null && other.getUsername().equals(this.getUsername()) == false)
return false;
if (other.getPassword() == null ^ this.getPassword() == null)
return false;
if (other.getPassword() != null && other.getPassword().equals(this.getPassword()) == false)
return false;
if (other.getClientId() == null ^ this.getClientId() == null)
return false;
if (other.getClientId() != null && other.getClientId().equals(this.getClientId()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getUsername() == null) ? 0 : getUsername().hashCode());
hashCode = prime * hashCode + ((getPassword() == null) ? 0 : getPassword().hashCode());
hashCode = prime * hashCode + ((getClientId() == null) ? 0 : getClientId().hashCode());
return hashCode;
}
@Override
public MqttContext clone() {
try {
return (MqttContext) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.iot.model.transform.MqttContextMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}
| aws/aws-sdk-java | aws-java-sdk-iot/src/main/java/com/amazonaws/services/iot/model/MqttContext.java | Java | apache-2.0 | 9,309 |
using Messaging.Interfaces.Common;
using Messaging.Interfaces.Popup;
using Splat;
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Windows.ApplicationModel.Core;
using Windows.Foundation;
using Windows.UI.Core;
using Windows.UI.Popups;
#if WINDOWS_PHONE_APP
using Windows.UI.ViewManagement;
#endif
namespace Messaging.UniversalApp.Popup
{
/// <summary>
/// Serializes <see cref="MessageDialog"/> display for Windows (Phone) apps:
/// WinRT only allows one dialog at a time, so requests are queued and shown
/// one after another on the UI dispatcher.
/// </summary>
public sealed class PopupManager : IPopupManager, IEnableLogger
{
    // Guards _dialogQueue; Pulsed when a dialog finishes (see ShowDialogAsync's finally).
    private readonly object _queueMonitor = new object();
    // Guards _currentDialogOperation and the "showing" handshake with the dispatcher lambda.
    private readonly object _showMonitor = new object();
    private IAsyncOperation<IUICommand> _currentDialogOperation;
    private readonly Queue<MessageDialog> _dialogQueue = new Queue<MessageDialog>();

    // Fire-and-forget (async void): toggles the phone status-bar progress indicator.
    // On non-phone builds this is a no-op.
    public async void ShowBusy(bool isBusy)
    {
#if WINDOWS_PHONE_APP
        var progressIndicator = StatusBar.GetForCurrentView().ProgressIndicator;
        if (isBusy)
        {
            // User-facing, French-localized busy text (phone build only).
            progressIndicator.Text = "Récupération en cours";
            await progressIndicator.ShowAsync();
        }
        else
        {
            progressIndicator.Text = string.Empty;
            await progressIndicator.HideAsync();
        }
#else
        await Task.FromResult(isBusy);
#endif
    }

    public async Task ShowMessage(string message)
    {
        await ShowMessage(message, null);
    }

    public async Task ShowMessage(string message, string title)
    {
        // MessageDialog(message, title) throws on empty title, hence the two ctors.
        var md = string.IsNullOrEmpty(title) ? new MessageDialog(message) : new MessageDialog(message, title);
        await ShowDialogAsync(md);
    }

    public async Task ShowMessage(IMessagingContent content)
    {
        await ShowMessage(content.Message, content.Title);
    }

    public async Task<bool> ShowYesNo(string question)
    {
        var messageDialogResult = await ShowYesNo(question, null);
        return messageDialogResult;
    }

    // Returns true iff the user picked "Yes" (compared by command reference).
    public async Task<bool> ShowYesNo(string question, string title)
    {
        var messageDialogResult = string.IsNullOrEmpty(title)
            ? new MessageDialog(question)
            : new MessageDialog(question, title);
        var yesCommand = new UICommand("Yes");
        var noCommand = new UICommand("No");
        messageDialogResult.Commands.Add(yesCommand);
        messageDialogResult.Commands.Add(noCommand);
        var result = await ShowDialogAsync(messageDialogResult);
        return result == yesCommand;
    }

    public async Task<bool> ShowYesNo(IMessagingContent content)
    {
        var messageDialogResult = await ShowYesNo(content.Message, content.Title);
        return messageDialogResult;
    }

    /// <summary>
    /// Queues the dialog and shows it when it reaches the head of the queue.
    /// Protocol: enqueue under _queueMonitor; wait there while other dialogs are
    /// ahead; show on the UI dispatcher; wait on _showMonitor until the dispatcher
    /// lambda pulses completion; finally dequeue and pulse the next waiter.
    /// </summary>
    private async Task<IUICommand> ShowDialogAsync(MessageDialog messageDialog)
    {
        // Default result if the dialog fails to show (e.g. exception in ShowAsync).
        IUICommand command = new UICommand("Ok");
        await Task.Run(async () =>
        {
            lock (_queueMonitor)
            {
                _dialogQueue.Enqueue(messageDialog);
            }
            try
            {
                while (true)
                {
                    MessageDialog nextMessageDialog;
                    lock (_queueMonitor)
                    {
                        if (_dialogQueue.Count > 1)
                        {
                            // Another dialog is ahead of us; the finishing call
                            // pulses _queueMonitor from its finally block.
                            Monitor.Wait(_queueMonitor);
                        }
                        // NOTE(review): Peek (not Dequeue) — the entry is removed
                        // only in the finally block below; confirm each waiter
                        // eventually sees its own dialog at the head.
                        nextMessageDialog = _dialogQueue.Peek();
                    }
                    var showing = false;
                    await
                        CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                            async () =>
                            {
                                try
                                {
                                    lock (_showMonitor)
                                    {
                                        showing = true;
                                        _currentDialogOperation = nextMessageDialog.ShowAsync();
                                    }
                                    command = await _currentDialogOperation;
                                    lock (_showMonitor)
                                        _currentDialogOperation = null;
                                }
                                catch (Exception e)
                                {
                                    this.Log()
                                        .Error(
                                            $"[PopupManager][ShowDialogAsync] An Exception Occured:{e.Message}, StackTrace:{e.StackTrace}");
                                }
                                // Signal the outer task that the dialog is done,
                                // whether it completed normally or faulted.
                                lock (_showMonitor)
                                {
                                    showing = false;
                                    Monitor.Pulse(_showMonitor);
                                }
                            });
                    lock (_showMonitor)
                    {
                        if (showing)
                        {
                            // The dispatcher lambda is still running; wait for its pulse.
                            Monitor.Wait(_showMonitor);
                        }
                    }
                    return true;
                }
            }
            finally
            {
                // Remove our dialog and wake the next queued caller.
                lock (_queueMonitor)
                {
                    _dialogQueue.Dequeue();
                    Monitor.Pulse(_queueMonitor);
                }
            }
        });
        return command;
    }
}
} | ApplETS/ETSMobile-WindowsPlatforms | Ets.Mobile/Modules/Messaging/Implementations/Messaging.UniversalApp/Popup/PopupManager.cs | C# | apache-2.0 | 5,886 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace @false
{
// NOTE(review): intentionally empty entry point — the project appears to exist
// only as a placeholder (its namespace is the escaped keyword `@false`).
class Program
{
    static void Main(string[] args)
    {
        // No behavior.
    }
}
}
| AlanBarber/WinCmdCoreUtilities | false/Program.cs | C# | apache-2.0 | 215 |
package us.ihmc.rosControl.wholeRobot;
/**
 * Read-only accessor handle for a six-axis force/torque sensor. Accessor names
 * follow the Fx/Fy/Fz (force) and Tx/Ty/Tz (torque) naming convention.
 * Units are not specified in this interface — presumably SI (N and N·m);
 * confirm against the native ros_control implementation.
 */
public interface ForceTorqueSensorHandle
{
    /** Torque component about the z-axis. */
    double getTz();

    /** Torque component about the y-axis. */
    double getTy();

    /** Torque component about the x-axis. */
    double getTx();

    /** Force component along the z-axis. */
    double getFz();

    /** Force component along the y-axis. */
    double getFy();

    /** Force component along the x-axis. */
    double getFx();

    /** Identifying name of this sensor handle. */
    String getName();
}
| ihmcrobotics/ihmc-ros-control | src/main/java/us/ihmc/rosControl/wholeRobot/ForceTorqueSensorHandle.java | Java | apache-2.0 | 228 |
// GitHub OAuth application credentials and target repository.
// Fill these in before deploying. NOTE(review): secrets committed in source are
// a risk — consider storing them in PropertiesService instead.
var CLIENT_ID = '';
var CLIENT_SECRET = '';
var OWNER = "";
var REPO = "";
/**
* Manage Form Answer
* Create a trigger by going to Resources > Current projet's triggers
* Select function manageAnswer() and create a trigger at form submission
*/
/**
 * Form-submit trigger handler: collects the answers, files a GitHub issue via
 * submitIssue(), then e-mails the submitter a link to it. On any failure the
 * raw answers are mailed to the script owner instead.
 *
 * Fixes vs. previous revision: removed the unused `form` local, and renamed the
 * catch variable (it shadowed the event parameter `e`).
 *
 * @param {Object} e Form-submit event (provides e.response).
 */
function manageAnswer(e) {
  // Answers keyed by question title; titles must match these keys exactly.
  var rep = {
    "Title": "",
    "Message": "",
    "Email": ""
  };
  var itemResponses = e.response.getItemResponses();
  for (var i = 0; i < itemResponses.length; i++) {
    var itemTitle = itemResponses[i].getItem().getTitle();
    var itemResponse = itemResponses[i].getResponse();
    rep[itemTitle] = itemResponse;
    Logger.log(itemTitle + ': ' + itemResponse);
  }
  try {
    var issue = submitIssue(rep);
    var body = "<p>Hi,</p>"
      + "<p>Thank you for submitting your issue, you can follow it on this page : <a href='" + issue.html_url + "'>link</a>.</p>"
      + "<p>Title : " + rep.Title + "<br>"
      + "Message : " + rep.Message + "</p>"
      + "Regards";
    GmailApp.sendEmail(rep.Email, 'Issue posted on GitHub', '', {
      htmlBody: body,
    });
  } catch (err) {
    // Alert the script owner with the raw answers so nothing is lost.
    GmailApp.sendEmail(Session.getEffectiveUser().getEmail(), 'Error issue submission', '', {
      htmlBody: JSON.stringify(rep),
    });
  }
}
/**
* Function to send issue to GitHub
*/
/**
 * Creates a GitHub issue on OWNER/REPO from the submitted form answers.
 *
 * Fix vs. previous revision: when the OAuth service had no access this fell
 * through and returned undefined, making the caller crash later with an opaque
 * TypeError on issue.html_url. It now fails fast with a descriptive Error
 * (still caught by manageAnswer's try/catch).
 *
 * @param {{Title: string, Message: string, Email: string}} data Form answers.
 * @return {Object} Parsed GitHub issue resource (includes html_url).
 * @throws {Error} If the OAuth flow has not been completed yet.
 */
function submitIssue(data) {
  var service = getService();
  if (!service.hasAccess()) {
    var authorizationUrl = service.getAuthorizationUrl();
    Logger.log('Open the following URL and re-run the script: %s',
        authorizationUrl);
    throw new Error('GitHub OAuth service is not authorized yet');
  }
  var url = 'https://api.github.com/repos/' + OWNER + '/' + REPO + '/issues';
  var bodyRequest = {
    "title": data.Title,
    "body": "_## Issue created anonymously for a user ##_\n" + data.Message
  };
  var response = UrlFetchApp.fetch(url, {
    method: "post",
    headers: {
      Authorization: 'Bearer ' + service.getAccessToken()
    },
    payload: JSON.stringify(bodyRequest)
  });
  var result = JSON.parse(response.getContentText());
  Logger.log(JSON.stringify(result, null, 2));
  return result;
}
/**
* Authorizes and makes a request to the GitHub API.
*/
// Manual smoke test: lists the authorized user's repositories, or logs the
// authorization URL if the OAuth flow has not been completed yet.
function run() {
  var service = getService();
  if (service.hasAccess()) {
    var url = 'https://api.github.com/user/repos';
    var response = UrlFetchApp.fetch(url, {
      headers: {
        Authorization: 'Bearer ' + service.getAccessToken()
      }
    });
    var result = JSON.parse(response.getContentText());
    // Pretty-print the repo list so it is readable in the execution log.
    Logger.log(JSON.stringify(result, null, 2));
  } else {
    // First run: the user must visit this URL to grant access.
    var authorizationUrl = service.getAuthorizationUrl();
    Logger.log('Open the following URL and re-run the script: %s',
        authorizationUrl);
  }
}
/**
* Configures the service.
*/
/**
 * Builds the OAuth2 service used to talk to the GitHub API. Each setter
 * mutates and returns the same service, so calling them as statements is
 * equivalent to the fluent chain.
 */
function getService() {
  var service = OAuth2.createService('GitHub');
  // GitHub's OAuth endpoints.
  service.setAuthorizationBaseUrl('https://github.com/login/oauth/authorize');
  service.setTokenUrl('https://github.com/login/oauth/access_token');
  // Application credentials (configured at the top of this file).
  service.setClientId(CLIENT_ID);
  service.setClientSecret(CLIENT_SECRET);
  // Function invoked when GitHub redirects back after authorization.
  service.setCallbackFunction('authCallback');
  // 'repo' scope is required to create issues.
  service.setScope('repo');
  // Persist granted tokens per user.
  service.setPropertyStore(PropertiesService.getUserProperties());
  return service;
}
/**
* Handles the OAuth callback.
*/
/**
 * OAuth2 redirect handler: completes the authorization flow and shows the
 * outcome to the user.
 * @param {Object} request Callback request from the authorization server.
 * @return {Object} HtmlOutput page indicating success or denial.
 */
function authCallback(request) {
  var granted = getService().handleCallback(request);
  return HtmlService.createHtmlOutput(granted ? 'Success!' : 'Denied');
}
| St3ph-fr/my-apps-script-utils | anonymous-issues-github/Code.js | JavaScript | apache-2.0 | 3,674 |
package com.linkedin.automation;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.openqa.selenium.By;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.chrome.ChromeDriver;
/**
 * Selenium automation that logs into LinkedIn and clicks "connect" buttons on
 * the "people you may know" page in a loop.
 *
 * SECURITY NOTE(review): the LinkedIn username and password are hard-coded
 * below in plain text, as is the chromedriver path — these should be moved to
 * configuration/environment before any real use.
 */
public class PeopleYouMayKnow {

    /**
     * Scrolls the page (half, then a sixth of the document height) to trigger
     * lazy-loading of more suggestion cards.
     */
    public void scrollThePage(WebDriver webDriver) {
        int sleepTime = 100;
        JavascriptExecutor js = (JavascriptExecutor) webDriver;
        js.executeScript("window.scrollTo(0, (document.body.scrollHeight)/2)");
        sleep(sleepTime);
        js.executeScript("window.scrollTo(0, (document.body.scrollHeight)/6)");
        sleep(sleepTime);
    }

    /**
     * Sleep helper that swallows InterruptedException (only logs it); the
     * interrupt flag is not restored.
     */
    public void sleep(int time) {
        try {
            Thread.sleep(time);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        PeopleYouMayKnow obj = new PeopleYouMayKnow();
        WebDriver driver;
        // Hard-coded driver binary location (Windows path).
        System.setProperty("webdriver.chrome.driver", "/temp/chromedriver_win32/chromedriver.exe");
        driver = new ChromeDriver();
        driver.get("https://www.linkedin.com");
        driver.manage().window().maximize();
        // Hard-coded credentials — see the class-level security note.
        WebElement account = driver.findElements(By.xpath(".//input[@id='login-email']")).get(0);
        account.sendKeys("17091275816");
        driver.manage().timeouts().implicitlyWait(1, TimeUnit.SECONDS);
        WebElement pass = driver.findElement(By.xpath(".//input[@id='login-password']"));
        pass.sendKeys("hiro12345");
        driver.manage().timeouts().implicitlyWait(2, TimeUnit.SECONDS);
        WebElement button = driver.findElement(By.xpath(".//input[@id='login-submit']"));
        button.click();
        driver.manage().timeouts().implicitlyWait(2, TimeUnit.SECONDS);
        // Re-open the network page 50 times; each pass clicks every visible
        // "connect" button (the XPath text is the Chinese UI label).
        for (int i = 0; i < 50; i++) {
            driver.get("http://www.linkedin.com/mynetwork/");
            driver.manage().timeouts().implicitlyWait(10, TimeUnit.SECONDS);
            int count = 0;
            while (true) {
                try {
                    driver.manage().timeouts().implicitlyWait(3, TimeUnit.SECONDS);
                    List<WebElement> elements = driver
                            .findElements(By.xpath(".//button[@class='button-secondary-small']/span[text()='加为好友']"));
                    if (!elements.isEmpty()) {
                        elements.get(0).click();
                        driver.manage().timeouts().implicitlyWait(2, TimeUnit.SECONDS);
                        // 10 s pause between clicks, presumably to avoid rate limiting.
                        Thread.sleep(10000);
                        count++;
                    } else {
                        break;
                    }
                } catch (Exception e) {
                    // Any failure (stale element, timeout, …) ends this pass.
                    break;
                }
                // Every 6 clicks, scroll to load more suggestion cards.
                if (count % 6 == 0) {
                    obj.scrollThePage(driver);
                }
            }
        }
    }
}
| sanlingdd/personalLinkedProfilesIn | linkedin-java/src/main/java/com/linkedin/automation/PeopleYouMayKnow.java | Java | apache-2.0 | 2,429 |
"use strict";
let User = require("../../../../persistence").models.User;
let debug = require("debug")("app:auth");
module.exports = (findUserEntity, createUserEntity, methodName) => {
return (tokenA, tokenB, profile, done) => {
return User.findOneQ(findUserEntity(profile))
.then((found) => found || User.createQ(createUserEntity(profile)))
.then((user) => done(null, user))
.catch((err) => {
debug(`error authenticating via ${methodName}`, err);
done(err, null);
});
};
};
| atsid/drugfax-18f | server/initialization/sections/passport/strategies/common_oauth_callback.js | JavaScript | apache-2.0 | 571 |
// Copyright 2010-2021 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// [START program]
using System;
using Google.OrTools.Sat;
/// <summary>
/// Minimal CP-SAT sample showing how to impose a wall-time limit on the solver.
/// </summary>
public class SolveWithTimeLimitSampleSat
{
    static void Main()
    {
        // Model: three integer variables in [0, 2] with the constraint x != y.
        var model = new CpModel();
        const int numVals = 3;
        var x = model.NewIntVar(0, numVals - 1, "x");
        var y = model.NewIntVar(0, numVals - 1, "y");
        var z = model.NewIntVar(0, numVals - 1, "z");
        model.Add(x != y);

        // Solver parameters are passed as a text-format proto string;
        // here we allow at most 10 seconds of solving time.
        var solver = new CpSolver();
        solver.StringParameters = "max_time_in_seconds:10.0";
        var status = solver.Solve(model);

        // Print the assignment only when the solver proved optimality.
        if (status == CpSolverStatus.Optimal)
        {
            Console.WriteLine("x = " + solver.Value(x));
            Console.WriteLine("y = " + solver.Value(y));
            Console.WriteLine("z = " + solver.Value(z));
        }
    }
}
// [END program]
| google/or-tools | ortools/sat/samples/SolveWithTimeLimitSampleSat.cs | C# | apache-2.0 | 1,644 |
package handlers
import (
"net/http"
"strconv"
log "github.com/sirupsen/logrus"
"github.com/codedellemc/rexray/libstorage/api/types"
)
// queryParamsHandler is an HTTP filter for injecting the store with query
// parameters
type queryParamsHandler struct {
	// handler is the wrapped APIFunc, invoked after the query parameters
	// have been copied into the store. Nil until Handler(m) populates it.
	handler types.APIFunc
}
// Name returns the identifier this middleware registers under.
func (h *queryParamsHandler) Name() string {
	return "query-params-handler"
}
// NewQueryParamsHandler returns a new filter for injecting the store with query
// parameters
func NewQueryParamsHandler() types.Middleware {
	// The handler field is left nil here; Handler(m) creates the instance
	// that actually wraps the next APIFunc.
	return &queryParamsHandler{}
}
// Handler wraps the given APIFunc in a fresh queryParamsHandler and returns
// its Handle method as the next link in the middleware chain.
func (h *queryParamsHandler) Handler(m types.APIFunc) types.APIFunc {
	return (&queryParamsHandler{m}).Handle
}
// Handle copies every query parameter of the request into the store before
// delegating to the wrapped handler. Single values are coerced in order:
// empty string -> true (flag), then int64, then bool, then raw string.
// Repeated keys are stored as the raw string slice.
func (h *queryParamsHandler) Handle(
	ctx types.Context,
	w http.ResponseWriter,
	req *http.Request,
	store types.Store) error {

	for key, vals := range req.URL.Query() {
		ctx.WithFields(log.Fields{
			"key":        key,
			"value":      vals,
			"len(value)": len(vals),
		}).Debug("query param")

		if len(vals) == 0 {
			// A bare key acts as a boolean flag.
			store.Set(key, true)
			continue
		}
		if len(vals) > 1 {
			// Repeated keys keep their full slice of values.
			store.Set(key, vals)
			continue
		}

		val := vals[0]
		if len(val) == 0 {
			store.Set(key, true)
		} else if i, err := strconv.ParseInt(val, 10, 64); err == nil {
			store.Set(key, i)
		} else if b, err := strconv.ParseBool(val); err == nil {
			store.Set(key, b)
		} else {
			store.Set(key, val)
		}
	}
	return h.handler(ctx, w, req, store)
}
| oppodeldoc/rexray | libstorage/api/server/handlers/handlers_query_params.go | GO | apache-2.0 | 1,397 |
package helpers.db.filter.column;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.List;
/**
* Filter on a disjunction of string matches.
* Instead of filtering on "column ~= filter", filters on (columns[0] ~= filter or ... or columns[n - 1] ~= filter).
*
* @author jtremeaux
*/
public class OrStringFilterColumn extends StringFilterColumn {
    // Columns to be OR-ed together; the inherited `column` is not used in the
    // generated predicate, only these.
    private String[] columns;

    public OrStringFilterColumn(String column, String filter, String... columns) {
        super(column, filter);
        this.columns = columns;
    }

    /**
     * Builds "(c0 ~= :p or c1 ~= :p or ...)" by delegating each column's
     * predicate to a plain StringFilterColumn.
     */
    @Override
    public String getPredicate() {
        List<String> predicates = new ArrayList<>();
        for (String c : columns) {
            // Anonymous subclass: all per-column filters share ONE parameter
            // name, derived from this (outer) instance's identity hash, so the
            // same filter value binds every branch of the disjunction.
            StringFilterColumn f = new StringFilterColumn(c, filter) {
                @Override
                public String getParamName() {
                    return "filtercolumn_" + OrStringFilterColumn.this.hashCode();
                }
            };
            predicates.add(f.getPredicate());
        }
        return "(" + StringUtils.join(predicates, " or ") + ")";
    }
}
| sismics/play-nativedb | app/helpers/db/filter/column/OrStringFilterColumn.java | Java | apache-2.0 | 1,101 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Reflection;
using System.Web.UI;
namespace LTAF.Engine
{
/// <summary>
/// Reflection facade over System.Web.Extensions (ScriptManager and
/// JavaScriptSerializer) so the testing framework does not take a
/// compile-time dependency on a specific assembly version.
/// </summary>
internal class SystemWebExtensionsWrapper
{
    // volatile is required for the double-checked locking in Initialize():
    // without it, the ECMA memory model permits the flag write to be observed
    // before the method-info fields are published to other threads.
    private volatile bool _methodsInitialized;
    private readonly object _reflectionLock;
    private IAspNetPageService _aspNetPageService;
    // (Renamed from _regiterStartupScriptMethod — private-field typo fix.)
    private MethodInfo _registerStartupScriptMethod;

    public SystemWebExtensionsWrapper(IAspNetPageService aspNetPageService)
    {
        _reflectionLock = new object();
        _aspNetPageService = aspNetPageService;
    }

    /// <summary>
    /// Reference to the System.Web.Extensions assembly
    /// </summary>
    /// <remarks>
    /// Functionality in System.Web.Extensions is being accessed via
    /// reflection because we don't want the testing framework to be tied
    /// to a specific version.
    /// </remarks>
    public Assembly SystemWebExtensionsAssembly { get; set; }

    internal MethodInfo RegisterStartupScriptMethodInfo
    {
        get
        {
            return _registerStartupScriptMethod;
        }
        set
        {
            _registerStartupScriptMethod = value;
        }
    }

    /// <summary>
    /// Resolves ScriptManager.RegisterStartupScript via reflection on first
    /// use. Thread-safe: one-time lookup guarded by double-checked locking.
    /// </summary>
    public void Initialize(Page page)
    {
        if (!_methodsInitialized)
        {
            lock (_reflectionLock)
            {
                if (!_methodsInitialized)
                {
                    Type scriptManagerType = _aspNetPageService.FindControl(page, "DriverPageScriptManager").GetType();
                    SystemWebExtensionsAssembly = scriptManagerType.Assembly;
                    _registerStartupScriptMethod = scriptManagerType.GetMethod(
                        "RegisterStartupScript",
                        new Type[] {
                            typeof(Page),
                            typeof(Type),
                            typeof(string),
                            typeof(string),
                            typeof(bool) });
                    _methodsInitialized = true;
                }
            }
        }
    }

    /// <summary>Invokes the reflected static ScriptManager.RegisterStartupScript.</summary>
    public virtual void RegisterStartupScript(Control control, Type type, string key, string script, bool addScriptTags)
    {
        RegisterStartupScriptMethodInfo.Invoke(null, new object[] { control, type, key, script, addScriptTags });
    }

    /// <summary>
    /// Deserialize a JSON string into a CLR object
    /// </summary>
    /// <param name="json">JSON</param>
    /// <returns>Deserialized JSON</returns>
    public object DeserializeJson(string json)
    {
        if (SystemWebExtensionsAssembly == null)
        {
            throw new InvalidOperationException("SystemWebExtesnionsReference must be provided!");
        }
        Type serializerType = SystemWebExtensionsAssembly.GetType("System.Web.Script.Serialization.JavaScriptSerializer");
        if (serializerType == null)
        {
            throw new InvalidOperationException("Invalid SystemWebExtesnionsReference does not contain System.Web.Script.Serialization.JavaScriptSerializer!");
        }
        MethodInfo deserialize = serializerType.GetMethod("DeserializeObject");
        if (deserialize == null)
        {
            throw new InvalidOperationException("System.Web.Script.Serialization.JavaScriptSerializer does not contain DeserializeObject method!");
        }
        object serializer = Activator.CreateInstance(serializerType);
        if (serializer == null)
        {
            throw new InvalidOperationException("Failed to create System.Web.Script.Serialization.JavaScriptSerializer!");
        }
        return deserialize.Invoke(serializer, new object[] { json });
    }
}
}
| OakRaven/ltaf | src/LTAF/Engine/SystemWebExtensionsWrapper.cs | C# | apache-2.0 | 3,985 |
package main
import (
"log"
"github.com/xitongsys/parquet-go-source/local"
"github.com/xitongsys/parquet-go/parquet"
"github.com/xitongsys/parquet-go/reader"
"github.com/xitongsys/parquet-go/writer"
)
// NameString and AgeInt are named types over primitives, used to demonstrate
// that parquet-go resolves the underlying primitive type of user-defined types.
type NameString string
type AgeInt int32

// Student is the row schema; parquet column metadata lives in the struct tags.
type Student struct {
	Name NameString `parquet:"name=name, type=BYTE_ARRAY, convertedtype=UTF8, encoding=PLAIN_DICTIONARY"`
	Age  AgeInt     `parquet:"name=age, type=INT32, encoding=PLAIN"`
}
// main writes ten Student rows to output/type_alias.parquet and reads them
// back, round-tripping the aliased field types.
// NOTE(review): errors from fw.Close()/fr.Close() are ignored and the files
// are not closed on the early-return error paths — acceptable for an example,
// confirm before reusing this pattern.
func main() {
	var err error
	fw, err := local.NewLocalFileWriter("output/type_alias.parquet")
	if err != nil {
		log.Println("Can't create local file", err)
		return
	}
	//write
	pw, err := writer.NewParquetWriter(fw, new(Student), 4)
	if err != nil {
		log.Println("Can't create parquet writer", err)
		return
	}
	pw.RowGroupSize = 128 * 1024 * 1024 //128M
	pw.PageSize = 8 * 1024             //8K
	pw.CompressionType = parquet.CompressionCodec_SNAPPY
	num := 10
	for i := 0; i < num; i++ {
		stu := Student{
			Name: "StudentName",
			Age:  AgeInt(20 + i%5),
		}
		if err = pw.Write(stu); err != nil {
			log.Println("Write error", err)
		}
	}
	// WriteStop flushes buffered row groups and writes the file footer; it
	// must run before the underlying file is closed.
	if err = pw.WriteStop(); err != nil {
		log.Println("WriteStop error", err)
		return
	}
	log.Println("Write Finished")
	fw.Close()
	///read
	fr, err := local.NewLocalFileReader("output/type_alias.parquet")
	if err != nil {
		log.Println("Can't open file")
		return
	}
	pr, err := reader.NewParquetReader(fr, new(Student), 4)
	if err != nil {
		log.Println("Can't create parquet reader", err)
		return
	}
	// Read everything in one call, sized from the file's row count.
	num = int(pr.GetNumRows())
	stus := make([]Student, num)
	if err = pr.Read(&stus); err != nil {
		log.Println("Read error", err)
	}
	log.Println(stus)
	pr.ReadStop()
	fr.Close()
}
| xitongsys/parquet-go | example/type_alias.go | GO | apache-2.0 | 1,684 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Microsoft.ServiceBus.Messaging;
using System.IO;
using System.Configuration;
using System.Threading;
using Danvy.Azure;
namespace DecoderJob
{
/// <summary>
/// Console/WebJob host that attaches an EventProcessorHost to the "dispatch"
/// Event Hub ("storage" consumer group) and runs until Ctrl+C (console mode)
/// or a WebJobs shutdown signal.
/// </summary>
class Program
{
    // Set by the Ctrl+C handler; polled by the main loop below.
    static bool quit = false;

    public static void Main()
    {
        var eventHubName = "dispatch";
        var consumerGroup = "storage";
        var busConnectionString = ConfigurationManager.ConnectionStrings["SigfoxDemoDispatchListener"].ConnectionString;
        var storageConnectionString = ConfigurationManager.ConnectionStrings["SigfoxDemoStorage"].ConnectionString;
        // Ctrl+C handling only applies when running as a plain console app.
        if (!WebJobsHelper.RunAsWebJobs)
            Console.CancelKeyPress += Console_CancelKeyPress;
        EventHubClient eventHubClient = null;
        // Up to 3 connection attempts; the last failure is rethrown
        // (retries is forced to 0 on success to exit the loop).
        var retries = 3;
        while (retries > 0)
        {
            try
            {
                retries--;
                eventHubClient = EventHubClient.CreateFromConnectionString(busConnectionString, eventHubName);
                retries = 0;
            }
            catch (Exception e)
            {
                Console.Error.WriteLine("Error opening source Event Hub: " + e.Message);
                if (retries == 0)
                    throw;
            }
        }
        // NOTE(review): consumerGroup is always "storage" here, so this branch
        // is dead unless the constant above is changed to null.
        if (consumerGroup == null)
            consumerGroup = eventHubClient.GetDefaultConsumerGroup().GroupName;
        var eventProcessorHost = new EventProcessorHost("StorageProcessor", eventHubClient.Path,
            consumerGroup, busConnectionString, storageConnectionString, eventHubName.ToLowerInvariant());
        eventProcessorHost.RegisterEventProcessorAsync<EventProcessor>().Wait();
        // Poll for shutdown: quiet 50 ms ticks under WebJobs, chatty 1 s ticks
        // with a console heartbeat otherwise.
        while (true)
        {
            if (WebJobsHelper.RunAsWebJobs)
            {
                Thread.Sleep(50);
            }
            else
            {
                Console.WriteLine("Waiting for new messages " + DateTime.UtcNow);
                Thread.Sleep(1000);
            }
            if (quit || WebJobsHelper.NeedShutdown)
                break;
        }
        // Gracefully release the Event Hub leases before exiting.
        eventProcessorHost.UnregisterEventProcessorAsync().Wait();
    }

    private static void Console_CancelKeyPress(object sender, ConsoleCancelEventArgs e)
    {
        quit = true;
    }
}
}
| danvy/sigfox | src/StorageJob/Program.cs | C# | apache-2.0 | 2,508 |
/*
* Waltz - Enterprise Architecture
* Copyright (C) 2016, 2017, 2018, 2019 Waltz open source project
* See README.md for more information
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific
*
*/
package com.khartec.waltz.data.complexity;
import com.khartec.waltz.model.EntityKind;
import com.khartec.waltz.model.tally.ImmutableTally;
import com.khartec.waltz.model.tally.Tally;
import org.jooq.*;
import org.jooq.impl.DSL;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.math.BigDecimal;
import java.util.List;
import static com.khartec.waltz.common.Checks.checkNotNull;
import static com.khartec.waltz.data.logical_flow.LogicalFlowDao.LOGICAL_NOT_REMOVED;
import static com.khartec.waltz.schema.tables.LogicalFlow.LOGICAL_FLOW;
@Deprecated
@Repository
public class ConnectionComplexityDao {

    // Shared aliases so the inbound/outbound sub-queries can be UNIONed and
    // re-aggregated by the same column names.
    private static final Field<Integer> CONNECTION_COUNT_ALIAS = DSL.field("connection_count", Integer.class);
    private static final Field<Long> APP_ID_ALIAS = DSL.field("app_id", Long.class);
    private static final Field<Integer> TOTAL_CONNECTIONS_FIELD = DSL.field("total_connections", Integer.class);

    private static final Field<Long> SOURCE_APP_FIELD = LOGICAL_FLOW.SOURCE_ENTITY_ID.as(APP_ID_ALIAS);
    private static final Field<Long> TARGET_APP_FIELD = LOGICAL_FLOW.TARGET_ENTITY_ID.as(APP_ID_ALIAS);
    private static final Field<Integer> TARGET_COUNT_FIELD = DSL.countDistinct(LOGICAL_FLOW.TARGET_ENTITY_ID).as(CONNECTION_COUNT_ALIAS);
    private static final Field<Integer> SOURCE_COUNT_FIELD = DSL.countDistinct(LOGICAL_FLOW.SOURCE_ENTITY_ID).as(CONNECTION_COUNT_ALIAS);

    private static final String APPLICATION_KIND = EntityKind.APPLICATION.name();

    // Only consider flows where both endpoints are applications and the flow
    // has not been (logically) removed.
    private static final Condition BOTH_ARE_APPLICATIONS_AND_NOT_REMOVED =
            LOGICAL_FLOW.SOURCE_ENTITY_KIND
                    .eq(APPLICATION_KIND)
                    .and(LOGICAL_FLOW.TARGET_ENTITY_KIND
                            .eq(APPLICATION_KIND))
                    .and(LOGICAL_NOT_REMOVED);

    // Distinct targets per source application.
    private static final SelectHavingStep<Record2<Long, Integer>> OUTBOUND_FLOWS =
            DSL.select(SOURCE_APP_FIELD, TARGET_COUNT_FIELD)
                    .from(LOGICAL_FLOW)
                    .where(BOTH_ARE_APPLICATIONS_AND_NOT_REMOVED)
                    .groupBy(LOGICAL_FLOW.SOURCE_ENTITY_ID);

    // Distinct sources per target application.
    private static final SelectHavingStep<Record2<Long, Integer>> INBOUND_FLOWS =
            DSL.select(TARGET_APP_FIELD, SOURCE_COUNT_FIELD)
                    .from(LOGICAL_FLOW)
                    .where(BOTH_ARE_APPLICATIONS_AND_NOT_REMOVED)
                    .groupBy(LOGICAL_FLOW.TARGET_ENTITY_ID);

    // Inbound + outbound distinct-counterparty totals per application.
    private static final SelectHavingStep<Record2<Long, BigDecimal>> TOTAL_FLOW_COUNTS =
            DSL.select(APP_ID_ALIAS, DSL.sum(CONNECTION_COUNT_ALIAS).as(TOTAL_CONNECTIONS_FIELD))
                    .from(OUTBOUND_FLOWS.unionAll(INBOUND_FLOWS))
                    .groupBy(APP_ID_ALIAS);

    private final DSLContext dsl;

    @Autowired
    public ConnectionComplexityDao(DSLContext dsl) {
        // Validate before assignment (previously the null check ran after the
        // field was already set) and keep the field final.
        checkNotNull(dsl, "DSL cannot be null");
        this.dsl = dsl;
    }

    // ---- convenience functions

    /** Baseline (max total connection count) across all applications. */
    public int calculateBaseline() {
        return calculateBaseline(DSL.trueCondition());
    }

    /** Baseline restricted to the applications returned by the given sub-query. */
    public int calculateBaseline(Select<Record1<Long>> appIdProvider) {
        return calculateBaseline(APP_ID_ALIAS.in(appIdProvider));
    }

    /** Baseline restricted to a single application id. */
    public int calculateBaseline(Long appIds) {
        return calculateBaseline(APP_ID_ALIAS.in(appIds));
    }

    /** Total connection tallies for all applications. */
    public List<Tally<Long>> findCounts() {
        return findCounts(DSL.trueCondition());
    }

    /** Tallies restricted to the applications returned by the given sub-query. */
    public List<Tally<Long>> findCounts(Select<Record1<Long>> appIdProvider) {
        return findCounts(APP_ID_ALIAS.in(appIdProvider));
    }

    /** Tallies restricted to the given application ids. */
    public List<Tally<Long>> findCounts(Long... appIds) {
        return findCounts(APP_ID_ALIAS.in(appIds));
    }

    // ---- base queries

    private int calculateBaseline(Condition condition) {
        return dsl.select(DSL.max(TOTAL_CONNECTIONS_FIELD))
                .from(TOTAL_FLOW_COUNTS)
                .where(condition)
                .fetchOptional(0, Integer.class)
                .orElse(0);
    }

    private List<Tally<Long>> findCounts(Condition condition) {
        return dsl.select(APP_ID_ALIAS, TOTAL_CONNECTIONS_FIELD)
                .from(TOTAL_FLOW_COUNTS)
                .where(condition)
                .fetch(r -> ImmutableTally.<Long>builder()
                        .id(r.value1())
                        .count(r.value2())
                        .build());
    }
}
| kamransaleem/waltz | waltz-data/src/main/java/com/khartec/waltz/data/complexity/ConnectionComplexityDao.java | Java | apache-2.0 | 5,062 |
/*
Copyright 2016 ElasticBox All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
 * Controller chaining two filters over `instancesToFilter`:
 *   state filter  -> instancesFilteredByState -> owner filter -> filteredInstances
 * Each stage re-runs automatically via the $scope watches registered below.
 */
class InstanceFiltersController {
    constructor($scope) {
        'ngInject';

        // Output of the state filter; input to the owner filter.
        this.instancesFilteredByState = [];
        this.selectedOwners = [];
        // Final result exposed to the view (both filters applied).
        this.filteredInstances = [];

        // Re-filter by state when the selection or the raw input changes; the
        // owner filter is chained off the state filter's result.
        $scope.$watch('ctrl.selectedState', () => this.filterInstancesByState());
        $scope.$watchCollection('ctrl.instancesToFilter', () => this.filterInstancesByState());
        $scope.$watchCollection('ctrl.selectedOwners', () => this.filterInstancesByOwners());
        $scope.$watchCollection('ctrl.instancesFilteredByState', () => this.filterInstancesByOwners());
    }

    // Keeps instances whose kind (and, when set, status.phase) matches the selected state.
    filterInstancesByState() {
        // NOTE(review): this predicate relies on && binding tighter than ||, so it
        // parses as A || B || (C && D) || (E && F).  The intent may have been
        // A || B || (C && (D || (E && F))) — confirm against UI behaviour before
        // restructuring.
        this.instancesFilteredByState = _.chain(this.instancesToFilter)
            .filter((x) => {
                return _.isUndefined(this.selectedState) || this.selectedState.state.kind === 'all'
                    || this.selectedState.state.kind.toLowerCase() === (x.kind || '').toLowerCase()
                    && _.isUndefined(this.selectedState.substate) || !_.isUndefined(this.selectedState.substate)
                    && _.get(x, 'status.phase') === this.selectedState.substate.state;
            })
            .value();
    }

    // With no owners selected, the owner filter is a pass-through.
    filterInstancesByOwners() {
        this.filteredInstances = _.isEmpty(this.selectedOwners)
            ? this.instancesFilteredByState
            : _.filter(this.instancesFilteredByState, (x) => _.includes(this.selectedOwners, x.owner));
    }
}

export default InstanceFiltersController;
| ElasticBox/elastickube | src/ui/app/instances/ek-instance-filters/ek-instance-filters.controller.js | JavaScript | apache-2.0 | 2,031 |
/*
Copyright (c) 2019 the Octant contributors. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package component
import (
"fmt"
"github.com/vmware-tanzu/octant/internal/util/json"
)
// Error is a component that renders an error message as free text.
//
// +octant:component
type Error struct {
	Base
	Config ErrorConfig `json:"config"`
}
// ErrorConfig holds the contents of an Error component.
type ErrorConfig struct {
	// Data is the formatted error text to display.
	Data string `json:"data,omitempty"`
}
// NewError creates an error component whose body is the detailed
// ("%+v") rendering of err.
func NewError(title []TitleComponent, err error) *Error {
	return &Error{
		Base: newBase(TypeError, title),
		Config: ErrorConfig{
			Data: fmt.Sprintf("%+v", err),
		},
	}
}
// SupportsTitle marks Error as a component that can carry a title.
func (t *Error) SupportsTitle() {}
// errorMarshal is an alias type used to avoid infinite recursion: marshalling
// the alias does not re-invoke Error's own MarshalJSON.
type errorMarshal Error

// MarshalJSON implements json.Marshaler, stamping the metadata type before
// serialising.
func (t *Error) MarshalJSON() ([]byte, error) {
	m := errorMarshal(*t)
	m.Metadata.Type = TypeError
	return json.Marshal(&m)
}
// String returns the text content (the formatted error) of the component.
func (t *Error) String() string {
	return t.Config.Data
}
// LessThan reports whether this component's error text sorts before that of
// the supplied component.  It returns false when the argument is not an *Error.
func (t *Error) LessThan(i interface{}) bool {
	if other, ok := i.(*Error); ok {
		return t.Config.Data < other.Config.Data
	}
	return false
}
| vmware/octant | pkg/view/component/error.go | GO | apache-2.0 | 1,262 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Claims;
namespace HETSAPI.Models
{
/// <summary>
/// User Model Extension
/// </summary>
public static class UserModelExtensions
{
/// <summary>
/// Convert User to ClaimsPrincipal
/// </summary>
/// <param name="user"></param>
/// <param name="authenticationType"></param>
/// <returns></returns>
public static ClaimsPrincipal ToClaimsPrincipal(this User user, string authenticationType)
{
return new ClaimsPrincipal(user.ToClaimsIdentity(authenticationType));
}
private static ClaimsIdentity ToClaimsIdentity(this User user, string authenticationType)
{
return new ClaimsIdentity(user.GetClaims(), authenticationType);
}
private static List<Claim> GetClaims(this User user)
{
List<Claim> claims = new List<Claim> {new Claim(ClaimTypes.Name, user.SmUserId)};
if (!string.IsNullOrEmpty(user.Surname))
claims.Add(new Claim(ClaimTypes.Surname, user.Surname));
if (!string.IsNullOrEmpty(user.GivenName))
claims.Add(new Claim(ClaimTypes.GivenName, user.GivenName));
if (!string.IsNullOrEmpty(user.Email))
claims.Add(new Claim(ClaimTypes.Email, user.Email));
if (user.Id != 0)
claims.Add(new Claim(User.UseridClaim, user.Id.ToString()));
var permissions = user.GetActivePermissions().Select(p => new Claim(User.PermissionClaim, p.Code)).ToList();
if (permissions.Any())
claims.AddRange(permissions);
var roles = user.GetActiveRoles().Select(r => new Claim(ClaimTypes.Role, r.Name)).ToList();
if (roles.Any())
claims.AddRange(roles);
return claims;
}
private static List<Permission> GetActivePermissions(this User user)
{
List<Permission> result = null;
var activeRoles = user.GetActiveRoles();
if (activeRoles != null)
{
IEnumerable<RolePermission> rolePermissions = activeRoles
.Where (x => x != null && x.RolePermissions != null)
.SelectMany(x => x.RolePermissions);
result = rolePermissions.Select(x => x.Permission).Distinct().ToList();
}
return result;
}
private static List<Role> GetActiveRoles(this User user)
{
List<Role> roles = new List<Role>();
if (user.UserRoles == null)
return roles;
roles = user.UserRoles.Where(
x => x.Role != null
&& x.EffectiveDate <= DateTime.UtcNow
&& (x.ExpiryDate == null || x.ExpiryDate > DateTime.UtcNow))
.Select(x => x.Role).ToList();
return roles;
}
}
}
| swcurran/hets | Server/src/HETSAPI/Authorization/UserModelExtensions.cs | C# | apache-2.0 | 3,077 |
/*
Copyright IBM Corp. 2016 All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package config
import (
"time"
"github.com/hyperledger/fabric/common/channelconfig"
ab "github.com/hyperledger/fabric/protos/orderer"
)
// Orderer is a mock implementation of channelconfig.Orderer; each getter simply
// returns the corresponding canned *Val field, which tests set directly.
type Orderer struct {
	// ConsensusTypeVal is returned as the result of ConsensusType()
	ConsensusTypeVal string
	// BatchSizeVal is returned as the result of BatchSize()
	BatchSizeVal *ab.BatchSize
	// BatchTimeoutVal is returned as the result of BatchTimeout()
	BatchTimeoutVal time.Duration
	// KafkaBrokersVal is returned as the result of KafkaBrokers()
	KafkaBrokersVal []string
	// MaxChannelsCountVal is returned as the result of MaxChannelsCount()
	MaxChannelsCountVal uint64
	// OrganizationsVal is returned as the result of Organizations()
	OrganizationsVal map[string]channelconfig.Org
	// CapabilitiesVal is returned as the result of Capabilities()
	CapabilitiesVal channelconfig.OrdererCapabilities
}
// ConsensusType returns the canned ConsensusTypeVal
func (scm *Orderer) ConsensusType() string {
	return scm.ConsensusTypeVal
}

// BatchSize returns the canned BatchSizeVal
func (scm *Orderer) BatchSize() *ab.BatchSize {
	return scm.BatchSizeVal
}

// BatchTimeout returns the canned BatchTimeoutVal
func (scm *Orderer) BatchTimeout() time.Duration {
	return scm.BatchTimeoutVal
}

// KafkaBrokers returns the canned KafkaBrokersVal
func (scm *Orderer) KafkaBrokers() []string {
	return scm.KafkaBrokersVal
}

// MaxChannelsCount returns the canned MaxChannelsCountVal
func (scm *Orderer) MaxChannelsCount() uint64 {
	return scm.MaxChannelsCountVal
}

// Organizations returns the canned OrganizationsVal
func (scm *Orderer) Organizations() map[string]channelconfig.Org {
	return scm.OrganizationsVal
}

// Capabilities returns the canned CapabilitiesVal
func (scm *Orderer) Capabilities() channelconfig.OrdererCapabilities {
	return scm.CapabilitiesVal
}
// OrdererCapabilities mocks the channelconfig.OrdererCapabilities interface;
// tests set the *Val / *Err fields to drive the corresponding getters.
type OrdererCapabilities struct {
	// SupportedErr is returned by Supported()
	SupportedErr error
	// SetChannelModPolicyDuringCreateVal is returned by SetChannelModPolicyDuringCreate()
	SetChannelModPolicyDuringCreateVal bool
	// ResubmissionVal is returned by Resubmission()
	ResubmissionVal bool
}
// Supported returns the canned SupportedErr
func (oc *OrdererCapabilities) Supported() error {
	return oc.SupportedErr
}

// SetChannelModPolicyDuringCreate returns the canned SetChannelModPolicyDuringCreateVal
func (oc *OrdererCapabilities) SetChannelModPolicyDuringCreate() bool {
	return oc.SetChannelModPolicyDuringCreateVal
}

// Resubmission returns the canned ResubmissionVal
func (oc *OrdererCapabilities) Resubmission() bool {
	return oc.ResubmissionVal
}
| mqshen/fabric | common/mocks/config/orderer.go | GO | apache-2.0 | 2,723 |
var fs = require('fs');

// Demonstrates Node's non-blocking I/O: the synchronous log below runs first,
// the callback fires later once the read completes.
fs.readFile('data/file1.txt', function (err, data) {
    if (err) {
        // Previously the error was silently ignored; surface it so a missing
        // file is visible instead of printing 'Second' as if the read worked.
        console.error('Failed to read data/file1.txt:', err.message);
        return;
    }
    console.log('Second');
});
console.log('First');
| acoburn/interterm2014 | async/async1.js | JavaScript | apache-2.0 | 130 |
package com.miloshpetrov.sol2.common;
import com.badlogic.gdx.graphics.Color;
/**
 * Colour utilities for libGDX {@link Color} objects: HSB/RGB conversion
 * (the classic algorithm, equivalent to java.awt.Color's HSBtoRGB/RGBtoHSB),
 * parsing from a string specification, and brightness adjustment.
 */
public class SolColorUtil {

    /**
     * Converts HSB + alpha into RGBA, writing the result into {@code dest}.
     *
     * @param hue        hue, fractional part used (0..1 maps to 0..360 degrees)
     * @param saturation saturation in 0..1
     * @param brightness brightness in 0..1
     * @param a          alpha in 0..1, copied through unchanged
     * @param dest       colour to receive the result
     */
    public static void fromHSB(float hue, float saturation, float brightness, float a, Color dest) {
        float r = 0, g = 0, b = 0;
        if (saturation == 0) {
            // zero saturation: pure grey, all channels equal brightness
            r = g = b = brightness;
        } else {
            // h in [0,6): which of the six hue sectors we are in
            float h = (hue - (float)Math.floor(hue)) * 6.0f;
            float f = h - (float) Math.floor(h);
            float p = brightness * (1.0f - saturation);
            float q = brightness * (1.0f - saturation * f);
            float t = brightness * (1.0f - (saturation * (1.0f - f)));
            switch ((int) h) {
            case 0:
                r = brightness;
                g = t;
                b = p;
                break;
            case 1:
                r = q;
                g = brightness;
                b = p;
                break;
            case 2:
                r = p;
                g = brightness;
                b = t;
                break;
            case 3:
                r = p;
                g = q;
                b = brightness;
                break;
            case 4:
                r = t;
                g = p;
                b = brightness;
                break;
            case 5:
                r = brightness;
                g = p;
                b = q;
                break;
            }
        }
        dest.r = r;
        dest.g = g;
        dest.b = b;
        dest.a = a;
    }

    /**
     * Converts a colour to HSB + alpha.
     *
     * @param src the colour to convert (channels assumed in 0..1)
     * @return a 4-element array: { hue, saturation, brightness, alpha }
     */
    public static float[] toHSB(Color src) {
        // quantise to 0..255 first, mirroring the classic integer algorithm
        int r = (int)(src.r * 255 + .5f);
        int g = (int)(src.g * 255 + .5f);
        int b = (int)(src.b * 255 + .5f);
        float hue, saturation, brightness;
        int cmax = (r > g) ? r : g;
        if (b > cmax) cmax = b;
        int cmin = (r < g) ? r : g;
        if (b < cmin) cmin = b;
        brightness = ((float) cmax) / 255.0f;
        if (cmax != 0)
            saturation = ((float) (cmax - cmin)) / ((float) cmax);
        else
            saturation = 0;
        if (saturation == 0)
            hue = 0;
        else {
            // distance of each channel from the max, used to pick the hue sector
            float redc = ((float) (cmax - r)) / ((float) (cmax - cmin));
            float greenc = ((float) (cmax - g)) / ((float) (cmax - cmin));
            float bluec = ((float) (cmax - b)) / ((float) (cmax - cmin));
            if (r == cmax)
                hue = bluec - greenc;
            else if (g == cmax)
                hue = 2.0f + redc - bluec;
            else
                hue = 4.0f + greenc - redc;
            hue = hue / 6.0f;
            if (hue < 0)
                hue = hue + 1.0f;
        }
        float[] hsba = new float[4];
        hsba[0] = hue;
        hsba[1] = saturation;
        hsba[2] = brightness;
        hsba[3] = src.a;
        return hsba;
    }

    /**
     * Parses a colour from a space-separated spec.  Two forms are accepted:
     * "r g b [a]" with channels 0..255, or "hsb h s b [a]" with h in 0..360
     * and s/b in 0..100 (alpha always 0..255).  Throws NumberFormatException
     * on malformed numeric parts.
     */
    public static Color load(String s) {
        String[] parts = s.split(" ");
        boolean hsb = "hsb".equals(parts[0]);
        int idx = hsb ? 1 : 0;
        int v1 = Integer.parseInt(parts[idx++]);
        int v2 = Integer.parseInt(parts[idx++]);
        int v3 = Integer.parseInt(parts[idx++]);
        float a = 1;
        if (parts.length > idx) a = Integer.parseInt(parts[idx]) / 255f;
        Color res = new Color();
        if (hsb) {
            fromHSB(v1/360f, v2/100f, v3/100f, a, res);
        } else {
            res.set(v1/255f, v2/255f, v3/255f, a);
        }
        return res;
    }

    /**
     * Adjusts brightness in place.  For b &gt; 0 the channels are moved towards
     * white by fraction b; for b &lt;= 0 they are scaled towards black by (1 + b).
     * Alpha is untouched.
     */
    public static void changeBrightness(Color c, float b) {
        if (b > 0) {
            float bi = 1 - b;
            c.r = 1 - bi * (1 - c.r);
            c.g = 1 - bi * (1 - c.g);
            c.b = 1 - bi * (1 - c.b);
            return;
        }
        float bi = 1 + b;
        c.r *= bi;
        c.g *= bi;
        c.b *= bi;
    }
}
| Cervator/DestinationSol | main/src/com/miloshpetrov/sol2/common/SolColorUtil.java | Java | apache-2.0 | 3,119 |
package com.wodejia.myapp.data.contacts;
import java.io.Serializable;
/**
 * Serializable value object describing a single contacts-menu entry
 * (a key/value pair plus a display title).
 *
 * Created by clarence on 16/9/2.
 */
public class ContactsMenuDO implements Serializable {

    // Explicit serialVersionUID keeps the serialized form stable across
    // recompilation, as recommended for Serializable classes.
    private static final long serialVersionUID = 1L;

    // numeric identifier of the menu entry
    private int key;
    // value payload associated with the entry
    private String value;
    // human-readable title shown in the UI
    private String title;

    public int getKey() {
        return key;
    }

    public void setKey(int key) {
        this.key = key;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String value) {
        this.value = value;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }
}
| clarenceV1/MyApp | app/src/main/java/com/wodejia/myapp/data/contacts/ContactsMenuDO.java | Java | apache-2.0 | 634 |
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package scc
import (
"errors"
"fmt"
"github.com/hyperledger/fabric/common/flogging"
"github.com/hyperledger/fabric/common/util"
"github.com/hyperledger/fabric/core/chaincode/shim"
"github.com/hyperledger/fabric/core/common/ccprovider"
"github.com/hyperledger/fabric/core/container/ccintf"
"github.com/hyperledger/fabric/core/container/inproccontroller"
"github.com/hyperledger/fabric/core/peer"
pb "github.com/hyperledger/fabric/protos/peer"
"github.com/spf13/viper"
)
var sysccLogger = flogging.MustGetLogger("sccapi")
// Registrar provides a way for system chaincodes to be registered with the
// peer's in-process container controller.
type Registrar interface {
	// Register registers a system chaincode under the given id
	Register(ccid *ccintf.CCID, cc shim.Chaincode) error
}
// SystemChaincode defines the metadata needed to initialize system chaincode
// when the fabric comes up. SystemChaincodes are installed by adding an
// entry in importsysccs.go
type SystemChaincode struct {
	// Name is the unique name of the system chaincode
	Name string

	// Path to the system chaincode; currently not used
	Path string

	// InitArgs are the initialization arguments to startup the system chaincode
	InitArgs [][]byte

	// Chaincode holds the actual chaincode instance
	Chaincode shim.Chaincode

	// InvokableExternal keeps track of whether
	// this system chaincode can be invoked
	// through a proposal sent to this peer
	InvokableExternal bool

	// InvokableCC2CC keeps track of whether
	// this system chaincode can be invoked
	// by way of a chaincode-to-chaincode
	// invocation
	InvokableCC2CC bool

	// Enabled is a convenient switch to enable/disable system chaincode without
	// having to remove the entry from importsysccs.go
	Enabled bool
}
// SysCCWrapper adapts a SystemChaincode struct to the SelfDescribingSysCC
// interface by delegating each accessor to the wrapped struct's field.
type SysCCWrapper struct {
	SCC *SystemChaincode
}

func (sccw *SysCCWrapper) Name() string              { return sccw.SCC.Name }
func (sccw *SysCCWrapper) Path() string              { return sccw.SCC.Path }
func (sccw *SysCCWrapper) InitArgs() [][]byte        { return sccw.SCC.InitArgs }
func (sccw *SysCCWrapper) Chaincode() shim.Chaincode { return sccw.SCC.Chaincode }
func (sccw *SysCCWrapper) InvokableExternal() bool   { return sccw.SCC.InvokableExternal }
func (sccw *SysCCWrapper) InvokableCC2CC() bool      { return sccw.SCC.InvokableCC2CC }
func (sccw *SysCCWrapper) Enabled() bool             { return sccw.SCC.Enabled }
// SelfDescribingSysCC is the read-only view of a system chaincode's metadata
// consumed by the registration/deployment helpers in this file.
type SelfDescribingSysCC interface {
	// Name is the unique name of the system chaincode
	Name() string

	// Path to the system chaincode; currently not used
	Path() string

	// InitArgs are the initialization arguments to startup the system chaincode
	InitArgs() [][]byte

	// Chaincode returns the underlying chaincode
	Chaincode() shim.Chaincode

	// InvokableExternal keeps track of whether
	// this system chaincode can be invoked
	// through a proposal sent to this peer
	InvokableExternal() bool

	// InvokableCC2CC keeps track of whether
	// this system chaincode can be invoked
	// by way of a chaincode-to-chaincode
	// invocation
	InvokableCC2CC() bool

	// Enabled is a convenient switch to enable/disable system chaincode without
	// having to remove the entry from importsysccs.go
	Enabled() bool
}
// registerSysCC registers the given system chaincode with the peer.  It
// returns true when the chaincode was registered, and false with a nil error
// when the chaincode is disabled or not whitelisted in the peer configuration.
func (p *Provider) registerSysCC(syscc SelfDescribingSysCC) (bool, error) {
	if !syscc.Enabled() || !isWhitelisted(syscc) {
		sysccLogger.Infof("system chaincode (%s,%s,%t) disabled", syscc.Name(), syscc.Path(), syscc.Enabled())
		return false, nil
	}

	// XXX This is an ugly hack, version should be tied to the chaincode instance, not he peer binary
	version := util.GetSysCCVersion()

	ccid := &ccintf.CCID{
		Name:    syscc.Name(),
		Version: version,
	}
	err := p.Registrar.Register(ccid, syscc.Chaincode())
	if err != nil {
		//if the type is registered, the instance may not be... keep going
		if _, ok := err.(inproccontroller.SysCCRegisteredErr); !ok {
			errStr := fmt.Sprintf("could not register (%s,%v): %s", syscc.Path(), syscc, err)
			sysccLogger.Error(errStr)
			// Use errors.New rather than fmt.Errorf(errStr): errStr is a
			// non-constant string that may itself contain '%' characters
			// (e.g. from the wrapped error), which fmt.Errorf would try to
			// interpret as formatting directives.
			return false, errors.New(errStr)
		}
	}

	sysccLogger.Infof("system chaincode %s(%s) registered", syscc.Name(), syscc.Path())
	return true, err
}
// deploySysCC deploys the given system chaincode on a chain.  A disabled or
// non-whitelisted chaincode is silently skipped (nil error).  An empty chainID
// deploys without a ledger-backed transaction simulator.
func deploySysCC(chainID string, ccprov ccprovider.ChaincodeProvider, syscc SelfDescribingSysCC) error {
	if !syscc.Enabled() || !isWhitelisted(syscc) {
		sysccLogger.Info(fmt.Sprintf("system chaincode (%s,%s) disabled", syscc.Name(), syscc.Path()))
		return nil
	}

	txid := util.GenerateUUID()

	// Note, this structure is barely initialized,
	// we omit the history query executor, the proposal
	// and the signed proposal
	txParams := &ccprovider.TransactionParams{
		TxID:      txid,
		ChannelID: chainID,
	}

	if chainID != "" {
		// a missing ledger at startup is unrecoverable, hence the panic
		lgr := peer.GetLedger(chainID)
		if lgr == nil {
			panic(fmt.Sprintf("syschain %s start up failure - unexpected nil ledger for channel %s", syscc.Name(), chainID))
		}

		txsim, err := lgr.NewTxSimulator(txid)
		if err != nil {
			return err
		}

		txParams.TXSimulator = txsim
		defer txsim.Done()
	}

	chaincodeID := &pb.ChaincodeID{Path: syscc.Path(), Name: syscc.Name()}
	spec := &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_Type(pb.ChaincodeSpec_Type_value["GOLANG"]), ChaincodeId: chaincodeID, Input: &pb.ChaincodeInput{Args: syscc.InitArgs()}}
	chaincodeDeploymentSpec := &pb.ChaincodeDeploymentSpec{ExecEnv: pb.ChaincodeDeploymentSpec_SYSTEM, ChaincodeSpec: spec}

	// XXX This is an ugly hack, version should be tied to the chaincode instance, not he peer binary
	version := util.GetSysCCVersion()

	cccid := &ccprovider.CCContext{
		Name:    chaincodeDeploymentSpec.ChaincodeSpec.ChaincodeId.Name,
		Version: version,
	}

	// a non-OK chaincode response is surfaced as an error to the caller
	resp, _, err := ccprov.ExecuteLegacyInit(txParams, cccid, chaincodeDeploymentSpec)
	if err == nil && resp.Status != shim.OK {
		err = errors.New(resp.Message)
	}

	sysccLogger.Infof("system chaincode %s/%s(%s) deployed", syscc.Name(), chainID, syscc.Path())

	return err
}
// deDeploySysCC stops the system chaincode and deregisters it from the
// in-process controller.  chainID is accepted for interface symmetry with
// deploySysCC but is not needed to stop the container.
func deDeploySysCC(chainID string, ccprov ccprovider.ChaincodeProvider, syscc SelfDescribingSysCC) error {
	// XXX This is an ugly hack, version should be tied to the chaincode instance, not he peer binary
	return ccprov.Stop(&ccprovider.ChaincodeContainerInfo{
		Type:          "GOLANG",
		Name:          syscc.Name(),
		Path:          syscc.Path(),
		Version:       util.GetSysCCVersion(),
		ContainerType: inproccontroller.ContainerType,
	})
}
// isWhitelisted reports whether the chaincode is present in the
// "chaincode.system" configuration map with an affirmative value
// ("enable", "true" or "yes").
func isWhitelisted(syscc SelfDescribingSysCC) bool {
	chaincodes := viper.GetStringMapString("chaincode.system")
	val, ok := chaincodes[syscc.Name()]
	if !ok {
		return false
	}
	switch val {
	case "enable", "true", "yes":
		return true
	default:
		return false
	}
}
| xixuejia/fabric | core/scc/sysccapi.go | GO | apache-2.0 | 6,802 |
/*
*
* ==============================================================================
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.wicketstuff.gmap.geocoder.pojos;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
 * POJO for an entity in Google geocoders address_components Array <br/>
 *
 * <p>
 * See also: <a href="https://developers.google.com/maps/documentation/geocoding/?hl=en#Results">
 * Google Geocoder Result Documentation</a><br/>
 *
 * <b>Note:</b><br/>
 * Most of the documentation in this class has been adopted from the Google
 * documentation.<br/>
 * Say thank you to Google!
 * </p>
 *
 * @author Mathias Born - Contact: www.mathiasborn.de
 */
public class GeocoderAddress
{
    /** full text description or name of the address component */
    @JsonProperty("long_name")
    private String longName;

    /** an abbreviated textual name for the address component */
    @JsonProperty("short_name")
    private String shortName;

    /** array indicating the type of the address component. */
    private String[] types;

    /**
     * Get the full text description or name of the address component
     *
     * @return the longName
     */
    public String getLongName()
    {
        return longName;
    }

    /**
     * Set the full text description or name of the address component
     *
     * @param longName
     *            the longName to set
     */
    public void setLongName(String longName)
    {
        this.longName = longName;
    }

    /**
     * Get the abbreviated textual name for the address component
     *
     * @return the shortName
     */
    public String getShortName()
    {
        return shortName;
    }

    /**
     * Set an abbreviated textual name for the address component
     *
     * @param shortName
     *            the shortName to set
     */
    public void setShortName(String shortName)
    {
        this.shortName = shortName;
    }

    /**
     * Get an array that indicates the type of the address component.
     *
     * @return the types
     */
    public String[] getTypes()
    {
        return types;
    }

    /**
     * Set an array that indicates the type of the address component.
     *
     * @param types
     *            the types to set
     */
    public void setTypes(String[] types)
    {
        this.types = types;
    }
}
| rycaon/isis-base | amap_base/src/main/java/org/wicketstuff/gmap/geocoder/pojos/GeocoderAddress.java | Java | apache-2.0 | 2,587 |
namespace F5
{
    /// <summary>
    /// Represents an iRule on the BIGIP LTM
    /// </summary>
    public class Rule
    {
        /// <summary>The name of the iRule as configured on the device.</summary>
        public string Name { get; set; }

        /// <summary>The body (source code) of the iRule.</summary>
        public string Code { get; set; }
    }
}
| jstrassburg/F5BIGIP.NET | F5/Rule.cs | C# | apache-2.0 | 189 |
/*******
Copyright 2015 NeuroBASE,Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
**********/
package com.echopf;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;
/**
* An ECHODate is the extended Date object for the SDK.
*/
public class ECHODate extends Date {
private static final long serialVersionUID = 1L;
/**
* {@.en Constructs a new ECHODate.}
* {@.ja 日時オブジェクトを現在時刻で生成します。}
*/
public ECHODate() {
super();
}
/**
* {@.en Constructs a new ECHODate with an acceptable date string for the API.}
* {@.ja APIの仕様に準拠した文字列形式の日時から、日時オブジェクトを生成します。}
* @param s an acceptable date string for the API (e.g. "2015-02-20 00:00:00")
*/
public ECHODate(String s) throws ParseException {
super();
DateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US);
sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
try {
setTime(sdf.parse(s).getTime());
} catch (ParseException e) {
throw e;
}
}
/**
* {@.en Converts this object to an acceptable date string for the API.}
* {@.ja APIの仕様に準拠した文字列形式の日時へ変換します。}
* @return the formatted date string for the ECHO API.
*/
public String toStringForECHO() {
DateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US);
sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
return sdf.format(this);
}
}
| echopfcom/ECHO-Android-SDK | src/com/echopf/ECHODate.java | Java | apache-2.0 | 2,110 |
/*
Copyright 2014 Dániel Sólyom
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ds.framework.v4.widget;
import android.content.Context;
import android.content.res.TypedArray;
import android.util.AttributeSet;
import ds.framework.v4.R;
import ds.framework.v4.widget.LaizyImageView.LaizyImageViewInfo;
import ds.framework.v4.widget.LaizyImageView.OnImageSetListener;
/**
 * A FlipAnimationLayout that lazily loads images into its (three) child
 * LaizyImageViews and flips between them: while an image loads, a 'loading'
 * placeholder may be flipped in; once loaded, the layout flips to the final
 * image.  Three children are needed so one can show the current image, one the
 * loading placeholder, and one receive the image being loaded.
 */
public class LaizyImageFlipAnimationLayout extends FlipAnimationLayout {

    // info of the image currently being loaded/shown; also used to ignore
    // stale listener callbacks belonging to a previously requested image
    private LaizyImageViewInfo mImageInfo;

    // flip direction used for all animations
    private int mDirection = TOP_TO_BOTTOM;

    // child position the final image is loaded into
    private int mNextImagePosition = 0;

    // child position used for the loading placeholder
    private int mNextLoadingPosition = 0;

    // view currently loading mImageInfo (null when idle)
    private LaizyImageView mNextImageView;

    // true until the first image has been requested (or after reset())
    private boolean mFirstImage = true;

    // true while the loading placeholder is (treated as) visible
    private boolean mShowingLoading = false;

    // set when the loading placeholder must be flipped in once any running
    // animation finishes (consumed in setState()/showLoading())
    private boolean mNeedToShowLoading = false;

    // whether to animate the flip even for the very first image
    private boolean mFlipFirst;

    // Reacts to the loading lifecycle of mNextImageView.  Every callback first
    // checks the view's info against mImageInfo so callbacks for superseded
    // requests are ignored.
    private OnImageSetListener mOnImageSetListener = new OnImageSetListener() {

        @Override
        public void onDefaultSet(LaizyImageView view) {
            if (!view.getInfo().info.equals(mImageInfo.info)) {
                return;
            }
            onFinishedLoading();
        }

        @Override
        public void onLoadingSet(LaizyImageView view) {
            if (!view.getInfo().info.equals(mImageInfo.info)) {
                return;
            }

            mShowingLoading = true;

            if (mNextImagePosition == mNextLoadingPosition) {
                // only happens when we are loading the first image and no need to flip
                ((LaizyImageView) getChildAt(0)).showLoading(mImageInfo);
                return;
            }

            // just animate in the loading image
            mNeedToShowLoading = true;
            showLoading();
        }

        @Override
        public void onErrorSet(LaizyImageView view) {
            if (!view.getInfo().info.equals(mImageInfo.info)) {
                return;
            }
            onFinishedLoading();
        }

        @Override
        public void onImageSet(LaizyImageView view) {
            if (!view.getInfo().info.equals(mImageInfo.info)) {
                return;
            }
            onFinishedLoading();
        }
    };

    public LaizyImageFlipAnimationLayout(Context context) {
        this(context, null);
    }

    public LaizyImageFlipAnimationLayout(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public LaizyImageFlipAnimationLayout(Context context, AttributeSet attrs,
            int defStyle) {
        super(context, attrs, defStyle);

        // read the custom 'flip_first' styleable attribute (defaults to true)
        TypedArray a = context.obtainStyledAttributes(attrs, R.styleable.DsView, defStyle, 0);
        mFlipFirst = a.getBoolean(R.styleable.DsView_flip_first, true);
        a.recycle();
    }

    /**
     * create third image view which will be used to lazy-load the images
     *
     * @param context
     * @return
     */
    protected LaizyImageView createThirdImageView(Context context) {
        return new LaizyImageView(context);
    }

    /** Stops any load/animation and returns to the initial (first-image) state. */
    public void reset() {
        stop();
        setCurrentChild(0);
        mFirstImage = true;
    }

    /**
     * Sets whether the first image should also be animated in.
     *
     * @param always
     */
    public void flipAlways(boolean always) {
        mFlipFirst = always;
    }

    /**
     * Starts loading the given image, flipping to it when ready.  A request for
     * the image already loading/loaded is a no-op.
     *
     * @param info
     */
    public void loadImage(LaizyImageViewInfo info) {
        // NOTE(review): info is dereferenced below (info.needFading) — if both
        // mImageInfo and info are null this throws an NPE; confirm callers
        // never pass null.
        if (mImageInfo != null && (info == null || info.info.equals(mImageInfo.info))) {
            // already loading / loaded this image
            return;
        }

        stop();

        // lazily create the children up to the required three
        if (getChildCount() < 3) {
            for(int i = getChildCount(); i < 3; ++i) {
                final LaizyImageView thirdView = createThirdImageView(getContext());
                addView(thirdView);
            }
        }

        mNeedToShowLoading = false;
        mImageInfo = info;
        info.needFading = false;

        // load the image
        if (mFirstImage) {

            // first image to load
            mNextLoadingPosition = mNextImagePosition = 1;

            // act like we are showing loading so we could do the flip even for the first image
            mShowingLoading = mFlipFirst;
            mFirstImage = false;
        } else {

            // load to the next empty position
            mNextImagePosition = advancePosition(getCurrentChildPosition());

            // the current third position which is not showing and not used to
            // load into it will be the only empty position we can have
            mNextLoadingPosition = advancePosition(mNextImagePosition);
        }
        mNextImageView = (LaizyImageView) getChildAt(mNextImagePosition);
        mNextImageView.setOnImageSetListener(mOnImageSetListener);
        mNextImageView.reset();
        mNextImageView.load(mImageInfo);
    }

    /**
     * Stops any in-flight load and cancels the current animation.
     */
    public void stop() {
        mNeedToShowLoading = false;
        mShowingLoading = false;
        if (mNextImageView != null) {
            mNextImageView.stopLoading();
            mNextImageView.setOnImageSetListener(null);
        }
        mNextImageView = null;
        mImageInfo = null;

        super.cancel();
    }

    /**
     * Sets the flip direction (index into FlipAnimationLayout's DIRECTIONS).
     *
     * @param direction
     */
    public void setDirection(int direction) {
        assert(direction >= 0 && direction < DIRECTIONS.length);

        mDirection = direction;
    }

    @Override
    void setState(int state) {
        super.setState(state);

        if (mNeedToShowLoading && state == STATE_CALM) {

            // still loading the image and the previous flip is finished
            // show loading
            showLoading();
        }
    }

    /**
     * Flips in the loading placeholder if one is pending and no animation is running.
     */
    private void showLoading() {
        if (!mNeedToShowLoading) {
            return;
        }

        if (getState() == STATE_ANIMATING) {

            // wait for the previous animation to finish when loading
            return;
        }

        mNeedToShowLoading = false;

        ((LaizyImageView) getChildAt(mNextLoadingPosition)).showLoading(mImageInfo);
        start(mDirection, mNextLoadingPosition);
    }

    /**
     * Called when loading ended (image, default or error); flips to the result
     * or just switches if no loading placeholder was shown.
     */
    private void onFinishedLoading() {
        if (getCurrentChildPosition() == mNextImagePosition) {

            // we are showing the image that just finished loading so nothing to do
            // except we cancel the animation if there was any
            // this would look messy if animating but mostly it is not the case
            cancel();
            return;
        }
        if (!mShowingLoading) {

            // there was no need to show loading - image was right there
            // just switch without animation
            setCurrentChild(mNextImagePosition);
        } else {

            // flip to the loaded image
            start(mDirection, mNextImagePosition);
        }
        mNextImageView.setOnImageSetListener(null);
    }

    /**
     * Stops everything and shows the given drawable resource in the current child.
     *
     * @param resId
     */
    public void setCurrentTo(int resId) {
        stop();

        ((LaizyImageView) getCurrentChild()).setImageResource(resId);
    }

    /**
     * Returns the next child position, wrapping 2 -> 0.
     */
    private int advancePosition(int position) {
        position++;
        if (position > 2) {
            position = 0;
        }
        return position;
    }
}
| DSolyom/AndroidDSFramework | FrameworkV4/frameworkv4/src/main/java/ds/framework/v4/widget/LaizyImageFlipAnimationLayout.java | Java | apache-2.0 | 6,588 |
/*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.xdebugger.impl.breakpoints;
import com.intellij.lang.Language;
import com.intellij.xdebugger.XExpression;
import com.intellij.xdebugger.evaluation.EvaluationMode;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
 * Default immutable implementation of {@link XExpression}: pairs the expression text with its
 * {@link Language}, an optional custom info string and the {@link EvaluationMode} it should be
 * evaluated in.
 *
 * @author egor
 */
public class XExpressionImpl implements XExpression {
  /** Shared empty expression in {@link EvaluationMode#EXPRESSION} mode. */
  public static final XExpression EMPTY_EXPRESSION = fromText("", EvaluationMode.EXPRESSION);
  /** Shared empty expression in {@link EvaluationMode#CODE_FRAGMENT} mode. */
  public static final XExpression EMPTY_CODE_FRAGMENT = fromText("", EvaluationMode.CODE_FRAGMENT);

  @NotNull private final String myExpression;
  private final Language myLanguage;
  private final String myCustomInfo;
  private final EvaluationMode myMode;

  public XExpressionImpl(@NotNull String expression, Language language, String customInfo) {
    this(expression, language, customInfo, EvaluationMode.EXPRESSION);
  }

  public XExpressionImpl(@NotNull String expression, Language language, String customInfo, EvaluationMode mode) {
    myExpression = expression;
    myLanguage = language;
    myCustomInfo = customInfo;
    myMode = mode;
  }

  @NotNull
  @Override
  public String getExpression() {
    return myExpression;
  }

  @Override
  public Language getLanguage() {
    return myLanguage;
  }

  @Override
  public String getCustomInfo() {
    return myCustomInfo;
  }

  @Override
  public EvaluationMode getMode() {
    return myMode;
  }

  /**
   * Creates an expression in {@link EvaluationMode#EXPRESSION} mode.
   *
   * @return the new expression, or {@code null} when {@code text} is {@code null}
   */
  @Nullable
  public static XExpressionImpl fromText(@Nullable String text) {
    return text != null ? new XExpressionImpl(text, null, null, EvaluationMode.EXPRESSION) : null;
  }

  /**
   * Creates an expression in the given evaluation mode.
   *
   * @return the new expression, or {@code null} when {@code text} is {@code null}
   */
  @Nullable
  public static XExpressionImpl fromText(@Nullable String text, EvaluationMode mode) {
    return text != null ? new XExpressionImpl(text, null, null, mode) : null;
  }

  /** Returns a copy of {@code expression} that differs only in its evaluation mode. */
  public static XExpressionImpl changeMode(XExpression expression, EvaluationMode mode) {
    return new XExpressionImpl(expression.getExpression(), expression.getLanguage(), expression.getCustomInfo(), mode);
  }

  @Override
  public String toString() {
    return myExpression;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;

    XExpressionImpl that = (XExpressionImpl)o;
    // Objects.equals collapses the manual null-check chains of the original.
    return myMode == that.myMode &&
           myExpression.equals(that.myExpression) &&
           Objects.equals(myLanguage, that.myLanguage) &&
           Objects.equals(myCustomInfo, that.myCustomInfo);
  }

  @Override
  public int hashCode() {
    return Objects.hash(myExpression, myLanguage, myCustomInfo, myMode);
  }
}
| ernestp/consulo | platform/xdebugger-impl/src/com/intellij/xdebugger/impl/breakpoints/XExpressionImpl.java | Java | apache-2.0 | 3,535 |
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
/**
 * Renders the sample SNOMED CT expression diagram into the given container
 * using the jQuery SVG plugin and the drawSct* helper functions.
 *
 * Fix: every box/node handle was previously assigned without `var`, leaking
 * a dozen implicit globals; they are now function-local.
 *
 * @param concept   concept to diagram (currently unused; the diagram drawn is a fixed example)
 * @param parentDiv jQuery-wrapped element in which the SVG canvas is created
 */
function drawSctDiagram(concept, parentDiv) {
    parentDiv.svg({settings: {width: '600px', height: '500px'}});
    var svg = parentDiv.svg('get');
    loadDefs(svg);
    // Focus concept box plus the definition-status nodes down the left edge.
    var rect1 = drawSctBox(parentDiv, 10, 10, "<span class='sct-box-id'>12676007<br></span>Fracture of radius", "sct-defined-concept");
    var circle1 = drawEquivalentNode(svg, 120, 130);
    drawSubsumedByNode(svg, 120, 230);
    drawSubsumesNode(svg, 120, 330);
    drawSctBox(parentDiv, 100, 400, "<slot>", "sct-slot");
    connectElements(svg, rect1, circle1, 'center', 'left');
    // Conjunction of the two defining parent concepts.
    var circle2 = drawConjunctionNode(svg, 200, 130);
    connectElements(svg, circle1, circle2, 'right', 'left');
    var rect2 = drawSctBox(parentDiv, 250, 100, "<span class='sct-box-id'>65966004<br></span>Fracture of forearm", "sct-defined-concept");
    connectElements(svg, circle2, rect2, 'bottom', 'left', 'ClearTriangle');
    var rect3 = drawSctBox(parentDiv, 250, 200, "<span class='sct-box-id'>429353004<br></span>Injury of radius", "sct-defined-concept");
    connectElements(svg, circle2, rect3, 'bottom', 'left', 'ClearTriangle');
    // Attribute group: associated morphology + finding site.
    var circle3 = drawAttributeGroupNode(svg, 250, 330);
    connectElements(svg, circle2, circle3, 'bottom', 'left');
    var circle4 = drawConjunctionNode(svg, 300, 330);
    connectElements(svg, circle3, circle4, 'right', 'left');
    var rect4 = drawSctBox(parentDiv, 350, 300, "<span class='sct-box-id'>116676008<br></span>Associated morphology", "sct-attribute");
    connectElements(svg, circle4, rect4, 'right', 'left');
    var rect5 = drawSctBox(parentDiv, 550, 300, "<span class='sct-box-id'>72704001<br></span>Fracture", "sct-primitive-concept");
    connectElements(svg, rect4, rect5, 'right', 'left');
    var rect6 = drawSctBox(parentDiv, 350, 400, "<span class='sct-box-id'>363698007<br></span>Finding site", "sct-attribute");
    connectElements(svg, circle4, rect6, 'bottom', 'left');
    var rect7 = drawSctBox(parentDiv, 550, 400, "<span class='sct-box-id'>62413002<br></span>Bone structure of radius", "sct-primitive-concept");
    connectElements(svg, rect6, rect7, 'right', 'left');
}
/**
 * Shows or hides every SCTID label (.sct-box-id) in the rendered diagram.
 */
function toggleIds() {
    var idLabels = $('.sct-box-id');
    idLabels.toggle();
}
| termMed/ihtsdo-daily-build-browser | js/diagramsTest.js | JavaScript | apache-2.0 | 2,348 |
// Titanium example app exercising the analytics.google module:
// opens a bare window, then fires one of each supported hit type.

// Plain white window so the app has a visible UI while the calls run.
var win = Ti.UI.createWindow({
	backgroundColor:'white'
});
win.open();

// Native Google Analytics SDK wrapper module.
var GA = require('analytics.google');
//GA.optOut = true;
GA.debug = true;                   // verbose module logging
GA.trackUncaughtExceptions = true; // report crashes to GA automatically

// NOTE(review): placeholder property id — must be replaced with a real
// "UA-..." id before this reports anywhere.
var tracker = GA.getTracker("UA-XXXXXX-X");

// Event hit.
tracker.trackEvent({
	category: "category",
	action: "test",
	label: "label",
	value: 1
});

// Social interaction hit.
tracker.trackSocial({
	network: "facebook",
	action: "action",
	target: "target"
});

// User-timing hit (time is in milliseconds).
tracker.trackTiming({
	category: "",
	time: 10,
	name: "",
	label: ""
});

// Screen-view hit.
tracker.trackScreen("Home");

// E-commerce transaction with a single line item.
var transaction = GA.makeTransaction({
	id: "hi",
	tax: 0.6,
	shipping: 0,
	revenue: 24.99 * 0.7
});
transaction.addItem({
	sku: "ABC123",
	name: "My Alphabet",
	category: "product category",
	price: 24.99,
	quantity: 1
});
tracker.trackTransaction(transaction);
| Tipasha/DivinityCraft | modules/iphone/analytics.google/1.0/example/app.js | JavaScript | apache-2.0 | 772 |
<?php
/**
* $Header: /home/rpm/cvs/Comercial/propel/Log/file.php,v 1.1 2007/01/03 20:10:14 rpm Exp $
*
* @version $Revision: 1.1 $
* @package Log
*/
/**
* The Log_file class is a concrete implementation of the Log abstract
* class that logs messages to a text file.
*
* @author Jon Parise <jon@php.net>
* @author Roman Neuhauser <neuhauser@bellavista.cz>
* @since Log 1.0
* @package Log
*
* @example file.php Using the file handler.
*/
class Log_file extends Log
{
    /**
     * String containing the name of the log file.
     * @var string
     * @access private
     */
    var $_filename = 'php.log';

    /**
     * Handle to the log file.
     * @var resource
     * @access private
     */
    var $_fp = false;

    /**
     * Should new log entries be appended to an existing log file, or should
     * a new log file overwrite an existing one?
     * @var boolean
     * @access private
     */
    var $_append = true;

    /**
     * Should advisory file locking (i.e., flock()) be used?
     * @var boolean
     * @access private
     */
    var $_locking = false;

    /**
     * Integer (in octal) containing the log file's permissions mode.
     * @var integer
     * @access private
     */
    var $_mode = 0644;

    /**
     * Integer (in octal) specifying the file permission mode that will be
     * used when creating directories that do not already exist.
     * @var integer
     * @access private
     */
    var $_dirmode = 0755;

    /**
     * String containing the format of a log line.
     * @var string
     * @access private
     */
    var $_lineFormat = '%1$s %2$s [%3$s] %4$s';

    /**
     * String containing the timestamp format. It will be passed directly to
     * strftime(). Note that the timestamp string will generated using the
     * current locale.
     * @var string
     * @access private
     */
    var $_timeFormat = '%b %d %H:%M:%S';

    /**
     * String containing the end-on-line character sequence.
     * @var string
     * @access private
     */
    var $_eol = "\n";

    /**
     * Constructs a new Log_file object.
     *
     * @param string $name Ignored.
     * @param string $ident The identity string.
     * @param array $conf The configuration array.
     * @param int $level Log messages up to and including this level.
     * @access public
     */
    function Log_file($name, $ident = '', $conf = array(),
                      $level = PEAR_LOG_DEBUG)
    {
        $this->_id = md5(microtime());
        $this->_filename = $name;
        $this->_ident = $ident;
        $this->_mask = Log::UPTO($level);

        if (isset($conf['append'])) {
            $this->_append = $conf['append'];
        }

        if (isset($conf['locking'])) {
            $this->_locking = $conf['locking'];
        }

        /* Permission modes may be given either as octal strings or ints. */
        if (!empty($conf['mode'])) {
            if (is_string($conf['mode'])) {
                $this->_mode = octdec($conf['mode']);
            } else {
                $this->_mode = $conf['mode'];
            }
        }

        if (!empty($conf['dirmode'])) {
            if (is_string($conf['dirmode'])) {
                $this->_dirmode = octdec($conf['dirmode']);
            } else {
                $this->_dirmode = $conf['dirmode'];
            }
        }

        if (!empty($conf['lineFormat'])) {
            $this->_lineFormat = str_replace(array_keys($this->_formatMap),
                                             array_values($this->_formatMap),
                                             $conf['lineFormat']);
        }

        if (!empty($conf['timeFormat'])) {
            $this->_timeFormat = $conf['timeFormat'];
        }

        if (!empty($conf['eol'])) {
            $this->_eol = $conf['eol'];
        } else {
            $this->_eol = (strstr(PHP_OS, 'WIN')) ? "\r\n" : "\n";
        }

        /* Ensure the file handle is flushed and closed at shutdown. */
        register_shutdown_function(array(&$this, '_Log_file'));
    }

    /**
     * Destructor
     */
    function _Log_file()
    {
        if ($this->_opened) {
            $this->close();
        }
    }

    /**
     * Creates the given directory path.  If the parent directories don't
     * already exist, they will be created, too.
     *
     * This implementation is inspired by Python's os.makedirs function.
     *
     * @param   string  $path       The full directory path to create.
     * @param   integer $mode       The permissions mode with which the
     *                              directories will be created.
     *
     * @return  True if the full path is successfully created or already
     *          exists.
     *
     * @access  private
     */
    function _mkpath($path, $mode = 0700)
    {
        /* Separate the last pathname component from the rest of the path. */
        $head = dirname($path);
        $tail = basename($path);

        /* Make sure we've split the path into two complete components.
         * If the basename is empty (e.g. the path ended in a separator),
         * split the head again -- previously this branch repeated the
         * identical split of $path and therefore did nothing. */
        if (empty($tail)) {
            $tail = basename($head);
            $head = dirname($head);
        }

        /* Recurse up the path if our current segment does not exist. */
        if (!empty($head) && !empty($tail) && !is_dir($head)) {
            $this->_mkpath($head, $mode);
        }

        /* Create this segment of the path. */
        return @mkdir($head, $mode);
    }

    /**
     * Opens the log file for output.  If the specified log file does not
     * already exist, it will be created.  By default, new log entries are
     * appended to the end of the log file.
     *
     * This is implicitly called by log(), if necessary.
     *
     * @access public
     */
    function open()
    {
        if (!$this->_opened) {
            /* If the log file's directory doesn't exist, create it. */
            if (!is_dir(dirname($this->_filename))) {
                $this->_mkpath($this->_filename, $this->_dirmode);
            }

            /* Determine whether the log file needs to be created. */
            $creating = !file_exists($this->_filename);

            /* Obtain a handle to the log file. */
            $this->_fp = fopen($this->_filename, ($this->_append) ? 'a' : 'w');

            /* We consider the file "opened" if we have a valid file pointer. */
            $this->_opened = ($this->_fp !== false);

            /* Attempt to set the file's permissions if we just created it. */
            if ($creating && $this->_opened) {
                chmod($this->_filename, $this->_mode);
            }
        }

        return $this->_opened;
    }

    /**
     * Closes the log file if it is open.
     *
     * @access public
     */
    function close()
    {
        /* If the log file is open, close it. */
        if ($this->_opened && fclose($this->_fp)) {
            $this->_opened = false;
        }

        return ($this->_opened === false);
    }

    /**
     * Flushes all pending data to the file handle.
     *
     * @access public
     * @since Log 1.8.2
     */
    function flush()
    {
        /* Guard against flushing before open() has produced a handle. */
        if (is_resource($this->_fp)) {
            return fflush($this->_fp);
        }

        return false;
    }

    /**
     * Logs $message to the output window.  The message is also passed along
     * to any Log_observer instances that are observing this Log.
     *
     * @param mixed  $message  String or object containing the message to log.
     * @param string $priority The priority of the message.  Valid
     *                  values are: PEAR_LOG_EMERG, PEAR_LOG_ALERT,
     *                  PEAR_LOG_CRIT, PEAR_LOG_ERR, PEAR_LOG_WARNING,
     *                  PEAR_LOG_NOTICE, PEAR_LOG_INFO, and PEAR_LOG_DEBUG.
     * @return boolean  True on success or false on failure.
     * @access public
     */
    function log($message, $priority = null)
    {
        /* If a priority hasn't been specified, use the default value. */
        if ($priority === null) {
            $priority = $this->_priority;
        }

        /* Abort early if the priority is above the maximum logging level. */
        if (!$this->_isMasked($priority)) {
            return false;
        }

        /* If the log file isn't already open, open it now. */
        if (!$this->_opened && !$this->open()) {
            return false;
        }

        /* Extract the string representation of the message. */
        $message = $this->_extractMessage($message);

        /* Build the string containing the complete log line. */
        $line = $this->_format($this->_lineFormat,
                               strftime($this->_timeFormat),
                               $priority, $message) . $this->_eol;

        /* If locking is enabled, acquire an exclusive lock on the file. */
        if ($this->_locking) {
            flock($this->_fp, LOCK_EX);
        }

        /* Write the log line to the log file. */
        $success = (fwrite($this->_fp, $line) !== false);

        /* Unlock the file now that we're finished writing to it. */
        if ($this->_locking) {
            flock($this->_fp, LOCK_UN);
        }

        /* Notify observers about this log message. */
        $this->_announce(array('priority' => $priority, 'message' => $message));

        return $success;
    }
}
| rodrigoprestesmachado/whiteboard | propel/Log/file.php | PHP | apache-2.0 | 9,393 |
package com.simplegame.server.bus.client.io.action;
import javax.annotation.Resource;
import com.simplegame.core.action.annotation.ActionMapping;
import com.simplegame.core.action.annotation.ActionWorker;
import com.simplegame.core.message.Message;
import com.simplegame.server.bus.client.io.command.ClientIoCommands;
import com.simplegame.server.bus.client.io.service.IIoService;
/**
 * Action worker for client I/O bus messages: handles the "role out"
 * (client/role leaving) command by delegating to the I/O service.
 */
@ActionWorker
public class BusOutAction {

    @Resource
    private IIoService ioService;

    /**
     * Dispatched for {@code ClientIoCommands.ROLE_OUT}; forwards the id of
     * the departing role to the I/O service.
     *
     * @param message bus message carrying the role id that went out
     */
    @ActionMapping(mapping = ClientIoCommands.ROLE_OUT)
    public void roleOut(Message message) {
        this.ioService.roleOut(message.getRoleId());
    }
}
| zuesgooogle/game-server | src/main/java/com/simplegame/server/bus/client/io/action/BusOutAction.java | Java | apache-2.0 | 640 |
using System.Runtime.InteropServices;
namespace BAP.Loader.PE
{
/// <summary>
/// Every image file has an optional header that provides information to the loader.
/// This header is optional in the sense that some files (specifically, object files) do not have it.
/// For image files, this header is required.
/// An object file can have an optional header, but generally this header has no function in an object file except to increase its size.
/// Note that the size of the optional header is not fixed.
/// The SizeOfOptionalHeader field in the COFF header must be used to validate that a probe into the file for a particular data directory does not go beyond SizeOfOptionalHeader.
/// For more information, see section 3.3, "COFF File Header (Object and Image)."
///
/// The NumberOfRvaAndSizes field of the optional header should also be used to ensure that no probe for a particular data directory entry goes beyond the optional header.
/// In addition, it is important to validate the optional header magic number for format compatibility.
/// </summary>
/// <remarks>
/// Explicit layout: each FieldOffset is the byte offset of the field within the
/// PE32 optional header as defined by the PE/COFF specification; do not reorder
/// or re-offset fields.
/// </remarks>
[StructLayout(LayoutKind.Explicit)]
public struct IMAGE_OPTIONAL_HEADER32
{
    #region Optional Header Standard Fields (Image Only)

    // The first eight fields of the optional header are standard fields that are defined for every implementation of COFF.
    // These fields contain general information that is useful for loading and running an executable file.
    // They are unchanged for the PE32+ format.

    /// <summary>
    /// The unsigned integer that identifies the state of the image file.
    /// The most common number is 0x10B, which identifies it as a normal executable file.
    /// 0x107 identifies it as a ROM image, and 0x20B identifies it as a PE32+ executable.
    /// </summary>
    [FieldOffset(0)]
    public IMAGE_OPTIONAL_HEADER_MAGIC Magic;

    /// <summary>
    /// The linker major version number.
    /// </summary>
    [FieldOffset(2)]
    public byte MajorLinkerVersion;

    /// <summary>
    /// The linker minor version number.
    /// </summary>
    [FieldOffset(3)]
    public byte MinorLinkerVersion;

    /// <summary>
    /// The size of the code (text) section, or the sum of all code sections if there are multiple sections.
    /// </summary>
    [FieldOffset(4)]
    public uint SizeOfCode;

    /// <summary>
    /// The size of the initialized data section, or the sum of all such sections if there are multiple data sections.
    /// </summary>
    [FieldOffset(8)]
    public uint SizeOfInitializedData;

    /// <summary>
    /// The size of the uninitialized data section (BSS), or the sum of all such sections if there are multiple BSS sections.
    /// </summary>
    [FieldOffset(12)]
    public uint SizeOfUninitializedData;

    /// <summary>
    /// The address of the entry point relative to the image base when the executable file is loaded into memory.
    /// For program images, this is the starting address.
    /// For device drivers, this is the address of the initialization function. An entry point is optional for DLLs.
    /// When no entry point is present, this field must be zero.
    /// </summary>
    [FieldOffset(16)]
    public uint AddressOfEntryPoint;

    /// <summary>
    /// The address that is relative to the image base of the beginning-of-code section when it is loaded into memory.
    /// </summary>
    [FieldOffset(20)]
    public uint BaseOfCode;

    #endregion

    #region PE32 Only

    // PE32 contains this additional field, which is absent in PE32+, following BaseOfCode.

    /// <summary>
    /// The address that is relative to the image base of the beginning-of-data section when it is loaded into memory.
    /// </summary>
    [FieldOffset(24)]
    public uint BaseOfData;

    #endregion

    #region Optional Header Windows-Specific Fields (Image Only)

    /// <summary>
    /// The preferred address of the first byte of image when loaded into memory;
    /// must be a multiple of 64 K. The default for DLLs is 0x10000000.
    /// The default for Windows CE EXEs is 0x00010000.
    /// The default for Windows NT, Windows 2000, Windows XP, Windows 95, Windows 98, and Windows Me is 0x00400000.
    /// </summary>
    [FieldOffset(28)]
    public uint ImageBase;

    /// <summary>
    /// The alignment (in bytes) of sections when they are loaded into memory.
    /// It must be greater than or equal to FileAlignment.
    /// The default is the page size for the architecture.
    /// </summary>
    [FieldOffset(32)]
    public uint SectionAlignment;

    /// <summary>
    /// The alignment factor (in bytes) that is used to align the raw data of sections in the image file.
    /// The value should be a power of 2 between 512 and 64 K, inclusive.
    /// The default is 512.
    /// If the SectionAlignment is less than the architecture's page size, then FileAlignment must match SectionAlignment.
    /// </summary>
    [FieldOffset(36)]
    public uint FileAlignment;

    /// <summary>
    /// The major version number of the required operating system.
    /// </summary>
    [FieldOffset(40)]
    public ushort MajorOperatingSystemVersion;

    /// <summary>
    /// The minor version number of the required operating system.
    /// </summary>
    [FieldOffset(42)]
    public ushort MinorOperatingSystemVersion;

    /// <summary>
    /// The major version number of the image.
    /// </summary>
    [FieldOffset(44)]
    public ushort MajorImageVersion;

    /// <summary>
    /// The minor version number of the image.
    /// </summary>
    [FieldOffset(46)]
    public ushort MinorImageVersion;

    /// <summary>
    /// The major version number of the subsystem.
    /// </summary>
    [FieldOffset(48)]
    public ushort MajorSubsystemVersion;

    /// <summary>
    /// The minor version number of the subsystem.
    /// </summary>
    [FieldOffset(50)]
    public ushort MinorSubsystemVersion;

    /// <summary>
    /// Reserved, must be zero.
    /// </summary>
    [FieldOffset(52)]
    public uint Win32VersionValue;

    /// <summary>
    /// The size (in bytes) of the image, including all headers, as the image is loaded in memory.
    /// It must be a multiple of SectionAlignment.
    /// </summary>
    [FieldOffset(56)]
    public uint SizeOfImage;

    /// <summary>
    /// The combined size of an MS DOS stub, PE header, and section headers rounded up to a multiple of FileAlignment.
    /// </summary>
    [FieldOffset(60)]
    public uint SizeOfHeaders;

    /// <summary>
    /// The image file checksum.
    /// The algorithm for computing the checksum is incorporated into IMAGHELP.DLL.
    /// The following are checked for validation at load time:
    /// all drivers,
    /// any DLL loaded at boot time,
    /// and any DLL that is loaded into a critical Windows process.
    /// </summary>
    [FieldOffset(64)]
    public uint CheckSum;

    /// <summary>
    /// The subsystem that is required to run this image.
    /// For more information, see "Windows Subsystem" later in this specification.
    /// </summary>
    [FieldOffset(68)]
    public IMAGE_SUBSYSTEM Subsystem;

    /// <summary>
    /// For more information, see "DLL Characteristics" later in this specification.
    /// </summary>
    [FieldOffset(70)]
    public DllCharacteristicsType DllCharacteristics;

    /// <summary>
    /// The size of the stack to reserve. Only SizeOfStackCommit is committed;
    /// the rest is made available one page at a time until the reserve size is reached.
    /// </summary>
    [FieldOffset(72)]
    public uint SizeOfStackReserve;

    /// <summary>
    /// The size of the stack to commit.
    /// </summary>
    [FieldOffset(76)]
    public uint SizeOfStackCommit;

    /// <summary>
    /// The size of the local heap space to reserve.
    /// Only SizeOfHeapCommit is committed; the rest is made available one page at a time until the reserve size is reached.
    /// </summary>
    [FieldOffset(80)]
    public uint SizeOfHeapReserve;

    /// <summary>
    /// The size of the local heap space to commit.
    /// </summary>
    [FieldOffset(84)]
    public uint SizeOfHeapCommit;

    /// <summary>
    /// Reserved, must be zero.
    /// </summary>
    [FieldOffset(88)]
    public uint LoaderFlags;

    /// <summary>
    /// The number of data-directory entries in the remainder of the optional header.
    /// Each describes a location and size.
    /// </summary>
    [FieldOffset(92)]
    public uint NumberOfRvaAndSizes;

    #endregion

    #region Optional Header Data Directories (Image Only)

    /// <summary>
    /// The export table address and size. For more information see section 6.3, "The .edata Section (Image Only)."
    /// </summary>
    [FieldOffset(96)]
    public IMAGE_DATA_DIRECTORY ExportTable;

    /// <summary>
    /// The import table address and size. For more information, see section 6.4, "The .idata Section."
    /// </summary>
    [FieldOffset(104)]
    public IMAGE_DATA_DIRECTORY ImportTable;

    /// <summary>
    /// The resource table address and size. For more information, see section 6.9, "The .rsrc Section."
    /// </summary>
    [FieldOffset(112)]
    public IMAGE_DATA_DIRECTORY ResourceTable;

    /// <summary>
    /// The exception table address and size. For more information, see section 6.5, "The .pdata Section."
    /// </summary>
    [FieldOffset(120)]
    public IMAGE_DATA_DIRECTORY ExceptionTable;

    /// <summary>
    /// The attribute certificate table address and size. For more information, see section 5.7, "The attribute certificate table (Image Only)."
    /// </summary>
    [FieldOffset(128)]
    public IMAGE_DATA_DIRECTORY CertificateTable;

    /// <summary>
    /// The base relocation table address and size. For more information, see section 6.6, "The .reloc Section (Image Only)."
    /// </summary>
    [FieldOffset(136)]
    public IMAGE_DATA_DIRECTORY BaseRelocationTable;

    /// <summary>
    /// The debug data starting address and size. For more information, see section 6.1, "The .debug Section."
    /// </summary>
    [FieldOffset(144)]
    public IMAGE_DATA_DIRECTORY Debug;

    /// <summary>
    /// Reserved, must be 0
    /// </summary>
    [FieldOffset(152)]
    public IMAGE_DATA_DIRECTORY Architecture;

    /// <summary>
    /// The RVA of the value to be stored in the global pointer register.
    /// The size member of this structure must be set to zero.
    /// </summary>
    [FieldOffset(160)]
    public IMAGE_DATA_DIRECTORY GlobalPtr;

    /// <summary>
    /// The thread local storage (TLS) table address and size. For more information, see section 6.7, "The .tls Section."
    /// </summary>
    [FieldOffset(168)]
    public IMAGE_DATA_DIRECTORY TLSTable;

    /// <summary>
    /// The load configuration table address and size. For more information, see section 6.8, "The Load Configuration Structure (Image Only)."
    /// </summary>
    [FieldOffset(176)]
    public IMAGE_DATA_DIRECTORY LoadConfigTable;

    /// <summary>
    /// The bound import table address and size.
    /// </summary>
    [FieldOffset(184)]
    public IMAGE_DATA_DIRECTORY BoundImport;

    /// <summary>
    /// The import address table address and size. For more information, see section 6.4.4, "Import Address Table."
    /// </summary>
    [FieldOffset(192)]
    public IMAGE_DATA_DIRECTORY IAT;

    /// <summary>
    /// The delay import descriptor address and size. For more information, see section 5.8, "Delay-Load Import Tables (Image Only)."
    /// </summary>
    [FieldOffset(200)]
    public IMAGE_DATA_DIRECTORY DelayImportDescriptor;

    /// <summary>
    /// The CLR runtime header address and size. For more information, see section 6.10, "The .cormeta Section (Object Only)."
    /// </summary>
    [FieldOffset(208)]
    public IMAGE_DATA_DIRECTORY CLRRuntimeHeader;

    /// <summary>
    /// Reserved, must be zero
    /// </summary>
    [FieldOffset(216)]
    public IMAGE_DATA_DIRECTORY Reserved;

    #endregion
}
} | binsys/bap | Code/BAP/BAP.Loader.PE/IMAGE_OPTIONAL_HEADER32.cs | C# | apache-2.0 | 11,882 |
/*
* Copyright (C) 2013 salesforce.com, inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.auraframework.impl.system;
import static org.auraframework.instance.AuraValueProviderType.LABEL;
import java.io.Serializable;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import org.auraframework.Aura;
import org.auraframework.builder.DefBuilder;
import org.auraframework.def.DefDescriptor;
import org.auraframework.def.Definition;
import org.auraframework.def.DefinitionAccess;
import org.auraframework.expression.PropertyReference;
import org.auraframework.impl.DefinitionAccessImpl;
import org.auraframework.instance.GlobalValueProvider;
import org.auraframework.system.Location;
import org.auraframework.system.SubDefDescriptor;
import org.auraframework.throwable.AuraExceptionInfo;
import org.auraframework.throwable.quickfix.InvalidDefinitionException;
import org.auraframework.throwable.quickfix.QuickFixException;
import org.auraframework.util.json.Serialization;
import org.auraframework.util.json.Serialization.ReferenceScope;
import org.auraframework.util.json.Serialization.ReferenceType;
import org.auraframework.util.text.Hash;
import com.google.common.collect.Maps;
/**
* The implementation for a definition.
*/
@Serialization(referenceType = ReferenceType.IDENTITY, referenceScope = ReferenceScope.REQUEST)
public abstract class DefinitionImpl<T extends Definition> implements Definition, Serializable {
private static final long serialVersionUID = 5836732915093913670L;
protected final DefDescriptor<T> descriptor;
protected final Location location;
protected final Map<SubDefDescriptor<?, T>, Definition> subDefs;
protected final String apiVersion;
protected final String description;
private final QuickFixException parseError;
private final String ownHash;
private final DefinitionAccess access;
private boolean valid;
protected DefinitionImpl(DefDescriptor<T> descriptor, Location location) {
this(descriptor, location, null, null, null, null, null, null);
}
protected DefinitionImpl(RefBuilderImpl<T, ?> builder) {
this(builder.getDescriptor(), builder.getLocation(), builder.subDefs, builder.apiVersion, builder.description,
builder.getAccess(), builder.getOwnHash(), builder.getParseError());
}
DefinitionImpl(DefDescriptor<T> descriptor, Location location, Map<SubDefDescriptor<?, T>, Definition> subDefs,
String apiVersion, String description, DefinitionAccess access, String ownHash,
QuickFixException parseError) {
this.descriptor = descriptor;
this.location = location;
this.subDefs = subDefs;
this.apiVersion = apiVersion;
this.description = description;
this.ownHash = ownHash;
this.parseError = parseError;
this.access = access == null ? DefinitionAccessImpl.defaultAccess(descriptor != null ? descriptor.getNamespace() : null) : access;
}
/**
* @see Definition#getDescriptor()
*/
@Override
public DefDescriptor<T> getDescriptor() {
return descriptor;
}
/**
* @see Definition#getLocation()
*/
@Override
public Location getLocation() {
return location;
}
@Override
public DefinitionAccess getAccess() {
return access;
}
/**
* @see Definition#getName()
*/
@Override
public String getName() {
return descriptor == null ? getClass().getName() : descriptor.getName();
}
@Override
public String getOwnHash() {
return ownHash;
}
/**
* @throws QuickFixException
* @see Definition#appendDependencies(java.util.Set)
*/
@Override
public void appendDependencies(Set<DefDescriptor<?>> dependencies) {
}
/**
* @throws QuickFixException
* @see Definition#appendSupers(java.util.Set)
*/
@Override
public void appendSupers(Set<DefDescriptor<?>> dependencies) throws QuickFixException {
}
/**
* @throws QuickFixException
* @see Definition#validateDefinition()
*/
@Override
public void validateDefinition() throws QuickFixException {
if (parseError != null) {
throw parseError;
}
if (descriptor == null) {
throw new InvalidDefinitionException("No descriptor", location);
}
}
@Override
public void markValid() {
this.valid = true;
}
@Override
public boolean isValid() {
return this.valid;
}
/**
* @throws QuickFixException
* @see Definition#validateReferences()
*/
@Override
public void validateReferences() throws QuickFixException {
}
@Override
public String toString() {
// getDescriptor is not always non-null (though is should be). Avoid
// throwing a null pointer
// exception when someone asks for a string representation.
if (getDescriptor() != null) {
return getDescriptor().toString();
} else {
return "INVALID[" + this.location + "]: " + this.description;
}
}
@SuppressWarnings("unchecked")
@Override
public <D extends Definition> D getSubDefinition(SubDefDescriptor<D, ?> sddesc) {
if (subDefs == null) {
return null;
}
return (D) subDefs.get(sddesc);
}
public abstract static class BuilderImpl<T extends Definition> extends RefBuilderImpl<T, T> {
protected BuilderImpl(Class<T> defClass) {
super(defClass);
}
};
public abstract static class RefBuilderImpl<T extends Definition, A extends Definition> implements DefBuilder<T, A> {
private boolean descriptorLocked;
public DefDescriptor<T> descriptor;
public Location location;
public Map<SubDefDescriptor<?, T>, Definition> subDefs;
private final Class<T> defClass;
public String apiVersion;
public String description;
public Hash hash;
public String ownHash;
private QuickFixException parseError;
private DefinitionAccess access;
protected RefBuilderImpl(Class<T> defClass) {
this.defClass = defClass;
//this.ownHash = String.valueOf(System.currentTimeMillis());
}
public RefBuilderImpl<T, A> setAccess(DefinitionAccess access) {
this.access = access;
return this;
}
public DefinitionAccess getAccess() {
return access;
}
@Override
public RefBuilderImpl<T, A> setLocation(String fileName, int line, int column, long lastModified) {
location = new Location(fileName, line, column, lastModified);
return this;
}
@Override
public RefBuilderImpl<T, A> setLocation(String fileName, long lastModified) {
location = new Location(fileName, lastModified);
return this;
}
@Override
public RefBuilderImpl<T, A> setLocation(Location location) {
this.location = location;
return this;
}
public Location getLocation() {
return this.location;
}
public RefBuilderImpl<T, A> addSubDef(SubDefDescriptor<?, T> sddesc, Definition inner) {
if (this.subDefs == null) {
this.subDefs = Maps.newHashMap();
}
this.subDefs.put(sddesc, inner);
return this;
}
public RefBuilderImpl<T, A> lockDescriptor(DefDescriptor<T> desc) {
this.descriptorLocked = true;
this.descriptor = desc;
return this;
}
@Override
public RefBuilderImpl<T, A> setDescriptor(String qualifiedName) {
try {
return this.setDescriptor(DefDescriptorImpl.getInstance(qualifiedName, defClass));
} catch (Exception e) {
setParseError(e);
return this;
}
}
@Override
public RefBuilderImpl<T, A> setDescriptor(DefDescriptor<T> desc) {
if (!this.descriptorLocked) {
this.descriptor = desc;
}
return this;
}
@Override
public DefDescriptor<T> getDescriptor() {
return descriptor;
}
@Override
public RefBuilderImpl<T, A> setAPIVersion(String apiVersion) {
this.apiVersion = apiVersion;
return this;
}
@Override
public RefBuilderImpl<T, A> setDescription(String description) {
this.description = description;
return this;
}
/**
 * Sets the hash object for this definition's own content. A non-null hash
 * clears any previously set string hash so that getOwnHash() re-derives the
 * string form from this object.
 */
@Override
public RefBuilderImpl<T,A> setOwnHash(Hash hash) {
    if (hash != null) {
        this.ownHash = null;
    }
    this.hash = hash;
    return this;
}

/** Sets the hash as a precomputed string, taking precedence over the Hash object. */
@Override
public RefBuilderImpl<T,A> setOwnHash(String ownHash) {
    this.ownHash = ownHash;
    return this;
}

/**
 * @return the string form of the hash, lazily derived from the Hash object
 *         when no string was set and the hash has been computed.
 */
private String getOwnHash() {
    //
    // Try to make sure that we have a hash string.
    //
    if (ownHash == null && hash != null && hash.isSet()) {
        ownHash = hash.toString();
    }
    return ownHash;
}
/**
 * Records a parse failure for this builder. Only the first error is kept;
 * later calls are ignored. Non-QuickFixException causes are wrapped in an
 * InvalidDefinitionException, carrying the cause's location when available.
 */
@Override
public void setParseError(Throwable cause) {
    if (this.parseError == null) {
        if (cause instanceof QuickFixException) {
            this.parseError = (QuickFixException) cause;
        } else {
            // Pull a source location off the cause when it exposes one.
            Location errorLocation = (cause instanceof AuraExceptionInfo)
                    ? ((AuraExceptionInfo) cause).getLocation()
                    : null;
            this.parseError = new InvalidDefinitionException(cause.getMessage(), errorLocation, cause);
        }
    }
}
/** @return the first parse error recorded via setParseError(), or null if none. */
@Override
public QuickFixException getParseError() {
    return parseError;
}
}
/**
 * Default label retrieval: this base definition references no labels, so
 * there is nothing to fetch. Subclasses that parse label references override
 * or delegate to the Collection-based helper.
 */
@Override
public void retrieveLabels() throws QuickFixException {
}
/**
 * Resolves every {@code $Label} reference in the given property references by
 * validating and fetching each one through the current context's label
 * provider. Commonly applied to references harvested from parsed javascript,
 * but works on any property-reference collection.
 *
 * @param props the property references to scan; null or empty is a no-op
 * @throws QuickFixException if a referenced label fails validation or lookup
 */
protected void retrieveLabels(Collection<PropertyReference> props) throws QuickFixException {
    if (props == null || props.isEmpty()) {
        return;
    }
    GlobalValueProvider labelProvider =
            Aura.getContextService().getCurrentContext().getGlobalProviders().get(LABEL.getPrefix());
    for (PropertyReference ref : props) {
        if (ref.getRoot().equals(LABEL.getPrefix())) {
            labelProvider.validate(ref.getStem());
            labelProvider.getValue(ref.getStem());
        }
    }
}
/** @return the API version set for this definition, or null. */
@Override
public String getAPIVersion() {
    return apiVersion;
}

/** @return the description set for this definition, or null. */
@Override
public String getDescription() {
    return description;
}
}
| badlogicmanpreet/aura | aura-impl/src/main/java/org/auraframework/impl/system/DefinitionImpl.java | Java | apache-2.0 | 11,852 |
package com.planet_ink.fakedb;
import java.io.InputStream;
import java.io.Reader;
import java.sql.NClob;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.util.Map;
/*
Copyright 2001 Thomas Neumann
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
@SuppressWarnings("unchecked")
class ResultSet implements java.sql.ResultSet
{
private Statement statement;            // owning statement, returned by getStatement()
private Backend.Relation relation;      // table being scanned
private java.util.Iterator iter;        // index keys (key scan) or RecordInfos (record scan)
private int currentRow=0;               // 1-based row counter; 0 = before first row
private int conditionIndex;             // attribute index of the WHERE column; <0 = key condition
private String conditionValue;          // WHERE comparison value, or null for an unfiltered scan
private boolean eq=true;                // comparison-operator flags parsed from the WHERE operator;
private boolean lt=false;               // they are independent so "<=" sets both lt and eq
private boolean gt=false;
private final String[] values;          // current row's column values
private final boolean[] nullIndicators; // per-column NULL flags for the current row
private boolean nullFlag = false;       // whether the most recent column read was NULL (wasNull())
/**
 * Builds a result set scanning the given relation.
 *
 * @param s    owning statement
 * @param r    relation (table) to scan
 * @param ci   index of the condition column; a negative value means the
 *             condition applies to the index key rather than a record column
 * @param cv   condition value to compare against, or null for no condition
 * @param comp comparison operator text, e.g. "=", "<", ">=",
 *             whose characters set the eq/lt/gt flags independently
 */
ResultSet(Statement s,
          Backend.Relation r,
          int ci,
          String cv,
          String comp)
{
    statement=s;
    relation=r;
    conditionIndex=ci;
    conditionValue=cv;
    comp=comp.trim();
    eq=(comp.indexOf("=")>=0);
    lt=(comp.indexOf("<")>=0);
    gt=(comp.indexOf(">")>=0) ;
    currentRow=0;
    values=new String[r.attributes.length];
    nullIndicators=new boolean[values.length];
    // Key condition: iterate the index keys themselves; otherwise iterate
    // the record handles directly.
    if ((ci<0)&&(cv!=null)) {
        iter=r.index.keySet().iterator();
    } else {
        iter=r.index.values().iterator();
    }
}

/** @return the Statement that produced this result set. */
public java.sql.Statement getStatement() throws java.sql.SQLException { return statement; }
/**
 * Tests whether a string looks numeric: after trimming and dropping a single
 * leading '-', every character must be a digit, '.', or ','.
 * NOTE: deliberately lenient — strings such as "1.2.3" or "1,000" also pass;
 * the parsing helpers coerce anything unparsable to 0.
 */
public static boolean isNumber(String s)
{
    if(s==null) return false;
    s=s.trim();
    if(s.length()==0) return false;
    if((s.length()>1)&&(s.startsWith("-")))
        s=s.substring(1);
    for(int i=0;i<s.length();i++)
        if("0123456789.,".indexOf(s.charAt(i))<0)
            return false;
    return true;
}

/** Lenient double parse: unparsable (or null) input yields 0 instead of throwing. */
public static double s_double(String DOUBLE)
{
    try { return Double.parseDouble(DOUBLE); }
    catch(Exception e) { return 0; }
}

/** Lenient long parse: unparsable (or null) input yields 0 instead of throwing. */
public static long s_long(String LONG)
{
    try { return Long.parseLong(LONG); }
    catch(Exception e) { return 0; }
}

/**
 * True only for digit strings containing exactly one '.', with an optional
 * leading '-'; pure integers (no dot) return false.
 */
public static boolean isDouble(String DBL)
{
    if(DBL.length()==0) return false;
    if(DBL.startsWith("-")&&(DBL.length()>1))
        DBL=DBL.substring(1);
    boolean alreadyDot=false;
    for(int i=0;i<DBL.length();i++)
    {
        char c=DBL.charAt(i);
        if(Character.isDigit(c))
            continue;
        if(c!='.')
            return false;
        if(alreadyDot)
            return false;
        alreadyDot=true;
    }
    return alreadyDot;
}

/**
 * Compares two numeric strings, returning -1, 0, or 1.
 * Returns 0 when either argument is null or not numeric per isNumber().
 * When either side contains a decimal point the comparison is done as
 * doubles, otherwise as longs.
 *
 * Made static for consistency with the other numeric helpers: it reads no
 * instance state, and existing instance-style call sites remain valid since
 * static methods are callable through instance references. Also removes the
 * needless Long.valueOf(..).doubleValue() boxing of the old implementation.
 */
public static int numCompare(String s1, String s2)
{
    if((s1==null)||(s2==null)) return 0;
    if((!isNumber(s1))||(!isNumber(s2))) return 0;
    if(isDouble(s1)||(isDouble(s2)))
    {
        double d1=isDouble(s1)?s_double(s1):(double)s_long(s1);
        double d2=isDouble(s2)?s_double(s2):(double)s_long(s2);
        if(d1==d2) return 0;
        return (d1>d2)?1:-1;
    }
    long l1=s_long(s1);
    long l2=s_long(s2);
    if(l1==l2) return 0;
    return (l1>l2)?1:-1;
}
/**
 * Advances to the next row matching the condition; false when exhausted.
 *
 * Two scan modes: a key scan (conditionIndex < 0 with a non-null condition
 * value) compares the index key itself, while a record scan loads each
 * record and compares the condition column. For &lt; and &gt; both a numeric
 * comparison and a string comparison are consulted, either of which can
 * satisfy the operator.
 */
public boolean next() throws java.sql.SQLException
{
    while (true)
    {
        if (!iter.hasNext()) return false;
        if ((conditionIndex<0)&&(conditionValue!=null))
        {
            // Key scan: keys may carry extra data after a newline; only the
            // part before the first '\n' participates in the comparison.
            String key=(String)iter.next();
            String subKey=key;
            int x=subKey.indexOf("\n");
            if(x>0)subKey=subKey.substring(0,x);
            int nc=(lt||gt)?numCompare(subKey,conditionValue):0;
            int sc=(lt||gt)?subKey.compareTo(conditionValue):0;
            if(((eq)&&(subKey.equals(conditionValue)))
            ||((eq)&&(key.startsWith(conditionValue+"\n")))
            ||((lt)&&(nc<0))
            ||((gt)&&(nc>0))
            ||((lt)&&(sc<0))
            ||((gt)&&(sc>0)))
            {
                currentRow++;
                // Materialize the matching row into values/nullIndicators.
                return relation.getRecord(nullIndicators,values,(Backend.RecordInfo)relation.index.get(key));
            }
            continue;
        }
        // Record scan: load the row first, then test the condition column (if any).
        if (!relation.getRecord(nullIndicators,values,(Backend.RecordInfo)iter.next()))
            return false;
        if (conditionIndex>=0)
        {
            // NULL columns never match any comparison.
            if (nullIndicators[conditionIndex])
                continue;
            String subKey=values[conditionIndex];
            int nc=(lt||gt)?numCompare(subKey,conditionValue):0;
            int sc=(lt||gt)?subKey.compareTo(conditionValue):0;
            if(!(((eq)&&(subKey.equals(conditionValue)))
            ||((lt)&&(nc<0))
            ||((gt)&&(nc>0))
            ||((lt)&&(sc<0))
            ||((gt)&&(sc>0))))
                continue;
        }
        currentRow++;
        return true;
    }
}
/** Closing is a no-op: this result set holds no external resources of its own. */
public void close() throws java.sql.SQLException
{
}

/** @return whether the most recently read column was SQL NULL. */
public boolean wasNull() throws java.sql.SQLException
{
    return nullFlag;
}
/**
 * Reads a column of the current row as a string, updating the NULL flag
 * consulted by wasNull(). Out-of-range indexes are treated as NULL rather
 * than raising an error.
 * NOTE(review): indexes are used 0-based here, while JDBC specifies 1-based
 * column indexes — confirm callers pass indexes from findColumn().
 */
public String getString(int columnIndex) throws java.sql.SQLException
{
    if ((columnIndex<0)||(columnIndex>=nullIndicators.length)||(nullIndicators[columnIndex])) {
        nullFlag=true;
        return null;
    }
    nullFlag=false;
    return values[columnIndex];
}
// Array, Blob, Clob and Ref columns are not supported by this fake backend.
// NOTE(review): the getString() calls are commented out, so these methods
// test the nullFlag left over from the PREVIOUS column read rather than the
// requested column — confirm whether that is intended.
public java.sql.Array getArray(int columnIndex) throws java.sql.SQLException
{
    //String s=getString(columnIndex);
    if (nullFlag) return null;
    throw new java.sql.SQLException();
}
public java.sql.Blob getBlob(int columnIndex) throws java.sql.SQLException
{
    //String s=getString(columnIndex);
    if (nullFlag) return null;
    throw new java.sql.SQLException();
}
public java.sql.Clob getClob(int columnIndex) throws java.sql.SQLException
{
    //String s=getString(columnIndex);
    if (nullFlag) return null;
    throw new java.sql.SQLException();
}
public java.sql.Ref getRef(int columnIndex) throws java.sql.SQLException
{
    //String s=getString(columnIndex);
    if (nullFlag) return null;
    throw new java.sql.SQLException();
}
/**
 * Reads a column as a boolean: true when the value starts with T/t, Y/y or
 * '1'; false otherwise, including for SQL NULL or an empty string.
 */
public boolean getBoolean(int columnIndex) throws java.sql.SQLException
{
    final String value = getString(columnIndex);
    if ((value == null) || (value.length() == 0))
        return false;
    final char first = Character.toUpperCase(value.charAt(0));
    return (first == 'T') || (first == 'Y') || (first == '1');
}
// ------------------------------------------------------------------
// Primitive numeric getters. Per JDBC, a NULL column yields 0; an
// unparsable value surfaces as an SQLException carrying the parser's
// message.
// ------------------------------------------------------------------
public byte getByte(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return 0;
    try {
        return Byte.parseByte(s);
    } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); }
}
public short getShort(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return 0;
    try {
        return Short.parseShort(s);
    } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); }
}
public int getInt(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return 0;
    try {
        return Integer.parseInt(s);
    } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); }
}
public long getLong(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return 0;
    try {
        return Long.parseLong(s);
    } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); }
}
public float getFloat(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return 0;
    try {
        return Float.parseFloat(s);
    } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); }
}
public double getDouble(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return 0;
    try {
        return Double.parseDouble(s);
    } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); }
}
/**
 * Reads a column as a BigDecimal.
 * NOTE(review): a NULL column returns BigDecimal zero rather than null (JDBC
 * specifies null for getBigDecimal) — confirm callers rely on this before
 * changing it.
 */
public java.math.BigDecimal getBigDecimal(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return new java.math.BigDecimal(0);
    try {
        return new java.math.BigDecimal(s);
    } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); }
}
/**
 * Reads a column as a BigDecimal with the requested scale.
 *
 * @deprecated retained for JDBC 1.0 compatibility; prefer {@link #getBigDecimal(int)}.
 */
@Deprecated
public java.math.BigDecimal getBigDecimal(int columnIndex, int scale) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    // BigDecimal is immutable: setScale() returns a NEW instance, so its
    // result must be used. The previous code discarded the return value and
    // never applied the scale. HALF_UP is supplied so that reducing the scale
    // does not raise ArithmeticException.
    if (nullFlag) {
        return new java.math.BigDecimal(0).setScale(scale, java.math.RoundingMode.HALF_UP);
    }
    try {
        return new java.math.BigDecimal(s).setScale(scale, java.math.RoundingMode.HALF_UP);
    } catch (NumberFormatException e) {
        throw new java.sql.SQLException(e.getMessage());
    }
}
/**
 * Reads a column as raw bytes, encoded with the platform default charset.
 * NOTE(review): consider an explicit charset; also the NumberFormatException
 * catch is unreachable here (String.getBytes() does not throw it).
 */
public byte[] getBytes(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return null;
    try {
        return s.getBytes();
    } catch (NumberFormatException e) { throw new java.sql.SQLException(e.getMessage()); }
}
/**
 * Reads a column as a java.sql.Date (expects "yyyy-[m]m-[d]d").
 *
 * Date/Time/Timestamp.valueOf signal malformed input with
 * IllegalArgumentException, NOT NumberFormatException, so that is what must
 * be caught here. The previous catch clauses could never fire, letting
 * malformed values escape as unchecked exceptions instead of the declared
 * SQLException. (NumberFormatException is a subclass of
 * IllegalArgumentException, so this catch is strictly broader.)
 */
public java.sql.Date getDate(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return null;
    try {
        return java.sql.Date.valueOf(s);
    } catch (IllegalArgumentException e) { throw new java.sql.SQLException(e.getMessage()); }
}

/** Reads a column as a java.sql.Time (expects "hh:mm:ss"). */
public java.sql.Time getTime(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return null;
    try {
        return java.sql.Time.valueOf(s);
    } catch (IllegalArgumentException e) { throw new java.sql.SQLException(e.getMessage()); }
}

/** Reads a column as a java.sql.Timestamp (expects "yyyy-mm-dd hh:mm:ss[.f...]"). */
public java.sql.Timestamp getTimestamp(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return null;
    try {
        return java.sql.Timestamp.valueOf(s);
    } catch (IllegalArgumentException e) { throw new java.sql.SQLException(e.getMessage()); }
}
/** ASCII stream view of the column; same bytes as getBinaryStream(). */
public java.io.InputStream getAsciiStream(int columnIndex) throws java.sql.SQLException
{
    return getBinaryStream(columnIndex);
}
/**
 * Unicode stream view of the column; same bytes as getBinaryStream().
 * @deprecated
 */
public java.io.InputStream getUnicodeStream(int columnIndex) throws java.sql.SQLException
{
    return getBinaryStream(columnIndex);
}
/** Column bytes (via getBytes(), platform charset) as a stream; null for SQL NULL. */
public java.io.InputStream getBinaryStream(int columnIndex) throws java.sql.SQLException
{
    byte b[] = getBytes(columnIndex);
    if (nullFlag) return null;
    return new java.io.ByteArrayInputStream(b);
}
/** Column characters as a Reader; null for SQL NULL. */
public java.io.Reader getCharacterStream(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return null;
    return new java.io.CharArrayReader(s.toCharArray());
}
/** Generic accessor: columns are stored as strings, so the String value is returned. */
public Object getObject(int columnIndex) throws java.sql.SQLException
{
    return getString(columnIndex);
}
/** Parses the column as a URL; malformed values surface as SQLException. */
public java.net.URL getURL(int columnIndex) throws java.sql.SQLException
{
    String s=getString(columnIndex);
    if (nullFlag) return null;
    try {
        return new java.net.URL(s);
    } catch (java.net.MalformedURLException e) { throw new java.sql.SQLException(e.getMessage()); }
}
/**
 * Maps a column name to the index used by the positional getters.
 * NOTE(review): the convention (0- vs 1-based) is whatever
 * Backend.Relation.findAttribute() returns — confirm it matches getString(int).
 */
public int findColumn(String columnName) throws java.sql.SQLException
{
    return relation.findAttribute(columnName);
}
// ------------------------------------------------------------------
// Name-based accessors: resolve the column name via findColumn() and
// delegate to the corresponding index-based getter.
// NOTE(review): the Calendar-taking overloads ignore the Calendar and
// return the value uninterpreted — confirm that is acceptable to callers.
// ------------------------------------------------------------------
public String getString(String columnName) throws java.sql.SQLException
{ return getString(findColumn(columnName)); }
public java.sql.Array getArray(String columnName) throws java.sql.SQLException
{ return getArray(findColumn(columnName)); }
public java.sql.Blob getBlob(String columnName) throws java.sql.SQLException
{ return getBlob(findColumn(columnName)); }
public java.sql.Clob getClob(String columnName) throws java.sql.SQLException
{ return getClob(findColumn(columnName)); }
public java.sql.Ref getRef(String columnName) throws java.sql.SQLException
{ return getRef(findColumn(columnName)); }
public boolean getBoolean(String columnName) throws java.sql.SQLException
{ return getBoolean(findColumn(columnName)); }
public byte getByte(String columnName) throws java.sql.SQLException
{ return getByte(findColumn(columnName)); }
public short getShort(String columnName) throws java.sql.SQLException
{ return getShort(findColumn(columnName)); }
public int getInt(String columnName) throws java.sql.SQLException
{ return getInt(findColumn(columnName)); }
public long getLong(String columnName) throws java.sql.SQLException
{ return getLong(findColumn(columnName)); }
public float getFloat(String columnName) throws java.sql.SQLException
{ return getFloat(findColumn(columnName)); }
public double getDouble(String columnName) throws java.sql.SQLException
{ return getDouble(findColumn(columnName)); }
public java.math.BigDecimal getBigDecimal(String columnName) throws java.sql.SQLException
{ return getBigDecimal(findColumn(columnName)); }
/**
 * @deprecated
 */
public java.math.BigDecimal getBigDecimal(String columnName, int scale) throws java.sql.SQLException
{ return getBigDecimal(findColumn(columnName), scale); }
public byte[] getBytes(String columnName) throws java.sql.SQLException
{ return getBytes(findColumn(columnName)); }
public java.sql.Date getDate(String columnName) throws java.sql.SQLException
{ return getDate(findColumn(columnName)); }
public java.sql.Date getDate(int columnName,java.util.Calendar c) throws java.sql.SQLException
{ return getDate(columnName); }
public java.sql.Date getDate(String columnName,java.util.Calendar c) throws java.sql.SQLException
{ return getDate(findColumn(columnName)); }
public java.sql.Time getTime(String columnName) throws java.sql.SQLException
{ return getTime(findColumn(columnName)); }
public java.sql.Time getTime(int columnName,java.util.Calendar c) throws java.sql.SQLException
{ return getTime(columnName); }
public java.sql.Time getTime(String columnName,java.util.Calendar c) throws java.sql.SQLException
{ return getTime(findColumn(columnName)); }
public java.sql.Timestamp getTimestamp(String columnName) throws java.sql.SQLException
{ return getTimestamp(findColumn(columnName)); }
public java.sql.Timestamp getTimestamp(int columnName,java.util.Calendar c) throws java.sql.SQLException
{ return getTimestamp(columnName); }
public java.sql.Timestamp getTimestamp(String columnName,java.util.Calendar c) throws java.sql.SQLException
{ return getTimestamp(findColumn(columnName)); }
public java.io.Reader getCharacterStream(String columnName) throws java.sql.SQLException
{ return getCharacterStream(findColumn(columnName)); }
public java.io.InputStream getAsciiStream(String columnName) throws java.sql.SQLException
{ return getAsciiStream(findColumn(columnName)); }
/**
 * @deprecated
 */
public java.io.InputStream getUnicodeStream(String columnName) throws java.sql.SQLException
{ return getUnicodeStream(findColumn(columnName)); }
public java.io.InputStream getBinaryStream(String columnName) throws java.sql.SQLException
{ return getBinaryStream(findColumn(columnName)); }
public java.net.URL getURL(String columnName) throws java.sql.SQLException
{ return getURL(findColumn(columnName)); }
public Object getObject(String columnName) throws java.sql.SQLException
{ return getObject(findColumn(columnName)); }
/** No warnings are ever generated by this implementation. */
public java.sql.SQLWarning getWarnings() throws java.sql.SQLException
{ return null; }
/** Nothing to clear: warnings are never recorded. */
public void clearWarnings() throws java.sql.SQLException
{ }
/** Positioned updates (and thus cursor names) are unsupported. */
public String getCursorName() throws java.sql.SQLException
{ throw new java.sql.SQLException("Positioned Update not supported.", "S1C00"); }
/** NOTE(review): returns null instead of metadata; callers must null-check. */
public java.sql.ResultSetMetaData getMetaData() throws java.sql.SQLException
{ return null; }
// ------------------------------------------------------------------
// This result set is read-only: every JDBC 2 mutating operation is
// rejected with a plain SQLException.
// ------------------------------------------------------------------
public void updateArray(int columnIndex,java.sql.Array x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateArray(String columnName,java.sql.Array x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateAsciiStream(int columnIndex,java.io.InputStream x,int length) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateAsciiStream(String columnName,java.io.InputStream x, int length) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateBigDecimal(int columnIndex,java.math.BigDecimal x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateBigDecimal(String columnName,java.math.BigDecimal x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateBinaryStream(int columnIndex,java.io.InputStream x,int length) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateBinaryStream(String columnName,java.io.InputStream x, int length) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateBlob(int columnIndex,java.sql.Blob x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateBlob(String columnName,java.sql.Blob x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateBoolean(int columnIndex,boolean x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateBoolean(String columnName,boolean x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateByte(int columnIndex,byte x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateByte(String columnName,byte x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateBytes(int columnIndex,byte[] x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateBytes(String columnName,byte[] x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateCharacterStream(int columnIndex,java.io.Reader x,int length) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateCharacterStream(String columnName,java.io.Reader reader, int length) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateClob(int columnIndex,java.sql.Clob x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateClob(String columnName,java.sql.Clob x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateDate(int columnIndex,java.sql.Date x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateDate(String columnName,java.sql.Date x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateDouble(int columnIndex,double x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateDouble(String columnName,double x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateFloat(int columnIndex,float x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateFloat(String columnName,float x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateInt(int columnIndex,int x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateInt(String columnName,int x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateLong(int columnIndex,long x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateLong(String columnName,long x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateNull(int columnIndex) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateNull(String columnName) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateObject(int columnIndex,Object x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateObject(int columnIndex,Object x,int scale) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateObject(String columnName,Object x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateObject(String columnName,Object x,int scale) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateRef(int columnIndex,java.sql.Ref x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateRef(String columnName,java.sql.Ref x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateRow() throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateShort(int columnIndex,short x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateShort(String columnName,short x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateString(int columnIndex,String x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateString(String columnName,String x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateTime(int columnIndex,java.sql.Time x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateTime(String columnName,java.sql.Time x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateTimestamp(int columnIndex,java.sql.Timestamp x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void updateTimestamp(String columnName,java.sql.Timestamp x) throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void deleteRow() throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void moveToInsertRow() throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void moveToCurrentRow() throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void cancelRowUpdates() throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void insertRow() throws java.sql.SQLException { throw new java.sql.SQLException(); }
public void refreshRow() throws java.sql.SQLException { throw new java.sql.SQLException(); }
/** @return the 1-based number of the current row (0 before the first next()). */
public int getRow() { return currentRow; }
// Backward navigation is unsupported; these report failure unconditionally.
public boolean first() { return false; }
public boolean previous() { return false; }
public boolean isFirst() { return false; }
// Set once last()/afterLast() has drained the remaining rows.
private boolean afterLast=false;
/**
 * Positions after the final row by draining the iterator with next().
 * Always reports true, even for an empty result set.
 */
public boolean last()
{
    try{
        while(next());
    }
    catch(java.sql.SQLException sqle){}
    afterLast=true;
    return true;
}
public boolean isLast() { return false; }
/**
 * Re-positions before the first row by recreating the iterator, mirroring
 * the constructor's choice between a key scan and a record scan.
 */
public void beforeFirst() throws java.sql.SQLException
{
    if(relation==null)
        throw new java.sql.SQLException();
    if ((conditionIndex<0)&&(conditionValue!=null)) {
        iter=relation.index.keySet().iterator();
    } else {
        iter=relation.index.values().iterator();
    }
    currentRow=0;
}
public boolean isBeforeFirst() { return (currentRow==0); }
/** Positions after the last row by draining the iterator. */
public void afterLast(){ last(); }
public boolean isAfterLast(){return afterLast;}
// Absolute/relative positioning and row-change detection are not really
// supported; these are fixed-answer stubs.
public boolean absolute(int i) { return true; }
public boolean relative(int i) { return false; }
public boolean rowDeleted() { return false; }
public boolean rowInserted() { return false; }
public boolean rowUpdated() { return false; }
public int getConcurrency() { return 0; }
public int getType() { return 0; }
// Fetch hints and result-set characteristics are delegated to the statement.
public void setFetchSize(int i) throws java.sql.SQLException { statement.setFetchSize(i); }
public int getFetchSize() throws java.sql.SQLException { return statement.getFetchSize(); }
public void setFetchDirection(int i) throws java.sql.SQLException { statement.setFetchDirection(i); }
public int getFetchDirection() throws java.sql.SQLException { return statement.getFetchDirection(); }
public int getResultSetConcurrency() throws java.sql.SQLException { return statement.getResultSetConcurrency(); }
public int getResultSetType() throws java.sql.SQLException { return statement.getResultSetType(); }
// ------------------------------------------------------------------
// JDBC 4 additions: unsupported accessors answer with neutral defaults
// (null/0/false) and the update variants are silent no-ops.
// NOTE(review): silently ignoring these updates (rather than throwing,
// as the JDBC 2 update methods do) may hide caller bugs — confirm intent.
// ------------------------------------------------------------------
public int getHoldability() throws SQLException { return 0; }
public Reader getNCharacterStream(int arg0) throws SQLException { return null; }
public Reader getNCharacterStream(String arg0) throws SQLException { return null; }
public NClob getNClob(int arg0) throws SQLException { return null; }
public NClob getNClob(String arg0) throws SQLException { return null; }
public String getNString(int arg0) throws SQLException { return null; }
public String getNString(String arg0) throws SQLException { return null; }
//public Object getObject(int arg0, Map arg1) throws SQLException { return getString(arg0); }
// The type-map is ignored: all columns are plain strings.
public Object getObject(int arg0, Map<String, Class<?>> arg1) throws SQLException { return getString(arg0); }
public Object getObject(String arg0, Map<String, Class<?>> arg1) throws SQLException { return getObject(findColumn(arg0),arg1); }
//public Object getObject(String arg0, Map arg1) throws SQLException { return getObject(findColumn(arg0),arg1); }
public RowId getRowId(int arg0) throws SQLException { return null; }
public RowId getRowId(String arg0) throws SQLException { return null; }
public SQLXML getSQLXML(int arg0) throws SQLException { return null; }
public SQLXML getSQLXML(String arg0) throws SQLException { return null;}
public boolean isClosed() throws SQLException { return false; }
public void updateAsciiStream(int arg0, InputStream arg1) throws SQLException {}
public void updateAsciiStream(String arg0, InputStream arg1) throws SQLException {}
public void updateAsciiStream(int arg0, InputStream arg1, long arg2) throws SQLException {}
public void updateAsciiStream(String arg0, InputStream arg1, long arg2) throws SQLException {}
public void updateBinaryStream(int arg0, InputStream arg1) throws SQLException {}
public void updateBinaryStream(String arg0, InputStream arg1) throws SQLException {}
public void updateBinaryStream(int arg0, InputStream arg1, long arg2) throws SQLException {}
public void updateBinaryStream(String arg0, InputStream arg1, long arg2) throws SQLException {}
public void updateBlob(int arg0, InputStream arg1) throws SQLException {}
public void updateBlob(String arg0, InputStream arg1) throws SQLException {}
public void updateBlob(int arg0, InputStream arg1, long arg2) throws SQLException {}
public void updateBlob(String arg0, InputStream arg1, long arg2) throws SQLException {}
public void updateCharacterStream(int arg0, Reader arg1) throws SQLException {}
public void updateCharacterStream(String arg0, Reader arg1) throws SQLException {}
public void updateCharacterStream(int arg0, Reader arg1, long arg2) throws SQLException {}
public void updateCharacterStream(String arg0, Reader arg1, long arg2) throws SQLException {}
public void updateClob(int arg0, Reader arg1) throws SQLException {}
public void updateClob(String arg0, Reader arg1) throws SQLException {}
public void updateClob(int arg0, Reader arg1, long arg2) throws SQLException {}
public void updateClob(String arg0, Reader arg1, long arg2) throws SQLException {}
public void updateNCharacterStream(int arg0, Reader arg1) throws SQLException {}
public void updateNCharacterStream(String arg0, Reader arg1) throws SQLException {}
public void updateNCharacterStream(int arg0, Reader arg1, long arg2) throws SQLException {}
public void updateNCharacterStream(String arg0, Reader arg1, long arg2) throws SQLException {}
public void updateNClob(int arg0, NClob arg1) throws SQLException {}
public void updateNClob(String arg0, NClob arg1) throws SQLException {}
public void updateNClob(int arg0, Reader arg1) throws SQLException {}
public void updateNClob(String arg0, Reader arg1) throws SQLException {}
public void updateNClob(int arg0, Reader arg1, long arg2) throws SQLException {}
public void updateNClob(String arg0, Reader arg1, long arg2)throws SQLException {}
public void updateNString(int arg0, String arg1) throws SQLException {}
public void updateNString(String arg0, String arg1) throws SQLException {}
public void updateRowId(int arg0, RowId arg1) throws SQLException {}
public void updateRowId(String arg0, RowId arg1) throws SQLException {}
public void updateSQLXML(int arg0, SQLXML arg1) throws SQLException {}
public void updateSQLXML(String arg0, SQLXML arg1) throws SQLException {}
// Wrapper interface: this class wraps nothing.
public boolean isWrapperFor(Class<?> iface) throws SQLException {return false;}
public <T> T unwrap(Class<T> iface) throws SQLException {return null;}
}
| robjcaskey/Unofficial-Coffee-Mud-Upstream | com/planet_ink/fakedb/ResultSet.java | Java | apache-2.0 | 30,543 |
/**
* #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
* This file is part of the Smart Developer Hub Project:
* http://www.smartdeveloperhub.org/
*
* Center for Open Middleware
* http://www.centeropenmiddleware.com/
* #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
* Copyright (C) 2015-2016 Center for Open Middleware.
* #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
* Artifact : org.smartdeveloperhub.harvesters.scm:scm-harvester-frontend:0.3.0
* Bundle : scm-harvester.war
* #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
*/
package org.smartdeveloperhub.harvesters.scm.frontend.core.util;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.sameInstance;
import static org.junit.Assert.fail;
import java.io.Serializable;
import java.util.Collection;
import java.util.Iterator;
import java.util.Set;
import mockit.Expectations;
import mockit.Mocked;
import org.ldp4j.application.data.DataSet;
import org.ldp4j.application.data.Individual;
import org.ldp4j.application.data.Name;
import org.ldp4j.application.data.NamingScheme;
import org.ldp4j.application.ext.ApplicationRuntimeException;
import org.ldp4j.application.session.ContainerSnapshot;
import org.ldp4j.application.session.ResourceSnapshot;
import org.ldp4j.application.session.WriteSession;
/**
 * Shared verification logic for tests of AbstractCappedContainerHandler
 * subclasses: capped containers must answer GET with an empty data set named
 * after the resource, and must reject member creation.
 */
public abstract class AbstractCappedContainerHandlerTestHelper {

    /**
     * DataSet stand-in whose every operation fails fast, proving that the
     * handler under test never inspects the data set passed to create().
     */
    private final class CustomDataSet implements DataSet {

        @Override
        public Iterator<Individual<?, ?>> iterator() {
            throw new UnsupportedOperationException("Method should not be invoked");
        }

        @Override
        public Name<?> name() {
            throw new UnsupportedOperationException("Method should not be invoked");
        }

        @Override
        public int numberOfIndividuals() {
            throw new UnsupportedOperationException("Method should not be invoked");
        }

        @Override
        public boolean hasIndividuals() {
            throw new UnsupportedOperationException("Method should not be invoked");
        }

        @Override
        public Collection<? extends Individual<?, ?>> individuals() {
            throw new UnsupportedOperationException("Method should not be invoked");
        }

        @Override
        public Set<Serializable> individualIds() {
            throw new UnsupportedOperationException("Method should not be invoked");
        }

        @Override
        public boolean hasIndividual(final Object id) {
            throw new UnsupportedOperationException("Method should not be invoked");
        }

        @Override
        public <T extends Serializable, S extends Individual<T, S>> S individualOfId(final T id) {
            throw new UnsupportedOperationException("Method should not be invoked");
        }

        @Override
        public <T extends Serializable, S extends Individual<T, S>> S individual(final T id, final Class<? extends S> clazz) {
            throw new UnsupportedOperationException("Method should not be invoked");
        }

        @Override
        public boolean isEmpty() {
            throw new UnsupportedOperationException("Method should not be invoked");
        }

        @Override
        public void remove(final Individual<?, ?> src) {
            throw new UnsupportedOperationException("Method should not be invoked");
        }

        @Override
        public String toString() {
            return "DATASET";
        }
    }

    // JMockit-managed mocks, re-initialized for each test method.
    @Mocked private ContainerSnapshot container;
    @Mocked private WriteSession session;
    @Mocked private ResourceSnapshot snapshot;

    /**
     * Asserts that the handler's get() returns a data set carrying the
     * resource's name and containing no individuals.
     */
    protected final void verifyGetReturnsEmptyDataset(final AbstractCappedContainerHandler sut) throws Exception {
        final Name<String> name=NamingScheme.getDefault().name("id");
        new Expectations() {{
            AbstractCappedContainerHandlerTestHelper.this.snapshot.name();this.result=name;
        }};
        final DataSet result = sut.get(this.snapshot);
        assertThat((Object)result.name(),sameInstance((Object)name));
        assertThat(result.hasIndividuals(),equalTo(false));
    }

    /**
     * Asserts that the handler's factory method is disabled: create() must
     * raise ApplicationRuntimeException whose (lower-cased) message is
     * "&lt;name&gt; creation is not supported".
     */
    protected final void verifyFactoryMethodIsDisabled(final String name, final AbstractCappedContainerHandler sut) {
        try {
            sut.create(this.container, new CustomDataSet(), this.session);
            fail("Factory method should be disabled");
        } catch (final ApplicationRuntimeException e) {
            assertThat(e.getMessage().toLowerCase(),equalTo(name+" creation is not supported"));
        }
    }
}
| SmartDeveloperHub/sdh-scm-harvester | frontend/src/test/java/org/smartdeveloperhub/harvesters/scm/frontend/core/util/AbstractCappedContainerHandlerTestHelper.java | Java | apache-2.0 | 4,942 |
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/elasticache/model/DescribeReplicationGroupsResult.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/logging/LogMacros.h>
#include <utility>
using namespace Aws::ElastiCache::Model;
using namespace Aws::Utils::Xml;
using namespace Aws::Utils::Logging;
using namespace Aws::Utils;
using namespace Aws;
// Default result: members stay empty until assigned from a service response.
DescribeReplicationGroupsResult::DescribeReplicationGroupsResult()
{
}

// Convenience constructor: delegates to operator= to parse the XML payload.
DescribeReplicationGroupsResult::DescribeReplicationGroupsResult(const Aws::AmazonWebServiceResult<XmlDocument>& result)
{
  *this = result;
}
// Parses the XML service response into this result object.
// The payload element may either be the document root itself or be wrapped
// one level below it, depending on how the response was framed.
DescribeReplicationGroupsResult& DescribeReplicationGroupsResult::operator =(const Aws::AmazonWebServiceResult<XmlDocument>& result)
{
  const XmlDocument& doc = result.GetPayload();
  XmlNode root = doc.GetRootElement();
  XmlNode payload = root;
  if (!root.IsNull() && (root.GetName() != "DescribeReplicationGroupsResult"))
  {
    payload = root.FirstChild("DescribeReplicationGroupsResult");
  }

  if(!payload.IsNull())
  {
    // Optional pagination marker.
    XmlNode markerNode = payload.FirstChild("Marker");
    if(!markerNode.IsNull())
    {
      m_marker = StringUtils::Trim(markerNode.GetText().c_str());
    }
    // Zero or more <ReplicationGroup> children under <ReplicationGroups>.
    XmlNode groupsNode = payload.FirstChild("ReplicationGroups");
    if(!groupsNode.IsNull())
    {
      XmlNode member = groupsNode.FirstChild("ReplicationGroup");
      while(!member.IsNull())
      {
        m_replicationGroups.push_back(member);
        member = member.NextNode("ReplicationGroup");
      }
    }
  }

  // Response metadata (request id) always hangs off the document root.
  if (!root.IsNull()) {
    XmlNode metadataNode = root.FirstChild("ResponseMetadata");
    m_responseMetadata = metadataNode;
    AWS_LOGSTREAM_DEBUG("Aws::ElastiCache::Model::DescribeReplicationGroupsResult", "x-amzn-request-id: " << m_responseMetadata.GetRequestId() );
  }

  return *this;
}
| JoyIfBam5/aws-sdk-cpp | aws-cpp-sdk-elasticache/source/model/DescribeReplicationGroupsResult.cpp | C++ | apache-2.0 | 2,624 |
/*
* Copyright 2002-2008 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans;
import java.beans.PropertyChangeEvent;
import org.springframework.util.ClassUtils;
/**
 * Exception thrown on a type mismatch when trying to set a bean property.
 * Carries the offending value and, when known, the required target type.
 *
 * @author Rod Johnson
 * @author Juergen Hoeller
 */
public class TypeMismatchException extends PropertyAccessException {

	/**
	 * Error code that a type mismatch error will be registered with.
	 */
	public static final String ERROR_CODE = "typeMismatch";


	// Transient: the offending value may not be serializable.
	private transient Object value;

	private Class requiredType;


	/**
	 * Create a new TypeMismatchException.
	 * @param propertyChangeEvent the PropertyChangeEvent that resulted in the problem
	 * @param requiredType the required target type
	 */
	public TypeMismatchException(PropertyChangeEvent propertyChangeEvent, Class requiredType) {
		this(propertyChangeEvent, requiredType, null);
	}

	/**
	 * Create a new TypeMismatchException.
	 * @param propertyChangeEvent the PropertyChangeEvent that resulted in the problem
	 * @param requiredType the required target type (or <code>null</code> if not known)
	 * @param cause the root cause (may be <code>null</code>)
	 */
	public TypeMismatchException(PropertyChangeEvent propertyChangeEvent, Class requiredType, Throwable cause) {
		super(propertyChangeEvent, buildPropertyMessage(propertyChangeEvent, requiredType), cause);
		this.value = propertyChangeEvent.getNewValue();
		this.requiredType = requiredType;
	}

	/**
	 * Create a new TypeMismatchException without PropertyChangeEvent.
	 * @param value the offending value that couldn't be converted (may be <code>null</code>)
	 * @param requiredType the required target type (or <code>null</code> if not known)
	 */
	public TypeMismatchException(Object value, Class requiredType) {
		this(value, requiredType, null);
	}

	/**
	 * Create a new TypeMismatchException without PropertyChangeEvent.
	 * @param value the offending value that couldn't be converted (may be <code>null</code>)
	 * @param requiredType the required target type (or <code>null</code> if not known)
	 * @param cause the root cause (may be <code>null</code>)
	 */
	public TypeMismatchException(Object value, Class requiredType, Throwable cause) {
		super(buildValueMessage(value, requiredType), cause);
		this.value = value;
		this.requiredType = requiredType;
	}

	// Builds the message for the PropertyChangeEvent-based constructors.
	private static String buildPropertyMessage(PropertyChangeEvent propertyChangeEvent, Class requiredType) {
		String msg = "Failed to convert property value of type [" +
				ClassUtils.getDescriptiveType(propertyChangeEvent.getNewValue()) + "]";
		if (requiredType != null) {
			msg += " to required type [" + ClassUtils.getQualifiedName(requiredType) + "]";
		}
		if (propertyChangeEvent.getPropertyName() != null) {
			msg += " for property '" + propertyChangeEvent.getPropertyName() + "'";
		}
		return msg;
	}

	// Builds the message for the plain-value constructors.
	private static String buildValueMessage(Object value, Class requiredType) {
		String msg = "Failed to convert value of type [" + ClassUtils.getDescriptiveType(value) + "]";
		if (requiredType != null) {
			msg += " to required type [" + ClassUtils.getQualifiedName(requiredType) + "]";
		}
		return msg;
	}


	/**
	 * Return the offending value (may be <code>null</code>)
	 */
	public Object getValue() {
		return this.value;
	}

	/**
	 * Return the required target type, if any.
	 */
	public Class getRequiredType() {
		return this.requiredType;
	}

	public String getErrorCode() {
		return ERROR_CODE;
	}

}
| cbeams-archive/spring-framework-2.5.x | src/org/springframework/beans/TypeMismatchException.java | Java | apache-2.0 | 3,697 |
<?php /* Load site config and verify the current user may use the 'upload_file' module. */ require_once(dirname(__FILE__).'/inc/config.inc.php');IsModelPriv('upload_file'); ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>上传区域</title>
<link href="templates/style/admin.css" rel="stylesheet" type="text/css" />
<script type="text/javascript" src="templates/js/jquery.min.js"></script>
<script type="text/javascript" src="templates/js/forms.func.js"></script>
</head>
<body>
<div class="newupload">
	<div class="newupload_area">
    	<form name="from" id="from" enctype="multipart/form-data" method="post" onsubmit="return CheckIsUpload();">
        	<label>请选择上传文件:</label>
        	<input type="file" name="upfile" id="upfile" class="upload_newfile_file">
            <input type="submit" class="upload_newfile_btn" onclick="UploadPrompt(0)" value="上传" />
        </form>
    </div>
    <div class="uploading"></div>
    <div class="cl"></div>
</div>
<?php
// Handle the POSTed file, if any. Runs after the markup above so that the
// JavaScript UploadPrompt() call can render status into the page.
if(!empty($_FILES))
{
	// The upload class is included at the bottom of the page so the
	// resulting prompt message can be displayed in place.
	require_once(PHPMYWIND_DATA.'/httpfile/upload.class.php');

	// UploadFile() returns an info array on success, or an error string.
	$upload_info = UploadFile('upfile');
	if(!is_array($upload_info))
	{
		// Failure: show the error string returned by the upload class.
		echo '<script>UploadPrompt(\'<span class="upload_file_nok">'.$upload_info.'</span>\')</script>';
	}
	else
	{
		// Success: show the stored path ([2]) and human-readable size ([1]).
		echo '<script>UploadPrompt(\'<span class="upload_file_ok">上传成功!</span>上传后路径为:<span class="upload_file_name">'.$upload_info[2].'</span>,大小为:<span class="upload_file_name">'.GetRealSize($upload_info[1]).'</span>\');</script>';
	}
}
?>
</body>
</html>
/*Copyright (c) 2004,University of Illinois at Urbana-Champaign. All rights reserved.
*
* Created on Jun 14, 2006
*
* Developed by: CCT, Center for Computation and Technology,
* NCSA, University of Illinois at Urbana-Champaign
* OSC, Ohio Supercomputing Center
* TACC, Texas Advanced Computing Center
* UKy, University of Kentucky
*
* https://www.gridchem.org/
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal with the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the names of Chemistry and Computational Biology Group , NCSA,
* University of Illinois at Urbana-Champaign, nor the names of its contributors
* may be used to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS WITH THE SOFTWARE.
*/
package org.gridchem.client.gui.filebrowser;
import java.net.URI;
/**
 * Interface for the <code>FileBrowser</code> class to provide some common
 * methods. This is probably unnecessary.
 *
 * @author Rion Dooley < dooley [at] tacc [dot] utexas [dot] edu >
 *
 */
public interface FileBrowser {

	/**
	 * Set the path of the file browser.
	 *
	 * @param path the path the browser should display
	 */
	public void setPath(String path);

	/**
	 * Get the current path of the file browser.
	 *
	 * @return the current path
	 */
	public String getPath();

	/**
	 * Select the file corresponding to the file name.
	 *
	 * @param filename name of the file to select
	 */
	public void setSelected(String filename);

	/**
	 * Get the name of the currently selected file.
	 *
	 * @return the selected file's name
	 */
	public String getSelected();
}
| SciGaP/SEAGrid-Desktop-GUI | src/main/java/org/gridchem/client/gui/filebrowser/FileBrowser.java | Java | apache-2.0 | 2,787 |
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/core/client/AWSError.h>
#include <aws/lex-models/LexModelBuildingServiceErrorMarshaller.h>
#include <aws/lex-models/LexModelBuildingServiceErrors.h>
using namespace Aws::Client;
using namespace Aws::LexModelBuildingService;
// Resolves an error name, preferring the LexModelBuildingService-specific
// mapper and falling back to the generic AWS marshaller for unknown names.
AWSError<CoreErrors> LexModelBuildingServiceErrorMarshaller::FindErrorByName(const char* errorName) const
{
  AWSError<CoreErrors> serviceError = LexModelBuildingServiceErrorMapper::GetErrorForName(errorName);
  if (serviceError.GetErrorType() == CoreErrors::UNKNOWN)
  {
    // Not a service-specific error: defer to the core error tables.
    return AWSErrorMarshaller::FindErrorByName(errorName);
  }
  return serviceError;
}
// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
package dmltestgenerated
import (
json "encoding/json"
easyjson "github.com/mailru/easyjson"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
)
// suppress unused package warning
// (blank references keep the imports above alive even if the generated
// code below happens not to use every package directly)
var (
	_ *json.RawMessage
	_ *jlexer.Lexer
	_ *jwriter.Writer
	_ easyjson.Marshaler
)
// Decoder for ViewCustomerNoAutoIncrements: reads a JSON object whose only
// recognized key is "data" (a list of items); unknown keys are skipped.
// Generated by easyjson — do not edit by hand.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated(in *jlexer.Lexer, out *ViewCustomerNoAutoIncrements) {
	isTopLevel := in.IsStart()
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "data":
			if in.IsNull() {
				in.Skip()
				out.Data = nil
			} else {
				in.Delim('[')
				// Reuse the existing backing array when possible.
				if out.Data == nil {
					if !in.IsDelim(']') {
						out.Data = make([]*ViewCustomerNoAutoIncrement, 0, 8)
					} else {
						out.Data = []*ViewCustomerNoAutoIncrement{}
					}
				} else {
					out.Data = (out.Data)[:0]
				}
				for !in.IsDelim(']') {
					var v1 *ViewCustomerNoAutoIncrement
					if in.IsNull() {
						in.Skip()
						v1 = nil
					} else {
						if v1 == nil {
							v1 = new(ViewCustomerNoAutoIncrement)
						}
						(*v1).UnmarshalEasyJSON(in)
					}
					out.Data = append(out.Data, v1)
					in.WantComma()
				}
				in.Delim(']')
			}
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}

// Encoder for ViewCustomerNoAutoIncrements: omitempty semantics — "data" is
// written only when non-empty. Generated by easyjson — do not edit by hand.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated(out *jwriter.Writer, in ViewCustomerNoAutoIncrements) {
	out.RawByte('{')
	first := true
	_ = first
	if len(in.Data) != 0 {
		const prefix string = ",\"data\":"
		first = false
		out.RawString(prefix[1:])
		{
			out.RawByte('[')
			for v2, v3 := range in.Data {
				if v2 > 0 {
					out.RawByte(',')
				}
				if v3 == nil {
					out.RawString("null")
				} else {
					(*v3).MarshalEasyJSON(out)
				}
			}
			out.RawByte(']')
		}
	}
	out.RawByte('}')
}

// MarshalJSON supports json.Marshaler interface
func (v ViewCustomerNoAutoIncrements) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v ViewCustomerNoAutoIncrements) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *ViewCustomerNoAutoIncrements) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated(&r, v)
	return r.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *ViewCustomerNoAutoIncrements) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated(l, v)
}
// Decoder for a single ViewCustomerNoAutoIncrement row; recognized keys are
// "email", "firstname", "lastname", "city". Generated by easyjson.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated1(in *jlexer.Lexer, out *ViewCustomerNoAutoIncrement) {
	isTopLevel := in.IsStart()
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "email":
			// Email has its own UnmarshalJSON (custom null-aware type).
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Email).UnmarshalJSON(data))
			}
		case "firstname":
			out.Firstname = string(in.String())
		case "lastname":
			out.Lastname = string(in.String())
		case "city":
			out.City = string(in.String())
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}

// Encoder for ViewCustomerNoAutoIncrement: "email" is always written;
// string fields are omitted when empty. Generated by easyjson.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated1(out *jwriter.Writer, in ViewCustomerNoAutoIncrement) {
	out.RawByte('{')
	first := true
	_ = first
	if true {
		const prefix string = ",\"email\":"
		first = false
		out.RawString(prefix[1:])
		out.Raw((in.Email).MarshalJSON())
	}
	if in.Firstname != "" {
		const prefix string = ",\"firstname\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.String(string(in.Firstname))
	}
	if in.Lastname != "" {
		const prefix string = ",\"lastname\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.String(string(in.Lastname))
	}
	if in.City != "" {
		const prefix string = ",\"city\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.String(string(in.City))
	}
	out.RawByte('}')
}

// MarshalJSON supports json.Marshaler interface
func (v ViewCustomerNoAutoIncrement) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated1(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v ViewCustomerNoAutoIncrement) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated1(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *ViewCustomerNoAutoIncrement) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated1(&r, v)
	return r.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *ViewCustomerNoAutoIncrement) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated1(l, v)
}
// Decoder for ViewCustomerAutoIncrements (a "data" list wrapper).
// Generated by easyjson — do not edit by hand.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated2(in *jlexer.Lexer, out *ViewCustomerAutoIncrements) {
	isTopLevel := in.IsStart()
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "data":
			if in.IsNull() {
				in.Skip()
				out.Data = nil
			} else {
				in.Delim('[')
				// Reuse the existing backing array when possible.
				if out.Data == nil {
					if !in.IsDelim(']') {
						out.Data = make([]*ViewCustomerAutoIncrement, 0, 8)
					} else {
						out.Data = []*ViewCustomerAutoIncrement{}
					}
				} else {
					out.Data = (out.Data)[:0]
				}
				for !in.IsDelim(']') {
					var v4 *ViewCustomerAutoIncrement
					if in.IsNull() {
						in.Skip()
						v4 = nil
					} else {
						if v4 == nil {
							v4 = new(ViewCustomerAutoIncrement)
						}
						(*v4).UnmarshalEasyJSON(in)
					}
					out.Data = append(out.Data, v4)
					in.WantComma()
				}
				in.Delim(']')
			}
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}

// Encoder for ViewCustomerAutoIncrements: "data" written only when non-empty.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated2(out *jwriter.Writer, in ViewCustomerAutoIncrements) {
	out.RawByte('{')
	first := true
	_ = first
	if len(in.Data) != 0 {
		const prefix string = ",\"data\":"
		first = false
		out.RawString(prefix[1:])
		{
			out.RawByte('[')
			for v5, v6 := range in.Data {
				if v5 > 0 {
					out.RawByte(',')
				}
				if v6 == nil {
					out.RawString("null")
				} else {
					(*v6).MarshalEasyJSON(out)
				}
			}
			out.RawByte(']')
		}
	}
	out.RawByte('}')
}

// MarshalJSON supports json.Marshaler interface
func (v ViewCustomerAutoIncrements) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated2(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v ViewCustomerAutoIncrements) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated2(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *ViewCustomerAutoIncrements) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated2(&r, v)
	return r.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *ViewCustomerAutoIncrements) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated2(l, v)
}
// Decoder for a single ViewCustomerAutoIncrement row; recognized keys:
// "ceEntityID", "email", "firstname", "lastname", "city". Generated by easyjson.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated3(in *jlexer.Lexer, out *ViewCustomerAutoIncrement) {
	isTopLevel := in.IsStart()
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "ceEntityID":
			out.CeEntityID = uint32(in.Uint32())
		case "email":
			// Email has its own UnmarshalJSON (custom null-aware type).
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Email).UnmarshalJSON(data))
			}
		case "firstname":
			out.Firstname = string(in.String())
		case "lastname":
			out.Lastname = string(in.String())
		case "city":
			out.City = string(in.String())
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}

// Encoder for ViewCustomerAutoIncrement: "email" is always written; the
// numeric id and string fields are omitted at their zero values.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated3(out *jwriter.Writer, in ViewCustomerAutoIncrement) {
	out.RawByte('{')
	first := true
	_ = first
	if in.CeEntityID != 0 {
		const prefix string = ",\"ceEntityID\":"
		first = false
		out.RawString(prefix[1:])
		out.Uint32(uint32(in.CeEntityID))
	}
	if true {
		const prefix string = ",\"email\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.Raw((in.Email).MarshalJSON())
	}
	if in.Firstname != "" {
		const prefix string = ",\"firstname\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.String(string(in.Firstname))
	}
	if in.Lastname != "" {
		const prefix string = ",\"lastname\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.String(string(in.Lastname))
	}
	if in.City != "" {
		const prefix string = ",\"city\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.String(string(in.City))
	}
	out.RawByte('}')
}

// MarshalJSON supports json.Marshaler interface
func (v ViewCustomerAutoIncrement) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated3(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v ViewCustomerAutoIncrement) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated3(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *ViewCustomerAutoIncrement) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated3(&r, v)
	return r.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *ViewCustomerAutoIncrement) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated3(l, v)
}
// Decoder for SalesOrderStatusStates (a "data" list wrapper).
// Generated by easyjson — do not edit by hand.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated4(in *jlexer.Lexer, out *SalesOrderStatusStates) {
	isTopLevel := in.IsStart()
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "data":
			if in.IsNull() {
				in.Skip()
				out.Data = nil
			} else {
				in.Delim('[')
				// Reuse the existing backing array when possible.
				if out.Data == nil {
					if !in.IsDelim(']') {
						out.Data = make([]*SalesOrderStatusState, 0, 8)
					} else {
						out.Data = []*SalesOrderStatusState{}
					}
				} else {
					out.Data = (out.Data)[:0]
				}
				for !in.IsDelim(']') {
					var v7 *SalesOrderStatusState
					if in.IsNull() {
						in.Skip()
						v7 = nil
					} else {
						if v7 == nil {
							v7 = new(SalesOrderStatusState)
						}
						(*v7).UnmarshalEasyJSON(in)
					}
					out.Data = append(out.Data, v7)
					in.WantComma()
				}
				in.Delim(']')
			}
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}

// Encoder for SalesOrderStatusStates: "data" written only when non-empty.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated4(out *jwriter.Writer, in SalesOrderStatusStates) {
	out.RawByte('{')
	first := true
	_ = first
	if len(in.Data) != 0 {
		const prefix string = ",\"data\":"
		first = false
		out.RawString(prefix[1:])
		{
			out.RawByte('[')
			for v8, v9 := range in.Data {
				if v8 > 0 {
					out.RawByte(',')
				}
				if v9 == nil {
					out.RawString("null")
				} else {
					(*v9).MarshalEasyJSON(out)
				}
			}
			out.RawByte(']')
		}
	}
	out.RawByte('}')
}

// MarshalJSON supports json.Marshaler interface
func (v SalesOrderStatusStates) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated4(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v SalesOrderStatusStates) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated4(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *SalesOrderStatusStates) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated4(&r, v)
	return r.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *SalesOrderStatusStates) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated4(l, v)
}
// Decoder for a single SalesOrderStatusState; recognized keys:
// "status", "state", "isDefault", "visibleOnFront". Generated by easyjson.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated5(in *jlexer.Lexer, out *SalesOrderStatusState) {
	isTopLevel := in.IsStart()
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "status":
			out.Status = string(in.String())
		case "state":
			out.State = string(in.String())
		case "isDefault":
			out.IsDefault = bool(in.Bool())
		case "visibleOnFront":
			out.VisibleOnFront = uint32(in.Uint32())
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}

// Encoder for SalesOrderStatusState: every field is omitted at its zero value.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated5(out *jwriter.Writer, in SalesOrderStatusState) {
	out.RawByte('{')
	first := true
	_ = first
	if in.Status != "" {
		const prefix string = ",\"status\":"
		first = false
		out.RawString(prefix[1:])
		out.String(string(in.Status))
	}
	if in.State != "" {
		const prefix string = ",\"state\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.String(string(in.State))
	}
	if in.IsDefault {
		const prefix string = ",\"isDefault\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.Bool(bool(in.IsDefault))
	}
	if in.VisibleOnFront != 0 {
		const prefix string = ",\"visibleOnFront\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.Uint32(uint32(in.VisibleOnFront))
	}
	out.RawByte('}')
}

// MarshalJSON supports json.Marshaler interface
func (v SalesOrderStatusState) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated5(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v SalesOrderStatusState) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated5(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *SalesOrderStatusState) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated5(&r, v)
	return r.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *SalesOrderStatusState) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated5(l, v)
}
// Decoder for DmlgenTypesCollection (a "data" list wrapper).
// Generated by easyjson — do not edit by hand.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated6(in *jlexer.Lexer, out *DmlgenTypesCollection) {
	isTopLevel := in.IsStart()
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "data":
			if in.IsNull() {
				in.Skip()
				out.Data = nil
			} else {
				in.Delim('[')
				// Reuse the existing backing array when possible.
				if out.Data == nil {
					if !in.IsDelim(']') {
						out.Data = make([]*DmlgenTypes, 0, 8)
					} else {
						out.Data = []*DmlgenTypes{}
					}
				} else {
					out.Data = (out.Data)[:0]
				}
				for !in.IsDelim(']') {
					var v10 *DmlgenTypes
					if in.IsNull() {
						in.Skip()
						v10 = nil
					} else {
						if v10 == nil {
							v10 = new(DmlgenTypes)
						}
						(*v10).UnmarshalEasyJSON(in)
					}
					out.Data = append(out.Data, v10)
					in.WantComma()
				}
				in.Delim(']')
			}
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}

// Encoder for DmlgenTypesCollection: "data" written only when non-empty.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated6(out *jwriter.Writer, in DmlgenTypesCollection) {
	out.RawByte('{')
	first := true
	_ = first
	if len(in.Data) != 0 {
		const prefix string = ",\"data\":"
		first = false
		out.RawString(prefix[1:])
		{
			out.RawByte('[')
			for v11, v12 := range in.Data {
				if v11 > 0 {
					out.RawByte(',')
				}
				if v12 == nil {
					out.RawString("null")
				} else {
					(*v12).MarshalEasyJSON(out)
				}
			}
			out.RawByte(']')
		}
	}
	out.RawByte('}')
}

// MarshalJSON supports json.Marshaler interface
func (v DmlgenTypesCollection) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated6(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v DmlgenTypesCollection) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated6(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *DmlgenTypesCollection) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated6(&r, v)
	return r.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *DmlgenTypesCollection) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated6(l, v)
}
// easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated7 decodes a
// DmlgenTypes from the lexer. Unknown keys are skipped recursively; a JSON null
// value for ANY key is consumed and leaves the corresponding field untouched.
// Nullable/decimal/time-like columns are delegated to their own UnmarshalJSON via
// the raw token bytes; plain scalars are read directly from the lexer.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated7(in *jlexer.Lexer, out *DmlgenTypes) {
	isTopLevel := in.IsStart()
	// A top-level JSON null consumes the whole input and leaves *out unchanged.
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		// Null values are skipped for every key, keeping the field's prior value.
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "id":
			out.ID = int32(in.Int32())
		case "col_bigint_1":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColBigint1).UnmarshalJSON(data))
			}
		case "col_bigint_2":
			out.ColBigint2 = int64(in.Int64())
		case "col_bigint_3":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColBigint3).UnmarshalJSON(data))
			}
		case "col_bigint_4":
			out.ColBigint4 = uint64(in.Uint64())
		case "col_blob":
			// Byte slices arrive base64-encoded; explicit null clears the field.
			if in.IsNull() {
				in.Skip()
				out.ColBlob = nil
			} else {
				out.ColBlob = in.Bytes()
			}
		case "col_date_1":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColDate1).UnmarshalJSON(data))
			}
		case "col_date_2":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColDate2).UnmarshalJSON(data))
			}
		case "col_datetime_1":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColDatetime1).UnmarshalJSON(data))
			}
		case "col_datetime_2":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColDatetime2).UnmarshalJSON(data))
			}
		case "col_decimal_10_1":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColDecimal101).UnmarshalJSON(data))
			}
		case "col_decimal_12_4":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColDecimal124).UnmarshalJSON(data))
			}
		case "price_a_12_4":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.PriceA124).UnmarshalJSON(data))
			}
		case "price_b_12_4":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.PriceB124).UnmarshalJSON(data))
			}
		case "col_decimal_12_3":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColDecimal123).UnmarshalJSON(data))
			}
		case "col_decimal_20_6":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColDecimal206).UnmarshalJSON(data))
			}
		case "col_decimal_24_12":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColDecimal2412).UnmarshalJSON(data))
			}
		case "col_int_1":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColInt1).UnmarshalJSON(data))
			}
		case "col_int_2":
			out.ColInt2 = int32(in.Int32())
		case "col_int_3":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColInt3).UnmarshalJSON(data))
			}
		case "col_int_4":
			out.ColInt4 = uint32(in.Uint32())
		case "col_longtext_1":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColLongtext1).UnmarshalJSON(data))
			}
		case "col_longtext_2":
			out.ColLongtext2 = string(in.String())
		case "col_mediumblob":
			if in.IsNull() {
				in.Skip()
				out.ColMediumblob = nil
			} else {
				out.ColMediumblob = in.Bytes()
			}
		case "col_mediumtext_1":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColMediumtext1).UnmarshalJSON(data))
			}
		case "col_mediumtext_2":
			out.ColMediumtext2 = string(in.String())
		case "col_smallint_1":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColSmallint1).UnmarshalJSON(data))
			}
		case "col_smallint_2":
			out.ColSmallint2 = int32(in.Int32())
		case "col_smallint_3":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColSmallint3).UnmarshalJSON(data))
			}
		case "col_smallint_4":
			out.ColSmallint4 = uint32(in.Uint32())
		case "has_smallint_5":
			out.HasSmallint5 = bool(in.Bool())
		case "is_smallint_5":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.IsSmallint5).UnmarshalJSON(data))
			}
		case "col_text":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColText).UnmarshalJSON(data))
			}
		case "col_timestamp_1":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColTimestamp1).UnmarshalJSON(data))
			}
		case "col_timestamp_2":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColTimestamp2).UnmarshalJSON(data))
			}
		case "col_tinyint_1":
			out.ColTinyint1 = int32(in.Int32())
		case "col_varchar_1":
			out.ColVarchar1 = string(in.String())
		case "col_varchar_100":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColVarchar100).UnmarshalJSON(data))
			}
		case "col_varchar_16":
			out.ColVarchar16 = string(in.String())
		case "col_char_1":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ColChar1).UnmarshalJSON(data))
			}
		case "col_char_2":
			out.ColChar2 = string(in.String())
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}
// easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated7 writes the
// JSON object for a DmlgenTypes. Plain scalar fields carry omitempty semantics and
// are skipped at their zero value; nullable/decimal/time-like fields are always
// emitted via their own MarshalJSON. The per-field comma/prefix boilerplate of the
// generated original is hoisted into one closure; the emitted bytes are identical.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated7(out *jwriter.Writer, in DmlgenTypes) {
	out.RawByte('{')
	first := true
	// key writes the separating comma (except before the first emitted field)
	// followed by the quoted field name and a colon.
	key := func(name string) {
		if first {
			first = false
		} else {
			out.RawByte(',')
		}
		out.RawString(`"` + name + `":`)
	}
	if in.ID != 0 {
		key("id")
		out.Int32(int32(in.ID))
	}
	key("col_bigint_1")
	out.Raw(in.ColBigint1.MarshalJSON())
	if in.ColBigint2 != 0 {
		key("col_bigint_2")
		out.Int64(int64(in.ColBigint2))
	}
	key("col_bigint_3")
	out.Raw(in.ColBigint3.MarshalJSON())
	if in.ColBigint4 != 0 {
		key("col_bigint_4")
		out.Uint64(uint64(in.ColBigint4))
	}
	if len(in.ColBlob) != 0 {
		key("col_blob")
		out.Base64Bytes(in.ColBlob)
	}
	key("col_date_1")
	out.Raw(in.ColDate1.MarshalJSON())
	key("col_date_2")
	out.Raw(in.ColDate2.MarshalJSON())
	key("col_datetime_1")
	out.Raw(in.ColDatetime1.MarshalJSON())
	key("col_datetime_2")
	out.Raw(in.ColDatetime2.MarshalJSON())
	key("col_decimal_10_1")
	out.Raw(in.ColDecimal101.MarshalJSON())
	key("col_decimal_12_4")
	out.Raw(in.ColDecimal124.MarshalJSON())
	key("price_a_12_4")
	out.Raw(in.PriceA124.MarshalJSON())
	key("price_b_12_4")
	out.Raw(in.PriceB124.MarshalJSON())
	key("col_decimal_12_3")
	out.Raw(in.ColDecimal123.MarshalJSON())
	key("col_decimal_20_6")
	out.Raw(in.ColDecimal206.MarshalJSON())
	key("col_decimal_24_12")
	out.Raw(in.ColDecimal2412.MarshalJSON())
	key("col_int_1")
	out.Raw(in.ColInt1.MarshalJSON())
	if in.ColInt2 != 0 {
		key("col_int_2")
		out.Int32(int32(in.ColInt2))
	}
	key("col_int_3")
	out.Raw(in.ColInt3.MarshalJSON())
	if in.ColInt4 != 0 {
		key("col_int_4")
		out.Uint32(uint32(in.ColInt4))
	}
	key("col_longtext_1")
	out.Raw(in.ColLongtext1.MarshalJSON())
	if in.ColLongtext2 != "" {
		key("col_longtext_2")
		out.String(in.ColLongtext2)
	}
	if len(in.ColMediumblob) != 0 {
		key("col_mediumblob")
		out.Base64Bytes(in.ColMediumblob)
	}
	key("col_mediumtext_1")
	out.Raw(in.ColMediumtext1.MarshalJSON())
	if in.ColMediumtext2 != "" {
		key("col_mediumtext_2")
		out.String(in.ColMediumtext2)
	}
	key("col_smallint_1")
	out.Raw(in.ColSmallint1.MarshalJSON())
	if in.ColSmallint2 != 0 {
		key("col_smallint_2")
		out.Int32(int32(in.ColSmallint2))
	}
	key("col_smallint_3")
	out.Raw(in.ColSmallint3.MarshalJSON())
	if in.ColSmallint4 != 0 {
		key("col_smallint_4")
		out.Uint32(uint32(in.ColSmallint4))
	}
	if in.HasSmallint5 {
		key("has_smallint_5")
		out.Bool(in.HasSmallint5)
	}
	key("is_smallint_5")
	out.Raw(in.IsSmallint5.MarshalJSON())
	key("col_text")
	out.Raw(in.ColText.MarshalJSON())
	key("col_timestamp_1")
	out.Raw(in.ColTimestamp1.MarshalJSON())
	key("col_timestamp_2")
	out.Raw(in.ColTimestamp2.MarshalJSON())
	if in.ColTinyint1 != 0 {
		key("col_tinyint_1")
		out.Int32(int32(in.ColTinyint1))
	}
	if in.ColVarchar1 != "" {
		key("col_varchar_1")
		out.String(in.ColVarchar1)
	}
	key("col_varchar_100")
	out.Raw(in.ColVarchar100.MarshalJSON())
	if in.ColVarchar16 != "" {
		key("col_varchar_16")
		out.String(in.ColVarchar16)
	}
	key("col_char_1")
	out.Raw(in.ColChar1.MarshalJSON())
	if in.ColChar2 != "" {
		key("col_char_2")
		out.String(in.ColChar2)
	}
	out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v DmlgenTypes) MarshalJSON() ([]byte, error) {
	var buf jwriter.Writer
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated7(&buf, v)
	return buf.Buffer.BuildBytes(), buf.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v DmlgenTypes) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated7(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *DmlgenTypes) UnmarshalJSON(data []byte) error {
	lex := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated7(&lex, v)
	return lex.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *DmlgenTypes) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated7(l, v)
}
// easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated8 decodes a
// CustomerEntity from the lexer. Unknown keys are skipped; a JSON null value for
// any key leaves the field untouched. The nested "relations" object is decoded
// into a lazily allocated customerEntityRelations.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated8(in *jlexer.Lexer, out *CustomerEntity) {
	isTopLevel := in.IsStart()
	// A top-level JSON null consumes the input and leaves *out unchanged.
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		// Null values are skipped for every key, keeping the field's prior value.
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "entityID":
			out.EntityID = uint32(in.Uint32())
		case "websiteID":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.WebsiteID).UnmarshalJSON(data))
			}
		case "email":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Email).UnmarshalJSON(data))
			}
		case "groupID":
			out.GroupID = uint32(in.Uint32())
		case "storeID":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.StoreID).UnmarshalJSON(data))
			}
		case "createdAt":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.CreatedAt).UnmarshalJSON(data))
			}
		case "updatedAt":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.UpdatedAt).UnmarshalJSON(data))
			}
		case "isActive":
			out.IsActive = bool(in.Bool())
		case "createdIn":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.CreatedIn).UnmarshalJSON(data))
			}
		case "firstname":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Firstname).UnmarshalJSON(data))
			}
		case "lastname":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Lastname).UnmarshalJSON(data))
			}
		case "dob":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Dob).UnmarshalJSON(data))
			}
		case "rpToken":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.RpToken).UnmarshalJSON(data))
			}
		case "rpTokenCreatedAt":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.RpTokenCreatedAt).UnmarshalJSON(data))
			}
		case "defaultBilling":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.DefaultBilling).UnmarshalJSON(data))
			}
		case "defaultShipping":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.DefaultShipping).UnmarshalJSON(data))
			}
		case "gender":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Gender).UnmarshalJSON(data))
			}
		case "relations":
			// Explicit null clears the relations pointer; otherwise the nested
			// object is decoded in place, allocating on first use.
			if in.IsNull() {
				in.Skip()
				out.Relations = nil
			} else {
				if out.Relations == nil {
					out.Relations = new(customerEntityRelations)
				}
				easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated9(in, out.Relations)
			}
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}
// easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated8 writes the
// JSON object for a CustomerEntity. Plain scalars carry omitempty semantics and are
// skipped at their zero value; nullable fields always emit via their MarshalJSON;
// the optional "relations" object is emitted only when non-nil. The repeated
// comma/prefix boilerplate of the generated original is hoisted into one closure;
// the emitted bytes are identical.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated8(out *jwriter.Writer, in CustomerEntity) {
	out.RawByte('{')
	first := true
	// key writes the separating comma (except before the first emitted field)
	// followed by the quoted field name and a colon.
	key := func(name string) {
		if first {
			first = false
		} else {
			out.RawByte(',')
		}
		out.RawString(`"` + name + `":`)
	}
	if in.EntityID != 0 {
		key("entityID")
		out.Uint32(uint32(in.EntityID))
	}
	key("websiteID")
	out.Raw(in.WebsiteID.MarshalJSON())
	key("email")
	out.Raw(in.Email.MarshalJSON())
	if in.GroupID != 0 {
		key("groupID")
		out.Uint32(uint32(in.GroupID))
	}
	key("storeID")
	out.Raw(in.StoreID.MarshalJSON())
	key("createdAt")
	out.Raw(in.CreatedAt.MarshalJSON())
	key("updatedAt")
	out.Raw(in.UpdatedAt.MarshalJSON())
	if in.IsActive {
		key("isActive")
		out.Bool(in.IsActive)
	}
	key("createdIn")
	out.Raw(in.CreatedIn.MarshalJSON())
	key("firstname")
	out.Raw(in.Firstname.MarshalJSON())
	key("lastname")
	out.Raw(in.Lastname.MarshalJSON())
	key("dob")
	out.Raw(in.Dob.MarshalJSON())
	key("rpToken")
	out.Raw(in.RpToken.MarshalJSON())
	key("rpTokenCreatedAt")
	out.Raw(in.RpTokenCreatedAt.MarshalJSON())
	key("defaultBilling")
	out.Raw(in.DefaultBilling.MarshalJSON())
	key("defaultShipping")
	out.Raw(in.DefaultShipping.MarshalJSON())
	key("gender")
	out.Raw(in.Gender.MarshalJSON())
	if in.Relations != nil {
		key("relations")
		easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated9(out, *in.Relations)
	}
	out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v CustomerEntity) MarshalJSON() ([]byte, error) {
	var buf jwriter.Writer
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated8(&buf, v)
	return buf.Buffer.BuildBytes(), buf.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v CustomerEntity) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated8(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *CustomerEntity) UnmarshalJSON(data []byte) error {
	lex := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated8(&lex, v)
	return lex.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *CustomerEntity) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated8(l, v)
}
// easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated9 decodes a
// customerEntityRelations object, whose only known key is
// "customerAddressEntities"; all other keys are skipped.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated9(in *jlexer.Lexer, out *customerEntityRelations) {
	isTopLevel := in.IsStart()
	// A top-level JSON null consumes the input and leaves *out unchanged.
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		// Null values are skipped for every key, keeping the field's prior value.
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "customerAddressEntities":
			// Explicit null clears the pointer; otherwise decode in place,
			// allocating the collection on first use.
			if in.IsNull() {
				in.Skip()
				out.CustomerAddressEntities = nil
			} else {
				if out.CustomerAddressEntities == nil {
					out.CustomerAddressEntities = new(CustomerAddressEntities)
				}
				(*out.CustomerAddressEntities).UnmarshalEasyJSON(in)
			}
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}
// easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated9 writes the
// JSON object for customerEntityRelations; the single key is omitted when the
// address collection pointer is nil.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated9(out *jwriter.Writer, in customerEntityRelations) {
	out.RawByte('{')
	if in.CustomerAddressEntities != nil {
		out.RawString("\"customerAddressEntities\":")
		in.CustomerAddressEntities.MarshalEasyJSON(out)
	}
	out.RawByte('}')
}
// easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated10 decodes a
// CustomerEntities collection: the optional "data" array of (possibly null)
// entities; every other key is skipped.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated10(in *jlexer.Lexer, out *CustomerEntities) {
	isTopLevel := in.IsStart()
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		if key == "data" {
			if in.IsNull() {
				in.Skip()
				out.Data = nil
			} else {
				in.Delim('[')
				// Reuse an existing backing array; otherwise allocate according
				// to whether the JSON array is empty.
				switch {
				case out.Data != nil:
					out.Data = out.Data[:0]
				case in.IsDelim(']'):
					out.Data = []*CustomerEntity{}
				default:
					out.Data = make([]*CustomerEntity, 0, 8)
				}
				for !in.IsDelim(']') {
					var elem *CustomerEntity
					if in.IsNull() {
						in.Skip()
					} else {
						elem = new(CustomerEntity)
						elem.UnmarshalEasyJSON(in)
					}
					out.Data = append(out.Data, elem)
					in.WantComma()
				}
				in.Delim(']')
			}
		} else {
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}
// easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated10 writes the
// JSON object for a CustomerEntities collection; "data" is omitted when empty and
// nil elements serialize as null.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated10(out *jwriter.Writer, in CustomerEntities) {
	out.RawByte('{')
	if len(in.Data) != 0 {
		out.RawString("\"data\":")
		out.RawByte('[')
		for idx, elem := range in.Data {
			if idx != 0 {
				out.RawByte(',')
			}
			if elem == nil {
				out.RawString("null")
			} else {
				elem.MarshalEasyJSON(out)
			}
		}
		out.RawByte(']')
	}
	out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v CustomerEntities) MarshalJSON() ([]byte, error) {
	var buf jwriter.Writer
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated10(&buf, v)
	return buf.Buffer.BuildBytes(), buf.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v CustomerEntities) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated10(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *CustomerEntities) UnmarshalJSON(data []byte) error {
	lex := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated10(&lex, v)
	return lex.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *CustomerEntities) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated10(l, v)
}
// easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated11 decodes a
// CustomerAddressEntity from the lexer. Unknown keys are skipped; a JSON null
// value for any key leaves the field untouched. Nullable columns are delegated to
// their own UnmarshalJSON via the raw token bytes.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated11(in *jlexer.Lexer, out *CustomerAddressEntity) {
	isTopLevel := in.IsStart()
	// A top-level JSON null consumes the input and leaves *out unchanged.
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		// Null values are skipped for every key, keeping the field's prior value.
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "entityID":
			out.EntityID = uint32(in.Uint32())
		case "incrementID":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.IncrementID).UnmarshalJSON(data))
			}
		case "parentID":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.ParentID).UnmarshalJSON(data))
			}
		case "createdAt":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.CreatedAt).UnmarshalJSON(data))
			}
		case "updatedAt":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.UpdatedAt).UnmarshalJSON(data))
			}
		case "isActive":
			out.IsActive = bool(in.Bool())
		case "city":
			out.City = string(in.String())
		case "company":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Company).UnmarshalJSON(data))
			}
		case "countryID":
			out.CountryID = string(in.String())
		case "firstname":
			out.Firstname = string(in.String())
		case "lastname":
			out.Lastname = string(in.String())
		case "postcode":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Postcode).UnmarshalJSON(data))
			}
		case "region":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Region).UnmarshalJSON(data))
			}
		case "street":
			out.Street = string(in.String())
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}
// easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated11 writes the
// JSON object for a CustomerAddressEntity. Plain scalars carry omitempty semantics
// and are skipped at their zero value; nullable fields always emit via their
// MarshalJSON. The repeated comma/prefix boilerplate of the generated original is
// hoisted into one closure; the emitted bytes are identical.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated11(out *jwriter.Writer, in CustomerAddressEntity) {
	out.RawByte('{')
	first := true
	// key writes the separating comma (except before the first emitted field)
	// followed by the quoted field name and a colon.
	key := func(name string) {
		if first {
			first = false
		} else {
			out.RawByte(',')
		}
		out.RawString(`"` + name + `":`)
	}
	if in.EntityID != 0 {
		key("entityID")
		out.Uint32(uint32(in.EntityID))
	}
	key("incrementID")
	out.Raw(in.IncrementID.MarshalJSON())
	key("parentID")
	out.Raw(in.ParentID.MarshalJSON())
	key("createdAt")
	out.Raw(in.CreatedAt.MarshalJSON())
	key("updatedAt")
	out.Raw(in.UpdatedAt.MarshalJSON())
	if in.IsActive {
		key("isActive")
		out.Bool(in.IsActive)
	}
	if in.City != "" {
		key("city")
		out.String(in.City)
	}
	key("company")
	out.Raw(in.Company.MarshalJSON())
	if in.CountryID != "" {
		key("countryID")
		out.String(in.CountryID)
	}
	if in.Firstname != "" {
		key("firstname")
		out.String(in.Firstname)
	}
	if in.Lastname != "" {
		key("lastname")
		out.String(in.Lastname)
	}
	key("postcode")
	out.Raw(in.Postcode.MarshalJSON())
	key("region")
	out.Raw(in.Region.MarshalJSON())
	if in.Street != "" {
		key("street")
		out.String(in.Street)
	}
	out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v CustomerAddressEntity) MarshalJSON() ([]byte, error) {
	var buf jwriter.Writer
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated11(&buf, v)
	return buf.Buffer.BuildBytes(), buf.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v CustomerAddressEntity) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated11(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *CustomerAddressEntity) UnmarshalJSON(data []byte) error {
	lex := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated11(&lex, v)
	return lex.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *CustomerAddressEntity) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated11(l, v)
}
// easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated12 decodes a
// CustomerAddressEntities collection: the optional "data" array of (possibly null)
// address entities; every other key is skipped.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated12(in *jlexer.Lexer, out *CustomerAddressEntities) {
	isTopLevel := in.IsStart()
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		if key == "data" {
			if in.IsNull() {
				in.Skip()
				out.Data = nil
			} else {
				in.Delim('[')
				// Reuse an existing backing array; otherwise allocate according
				// to whether the JSON array is empty.
				switch {
				case out.Data != nil:
					out.Data = out.Data[:0]
				case in.IsDelim(']'):
					out.Data = []*CustomerAddressEntity{}
				default:
					out.Data = make([]*CustomerAddressEntity, 0, 8)
				}
				for !in.IsDelim(']') {
					var elem *CustomerAddressEntity
					if in.IsNull() {
						in.Skip()
					} else {
						elem = new(CustomerAddressEntity)
						elem.UnmarshalEasyJSON(in)
					}
					out.Data = append(out.Data, elem)
					in.WantComma()
				}
				in.Delim(']')
			}
		} else {
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}
// easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated12 writes the
// JSON object for a CustomerAddressEntities collection; "data" is omitted when
// empty and nil elements serialize as null.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated12(out *jwriter.Writer, in CustomerAddressEntities) {
	out.RawByte('{')
	if len(in.Data) != 0 {
		out.RawString("\"data\":")
		out.RawByte('[')
		for idx, elem := range in.Data {
			if idx != 0 {
				out.RawByte(',')
			}
			if elem == nil {
				out.RawString("null")
			} else {
				elem.MarshalEasyJSON(out)
			}
		}
		out.RawByte(']')
	}
	out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v CustomerAddressEntities) MarshalJSON() ([]byte, error) {
	var buf jwriter.Writer
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated12(&buf, v)
	return buf.Buffer.BuildBytes(), buf.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v CustomerAddressEntities) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated12(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *CustomerAddressEntities) UnmarshalJSON(data []byte) error {
	lex := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated12(&lex, v)
	return lex.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *CustomerAddressEntities) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated12(l, v)
}
// easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated13 decodes a
// CoreConfigurations collection: the optional "data" array of (possibly null)
// configuration rows; every other key is skipped.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated13(in *jlexer.Lexer, out *CoreConfigurations) {
	isTopLevel := in.IsStart()
	if in.IsNull() {
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if in.IsNull() {
			in.Skip()
			in.WantComma()
			continue
		}
		if key == "data" {
			if in.IsNull() {
				in.Skip()
				out.Data = nil
			} else {
				in.Delim('[')
				// Reuse an existing backing array; otherwise allocate according
				// to whether the JSON array is empty.
				switch {
				case out.Data != nil:
					out.Data = out.Data[:0]
				case in.IsDelim(']'):
					out.Data = []*CoreConfiguration{}
				default:
					out.Data = make([]*CoreConfiguration, 0, 8)
				}
				for !in.IsDelim(']') {
					var elem *CoreConfiguration
					if in.IsNull() {
						in.Skip()
					} else {
						elem = new(CoreConfiguration)
						elem.UnmarshalEasyJSON(in)
					}
					out.Data = append(out.Data, elem)
					in.WantComma()
				}
				in.Delim(']')
			}
		} else {
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}
// easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated13 writes the
// JSON object for a CoreConfigurations collection; "data" is omitted when empty
// and nil elements serialize as null.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated13(out *jwriter.Writer, in CoreConfigurations) {
	out.RawByte('{')
	if len(in.Data) != 0 {
		out.RawString("\"data\":")
		out.RawByte('[')
		for idx, elem := range in.Data {
			if idx != 0 {
				out.RawByte(',')
			}
			if elem == nil {
				out.RawString("null")
			} else {
				elem.MarshalEasyJSON(out)
			}
		}
		out.RawByte(']')
	}
	out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v CoreConfigurations) MarshalJSON() ([]byte, error) {
	var buf jwriter.Writer
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated13(&buf, v)
	return buf.Buffer.BuildBytes(), buf.Error
}

// MarshalEasyJSON supports easyjson.Marshaler interface
func (v CoreConfigurations) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated13(w, v)
}

// UnmarshalJSON supports json.Unmarshaler interface
func (v *CoreConfigurations) UnmarshalJSON(data []byte) error {
	lex := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated13(&lex, v)
	return lex.Error()
}

// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *CoreConfigurations) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated13(l, v)
}
// easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated14 decodes a
// JSON object into a CoreConfiguration. Unknown keys are skipped recursively;
// a JSON null for a known key leaves that field at its current value.
// Generated easyjson decoder; do not edit by hand.
func easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated14(in *jlexer.Lexer, out *CoreConfiguration) {
	isTopLevel := in.IsStart()
	if in.IsNull() {
		// A bare null input decodes to nothing; consume it and return.
		if isTopLevel {
			in.Consumed()
		}
		in.Skip()
		return
	}
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if in.IsNull() {
			// null value: skip it and keep the field's zero/previous value.
			in.Skip()
			in.WantComma()
			continue
		}
		switch key {
		case "config_id":
			out.ConfigID = uint32(in.Uint32())
		case "scope":
			out.Scope = string(in.String())
		case "scope_id":
			out.ScopeID = int32(in.Int32())
		case "expires":
			// Null-aware wrapper types get the raw JSON and parse it themselves.
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Expires).UnmarshalJSON(data))
			}
		case "x_path":
			out.Path = string(in.String())
		case "value":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.Value).UnmarshalJSON(data))
			}
		case "version_ts":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.VersionTs).UnmarshalJSON(data))
			}
		case "version_te":
			if data := in.Raw(); in.Ok() {
				in.AddError((out.VersionTe).UnmarshalJSON(data))
			}
		default:
			in.SkipRecursive()
		}
		in.WantComma()
	}
	in.Delim('}')
	if isTopLevel {
		in.Consumed()
	}
}
// easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated14 encodes a
// CoreConfiguration as a JSON object. Scalar fields are omitted when they hold
// their zero value (omitempty); the wrapper fields (expires, value, version_ts,
// version_te) are always written via their own MarshalJSON, which may emit null.
// Generated easyjson encoder; do not edit by hand.
func easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated14(out *jwriter.Writer, in CoreConfiguration) {
	out.RawByte('{')
	first := true
	_ = first
	if in.ConfigID != 0 {
		const prefix string = ",\"config_id\":"
		first = false
		// First possible field: strip the leading comma from the prefix.
		out.RawString(prefix[1:])
		out.Uint32(uint32(in.ConfigID))
	}
	if in.Scope != "" {
		const prefix string = ",\"scope\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.String(string(in.Scope))
	}
	if in.ScopeID != 0 {
		const prefix string = ",\"scope_id\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.Int32(int32(in.ScopeID))
	}
	// `if true`: this field is always serialized, regardless of value.
	if true {
		const prefix string = ",\"expires\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.Raw((in.Expires).MarshalJSON())
	}
	if in.Path != "" {
		const prefix string = ",\"x_path\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.String(string(in.Path))
	}
	if true {
		const prefix string = ",\"value\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.Raw((in.Value).MarshalJSON())
	}
	if true {
		const prefix string = ",\"version_ts\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.Raw((in.VersionTs).MarshalJSON())
	}
	if true {
		const prefix string = ",\"version_te\":"
		if first {
			first = false
			out.RawString(prefix[1:])
		} else {
			out.RawString(prefix)
		}
		out.Raw((in.VersionTe).MarshalJSON())
	}
	out.RawByte('}')
}
// MarshalJSON supports the encoding/json.Marshaler interface by delegating to
// the generated easyjson encoder and returning the buffered bytes together
// with any accumulated write error.
func (v CoreConfiguration) MarshalJSON() ([]byte, error) {
	w := jwriter.Writer{}
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated14(&w, v)
	return w.Buffer.BuildBytes(), w.Error
}
// MarshalEasyJSON supports the easyjson.Marshaler interface; it streams the
// value directly into the caller-supplied writer without extra allocation.
func (v CoreConfiguration) MarshalEasyJSON(w *jwriter.Writer) {
	easyjson4b0a353eEncodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated14(w, v)
}
// UnmarshalJSON supports the encoding/json.Unmarshaler interface by running
// the generated easyjson decoder over the raw bytes and reporting the lexer's
// first recorded error, if any.
func (v *CoreConfiguration) UnmarshalJSON(data []byte) error {
	r := jlexer.Lexer{Data: data}
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated14(&r, v)
	return r.Error()
}
// UnmarshalEasyJSON supports the easyjson.Unmarshaler interface; decode errors
// accumulate on the lexer owned by the caller.
func (v *CoreConfiguration) UnmarshalEasyJSON(l *jlexer.Lexer) {
	easyjson4b0a353eDecodeGithubComCorestoreioPkgSqlDmlgenDmltestgenerated14(l, v)
}
| corestoreio/csfw | sql/dmlgen/dmltestgenerated/dmltestgenerated_easyjson.go | GO | apache-2.0 | 58,427 |
/*
* Copyright 2012 The Kuali Foundation.
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.kfs.sys.businessobject.format;
import java.math.BigDecimal;
import org.apache.log4j.Logger;
import org.kuali.rice.core.api.util.type.KualiDecimal;
import org.kuali.rice.core.web.format.BigDecimalFormatter;
/**
* This class is used to format explicit decimal value to BigDecimal objects.
*/
public class ExplicitKualiDecimalFormatter extends BigDecimalFormatter {
    private static final Logger LOG = Logger.getLogger(ExplicitKualiDecimalFormatter.class);

    /**
     * Converts the given String into a KualiDecimal with the final two digits
     * interpreted as the fractional part (e.g. {@code "12345"} becomes {@code 123.45}).
     *
     * @param target the raw amount string, with an implied two-digit decimal
     * @return a {@link KualiDecimal} holding the parsed value
     */
    @Override
    protected Object convertToObject(String target) {
        BigDecimal value = (BigDecimal) super.convertToObject(addDecimalPoint(target));
        return new KualiDecimal(value);
    }

    /**
     * Inserts a decimal point before the last two digits of the String, if it
     * does not already contain one.
     *
     * Fix: the previous implementation threw StringIndexOutOfBoundsException
     * for inputs with fewer than two digits (e.g. "5") and produced invalid
     * output such as ".-5" for short signed amounts. Digits are now zero-padded
     * after the optional sign, so "5" becomes "0.05" and "-5" becomes "-0.05".
     *
     * @param amount the String representing the amount (implied two decimals)
     * @return a new String with a decimal point inserted before the last two digits
     */
    private String addDecimalPoint(String amount) {
        if (!amount.contains(".")) { // have to add decimal point if it's missing
            // Separate an optional leading sign so padding applies to digits only.
            String sign = "";
            String digits = amount;
            if (digits.startsWith("-") || digits.startsWith("+")) {
                sign = digits.substring(0, 1);
                digits = digits.substring(1);
            }
            // Pad so there are always at least two digits behind the decimal point.
            while (digits.length() < 2) {
                digits = "0" + digits;
            }
            int length = digits.length();
            amount = sign + digits.substring(0, length - 2) + "." + digits.substring(length - 2, length);
        }
        return amount;
    }
}
| Ariah-Group/Finance | af_webapp/src/main/java/org/kuali/kfs/sys/businessobject/format/ExplicitKualiDecimalFormatter.java | Java | apache-2.0 | 1,889 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.serialization;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.lightbend.lagom.javadsl.immutable.ImmutableStyle;
import org.immutables.value.Value;
import org.immutables.value.Value.Parameter;
@Value.Immutable
@ImmutableStyle
@JsonDeserialize(as = Event2.class)
public interface AbstractEvent2 extends Jsonable {

  /**
   * First payload field; renamed from {@code field1} in the V1 event shape
   * (used to exercise serializer schema evolution).
   */
  @Parameter
  String getField1V2(); // renamed from field1

  /** Second payload field, introduced as a new mandatory field. */
  @Parameter
  int getField2(); // new mandatory field
}
| lagom/lagom | jackson/src/test/java/com/lightbend/lagom/serialization/AbstractEvent2.java | Java | apache-2.0 | 559 |
package eu.darken.myolib.processor.classifier;
import eu.darken.myolib.processor.BaseDataPacket;
import eu.darken.myolib.tools.ByteHelper;
/**
 * Classifier event carrying the outcome of a Myo warm-up cycle, parsed from a
 * raw {@link BaseDataPacket}.
 */
public class WarmUpResultClassifierEvent extends ClassifierEvent {

    /**
     * Possible warm-up results for Myo.
     */
    public enum WarmUpResult {
        UNKNOWN((byte) 0x00), SUCCESS((byte) 0x01), FAILED_TIMEOUT((byte) 0x02);

        private final byte mValue;

        WarmUpResult(byte value) {
            mValue = value;
        }

        public byte getValue() {
            return mValue;
        }

        /**
         * Resolves a raw wire value to its enum constant.
         *
         * @param value unsigned byte read from the classifier packet
         * @return the matching constant, or {@code null} if the value is not recognized
         */
        public static WarmUpResult fromValue(int value) {
            for (WarmUpResult result : values()) {
                if (result.getValue() == value) {
                    return result;
                }
            }
            return null;
        }
    }

    private WarmUpResult mWarmUpResult;

    /**
     * Parses a warm-up-result event from the given packet.
     *
     * @param packet packet whose first byte must be {@link Type#WARM_UP_RESULT}
     * @throws RuntimeException if the packet carries a different event type
     */
    public WarmUpResultClassifierEvent(BaseDataPacket packet) {
        super(packet, Type.WARM_UP_RESULT);
        ByteHelper byteHelper = new ByteHelper(packet.getData());
        int typeValue = byteHelper.getUInt8();
        if (getType().getValue() != typeValue)
            throw new RuntimeException("Incompatible BaseDataPacket:" + typeValue);
        // Remains null when the device reports a value we don't know about,
        // matching the original behavior.
        mWarmUpResult = WarmUpResult.fromValue(byteHelper.getUInt8());
    }

    /** @return the parsed result, or {@code null} for unrecognized wire values */
    public WarmUpResult getWarmUpResult() {
        return mWarmUpResult;
    }

    public void setWarmUpResult(WarmUpResult warmUpResult) {
        mWarmUpResult = warmUpResult;
    }
}
| d4rken/myolib | myolib/src/main/java/eu/darken/myolib/processor/classifier/WarmUpResultClassifierEvent.java | Java | apache-2.0 | 1,487 |
# frozen_string_literal: true
require 'select2-rails'
require 'nest'
require 'redis-namespace'
require 'mailboxer'
require 'carrierwave'
require 'rails_autolink'
require 'font-awesome-rails'
require 'tinymce-rails'
require 'blacklight'
require 'blacklight/gallery'
require 'iiif_manifest'
require 'noid-rails'
require 'hydra/head'
require 'hydra-editor'
require 'browse-everything'
require 'hydra/works'
require 'hyrax/engine'
require 'hyrax/version'
require 'hyrax/inflections'
require 'hyrax/name'
require 'hyrax/valkyrie_can_can_adapter'
require 'kaminari_route_prefix'
require 'valkyrie/indexing_adapter'
require 'valkyrie/indexing/solr/indexing_adapter'
require 'valkyrie/indexing/null_indexing_adapter'
##
# Hyrax is a Ruby on Rails Engine built by the Samvera community. The engine
# provides a foundation for creating many different digital repository
# applications.
#
# @see https://samvera.org Samvera Community
# @see https://guides.rubyonrails.org/engines.html Rails Guides: Getting Started with Engines
module Hyrax
  extend ActiveSupport::Autoload

  # Eager-load the engine's core components so they are resolvable as soon as
  # the application boots (ActiveSupport::Autoload).
  eager_autoload do
    autoload :Arkivo
    autoload :Collections
    autoload :Configuration
    autoload :ControlledVocabularies
    autoload :EventStore
    autoload :RedisEventStore
    autoload :ResourceSync
    autoload :Zotero
    autoload :Listeners
  end

  ##
  # Coerces +input+ into a GlobalID. Valkyrie resources that cannot produce
  # their own GlobalID are wrapped in a ValkyrieGlobalIdProxy; any other input
  # yields its own GlobalID when it responds to +to_global_id+, else nil.
  #
  # @return [GlobalID]
  # @see https://github.com/rails/globalid
  def self.GlobalID(input) # rubocop:disable Naming/MethodName
    case input
    when Valkyrie::Resource
      return input.to_global_id if input.respond_to?(:to_global_id)
      ValkyrieGlobalIdProxy.new(resource: input).to_global_id
    else
      input.to_global_id if input.respond_to?(:to_global_id)
    end
  end

  ##
  # @api public
  #
  # Exposes the Hyrax configuration
  #
  # @yield [Hyrax::Configuration] if a block is passed
  # @return [Hyrax::Configuration]
  # @see Hyrax::Configuration for configuration options
  def self.config(&block)
    # Memoize a single Configuration instance for the process.
    @config ||= Hyrax::Configuration.new
    yield @config if block
    @config
  end

  ##
  # @return [Logger] the shared logger (defaults to Valkyrie's)
  def self.logger
    @logger ||= Valkyrie.logger
  end

  # The first configured curation concern is treated as the primary work type.
  def self.primary_work_type
    config.curation_concerns.first
  end

  ##
  # @return [Valkyrie::IndexingAdapter]
  def self.index_adapter
    config.index_adapter
  end

  ##
  # @return [Dry::Events::Publisher]
  def self.publisher
    config.publisher
  end

  ##
  # The Valkyrie persister used for PCDM models throughout Hyrax
  #
  # @note always use this method to retrieve the persister when data
  #   interoperability with Hyrax is required
  def self.persister
    metadata_adapter.persister
  end

  ##
  # The Valkyrie metadata adapter used for PCDM models throughout Hyrax
  #
  # @note always use this method to retrieve the metadata adapter when data
  #   interoperability with Hyrax is required
  def self.metadata_adapter
    Valkyrie.config.metadata_adapter
  end

  ##
  # The Valkyrie storage_adapter used for PCDM files throughout Hyrax
  #
  # @note always use this method to retrieve the storage adapter when handling
  #   files that will be used by Hyrax
  def self.storage_adapter
    Valkyrie.config.storage_adapter
  end

  ##
  # The Valkyrie query service used for PCDM files throughout Hyrax
  #
  # @note always use this method to retrieve the query service when data
  #   interoperability with Hyrax is required
  def self.query_service
    metadata_adapter.query_service
  end

  ##
  # The custom queries common to Hyrax, registered on the query service.
  def self.custom_queries
    query_service.custom_queries
  end
end
| samvera/hyrax | lib/hyrax.rb | Ruby | apache-2.0 | 3,563 |
package org.aisen.download;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import org.aisen.download.core.DownloadInfo;
import org.aisen.download.utils.Constants;
import org.aisen.download.utils.DLogger;
import java.util.Vector;
/**
* 用于刷新UI
*
* Created by wangdan on 16/6/14.
*/
public final class DownloadController {

    static final String TAG = Constants.TAG + "_DownloadController";

    // All registered observers. Mutation is guarded by "this" (synchronized
    // register/unregister); publishers iterate over a snapshot, see below.
    private final Vector<IDownloadSubject> mDownloadProxy = new Vector<>();

    // Marshals publications posted from worker threads onto the main looper.
    private final Handler mHandler = new Handler(Looper.getMainLooper()) {

        @Override
        public void handleMessage(Message msg) {
            super.handleMessage(msg);

            if (msg.what == 0) {
                DownloadMsg downloadMsg = (DownloadMsg) msg.getData().getSerializable("msg");

                publishDownload(downloadMsg);
            }
        }

    };

    DownloadController() {

    }

    /**
     * Registers an observer. Null and already-registered observers are ignored.
     */
    public synchronized void register(IDownloadSubject callback) {
        if (callback == null) {
            return;
        }

        if (!mDownloadProxy.contains(callback)) {
            mDownloadProxy.add(callback);

            DLogger.v(TAG, "register proxy[%s]", callback.toString());
        }
    }

    /**
     * Unregisters a previously registered observer. Unknown observers are ignored.
     */
    public synchronized void unregister(IDownloadSubject callback) {
        if (callback == null) {
            return;
        }

        boolean removed = mDownloadProxy.remove(callback);
        if (removed) {
            DLogger.v(TAG, "unregister proxy[%s]", callback.toString());
        }
    }

    /**
     * Publishes a message to every registered observer on the main thread.
     * Calls arriving on other threads are re-posted through {@link #mHandler}.
     */
    void publishDownload(DownloadMsg downloadMsg) {
        if (Looper.myLooper() == Looper.getMainLooper()) {
            // Fix: iterate over a snapshot instead of the live Vector. The
            // enhanced-for iterator is not protected by the Vector's internal
            // lock across the whole loop, so a concurrent register()/unregister()
            // could throw ConcurrentModificationException mid-publication.
            final IDownloadSubject[] subjects =
                    mDownloadProxy.toArray(new IDownloadSubject[0]);
            for (IDownloadSubject proxy : subjects) {
                proxy.publish(downloadMsg);
            }
        }
        else {
            Message message = mHandler.obtainMessage();
            message.what = 0;
            message.getData().putSerializable("msg", downloadMsg);
            message.sendToTarget();
        }
    }

    void publishDownload(DownloadInfo downloadInfo) {
        publishDownload(new DownloadMsg(downloadInfo));
    }

}
| wangdan/DownloadManager | downloader/src/main/java/org/aisen/download/DownloadController.java | Java | apache-2.0 | 2,163 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the
* NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*/
package com.ricemap.spateDB.core;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Array;
import java.lang.reflect.Field;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import java.util.Stack;
import java.util.Vector;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.IndexedSortable;
import org.apache.hadoop.util.IndexedSorter;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.PriorityQueue;
import org.apache.hadoop.util.QuickSort;
import com.ricemap.spateDB.io.MemoryInputStream;
import com.ricemap.spateDB.io.Text2;
import com.ricemap.spateDB.shape.Point3d;
import com.ricemap.spateDB.shape.Prism;
import com.ricemap.spateDB.shape.Shape;
/**
* An RTree loaded in bulk and never changed after that. It cannot by
* dynamically manipulated by either insertion or deletion. It only works with
* 2-dimensional objects (keys).
*
* @author tonyren, eldawy
*
*/
public class RTree<T extends Shape> implements Writable, Iterable<T> {
public static enum FIELD_TYPE{NULL, Integer, Long, Double};
/** Logger */
private static final Log LOG = LogFactory.getLog(RTree.class);
/** Size of tree header on disk. Height + Degree + Number of records + isColumnar*/
public static final int TreeHeaderSize = 4 + 4 + 4 + 4;
/** Size of a node. Offset of first child + dimensions (x, y, width, height) */
public static final int NodeSize = 4 + 8 * 6;
/** t, x ,y */
public static final int IndexUnitSize = 8 * 3;
/** An instance of T that can be used to deserialize objects from disk */
T stockObject;
public boolean columnar;
/** Height of the tree (number of levels) */
private int height;
/** Degree of internal nodes in the tree */
private int degree;
/** Total number of nodes in the tree */
private int nodeCount;
/** Number of leaf nodes */
private int leafNodeCount;
/** Number of non-leaf nodes */
private int nonLeafNodeCount;
/** Number of elements in the tree */
private int elementCount;
/** An input stream that is used to read node structure (i.e., nodes) */
private FSDataInputStream structure;
/** Input stream to tree data */
private FSDataInputStream data;
/** The start offset of the tree in the data stream */
private long treeStartOffset;
/**
* Total tree size (header + structure + data) used to read the data in the
* last leaf node correctly
*/
private int treeSize;
	/**
	 * No-arg constructor: required so the tree can be instantiated empty and
	 * then populated via {@link #readFields(DataInput)} (Hadoop Writable contract).
	 */
	public RTree() {
	}
/**
* Builds the RTree given a serialized list of elements. It uses the given
* stockObject to deserialize these elements and build the tree. Also writes
* the created tree to the disk directly.
*
* @param elements
* - serialization of elements to be written
* @param offset
* - index of the first element to use in the elements array
* @param len
* - number of bytes to user from the elements array
* @param bytesAvailable
* - size available (in bytes) to store the tree structures
* @param dataOut
* - an output to use for writing the tree to
* @param fast_sort
* - setting this to <code>true</code> allows the method to run
* faster by materializing the offset of each element in the list
* which speeds up the comparison. However, this requires an
* additional 16 bytes per element. So, for each 1M elements, the
* method will require an additional 16 M bytes (approximately).
*/
	public void bulkLoadWrite(final byte[] element_bytes, final int offset,
			final int len, final int degree, DataOutput dataOut,
			final boolean fast_sort, final boolean columnarStorage) {
		try {
			columnar = columnarStorage;
			// TODO: the order of fields should be stable under Oracle JVM, but not guaranteed
			Field[] fields = stockObject.getClass().getDeclaredFields();

			// Pass 1: count the number of elements (one per text line) in the
			// given byte range.
			int i_start = offset;
			final Text line = new Text();
			while (i_start < offset + len) {
				int i_end = skipToEOL(element_bytes, i_start);
				// Extract the line without end of line character
				line.set(element_bytes, i_start, i_end - i_start - 1);
				stockObject.fromText(line);

				elementCount++;
				i_start = i_end;
			}
			LOG.info("Bulk loading an RTree with " + elementCount + " elements");

			// It turns out the findBestDegree returns the best degree when the
			// whole
			// tree is loaded to memory when processed. However, as current
			// algorithms
			// process the tree while it's on disk, a higher degree should be
			// selected
			// such that a node fits one file block (assumed to be 4K).
			// final int degree = findBestDegree(bytesAvailable, elementCount);
			LOG.info("Writing an RTree with degree " + degree);

			// Derive a complete tree shape (height / leaf count / node count)
			// for the requested degree; shrink height if leaves would be
			// under-filled.
			int height = Math.max(1,
					(int) Math.ceil(Math.log(elementCount) / Math.log(degree)));
			int leafNodeCount = (int) Math.pow(degree, height - 1);
			if (elementCount < 2 * leafNodeCount && height > 1) {
				height--;
				leafNodeCount = (int) Math.pow(degree, height - 1);
			}
			int nodeCount = (int) ((Math.pow(degree, height) - 1) / (degree - 1));
			int nonLeafNodeCount = nodeCount - leafNodeCount;

			// Keep track of the offset of each element in the text
			final int[] offsets = new int[elementCount];
			final int[] ids = new int[elementCount];
			// fast_sort trades 3 doubles per element for cheaper comparisons.
			final double[] ts = fast_sort ? new double[elementCount] : null;
			final double[] xs = fast_sort ? new double[elementCount] : null;
			final double[] ys = fast_sort ? new double[elementCount] : null;

			// Buffers for the columnar layout: one index stream (t, x, y per
			// element) plus one stream per declared field of the stock object.
			//initialize columnar data output
			ByteArrayOutputStream index_bos = new ByteArrayOutputStream();
			DataOutputStream index_dos = new DataOutputStream(index_bos);
			ByteArrayOutputStream[] bos = new ByteArrayOutputStream[fields.length];
			DataOutputStream[] dos = new DataOutputStream[fields.length];
			for (int i = 0; i < bos.length; i++){
				bos[i] = new ByteArrayOutputStream();
				dos[i] = new DataOutputStream(bos[i]);
			}

			// Pass 2: record each element's byte offset and (optionally) its
			// center point; when columnar, also serialize per-field values.
			i_start = offset;
			line.clear();
			for (int i = 0; i < elementCount; i++) {
				offsets[i] = i_start;
				ids[i] = i;
				int i_end = skipToEOL(element_bytes, i_start);
				if (xs != null) {
					// Extract the line with end of line character
					line.set(element_bytes, i_start, i_end - i_start - 1);
					stockObject.fromText(line);
					// Sample center of the shape
					ts[i] = (stockObject.getMBR().t1 + stockObject.getMBR().t2) / 2;
					xs[i] = (stockObject.getMBR().x1 + stockObject.getMBR().x2) / 2;
					ys[i] = (stockObject.getMBR().y1 + stockObject.getMBR().y2) / 2;

					//build columnar storage
					if (stockObject instanceof Point3d){
						index_dos.writeDouble(ts[i]);
						index_dos.writeDouble(xs[i]);
						index_dos.writeDouble(ys[i]);
					}
					else{
						throw new RuntimeException("Indexing non-point shape with RTREE is not supported yet");
					}

					// Reflectively serialize each supported primitive field;
					// unsupported field types are silently skipped.
					for (int j = 0 ; j < fields.length; j++){
						if (fields[j].getType().equals(Integer.TYPE)){
							dos[j].writeInt(fields[j].getInt(stockObject));
						}
						else if (fields[j].getType().equals(Double.TYPE)){
							dos[j].writeDouble(fields[j].getDouble(stockObject));
						}
						else if (fields[j].getType().equals(Long.TYPE)){
							dos[j].writeLong(fields[j].getLong(stockObject));
						}
						else{
							continue;
							//throw new RuntimeException("Field type is not supported yet");
						}
					}
				}
				i_start = i_end;
			}
			index_dos.close();
			for (int i = 0; i < dos.length; i++){
				dos[i].close();
			}

			/** A struct to store information about a split */
			class SplitStruct extends Prism {
				/** Start and end index for this split */
				int index1, index2;
				/** Direction of this split */
				byte direction;
				/** Index of first element on disk */
				int offsetOfFirstElement;

				static final byte DIRECTION_T = 0;
				static final byte DIRECTION_X = 1;
				static final byte DIRECTION_Y = 2;

				SplitStruct(int index1, int index2, byte direction) {
					this.index1 = index1;
					this.index2 = index2;
					this.direction = direction;
				}

				@Override
				public void write(DataOutput out) throws IOException {
					// Columnar nodes reference elements by index; row nodes by
					// byte offset of their first element.
					//
					if (columnarStorage)
						out.writeInt(index1);
					else
						out.writeInt(offsetOfFirstElement);
					super.write(out);
				}

				// Sorts this split's element range along its direction and
				// fans it out into `degree` equally-sized child splits.
				void partition(Queue<SplitStruct> toBePartitioned) {
					IndexedSortable sortableT;
					IndexedSortable sortableX;
					IndexedSortable sortableY;

					if (fast_sort) {
						// Use materialized xs[] and ys[] to do the comparisons
						sortableT = new IndexedSortable() {
							@Override
							public void swap(int i, int j) {
								// Swap ts
								double tempt = ts[i];
								ts[i] = ts[j];
								ts[j] = tempt;
								// Swap xs
								double tempx = xs[i];
								xs[i] = xs[j];
								xs[j] = tempx;
								// Swap ys
								double tempY = ys[i];
								ys[i] = ys[j];
								ys[j] = tempY;
								// Swap id
								int tempid = offsets[i];
								offsets[i] = offsets[j];
								offsets[j] = tempid;
								tempid = ids[i];
								ids[i] = ids[j];
								ids[j] = tempid;
							}

							@Override
							public int compare(int i, int j) {
								if (ts[i] < ts[j])
									return -1;
								if (ts[i] > ts[j])
									return 1;
								return 0;
							}
						};

						sortableX = new IndexedSortable() {
							@Override
							public void swap(int i, int j) {
								// Swap ts
								double tempt = ts[i];
								ts[i] = ts[j];
								ts[j] = tempt;
								// Swap xs
								double tempx = xs[i];
								xs[i] = xs[j];
								xs[j] = tempx;
								// Swap ys
								double tempY = ys[i];
								ys[i] = ys[j];
								ys[j] = tempY;
								// Swap id
								int tempid = offsets[i];
								offsets[i] = offsets[j];
								offsets[j] = tempid;
								tempid = ids[i];
								ids[i] = ids[j];
								ids[j] = tempid;
							}

							@Override
							public int compare(int i, int j) {
								if (ts[i] < ts[j])
									return -1;
								if (xs[i] < xs[j])
									return -1;
								if (xs[i] > xs[j])
									return 1;
								return 0;
							}
						};

						sortableY = new IndexedSortable() {
							@Override
							public void swap(int i, int j) {
								// Swap ts
								double tempt = ts[i];
								ts[i] = ts[j];
								ts[j] = tempt;
								// Swap xs
								double tempx = xs[i];
								xs[i] = xs[j];
								xs[j] = tempx;
								// Swap ys
								double tempY = ys[i];
								ys[i] = ys[j];
								ys[j] = tempY;
								// Swap id
								int tempid = offsets[i];
								offsets[i] = offsets[j];
								offsets[j] = tempid;
								tempid = ids[i];
								ids[i] = ids[j];
								ids[j] = tempid;
							}

							@Override
							public int compare(int i, int j) {
								if (ys[i] < ys[j])
									return -1;
								if (ys[i] > ys[j])
									return 1;
								return 0;
							}
						};
					} else {
						// No materialized xs and ys. Always deserialize objects
						// to compare
						sortableT = new IndexedSortable() {
							@Override
							public void swap(int i, int j) {
								// Swap id
								int tempid = offsets[i];
								offsets[i] = offsets[j];
								offsets[j] = tempid;
								tempid = ids[i];
								ids[i] = ids[j];
								ids[j] = tempid;
							}

							@Override
							public int compare(int i, int j) {
								// Get end of line
								int eol = skipToEOL(element_bytes, offsets[i]);
								line.set(element_bytes, offsets[i], eol
										- offsets[i] - 1);
								stockObject.fromText(line);
								double ti = (stockObject.getMBR().t1 + stockObject
										.getMBR().t2) / 2;

								eol = skipToEOL(element_bytes, offsets[j]);
								line.set(element_bytes, offsets[j], eol
										- offsets[j] - 1);
								stockObject.fromText(line);
								double tj = (stockObject.getMBR().t1 + stockObject
										.getMBR().t2) / 2;
								if (ti < tj)
									return -1;
								if (ti > tj)
									return 1;
								return 0;
							}
						};
						sortableX = new IndexedSortable() {
							@Override
							public void swap(int i, int j) {
								// Swap id
								int tempid = offsets[i];
								offsets[i] = offsets[j];
								offsets[j] = tempid;
								tempid = ids[i];
								ids[i] = ids[j];
								ids[j] = tempid;
							}

							@Override
							public int compare(int i, int j) {
								// Get end of line
								int eol = skipToEOL(element_bytes, offsets[i]);
								line.set(element_bytes, offsets[i], eol
										- offsets[i] - 1);
								stockObject.fromText(line);
								double xi = (stockObject.getMBR().x1 + stockObject
										.getMBR().x2) / 2;

								eol = skipToEOL(element_bytes, offsets[j]);
								line.set(element_bytes, offsets[j], eol
										- offsets[j] - 1);
								stockObject.fromText(line);
								double xj = (stockObject.getMBR().x1 + stockObject
										.getMBR().x2) / 2;
								if (xi < xj)
									return -1;
								if (xi > xj)
									return 1;
								return 0;
							}
						};

						sortableY = new IndexedSortable() {
							@Override
							public void swap(int i, int j) {
								// Swap id
								int tempid = offsets[i];
								offsets[i] = offsets[j];
								offsets[j] = tempid;
								tempid = ids[i];
								ids[i] = ids[j];
								ids[j] = tempid;
							}

							@Override
							public int compare(int i, int j) {
								int eol = skipToEOL(element_bytes, offsets[i]);
								line.set(element_bytes, offsets[i], eol
										- offsets[i] - 1);
								stockObject.fromText(line);
								double yi = (stockObject.getMBR().y1 + stockObject
										.getMBR().y2) / 2;

								eol = skipToEOL(element_bytes, offsets[j]);
								line.set(element_bytes, offsets[j], eol
										- offsets[j] - 1);
								stockObject.fromText(line);
								double yj = (stockObject.getMBR().y1 + stockObject
										.getMBR().y2) / 2;
								if (yi < yj)
									return -1;
								if (yi > yj)
									return 1;
								return 0;
							}
						};
					}

					final IndexedSorter sorter = new QuickSort();

					final IndexedSortable[] sortables = new IndexedSortable[3];
					sortables[SplitStruct.DIRECTION_T] = sortableT;
					sortables[SplitStruct.DIRECTION_X] = sortableX;
					sortables[SplitStruct.DIRECTION_Y] = sortableY;

					sorter.sort(sortables[direction], index1, index2);

					// Partition into maxEntries partitions (equally) and
					// create a SplitStruct for each partition. Children cycle
					// the split direction: t -> x -> y -> t.
					int i1 = index1;
					for (int iSplit = 0; iSplit < degree; iSplit++) {
						int i2 = index1 + (index2 - index1) * (iSplit + 1)
								/ degree;

						SplitStruct newSplit;
						if (direction == 0){
							newSplit = new SplitStruct(i1, i2,
									(byte) 1);
						}
						else if (direction == 1){
							newSplit = new SplitStruct(i1, i2,
									(byte) 2);
						}
						else{
							newSplit = new SplitStruct(i1, i2,
									(byte) 0);
						}
						toBePartitioned.add(newSplit);
						i1 = i2;
					}
				}
			}

			// Build the node list in level-order by repeatedly partitioning;
			// only the first nonLeafNodeCount entries fan out further.
			// All nodes stored in level-order traversal
			Vector<SplitStruct> nodes = new Vector<SplitStruct>();
			final Queue<SplitStruct> toBePartitioned = new LinkedList<SplitStruct>();
			toBePartitioned.add(new SplitStruct(0, elementCount,
					SplitStruct.DIRECTION_X));

			while (!toBePartitioned.isEmpty()) {
				SplitStruct split = toBePartitioned.poll();
				if (nodes.size() < nonLeafNodeCount) {
					// This is a non-leaf
					split.partition(toBePartitioned);
				}
				nodes.add(split);
			}

			if (nodes.size() != nodeCount) {
				throw new RuntimeException("Expected node count: " + nodeCount
						+ ". Real node count: " + nodes.size());
			}

			// Now we have our data sorted in the required order. Start building
			// the tree.
			// Store the offset of each leaf node in the tree
			// fakeOut is a counting-only stream: it computes the byte position
			// each leaf's first element would occupy without writing anything.
			FSDataOutputStream fakeOut = new FSDataOutputStream(
					new java.io.OutputStream() {
						// Null output stream
						@Override
						public void write(int b) throws IOException {
							// Do nothing
						}

						@Override
						public void write(byte[] b, int off, int len)
								throws IOException {
							// Do nothing
						}

						@Override
						public void write(byte[] b) throws IOException {
							// Do nothing
						}
					}, null, TreeHeaderSize + nodes.size() * NodeSize);

			// Leaf pass: record each leaf's data offset and compute its MBR by
			// unioning the MBRs of its elements.
			for (int i_leaf = nonLeafNodeCount, i = 0; i_leaf < nodes.size(); i_leaf++) {

				nodes.elementAt(i_leaf).offsetOfFirstElement = (int) fakeOut
						.getPos();
				if (i != nodes.elementAt(i_leaf).index1)
					throw new RuntimeException();
				double t1, x1, y1, t2, x2, y2;

				// Initialize MBR to first object
				int eol = skipToEOL(element_bytes, offsets[i]);
				fakeOut.write(element_bytes, offsets[i], eol - offsets[i]);
				line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
				stockObject.fromText(line);
				Prism mbr = stockObject.getMBR();
				t1 = mbr.t1;
				x1 = mbr.x1;
				y1 = mbr.y1;
				t2 = mbr.t2;
				x2 = mbr.x2;
				y2 = mbr.y2;
				i++;

				while (i < nodes.elementAt(i_leaf).index2) {
					eol = skipToEOL(element_bytes, offsets[i]);
					fakeOut.write(element_bytes, offsets[i], eol - offsets[i]);
					line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
					stockObject.fromText(line);
					mbr = stockObject.getMBR();
					if (mbr.t1 < t1)
						t1 = mbr.t1;
					if (mbr.x1 < x1)
						x1 = mbr.x1;
					if (mbr.y1 < y1)
						y1 = mbr.y1;
					if (mbr.t2 > t2)
						t2 = mbr.t2;
					if (mbr.x2 > x2)
						x2 = mbr.x2;
					if (mbr.y2 > y2)
						y2 = mbr.y2;
					i++;
				}
				nodes.elementAt(i_leaf).set(t1, x1, y1, t2, x2, y2);
			}
			fakeOut.close();
			fakeOut = null;

			// Calculate MBR and offsetOfFirstElement for non-leaves
			// (bottom-up: each parent unions its `degree` children).
			for (int i_node = nonLeafNodeCount - 1; i_node >= 0; i_node--) {
				int i_first_child = i_node * degree + 1;
				nodes.elementAt(i_node).offsetOfFirstElement = nodes
						.elementAt(i_first_child).offsetOfFirstElement;
				int i_child = 0;
				Prism mbr;
				mbr = nodes.elementAt(i_first_child + i_child);
				double t1 = mbr.t1;
				double x1 = mbr.x1;
				double y1 = mbr.y1;
				double t2 = mbr.t2;
				double x2 = mbr.x2;
				double y2 = mbr.y2;
				i_child++;

				while (i_child < degree) {
					mbr = nodes.elementAt(i_first_child + i_child);
					if (mbr.t1 < t1)
						t1 = mbr.t1;
					if (mbr.x1 < x1)
						x1 = mbr.x1;
					if (mbr.y1 < y1)
						y1 = mbr.y1;
					if (mbr.t2 > t2)
						t2 = mbr.t2;
					if (mbr.x2 > x2)
						x2 = mbr.x2;
					if (mbr.y2 > y2)
						y2 = mbr.y2;
					i_child++;
				}
				nodes.elementAt(i_node).set(t1, x1, y1, t2, x2, y2);
			}

			// Start writing the tree
			// write tree header (including size)
			// Total tree size. (== Total bytes written - 8 bytes for the size
			// itself)
			dataOut.writeInt(TreeHeaderSize + NodeSize * nodeCount + len);
			// Tree height
			dataOut.writeInt(height);
			// Degree
			dataOut.writeInt(degree);
			dataOut.writeInt(elementCount);
			//isColumnar
			dataOut.writeInt(columnarStorage ? 1 : 0);

			// write nodes
			for (SplitStruct node : nodes) {
				node.write(dataOut);
			}
			// write elements
			if (columnarStorage){
				// Columnar layout: the (t, x, y) index stream first, in sorted
				// element order, then each field's column in the same order.
				byte[] index_bs = index_bos.toByteArray();
				byte[][] bss = new byte[bos.length][];
				for (int i = 0; i < bss.length; i++){
					bss[i] = bos[i].toByteArray();
				}
				for (int element_i = 0; element_i < elementCount; element_i++) {
					//int eol = skipToEOL(element_bytes, offsets[element_i]);
					//dataOut.write(element_bytes, offsets[element_i], eol - offsets[element_i]);
					dataOut.write(index_bs, ids[element_i]*IndexUnitSize, IndexUnitSize);
				}

				for (int i = 0; i < fields.length; i++){
					int fieldSize = 0;
					if (fields[i].getType().equals(Integer.TYPE)){
						fieldSize = 4;
					}
					else if (fields[i].getType().equals(Long.TYPE)){
						fieldSize = 8;
					}
					else if (fields[i].getType().equals(Double.TYPE)){
						fieldSize = 8;
					}
					else{
						//throw new RuntimeException("Unsupported field type: " + fields[i].getType().getName());
						continue;
					}
					for (int element_i = 0; element_i < elementCount; element_i++) {
						//int eol = skipToEOL(element_bytes, offsets[element_i]);
						//dataOut.write(element_bytes, offsets[element_i], eol - offsets[element_i]);
						dataOut.write(bss[i], ids[element_i]*fieldSize, fieldSize);
					}
				}
			}
			else{
				// Row layout: raw text lines in sorted element order.
				for (int element_i = 0; element_i < elementCount; element_i++) {
					int eol = skipToEOL(element_bytes, offsets[element_i]);
					dataOut.write(element_bytes, offsets[element_i], eol - offsets[element_i]);
				}
			}

		} catch (IOException e) {
			e.printStackTrace();
		} catch (IllegalArgumentException e) {
			// Reflection failure while reading a field value.
			// TODO Auto-generated catch block
			e.printStackTrace();
		} catch (IllegalAccessException e) {
			// Reflection failure while reading a field value.
			// TODO Auto-generated catch block
			e.printStackTrace();
		}
	}
	/**
	 * Unsupported: this tree is write-once. Serialize it with
	 * {@code bulkLoadWrite} instead; this method always throws.
	 */
	@Override
	public void write(DataOutput out) throws IOException {
		throw new RuntimeException("write is no longer supported. "
				+ "Please use bulkLoadWrite to write the RTree.");
	}
/**
 * Deserializes the tree header and node structure from {@code in}.
 * Only the structure (node MBRs + offsets) is kept in memory; element data
 * stays on disk when {@code in} is an FSDataInputStream, otherwise it is
 * buffered in memory.
 */
@Override
public void readFields(DataInput in) throws IOException {
    // Tree size (Header + structure + data)
    treeSize = in.readInt();
    if (treeSize == 0) {
        // Zero total size marks an empty serialized tree.
        height = elementCount = 0;
        return;
    }
    // Read only the tree structure in memory while actual records remain on
    // disk and loaded when necessary
    height = in.readInt();
    if (height == 0)
        return;
    degree = in.readInt();
    elementCount = in.readInt();
    // Storage layout flag: 1 = columnar, 0 = row-oriented.
    columnar = in.readInt() == 1;
    // Keep only tree structure in memory.
    // Node count of a full tree: (degree^height - 1) / (degree - 1).
    nodeCount = (int) ((powInt(degree, height) - 1) / (degree - 1));
    int structureSize = nodeCount * NodeSize;
    byte[] treeStructure = new byte[structureSize];
    in.readFully(treeStructure, 0, structureSize);
    structure = new FSDataInputStream(new MemoryInputStream(treeStructure));
    if (in instanceof FSDataInputStream) {
        // Seekable source: remember where the tree starts so element data
        // can be read lazily from the same stream.
        this.treeStartOffset = ((FSDataInputStream) in).getPos()
                - structureSize - TreeHeaderSize;
        this.data = (FSDataInputStream) in;
    } else {
        // Non-seekable source: load all tree data in memory.
        // Negative start offset also signals "in-memory" to spatialJoin().
        this.treeStartOffset = 0 - structureSize - TreeHeaderSize;
        int treeDataSize = treeSize - TreeHeaderSize - structureSize;
        byte[] treeData = new byte[treeDataSize];
        in.readFully(treeData, 0, treeDataSize);
        this.data = new FSDataInputStream(new MemoryInputStream(treeData));
    }
    // Recompute derived node counts (same formula as above, via Math.pow).
    nodeCount = (int) ((Math.pow(degree, height) - 1) / (degree - 1));
    leafNodeCount = (int) Math.pow(degree, height - 1);
    nonLeafNodeCount = nodeCount - leafNodeCount;
}
/**
 * Reads and skips the header of the tree returning the total number of
 * bytes skipped from the stream. This is used as a preparatory function to
 * read all elements in the tree without the index part.
 *
 * @param in the stream positioned at the start of a serialized tree
 * @return total number of bytes read and skipped
 * @throws IOException if the stream ends before the node structure is
 *         fully skipped
 */
public static int skipHeader(InputStream in) throws IOException {
    DataInput dataIn = in instanceof DataInput ? (DataInput) in
            : new DataInputStream(in);
    int skippedBytes = 0;
    /* int treeSize = */dataIn.readInt();
    skippedBytes += 4;
    int height = dataIn.readInt();
    skippedBytes += 4;
    if (height == 0) {
        // Empty tree. No results
        return skippedBytes;
    }
    int degree = dataIn.readInt();
    skippedBytes += 4;
    int nodeCount = (int) ((powInt(degree, height) - 1) / (degree - 1));
    /* int elementCount = */dataIn.readInt();
    skippedBytes += 4;
    // Skip all nodes. DataInput.skipBytes is allowed to skip fewer bytes
    // than requested, so loop until the whole node section is consumed
    // (the previous single call could silently under-skip on some streams).
    int remaining = nodeCount * NodeSize;
    while (remaining > 0) {
        int skipped = dataIn.skipBytes(remaining);
        if (skipped <= 0) {
            throw new IOException("Unexpected end of stream while skipping "
                    + remaining + " bytes of R-tree node structure");
        }
        remaining -= skipped;
        skippedBytes += skipped;
    }
    return skippedBytes;
}
/**
 * Returns the total size of the header (including the index) in bytes.
 * The input must be aligned to the start offset of the tree; note that the
 * header portion is consumed from the given input in order to determine
 * the size.
 *
 * @param in stream positioned at the start of a serialized tree
 * @return header size in bytes (8 for an empty tree)
 * @throws IOException on read failure
 */
public static int getHeaderSize(DataInput in) throws IOException {
    // Two leading ints: total tree size (unused here) and height.
    /* treeSize */ in.readInt();
    int height = in.readInt();
    int header_size = 2 * 4;
    if (height == 0) {
        // Empty tree: the header is just those two ints.
        return header_size;
    }
    // Two more ints: degree and element count (the latter is unused here).
    int degree = in.readInt();
    /* elementCount */ in.readInt();
    header_size += 2 * 4;
    // All nodes of a full tree of this degree/height follow the header.
    int nodeCount = (int) ((Math.pow(degree, height) - 1) / (degree - 1));
    header_size += nodeCount * NodeSize;
    return header_size;
}
/**
 * Returns the total number of data records stored in this tree, as read
 * from the tree header.
 *
 * @return element count (0 for an empty tree)
 */
public int getElementCount() {
    return elementCount;
}
/**
 * Returns the minimum bounding Prism of the whole tree, which is by
 * construction the MBR stored in the root node.
 *
 * @return the root MBR, or an incompletely-read/null Prism if an I/O error
 *         occurs (the error is printed, matching the class's error style)
 */
public Prism getMBR() {
    Prism rootMbr = null;
    try {
        // The root node sits at offset 0 of the in-memory structure:
        // a 4-byte data offset followed by the serialized Prism.
        structure.seek(0);
        rootMbr = new Prism();
        structure.readInt(); // skip the root's data offset
        rootMbr.readFields(structure);
    } catch (IOException e) {
        e.printStackTrace();
    }
    return rootMbr;
}
/**
 * Reads and returns the element at position {@code i} by advancing a fresh
 * iterator. Runs in O(i); intended for occasional lookups rather than bulk
 * access.
 *
 * @param i zero-based index of the element to return
 * @return the i-th element
 */
public T readElement(int i) {
    Iterator<T> it = iterator();
    // Discard the first i elements (stops early if the tree is smaller).
    for (int remaining = i; remaining > 0 && it.hasNext(); remaining--) {
        it.next();
    }
    return it.next();
}
/**
 * Sets the prototype instance used to deserialize records read from the
 * tree (iterators and searches clone and fill it via fromText).
 *
 * @param stockObject reusable prototype of the record type
 */
public void setStockObject(T stockObject) {
    this.stockObject = stockObject;
}
/**
 * Create Prisms that together pack all points in sample such that each
 * Prism contains roughly the same number of points. In other words it tries
 * to balance number of points in each Prism. Works similar to the logic of
 * bulkLoad but does only one level of Prisms.
 *
 * Partitioning order: sort by t and slice into layers, sort each layer by x
 * and slice into columns, sort each column by y and slice into rows.
 *
 * @param gridInfo used as a hint for the number of Prisms per layer, row
 *        and column, and for the overall extents
 * @param sample the points to pack; reordered in place by the sorts
 * @return one Prism per grid cell, layer-major then column-major
 */
public static Prism[] packInPrisms(GridInfo gridInfo, final Point3d[] sample) {
    Prism[] Prisms = new Prism[gridInfo.layers * gridInfo.columns * gridInfo.rows];
    int iPrism = 0;
    // Sort in t direction
    final IndexedSortable sortableT = new IndexedSortable() {
        @Override
        public void swap(int i, int j) {
            Point3d temp = sample[i];
            sample[i] = sample[j];
            sample[j] = temp;
        }

        @Override
        public int compare(int i, int j) {
            if (sample[i].t < sample[j].t)
                return -1;
            if (sample[i].t > sample[j].t)
                return 1;
            return 0;
        }
    };
    // Sort in x direction
    final IndexedSortable sortableX = new IndexedSortable() {
        @Override
        public void swap(int i, int j) {
            Point3d temp = sample[i];
            sample[i] = sample[j];
            sample[j] = temp;
        }

        @Override
        public int compare(int i, int j) {
            if (sample[i].x < sample[j].x)
                return -1;
            if (sample[i].x > sample[j].x)
                return 1;
            return 0;
        }
    };
    // Sort in y direction
    final IndexedSortable sortableY = new IndexedSortable() {
        @Override
        public void swap(int i, int j) {
            Point3d temp = sample[i];
            sample[i] = sample[j];
            sample[j] = temp;
        }

        @Override
        public int compare(int i, int j) {
            if (sample[i].y < sample[j].y)
                return -1;
            if (sample[i].y > sample[j].y)
                return 1;
            return 0;
        }
    };
    final QuickSort quickSort = new QuickSort();
    quickSort.sort(sortableT, 0, sample.length);
    int tindex1 = 0;
    double t1 = gridInfo.t1;
    for (int lay = 0; lay < gridInfo.layers; lay++) {
        int tindex2 = sample.length * (lay + 1) / gridInfo.layers;
        double t2 = lay == gridInfo.layers - 1 ? gridInfo.t2 : sample[tindex2 - 1].t;
        quickSort.sort(sortableX, tindex1, tindex2);
        int xindex1 = tindex1;
        double x1 = gridInfo.x1;
        for (int col = 0; col < gridInfo.columns; col++) {
            // BUG FIX: partition only this layer's slice [tindex1, tindex2).
            // The previous computation (sample.length * (col + 1) / columns)
            // ignored the layer bounds and produced invalid ranges whenever
            // layers > 1. Identical to the old value when layers == 1.
            int xindex2 = tindex1 + (tindex2 - tindex1) * (col + 1)
                    / gridInfo.columns;
            // Determine extents for all Prisms in this column
            double x2 = col == gridInfo.columns - 1 ? gridInfo.x2
                    : sample[xindex2 - 1].x;
            // Sort all points in this column according to its y-coordinate
            quickSort.sort(sortableY, xindex1, xindex2);
            // Create Prisms in this column
            double y1 = gridInfo.y1;
            for (int row = 0; row < gridInfo.rows; row++) {
                int yindex2 = xindex1 + (xindex2 - xindex1) * (row + 1)
                        / gridInfo.rows;
                double y2 = row == gridInfo.rows - 1 ? gridInfo.y2
                        : sample[yindex2 - 1].y;
                Prisms[iPrism++] = new Prism(t1, x1, y1, t2, x2, y2);
                y1 = y2;
            }
            xindex1 = xindex2;
            x1 = x2;
        }
        // BUG FIX: advance to the next layer (previously tindex1/t1 were
        // never updated, so every layer re-sorted from index 0 and all
        // Prisms started at gridInfo.t1). No effect when layers == 1.
        tindex1 = tindex2;
        t1 = t2;
    }
    return Prisms;
}
/**
 * An iterator that goes over all elements in the tree in no particular
 * order
 *
 * @author tonyren, eldawy
 *
 */
class RTreeIterator implements Iterator<T> {
    /** Current offset in the data stream */
    int offset;
    /** Temporary text that holds one line to deserialize objects */
    Text line;
    /** A stock object to read from stream */
    T _stockObject;
    /** A reader to read lines from the tree */
    LineReader reader;

    /**
     * Positions the shared data stream at the first element, i.e. right
     * after the header and node structure.
     */
    RTreeIterator() throws IOException {
        offset = TreeHeaderSize + NodeSize * RTree.this.nodeCount;
        _stockObject = (T) RTree.this.stockObject.clone();
        line = new Text();
        RTree.this.data.seek(offset + RTree.this.treeStartOffset);
        reader = new LineReader(RTree.this.data);
    }

    @Override
    public boolean hasNext() {
        // Elements occupy the byte range [headerEnd, treeSize).
        return offset < RTree.this.treeSize;
    }

    @Override
    public T next() {
        try {
            // One record per line; the SAME stock object is refilled each
            // call, so callers must clone if they retain references.
            offset += reader.readLine(line);
            _stockObject.fromText(line);
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
        return _stockObject;
    }

    @Override
    public void remove() {
        throw new RuntimeException("Not supported");
    }
}
/**
 * Returns the offset just past the end of the line starting at
 * {@code startOffset}: all content bytes are consumed, then every
 * consecutive line terminator ({@code \n} / {@code \r}) that follows.
 *
 * @param bytes buffer to scan
 * @param startOffset position to start scanning from
 * @return offset of the first byte of the next line (or bytes.length)
 */
public static int skipToEOL(byte[] bytes, int startOffset) {
    int cursor = startOffset;
    // Phase 1: advance over the line's content.
    for (; cursor < bytes.length; cursor++) {
        byte b = bytes[cursor];
        if (b == '\n' || b == '\r') {
            break;
        }
    }
    // Phase 2: advance over the run of terminators (handles \r\n and
    // repeated blank lines in one call).
    for (; cursor < bytes.length; cursor++) {
        byte b = bytes[cursor];
        if (b != '\n' && b != '\r') {
            break;
        }
    }
    return cursor;
}
/**
 * Returns an iterator over all elements in no particular order.
 * NOTE(review): on IOException this returns null, which makes a for-each
 * over the tree throw NullPointerException — callers must be aware.
 */
@Override
public Iterator<T> iterator() {
    try {
        return new RTreeIterator();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return null;
}
/**
 * Given a block size, record size and a required tree degree, this function
 * calculates the maximum number of records that can be stored in this block
 * taking into consideration the overhead needed by the node structure.
 *
 * @param blockSize total bytes available in the block
 * @param degree tree fan-out
 * @param recordSize bytes per record
 * @return maximum record capacity of the block
 */
public static int getBlockCapacity(long blockSize, int degree,
        int recordSize) {
    // Amortized per-record share of one tree node.
    double nodeShare = (double) NodeSize / (degree - 1);
    double ratio = (blockSize + nodeShare) / (recordSize + nodeShare);
    // Height at which structure overhead balances record storage.
    double breakEvenHeight = Math.log(ratio) / Math.log(degree);
    double flooredHeight = Math.floor(breakEvenHeight);
    // Candidate 1: capacity implied by a full tree of that height.
    double capacityByHeight = Math.floor(Math.pow(degree, flooredHeight));
    double structureSize = 4 + TreeHeaderSize + nodeShare
            * (capacityByHeight * degree - 1);
    // Candidate 2: whatever fits in the bytes left after the structure.
    double capacityBySpace = Math
            .floor((blockSize - structureSize) / recordSize);
    return Math.max((int) capacityByHeight, (int) capacityBySpace);
}
/**
 * Searches the RTree starting from the given start position. This is either
 * a node number or offset of an element. If it's a node number, it performs
 * the search in the subtree rooted at this node. If it's an offset number,
 * it searches only the object found there. It is assumed that the
 * openQuery() has been called before this function and that endQuery() will
 * be called afterwards.
 *
 * Columnar variant: instead of whole records, it projects the single
 * column named by {@code field} and reports its values to {@code output}.
 *
 * @param query_shape the query range
 * @param output values of the projected column are reported here; must
 *        not be null
 * @param start where to start searching (node number or element offset)
 * @param end where to end searching. Only used when start is an offset of
 *        an object.
 * @param field name of the projected column in the record class
 * @return number of values reported
 * @throws IOException on read failure
 */
protected int searchColumnar(Shape query_shape, ResultCollector<Writable> output,
        int start, int end, String field) throws IOException {
    if (output == null) {
        throw new RuntimeException("Output is NULL");
    }
    // build search field: locate the requested column in the columnar
    // layout. Columns are stored one after another, each as a contiguous
    // array of elementCount fixed-size values, so fieldOffset accumulates
    // the byte size of every column that precedes the requested one.
    int fieldOffset = 0;
    int fieldSize = -1;
    FIELD_TYPE fieldType = FIELD_TYPE.NULL;
    // get fields
    Field[] fields = stockObject.getClass().getDeclaredFields();
    for (int i = 0; i < fields.length; i++) {
        if (fields[i].getName().equals(field)) {
            if (fields[i].getType().equals(Integer.TYPE)) {
                fieldSize = 4;
                fieldType = FIELD_TYPE.Integer;
            } else if (fields[i].getType().equals(Long.TYPE)) {
                fieldSize = 8;
                fieldType = FIELD_TYPE.Long;
            } else if (fields[i].getType().equals(Double.TYPE)) {
                fieldSize = 8;
                fieldType = FIELD_TYPE.Double;
            } else {
                // Unsupported column types are silently ignored; fieldType
                // stays NULL and the default branch below reads a Point3d.
                // throw new RuntimeException("Unsupported type: " + fields[i].getType());
            }
            break;
        } else {
            if (fields[i].getType().equals(Integer.TYPE)) {
                fieldOffset += elementCount * 4;
            } else if (fields[i].getType().equals(Long.TYPE) || fields[i].getType().equals(Double.TYPE)) {
                fieldOffset += elementCount * 8;
            } else {
                // Skipped columns of unsupported type contribute no bytes.
                // throw new RuntimeException("Unsupported type: " + fields[i].getType());
            }
        }
    }
    Prism query_mbr = query_shape.getMBR();
    int resultSize = 0;
    // Special case for an empty tree
    if (height == 0)
        return 0;
    // Iterative DFS over node numbers (level-order numbering).
    Stack<Integer> toBeSearched = new Stack<Integer>();
    // Start from the given node
    toBeSearched.push(start);
    if (start >= nodeCount) {
        toBeSearched.push(end);
    }
    Prism node_mbr = new Prism();
    // Holds one data line from tree data
    Text line = new Text2();
    while (!toBeSearched.isEmpty()) {
        int searchNumber = toBeSearched.pop();
        // The root stores a single MBR; every other node stores 'degree'.
        int mbrsToTest = searchNumber == 0 ? 1 : degree;
        if (searchNumber < nodeCount) {
            long nodeOffset = NodeSize * searchNumber;
            structure.seek(nodeOffset);
            int dataOffset = structure.readInt();
            for (int i = 0; i < mbrsToTest; i++) {
                node_mbr.readFields(structure);
                int lastOffset = (searchNumber + i) == nodeCount - 1 ? elementCount - 1
                        : structure.readInt();
                if (query_mbr.contains(node_mbr)) {
                    // The node is full contained in the query range.
                    // Save the time and do full scan for this node
                    // Checks if this node is the last node in its level
                    // This can be easily detected because the next node in
                    // the level
                    // order traversal will be the first node in the next
                    // level
                    // which means it will have an offset less than this
                    // node
                    if (lastOffset <= dataOffset)
                        lastOffset = elementCount;
                    // Seek directly into the requested column: past header,
                    // node structure and the per-element index section.
                    data.seek(treeStartOffset + TreeHeaderSize + nodeCount * NodeSize + elementCount * IndexUnitSize + fieldOffset + dataOffset * fieldSize);
                    for (int j = 0; j < lastOffset - dataOffset; j++) {
                        switch (fieldType) {
                        case Integer:
                            output.collect(new IntWritable(data.readInt()));
                            break;
                        case Long:
                            output.collect(new LongWritable(data.readLong()));
                            break;
                        case Double:
                            output.collect(new DoubleWritable(data.readDouble()));
                            break;
                        default:
                            // No/unsupported field: report the point itself.
                            output.collect(new Point3d(data.readDouble(), data.readDouble(), data.readDouble()));
                            break;
                        }
                        resultSize++;
                    }
                } else if (query_mbr.isIntersected(node_mbr)) {
                    // Node partially overlaps with query. Go deep under
                    // this node
                    if (searchNumber < nonLeafNodeCount) {
                        // Search child nodes
                        toBeSearched.push((searchNumber + i) * degree + 1);
                    } else {
                        // Search all elements in this node
                        // toBeSearched.push(dataOffset);
                        // Checks if this node is the last node in its level
                        // This can be easily detected because the next node
                        // in the level
                        // order traversal will be the first node in the
                        // next level
                        // which means it will have an offset less than this
                        // node
                        if (lastOffset <= dataOffset)
                            lastOffset = elementCount;
                        // toBeSearched.push(lastOffset);
                        // Pass 1: read the index section (t,x,y per
                        // element) and record which elements match.
                        data.seek(treeStartOffset + TreeHeaderSize + nodeCount * NodeSize + dataOffset * IndexUnitSize);
                        boolean report[] = new boolean[lastOffset - dataOffset];
                        Point3d point = new Point3d();
                        for (int j = 0; j < lastOffset - dataOffset; j++) {
                            point.t = data.readDouble();
                            point.x = data.readDouble();
                            point.y = data.readDouble();
                            if (point.isIntersected(query_shape)) {
                                report[j] = true;
                            } else
                                report[j] = false;
                        }
                        // Pass 2: read the projected column and emit only
                        // the matching elements.
                        data.seek(treeStartOffset + TreeHeaderSize + nodeCount * NodeSize + elementCount * IndexUnitSize + fieldOffset + dataOffset * fieldSize);
                        for (int j = 0; j < lastOffset - dataOffset; j++) {
                            if (report[j]) {
                                switch (fieldType) {
                                case Integer:
                                    output.collect(new IntWritable(data.readInt()));
                                    break;
                                case Long:
                                    output.collect(new LongWritable(data.readLong()));
                                    break;
                                case Double:
                                    output.collect(new DoubleWritable(data.readDouble()));
                                    break;
                                default:
                                    output.collect(new Point3d(data.readDouble(), data.readDouble(), data.readDouble()));
                                    break;
                                }
                                resultSize++;
                            }
                        }
                    }
                }
                dataOffset = lastOffset;
            }
        } else {
            // Offsets should never be pushed in the columnar path; this
            // branch scans raw lines as a fallback and logs the anomaly.
            LOG.error("searchNumber > nodeCount, something is wrong");
            int firstOffset, lastOffset;
            // Search for data items (records)
            lastOffset = searchNumber;
            firstOffset = toBeSearched.pop();
            data.seek(firstOffset + treeStartOffset);
            LineReader lineReader = new LineReader(data);
            while (firstOffset < lastOffset) {
                firstOffset += lineReader.readLine(line);
                stockObject.fromText(line);
                if (stockObject.isIntersected(query_shape)) {
                    resultSize++;
                    if (output != null)
                        output.collect(stockObject);
                }
            }
        }
    }
    return resultSize;
}
/**
 * Row-oriented range search. Traverses the subtree rooted at {@code start}
 * (or, when {@code start} is an element offset, scans the byte range
 * [start, end)) and reports every record intersecting the query shape.
 *
 * @param query_shape the query range
 * @param output matching records are reported here; may be null to only
 *        count matches
 * @param start node number to search under, or an element byte offset
 * @param end only used when start is an element offset
 * @return number of matching records
 * @throws IOException on read failure
 */
protected int search(Shape query_shape, ResultCollector<T> output,
        int start, int end) throws IOException {
    Prism query_mbr = query_shape.getMBR();
    int resultSize = 0;
    // Special case for an empty tree
    if (height == 0)
        return 0;
    // Stack holds either node numbers (< nodeCount) or pairs of element
    // byte offsets [first, last) pushed as first then last.
    Stack<Integer> toBeSearched = new Stack<Integer>();
    // Start from the given node
    toBeSearched.push(start);
    if (start >= nodeCount) {
        toBeSearched.push(end);
    }
    Prism node_mbr = new Prism();
    // Holds one data line from tree data
    Text line = new Text2();
    while (!toBeSearched.isEmpty()) {
        int searchNumber = toBeSearched.pop();
        // The root stores one MBR; all other nodes store 'degree' MBRs.
        int mbrsToTest = searchNumber == 0 ? 1 : degree;
        if (searchNumber < nodeCount) {
            long nodeOffset = NodeSize * searchNumber;
            structure.seek(nodeOffset);
            int dataOffset = structure.readInt();
            for (int i = 0; i < mbrsToTest; i++) {
                node_mbr.readFields(structure);
                int lastOffset = (searchNumber + i) == nodeCount - 1 ? treeSize
                        : structure.readInt();
                if (query_mbr.contains(node_mbr)) {
                    // The node is full contained in the query range.
                    // Save the time and do full scan for this node
                    toBeSearched.push(dataOffset);
                    // Checks if this node is the last node in its level
                    // This can be easily detected because the next node in
                    // the level
                    // order traversal will be the first node in the next
                    // level
                    // which means it will have an offset less than this
                    // node
                    if (lastOffset <= dataOffset)
                        lastOffset = treeSize;
                    toBeSearched.push(lastOffset);
                } else if (query_mbr.isIntersected(node_mbr)) {
                    // Node partially overlaps with query. Go deep under
                    // this node
                    if (searchNumber < nonLeafNodeCount) {
                        // Search child nodes
                        toBeSearched.push((searchNumber + i) * degree + 1);
                    } else {
                        // Search all elements in this node
                        toBeSearched.push(dataOffset);
                        // Checks if this node is the last node in its level
                        // This can be easily detected because the next node
                        // in the level
                        // order traversal will be the first node in the
                        // next level
                        // which means it will have an offset less than this
                        // node
                        if (lastOffset <= dataOffset)
                            lastOffset = treeSize;
                        toBeSearched.push(lastOffset);
                    }
                }
                dataOffset = lastOffset;
            }
        } else {
            int firstOffset, lastOffset;
            // Search for data items (records): scan lines in
            // [firstOffset, lastOffset) and test each record.
            lastOffset = searchNumber;
            firstOffset = toBeSearched.pop();
            data.seek(firstOffset + treeStartOffset);
            LineReader lineReader = new LineReader(data);
            while (firstOffset < lastOffset) {
                firstOffset += lineReader.readLine(line);
                stockObject.fromText(line);
                if (stockObject.isIntersected(query_shape)) {
                    resultSize++;
                    if (output != null)
                        output.collect(stockObject);
                }
            }
        }
    }
    return resultSize;
}
/**
 * Performs a columnar range query over this tree using the given query
 * range, projecting the single column named by {@code field}.
 *
 * @param query the query shape
 * @param output projected column values are reported to this collector
 * @param field name of the column to project
 * @return total number of values found (0 on I/O error, which is printed)
 */
public int searchColumnar(Shape query, ResultCollector<Writable> output, String field) {
    try {
        // Start at the root (node 0); end is unused for node searches.
        return searchColumnar(query, output, 0, 0, field);
    } catch (IOException e) {
        e.printStackTrace();
        return 0;
    }
}
/**
 * Performs a row-oriented range query over this tree.
 * NOTE(review): the {@code field} parameter is accepted but never
 * forwarded — the row-oriented search has no per-field projection.
 * Confirm this signature is intentional (it mirrors searchColumnar).
 */
public int search(Shape query, ResultCollector<T> output, String field) {
    int resultCount = 0;
    try {
        // Start at the root (node 0); end is unused for node searches.
        resultCount = search(query, output, 0, 0);
    } catch (IOException e) {
        e.printStackTrace();
    }
    return resultCount;
}
/**
 * k nearest neighbor query Note: Current algorithm is approximate just for
 * simplicity. Writing an exact algorithm is on our TODO list
 *
 * Strategy: estimate a query radius from the data density, collect all
 * points in the corresponding square range, and grow the radius until at
 * least k points are found and the k-th distance fits inside the radius.
 *
 * @param qt query t-coordinate
 * @param qx query x-coordinate
 * @param qy query y-coordinate
 * @param k number of neighbors requested
 * @param output neighbor/distance pairs are reported here; may be null
 * @return number of neighbors reported (min of k and tree size)
 */
public int knn(final double qt, final double qx, final double qy, int k,
        final ResultCollector2<T, Double> output) {
    // Expected area containing k points, assuming uniform density.
    double query_area = ((getMBR().x2 - getMBR().x1) * (getMBR().y2 - getMBR().y1))
            * k / getElementCount();
    double query_radius = Math.sqrt(query_area / Math.PI);
    boolean result_correct;
    final Vector<Double> distances = new Vector<Double>();
    final Vector<T> shapes = new Vector<T>();
    // Find results in the range and increase this range if needed to ensure
    // correctness of the answer
    do {
        // Initialize result and query range
        distances.clear();
        shapes.clear();
        Prism queryRange = new Prism();
        // NOTE(review): queryRange.t1/t2 are left at their defaults, so
        // the range is unconstrained in t — confirm this is intended for
        // a 3-D prism.
        queryRange.x1 = qx - query_radius / 2;
        queryRange.y1 = qy - query_radius / 2;
        queryRange.x2 = qx + query_radius / 2;
        queryRange.y2 = qy + query_radius / 2;
        // Retrieve all results in range
        searchColumnar(queryRange, new ResultCollector<Writable>() {
            @Override
            public void collect(Writable shape) {
                distances.add(((T) shape).distanceTo(qt, qx, qy));
                shapes.add((T) ((T) shape).clone());
            }
        }, null);
        if (shapes.size() < k) {
            // Didn't find k elements in range, double the range to get more
            // items
            if (shapes.size() == getElementCount()) {
                // Already returned all possible elements
                result_correct = true;
            } else {
                query_radius *= 2;
                result_correct = false;
            }
        } else {
            // Sort items by distance to get the kth neighbor
            IndexedSortable s = new IndexedSortable() {
                @Override
                public void swap(int i, int j) {
                    double temp_distance = distances.elementAt(i);
                    distances.set(i, distances.elementAt(j));
                    distances.set(j, temp_distance);
                    T temp_shape = shapes.elementAt(i);
                    shapes.set(i, shapes.elementAt(j));
                    shapes.set(j, temp_shape);
                }

                @Override
                public int compare(int i, int j) {
                    // Note. Equality is not important to check because
                    // items with the
                    // same distance can be ordered anyway.
                    if (distances.elementAt(i) < distances.elementAt(j))
                        return -1;
                    return 1;
                }
            };
            IndexedSorter sorter = new QuickSort();
            sorter.sort(s, 0, shapes.size());
            if (distances.elementAt(k - 1) > query_radius) {
                // k-th neighbor lies outside the searched square: grow the
                // radius and retry.
                result_correct = false;
                // BUG FIX: the previous code read distances.elementAt(k),
                // which throws ArrayIndexOutOfBoundsException when exactly
                // k results were found. Clamp the index to the last result.
                query_radius = distances.elementAt(Math.min(k, distances.size() - 1));
            } else {
                result_correct = true;
            }
        }
    } while (!result_correct);
    int result_size = Math.min(k, shapes.size());
    if (output != null) {
        for (int i = 0; i < result_size; i++) {
            output.collect(shapes.elementAt(i), distances.elementAt(i));
        }
    }
    return result_size;
}
/**
 * Joins two R-trees by materializing every record of both trees in memory
 * and running a plane-sweep spatial join over the two arrays.
 *
 * @return number of overlapping pairs found
 */
protected static <S1 extends Shape, S2 extends Shape> int spatialJoinMemory(
        final RTree<S1> R, final RTree<S2> S,
        final ResultCollector2<S1, S2> output) throws IOException {
    // Clone every record of R into an array.
    S1[] rRecords = (S1[]) Array.newInstance(R.stockObject.getClass(),
            R.getElementCount());
    int rCount = 0;
    for (S1 record : R) {
        rRecords[rCount++] = (S1) record.clone();
    }
    // Sanity check: the iterator must yield exactly elementCount records.
    if (rCount != rRecords.length) {
        throw new RuntimeException(rCount + "!=" + rRecords.length);
    }
    // Same for S.
    S2[] sRecords = (S2[]) Array.newInstance(S.stockObject.getClass(),
            S.getElementCount());
    int sCount = 0;
    for (S2 record : S) {
        sRecords[sCount++] = (S2) record.clone();
    }
    if (sCount != sRecords.length) {
        throw new RuntimeException(sCount + "!=" + sRecords.length);
    }
    return SpatialAlgorithms.SpatialJoin_planeSweep(rRecords, sRecords, output);
}
// LRU cache used to avoid deserializing the same records again and again
static class LruCache<A, B> extends LinkedHashMap<A, B> {
    private static final long serialVersionUID = 702044567572914544L;
    /** Hard cap on the number of live entries. */
    private final int capacity;
    /** Most recently evicted value; handed back once via popUnusedEntry(). */
    private B evicted;

    public LruCache(final int maxEntries) {
        // accessOrder=true makes LinkedHashMap order by access, i.e. LRU.
        super(maxEntries + 1, 1.0f, true);
        this.capacity = maxEntries;
    }

    @Override
    protected boolean removeEldestEntry(final Map.Entry<A, B> eldest) {
        boolean overfull = super.size() > capacity;
        if (overfull) {
            // Remember the evicted value so its buffer can be reused.
            evicted = eldest.getValue();
        }
        return overfull;
    }

    public B popUnusedEntry() {
        final B value = evicted;
        evicted = null;
        return value;
    }
}
/**
 * Performs a spatial join between records in two R-trees
 *
 * Walks both trees in lock step: a work queue holds pairs of node numbers
 * (packed into one long: R node in the high 32 bits, S node in the low 32)
 * whose MBRs overlap; leaf pairs are resolved by a Cartesian product over
 * their records, with LRU caches to avoid re-deserializing records.
 *
 * @param R first tree (must be disk-backed)
 * @param S second tree (must be disk-backed)
 * @param output overlapping pairs are reported here; may be null
 * @return number of overlapping pairs
 * @throws IOException on read failure
 */
protected static <S1 extends Shape, S2 extends Shape> int spatialJoinDisk(
        final RTree<S1> R, final RTree<S2> S,
        final ResultCollector2<S1, S2> output) throws IOException {
    // Reserve locations for nodes MBRs and data offset [start, end)
    final Prism[] r_nodes = new Prism[R.degree];
    for (int i = 0; i < r_nodes.length; i++)
        r_nodes[i] = new Prism();
    final int[] r_data_offset = new int[R.degree + 1];
    final Prism[] s_nodes = new Prism[S.degree];
    for (int i = 0; i < s_nodes.length; i++)
        s_nodes[i] = new Prism();
    final int[] s_data_offset = new int[S.degree + 1];
    // Min-queue of packed (r_node, s_node) pairs still to be joined.
    PriorityQueue<Long> nodesToJoin = new PriorityQueue<Long>() {
        {
            initialize(R.leafNodeCount + S.leafNodeCount);
        }

        @Override
        protected boolean lessThan(Object a, Object b) {
            return ((Long) a) < ((Long) b);
        }
    };
    // Seed with the pair of roots (both node number 0).
    nodesToJoin.put(0L);
    LruCache<Integer, Shape[]> r_records_cache = new LruCache<Integer, Shape[]>(
            R.degree * 2);
    LruCache<Integer, Shape[]> s_records_cache = new LruCache<Integer, Shape[]>(
            S.degree * R.degree * 4);
    Text line = new Text2();
    int result_count = 0;
    LineReader r_lr = null, s_lr = null;
    // Last offset read from r and s
    int r_last_offset = 0;
    int s_last_offset = 0;
    while (nodesToJoin.size() > 0) {
        long nodes_to_join = nodesToJoin.pop();
        // Unpack the node pair: R in the high word, S in the low word.
        int r_node = (int) (nodes_to_join >>> 32);
        int s_node = (int) (nodes_to_join & 0xFFFFFFFF);
        // Read all R nodes
        int r_mbrsToTest = r_node == 0 ? 1 : R.degree;
        boolean r_leaf = r_node * R.degree + 1 >= R.nodeCount;
        long nodeOffset = NodeSize * r_node;
        R.structure.seek(nodeOffset);
        for (int i = 0; i < r_mbrsToTest; i++) {
            r_data_offset[i] = R.structure.readInt();
            r_nodes[i].readFields(R.structure);
        }
        // Sentinel end offset: next node's offset, or treeSize at the end.
        r_data_offset[r_mbrsToTest] = (r_node + r_mbrsToTest) == R.nodeCount ? R.treeSize
                : R.structure.readInt();
        // Read all S nodes
        int s_mbrsToTest = s_node == 0 ? 1 : S.degree;
        boolean s_leaf = s_node * S.degree + 1 >= S.nodeCount;
        if (r_leaf != s_leaf) {
            // This case happens when the two trees are of different heights
            if (r_leaf)
                r_mbrsToTest = 1;
            else
                s_mbrsToTest = 1;
        }
        nodeOffset = NodeSize * s_node;
        S.structure.seek(nodeOffset);
        for (int i = 0; i < s_mbrsToTest; i++) {
            s_data_offset[i] = S.structure.readInt();
            s_nodes[i].readFields(S.structure);
        }
        s_data_offset[s_mbrsToTest] = (s_node + s_mbrsToTest) == S.nodeCount ? S.treeSize
                : S.structure.readInt();
        // Find overlapping nodes by Cartesian product
        for (int i = 0; i < r_mbrsToTest; i++) {
            for (int j = 0; j < s_mbrsToTest; j++) {
                if (r_nodes[i].isIntersected(s_nodes[j])) {
                    if (r_leaf && s_leaf) {
                        // Reached leaf nodes in both trees. Start comparing
                        // records
                        int r_start_offset = r_data_offset[i];
                        int r_end_offset = r_data_offset[i + 1];
                        int s_start_offset = s_data_offset[j];
                        int s_end_offset = s_data_offset[j + 1];
                        // /////////////////////////////////////////////////////////////////
                        // Read or retrieve r_records (keyed by start offset)
                        Shape[] r_records = r_records_cache
                                .get(r_start_offset);
                        if (r_records == null) {
                            int cache_key = r_start_offset;
                            // Reuse the array of the last evicted entry to
                            // avoid reallocating record objects.
                            r_records = r_records_cache.popUnusedEntry();
                            if (r_records == null) {
                                r_records = new Shape[R.degree * 2];
                            }
                            // Need to read it from stream
                            if (r_last_offset != r_start_offset) {
                                long seekTo = r_start_offset
                                        + R.treeStartOffset;
                                R.data.seek(seekTo);
                                r_lr = new LineReader(R.data);
                            }
                            int record_i = 0;
                            while (r_start_offset < r_end_offset) {
                                r_start_offset += r_lr.readLine(line);
                                if (r_records[record_i] == null)
                                    r_records[record_i] = R.stockObject
                                            .clone();
                                r_records[record_i].fromText(line);
                                record_i++;
                            }
                            r_last_offset = r_start_offset;
                            // Nullify other records
                            while (record_i < r_records.length)
                                r_records[record_i++] = null;
                            r_records_cache.put(cache_key, r_records);
                        }
                        // Read or retrieve s_records
                        Shape[] s_records = s_records_cache
                                .get(s_start_offset);
                        if (s_records == null) {
                            int cache_key = s_start_offset;
                            // Need to read it from stream
                            if (s_lr == null
                                    || s_last_offset != s_start_offset) {
                                // Need to reposition s_lr (LineReader of S)
                                long seekTo = s_start_offset
                                        + S.treeStartOffset;
                                S.data.seek(seekTo);
                                s_lr = new LineReader(S.data);
                            }
                            s_records = s_records_cache.popUnusedEntry();
                            if (s_records == null) {
                                s_records = new Shape[S.degree * 2];
                            }
                            int record_i = 0;
                            while (s_start_offset < s_end_offset) {
                                s_start_offset += s_lr.readLine(line);
                                if (s_records[record_i] == null)
                                    s_records[record_i] = S.stockObject
                                            .clone();
                                s_records[record_i].fromText(line);
                                record_i++;
                            }
                            // Nullify other records
                            while (record_i < s_records.length)
                                s_records[record_i++] = null;
                            // Put in cache
                            s_records_cache.put(cache_key, s_records);
                            s_last_offset = s_start_offset;
                        }
                        // Do Cartesian product between records to find
                        // overlapping pairs
                        for (int i_r = 0; i_r < r_records.length
                                && r_records[i_r] != null; i_r++) {
                            for (int i_s = 0; i_s < s_records.length
                                    && s_records[i_s] != null; i_s++) {
                                if (r_records[i_r]
                                        .isIntersected(s_records[i_s])) {
                                    result_count++;
                                    if (output != null) {
                                        output.collect((S1) r_records[i_r],
                                                (S2) s_records[i_s]);
                                    }
                                }
                            }
                        }
                        // /////////////////////////////////////////////////////////////////
                    } else {
                        // Add a new pair to node pairs to be tested
                        // Go down one level if possible
                        int new_r_node, new_s_node;
                        if (!r_leaf) {
                            new_r_node = (r_node + i) * R.degree + 1;
                        } else {
                            new_r_node = r_node + i;
                        }
                        if (!s_leaf) {
                            new_s_node = (s_node + j) * S.degree + 1;
                        } else {
                            new_s_node = s_node + j;
                        }
                        long new_pair = (((long) new_r_node) << 32)
                                | new_s_node;
                        nodesToJoin.put(new_pair);
                    }
                }
            }
        }
    }
    return result_count;
}
/**
 * Joins two R-trees, dispatching to the disk-based algorithm when both
 * trees are file-backed and to the in-memory algorithm otherwise.
 *
 * @return number of overlapping pairs found
 */
public static <S1 extends Shape, S2 extends Shape> int spatialJoin(
        final RTree<S1> R, final RTree<S2> S,
        final ResultCollector2<S1, S2> output) throws IOException {
    // A non-negative start offset means the tree data lives on disk.
    boolean bothOnDisk = R.treeStartOffset >= 0 && S.treeStartOffset >= 0;
    if (bothOnDisk) {
        return spatialJoinDisk(R, S, output);
    }
    return spatialJoinMemory(R, S, output);
}
/**
 * Calculate the storage overhead required to build an RTree for the given
 * number of nodes.
 *
 * @param elementCount number of records to index
 * @param degree tree fan-out
 * @return storage overhead in bytes
 */
public static int calculateStorageOverhead(int elementCount, int degree) {
    // Height needed to index elementCount records at this fan-out.
    int height = Math.max(1,
            (int) Math.ceil(Math.log(elementCount) / Math.log(degree)));
    int leafNodeCount = (int) Math.pow(degree, height - 1);
    // If the leaves would be underfull, one fewer level suffices.
    // NOTE(review): calculateTreeStorage uses a strict '<' in this same
    // test; confirm which boundary is intended.
    if (elementCount <= 2 * leafNodeCount && height > 1) {
        height--;
        leafNodeCount = (int) Math.pow(degree, height - 1);
    }
    int nodeCount = (int) ((Math.pow(degree, height) - 1) / (degree - 1));
    // 4 bytes for the leading size field, plus header and all nodes.
    return 4 + TreeHeaderSize + nodeCount * NodeSize;
}
/**
 * Find log to the base 2 quickly: floor(log2(x)), treating x as an
 * unsigned 32-bit value.
 *
 * @param x value to take the logarithm of
 * @return floor(log2(x)), or -1 when x == 0
 */
public static int log2Floor(int x) {
    if (x == 0)
        return -1;
    // The index of the highest set bit equals floor(log2) for an
    // unsigned value; this matches the original shift-cascade exactly,
    // including for negative (high-bit-set) inputs.
    return 31 - Integer.numberOfLeadingZeros(x);
}
/**
 * Integer exponentiation by squaring: O(log exponent) multiplications.
 * Overflow wraps around, as with any int multiplication.
 *
 * @param base the base
 * @param exponent non-negative exponent
 * @return base raised to exponent (1 when exponent == 0)
 */
public static int powInt(int base, int exponent) {
    int result = 1;
    int b = base;
    int e = exponent;
    while (e != 0) {
        // Fold the current square in whenever the low bit is set.
        if ((e & 1) == 1) {
            result *= b;
        }
        b *= b;
        e >>>= 1;
    }
    return result;
}
/**
 * Precomputed Math.log values for small non-negative integers, used by
 * fastLog to avoid repeated transcendental calls. Index 0 holds
 * Math.log(0), i.e. negative infinity.
 */
private static final double LogLookupTable[];
static {
    LogLookupTable = new double[100];
    for (int value = 0; value < LogLookupTable.length; value++) {
        LogLookupTable[value] = Math.log(value);
    }
}
/**
 * Natural logarithm with a table lookup for small values.
 *
 * @param x value to take the logarithm of (small values hit the table)
 * @return Math.log(x), served from LogLookupTable when x is in range
 */
public static double fastLog(int x) {
    return x < LogLookupTable.length ? LogLookupTable[x] : Math.log(x);
}
/**
 * Fast approximate pow(a, b) using bit-level manipulation of the double
 * representation (exponent-field arithmetic). Approximate only — use for
 * heuristics, not exact math.
 *
 * @param a base (expected positive)
 * @param b exponent
 * @return an approximation of a^b
 */
public static double fastPow(double a, double b) {
    // Expression kept exactly as the original (same evaluation order) so
    // the floating-point result is bit-identical.
    final long bits = (long) (9076650 * (a - 1)
            / (a + 1 + 4 * (Math.sqrt(a))) * b + 1072632447);
    return Double.longBitsToDouble(bits << 32);
}
/**
 * Find the best (minimum) degree that can index the given number of records
 * such that the whole tree structure can be stored in the given bytes
 * available.
 *
 * @param bytesAvailable bytes available for header plus node structure
 * @param recordCount number of records to index
 * @return the smallest feasible degree, or Integer.MAX_VALUE if none fits
 */
public static int findBestDegree(int bytesAvailable, int recordCount) {
    // Number of nodes that fit in the available bytes.
    int nodeBudget = (bytesAvailable - TreeHeaderSize) / NodeSize;
    // Height bounds for this record count (minimum height is always 1,
    // i.e. degree == recordCount, so the search starts at 2).
    int maxHeight = log2Floor(recordCount / 2);
    int minHeight = 2;
    int bestDegree = Integer.MAX_VALUE;
    double lnHalfRecords = Math.log(recordCount / 2);
    double log2HalfRecords = lnHalfRecords / fastLog(2);
    // Try every candidate height and keep the smallest feasible degree.
    for (int h = minHeight; h <= maxHeight; h++) {
        // Minimum degree for height h. This approximation is good enough
        // for our case (tested with millions of random cases, per the
        // original author).
        int minDegree = (int) Math.ceil(fastPow(2.0, log2HalfRecords
                / (h + 1)));
        // Some heights are invalid; recompute to ensure consistency.
        int recalculatedHeight = (int) Math.floor(lnHalfRecords
                / fastLog(minDegree));
        if (h != recalculatedHeight)
            continue;
        int nodes = (int) ((powInt(minDegree, h + 1) - 1) / (minDegree - 1));
        if (nodes < nodeBudget && minDegree < bestDegree)
            bestDegree = minDegree;
    }
    return bestDegree;
}
/**
 * Computes the bytes needed for the header and node structure of a tree
 * indexing {@code elementCount} records at the given degree.
 *
 * @param elementCount number of records to index
 * @param degree tree fan-out
 * @return header plus node-structure size in bytes
 */
public static int calculateTreeStorage(int elementCount, int degree) {
    int height = Math.max(1,
            (int) Math.ceil(Math.log(elementCount) / Math.log(degree)));
    int leaves = (int) Math.pow(degree, height - 1);
    // Drop one level when the leaves would be underfull.
    if (elementCount < 2 * leaves && height > 1) {
        height--;
        leaves = (int) Math.pow(degree, height - 1);
    }
    int totalNodes = (int) ((Math.pow(degree, height) - 1) / (degree - 1));
    return TreeHeaderSize + totalNodes * NodeSize;
}
}
| t0nyren/spatedb | src/main/java/com/ricemap/spateDB/core/RTree.java | Java | apache-2.0 | 57,963 |
/*
* Copyright 2016 Greg Whitaker
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.gregwhitaker.catnap.springboot.annotation;
import com.github.gregwhitaker.catnap.core.annotation.CatnapAnnotation;
import com.github.gregwhitaker.catnap.springboot.config.CatnapWebMvcConfigurerAdapter;
import org.springframework.context.annotation.Import;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Enables Catnap response rendering for a Spring Boot application.
 * <p>
 * Place on a {@code @Configuration} class; via {@code @Import} it pulls in
 * {@link CatnapWebMvcConfigurerAdapter}, which registers the Catnap view
 * resolvers and message converters with Spring MVC.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE})
@CatnapAnnotation
@Import({CatnapWebMvcConfigurerAdapter.class})
public @interface EnableCatnap {
}
| gregwhitaker/catnap | catnap-springboot/src/main/java/com/github/gregwhitaker/catnap/springboot/annotation/EnableCatnap.java | Java | apache-2.0 | 1,203 |
#
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pretty print logging."""
import logging
import pprint
from typing import Any
def log(level: int, x: Any) -> None:
    """Pretty-print ``x`` and emit it to the root logger, one record per line.

    Does nothing when ``level`` is not enabled on the root logger, so the
    (potentially expensive) ``pprint.pformat`` call is skipped entirely.
    """
    logger = logging.getLogger(None)
    if not logger.isEnabledFor(level):
        return
    for line in pprint.pformat(x).split('\n'):
        logging.log(level, line)
def info(x: Any) -> None:
    """Pretty-print ``x`` to the root logger at INFO level."""
    log(logging.INFO, x)
def debug(x: Any) -> None:
    """Pretty-print ``x`` to the root logger at DEBUG level."""
    log(logging.DEBUG, x)
| project-chip/connectedhomeip | scripts/tools/memory/memdf/util/pretty.py | Python | apache-2.0 | 957 |
package pl.dzielins42.dmtools.generator.religion;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import pl.dzielins42.dmtools.model.enumeration.Alignment;
import pl.dzielins42.dmtools.model.enumeration.Gender;
import pl.dzielins42.dmtools.model.religion.Deity;
import pl.dzielins42.dmtools.model.religion.Domain;
import pl.dzielins42.dmtools.model.religion.Pantheon;
import pl.dzielins42.dmtools.util.ProbabilityDistributionTable;
/**
 * {@link PantheonGenerator} that builds a pantheon of randomly generated
 * deities, each with a rank, domains, alignment, gender and name.
 */
public class BasicPantheonGenerator implements PantheonGenerator<BasicPantheonGeneratorOptions> {
    // Highest divine rank (inclusive) a generated deity may receive.
    protected final int MAX_DIVINE_RANK = 25;
public Pantheon generate(BasicPantheonGeneratorOptions options) {
// Validate options
if (options == null || options.getRandom() == null || options.getDomainsProbability() == null
|| options.getNameGenerator() == null) {
throw new IllegalArgumentException();
}
// Generate number of deities as random number between minDeitiesNumber
// and maxDeitiesNumber
int numberOfDeities = options.getMinDeitiesNumber();
if (options.getMinDeitiesNumber() != options.getMaxDeitiesNumber()) {
numberOfDeities += options.getRandom().nextInt(options.getMaxDeitiesNumber() - options.getMinDeitiesNumber() + 1);
}
// Generate each deity independently
List<Deity> deities = new ArrayList<Deity>(numberOfDeities);
Deity deity;
for (int i = 0; i < numberOfDeities; i++) {
deity = generateDeity(options);
deities.add(deity);
}
return new Pantheon("The Pantheon", deities);
}
protected Deity generateDeity(BasicPantheonGeneratorOptions options) {
// Generate rank
// TODO higher ranks should be rarer, probably by some mathematical
// formula
// Basic pantheons should have a few greater deities (16-20) but mostly
// intermediate deities (11-15) and lesser deities (6-10), demigods
// (1-5) and heroes (0) if the pantheon size enables it. There should
// not be many overdeities (21+).
int rank = options.getRandom().nextInt(MAX_DIVINE_RANK + 1);
// Generate domains
// Number of deity's domains is its ceiling of its rank divided by 5
int numberOfDomains = (int) Math.ceil(((double) rank) / 5.0d);
// Temporarily it is 3
numberOfDomains = 3;
// If it is overdeity, its power is beyond domain partitioning - it has
// power over every domain
List<Domain> domains = new ArrayList<Domain>();
Domain domain;
while (domains.size() < numberOfDomains) {
domain = options.getDomainsProbability().getRandom(options.getRandom());
if (!domains.contains(domain)) {
domains.add(domain);
}
}
Alignment alignment = getRandomAlignmentForDomains(domains, options);
Gender gender = Gender.values()[options.getRandom().nextInt(Gender.values().length)];
Deity deity = new Deity(options.getNameGenerator().generate(gender, options), alignment, gender, rank, domains);
return deity;
}
/**
* Returns deity's {@link Domain} list suited for pre-drawn
* {@link Alignment}.
*
* @param alignment
* deity's alignment.
* @param options
* generation options.
* @return deity's {@link Domain} list suited for pre-drawn
* {@link Alignment}.
*/
    protected Domain getRandomDomainForAlignment(Alignment alignment, BasicPantheonGeneratorOptions options) {
        // NOTE(review): unimplemented stub — always returns null. Callers must
        // not rely on it until a real implementation lands.
        return null;
    }
/**
* Returns deity's {@link Alignment} suited for pre-drawn {@link Domain}
* list. For each domain, probability for each alignment is retrieved using
* {@link Domain#getAlignmentProbabilities()} method. Values are used to
* create new {@link ProbabilityDistributionTable}, which is used to get
* returned alignment.
*
* @param domains
* list of domains of the deity.
* @param options
* generation options.
* @return deity's {@link Alignment} suited for pre-drawn {@link Domain}
* list.
*/
protected Alignment getRandomAlignmentForDomains(List<Domain> domains, BasicPantheonGeneratorOptions options) {
// TODO maybe return random alignment based on uniform distribution
if (domains == null || domains.isEmpty()) {
throw new IllegalArgumentException();
}
double[] probabilities = new double[Alignment.values().length];
Arrays.fill(probabilities, 1.0d);
for (Domain domain : domains) {
for (int i = 0; i < probabilities.length; i++) {
probabilities[i] *= domain.getAlignmentProbabilities().getProbabilities().get(i);
}
}
ProbabilityDistributionTable<Alignment> tempPdt = new ProbabilityDistributionTable<Alignment>(Alignment.values(),
probabilities);
return tempPdt.getRandom(options.getRandom());
}
} | dzielins42/urban-bear | src/main/java/pl/dzielins42/dmtools/generator/religion/BasicPantheonGenerator.java | Java | apache-2.0 | 5,109 |
package jp.co.thcomp.bluetoothhelper;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
/**
 * Reassembles a message that arrives split across multiple BLE packets.
 * The first ("configuration") packet announces the packet count and total
 * data size; subsequent data packets carry payload fragments. Data packets
 * that arrive before their configuration packet are held and replayed once
 * it shows up. Inherited state: {@code mMessageId} and {@code mData} come
 * from {@code BleDataProvider}.
 */
public class BleReceiveDataProvider extends BleDataProvider {
    // Return codes of addPacket(); non-negative return values are message IDs.
    public static final int AddPacketResultSuccess = -1;
    public static final int AddPacketResultAlreadyFinished = -2;
    public static final int AddPacketResultNoData = -3;
    // True once every packet of the message has been received.
    private boolean mReceiveDataFinish = false;
    // Payload fragments indexed by (packet position - 1); null slots are still missing.
    private byte[][] mReceiveDataArray;
    // Number of data packets still expected.
    private int mLeftPacketCount = 0;
    // Total payload size announced by the configuration packet.
    private int mDataSize;
    // Message ID of data packets that arrived before their configuration packet.
    private Short mReservedMessageId = null;
    // Data packets held until the configuration packet arrives.
    private ArrayList<byte[]> mReservedPacketList = new ArrayList<>();
    /**
     * Adds one received BLE packet to the message being reassembled.
     *
     * @param packetData raw packet bytes (header + payload)
     * @return AddPacketResultAlreadyFinished: adding to an already completed message (add failed);
     *         AddPacketResultNoData: packetData was null or empty (add failed);
     *         AddPacketResultSuccess: packet accepted;
     *         0..Short.MAX_VALUE: a packet of a different message was offered
     *         (add failed) — the returned value is the newer message ID
     */
    public int addPacket(byte[] packetData) {
        int ret = AddPacketResultSuccess;
        if (packetData != null && packetData.length > 0) {
            if (!mReceiveDataFinish) {
                ByteBuffer tempBufferForShort = ByteBuffer.allocate(Short.SIZE / Byte.SIZE);
                ByteBuffer tempBufferForInt = ByteBuffer.allocate(Integer.SIZE / Byte.SIZE);
                // Bytes 0-1: message ID (at most Short.MAX_VALUE; reflects the
                // peripheral's send order, but the value wraps around)
                tempBufferForShort.position(0);
                tempBufferForShort.put(packetData, 0, LengthMessageID);
                short messageId = tempBufferForShort.getShort(0);
                // Bytes 2-5: packet size; set to a value no larger than the MTU size
                tempBufferForInt.position(0);
                tempBufferForInt.put(packetData, IndexPacketSize, LengthPacketSize);
                int packetSize = tempBufferForInt.getInt(0);
                // Bytes 6-9: packet position; 0 marks the configuration packet,
                // 1 or greater marks a data packet
                tempBufferForInt.position(0);
                tempBufferForInt.put(packetData, IndexPacketPosition, LengthPacketPosition);
                int packetPosition = tempBufferForInt.getInt(0);
                if (packetPosition == 0) {
                    if (mMessageId == null) {
                        boolean matchMessageId = true;
                        if (mReservedMessageId != null) {
                            // A message ID is already reserved, so reject
                            // configuration packets for any other ID
                            if (messageId != mReservedMessageId) {
                                matchMessageId = false;
                            }
                        }
                        if (matchMessageId) {
                            mMessageId = messageId;
                            // Configuration packet
                            // Bytes 10-13: packet count, including the
                            // configuration packet (at most Integer.MAX_VALUE)
                            tempBufferForInt.position(0);
                            tempBufferForInt.put(packetData, IndexPacketCount, LengthPacketCount);
                            mLeftPacketCount = tempBufferForInt.getInt(0) - 1;
                            mReceiveDataArray = new byte[mLeftPacketCount][];
                            // Bytes 14-17: total data size (at most Integer.MAX_VALUE)
                            tempBufferForInt.position(0);
                            tempBufferForInt.put(packetData, IndexDataSize, LengthDataSize);
                            mDataSize = tempBufferForInt.getInt(0);
                            if (mReservedMessageId != null && mReservedPacketList.size() > 0) {
                                // Replay the data packets that were held while
                                // waiting for this configuration packet
                                for (byte[] reservedPacketData : mReservedPacketList) {
                                    addPacket(reservedPacketData);
                                }
                            }
                            mReservedMessageId = null;
                            mReservedPacketList.clear();
                        }
                    } else {
                        // A packet of a different message is being added, so
                        // return the newer message ID
                        ret = messageId;
                    }
                } else {
                    if (mMessageId == null) {
                        if (mReservedMessageId == null) {
                            mReservedMessageId = messageId;
                        }
                        if (mReservedMessageId == messageId) {
                            // No configuration packet yet, so hold this packet
                            // in the pending list
                            mReservedPacketList.add(packetData);
                        }
                    } else if (mMessageId == messageId) {
                        // Data packet
                        if (mReceiveDataArray != null) {
                            mLeftPacketCount--;
                            // Byte 10: flag telling whether another packet follows;
                            // 0: no next packet, 1: next packet exists
                            tempBufferForInt.position(0);
                            tempBufferForInt.put(packetData, IndexExistNextPacket, LengthExistNextPacket);
                            int existNextPacket = tempBufferForInt.getInt(0);
                            // The remainder holds payload: the size recorded in the
                            // header minus the header bytes
                            mReceiveDataArray[packetPosition - 1] = Arrays.copyOfRange(packetData, IndexDataStartPosition, packetSize);
                            if ((mLeftPacketCount == 0) || (existNextPacket == NotExistNextPacket)) {
                                // Reset the remaining count to 0, then recompute it
                                // from what has actually arrived
                                mLeftPacketCount = 0;
                                for (int i = 0, size = mReceiveDataArray.length; i < size; i++) {
                                    if (mReceiveDataArray[i] == null) {
                                        mLeftPacketCount++;
                                    }
                                }
                                if (mLeftPacketCount == 0) {
                                    mReceiveDataFinish = true;
                                }
                            }
                        }
                    } else {
                        ret = messageId;
                    }
                }
            } else {
                ret = AddPacketResultAlreadyFinished;
            }
        } else {
            ret = AddPacketResultNoData;
        }
        return ret;
    }
    /** @return true once the whole message has been received. */
    public boolean isCompleted() {
        return mReceiveDataFinish;
    }
    /**
     * Returns the reassembled payload, concatenating the fragments in packet
     * order. Returns null until the message is complete; the result is built
     * lazily and cached in the inherited {@code mData} field.
     */
    @Override
    public byte[] getData() {
        byte[] ret = null;
        if (mReceiveDataFinish) {
            if (mData == null) {
                ByteArrayOutputStream stream = new ByteArrayOutputStream();
                try {
                    for (int i = 0, size = mReceiveDataArray.length; i < size; i++) {
                        stream.write(mReceiveDataArray[i]);
                    }
                    mData = stream.toByteArray();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            ret = super.getData();
        }
        return ret;
    }
    /**
     * Returns the message ID: the reserved ID while still waiting for the
     * configuration packet, otherwise the ID tracked by the superclass.
     */
    @Override
    public Short getMessageId() {
        if (mReservedMessageId != null && mMessageId == null) {
            return mReservedMessageId;
        } else {
            return super.getMessageId();
        }
    }
}
| thcomp/Android_BluetoothHelper | app/src/main/java/jp/co/thcomp/bluetoothhelper/BleReceiveDataProvider.java | Java | apache-2.0 | 7,875 |
// Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "compat.h"
#include "handler_map.h"
#include "reactor_factory.h"
#include "reactor_runner.h"
#include <cassert>
extern "C" {
#include <signal.h>
}
namespace palo {
// Shared reactor pool and the thread group running one ReactorRunner per reactor.
std::vector<ReactorPtr> ReactorFactory::ms_reactors;
boost::thread_group ReactorFactory::ms_threads;
// Deterministically seeded RNG — presumably used when picking a reactor;
// NOTE(review): confirm against the callers of ReactorFactory.
std::default_random_engine ReactorFactory::rng {1};
// Guards one-time initialization of ms_reactors.
std::mutex ReactorFactory::ms_mutex;
// Cursor for handing out reactors round-robin.
std::atomic<int> ReactorFactory::ms_next_reactor(0);
// Whether epoll runs in edge-triggered mode.
bool ReactorFactory::ms_epollet = true;
bool ReactorFactory::proxy_master = false;
void ReactorFactory::initialize(uint16_t reactor_count) {
std::lock_guard<std::mutex> lock(ms_mutex);
if (!ms_reactors.empty())
return;
ReactorPtr reactor;
ReactorRunner rrunner;
ReactorRunner::handler_map = std::make_shared<HandlerMap>();
signal(SIGPIPE, SIG_IGN);
assert(reactor_count > 0);
ms_reactors.reserve(reactor_count+2);
for (uint16_t i=0; i<reactor_count+2; i++) {
reactor = std::make_shared<Reactor>();
ms_reactors.push_back(reactor);
rrunner.set_reactor(reactor);
ms_threads.create_thread(rrunner);
}
}
void ReactorFactory::destroy() {
ReactorRunner::shutdown = true;
for (size_t i=0; i<ms_reactors.size(); i++) {
ms_reactors[i]->poll_loop_interrupt();
}
ms_threads.join_all();
ms_reactors.clear();
ReactorRunner::handler_map = 0;
}
/** Blocks the calling thread until every reactor thread has terminated. */
void ReactorFactory::join() {
    ms_threads.join_all();
}
} //namespace palo
| lingbin/palo | be/src/rpc/reactor_factory.cpp | C++ | apache-2.0 | 2,061 |
using System.Data.Entity;
using System.Data.Entity.Infrastructure;
using System.Threading;
using System.Threading.Tasks;
namespace MunkeyIssues.Core.Data
{
    /// <summary>
    /// Testable abstraction over an Entity Framework <c>DbContext</c>,
    /// exposing only the members the application depends on.
    /// </summary>
    public interface IDbContext
    {
        /// <summary>Gets the change-tracking entry for <paramref name="entity"/>.</summary>
        DbEntityEntry<TEntity> Entry<TEntity>(TEntity entity) where TEntity : class;
        /// <summary>Persists all pending changes; returns the number of objects written.</summary>
        int SaveChanges();
        /// <summary>Asynchronously persists all pending changes; the token may cancel the operation.</summary>
        Task<int> SaveChangesAsync(CancellationToken cancellationToken);
        /// <summary>Returns the set used to query and save instances of <typeparamref name="TEntity"/>.</summary>
        DbSet<TEntity> Set<TEntity>() where TEntity : class;
    }
}
| munumafia/MunkeyIssues | MunkeyIssues.Core/Data/IDbContext.cs | C# | apache-2.0 | 468 |
/*
* Copyright 2021 Solace Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.solace.samples.java.snippets;
import com.solace.messaging.MessagingService;
import com.solace.messaging.PubSubPlusClientException;
import com.solace.messaging.receiver.DirectMessageReceiver;
import com.solace.messaging.receiver.InboundMessage;
import com.solace.messaging.receiver.MessageReceiver.InboundMessageSupplier;
import com.solace.messaging.receiver.MessageReceiver.MessageHandler;
import com.solace.messaging.resources.TopicSubscription;
import com.solace.messaging.util.CompletionListener;
import com.solace.messaging.util.Converter.BytesToObject;
import com.solace.messaging.util.InteroperabilitySupport.RestInteroperabilitySupport;
import java.io.Serializable;
import java.util.Properties;
import java.util.concurrent.CompletionStage;
/**
* Sampler for direct message consumption
*/
public class HowToConsumeDirectMessage {
  /**
   * Example how to start direct message receiver. This call is blocking.
   *
   * @param receiverToBeStarted receiver to be started
   */
  public static void startDirectMessageReceiver(final DirectMessageReceiver receiverToBeStarted) {
    receiverToBeStarted.start();
  }
  /**
   * Example how to start direct message receiver using callback listener and asynchronously get
   * notifications when start operation is complete
   *
   * @param receiverToBeStarted receiver to be started
   */
  public static void startDirectMessageReceiverAsyncCallback(
      final DirectMessageReceiver receiverToBeStarted) {
    // Listener is invoked with a null throwable on success, non-null on failure.
    final CompletionListener<DirectMessageReceiver> receiverStartupListener = (directReceiver, throwable) -> {
      if (throwable == null) {
        // started successfully, i.e. can receive messages
      } else {
        // deal with the exception raised during start
      }
    };
    receiverToBeStarted.startAsync(receiverStartupListener);
  }
  /**
   * Example how to start direct message receiver using callback listener and asynchronously get
   * notifications when start operation is complete.
   *
   * @param receiverToBeStarted receiver to be started
   * @see <a href="https://community.oracle.com/docs/DOC-995305">CompletableFuture for Asynchronous
   * Programming in Java 8</a>
   */
  public static void startDirectMessageReceiverAsyncCompletionStage(
      final DirectMessageReceiver receiverToBeStarted) {
    final CompletionStage<DirectMessageReceiver> receiverOnceStartCompleteStage = receiverToBeStarted
        .startAsync();
    // use CompletionStage API for reactive pipeline implementation
  }
  /**
   * Example how to consume raw bytes direct message
   *
   * @param service connected instance of a messaging service, ready to be used
   */
  public static void consumeDirectMessageBytePayload(MessagingService service) {
    final DirectMessageReceiver receiver = service
        .createDirectMessageReceiverBuilder()
        .withSubscriptions(TopicSubscription.of("setSubscriptionExpressionHere"))
        .build().start();
    // receiveMessage() blocks until a message arrives.
    final byte[] messagePayload = receiver.receiveMessage().getPayloadAsBytes();
  }
  /**
   * Example how to consume converted to utf 8 string direct message
   *
   * @param service connected instance of a messaging service, ready to be used
   */
  public static void consumeDirectMessageStringPayload(MessagingService service) {
    final DirectMessageReceiver receiver = service
        .createDirectMessageReceiverBuilder()
        .withSubscriptions(TopicSubscription.of("setSubscriptionExpressionHere"))
        .build().start();
    final String messagePayload = receiver.receiveMessage().getPayloadAsString();
  }
  /**
   * Example how to consume direct message and extract HTTP/REST specific content from direct
   * message if available
   *
   * @param service connected instance of a messaging service, ready to be used
   */
  public static void consumeDirectMessagePublishedFromRestClient(MessagingService service) {
    final DirectMessageReceiver receiver = service
        .createDirectMessageReceiverBuilder()
        .withSubscriptions(TopicSubscription.of("setSubscriptionExpressionHere"))
        .build().start();
    final InboundMessage message = receiver.receiveMessage();
    final RestInteroperabilitySupport restSpecificFields = message.getRestInteroperabilitySupport();
    final String contentEncoding = restSpecificFields.getHTTPContentEncoding();
    final String contentType = restSpecificFields.getHTTPContentType();
  }
  /**
   * Example how to consume full direct message
   *
   * @param service connected instance of a messaging service, ready to be used
   */
  public static void consumeDirectDetailedMessage(MessagingService service) {
    final DirectMessageReceiver receiver = service
        .createDirectMessageReceiverBuilder()
        .withSubscriptions(TopicSubscription.of("setSubscriptionExpressionHere"))
        .build().start();
    // extensive details about message, payload, header, properties,
    // message delivery information are available using InboundMessage
    final InboundMessage message = receiver.receiveMessage();
    // i.e message expiration time point (UTC)
    final long expiration = message.getExpiration();
    // in assumption that MyData business object is expected in a message,
    // a simple converter is provided
    final BytesToObject<MyData> bytesToBusinessObjectConverter = (bytes) -> {
      return new MyData(new String(bytes));
    };
    final MyData myBusinessObjectFromMessage = message
        .getAndConvertPayload(bytesToBusinessObjectConverter, MyData.class);
  }
  /**
   * Example how to consume (blocking) full direct messages in a loop
   *
   * @param service connected instance of a messaging service, ready to be used
   */
  public static void blockingConsumeDirectMessagesInLoop(MessagingService service) {
    final DirectMessageReceiver receiver = service
        .createDirectMessageReceiverBuilder()
        .withSubscriptions(TopicSubscription.of("setSubscriptionExpressionHere"))
        .build()
        .start();
    int count = 0;
    //receive next 1000 messages
    while (count < 1000) {
      try {
        final InboundMessage message = receiver.receiveMessage();
        // process a message
        count++;
      } catch (PubSubPlusClientException e) {
        // deal with an exception, mostly timeout exception
      }
    }
  }
  /**
   * Example how to consume (blocking with timeout) full direct messages in a loop
   *
   * @param service connected instance of a messaging service, ready to be used
   * @param receiveTimeout time out in milliseconds after that blocking receive exits, values > 0
   * are expected, use {@code receiveOrElse (..)} method when immediate
   * response is required
   */
  public static void blockingConsumeDirectMessagesInLoop(MessagingService service,
      int receiveTimeout) {
    final DirectMessageReceiver receiver = service
        .createDirectMessageReceiverBuilder()
        .withSubscriptions(TopicSubscription.of("setSubscriptionExpressionHere"))
        .build()
        .start();
    int count = 0;
    //receive next 1000 messages
    while (count < 1000) {
      try {
        final InboundMessage message = receiver.receiveMessage(receiveTimeout);
        if (message != null)
        // process a message
        //message can be null when timeout expired and no new message was received
        {
          count++;
        }
      } catch (PubSubPlusClientException e) {
        // deal with an exception, mostly timeout exception
      }
    }
  }
  /**
   * Example how to consume (non-blocking) full direct messages in a loop
   * <p>
   * NOTE(review): method name carries a typo ("Bocking" instead of "Blocking");
   * kept as-is for source compatibility of the sample.
   *
   * @param service connected instance of a messaging service, ready to be used
   * @param receiveTimeout time out in milliseconds after that blocking receive exits, values > 0
   * are expected, use {@code receiveOrElse (..)} method when immediate
   * response is required
   */
  public static void nonBockingConsumeDirectMessagesInLoop(MessagingService service,
      int receiveTimeout) {
    final DirectMessageReceiver receiver = service
        .createDirectMessageReceiverBuilder()
        .withSubscriptions(TopicSubscription.of("setSubscriptionExpressionHere"))
        .build()
        .start();
    int count = 0;
    // more message supplier are available
    final InboundMessageSupplier nullSupplier = InboundMessageSupplier.nullMessageSupplier();
    //receive next 1000 messages
    while (count < 1000) {
      try {
        InboundMessage message = receiver.receiveOrElse(nullSupplier);
        if (message != null)
        // process a message
        //message can be null since Null supplier is used,
        // when no message is available to receive, given InboundMessageSupplier is used to generate one
        // in particular case null is generated
        {
          count++;
        }
      } catch (PubSubPlusClientException e) {
        // deal with an exception, mostly timeout exception
      }
    }
  }
  /**
   * Example how to consume full direct messages asynchronous using callback
   *
   * @param service connected instance of a messaging service, ready to be used
   */
  public static void consumeDirectMessageAsync(MessagingService service) {
    final DirectMessageReceiver receiver = service
        .createDirectMessageReceiverBuilder()
        .withSubscriptions(TopicSubscription.of("setSubscriptionExpressionHere"))
        .build().start();
    final MessageHandler messageHandler = (message) -> {
      // do something with a message, i.e access raw payload:
      byte[] bytes = message.getPayloadAsBytes();
    };
    receiver.receiveAsync(messageHandler);
  }
  /**
   * Example how to configure {@link DirectMessageReceiver} using {@link Properties}. See {@link
   * com.solace.messaging.DirectMessageReceiverBuilder#fromProperties(Properties)} and {@link
   * com.solace.messaging.MessageReceiverBuilder#fromProperties(Properties)} for the list available
   * properties.
   *
   * @param service connected instance of a messaging service, ready to be used
   * @param receiverConfiguration full configuration and/or fine tuning advanced configuration
   * properties for {@link DirectMessageReceiver}.
   * @return started direct message receiver ready to receive messages
   */
  public static DirectMessageReceiver configureConsumerFromProperties(MessagingService service,
      Properties receiverConfiguration) {
    // Note: property based configuration can be extended/overridden using api calls
    final DirectMessageReceiver receiver = service
        .createDirectMessageReceiverBuilder().fromProperties(receiverConfiguration)
        .build().start();
    return receiver;
  }
  /**
   * basic example for a business object to be send in a message
   */
  static class MyData implements Serializable {
    private static final long serialVersionUID = 1L;
    private final String name;
    MyData(String name) {
      this.name = name;
    }
    public String getName() {
      return name;
    }
  }
}
| SolaceSamples/solace-samples-java | src/main/java/com/solace/samples/java/snippets/HowToConsumeDirectMessage.java | Java | apache-2.0 | 11,744 |
import {Component} from '@angular/core';
import * as $ from "jquery"
import {downgradeComponent} from '@angular/upgrade/static';
import {NotificationService} from "../../core/services/notification.service";
import {AgentService} from "../../core/services/agent.service";
declare var angular:any
/**
 * Scheduler administration page component.
 * Downgraded below so the legacy AngularJS application can host it.
 */
@Component({
    selector: 'scheduler',
    templateUrl: "./scheduler.component.html",
    styleUrls: []
})
class SchedulerComponent {
    constructor(private notification: NotificationService, private agentService: AgentService) {
        // agent
        // NOTE(review): activation hook is disabled, so init() is never called —
        // confirm whether this is intentional.
        // this.agentService.isActive().then(() => {
        //     this.init();
        // });
    }
    // Placeholder for post-activation setup (currently unused, see constructor note).
    init() {
    }
}
// Register the Angular component with the AngularJS module via downgrade.
angular.module('scheduler.components', []).directive(
    `scheduler`,
    downgradeComponent({component: SchedulerComponent}));
export {SchedulerComponent};
| orientechnologies/orientdb-studio | src/app/administration/scheduler/scheduler.component.ts | TypeScript | apache-2.0 | 804 |
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.enterprise.channel.binary;
import java.io.IOException;
import com.orientechnologies.common.thread.OSoftThread;
import com.orientechnologies.orient.core.Orient;
import com.orientechnologies.orient.core.record.ORecordInternal;
/**
* Service thread that catches internal messages sent by the server
*
* @author Luca Garulli (l.garulli--at--orientechnologies.com)
*/
public class OAsynchChannelServiceThread extends OSoftThread {
  // Channel this thread listens on; set to null once the socket is closed.
  private OChannelBinaryAsynchClient network;
  // Session id used when reading push responses; Integer.MIN_VALUE here —
  // presumably a session-less marker; NOTE(review): confirm against
  // OChannelBinaryAsynchClient.beginResponse().
  private int                        sessionId;
  // Receiver of decoded push events; may be null.
  private ORemoteServerEventListener remoteServerEventListener;
  /** Creates the service thread for the given channel and starts it immediately. */
  public OAsynchChannelServiceThread(final ORemoteServerEventListener iRemoteServerEventListener,
      final OChannelBinaryAsynchClient iChannel) {
    super(Orient.instance().getThreadGroup(), "OrientDB <- Asynch Client (" + iChannel.socket.getRemoteSocketAddress() + ")");
    sessionId = Integer.MIN_VALUE;
    remoteServerEventListener = iRemoteServerEventListener;
    network = iChannel;
    start();
  }
  /**
   * One iteration of the service loop: blocks until the server pushes a
   * message, decodes it by request type and forwards it to the listener.
   * On I/O failure the socket is assumed closed: the thread shuts down and
   * the channel is released.
   */
  @Override
  protected void execute() throws Exception {
    try {
      network.beginResponse(sessionId, 0);
      final byte request = network.readByte();
      Object obj = null;
      switch (request) {
      case OChannelBinaryProtocol.REQUEST_PUSH_RECORD:
        obj = (ORecordInternal<?>) OChannelBinaryProtocol.readIdentifiable(network);
        break;
      case OChannelBinaryProtocol.REQUEST_PUSH_DISTRIB_CONFIG:
        obj = network.readBytes();
        break;
      }
      if (remoteServerEventListener != null)
        remoteServerEventListener.onRequest(request, obj);
    } catch (IOException ioe) {
      // EXCEPTION RECEIVED (THE SOCKET HAS BEEN CLOSED?) ASSURE TO UNLOCK THE READ AND EXIT THIS THREAD
      sendShutdown();
      if (network != null) {
        final OChannelBinaryAsynchClient n = network;
        network = null;
        n.close();
      }
    } finally {
      // Release the response lock only if the channel is still open.
      if (network != null)
        network.endResponse();
    }
  }
}
| delebash/orientdb-parent | enterprise/src/main/java/com/orientechnologies/orient/enterprise/channel/binary/OAsynchChannelServiceThread.java | Java | apache-2.0 | 2,656 |
import Future = require("fibers/future");
import { AddPlatformCommand } from "./add-platform";
/**
 * Shared implementation for the platform build commands: prepares the
 * platform, builds it and optionally copies the produced package to the
 * --copy-to destination.
 */
export class BuildCommandBase {
    constructor(protected $options: IOptions,
        private $platformService: IPlatformService) { }
    executeCore(args: string[], buildConfig?: IBuildConfig): IFuture<void> {
        return (() => {
            // args[0] is the platform name (e.g. "ios", "android").
            let platform = args[0].toLowerCase();
            this.$platformService.preparePlatform(platform, true).wait();
            this.$platformService.buildPlatform(platform, buildConfig).wait();
            if(this.$options.copyTo) {
                this.$platformService.copyLastOutput(platform, this.$options.copyTo, {isForDevice: this.$options.forDevice}).wait();
            }
        }).future<void>()();
    }
}
/** `tns build ios` — builds the project for the iOS platform. */
export class BuildIosCommand extends BuildCommandBase implements ICommand {
    constructor(protected $options: IOptions,
        private $platformsData: IPlatformsData,
        $platformService: IPlatformService) {
        super($options, $platformService);
    }
    public allowedParameters: ICommandParameter[] = [];
    public execute(args: string[]): IFuture<void> {
        // The command takes no positional arguments, so args is ignored.
        return this.executeCore([this.$platformsData.availablePlatforms.iOS]);
    }
}
$injector.registerCommand("build|ios", BuildIosCommand);
/** `tns build android` — builds the project for the Android platform. */
export class BuildAndroidCommand extends BuildCommandBase implements ICommand {
    constructor(protected $options: IOptions,
        private $platformsData: IPlatformsData,
        private $errors: IErrors,
        $platformService: IPlatformService) {
        super($options, $platformService);
    }
    public execute(args: string[]): IFuture<void> {
        return this.executeCore([this.$platformsData.availablePlatforms.Android]);
    }
    public allowedParameters: ICommandParameter[] = [];
    public canExecute(args: string[]): IFuture<boolean> {
        return (() => {
            // Release builds must be signed, so every key-store option is mandatory.
            if (this.$options.release && (!this.$options.keyStorePath || !this.$options.keyStorePassword || !this.$options.keyStoreAlias || !this.$options.keyStoreAliasPassword)) {
                this.$errors.fail("When producing a release build, you need to specify all --key-store-* options.");
            }
            // The command accepts no positional arguments.
            return args.length === 0;
        }).future<boolean>()();
    }
}
$injector.registerCommand("build|android", BuildAndroidCommand);
/**
 * `tns build vr` — adds the "vr" platform (via the AddPlatformCommand base),
 * then builds an APK through the nativescript-cli-vr package.
 */
export class BuildVRCommand extends AddPlatformCommand {
    constructor(protected $projectData: IProjectData,
        $platformService: IPlatformService,
        protected $errors: IErrors,
        $fs: IFileSystem,
        protected $logger: ILogger) {
        super($projectData, $platformService, $errors, $fs, $logger);
    }
    // Path of the built APK; assigned after a successful build.
    protected pathToApk: string;
    public allowedParameters: ICommandParameter[] = [];
    public execute(args: string[]): IFuture<void> {
        return (() => {
            super.execute(["vr"]).wait();
            let vr = require("nativescript-cli-vr");
            // Bridge the promise-based VR builder into the fibers/futures world.
            let buildFuture = new Future<string>();
            this.$logger.info("Building...");
            let promise: any = vr.build(this.$projectData.projectId, this.$projectData.projectDir);
            promise
                .then((result: string) => buildFuture.return(result))
                .catch((e: Error) => buildFuture.throw(e));
            this.pathToApk = buildFuture.wait();
            this.$logger.printMarkdown(`Successfully built application \`${this.$projectData.projectId}\` for \`Virtual Reality\`.`);
        }).future<void>()();
    }
}
$injector.registerCommand("build|vr", BuildVRCommand);
| tsvetie/nativescript-cli | lib/commands/build.ts | TypeScript | apache-2.0 | 3,172 |
<?php
/************************************************************************/
/* ATutor */
/************************************************************************/
/* Copyright (c) 2002-2010 */
/* Inclusive Design Institute */
/* http://atutor.ca */
/* This program is free software. You can redistribute it and/or */
/* modify it under the terms of the GNU General Public License */
/* as published by the Free Software Foundation. */
/************************************************************************/
// $Id$
// Admin page: create a new forum and assign it to one or more courses.
define('AT_INCLUDE_PATH', '../../../../include/');
require(AT_INCLUDE_PATH.'vitals.inc.php');
// Only administrators holding the forum privilege may use this page.
admin_authenticate(AT_ADMIN_PRIV_FORUMS);
if (isset($_POST['cancel'])) {
	// Cancelled: record feedback and return to the forum list.
	$msg->addFeedback('CANCELLED');
	header('Location: '.AT_BASE_HREF.'mods/_standard/forums/admin/forums.php');
	exit;
} else if (isset($_POST['add_forum'])) {
	// Collect the names of any required fields that were left empty.
	$missing_fields = array();
	if (empty($_POST['title'])) {
		$missing_fields[] = _AT('title');
	}
	if (empty($_POST['courses'])) {
		$missing_fields[] = _AT('courses');
	}
	if ($missing_fields) {
		$missing_fields = implode(', ', $missing_fields);
		$msg->addError(array('EMPTY_FIELDS', $missing_fields));
	}
	$_POST['edit'] = intval($_POST['edit']);
	if (!($msg->containsErrors())) {
		//add forum — NOTE(review): values are interpolated via queryDB's %s
		//placeholders; presumably escaped inside queryDB — verify (SQL
		//injection risk otherwise).
		$sql = "INSERT INTO %sforums (title, description, mins_to_edit) VALUES ('%s','%s', %d)";
		$result = queryDB($sql, array(TABLE_PREFIX, $_POST['title'], $_POST['description'], $_POST['edit']));
		$forum_id = at_insert_id();
		global $sqlout;
		write_to_log(AT_ADMIN_LOG_INSERT, 'forums', $result, $sqlout);
		//for each course, add an entry to the forums_courses table
		foreach ($_POST['courses'] as $course) {
			$sql = "INSERT INTO %sforums_courses VALUES (%d,%d)";
			$result = queryDB($sql, array(TABLE_PREFIX, $forum_id, $course));
			global $sqlout;
			write_to_log(AT_ADMIN_LOG_INSERT, 'forums_courses', $result, $sqlout);
		}
		$msg->addFeedback('ACTION_COMPLETED_SUCCESSFULLY');
		// NOTE(review): $course still holds the LAST course id from the loop
		// above — presumably "0" marks an all-courses assignment; confirm intent.
		if($course =="0"){
			$msg->addFeedback('FORUM_POSTING');
		}
		header('Location: '.AT_BASE_HREF.'mods/_standard/forums/admin/forums.php');
		exit;
	}
}
// Render the add-forum form: focus the title field and list all course titles.
$onload = 'document.form.title.focus();';
$sql = "SELECT course_id, title FROM %scourses ORDER BY title";
$rows_titles = queryDB($sql, array(TABLE_PREFIX));
$savant->assign('titles', $rows_titles);
require(AT_INCLUDE_PATH.'header.inc.php');
$savant->assign('system_courses', $system_courses);
$savant->display('admin/courses/forum_add.tmpl.php');
require(AT_INCLUDE_PATH.'footer.inc.php'); ?>
require(AT_INCLUDE_PATH.'footer.inc.php'); ?> | CaviereFabien/Test | ATutor/new_mods/_standard/forums/admin/forum_add.php | PHP | apache-2.0 | 2,711 |
package project2.jdbc.bean;
import org.json.JSONObject;
/**
 * Plain data holder for one line item of a movie order: the quantity,
 * the unit price, and the ordered movie's id and title. Serializable to
 * JSON via {@link #toJson()}.
 */
public class OrderSingleBean {

    private int qty;
    private double unit_price;
    private int movie_id;
    private String title;

    /** @return number of copies ordered */
    public int getQty() {
        return this.qty;
    }

    public void setQty(int qty) {
        this.qty = qty;
    }

    /** @return price per copy */
    public double getUnit_price() {
        return this.unit_price;
    }

    public void setUnit_price(double unit_price) {
        this.unit_price = unit_price;
    }

    /** @return id of the ordered movie */
    public int getMovie_id() {
        return this.movie_id;
    }

    public void setMovie_id(int movie_id) {
        this.movie_id = movie_id;
    }

    /** @return title of the ordered movie */
    public String getTitle() {
        return this.title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    /**
     * Serializes this line item into a JSON object with the keys
     * qty, unit_price, movie_id and title.
     */
    public JSONObject toJson() {
        // JSONObject.put returns the object itself, so the calls chain.
        return new JSONObject()
                .put("qty", this.qty)
                .put("unit_price", this.unit_price)
                .put("movie_id", this.movie_id)
                .put("title", this.title);
    }

    /** String form is simply the JSON representation. */
    @Override
    public String toString() {
        return this.toJson().toString();
    }
}
| CS122B-CWP/cs122b | project2/src/main/java/project2/jdbc/bean/OrderSingleBean.java | Java | apache-2.0 | 1,033 |
/*
* Licensed to GraphHopper GmbH under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*
* GraphHopper GmbH licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.graphhopper.util.gpx;
import com.graphhopper.util.*;
import com.graphhopper.util.shapes.GHPoint3D;
import java.text.DateFormat;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
// todo: the code here does not really belong into core, but we moved it here for now so its available from
// map-matching resource (it cannot be in the api module, because it uses AngleCalc). Probably we should separate the
// actual gpx conversion (which belongs to the web module) from the angle calculations. Or at least move this code back
// into web-bundle once MapMatchingResource is in core. Or we need another module for code that is used in different
// modules like web, but does not really fit into core either.
/**
 * Converts an {@link InstructionList} into a GPX 1.1 XML document containing
 * (optionally) waypoints, a route and a track, plus GraphHopper-specific
 * extension elements (distance, time, direction, azimuth, sign).
 */
public class GpxFromInstructions {
    private static final AngleCalc AC = AngleCalc.ANGLE_CALC;

    /**
     * Minimal XML text escaping for element content: ampersands become the
     * {@code &amp;} entity and angle brackets are neutralized with underscores.
     */
    static String simpleXMLEscape(String str) {
        // We could even use the 'more flexible' CDATA section but for now do the following. The 'and' could be important sometimes:
        // FIX: '&' must be replaced by the "&amp;" entity — the previous
        // replaceAll("&", "&") was a no-op and produced invalid XML.
        return str.replace("&", "&amp;").
                // but do not care for:
                replaceAll("[\\<\\>]", "_");
    }

    /**
     * Flattens the instructions into a list of GPX entries. Only the first
     * point of each instruction carries a timestamp (pillar nodes have no
     * timing information); the running offset advances by each instruction's
     * duration.
     */
    public static List<GPXEntry> createGPXList(InstructionList instructions) {
        List<GPXEntry> gpxList = new ArrayList<>();
        long timeOffset = 0;
        for (Instruction instruction : instructions) {
            int i = 0;
            for (GHPoint3D point : instruction.getPoints()) {
                GPXEntry gpxEntry;
                if (i == 0) {
                    gpxEntry = new GPXEntry(point, timeOffset);
                } else {
                    // We don't have timestamps for pillar nodes
                    gpxEntry = new GPXEntry(point);
                }
                gpxList.add(gpxEntry);
                i++;
            }
            timeOffset = timeOffset + instruction.getTime();
        }
        return gpxList;
    }

    /**
     * Appends one {@code <wpt>} element for the instruction's first point,
     * using the street name or, if empty, the localized turn description.
     */
    private static void createWayPointBlock(StringBuilder output, Instruction instruction, DecimalFormat decimalFormat, Translation tr) {
        output.append("\n<wpt ");
        output.append("lat=\"").append(decimalFormat.format(instruction.getPoints().getLatitude(0)));
        output.append("\" lon=\"").append(decimalFormat.format(instruction.getPoints().getLongitude(0))).append("\">");
        String name;
        if (instruction.getName().isEmpty())
            name = instruction.getTurnDescription(tr);
        else
            name = instruction.getName();
        output.append(" <name>").append(simpleXMLEscape(name)).append("</name>");
        output.append("</wpt>");
    }

    /**
     * Renders the instructions as a GPX 1.1 document.
     *
     * @param instructions     the route instructions to convert
     * @param trackName        name written into the {@code <trk>} element
     * @param startTimeMillis  epoch millis of departure; all point times are offsets from it
     * @param includeElevation whether to emit {@code <ele>} for track points
     * @param withRoute        emit a {@code <rte>} section
     * @param withTrack        emit a {@code <trk>} section
     * @param withWayPoints    emit {@code <wpt>} elements for start/via/finish
     * @param version          GraphHopper version written into the creator attribute
     * @param tr               translation used for localized turn descriptions
     */
    public static String createGPX(InstructionList instructions, String trackName, long startTimeMillis, boolean includeElevation, boolean withRoute, boolean withTrack, boolean withWayPoints, String version, Translation tr) {
        DateFormat formatter = Helper.createFormatter();

        DecimalFormat decimalFormat = new DecimalFormat("#", DecimalFormatSymbols.getInstance(Locale.ROOT));
        decimalFormat.setMinimumFractionDigits(1);
        decimalFormat.setMaximumFractionDigits(6);
        decimalFormat.setMinimumIntegerDigits(1);

        String header = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" ?>"
                + "<gpx xmlns=\"http://www.topografix.com/GPX/1/1\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\""
                + " creator=\"Graphhopper version " + version + "\" version=\"1.1\""
                // This xmlns:gh acts only as ID, no valid URL necessary.
                // Use a separate namespace for custom extensions to make basecamp happy.
                + " xmlns:gh=\"https://graphhopper.com/public/schema/gpx/1.1\">"
                + "\n<metadata>"
                + "<copyright author=\"OpenStreetMap contributors\"/>"
                + "<link href=\"http://graphhopper.com\">"
                + "<text>GraphHopper GPX</text>"
                + "</link>"
                + "<time>" + formatter.format(startTimeMillis) + "</time>"
                + "</metadata>";
        StringBuilder gpxOutput = new StringBuilder(header);
        if (!instructions.isEmpty()) {
            if (withWayPoints) {
                createWayPointBlock(gpxOutput, instructions.get(0), decimalFormat, tr);   // Start
                for (Instruction currInstr : instructions) {
                    if ((currInstr.getSign() == Instruction.REACHED_VIA) // Via
                            || (currInstr.getSign() == Instruction.FINISH)) // End
                    {
                        createWayPointBlock(gpxOutput, currInstr, decimalFormat, tr);
                    }
                }
            }
            if (withRoute) {
                gpxOutput.append("\n<rte>");
                Instruction nextInstr = null;
                // Emit each instruction together with its successor so
                // direction/azimuth can be computed across the boundary.
                for (Instruction currInstr : instructions) {
                    if (null != nextInstr)
                        createRteptBlock(gpxOutput, nextInstr, currInstr, decimalFormat, tr);

                    nextInstr = currInstr;
                }
                createRteptBlock(gpxOutput, nextInstr, null, decimalFormat, tr);
                gpxOutput.append("\n</rte>");
            }
        }
        if (withTrack) {
            gpxOutput.append("\n<trk><name>").append(trackName).append("</name>");

            gpxOutput.append("<trkseg>");
            for (GPXEntry entry : createGPXList(instructions)) {
                gpxOutput.append("\n<trkpt lat=\"").append(decimalFormat.format(entry.getPoint().getLat()));
                gpxOutput.append("\" lon=\"").append(decimalFormat.format(entry.getPoint().getLon())).append("\">");
                if (includeElevation)
                    gpxOutput.append("<ele>").append(Helper.round2(((GHPoint3D) entry.getPoint()).getEle())).append("</ele>");
                if (entry.getTime() != null)
                    gpxOutput.append("<time>").append(formatter.format(startTimeMillis + entry.getTime())).append("</time>");
                gpxOutput.append("</trkpt>");
            }
            gpxOutput.append("\n</trkseg>");
            gpxOutput.append("\n</trk>");
        }

        // we could now use 'wpt' for via points
        gpxOutput.append("\n</gpx>");
        return gpxOutput.toString();
    }

    /**
     * Appends one {@code <rtept>} element for the instruction, including the
     * GraphHopper extension block (distance, time, direction, azimuth, sign
     * and, for roundabouts, the exit number).
     */
    private static void createRteptBlock(StringBuilder output, Instruction instruction, Instruction nextI, DecimalFormat decimalFormat, Translation tr) {
        output.append("\n<rtept lat=\"").append(decimalFormat.format(instruction.getPoints().getLatitude(0))).
                append("\" lon=\"").append(decimalFormat.format(instruction.getPoints().getLongitude(0))).append("\">");

        if (!instruction.getName().isEmpty())
            output.append("<desc>").append(simpleXMLEscape(instruction.getTurnDescription(tr))).append("</desc>");

        output.append("<extensions>");
        output.append("<gh:distance>").append(Helper.round(instruction.getDistance(), 1)).append("</gh:distance>");
        output.append("<gh:time>").append(instruction.getTime()).append("</gh:time>");

        String direction = calcDirection(instruction, nextI);
        if (!direction.isEmpty())
            output.append("<gh:direction>").append(direction).append("</gh:direction>");

        double azimuth = calcAzimuth(instruction, nextI);
        if (!Double.isNaN(azimuth))
            output.append("<gh:azimuth>").append(Helper.round2(azimuth)).append("</gh:azimuth>");

        if (instruction instanceof RoundaboutInstruction) {
            RoundaboutInstruction ri = (RoundaboutInstruction) instruction;

            output.append("<gh:exit_number>").append(ri.getExitNumber()).append("</gh:exit_number>");
        }

        output.append("<gh:sign>").append(instruction.getSign()).append("</gh:sign>");
        output.append("</extensions>");
        output.append("</rtept>");
    }

    /**
     * Return the direction like 'NE' based on the first tracksegment of the instruction. If
     * Instruction does not contain enough coordinate points, an empty string will be returned.
     */
    public static String calcDirection(Instruction instruction, Instruction nextI) {
        double azimuth = calcAzimuth(instruction, nextI);
        if (Double.isNaN(azimuth))
            return "";

        return AC.azimuth2compassPoint(azimuth);
    }

    /**
     * Return the azimuth in degree based on the first tracksegment of this instruction. If this
     * instruction contains less than 2 points then NaN will be returned or the specified
     * instruction will be used if that is the finish instruction.
     */
    public static double calcAzimuth(Instruction instruction, Instruction nextI) {
        double nextLat;
        double nextLon;

        if (instruction.getPoints().getSize() >= 2) {
            nextLat = instruction.getPoints().getLatitude(1);
            nextLon = instruction.getPoints().getLongitude(1);
        } else if (nextI != null && instruction.getPoints().getSize() == 1) {
            nextLat = nextI.getPoints().getLatitude(0);
            nextLon = nextI.getPoints().getLongitude(0);
        } else {
            return Double.NaN;
        }

        double lat = instruction.getPoints().getLatitude(0);
        double lon = instruction.getPoints().getLongitude(0);
        return AC.calcAzimuth(lat, lon, nextLat, nextLon);
    }
}
| don-philipe/graphhopper | core/src/main/java/com/graphhopper/util/gpx/GpxFromInstructions.java | Java | apache-2.0 | 10,232 |
package com.coolweather.app.receiver;
import com.coolweather.app.service.AutoUpdateService;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
/**
 * Broadcast entry point that forwards every received broadcast to
 * {@link AutoUpdateService} so the update work runs in the service.
 */
public class AutoUpdateReceiver extends BroadcastReceiver {

    @Override
    public void onReceive(Context context, Intent intent) {
        // The content of the incoming intent is irrelevant; just start the service.
        Intent serviceIntent = new Intent(context, AutoUpdateService.class);
        context.startService(serviceIntent);
    }
}
| lpff/coolweather | src/com/coolweather/app/receiver/AutoUpdateReceiver.java | Java | apache-2.0 | 419 |
package com.google.ratel.deps.jackson.databind.jsonschema;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Retention;
import java.lang.annotation.ElementType;
import java.lang.annotation.Target;
import com.google.ratel.deps.jackson.annotation.JacksonAnnotation;
/**
 * Annotation that can be used to define JSON Schema definition for
 * the annotated class.
 *<p>
 * Note that annotation is often not needed: for example, regular
 * Jackson beans that Jackson can introspect can be used without
 * annotations, to produce JSON schema definition.
 *
 * @author Ryan Heaton
 * @author Tatu Saloranta
 */
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@JacksonAnnotation
public @interface JsonSerializableSchema
{
    /**
     * Marker value used to indicate that property has "no value";
     * needed because annotations can not have null as default
     * value. Compare member values against this constant instead of
     * checking for null.
     */
    public final static String NO_VALUE = "##irrelevant";

    /**
     * Property that can be used to indicate id of the type when
     * generating JSON Schema; empty String indicates that no id
     * is defined.
     */
    public String id() default "";

    /**
     * The schema type for this JsonSerializable instance.
     * Possible values: "string", "number", "boolean", "object", "array", "null", "any"
     *
     * @return The schema type for this JsonSerializable instance.
     */
    public String schemaType() default "any";

    /**
     * If the schema type is "object", JSON definition of properties of the object as
     * a String. The default {@link #NO_VALUE} means "not specified".
     *
     * @return The node representing the schema properties, or "##irrelevant" if irrelevant.
     *
     * @deprecated (since 2.1) -- support will be dropped in future, since JSON-as-String is
     *    fundamentally bad way for customizing anything. No direct replacements offered.
     */
    @Deprecated
    public String schemaObjectPropertiesDefinition() default NO_VALUE;

    /**
     * If the schema type if "array", JSON definition of the schema for item types contained.
     * The default {@link #NO_VALUE} means "not specified".
     *
     * @return The schema for the items in the array, or "##irrelevant" if irrelevant.
     *
     * @deprecated (since 2.1) -- support will be dropped in future, since JSON-as-String is
     *    fundamentally bad way for customizing anything. No direct replacements offered.
     */
    @Deprecated
    public String schemaItemDefinition() default NO_VALUE;
}
| sabob/ratel | ratel/src/com/google/ratel/deps/jackson/databind/jsonschema/JsonSerializableSchema.java | Java | apache-2.0 | 2,431 |
// Cached reference to the global window object.
// NOTE(review): nothing in this file reads __window — presumably other
// scripts rely on it; confirm before removing.
var __window = window;

// Revives an ISO-style date string into a Date; any non-string value
// (including null/undefined) passes through unchanged.
var destringify = function(date) {
  return (date != null && typeof(date) === 'string') ? new Date(date) : date;
}
// Formats a Date as a UTC timestamp string via the external dateFormat()
// helper; falsy inputs yield null.
var stringify = function(date) {
  return date ? dateFormat(date, "UTC:yyyy-mm-dd'T'HH:MM:ss'Z'") : null;
}
// Replaces every literal occurrence of `search` in `target` with
// `replacement`. split/join treats `search` as plain text, avoiding the
// regex-escaping pitfalls of String#replace.
var stringreplaceall = function(target, search, replacement) {
  var pieces = target.split(search);
  return pieces.join(replacement);
};
// Returns the content document of the iframe whose element id is `id`.
var grabiframecontentdocument = function(id) {
  var frame = document.getElementById(id);
  return frame.contentWindow.document;
};
// Returns the content document of the first frame matched by `selector`
// within `parent`, using jQuery for the lookup.
var grabcontentdocument = function(selector, parent) {
  var frame = jQuery(selector, parent).get(0);
  return frame.contentWindow.document;
};
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
using System;
using System.IO;
using Org.Apache.REEF.Client.Common;
using Org.Apache.REEF.Client.Yarn;
using Org.Apache.REEF.Client.YARN.RestClient.DataModel;
using Org.Apache.REEF.Common.Files;
using Org.Apache.REEF.IO.FileSystem;
using Org.Apache.REEF.Tang.Annotations;
using Org.Apache.REEF.Utilities.Diagnostics;
using Org.Apache.REEF.Utilities.Logging;
namespace Org.Apache.REEF.Client.YARN.RestClient
{
/// <summary>
/// Provides FileSystem agnostic job resource uploader.
/// User can provide custom implementation of
/// <see cref="IFileSystem"/> for their choice of DFS.
/// </summary>
internal sealed class FileSystemJobResourceUploader : IJobResourceUploader
{
    private static readonly Logger Log = Logger.GetLogger(typeof(FileSystemJobResourceUploader));

    // Reference point for converting modification times to Unix-epoch millis.
    // NOTE(review): constructed without a DateTimeKind, so the subtraction in
    // DateTimeToUnixTimestamp assumes ModificationTime uses a compatible
    // kind/zone — confirm against the IFileSystem implementation.
    private static readonly DateTime Epoch = new DateTime(1970, 1, 1, 0, 0, 0, 0);

    private readonly IResourceArchiveFileGenerator _resourceArchiveFileGenerator;
    private readonly IFileSystem _fileSystem;
    private readonly IFile _file;
    private readonly REEFFileNames _reefFileNames;
    private readonly IResourceFileRemoteUrlToClusterUrlConverter _urlConverter;

    [Inject]
    private FileSystemJobResourceUploader(
        IResourceArchiveFileGenerator resourceArchiveFileGenerator,
        IFileSystem fileSystem,
        REEFFileNames reefFileNames,
        IFile file,
        IResourceFileRemoteUrlToClusterUrlConverter urlConverter)
    {
        _urlConverter = urlConverter;
        _reefFileNames = reefFileNames;
        _fileSystem = fileSystem;
        _resourceArchiveFileGenerator = resourceArchiveFileGenerator;
        _file = file;
    }

    /// <summary>
    /// Archives the local driver folder, uploads the archive into
    /// <paramref name="remoteUploadDirectoryPath"/> (created if missing),
    /// and returns the resulting resource descriptor. The archive is
    /// localized on the node under the REEF folder name.
    /// </summary>
    public JobResource UploadArchiveResource(string driverLocalFolderPath, string remoteUploadDirectoryPath)
    {
        // Normalize both paths to end with exactly one separator.
        driverLocalFolderPath = driverLocalFolderPath.TrimEnd('\\') + @"\";
        var driverUploadPath = remoteUploadDirectoryPath.TrimEnd('/') + @"/";
        var parentDirectoryUri = _fileSystem.CreateUriForPath(remoteUploadDirectoryPath);
        Log.Log(Level.Verbose, "DriverFolderPath: {0} DriverUploadPath: {1}", driverLocalFolderPath, driverUploadPath);
        _fileSystem.CreateDirectory(parentDirectoryUri);
        var archivePath = _resourceArchiveFileGenerator.CreateArchiveToUpload(driverLocalFolderPath);
        return GetJobResource(archivePath, ResourceType.ARCHIVE, driverUploadPath, _reefFileNames.GetReefFolderName());
    }

    /// <summary>
    /// Uploads a single file into <paramref name="remoteUploadDirectoryPath"/>
    /// (created if missing) and returns its resource descriptor.
    /// </summary>
    public JobResource UploadFileResource(string fileLocalPath, string remoteUploadDirectoryPath)
    {
        var driverUploadPath = remoteUploadDirectoryPath.TrimEnd('/') + @"/";
        var parentDirectoryUri = _fileSystem.CreateUriForPath(driverUploadPath);
        _fileSystem.CreateDirectory(parentDirectoryUri);
        return GetJobResource(fileLocalPath, ResourceType.FILE, remoteUploadDirectoryPath);
    }

    // Copies the local file to the remote directory and builds the JobResource
    // metadata (localized name, size, modification timestamp, cluster URL).
    private JobResource GetJobResource(string filePath, ResourceType resourceType, string driverUploadPath, string localizedName = null)
    {
        if (!_file.Exists(filePath))
        {
            Exceptions.Throw(
                new FileNotFoundException("Could not find resource file " + filePath),
                Log);
        }

        var destinationPath = driverUploadPath + Path.GetFileName(filePath);
        var remoteFileUri = _fileSystem.CreateUriForPath(destinationPath);

        Log.Log(Level.Verbose, @"Copy {0} to {1}", filePath, remoteFileUri);

        _fileSystem.CopyFromLocal(filePath, remoteFileUri);
        var fileStatus = _fileSystem.GetFileStatus(remoteFileUri);

        return new JobResource
        {
            // When no localized name is given, keep the original file name.
            Name = localizedName ?? Path.GetFileName(filePath),
            LastModificationUnixTimestamp = DateTimeToUnixTimestamp(fileStatus.ModificationTime),
            RemoteUploadPath = _urlConverter.ConvertToLocalUrl(remoteFileUri),
            ResourceSize = fileStatus.LengthBytes,
            ResourceType = resourceType
        };
    }

    // Milliseconds elapsed between the Unix epoch and dateTime.
    private static long DateTimeToUnixTimestamp(DateTime dateTime)
    {
        return (long)(dateTime - Epoch).TotalMilliseconds;
    }
}
} | dafrista/incubator-reef | lang/cs/Org.Apache.REEF.Client/YARN/RESTClient/FileSystemJobResourceUploader.cs | C# | apache-2.0 | 5,169 |
import java.util.Random;
/**
 * LeetCode 382 "Linked List Random Node": returns a uniformly random value
 * from a singly linked list using reservoir sampling, so the list length
 * never needs to be known or stored.
 */
public class _0382LinkedListRandomNode {

    ListNode head;
    Random r;

    /** Remembers the list head and prepares the random source. */
    public _0382LinkedListRandomNode(ListNode head) {
        this.head = head;
        this.r = new Random();
    }

    /**
     * Reservoir sampling with a reservoir of size one: the k-th node seen
     * replaces the current pick with probability 1/k, which leaves every
     * node equally likely after the full traversal.
     */
    public int getRandom() {
        ListNode chosen = null;
        int seen = 0;
        for (ListNode node = head; node != null; node = node.next) {
            seen++;
            if (r.nextInt(seen) == 0) {
                chosen = node;
            }
        }
        return chosen.val;
    }

    /** Minimal singly linked list node. */
    public class ListNode {
        int val;
        ListNode next;

        ListNode(int x) {
            val = x;
        }
    }
}
| im-sure/LeetCode | src/_0382LinkedListRandomNode.java | Java | apache-2.0 | 550 |
package org.wildfly.swarm.webservices.runtime;
import javax.enterprise.context.ApplicationScoped;
import javax.inject.Inject;
import org.wildfly.swarm.container.Interface;
import org.wildfly.swarm.spi.api.Customizer;
import org.wildfly.swarm.spi.runtime.annotations.Post;
import org.wildfly.swarm.webservices.WebServicesFraction;
/**
 * @author Bob McWhirter
 */
@Post
@ApplicationScoped
public class WSDLHostCustomizer implements Customizer {

    @Inject
    Interface iface;

    @Inject
    WebServicesFraction fraction;

    /**
     * Defaults the WSDL host to the bound interface's address expression
     * when no explicit host has been configured on the fraction.
     */
    @Override
    public void customize() {
        if (fraction.wsdlHost() != null) {
            // An explicit WSDL host was configured; leave it untouched.
            return;
        }
        fraction.wsdlHost(this.iface.getExpression());
    }
}
| christian-posta/wildfly-swarm | fractions/javaee/webservices/src/main/java/org/wildfly/swarm/webservices/runtime/WSDLHostCustomizer.java | Java | apache-2.0 | 693 |
package com.thilinamb.mqtt.client.sub;
import org.eclipse.paho.client.mqttv3.IMqttDeliveryToken;
import org.eclipse.paho.client.mqttv3.MqttCallback;
import org.eclipse.paho.client.mqttv3.MqttMessage;
import java.util.logging.Logger;
/**
 * Subscriber callback
 * Author: Thilina
 * Date: 7/19/14
 */
public class SubscriberCallback implements MqttCallback {

    private static Logger logger = Logger.getLogger(SubscriberCallback.class.getName());

    /** Invoked when the broker connection drops; only logs the event. */
    @Override
    public void connectionLost(Throwable throwable) {
        logger.warning("Connection Lost!");
    }

    /** Logs each incoming message together with its topic. */
    @Override
    public void messageArrived(String s, MqttMessage mqttMessage) throws Exception {
        // NOTE(review): decoding uses the platform default charset — confirm
        // the publisher encodes payloads with the same charset.
        String payload = new String(mqttMessage.getPayload());
        logger.info("Message Arrived. Topic: " + s + ", Message: " + payload);
    }

    /** Delivery acknowledgements are irrelevant for a pure subscriber. */
    @Override
    public void deliveryComplete(IMqttDeliveryToken iMqttDeliveryToken) {
        // Intentionally empty: this client only consumes messages.
    }
}
| thilinamb/mqtt-client-example | src/main/java/com/thilinamb/mqtt/client/sub/SubscriberCallback.java | Java | apache-2.0 | 882 |
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.math.matrix;
import java.util.Arrays;
import org.apache.commons.lang.Validate;
/**
 * A minimal implementation of a vector (in the mathematical sense) that contains doubles.
 */
public class DoubleMatrix1D implements Matrix<Double> {
    private final double[] _data;
    private final int _elements;
    /** Empty vector. */
    public static final DoubleMatrix1D EMPTY_MATRIX = new DoubleMatrix1D(new double[0]);

    /**
     * Copies the boxed array into an internal primitive array.
     *
     * @param data
     *          The data, not null (elements must not be null either — a null
     *          element fails on unboxing)
     */
    public DoubleMatrix1D(final Double[] data) {
        Validate.notNull(data);
        _elements = data.length;
        _data = new double[_elements];
        for (int i = 0; i < _elements; i++) {
            _data[i] = data[i]; // unboxing
        }
    }

    /**
     * Copies the given values; the caller's array is not retained.
     *
     * @param data
     *          The data, not null
     */
    public DoubleMatrix1D(final double... data) {
        Validate.notNull(data);
        _elements = data.length;
        _data = Arrays.copyOf(data, _elements);
    }

    /**
     * Create an vector of length n with all entries equal to value.
     *
     * @param n
     *          number of elements
     * @param value
     *          value of elements
     */
    public DoubleMatrix1D(final int n, final double value) {
        _elements = n;
        _data = new double[_elements];
        Arrays.fill(_data, value);
    }

    /**
     * Returns the underlying vector data. If this is changed so is the vector.
     *
     * @see #toArray to get a copy of data
     * @return An array containing the vector elements
     */
    public double[] getData() {
        return _data;
    }

    /**
     * Convert the vector to a double array. As its elements are copied, the array is independent from the vector data.
     *
     * @return An array containing a copy of vector elements
     */
    public double[] toArray() {
        return Arrays.copyOf(_data, _elements);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getNumberOfElements() {
        return _elements;
    }

    /**
     * {@inheritDoc} This method expects one index - any subsequent indices will be ignored.
     */
    @Override
    public Double getEntry(final int... index) {
        return _data[index[0]];
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + Arrays.hashCode(_data);
        return result;
    }

    @Override
    public boolean equals(final Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        final DoubleMatrix1D other = (DoubleMatrix1D) obj;
        if (!Arrays.equals(_data, other._data)) {
            return false;
        }
        return true;
    }

    @Override
    public String toString() {
        // FIX: guard the zero-length case — the previous version indexed
        // _data[n - 1] unconditionally, so EMPTY_MATRIX.toString() threw
        // ArrayIndexOutOfBoundsException. An empty vector now renders " () ".
        final StringBuilder sb = new StringBuilder(" (");
        for (int i = 0; i < _data.length; i++) {
            if (i > 0) {
                sb.append(", ");
            }
            sb.append(_data[i]);
        }
        sb.append(") ");
        return sb.toString();
    }
}
| McLeodMoores/starling | projects/analytics/src/main/java/com/opengamma/analytics/math/matrix/DoubleMatrix1D.java | Java | apache-2.0 | 3,045 |
/*****************************************************************************
* *
* This file is part of the tna framework distribution. *
* Documentation and updates may be get from biaoping.yin the author of *
* this framework *
* *
* Sun Public License Notice: *
* *
* The contents of this file are subject to the Sun Public License Version *
* 1.0 (the "License"); you may not use this file except in compliance with *
* the License. A copy of the License is available at http://www.sun.com *
* *
* The Original Code is tag. The Initial Developer of the Original *
* Code is biaoping yin. Portions created by biaoping yin are Copyright *
* (C) 2000. All Rights Reserved. *
* *
* GNU Public License Notice: *
* *
* Alternatively, the contents of this file may be used under the terms of *
* the GNU Lesser General Public License (the "LGPL"), in which case the *
* provisions of LGPL are applicable instead of those above. If you wish to *
* allow use of your version of this file only under the terms of the LGPL *
* and not to allow others to use your version of this file under the SPL, *
* indicate your decision by deleting the provisions above and replace *
* them with the notice and other provisions required by the LGPL. If you *
* do not delete the provisions above, a recipient may use your version of *
* this file under either the SPL or the LGPL. *
* *
* biaoping.yin (yin-bp@163.com) *
* *
*****************************************************************************/
package com.frameworkset.common.poolman.sql;
import java.sql.Connection;
import java.util.HashMap;
import java.util.Map;
import org.apache.log4j.Logger;
import com.frameworkset.common.poolman.management.BaseTableManager;
/**
 * Caches the primary-key metadata of the tables belonging to one database
 * connection pool, so that the information does not have to be re-read from
 * the database on every lookup.
 *
 * @author biaoping.yin created on 2005-3-29 version 1.0
 */
public class PrimaryKeyCache {
    private static Logger log = Logger.getLogger(PrimaryKeyCache.class);

    // Name of the database connection pool this cache belongs to.
    private String dbname;

    // Maps lower-cased table name -> PrimaryKey metadata (or the NULL_ sentinel).
    private Map id_tables;

    /**
     * Sentinel cached for tables that have no primary-key information in
     * tableinfo, so a negative lookup is also cached and not retried on
     * every call.
     */
    private static final PrimaryKey NULL_ = new PrimaryKey();

    public PrimaryKeyCache(String dbname) {
        this.dbname = dbname;
        id_tables = new java.util.concurrent.ConcurrentHashMap(new HashMap());
    }

    /** Registers the primary key of a table unless one is already cached. */
    public void addIDTable(PrimaryKey primaryKey) {
        if (!id_tables.containsKey(primaryKey.getTableName()))
            id_tables.put(primaryKey.getTableName(), primaryKey);
    }

    /** Convenience overload of {@link #getIDTable(Connection, String)} without a connection. */
    public PrimaryKey getIDTable(String tableName) {
        return getIDTable(null,tableName);
    }

    /**
     * Returns the cached primary key of the table, loading it on demand.
     * Returns null when the table is known to have no primary-key metadata
     * (NULL_ sentinel) or when loading failed.
     */
    public PrimaryKey getIDTable(Connection con,String tableName) {
        PrimaryKey key = (PrimaryKey) id_tables.get(tableName.toLowerCase());
        if (key != null)
        {
            // Cached sentinel means "known to have no primary key info".
            if(key == NULL_)
                return null;
            return key;
        }
        else
        {
            key = loaderPrimaryKey(con,tableName);
            return key;
        }
    }

    /**
     * @return Returns the dbname.
     */
    public String getDbname() {
        return dbname;
    }

    /**
     * Loads the primary-key information of a table into the cache on demand.
     * @param tableName the table to load
     * @return the loaded key, or null when unavailable
     */
    public PrimaryKey loaderPrimaryKey(String tableName) {
        return loaderPrimaryKey(null,tableName);
    }

    /**
     * Loads the primary-key information of a table into the cache on demand.
     * A miss is cached as the NULL_ sentinel so the database is not queried
     * again for the same table. (Log messages are runtime strings from the
     * original code base and are intentionally left in Chinese.)
     * @param con optional connection to read the metadata through
     * @param tableName the table to load
     * @return the loaded key, or null on a miss or error
     */
    public PrimaryKey loaderPrimaryKey(Connection con,String tableName) {
        try {
            log.debug("开始装载表【" + tableName +"】的主键信息到缓冲。");
            PrimaryKey key = BaseTableManager.getPoolTableInfo(dbname,con,
                    tableName);
            if (key != null)
            {
                id_tables.put(key.getTableName().trim().toLowerCase(), key);
                log.debug("完成装载表【" + tableName +"】的主键信息。");
            }
            else
            {
                // Cache the miss via the sentinel.
                id_tables.put(tableName.trim().toLowerCase(),NULL_);
                log.debug("完成装载表【" + tableName +"】的主键信息,NULL_,");
            }
            return key;
        } catch (Exception ex) {
            log.error(ex.getMessage(),ex);
        }
        return null;
    }

    /** Releases the cache; the instance must not be used afterwards. */
    public void destroy() {
        if(id_tables != null)
        {
            id_tables.clear();
            id_tables = null;
        }
    }

    /** Empties the cache but keeps the instance usable. */
    public void reset() {
        if(id_tables != null)
        {
            id_tables.clear();
        }
    }
}
| shufudong/bboss | bboss-persistent/src/com/frameworkset/common/poolman/sql/PrimaryKeyCache.java | Java | apache-2.0 | 5,515 |
package packages
import (
"testing"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/stretchr/testify/assert"
)
// TestNewPackageFromImage verifies that package metadata is derived from the
// image name and, when present, overridden by the io.whalebrew.* image labels.
func TestNewPackageFromImage(t *testing.T) {
	// with tag
	pkg, err := NewPackageFromImage("whalebrew/foo:bar", types.ImageInspect{})
	assert.Nil(t, err)
	// Fix: testify's assert.Equal signature is (t, expected, actual);
	// passing them in that order makes failure messages report the right
	// "expected" value. Pass/fail behavior is unchanged.
	assert.Equal(t, "foo", pkg.Name)
	assert.Equal(t, "whalebrew/foo:bar", pkg.Image)

	// test labels
	pkg, err = NewPackageFromImage("whalebrew/whalesay", types.ImageInspect{
		ContainerConfig: &container.Config{
			Labels: map[string]string{
				"io.whalebrew.name":               "ws",
				"io.whalebrew.config.environment": "[\"SOME_CONFIG_OPTION\"]",
				"io.whalebrew.config.volumes":     "[\"/somesource:/somedest\"]",
				"io.whalebrew.config.ports":       "[\"8100:8100\"]",
				"io.whalebrew.config.networks":    "[\"host\"]",
			},
		},
	})
	assert.Nil(t, err)
	assert.Equal(t, "ws", pkg.Name)
	assert.Equal(t, "whalebrew/whalesay", pkg.Image)
	assert.Equal(t, []string{"SOME_CONFIG_OPTION"}, pkg.Environment)
	assert.Equal(t, []string{"/somesource:/somedest"}, pkg.Volumes)
	assert.Equal(t, []string{"8100:8100"}, pkg.Ports)
	assert.Equal(t, []string{"host"}, pkg.Networks)
}
// TestPreinstallMessage checks the human-readable permission summary that is
// shown before installing a package: empty for a package requesting nothing,
// and one bullet per environment variable, port and volume otherwise.
func TestPreinstallMessage(t *testing.T) {
	pkg := &Package{}
	// Fix: testify's assert.Equal takes (t, expected, actual); the arguments
	// were previously swapped, which only garbles failure messages.
	assert.Equal(t, "", pkg.PreinstallMessage())

	pkg = &Package{
		Environment: []string{"AWS_ACCESS_KEY"},
		Ports: []string{
			"80:80",
			"81:81:udp",
		},
		Volumes: []string{
			"/etc/passwd:/passwdtosteal",
			"/etc/readonly:/readonly:ro",
		},
	}
	assert.Equal(t,
		"This package needs additional access to your system. It wants to:\n"+
			"\n"+
			"* Read the environment variable AWS_ACCESS_KEY\n"+
			"* Listen on TCP port 80\n"+
			"* Listen on UDP port 81\n"+
			"* Read and write to the file or directory \"/etc/passwd\"\n"+
			"* Read the file or directory \"/etc/readonly\"\n",
		pkg.PreinstallMessage(),
	)
}
| 3846masa/whalebrew | packages/package_test.go | GO | apache-2.0 | 1,893 |
package ru.stqa.pft.mantis.appmanager;
import org.apache.http.HttpEntity;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.client.LaxRedirectStrategy;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class HttpSession {
private CloseableHttpClient httpclient;
private ApplicationManager app;
public HttpSession(ApplicationManager app) {
this.app = app;
httpclient = HttpClients.custom().setRedirectStrategy(new LaxRedirectStrategy()).build();
}
public boolean login(String username, String password) throws IOException {
HttpPost post = new HttpPost(app.getProperty("web.baseUrl") + "/login.php");
List<NameValuePair> params = new ArrayList<NameValuePair>();
params.add(new BasicNameValuePair("username", username));
params.add(new BasicNameValuePair("password", password));
params.add(new BasicNameValuePair("secure_session", "on"));
params.add(new BasicNameValuePair("return", "index.php"));
post.setEntity(new UrlEncodedFormEntity(params));
CloseableHttpResponse response = httpclient.execute(post);
String body = geTextFrom(response);
return body.contains(String.format("<span id=\"logged-in-user\">%s</span>", username));
}
private String geTextFrom(CloseableHttpResponse response) throws IOException {
try {
return EntityUtils.toString(response.getEntity());
} finally {
response.close();
}
}
public boolean isLoggedInAs(String username) throws IOException {
HttpGet get = new HttpGet(app.getProperty("web.baseUrl") + "/index.php");
CloseableHttpResponse response = httpclient.execute(get);
String body = geTextFrom(response);
// return body.contains(String.format("<span class=\"italic\">%s</span>", username));
return body.contains(String.format("<span id=\"logged-in-user\">%s</span>", username));
}
} | mynewyear/java_pft | mantis-tests/src/test/java/ru/stqa/pft/mantis/appmanager/HttpSession.java | Java | apache-2.0 | 2,406 |
/**
* Copyright (C) 2013 Julian Atienza Herrero <j.atienza at har.mrc.ac.uk>
*
* MEDICAL RESEARCH COUNCIL UK MRC
*
* Harwell Mammalian Genetics Unit
*
* http://www.har.mrc.ac.uk
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.mousephenotype.dcc.exportlibrary.xmlvalidationresourcescollection.impress.utils;
import java.lang.reflect.InvocationTargetException;
import java.math.BigInteger;
import java.util.HashMap;
import junit.framework.Assert;
import org.junit.Test;
import org.mousephenotype.dcc.exportlibrary.xmlvalidationdatastructure.external.impress.ImpressPipeline;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
*
* @author julian
*/
public class InstantiatorTest {
    protected static final Logger logger = LoggerFactory.getLogger(InstantiatorTest.class);
    /*
     * <xs:attribute name="is_deprecated" type="xs:boolean" use="required"/>
     * <xs:attribute name="pipeline_name" type="xs:string" use="required"/>
     * <xs:attribute name="minor_version" type="xs:integer" use="required"/>
     * <xs:attribute name="pipeline_id" type="xs:integer" use="required"/>
     * <xs:attribute name="description" type="xs:string" use="required"/>
     * <xs:attribute name="major_version" type="xs:integer" use="required"/>
     * <xs:attribute name="pipeline_key" type="xs:string" use="required"/>
     */
    /**
     * Builds an attribute-name to string-value map mirroring the XSD
     * attributes listed above. "description" is deliberately commented out to
     * exercise the missing-attribute case.
     */
    public static HashMap<String, String> getImpressPipelineMap() {
        HashMap<String, String> map = new HashMap<String, String>();
        map.put("is_deprecated", "false");
        map.put("pipeline_name", "pipeline_name");
        map.put("minor_version", "1");
        map.put("pipeline_id", "456");
        //map.put("description", "description");
        map.put("major_version", "2");
        map.put("pipeline_key", "pipeline_key");
        return map;
    }
    /**
     * Populates an ImpressPipeline from the string map via Instantiator and
     * verifies it equals a manually populated instance. Any reflection
     * failure fails the test.
     */
    @Test
    public void testImpressPipeline() {
        ImpressPipeline impressPipeline = new ImpressPipeline();
        HashMap<String, String> map =InstantiatorTest.getImpressPipelineMap();
        try {
            Instantiator.getInstance(ImpressPipeline.class, impressPipeline, map);
        } catch (NoSuchFieldException ex) {
            logger.error("", ex);
            Assert.fail();
        } catch (IllegalArgumentException ex) {
            logger.error("", ex);
            Assert.fail();
        } catch (IllegalAccessException ex) {
            logger.error("", ex);
            Assert.fail();
        } catch (NoSuchMethodException ex) {
            logger.error("", ex);
            Assert.fail();
        } catch (InvocationTargetException ex) {
            logger.error("", ex);
            Assert.fail();
        }
        // Expected object, populated by hand with the same values as the map.
        ImpressPipeline impressPipeline2 = new ImpressPipeline();
        impressPipeline2.setIsDeprecated(false);
        impressPipeline2.setPipelineName("pipeline_name");
        impressPipeline2.setMinorVersion(BigInteger.valueOf(1L));
        impressPipeline2.setPipelineId(BigInteger.valueOf(456L));
        //impressPipeline2.setDescription("description");
        impressPipeline2.setMajorVersion(BigInteger.valueOf(2L));
        impressPipeline2.setPipelineKey("pipeline_key");
        Assert.assertEquals(impressPipeline2,impressPipeline);
    }
}
| mpi2/exportlibrary | exportlibrary.xmlvalidationresourcescollection/src/test/java/org/mousephenotype/dcc/exportlibrary/xmlvalidationresourcescollection/impress/utils/InstantiatorTest.java | Java | apache-2.0 | 3,784 |
# (c) Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
module OneviewCookbook
  module API2000
    module Synergy
      # Scope API2000 Synergy provider.
      # Inherits all behavior unchanged from the API1800 Synergy provider;
      # exists so the API2000 namespace resolves to a concrete class.
      class ScopeProvider < OneviewCookbook::API1800::Synergy::ScopeProvider
      end
    end
  end
end
| HewlettPackard/oneview-chef | libraries/resource_providers/api2000/synergy/scope_provider.rb | Ruby | apache-2.0 | 803 |
namespace De.Osthus.Ambeth.Ioc
{
    /// <summary>
    /// Implemented by beans that need a callback after the IoC container has
    /// injected all configured properties.
    /// </summary>
    public interface IInitializingBean
    {
        /// <summary>
        /// Invoked once all properties have been set; implementations may
        /// validate their configuration or perform initialization here.
        /// </summary>
        void AfterPropertiesSet();
    }
}
| Dennis-Koch/ambeth | ambeth/Ambeth.IoC/ambeth/ioc/IInitializingBean.cs | C# | apache-2.0 | 125 |
package edu.colostate.cs.storm.util;
import java.util.HashSet;
import java.util.Set;
/**
* Author: Thilina
* Date: 12/6/14
*/
/**
 * Tracks a population of string keys and which of them are currently flagged
 * as outliers, exposing the outlier fraction of the whole population.
 */
public class OutlierTracker {
    // Every key ever registered through addMember().
    private Set<String> completeSet = new HashSet<String>();
    // Keys currently flagged as outliers (expected subset of completeSet).
    private Set<String> outlierSet = new HashSet<String>();
    /** Registers a key as part of the tracked population. */
    public void addMember(String key){
        completeSet.add(key);
    }
    /** Flags a key as an outlier. */
    public void addOutlier(String key){
        outlierSet.add(key);
    }
    /** Clears the outlier flag for a key (no-op if it was not flagged). */
    public void removeOutlier(String key){
        outlierSet.remove(key);
    }
    /** Returns true if the key is currently flagged as an outlier. */
    public boolean isOutlier(String key){
        return outlierSet.contains(key);
    }
    /** Returns true if the key has been registered as a member. */
    public boolean isMember(String key){
        return completeSet.contains(key);
    }
    /**
     * Returns the fraction of members currently flagged as outliers.
     * An empty population yields 0.0 (previously this divided by zero and
     * returned NaN).
     */
    public double getCurrentPercentage(){
        if (completeSet.isEmpty()) {
            return 0.0;
        }
        return (outlierSet.size() * 1.0)/(completeSet.size());
    }
}
| thilinamb/debs14-grand-challenge | src/main/java/edu/colostate/cs/storm/util/OutlierTracker.java | Java | apache-2.0 | 810 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.riot.system;
import java.io.OutputStream ;
import java.io.Writer ;
import org.apache.jena.atlas.io.AWriter ;
import org.apache.jena.atlas.io.IO ;
import org.apache.jena.atlas.lib.CharSpace ;
import org.apache.jena.atlas.lib.Sink ;
import org.apache.jena.graph.Graph ;
import org.apache.jena.graph.Node ;
import org.apache.jena.graph.Triple ;
import org.apache.jena.riot.lang.StreamRDFCounting ;
import org.apache.jena.riot.writer.WriterStreamRDFPlain ;
import org.apache.jena.shared.JenaException ;
import org.apache.jena.shared.PrefixMapping ;
import org.apache.jena.sparql.core.DatasetGraph ;
import org.apache.jena.sparql.core.Quad ;
/** Various Common StreamRDF setups */
public class StreamRDFLib
{
    /** Send everything to nowhere ... efficiently */
    public static StreamRDF sinkNull()                       { return new StreamRDFBase() ; }

    /** Plain writer over an OutputStream (UTF-8). */
    public static StreamRDF writer(OutputStream out)         { return new WriterStreamRDFPlain(IO.wrapUTF8(out)) ; }
    /** Plain writer over an AWriter. */
    public static StreamRDF writer(AWriter out)              { return new WriterStreamRDFPlain(out) ; }
    /** Plain writer over a java.io.Writer. */
    public static StreamRDF writer(Writer out)               { return new WriterStreamRDFPlain(IO.wrap(out)) ; }

    /** Plain writer; ASCII requests an escaping writer, anything else falls back to UTF-8. */
    public static StreamRDF writer(OutputStream out, CharSpace charSpace)
    {
        switch (charSpace) {
        case ASCII:
            return new WriterStreamRDFPlain(IO.wrapASCII(out), charSpace);
        case UTF8:
        default:
            return writer(out);
        }
    }

    public static StreamRDF writer(AWriter out, CharSpace charSpace)
    {
        return new WriterStreamRDFPlain(out, charSpace);
    }

    public static StreamRDF writer(Writer out, CharSpace charSpace)
    {
        return new WriterStreamRDFPlain(IO.wrap(out), charSpace);
    }

    /** Accumulate triples into the given graph. */
    public static StreamRDF graph(Graph graph)               { return new ParserOutputGraph(graph) ; }
    /** Accumulate triples and quads into the given dataset. */
    public static StreamRDF dataset(DatasetGraph dataset)    { return new ParserOutputDataset(dataset) ; }

    /**
     * Output to a sink; prefix and base handled only within the parser.
     * Unfortunately, Java needs different names for the triples and
     * quads versions because of type erasure.
     */
    public static StreamRDF sinkTriples(Sink<Triple> sink)   { return new ParserOutputSinkTriples(sink) ; }

    /**
     * Output to a sink; prefix and base handled only within the parser.
     * Unfortunately, Java needs different names for the triples and
     * quads versions because of type erasure.
     */
    public static StreamRDF sinkQuads(Sink<Quad> sink)       { return new ParserOutputSinkQuads(sink) ; }

    /** Convert any triples seen to a quads, adding a graph node of {@link Quad#tripleInQuad} */
    public static StreamRDF extendTriplesToQuads(StreamRDF base)
    { return extendTriplesToQuads(Quad.tripleInQuad, base) ; }

    /** Convert any triples seen to a quads, adding the specified graph node */
    public static StreamRDF extendTriplesToQuads(Node graphNode, StreamRDF base)
    { return new ParserOutputSinkTriplesToQuads(graphNode, base) ; }

    /** Counting stream that discards its input. */
    public static StreamRDFCounting count()
    { return new StreamRDFCountingBase(sinkNull()) ; }

    /** Counting stream that forwards to another stream. */
    public static StreamRDFCounting count(StreamRDF other)
    { return new StreamRDFCountingBase(other) ; }
private static class ParserOutputSinkTriplesToQuads extends StreamRDFWrapper
{
private final Node gn ;
ParserOutputSinkTriplesToQuads(Node gn, StreamRDF base)
{ super(base) ; this.gn = gn ; }
@Override public void triple(Triple triple)
{ other.quad(new Quad(gn, triple)) ; }
}
    /** Destination that sends each triple to a {@code Sink}; finish() flushes the sink. */
    private static class ParserOutputSinkTriples extends StreamRDFBase
    {
        private final Sink<Triple> sink ;
        public ParserOutputSinkTriples(Sink<Triple> sink)
        { this.sink = sink ; }
        @Override
        public void triple(Triple triple)
        { sink.send(triple) ; }
        @Override
        public void finish()
        { sink.flush() ; }
    }
    /** Destination that sends each quad to a {@code Sink}; finish() flushes the sink. */
    private static class ParserOutputSinkQuads extends StreamRDFBase
    {
        private final Sink<Quad> sink ;
        public ParserOutputSinkQuads(Sink<Quad> sink)
        { this.sink = sink ; }
        @Override
        public void quad(Quad quad)
        { sink.send(quad) ; }
        @Override
        public void finish()
        { sink.flush() ; }
    }
    /**
     * Destination that adds triples (and default-graph quads) to a Graph.
     * Named-graph quads are dropped with a one-time warning.
     */
    private static class ParserOutputGraph extends StreamRDFBase
    {
        protected final Graph graph ;
        // Ensures the "named graph data ignored" warning is emitted only once.
        protected boolean warningIssued = false ;
        public ParserOutputGraph(Graph graph) { this.graph = graph ; }
        @Override public void triple(Triple triple)     { graph.add(triple) ; }
        @Override public void quad(Quad quad)
        {
            if ( quad.isTriple() || quad.isDefaultGraph() )
                graph.add(quad.asTriple()) ;
            else
            {
                if ( ! warningIssued )
                {
                    //SysRIOT.getLogger().warn("Only triples or default graph data expected : named graph data ignored") ;
                    // Not ideal - assumes the global default.
                    ErrorHandlerFactory.getDefaultErrorHandler().warning("Only triples or default graph data expected : named graph data ignored", -1, -1) ;
                }
                warningIssued = true ;
            }
            //throw new IllegalStateException("Quad passed to graph parsing") ;
        }

        @Override public void base(String base)
        { }

        @Override public void prefix(String prefix, String uri)
        {
            try { // Jena applies XML rules to prefixes.
                graph.getPrefixMapping().setNsPrefix(prefix, uri) ;
            } catch (JenaException ex) {}
        }
    }
    /**
     * Destination that adds triples and quads to a DatasetGraph; bare triples
     * go into the generated default graph node.
     */
    private static class ParserOutputDataset extends StreamRDFBase
    {
        protected final DatasetGraph dsg ;
        // Prefixes are recorded on the default graph's prefix mapping.
        protected final PrefixMapping prefixMapping ;

        public ParserOutputDataset(DatasetGraph dsg)
        {
            this.dsg = dsg ;
            this.prefixMapping = dsg.getDefaultGraph().getPrefixMapping() ;
            // = dsg.getPrefixMapping().setNsPrefix(prefix, uri) ;
        }

        @Override public void triple(Triple triple)
        {
            dsg.add(Quad.defaultGraphNodeGenerated, triple.getSubject(), triple.getPredicate(), triple.getObject()) ;
            //throw new IllegalStateException("Triple passed to dataset parsing") ;
        }

        @Override public void quad(Quad quad)
        {
            if ( quad.isTriple() )
                dsg.add(Quad.defaultGraphNodeGenerated, quad.getSubject(), quad.getPredicate(), quad.getObject()) ;
            else
                dsg.add(quad) ;
        }

        @Override public void base(String base)
        { }

        @Override public void prefix(String prefix, String uri)
        {
            try { // Jena applies XML rules to prefixes.
                prefixMapping.setNsPrefix(prefix, uri) ;
            } catch (JenaException ex) {}
        }
    }
} | CesarPantoja/jena | jena-arq/src/main/java/org/apache/jena/riot/system/StreamRDFLib.java | Java | apache-2.0 | 7,950 |
/*
* Copyright 1999-2020 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.nacos.api.remote.response;
/**
* ResponseCode.
*
* @author liuzunfei
* @version $Id: ResponseCode.java, v 0.1 2020年07月14日 2:04 PM liuzunfei Exp $
*/
public enum ResponseCode {

    /**
     * The request was handled successfully.
     */
    SUCCESS(200, "Response ok"),

    /**
     * The request could not be handled.
     */
    FAIL(500, "Response fail");

    // Numeric status code of the response.
    int code;

    // Human-readable description of the response.
    String desc;

    ResponseCode(int code, String desc) {
        this.code = code;
        this.desc = desc;
    }

    /**
     * Returns the numeric status code.
     */
    public int getCode() {
        return code;
    }

    /**
     * Returns the human-readable description.
     */
    public String getDesc() {
        return desc;
    }

    /**
     * Overrides the numeric status code.
     */
    public void setCode(int code) {
        this.code = code;
    }

    /**
     * Overrides the human-readable description.
     */
    public void setDesc(String desc) {
        this.desc = desc;
    }
}
| alibaba/nacos | api/src/main/java/com/alibaba/nacos/api/remote/response/ResponseCode.java | Java | apache-2.0 | 1,875 |
package cn.springmvc.service;
/**
 * Service-layer contract for shop operations.
 * Currently a marker interface: no operations have been defined yet.
 */
public interface ShopService {
}
| JoshEliYang/PriceTag | src/main/java/cn/springmvc/service/ShopService.java | Java | apache-2.0 | 65 |
import sys
import cv2
import helper as hp
class MSP():
    """Minimum-sum-of-pixels template matcher for digits and symbols.

    Loads grayscale pattern images from ``msp/num`` and ``msp/sym`` at
    construction time and classifies an input image by choosing the pattern
    with the fewest differing (non-zero) pixels.
    """

    name = "MSP"

    def __init__(self):
        self.__patterns_num = []
        self.__patterns_sym = []
        self.__labels_num = []
        self.__labels_sym = []
        msp_num, msp_sym = "msp/num", "msp/sym"
        self.__load_num_patterns(msp_num)
        self.__load_sym_patterns(msp_sym)
        # Parenthesized form works under both Python 2 and Python 3.
        print('loading MSP...')

    def __load_num_patterns(self, input_dir):
        # Load digit pattern images and their labels from input_dir.
        paths = hp.get_paths(input_dir)
        self.__patterns_num = [hp.get_gray_image(input_dir, path) for path in paths]
        self.__labels_num = [hp.get_test(path, "num")[0] for path in paths]

    def __load_sym_patterns(self, input_dir):
        # Load symbol pattern images and their labels from input_dir.
        paths = hp.get_paths(input_dir)
        self.__patterns_sym = [hp.get_gray_image(input_dir, path) for path in paths]
        self.__labels_sym = [hp.get_test(path, "sym")[0] for path in paths]

    def __get_mode(self, mode):
        # Map a mode name to its (labels, patterns) pair.
        if mode == "num":
            return self.__labels_num, self.__patterns_num
        elif mode == "sym":
            return self.__labels_sym, self.__patterns_sym
        # Previously fell through and returned None, which crashed callers on
        # tuple unpacking; fail fast with a clear error instead.
        raise ValueError("unknown mode: %r (expected 'num' or 'sym')" % (mode,))

    def rec(self, img, mode):
        # Return the label of the pattern with the smallest pixel difference.
        # sys.maxsize exists on Python 2.6+ and 3 (sys.maxint was 2-only).
        tmp_max, tmp, rec = sys.maxsize, 0, 0
        labels, patterns = self.__get_mode(mode)
        for pattern, label in zip(patterns, labels):
            tmp = cv2.countNonZero(pattern - img)
            if tmp < tmp_max: tmp_max, rec = tmp, label
        return rec
| capital-boss/plate-recognition | msp.py | Python | apache-2.0 | 1,393 |
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
class AlarmsVitrage(horizon.Panel):
    """Horizon dashboard panel that exposes Vitrage alarms."""

    # Translatable title shown in the dashboard navigation.
    name = _("Alarms")
    # URL slug used to register and route to this panel.
    slug = "vitragealarms"
| openstack/vitrage-dashboard | vitrage_dashboard/alarms/panel.py | Python | apache-2.0 | 733 |
/**
* Licensed to Apereo under one or more contributor license
* agreements. See the NOTICE file distributed with this work
* for additional information regarding copyright ownership.
* Apereo licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a
* copy of the License at the following location:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.jasig.portlet.notice.service.jpa.action;
import java.io.IOException;
import java.util.Date;
import java.util.Map;
import javax.portlet.ActionRequest;
import javax.portlet.ActionResponse;
import org.jasig.portlet.notice.NotificationAction;
import org.jasig.portlet.notice.NotificationEntry;
import org.jasig.portlet.notice.NotificationState;
import org.jasig.portlet.notice.service.CacheNotificationService;
import org.jasig.portlet.notice.service.jpa.JpaNotificationService;
import org.jasig.portlet.notice.util.SpringContext;
/**
*
* @author mglazier
*/
public class CompleteOnRedirectAction extends NotificationAction {

    private static final long serialVersionUID = 1L;

    public CompleteOnRedirectAction() {
        // Provide a sensible (default) label; most
        // use cases will use the setter and override
        setLabel("COMPLETE");
    }

    public CompleteOnRedirectAction(String label) {
        setLabel(label);
    }

    /**
     * When invoke is called, a configured notification state is set for the entry if it has not already been set.
     * {@link JpaNotificationService} and {@link CacheNotificationService} are used here to add the entry state and clear
     * the cache for the user. This class is not managed by Spring, so these objects must be obtained using the
     * Spring context that {@link SpringContext} provides.
     *
     * @param req
     * @param res
     * @throws IOException
     */
    @Override
    public void invoke(final ActionRequest req, final ActionResponse res) throws IOException {
        JpaNotificationService jpaService = (JpaNotificationService) SpringContext.getApplicationContext().getBean("jpaNotificationService");
        final NotificationEntry entry = getTarget();
        Map<NotificationState, Date> stateMap = entry.getStates();
        if (stateMap != null && !stateMap.containsKey(NotificationState.COMPLETED)) {
            // Mark COMPLETED only once; repeated invocations must not add duplicate state rows.
            jpaService.addEntryState(req, entry.getId(), NotificationState.COMPLETED);
        }
        res.sendRedirect(entry.getUrl());
    }
}
| is-apps/NotificationPortlet | notification-portlet-webapp/src/main/java/org/jasig/portlet/notice/service/jpa/action/CompleteOnRedirectAction.java | Java | apache-2.0 | 2,861 |
namespace Org.LoadRunner.Core.Models
{
    /// <summary>
    /// Result of a single request, extending <see cref="BaseResult"/> with an identifier.
    /// </summary>
    internal class RequestResult : BaseResult
    {
        /// <summary>Identifier of the request this result belongs to.</summary>
        public int Id { get; set; }
    }
}
| unruledboy/loadrunner | LoadRunner/Core/Models/RequestResult.cs | C# | apache-2.0 | 145 |
/**
* Copyright 2008-2017 Qualogy Solutions B.V.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test.com.qualogy.qafe.business.integration.adapter;
import java.util.HashMap;
import java.util.Map;
public class DummyPersonMoreComplexObject extends DummyPerson {
	// Arbitrary named attributes attached to the person, keyed by name.
	Map<String,Object> members = new HashMap<String,Object>();
	public DummyPersonMoreComplexObject(){
	}
	public DummyPersonMoreComplexObject(String name, String lastName){
		super(name,lastName);
	}
	/** Stores (or replaces) the named attribute value. */
	public void add(String key,Object value){
		members.put(key,value);
	}
}
| qafedev/qafe-platform | qafe-business/src/test/java/test/com/qualogy/qafe/business/integration/adapter/DummyPersonMoreComplexObject.java | Java | apache-2.0 | 1,065 |
/******************************************************************************
* Copyright 2017 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/perception/obstacle/lidar/roi_filter/hdmap_roi_filter/polygon_mask.h"
namespace apollo {
namespace perception {
// Computes the polygon's extent along the bitmap's major direction, clipped
// to the bitmap bounds, and snaps the range start to a grid-cell center.
void GetValidXRange(const typename PolygonScanConverter::Polygon& polygon,
                    const Bitmap2D& bitmap,
                    const PolygonScanConverter::DirectionMajor major_dir,
                    const double major_dir_grid_size, Interval* valid_x_range) {
  Eigen::Vector2d polygon_min_pt, polygon_max_pt;
  polygon_min_pt.setConstant(std::numeric_limits<double>::max());
  // BUGFIX: std::numeric_limits<double>::min() is the smallest *positive*
  // double, not the most negative value, so it was the wrong sentinel for a
  // running maximum (all-negative coordinates would never update it).
  polygon_max_pt.setConstant(std::numeric_limits<double>::lowest());

  for (const auto& point : polygon) {
    polygon_min_pt.x() = std::min(polygon_min_pt.x(), point.x());
    polygon_min_pt.y() = std::min(polygon_min_pt.y(), point.y());

    polygon_max_pt.x() = std::max(polygon_max_pt.x(), point.x());
    polygon_max_pt.y() = std::max(polygon_max_pt.y(), point.y());
  }

  // Clip the polygon's extent to the bitmap bounds along the major direction.
  const Eigen::Vector2d& bitmap_min_pt = bitmap.get_min_p();
  const Eigen::Vector2d& bitmap_max_pt = bitmap.get_max_p();

  valid_x_range->first =
      std::max(polygon_min_pt[major_dir], bitmap_min_pt[major_dir]);
  valid_x_range->second =
      std::min(polygon_max_pt[major_dir], bitmap_max_pt[major_dir]);

  // For numerical stability: snap the range start to the center of its cell.
  valid_x_range->first =
      (static_cast<int>((valid_x_range->first - bitmap_min_pt[major_dir]) /
                        major_dir_grid_size) +
       0.5) *
          major_dir_grid_size +
      bitmap_min_pt[major_dir];
}
// Rasterizes one polygon into the bitmap: the polygon is converted to
// per-scanline intervals along the bitmap's major direction, and each
// interval (extended by extend_dist in the minor direction) is set.
void DrawPolygonInBitmap(const typename PolygonScanConverter::Polygon& polygon,
                         const double extend_dist, Bitmap2D* bitmap) {
  PolygonScanConverter::DirectionMajor major_dir = bitmap->get_dir_major();
  PolygonScanConverter::DirectionMajor op_major_dir =
      bitmap->get_op_dir_major();
  double major_dir_grid_size = bitmap->get_grid_size()[major_dir];
  // 1. Get valid x range
  Interval valid_x_range;
  GetValidXRange(polygon, *bitmap, major_dir, major_dir_grid_size,
                 &valid_x_range);
  // 2. Convert polygon to scan intervals(Most important)
  std::vector<std::vector<Interval>> scans_intervals;
  PolygonScanConverter polygon_scan_converter;
  polygon_scan_converter.Init(major_dir, valid_x_range, polygon,
                              major_dir_grid_size);
  polygon_scan_converter.ConvertScans(&scans_intervals);
  // 3. Draw grids in bitmap based on scan intervals
  const Eigen::Vector2d& bitmap_min_pt = bitmap->get_min_p();
  const Eigen::Vector2d& bitmap_max_pt = bitmap->get_max_p();
  // x tracks the major-direction coordinate of scanline i.
  double x = valid_x_range.first;
  for (size_t i = 0; i < scans_intervals.size();
       x += major_dir_grid_size, ++i) {
    for (const auto& scan_interval : scans_intervals[i]) {
      if (scan_interval.first > scan_interval.second) {
        // NOTE(review): only logs; the invalid interval is still processed
        // below, relying on the min/max clip + empty-range check to skip it.
        AERROR << "scan interval is not valid: "
               << "scan_interval.first = " << scan_interval.first << ", "
               << "scan_interval.second = " << scan_interval.second << ".";
      }
      // Extend the interval, then clip it to the bitmap's minor-direction bounds.
      Interval valid_y_range;
      valid_y_range.first = std::max(bitmap_min_pt[op_major_dir],
                                     scan_interval.first - extend_dist);
      valid_y_range.second = std::min(bitmap_max_pt[op_major_dir],
                                      scan_interval.second + extend_dist);
      if (valid_y_range.first > valid_y_range.second) {
        continue;
      }
      bitmap->Set(x, valid_y_range.first, valid_y_range.second);
    }
  }
}
// Rasterizes each polygon independently into the shared bitmap.
void DrawPolygonInBitmap(
    const std::vector<typename PolygonScanConverter::Polygon>& polygons,
    const double extend_dist, Bitmap2D* bitmap) {
  for (size_t i = 0; i < polygons.size(); ++i) {
    DrawPolygonInBitmap(polygons[i], extend_dist, bitmap);
  }
}
} // namespace perception
} // namespace apollo
| startcode/apollo | modules/perception/obstacle/lidar/roi_filter/hdmap_roi_filter/polygon_mask.cc | C++ | apache-2.0 | 4,510 |
// <copyright file="QueryStringParser.cs" company="Stormpath, Inc.">
// Copyright (c) 2016 Stormpath, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using Microsoft.Extensions.Logging;
namespace Stormpath.Owin.Middleware.Internal
{
public static class QueryStringParser
{
public static IDictionary<string, string[]> Parse(string queryString, ILogger logger)
{
if (string.IsNullOrEmpty(queryString))
{
return new Dictionary<string, string[]>();
}
var temporaryDictionary = new Dictionary<string, List<string>>();
foreach (var item in queryString.Split('&'))
{
if (string.IsNullOrEmpty(item))
{
continue;
}
try
{
var tokens = item.Split('=');
var key = WebUtility.UrlDecode(tokens[0]);
var value = WebUtility.UrlDecode(tokens[1]);
if (!temporaryDictionary.ContainsKey(key))
{
temporaryDictionary[key] = new List<string>();
}
temporaryDictionary[key].Add(value);
}
catch (Exception ex)
{
logger.LogWarning(1006, ex, $"Error parsing item '{item}'", "QueryStringParser.Parse");
}
}
return temporaryDictionary.ToDictionary(kvp => kvp.Key, kvp => kvp.Value.ToArray());
}
}
}
| stormpath/stormpath-dotnet-owin-middleware | src/Stormpath.Owin.Middleware/Internal/QueryStringParser.cs | C# | apache-2.0 | 2,182 |
/*!
* ${copyright}
*/
sap.ui.define([
	"sap/base/Log",
	"sap/ui/model/odata/v4/lib/_GroupLock"
], function (Log, _GroupLock) {
	"use strict";
	// Unit tests for _GroupLock: construction, locking/unlocking, unlocked
	// copies, serial numbers, the modifying flag, cancelation, and $cached.
	//*********************************************************************************************
	QUnit.module("sap.ui.model.odata.v4.lib._GroupLock", {
		beforeEach : function () {
			// Any warning or error logged during a test fails that test.
			this.oLogMock = this.mock(Log);
			this.oLogMock.expects("warning").never();
			this.oLogMock.expects("error").never();
		}
	});
	//*********************************************************************************************
	QUnit.test("unlocked, initialized", function (assert) {
		var oOwner = {/*owner*/},
			oGroupLock = new _GroupLock("foo", oOwner);
		assert.strictEqual(oGroupLock.isCanceled(), false);
		assert.strictEqual(oGroupLock.getGroupId(), "foo");
		assert.strictEqual(oGroupLock.oOwner, oOwner);
		assert.strictEqual(oGroupLock.isLocked(), false);
		// An unlocked lock never hands out wait promises.
		assert.strictEqual(oGroupLock.waitFor("foo"), undefined);
		assert.strictEqual(oGroupLock.waitFor("bar"), undefined);
	});
	//*********************************************************************************************
	QUnit.test("owner is mandatory", function (assert) {
		assert.throws(function () {
			return new _GroupLock("group");
		}, new Error("Missing owner"));
	});
	//*********************************************************************************************
	QUnit.test("locked", function (assert) {
		var oGroupLock,
			oOwner = {/*owner*/},
			oPromise1,
			oPromise2;
		// code under test
		oGroupLock = new _GroupLock("foo", oOwner, true);
		assert.strictEqual(oGroupLock.getGroupId(), "foo");
		assert.strictEqual(oGroupLock.oOwner, oOwner);
		assert.strictEqual(oGroupLock.isLocked(), true);
		// code under test
		// Waiting on the lock's own group yields pending promises...
		oPromise1 = oGroupLock.waitFor("foo");
		oPromise2 = oGroupLock.waitFor("foo");
		assert.ok(oPromise1.isPending());
		assert.ok(oPromise2.isPending());
		// code under test
		// ...while other groups are not blocked.
		assert.strictEqual(oGroupLock.waitFor("bar"), undefined);
		// code under test
		oGroupLock.unlock();
		// Unlocking resolves all waiting promises.
		assert.ok(oPromise1.isFulfilled());
		assert.ok(oPromise2.isFulfilled());
		assert.notOk(oGroupLock.isLocked());
	});
	//*********************************************************************************************
	QUnit.test("multiple unlocks", function (assert) {
		var oGroupLock = new _GroupLock("group", {/*owner*/});
		oGroupLock.unlock();
		assert.throws(function () {
			oGroupLock.unlock();
		}, new Error("GroupLock unlocked twice"));
		oGroupLock.unlock(true); // no error!
	});
	//*********************************************************************************************
	QUnit.test("getUnlockedCopy", function (assert) {
		var oGroupLock1 = new _GroupLock("group", {/*owner*/}, true, true, 42),
			oGroupLock2;
		// code under test
		oGroupLock2 = oGroupLock1.getUnlockedCopy();
		// The copy keeps group, owner and serial number, but is neither
		// locked nor modifying.
		assert.strictEqual(oGroupLock2.getGroupId(), oGroupLock1.getGroupId());
		assert.strictEqual(oGroupLock2.oOwner, oGroupLock1.oOwner);
		assert.strictEqual(oGroupLock2.isLocked(), false);
		assert.strictEqual(oGroupLock2.isModifying(), false);
		assert.strictEqual(oGroupLock2.getSerialNumber(), oGroupLock1.getSerialNumber());
	});
	//*********************************************************************************************
	QUnit.test("owner & toString", function (assert) {
		var oGroupLock,
			oOwner = {
				toString : function () {
					return "owner";
				}
			};
		oGroupLock = new _GroupLock("group", oOwner, true);
		assert.strictEqual(oGroupLock.toString(),
			"sap.ui.model.odata.v4.lib._GroupLock(group=group, owner=owner, locked)");
		oGroupLock = new _GroupLock("group", oOwner, true, true);
		assert.strictEqual(oGroupLock.toString(),
			"sap.ui.model.odata.v4.lib._GroupLock(group=group, owner=owner, locked, modifying)");
		oGroupLock = new _GroupLock("group", oOwner, false);
		assert.strictEqual(oGroupLock.oOwner, oOwner);
		assert.strictEqual(oGroupLock.toString(),
			"sap.ui.model.odata.v4.lib._GroupLock(group=group, owner=owner)");
		oGroupLock = new _GroupLock("group", oOwner, false, undefined, 0);
		assert.strictEqual(oGroupLock.toString(),
			"sap.ui.model.odata.v4.lib._GroupLock(group=group, owner=owner, serialNumber=0)");
		oGroupLock = new _GroupLock("group", oOwner, true, true, 0);
		assert.strictEqual(oGroupLock.toString(),
			"sap.ui.model.odata.v4.lib._GroupLock(group=group, owner=owner, locked, modifying,"
			+ " serialNumber=0)");
	});
	//*********************************************************************************************
	QUnit.test("constants", function (assert) {
		assert.strictEqual(_GroupLock.$cached.getGroupId(), "$cached");
		assert.strictEqual(_GroupLock.$cached.isLocked(), false);
		assert.strictEqual(_GroupLock.$cached.isModifying(), false);
		assert.strictEqual(_GroupLock.$cached.oOwner, "sap.ui.model.odata.v4.lib._GroupLock");
		// ensure that $cached can be unlocked several times
		_GroupLock.$cached.unlock();
		_GroupLock.$cached.unlock();
	});
	//*********************************************************************************************
	QUnit.test("serial number", function (assert) {
		var oOwner = {/*owner*/};
		assert.strictEqual(new _GroupLock("group", oOwner, true, true, 42).getSerialNumber(), 42);
		assert.strictEqual(new _GroupLock("group", oOwner, true).getSerialNumber(), Infinity);
		assert.strictEqual(new _GroupLock("group", oOwner, true, false, 0).getSerialNumber(), 0);
	});
	//*********************************************************************************************
	[undefined, false, true].forEach(function (bModifying, i) {
		QUnit.test("modifying: " + bModifying, function (assert) {
			// Only an explicit true makes the lock modifying.
			assert.strictEqual(new _GroupLock("group", {/*owner*/}, true, bModifying, 42).isModifying(),
				i === 2);
		});
	});
	//*********************************************************************************************
	QUnit.test("modifying: throws if not locked", function (assert) {
		assert.throws(function () {
			return new _GroupLock("group", {/*owner*/}, false, true, 42);
		}, new Error("A modifying group lock has to be locked"));
	});
	//*********************************************************************************************
	QUnit.test("cancel w/o function", function (assert) {
		var oGroupLock = new _GroupLock("group", {/*owner*/}, true);
		this.mock(oGroupLock).expects("unlock").withExactArgs(true);
		// code under test
		oGroupLock.cancel();
		assert.ok(oGroupLock.isCanceled());
	});
	//*********************************************************************************************
	QUnit.test("cancel w/ function", function (assert) {
		var fnCancel = sinon.spy(),
			oGroupLock = new _GroupLock("group", {/*owner*/}, true, false, undefined, fnCancel);
		assert.strictEqual(oGroupLock.fnCancel, fnCancel);
		sinon.assert.notCalled(fnCancel);
		this.mock(oGroupLock).expects("unlock").withExactArgs(true);
		// code under test
		oGroupLock.cancel();
		assert.ok(oGroupLock.isCanceled());
		sinon.assert.calledOnce(fnCancel);
		sinon.assert.calledWithExactly(fnCancel);
		oGroupLock.cancel();
		sinon.assert.calledOnce(fnCancel); // cancel function must not be called again
	});
});
| SAP/openui5 | src/sap.ui.core/test/sap/ui/core/qunit/odata/v4/lib/_GroupLock.qunit.js | JavaScript | apache-2.0 | 7,157 |
/*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2017 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.job.entries.deletefolders;
import org.pentaho.di.job.entry.validator.AbstractFileValidator;
import org.pentaho.di.job.entry.validator.AndValidator;
import org.pentaho.di.job.entry.validator.JobEntryValidatorUtils;
import java.io.IOException;
import java.util.List;
import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSelectInfo;
import org.apache.commons.vfs2.FileSelector;
import org.apache.commons.vfs2.FileType;
import org.pentaho.di.cluster.SlaveServer;
import org.pentaho.di.core.CheckResultInterface;
import org.pentaho.di.core.Const;
import org.pentaho.di.core.util.Utils;
import org.pentaho.di.core.Result;
import org.pentaho.di.core.RowMetaAndData;
import org.pentaho.di.core.database.DatabaseMeta;
import org.pentaho.di.core.exception.KettleDatabaseException;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.exception.KettleXMLException;
import org.pentaho.di.core.variables.VariableSpace;
import org.pentaho.di.core.vfs.KettleVFS;
import org.pentaho.di.core.xml.XMLHandler;
import org.pentaho.di.i18n.BaseMessages;
import org.pentaho.di.job.JobMeta;
import org.pentaho.di.job.entry.JobEntryBase;
import org.pentaho.di.job.entry.JobEntryInterface;
import org.pentaho.di.job.entry.validator.ValidatorContext;
import org.pentaho.di.repository.ObjectId;
import org.pentaho.di.repository.Repository;
import org.pentaho.di.resource.ResourceEntry;
import org.pentaho.di.resource.ResourceEntry.ResourceType;
import org.pentaho.di.resource.ResourceReference;
import org.pentaho.metastore.api.IMetaStore;
import org.w3c.dom.Node;
/**
* This defines a 'delete folders' job entry.
*
* @author Samatar Hassan
* @since 13-05-2008
*/
public class JobEntryDeleteFolders extends JobEntryBase implements Cloneable, JobEntryInterface {
private static Class<?> PKG = JobEntryDeleteFolders.class; // for i18n purposes, needed by Translator2!!
public boolean argFromPrevious;
public String[] arguments;
private String success_condition;
public String SUCCESS_IF_AT_LEAST_X_FOLDERS_DELETED = "success_when_at_least";
public String SUCCESS_IF_ERRORS_LESS = "success_if_errors_less";
public String SUCCESS_IF_NO_ERRORS = "success_if_no_errors";
private String limit_folders;
int NrErrors = 0;
int NrSuccess = 0;
boolean successConditionBroken = false;
boolean successConditionBrokenExit = false;
int limitFolders = 0;
public JobEntryDeleteFolders( String n ) {
super( n, "" );
argFromPrevious = false;
arguments = null;
success_condition = SUCCESS_IF_NO_ERRORS;
limit_folders = "10";
}
public JobEntryDeleteFolders() {
this( "" );
}
public void allocate( int nrFields ) {
arguments = new String[nrFields];
}
public Object clone() {
JobEntryDeleteFolders je = (JobEntryDeleteFolders) super.clone();
if ( arguments != null ) {
int nrFields = arguments.length;
je.allocate( nrFields );
System.arraycopy( arguments, 0, je.arguments, 0, nrFields );
}
return je;
}
public String getXML() {
StringBuilder retval = new StringBuilder( 300 );
retval.append( super.getXML() );
retval.append( " " ).append( XMLHandler.addTagValue( "arg_from_previous", argFromPrevious ) );
retval.append( " " ).append( XMLHandler.addTagValue( "success_condition", success_condition ) );
retval.append( " " ).append( XMLHandler.addTagValue( "limit_folders", limit_folders ) );
retval.append( " <fields>" ).append( Const.CR );
if ( arguments != null ) {
for ( int i = 0; i < arguments.length; i++ ) {
retval.append( " <field>" ).append( Const.CR );
retval.append( " " ).append( XMLHandler.addTagValue( "name", arguments[i] ) );
retval.append( " </field>" ).append( Const.CR );
if ( parentJobMeta != null ) {
parentJobMeta.getNamedClusterEmbedManager().registerUrl( arguments[i] );
}
}
}
retval.append( " </fields>" ).append( Const.CR );
return retval.toString();
}
public void loadXML( Node entrynode, List<DatabaseMeta> databases, List<SlaveServer> slaveServers,
Repository rep, IMetaStore metaStore ) throws KettleXMLException {
try {
super.loadXML( entrynode, databases, slaveServers );
argFromPrevious = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "arg_from_previous" ) );
success_condition = XMLHandler.getTagValue( entrynode, "success_condition" );
limit_folders = XMLHandler.getTagValue( entrynode, "limit_folders" );
Node fields = XMLHandler.getSubNode( entrynode, "fields" );
// How many field arguments?
int nrFields = XMLHandler.countNodes( fields, "field" );
allocate( nrFields );
// Read them all...
for ( int i = 0; i < nrFields; i++ ) {
Node fnode = XMLHandler.getSubNodeByNr( fields, "field", i );
arguments[i] = XMLHandler.getTagValue( fnode, "name" );
}
} catch ( KettleXMLException xe ) {
throw new KettleXMLException( BaseMessages.getString( PKG, "JobEntryDeleteFolders.UnableToLoadFromXml" ), xe );
}
}
public void loadRep( Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List<DatabaseMeta> databases,
List<SlaveServer> slaveServers ) throws KettleException {
try {
argFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "arg_from_previous" );
limit_folders = rep.getJobEntryAttributeString( id_jobentry, "limit_folders" );
success_condition = rep.getJobEntryAttributeString( id_jobentry, "success_condition" );
// How many arguments?
int argnr = rep.countNrJobEntryAttributes( id_jobentry, "name" );
allocate( argnr );
// Read them all...
for ( int a = 0; a < argnr; a++ ) {
arguments[a] = rep.getJobEntryAttributeString( id_jobentry, a, "name" );
}
} catch ( KettleException dbe ) {
throw new KettleException( BaseMessages.getString( PKG, "JobEntryDeleteFolders.UnableToLoadFromRepo", String
.valueOf( id_jobentry ) ), dbe );
}
}
public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_job ) throws KettleException {
try {
rep.saveJobEntryAttribute( id_job, getObjectId(), "arg_from_previous", argFromPrevious );
rep.saveJobEntryAttribute( id_job, getObjectId(), "limit_folders", limit_folders );
rep.saveJobEntryAttribute( id_job, getObjectId(), "success_condition", success_condition );
// save the arguments...
if ( arguments != null ) {
for ( int i = 0; i < arguments.length; i++ ) {
rep.saveJobEntryAttribute( id_job, getObjectId(), i, "name", arguments[i] );
}
}
} catch ( KettleDatabaseException dbe ) {
throw new KettleException( BaseMessages.getString( PKG, "JobEntryDeleteFolders.UnableToSaveToRepo", String
.valueOf( id_job ) ), dbe );
}
}
public Result execute( Result result, int nr ) throws KettleException {
List<RowMetaAndData> rows = result.getRows();
result.setNrErrors( 1 );
result.setResult( false );
NrErrors = 0;
NrSuccess = 0;
successConditionBroken = false;
successConditionBrokenExit = false;
limitFolders = Const.toInt( environmentSubstitute( getLimitFolders() ), 10 );
//Set Embedded NamedCluter MetatStore Provider Key so that it can be passed to VFS
if ( parentJobMeta.getNamedClusterEmbedManager() != null ) {
parentJobMeta.getNamedClusterEmbedManager()
.passEmbeddedMetastoreKey( this, parentJobMeta.getEmbeddedMetastoreProviderKey() );
}
if ( argFromPrevious ) {
if ( log.isDetailed() ) {
logDetailed( BaseMessages.getString( PKG, "JobEntryDeleteFolders.FoundPreviousRows", String
.valueOf( ( rows != null ? rows.size() : 0 ) ) ) );
}
}
if ( argFromPrevious && rows != null ) {
for ( int iteration = 0; iteration < rows.size() && !parentJob.isStopped(); iteration++ ) {
if ( successConditionBroken ) {
logError( BaseMessages.getString( PKG, "JobEntryDeleteFolders.Error.SuccessConditionbroken", ""
+ NrErrors ) );
result.setNrErrors( NrErrors );
result.setNrLinesDeleted( NrSuccess );
return result;
}
RowMetaAndData resultRow = rows.get( iteration );
String args_previous = resultRow.getString( 0, null );
if ( !Utils.isEmpty( args_previous ) ) {
if ( deleteFolder( args_previous ) ) {
updateSuccess();
} else {
updateErrors();
}
} else {
// empty filename !
logError( BaseMessages.getString( PKG, "JobEntryDeleteFolders.Error.EmptyLine" ) );
}
}
} else if ( arguments != null ) {
for ( int i = 0; i < arguments.length && !parentJob.isStopped(); i++ ) {
if ( successConditionBroken ) {
logError( BaseMessages.getString( PKG, "JobEntryDeleteFolders.Error.SuccessConditionbroken", ""
+ NrErrors ) );
result.setNrErrors( NrErrors );
result.setNrLinesDeleted( NrSuccess );
return result;
}
String realfilename = environmentSubstitute( arguments[i] );
if ( !Utils.isEmpty( realfilename ) ) {
if ( deleteFolder( realfilename ) ) {
updateSuccess();
} else {
updateErrors();
}
} else {
// empty filename !
logError( BaseMessages.getString( PKG, "JobEntryDeleteFolders.Error.EmptyLine" ) );
}
}
}
if ( log.isDetailed() ) {
logDetailed( "=======================================" );
logDetailed( BaseMessages.getString( PKG, "JobEntryDeleteFolders.Log.Info.NrError", "" + NrErrors ) );
logDetailed( BaseMessages.getString( PKG, "JobEntryDeleteFolders.Log.Info.NrDeletedFolders", "" + NrSuccess ) );
logDetailed( "=======================================" );
}
result.setNrErrors( NrErrors );
result.setNrLinesDeleted( NrSuccess );
if ( getSuccessStatus() ) {
result.setResult( true );
}
return result;
}
private void updateErrors() {
NrErrors++;
if ( checkIfSuccessConditionBroken() ) {
// Success condition was broken
successConditionBroken = true;
}
}
private boolean checkIfSuccessConditionBroken() {
boolean retval = false;
if ( ( NrErrors > 0 && getSuccessCondition().equals( SUCCESS_IF_NO_ERRORS ) )
|| ( NrErrors >= limitFolders && getSuccessCondition().equals( SUCCESS_IF_ERRORS_LESS ) ) ) {
retval = true;
}
return retval;
}
private void updateSuccess() {
NrSuccess++;
}
private boolean getSuccessStatus() {
boolean retval = false;
if ( ( NrErrors == 0 && getSuccessCondition().equals( SUCCESS_IF_NO_ERRORS ) )
|| ( NrSuccess >= limitFolders && getSuccessCondition().equals( SUCCESS_IF_AT_LEAST_X_FOLDERS_DELETED ) )
|| ( NrErrors <= limitFolders && getSuccessCondition().equals( SUCCESS_IF_ERRORS_LESS ) ) ) {
retval = true;
}
return retval;
}
private boolean deleteFolder( String foldername ) {
boolean rcode = false;
FileObject filefolder = null;
try {
filefolder = KettleVFS.getFileObject( foldername, this );
if ( filefolder.exists() ) {
// the file or folder exists
if ( filefolder.getType() == FileType.FOLDER ) {
// It's a folder
if ( log.isDetailed() ) {
logDetailed( BaseMessages.getString( PKG, "JobEntryDeleteFolders.ProcessingFolder", foldername ) );
}
// Delete Files
int Nr = filefolder.delete( new TextFileSelector() );
if ( log.isDetailed() ) {
logDetailed( BaseMessages.getString( PKG, "JobEntryDeleteFolders.TotalDeleted", foldername, String
.valueOf( Nr ) ) );
}
rcode = true;
} else {
// Error...This file is not a folder!
logError( BaseMessages.getString( PKG, "JobEntryDeleteFolders.Error.NotFolder" ) );
}
} else {
// File already deleted, no reason to try to delete it
if ( log.isBasic() ) {
logBasic( BaseMessages.getString( PKG, "JobEntryDeleteFolders.FolderAlreadyDeleted", foldername ) );
}
rcode = true;
}
} catch ( Exception e ) {
logError(
BaseMessages.getString( PKG, "JobEntryDeleteFolders.CouldNotDelete", foldername, e.getMessage() ), e );
} finally {
if ( filefolder != null ) {
try {
filefolder.close();
} catch ( IOException ex ) {
// Ignore
}
}
}
return rcode;
}
private class TextFileSelector implements FileSelector {
public boolean includeFile( FileSelectInfo info ) {
return true;
}
public boolean traverseDescendents( FileSelectInfo info ) {
return true;
}
}
public void setPrevious( boolean argFromPrevious ) {
this.argFromPrevious = argFromPrevious;
}
public boolean evaluates() {
return true;
}
public void check( List<CheckResultInterface> remarks, JobMeta jobMeta, VariableSpace space,
Repository repository, IMetaStore metaStore ) {
boolean res = JobEntryValidatorUtils.andValidator().validate( this, "arguments", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notNullValidator() ) );
if ( !res ) {
return;
}
ValidatorContext ctx = new ValidatorContext();
AbstractFileValidator.putVariableSpace( ctx, getVariables() );
AndValidator.putValidators( ctx, JobEntryValidatorUtils.notNullValidator(), JobEntryValidatorUtils.fileExistsValidator() );
for ( int i = 0; i < arguments.length; i++ ) {
JobEntryValidatorUtils.andValidator().validate( this, "arguments[" + i + "]", remarks, ctx );
}
}
public List<ResourceReference> getResourceDependencies( JobMeta jobMeta ) {
List<ResourceReference> references = super.getResourceDependencies( jobMeta );
if ( arguments != null ) {
ResourceReference reference = null;
for ( int i = 0; i < arguments.length; i++ ) {
String filename = jobMeta.environmentSubstitute( arguments[i] );
if ( reference == null ) {
reference = new ResourceReference( this );
references.add( reference );
}
reference.getEntries().add( new ResourceEntry( filename, ResourceType.FILE ) );
}
}
return references;
}
public boolean isArgFromPrevious() {
return argFromPrevious;
}
public String[] getArguments() {
return arguments;
}
public void setSuccessCondition( String success_condition ) {
this.success_condition = success_condition;
}
public String getSuccessCondition() {
return success_condition;
}
public void setLimitFolders( String limit_folders ) {
this.limit_folders = limit_folders;
}
public String getLimitFolders() {
return limit_folders;
}
}
| TatsianaKasiankova/pentaho-kettle | engine/src/main/java/org/pentaho/di/job/entries/deletefolders/JobEntryDeleteFolders.java | Java | apache-2.0 | 15,930 |
/*
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.epam.ta.reportportal.ws.controller;
import com.epam.ta.reportportal.commons.EntityUtils;
import com.epam.ta.reportportal.commons.ReportPortalUser;
import com.epam.ta.reportportal.core.file.DeleteFilesHandler;
import com.epam.ta.reportportal.core.file.GetFileHandler;
import com.epam.ta.reportportal.core.user.EditUserHandler;
import com.epam.ta.reportportal.entity.attachment.BinaryData;
import com.epam.ta.reportportal.exception.ReportPortalException;
import com.epam.ta.reportportal.util.ProjectExtractor;
import com.epam.ta.reportportal.ws.model.OperationCompletionRS;
import io.swagger.annotations.ApiOperation;
import org.apache.commons.io.IOUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.security.core.annotation.AuthenticationPrincipal;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.InputStream;
import static com.epam.ta.reportportal.auth.permissions.Permissions.*;
/**
* @author Dzianis_Shybeka
*/
@RestController
@RequestMapping("/v1/data")
public class FileStorageController {

	// Resolves a project name + user into project details (membership/permissions).
	private final ProjectExtractor projectExtractor;
	// Handles user-profile mutations (photo upload/delete).
	private final EditUserHandler editUserHandler;
	// Loads stored files and user photos.
	private final GetFileHandler getFileHandler;
	// Bulk-removes attachments by id list.
	private final DeleteFilesHandler deleteFilesHandler;

	@Autowired
	public FileStorageController(ProjectExtractor projectExtractor, EditUserHandler editUserHandler, GetFileHandler getFileHandler, DeleteFilesHandler deleteFilesHandler) {
		this.projectExtractor = projectExtractor;
		this.editUserHandler = editUserHandler;
		this.getFileHandler = getFileHandler;
		this.deleteFilesHandler = deleteFilesHandler;
	}

	/**
	 * Streams the stored file with the given id to the HTTP response.
	 * The caller must be assigned to the project that owns the file.
	 */
	@Transactional(readOnly = true)
	@PreAuthorize(ASSIGNED_TO_PROJECT)
	@GetMapping(value = "/{projectName}/{dataId}")
	public void getFile(@PathVariable String projectName, @PathVariable("dataId") Long dataId, HttpServletResponse response,
			@AuthenticationPrincipal ReportPortalUser user) {
		toResponse(response, getFileHandler.loadFileById(dataId, projectExtractor.extractProjectDetails(user, projectName)));
	}

	/**
	 * Streams the authenticated user's own photo (optionally the thumbnail).
	 */
	@Transactional(readOnly = true)
	@GetMapping(value = "/photo")
	@ApiOperation("Get photo of current user")
	public void getMyPhoto(@AuthenticationPrincipal ReportPortalUser user, HttpServletResponse response,
			@RequestParam(value = "loadThumbnail", required = false) boolean loadThumbnail) {
		toResponse(response, getFileHandler.getUserPhoto(user, loadThumbnail));
	}

	/**
	 * Streams another user's photo; restricted to non-customer roles.
	 * The username is normalized (lower-cased) before lookup.
	 */
	@Transactional(readOnly = true)
	@PreAuthorize(NOT_CUSTOMER)
	@GetMapping(value = "/{projectName}/userphoto")
	@ApiOperation("Get user's photo")
	public void getUserPhoto(@PathVariable String projectName, @RequestParam(value = "id") String username,
			@RequestParam(value = "loadThumbnail", required = false) boolean loadThumbnail, HttpServletResponse response,
			@AuthenticationPrincipal ReportPortalUser user) {
		BinaryData userPhoto = getFileHandler.getUserPhoto(EntityUtils.normalizeId(username), user, projectName, loadThumbnail);
		toResponse(response, userPhoto);
	}

	/** Uploads a new photo for the authenticated user. */
	@Transactional
	@PostMapping(value = "/photo", consumes = { MediaType.MULTIPART_FORM_DATA_VALUE })
	@ApiOperation("Upload user's photo")
	public OperationCompletionRS uploadPhoto(@RequestParam("file") MultipartFile file, @AuthenticationPrincipal ReportPortalUser user) {
		return editUserHandler.uploadPhoto(EntityUtils.normalizeId(user.getUsername()), file);
	}

	/** Deletes the authenticated user's photo. */
	@Transactional
	@DeleteMapping(value = "/photo")
	@ApiOperation("Delete user's photo")
	public OperationCompletionRS deletePhoto(@AuthenticationPrincipal ReportPortalUser user) {
		return editUserHandler.deletePhoto(EntityUtils.normalizeId(user.getUsername()));
	}

	/** Admin-only bulk removal of attachments listed in an uploaded CSV file. */
	@Transactional
	@PreAuthorize(ADMIN_ONLY)
	@PostMapping(value = "/clean", consumes = { MediaType.MULTIPART_FORM_DATA_VALUE })
	@ApiOperation("Remove attachments from file storage according to uploaded csv file")
	public OperationCompletionRS removeAttachmentsByCsv(@RequestParam("file") MultipartFile file,
			@AuthenticationPrincipal ReportPortalUser user) {
		return deleteFilesHandler.removeFilesByCsv(file);
	}

	/**
	 * Copies data from provided {@link InputStream} to Response.
	 * Responds 204 (No Content) when the binary data has no stream.
	 *
	 * @param response   Response
	 * @param binaryData Stored data
	 */
	private void toResponse(HttpServletResponse response, BinaryData binaryData) {
		//TODO investigate stream closing requirement
		if (binaryData.getInputStream() != null) {
			try {
				response.setContentType(binaryData.getContentType());
				IOUtils.copy(binaryData.getInputStream(), response.getOutputStream());
			} catch (IOException e) {
				throw new ReportPortalException("Unable to retrieve binary data from data storage", e);
			}
		} else {
			response.setStatus(HttpStatus.NO_CONTENT.value());
		}
	}
}
| reportportal/service-api | src/main/java/com/epam/ta/reportportal/ws/controller/FileStorageController.java | Java | apache-2.0 | 5,705 |
package org.flysnow.cloud.buildmeta.ui.resteasy.exception;
import java.io.Serializable;
/**
 * Unchecked exception raised by the build-metadata REST layer when a service
 * operation fails. Mirrors the standard {@link RuntimeException} constructors.
 */
public class BuildMetadataServiceException extends RuntimeException implements Serializable {

    private static final long serialVersionUID = 7786141544419367058L;

    /** Creates an exception with neither a detail message nor a cause. */
    public BuildMetadataServiceException() {
    }

    /**
     * Creates an exception with the given detail message.
     *
     * @param msg the detail message
     */
    public BuildMetadataServiceException(String msg) {
        super(msg);
    }

    /**
     * Creates an exception wrapping the given cause.
     *
     * @param cause the underlying cause
     */
    public BuildMetadataServiceException(Throwable cause) {
        super(cause);
    }

    /**
     * Creates an exception with a detail message and an underlying cause.
     *
     * @param message the detail message
     * @param cause   the underlying cause
     */
    public BuildMetadataServiceException(String message, Throwable cause) {
        super(message, cause);
    }
}
| shensiduanxing/devops-metadata-svc | src/main/java/org/flysnow/cloud/buildmeta/ui/resteasy/exception/BuildMetadataServiceException.java | Java | apache-2.0 | 557 |
// Copyright 2005-2010 Gallio Project - http://www.gallio.org/
// Portions Copyright 2000-2004 Jonathan de Halleux
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using Gallio.VisualStudio.Shell.Core;
namespace Gallio.VisualStudio.Shell.UI.ToolWindows
{
/// <summary>
/// Default tool window manager.
/// </summary>
public class DefaultToolWindowManager : IToolWindowManager
{
private readonly DefaultShell shell;
/// <summary>
/// Initializes the tool window manager.
/// </summary>
/// <param name="shell">The shell.</param>
public DefaultToolWindowManager(IShell shell)
{
this.shell = (DefaultShell)shell;
}
/// <inheritdoc />
public ToolWindow FindToolWindow(string id)
{
if (id == null)
throw new ArgumentNullException("id");
int internalId = GenerateWindowId(id);
var pane = (ShellToolWindowPane)shell.ShellPackage.FindToolWindow(typeof(ShellToolWindowPane), internalId, false);
return pane.ToolWindowContainer.ToolWindow;
}
/// <inheritdoc />
public void OpenToolWindow(string id, ToolWindow window)
{
if (id == null)
throw new ArgumentNullException("id");
if (window == null)
throw new ArgumentNullException("window");
int internalId = GenerateWindowId(id);
var pane = (ShellToolWindowPane) shell.ShellPackage.FindToolWindow(typeof(ShellToolWindowPane), internalId, true);
if (pane == null)
throw new ShellException("Could not create an instance of the Shell tool window pane.");
pane.ToolWindowContainer.ToolWindow = window;
window.Show();
}
/// <inheritdoc />
public void CloseToolWindow(string id)
{
if (id == null)
throw new ArgumentNullException("id");
ToolWindow window = FindToolWindow(id);
if (window != null)
window.Close();
}
private static int GenerateWindowId(string id)
{
return id.GetHashCode() & 0x7fffffff;
}
}
}
| citizenmatt/gallio | src/Extensions/VisualStudio/Gallio.VisualStudio.Shell/UI/ToolWindows/DefaultToolWindowManager.cs | C# | apache-2.0 | 2,771 |
"""
.. module: lemur.authorizations.models
:platform: unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Netflix Secops <secops@netflix.com>
"""
from sqlalchemy import Column, Integer, String
from sqlalchemy_utils import JSONType
from lemur.database import db
from lemur.plugins.base import plugins
class Authorization(db.Model):
    """A pending DNS (ACME) authorization persisted until it can be completed."""

    __tablename__ = "pending_dns_authorizations"

    # Surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Account number that owns the DNS zone(s).
    account_number = Column(String(128))
    # Domains covered by this authorization, stored as JSON.
    domains = Column(JSONType)
    # Identifier of the DNS provider type handling the challenge.
    dns_provider_type = Column(String(128))
    # Provider-specific options, stored as JSON.
    options = Column(JSONType)

    @property
    def plugin(self):
        # NOTE(review): this reads self.plugin_name, but no such column or
        # constructor argument is defined on this model -- confirm whether this
        # should be self.dns_provider_type or whether plugin_name is assigned
        # elsewhere before this property is accessed.
        return plugins.get(self.plugin_name)

    def __repr__(self):
        return "Authorization(id={id})".format(id=self.id)

    def __init__(self, account_number, domains, dns_provider_type, options=None):
        """Create a pending authorization.

        :param account_number: account owning the DNS zone(s)
        :param domains: domains to authorize (JSON-serializable)
        :param dns_provider_type: DNS provider type identifier
        :param options: optional provider-specific options
        """
        self.account_number = account_number
        self.domains = domains
        self.dns_provider_type = dns_provider_type
        self.options = options
| Netflix/lemur | lemur/authorizations/models.py | Python | apache-2.0 | 1,090 |
// Copyright (c) 2021 Alachisoft
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
using System;
using System.Collections;
using Alachisoft.NCache.Storage.Mmf;
using Alachisoft.NCache.Common.Enum;
using Alachisoft.NCache.Common;
namespace Alachisoft.NCache.Storage
{
/// <summary>
/// Implements the RAM based cache storage option. Also implements ICacheStore interface.
/// </summary>
class InMemoryStorageProvider : MmfStorageProvider
{
/// <summary> The default size of the memory block to use. </summary>
protected const UInt32 DEFAULT_SIZE = 16;
/// <summary>
/// Overloaded constructor. Takes the properties as a map.
/// </summary>
/// <param name="properties">properties collection</param>
public InMemoryStorageProvider(IDictionary properties,bool evictionEnabled)
{
Initialize(properties,evictionEnabled);
}
#region / Initialize Members /
/// <summary>
/// Initializes the view manager.
/// </summary>
/// <param name="properties">Properties to be set</param>
public new void Initialize(IDictionary properties, bool evictionEnabled)
{
if (properties == null)
throw new ArgumentNullException("properties");
try
{
properties.Remove("file-name");
properties["num-views"] = 1;
uint sizeInMB = DEFAULT_SIZE;
if (properties.Contains("max-size"))
sizeInMB = Convert.ToUInt32(properties["max-size"]);
properties["view-size"] = sizeInMB * StorageProviderBase.MB;
properties["initial-size-mb"] = sizeInMB;
base.Initialize(properties,evictionEnabled);
}
catch (Exception)
{
throw;
}
}
#endregion
}
}
| Alachisoft/NCache | Src/NCStorage/StorageProviders/InMemoryStorageProvider.cs | C# | apache-2.0 | 2,188 |
<?php
/**
 * Editor for the list of allowed internal (site) hyperlinks.
 *
 * @version   $Id: article_allowurl_edit.php 1 11:36 2010年10月8日Z tianya $
 * @package   DedeCMS.Administrator
 * @copyright Copyright (c) 2007 - 2010, DesDev, Inc.
 * @license   http://help.dedecms.com/usersguide/license.html
 * @link      http://www.dedecms.com
 */
require_once(dirname(__FILE__)."/config.php");
require_once(DEDEINC."/oxwindow.class.php");
CheckPurview('sys_Source');

if(empty($dopost)) $dopost = '';

// Normalize the posted URL list.
// Fix: the original assigned $allsource here, leaving $allurls undefined
// and emitting a notice when rendered in the textarea below.
if(empty($allurls)) $allurls = '';
else $allurls = stripslashes($allurls);

$m_file = DEDEDATA."/admin/allowurl.txt";

// Save the submitted list (one URL per line) to the data file.
if($dopost=='save')
{
    $fp = fopen($m_file,'w');
    flock($fp,3);
    fwrite($fp,$allurls);
    fclose($fp);
    echo "<script>alert('Save OK!');</script>";
}

// Nothing posted: load the currently stored list for display.
// file_exists() guard avoids a filesize() warning on a missing file.
if(empty($allurls) && file_exists($m_file) && filesize($m_file)>0)
{
    $fp = fopen($m_file,'r');
    $allurls = fread($fp,filesize($m_file));
    fclose($fp);
}

$wintitle = "";
$wecome_info = "允许的超链接";
$win = new OxWindow();
$win->Init('article_allowurl_edit.php','js/blank.js','POST');
$win->AddHidden('dopost','save');
$win->AddTitle("每行保存一个超链接:");
$win->AddMsgItem("<textarea name='allurls' id='allurls' style='width:100%;height:300px'>$allurls</textarea>");
$winform = $win->GetWindow('ok');
$win->Display(); | jackysong/huolg | nidongde/article_allowurl_edit.php | PHP | apache-2.0 | 1,313 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013, 2014 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
from tests import base
from girder import events
from girder.constants import AccessType
from server import constants
def setUpModule():
    """Enable the provenance plugin and start the embedded Girder server."""
    base.enabledPlugins.append('provenance')
    base.startServer()
def tearDownModule():
    """Stop the embedded Girder server started by setUpModule()."""
    base.stopServer()
class ProvenanceTestCase(base.TestCase):
    def setUp(self):
        """Create an admin user, a regular user, and a public collection /
        folder / item hierarchy, with provenance enabled for folders and
        settings."""
        base.TestCase.setUp(self)

        # Create some test documents with an item
        admin = {
            'email': 'admin@email.com',
            'login': 'adminlogin',
            'firstName': 'Admin',
            'lastName': 'Last',
            'password': 'adminpassword',
            'admin': True
        }
        self.admin = self.model('user').createUser(**admin)

        user = {
            'email': 'good@email.com',
            'login': 'goodlogin',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'goodpassword',
            'admin': False
        }
        self.user = self.model('user').createUser(**user)

        # Track folder, item, and setting provenance initially
        self.model('setting').set(
            constants.PluginSettings.PROVENANCE_RESOURCES, 'folder,setting')

        coll1 = {
            'name': 'Test Collection',
            'description': 'test coll',
            'public': True,
            'creator': self.admin
        }
        self.coll1 = self.model('collection').createCollection(**coll1)

        folder1 = {
            'parent': self.coll1,
            'parentType': 'collection',
            'name': 'Public test folder',
            'creator': self.admin
        }
        self.folder1 = self.model('folder').createFolder(**folder1)
        # Regular user gets write access so it can trigger provenance updates.
        self.model('folder').setUserAccess(
            self.folder1, self.user, level=AccessType.WRITE, save=False)
        self.model('folder').setPublic(self.folder1, True, save=True)

        item1 = {
            'name': 'Public object',
            'creator': self.admin,
            'folder': self.folder1
        }
        self.item1 = self.model('item').createItem(**item1)
    def _checkProvenance(self, resp, item, version, user, eventType,
                         matches=None, fileInfo=None, resource='item'):
        """Assert that the latest provenance record for a resource matches.

        :param resp: a provenance response to check, or None to fetch one.
        :param item: the resource document whose provenance is checked.
        :param version: expected provenance version number.
        :param user: user expected as the event user (also used to fetch).
        :param eventType: expected event type (e.g. 'creation', 'update').
        :param matches: optional dict of provenance keys to expected values.
        :param fileInfo: optional expected values for the first file record;
            values may be nested one dict level deep.
        :param resource: resource route name ('item', 'folder', ...).
        """
        if resp is None:
            resp = self._getProvenance(item, user, resource=resource)
        self.assertStatusOk(resp)
        itemProvenance = resp.json
        self.assertEqual(itemProvenance['resourceId'], str(item['_id']))
        provenance = itemProvenance['provenance']
        self.assertEqual(provenance['eventType'], eventType)
        self.assertEqual(provenance['version'], version)
        self.assertEqual(str(provenance['eventUser']), str(user['_id']))
        if matches:
            for key in matches:
                self.assertEqual(provenance[key], matches[key])
        if fileInfo:
            # Compare against the first file entry; dict values are compared
            # key-by-key one level deep.
            for key in fileInfo:
                if isinstance(fileInfo[key], dict):
                    for subkey in fileInfo[key]:
                        self.assertEqual(provenance['file'][0][key][subkey],
                                         fileInfo[key][subkey])
                else:
                    self.assertEqual(provenance['file'][0][key], fileInfo[key])
def _getProvenance(self, item, user, version=None, resource='item',
checkOk=True):
params = {}
if version is not None:
params = {'version': version}
resp = self.request(
path='/%s/%s/provenance' % (resource, item['_id']),
method='GET', user=user, type='application/json', params=params)
if checkOk:
self.assertStatusOk(resp)
return resp
def _getProvenanceAfterMetadata(self, item, meta, user):
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=user, body=json.dumps(meta),
type='application/json')
self.assertStatusOk(resp)
return self._getProvenance(item, user)
    def testProvenanceItemMetadata(self):
        """
        Test item provenance endpoint with metadata and basic changes.

        Walks an item through a series of metadata edits, a rename, and a
        copy, asserting after each step that the provenance version number
        increments by one and that the recorded old/new values match.
        """
        item = self.item1
        user = self.user
        admin = self.admin
        # check that the first version of the item exists
        # ensure version 1, created by admin user, with creation event
        self._checkProvenance(None, item, 1, admin, 'creation')
        # update meta to {x:y}
        metadata1 = {'x': 'y'}
        resp = self._getProvenanceAfterMetadata(item, metadata1, admin)
        # ensure version 2, updated by admin user, with update event, and meta
        # in provenance matches
        self._checkProvenance(resp, item, 2, admin, 'update',
                              {'new': {'meta': metadata1}})
        # update meta to {} by regular user, we have to send in the key to
        # remove it but check the saved metadata against {}
        metadata2 = {'x': None}
        resp = self._getProvenanceAfterMetadata(item, metadata2, user)
        # ensure version 3, updated by regular user, with update event, and
        # meta in provenance matches
        self._checkProvenance(resp, item, 3, user, 'update',
                              {'old': {'meta': metadata1},
                               'new': {'meta': {}}})
        # update meta to {x:y} by regular user
        metadata3 = {'x': 'y'}
        resp = self._getProvenanceAfterMetadata(item, metadata3, user)
        # ensure version 4, updated by regular user, with update event, and
        # meta in provenance matches
        self._checkProvenance(resp, item, 4, user, 'update',
                              {'old': {'meta': {}},
                               'new': {'meta': metadata3}})
        # update meta to {x:z} by regular user
        metadata4 = {'x': 'z'}
        resp = self._getProvenanceAfterMetadata(item, metadata4, user)
        # ensure version 5, updated by regular user, with update event, and
        # meta in provenance matches
        self._checkProvenance(resp, item, 5, user, 'update',
                              {'old': {'meta': metadata3},
                               'new': {'meta': metadata4}})
        # update meta to {x:z, q:u} by regular user
        metadata5 = {'x': 'z', 'q': 'u'}
        resp = self._getProvenanceAfterMetadata(item, metadata5, user)
        # ensure version 6, updated by regular user, with update event, and
        # meta in provenance matches
        self._checkProvenance(resp, item, 6, user, 'update',
                              {'old': {'meta': metadata4},
                               'new': {'meta': metadata5}})
        # update meta to {q:a} by regular user
        metadata6 = {'x': None, 'q': 'a'}
        resp = self._getProvenanceAfterMetadata(item, metadata6, user)
        # ensure version 7, updated by regular user, with update event, and
        # meta in provenance matches (the None-valued key is dropped)
        self._checkProvenance(resp, item, 7, user, 'update',
                              {'old': {'meta': metadata5},
                               'new': {'meta': {'q': 'a'}}})
        # Change the item name and description
        params = {'name': 'Renamed object', 'description': 'New description'}
        resp = self.request(path='/item/%s' % item['_id'], method='PUT',
                            user=admin, params=params)
        self.assertStatusOk(resp)
        # lowerName is derived server-side from the name, so include it in
        # the expected 'new' values
        params['lowerName'] = params['name'].lower()
        self._checkProvenance(None, item, 8, admin, 'update', {'new': params})
        # Copy the item and check that we marked it as copied
        params = {'name': 'Copied object'}
        resp = self.request(path='/item/%s/copy' % item['_id'],
                            method='POST', user=admin, params=params)
        self.assertStatusOk(resp)
        newItem = resp.json
        # version 9 on the copy implies the source's history was carried
        # over, with a final 'copy' event recording the source item's id
        self._checkProvenance(None, newItem, 9, admin, 'copy',
                              {'originalId': str(item['_id'])})
    def testProvenanceItemFiles(self):
        """
        Test item provenance when adding, modifying, and deleting files.

        File events are recorded against the owning item's provenance, with a
        'file' list describing the affected file's old and new attributes.
        """
        item = self.item1
        admin = self.admin
        # Test adding a new file to an existing item
        fileData1 = 'Hello world'
        fileData2 = 'Hello world, again'
        fileName1 = 'helloWorld.txt'
        fileName2 = 'helloWorldEdit.txt'
        # initiate the upload
        resp = self.request(
            path='/file', method='POST', user=admin, params={
                'parentType': 'item',
                'parentId': item['_id'],
                'name': fileName1,
                'size': len(fileData1),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)
        uploadId = resp.json['_id']
        # send the whole file body as a single chunk to finish the upload
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', fileName1, fileData1)]
        resp = self.multipartRequest(
            path='/file/chunk', user=admin, fields=fields, files=files)
        self.assertStatusOk(resp)
        file1 = resp.json
        self._checkProvenance(None, item, 2, admin, 'fileAdded',
                              fileInfo={'fileId': str(file1['_id']),
                                        'new': {'mimeType': 'text/plain',
                                                'size': len(fileData1),
                                                'name': fileName1}})
        # Edit the file name
        resp = self.request(path='/file/%s' % file1['_id'], method='PUT',
                            user=admin, params={'name': fileName2})
        self.assertStatusOk(resp)
        self._checkProvenance(None, item, 3, admin, 'fileUpdate',
                              fileInfo={'fileId': str(file1['_id']),
                                        'old': {'name': fileName1},
                                        'new': {'name': fileName2}})
        # Reupload the file
        resp = self.request(path='/file/%s/contents' % file1['_id'],
                            method='PUT', user=admin,
                            params={'size': len(fileData2)})
        self.assertStatusOk(resp)
        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', fileName1, fileData2)]
        resp = self.multipartRequest(
            path='/file/chunk', user=admin, fields=fields, files=files)
        self.assertStatusOk(resp)
        # reuploading contents must preserve the file id
        self.assertEqual(file1['_id'], resp.json['_id'])
        self._checkProvenance(None, item, 4, admin, 'fileUpdate',
                              fileInfo={'fileId': str(file1['_id']),
                                        'old': {'size': len(fileData1)},
                                        'new': {'size': len(fileData2)}})
        # Delete the file
        resp = self.request(path='/file/%s' % file1['_id'],
                            method='DELETE', user=admin)
        self.assertStatusOk(resp)
        self._checkProvenance(None, item, 5, admin, 'fileRemoved',
                              fileInfo={'fileId': str(file1['_id']),
                                        'old': {'size': len(fileData2),
                                                'name': fileName2}})
    def testProvenanceFolder(self):
        """
        Test folder provenance, including turning off and on the provenance
        handling of folders.

        Also exercises version addressing: positive versions, negative
        (from-the-end) indexing, the special 'all' version, and an invalid
        version string.
        """
        folder1 = self.folder1
        # note: despite the name, this test runs everything as the admin user
        user = self.admin
        # check that the first version of the folder provenance exists
        self._checkProvenance(None, folder1, 1, user, 'creation',
                              resource='folder')
        # Edit the folder and check again
        params1 = {'name': 'Renamed folder', 'description': 'New description'}
        resp = self.request(path='/folder/%s' % folder1['_id'],
                            method='PUT', user=user, params=params1)
        self.assertStatusOk(resp)
        # lowerName is derived server-side from the name
        params1['lowerName'] = params1['name'].lower()
        self._checkProvenance(None, folder1, 2, user, 'update',
                              {'new': params1}, resource='folder')
        # Turn off folder provenance and make sure asking for it fails
        self.model('setting').set(
            constants.PluginSettings.PROVENANCE_RESOURCES, 'setting')
        resp = self._getProvenance(folder1, user, resource='folder',
                                   checkOk=False)
        self.assertStatus(resp, 400)
        # While folder provenance is off, create a second folder and edit the
        # first folder
        params2 = {'name': 'Renamed Again', 'description': 'Description 2'}
        resp = self.request(path='/folder/%s' % folder1['_id'],
                            method='PUT', user=user, params=params2)
        self.assertStatusOk(resp)
        params2['lowerName'] = params2['name'].lower()
        folder2 = {
            'parent': self.coll1,
            'parentType': 'collection',
            'name': 'Private test folder',
            'creator': self.admin
        }
        folder2 = self.model('folder').createFolder(**folder2)
        # Turn back on folder provenance and check that it didn't record the
        # changes we made.
        self.model('setting').set(
            constants.PluginSettings.PROVENANCE_RESOURCES, 'folder,setting')
        self._checkProvenance(None, folder1, 2, user, 'update',
                              {'new': params1}, resource='folder')
        # Changing folder1 again should now show this change, and the old value
        # should show the gap in the data
        params3 = {'name': 'Renamed C', 'description': 'Description 3'}
        resp = self.request(path='/folder/%s' % folder1['_id'],
                            method='PUT', user=user, params=params3)
        self.assertStatusOk(resp)
        params3['lowerName'] = params3['name'].lower()
        self._checkProvenance(None, folder1, 3, user, 'update',
                              {'old': params2, 'new': params3},
                              resource='folder')
        # The new folder should have no provenance
        resp = self._getProvenance(folder2, user, resource='folder')
        self.assertEqual(resp.json['resourceId'], str(folder2['_id']))
        self.assertIsNone(resp.json['provenance'])
        # Edit the new folder; it should show the unknown history followed by
        # the edit
        params4 = {'description': 'Folder 2 Description'}
        resp = self.request(path='/folder/%s' % folder2['_id'],
                            method='PUT', user=user, params=params4)
        self.assertStatusOk(resp)
        resp = self._getProvenance(folder2, user, 1, resource='folder')
        self._checkProvenance(resp, folder2, 1, user, 'unknownHistory',
                              resource='folder')
        self._checkProvenance(None, folder2, 2, user, 'update',
                              {'new': params4}, resource='folder')
        # We should also see the initial history using negative indexing
        resp = self._getProvenance(folder2, user, -2, resource='folder')
        self._checkProvenance(resp, folder2, 1, user, 'unknownHistory',
                              resource='folder')
        # We should be able to get the entire history using 'all'
        resp = self._getProvenance(folder2, user, 'all', resource='folder')
        self.assertEqual(resp.json['resourceId'], str(folder2['_id']))
        self.assertEqual(len(resp.json['provenance']), 2)
        self.assertEqual(resp.json['provenance'][0]['eventType'],
                         'unknownHistory')
        self.assertEqual(resp.json['provenance'][1]['eventType'], 'update')
        # We should get an error if we ask for a nonsense version
        resp = self._getProvenance(folder2, user, 'not_a_version',
                                   resource='folder', checkOk=False)
        self.assertStatus(resp, 400)
    def testProvenanceSetting(self):
        """
        Test that the PROVENANCE_RESOURCES setting binds and unbinds the
        plugin's save-event handlers for the listed resource types.

        Item provenance is always tracked, so its handler must remain bound
        even when the setting is blank.
        """
        # After trying to set this set, only some of them should have events
        self.model('setting').set(
            constants.PluginSettings.PROVENANCE_RESOURCES,
            'file,notification,unknown')
        # expected bound-state per model; 'item' is implicit,
        # 'notification' is explicitly excluded by the plugin
        checkList = {
            'item': True,
            'file': True,
            'notification': False,
            'unknown': True}
        for key in checkList:
            eventName = 'model.%s.save' % key
            # NOTE(review): this peeks at the private events._mapping
            # registry to see whether a handler named 'provenance' is bound
            self.assertTrue((eventName in events._mapping and 'provenance' in
                             [h['name'] for h in events._mapping[eventName]])
                            is checkList[key])
        # Setting a blank should be okay. It should also remove all but item
        # event mappings
        self.model('setting').set(
            constants.PluginSettings.PROVENANCE_RESOURCES, '')
        for key in checkList:
            eventName = 'model.%s.save' % key
            self.assertTrue((eventName in events._mapping and 'provenance' in
                             [h['name'] for h in events._mapping[eventName]])
                            is (key == 'item'))
| opadron/girder | plugins/provenance/plugin_tests/provenance_test.py | Python | apache-2.0 | 17,716 |
require 'spec_helper_acceptance'
# Acceptance tests for the ini_setting type: each context applies a puppet
# manifest against a scratch file under /tmp and checks the resulting ini
# contents (or the expected failure message).
describe 'ini_setting resource' do
  # Clean up any ini files the examples left behind.
  after :all do
    shell("rm /tmp/*.ini", :acceptable_exit_codes => [0,1])
  end

  # Applies the manifest +pp+ twice (asserting idempotency), then checks that
  # the file at +path+ exists and contains +content+.
  shared_examples 'has_content' do |path,pp,content|
    before :all do
      shell("rm #{path}", :acceptable_exit_codes => [0,1])
    end
    after :all do
      shell("cat #{path}", :acceptable_exit_codes => [0,1])
      shell("rm #{path}", :acceptable_exit_codes => [0,1])
    end

    it 'applies the manifest twice with no stderr' do
      expect(apply_manifest(pp, :catch_failures => true).stderr).to eq("")
      expect(apply_manifest(pp, :catch_changes => true).stderr).to eq("")
    end

    describe file(path) do
      it { should be_file }
      it { should contain(content) }
    end
  end

  # Applies the manifest +pp+ expecting a failure matching +error+ and
  # asserts that the file at +path+ was never created.
  shared_examples 'has_error' do |path,pp,error|
    before :all do
      shell("rm #{path}", :acceptable_exit_codes => [0,1])
    end
    after :all do
      shell("cat #{path}", :acceptable_exit_codes => [0,1])
      shell("rm #{path}", :acceptable_exit_codes => [0,1])
    end

    it 'applies the manifest and gets a failure message' do
      expect(apply_manifest(pp, :expect_failures => true).stderr).to match(error)
    end

    describe file(path) do
      it { should_not be_file }
    end
  end

  describe 'ensure parameter' do
    # Settings can live in a named section or at the global (empty-section)
    # top of the file.
    context '=> present for global and section' do
      pp = <<-EOS
      ini_setting { 'ensure => present for section':
        ensure => present,
        path => '/tmp/ini_setting.ini',
        section => 'one',
        setting => 'two',
        value => 'three',
      }
      ini_setting { 'ensure => present for global':
        ensure => present,
        path => '/tmp/ini_setting.ini',
        section => '',
        setting => 'four',
        value => 'five',
      }
      EOS

      it 'applies the manifest twice with no stderr' do
        expect(apply_manifest(pp, :catch_failures => true).stderr).to eq("")
        expect(apply_manifest(pp, :catch_changes => true).stderr).to eq("")
      end

      describe file('/tmp/ini_setting.ini') do
        it { should be_file }
        it { should contain("four = five\n[one]\ntwo = three") }
      end
    end

    # Removing one key must leave the section header and other keys intact.
    context '=> absent for key/value' do
      before :all do
        shell('echo -e "four = five\n[one]\ntwo = three" > /tmp/ini_setting.ini')
      end
      pp = <<-EOS
      ini_setting { 'ensure => absent for key/value':
        ensure => absent,
        path => '/tmp/ini_setting.ini',
        section => 'one',
        setting => 'two',
        value => 'three',
      }
      EOS

      it 'applies the manifest twice with no stderr' do
        expect(apply_manifest(pp, :catch_failures => true).stderr).to eq("")
        expect(apply_manifest(pp, :catch_changes => true).stderr).to eq("")
      end

      describe file('/tmp/ini_setting.ini') do
        it { should be_file }
        it { should contain('four = five') }
        it { should contain('[one]') }
        it { should_not contain('two = three') }
      end
    end

    # Whole-section removal is not implemented by the type; kept pending.
    context '=> absent for section', :pending => "cannot ensure absent on a section" do
      before :all do
        shell('echo -e "four = five\n[one]\ntwo = three" > /tmp/ini_setting.ini')
      end
      after :all do
        shell("cat /tmp/ini_setting.ini", :acceptable_exit_codes => [0,1])
        shell("rm /tmp/ini_setting.ini", :acceptable_exit_codes => [0,1])
      end
      pp = <<-EOS
      ini_setting { 'ensure => absent for section':
        ensure => absent,
        path => '/tmp/ini_setting.ini',
        section => 'one',
      }
      EOS

      it 'applies the manifest twice with no stderr' do
        expect(apply_manifest(pp, :catch_failures => true).stderr).to eq("")
        expect(apply_manifest(pp, :catch_changes => true).stderr).to eq("")
      end

      describe file('/tmp/ini_setting.ini') do
        it { should be_file }
        it { should contain('four = five') }
        it { should_not contain('[one]') }
        it { should_not contain('two = three') }
      end
    end

    # Removing a global key must not touch keys inside sections.
    context '=> absent for global' do
      before :all do
        shell('echo -e "four = five\n[one]\ntwo = three" > /tmp/ini_setting.ini')
      end
      after :all do
        shell("cat /tmp/ini_setting.ini", :acceptable_exit_codes => [0,1])
        shell("rm /tmp/ini_setting.ini", :acceptable_exit_codes => [0,1])
      end
      pp = <<-EOS
      ini_setting { 'ensure => absent for global':
        ensure => absent,
        path => '/tmp/ini_setting.ini',
        section => '',
        setting => 'four',
        value => 'five',
      }
      EOS

      it 'applies the manifest twice with no stderr' do
        expect(apply_manifest(pp, :catch_failures => true).stderr).to eq("")
        expect(apply_manifest(pp, :catch_changes => true).stderr).to eq("")
      end

      describe file('/tmp/ini_setting.ini') do
        it { should be_file }
        it { should_not contain('four = five') }
        it { should contain('[one]') }
        it { should contain('two = three') }
      end
    end
  end

  describe 'section, setting, value parameters' do
    # Valid parameter combinations and the ini content they should produce.
    {
      "section => 'test', setting => 'foo', value => 'bar'," => "[test]\nfoo = bar",
      "section => 'more', setting => 'baz', value => 'quux'," => "[more]\nbaz = quux",
      "section => '', setting => 'top', value => 'level'," => "top = level",
    }.each do |parameter_list, content|
      context parameter_list do
        pp = <<-EOS
        ini_setting { "#{parameter_list}":
          ensure => present,
          path => '/tmp/ini_setting.ini',
          #{parameter_list}
        }
        EOS

        it_behaves_like 'has_content', '/tmp/ini_setting.ini', pp, content
      end
    end

    # Missing-parameter combinations and the validation errors they should
    # raise; pending until the type performs this error checking.
    {
      "section => 'test'," => /setting is a required.+value is a required/,
      "setting => 'foo', value => 'bar'," => /section is a required/,
      "section => 'test', setting => 'foo'," => /value is a required/,
      "section => 'test', value => 'bar'," => /setting is a required/,
      "value => 'bar'," => /section is a required.+setting is a required/,
      "setting => 'foo'," => /section is a required.+value is a required/,
    }.each do |parameter_list, error|
      context parameter_list, :pending => 'no error checking yet' do
        pp = <<-EOS
        ini_setting { "#{parameter_list}":
          ensure => present,
          path => '/tmp/ini_setting.ini',
          #{parameter_list}
        }
        EOS

        it_behaves_like 'has_error', '/tmp/ini_setting.ini', pp, error
      end
    end
  end

  describe 'path parameter' do
    # Any fully-qualified path should work.
    [
      "/tmp/one.ini",
      "/tmp/two.ini",
      "/tmp/three.ini",
    ].each do |path|
      context "path => #{path}" do
        pp = <<-EOS
        ini_setting { 'path => #{path}':
          ensure => present,
          section => 'one',
          setting => 'two',
          value => 'three',
          path => '#{path}',
        }
        EOS

        it_behaves_like 'has_content', path, pp, "[one]\ntwo = three"
      end
    end

    # A relative path must be rejected.
    context "path => foo" do
      pp = <<-EOS
      ini_setting { 'path => foo':
        ensure => present,
        section => 'one',
        setting => 'two',
        value => 'three',
        path => 'foo',
      }
      EOS

      it_behaves_like 'has_error', 'foo', pp, /must be fully qualified/
    end
  end

  describe 'key_val_separator parameter' do
    # Accepted separators (default is ' = ') and the resulting line format.
    {
      "" => "two = three",
      "key_val_separator => '='," => "two=three",
      "key_val_separator => ' = '," => "two = three",
    }.each do |parameter, content|
      context "with \"#{parameter}\" makes \"#{content}\"" do
        pp = <<-EOS
        ini_setting { "with #{parameter} makes #{content}":
          ensure => present,
          section => 'one',
          setting => 'two',
          value => 'three',
          path => '/tmp/key_val_separator.ini',
          #{parameter}
        }
        EOS

        it_behaves_like 'has_content', '/tmp/key_val_separator.ini', pp, content
      end
    end

    # Separators without exactly one '=' must be rejected.
    {
      "key_val_separator => ''," => /must contain exactly one/,
      "key_val_separator => ','," => /must contain exactly one/,
      "key_val_separator => ' '," => /must contain exactly one/,
      "key_val_separator => ' == '," => /must contain exactly one/,
    }.each do |parameter, error|
      context "with \"#{parameter}\" raises \"#{error}\"" do
        pp = <<-EOS
        ini_setting { "with #{parameter} raises #{error}":
          ensure => present,
          section => 'one',
          setting => 'two',
          value => 'three',
          path => '/tmp/key_val_separator.ini',
          #{parameter}
        }
        EOS

        it_behaves_like 'has_error', '/tmp/key_val_separator.ini', pp, error
      end
    end
  end
end
| justinstoller/puppetlabs-inifile | spec/acceptance/ini_setting_spec.rb | Ruby | apache-2.0 | 8,942 |
/*
* Copyright 2014 the original author or authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package grails.plugin.console.charts.client.gin;
import com.gwtplatform.mvp.client.gin.AbstractPresenterModule;
import grails.plugin.console.charts.client.application.ApplicationDesktopModule;
/**
* @author <a href='mailto:donbeave@gmail.com'>Alexey Zhokhov</a>
*/
public class DesktopModule extends AbstractPresenterModule {
    /**
     * Installs the desktop flavour of the application module so GIN wires up
     * the desktop presenters for this form factor.
     */
    @Override
    protected void configure() {
        install(new ApplicationDesktopModule());
    }
} | donbeave/grails-console-charts | src/gwt/grails/plugin/console/charts/client/gin/DesktopModule.java | Java | apache-2.0 | 1,050 |
/*
* In the name of Allah
* This file is part of The "Quran Teacher or Learn Arabic" Project. Use is subject to
* license terms.
*
* @author: Fazle Rabbi Rahat
*
*/
package QuranTeacher;
import javax.swing.ButtonGroup;
import javax.swing.JComboBox;
import javax.swing.JPanel;
import java.awt.GridBagLayout;
import java.awt.Color;
import javax.swing.JLabel;
import java.awt.GridBagConstraints;
import java.awt.Font;
import java.awt.Insets;
import javax.swing.JRadioButton;
import java.awt.event.ItemListener;
import java.awt.event.ItemEvent;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;
import QuranTeacher.Preferences.AudioPreferences;
import javax.swing.JTextArea;
/**
 * Preferences panel that lets the user switch Quranic recitation audio on or
 * off and choose the reciter (Qari). The available reciters and their
 * download URLs are read from the bundled {@code files/AudioLinks} resource.
 * It does not extend the PreferencesPanel class.
 */
public class AudioPreferencesPanel extends JPanel {

    private static final long serialVersionUID = 1L;

    /** Display names of the available reciters, parsed from files/AudioLinks. */
    private List<String> QariNames = new ArrayList<>();
    /**
     * Download URL per reciter, index-aligned with QariNames. Static so that
     * {@link #getAudioSourceLink(int)} is usable without a panel instance;
     * note it is (re)filled each time a panel is constructed.
     */
    private static List<String> audioSourceLinks = new ArrayList<>();

    /** Preferences object this panel reads and mutates. */
    private AudioPreferences audioSetupPrefs;
    private JRadioButton rdbtnOn;
    private JRadioButton rdbtnOff;
    private JComboBox<String> comboBox;

    /**
     * Create the panel.
     *
     * @param audioPrefs the preferences backing this panel; listeners write
     *                   the user's choices straight into it
     */
    public AudioPreferencesPanel(final AudioPreferences audioPrefs) {
        this.audioSetupPrefs = audioPrefs;
        setBackground(Color.DARK_GRAY);
        setForeground(Color.RED);
        GridBagLayout gridBagLayout = new GridBagLayout();
        gridBagLayout.columnWidths = new int[]{0, 0, 0, 0};
        gridBagLayout.rowHeights = new int[]{0, 0, 0, 0, 32, 0};
        gridBagLayout.columnWeights = new double[]{0.0, 1.0, 0.0, Double.MIN_VALUE};
        gridBagLayout.rowWeights = new double[]{0.0, 0.0, 0.0, 0.0, Double.MIN_VALUE, 1.0};
        setLayout(gridBagLayout);

        JLabel lblHeader = new JLabel("Recitation Preferences");
        lblHeader.setForeground(Color.MAGENTA);
        lblHeader.setFont(new Font("Tahoma", Font.PLAIN, 18));
        GridBagConstraints gbc_lblHeader = new GridBagConstraints();
        gbc_lblHeader.gridwidth = 4;
        gbc_lblHeader.insets = new Insets(0, 0, 5, 0);
        gbc_lblHeader.gridx = 0;
        gbc_lblHeader.gridy = 0;
        add(lblHeader, gbc_lblHeader);

        JLabel lblAudioState = new JLabel("Recitation State :");
        lblAudioState.setFont(new Font("Tahoma", Font.PLAIN, 18));
        lblAudioState.setForeground(Color.ORANGE);
        GridBagConstraints gbc_lblAudioState = new GridBagConstraints();
        gbc_lblAudioState.insets = new Insets(0, 0, 5, 5);
        gbc_lblAudioState.gridx = 0;
        gbc_lblAudioState.gridy = 2;
        add(lblAudioState, gbc_lblAudioState);

        rdbtnOn = new JRadioButton("ON");
        rdbtnOn.setFont(new Font("Tahoma", Font.PLAIN, 18));
        rdbtnOn.addItemListener(new ItemListener() {
            public void itemStateChanged(ItemEvent e) {
                // SELECTED/DESELECTED both fire; map directly onto the pref.
                audioSetupPrefs.setAudioON(e.getStateChange() == ItemEvent.SELECTED);
            }
        });
        rdbtnOn.setBackground(Color.DARK_GRAY);
        rdbtnOn.setForeground(Color.GREEN);
        GridBagConstraints gbc_rdbtnOn = new GridBagConstraints();
        gbc_rdbtnOn.insets = new Insets(0, 0, 5, 5);
        gbc_rdbtnOn.gridx = 1;
        gbc_rdbtnOn.gridy = 2;
        add(rdbtnOn, gbc_rdbtnOn);

        rdbtnOff = new JRadioButton("Off");
        rdbtnOff.setFont(new Font("Tahoma", Font.PLAIN, 18));
        rdbtnOff.setBackground(Color.DARK_GRAY);
        rdbtnOff.setForeground(Color.GREEN);
        GridBagConstraints gbc_rdbtnOff = new GridBagConstraints();
        gbc_rdbtnOff.insets = new Insets(0, 0, 5, 5);
        gbc_rdbtnOff.gridx = 2;
        gbc_rdbtnOff.gridy = 2;
        add(rdbtnOff, gbc_rdbtnOff);

        ButtonGroup buttonGroup = new ButtonGroup();
        buttonGroup.add(rdbtnOn);
        buttonGroup.add(rdbtnOff);

        JLabel lblSelectQari = new JLabel("Select Qari :");
        lblSelectQari.setForeground(Color.ORANGE);
        lblSelectQari.setFont(new Font("Tahoma", Font.PLAIN, 18));
        GridBagConstraints gbc_lblSelectQari = new GridBagConstraints();
        gbc_lblSelectQari.anchor = GridBagConstraints.WEST;
        gbc_lblSelectQari.insets = new Insets(0, 0, 5, 5);
        gbc_lblSelectQari.gridx = 0;
        gbc_lblSelectQari.gridy = 4;
        add(lblSelectQari, gbc_lblSelectQari);

        storeQariSource();
        // Typed combo box instead of the previous raw JComboBox; this also
        // removes the need for @SuppressWarnings on the constructor.
        comboBox = new JComboBox<>(QariNames.toArray(new String[0]));
        comboBox.addItemListener(new ItemListener() {
            public void itemStateChanged(ItemEvent e) {
                audioSetupPrefs.setAudioSourceIndex(comboBox.getSelectedIndex());
            }
        });
        comboBox.setFont(new Font("Tahoma", Font.PLAIN, 16));
        if (!QariNames.isEmpty()) {
            comboBox.setSelectedIndex(clampedSourceIndex());
        }
        GridBagConstraints gbc_comboBox = new GridBagConstraints();
        gbc_comboBox.gridwidth = 0;
        gbc_comboBox.insets = new Insets(0, 0, 5, 0);
        gbc_comboBox.fill = GridBagConstraints.HORIZONTAL;
        gbc_comboBox.gridx = 1;
        gbc_comboBox.gridy = 4;
        add(comboBox, gbc_comboBox);

        JTextArea txtrNote = new JTextArea();
        txtrNote.setFont(new Font("Monospaced", Font.PLAIN, 16));
        txtrNote.setEditable(false);
        txtrNote.setLineWrap(true);
        txtrNote.setWrapStyleWord(true);
        txtrNote.setForeground(Color.PINK);
        txtrNote.setBackground(Color.DARK_GRAY);
        // "downoaded" typo fixed in the user-visible note.
        txtrNote.setText("Note: If you change Qari name, it will take effect only for the \"next to be downloaded\" recitation files. So, the Qari for the previously downloaded files will not change. ");
        GridBagConstraints gbc_txtrNote = new GridBagConstraints();
        gbc_txtrNote.gridwidth = 0;
        gbc_txtrNote.insets = new Insets(0, 0, 0, 5);
        gbc_txtrNote.fill = GridBagConstraints.BOTH;
        gbc_txtrNote.gridx = 0;
        gbc_txtrNote.gridy = 5;
        add(txtrNote, gbc_txtrNote);

        updateButtonGroup();
    }

    /**
     * Returns the stored audio source index, falling back to 0 when it is
     * outside the valid range of the Qari list.
     *
     * <p>Bug fix: the previous check used {@code k > QariNames.size()}, which
     * let an index equal to the list size through and made
     * {@code setSelectedIndex} throw {@code IllegalArgumentException}.</p>
     */
    private int clampedSourceIndex() {
        int k = audioSetupPrefs.getAudioSourceIndex();
        if (k < 0 || k >= QariNames.size()) {
            k = 0;
        }
        return k;
    }

    /** Selects the radio button matching the stored on/off preference. */
    private void updateButtonGroup() {
        if (audioSetupPrefs.isAudioON()) {
            rdbtnOn.setSelected(true);
        } else {
            rdbtnOff.setSelected(true);
        }
    }

    /**
     * Parses the bundled files/AudioLinks resource, filling QariNames from
     * "name=..." lines and audioSourceLinks from "link=..." lines.
     */
    private void storeQariSource() {
        InputStream inStream = this.getClass().getResourceAsStream("files/AudioLinks");
        if (inStream == null) {
            // Resource missing from the jar; leave both lists empty instead
            // of throwing a NullPointerException below.
            System.err.println("AudioLinks resource not found");
            return;
        }
        // try-with-resources: the old code leaked the reader when readLine threw.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(inStream))) {
            String text;
            while ((text = reader.readLine()) != null) {
                if (text.startsWith("name")) {
                    QariNames.add(text.split("=", 2)[1]);
                } else if (text.startsWith("link")) {
                    // split with limit 2 so URLs containing '=' are kept whole
                    audioSourceLinks.add(text.split("=", 2)[1]);
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Returns the download URL of the reciter at the given index.
     *
     * @param index index into the list parsed from files/AudioLinks
     */
    public static String getAudioSourceLink(int index) {
        return audioSourceLinks.get(index);
    }

    /** Re-syncs the widgets with the values currently stored in the prefs. */
    public void updateSetupPanel() {
        updateButtonGroup();
        if (!QariNames.isEmpty()) {
            comboBox.setSelectedIndex(clampedSourceIndex());
        }
    }
}
| adeelahmedkhanabbasi/QuranTeacher | QuranTeacher/src/QuranTeacher/ AudioPreferencesPanel.java | Java | apache-2.0 | 6,792 |
/*分页JS*/
var rsss = false;
$(function () {
    // Give the left nav a minimum height equal to its natural content
    // height, then keep its actual height in sync with the window.
    $(".leftNav_side").css("min-height", $(".leftNav_side").height());
    $(window).resize(function () {
        $(".leftNav_side").height($(window).height());
    }).trigger("resize");
    // Toggle first-level entries in the left navigation.
    $(".Nav_lvl dt").click(function () {
        $(this).parent().siblings().find("dd").hide();
        $(this).siblings().slideDown(300);
    });
    // Highlight the selected second-level entry.
    $(".Nav_lvl dd").click(function () {
        $(".Nav_lvl dd").removeClass();
        $(this).addClass("Nav_lvl_dd_on");
    });
    // Highlight the selected top navigation tab.
    $(".topNav_ul li").click(function () {
        $(this).addClass("topNav_li_on").siblings().removeClass();
    });
    if (Number($("[name='totalCount']").val()) > 0) {
        var pages = [],
            totalPage = Number($("[name='totalPage']").val()),
            totalCount = Number($("[name='totalCount']").val()),
            currentPage = Number($("[name='pageNum']").val()) == 0 ? 1 : Number($("[name='pageNum']").val());
        pages.push(' <th colspan="100"><i>当前第' + currentPage + '页/共' + totalPage + '页</i><i>共' + totalCount + '条记录</i>');
        if (currentPage == 1) {
            pages.push(' <span>首页</span><span>上一页</span>');
        } else {
            pages.push(' <a class="first" href="#">首页</a><a class="prev" href="#">上一页</a>');
        }
        // BUG FIX: compute the window of numbered links clamped to
        // [1, totalPage]. The previous branches assumed totalPage > 10 and
        // emitted negative page numbers when totalPage was 6..9 and
        // currentPage >= 5 (e.g. totalPage=8, currentPage=6 gave pages -1..8).
        var firstLink, lastLink;
        if (totalPage <= 10) {
            firstLink = 1;
            lastLink = totalPage;
        } else if (currentPage < 5) {
            firstLink = 1;
            lastLink = 10;
        } else if (currentPage >= totalPage - 5) {
            firstLink = totalPage - 9;
            lastLink = totalPage;
        } else {
            firstLink = currentPage - 5;
            lastLink = currentPage + 4;
        }
        for (var i = firstLink; i <= lastLink; i++) {
            if (currentPage == i) {
                pages.push('<span class="sel">' + i + '</span>');
            } else {
                pages.push('<a href="#">' + i + '</a>');
            }
        }
        if (currentPage < totalPage) {
            pages.push('<a class="next" href="#">下一页</a><a class="last" href="#">尾页</a>');
        } else {
            pages.push('<span>下一页</span><span>尾页</span>');
        }
        pages.push('<input type="text" name="page" value="' + currentPage + '"/>');
        pages.push('<input type="button" value="跳转" class="btn_violet" />');
        // Numbered links only: first/last are excluded here as well (the old
        // selector bound this handler to them too, briefly writing their link
        // text into currentPage before their own handlers overwrote it).
        $(".pager").html(pages.join(""))
            .find("a:not(.next):not(.prev):not(.first):not(.last)").click(function () {
                $("[name='currentPage']").val($(this).text());
                $("#pagerForm").submit();
            });
        $(".pager").find("a.first").click(function () {
            $("[name='currentPage']").val(1);
            $("#pagerForm").submit();
        });
        $(".pager").find("a.prev").click(function () {
            // Never go below page 1 (pages are 1-based; the old code
            // clamped to 0 and also leaked `num` as a global).
            var num = Number($("[name='currentPage']").val()) - 1;
            $("[name='currentPage']").val(num < 1 ? 1 : num);
            $("#pagerForm").submit();
        });
        $(".pager").find("a.next").click(function () {
            var num = Number($("[name='currentPage']").val()) + 1;
            $("[name='currentPage']").val(num > totalPage ? totalPage : num);
            $("#pagerForm").submit();
        });
        $(".pager").find("a.last").click(function () {
            $("[name='currentPage']").val(totalPage);
            $("#pagerForm").submit();
        });
        $(".pager").find("input.btn_violet").click(function () {
            // Clamp the manually entered page number into the valid range;
            // non-numeric input falls back to page 1.
            var num = Number($("[name='page']").val());
            if (isNaN(num) || num < 1) {
                num = 1;
            } else if (num > totalPage) {
                num = totalPage;
            }
            $("[name='currentPage']").val(num);
            $("#pagerForm").submit();
        });
    }
});
| iminto/baicai | src/main/webapp/manage/js/navleft.js | JavaScript | apache-2.0 | 4,114 |
import {
store
} from '../store.js';
import {
selectGameCurrentState,
selectGameChest,
selectGameName
} from '../selectors.js';
import {
deepCopy,
getProperty,
setPropertyInClone
} from '../util.js';
// Action type constants, exported so the game reducer can switch on them.
export const UPDATE_GAME_ROUTE = 'UPDATE_GAME_ROUTE';
export const UPDATE_GAME_STATIC_INFO = "UPDATE_GAME_STATIC_INFO";
export const UPDATE_GAME_CURRENT_STATE = "UPDATE_GAME_CURRENT_STATE";
// Action creator for a game-page route change. pageExtra is the URL tail
// ("<name>/<id>" with an optional trailing slash); returns null and warns
// when it does not have exactly those two segments.
export const updateGameRoute = (pageExtra) => {
    const segments = pageExtra.split("/");
    // A trailing slash yields an empty final segment; drop it.
    if (!segments[segments.length - 1]) segments.pop();
    if (segments.length != 2) {
        console.warn("URL for game didn't have expected number of pieces");
        return null;
    }
    const [name, id] = segments;
    return {
        type: UPDATE_GAME_ROUTE,
        name: name,
        id: id,
    }
}
// Action creator carrying the game's static metadata (deck chest, player
// roster, open/visible flags, ownership) that does not change during play.
export const updateGameStaticInfo = (chest, playersInfo, hasEmptySlots, open, visible, isOwner) => {
    return {
        type: UPDATE_GAME_STATIC_INFO,
        chest: chest,
        playersInfo: playersInfo,
        hasEmptySlots: hasEmptySlots,
        open: open,
        visible: visible,
        isOwner: isOwner
    }
}
//currentState should be the unexpanded state (as passed in from server).
//timerInfos should be game.ActiveTimers. originalWallClockTime should be the
//time the state was received from the server (so we can compute how much
//time has elapsed from what the server reported). This installs the
//currentState, and also registers any timers found in the state so that
//their TimeLeft ticks down automatically.
export const installGameState = (currentState, timerInfos, originalWallClockTime) => (dispatch, getState) => {
    const reduxState = getState();
    const chest = selectGameChest(reduxState);
    const gameName = selectGameName(reduxState);
    const [expandedState, pathsToTick] = expandState(currentState, timerInfos, chest, gameName);
    dispatch(updateGameState(expandedState, pathsToTick, originalWallClockTime));
    if (pathsToTick.length > 0) window.requestAnimationFrame(doTick);
}
// Internal action creator: stores the already-expanded state plus the timer
// paths and the wall-clock time the raw state arrived at.
const updateGameState = (expandedCurrentState, pathsToTick, originalWallClockTime) => ({
    type: UPDATE_GAME_CURRENT_STATE,
    currentState: expandedCurrentState,
    pathsToTick: pathsToTick,
    originalWallClockTime: originalWallClockTime
})
// Returns [expandedState, pathsToTick]: a deep copy of currentState where
// every Stack is replaced with real component references, plus the paths of
// any active timers found along the way.
const expandState = (currentState, timerInfos, chest, gameName) => {
    const pathsToTick = [];
    const expanded = deepCopy(currentState);
    expandLeafState(expanded, expanded.Game, ["Game"], pathsToTick, timerInfos, chest, gameName);
    expanded.Players.forEach(function (player, index) {
        expandLeafState(expanded, player, ["Players", index], pathsToTick, timerInfos, chest, gameName);
    });
    return [expanded, pathsToTick];
}
// Expands one leaf sub-state in place. leafState's values are bools,
// numbers, strings, Stacks (objects with a Deck), or Timers (objects with
// IsTimer); stacks and timers are expanded, everything else is left alone.
const expandLeafState = (wholeState, leafState, pathToLeaf, pathsToTick, timerInfos, chest, gameName) => {
    for (const [key, val] of Object.entries(leafState)) {
        // Note: null is typeof "object", so guard on truthiness first.
        if (!val || typeof val != "object") continue;
        if (val.Deck) {
            expandStack(val, wholeState, chest, gameName);
        } else if (val.IsTimer) {
            expandTimer(val, pathToLeaf.concat([key]), pathsToTick, timerInfos);
        }
    }
    // For player sub-states, copy in that player's computed state for
    // convenience. Done after expanding properties.
    if (pathToLeaf && pathToLeaf.length == 2 && pathToLeaf[0] == "Players") {
        const computed = wholeState.Computed;
        if (computed && computed.Players && computed.Players.length) {
            leafState.Computed = computed.Players[pathToLeaf[1]];
        }
    }
}
//Expands stack in place: fills stack.Components with the actual component
//objects named by stack.Indexes, annotating each with its ID, Deck, and
//GameName. Index -1 means "no component"; -2 is the generic-component
//sentinel.
const expandStack = (stack, wholeState, chest, gameName) => {
  if (!stack.Deck) {
    //Meh, I guess it's not a stack
    return;
  }
  let components = Array(stack.Indexes.length).fill(null);
  for (var i = 0; i < stack.Indexes.length; i++) {
    let index = stack.Indexes[i];
    if (index == -1) {
      components[i] = null;
      continue;
    }
    //TODO: this should be a constant
    if (index == -2) {
      //TODO: to handle this appropriately we'd need to know how to
      //produce a GenericComponent for each Deck clientside.
      components[i] = {};
    } else {
      components[i] = componentForDeckAndIndex(stack.Deck, index, wholeState, chest);
      if (!components[i]) {
        //componentForDeckAndIndex returns null for an unknown deck; without
        //this guard the property assignments below would throw a TypeError.
        continue;
      }
    }
    if (stack.IDs) {
      components[i].ID = stack.IDs[i];
    }
    components[i].Deck = stack.Deck;
    components[i].GameName = gameName;
  }
  stack.GameName = gameName;
  stack.Components = components;
}
const expandTimer = (timer, pathToLeaf, pathsToTick, timerInfo) => {
//Always make sure these default to a number so databinding can use them.
timer.TimeLeft = 0;
timer.originalTimeLeft = 0;
if (!timerInfo) return;
let info = timerInfo[timer.ID];
if (!info) return;
timer.TimeLeft = info.TimeLeft;
timer.originalTimeLeft = timer.TimeLeft;
pathsToTick.push(pathToLeaf);
}
const componentForDeckAndIndex = (deckName, index, wholeState, chest) => {
let deck = chest.Decks[deckName];
if (!deck) return null;
let result = {...deck[index]};
if (wholeState && wholeState.Components) {
if (wholeState.Components[deckName]) {
result.DynamicValues = wholeState.Components[deckName][index];
}
}
return result
}
//Animation-frame callback: runs one tick, then reschedules itself while any
//timer paths remain active in the store.
const doTick = () => {
  tick();
  const state = store.getState();
  const remainingPaths = state.game ? state.game.pathsToTick : [];
  if (remainingPaths.length > 0) window.requestAnimationFrame(doTick);
}
//Advances every active timer by recomputing its TimeLeft from the wall-clock
//time elapsed since originalWallClockTime, then dispatches the updated state.
//Timers that have run out are dropped from pathsToTick.
const tick = () => {
  const state = store.getState();
  const currentState = selectGameCurrentState(state);
  if (!currentState) return;
  const pathsToTick = state.game ? state.game.pathsToTick : [];
  const originalWallClockStartTime = state.game ? state.game.originalWallClockTime : 0;
  if (pathsToTick.length == 0) return;
  let newPaths = [];
  //We'll use util.setPropertyInClone, so the newState will diverge from
  //currentState as we write to it, but can start out the same.
  let newState = currentState;
  for (let i = 0; i < pathsToTick.length; i++) {
    let currentPath = pathsToTick[i];
    let timer = getProperty(newState, currentPath);
    let now = Date.now();
    let difference = now - originalWallClockStartTime;
    let result = Math.max(0, timer.originalTimeLeft - difference);
    newState = setPropertyInClone(newState, currentPath.concat(["TimeLeft"]), result);
    //If we still have time to tick on this, then make sure it's still
    //in the list of things to tick.
    //NOTE: this deliberately reads the PRE-update TimeLeft (timer still
    //references the object from before setPropertyInClone), so a timer that
    //just reached 0 gets one final tick, letting the rendered value settle
    //at exactly 0 before the path is dropped.
    if (timer.TimeLeft > 0) {
      newPaths.push(currentPath);
    }
  }
  if (newPaths.length == pathsToTick.length) {
    //If the length of pathsToTick didn't change, don't change it, so that
    //strict equality matches in the new state will work.
    newPaths = pathsToTick;
  }
  store.dispatch(updateGameState(newState, newPaths, originalWallClockStartTime));
}
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/profiler/utils/event_span.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/utils/timespan.h"
namespace tensorflow {
namespace profiler {
namespace {
// Representing a boundary of an event. Two boundaries are generated per
// event (start and end); they are later ordered by CmpEventBoundaries.
struct EventBoundary {
  // Time at this boundary.
  uint64 time_ps;
  // Type of the event.
  EventType type;
  // True if this is the start of the event; False if this is the end.
  bool is_start;
  EventBoundary(uint64 time_ps, EventType type, bool is_start)
      : time_ps(time_ps), type(type), is_start(is_start) {}
};
// Returns true if EventBoundary a should appear before EventBoundary b.
bool CmpEventBoundaries(const EventBoundary& a, const EventBoundary& b) {
if (a.time_ps == b.time_ps) {
if (a.is_start == b.is_start) {
// Puts the higher-priority type before the lower-priority type if they
// have the same time and same boundary type.
return a.type > b.type;
} else {
// Puts the "end" bounary before the "start" boundary if they have the
// same time.
return !a.is_start;
}
}
// In ascending order of time.
return a.time_ps < b.time_ps;
}
// Generates vector of event boundaries from the given overlapped_events.
std::vector<EventBoundary> GenerateEventBoundaries(
const std::vector<EventTypeSpan>& overlapped_events) {
std::vector<EventBoundary> boundaries;
boundaries.reserve(2 * overlapped_events.size());
for (const auto& event : overlapped_events) {
boundaries.push_back(
{event.span.begin_ps(), event.type, /*is_start=*/true});
boundaries.push_back({event.span.end_ps(), event.type, /*is_start=*/false});
}
absl::c_sort(boundaries, CmpEventBoundaries);
return boundaries;
}
// A class to track the highest priority that an event should be assigned.
class PriorityTracker {
private:
// The current maximum priority.
EventType current_max_priority_;
// A count for each possible priority.
std::vector<int64> priority_count_;
public:
PriorityTracker() {
current_max_priority_ = UNKNOWN_TIME;
priority_count_.resize(LAST_EVENT_TYPE + 1, 0);
}
// Updates current_max_priority_ and priority_count_[] given the boundary.
// Returns the new current_max_priority_.
EventType Update(const EventBoundary& boundary) {
EventType event_type = boundary.type;
bool is_start = boundary.is_start;
if (is_start) {
priority_count_[event_type]++;
if (event_type > current_max_priority_) {
current_max_priority_ = event_type;
}
} else {
priority_count_[event_type]--;
if (event_type == current_max_priority_ &&
priority_count_[event_type] == 0) {
// Reduces current_max_priority_ to the first event type (starting from
// the highest priority) that has a non-zero count.
bool found = false;
for (int i = event_type - 1; i >= 0; i--) {
if (priority_count_[i] > 0) {
current_max_priority_ = static_cast<EventType>(i);
found = true;
break;
}
}
if (!found) current_max_priority_ = UNKNOWN_TIME;
}
}
return current_max_priority_;
}
};
std::vector<EventTypeSpan> ToNonOverlappedEvents(
const std::vector<EventTypeSpan>& overlapped_events) {
std::vector<EventBoundary> event_boundaries =
GenerateEventBoundaries(overlapped_events);
std::vector<EventTypeSpan> result;
if (event_boundaries.empty()) return result;
result.reserve(event_boundaries.size());
PriorityTracker priority_tracker;
for (int64 i = 0, end = (event_boundaries.size() - 1); i < end; i++) {
EventType highest_priority = priority_tracker.Update(event_boundaries[i]);
result.push_back({highest_priority, Timespan::FromEndPoints(
event_boundaries[i].time_ps,
event_boundaries[i + 1].time_ps)});
}
return result;
}
// Merges src's markers, events, collectives, and device-memory transfer
// aggregates into dst.
void CombineStepDetails(const StepDetails& src, StepDetails* dst) {
  dst->AppendMarkers(src.Markers());
  dst->AppendEvents(src.Events());
  dst->AppendCollectives(src.Collectives());
  dst->AggregateDeviceMemoryTransfers(src.DeviceMemoryTransfers());
}
// Classifies a device-compute event as 16-bit or 32-bit. Uses the tensor
// shapes when available; otherwise falls back to sniffing the kernel name
// for half-precision hints.
EventType ClassifyDeviceCompute(absl::string_view event_name,
                                absl::string_view tensor_shapes) {
  const bool is_16bit =
      tensor_shapes.empty()
          ? (absl::StrContains(event_name, "half") ||
             absl::StrContains(event_name, "fp16"))
          : absl::StrContains(tensor_shapes, "half");
  return is_16bit ? DEVICE_COMPUTE_16 : DEVICE_COMPUTE_32;
}
// Total number of GenericEventType values; used to verify the display-name
// map below is exhaustive.
constexpr int kNumGenericEventTypes = GenericEventType::kLastGenericEventType -
                                      GenericEventType::kFirstGenericEventType +
                                      1;
using GenericEventTypeStrMap =
    absl::flat_hash_map<GenericEventType, absl::string_view>;
// Returns the lazily-initialized singleton map from GenericEventType to its
// human-readable display name. The map is intentionally leaked (never
// destroyed) to avoid static-destruction-order issues.
const GenericEventTypeStrMap& GetGenericEventTypeStrMap() {
  static const auto* generic_event_type_str_map = new GenericEventTypeStrMap({
      {kDeviceCompute, "Device compute"},
      {kDeviceToDevice, "Device to device"},
      {kDeviceCollectives, "Device collective communication"},
      {kHostCompute, "Host compute"},
      {kHostPrepare, "Kernel launch"},
      {kInput, "Input"},
      {kOutput, "Output"},
      {kCompile, "Compilation"},
      {kAllOthers, "All others"},
  });
  // Debug-only check that every GenericEventType has an entry.
  DCHECK_EQ(generic_event_type_str_map->size(), kNumGenericEventTypes);
  return *generic_event_type_str_map;
}
} // namespace
// Returns the human-readable name for the given generic event type.
// Crashes (flat_hash_map::at) if event_type has no entry in the map.
absl::string_view GetGenericEventTypeStr(GenericEventType event_type) {
  return GetGenericEventTypeStrMap().at(event_type);
}
// Classifies a GPU event by its kernel name: memory copies and NCCL
// collectives are recognized by prefix; everything else is treated as device
// compute (with precision deduced by ClassifyDeviceCompute).
EventType ClassifyGpuEvent(absl::string_view event_name,
                           absl::string_view tensor_shapes) {
  if (absl::StartsWithIgnoreCase(event_name, "MEMCPYHtoD")) {
    return HOST_TO_DEVICE;
  }
  if (absl::StartsWithIgnoreCase(event_name, "MEMCPYDtoH")) {
    return DEVICE_TO_HOST;
  }
  if (absl::StartsWithIgnoreCase(event_name, "MEMCPYDtoD")) {
    return DEVICE_TO_DEVICE;
  }
  if (absl::StartsWithIgnoreCase(event_name, "nccl")) {
    return DEVICE_COLLECTIVES;
  }
  return ClassifyDeviceCompute(event_name, tensor_shapes);
}
// Classifies a CPU event by its name. NOTE: check order matters — an event
// whose name contains "Infeed" must classify as HOST_TO_DEVICE even if it
// would also match a later check, so do not reorder these branches.
EventType ClassifyCpuEvent(absl::string_view event_name, int64 correlation_id,
                           bool has_device) {
  if (absl::StartsWithIgnoreCase(event_name, "MEMCPYHtoD") ||
      absl::StrContains(event_name, "Infeed"))
    return HOST_TO_DEVICE;
  if (absl::StartsWithIgnoreCase(event_name, "MEMCPYHtoH")) return HOST_TO_HOST;
  // TODO(b/150420972): Separate runtime overhead from actual compute for
  // CPU-only.
  // A non-negative correlation_id ties this host event to device activity,
  // so it is counted as launch/preparation work rather than host compute.
  if (has_device &&
      (correlation_id >= 0 ||
       absl::StartsWithIgnoreCase(event_name, "ExecutorState::Process"))) {
    return HOST_PREPARE;
  }
  if (absl::StartsWithIgnoreCase(event_name, "IteratorGetNext"))
    return HOST_WAIT_INPUT;
  return HOST_COMPUTE;
}
// Returns a lowercase snake_case name for the given event type, or
// "unexpected" for any value outside the known enumerators.
std::string PrintEventType(EventType event_type) {
  switch (event_type) {
    case UNKNOWN_TIME: return "unknown_time";
    case HOST_COMPUTE: return "host_compute";
    case HOST_COMPILE: return "host_compile";
    case HOST_TO_HOST: return "host_to_host";
    case HOST_TO_DEVICE: return "host_to_device";
    case HOST_PREPARE: return "host_prepare";
    case DEVICE_COLLECTIVES: return "device_collectives";
    case HOST_WAIT_INPUT: return "host_wait_input";
    case DEVICE_TO_DEVICE: return "device_to_device";
    case DEVICE_TO_HOST: return "device_to_host";
    case DEVICE_COMPUTE_32: return "device_compute_32";
    case DEVICE_COMPUTE_16: return "device_compute_16";
    case DEVICE_WAIT_DEVICE: return "device_wait_device";
    case DEVICE_WAIT_HOST: return "device_wait_host";
    default: return "unexpected";
  }
}
// Renders an EventTypeSpan as "(type, span)" for debugging.
std::string PrintEventTypeSpan(const EventTypeSpan& event_type_span) {
  const std::string type_str = PrintEventType(event_type_span.type);
  const std::string span_str = event_type_span.span.DebugString();
  return absl::StrCat("(", type_str, ", ", span_str, ")");
}
// Returns the display name of the given step-marker type.
absl::string_view PrintStepMarkerType(StepMarkerType type) {
  switch (type) {
    case StepMarkerType::kExplicitHostStepMarker:
      return "ExplicitHostStepMarker";
    case StepMarkerType::kImplicitHostStepMarker:
      return "ImplicitHostStepMarker";
    case StepMarkerType::kDeviceStepMarker:
      return "DeviceStepMarker";
  }
  // All enumerators are handled above; this is reachable only if an invalid
  // value is cast to StepMarkerType. Returning a sentinel avoids undefined
  // behavior from flowing off the end of a value-returning function (and the
  // associated -Wreturn-type warning).
  return "UnknownStepMarkerType";
}
std::string PrintStepMarker(const StepMarker& step_marker) {
return absl::StrCat("(", PrintStepMarkerType(step_marker.type), ", ",
step_marker.event_name, ", ",
step_marker.span.DebugString(), ")");
}
std::string PrintStepEvents(const StepEvents& step_events) {
std::vector<int64> step_ids;
step_ids.reserve(step_events.size());
for (const auto& id_details : step_events) {
step_ids.push_back(id_details.first);
}
absl::c_sort(step_ids);
std::string result = "{";
for (auto id : step_ids) {
absl::StrAppend(&result, "\n");
auto* details = gtl::FindOrNull(step_events, id);
std::string details_str = details ? details->DebugString() : "()";
absl::StrAppend(&result, id, ":", details_str);
}
return absl::StrCat(result, "\n}");
}
// Merges every step in src into the corresponding step in dst, creating
// destination entries as needed.
void CombineStepEvents(const StepEvents& src, StepEvents* dst) {
  for (const auto& entry : src) {
    // operator[] default-constructs the destination StepDetails if absent.
    CombineStepDetails(entry.second, &(*dst)[entry.first]);
  }
}
// Converts from overlapped step-events to non-overlapped step-events.
StepEvents ToNonOverlappedStepEvents(const StepEvents& overlapped_step_events) {
  StepEvents non_overlapped_step_events;
  for (const auto& entry : overlapped_step_events) {
    const auto& step_details = entry.second;
    // Single map lookup per step; only the events themselves change.
    StepDetails& dst = non_overlapped_step_events[entry.first];
    *dst.MutableMarkers() = step_details.Markers();
    *dst.MutableEvents() = ToNonOverlappedEvents(step_details.Events());
    *dst.MutableCollectives() = step_details.Collectives();
    *dst.MutableDeviceMemoryTransfers() = step_details.DeviceMemoryTransfers();
  }
  return non_overlapped_step_events;
}
// Records a single step marker.
void StepDetails::AddMarker(const StepMarker& m) { markers_.push_back(m); }
// Records a single typed event span.
void StepDetails::AddEvent(const EventTypeSpan& e) { events_.push_back(e); }
// Appends all of other_markers to this step's markers.
void StepDetails::AppendMarkers(const std::vector<StepMarker>& other_markers) {
  markers_.insert(markers_.end(), other_markers.begin(), other_markers.end());
}
// Appends all of other_events to this step's events.
void StepDetails::AppendEvents(const std::vector<EventTypeSpan>& other_events) {
  events_.insert(events_.end(), other_events.begin(), other_events.end());
}
// Copies the given per-core collective results into this step, overwriting
// any existing entry for the same core id.
void StepDetails::AppendCollectives(
    const absl::flat_hash_map<uint32, AllReduceDbResult>& collectives) {
  for (const auto& it : collectives) {
    collectives_[it.first] = it.second;
  }
}
// Element-wise accumulates the given per-direction transfer statistics
// (occurrence, bytes, time) into this step's aggregates.
// NOTE(review): the parameter is passed by const value; it looks like a
// missing '&' (const ref would avoid a vector copy), but changing the
// definition alone would mismatch the declaration in the header — fix both
// together if confirmed.
void StepDetails::AggregateDeviceMemoryTransfers(
    const std::vector<DeviceMemoryTransfer> device_memory_transfers) {
  if (device_memory_transfers.size() != device_memory_transfers_.size()) {
    return;  // Sanity check.
  }
  for (size_t i = 0; i < device_memory_transfers.size(); ++i) {
    device_memory_transfers_[i].set_occurrence(
        device_memory_transfers_[i].occurrence() +
        device_memory_transfers[i].occurrence());
    device_memory_transfers_[i].set_bytes_transferred(
        device_memory_transfers_[i].bytes_transferred() +
        device_memory_transfers[i].bytes_transferred());
    device_memory_transfers_[i].set_time_us(
        device_memory_transfers_[i].time_us() +
        device_memory_transfers[i].time_us());
  }
}
// Appends an all-reduce record to the collective results for core_id,
// creating the per-core entry if it does not exist yet.
void StepDetails::AddCollectiveOpEvent(uint64 core_id, const AllReduceInfo& e) {
  *collectives_[core_id].add_all_reduce_info() = e;
}
// Accumulates one memory-transfer event into the per-direction aggregate
// (slot 0: host-to-device, 1: device-to-host, 2: device-to-device).
// Non-transfer event types are ignored.
void StepDetails::AddDeviceMemoryTransferEvent(EventType event_type,
                                               const Timespan& time_span,
                                               uint64 bytes) {
  int slot;
  switch (event_type) {
    case HOST_TO_DEVICE: slot = 0; break;
    case DEVICE_TO_HOST: slot = 1; break;
    case DEVICE_TO_DEVICE: slot = 2; break;
    default: return;
  }
  auto& transfer = device_memory_transfers_[slot];
  transfer.set_occurrence(transfer.occurrence() + 1);
  // duration_ps / 1e6 converts picoseconds to microseconds.
  transfer.set_time_us(transfer.time_us() +
                       time_span.duration_ps() / 1000000.0);
  transfer.set_bytes_transferred(transfer.bytes_transferred() + bytes);
}
// Returns the span of this step, preferring the longest host marker when it
// covers the longest device marker (i.e. the host synchronizes at step end),
// and the device marker otherwise.
Timespan StepDetails::StepTime() const {
  Timespan max_host_step_time;
  Timespan max_device_step_time;
  for (const auto& marker : markers_) {
    Timespan* target = (marker.type == StepMarkerType::kDeviceStepMarker)
                           ? &max_device_step_time
                           : &max_host_step_time;
    if (marker.span.duration_ps() > target->duration_ps()) {
      *target = marker.span;
    }
  }
  // CPU-only profile.
  if (max_device_step_time.Empty()) return max_host_step_time;
  // If the host step time includes the device step time, use the host step
  // time. This covers the case where the device is synchronized at the end
  // of each step.
  if (max_host_step_time.Includes(max_device_step_time)) {
    return max_host_step_time;
  }
  return max_device_step_time;
}
// Renders this step as "([markers...], [events...])" for debugging.
std::string StepDetails::DebugString() const {
  std::string result = "([";
  for (size_t i = 0; i < markers_.size(); ++i) {
    absl::StrAppend(&result, i ? ", " : "", PrintStepMarker(markers_[i]));
  }
  absl::StrAppend(&result, "], [");
  for (size_t i = 0; i < events_.size(); ++i) {
    absl::StrAppend(&result, i ? ", " : "", PrintEventTypeSpan(events_[i]));
  }
  return absl::StrCat(result, "])");
}
// Two StepDetails are equal when their markers and events match
// element-wise; collectives and memory transfers are not compared.
bool StepDetails::operator==(const StepDetails& other) const {
  const auto& other_markers = other.Markers();
  const auto& other_events = other.Events();
  if (markers_.size() != other_markers.size() ||
      events_.size() != other_events.size()) {
    return false;
  }
  for (size_t i = 0; i < markers_.size(); ++i) {
    if (markers_[i] != other_markers[i]) return false;
  }
  for (size_t i = 0; i < events_.size(); ++i) {
    if (events_[i] != other_events[i]) return false;
  }
  return true;
}
// Two StepEvents maps are equal when they hold the same step ids and each
// id's StepDetails compare equal.
bool operator==(const StepEvents& a, const StepEvents& b) {
  if (a.size() != b.size()) return false;
  for (const auto& entry : a) {
    const auto* b_details = gtl::FindOrNull(b, entry.first);
    if (b_details == nullptr || entry.second != *b_details) return false;
  }
  return true;
}
// Sums 16-bit and 32-bit device-compute time (in picoseconds) across all
// steps of the given non-overlapped step events.
PrecisionStats ComputePrecisionStats(
    const StepEvents& nonoverlapped_step_events) {
  int64 compute_32bit_ps = 0;
  int64 compute_16bit_ps = 0;
  for (const auto& id_details : nonoverlapped_step_events) {
    for (const EventTypeSpan& event : id_details.second.Events()) {
      if (event.type == DEVICE_COMPUTE_32) {
        compute_32bit_ps += event.span.duration_ps();
      } else if (event.type == DEVICE_COMPUTE_16) {
        compute_16bit_ps += event.span.duration_ps();
      }
    }
  }
  PrecisionStats precision_stats;
  precision_stats.set_compute_32bit_ps(compute_32bit_ps);
  precision_stats.set_compute_16bit_ps(compute_16bit_ps);
  return precision_stats;
}
} // namespace profiler
} // namespace tensorflow
| aam-at/tensorflow | tensorflow/core/profiler/utils/event_span.cc | C++ | apache-2.0 | 16,952 |