code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
// Generated by esidl 0.3.0.
// This file is expected to be modified for the Web IDL interface
// implementation. Permission to use, copy, modify and distribute
// this file in any software license is hereby granted.
#include "AudioTrackListImp.h"
namespace org
{
namespace w3c
{
namespace dom
{
namespace bootstrap
{
// Returns the number of audio tracks in the list (Web IDL: AudioTrackList.length).
// Stub: always reports an empty list until implemented.
unsigned int AudioTrackListImp::getLength()
{
    // TODO: implement me!
    return 0;
}

// Indexed getter (Web IDL: getter AudioTrack item(unsigned long index)).
// Stub: always returns a null track.
html::AudioTrack AudioTrackListImp::getElement(unsigned int index)
{
    // TODO: implement me!
    return nullptr;
}

// Looks a track up by its id attribute (Web IDL: getTrackById).
// Stub: always returns a null track.
html::AudioTrack AudioTrackListImp::getTrackById(const std::u16string& id)
{
    // TODO: implement me!
    return nullptr;
}

// Accessor for the 'change' event handler. Stub: no handler registered yet.
events::EventHandlerNonNull AudioTrackListImp::getOnchange()
{
    // TODO: implement me!
    return nullptr;
}

// Setter for the 'change' event handler. Stub: handler is currently discarded.
void AudioTrackListImp::setOnchange(events::EventHandlerNonNull onchange)
{
    // TODO: implement me!
}

// Accessor for the 'addtrack' event handler. Stub: no handler registered yet.
events::EventHandlerNonNull AudioTrackListImp::getOnaddtrack()
{
    // TODO: implement me!
    return nullptr;
}

// Setter for the 'addtrack' event handler. Stub: handler is currently discarded.
void AudioTrackListImp::setOnaddtrack(events::EventHandlerNonNull onaddtrack)
{
    // TODO: implement me!
}

// Accessor for the 'removetrack' event handler. Stub: no handler registered yet.
events::EventHandlerNonNull AudioTrackListImp::getOnremovetrack()
{
    // TODO: implement me!
    return nullptr;
}

// Setter for the 'removetrack' event handler. Stub: handler is currently discarded.
void AudioTrackListImp::setOnremovetrack(events::EventHandlerNonNull onremovetrack)
{
    // TODO: implement me!
}
}
}
}
}
| esrille/escudo | src/html/AudioTrackListImp.cpp | C++ | apache-2.0 | 1,344 |
package org.annoconf;
/**
* Created by roma on 3/19/17.
*/
/**
 * Abstraction over a source of configuration property values, looked up by
 * string key.
 */
public interface PropertyValueSource {

    /**
     * Checks whether this source can provide a value for the given key.
     *
     * @param key the property key to look up
     * @return {@code true} if a value is available for {@code key}
     */
    boolean hasValue(String key);

    /**
     * Resolves the value associated with the given key.
     *
     * @param key the property key to look up
     * @return the value bound to {@code key}
     */
    String getValue(String key);
}
| Roma7-7-7/annoconf | src/main/java/org/annoconf/PropertyValueSource.java | Java | apache-2.0 | 173 |
/******************************************************************************
* 版权所有 刘大磊 2013-07-01 *
* 作者:刘大磊 *
* 电话:13336390671 *
* email:ldlqdsd@126.com *
*****************************************************************************/
package com.delmar.core.service;
import com.delmar.core.model.CorePage;
import com.delmar.core.service.CoreService;
/**
* @author 刘大磊 2016-08-26 17:08:24
*/
public interface CorePageService extends CoreService<CorePage> {
	/**
	 * Deletes the {@link CorePage} records whose primary keys are contained
	 * in the given array.
	 *
	 * @param ids primary keys of the pages to delete
	 */
	void deleteCorePageList(Integer[] ids);
}
package de.hsmainz.pubapp.geocoder.controller;
import com.google.gson.Gson;
import de.hsmainz.pubapp.geocoder.model.ClientInputJson;
import de.hsmainz.pubapp.geocoder.model.ErrorJson;
import de.hsmainz.pubapp.geocoder.model.geojson.GeoJsonCollection;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Locale;
import java.util.ResourceBundle;
/**
* Interface for all different geocoder APIs
*
* @author Arno
* @since 15.12.2016
*/
/**
 * Base class for all geocoder API clients. Subclasses supply the
 * provider-specific URI construction ({@link #buildUri}) and HTTP call
 * ({@link #doHttpGet}); this class handles input validation, error
 * reporting (as {@code ErrorJson} payloads) and JSON serialization.
 */
public abstract class HttpAPIRequest {
    //****************************************
    //          CONSTANTS
    //****************************************
    // NOTE: "lables"/"lable" are typos for "labels"/"label" but are kept,
    // since the field is package-visible and the bundle name is external.
    static final ResourceBundle lables = ResourceBundle.getBundle("lable", Locale.getDefault());
    // FIX: the logger was previously created for HttpGraphhopperRequest.class
    // (copy/paste), which mis-attributed every log entry written by this base
    // class and by non-Graphhopper subclasses.
    static final Logger logger = LogManager.getLogger(HttpAPIRequest.class);
    //****************************************
    //          VARIABLES
    //****************************************
    // Shared (stateless, thread-safe) JSON serializer.
    Gson gson = new Gson();
    //****************************************
    //          PUBLIC METHODS
    //****************************************
    /**
     * Executes a request to the geocoder API and creates GeoJSON. A custom
     * ClientJson is used for the input.
     *
     * @param inputJson the request parameters combined in a custom ClientJson
     * @return API response (GeoJSON) or an ErrorJson, serialized to a String
     */
    public String requestGeocoder(ClientInputJson inputJson) {
        if (!validateInput(inputJson)) {
            return gson.toJson(new ErrorJson(lables.getString("message_Input_Empty")));
        }
        return requestGeocoder(inputJson.getQueryString(), inputJson.getLocale());
    }

    /**
     * Executes a request to the geocoder API and creates GeoJSON.
     *
     * @param queryString the string containing the address
     * @param locale      the string defining the used language
     * @return API response (GeoJSON) or an ErrorJson, serialized to a String
     */
    public String requestGeocoder(String queryString, String locale) {
        if (!validateInput(queryString)) {
            return gson.toJson(new ErrorJson(lables.getString("message_Input_Empty")));
        }
        try {
            return request(buildUri(queryString, locale));
        } catch (URISyntaxException e) {
            logger.catching(e);
            return gson.toJson(new ErrorJson(lables.getString("error_incorrect_URI")));
        }
    }

    //****************************************
    //          PRIVATE METHODS
    //****************************************
    /**
     * Creates the URI for the API request.
     *
     * @param queryString the string containing the address
     * @param locale      the string defining the used language
     * @return URI for the geocoder request
     * @throws URISyntaxException if the parameters do not form a valid URI
     */
    abstract URI buildUri(String queryString, String locale) throws URISyntaxException;

    /**
     * Executes the HTTP GET request against the provider's API.
     *
     * @param uri the geocoder URL
     * @return the requested GeoJSON collection
     * @throws IOException if the HTTP request fails
     */
    abstract GeoJsonCollection doHttpGet(URI uri) throws IOException;

    /**
     * Performs the request, mapping failures and empty results to ErrorJSONs.
     *
     * @param uri the geocoder URL
     * @return the GeoJSON or an ErrorJSON serialized as a String
     */
    String request(URI uri) {
        String returnString;
        try {
            GeoJsonCollection geoJsonCollection = doHttpGet(uri);
            if (validateOutput(geoJsonCollection)) {
                returnString = gson.toJson(geoJsonCollection);
            } else {
                // The provider answered but found no matching location.
                returnString = gson.toJson(new ErrorJson(lables.getString("message_no_location")));
            }
        } catch (IOException e) {
            logger.catching(e);
            returnString = gson.toJson(new ErrorJson(lables.getString("error_API_request_Faild")));
        }
        return returnString;
    }

    /**
     * Validates the input to avoid unnecessary requests to the API.
     * Also guards against a null wrapper object (previously an NPE).
     *
     * @param inputJson the InputJSON to be validated
     * @return true if the InputJSON carries a non-empty query and locale
     */
    boolean validateInput(ClientInputJson inputJson) {
        return inputJson != null
                && validateInput(inputJson.getQueryString())
                && validateInput(inputJson.getLocale());
    }

    /**
     * Validates the input to avoid unnecessary requests to the API.
     *
     * @param inputString the input string to be validated
     * @return true if the input string is neither null nor empty
     */
    boolean validateInput(String inputString) {
        return inputString != null && !inputString.isEmpty();
    }

    /**
     * Validates the output from the API.
     *
     * @param geoJsonCollection the API output JSON to be validated
     * @return true if the output contains at least one feature
     */
    private boolean validateOutput(GeoJsonCollection geoJsonCollection) {
        return !geoJsonCollection.getFeatures().isEmpty();
    }
}
| ArnoHeid/PubApp | geocoder/src/main/java/de/hsmainz/pubapp/geocoder/controller/HttpAPIRequest.java | Java | apache-2.0 | 5,824 |
package com.txtr.hibernatedelta.model;
import static javax.xml.bind.annotation.XmlAccessType.FIELD;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlElementWrapper;
import javax.xml.bind.annotation.XmlType;
import org.apache.commons.lang3.StringUtils;
@XmlAccessorType(FIELD)
@XmlType(propOrder = {"name", "columns", "explicitIndexes"})
/**
 * JAXB-mapped description of a database table in the Hibernate delta model:
 * its columns, explicit indexes, backing sequence and root-table flag.
 */
@XmlAccessorType(FIELD)
@XmlType(propOrder = {"name", "columns", "explicitIndexes"})
public class HibernateTable implements IHibernateDatabaseObject {

    @XmlAttribute
    private String name;

    @XmlElementWrapper(name = "columns")
    @XmlElement(name = "column")
    private List<HibernateColumn> columns = new ArrayList<HibernateColumn>();

    @XmlElementWrapper(name = "indexes")
    @XmlElement(name = "index")
    private List<ExplicitHibernateIndex> explicitIndexes = new ArrayList<ExplicitHibernateIndex>();

    @XmlAttribute
    private String sequenceName;

    @XmlAttribute
    private boolean virtualRootTable;

    /** Full constructor used when assembling the model programmatically. */
    public HibernateTable(String name, String sequenceName, boolean virtualRootTable) {
        this.name = name;
        this.sequenceName = sequenceName;
        this.virtualRootTable = virtualRootTable;
    }

    /** No-argument constructor required by JAXB unmarshalling. */
    @SuppressWarnings("UnusedDeclaration")
    public HibernateTable() {
    }

    @Override
    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public List<HibernateColumn> getColumns() {
        return columns;
    }

    public List<ExplicitHibernateIndex> getExplicitIndexes() {
        return explicitIndexes;
    }

    public void addColumn(HibernateColumn column) {
        columns.add(column);
    }

    /**
     * Looks a column up by case-insensitive name.
     *
     * @throws IllegalArgumentException when no column with that name exists
     */
    public HibernateColumn getColumn(String name) {
        for (int i = 0; i < columns.size(); i++) {
            HibernateColumn candidate = columns.get(i);
            if (candidate.getName().equalsIgnoreCase(name)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException("column not found: " + name);
    }

    public void addExplicitIndex(ExplicitHibernateIndex hibernateIndex) {
        explicitIndexes.add(hibernateIndex);
    }

    /** Index names are derived from the first 28 characters of the table name. */
    public String getIndexPrefix() {
        return StringUtils.left(name, 28);
    }

    /** @return the subset of columns flagged as belonging to the primary key */
    public List<HibernateColumn> getPrimaryKeyColumns() {
        List<HibernateColumn> primaryKey = new ArrayList<HibernateColumn>();
        for (HibernateColumn candidate : columns) {
            if (candidate.isPrimaryKey()) {
                primaryKey.add(candidate);
            }
        }
        return primaryKey;
    }

    public String getSequenceName() {
        return sequenceName;
    }

    public boolean isVirtualRootTable() {
        return virtualRootTable;
    }
}
| storecast/hibernate-delta | src/main/java/com/txtr/hibernatedelta/model/HibernateTable.java | Java | apache-2.0 | 2,751 |
/**
*
*/
package me.learn.personal.month5;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* Title :
*
* Date : Dec 23, 2020
*
* @author bramanarayan
*
*/
/**
 * Title :
 *
 * Date : Dec 23, 2020
 *
 * Word-break problem: decide whether a string can be segmented into
 * dictionary words.
 *
 * @author bramanarayan
 */
public class WordBreakable {

	/**
	 * Generated entry point stub; intentionally empty.
	 *
	 * @param args command-line arguments (unused)
	 */
	public static void main(String[] args) {
		// TODO Auto-generated method stub

	}

	/**
	 * Decides whether {@code s} can be split into a sequence of words from
	 * {@code wordDict} (words may be reused any number of times).
	 *
	 * Classic O(n^2) dynamic programme: reachable[i] is true when the prefix
	 * s[0, i) can be fully segmented into dictionary words.
	 *
	 * @param s        the string to segment
	 * @param wordDict the list of allowed words
	 * @return true when a complete segmentation exists
	 */
	public boolean wordBreak(String s, List<String> wordDict) {
		Set<String> dictionary = new HashSet<>(wordDict);
		int length = s.length();
		boolean[] reachable = new boolean[length + 1];
		reachable[0] = true; // the empty prefix is trivially segmentable

		for (int end = 1; end <= length; end++) {
			// Scan split points backwards; order is irrelevant for the result.
			for (int start = end - 1; start >= 0; start--) {
				if (reachable[start] && dictionary.contains(s.substring(start, end))) {
					reachable[end] = true;
					break;
				}
			}
		}
		return reachable[length];
	}

}
| balajiboggaram/algorithms | src/me/learn/personal/month5/WordBreakable.java | Java | apache-2.0 | 724 |
/*
Copyright 2015 Ricardo Tubio-Pardavila
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
angular.module('snRequestsDirective', [
'ngMaterial',
'snCommonFilters', 'snApplicationBus',
'snRequestsFilters', 'snControllers', 'snJRPCServices'
])
.controller('snRequestSlotCtrl', [
'$scope', '$mdDialog', '$mdToast', 'satnetRPC', 'snDialog', 'snMessageBus',
/**
* Controller function for handling the SatNet requests dialog.
*
* @param {Object} $scope $scope for the controller
*/
function ($scope, $mdDialog, $mdToast, satnetRPC, snDialog, snMessageBus) {
$scope.gui = {
groundstation_id: '',
spacecraft_id: '',
primary: '',
hide: {
accept: true,
drop: true,
deny: true,
},
slot: {}
};
/**
* Function that handles the process of accepting a given request that
* has already been selected.
*/
$scope.accept = function () {
satnetRPC.rCall(
'gs.operational.accept', [
$scope.groundstation_id, [$scope.slot.identifier]
]
).then(function (results) {
snDialog.toastAction('Confirmed slot #',$scope.slot.identifier);
$scope.slot.state = 'RESERVED';
snMessageBus.send(
snMessageBus.CHANNELS.requests.id,
snMessageBus.EVENTS.accepted.id, {
gs_id: $scope.gui.groundstation_id,
sc_id: $scope.gui.spacecraft_id,
primary: $scope.gui.primary,
slot: $scope.gui.slot
}
);
}).catch(function (c) {
snDialog.exception('gs.operational.accept', '', c);
});
};
/**
* Function that handles the process of denying a given request that
* has already been selected.
*
* TODO :: Temporary, it has been linked to the drop function so that
* the slot does not stay forever with the DENIED state.
*/
$scope.deny = function () {
satnetRPC.rCall(
'gs.operational.drop', [
$scope.groundstation_id, [$scope.slot.identifier]
]
).then(function (results) {
snDialog.toastAction('Denied slot #', $scope.slot.identifier);
$scope.slot.state = 'FREE';
snMessageBus.send(
snMessageBus.CHANNELS.requests.id,
snMessageBus.EVENTS.denied.id, {
gs_id: $scope.gui.groundstation_id,
sc_id: $scope.gui.spacecraft_id,
primary: $scope.gui.primary,
slot: $scope.gui.slot
}
);
}).catch(function (c) {
snDialog.exception('gs.operational.drop', '', c);
});
};
/**
* Function that handles the process of droping a given request that
* has already been booked.
*
* IMPORTANT: This function works both for spacecraft and for
* groundstation slots; therefore, there is an inherent
* level of complexity added in addition in order to
* handle both cases.
*/
$scope.drop = function () {
var rpc = ($scope.gui.primary === 'groundstation') ?
'gs.operational.drop' : 'sc.cancel',
segment_id = ($scope.gui.primary === 'groundstation') ?
$scope.groundstation_id : $scope.spacecraft_id;
satnetRPC.rCall(
rpc, [segment_id, [$scope.slot.identifier]]
).then(function (results) {
snDialog.toastAction('Dropped slot #', $scope.slot.identifier);
$scope.slot.state = 'FREE';
snMessageBus.send(
snMessageBus.CHANNELS.requests.id,
snMessageBus.EVENTS.dropped.id, {
gs_id: $scope.gui.groundstation_id,
sc_id: $scope.gui.spacecraft_id,
primary: $scope.gui.primary,
slot: $scope.gui.slot
}
);
}).catch(function (c) {
snDialog.exception(rpc, '', c);
});
};
/**
* Function that returns whether o not the "accept" button should be
* displayed, taking into account the state of the controller.
*/
$scope.showAccept = function () {
return ($scope.gui.slot.state === 'SELECTED') &&
!($scope.gui.hide.accept);
};
/**
* Function that returns whether o not the "deny" button should be
* displayed, taking into account the state of the controller.
*/
$scope.showDeny = function () {
return ($scope.gui.slot.state === 'SELECTED') &&
!($scope.gui.hide.deny);
};
/**
* Function that returns whether o not the "drop" button should be
* displayed, taking into account the state of the controller.
*/
$scope.showDrop = function () {
if ($scope.gui.primary === 'spacecraft') {
return !($scope.gui.hide.drop) && (
($scope.gui.slot.state === 'SELECTED') ||
($scope.gui.slot.state === 'RESERVED')
);
} else {
return !($scope.gui.hide.drop) && (
($scope.gui.slot.state === 'RESERVED')
);
}
};
/**
* Initialization of the controller.
*/
$scope.init = function () {
$scope.gui.groundstation_id = $scope.gs;
$scope.gui.spacecraft_id = $scope.sc;
$scope.gui.primary = $scope.primary;
$scope.gui.slot = $scope.slot;
if ( $scope.gui.primary === 'spacecraft' ) {
$scope.gui.hide.drop = false;
} else {
$scope.gui.hide.accept = false;
$scope.gui.hide.deny = false;
$scope.gui.hide.drop = false;
}
};
$scope.init();
}
])
.directive('snRequestSlot',
/**
* Function that creates the directive itself returning the object required
* by Angular.
*
* @returns {Object} Object directive required by Angular, with restrict
* and templateUrl
*/
function () {
return {
restrict: 'E',
templateUrl: 'operations/templates/requests/slot.html',
controller: 'snRequestSlotCtrl',
scope: {
sc: '@',
gs: '@',
primary: '@',
slot: '='
}
};
}
)
.controller('snRequestsDlgCtrl', [
    '$scope', '$log', '$mdDialog', 'satnetRPC','snDialog', 'snMessageBus',

    /**
     * Controller function for handling the SatNet requests dialog. It pulls
     * the operational slots for every ground station and spacecraft owned by
     * the logged-in user and keeps them in sync with the accept/deny/drop
     * events broadcast by the per-slot controllers.
     *
     * @param {Object} $scope $scope for the controller
     */
    function ($scope, $log, $mdDialog, satnetRPC, snDialog, snMessageBus) {

        // Fully-qualified identifiers of the slot lifecycle events this
        // dialog subscribes to on the application message bus.
        $scope.events = {
            requests: {
                accepted: {
                    id: snMessageBus.createName(
                        snMessageBus.CHANNELS.requests.id,
                        snMessageBus.EVENTS.accepted.id
                    )
                },
                denied: {
                    id: snMessageBus.createName(
                        snMessageBus.CHANNELS.requests.id,
                        snMessageBus.EVENTS.denied.id
                    )
                },
                dropped: {
                    id: snMessageBus.createName(
                        snMessageBus.CHANNELS.requests.id,
                        snMessageBus.EVENTS.dropped.id
                    )
                }
            }
        };

        /**
         * This function finds the given slot within the dictionary/array of
         * slots within this controller.
         *
         * @param {String} segmentId Identifier of the segment
         * @param {String} slotId Identifier of the slot
         * @returns {Object} Pair {index, slot}; throws a String message when
         *                   the slot cannot be found
         */
        $scope._findSlot = function (segmentId, slotId) {
            var slots = $scope.gui.slots[segmentId];
            if ((slots === undefined) || (slots.length === 0)) {
                throw 'No slots for ss = ' + segmentId;
            }
            for (var i = 0, L = slots.length; i < L; i++) {
                if (slots[i].identifier === slotId) {
                    return {
                        index: i,
                        slot: slots[i]
                    };
                }
            }
            throw 'Slot not found for ss = ' + segmentId;
        };

        /**
         * Updates the slots dictionary when the slot that triggered the event
         * was updated to the "FREE" state: the slot is removed from the lists
         * of both segments involved.
         *
         * @param {Object} data The data object attached to the event
         */
        $scope._updateFree = function (data) {
            var ss_id = (data.primary === 'spacecraft') ?
                    data.gs_id: data.sc_id,
                other_ss_id = (data.primary === 'spacecraft') ?
                    data.sc_id: data.gs_id,
                slot = $scope._findSlot(ss_id, data.slot.identifier),
                slot_other = $scope._findSlot(
                    other_ss_id, data.slot.identifier
                );
            $scope.gui.slots[ss_id].splice(slot.index, 1);
            $scope.gui.slots[other_ss_id].splice(slot_other.index, 1);
        };

        /**
         * Updates the slots dictionary when the slot that triggered the event
         * was not updated to the "FREE" state: only the slot's state on the
         * counterpart segment is refreshed.
         *
         * @param {Object} data The data object attached to the event
         */
        $scope._updateNonFree = function (data) {
            var ss_id = (data.primary === 'spacecraft') ?
                    data.gs_id: data.sc_id,
                slot = $scope._findSlot(ss_id, data.slot.identifier);
            slot.slot.state = data.slot.state;
        };

        /**
         * CALLBACK
         * This function is the callback that handles the event triggered
         * whenever a request slot has been accepted, canceled or denied.
         * Lookup failures are logged and otherwise ignored.
         *
         * @param {String} event The name of the event
         * @param {Object} data The data object generated by the event
         */
        $scope._updateRequestCb = function (event, data) {
            try {
                if (data.slot.state === 'FREE') { $scope._updateFree(data); }
                else { $scope._updateNonFree(data); }
            } catch (e) { $log.info(e); }
        };

        // Wire the callback to the three slot lifecycle events.
        $scope.$on(
            $scope.events.requests.accepted.id, $scope._updateRequestCb
        );
        $scope.$on(
            $scope.events.requests.denied.id, $scope._updateRequestCb
        );
        $scope.$on(
            $scope.events.requests.dropped.id, $scope._updateRequestCb
        );

        // Dialog state: segment identifiers and per-segment slot arrays.
        $scope.gui = {
            gss: [],
            scs: [],
            slots: {},
            filtered: {}
        };

        /**
         * Function that closes the dialog.
         */
        $scope.close = function () { $mdDialog.hide(); };

        /**
         * This function is used to check whether the given slot has to be
         * discarded from amongst the other slots or not.
         *
         * @param {Object} slot The slot to be checked
         * @returns {Boolean} 'true' if the slot has to be discarded (i.e. it
         *                    is neither SELECTED nor RESERVED)
         */
        $scope._filterByState = function(slot) {
            return (slot.state !== 'SELECTED') && (slot.state !== 'RESERVED');
        };

        /**
         * This function processes the slots received from the server in order
         * to adapt them to a more JavaScript "friendly" data structure. It
         * stores the results directly in the controller's data section.
         *
         * @param {String} segmentId Identifier of the segment
         * @param {Object} results Object with the results from the server
         */
        $scope._processSlots = function (segmentId, results) {
            $scope.gui.slots[segmentId] = [];
            if ((results === null) || (angular.equals({}, results))) {
                return;
            }
            var ss_id = Object.keys(results)[0],
                slots = results[ss_id];
            for (var i = 0, L = slots.length; i < L; i++) {
                if ($scope._filterByState(slots[i])) {continue;}
                slots[i].segment_id = ss_id;
                $scope.gui.slots[segmentId].push(slots[i]);
            }
        };

        /**
         * This function retrieves the operational slots from the server for a
         * given segment and stores them internally in a single list for the
         * controller.
         * IMPORTANT: It processes the list so that it adds the reference to
         * the other segment related in the slot by place its id inside the
         * object of the slot rather than as a key to access the slot.
         * IMPORTANT 2: It filters out all the slots whose states are neither
         * 'SELECTED' nor 'BOOKED'.
         *
         * @param segmentType String that indicates whether the reference
         *                      segment is a ground station ('sc') or a
         *                      spacecraft ('sc')
         * @param segmentId String Identifier of the segment
         */
        $scope._pullSlots = function (segmentType, segmentId) {
            var rpc_name = segmentType + '.operational';
            satnetRPC.rCall(rpc_name, [segmentId]).then(function (results) {
                $scope._processSlots(segmentId, results);
            }).catch(function (cause) {
                snDialog.exception(segmentType + '.operational', '-', cause);
            });
        };

        /**
         * Retrieves the slots for all the ground stations owned by the
         * currently logged-in user, one RPC call per ground station.
         */
        $scope._pullGsSlots = function () {
            satnetRPC.rCall('gs.list.mine', []).then(function (results) {
                $scope.gui.gss = results;
                for (var i = 0, l = $scope.gui.gss.length;i < l; i++) {
                    $scope._pullSlots('gs', $scope.gui.gss[i]);
                }
            }).catch(function (cause) {
                snDialog.exception('gs.list.mine', '-', cause);
            });
        };

        /**
         * Retrieves the slots for all the spacecraft owned by the
         * currently logged-in user, one RPC call per spacecraft.
         */
        $scope._pullScSlots = function () {
            satnetRPC.rCall('sc.list.mine', []).then(function (results) {
                $scope.gui.scs = results;
                for (var i = 0, l = $scope.gui.scs.length; i < l; i++ ) {
                    $scope._pullSlots('sc', $scope.gui.scs[i]);
                }
            }).catch(function (cause) {
                snDialog.exception('sc.list.mine', '-', cause);
            });
        };

        /**
         * Initialization of the controller.
         */
        $scope.init = function () {
            $scope._pullGsSlots();
            $scope._pullScSlots();
        };

        $scope.init();

    }
])
.controller('snRequestsCtrl', [
'$scope', '$mdDialog',
/**
* Controller function for opening the SatNet requests dialog.
*
* @param {Object} $scope $scope for the controller
* @param {Object} $mdDialog Angular material Dialog service
*/
function ($scope, $mdDialog) {
/**
* Function that opens the dialog when the snRequests button is
* clicked.
*/
$scope.openDialog = function () {
$mdDialog.show({
templateUrl: 'operations/templates/requests/list.html',
controller: 'snRequestsDlgCtrl'
});
};
}
])
.directive('snRequests',
/**
* Function that creates the directive itself returning the object required
* by Angular.
*
* @returns {Object} Object directive required by Angular, with restrict
* and templateUrl
*/
function () {
return {
restrict: 'E',
templateUrl: 'operations/templates/requests/menu.html',
controller: 'snRequestsCtrl'
};
}
);
| satnet-project/satnet-ng | src/operations/directives/Requests.js | JavaScript | apache-2.0 | 17,236 |
package web;
import graphUtil.CycleChainDecomposition;
import graphUtil.EdgeChain;
import ilog.concert.IloException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import kepLib.KepInstance;
import kepLib.KepProblemData;
import kepModeler.ChainsForcedRemainOpenOptions;
import kepModeler.KepModeler;
import kepModeler.ModelerInputs;
import kepModeler.ObjectiveMode;
import replicator.DonorEdge;
import threading.FixedThreadPool;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import data.Donor;
import data.ExchangeUnit;
import database.KidneyDataBase;
import exchangeGraph.CycleChainPackingSubtourElimination;
import exchangeGraph.SolverOption;
/**
 * Server-side facade for the kidney exchange solver: loads datasets from the
 * database, runs the cycle/chain packing optimization and flattens both the
 * input graph and the solution into plain maps suitable for JSON rendering
 * by the web layer.
 */
public class KidneyServerSolver {

  // Data access layer used to enumerate and load datasets.
  private KidneyDataBase database;
  // Cache of already-loaded modeler inputs, keyed by dataset name.
  // NOTE(review): unbounded and not thread-safe — confirm single-threaded use.
  private Map<String, ModelerInputs<ExchangeUnit, DonorEdge>> dataCache = new HashMap<String, ModelerInputs<ExchangeUnit, DonorEdge>>();
  // Shared thread pool handed to the CPLEX-based solver (absent = sequential).
  private Optional<FixedThreadPool> threadPool;
  // Solve time limit; the name suggests milliseconds — TODO confirm the unit
  // expected by CycleChainPackingSubtourElimination (100ms is very small).
  Optional<Double> maxSolveTimeMs = Optional.of(100.0);

  public KidneyServerSolver(KidneyDataBase database,
      Optional<FixedThreadPool> threadPool) {
    this.database = database;
    this.threadPool = threadPool;
  }

  /** @return the names of the datasets the database can serve */
  public ImmutableList<String> availableDatasets() {
    return database.availableDatasets();
  }

  /**
   * Loads (or reuses cached) inputs for the dataset and flattens them into
   * a {nodes, links} map for the client.
   */
  public Map<Object, Object> getInputs(String databaseName) {
    return flattenModelerInputs(getModelerInputs(databaseName));
  }

  /**
   * Solves the kidney exchange problem for the given dataset (maximum
   * cardinality objective, cycles of length <= 3, unbounded chains) and
   * flattens the chosen edges into a {links} map.
   *
   * @throws IloException if the underlying CPLEX solve fails
   */
  public Map<Object, Object> getSolution(String databaseName)
      throws IloException {
    ModelerInputs<ExchangeUnit, DonorEdge> inputs = getModelerInputs(databaseName);
    KepModeler modeler = new KepModeler(3, Integer.MAX_VALUE,
        ChainsForcedRemainOpenOptions.none,
        new ObjectiveMode.MaximumCardinalityMode());
    KepInstance<ExchangeUnit, DonorEdge> instance = modeler.makeKepInstance(
        inputs, null);
    CycleChainPackingSubtourElimination<ExchangeUnit, DonorEdge> solver = new CycleChainPackingSubtourElimination<ExchangeUnit, DonorEdge>(
        instance, true, maxSolveTimeMs, threadPool,
        SolverOption.makeCheckedOptions(SolverOption.cutsetMode,
            SolverOption.lazyConstraintCallback, SolverOption.userCutCallback));
    solver.solve();
    CycleChainDecomposition<ExchangeUnit, DonorEdge> solution = solver
        .getSolution();
    // Release CPLEX resources before flattening the result.
    solver.cleanUp();
    return flattenSolution(inputs.getKepProblemData(), solution);
  }

  // Cache-through loader for the modeler inputs of a dataset.
  private ModelerInputs<ExchangeUnit, DonorEdge> getModelerInputs(
      String databaseName) {
    if (this.dataCache.containsKey(databaseName)) {
      return this.dataCache.get(databaseName);
    } else {
      ModelerInputs<ExchangeUnit, DonorEdge> inputs = database
          .loadInputs(databaseName);
      this.dataCache.put(databaseName, inputs);
      return inputs;
    }
  }

  /**
   * Flattens the full exchange graph into a {nodes: [...], links: [...]}
   * map of plain key/value maps for JSON serialization.
   */
  public static Map<Object, Object> flattenModelerInputs(
      ModelerInputs<ExchangeUnit, DonorEdge> inputs) {
    Map<Object, Object> ans = new HashMap<Object, Object>();
    List<Map<Object, Object>> flatUnits = Lists.newArrayList();
    List<Map<Object, Object>> flatEdges = Lists.newArrayList();
    for (ExchangeUnit unit : inputs.getKepProblemData().getGraph()
        .getVertices()) {
      flatUnits.add(flattenExchangeUnit(inputs, unit));
    }
    for (DonorEdge edge : inputs.getKepProblemData().getGraph().getEdges()) {
      flatEdges.add(flattenDonorEdge(inputs.getKepProblemData(), edge));
    }
    ans.put("nodes", flatUnits);
    ans.put("links", flatEdges);
    return ans;
  }

  /**
   * Flattens only the edges chosen by the solver (every chain of the
   * decomposition) into a {links: [...]} map.
   */
  public static Map<Object, Object> flattenSolution(
      KepProblemData<ExchangeUnit, DonorEdge> problemData,
      CycleChainDecomposition<ExchangeUnit, DonorEdge> solution) {
    Map<Object, Object> ans = new HashMap<Object, Object>();
    List<Map<Object, Object>> flatEdges = Lists.newArrayList();
    for (EdgeChain<DonorEdge> edgeChain : solution.getEdgeChains()) {
      for (DonorEdge edge : edgeChain) {
        flatEdges.add(flattenDonorEdge(problemData, edge));
      }
    }
    ans.put("links", flatEdges);
    return ans;
  }

  // Builds a {sourceId, targetId, id} map for one edge; the edge id is the
  // concatenation of the endpoint ids.
  private static Map<Object, Object> flattenDonorEdge(
      KepProblemData<ExchangeUnit, DonorEdge> kepProblemData, DonorEdge edge) {
    Map<Object, Object> ans = new HashMap<Object, Object>();
    ExchangeUnit source = kepProblemData.getGraph().getSource(edge);
    ExchangeUnit dest = kepProblemData.getGraph().getDest(edge);
    String sourceId = makeNodeId(kepProblemData, source);
    String destId = makeNodeId(kepProblemData, dest);
    ans.put("sourceId", sourceId);
    ans.put("targetId", destId);
    ans.put("id", sourceId + destId);
    return ans;
  }

  // Builds the {id, type, reachable, sensitized} map for one vertex.
  // NOTE(review): "reachable" is hard-coded to true here.
  private static Map<Object, Object> flattenExchangeUnit(
      ModelerInputs<ExchangeUnit, DonorEdge> inputs, ExchangeUnit unit) {
    Map<Object, Object> ans = new HashMap<Object, Object>();
    ans.put("id", makeNodeId(inputs.getKepProblemData(), unit));
    ans.put("type", makeType(inputs.getKepProblemData(), unit));
    ans.put("reachable", true);
    ans.put("sensitized", computeSensitization(inputs, unit));
    return ans;
  }

  // Root nodes (unpaired donors) are identified by their first donor's id;
  // all other nodes by their receiver's id.
  private static String makeNodeId(
      KepProblemData<ExchangeUnit, DonorEdge> kepProblemData, ExchangeUnit unit) {
    if (kepProblemData.getRootNodes().contains(unit)) {
      return unit.getDonor().get(0).getId();
    } else {
      return unit.getReceiver().getId();
    }
  }

  // Classifies a vertex as root / paired / terminal; any other case is a
  // programming error.
  private static String makeType(
      KepProblemData<ExchangeUnit, DonorEdge> kepProblemData, ExchangeUnit unit) {
    if (kepProblemData.getRootNodes().contains(unit)) {
      return "root";
    } else if (kepProblemData.getPairedNodes().contains(unit)) {
      return "paired";
    } else if (kepProblemData.getTerminalNodes().contains(unit)) {
      return "terminal";
    } else {
      throw new RuntimeException();
    }
  }

  // Maps a vertex to a small integer "sensitization" bucket used for display.
  // Missing match-power data degrades silently to 0.
  // NOTE(review): donorPower is declared Map<ExchangeUnit, Double> but is
  // queried with Donor keys below (containsKey/get take Object, so this
  // compiles). Unless the underlying map is actually keyed by Donor via
  // unchecked code elsewhere, these lookups always miss — verify the real
  // key type of getDonorPowerPostPreference().
  private static int computeSensitization(
      ModelerInputs<ExchangeUnit, DonorEdge> inputs, ExchangeUnit unit) {
    Map<ExchangeUnit, Double> donorPower = inputs.getAuxiliaryInputStatistics()
        .getDonorPowerPostPreference();
    Map<ExchangeUnit, Double> receiverPower = inputs
        .getAuxiliaryInputStatistics().getReceiverPowerPostPreference();
    // System.out.println(donorPower);
    // System.out.println(receiverPower);
    if (inputs.getKepProblemData().getRootNodes().contains(unit)) {
      if (donorPower.containsKey(unit.getDonor().get(0))) {
        return singlePersonSensitization(donorPower.get(unit.getDonor().get(0)));
      } else {
        // System.err.println("missing donor power data for: " + unit);
        return 0;
      }
    } else if (inputs.getKepProblemData().getPairedNodes().contains(unit)) {
      double unitDonorPower = 0;
      for (Donor donor : unit.getDonor()) {
        if (donorPower.containsKey(donor)) {
          unitDonorPower += donorPower.get(donor);
        } else {
          // System.err.println("missing donor power data for: " + unit);
          return 0;
        }
      }
      if (receiverPower.containsKey(unit.getReceiver())) {
        return twoPersonSensitization(unitDonorPower,
            receiverPower.get(unit.getReceiver()));
      } else {
        // System.err.println("missing receiver power for: " + unit);
        return 0;
      }
    } else if (inputs.getKepProblemData().getTerminalNodes().contains(unit)) {
      if (receiverPower.containsKey(unit.getReceiver())) {
        return singlePersonSensitization(receiverPower.get(unit.getReceiver()));
      } else {
        // System.err.println("missing receiver power for: " + unit);
        return 0;
      }
    } else {
      throw new RuntimeException();
    }
  }

  // Buckets a single person's match power into 0..3 (lower power = more
  // sensitized). Thresholds are display heuristics.
  private static int singlePersonSensitization(double matchPower) {
    if (matchPower < .01) {
      return 3;
    } else if (matchPower < .08) {
      return 2;
    } else if (matchPower < .2) {
      return 1;
    } else {
      return 0;
    }
  }

  // Buckets a donor/receiver pair's combined (scaled) match power into 0..4.
  private static int twoPersonSensitization(double donorMatchPower,
      double receiverMatchPower) {
    double pmp = 10000 * donorMatchPower * receiverMatchPower;
    if (pmp < .1) {
      return 4;
    } else if (pmp < 5) {
      return 3;
    } else if (pmp < 20) {
      return 2;
    } else if (pmp < 60) {
      return 1;
    } else {
      return 0;
    }
  }
}
| rma350/kidneyExchange | kidneyMatching/src/web/KidneyServerSolver.java | Java | apache-2.0 | 8,249 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Forward migration: create the ``Journey`` and ``ScheduledStop``
        tables (South auto-generated schema migration — column definitions
        mirror the model fields and should not be edited by hand)."""
        # Adding model 'Journey'
        db.create_table('places_journey', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('route', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Route'])),
            ('external_ref', self.gf('django.db.models.fields.TextField')()),
            ('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('runs_on_monday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_tuesday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_wednesday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_thursday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_friday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_saturday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_sunday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_in_termtime', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_in_school_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_bank_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_non_bank_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_from', self.gf('django.db.models.fields.DateField')()),
            ('runs_until', self.gf('django.db.models.fields.DateField')()),
            ('vehicle', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('places', ['Journey'])
        # Adding model 'ScheduledStop'
        db.create_table('places_scheduledstop', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('entity', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'])),
            ('journey', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Journey'])),
            ('order', self.gf('django.db.models.fields.IntegerField')()),
            ('sta', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
            ('std', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
            ('times_estimated', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('fare_stage', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('activity', self.gf('django.db.models.fields.CharField')(default='B', max_length=1)),
        ))
        db.send_create_signal('places', ['ScheduledStop'])
    def backwards(self, orm):
        # Reverse migration: drop the two tables created in forwards().
        # NOTE(review): 'places_scheduledstop' holds a FK to 'places_journey';
        # the deletion order below is the one South generated — confirm the
        # backend tolerates it before reordering.

        # Deleting model 'Journey'
        db.delete_table('places_journey')

        # Deleting model 'ScheduledStop'
        db.delete_table('places_scheduledstop')
models = {
'places.entity': {
'Meta': {'object_name': 'Entity'},
'_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.Identifier']", 'symmetrical': 'False'}),
'_metadata': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'absolute_url': ('django.db.models.fields.TextField', [], {}),
'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.EntityGroup']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']", 'null': 'True'}),
'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.EntityType']", 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Source']"})
},
'places.entitygroup': {
'Meta': {'object_name': 'EntityGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ref_code': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Source']"})
},
'places.entitygroupname': {
'Meta': {'unique_together': "(('entity_group', 'language_code'),)", 'object_name': 'EntityGroupName'},
'entity_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['places.EntityGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'title': ('django.db.models.fields.TextField', [], {})
},
'places.entityname': {
'Meta': {'unique_together': "(('entity', 'language_code'),)", 'object_name': 'EntityName'},
'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['places.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'title': ('django.db.models.fields.TextField', [], {})
},
'places.entitytype': {
'Meta': {'object_name': 'EntityType'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.EntityTypeCategory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"})
},
'places.entitytypecategory': {
'Meta': {'object_name': 'EntityTypeCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
},
'places.entitytypename': {
'Meta': {'unique_together': "(('entity_type', 'language_code'),)", 'object_name': 'EntityTypeName'},
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['places.EntityType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'verbose_name': ('django.db.models.fields.TextField', [], {}),
'verbose_name_plural': ('django.db.models.fields.TextField', [], {}),
'verbose_name_singular': ('django.db.models.fields.TextField', [], {})
},
'places.identifier': {
'Meta': {'object_name': 'Identifier'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'places.journey': {
'Meta': {'object_name': 'Journey'},
'external_ref': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Route']"}),
'runs_from': ('django.db.models.fields.DateField', [], {}),
'runs_in_school_holidays': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_in_termtime': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_bank_holidays': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_friday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_monday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_non_bank_holidays': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_saturday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_sunday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_thursday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_tuesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_on_wednesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runs_until': ('django.db.models.fields.DateField', [], {}),
'vehicle': ('django.db.models.fields.TextField', [], {})
},
'places.route': {
'Meta': {'object_name': 'Route'},
'external_ref': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'operator': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'service_id': ('django.db.models.fields.TextField', [], {}),
'service_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'stops': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.Entity']", 'through': "orm['places.StopOnRoute']", 'symmetrical': 'False'})
},
'places.scheduledstop': {
'Meta': {'ordering': "['order']", 'object_name': 'ScheduledStop'},
'activity': ('django.db.models.fields.CharField', [], {'default': "'B'", 'max_length': '1'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']"}),
'fare_stage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Journey']"}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'sta': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'std': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'times_estimated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'places.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'module_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'places.stoponroute': {
'Meta': {'ordering': "['order']", 'object_name': 'StopOnRoute'},
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Route']"})
}
}
complete_apps = ['places']
| mollyproject/mollyproject | molly/apps/places/migrations/0009_auto__add_journey__add_scheduledstop.py | Python | apache-2.0 | 13,531 |
package com.jaivox.ui.appmaker;
import java.io.*;
import java.util.*;
import bitpix.list.*;
/**
 * Converts a decision tree of "(input) (output)" rules into a finite-state
 * dialog specification.  Each tree node contributes one dialog state block;
 * generated blocks accumulate in {@code store} and can be written out with
 * {@link #writeRules(PrintWriter)}.
 *
 * <p>Fixes over the generated original: removed a no-op self-assignment of
 * {@code startState} in the constructor, and replaced {@code StringBuffer}
 * with the unsynchronized {@code StringBuilder} (all use is single-threaded).
 */
public class Rule2Fsm {

    static String dir = "./";
    basicTree tree;
    // Maps a generated state name -> "yes"; used as a set of taken state names.
    TreeMap <String, String> states;
    // Maps an emitted tag -> "yes"; used as a set of taken tag names.
    TreeMap <String, String> tags;

    static String name = "data/road1.tree";
    static String yes = "yes";
    String startState = "def";
    static String casedefault = "(default) (def)";
    static basicNode casedefaultnode;
    // Accumulated dialog-state blocks, in generation order.
    Vector <String> store;

    /**
     * Loads the rule tree from {@code dir + name} and generates dialog
     * states for every child of the root, starting from the "def" state.
     */
    public Rule2Fsm () {
        String filename = dir + name;
        tree = new basicTree (filename);
        // tree.WriteTree ();
        states = new TreeMap <String, String> ();
        tags = new TreeMap <String, String> ();
        Vector <bitpix.list.basicNode> list = tree.Root.ListChild;
        casedefaultnode = new basicNode (casedefault);
        store = new Vector <String> ();
        store.add ("\n#include errors.dlg\n");
        for (int i=0; i<list.size (); i++) {
            basicNode child = list.elementAt (i);
            gt (child, startState);
        }
        // Derived output filename; kept for the (currently disabled) file dump.
        int pos = filename.lastIndexOf (".");
        String outfile = filename.substring (0, pos+1) + "dlg";
        // writefile (outfile, store);
    }

    /** Prints a tagged debug message to stdout. */
    void Debug (String s) {
        System.out.println ("[Rule2Fsm]" + s);
    }

    /**
     * Recursively walks the tree, emitting one state per node.  {@code sofar}
     * is the space-separated path of state names leading to this node; leaf
     * nodes transition back to the "def" state, interior nodes get a fresh
     * state name and also gain a default fallback child.
     */
    void gt (basicNode node, String sofar) {
        Vector <bitpix.list.basicNode> list = node.ListChild;
        if (list == null || list.size () == 0) {
            // emit a state with def
            emit (node, sofar, "def");
        }
        else {
            String nextstate = createNextState (node);
            String morefar = sofar + " " + nextstate;
            emit (node, sofar, nextstate);
            list.add (casedefaultnode);
            for (int i=0; i<list.size (); i++) {
                basicNode child = list.elementAt (i);
                gt (child, morefar);
            }
        }
    }

    /**
     * Emits one dialog-state block for {@code node} into {@code store}.
     * The node's tag is expected to look like "(input words) (output words)";
     * words are joined with '.' to form the dotted head-tag form used by
     * Gui2Gram-generated grammars.
     */
    void emit (basicNode node, String sofar, String next) {
        int pos = sofar.lastIndexOf (" ");
        pos++;
        String last = sofar.substring (pos);
        String tag = sofar.replaceAll (" ", "_");
        tag = tag + "_" + next;
        tag = getuniquetag (tag);
        StringBuilder sb = new StringBuilder ();
        sb.append ("{\n["+tag+"]\n");
        String t = (String)node.Tag;
        if (t.trim ().length () == 0) return;
        StringTokenizer st = new StringTokenizer (t, "()");
        if (st.countTokens () < 2) {
            Debug ("Don't have two tokens from "+t);
            return;
        }
        String input = filter (st.nextToken ()).trim ();
        String output = filter (st.nextToken ()).trim ();
        // Skip blank tokens produced by consecutive parentheses.
        while (output.length () == 0)
            output = filter (st.nextToken ()).trim ();
        // Debug ("tag="+t+" / input="+input+" output="+output);
        // with Gui2Gram, convert input and output to use dotted head tag form
        String indot = input.replaceAll (" ", ".");
        String outdot = output.replaceAll (" ", ".");
        sb.append ("\t"+last+" ;\n");
        sb.append ("\t"+indot+" ;\n");
        sb.append ("\t"+outdot+" ;\n");
        sb.append ("\t"+next+" ;\n");
        sb.append ("}\n");
        store.add (sb.toString ());
    }

    /** Delegates token cleanup to the shared Gui2Gram filter. */
    static String filter (String line) {
        return Gui2Gram.filter (line);
    }

    /**
     * Builds a short, unique state name for a node: the first letters of up
     * to three output words, padded with 'x' to length 3, then suffixed with
     * a digit if that name is already taken.
     */
    String createNextState (basicNode node) {
        String tag = (String)(node.Tag);
        StringTokenizer st = new StringTokenizer (tag, "()");
        if (st.countTokens () < 2) {
            Debug ("don't have two tokens in "+tag);
            return "def";
        }
        String input = st.nextToken ().trim ();
        String output = st.nextToken ().trim ();
        while (output.length () == 0)
            output = st.nextToken ().trim ();
        StringTokenizer tt = new StringTokenizer (output);
        int n = tt.countTokens ();
        StringBuilder sb = new StringBuilder ();
        for (int i=0; i<Math.min (n, 3); i++) {
            String token = tt.nextToken ();
            sb.append (token.charAt (0));
        }
        if (n < 3) {
            for (int j=n; j<3; j++) {
                sb.append ('x');
            }
        }
        String s = sb.toString ();
        String test = states.get (s);
        if (test != null) {
            // Name collision: try numeric suffixes 1..9.
            for (int i=1; i<10; i++) {
                String next = s + i;
                if (states.get (next) == null) {
                    s = next;
                    break;
                }
            }
        }
        states.put (s, yes);
        return s;
    }

    /**
     * Returns {@code in} if unused, otherwise the first free "in_N" variant
     * (N in 1..98).  Logs and returns "error" if all variants are taken.
     */
    String getuniquetag (String in) {
        if (tags.get (in) == null) {
            tags.put (in, yes);
            return in;
        }
        else {
            for (int i=1; i<99; i++) {
                String next = in+"_"+i;
                if (tags.get (next) != null) {
                    continue;
                }
                tags.put (next, yes);
                return next;
            }
            Debug ("More than 99 tags starting with "+in);
            return "error";
        }
    }

    /** Writes every generated state block to {@code out}, one per line group. */
    void writeRules (PrintWriter out) {
        try {
            for (int i=0; i<store.size (); i++) {
                out.println (store.elementAt (i));
            }
        }
        catch (Exception e) {
            e.printStackTrace ();
        }
    }
}
| jaivox/tools | v2/com/jaivox/ui/appmaker/Rule2Fsm.java | Java | apache-2.0 | 4,520 |
/*
* Copyright (C) 2012 Jason Gedge <http://www.gedge.ca>
*
* This file is part of the OpGraph project.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* Provides commands for the Application API.
*/
package ca.phon.opgraph.app.commands;
| ghedlund/opgraph | app/src/main/java/ca/phon/opgraph/app/commands/package-info.java | Java | apache-2.0 | 855 |
<?php
class Gep_ScoreController extends Zend_Controller_Action {

	/**
	 * Zend initialisation hook, invoked before any action method.
	 * Forces an HTML/UTF-8 content-type header on every response
	 * served by this controller.
	 */
	public function init()
	{
		/* Initialize action controller here */
		header('content-type: text/html; charset=utf8');
	}

	/**
	 * Default landing action; all rendering is delegated to the
	 * corresponding view script.
	 */
	public function indexAction(){

	}

	/**
	 * Shows the full score/result page; logic lives in the view script.
	 */
	public function fullResultAction(){

	}
}
| samlanh/aseansm | application/modules/gep/controllers/ScoreController.php | PHP | apache-2.0 | 307 |
/*
* Copyright 2013 DigitasLBi Netherlands B.V.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System.Collections.Generic;
using System.ComponentModel.Composition.Hosting;
using System.Diagnostics;
using LBi.Cli.Arguments;
namespace LBi.LostDoc.ConsoleApplication.Extensibility
{
public abstract class Command : ICommand
{
[Parameter(HelpMessage = "Include errors and warning output only.")]
public LBi.Cli.Arguments.Switch Quiet { get; set; }
[Parameter(HelpMessage = "Include verbose output.")]
public LBi.Cli.Arguments.Switch Verbose { get; set; }
public abstract void Invoke(CompositionContainer container);
protected void ConfigureTraceLevels(IEnumerable<TraceSource> sources)
{
SourceLevels currentLevel;
if (this.Quiet.IsPresent)
{
currentLevel = SourceLevels.Error | SourceLevels.Warning | SourceLevels.Critical;
}
else if (this.Verbose.IsPresent)
{
currentLevel = SourceLevels.All;
}
else
{
currentLevel = SourceLevels.Information |
SourceLevels.Warning |
SourceLevels.Error |
SourceLevels.Critical |
SourceLevels.ActivityTracing;
}
foreach (TraceSource source in sources)
source.Switch.Level = currentLevel;
}
}
} | LBiNetherlands/LBi.LostDoc | LBi.LostDoc.ConsoleApplication.Extensibility/Command.cs | C# | apache-2.0 | 2,048 |
using UnityEngine;
namespace DefaultNamespace
{
    // NOTE(review): ReSharper quick-fix test fixture — the literal "{caret}"
    // token marks the editor caret position, so this file is intentionally
    // not valid C# and must not be "fixed" by hand.
    public class FieldGenerationWithRespectToCodeStyleTest : MonoBehaviour
    {
        public void Update()
        {
            int[,] test = new int[2,2];
            test[0, 0] = 5;
            test[test[0,{caret} 1], test[0, test[0,1]]] = 5;
        }
    }
} | JetBrains/resharper-unity | resharper/resharper-unity/test/data/Unity/CSharp/Intentions/QuickFixes/InefficientMultidimensionalArrayUsage/LocalDeclarationType2.cs | C# | apache-2.0 | 316 |
/*
* Copyright 2013 JCertifLab.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jcertif.android.fragments;
import android.app.Activity;
import android.content.Intent;
import android.content.res.Configuration;
import android.net.Uri;
import android.os.Bundle;
import android.provider.CalendarContract;
import android.provider.CalendarContract.Events;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemClickListener;
import android.widget.ListView;
import android.widget.Toast;
import com.actionbarsherlock.view.ActionMode;
import com.actionbarsherlock.view.Menu;
import com.actionbarsherlock.view.MenuInflater;
import com.actionbarsherlock.view.MenuItem;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.jcertif.android.JcertifApplication;
import com.jcertif.android.MainActivity;
import com.jcertif.android.R;
import com.jcertif.android.adapters.SessionAdapter;
import com.jcertif.android.adapters.SpeedScrollListener;
import com.jcertif.android.dao.SessionProvider;
import com.jcertif.android.dao.SpeakerProvider;
import com.jcertif.android.model.Session;
import com.jcertif.android.model.Speaker;
import com.jcertif.android.service.RESTService;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.List;
import uk.co.senab.actionbarpulltorefresh.extras.actionbarsherlock.PullToRefreshAttacher;
/**
*
* @author Patrick Bashizi
*
*/
/**
 * Lists conference sessions with pull-to-refresh, contextual actions
 * (share / add-to-calendar) and category filtering.  Sessions are served
 * from the local cache when available, otherwise fetched from the REST
 * backend via {@link RESTService}.
 *
 * <p>Fixes over the original: {@code onPause()} previously called
 * {@code super.onDestroy()}, corrupting the fragment lifecycle; it now
 * calls {@code super.onPause()}.  A stray double semicolon on the
 * {@code mSessions} initializer was also removed.
 */
public class SessionListFragment extends RESTResponderFragment implements PullToRefreshAttacher.OnRefreshListener{

	public static final String SESSIONS_LIST_URI = JcertifApplication.BASE_URL
			+ "/session/list";
	public static final String CATEGORY_LIST_URI = JcertifApplication.BASE_URL
			+ "/ref/category/list";
	private static String TAG = SessionListFragment.class.getName();

	private List<Session> mSessions = new ArrayList<Session>();
	private ListView mLvSessions;
	private SessionAdapter mAdapter;
	private SessionProvider mProvider;
	private SpeedScrollListener mListener;
	private ActionMode mActionMode;
	private Session mSelectedSession;
	private PullToRefreshAttacher mPullToRefreshAttacher ;

	public SessionListFragment() {
		// Empty constructor required for fragment subclasses
	}

	/** Callback for tablet layouts: notifies the host that a session was picked. */
	public interface OnSessionUpdatedListener {
		void onSessionUpdated(Session session);
	}

	@Override
	public View onCreateView(LayoutInflater inflater, ViewGroup container,
			Bundle savedInstanceState) {
		View rootView = inflater.inflate(R.layout.fragment_session, container,
				false);
		mLvSessions = (ListView) rootView.findViewById(R.id.lv_session);
		String session = getResources().getStringArray(R.array.menu_array)[0];
		setHasOptionsMenu(true);
		getActivity().setTitle(session);
		mLvSessions = (ListView) rootView.findViewById(R.id.lv_session);

		// Reuse the activity-level pull-to-refresh attacher for this list.
		mPullToRefreshAttacher=((MainActivity)getSherlockActivity()).getmPullToRefreshAttacher();
		mPullToRefreshAttacher.addRefreshableView(mLvSessions, this);

		// Short tap: open the session detail view.
		mLvSessions.setOnItemClickListener(new OnItemClickListener() {
			@Override
			public void onItemClick(AdapterView<?> parent, View view, int pos,
					long position) {
				mAdapter.setSelectedIndex(pos);
				mSelectedSession = ((Session) parent
						.getItemAtPosition((int) position));
				updateSession(mSelectedSession);
			}
		});

		// Long press: start the contextual action bar (share / schedule).
		mLvSessions
				.setOnItemLongClickListener(new AdapterView.OnItemLongClickListener() {

					@Override
					public boolean onItemLongClick(AdapterView<?> arg0,
							View arg1, int pos, long arg3) {
						if (mActionMode != null) {
							return false;
						}
						mActionMode = getSherlockActivity().startActionMode(
								mActionModeCallback);
						mSelectedSession = ((Session) arg0
								.getItemAtPosition((int) pos));
						mAdapter.setSelectedIndex(pos);
						return true;
					}
				});

		return rootView;
	}

	private ActionMode.Callback mActionModeCallback = new ActionMode.Callback() {

		@Override
		public boolean onCreateActionMode(ActionMode mode, Menu menu) {
			MenuInflater inflater = mode.getMenuInflater();
			inflater.inflate(R.menu.context_menu_session, menu);
			return true;
		}

		@Override
		public boolean onPrepareActionMode(ActionMode mode, Menu menu) {
			return false;
		}

		@Override
		public boolean onActionItemClicked(ActionMode mode, MenuItem item) {
			switch (item.getItemId()) {
			case R.id.menu_share:
				shareSessionItem();
				mode.finish(); // Action picked, so close the CAB
				break;
			case R.id.menu_add_to_schedule:
				addSessionItemToSchedule();
				mode.finish(); // Action picked, so close the CAB
				break;
			default:
				return false;
			}
			return true;
		}

		public void onDestroyActionMode(ActionMode mode) {
			mActionMode = null;
		}
	};

	/**
	 * Hands the selected session to the system calendar app as a new event.
	 * Requires the calendar intent API (API level 14+); shows a toast on
	 * older devices.
	 */
	private void addSessionItemToSchedule() {
		if (android.os.Build.VERSION.SDK_INT >= 14){
			Intent intent = new Intent(Intent.ACTION_INSERT);
			intent.setType("vnd.android.cursor.item/event");
			intent.putExtra(Events.TITLE, mSelectedSession.getTitle());
			intent.putExtra(Events.EVENT_LOCATION,"Room"+ mSelectedSession.getSalle());
			intent.putExtra(Events.DESCRIPTION, mSelectedSession.getDescription());

			// NOTE(review): end date is taken from getStart() as in the
			// original code — confirm whether Session exposes a real end time.
			Date evStartDate= mSelectedSession.getStart();
			Date evEndDate= mSelectedSession.getStart();

			// Setting dates
			GregorianCalendar startcalDate = new GregorianCalendar();
			startcalDate.setTime(evStartDate);

			// Setting dates
			GregorianCalendar endCalDate = new GregorianCalendar();
			endCalDate.setTime(evEndDate);

			intent.putExtra(CalendarContract.EXTRA_EVENT_BEGIN_TIME,startcalDate.getTimeInMillis());
			intent.putExtra(CalendarContract.EXTRA_EVENT_END_TIME,endCalDate.getTimeInMillis());

			// Make it a full day event
			intent.putExtra(CalendarContract.EXTRA_EVENT_ALL_DAY, true);

			// Making it private and shown as busy
			intent.putExtra(Events.ACCESS_LEVEL, Events.ACCESS_PRIVATE);
			intent.putExtra(Events.AVAILABILITY, Events.AVAILABILITY_BUSY);
			startActivity(intent);
		}else{
			Toast.makeText(this.getSherlockActivity(),
					"Not supported for your device :(", Toast.LENGTH_SHORT).show();
		}
	}

	/** Opens a share chooser with a short text about the selected session. */
	private void shareSessionItem() {
		Speaker sp = new SpeakerProvider(this.getSherlockActivity())
				.getByEmail(mSelectedSession.getSpeakers()[0]);
		Intent intent = new Intent(android.content.Intent.ACTION_SEND);
		intent.setType("text/plain");
		intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_WHEN_TASK_RESET);
		intent.putExtra(Intent.EXTRA_SUBJECT, "Share Session");
		intent.putExtra(
				Intent.EXTRA_TEXT,
				"Checking out this #Jcertif2013 session : "
						+ mSelectedSession.getTitle() + " by "
						+ sp.getFirstname() + " " + sp.getLastname());
		startActivity(intent);
	}

	/**
	 * Shows the detail view for {@code s}: inline on tablets (via the
	 * parent-fragment callback), as a new activity on phones.
	 */
	protected void updateSession(Session s) {
		if(onTablet()){
			((OnSessionUpdatedListener) getParentFragment()).onSessionUpdated(s);
		}else{
			Intent intent = new Intent(this.getActivity().getApplicationContext(),
					SessionDetailFragmentActivity.class);
			String sessionJson= new Gson().toJson(s);
			intent.putExtra("session",sessionJson);
			startActivity(intent);
			getSherlockActivity().overridePendingTransition ( 0 , R.anim.slide_up_left);
		}
	}

	/** Lazily creates the session cache/provider bound to this activity. */
	public SessionProvider getProvider() {
		if (mProvider == null)
			mProvider = new SessionProvider(this.getSherlockActivity());
		return mProvider;
	}

	@Override
	public void onActivityCreated(Bundle savedInstanceState) {
		super.onActivityCreated(savedInstanceState);
		// This gets called each time our Activity has finished creating itself.
		// First check the local cache; if it's empty, data will be fetched from
		// the web service.
		mSessions = loadSessionsFromCache();
		setSessions();
	}

	@Override
	public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) {
		super.onCreateOptionsMenu(menu, inflater);
	}

	/**
	 * Populates the list from the cached sessions, or kicks off a REST call
	 * through {@link RESTService} when the cache is empty.  Safe to call from
	 * lifecycle events (e.g. rotation).
	 */
	private void setSessions() {
		MainActivity activity = (MainActivity) getActivity();
		setLoading(true);
		if (mSessions.isEmpty() && activity != null) {
			// This is where we make our REST call to the service.  We also pass
			// in the ResultReceiver defined in the RESTResponderFragment super
			// class, and explicitly target our own RESTService component.
			Intent intent = new Intent(activity, RESTService.class);
			intent.setData(Uri.parse(SESSIONS_LIST_URI));

			// Here we are going to place our REST call parameters.
			Bundle params = new Bundle();
			params.putString(RESTService.KEY_JSON_PLAYLOAD, null);

			intent.putExtra(RESTService.EXTRA_PARAMS, params);
			intent.putExtra(RESTService.EXTRA_RESULT_RECEIVER,getResultReceiver());

			// Here we send our Intent to our RESTService.
			activity.startService(intent);
		} else if (activity != null) {
			// Cache hit: only update views while the activity exists.
			updateList();
			setLoading(false);
		}
	}

	/** Rebinds the adapter and, if a refresh was in flight, completes it. */
	void updateList() {
		mListener = new SpeedScrollListener();
		mLvSessions.setOnScrollListener(mListener);
		mAdapter = new SessionAdapter(this.getActivity(), mListener, mSessions);
		mLvSessions.setAdapter(mAdapter);
		if(refreshing){
			refreshing=false;
			mPullToRefreshAttacher.setRefreshComplete();
		}
	}

	/** True when running on a large/x-large screen (tablet layout). */
	private boolean onTablet() {
		return ((getResources().getConfiguration().screenLayout & Configuration.SCREENLAYOUT_SIZE_MASK) >= Configuration.SCREENLAYOUT_SIZE_LARGE);
	}

	/**
	 * Filters the list by category name; "All"/"Tous" restores the full
	 * cached list.
	 */
	public void updateList(String cat) {
		if (cat.equals("All") || cat.equals("Tous")) {
			mSessions = loadSessionsFromCache();
		} else {
			mSessions = getProvider().getSessionsByCategory(cat);
		}
		updateList();
	}

	@Override
	public void onRESTResult(int code, Bundle resultData) {
		// Here is where we handle our REST response.  Check to see if we got
		// an HTTP 200 code and have some data.
		String result = null;
		if (resultData != null) {
			result = resultData.getString(RESTService.REST_RESULT);
		} else {
			return;
		}
		if (code == 200 && result != null) {
			mSessions = parseSessionJson(result);
			Log.d(TAG, result);
			setSessions();
			saveToCache(mSessions);
		} else {
			Activity activity = getActivity();
			if (activity != null) {
				Toast.makeText(
						activity,
						"Failed to load Session data. Check your internet settings.",
						Toast.LENGTH_SHORT).show();
			}
		}
		setLoading(false);
	}

	/** Parses the backend JSON array into Session objects (dd/MM/yyyy hh:mm dates). */
	private List<Session> parseSessionJson(String result) {
		Gson gson = new GsonBuilder().setDateFormat("dd/MM/yyyy hh:mm")
				.create();
		Session[] sessions = gson.fromJson(result, Session[].class);
		return Arrays.asList(sessions);
	}

	/** Persists the fetched sessions off the UI thread. */
	protected void saveToCache(final List<Session> sessions) {
		new Thread(new Runnable() {
			@Override
			public void run() {
				for (Session session : sessions)
					mProvider.store(session);
			}
		}).start();
	}

	private List<Session> loadSessionsFromCache() {
		List<Session> list = getProvider().getAll(Session.class);
		return list;
	}

	@Override
	public void onPause() {
		// Fixed: the original called super.onDestroy() here, which breaks the
		// fragment lifecycle contract.
		super.onPause();
	}

	@Override
	public void onDestroy() {
		super.onDestroy();
	}

	@Override
	public void onRefreshStarted(View view) {
		// Pull-to-refresh: drop the cache and refetch from the network.
		mProvider.deleteAll(Session.class);
		mSessions = loadSessionsFromCache();
		setSessions();
		refreshing=true;
	}
}
| JCERTIFLab/jcertif-android-2013 | src/main/java/com/jcertif/android/fragments/SessionListFragment.java | Java | apache-2.0 | 12,340 |
/**
* Copyright 2014 Jordan Zimmerman
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.soabase.core.features.attributes;
import io.soabase.core.listening.Listenable;
import java.util.Collection;
/**
* Gives access to dynamic attributes. The various get methods return
* the current value for the given key after applying overrides and scopes, etc.
* Always call the methods to get the current value as it may change during runtime.
*/
public interface DynamicAttributes
{
    /** Returns the current value for the given key (no-default variant). */
    public String getAttribute(String key);

    /** Returns the current value for the given key, or {@code defaultValue} if absent. */
    public String getAttribute(String key, String defaultValue);

    /** Returns the current value for the given key interpreted as a boolean. */
    public boolean getAttributeBoolean(String key);

    /** Boolean variant with an explicit fallback when the key is absent. */
    public boolean getAttributeBoolean(String key, boolean defaultValue);

    /** Returns the current value for the given key interpreted as an int. */
    public int getAttributeInt(String key);

    /** Int variant with an explicit fallback when the key is absent. */
    public int getAttributeInt(String key, int defaultValue);

    /** Returns the current value for the given key interpreted as a long. */
    public long getAttributeLong(String key);

    /** Long variant with an explicit fallback when the key is absent. */
    public long getAttributeLong(String key, long defaultValue);

    /** Returns the current value for the given key interpreted as a double. */
    public double getAttributeDouble(String key);

    /** Double variant with an explicit fallback when the key is absent. */
    public double getAttributeDouble(String key, double defaultValue);

    /** Temporarily overrides the value for {@code key} until removed. */
    public void temporaryOverride(String key, boolean value);

    public void temporaryOverride(String key, int value);

    public void temporaryOverride(String key, long value);

    public void temporaryOverride(String key, double value);

    public void temporaryOverride(String key, String value);

    /** Removes a temporary override; returns whether an override was present. */
    public boolean removeOverride(String key);

    /** Returns the set of currently known attribute keys. */
    public Collection<String> getKeys();

    /** Listener registry for attribute-change notifications. */
    public Listenable<DynamicAttributeListener> getListenable();
}
| soabase/soabase | soabase-core/src/main/java/io/soabase/core/features/attributes/DynamicAttributes.java | Java | apache-2.0 | 2,045 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {
AfterContentInit,
ChangeDetectionStrategy,
Component,
ElementRef,
EventEmitter,
Input,
OnInit,
Output,
ViewChild
} from '@angular/core';
import * as d3 from 'd3';
import { select, Selection } from 'd3-selection';
import { zoom, ZoomBehavior } from 'd3-zoom';
import { SafeAny } from 'interfaces';
@Component({
  selector: 'flink-svg-container',
  templateUrl: './svg-container.component.html',
  styleUrls: ['./svg-container.component.less'],
  changeDetection: ChangeDetectionStrategy.OnPush
})
export class SvgContainerComponent implements OnInit, AfterContentInit {
  /** Current zoom factor, mirrored from the d3 zoom transform's scale (k). */
  zoom = 1;
  /** Width of the host's parent element, measured in ngAfterContentInit. */
  width: number;
  /** Height of the host's parent element, measured in ngAfterContentInit. */
  height: number;
  /** SVG `transform` attribute string derived from the d3 zoom transform. */
  transform = 'translate(0, 0) scale(1)';
  /** Latest d3 zoom transform: x/y translation plus scale factor k. */
  containerTransform = { x: 0, y: 0, k: 1 };
  /** d3 selection wrapping the outer SVG container element. */
  svgSelect: Selection<SafeAny, SafeAny, SafeAny, SafeAny>;
  /** d3 zoom behavior installed on the SVG container in ngOnInit. */
  zoomController: ZoomBehavior<SafeAny, SafeAny>;
  @ViewChild('svgContainer', { static: true }) svgContainer: ElementRef<SVGAElement>;
  @ViewChild('svgInner', { static: true }) svgInner: ElementRef<SVGAElement>;
  /** Maximum allowed zoom scale. */
  @Input() nzMaxZoom = 5;
  /** Minimum allowed zoom scale. */
  @Input() nzMinZoom = 0.1;
  /** Background click events — presumably emitted by the template; not emitted in this class. */
  @Output() clickBgEvent: EventEmitter<MouseEvent> = new EventEmitter();
  /** Emits the new zoom factor after every zoom/pan change. */
  @Output() zoomEvent: EventEmitter<number> = new EventEmitter();
  /**
   * Emits the full transform after every zoom/pan change.
   * NOTE(review): the emitted object is the d3 transform {x, y, k} cast to the
   * declared {x, y, scale} shape — `k` vs `scale` mismatch; confirm consumers.
   */
  @Output() transformEvent: EventEmitter<{ x: number; y: number; scale: number }> = new EventEmitter();
  /**
   * Zoom to spec level
   *
   * @param zoomLevel target scale factor (clamped by d3 to [nzMinZoom, nzMaxZoom])
   */
  zoomTo(zoomLevel: number): void {
    this.svgSelect
      .transition()
      .duration(0)
      .call(this.zoomController.scaleTo, zoomLevel);
  }
  /**
   * Set transform position
   *
   * @param transform target d3 zoom transform (x/y translation, k scale)
   * @param animate   when true, transition over 500ms instead of jumping
   */
  setPositionByTransform(transform: { x: number; y: number; k: number }, animate = false): void {
    this.svgSelect
      .transition()
      .duration(animate ? 500 : 0)
      .call(this.zoomController.transform, transform);
  }
  constructor(private el: ElementRef) {}
  ngOnInit(): void {
    this.svgSelect = select(this.svgContainer.nativeElement);
    // d3.event is the d3 v5-style global event object (removed in d3 v6+).
    this.zoomController = zoom()
      .scaleExtent([this.nzMinZoom, this.nzMaxZoom])
      .on('zoom', () => {
        this.containerTransform = d3.event.transform;
        this.zoom = this.containerTransform.k;
        // Guard against a transient NaN translation during gesture handling.
        if (!isNaN(this.containerTransform.x)) {
          this.transform = `translate(${this.containerTransform.x} ,${this.containerTransform.y})scale(${this.containerTransform.k})`;
        }
        this.zoomEvent.emit(this.zoom);
        this.transformEvent.emit(this.containerTransform as SafeAny);
      });
    // Remove the wheel handler so scrolling does not zoom the diagram.
    this.svgSelect.call(this.zoomController).on('wheel.zoom', null);
  }
  ngAfterContentInit(): void {
    // Size the SVG to its parent once the projected content is in place.
    const hostElem = this.el.nativeElement;
    if (hostElem.parentNode !== null) {
      const dims = hostElem.parentNode.getBoundingClientRect();
      this.width = dims.width;
      this.height = dims.height;
      this.zoomTo(this.zoom);
    }
  }
}
| StephanEwen/incubator-flink | flink-runtime-web/web-dashboard/src/app/share/common/dagre/svg-container.component.ts | TypeScript | apache-2.0 | 3,657 |
/*
* Copyright © 2009 HotPads (admin@hotpads.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.datarouter.instrumentation.trace;
import java.time.Instant;
import java.util.Objects;
import java.util.Optional;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import java.util.regex.Pattern;
/**
 * Value object for a W3C trace-context style {@code traceparent} header:
 * four dash-separated lowercase-hex fields — version, trace id (32 hex
 * chars), parent id (16 hex chars) and trace flags (2 hex chars).
 */
public class Traceparent{

	private static final Pattern TRACEPARENT_PATTERN = Pattern.compile(
			"^[0-9a-f]{2}-[0-9a-f]{32}-[0-9a-f]{16}-[0-9a-f]{2}$");
	private static final String TRACEPARENT_DELIMITER = "-";
	// 2 + 32 + 16 + 2 hex chars plus 3 delimiters = 55.
	private static final Integer MIN_CHARS_TRACEPARENT = 55;
	private static final String CURRENT_VERSION = "00";

	public static final int TRACE_ID_HEX_SIZE = 32;
	public static final int PARENT_ID_HEX_SIZE = 16;

	public final String version = CURRENT_VERSION;
	public final String traceId;
	public final String parentId;
	// Intentionally mutable: enableSample()/enableLog() flip flag bits in place.
	private String traceFlags;

	public Traceparent(String traceId, String parentId, String traceFlags){
		this.traceId = traceId;
		this.parentId = parentId;
		this.traceFlags = traceFlags;
	}

	public Traceparent(String traceId){
		this(traceId, createNewParentId());
	}

	public Traceparent(String traceId, String parentId){
		this(traceId, parentId, createDefaultTraceFlag());
	}

	public static Traceparent generateNew(long createdTimestamp){
		return new Traceparent(createNewTraceId(createdTimestamp), createNewParentId(),
				createDefaultTraceFlag());
	}

	public static Traceparent generateNewWithCurrentTimeInNs(){
		return new Traceparent(createNewTraceId(Trace2Dto.getCurrentTimeInNs()), createNewParentId(),
				createDefaultTraceFlag());
	}

	/** Returns a copy with the same trace id and flags but a freshly generated parent id. */
	public Traceparent updateParentId(){
		return new Traceparent(traceId, createNewParentId(), traceFlags);
	}

	/*
	 * TraceId is a 32 hex digit String. We convert the root request created unix time into lowercase base16
	 * and append it with a randomly generated long lowercase base16 representation.
	 * ThreadLocalRandom avoids allocating and seeding a new Random per generated id.
	 * */
	private static String createNewTraceId(long createdTimestamp){
		return String.format("%016x", createdTimestamp)
				+ String.format("%016x", ThreadLocalRandom.current().nextLong());
	}

	/*
	 * ParentId is a 16 hex digit String: a random long in lowercase base16 representation.
	 * */
	public static String createNewParentId(){
		return String.format("%016x", ThreadLocalRandom.current().nextLong());
	}

	/** Extracts the creation timestamp (ms) embedded in the first 16 hex chars of the trace id. */
	public long getTimestampInMs(){
		return Long.parseLong(traceId.substring(0, 16), 16);
	}

	public Instant getInstant(){
		return Instant.ofEpochMilli(getTimestampInMs());
	}

	/*----------- trace flags ------------*/

	private static String createDefaultTraceFlag(){
		return TraceContextFlagMask.DEFAULT.toHexCode();
	}

	public void enableSample(){
		this.traceFlags = TraceContextFlagMask.enableTrace(traceFlags);
	}

	public void enableLog(){
		this.traceFlags = TraceContextFlagMask.enableLog(traceFlags);
	}

	public boolean shouldSample(){
		return TraceContextFlagMask.isTraceEnabled(traceFlags);
	}

	public boolean shouldLog(){
		return TraceContextFlagMask.isLogEnabled(traceFlags);
	}

	@Override
	public String toString(){
		return String.join(TRACEPARENT_DELIMITER, version, traceId, parentId, traceFlags);
	}

	@Override
	public boolean equals(Object obj){
		if(!(obj instanceof Traceparent)){
			return false;
		}
		Traceparent other = (Traceparent)obj;
		return Objects.equals(version, other.version)
				&& Objects.equals(traceId, other.traceId)
				&& Objects.equals(parentId, other.parentId)
				&& Objects.equals(traceFlags, other.traceFlags);
	}

	@Override
	public int hashCode(){
		return Objects.hash(version, traceId, parentId, traceFlags);
	}

	/**
	 * Parses a serialized traceparent string.
	 *
	 * @return empty on null, empty, too-short or malformed input, or when the
	 *         version field is not "00"; otherwise the parsed Traceparent.
	 */
	public static Optional<Traceparent> parse(String traceparentStr){
		if(traceparentStr == null || traceparentStr.isEmpty()){
			return Optional.empty();
		}else if(traceparentStr.length() < MIN_CHARS_TRACEPARENT){
			return Optional.empty();
		}else if(!TRACEPARENT_PATTERN.matcher(traceparentStr).matches()){
			return Optional.empty();
		}
		String[] tokens = traceparentStr.split(Traceparent.TRACEPARENT_DELIMITER);
		if(!Traceparent.CURRENT_VERSION.equals(tokens[0])){
			return Optional.empty();
		}
		return Optional.of(new Traceparent(tokens[1], tokens[2], tokens[3]));
	}
}
| hotpads/datarouter | datarouter-instrumentation/src/main/java/io/datarouter/instrumentation/trace/Traceparent.java | Java | apache-2.0 | 4,663 |
package com.bzu.yhd.pocketcampus.bottomnav.user.view;
import android.content.Context;
import android.util.AttributeSet;
import com.facebook.rebound.SimpleSpringListener;
import com.facebook.rebound.Spring;
import com.facebook.rebound.SpringSystem;
import de.hdodenhof.circleimageview.CircleImageView;
/**
 * Created by xmuSistone.
 *
 * A CircleImageView whose screen position is driven by two Rebound springs,
 * one per axis. A view can also trail another view by registering the
 * follower listeners on the leading view's springs.
 */
public class AnimateImageView extends CircleImageView {

    private Spring springX;
    private Spring springY;

    // Installed on a leading view's springs: they feed the leader's current
    // position into this view's springs as a new end value, producing the
    // trailing "chain" effect.
    private SimpleSpringListener followerListenerX;
    private SimpleSpringListener followerListenerY;

    public AnimateImageView(Context context) {
        this(context, null);
    }

    public AnimateImageView(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public AnimateImageView(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);

        SpringSystem springSystem = SpringSystem.create();
        springX = springSystem.createSpring();
        springY = springSystem.createSpring();

        // Move this view whenever its own springs advance.
        springX.addListener(new SimpleSpringListener() {
            @Override
            public void onSpringUpdate(Spring spring) {
                setScreenX((int) spring.getCurrentValue());
            }
        });
        springY.addListener(new SimpleSpringListener() {
            @Override
            public void onSpringUpdate(Spring spring) {
                setScreenY((int) spring.getCurrentValue());
            }
        });

        followerListenerX = new SimpleSpringListener() {
            @Override
            public void onSpringUpdate(Spring spring) {
                springX.setEndValue((int) spring.getCurrentValue());
            }
        };
        followerListenerY = new SimpleSpringListener() {
            @Override
            public void onSpringUpdate(Spring spring) {
                springY.setEndValue((int) spring.getCurrentValue());
            }
        };
    }

    /** Shifts the view horizontally so its left edge sits at the given screen x. */
    private void setScreenX(int screenX) {
        offsetLeftAndRight(screenX - getLeft());
    }

    /** Shifts the view vertically so its top edge sits at the given screen y. */
    private void setScreenY(int screenY) {
        offsetTopAndBottom(screenY - getTop());
    }

    /** Animates the view towards the given screen position via the springs. */
    public void animTo(int xPos, int yPos) {
        springX.setEndValue(xPos);
        springY.setEndValue(yPos);
    }

    /**
     * Force-stops any running spring animation (used for the topmost image).
     */
    public void stopAnimation() {
        springX.setAtRest();
        springY.setAtRest();
    }

    /**
     * Called only for the topmost view: on touch release, snap the springs to
     * the view's current position and animate back to the given origin.
     */
    public void onRelease(int xPos, int yPos) {
        setCurrentSpringPos(getLeft(), getTop());
        animTo(xPos, yPos);
    }

    /**
     * Sets the springs' current position without animating.
     */
    public void setCurrentSpringPos(int xPos, int yPos) {
        springX.setCurrentValue(xPos);
        springY.setCurrentValue(yPos);
    }

    public Spring getSpringX() {
        return springX;
    }

    public Spring getSpringY() {
        return springY;
    }

    public SimpleSpringListener getFollowerListenerX() {
        return followerListenerX;
    }

    public SimpleSpringListener getFollowerListenerY() {
        return followerListenerY;
    }
}
| GolvenH/PocketCampus | app/src/main/java/com/bzu/yhd/pocketcampus/bottomnav/user/view/AnimateImageView.java | Java | apache-2.0 | 3,350 |
import Vue from 'vue'
import { hasFetch, normalizeError, addLifecycleHook } from '../utils'
const isSsrHydration = (vm) => vm.$vnode && vm.$vnode.elm && vm.$vnode.elm.dataset && vm.$vnode.elm.dataset.fetchKey
const nuxtState = window.<%= globals.context %>
export default {
  beforeCreate () {
    // Only instrument components that declare a fetch() option.
    if (!hasFetch(this)) {
      return
    }
    // Minimum visible duration (ms) of the pending state; defaults to 200.
    this._fetchDelay = typeof this.$options.fetchDelay === 'number' ? this.$options.fetchDelay : 200
    // Reactive state consumed by templates: $fetchState.pending/error/timestamp.
    Vue.util.defineReactive(this, '$fetchState', {
      pending: false,
      error: null,
      timestamp: Date.now()
    })
    // Bind so users can call this.$fetch() to re-run their fetch() option.
    this.$fetch = $fetch.bind(this)
    addLifecycleHook(this, 'created', created)
    addLifecycleHook(this, 'beforeMount', beforeMount)
  }
}
// Lifecycle hook: trigger the first client-side fetch, unless this component
// was already hydrated from the server-rendered payload.
function beforeMount() {
  if (this._hydrated) {
    return
  }
  return this.$fetch()
}
// Lifecycle hook: restore fetch() results from the SSR payload, keyed by the
// data-fetch-key attribute stamped on the server-rendered element.
function created() {
  if (!isSsrHydration(this)) {
    return
  }

  // Hydrate component
  this._hydrated = true
  this._fetchKey = +this.$vnode.elm.dataset.fetchKey
  const data = nuxtState.fetch[this._fetchKey]

  // If fetch error
  if (data && data._error) {
    this.$fetchState.error = data._error
    return
  }

  // Merge data
  // Vue.set keeps the restored keys reactive on this component's $data.
  for (const key in data) {
    Vue.set(this.$data, key, data[key])
  }
}
// Runs the component's fetch() option, tracking pending/error state on
// $fetchState. Bound to the component instance in beforeCreate.
async function $fetch() {
  // Global counter of in-flight fetch() calls on the Nuxt root instance.
  this.$nuxt.nbFetching++
  this.$fetchState.pending = true
  this.$fetchState.error = null
  this._hydrated = false
  let error = null
  const startTime = Date.now()

  try {
    await this.$options.fetch.call(this)
  } catch (err) {
    // Normalize so the error is serializable/renderable by templates.
    error = normalizeError(err)
  }

  // Keep the pending state visible for at least _fetchDelay ms (anti-flicker).
  const delayLeft = this._fetchDelay - (Date.now() - startTime)
  if (delayLeft > 0) {
    await new Promise(resolve => setTimeout(resolve, delayLeft))
  }

  this.$fetchState.error = error
  this.$fetchState.pending = false
  this.$fetchState.timestamp = Date.now()

  // Decrement after the re-render triggered by the state change above.
  this.$nextTick(() => this.$nuxt.nbFetching--)
}
| BigBoss424/portfolio | v6/node_modules/@nuxt/vue-app-edge/template/mixins/fetch.client.js | JavaScript | apache-2.0 | 1,830 |
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.simplesystemsmanagement.model.transform;
import static com.amazonaws.util.StringUtils.UTF8;
import static com.amazonaws.util.StringUtils.COMMA_SEPARATOR;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.List;
import java.util.regex.Pattern;
import com.amazonaws.AmazonClientException;
import com.amazonaws.Request;
import com.amazonaws.DefaultRequest;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.simplesystemsmanagement.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.util.BinaryUtils;
import com.amazonaws.util.StringUtils;
import com.amazonaws.util.StringInputStream;
import com.amazonaws.util.json.*;
/**
 * DeleteAssociationRequest Marshaller.
 *
 * Generated code (see file header): converts a DeleteAssociationRequest into
 * an HTTP POST carrying an x-amz-json-1.1 payload with the AmazonSSM
 * DeleteAssociation target header.
 */
public class DeleteAssociationRequestMarshaller implements
        Marshaller<Request<DeleteAssociationRequest>, DeleteAssociationRequest> {

    public Request<DeleteAssociationRequest> marshall(
            DeleteAssociationRequest deleteAssociationRequest) {

        // Fail fast on a null request instead of NPE-ing mid-marshal.
        if (deleteAssociationRequest == null) {
            throw new AmazonClientException(
                    "Invalid argument passed to marshall(...)");
        }

        Request<DeleteAssociationRequest> request = new DefaultRequest<DeleteAssociationRequest>(
                deleteAssociationRequest, "AWSSimpleSystemsManagement");
        // Operation is selected via the X-Amz-Target header, not the path.
        request.addHeader("X-Amz-Target", "AmazonSSM.DeleteAssociation");

        request.setHttpMethod(HttpMethodName.POST);

        request.setResourcePath("");

        try {
            StringWriter stringWriter = new StringWriter();
            JSONWriter jsonWriter = new JSONWriter(stringWriter);

            jsonWriter.object();

            // Only non-null members are serialized into the JSON body.
            if (deleteAssociationRequest.getName() != null) {
                jsonWriter.key("Name")
                        .value(deleteAssociationRequest.getName());
            }

            if (deleteAssociationRequest.getInstanceId() != null) {
                jsonWriter.key("InstanceId").value(
                        deleteAssociationRequest.getInstanceId());
            }

            jsonWriter.endObject();

            String snippet = stringWriter.toString();
            // Content-Length is computed from the UTF-8 byte count, not the
            // character count, so multi-byte content is sized correctly.
            byte[] content = snippet.getBytes(UTF8);
            request.setContent(new StringInputStream(snippet));
            request.addHeader("Content-Length",
                    Integer.toString(content.length));
            request.addHeader("Content-Type", "application/x-amz-json-1.1");
        } catch (Throwable t) {
            throw new AmazonClientException(
                    "Unable to marshall request to JSON: " + t.getMessage(), t);
        }

        return request;
    }

}
| trasa/aws-sdk-java | aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/transform/DeleteAssociationRequestMarshaller.java | Java | apache-2.0 | 3,452 |
# Encoding: utf-8
#
# Author:: api.dklimkin@gmail.com (Danial Klimkin)
#
# Copyright:: Copyright 2012, Google Inc. All Rights Reserved.
#
# License:: Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This class extracts data received from Savon and enriches it: SOAP output is
# walked against the WSDL type signatures held in a registry, and leaf values
# are converted to native Ruby types (Integer / Float / boolean).
module AdsCommon
  class ResultsExtractor

    # Instance initializer.
    #
    # Args:
    # - registry: a registry that defines service
    #
    def initialize(registry)
      @registry = registry
    end

    # Extracts the finest results possible for the given result. Returns the
    # response itself in worst case (contents unknown).
    def extract_result(response, action_name)
      method = @registry.get_method_signature(action_name)
      action = method[:output][:name].to_sym
      result = response.to_hash
      result = result[action] if result.include?(action)
      result = normalize_output(result, method)
      # Most operations wrap their payload in :rval; fall back to the whole
      # normalized result when that key is absent.
      return result[:rval] || result
    end

    # Extracts misc data from response header.
    def extract_header_data(response)
      header_type = get_full_type_signature(:SoapResponseHeader)
      headers = response.header[:response_header].dup
      process_attributes(headers, false)
      headers = normalize_fields(headers, header_type[:fields])
      return headers
    end

    # Extracts misc data from SOAP fault.
    def extract_exception_data(soap_fault, exception_name)
      exception_type = get_full_type_signature(exception_name)
      process_attributes(soap_fault, false)
      soap_fault = normalize_fields(soap_fault, exception_type[:fields])
      return soap_fault
    end

    private

    # Normalizes output starting with root output node.
    def normalize_output(output_data, method_definition)
      fields = method_definition[:output][:fields]
      return normalize_fields(output_data, fields)
    end

    # Normalizes all fields for the given data based on the fields list
    # provided.
    def normalize_fields(data, fields)
      fields.each do |field|
        field_name = field[:name]
        if data.include?(field_name)
          field_data = data[field_name]
          field_data = normalize_output_field(field_data, field)
          field_data = check_array_collapse(field_data, field)
          data[field_name] = field_data unless field_data.nil?
        end
      end
      return data
    end

    # Normalizes one field of a given data recursively.
    #
    # Args:
    # - field_data: XML data to normalize
    # - field_def: field type definition for the data
    #
    def normalize_output_field(field_data, field_def)
      return case field_data
        when Array
          normalize_array_field(field_data, field_def)
        when Hash
          normalize_hash_field(field_data, field_def)
        else
          normalize_item(field_data, field_def)
      end
    end

    # Normalizes every item of an Array.
    def normalize_array_field(data, field_def)
      return data.map {|item| normalize_output_field(item, field_def)}
    end

    # Normalizes every item of a Hash.
    def normalize_hash_field(field, field_def)
      process_attributes(field, true)
      field_type = field_def[:type]
      field_def = get_full_type_signature(field_type)

      # First checking for xsi:type provided.
      xsi_type_override = determine_xsi_type_override(field, field_def)
      unless xsi_type_override.nil?
        field_def = get_full_type_signature(xsi_type_override)
        return (field_def.nil?) ? field :
            normalize_fields(field, field_def[:fields])
      end

      # Now checking for choice options from wsdl.
      choice_type_override = determine_choice_type_override(field, field_def)
      unless choice_type_override.nil?
        # For overrides we need to process the sub-field and then return it
        # in the original structure.
        field_key = field.keys.first
        field_data = field[field_key]
        field_def = get_full_type_signature(choice_type_override)
        if !field_def.nil? and field_data.kind_of?(Hash)
          field_data = normalize_fields(field_data, field_def[:fields])
        end
        return {field_key => field_data}
      end

      # Otherwise using the best we have.
      field = normalize_fields(field, field_def[:fields]) unless field_def.nil?
      return field
    end

    # Determines an xsi:type override for the field. Returns nil if no
    # override found.
    def determine_xsi_type_override(field_data, field_def)
      result = nil
      if field_data.kind_of?(Hash) and field_data.include?(:xsi_type)
        result = field_data[:xsi_type]
      end
      return result
    end

    # Determines a choice type override for the field. Returns nil if no
    # override found.
    def determine_choice_type_override(field_data, field_def)
      result = nil
      if field_data.kind_of?(Hash) and field_def.include?(:choices)
        result = determine_choice(field_data, field_def[:choices])
      end
      return result
    end

    # Finds the choice option matching data provided.
    def determine_choice(field_data, field_choices)
      result = nil
      key_name = field_data.keys.first
      unless key_name.nil?
        choice = find_named_entry(field_choices, key_name)
        result = choice[:type] unless choice.nil?
      end
      return result
    end

    # Finds an item in an Array based on its ':name' field.
    def find_named_entry(data_array, name)
      index = data_array.index {|item| name.eql?(item[:name])}
      return index.nil? ? nil : data_array[index]
    end

    # Converts one leaf item to a built-in type.
    def normalize_item(item, field_def)
      return case field_def[:type]
        when 'long', 'int' then Integer(item)
        when 'double', 'float' then Float(item)
        when 'boolean' then item.kind_of?(String) ?
            item.casecmp('true') == 0 : item
        else item
      end
    end

    # Checks if the field signature allows an array and forces array structure
    # even for a single item.
    def check_array_collapse(data, field_def)
      result = data
      if !field_def[:min_occurs].nil? and
          (field_def[:max_occurs] == :unbounded ||
              (!field_def[:max_occurs].nil? and field_def[:max_occurs] > 1))
        result = arrayize(result)
      end
      return result
    end

    # Makes sure object is an array.
    def arrayize(object)
      return [] if object.nil?
      return object.is_a?(Array) ? object : [object]
    end

    # Returns all inherited fields of superclasses for given type.
    def implode_parent(data_type)
      result = []
      if data_type[:base]
        parent_type = @registry.get_type_signature(data_type[:base])
        result += implode_parent(parent_type) unless parent_type.nil?
      end
      data_type[:fields].each do |field|
        # If the parent type includes a field with the same name, overwrite it.
        result.reject! {|parent_field| parent_field[:name].eql?(field[:name])}
        result << field
      end
      return result
    end

    # Returns type signature with all inherited fields.
    def get_full_type_signature(type_name)
      result = (type_name.nil?) ? nil : @registry.get_type_signature(type_name)
      result[:fields] = implode_parent(result) if result and result[:base]
      return result
    end

    # Handles attributes received from Savon: optionally keeps the xsi:type
    # attribute (renamed to :xsi_type), then drops all '@'-prefixed keys.
    def process_attributes(data, keep_xsi_type = false)
      if keep_xsi_type
        xsi_type = data.delete(:"@xsi:type")
        data[:xsi_type] = xsi_type if xsi_type
      end
      data.reject! {|key, value| key.to_s.start_with?('@')}
    end
  end
end
| voke/google-ads-common | lib/ads_common/results_extractor.rb | Ruby | apache-2.0 | 8,192 |
# -----------------------------------------------------------------------------
# Copyright * 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
import ee
import math
from cmt.mapclient_qt import addToMap
from cmt.util.miscUtilities import safe_get_info
import modis_utilities
'''
Contains implementations of several simple MODIS-based flood detection algorithms.
'''
#==============================================================
def dem_threshold(domain, b):
    '''Classify as water wherever the DEM elevation is below a configured level.'''
    height_level = float(domain.algorithm_params['dem_threshold'])
    dem_image = domain.get_dem().image
    # Relabel the thresholded elevation band as 'b1' for downstream consumers.
    return dem_image.lt(height_level).select(['elevation'], ['b1'])
#==============================================================
def evi(domain, b):
    '''Simple EVI based classifier.'''
    # Criterion 1: EVI <= 0.3 and (LSWI - EVI) >= 0.05.
    criterion_one = b['EVI'].lte(0.3).And(b['LSWI'].subtract(b['EVI']).gte(0.05)).select(['sur_refl_b02'], ['b1'])
    # Criterion 2: EVI <= 0.05 and LSWI <= 0.0.
    criterion_two = b['EVI'].lte(0.05).And(b['LSWI'].lte(0.0)).select(['sur_refl_b02'], ['b1'])
    # A pixel is flagged when either criterion holds.
    return criterion_one.Or(criterion_two)
def xiao(domain, b):
    '''Method from paper: Xiao, Boles, Frolking, et. al. Mapping paddy rice agriculture in South and Southeast Asia using
       multi-temporal MODIS images, Remote Sensing of Environment, 2006.

       A simple decision rule on standard MODIS products: water wherever
       (LSWI - NDVI) >= 0.05 or (LSWI - EVI) >= 0.05. The default constants
       were tuned for (wet) rice paddy detection.
    '''
    lswi_vs_ndvi = b['LSWI'].subtract(b['NDVI']).gte(0.05)
    lswi_vs_evi = b['LSWI'].subtract(b['EVI']).gte(0.05)
    return lswi_vs_ndvi.Or(lswi_vs_evi).select(['sur_refl_b02'], ['b1'])
#==============================================================
def get_diff(b):
    '''Core of the difference index: band2 minus band1, relabeled as 'b1'.'''
    difference = b['b2'].subtract(b['b1'])
    return difference.select(['sur_refl_b02'], ['b1'])
def diff_learned(domain, b):
    '''modis_diff but with the threshold calculation included (training image required).

    Learns a binary threshold from the domain's unflooded training image and a
    permanent water mask, then applies modis_diff with it.  Returns None
    (after printing a warning) when no unflooded training domain is attached.
    '''
    if domain.unflooded_domain is None:  # 'is None' is the idiomatic check
        print('No unflooded training domain provided.')
        return None
    unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
    water_mask = modis_utilities.get_permanent_water_mask()
    threshold = modis_utilities.compute_binary_threshold(get_diff(unflooded_b), water_mask, domain.bounds)
    return modis_diff(domain, b, threshold)
def modis_diff(domain, b, threshold=None):
    '''Compute (b2-b1) <= threshold, a simple water detection index.

    This method may be all that is needed in cases where the threshold can be
    hand tuned.

    Args:
        domain: Domain object; consulted for algorithm_params only when no
            threshold is passed in.
        b: Dict of MODIS band/index images.
        threshold: Optional manual threshold; when None it is read from
            domain.algorithm_params['modis_diff_threshold'].
    '''
    if threshold is None:  # If no threshold value passed in, load it based on the data set.
        threshold = float(domain.algorithm_params['modis_diff_threshold'])
    return get_diff(b).lte(threshold)
#==============================================================
def get_dartmouth(b):
    '''Dartmouth ratio index: (b2 + 500) / (b1 + 2500), relabeled as 'b1'.'''
    numerator = b['b2'].add(500)
    denominator = b['b1'].add(2500)
    return numerator.divide(denominator).select(['sur_refl_b02'], ['b1'])
def dart_learned(domain, b):
    '''The dartmouth method but with threshold calculation included (training image required).

    Learns the threshold from the domain's unflooded training image and a
    permanent water mask.  Returns None (after printing a warning) when no
    unflooded training domain is attached.
    '''
    if domain.unflooded_domain is None:  # 'is None' is the idiomatic check
        print('No unflooded training domain provided.')
        return None
    unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
    water_mask = modis_utilities.get_permanent_water_mask()
    threshold = modis_utilities.compute_binary_threshold(get_dartmouth(unflooded_b), water_mask, domain.bounds)
    return dartmouth(domain, b, threshold)
def dartmouth(domain, b, threshold=None):
    '''A flood detection method from the Dartmouth Flood Observatory.

    This method is a refinement of the simple b2-b1 detection method: it
    thresholds the damped ratio computed by get_dartmouth.  Pixels at or
    below the threshold are classified as water.  When no threshold is
    given it is read from domain.algorithm_params['dartmouth_threshold'].
    '''
    if threshold is None:  # 'is None' is the idiomatic check
        threshold = float(domain.algorithm_params['dartmouth_threshold'])
    return get_dartmouth(b).lte(threshold)
#==============================================================
def get_mod_ndwi(b):
    '''Normalized-difference ratio (b6 - b4) / (b4 + b6), relabeled as 'b1'.'''
    difference = b['b6'].subtract(b['b4'])
    total = b['b4'].add(b['b6'])
    return difference.divide(total).select(['sur_refl_b06'], ['b1'])
def mod_ndwi_learned(domain, b):
    '''mod_ndwi but with the threshold learned from an unflooded training image.

    Returns None (after printing a warning) when no unflooded training domain
    is attached.
    '''
    if domain.unflooded_domain is None:  # 'is None' is the idiomatic check
        print('No unflooded training domain provided.')
        return None
    unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
    water_mask = modis_utilities.get_permanent_water_mask()
    threshold = modis_utilities.compute_binary_threshold(get_mod_ndwi(unflooded_b), water_mask, domain.bounds)
    return mod_ndwi(domain, b, threshold)
def mod_ndwi(domain, b, threshold=None):
    '''Threshold classifier on the (b6 - b4) / (b4 + b6) ratio from get_mod_ndwi.

    Pixels at or below the threshold are classified as water.  When no
    threshold is given it is read from
    domain.algorithm_params['mod_ndwi_threshold'].
    '''
    if threshold is None:  # 'is None' is the idiomatic check
        threshold = float(domain.algorithm_params['mod_ndwi_threshold'])
    return get_mod_ndwi(b).lte(threshold)
#==============================================================
def get_fai(b):
    '''Core of the Floating Algae Index: b2 minus a b1->b5 linear baseline.'''
    # Interpolation factor for the 859nm position between the 645/1240 bands.
    slope = (859.0 - 645) / (1240 - 645)
    baseline = b['b1'].add(b['b5'].subtract(b['b1']).multiply(slope))
    return b['b2'].subtract(baseline).select(['sur_refl_b02'], ['b1'])
def fai_learned(domain, b):
    '''fai but with the threshold learned from an unflooded training image.

    Returns None (after printing a warning) when no unflooded training domain
    is attached.
    '''
    if domain.unflooded_domain is None:  # 'is None' is the idiomatic check
        print('No unflooded training domain provided.')
        return None
    unflooded_b = modis_utilities.compute_modis_indices(domain.unflooded_domain)
    water_mask = modis_utilities.get_permanent_water_mask()
    threshold = modis_utilities.compute_binary_threshold(get_fai(unflooded_b), water_mask, domain.bounds)
    return fai(domain, b, threshold)
def fai(domain, b, threshold=None):
    ''' Floating Algae Index. Method from paper: Feng, Hu, Chen, Cai, Tian, Gan,
    Assessment of inundation changes of Poyang Lake using MODIS observations
    between 2000 and 2010. Remote Sensing of Environment, 2012.

    Pixels with FAI at or below the threshold are classified as water.  When
    no threshold is given it is read from
    domain.algorithm_params['fai_threshold'].
    '''
    if threshold is None:  # 'is None' is the idiomatic check
        threshold = float(domain.algorithm_params['fai_threshold'])
    return get_fai(b).lte(threshold)
| nasa/CrisisMappingToolkit | cmt/modis/simple_modis_algorithms.py | Python | apache-2.0 | 6,672 |
package com.qmx.wxmp.common.web;
/**
 * MediaType constants that include an explicit UTF-8 charset.
 *
 * Jax-RS and Spring's MediaType classes offer no UTF-8 variants, and Google's
 * MediaType values require a toString() call rather than being constants, so
 * they cannot be used in Restful method annotations.
 *
 * @author free lance
 */
public final class MediaTypes {

	public static final String APPLICATION_XML = "application/xml";
	public static final String APPLICATION_XML_UTF_8 = "application/xml; charset=UTF-8";

	public static final String JSON = "application/json";
	public static final String JSON_UTF_8 = "application/json; charset=UTF-8";

	public static final String JAVASCRIPT = "application/javascript";
	public static final String JAVASCRIPT_UTF_8 = "application/javascript; charset=UTF-8";

	public static final String APPLICATION_XHTML_XML = "application/xhtml+xml";
	public static final String APPLICATION_XHTML_XML_UTF_8 = "application/xhtml+xml; charset=UTF-8";

	public static final String TEXT_PLAIN = "text/plain";
	public static final String TEXT_PLAIN_UTF_8 = "text/plain; charset=UTF-8";

	public static final String TEXT_XML = "text/xml";
	public static final String TEXT_XML_UTF_8 = "text/xml; charset=UTF-8";

	public static final String TEXT_HTML = "text/html";
	public static final String TEXT_HTML_UTF_8 = "text/html; charset=UTF-8";

	private MediaTypes() {
		// Constants-only holder; never instantiated.
	}
}
| lingyi2017/wxmp | src/main/java/com/qmx/wxmp/common/web/MediaTypes.java | Java | apache-2.0 | 1,306 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.directory.fortress.core.samples;
import org.apache.directory.fortress.core.DelAdminMgr;
import org.apache.directory.fortress.core.DelAdminMgrFactory;
import org.apache.directory.fortress.core.SecurityException;
import org.apache.directory.fortress.core.model.OrgUnit;
import org.apache.directory.fortress.core.impl.TestUtils;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* CreateUserOrgHierarchySample JUnit Test. This test program will show how to build a simple User OrgUnit hierarchy which are
* used to enable administrators to group Users by organizational structure. This system supports multiple
* inheritance between OrgUnits and there are no limits on how deep a hierarchy can be. The OrgUnits require name and type. Optionally can
* include a description. The User OrgUnit must be associated with Users and are used to provide Administratrive RBAC control
* over who may perform User Role assigns and deassigns in directory.
* @author <a href="mailto:dev@directory.apache.org">Apache Directory Project</a>
*/
public class CreateUserOrgHierarchySample extends TestCase
{
    private static final String CLS_NM = CreateUserOrgHierarchySample.class.getName();
    private static final Logger LOG = LoggerFactory.getLogger( CLS_NM );
    // This constant will be added to index for creation of multiple nodes in directory.
    public static final String TEST_HIER_USERORG_PREFIX = "sampleHierUserOrg";
    // Base org unit name: the prefix with index 1 appended.
    public static final String TEST_HIER_BASE_USERORG = "sampleHierUserOrg1";
    // Upper bound (exclusive) for the org-unit creation loops below.
    public static final int TEST_NUMBER = 6;
    // Name prefix for the descendant-shaped hierarchy test nodes.
    public static final String TEST_HIER_DESC_USERORG_PREFIX = "sampleHierUserOrgD";
    // Name prefix for the ascendant-shaped hierarchy test nodes.
    public static final String TEST_HIER_ASC_USERORG_PREFIX = "sampleHierUserOrgA";
    /**
     * Simple constructor kicks off JUnit test suite.
     *
     * @param name the name of the JUnit 3 test method to run
     */
    public CreateUserOrgHierarchySample(String name)
    {
        super(name);
    }
    /**
     * Run the User OrgUnit test cases.
     *
     * On the first run only the create cases execute; on subsequent runs the
     * delete cases run first to clear org units left by the previous run.
     *
     * @return Test
     */
    public static Test suite()
    {
        TestSuite suite = new TestSuite();
        if(!AllSamplesJUnitTest.isFirstRun())
        {
            // Tear down artifacts from a previous run before re-creating them.
            suite.addTest(new CreateUserOrgHierarchySample("testDeleteHierUserOrgs"));
            suite.addTest(new CreateUserOrgHierarchySample("testDeleteDescendantUserOrgs"));
            suite.addTest(new CreateUserOrgHierarchySample("testDeleteAscendantUserOrgs"));
        }
        suite.addTest(new CreateUserOrgHierarchySample("testCreateHierUserOrgs"));
        suite.addTest(new CreateUserOrgHierarchySample("testCreateDescendantUserOrgs"));
        suite.addTest(new CreateUserOrgHierarchySample("testCreateAscendantUserOrgs"));
        /*
        suite.addTest(new CreateUserOrgHierarchySample("testDeleteHierUserOrgs"));
        suite.addTest(new CreateUserOrgHierarchySample("testCreateHierUserOrgs"));
        suite.addTest(new CreateUserOrgHierarchySample("testDeleteDescendantUserOrgs"));
        suite.addTest(new CreateUserOrgHierarchySample("testCreateDescendantUserOrgs"));
        suite.addTest(new CreateUserOrgHierarchySample("testDeleteAscendantUserOrgs"));
        suite.addTest(new CreateUserOrgHierarchySample("testCreateAscendantUserOrgs"));
        */
        return suite;
    }
/**
* Remove the simple hierarchical OrgUnits from the directory. Before removal call the API to move the relationship
* between the parent and child OrgUnits. Once the relationship is removed the parent OrgUnit can be removed.
* User OrgUnit removal is not allowed (SecurityException will be thrown) if ou is assigned to Users in ldap.
* <p>
* <img src="./doc-files/HierUserOrgSimple.png" alt="">
*/
public static void testDeleteHierUserOrgs()
{
String szLocation = ".testDeleteHierUserOrgs";
if(AllSamplesJUnitTest.isFirstRun())
{
return;
}
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
for (int i = 1; i < TEST_NUMBER; i++)
{
// The key that must be set to locate any OrgUnit is simply the name and type.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + i, OrgUnit.Type.USER);
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + (i + 1), OrgUnit.Type.USER);
// Remove the relationship from the parent and child OrgUnit:
delAdminMgr.deleteInheritance(parentOrgUnit, childOrgUnit);
// Remove the parent OrgUnit from directory:
delAdminMgr.delete(parentOrgUnit);
}
// Remove the child OrgUnit from directory:
delAdminMgr.delete(new OrgUnit(TEST_HIER_USERORG_PREFIX + TEST_NUMBER, OrgUnit.Type.USER));
LOG.info(szLocation + " success");
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
/**
* Add a simple OrgUnit hierarchy to ldap. The OrgUnits will named to include a name,'sampleHierUserOrg', appended with the
* sequence of 1 - 6. 'sampleHierUserOrg1' is the root or highest level OrgUnit in the structure while sampleHierUserOrg6 is the lowest
* most child. Fortress OrgUnits may have multiple parents which is demonstrated in testCreateAscendantUserOrgs sample.
* <p>
* <img src="./doc-files/HierUserOrgSimple.png" alt="">
*/
public static void testCreateHierUserOrgs()
{
String szLocation = ".testCreateHierUserOrgs";
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
// Instantiate the root OrgUnit entity. OrgUnit requires name and type before addition.
OrgUnit baseOrgUnit = new OrgUnit(TEST_HIER_BASE_USERORG, OrgUnit.Type.USER);
// Add the root OrgUnit entity to the directory.
delAdminMgr.add(baseOrgUnit);
// Create User OrgUnits, 'sampleHierUserOrg2' - 'sampleHierUserOrg6'.
for (int i = 2; i < TEST_NUMBER + 1; i++)
{
// Instantiate the OrgUnit entity.
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + i, OrgUnit.Type.USER);
// Add the OrgUnit entity to the directory.
delAdminMgr.add(childOrgUnit);
// Instantiate the parent OrgUnit. The key is the name and type.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + (i - 1), OrgUnit.Type.USER);
// Add a relationship between the parent and child OrgUnits:
delAdminMgr.addInheritance(parentOrgUnit, childOrgUnit);
}
LOG.info(szLocation + " success");
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
/**
* Demonstrate teardown of a parent to child relationship of one-to-many. Each child must first remove the inheritance
* relationship with parent before being removed from ldap. The parent OrgUnit will be removed from ldap last.
* User OrgUnit removal is not allowed (SecurityException will be thrown) if ou is assigned to Users in ldap.
* <p>
* <img src="./doc-files/HierUserOrgDescendants.png" alt="">
*/
public static void testDeleteDescendantUserOrgs()
{
String szLocation = ".testDeleteDescendantUserOrgs";
if(AllSamplesJUnitTest.isFirstRun())
{
return;
}
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
// This parent has many children. They must be deleted before parent itself can.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + 1, OrgUnit.Type.USER);
// There are N User OrgUnits to process:
for (int i = 2; i < TEST_NUMBER + 1; i++)
{
// Instantiate the child OrgUnit entity. The key is the name and type.
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + i, OrgUnit.Type.USER);
// Remove the relationship from the parent and child OrgUnit:
delAdminMgr.deleteInheritance(parentOrgUnit, childOrgUnit);
// Remove the child OrgUnit from directory:
delAdminMgr.delete(childOrgUnit);
}
// Remove the parent OrgUnit from directory:
delAdminMgr.delete(parentOrgUnit);
LOG.info(szLocation + " success");
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
/**
* Demonstrate a parent to child OrgUnit structure of one-to-many. The parent OrgUnit must be created before
* the call to addDescendant which will Add a new OrgUnit node and set a OrgUnit relationship with parent node.
* <p>
* <img src="./doc-files/HierUserOrgDescendants.png" alt="">
*/
public static void testCreateDescendantUserOrgs()
{
String szLocation = ".testCreateDescendantUserOrgs";
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
// Instantiate the parent User OrgUnit entity. This needs a name and type before it can be added to ldap.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + 1, OrgUnit.Type.USER);
// This parent will have many children:
delAdminMgr.add(parentOrgUnit);
// Create User OrgUnits, 'sampleHierUserOrgD2' - 'sampleHierUserOrgD6'.
for (int i = 1; i < TEST_NUMBER; i++)
{
// Now add relationship to the directory between parent and child User OrgUnits.
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + (i + 1), OrgUnit.Type.USER);
// Now add child OrgUnit entity to directory and add relationship with existing parent OrgUnit.
delAdminMgr.addDescendant(parentOrgUnit, childOrgUnit);
}
LOG.info(szLocation + " success");
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
/**
* This example demonstrates tear down of a child to parent represented as one-to-many. The parents must all
* be removed from the child before the child can be removed.
* User OrgUnit removal is not allowed (SecurityException will be thrown) if ou is assigned to Users in ldap.
* <p>
* <img src="./doc-files/HierUserOrgAscendants.png" alt="">
*/
public static void testDeleteAscendantUserOrgs()
{
String szLocation = ".testDeleteAscendantUserOrgs";
if(AllSamplesJUnitTest.isFirstRun())
{
return;
}
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
// This child OrgUnit has many parents:
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + 1, OrgUnit.Type.USER);
for (int i = 2; i < TEST_NUMBER + 1; i++)
{
// Instantiate the parent. This needs a name and type before it can be used in operation.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + i, OrgUnit.Type.USER);
// Remove the relationship between parent and child OrgUnits:
delAdminMgr.deleteInheritance(parentOrgUnit, childOrgUnit);
// Remove the parent OrgUnit from directory:
delAdminMgr.delete(parentOrgUnit);
}
// Remove the child OrgUnit from directory:
delAdminMgr.delete(childOrgUnit);
LOG.info(szLocation + " success");
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
/**
* Demonstrate a child to parent OrgUnit structure of one-to-many. To use this API, the child OrgUnit must be created before
* the call to addAscendant which will Add a new OrgUnit node and set a OrgUnit relationship with child node.
* <p>
* <img src="./doc-files/HierUserOrgAscendants.png" alt="">
*/
public static void testCreateAscendantUserOrgs()
{
String szLocation = ".testCreateAscendantUserOrgs";
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
// Instantiate the child OrgUnit. This needs a name and type.
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + 1, OrgUnit.Type.USER);
// This child will have many parents:
delAdminMgr.add(childOrgUnit);
// Create OrgUnits, 'sampleHierUserOrgA2' - 'sampleHierUserOrgA6'.
for (int i = 1; i < TEST_NUMBER; i++)
{
// Instantiate the parent OrgUnit. This needs a name and type before it can be added to ldap.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + (i + 1), OrgUnit.Type.USER);
// Now add parent OrgUnit entity to directory and add relationship with existing child OrgUnit.
delAdminMgr.addAscendant(childOrgUnit, parentOrgUnit);
}
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
}
| PennState/directory-fortress-core-1 | src/test/java/org/apache/directory/fortress/core/samples/CreateUserOrgHierarchySample.java | Java | apache-2.0 | 15,806 |
//
// Copyright (c) 2014 Limit Point Systems, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package tools.viewer.user;
import tools.viewer.common.*;
import tools.viewer.render.*;
import tools.common.gui.*;
import java.awt.*;
import java.awt.event.*;
import java.util.*;
import javax.swing.*;
import javax.swing.event.*;
import javax.swing.border.*;
import java.text.*;
import vtk.*;
/**
* Implementation of <code>G3DFieldActorPropertiesPanel</code> for editing the
* values of a <code>HedgeHogFieldActorDescriptor</code>.
*/
public class HedgeHogFieldActorPropertiesPanel
    extends G3DFieldActorPropertiesPanel
{
  // CONSTANTS FACET

  // The vector modes offered by the combo box; values come from
  // ViewerConstants (magnitude-scaled vs. unit-length glyphs).
  protected static final String[] VECTOR_MODES =
    { ViewerConstants.VECTOR_MAGNITUDE,
      ViewerConstants.VECTOR_NORMAL };

  // GUI FACET

  // Panel holding the hedge-hog specific controls, added as a tab.
  protected JPanel hedgeHogPanel;
  // Spinner for the glyph scale factor (0.0 .. 10,000,000.0, step 0.01).
  protected JSpinner scaleFactorSpinner;
  // Combo box for choosing one of VECTOR_MODES.
  protected JComboBox vectorModeComboBox;

  // CONSTRUCTORS

  /**
   * Constructor.  Builds the "Hedge Hog" tab and initializes the controls
   * from the first descriptor in xdescriptors.
   *
   * @param xviewer the owning viewer
   * @param xdescriptors the actor descriptors being edited; all are updated
   *                     on apply, but the first one seeds the UI values
   */
  public HedgeHogFieldActorPropertiesPanel(G3DViewer xviewer,
                                           FieldActorDescriptor[] xdescriptors)
  {
    super(xviewer, xdescriptors);

    hedgeHogPanel = createHedgeHogPanel();
    tabbedPane.addTab("Hedge Hog", hedgeHogPanel);

    initValues();
  }

  // CREATE FACET

  /**
   * Create hedge hog panel.  Lays out the scale factor spinner and the
   * vector mode combo box vertically inside a titled border.
   *
   * @return the newly created panel
   */
  protected JPanel createHedgeHogPanel()
  {
    JPanel result = new JPanel();
    result.setLayout(new BoxLayout(result, BoxLayout.PAGE_AXIS));
    result.setBorder(
      BorderFactory.createCompoundBorder(
        BorderFactory.createEmptyBorder(6, 12, 6, 12),
        BorderFactory.createTitledBorder("Hedge Hog:")));

    //=====

    result.add(Box.createVerticalGlue());

    JPanel panel = new JPanel();
    JLabel scaleFactorLabel = new JLabel("Scale Factor: ", JLabel.RIGHT);
    scaleFactorLabel.setAlignmentX(Component.CENTER_ALIGNMENT);
    // Range chosen to allow very large magnifications; step of 0.01 for
    // fine-grained control.
    SpinnerModel scaleFactorModel = new SpinnerNumberModel(1.0, 0.0,
                                    10000000.0, 0.01);
    scaleFactorSpinner = new JSpinner(scaleFactorModel);
    panel.add(scaleFactorLabel);
    panel.add(scaleFactorSpinner);

    result.add(panel);

    result.add(Box.createVerticalGlue());

    //=====

    panel = new JPanel();
    JLabel vectorModeLabel = new JLabel("Vector Mode:", JLabel.RIGHT);
    vectorModeLabel.setAlignmentX(Component.CENTER_ALIGNMENT);
    vectorModeComboBox = new JComboBox(VECTOR_MODES);
    panel.add(vectorModeLabel);
    panel.add(vectorModeComboBox);

    result.add(panel);

    result.add(Box.createVerticalGlue());

    //=====

    return result;
  }

  // INITIALIZE FACET

  /**
   * Initializes the panel's controls from the first descriptor.
   */
  public void initValues()
  {
    super.initValues();

    // Use the first actor in the list to initialize the
    // user interface.
    HedgeHogFieldActorDescriptor actor =
      (HedgeHogFieldActorDescriptor) descriptors[0];

    initHedgeHogPanel(actor);
  }

  /**
   * Copies xactor's scale factor and vector mode into the controls.
   */
  protected void initHedgeHogPanel(HedgeHogFieldActorDescriptor actor)
  {
    scaleFactorSpinner.setValue(actor.scaleFactor);
    vectorModeComboBox.setSelectedItem(actor.vectorMode);
  }

  // APPLY FACET

  /**
   * Applies the panel's current values to every descriptor being edited.
   * Locks the viewer's script and scene while mutating the descriptors so
   * the render thread does not observe a partial update.
   */
  public void doApply()
  {
    // Set the wait state to true, it is restored by
    // UpdatePropertiesPanelEvent.
    setWaitState(true);

    synchronized (viewer.getScript())
    {
      synchronized (viewer.getScene())
      {
        // Apply the changed to the descriptors

        HedgeHogFieldActorDescriptor actor;
        for(int i=0; i<descriptors.length; i++)
        {
          actor = (HedgeHogFieldActorDescriptor) descriptors[i];

          applyHedgeHog(actor);
        }
      }
    }

    super.doApply(false);
  }

  /**
   * Writes the spinner and combo box values into xactor.
   */
  public void applyHedgeHog(HedgeHogFieldActorDescriptor actor)
  {
    actor.scaleFactor = ((SpinnerNumberModel)scaleFactorSpinner.getModel()).getNumber().doubleValue();
    actor.vectorMode = (String) vectorModeComboBox.getSelectedItem();
  }
}
| LimitPointSystems/SheafSystem | tools/viewer/user/HedgeHogFieldActorPropertiesPanel.java | Java | apache-2.0 | 4,423 |
/*!
* Copyright 2012 Sakai Foundation (SF) Licensed under the
* Educational Community License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.osedu.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS IS"
* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
require(['jquery', 'oae.core'], function($, oae) {

    // Get the group id from the URL. The expected URL is /group/<groupId>
    var groupId = document.location.pathname.split('/')[2];
    if (!groupId) {
        oae.api.util.redirect().login();
    }

    // Variable used to cache the requested user's profile
    var groupProfile = null;
    // Variable used to cache the group's base URL
    var baseUrl = '/group/' + groupId;

    /**
     * Get the group's basic profile and set up the screen. If the groups
     * can't be found or is private to the current user, the appropriate
     * error page will be shown
     */
    var getGroupProfile = function() {
        oae.api.group.getGroup(groupId, function(err, profile) {
            // NOTE(review): these redirect helpers navigate away, but execution
            // continues past them in this callback — confirm the subsequent code
            // tolerates an undefined `profile` in the error cases.
            if (err && err.code === 404) {
                oae.api.util.redirect().notfound();
            } else if (err && err.code === 401) {
                oae.api.util.redirect().accessdenied();
            }

            // Cache the profile and render the page chrome.
            groupProfile = profile;
            setUpClip();
            setUpNavigation();
            // Set the browser title
            oae.api.util.setBrowserTitle(groupProfile.displayName);
        });
    };

    // Widgets request the page context by triggering 'oae.context.get'; answer
    // with the cached group profile.
    $(document).on('oae.context.get', function() {
        $(document).trigger('oae.context.send', groupProfile);
    });

    // NOTE(review): this fires immediately, while groupProfile is still null —
    // presumably to flush listeners registered before this script ran; confirm
    // consumers handle a null context.
    $(document).trigger('oae.context.send', groupProfile);

    /**
     * Render the group's clip, containing the profile picture, display name as well as the
     * group's admin options
     */
    var setUpClip = function() {
        oae.api.util.template().render($('#group-clip-template'), {'group': groupProfile}, $('#group-clip-container'));

        // Only show the create and upload clips to managers
        if (groupProfile.isManager) {
            $('#group-actions').show();
        }
    };

    /**
     * Set up the left hand navigation with the me space page structure
     */
    var setUpNavigation = function() {
        // Structure that will be used to construct the left hand navigation:
        // three pages (recent activity, library, members), each hosting one
        // widget configured with the group id and the viewer's manager rights.
        var lhNavigation = [
            {
                'id': 'activity',
                'title': oae.api.i18n.translate('__MSG__RECENT_ACTIVITY__'),
                'icon': 'icon-dashboard',
                'layout': [
                    {
                        'width': 'span8',
                        'widgets': [
                            {
                                'id': 'activity',
                                'settings': {
                                    'principalId': groupProfile.id,
                                    'canManage': groupProfile.isManager
                                }
                            }
                        ]
                    }
                ]
            },
            {
                'id': 'library',
                'title': oae.api.i18n.translate('__MSG__LIBRARY__'),
                'icon': 'icon-briefcase',
                'layout': [
                    {
                        'width': 'span12',
                        'widgets': [
                            {
                                'id': 'library',
                                'settings': {
                                    'principalId': groupProfile.id,
                                    'canManage': groupProfile.isManager
                                }
                            }
                        ]
                    }
                ]
            },
            {
                'id': 'members',
                'title': oae.api.i18n.translate('__MSG__MEMBERS__'),
                'icon': 'icon-user',
                'layout': [
                    {
                        'width': 'span12',
                        'widgets': [
                            {
                                'id': 'participants',
                                'settings': {
                                    'principalId': groupProfile.id,
                                    'canManage': groupProfile.isManager
                                }
                            }
                        ]
                    }
                ]
            }
        ];

        // Hand the structure to the lhnavigation widget; re-send it whenever the
        // widget announces it is (re)ready.
        $(window).trigger('oae.trigger.lhnavigation', [lhNavigation, baseUrl]);
        $(window).on('oae.ready.lhnavigation', function() {
            $(window).trigger('oae.trigger.lhnavigation', [lhNavigation, baseUrl]);
        });
    };

    // Kick off the page load.
    getGroupProfile();

});
| Coenego/3akai-ux | ui/js/group.js | JavaScript | apache-2.0 | 5,033 |
package cn.oeaom.CoolWeather;
import android.content.Intent;
import android.content.SharedPreferences;
import android.graphics.Typeface;
import android.media.Image;
import android.os.Bundle;
import android.preference.PreferenceManager;
import android.support.design.widget.FloatingActionButton;
import android.support.design.widget.Snackbar;
import android.support.v4.view.GravityCompat;
import android.support.v4.widget.DrawerLayout;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import java.io.IOException;
import cn.oeaom.CoolWeather.GSON.Weather;
import cn.oeaom.CoolWeather.Util.Utility;
import okhttp3.Call;
import okhttp3.Callback;
import cn.oeaom.CoolWeather.Util.HttpUtil;
import okhttp3.Response;
public class WeatherActivity extends AppCompatActivity {

    private static final String TAG = "WeatherActivity";
    // Authentication key for the weather service.
    private static final String API_KEY = "bc0418b57b2d4918819d3974ac1285d9";

    // Views that make up the weather information panel.
    public DrawerLayout drawerLayout;   // drawer revealed by the home/nav button
    private TextView weatherTime;       // time of the weather report
    private TextView weatherDegree;     // temperature value
    private TextView measure2;          // temperature unit
    private TextView weatherPlace;      // place of the weather report
    private TextView weatherType;       // weather condition text
    private String mWeatherId;          // id of the selected city
    private ImageView weatherStat;      // weather condition icon

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_weather);

        // The clock font must be a true type font (ttf).  If the font does not
        // change on screen, the font is usually unsupported on this device
        // (Droid Sans is substituted) rather than the code being broken.
        Typeface fontFace = Typeface.createFromAsset(getAssets(), "fonts/AndroidClock.ttf");

        weatherTime = (TextView) findViewById(R.id.weather_info_time);
        weatherTime.setTypeface(fontFace);

        weatherDegree = (TextView) findViewById(R.id.degree_value);
        weatherDegree.setTypeface(fontFace);

        measure2 = (TextView) findViewById(R.id.degree_measure2);
        weatherPlace = (TextView) findViewById(R.id.weather_info_place);
        weatherType = (TextView) findViewById(R.id.weather_info_text);
        weatherStat = (ImageView) findViewById(R.id.weatherIcon);

        drawerLayout = (DrawerLayout) findViewById(R.id.drawer_layout);
        Button btnBack = (Button) findViewById(R.id.btn_home);
        btnBack.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                // Open the navigation drawer instead of finishing the activity.
                drawerLayout.openDrawer(GravityCompat.START);
                Log.v(TAG, "Clicked nav btn");
            }
        });

        SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(this);
        String weatherString = prefs.getString("weather", null);
        if (weatherString != null) {
            // A cached response exists: parse and show it directly.
            Weather weather = Utility.handleWeatherResponse(weatherString);
            mWeatherId = weather.basic.weatherId;
            showWeatherInfo(weather);
        } else {
            // No cache: query the server for the city passed in the intent.
            mWeatherId = getIntent().getStringExtra("weather_id");
            requestWeather(mWeatherId);
        }
    }

    /**
     * Requests weather data for the given city id and renders it on success.
     * The raw response is cached in the default shared preferences under the
     * "weather" key so the next launch can skip the network round trip.
     *
     * @param weatherId city id understood by the weather service
     */
    public void requestWeather(final String weatherId) {
        String weatherUrl = "http://guolin.tech/api/weather?cityid=" + weatherId + "&key=" + API_KEY;
        HttpUtil.sendOkHttpRequest(weatherUrl, new Callback() {
            @Override
            public void onResponse(Call call, Response response) throws IOException {
                final String responseText = response.body().string();
                Log.v(TAG, "=======================================================================");
                Log.v(TAG, responseText);
                Log.v(TAG, "=======================================================================");
                final Weather weather = Utility.handleWeatherResponse(responseText);
                // Network callback runs on a worker thread; UI updates must be
                // posted back to the main thread.
                runOnUiThread(new Runnable() {
                    @Override
                    public void run() {
                        if (weather != null && "ok".equals(weather.status)) {
                            // Cache the raw response before rendering.
                            SharedPreferences.Editor editor = PreferenceManager.getDefaultSharedPreferences(WeatherActivity.this).edit();
                            editor.putString("weather", responseText);
                            editor.apply();
                            showWeatherInfo(weather);
                        } else {
                            Toast.makeText(WeatherActivity.this, "获取天气信息失败", Toast.LENGTH_SHORT).show();
                        }
                    }
                });
            }

            @Override
            public void onFailure(Call call, IOException e) {
                e.printStackTrace();
                runOnUiThread(new Runnable() {
                    @Override
                    public void run() {
                        Toast.makeText(WeatherActivity.this, "获取天气信息失败", Toast.LENGTH_SHORT).show();
                    }
                });
            }
        });
    }

    /**
     * Maps a weather condition name (as returned by the service, in Chinese)
     * to its drawable icon resource.  Unrecognised names fall back to the
     * "unknown" icon so the result is always a valid resource id — previously
     * -1 was returned, which is not valid for ImageView.setImageResource().
     *
     * @param weatherName condition text, e.g. "晴"
     * @return a drawable resource id, never -1
     */
    private int findWeatherIconByName(String weatherName)
    {
        switch(weatherName)
        {
            case "晴":return R.drawable.a044;
            case "多云":return R.drawable.a045;
            case "少云":return R.drawable.a046;
            case "晴间多云":return R.drawable.a047;
            case "阴":return R.drawable.a048;
            case "有风":return R.drawable.a049;
            case "平静":return R.drawable.a050;
            case "微风":return R.drawable.a000;
            case "和风":return R.drawable.a001;
            case "清风":return R.drawable.a002;
            case "强风":return R.drawable.a003;
            case "劲风":return R.drawable.a003;
            case "大风":return R.drawable.a004;
            case "烈风":return R.drawable.a005;
            case "风暴":return R.drawable.a006;
            case "狂爆风":return R.drawable.a007;
            case "龙卷风":return R.drawable.a008;
            case "热带风暴":return R.drawable.a009;
            case "阵雨":return R.drawable.a012;
            case "强阵雨":return R.drawable.a013;
            case "雷阵雨":return R.drawable.a014;
            case "强雷阵雨":return R.drawable.a015;
            case "雷阵雨伴有冰雹":return R.drawable.a016;
            case "小雨":return R.drawable.a017;
            case "中雨":return R.drawable.a018;
            case "大雨":return R.drawable.a019;
            case "极端降雨":return R.drawable.a020;
            case "毛毛雨":return R.drawable.a021;
            case "细雨":return R.drawable.a021;
            case "暴雨":return R.drawable.a022;
            case "大暴雨":return R.drawable.a023;
            case "特大暴雨":return R.drawable.a024;
            case "冻雨":return R.drawable.a025;
            case "小雪":return R.drawable.a026;
            case "中雪":return R.drawable.a027;
            case "大雪":return R.drawable.a028;
            case "暴雪":return R.drawable.a029;
            case "雨夹雪":return R.drawable.a030;
            case "雨雪天气":return R.drawable.a031;
            case "阵雨夹雪":return R.drawable.a032;
            case "阵雪":return R.drawable.a033;
            case "薄雾":return R.drawable.a034;
            case "雾":return R.drawable.a035;
            case "霾":return R.drawable.a036;
            case "扬沙":return R.drawable.a037;
            case "浮尘":return R.drawable.a038;
            case "沙尘暴":return R.drawable.a039;
            case "热":return R.drawable.a041;
            case "冷":return R.drawable.a042;
            case "强沙尘暴":return R.drawable.a040;
            case "未知":return R.drawable.a043;
            default:
                // Fall back to the "unknown" icon for unrecognised names.
                return R.drawable.a043;
        }
    }

    /**
     * Renders the parsed weather data on the information panel.
     */
    private void showWeatherInfo(Weather weather) {
        String cityName = weather.basic.cityName;
        // updateTime is "yyyy-MM-dd HH:mm"; keep only the time part.
        String updateTime = weather.basic.update.updateTime.split(" ")[1];
        String degree = weather.now.temperature;
        String weatherInfo = weather.now.more.info;
        weatherPlace.setText(cityName);
        weatherTime.setText(updateTime);
        weatherDegree.setText(degree);
        weatherType.setText(weatherInfo);
        weatherStat.setImageResource(findWeatherIconByName(weatherInfo));
    }
}
| applicationsky/MyCoolWeather | app/src/main/java/cn/oeaom/CoolWeather/WeatherActivity.java | Java | apache-2.0 | 12,783 |
#ifdef WINDOWS_PLATFORM

#include "WindowsInputService.hpp"

#include "WindowsMouseInterface.hpp"
#include "WindowsKeyboardInterface.hpp"

namespace MPACK
{
	namespace Input
	{
		// Allocates the platform-specific mouse/keyboard interfaces and
		// resets them to a clean state.
		WindowsInputService::WindowsInputService()
		{
			m_pMouse = new WindowsMouseInterface;
			m_pKeyboard = new WindowsKeyboardInterface;
			Reset();
		}

		// Releases the interfaces allocated in the constructor.  The
		// destructor was previously empty, leaking both objects.
		// NOTE(review): assumes ownership stays with this class — confirm
		// the base InputService does not also delete these pointers.
		WindowsInputService::~WindowsInputService()
		{
			delete m_pMouse;
			m_pMouse = 0;
			delete m_pKeyboard;
			m_pKeyboard = 0;
		}

		// Polls both devices for this frame.
		void WindowsInputService::Update()
		{
			m_pMouse->Update();
			m_pKeyboard->Update();
		}

		// Clears any accumulated input state on both devices.
		void WindowsInputService::Reset()
		{
			m_pMouse->Reset();
			m_pKeyboard->Reset();
		}

		MouseInterface* WindowsInputService::GetMouse() const
		{
			return m_pMouse;
		}

		KeyboardInterface* WindowsInputService::GetKeyboard() const
		{
			return m_pKeyboard;
		}
	}
}

#endif
| mirceamt/MPACK | jni/MPACK/Input/Desktop/Windows/WindowsInputService.cpp | C++ | apache-2.0 | 764 |
// Aggregate Angular module: pulls together all of the app's service
// sub-modules so consumers only need to depend on 'app.services'.
angular.module('app.services', [
  'app.services.actions',
  'app.services.connection',
  'app.services.coverart',
  'app.services.locale',
  'app.services.logging',
  'app.services.mopidy',
  'app.services.paging',
  'app.services.platform',
  'app.services.router',
  'app.services.servers',
  'app.services.settings'
]);
| tkem/mopidy-mobile | www/app/services/services.js | JavaScript | apache-2.0 | 324 |
//-----------------------------------------------------------------------
// <copyright file="FilterTreeDragDropArgs.cs" company="Development In Progress Ltd">
// Copyright © Development In Progress Ltd 2015. All rights reserved.
// </copyright>
//-----------------------------------------------------------------------
namespace DevelopmentInProgress.WPFControls.FilterTree
{
    /// <summary>
    /// Carries the payload of a drag and drop operation within the
    /// <see cref="XamlFilterTree"/>: the item being dragged and the
    /// target it is dropped onto.
    /// </summary>
    public class FilterTreeDragDropArgs
    {
        /// <summary>
        /// Gets the object being dragged.
        /// </summary>
        public object DragItem { get; private set; }

        /// <summary>
        /// Gets the drop target for the object being dragged.
        /// </summary>
        public object DropTarget { get; private set; }

        /// <summary>
        /// Initialises a new instance of the FilterTreeDragDropArgs class.
        /// </summary>
        /// <param name="dragItem">The item being dragged.</param>
        /// <param name="dropTarget">The target where the dragged item will be dropped.</param>
        public FilterTreeDragDropArgs(object dragItem, object dropTarget)
        {
            DropTarget = dropTarget;
            DragItem = dragItem;
        }
    }
}
| grantcolley/wpfcontrols | DevelopmentInProgress.WPFControls/FilterTree/FilterTreeDragDropArgs.cs | C# | apache-2.0 | 1,316 |
package com.structurizr.view;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.structurizr.model.*;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.stream.Collectors;
@JsonIgnoreProperties(ignoreUnknown=true)
public abstract class View implements Comparable<View> {
    // The software system this view belongs to (transient object reference).
    private SoftwareSystem softwareSystem;
    // Raw id of the software system, used when only the id (not the object)
    // is available, e.g. after deserialization.
    private String softwareSystemId;
    // Free-form description of the view; never null.
    private String description = "";
    // Paper size used when rendering; defaults to A4 portrait.
    private PaperSize paperSize = PaperSize.A4_Portrait;
    // Elements included in this view, in insertion order.
    private Set<ElementView> elementViews = new LinkedHashSet<>();
    /**
     * Package-private no-arg constructor, used for deserialization.
     */
    View() {
    }

    /**
     * Creates a view associated with the given software system.
     *
     * @param softwareSystem the SoftwareSystem this view belongs to
     */
    public View(SoftwareSystem softwareSystem) {
        this.softwareSystem = softwareSystem;
    }

    /**
     * Gets the model via the associated software system.
     * Not serialized.
     *
     * @return the Model
     */
    @JsonIgnore
    public Model getModel() {
        return softwareSystem.getModel();
    }

    /**
     * Gets the software system this view is associated with.
     * Not serialized.
     *
     * @return a SoftwareSystem instance
     */
    @JsonIgnore
    public SoftwareSystem getSoftwareSystem() {
        return softwareSystem;
    }
    /**
     * Sets the software system this view is associated with.
     *
     * @param softwareSystem a SoftwareSystem instance
     */
    public void setSoftwareSystem(SoftwareSystem softwareSystem) {
        this.softwareSystem = softwareSystem;
    }
public String getSoftwareSystemId() {
if (this.softwareSystem != null) {
return this.softwareSystem.getId();
} else {
return this.softwareSystemId;
}
}
    /**
     * Sets the raw software system id (package-private; used during
     * deserialization when only the id is known).
     *
     * @param softwareSystemId the software system id
     */
    void setSoftwareSystemId(String softwareSystemId) {
        this.softwareSystemId = softwareSystemId;
    }
public abstract ViewType getType();
    /**
     * Gets the description of this view.
     *
     * @return the description, never null
     */
    public String getDescription() {
        return description;
    }

    /**
     * Sets the description of this view.
     *
     * @param description the description
     */
    public void setDescription(String description) {
        this.description = description;
    }

    /**
     * Gets the paper size used when rendering this view.
     *
     * @return a PaperSize
     */
    public PaperSize getPaperSize() {
        return paperSize;
    }

    /**
     * Sets the paper size used when rendering this view.
     *
     * @param paperSize a PaperSize
     */
    public void setPaperSize(PaperSize paperSize) {
        this.paperSize = paperSize;
    }
    /**
     * Adds all software systems in the model to this view.
     */
    public void addAllSoftwareSystems() {
        getModel().getSoftwareSystems().forEach(this::addElement);
    }

    /**
     * Adds the given software system to this view.
     *
     * @param softwareSystem the SoftwareSystem to add
     */
    public void addSoftwareSystem(SoftwareSystem softwareSystem) {
        addElement(softwareSystem);
    }

    /**
     * Adds all people in the model to this view.
     */
    public void addAllPeople() {
        getModel().getPeople().forEach(this::addElement);
    }

    /**
     * Adds the given person to this view.
     *
     * @param person the Person to add
     */
    public void addPerson(Person person) {
        addElement(person);
    }
    /**
     * Adds the given element to this view, provided it belongs to the model
     * associated with this view's software system; elements from other models
     * are silently ignored.
     *
     * NOTE(review): assumes softwareSystem is non-null — the no-args
     * constructor (used for deserialization) leaves it null; confirm callers.
     *
     * @param element the Element to add
     */
    protected void addElement(Element element) {
        if (softwareSystem.getModel().contains(element)) {
            elementViews.add(new ElementView(element));
        }
    }

    /**
     * Removes the given element (and its layout information) from this view.
     * Removal relies on ElementView equality for the same underlying element.
     *
     * @param element the Element to remove
     */
    protected void removeElement(Element element) {
        ElementView elementView = new ElementView(element);
        elementViews.remove(elementView);
    }
/**
* Gets the set of elements in this view.
*
* @return a Set of ElementView objects
*/
public Set<ElementView> getElements() {
return elementViews;
}
public Set<RelationshipView> getRelationships() {
Set<Relationship> relationships = new HashSet<>();
Set<Element> elements = getElements().stream()
.map(ElementView::getElement)
.collect(Collectors.toSet());
elements.forEach(b -> relationships.addAll(b.getRelationships()));
return relationships.stream()
.filter(r -> elements.contains(r.getSource()) && elements.contains(r.getDestination()))
.map(RelationshipView::new)
.collect(Collectors.toSet());
}
public void setRelationships(Set<RelationshipView> relationships) {
// do nothing ... this are determined automatically
}
/**
* Removes all elements that have no relationships
* to other elements in this view.
*/
public void removeElementsWithNoRelationships() {
Set<RelationshipView> relationships = getRelationships();
Set<String> elementIds = new HashSet<>();
relationships.forEach(rv -> elementIds.add(rv.getRelationship().getSourceId()));
relationships.forEach(rv -> elementIds.add(rv.getRelationship().getDestinationId()));
elementViews.removeIf(ev -> !elementIds.contains(ev.getId()));
}
    /**
     * Removes all elements that cannot be reached by following outgoing
     * relationships from the given element.
     *
     * @param element the starting Element
     */
    public void removeElementsThatCantBeReachedFrom(Element element) {
        Set<String> elementIdsToShow = new HashSet<>();
        findElementsToShow(element, elementIdsToShow, 1);
        elementViews.removeIf(ev -> !elementIdsToShow.contains(ev.getId()));
    }

    // Recursively walks outgoing relationships from the given element,
    // collecting the ids of elements that are present in this view.
    // The depth cap (100) bounds the recursion so that cyclic relationship
    // graphs cannot cause a StackOverflowError.
    private void findElementsToShow(Element element, Set<String> elementIds, int depth) {
        if (elementViews.contains(new ElementView(element))) {
            elementIds.add(element.getId());
            if (depth < 100) {
                element.getRelationships().forEach(r -> findElementsToShow(r.getDestination(), elementIds, depth + 1));
            }
        }
    }
public abstract String getName();
@Override
public int compareTo(View view) {
return getTitle().compareTo(view.getTitle());
}
private String getTitle() {
return getName() + " - " + getDescription();
}
ElementView findElementView(Element element) {
for (ElementView elementView : getElements()) {
if (elementView.getElement().equals(element)) {
return elementView;
}
}
return null;
}
public void copyLayoutInformationFrom(View source) {
this.setPaperSize(source.getPaperSize());
for (ElementView sourceElementView : source.getElements()) {
ElementView destinationElementView = findElementView(sourceElementView.getElement());
if (destinationElementView != null) {
destinationElementView.copyLayoutInformationFrom(sourceElementView);
}
}
}
} | JDriven/structurizr-java | structurizr-core/src/com/structurizr/view/View.java | Java | apache-2.0 | 5,965 |
/*
* Copyright (c) 2016 The original author or authors
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and Apache License v2.0 which accompanies this distribution.
*
* The Eclipse Public License is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* The Apache License v2.0 is available at
* http://www.opensource.org/licenses/apache2.0.php
*
* You may elect to redistribute this code under either of these licenses.
*/
package io.vertx.ext.consul;
import io.vertx.codegen.annotations.DataObject;
import io.vertx.core.json.JsonObject;
import java.util.List;
/**
* Holds network coordinates of node
*
* @author <a href="mailto:ruslan.sennov@gmail.com">Ruslan Sennov</a>
* @see <a href="https://www.consul.io/docs/internals/coordinates.html">Network coordinates</a>
*/
@DataObject(generateConverter = true)
public class Coordinate {

  private String node;
  private float adj;
  private float err;
  private float height;
  private List<Float> vec;

  /**
   * Default constructor
   */
  public Coordinate() {}

  /**
   * Copy constructor
   *
   * @param coordinate the one to copy
   */
  public Coordinate(Coordinate coordinate) {
    // Shallow copy: the vector list reference is shared with the source.
    node = coordinate.node;
    adj = coordinate.adj;
    err = coordinate.err;
    height = coordinate.height;
    vec = coordinate.vec;
  }

  /**
   * Constructor from JSON
   *
   * @param coordinate the JSON
   */
  public Coordinate(JsonObject coordinate) {
    CoordinateConverter.fromJson(coordinate, this);
  }

  /**
   * Convert to JSON
   *
   * @return the JSON
   */
  public JsonObject toJson() {
    JsonObject json = new JsonObject();
    CoordinateConverter.toJson(this, json);
    return json;
  }

  /**
   * Get name of node
   *
   * @return name of node
   */
  public String getNode() {
    return node;
  }

  /**
   * Set name of node
   *
   * @param node name of node
   * @return reference to this, for fluency
   */
  public Coordinate setNode(String node) {
    this.node = node;
    return this;
  }

  /**
   * Get adjustment
   *
   * @return adjustment
   */
  public float getAdj() {
    return adj;
  }

  /**
   * Set adjustment
   *
   * @param adj adjustment
   * @return reference to this, for fluency
   */
  public Coordinate setAdj(float adj) {
    this.adj = adj;
    return this;
  }

  /**
   * Get error
   *
   * @return error
   */
  public float getErr() {
    return err;
  }

  /**
   * Set error
   *
   * @param err error
   * @return reference to this, for fluency
   */
  public Coordinate setErr(float err) {
    this.err = err;
    return this;
  }

  /**
   * Get height
   *
   * @return height
   */
  public float getHeight() {
    return height;
  }

  /**
   * Set height
   *
   * @param height height
   * @return reference to this, for fluency
   */
  public Coordinate setHeight(float height) {
    this.height = height;
    return this;
  }

  /**
   * Get vector
   *
   * @return vector
   */
  public List<Float> getVec() {
    return vec;
  }

  /**
   * Set vector
   *
   * @param vec vector
   * @return reference to this, for fluency
   */
  public Coordinate setVec(List<Float> vec) {
    this.vec = vec;
    return this;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    // Exact class match (not instanceof) so subclasses never compare equal.
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    Coordinate that = (Coordinate) o;
    return Float.compare(that.adj, adj) == 0
      && Float.compare(that.err, err) == 0
      && Float.compare(that.height, height) == 0
      && java.util.Objects.equals(node, that.node)
      && java.util.Objects.equals(vec, that.vec);
  }

  @Override
  public int hashCode() {
    // Keep the historical hash formula so hash values remain stable.
    int result = node != null ? node.hashCode() : 0;
    result = 31 * result + hashBits(adj);
    result = 31 * result + hashBits(err);
    result = 31 * result + hashBits(height);
    result = 31 * result + (vec != null ? vec.hashCode() : 0);
    return result;
  }

  // Hash contribution of a float: 0 for (positive or negative) zero,
  // otherwise the raw IEEE-754 bits — same values as the original formula.
  private static int hashBits(float value) {
    return value != +0.0f ? Float.floatToIntBits(value) : 0;
  }
}
| ruslansennov/vertx-consul-client | src/main/java/io/vertx/ext/consul/Coordinate.java | Java | apache-2.0 | 4,253 |
/*
* Copyright (C) 2010 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.zzn.aeassistant.zxing.decoding;
import android.app.Activity;
import android.content.DialogInterface;
/**
* Simple listener used to exit the app in a few cases.
*
* @author Sean Owen
*/
public final class FinishListener
    implements DialogInterface.OnClickListener, DialogInterface.OnCancelListener, Runnable {

  // Activity to close whenever this listener fires.
  private final Activity activityToFinish;

  public FinishListener(Activity activityToFinish) {
    this.activityToFinish = activityToFinish;
  }

  // Invoked when a dialog is cancelled (e.g. via the back button).
  @Override
  public void onCancel(DialogInterface dialogInterface) {
    run();
  }

  // Invoked when the dialog button wired to this listener is clicked.
  @Override
  public void onClick(DialogInterface dialogInterface, int i) {
    run();
  }

  // Common exit path: close the activity.
  @Override
  public void run() {
    activityToFinish.finish();
  }
}
| ShawnDongAi/AEASSISTANT | AEAssistant/src/com/zzn/aeassistant/zxing/decoding/FinishListener.java | Java | apache-2.0 | 1,311 |
import { Component, Input, EventEmitter, SimpleChanges, OnChanges } from '@angular/core';
import { ToasterService } from 'angular2-toaster';
import { TranslateService } from 'ng2-translate';
import { Notification } from '../notification.model';
import { NotificationService } from '../notification.service';
import { PaginationComponent } from '../../shared/pagination/pagination.component';
@Component({
  moduleId: module.id,
  selector: 'hip-notifications-list',
  templateUrl: 'notifications-list.component.html',
  styleUrls: ['notifications-list.component.css']
})
export class NotificationsListComponent {
  /** Notifications to render; supplied by the parent component. */
  @Input() notifications: Notification[];

  // Last value produced by getTranslatedString().
  translatedResponse: any;

  // pagination parameters
  currentPage = 1;
  pageSize = 10;
  totalItems: number;

  // will contain the notifications satisfying the selected status and type
  filteredNotifications: Notification[] = [];

  constructor(private notificationService: NotificationService,
              private toasterService: ToasterService,
              private translateService: TranslateService) {}

  /**
   * Marks the notification with the given id as read on the server and,
   * on success, mirrors the change locally and updates the unread counter.
   */
  private markAsRead(notificationId: number) {
    this.notificationService.markNotificationAsRead(notificationId)
      .then(
        () => {
          // Fix: the previous filter(...)[0] lookup threw a TypeError when no
          // notification matched the id; find() plus a guard handles that.
          const readNotification = this.notifications.find(
            notification => notification.id === notificationId
          );
          if (readNotification && !readNotification.read) {
            readNotification.read = true;
            // notify change to the service which notifies the toolbar;
            // the guard above ensures we decrement at most once per notification
            this.notificationService.announceUnreadNotificationCountDecrease(1);
          }
        }
      ).catch(
        () => {
          this.toasterService.pop('error', this.getTranslatedString('Could not mark notification as read'));
        }
      );
  }

  /**
   * Translates the given key and returns the most recent translation.
   * NOTE(review): relies on TranslateService.get() emitting synchronously;
   * if the translation file is not yet loaded this returns a stale value.
   */
  getTranslatedString(data: any) {
    this.translateService.get(data).subscribe(
      (value: any) => {
        this.translatedResponse = value;
      }
    );
    return this.translatedResponse;
  }

  /** Pagination callback: switches the list to the given page. */
  getPage(page: number) {
    this.currentPage = page;
  }
}
| HiP-App/HiP-CmsAngularApp | app/notifications/notifications-list/notifications-list.component.ts | TypeScript | apache-2.0 | 2,139 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* $Header:$
*/
package org.apache.beehive.netui.util;
import java.util.Map;
import java.util.List;
import java.lang.reflect.Array;
import org.apache.beehive.netui.util.logging.Logger;
/**
* This class is used by NetUI tags that use parameters.
*/
public class ParamHelper
{
    private static final Logger logger = Logger.getInstance(ParamHelper.class);

    /**
     * Add a new parameter or update an existing parameter's list of values.
     * <p/>
     * <em>Implementation Note:</em> in the case that a Map was provided for
     * the <code>value</code> parameter, this returns without doing anything;
     * in any other case, params is updated (even if <code>value</code> is
     * null).
     * </p>
     * <p/>
     * If value is some object (not an array or list), the string
     * representation of that object is added as a value for name. If the
     * value is a list (or array) of objects, then the string representation
     * of each element is added as a value for name. When there are multiple
     * values for a name, then an array of Strings is used in the Map.
     * </p>
     *
     * @param params an existing Map of names and values to update
     * @param name the name of the parameter to add or update
     * @param value an item or list of items to put into the map
     * @throws IllegalArgumentException in the case that either the params
     * or name given was null
     */
    public static void addParam(Map params, String name, Object value)
    {
        if (params == null)
            throw new IllegalArgumentException("Parameter map cannot be null");
        if (name == null)
            throw new IllegalArgumentException("Parameter name cannot be null");

        // Maps are not a legal parameter value; warn and leave params untouched.
        if (value instanceof Map) {
            logger.warn(Bundle.getString("Tags_BadParameterType", name));
            return;
        }

        if (value == null)
            value = "";

        String[] newValues = toStringArray(value);

        // Bug fix: an empty array/List used to fall through to
        // Array.get(value, 0) / list.get(0) and throw an
        // IndexOutOfBoundsException when a value for this name already
        // existed. There is nothing new to add, so keep any existing value.
        if (newValues.length == 0)
            return;

        String[] existingValues = existingToArray(params.get(name));
        int total = existingValues.length + newValues.length;

        if (total == 1) {
            // A single value overall is stored as a plain String.
            params.put(name, newValues[0]);
            return;
        }

        // Multiple values are stored as a String[], existing values first.
        String[] merged = new String[total];
        System.arraycopy(existingValues, 0, merged, 0, existingValues.length);
        System.arraycopy(newValues, 0, merged, existingValues.length, newValues.length);
        params.put(name, merged);
    }

    /**
     * Returns the existing value for a parameter as an array (empty when
     * absent). Stored values are always a single String or a String[].
     */
    private static String[] existingToArray(Object existing)
    {
        if (existing == null)
            return new String[0];

        assert existing instanceof String || existing instanceof String[];

        if (existing instanceof String[])
            return (String[]) existing;
        return new String[]{existing.toString()};
    }

    /**
     * Converts a scalar, array or List value into the String representations
     * of its element(s); null elements become the empty string.
     */
    private static String[] toStringArray(Object value)
    {
        if (value.getClass().isArray()) {
            // Use java.lang.reflect.Array so primitive arrays work too.
            int size = Array.getLength(value);
            String[] result = new String[size];
            for (int i = 0; i < size; i++) {
                Object element = Array.get(value, i);
                result[i] = element != null ? element.toString() : "";
            }
            return result;
        }
        if (value instanceof List) {
            List list = (List) value;
            String[] result = new String[list.size()];
            for (int i = 0; i < result.length; i++) {
                Object element = list.get(i);
                result[i] = element != null ? element.toString() : "";
            }
            return result;
        }
        return new String[]{value.toString()};
    }
}
| moparisthebest/beehive | beehive-netui-core/src/main/java/org/apache/beehive/netui/util/ParamHelper.java | Java | apache-2.0 | 5,988 |
package org.jboss.resteasy.spi;
import org.jboss.resteasy.specimpl.MultivaluedMapImpl;
import org.jboss.resteasy.specimpl.PathSegmentImpl;
import org.jboss.resteasy.specimpl.ResteasyUriBuilder;
import org.jboss.resteasy.util.Encode;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.PathSegment;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.core.UriInfo;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.List;
/**
* UriInfo implementation with some added extra methods to help process requests
*
* @author <a href="mailto:bill@burkecentral.com">Bill Burke</a>
* @version $Revision: 1 $
*/
public class ResteasyUriInfo implements UriInfo
{
private String path;
private String encodedPath;
private String matchingPath;
private MultivaluedMap<String, String> queryParameters;
private MultivaluedMap<String, String> encodedQueryParameters;
private MultivaluedMap<String, String> pathParameters;
private MultivaluedMap<String, String> encodedPathParameters;
private MultivaluedMap<String, PathSegment[]> pathParameterPathSegments;
private MultivaluedMap<String, PathSegment[]> encodedPathParameterPathSegments;
private List<PathSegment> pathSegments;
private List<PathSegment> encodedPathSegments;
private URI absolutePath;
private URI requestURI;
private URI baseURI;
private List<String> matchedUris;
private List<String> encodedMatchedUris;
private List<String> encodedMatchedPaths = new ArrayList<String>();
private List<Object> ancestors;
public ResteasyUriInfo(URI base, URI relative)
{
String b = base.toString();
if (!b.endsWith("/")) b += "/";
String r = relative.getRawPath();
if (r.startsWith("/"))
{
encodedPath = r;
path = relative.getPath();
}
else
{
encodedPath = "/" + r;
path = "/" + relative.getPath();
}
requestURI = UriBuilder.fromUri(base).path(relative.getRawPath()).replaceQuery(relative.getRawQuery()).build();
baseURI = base;
encodedPathSegments = PathSegmentImpl.parseSegments(encodedPath, false);
this.pathSegments = new ArrayList<PathSegment>(encodedPathSegments.size());
for (PathSegment segment : encodedPathSegments)
{
pathSegments.add(new PathSegmentImpl(((PathSegmentImpl) segment).getOriginal(), true));
}
extractParameters(requestURI.getRawQuery());
extractMatchingPath(encodedPathSegments);
absolutePath = UriBuilder.fromUri(requestURI).replaceQuery(null).build();
}
public ResteasyUriInfo(URI requestURI)
{
String r = requestURI.getRawPath();
if (r.startsWith("/"))
{
encodedPath = r;
path = requestURI.getPath();
}
else
{
encodedPath = "/" + r;
path = "/" + requestURI.getPath();
}
this.requestURI = requestURI;
baseURI = UriBuilder.fromUri(requestURI).replacePath("").build();
encodedPathSegments = PathSegmentImpl.parseSegments(encodedPath, false);
this.pathSegments = new ArrayList<PathSegment>(encodedPathSegments.size());
for (PathSegment segment : encodedPathSegments)
{
pathSegments.add(new PathSegmentImpl(((PathSegmentImpl) segment).getOriginal(), true));
}
extractParameters(requestURI.getRawQuery());
extractMatchingPath(encodedPathSegments);
absolutePath = UriBuilder.fromUri(requestURI).replaceQuery(null).build();
}
/**
* matching path without matrix parameters
*
* @param encodedPathSegments
*/
protected void extractMatchingPath(List<PathSegment> encodedPathSegments)
{
StringBuilder preprocessedPath = new StringBuilder();
for (PathSegment pathSegment : encodedPathSegments)
{
preprocessedPath.append("/").append(pathSegment.getPath());
}
matchingPath = preprocessedPath.toString();
}
/**
* Encoded path without matrix parameters
*
* @return
*/
public String getMatchingPath()
{
return matchingPath;
}
/**
* Create a UriInfo from the baseURI
*
* @param relative
* @return
*/
public ResteasyUriInfo setRequestUri(URI relative)
{
String rel = relative.toString();
if (rel.startsWith(baseURI.toString()))
{
relative = URI.create(rel.substring(baseURI.toString().length()));
}
return new ResteasyUriInfo(baseURI, relative);
}
public String getPath()
{
return path;
}
public String getPath(boolean decode)
{
if (decode) return getPath();
return encodedPath;
}
public List<PathSegment> getPathSegments()
{
return pathSegments;
}
public List<PathSegment> getPathSegments(boolean decode)
{
if (decode) return getPathSegments();
return encodedPathSegments;
}
public URI getRequestUri()
{
return requestURI;
}
public UriBuilder getRequestUriBuilder()
{
return UriBuilder.fromUri(requestURI);
}
public URI getAbsolutePath()
{
return absolutePath;
}
public UriBuilder getAbsolutePathBuilder()
{
return UriBuilder.fromUri(absolutePath);
}
public URI getBaseUri()
{
return baseURI;
}
public UriBuilder getBaseUriBuilder()
{
return UriBuilder.fromUri(baseURI);
}
public MultivaluedMap<String, String> getPathParameters()
{
if (pathParameters == null)
{
pathParameters = new MultivaluedMapImpl<String, String>();
}
return pathParameters;
}
public void addEncodedPathParameter(String name, String value)
{
getEncodedPathParameters().add(name, value);
String value1 = Encode.decodePath(value);
getPathParameters().add(name, value1);
}
private MultivaluedMap<String, String> getEncodedPathParameters()
{
if (encodedPathParameters == null)
{
encodedPathParameters = new MultivaluedMapImpl<String, String>();
}
return encodedPathParameters;
}
public MultivaluedMap<String, PathSegment[]> getEncodedPathParameterPathSegments()
{
if (encodedPathParameterPathSegments == null)
{
encodedPathParameterPathSegments = new MultivaluedMapImpl<String, PathSegment[]>();
}
return encodedPathParameterPathSegments;
}
public MultivaluedMap<String, PathSegment[]> getPathParameterPathSegments()
{
if (pathParameterPathSegments == null)
{
pathParameterPathSegments = new MultivaluedMapImpl<String, PathSegment[]>();
}
return pathParameterPathSegments;
}
public MultivaluedMap<String, String> getPathParameters(boolean decode)
{
if (decode) return getPathParameters();
return getEncodedPathParameters();
}
public MultivaluedMap<String, String> getQueryParameters()
{
if (queryParameters == null)
{
queryParameters = new MultivaluedMapImpl<String, String>();
}
return queryParameters;
}
protected MultivaluedMap<String, String> getEncodedQueryParameters()
{
if (encodedQueryParameters == null)
{
this.encodedQueryParameters = new MultivaluedMapImpl<String, String>();
}
return encodedQueryParameters;
}
public MultivaluedMap<String, String> getQueryParameters(boolean decode)
{
if (decode) return getQueryParameters();
else return getEncodedQueryParameters();
}
protected void extractParameters(String queryString)
{
if (queryString == null || queryString.equals("")) return;
String[] params = queryString.split("&");
for (String param : params)
{
if (param.indexOf('=') >= 0)
{
String[] nv = param.split("=", 2);
try
{
String name = URLDecoder.decode(nv[0], "UTF-8");
String val = nv.length > 1 ? nv[1] : "";
getEncodedQueryParameters().add(name, val);
getQueryParameters().add(name, URLDecoder.decode(val, "UTF-8"));
}
catch (UnsupportedEncodingException e)
{
throw new RuntimeException(e);
}
}
else
{
try
{
String name = URLDecoder.decode(param, "UTF-8");
getEncodedQueryParameters().add(name, "");
getQueryParameters().add(name, "");
}
catch (UnsupportedEncodingException e)
{
throw new RuntimeException(e);
}
}
}
}
public List<String> getMatchedURIs(boolean decode)
{
if (decode)
{
if (matchedUris == null) matchedUris = new ArrayList<String>();
return matchedUris;
}
else
{
if (encodedMatchedUris == null) encodedMatchedUris = new ArrayList<String>();
return encodedMatchedUris;
}
}
public List<String> getMatchedURIs()
{
return getMatchedURIs(true);
}
public List<Object> getMatchedResources()
{
if (ancestors == null) ancestors = new ArrayList<Object>();
return ancestors;
}
public void pushCurrentResource(Object resource)
{
if (ancestors == null) ancestors = new ArrayList<Object>();
ancestors.add(0, resource);
}
public void pushMatchedPath(String encoded)
{
encodedMatchedPaths.add(0, encoded);
}
public List<String> getEncodedMatchedPaths()
{
return encodedMatchedPaths;
}
public void popMatchedPath()
{
encodedMatchedPaths.remove(0);
}
   /**
    * Records a URI template match. The encoded value is normalised by
    * stripping a single leading and trailing '/' before being stored, and the
    * decoded form is derived from it. Entries are pushed onto the front of
    * the lists, so the most recently matched URI is always at index 0.
    *
    * @param encoded the encoded matched URI
    */
   public void pushMatchedURI(String encoded)
   {
      if (encoded.endsWith("/")) encoded = encoded.substring(0, encoded.length() - 1);
      if (encoded.startsWith("/")) encoded = encoded.substring(1);
      String decoded = Encode.decode(encoded);
      if (encodedMatchedUris == null) encodedMatchedUris = new ArrayList<String>();
      encodedMatchedUris.add(0, encoded);
      if (matchedUris == null) matchedUris = new ArrayList<String>();
      matchedUris.add(0, decoded);
   }
@Override
public URI resolve(URI uri)
{
return getBaseUri().resolve(uri);
}
@Override
public URI relativize(URI uri)
{
URI from = getRequestUri();
URI to = uri;
if (uri.getScheme() == null && uri.getHost() == null)
{
to = getBaseUriBuilder().replaceQuery(null).path(uri.getPath()).replaceQuery(uri.getQuery()).fragment(uri.getFragment()).build();
}
return ResteasyUriBuilder.relativize(from, to);
}
}
| raphaelning/resteasy-client-android | jaxrs/resteasy-jaxrs/src/main/java/org/jboss/resteasy/spi/ResteasyUriInfo.java | Java | apache-2.0 | 10,699 |
package com.cabinetms.client;
import java.util.List;
import com.google.common.collect.Lists;
/**
 * Command payload describing a media tactic (schedule) pushed to a cabinet
 * terminal. NOTE(review): the presence of a queue "destination" suggests this
 * is sent over a message queue — confirm against the sender.
 */
public class TacticMediaCommand {
    private String command; // command identifier
    private String clientIp; // terminal IP address
    private String destination; // terminal queue (messaging destination) address
    private Integer startDate;// tactic start date — presumably yyyyMMdd encoded; confirm
    private Integer endDate;// tactic end date — presumably yyyyMMdd encoded; confirm
    private List<TacticDetailMediaCommand> detailList = Lists.newLinkedList();
    public List<TacticDetailMediaCommand> getDetailList() {
        return detailList;
    }
    public void setDetailList(List<TacticDetailMediaCommand> detailList) {
        this.detailList = detailList;
    }
    public String getCommand() {
        return command;
    }
    public void setCommand(String command) {
        this.command = command;
    }
    public String getClientIp() {
        return clientIp;
    }
    public void setClientIp(String clientIp) {
        this.clientIp = clientIp;
    }
    public String getDestination() {
        return destination;
    }
    public void setDestination(String destination) {
        this.destination = destination;
    }
    public Integer getStartDate() {
        return startDate;
    }
    public void setStartDate(Integer startDate) {
        this.startDate = startDate;
    }
    public Integer getEndDate() {
        return endDate;
    }
    public void setEndDate(Integer endDate) {
        this.endDate = endDate;
    }
}
| wangning82/CabinetMS | src/main/java/com/cabinetms/client/TacticMediaCommand.java | Java | apache-2.0 | 1,292 |
/**
* @license Highcharts JS v7.1.1 (2019-04-09)
*
* (c) 2014-2019 Highsoft AS
* Authors: Jon Arild Nygard / Oystein Moseng
*
* License: www.highcharts.com/license
*/
'use strict';
(function (factory) {
if (typeof module === 'object' && module.exports) {
factory['default'] = factory;
module.exports = factory;
} else if (typeof define === 'function' && define.amd) {
define('highcharts/modules/treemap', ['highcharts'], function (Highcharts) {
factory(Highcharts);
factory.Highcharts = Highcharts;
return factory;
});
} else {
factory(typeof Highcharts !== 'undefined' ? Highcharts : undefined);
}
}(function (Highcharts) {
var _modules = Highcharts ? Highcharts._modules : {};
function _registerModule(obj, path, args, fn) {
if (!obj.hasOwnProperty(path)) {
obj[path] = fn.apply(null, args);
}
}
_registerModule(_modules, 'mixins/tree-series.js', [_modules['parts/Globals.js']], function (H) {
var extend = H.extend,
isArray = H.isArray,
isBoolean = function (x) {
return typeof x === 'boolean';
},
isFn = function (x) {
return typeof x === 'function';
},
isObject = H.isObject,
isNumber = H.isNumber,
merge = H.merge,
pick = H.pick;
// TODO Combine buildTree and buildNode with setTreeValues
// TODO Remove logic from Treemap and make it utilize this mixin.
var setTreeValues = function setTreeValues(tree, options) {
var before = options.before,
idRoot = options.idRoot,
mapIdToNode = options.mapIdToNode,
nodeRoot = mapIdToNode[idRoot],
levelIsConstant = (
isBoolean(options.levelIsConstant) ?
options.levelIsConstant :
true
),
points = options.points,
point = points[tree.i],
optionsPoint = point && point.options || {},
childrenTotal = 0,
children = [],
value;
extend(tree, {
levelDynamic: tree.level - (levelIsConstant ? 0 : nodeRoot.level),
name: pick(point && point.name, ''),
visible: (
idRoot === tree.id ||
(isBoolean(options.visible) ? options.visible : false)
)
});
if (isFn(before)) {
tree = before(tree, options);
}
// First give the children some values
tree.children.forEach(function (child, i) {
var newOptions = extend({}, options);
extend(newOptions, {
index: i,
siblings: tree.children.length,
visible: tree.visible
});
child = setTreeValues(child, newOptions);
children.push(child);
if (child.visible) {
childrenTotal += child.val;
}
});
tree.visible = childrenTotal > 0 || tree.visible;
// Set the values
value = pick(optionsPoint.value, childrenTotal);
extend(tree, {
children: children,
childrenTotal: childrenTotal,
isLeaf: tree.visible && !childrenTotal,
val: value
});
return tree;
};
var getColor = function getColor(node, options) {
var index = options.index,
mapOptionsToLevel = options.mapOptionsToLevel,
parentColor = options.parentColor,
parentColorIndex = options.parentColorIndex,
series = options.series,
colors = options.colors,
siblings = options.siblings,
points = series.points,
getColorByPoint,
chartOptionsChart = series.chart.options.chart,
point,
level,
colorByPoint,
colorIndexByPoint,
color,
colorIndex;
function variation(color) {
var colorVariation = level && level.colorVariation;
if (colorVariation) {
if (colorVariation.key === 'brightness') {
return H.color(color).brighten(
colorVariation.to * (index / siblings)
).get();
}
}
return color;
}
if (node) {
point = points[node.i];
level = mapOptionsToLevel[node.level] || {};
getColorByPoint = point && level.colorByPoint;
if (getColorByPoint) {
colorIndexByPoint = point.index % (colors ?
colors.length :
chartOptionsChart.colorCount
);
colorByPoint = colors && colors[colorIndexByPoint];
}
// Select either point color, level color or inherited color.
if (!series.chart.styledMode) {
color = pick(
point && point.options.color,
level && level.color,
colorByPoint,
parentColor && variation(parentColor),
series.color
);
}
colorIndex = pick(
point && point.options.colorIndex,
level && level.colorIndex,
colorIndexByPoint,
parentColorIndex,
options.colorIndex
);
}
return {
color: color,
colorIndex: colorIndex
};
};
/**
* Creates a map from level number to its given options.
*
* @private
* @function getLevelOptions
*
* @param {object} params
* Object containing parameters.
* - `defaults` Object containing default options. The default options
* are merged with the userOptions to get the final options for a
* specific level.
* - `from` The lowest level number.
* - `levels` User options from series.levels.
* - `to` The highest level number.
*
* @return {Highcharts.Dictionary<object>}
* Returns a map from level number to its given options.
*/
var getLevelOptions = function getLevelOptions(params) {
var result = null,
defaults,
converted,
i,
from,
to,
levels;
if (isObject(params)) {
result = {};
from = isNumber(params.from) ? params.from : 1;
levels = params.levels;
converted = {};
defaults = isObject(params.defaults) ? params.defaults : {};
if (isArray(levels)) {
converted = levels.reduce(function (obj, item) {
var level,
levelIsConstant,
options;
if (isObject(item) && isNumber(item.level)) {
options = merge({}, item);
levelIsConstant = (
isBoolean(options.levelIsConstant) ?
options.levelIsConstant :
defaults.levelIsConstant
);
// Delete redundant properties.
delete options.levelIsConstant;
delete options.level;
// Calculate which level these options apply to.
level = item.level + (levelIsConstant ? 0 : from - 1);
if (isObject(obj[level])) {
extend(obj[level], options);
} else {
obj[level] = options;
}
}
return obj;
}, {});
}
to = isNumber(params.to) ? params.to : 1;
for (i = 0; i <= to; i++) {
result[i] = merge(
{},
defaults,
isObject(converted[i]) ? converted[i] : {}
);
}
}
return result;
};
/**
* Update the rootId property on the series. Also makes sure that it is
* accessible to exporting.
*
* @private
* @function updateRootId
*
* @param {object} series
* The series to operate on.
*
* @return {string}
* Returns the resulting rootId after update.
*/
var updateRootId = function (series) {
var rootId,
options;
if (isObject(series)) {
// Get the series options.
options = isObject(series.options) ? series.options : {};
// Calculate the rootId.
rootId = pick(series.rootNode, options.rootId, '');
// Set rootId on series.userOptions to pick it up in exporting.
if (isObject(series.userOptions)) {
series.userOptions.rootId = rootId;
}
// Set rootId on series to pick it up on next update.
series.rootNode = rootId;
}
return rootId;
};
var result = {
getColor: getColor,
getLevelOptions: getLevelOptions,
setTreeValues: setTreeValues,
updateRootId: updateRootId
};
return result;
});
_registerModule(_modules, 'mixins/draw-point.js', [], function () {
    // Small helper: is the argument callable?
    var isFn = function (x) {
        return typeof x === 'function';
    };
    /**
     * Handles the drawing of a component.
     * Can be used for any type of component that reserves the graphic
     * property, and provides a shouldDraw on its context.
     *
     * @private
     * @function draw
     *
     * @param {object} params
     *        Parameters.
     *
     * TODO: add type checking.
     * TODO: export this function to enable usage
     */
    var draw = function draw(params) {
        var ctx = this,
            animAttribs = params.animatableAttribs,
            onComplete = params.onComplete,
            graphic = ctx.graphic;
        if (!ctx.shouldDraw()) {
            // Nothing to render: animate out and destroy an existing
            // graphic.
            if (graphic) {
                var destroy = function () {
                    ctx.graphic = graphic = graphic.destroy();
                    if (isFn(onComplete)) {
                        onComplete();
                    }
                };
                if (Object.keys(animAttribs).length) {
                    // animate only fires its complete callback when
                    // there was something to animate.
                    graphic.animate(animAttribs, undefined, function () {
                        destroy();
                    });
                } else {
                    destroy();
                }
            }
            return;
        }
        // Create the graphic on first draw, then update and animate it.
        if (!graphic) {
            graphic = params.renderer[params.shapeType](params.shapeArgs)
                .add(params.group);
            ctx.graphic = graphic;
        }
        graphic
            .css(params.css)
            .attr(params.attribs)
            .animate(
                animAttribs,
                params.isNew ? false : undefined,
                onComplete
            );
    };
    /**
     * An extended version of draw customized for points.
     * It calls additional methods that are expected when rendering a
     * point.
     *
     * @param {object} params Parameters
     */
    var drawPoint = function drawPoint(params) {
        var point = this,
            attribs = params.attribs || {};
        params.attribs = attribs;
        // Use bracket notation: `class` is a reserved word and dot
        // notation breaks in IE8.
        // eslint-disable-next-line dot-notation
        attribs['class'] = point.getClassName();
        // Delegate the actual rendering to the generic draw mixin.
        draw.call(point, params);
    };
    return drawPoint;
});
_registerModule(_modules, 'modules/treemap.src.js', [_modules['parts/Globals.js'], _modules['mixins/tree-series.js'], _modules['mixins/draw-point.js']], function (H, mixinTreeSeries, drawPoint) {
/* *
* (c) 2014-2019 Highsoft AS
*
* Authors: Jon Arild Nygard / Oystein Moseng
*
* License: www.highcharts.com/license
*/
// Local aliases for Highcharts utilities plus a few module helpers.
var seriesType = H.seriesType,
    seriesTypes = H.seriesTypes,
    addEvent = H.addEvent,
    merge = H.merge,
    extend = H.extend,
    error = H.error,
    defined = H.defined,
    noop = H.noop,
    fireEvent = H.fireEvent,
    getColor = mixinTreeSeries.getColor,
    getLevelOptions = mixinTreeSeries.getLevelOptions,
    isArray = H.isArray,
    // Strict boolean check (H provides no isBoolean helper).
    isBoolean = function (x) {
        return typeof x === 'boolean';
    },
    isNumber = H.isNumber,
    isObject = H.isObject,
    isString = H.isString,
    pick = H.pick,
    Series = H.Series,
    stableSort = H.stableSort,
    color = H.Color,
    // Iterates the properties of `list`, invoking func(value, key, list)
    // with an optional `this` context.
    eachObject = function (list, func, context) {
        context = context || this;
        H.objectEach(list, function (val, key) {
            func.call(context, val, key, list);
        });
    },
    // @todo find correct name for this function.
    // @todo Similar to reduce, this function is likely redundant
    // Repeatedly applies `func` to its own return value, starting with
    // `item`, until `func` returns false.
    recursive = function (item, func, context) {
        var next;
        context = context || this;
        next = func.call(context, item);
        if (next !== false) {
            recursive(next, func, context);
        }
    },
    updateRootId = mixinTreeSeries.updateRootId;
/**
* @private
* @class
* @name Highcharts.seriesTypes.treemap
*
* @augments Highcharts.Series
*/
seriesType(
'treemap',
'scatter'
/**
* A treemap displays hierarchical data using nested rectangles. The data
* can be laid out in varying ways depending on options.
*
* @sample highcharts/demo/treemap-large-dataset/
* Treemap
*
* @extends plotOptions.scatter
* @excluding marker, jitter
* @product highcharts
* @optionparent plotOptions.treemap
*/
, {
/**
* When enabled the user can click on a point which is a parent and
* zoom in on its children. Deprecated and replaced by
* [allowTraversingTree](#plotOptions.treemap.allowTraversingTree).
*
* @sample {highcharts} highcharts/plotoptions/treemap-allowdrilltonode/
* Enabled
*
* @deprecated
* @type {boolean}
* @default false
* @since 4.1.0
* @product highcharts
* @apioption plotOptions.treemap.allowDrillToNode
*/
/**
* When enabled the user can click on a point which is a parent and
* zoom in on its children.
*
* @sample {highcharts} highcharts/plotoptions/treemap-allowtraversingtree/
* Enabled
*
* @since 7.0.3
* @product highcharts
*/
allowTraversingTree: false,
animationLimit: 250,
/**
* When the series contains less points than the crop threshold, all
* points are drawn, even if the points fall outside the visible plot
* area at the current zoom. The advantage of drawing all points
* (including markers and columns), is that animation is performed on
* updates. On the other hand, when the series contains more points than
* the crop threshold, the series data is cropped to only contain points
* that fall within the plot area. The advantage of cropping away
* invisible points is to increase performance on large series.
*
* @type {number}
* @default 300
* @since 4.1.0
* @product highcharts
* @apioption plotOptions.treemap.cropThreshold
*/
/**
* Fires on a request for change of root node for the tree, before the
* update is made. An event object is passed to the function, containing
* additional properties `newRootId`, `previousRootId`, `redraw` and
* `trigger`.
*
* @type {function}
* @default undefined
* @sample {highcharts} highcharts/plotoptions/treemap-events-setrootnode/
* Alert update information on setRootNode event.
* @since 7.0.3
* @product highcharts
* @apioption plotOptions.treemap.events.setRootNode
*/
/**
* This option decides if the user can interact with the parent nodes
* or just the leaf nodes. When this option is undefined, it will be
* true by default. However when allowTraversingTree is true, then it
* will be false by default.
*
* @sample {highcharts} highcharts/plotoptions/treemap-interactbyleaf-false/
* False
* @sample {highcharts} highcharts/plotoptions/treemap-interactbyleaf-true-and-allowtraversingtree/
* InteractByLeaf and allowTraversingTree is true
*
* @type {boolean}
* @since 4.1.2
* @product highcharts
* @apioption plotOptions.treemap.interactByLeaf
*/
/**
* The sort index of the point inside the treemap level.
*
* @sample {highcharts} highcharts/plotoptions/treemap-sortindex/
* Sort by years
*
* @type {number}
* @since 4.1.10
* @product highcharts
* @apioption plotOptions.treemap.sortIndex
*/
/**
* When using automatic point colors pulled from the `options.colors`
* collection, this option determines whether the chart should receive
* one color per series or one color per point.
*
* @see [series colors](#plotOptions.treemap.colors)
*
* @type {boolean}
* @default false
* @since 2.0
* @product highcharts
* @apioption plotOptions.treemap.colorByPoint
*/
/**
* A series specific or series type specific color set to apply instead
* of the global [colors](#colors) when
* [colorByPoint](#plotOptions.treemap.colorByPoint) is true.
*
* @type {Array<Highcharts.ColorString|Highcharts.GradientColorObject|Highcharts.PatternObject>}
* @since 3.0
* @product highcharts
* @apioption plotOptions.treemap.colors
*/
/**
* Whether to display this series type or specific series item in the
* legend.
*/
showInLegend: false,
/**
* @ignore-option
*/
marker: false,
colorByPoint: false,
/**
* @since 4.1.0
*/
dataLabels: {
/** @ignore-option */
defer: false,
/** @ignore-option */
enabled: true,
/** @ignore-option */
formatter: function () {
var point = this && this.point ? this.point : {},
name = isString(point.name) ? point.name : '';
return name;
},
/** @ignore-option */
inside: true,
/** @ignore-option */
verticalAlign: 'middle'
},
tooltip: {
headerFormat: '',
pointFormat: '<b>{point.name}</b>: {point.value}<br/>'
},
/**
* Whether to ignore hidden points when the layout algorithm runs.
* If `false`, hidden points will leave open spaces.
*
* @since 5.0.8
*/
ignoreHiddenPoint: true,
/**
* This option decides which algorithm is used for setting position
* and dimensions of the points.
*
* @see [How to write your own algorithm](https://www.highcharts.com/docs/chart-and-series-types/treemap)
*
* @sample {highcharts} highcharts/plotoptions/treemap-layoutalgorithm-sliceanddice/
* SliceAndDice by default
* @sample {highcharts} highcharts/plotoptions/treemap-layoutalgorithm-stripes/
* Stripes
* @sample {highcharts} highcharts/plotoptions/treemap-layoutalgorithm-squarified/
* Squarified
* @sample {highcharts} highcharts/plotoptions/treemap-layoutalgorithm-strip/
* Strip
*
* @since 4.1.0
* @validvalue ["sliceAndDice", "stripes", "squarified", "strip"]
*/
layoutAlgorithm: 'sliceAndDice',
/**
* Defines which direction the layout algorithm will start drawing.
*
* @since 4.1.0
* @validvalue ["vertical", "horizontal"]
*/
layoutStartingDirection: 'vertical',
/**
* Enabling this option will make the treemap alternate the drawing
* direction between vertical and horizontal. The next levels starting
* direction will always be the opposite of the previous.
*
* @sample {highcharts} highcharts/plotoptions/treemap-alternatestartingdirection-true/
* Enabled
*
* @since 4.1.0
*/
alternateStartingDirection: false,
/**
* Used together with the levels and allowTraversingTree options. When
* set to false the first visible level is considered to be level one,
* which is dynamic when traversing the tree. Otherwise the level will
* be the same as the tree structure.
*
* @since 4.1.0
*/
levelIsConstant: true,
/**
* Options for the button appearing when drilling down in a treemap.
* Deprecated and replaced by
* [traverseUpButton](#plotOptions.treemap.traverseUpButton).
*
* @deprecated
*/
drillUpButton: {
/**
* The position of the button.
*
* @deprecated
*/
position: {
/**
* Vertical alignment of the button.
*
* @deprecated
* @type {Highcharts.VerticalAlignValue}
* @default top
* @product highcharts
* @apioption plotOptions.treemap.drillUpButton.position.verticalAlign
*/
/**
* Horizontal alignment of the button.
*
* @deprecated
* @type {Highcharts.AlignValue}
*/
align: 'right',
/**
* Horizontal offset of the button.
*
* @deprecated
*/
x: -10,
/**
* Vertical offset of the button.
*
* @deprecated
*/
y: 10
}
},
/**
* Options for the button appearing when traversing down in a treemap.
*/
traverseUpButton: {
/**
* The position of the button.
*/
position: {
/**
* Vertical alignment of the button.
*
* @type {Highcharts.VerticalAlignValue}
* @default top
* @product highcharts
* @apioption plotOptions.treemap.traverseUpButton.position.verticalAlign
*/
/**
* Horizontal alignment of the button.
*
* @type {Highcharts.AlignValue}
*/
align: 'right',
/**
* Horizontal offset of the button.
*/
x: -10,
/**
* Vertical offset of the button.
*/
y: 10
}
},
/**
* Set options on specific levels. Takes precedence over series options,
* but not point options.
*
* @sample {highcharts} highcharts/plotoptions/treemap-levels/
* Styling dataLabels and borders
* @sample {highcharts} highcharts/demo/treemap-with-levels/
* Different layoutAlgorithm
*
* @type {Array<*>}
* @since 4.1.0
* @product highcharts
* @apioption plotOptions.treemap.levels
*/
/**
* Can set a `borderColor` on all points which lies on the same level.
*
* @type {Highcharts.ColorString}
* @since 4.1.0
* @product highcharts
* @apioption plotOptions.treemap.levels.borderColor
*/
/**
* Set the dash style of the border of all the point which lies on the
* level. See <a href="#plotoptions.scatter.dashstyle">
* plotOptions.scatter.dashStyle</a> for possible options.
*
* @type {string}
* @since 4.1.0
* @product highcharts
* @apioption plotOptions.treemap.levels.borderDashStyle
*/
/**
* Can set the borderWidth on all points which lies on the same level.
*
* @type {number}
* @since 4.1.0
* @product highcharts
* @apioption plotOptions.treemap.levels.borderWidth
*/
/**
* Can set a color on all points which lies on the same level.
*
* @type {Highcharts.ColorString|Highcharts.GradientColorObject|Highcharts.PatternObject}
* @since 4.1.0
* @product highcharts
* @apioption plotOptions.treemap.levels.color
*/
/**
* A configuration object to define how the color of a child varies from
* the parent's color. The variation is distributed among the children
* of node. For example when setting brightness, the brightness change
* will range from the parent's original brightness on the first child,
* to the amount set in the `to` setting on the last node. This allows a
* gradient-like color scheme that sets children out from each other
* while highlighting the grouping on treemaps and sectors on sunburst
* charts.
*
* @sample highcharts/demo/sunburst/
* Sunburst with color variation
*
* @since 6.0.0
* @product highcharts
* @apioption plotOptions.treemap.levels.colorVariation
*/
/**
* The key of a color variation. Currently supports `brightness` only.
*
* @type {string}
* @since 6.0.0
* @product highcharts
* @validvalue ["brightness"]
* @apioption plotOptions.treemap.levels.colorVariation.key
*/
/**
* The ending value of a color variation. The last sibling will receive
* this value.
*
* @type {number}
* @since 6.0.0
* @product highcharts
* @apioption plotOptions.treemap.levels.colorVariation.to
*/
/**
* Can set the options of dataLabels on each point which lies on the
* level.
* [plotOptions.treemap.dataLabels](#plotOptions.treemap.dataLabels) for
* possible values.
*
* @type {object}
* @since 4.1.0
* @product highcharts
* @apioption plotOptions.treemap.levels.dataLabels
*/
/**
* Can set the layoutAlgorithm option on a specific level.
*
* @type {string}
* @since 4.1.0
* @product highcharts
* @validvalue ["sliceAndDice", "stripes", "squarified", "strip"]
* @apioption plotOptions.treemap.levels.layoutAlgorithm
*/
/**
* Can set the layoutStartingDirection option on a specific level.
*
* @type {string}
* @since 4.1.0
* @product highcharts
* @validvalue ["vertical", "horizontal"]
* @apioption plotOptions.treemap.levels.layoutStartingDirection
*/
/**
* Decides which level takes effect from the options set in the levels
* object.
*
* @sample {highcharts} highcharts/plotoptions/treemap-levels/
* Styling of both levels
*
* @type {number}
* @since 4.1.0
* @product highcharts
* @apioption plotOptions.treemap.levels.level
*/
// Presentational options
/**
* The color of the border surrounding each tree map item.
*
* @type {Highcharts.ColorString}
*/
borderColor: '#e6e6e6',
/**
* The width of the border surrounding each tree map item.
*/
borderWidth: 1,
/**
* The opacity of a point in treemap. When a point has children, the
* visibility of the children is determined by the opacity.
*
* @since 4.2.4
*/
opacity: 0.15,
/**
* A wrapper object for all the series options in specific states.
*
* @extends plotOptions.heatmap.states
*/
states: {
/**
* Options for the hovered series
*
* @extends plotOptions.heatmap.states.hover
* @excluding halo
*/
hover: {
/**
* The border color for the hovered state.
*/
borderColor: '#999999',
/**
* Brightness for the hovered point. Defaults to 0 if the
* heatmap series is loaded first, otherwise 0.1.
*
* @type {number}
* @default undefined
*/
brightness: seriesTypes.heatmap ? 0 : 0.1,
/**
* @extends plotOptions.heatmap.states.hover.halo
*/
halo: false,
/**
* The opacity of a point in treemap. When a point has children,
* the visibility of the children is determined by the opacity.
*
* @since 4.2.4
*/
opacity: 0.75,
/**
* The shadow option for hovered state.
*/
shadow: false
}
}
// Prototype members
}, {
pointArrayMap: ['value'],
directTouch: true,
optionalAxis: 'colorAxis',
getSymbol: noop,
parallelArrays: ['x', 'y', 'value', 'colorValue'],
colorKey: 'colorValue', // Point color option key
trackerGroups: ['group', 'dataLabelsGroup'],
/**
* Creates an object map from parent id to children's indices.
*
* @private
* @function Highcharts.Series#getListOfParents
*
* @param {Highcharts.SeriesTreemapDataOptions} data
* List of points set in options.
*
* @param {Array<string>} existingIds
* List of all point ids.
*
* @return {object}
* Map from parent id to children index in data.
*/
getListOfParents: function (data, existingIds) {
var arr = isArray(data) ? data : [],
ids = isArray(existingIds) ? existingIds : [],
listOfParents = arr.reduce(function (prev, curr, i) {
var parent = pick(curr.parent, '');
if (prev[parent] === undefined) {
prev[parent] = [];
}
prev[parent].push(i);
return prev;
}, {
'': [] // Root of tree
});
// If parent does not exist, hoist parent to root of tree.
eachObject(listOfParents, function (children, parent, list) {
if ((parent !== '') && (ids.indexOf(parent) === -1)) {
children.forEach(function (child) {
list[''].push(child);
});
delete list[parent];
}
});
return listOfParents;
},
// Creates a tree structured object from the series points
getTree: function () {
var series = this,
allIds = this.data.map(function (d) {
return d.id;
}),
parentList = series.getListOfParents(this.data, allIds);
series.nodeMap = [];
return series.buildNode('', -1, 0, parentList, null);
},
// Define hasData function for non-cartesian series.
// Returns true if the series has points at all.
hasData: function () {
return !!this.processedXData.length; // != 0
},
// Series init: mixes in color-axis helpers when loaded, maps deprecated
// options to their replacements, and wires up tree-traversal clicks.
init: function (chart, options) {
    var series = this,
        colorSeriesMixin = H.colorSeriesMixin;
    // If color series logic is loaded, add some properties
    if (H.colorSeriesMixin) {
        this.translateColors = colorSeriesMixin.translateColors;
        this.colorAttribs = colorSeriesMixin.colorAttribs;
        this.axisTypes = colorSeriesMixin.axisTypes;
    }
    // Handle deprecated options.
    addEvent(series, 'setOptions', function (event) {
        // NOTE: shadows the outer `options` parameter on purpose; this
        // callback works on the user options of the event.
        var options = event.userOptions;
        // allowDrillToNode was renamed allowTraversingTree.
        if (
            defined(options.allowDrillToNode) &&
            !defined(options.allowTraversingTree)
        ) {
            options.allowTraversingTree = options.allowDrillToNode;
            delete options.allowDrillToNode;
        }
        // drillUpButton was renamed traverseUpButton.
        if (
            defined(options.drillUpButton) &&
            !defined(options.traverseUpButton)
        ) {
            options.traverseUpButton = options.drillUpButton;
            delete options.drillUpButton;
        }
    });
    Series.prototype.init.call(series, chart, options);
    if (series.options.allowTraversingTree) {
        addEvent(series, 'click', series.onClickDrillToNode);
    }
},
// Recursively builds the tree node for the point at index `i`,
// creating child nodes from `list` (map of parent id -> child indices)
// and registering every node in series.nodeMap.
buildNode: function (id, i, level, list, parent) {
    var series = this,
        children = [],
        point = series.points[i],
        height = 0,
        node,
        child;
    // Actions
    ((list[id] || [])).forEach(function (i) {
        child = series.buildNode(
            series.points[i].id,
            i,
            (level + 1),
            list,
            id
        );
        // A node's height is 1 + the height of its tallest child.
        height = Math.max(child.height + 1, height);
        children.push(child);
    });
    node = {
        id: id,
        i: i,
        children: children,
        height: height,
        level: level,
        parent: parent,
        visible: false // @todo move this to better location
    };
    series.nodeMap[node.id] = node;
    if (point) {
        point.node = node;
    }
    return node;
},
// Walks the tree bottom-up, computing each node's value (the point's
// own value or the sum of its children), sort order, leaf/ignore
// status and level relative to the current root node.
setTreeValues: function (tree) {
    var series = this,
        options = series.options,
        idRoot = series.rootNode,
        mapIdToNode = series.nodeMap,
        nodeRoot = mapIdToNode[idRoot],
        levelIsConstant = (
            isBoolean(options.levelIsConstant) ?
            options.levelIsConstant :
            true
        ),
        childrenTotal = 0,
        children = [],
        val,
        point = series.points[tree.i];
    // First give the children some values
    tree.children.forEach(function (child) {
        child = series.setTreeValues(child);
        children.push(child);
        if (!child.ignore) {
            childrenTotal += child.val;
        }
    });
    // Sort the children
    stableSort(children, function (a, b) {
        return a.sortIndex - b.sortIndex;
    });
    // Set the values
    val = pick(point && point.options.value, childrenTotal);
    if (point) {
        point.value = val;
    }
    extend(tree, {
        children: children,
        childrenTotal: childrenTotal,
        // Ignore this node if point is not visible
        ignore: !(pick(point && point.visible, true) && (val > 0)),
        isLeaf: tree.visible && !childrenTotal,
        // Level relative to the current root unless levelIsConstant.
        levelDynamic: (
            tree.level - (levelIsConstant ? 0 : nodeRoot.level)
        ),
        name: pick(point && point.name, ''),
        // Default sort order is by descending value (-val).
        sortIndex: pick(point && point.sortIndex, -val),
        val: val
    });
    return tree;
},
/**
* Recursive function which calculates the area for all children of a
* node.
*
* @private
* @function Highcharts.Series#calculateChildrenAreas
*
* @param {object} node
* The node which is parent to the children.
*
* @param {object} area
* The rectangular area of the parent.
*/
calculateChildrenAreas: function (parent, area) {
    var series = this,
        options = series.options,
        mapOptionsToLevel = series.mapOptionsToLevel,
        level = mapOptionsToLevel[parent.level + 1],
        // Use the level's layoutAlgorithm only when it names an
        // existing series method; otherwise fall back to the series
        // option.
        algorithm = pick(
            (
                series[level &&
                level.layoutAlgorithm] &&
                level.layoutAlgorithm
            ),
            options.layoutAlgorithm
        ),
        alternate = options.alternateStartingDirection,
        childrenValues = [],
        children;
    // Collect all children which should be included
    children = parent.children.filter(function (n) {
        return !n.ignore;
    });
    if (level && level.layoutStartingDirection) {
        area.direction = level.layoutStartingDirection === 'vertical' ?
            0 :
            1;
    }
    childrenValues = series[algorithm](area, children);
    children.forEach(function (child, index) {
        var values = childrenValues[index];
        child.values = merge(values, {
            val: child.childrenTotal,
            // Flip the direction for the next level when alternating.
            direction: (alternate ? 1 - area.direction : area.direction)
        });
        // pointValues are scaled back from the axis-ratio-stretched
        // horizontal dimension.
        child.pointValues = merge(values, {
            x: (values.x / series.axisRatio),
            width: (values.width / series.axisRatio)
        });
        // If node has children, then call method recursively
        if (child.children.length) {
            series.calculateChildrenAreas(child, child.values);
        }
    });
},
// Translates each visible node's relative pointValues into pixel-based
// shapeArgs and plot coordinates via the x and y axes.
setPointValues: function () {
    var series = this,
        xAxis = series.xAxis,
        yAxis = series.yAxis;
    series.points.forEach(function (point) {
        var node = point.node,
            values = node.pointValues,
            x1,
            x2,
            y1,
            y2,
            crispCorr = 0;
        // Get the crisp correction in classic mode. For this to work in
        // styled mode, we would need to first add the shape (without x,
        // y, width and height), then read the rendered stroke width
        // using point.graphic.strokeWidth(), then modify and apply the
        // shapeArgs. This applies also to column series, but the
        // downside is performance and code complexity.
        if (!series.chart.styledMode) {
            crispCorr = (
                (series.pointAttribs(point)['stroke-width'] || 0) % 2
            ) / 2;
        }
        // Points which is ignored, have no values.
        if (values && node.visible) {
            x1 = Math.round(
                xAxis.translate(values.x, 0, 0, 0, 1)
            ) - crispCorr;
            x2 = Math.round(
                xAxis.translate(values.x + values.width, 0, 0, 0, 1)
            ) - crispCorr;
            y1 = Math.round(
                yAxis.translate(values.y, 0, 0, 0, 1)
            ) - crispCorr;
            y2 = Math.round(
                yAxis.translate(values.y + values.height, 0, 0, 0, 1)
            ) - crispCorr;
            // Set point values
            point.shapeArgs = {
                x: Math.min(x1, x2),
                y: Math.min(y1, y2),
                width: Math.abs(x2 - x1),
                height: Math.abs(y2 - y1)
            };
            // Plot position is the center of the rectangle.
            point.plotX =
                point.shapeArgs.x + (point.shapeArgs.width / 2);
            point.plotY =
                point.shapeArgs.y + (point.shapeArgs.height / 2);
        } else {
            // Reset visibility
            delete point.plotX;
            delete point.plotY;
        }
    });
},
// Set the node's color recursively, from the parent down.
setColorRecursive: function (
node,
parentColor,
colorIndex,
index,
siblings
) {
var series = this,
chart = series && series.chart,
colors = chart && chart.options && chart.options.colors,
colorInfo,
point;
if (node) {
colorInfo = getColor(node, {
colors: colors,
index: index,
mapOptionsToLevel: series.mapOptionsToLevel,
parentColor: parentColor,
parentColorIndex: colorIndex,
series: series,
siblings: siblings
});
point = series.points[node.i];
if (point) {
point.color = colorInfo.color;
point.colorIndex = colorInfo.colorIndex;
}
// Do it all again with the children
(node.children || []).forEach(function (child, i) {
series.setColorRecursive(
child,
colorInfo.color,
colorInfo.colorIndex,
i,
node.children.length
);
});
}
},
// Helper constructor used by algorithmLowAspectRatio (squarified and
// strip layouts). Tracks the row of elements currently being laid out:
// its running total, row dimensions before (l*) and after (n*) the
// latest addition, and the corresponding aspect ratios.
algorithmGroup: function (h, w, d, p) {
    this.height = h;
    this.width = w;
    this.plot = p;
    this.direction = d;
    this.startDirection = d;
    this.total = 0;
    this.nW = 0;
    this.lW = 0;
    this.nH = 0;
    this.lH = 0;
    this.elArr = [];
    // State of the last element added to the row. nR/lR are its aspect
    // ratios after/before the latest addition; the caller flushes the
    // row when nR > lR (the ratio got worse).
    this.lP = {
        total: 0,
        lH: 0,
        nH: 0,
        lW: 0,
        nW: 0,
        nR: 0,
        lR: 0,
        aspectRatio: function (w, h) {
            return Math.max((w / h), (h / w));
        }
    };
    // Adds one element (an area) to the row and updates the ratios.
    this.addElement = function (el) {
        this.lP.total = this.elArr[this.elArr.length - 1];
        this.total = this.total + el;
        if (this.direction === 0) {
            // Calculate last point old aspect ratio
            this.lW = this.nW;
            this.lP.lH = this.lP.total / this.lW;
            this.lP.lR = this.lP.aspectRatio(this.lW, this.lP.lH);
            // Calculate last point new aspect ratio
            this.nW = this.total / this.height;
            this.lP.nH = this.lP.total / this.nW;
            this.lP.nR = this.lP.aspectRatio(this.nW, this.lP.nH);
        } else {
            // Calculate last point old aspect ratio
            this.lH = this.nH;
            this.lP.lW = this.lP.total / this.lH;
            this.lP.lR = this.lP.aspectRatio(this.lP.lW, this.lH);
            // Calculate last point new aspect ratio
            this.nH = this.total / this.width;
            this.lP.nW = this.lP.total / this.nH;
            this.lP.nR = this.lP.aspectRatio(this.lP.nW, this.nH);
        }
        this.elArr.push(el);
    };
    // Starts a new, empty row.
    this.reset = function () {
        this.nW = 0;
        this.lW = 0;
        this.elArr = [];
        this.total = 0;
    };
},
// Lays out the elements collected in `group` into `childrenArea`. When
// `last` is false the final element is left uncalculated and re-added
// to the reset group, as it starts the next row.
algorithmCalcPoints: function (
    directionChange, last, group, childrenArea
) {
    var pX,
        pY,
        pW,
        pH,
        gW = group.lW,
        gH = group.lH,
        plot = group.plot,
        keep,
        i = 0,
        end = group.elArr.length - 1;
    if (last) {
        // Closing the row: use the post-addition dimensions.
        gW = group.nW;
        gH = group.nH;
    } else {
        // Remember the newest element; it starts the next row.
        keep = group.elArr[group.elArr.length - 1];
    }
    group.elArr.forEach(function (p) {
        if (last || (i < end)) {
            if (group.direction === 0) {
                pX = plot.x;
                pY = plot.y;
                pW = gW;
                pH = p / pW;
            } else {
                pX = plot.x;
                pY = plot.y;
                pH = gH;
                pW = p / pH;
            }
            childrenArea.push({
                x: pX,
                y: pY,
                width: pW,
                height: H.correctFloat(pH)
            });
            // Advance the plot cursor along the row direction.
            if (group.direction === 0) {
                plot.y = plot.y + pH;
            } else {
                plot.x = plot.x + pW;
            }
        }
        i = i + 1;
    });
    // Reset variables
    group.reset();
    // Shrink the remaining area by the consumed row.
    if (group.direction === 0) {
        group.width = group.width - gW;
    } else {
        group.height = group.height - gH;
    }
    // Move the plot cursor to the start of the remaining area.
    plot.y = plot.parent.y + (plot.parent.height - group.height);
    plot.x = plot.parent.x + (plot.parent.width - group.width);
    if (directionChange) {
        group.direction = 1 - group.direction;
    }
    // If not last, then add uncalculated element
    if (!last) {
        group.addElement(keep);
    }
},
// Shared implementation of the 'squarified' and 'strip' layouts. Adds
// children to a row until adding one would worsen the aspect ratio,
// then flushes the row via algorithmCalcPoints.
algorithmLowAspectRatio: function (directionChange, parent, children) {
    var childrenArea = [],
        series = this,
        pTot,
        plot = {
            x: parent.x,
            y: parent.y,
            parent: parent
        },
        direction = parent.direction,
        i = 0,
        end = children.length - 1,
        group = new this.algorithmGroup( // eslint-disable-line new-cap
            parent.height,
            parent.width,
            direction,
            plot
        );
    // Loop through and calculate all areas
    children.forEach(function (child) {
        // Child area is proportional to its share of the parent value.
        pTot =
            (parent.width * parent.height) * (child.val / parent.val);
        group.addElement(pTot);
        // Flush the row when the latest addition worsened the ratio.
        // NOTE(review): algorithmCalcPoints declares only four
        // parameters; the trailing `plot` argument looks unused —
        // confirm before removing.
        if (group.lP.nR > group.lP.lR) {
            series.algorithmCalcPoints(
                directionChange,
                false,
                group,
                childrenArea,
                plot
            );
        }
        // If last child, then calculate all remaining areas
        if (i === end) {
            series.algorithmCalcPoints(
                directionChange,
                true,
                group,
                childrenArea,
                plot
            );
        }
        i = i + 1;
    });
    return childrenArea;
},
// Shared implementation of the 'sliceAndDice' and 'stripes' layouts.
// Slices the remaining parent area for each child in turn, optionally
// flipping the slice direction after every child.
algorithmFill: function (directionChange, parent, children) {
    var childrenArea = [],
        pTot,
        direction = parent.direction,
        x = parent.x,
        y = parent.y,
        width = parent.width,
        height = parent.height,
        pX,
        pY,
        pW,
        pH;
    children.forEach(function (child) {
        // Child area is proportional to its share of the parent value.
        pTot =
            (parent.width * parent.height) * (child.val / parent.val);
        pX = x;
        pY = y;
        if (direction === 0) {
            // Vertical slice: full height, proportional width.
            pH = height;
            pW = pTot / pH;
            width = width - pW;
            x = x + pW;
        } else {
            // Horizontal slice: full width, proportional height.
            pW = width;
            pH = pTot / pW;
            height = height - pH;
            y = y + pH;
        }
        childrenArea.push({
            x: pX,
            y: pY,
            width: pW,
            height: pH
        });
        if (directionChange) {
            direction = 1 - direction;
        }
    });
    return childrenArea;
},
// Strip layout: low aspect ratio rows in a fixed direction.
strip: function (parent, children) {
    return this.algorithmLowAspectRatio(false, parent, children);
},
// Squarified layout: low aspect ratio rows, direction flips per row.
squarified: function (parent, children) {
    return this.algorithmLowAspectRatio(true, parent, children);
},
// Slice and dice layout: fill, direction flips after each child.
sliceAndDice: function (parent, children) {
    return this.algorithmFill(true, parent, children);
},
// Stripes layout: fill in a fixed direction.
stripes: function (parent, children) {
    return this.algorithmFill(false, parent, children);
},
// Main layout entry point: builds the tree, resolves the root node,
// computes values and areas for all nodes, assigns colors, and updates
// axis extremes and point positions.
translate: function () {
    var series = this,
        options = series.options,
        // NOTE: updateRootId modifies series.
        rootId = updateRootId(series),
        rootNode,
        pointValues,
        seriesArea,
        tree,
        val;
    // Call prototype function
    Series.prototype.translate.call(series);
    // @todo Only if series.isDirtyData is true
    tree = series.tree = series.getTree();
    rootNode = series.nodeMap[rootId];
    series.renderTraverseUpButton(rootId);
    series.mapOptionsToLevel = getLevelOptions({
        from: rootNode.level + 1,
        levels: options.levels,
        to: tree.height,
        defaults: {
            levelIsConstant: series.options.levelIsConstant,
            colorByPoint: options.colorByPoint
        }
    });
    // Fall back to the tree root when the requested root node does not
    // exist or has no children.
    if (
        rootId !== '' &&
        (!rootNode || !rootNode.children.length)
    ) {
        series.setRootNode('', false);
        rootId = series.rootNode;
        rootNode = series.nodeMap[rootId];
    }
    // Parents of the root node is by default visible
    recursive(series.nodeMap[series.rootNode], function (node) {
        var next = false,
            p = node.parent;
        node.visible = true;
        if (p || p === '') {
            next = series.nodeMap[p];
        }
        return next;
    });
    // Children of the root node is by default visible
    recursive(
        series.nodeMap[series.rootNode].children,
        function (children) {
            var next = false;
            children.forEach(function (child) {
                child.visible = true;
                if (child.children.length) {
                    next = (next || []).concat(child.children);
                }
            });
            return next;
        }
    );
    series.setTreeValues(tree);
    // Calculate plotting values.
    series.axisRatio = (series.xAxis.len / series.yAxis.len);
    // The root area covers the whole 0-100 axis range; its width is
    // stretched by axisRatio so tiles come out square in pixels.
    series.nodeMap[''].pointValues = pointValues =
        { x: 0, y: 0, width: 100, height: 100 };
    series.nodeMap[''].values = seriesArea = merge(pointValues, {
        width: (pointValues.width * series.axisRatio),
        direction: (
            options.layoutStartingDirection === 'vertical' ? 0 : 1
        ),
        val: tree.val
    });
    series.calculateChildrenAreas(tree, seriesArea);
    // Logic for point colors
    if (series.colorAxis) {
        series.translateColors();
    } else if (!options.colorByPoint) {
        series.setColorRecursive(series.tree);
    }
    // Update axis extremes according to the root node.
    if (options.allowTraversingTree) {
        val = rootNode.pointValues;
        series.xAxis.setExtremes(val.x, val.x + val.width, false);
        series.yAxis.setExtremes(val.y, val.y + val.height, false);
        series.xAxis.setScale();
        series.yAxis.setScale();
    }
    // Assign values to points.
    series.setPointValues();
},
/**
* Extend drawDataLabels with logic to handle custom options related to
* the treemap series:
*
* - Points which is not a leaf node, has dataLabels disabled by
* default.
*
* - Options set on series.levels is merged in.
*
* - Width of the dataLabel is set to match the width of the point
* shape.
*
* @private
* @function Highcharts.Series#drawDataLabels
*/
// Builds per-point dataLabel options (series defaults, then level
// options, then point options) before delegating to the base
// implementation.
drawDataLabels: function () {
    var series = this,
        mapOptionsToLevel = series.mapOptionsToLevel,
        points = series.points.filter(function (n) {
            return n.node.visible;
        }),
        options,
        level;
    points.forEach(function (point) {
        level = mapOptionsToLevel[point.node.level];
        // Set options to new object to avoid problems with scope
        options = { style: {} };
        // If not a leaf, then label should be disabled as default
        if (!point.node.isLeaf) {
            options.enabled = false;
        }
        // If options for level exists, include them as well
        if (level && level.dataLabels) {
            options = merge(options, level.dataLabels);
            series._hasPointLabels = true;
        }
        // Set dataLabel width to the width of the point shape.
        if (point.shapeArgs) {
            options.style.width = point.shapeArgs.width;
            if (point.dataLabel) {
                point.dataLabel.css({
                    width: point.shapeArgs.width + 'px'
                });
            }
        }
        // Merge custom options with point options
        point.dlOptions = merge(options, point.options.dataLabels);
    });
    Series.prototype.drawDataLabels.call(this);
},
// Override the column alignment method: clip overlong labels with
// ellipsis and hoist the label above the point by z index.
alignDataLabel: function (point, dataLabel, labelOptions) {
    var style = labelOptions.style;
    // #8160: Prevent the label from exceeding the point's
    // boundaries in treemaps by applying ellipsis overflow.
    // The issue was happening when datalabel's text contained a
    // long sequence of characters without a whitespace.
    if (
        !H.defined(style.textOverflow) &&
        dataLabel.text &&
        dataLabel.getBBox().width > dataLabel.text.textWidth
    ) {
        dataLabel.css({
            textOverflow: 'ellipsis',
            // unit (px) is required when useHTML is true
            width: style.width += 'px'
        });
    }
    seriesTypes.column.prototype.alignDataLabel.apply(this, arguments);
    if (point.dataLabel) {
        // point.node.zIndex could be undefined (#6956)
        point.dataLabel.attr({ zIndex: (point.node.zIndex || 0) + 1 });
    }
},
// Get presentational (SVG) attributes for a point, resolved by
// precedence: point options trump level options trump series options.
pointAttribs: function (point, state) {
    var series = this,
        mapOptionsToLevel = (
            isObject(series.mapOptionsToLevel) ?
            series.mapOptionsToLevel :
            {}
        ),
        level = point && mapOptionsToLevel[point.node.level] || {},
        options = this.options,
        attr,
        stateOptions = (state && options.states[state]) || {},
        className = (point && point.getClassName()) || '',
        opacity;
    // Set attributes by precedence. Point trumps level trumps series.
    // Stroke width uses pick because it can be 0.
    attr = {
        'stroke':
            (point && point.borderColor) ||
            level.borderColor ||
            stateOptions.borderColor ||
            options.borderColor,
        'stroke-width': pick(
            point && point.borderWidth,
            level.borderWidth,
            stateOptions.borderWidth,
            options.borderWidth
        ),
        'dashstyle':
            (point && point.borderDashStyle) ||
            level.borderDashStyle ||
            stateOptions.borderDashStyle ||
            options.borderDashStyle,
        'fill': (point && point.color) || this.color
    };
    // Hide levels above the current view
    if (className.indexOf('highcharts-above-level') !== -1) {
        attr.fill = 'none';
        attr['stroke-width'] = 0;
    // Nodes with children that accept interaction
    } else if (
        className.indexOf('highcharts-internal-node-interactive') !== -1
    ) {
        // Use a translucent fill so the children show through.
        opacity = pick(stateOptions.opacity, options.opacity);
        attr.fill = color(attr.fill).setOpacity(opacity).get();
        attr.cursor = 'pointer';
    // Hide nodes that have children
    } else if (className.indexOf('highcharts-internal-node') !== -1) {
        attr.fill = 'none';
    } else if (state) {
        // Brighten and hoist the hover nodes
        attr.fill = color(attr.fill)
            .brighten(stateOptions.brightness)
            .get();
    }
    return attr;
},
        // Override drawPoints. Renders each point into a per-level group
        // (so deeper levels stack above shallower ones) and wires up the
        // drill id used for traversal clicks.
        drawPoints: function () {
            var series = this,
                chart = series.chart,
                renderer = chart.renderer,
                points = series.points,
                styledMode = chart.styledMode,
                options = series.options,
                shadow = styledMode ? {} : options.shadow,
                borderRadius = options.borderRadius,
                withinAnimationLimit =
                    chart.pointCount < options.animationLimit,
                allowTraversingTree = options.allowTraversingTree;
            points.forEach(function (point) {
                var levelDynamic = point.node.levelDynamic,
                    animate = {},
                    attr = {},
                    css = {},
                    groupKey = 'level-group-' + levelDynamic,
                    hasGraphic = !!point.graphic,
                    shouldAnimate = withinAnimationLimit && hasGraphic,
                    shapeArgs = point.shapeArgs;
                // Don't bother with calculate styling if the point is not drawn
                if (point.shouldDraw()) {
                    if (borderRadius) {
                        attr.r = borderRadius;
                    }
                    // When animating, the geometry/style goes into `animate`
                    // so the transition is smooth; otherwise set directly.
                    merge(
                        true, // Extend object
                        // Which object to extend
                        shouldAnimate ? animate : attr,
                        // Add shapeArgs to animate/attr if graphic exists
                        hasGraphic ? shapeArgs : {},
                        // Add style attribs if !styleMode
                        styledMode ?
                            {} :
                            series.pointAttribs(
                                point, point.selected && 'select'
                            )
                    );
                    // In styled mode apply point.color. Use CSS, otherwise the
                    // fill used in the style sheet will take precedence over
                    // the fill attribute.
                    if (series.colorAttribs && styledMode) {
                        // Heatmap is loaded
                        extend(css, series.colorAttribs(point));
                    }
                    // Lazily create one SVG group per dynamic level.
                    if (!series[groupKey]) {
                        series[groupKey] = renderer.g(groupKey)
                            .attr({
                                // @todo Set the zIndex based upon the number of
                                // levels, instead of using 1000
                                zIndex: 1000 - levelDynamic
                            })
                            .add(series.group);
                    }
                }
                // Draw the point
                point.draw({
                    animatableAttribs: animate,
                    attribs: attr,
                    css: css,
                    group: series[groupKey],
                    renderer: renderer,
                    shadow: shadow,
                    shapeArgs: shapeArgs,
                    shapeType: 'rect'
                });
                // If setRootNode is allowed, set a point cursor on clickables &
                // add drillId to point
                if (allowTraversingTree && point.graphic) {
                    point.drillId = options.interactByLeaf ?
                        series.drillToByLeaf(point) :
                        series.drillToByGroup(point);
                }
            });
        },
// Add drilling on the suitable points
onClickDrillToNode: function (event) {
var series = this,
point = event.point,
drillId = point && point.drillId;
// If a drill id is returned, add click event and cursor.
if (isString(drillId)) {
point.setState(''); // Remove hover
series.setRootNode(drillId, true, { trigger: 'click' });
}
},
/**
* Finds the drill id for a parent node. Returns false if point should
* not have a click event.
*
* @private
* @function Highcharts.Series#drillToByGroup
*
* @param {object} point
*
* @return {boolean|string}
* Drill to id or false when point should not have a click
* event.
*/
drillToByGroup: function (point) {
var series = this,
drillId = false;
if ((point.node.level - series.nodeMap[series.rootNode].level) ===
1 &&
!point.node.isLeaf
) {
drillId = point.id;
}
return drillId;
},
/**
* Finds the drill id for a leaf node. Returns false if point should not
* have a click event
*
* @private
* @function Highcharts.Series#drillToByLeaf
*
* @param {object} point
*
* @return {boolean|string}
* Drill to id or false when point should not have a click
* event.
*/
drillToByLeaf: function (point) {
var series = this,
drillId = false,
nodeParent;
if ((point.node.parent !== series.rootNode) &&
point.node.isLeaf
) {
nodeParent = point.node;
while (!drillId) {
nodeParent = series.nodeMap[nodeParent.parent];
if (nodeParent.parent === series.rootNode) {
drillId = nodeParent.id;
}
}
}
return drillId;
},
drillUp: function () {
var series = this,
node = series.nodeMap[series.rootNode];
if (node && isString(node.parent)) {
series.setRootNode(
node.parent,
true,
{ trigger: 'traverseUpButton' }
);
}
},
        // Deprecated alias for setRootNode, kept for backwards compatibility.
        // TODO remove this function at a suitable version.
        drillToNode: function (id, redraw) {
            // Warn so callers migrate before the alias is removed.
            error(
                'WARNING: treemap.drillToNode has been renamed to treemap.' +
                'setRootNode, and will be removed in the next major version.'
            );
            this.setRootNode(id, redraw);
        },
        /**
         * Sets a new root node for the series.
         *
         * @private
         * @function Highcharts.Series#setRootNode
         *
         * @param {string} id The id of the new root node.
         * @param {boolean} [redraw=true] Whether to redraw the chart or not.
         * @param {object} [eventArguments] Arguments to be accessed in
         * event handler.
         * @param {string} [eventArguments.newRootId] Id of the new root.
         * @param {string} [eventArguments.previousRootId] Id of the previous
         * root.
         * @param {boolean} [eventArguments.redraw] Whether to redraw the
         * chart after.
         * @param {object} [eventArguments.series] The series to update the root
         * of.
         * @param {string} [eventArguments.trigger] The action which
         * triggered the event. Undefined if the setRootNode is called
         * directly.
         */
        setRootNode: function (id, redraw, eventArguments) {
            var series = this,
                eventArgs = extend({
                    newRootId: id,
                    previousRootId: series.rootNode,
                    redraw: pick(redraw, true),
                    series: series
                }, eventArguments);
            /**
             * The default functionality of the setRootNode event.
             *
             * @private
             * @param {object} args The event arguments.
             * @param {string} args.newRootId Id of the new root.
             * @param {string} args.previousRootId Id of the previous root.
             * @param {boolean} args.redraw Whether to redraw the chart after.
             * @param {object} args.series The series to update the root of.
             * @param {string} [args.trigger=undefined] The action which
             * triggered the event. Undefined if the setRootNode is called
             * directly.
             */
            var defaultFn = function (args) {
                var series = args.series;
                // Store previous and new root ids on the series.
                series.idPreviousRoot = args.previousRootId;
                series.rootNode = args.newRootId;
                // Redraw the chart
                series.isDirty = true; // Force redraw
                if (args.redraw) {
                    series.chart.redraw();
                }
            };
            // Fire setRootNode event; listeners may prevent the default.
            fireEvent(series, 'setRootNode', eventArgs, defaultFn);
        },
        // Renders, updates or destroys the "traverse up" (back) button,
        // depending on whether a non-top root node is currently shown.
        renderTraverseUpButton: function (rootId) {
            var series = this,
                nodeMap = series.nodeMap,
                node = nodeMap[rootId],
                name = node.name,
                buttonOptions = series.options.traverseUpButton,
                backText = pick(buttonOptions.text, name, '&lt; Back'),
                attr,
                states;
            if (rootId === '') {
                // At the top level: the button is no longer needed.
                if (series.drillUpButton) {
                    series.drillUpButton = series.drillUpButton.destroy();
                }
            } else if (!this.drillUpButton) {
                // First render of the button.
                attr = buttonOptions.theme;
                states = attr && attr.states;
                this.drillUpButton = this.chart.renderer.button(
                    backText,
                    null,
                    null,
                    function () {
                        series.drillUp();
                    },
                    attr,
                    states && states.hover,
                    states && states.select
                )
                .addClass('highcharts-drillup-button')
                .attr({
                    align: buttonOptions.position.align,
                    zIndex: 7
                })
                .add()
                .align(
                    buttonOptions.position,
                    false,
                    buttonOptions.relativeTo || 'plotBox'
                );
            } else {
                // Button exists: just refresh its label and position.
                this.drillUpButton.placed = false;
                this.drillUpButton.attr({
                    text: backText
                })
                .align();
            }
        },
        // The treemap handles its own hit testing; no k-d tree needed.
        buildKDTree: noop,
        drawLegendSymbol: H.LegendSymbolMixin.drawRectangle,
        // Computes extremes twice: once for the color value data (stored as
        // valueMin/valueMax) and once for the y data (series default).
        getExtremes: function () {
            // Get the extremes from the value data
            Series.prototype.getExtremes.call(this, this.colorValueData);
            this.valueMin = this.dataMin;
            this.valueMax = this.dataMax;
            // Get the extremes from the y data
            Series.prototype.getExtremes.call(this);
        },
        getExtremesFromAll: true,
bindAxes: function () {
var treeAxis = {
endOnTick: false,
gridLineWidth: 0,
lineWidth: 0,
min: 0,
dataMin: 0,
minPadding: 0,
max: 100,
dataMax: 100,
maxPadding: 0,
startOnTick: false,
title: null,
tickPositions: []
};
Series.prototype.bindAxes.call(this);
H.extend(this.yAxis.options, treeAxis);
H.extend(this.xAxis.options, treeAxis);
},
        /**
         * Workaround for `inactive` state. Since `series.opacity` option is
         * already reserved, don't use that state at all by disabling
         * `inactiveOtherPoints` and not inheriting states by points.
         *
         * @private
         */
        setState: function (state) {
            // Temporarily enable the flag only for the duration of the call.
            this.options.inactiveOtherPoints = true;
            Series.prototype.setState.call(this, state, false);
            this.options.inactiveOtherPoints = false;
        },
        // Expose internal tree-walk helper for reuse/testing.
        utils: {
            recursive: recursive
        }
    // Point class
    }, {
        draw: drawPoint,
getClassName: function () {
var className = H.Point.prototype.getClassName.call(this),
series = this.series,
options = series.options;
// Above the current level
if (this.node.level <= series.nodeMap[series.rootNode].level) {
className += ' highcharts-above-level';
} else if (
!this.node.isLeaf &&
!pick(options.interactByLeaf, !options.allowTraversingTree)
) {
className += ' highcharts-internal-node-interactive';
} else if (!this.node.isLeaf) {
className += ' highcharts-internal-node';
}
return className;
},
        /**
         * A tree point is valid if it has an id too; assume it may be a parent
         * item.
         *
         * @private
         * @function Highcharts.Point#isValid
         */
        isValid: function () {
            // Note: returns the id itself (truthy string) or a boolean.
            return this.id || isNumber(this.value);
        },
setState: function (state) {
H.Point.prototype.setState.call(this, state);
// Graphic does not exist when point is not visible.
if (this.graphic) {
this.graphic.attr({
zIndex: state === 'hover' ? 1 : 0
});
}
},
setVisible: seriesTypes.pie.prototype.pointClass.prototype.setVisible,
shouldDraw: function () {
var point = this;
return isNumber(point.plotY) && point.y !== null;
}
}
);
/**
* A `treemap` series. If the [type](#series.treemap.type) option is
* not specified, it is inherited from [chart.type](#chart.type).
*
* @extends series,plotOptions.treemap
* @excluding dataParser, dataURL, stack
* @product highcharts
* @apioption series.treemap
*/
/**
* An array of data points for the series. For the `treemap` series
* type, points can be given in the following ways:
*
* 1. An array of numerical values. In this case, the numerical values will be
* interpreted as `value` options. Example:
* ```js
* data: [0, 5, 3, 5]
* ```
*
* 2. An array of objects with named values. The following snippet shows only a
* few settings, see the complete options set below. If the total number of
* data points exceeds the series'
* [turboThreshold](#series.treemap.turboThreshold),
* this option is not available.
* ```js
* data: [{
* value: 9,
* name: "Point2",
* color: "#00FF00"
* }, {
* value: 6,
* name: "Point1",
* color: "#FF00FF"
* }]
* ```
*
* @sample {highcharts} highcharts/chart/reflow-true/
* Numerical values
* @sample {highcharts} highcharts/series/data-array-of-objects/
* Config objects
*
* @type {Array<number|null|*>}
* @extends series.heatmap.data
* @excluding x, y
* @product highcharts
* @apioption series.treemap.data
*/
/**
* The value of the point, resulting in a relative area of the point
* in the treemap.
*
* @type {number|null}
* @product highcharts
* @apioption series.treemap.data.value
*/
/**
* Serves a purpose only if a `colorAxis` object is defined in the chart
* options. This value will decide which color the point gets from the
* scale of the colorAxis.
*
* @type {number}
* @since 4.1.0
* @product highcharts
* @apioption series.treemap.data.colorValue
*/
/**
* Only for treemap. Use this option to build a tree structure. The
* value should be the id of the point which is the parent. If no points
* has a matching id, or this option is undefined, then the parent will
* be set to the root.
*
* @sample {highcharts} highcharts/point/parent/
* Point parent
* @sample {highcharts} highcharts/demo/treemap-with-levels/
* Example where parent id is not matching
*
* @type {string}
* @since 4.1.0
* @product highcharts
* @apioption series.treemap.data.parent
*/
});
_registerModule(_modules, 'masters/modules/treemap.src.js', [], function () {
});
}));
| Qesy/Q-Frame | Static/bootstrap/Highcharts-7.1.1/modules/treemap.src.js | JavaScript | apache-2.0 | 92,176 |
"""Neural network operations."""
from __future__ import absolute_import as _abs
from . import _make
def conv2d(data,
           weight,
           strides=(1, 1),
           padding=(0, 0),
           dilation=(1, 1),
           groups=1,
           channels=None,
           kernel_size=None,
           data_layout="NCHW",
           weight_layout="OIHW",
           out_layout="",
           out_dtype=""):
    r"""2D convolution.
    This operator takes the weight as the convolution kernel
    and convolves it with data to produce an output.
    In the default case, where the data_layout is `NCHW`
    and weight_layout is `OIHW`, conv2d takes in
    a data Tensor with shape `(batch_size, in_channels, height, width)`,
    and a weight Tensor with shape `(channels, in_channels, kernel_size[0], kernel_size[1])`
    to produce an output Tensor with the following rule:
    .. math::
        \mbox{out}[b, c, y, x] = \sum_{dy, dx, k}
           \mbox{data}[b, k, \mbox{strides}[0] * y  + dy, \mbox{strides}[1] * x + dx] *
           \mbox{weight}[c, k, dy, dx]
    Padding and dilation are applied to data and weight respectively before the computation.
    This operator accepts data layout specification.
    Semantically, the operator will convert the layout to the canonical layout
    (`NCHW` for data and `OIHW` for weight), perform the computation,
    then convert to the out_layout.
    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    weight : relay.Expr
        The weight expressions.
    strides : tuple of int, optional
        The strides of convolution.
    padding : tuple of int, optional
        The padding of convolution on both sides of inputs before convolution.
    dilation : tuple of int, optional
        Specifies the dilation rate to be used for dilated convolution.
    groups : int, optional
        Number of groups for grouped convolution.
    channels : int, optional
        Number of output channels of this convolution.
    kernel_size : tuple of int, optional
        The spatial dimensions of the convolution kernel.
    data_layout : str, optional
        Layout of the input.
    weight_layout : str, optional
        Layout of the weight.
    out_layout : str, optional
        Layout of the output, by default, out_layout is the same as data_layout
    out_dtype : str, optional
        Specifies the output data type for mixed precision conv2d.
    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.conv2d(data, weight, strides, padding, dilation,
                        groups, channels, kernel_size, data_layout,
                        weight_layout, out_layout, out_dtype)
def softmax(data, axis):
    r"""Computes softmax.
    .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}
    .. note::
        This operator can be optimized away for inference.
    Parameters
    ----------
    data: relay.Expr
        The input data to the operator.
    axis: int
        The axis to sum over when computing softmax
    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.softmax(data, axis)
def log_softmax(data, axis):
    r"""Computes log softmax.
    .. math::
        \text{log_softmax}(x)_i = \log \frac{exp(x_i)}{\sum_j exp(x_j)}
    .. note::
        This operator can be optimized away for inference.
    Parameters
    ----------
    data: relay.Expr
        The input data to the operator.
    axis: int
        The axis to sum over when computing log softmax
    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.log_softmax(data, axis)
def max_pool2d(data,
               pool_size=(1, 1),
               strides=(1, 1),
               padding=(0, 0),
               layout="NCHW",
               ceil_mode=False):
    r"""2D maximum pooling operator.
    This operator takes data as input and does 2D max value calculation
    with in pool_size sized window by striding defined by stride
    In the default case, where the data_layout is `NCHW`
    a data Tensor with shape `(batch_size, in_channels, height, width)`,
    to produce an output Tensor with the following rule:
    with data of shape (b, c, h, w) and pool_size (kh, kw)
    .. math::
        \mbox{out}(b, c, y, x)  = \max_{m=0, \ldots, kh-1} \max_{n=0, \ldots, kw-1}
             \mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
    Padding is applied to data before the computation.
    ceil_mode is used to take ceil or floor while computing out shape.
    This operator accepts data layout specification.
    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    pool_size : tuple of int, optional
        The size of the pooling window.
    strides : tuple of int, optional
        The strides of pooling.
    padding : tuple of int, optional
        The padding for pooling.
    layout : str, optional
        Layout of the input.
    ceil_mode : bool, optional
        To enable or disable ceil while pooling.
    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.max_pool2d(data, pool_size, strides, padding,
                            layout, ceil_mode)
def avg_pool2d(data,
               pool_size=(1, 1),
               strides=(1, 1),
               padding=(0, 0),
               layout="NCHW",
               ceil_mode=False,
               count_include_pad=False):
    r"""2D average pooling operator.
    This operator takes data as input and does 2D average value calculation
    with in pool_size sized window by striding defined by stride
    In the default case, where the data_layout is `NCHW`
    a data Tensor with shape `(batch_size, in_channels, height, width)`,
    to produce an output Tensor with the following rule:
    with data of shape (b, c, h, w), pool_size (kh, kw)
    .. math::
        \mbox{out}(b, c, y, x)  = \frac{1}{kh * kw} \sum_{m=0}^{kh-1} \sum_{n=0}^{kw-1}
             \mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
    Padding is applied to data before the computation.
    ceil_mode is used to take ceil or floor while computing out shape.
    count_include_pad indicates including or excluding padded input values in computation.
    This operator accepts data layout specification.
    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    pool_size : tuple of int, optional
        The size of the pooling window.
    strides : tuple of int, optional
        The strides of pooling.
    padding : tuple of int, optional
        The padding for pooling.
    layout : str, optional
        Layout of the input.
    ceil_mode : bool, optional
        To enable or disable ceil while pooling.
    count_include_pad : bool, optional
        To include padding to compute the average.
    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.avg_pool2d(data, pool_size, strides, padding,
                            layout, ceil_mode, count_include_pad)
def global_max_pool2d(data,
                      layout="NCHW"):
    r"""2D global maximum pooling operator.
    This operator takes data as input and does 2D max value calculation
    across each window represented by WxH.
    In the default case, where the data_layout is `NCHW`
    a data Tensor with shape `(batch_size, in_channels, height, width)`,
    to produce an output Tensor of shape `(batch_size, in_channels, 1, 1)`
    with the following rule:
    with data of shape (b, c, h, w)
    .. math::
        \mbox{out}(b, c, 1, 1)  = \max_{m=0, \ldots, h} \max_{n=0, \ldots, w}
             \mbox{data}(b, c, m, n)
    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    layout : str, optional
        Layout of the input.
    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.global_max_pool2d(data, layout)
def global_avg_pool2d(data,
                      layout="NCHW"):
    r"""2D global average pooling operator.
    This operator takes data as input and does 2D average value calculation
    across each window represented by WxH.
    In the default case, where the data_layout is `NCHW`
    a data Tensor with shape `(batch_size, in_channels, height, width)`,
    to produce an output Tensor of shape `(batch_size, in_channels, 1, 1)`
    with the following rule:
    with data of shape (b, c, h, w)
    .. math::
        \mbox{out}(b, c, 1, 1)  = \frac{1}{h * w} \sum_{m=0}^{h-1} \sum_{n=0}^{w-1}
             \mbox{data}(b, c, m, n)
    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    layout : str, optional
        Layout of the input.
    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.global_avg_pool2d(data, layout)
def upsampling(data,
               scale=1,
               layout="NCHW",
               method="NEAREST_NEIGHBOR"):
    """Upsampling.
    This operator takes data as input and does 2D scaling to the given scale factor.
    In the default case, where the data_layout is `NCHW`
    with data of shape (n, c, h, w)
    out will have a shape (n, c, h*scale, w*scale)
    method indicates the algorithm to be used while calculating the out value
    and method can be one of ("BILINEAR", "NEAREST_NEIGHBOR")
    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    scale : int, optional
        The scale factor for upsampling (default 1).
    layout : str, optional
        Layout of the input.
    method : str, optional
        Scale method to used [NEAREST_NEIGHBOR, BILINEAR].
    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.upsampling(data, scale, layout, method)
def batch_flatten(data):
    """BatchFlatten.
    This operator flattens all the dimensions except for the batch dimension,
    resulting in a 2D output.
    For data with shape ``(d1, d2, ..., dk)``
    batch_flatten(data) returns reshaped output of shape ``(d1, d2*...*dk)``.
    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    Returns
    -------
    result: relay.Expr
        The Flattened result.
    """
    return _make.batch_flatten(data)
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/relay/op/nn/nn.py | Python | apache-2.0 | 10,085 |
<?php
/**
* Project Name: map-board
* File Name: create_benchmark_fixture.php
* Last modified: 2017/11/20 12:51
* Author: Hiroaki Goto
*
* Copyright (c) 2017 Hiroaki Goto. All rights reserved.
*/
require_once __DIR__.'/../vendor/autoload.php';
$db = connectDB();
$redis = connectRedis();
const USER_SIZE = 1000;
const THREAD_NUM = 200;
const MIN_POST = 0;
const MAX_POST = 10000;
const CONTENT_MIN = 3;
const CONTENT_MAX = 1000;
/**
 * Converts a Unicode code point (decimal) to a character string.
 *
 * @param int    $unicode  Unicode code point in decimal.
 * @param string $encoding Target encoding of the returned character.
 * @return string The character encoded in $encoding.
 */
function unichr( $unicode , $encoding = 'UTF-8' ) {
    $entity = '&#' . $unicode . ';';
    return mb_convert_encoding($entity, $encoding, 'HTML-ENTITIES');
}
/**
 * Generates pseudo-random strings from a seed alphabet of ASCII
 * alphanumerics and, optionally, hiragana, katakana and joyo kanji
 * (the latter loaded from joyo.csv, one hex code point per line).
 */
class RandomStringGenerator {
    /** @var string Seed alphabet the random strings are drawn from. */
    private $seed;
    /** @var string Shuffled working copy used by pseudo(). */
    private $strtmp;
    /** @var int Number of characters in the seed alphabet. */
    private $seedSize;
    /** @var int Calls of pseudo() since the last reshuffle. */
    private $cnt = 0;

    /**
     * @param bool $only_ascii Restrict the alphabet to ASCII alphanumerics.
     *                         When false, joyo.csv must be readable from cwd.
     * @param bool $multi_line Include a newline character in the alphabet.
     */
    public function __construct(bool $only_ascii = false, bool $multi_line = true) {
        $this->seed = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789';
        if(!$only_ascii) {
            // Hiragana block (U+3041 .. U+3093).
            for ($i = 12353; $i <= 12435; $i++) {
                $this->seed .= unichr($i);
            }
            // Katakana block (U+30A1 .. U+30F6).
            for ($i = 12449; $i <= 12534; $i++) {
                $this->seed .= unichr($i);
            }
            // Joyo kanji, one hex code point per line in joyo.csv.
            $file_content = file_get_contents('joyo.csv');
            $unicode_list = preg_split('/(\r\n)|[\r\n]/', $file_content);
            foreach ($unicode_list as $code_point) {
                $this->seed .= unichr(hexdec($code_point));
            }
        }
        // Newline character.
        if($multi_line) {
            // BUG FIX: '\n' in single quotes is a literal backslash followed
            // by "n", not a newline. Double quotes append a real newline.
            $this->seed .= "\n";
        }
        $this->seedSize = mb_strlen($this->seed);
        $this->shuffle();
    }

    /** Rebuilds the shuffled working copy by random sampling of the seed. */
    private function shuffle() {
        $this->strtmp = '';
        for($i = 0; $i < $this->seedSize; $i++) {
            $this->strtmp .= mb_substr($this->seed, mt_rand(0, $this->seedSize - 1), 1);
        }
    }

    /**
     * Fast generation: returns a random substring of the shuffled working
     * copy. Reshuffles every 1000 calls to keep output varied.
     *
     * @param int $length Number of characters to return.
     * @return string Pseudo-random string of $length characters.
     */
    public function pseudo(int $length) {
        if(++$this->cnt > 1000) {
            $this->cnt = 0;
            $this->shuffle();
        }
        $offset_max = $this->seedSize - $length;
        return mb_substr($this->strtmp, mt_rand(0, $offset_max), $length);
    }

    /**
     * Slow generation: draws each character independently from the seed.
     *
     * @param int $length Number of characters to return.
     * @return string Random string of $length characters.
     */
    public function generate(int $length) {
        $str = '';
        for($i = 0; $i < $length; $i++) {
            $str .= mb_substr($this->seed, mt_rand(0, $this->seedSize - 1), 1);
        }
        return $str;
    }
}
// ASCII-only generator for names/passwords; full alphabet for post bodies.
$single_gen = new RandomStringGenerator(true, false);
$content_gen = new RandomStringGenerator();
// Produces a random post body between CONTENT_MIN and CONTENT_MAX chars.
$gen_content = function() use($content_gen) {
    return $content_gen->pseudo(mt_rand(CONTENT_MIN, CONTENT_MAX));
};
echo "Creating users...\n";
$user_ids = [];
for($i = 0; $i < USER_SIZE; $i++) {
    $password = $single_gen->pseudo(mt_rand(7, 40));
    $user_name = $single_gen->generate(mt_rand(4, 8));
    $user = new mb\models\User($user_name, $user_name.'@example.com', $password, $password);
    // Only remember users that were actually persisted.
    if($user->create($db)) {
        $user_ids[] = $user->id;
    }
}
$user_count = count($user_ids);
// Picks a random id among the successfully created users.
$gen_user_id = function() use($user_ids, $user_count) {
    return $user_ids[mt_rand(0, $user_count - 1)];
};
echo "End creating users.\n";
echo "Creating threads...\n";
for($i = 0; $i < THREAD_NUM; $i++) {
    $thread_owner = $gen_user_id();
    $thread = new mb\models\Thread($db, $single_gen->generate(mt_rand(5, 80)), $thread_owner);
    $thread->create($db, $redis);
    // Every thread starts with a post authored by its owner.
    $post_first = new mb\models\Post($db, $thread->id, $thread_owner, $gen_content());
    $post_first->create($db);
    // Then a random number of follow-up posts from random users.
    $post_num = mt_rand(MIN_POST, MAX_POST);
    for($j = 0; $j < $post_num; $j++) {
        $post = new mb\models\Post($db, $thread->id, $gen_user_id(), $gen_content());
        $post->create($db);
    }
}
echo "End creating thread.\n";
package com.jason.showcase.lambdas;
/**
* Created by Qinjianf on 2016/7/19.
*/
/**
 * Small demo of passing behaviour to a method via a functional interface.
 */
public class Lambda {

    /**
     * Invokes the given action with a fixed greeting message.
     *
     * @param action the callback to run
     */
    public void execute(Action action) {
        action.run("Hello Lambda!");
    }

    /** Runs the demo, printing the greeting to stdout. */
    public void test() {
        execute(message -> System.out.println(message));
    }

    public static void main(String[] args) {
        new Lambda().test();
    }
}
| fuyongde/jason | showcase/src/main/java/com/jason/showcase/lambdas/Lambda.java | Java | apache-2.0 | 342 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.oozie.action.hadoop;
import com.google.common.annotations.VisibleForTesting;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.lang.StringUtils;
import org.apache.directory.api.util.Strings;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.StringReader;
import java.io.Writer;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Pattern;
import static org.apache.oozie.action.hadoop.SparkActionExecutor.SPARK_DEFAULT_OPTS;
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "Properties file should be specified by user")
class SparkArgsExtractor {
private static final Pattern SPARK_DEFAULTS_FILE_PATTERN = Pattern.compile("spark-defaults.conf");
private static final String FILES_OPTION = "--files";
private static final String ARCHIVES_OPTION = "--archives";
private static final String LOG4J_CONFIGURATION_JAVA_OPTION = "-Dlog4j.configuration=";
private static final String SECURITY_TOKENS_HADOOPFS = "spark.yarn.security.tokens.hadoopfs.enabled";
private static final String SECURITY_TOKENS_HIVE = "spark.yarn.security.tokens.hive.enabled";
private static final String SECURITY_TOKENS_HBASE = "spark.yarn.security.tokens.hbase.enabled";
private static final String SECURITY_CREDENTIALS_HADOOPFS = "spark.yarn.security.credentials.hadoopfs.enabled";
private static final String SECURITY_CREDENTIALS_HIVE = "spark.yarn.security.credentials.hive.enabled";
private static final String SECURITY_CREDENTIALS_HBASE = "spark.yarn.security.credentials.hbase.enabled";
private static final String PWD = "$PWD" + File.separator + "*";
private static final String MASTER_OPTION = "--master";
private static final String MODE_OPTION = "--deploy-mode";
private static final String JOB_NAME_OPTION = "--name";
private static final String CLASS_NAME_OPTION = "--class";
private static final String VERBOSE_OPTION = "--verbose";
private static final String DRIVER_CLASSPATH_OPTION = "--driver-class-path";
private static final String EXECUTOR_CLASSPATH = "spark.executor.extraClassPath=";
private static final String DRIVER_CLASSPATH = "spark.driver.extraClassPath=";
private static final String EXECUTOR_EXTRA_JAVA_OPTIONS = "spark.executor.extraJavaOptions=";
private static final String DRIVER_EXTRA_JAVA_OPTIONS = "spark.driver.extraJavaOptions=";
private static final Pattern SPARK_VERSION_1 = Pattern.compile("^1.*");
private static final String SPARK_YARN_JAR = "spark.yarn.jar";
private static final String SPARK_YARN_JARS = "spark.yarn.jars";
private static final String OPT_SEPARATOR = "=";
private static final String OPT_VALUE_SEPARATOR = ",";
private static final String CONF_OPTION = "--conf";
private static final String MASTER_OPTION_YARN_CLUSTER = "yarn-cluster";
private static final String MASTER_OPTION_YARN_CLIENT = "yarn-client";
private static final String MASTER_OPTION_YARN = "yarn";
private static final String DEPLOY_MODE_CLUSTER = "cluster";
private static final String DEPLOY_MODE_CLIENT = "client";
private static final String SPARK_YARN_TAGS = "spark.yarn.tags";
private static final String OPT_PROPERTIES_FILE = "--properties-file";
public static final String SPARK_DEFAULTS_GENERATED_PROPERTIES = "spark-defaults-oozie-generated.properties";
private boolean pySpark = false;
private final Configuration actionConf;
    /** @param actionConf configuration of the Spark action to extract arguments from. */
    SparkArgsExtractor(final Configuration actionConf) {
        this.actionConf = actionConf;
    }
    /** @return whether the main application file resolved to a Python (.py) script; set during extraction. */
    boolean isPySpark() {
        return pySpark;
    }
List<String> extract(final String[] mainArgs) throws OozieActionConfiguratorException, IOException, URISyntaxException {
final List<String> sparkArgs = new ArrayList<>();
sparkArgs.add(MASTER_OPTION);
final String master = actionConf.get(SparkActionExecutor.SPARK_MASTER);
sparkArgs.add(master);
// In local mode, everything runs here in the Launcher Job.
// In yarn-client mode, the driver runs here in the Launcher Job and the
// executor in Yarn.
// In yarn-cluster mode, the driver and executor run in Yarn.
final String sparkDeployMode = actionConf.get(SparkActionExecutor.SPARK_MODE);
if (sparkDeployMode != null) {
sparkArgs.add(MODE_OPTION);
sparkArgs.add(sparkDeployMode);
}
final boolean yarnClusterMode = master.equals(MASTER_OPTION_YARN_CLUSTER)
|| (master.equals(MASTER_OPTION_YARN) && sparkDeployMode != null && sparkDeployMode.equals(DEPLOY_MODE_CLUSTER));
final boolean yarnClientMode = master.equals(MASTER_OPTION_YARN_CLIENT)
|| (master.equals(MASTER_OPTION_YARN) && sparkDeployMode != null && sparkDeployMode.equals(DEPLOY_MODE_CLIENT));
sparkArgs.add(JOB_NAME_OPTION);
sparkArgs.add(actionConf.get(SparkActionExecutor.SPARK_JOB_NAME));
final String className = actionConf.get(SparkActionExecutor.SPARK_CLASS);
if (className != null) {
sparkArgs.add(CLASS_NAME_OPTION);
sparkArgs.add(className);
}
appendOoziePropertiesToSparkConf(sparkArgs);
String jarPath = actionConf.get(SparkActionExecutor.SPARK_JAR);
if (jarPath != null && jarPath.endsWith(".py")) {
pySpark = true;
}
boolean addedSecurityTokensHadoopFS = false;
boolean addedSecurityTokensHive = false;
boolean addedSecurityTokensHBase = false;
boolean addedSecurityCredentialsHadoopFS = false;
boolean addedSecurityCredentialsHive = false;
boolean addedSecurityCredentialsHBase = false;
boolean addedLog4jDriverSettings = false;
boolean addedLog4jExecutorSettings = false;
final StringBuilder driverClassPath = new StringBuilder();
final StringBuilder executorClassPath = new StringBuilder();
final StringBuilder userFiles = new StringBuilder();
final StringBuilder userArchives = new StringBuilder();
final String sparkOpts = actionConf.get(SparkActionExecutor.SPARK_OPTS);
String propertiesFile = null;
if (StringUtils.isNotEmpty(sparkOpts)) {
final List<String> sparkOptions = SparkOptionsSplitter.splitSparkOpts(sparkOpts);
for (int i = 0; i < sparkOptions.size(); i++) {
String opt = sparkOptions.get(i);
boolean addToSparkArgs = true;
if (yarnClusterMode || yarnClientMode) {
if (opt.startsWith(EXECUTOR_CLASSPATH)) {
appendWithPathSeparator(opt.substring(EXECUTOR_CLASSPATH.length()), executorClassPath);
addToSparkArgs = false;
}
if (opt.startsWith(DRIVER_CLASSPATH)) {
appendWithPathSeparator(opt.substring(DRIVER_CLASSPATH.length()), driverClassPath);
addToSparkArgs = false;
}
if (opt.equals(DRIVER_CLASSPATH_OPTION)) {
// we need the next element after this option
appendWithPathSeparator(sparkOptions.get(i + 1), driverClassPath);
// increase i to skip the next element.
i++;
addToSparkArgs = false;
}
}
if (opt.startsWith(SECURITY_TOKENS_HADOOPFS)) {
addedSecurityTokensHadoopFS = true;
}
if (opt.startsWith(SECURITY_TOKENS_HIVE)) {
addedSecurityTokensHive = true;
}
if (opt.startsWith(SECURITY_TOKENS_HBASE)) {
addedSecurityTokensHBase = true;
}
if (opt.startsWith(SECURITY_CREDENTIALS_HADOOPFS)) {
addedSecurityCredentialsHadoopFS = true;
}
if (opt.startsWith(SECURITY_CREDENTIALS_HIVE)) {
addedSecurityCredentialsHive = true;
}
if (opt.startsWith(SECURITY_CREDENTIALS_HBASE)) {
addedSecurityCredentialsHBase = true;
}
if (opt.startsWith(OPT_PROPERTIES_FILE)){
i++;
propertiesFile = sparkOptions.get(i);
addToSparkArgs = false;
}
if (opt.startsWith(EXECUTOR_EXTRA_JAVA_OPTIONS) || opt.startsWith(DRIVER_EXTRA_JAVA_OPTIONS)) {
if (!opt.contains(LOG4J_CONFIGURATION_JAVA_OPTION)) {
opt += " " + LOG4J_CONFIGURATION_JAVA_OPTION + SparkMain.SPARK_LOG4J_PROPS;
} else {
System.out.println("Warning: Spark Log4J settings are overwritten." +
" Child job IDs may not be available");
}
if (opt.startsWith(EXECUTOR_EXTRA_JAVA_OPTIONS)) {
addedLog4jExecutorSettings = true;
} else {
addedLog4jDriverSettings = true;
}
}
if (opt.startsWith(FILES_OPTION)) {
final String userFile;
if (opt.contains(OPT_SEPARATOR)) {
userFile = opt.substring(opt.indexOf(OPT_SEPARATOR) + OPT_SEPARATOR.length());
}
else {
userFile = sparkOptions.get(i + 1);
i++;
}
if (userFiles.length() > 0) {
userFiles.append(OPT_VALUE_SEPARATOR);
}
userFiles.append(userFile);
addToSparkArgs = false;
}
if (opt.startsWith(ARCHIVES_OPTION)) {
final String userArchive;
if (opt.contains(OPT_SEPARATOR)) {
userArchive = opt.substring(opt.indexOf(OPT_SEPARATOR) + OPT_SEPARATOR.length());
}
else {
userArchive = sparkOptions.get(i + 1);
i++;
}
if (userArchives.length() > 0) {
userArchives.append(OPT_VALUE_SEPARATOR);
}
userArchives.append(userArchive);
addToSparkArgs = false;
}
if (addToSparkArgs) {
sparkArgs.add(opt);
}
else if (sparkArgs.get(sparkArgs.size() - 1).equals(CONF_OPTION)) {
sparkArgs.remove(sparkArgs.size() - 1);
}
}
}
if ((yarnClusterMode || yarnClientMode)) {
// Include the current working directory (of executor container)
// in executor classpath, because it will contain localized
// files
appendWithPathSeparator(PWD, executorClassPath);
appendWithPathSeparator(PWD, driverClassPath);
sparkArgs.add(CONF_OPTION);
sparkArgs.add(EXECUTOR_CLASSPATH + executorClassPath.toString());
sparkArgs.add(CONF_OPTION);
sparkArgs.add(DRIVER_CLASSPATH + driverClassPath.toString());
}
if (actionConf.get(LauncherMain.MAPREDUCE_JOB_TAGS) != null) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SPARK_YARN_TAGS + OPT_SEPARATOR + actionConf.get(LauncherMain.MAPREDUCE_JOB_TAGS));
}
if (!addedSecurityTokensHadoopFS) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_TOKENS_HADOOPFS + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedSecurityTokensHive) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_TOKENS_HIVE + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedSecurityTokensHBase) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_TOKENS_HBASE + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedSecurityCredentialsHadoopFS) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_CREDENTIALS_HADOOPFS + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedSecurityCredentialsHive) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_CREDENTIALS_HIVE + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedSecurityCredentialsHBase) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_CREDENTIALS_HBASE + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedLog4jExecutorSettings) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(EXECUTOR_EXTRA_JAVA_OPTIONS + LOG4J_CONFIGURATION_JAVA_OPTION + SparkMain.SPARK_LOG4J_PROPS);
}
if (!addedLog4jDriverSettings) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(DRIVER_EXTRA_JAVA_OPTIONS + LOG4J_CONFIGURATION_JAVA_OPTION + SparkMain.SPARK_LOG4J_PROPS);
}
mergeAndAddPropertiesFile(sparkArgs, propertiesFile);
if ((yarnClusterMode || yarnClientMode)) {
final Map<String, URI> fixedFileUrisMap =
SparkMain.fixFsDefaultUrisAndFilterDuplicates(DistributedCache.getCacheFiles(actionConf));
fixedFileUrisMap.put(SparkMain.SPARK_LOG4J_PROPS, new Path(SparkMain.SPARK_LOG4J_PROPS).toUri());
fixedFileUrisMap.put(SparkMain.HIVE_SITE_CONF, new Path(SparkMain.HIVE_SITE_CONF).toUri());
addUserDefined(userFiles.toString(), fixedFileUrisMap);
final Collection<URI> fixedFileUris = fixedFileUrisMap.values();
final JarFilter jarFilter = new JarFilter(fixedFileUris, jarPath);
jarFilter.filter();
jarPath = jarFilter.getApplicationJar();
final String cachedFiles = StringUtils.join(fixedFileUris, OPT_VALUE_SEPARATOR);
if (cachedFiles != null && !cachedFiles.isEmpty()) {
sparkArgs.add(FILES_OPTION);
sparkArgs.add(cachedFiles);
}
final Map<String, URI> fixedArchiveUrisMap = SparkMain.fixFsDefaultUrisAndFilterDuplicates(DistributedCache.
getCacheArchives(actionConf));
addUserDefined(userArchives.toString(), fixedArchiveUrisMap);
final String cachedArchives = StringUtils.join(fixedArchiveUrisMap.values(), OPT_VALUE_SEPARATOR);
if (cachedArchives != null && !cachedArchives.isEmpty()) {
sparkArgs.add(ARCHIVES_OPTION);
sparkArgs.add(cachedArchives);
}
setSparkYarnJarsConf(sparkArgs, jarFilter.getSparkYarnJar(), jarFilter.getSparkVersion());
}
if (!sparkArgs.contains(VERBOSE_OPTION)) {
sparkArgs.add(VERBOSE_OPTION);
}
sparkArgs.add(jarPath);
sparkArgs.addAll(Arrays.asList(mainArgs));
return sparkArgs;
}
private void mergeAndAddPropertiesFile(final List<String> sparkArgs, final String userDefinedPropertiesFile)
throws IOException {
final Properties properties = new Properties();
loadServerDefaultProperties(properties);
loadLocalizedDefaultPropertiesFile(properties);
loadUserDefinedPropertiesFile(userDefinedPropertiesFile, properties);
final boolean persisted = persistMergedProperties(properties);
if (persisted) {
sparkArgs.add(OPT_PROPERTIES_FILE);
sparkArgs.add(SPARK_DEFAULTS_GENERATED_PROPERTIES);
}
}
private boolean persistMergedProperties(final Properties properties) throws IOException {
if (!properties.isEmpty()) {
try (final Writer writer = new OutputStreamWriter(
new FileOutputStream(new File(SPARK_DEFAULTS_GENERATED_PROPERTIES)),
StandardCharsets.UTF_8.name())) {
properties.store(writer, "Properties file generated by Oozie");
System.out.println(String.format("Persisted merged Spark configs in file %s. Merged properties are: %s",
SPARK_DEFAULTS_GENERATED_PROPERTIES, Arrays.toString(properties.stringPropertyNames().toArray())));
return true;
} catch (IOException e) {
System.err.println(String.format("Could not persist derived Spark config file. Reason: %s", e.getMessage()));
throw e;
}
}
return false;
}
private void loadUserDefinedPropertiesFile(final String userDefinedPropertiesFile, final Properties properties) {
if (userDefinedPropertiesFile != null) {
System.out.println(String.format("Reading Spark config from %s %s...", OPT_PROPERTIES_FILE, userDefinedPropertiesFile));
loadProperties(new File(userDefinedPropertiesFile), properties);
}
}
private void loadLocalizedDefaultPropertiesFile(final Properties properties) {
final File localizedDefaultConfFile = SparkMain.getMatchingFile(SPARK_DEFAULTS_FILE_PATTERN);
if (localizedDefaultConfFile != null) {
System.out.println(String.format("Reading Spark config from file %s...", localizedDefaultConfFile.getName()));
loadProperties(localizedDefaultConfFile, properties);
}
}
private void loadServerDefaultProperties(final Properties properties) {
final String sparkDefaultsFromServer = actionConf.get(SPARK_DEFAULT_OPTS, "");
if (!sparkDefaultsFromServer.isEmpty()) {
System.out.println("Reading Spark config propagated from Oozie server...");
try (final StringReader reader = new StringReader(sparkDefaultsFromServer)) {
properties.load(reader);
} catch (IOException e) {
System.err.println(String.format("Could not read propagated Spark config! Reason: %s", e.getMessage()));
}
}
}
private void loadProperties(final File file, final Properties target) {
try (final Reader reader = new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8.name())) {
final Properties properties = new Properties();
properties.load(reader);
for(String key :properties.stringPropertyNames()) {
Object prevProperty = target.setProperty(key, properties.getProperty(key));
if(prevProperty != null){
System.out.println(String.format("Value of %s was overwritten from %s", key, file.getName()));
}
}
} catch (IOException e) {
System.err.println(String.format("Could not read Spark configs from file %s. Reason: %s", file.getName(),
e.getMessage()));
}
}
private void appendWithPathSeparator(final String what, final StringBuilder to) {
if (to.length() > 0) {
to.append(File.pathSeparator);
}
to.append(what);
}
private void addUserDefined(final String userList, final Map<String, URI> urisMap) {
if (userList != null) {
for (final String file : userList.split(OPT_VALUE_SEPARATOR)) {
if (!Strings.isEmpty(file)) {
final Path p = new Path(file);
urisMap.put(p.getName(), p.toUri());
}
}
}
}
/*
* Get properties that needs to be passed to Spark as Spark configuration from actionConf.
*/
@VisibleForTesting
void appendOoziePropertiesToSparkConf(final List<String> sparkArgs) {
for (final Map.Entry<String, String> oozieConfig : actionConf
.getValByRegex("^oozie\\.(?!launcher|spark).+").entrySet()) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(String.format("spark.%s=%s", oozieConfig.getKey(), oozieConfig.getValue()));
}
}
/**
* Sets spark.yarn.jars for Spark 2.X. Sets spark.yarn.jar for Spark 1.X.
*
* @param sparkArgs
* @param sparkYarnJar
* @param sparkVersion
*/
private void setSparkYarnJarsConf(final List<String> sparkArgs, final String sparkYarnJar, final String sparkVersion) {
if (SPARK_VERSION_1.matcher(sparkVersion).find()) {
// In Spark 1.X.X, set spark.yarn.jar to avoid
// multiple distribution
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SPARK_YARN_JAR + OPT_SEPARATOR + sparkYarnJar);
} else {
// In Spark 2.X.X, set spark.yarn.jars
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SPARK_YARN_JARS + OPT_SEPARATOR + sparkYarnJar);
}
}
}
| cbaenziger/oozie | sharelib/spark/src/main/java/org/apache/oozie/action/hadoop/SparkArgsExtractor.java | Java | apache-2.0 | 22,247 |
package com.common.dao;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.logging.Logger;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import javax.persistence.PersistenceException;
/**
* La Clase BaseDAO implementa las operaciones básicas de acceso a datos DAO
* utilizando usado por las clases DAO del módulo de ejecución de transacciones.
*
* @author Gestorinc S.A.
* @version $Rev $
*/
public class BaseDAO {

    /**
     * Constant representing the '%' wildcard used in LIKE expressions.
     */
    public static final String SYMBOLO_LIKE = "%";

    /**
     * Constant representing the apostrophe character "'".
     */
    public static final String SYMBOLO_APOSTROFE = "'";

    /**
     * Audit logger shared by all DAO subclasses.
     */
    protected static final Logger LOGGER = Logger.getLogger(BaseDAO.class.getName());

    /**
     * Container-injected object handling the JPA persistence operations.
     */
    @PersistenceContext(name = "punit")
    private EntityManager em;

    /**
     * Default constructor.
     */
    public BaseDAO() {
    }

    /**
     * Returns the injected JPA {@link EntityManager}.
     *
     * @return the persistence manager
     * @throws IllegalStateException when the EntityManager has not been
     *         injected before use
     */
    protected EntityManager getEntityManager() {
        if (em == null) {
            throw new IllegalStateException(
                    "EntityManager no ha sido asignado a DAO antes del uso.");
        }
        return em;
    }

    /**
     * Executes a native SQL statement over the JDBC connection unwrapped from
     * the EntityManager (persistence unit <b>punit</b>). Not intended for
     * SELECT statements. Errors are logged and swallowed, matching the
     * original best-effort contract.
     *
     * @param sentencia SQL statement to execute
     */
    public void ejecutarNativo(String sentencia) {
        try {
            java.sql.Connection connection = em.unwrap(java.sql.Connection.class);
            // try-with-resources guarantees the statement is closed even when
            // execute() throws; the previous version leaked it on error paths.
            try (PreparedStatement ps = connection.prepareStatement(sentencia)) {
                ps.execute();
            }
        } catch (PersistenceException | SQLException e) {
            LOGGER.info("Error al ejecutar sentencia"+ e.getMessage());
        }
    }

    /**
     * Wraps a string in apostrophes.
     *
     * @param cadena the string
     * @return the string surrounded by apostrophes
     */
    protected String comillar(String cadena) {
        return SYMBOLO_APOSTROFE + cadena + SYMBOLO_APOSTROFE;
    }
}
| ServicioReparaciones/ServicioReparaciones | ServicioReparaciones-ejb/src/main/java/com/common/dao/BaseDAO.java | Java | apache-2.0 | 2,856 |
/*
* Copyright (C) 2017-2019 Dremio Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dremio.exec.planner.sql.parser;
import java.util.List;
import org.apache.calcite.sql.SqlCall;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlLiteral;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.SqlSpecialOperator;
import org.apache.calcite.sql.SqlWriter;
import org.apache.calcite.sql.parser.SqlParserPos;
import com.dremio.service.namespace.NamespaceKey;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
/**
 * SQL parse tree node for {@code TRUNCATE [TABLE] [IF EXISTS] <table>}.
 */
public class SqlTruncateTable extends SqlCall {
  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("TRUNCATE_TABLE", SqlKind.OTHER_DDL) {
    @Override
    public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
      Preconditions.checkArgument(operands.length == 3, "SqlTruncateTable.createCall() " +
          "has to get 3 operands!");
      return new SqlTruncateTable(pos, (SqlIdentifier) operands[0], (SqlLiteral) operands[1], (SqlLiteral) operands[2]);
    }
  };

  // Immutable after construction: made final since the node is never mutated
  // once parsed.
  private final SqlIdentifier tableName;
  private final boolean tableExistenceCheck;
  private final boolean tableKeywordPresent;

  public SqlTruncateTable(SqlParserPos pos, SqlIdentifier tableName, SqlLiteral tableExistenceCheck,
                          SqlLiteral tableKeywordPresent) {
    this(pos, tableName, tableExistenceCheck.booleanValue(), tableKeywordPresent.booleanValue());
  }

  public SqlTruncateTable(SqlParserPos pos, SqlIdentifier tableName, boolean tableExistenceCheck,
                          boolean tableKeywordPresent) {
    super(pos);
    this.tableName = tableName;
    this.tableExistenceCheck = tableExistenceCheck;
    this.tableKeywordPresent = tableKeywordPresent;
  }

  @Override
  public SqlOperator getOperator() {
    return OPERATOR;
  }

  /** Operand order must match {@link #OPERATOR}'s createCall() above. */
  @Override
  public List<SqlNode> getOperandList() {
    return ImmutableList.of(
        tableName,
        SqlLiteral.createBoolean(tableExistenceCheck, SqlParserPos.ZERO),
        SqlLiteral.createBoolean(tableKeywordPresent, SqlParserPos.ZERO)
    );
  }

  /** Reconstructs the original SQL text, honoring the optional keywords. */
  @Override
  public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    writer.keyword("TRUNCATE");
    if (tableKeywordPresent) {
      writer.keyword("TABLE");
    }
    if (tableExistenceCheck) {
      writer.keyword("IF");
      writer.keyword("EXISTS");
    }
    tableName.unparse(writer, leftPrec, rightPrec);
  }

  /** @return the table name as a namespace path */
  public NamespaceKey getPath() {
    return new NamespaceKey(tableName.names);
  }

  /** @return whether IF EXISTS was specified */
  public boolean checkTableExistence() {
    return tableExistenceCheck;
  }
}
| dremio/dremio-oss | sabot/kernel/src/main/java/com/dremio/exec/planner/sql/parser/SqlTruncateTable.java | Java | apache-2.0 | 3,280 |
/*
* Copyright 2009-2013 by The Regents of the University of California
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* you may obtain a copy of the License from
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.uci.ics.asterix.lexergenerator.rules;
/**
 * Lexer-generator rule that consumes any characters — honouring backslash
 * escapes — up to (and including the test of) an unescaped terminating
 * character.
 */
public class RuleAnythingUntil implements Rule {
    // The terminating character; the rule's only state.
    private final char expected;

    // Covariant override of Object.clone(); the original omitted @Override.
    @Override
    public RuleAnythingUntil clone() {
        return new RuleAnythingUntil(expected);
    }

    public RuleAnythingUntil(char expected) {
        this.expected = expected;
    }

    @Override
    public String toString() {
        return " .* " + String.valueOf(expected);
    }

    @Override
    public int hashCode() {
        // Derived solely from 'expected', consistent with equals().
        return 10 * (int) expected;
    }

    @Override
    public boolean equals(Object o) {
        // instanceof is false for null, so no separate null check is needed.
        return o instanceof RuleAnythingUntil
                && ((RuleAnythingUntil) o).expected == this.expected;
    }

    @Override
    public String javaAction() {
        return "currentChar = readNextChar();";
    }

    /**
     * Emits the generated-lexer Java code: loop while the current character is
     * not the unescaped terminator, then run {@code action} on a match.
     */
    @Override
    public String javaMatch(String action) {
        return "boolean escaped = false;\n" + "while (currentChar != '" + expected + "' || escaped) {\n"
                + "if(!escaped && currentChar == '\\\\\\\\') {\n" + "escaped = true;\n" + "containsEscapes = true;\n"
                + "} else {\n" + "escaped = false;\n" + "}\n" + "currentChar = readNextChar();\n" + "}\n"
                + "if (currentChar == '" + expected + "') {" + action + "}\n";
    }
}
| parshimers/incubator-asterixdb | asterix-maven-plugins/lexer-generator-maven-plugin/src/main/java/edu/uci/ics/asterix/lexergenerator/rules/RuleAnythingUntil.java | Java | apache-2.0 | 2,007 |
package com.netwebx.hackerrank.rpc.client;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetSocketAddress;
import java.net.Socket;
/**
* Created by apple on 2017/2/26.
*/
/**
 * Client-side stub factory for a toy RPC protocol: wraps a remote service in a
 * dynamic proxy that, per method call, opens a TCP connection, writes the
 * service/method/parameter description with Java serialization, and returns
 * the deserialized reply.
 */
public class RpcImporter<S> {

    /**
     * Builds the proxy. No network traffic happens here; a new connection is
     * opened for every method invocation on the returned stub.
     *
     * @param serviceClass implementation class; the proxy exposes its first interface
     * @param addr         remote endpoint to connect to on each call
     * @return a proxy implementing {@code serviceClass.getInterfaces()[0]}
     */
    @SuppressWarnings("unchecked")
    public S importer(final Class<?> serviceClass, final InetSocketAddress addr) {
        return (S) Proxy.newProxyInstance(
                serviceClass.getClassLoader(),
                new Class<?>[]{serviceClass.getInterfaces()[0]},
                new InvocationHandler() {
                    @Override
                    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
                        // try-with-resources closes input, output, then socket
                        // (reverse declaration order). The original closed the
                        // socket BEFORE its streams and leaked the remaining
                        // resources when an earlier close() threw.
                        try (Socket socket = new Socket()) {
                            socket.connect(addr);
                            try (ObjectOutputStream output = new ObjectOutputStream(socket.getOutputStream())) {
                                // Wire format: service name, method name,
                                // parameter types, then the argument values.
                                output.writeUTF(serviceClass.getName());
                                output.writeUTF(method.getName());
                                output.writeObject(method.getParameterTypes());
                                output.writeObject(args);
                                try (ObjectInputStream input = new ObjectInputStream(socket.getInputStream())) {
                                    return input.readObject();
                                }
                            }
                        }
                    }
                }
        );
    }
}
| WengJunFeng/hackerrank_java | src/main/java/com/netwebx/hackerrank/rpc/client/RpcImporter.java | Java | apache-2.0 | 2,053 |
require 'adrian/queue'
require 'fileutils'
module Adrian
  # File-system backed queue: every queue item is a file. Files under
  # +available_path+ are waiting to be consumed; popping an item moves
  # (renames) its file into +reserved_path+, which acts as the lock.
  class DirectoryQueue < Adrian::Queue
    include Filters

    # Builds a queue and ensures both backing directories exist.
    def self.create(options = {})
      queue = new(options)
      FileUtils.mkdir_p(queue.available_path)
      FileUtils.mkdir_p(queue.reserved_path)
      queue
    end

    attr_reader :available_path, :reserved_path, :logger

    # Note:
    # There is the possibility of an item being consumed by multiple processes when its still in the queue after its lock expires.
    # The reason for allowing this is:
    # 1. It's much simpler than introducing a seperate monitoring process to handle lock expiry.
    # 2. This is an acceptable and rare event. e.g. it only happens when the process working on the item crashes without being able to release the lock
    #
    # Options:
    #   :path          - directory holding available items (required)
    #   :reserved_path - directory holding reserved items (defaults to <path>/cur)
    #   :logger        - attached to each FileItem popped/pushed
    #   :lock_duration / :delay - passed to the FileLock / Delay filters below
    def initialize(options = {})
      super
      @available_path = options.fetch(:path)
      @reserved_path = options.fetch(:reserved_path, default_reserved_path)
      @logger = options[:logger]
      filters << Filters::FileLock.new(:duration => options[:lock_duration], :reserved_path => reserved_path)
      filters << Filters::Delay.new(:duration => options[:delay]) if options[:delay]
    end

    # Returns the next reservable item (oldest first), or nil when none can be
    # reserved.
    def pop_item
      items.each do |item|
        return item if reserve(item)
      end
      nil
    end

    # Moves the item's file into the available directory and bumps its mtime
    # (used for FIFO ordering in #items).
    def push_item(value)
      item = wrap_item(value)
      item.move(available_path)
      item.touch
      self
    end

    # Number of files currently available (reserved files are not counted).
    def length
      available_files.count { |file| File.file?(file) }
    end

    def include?(value)
      item = wrap_item(value)
      items.include?(item)
    end

    protected

    # Coerces a path or FileItem into a FileItem carrying this queue's logger.
    def wrap_item(value)
      item = value.is_a?(FileItem) ? value : FileItem.new(value)
      item.logger ||= logger
      item
    end

    # Reservation = atomic rename into reserved_path. A losing racer gets
    # Errno::ENOENT (file already moved) and reports failure.
    def reserve(item)
      item.move(reserved_path)
      item.touch
      true
    rescue Errno::ENOENT => e
      false
    end

    # All known items that still exist and pass the filters, oldest first.
    def items
      items = files.map { |file| wrap_item(file) }
      items.reject! { |item| !item.exist? || filter?(item) }
      items.sort_by(&:updated_at)
    end

    def files
      (available_files + reserved_files).select { |file| File.file?(file) }
    end

    def available_files
      Dir.glob("#{available_path}/*")
    end

    def reserved_files
      Dir.glob("#{reserved_path}/*")
    end

    # Maildir-style default: reserved items live in the "cur" subdirectory.
    def default_reserved_path
      File.join(@available_path, 'cur')
    end
  end
end
| staugaard/adrian | lib/adrian/directory_queue.rb | Ruby | apache-2.0 | 2,384 |
// SERVER-4516 and SERVER-6913: test that update and findAndModify tolerate
// an _id in the update document, as long as the _id will not be modified
var t = db.jstests_server4516;
var startingDoc = {_id: 1, a: 1};

// Reset the collection to the single known starting document.
function prepare() {
    t.drop();
    t.save(startingDoc);
}

// Assert that both upsert paths (update and findAndModify) accept updateDoc
// when matching _id qid, and that the stored document equals resultDoc.
function update_succeeds(updateDoc, qid, resultDoc) {
    prepare();
    t.update({_id: qid}, updateDoc, true);
    assert.eq(t.findOne({_id: qid}), resultDoc);

    prepare();
    t.findAndModify({query: {_id: qid}, update: updateDoc, upsert: true});
    assert.eq(t.findOne({_id: qid}), resultDoc);
}

// Replacement/$set documents whose _id agrees with the query must succeed.
update_succeeds({_id: 1, a: 2}, 1, {_id: 1, a: 2});
update_succeeds({$set: {_id: 1}}, 1, {_id: 1, a: 1});
update_succeeds({_id: 1, b: "a"}, 1, {_id: 1, b: "a"});
update_succeeds({_id: 2, a: 3}, 2, {_id: 2, a: 3});

// Assert that both upsert paths reject updateDoc (it would modify _id), and
// that the collection is left untouched.
function update_fails(updateDoc, qid) {
    prepare();
    var res = t.update({_id: qid}, updateDoc, true);
    assert.writeError(res);
    assert.eq(t.count(), 1);
    assert.eq(t.findOne(), startingDoc);

    prepare();
    assert.throws(function() {
        t.findAndModify({query: {_id: qid}, update: updateDoc, upsert: true});
    });
    assert.eq(t.count(), 1);
    assert.eq(t.findOne(), startingDoc);
}

// Updates whose _id disagrees with the queried _id must fail.
update_fails({$set: {_id: 2}}, 1);
update_fails({_id: 2, a: 3}, 1);
update_fails({_id: 2, a: 3}, 3);
| christkv/mongo-shell | test/jstests/core/update_find_and_modify_id.js | JavaScript | apache-2.0 | 1,313 |
package com.oath.cyclops.internal.stream.spliterators.push;
import com.oath.cyclops.types.persistent.PersistentCollection;
import java.util.Collection;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
/**
* Created by johnmcclean on 12/01/2017.
*/
/**
 * Push-stream operator that groups upstream elements into collections by
 * elapsed wall-clock time: whenever more than the configured duration has
 * passed since the current window opened, the accumulated collection is
 * finalized and emitted downstream, and a new window begins.
 *
 * Mutable per-subscription state is captured in single-element arrays so the
 * lambdas below can both read and write it.
 */
public class GroupedByTimeOperator<T,C extends PersistentCollection<? super T>,R> extends BaseOperator<T,R> {
    // Supplies a fresh, empty collection for each new time window.
    private final Supplier<? extends C> factory;
    // Converts a completed window's collection into the value emitted downstream.
    private final Function<? super C, ? extends R> finalizer;
    // Window length, interpreted in the unit 't' below.
    private final long time;
    private final TimeUnit t;

    public GroupedByTimeOperator(Operator<T> source, Supplier<? extends C> factory,
                                 Function<? super C, ? extends R> finalizer,long time,
                                 TimeUnit t){
        super(source);
        this.factory = factory;
        this.finalizer = finalizer;
        this.time = time;
        this.t = t;
    }

    /**
     * Backpressure-aware subscription: demand is forwarded upstream, and the
     * downstream request count is only decremented when a whole window (not a
     * single element) is emitted.
     */
    @Override
    public StreamSubscription subscribe(Consumer<? super R> onNext, Consumer<? super Throwable> onError, Runnable onComplete) {
        long toRun = t.toNanos(time);
        // next[0]: collection being accumulated for the open window.
        PersistentCollection[] next = {factory.get()};
        // start[0]: nanoTime at which the open window started.
        long[] start ={System.nanoTime()};
        // upstream[0]: filled in after the subscription object exists.
        StreamSubscription[] upstream = {null};
        StreamSubscription sub = new StreamSubscription(){
            @Override
            public void request(long n) {
                if(n<=0) {
                    // Reactive Streams rule 3.9: non-positive demand is an error.
                    onError.accept(new IllegalArgumentException("3.9 While the Subscription is not cancelled, Subscription.request(long n) MUST throw a java.lang.IllegalArgumentException if the argument is <= 0."));
                    return;
                }
                if(!isOpen)
                    return;
                super.request(n);
                upstream[0].request(n);
            }
            @Override
            public void cancel() {
                upstream[0].cancel();
                super.cancel();
            }
        };
        upstream[0] = source.subscribe(e-> {
                    try {
                        next[0] = next[0].plus(e);
                        if(System.nanoTime()-start[0] > toRun){
                            // Window elapsed: emit it and open a fresh one.
                            onNext.accept(finalizer.apply((C)next[0]));
                            sub.requested.decrementAndGet();
                            next[0] = factory.get();
                            start[0] = System.nanoTime();
                        }
                        else{
                            // Element buffered, no emission: ask upstream for one more.
                            request( upstream,1l);
                        }
                    } catch (Throwable t) {
                        onError.accept(t);
                    }
                }
                ,t->{onError.accept(t);
                    sub.requested.decrementAndGet();
                    if(sub.isActive())
                        request( upstream,1);
                },()->{
                    // Upstream complete: flush the partially filled window, if any.
                    if(next[0].size()>0) {
                        try {
                            onNext.accept(finalizer.apply((C) next[0]));
                        } catch(Throwable t){
                            onError.accept(t);
                        }
                        sub.requested.decrementAndGet();
                    }
                    sub.cancel();
                    onComplete.run();
                });
        return sub;
    }

    /**
     * Unbounded variant: consumes everything without backpressure bookkeeping,
     * emitting a window whenever the configured duration has elapsed and
     * flushing the remainder on completion.
     */
    @Override
    public void subscribeAll(Consumer<? super R> onNext, Consumer<? super Throwable> onError, Runnable onCompleteDs) {
        long toRun = t.toNanos(time);
        PersistentCollection[] next = {factory.get()};
        long[] start ={System.nanoTime()};
        source.subscribeAll(e-> {
                    try {
                        next[0] = next[0].plus(e);
                        if(System.nanoTime()-start[0] > toRun){
                            onNext.accept(finalizer.apply((C)next[0]));
                            next[0] = factory.get();
                            start[0] = System.nanoTime();
                        }
                    } catch (Throwable t) {
                        onError.accept(t);
                    }
                }
                ,onError,()->{
                    if(next[0].size()>0) {
                        try {
                            onNext.accept(finalizer.apply((C) next[0]));
                        } catch(Throwable t){
                            onError.accept(t);
                        }
                    }
                    onCompleteDs.run();
                });
    }
}
| aol/cyclops | cyclops/src/main/java/com/oath/cyclops/internal/stream/spliterators/push/GroupedByTimeOperator.java | Java | apache-2.0 | 4,567 |
<?php
/***************************************************************************
* *
* (c) 2004 Vladimir V. Kalynyak, Alexey V. Vinokurov, Ilya M. Shalnev *
* *
* This is commercial software, only users who have purchased a valid *
* license and accept to the terms of the License Agreement can install *
* and use this program. *
* *
****************************************************************************
* PLEASE READ THE FULL TEXT OF THE SOFTWARE LICENSE AGREEMENT IN THE *
* "copyright.txt" FILE PROVIDED WITH THIS DISTRIBUTION PACKAGE. *
****************************************************************************/
namespace Tygh\Exceptions;
/**
 * Thrown when a class requested at runtime cannot be found/autoloaded.
 * Marker subclass of AException; adds no behavior of its own.
 */
class ClassNotFoundException extends AException
{
}
| sandymariscal22/BrandsCsCart | public_html/app/Tygh/Exceptions/ClassNotFoundException.php | PHP | apache-2.0 | 1,012 |
// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package linux
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/boltdb/bolt"
eventstypes "github.com/containerd/containerd/api/events"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/events/exchange"
"github.com/containerd/containerd/identifiers"
"github.com/containerd/containerd/linux/proc"
"github.com/containerd/containerd/linux/runctypes"
shim "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/runtime"
runc "github.com/containerd/go-runc"
"github.com/containerd/typeurl"
ptypes "github.com/gogo/protobuf/types"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
var (
pluginID = fmt.Sprintf("%s.%s", plugin.RuntimePlugin, "linux")
empty = &ptypes.Empty{}
)
const (
configFilename = "config.json"
defaultRuntime = "runc"
defaultShim = "containerd-shim"
)
// init registers the linux runtime as a containerd plugin, declaring its
// dependencies (task monitor, metadata store) and the default shim/runtime
// binary names used when the TOML config leaves them unset.
func init() {
	plugin.Register(&plugin.Registration{
		Type: plugin.RuntimePlugin,
		ID:   "linux",
		InitFn: New,
		Requires: []plugin.Type{
			plugin.TaskMonitorPlugin,
			plugin.MetadataPlugin,
		},
		Config: &Config{
			Shim:    defaultShim,
			Runtime: defaultRuntime,
		},
	})
}
var _ = (runtime.Runtime)(&Runtime{})
// Config options for the runtime
type Config struct {
	// Shim is a path or name of binary implementing the Shim GRPC API
	Shim string `toml:"shim"`
	// Runtime is a path or name of an OCI runtime used by the shim
	Runtime string `toml:"runtime"`
	// RuntimeRoot is the path that shall be used by the OCI runtime for its data
	RuntimeRoot string `toml:"runtime_root"`
	// NoShim calls runc directly from within the pkg
	NoShim bool `toml:"no_shim"`
	// ShimDebug enables debug output on the shim
	ShimDebug bool `toml:"shim_debug"`
}
// New returns a configured runtime
func New(ic *plugin.InitContext) (interface{}, error) {
	ic.Meta.Platforms = []ocispec.Platform{platforms.DefaultSpec()}
	// Ensure both the persistent (Root) and ephemeral (State) directories exist.
	if err := os.MkdirAll(ic.Root, 0711); err != nil {
		return nil, err
	}
	if err := os.MkdirAll(ic.State, 0711); err != nil {
		return nil, err
	}
	// Resolve required plugins declared in init()'s Requires list.
	monitor, err := ic.Get(plugin.TaskMonitorPlugin)
	if err != nil {
		return nil, err
	}
	m, err := ic.Get(plugin.MetadataPlugin)
	if err != nil {
		return nil, err
	}
	cfg := ic.Config.(*Config)
	r := &Runtime{
		root:    ic.Root,
		state:   ic.State,
		monitor: monitor.(runtime.TaskMonitor),
		tasks:   runtime.NewTaskList(),
		db:      m.(*metadata.DB),
		address: ic.Address,
		events:  ic.Events,
		config:  cfg,
	}
	// Re-attach tasks whose shims survived a containerd restart.
	tasks, err := r.restoreTasks(ic.Context)
	if err != nil {
		return nil, err
	}
	// TODO: need to add the tasks to the monitor
	for _, t := range tasks {
		if err := r.tasks.AddWithNamespace(t.namespace, t); err != nil {
			return nil, err
		}
	}
	return r, nil
}
// Runtime for a linux based system
type Runtime struct {
	// root holds persistent runtime data; state holds ephemeral per-boot data.
	root    string
	state   string
	// address is the containerd socket address handed to shims.
	address string

	monitor runtime.TaskMonitor
	tasks   *runtime.TaskList
	db      *metadata.DB
	events  *exchange.Exchange

	config *Config
}
// ID of the runtime
func (r *Runtime) ID() string {
	// Plugin-qualified identifier, e.g. "io.containerd.runtime.v1.linux".
	return pluginID
}
// Create a new task: validate the id, write the OCI bundle to disk, start (or
// embed) a shim to supervise the container, ask the shim to create the task,
// and register the task with the task list and cgroup monitor. On any error
// the bundle and shim are torn down before returning.
func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts) (_ runtime.Task, err error) {
	namespace, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return nil, err
	}

	if err := identifiers.Validate(id); err != nil {
		return nil, errors.Wrapf(err, "invalid task id")
	}

	ropts, err := r.getRuncOptions(ctx, id)
	if err != nil {
		return nil, err
	}

	bundle, err := newBundle(id,
		filepath.Join(r.state, namespace),
		filepath.Join(r.root, namespace),
		opts.Spec.Value)
	if err != nil {
		return nil, err
	}
	// Remove the on-disk bundle if anything below fails.
	defer func() {
		if err != nil {
			bundle.Delete()
		}
	}()

	shimopt := ShimLocal(r.config, r.events)
	if !r.config.NoShim {
		var cgroup string
		if opts.Options != nil {
			v, err := typeurl.UnmarshalAny(opts.Options)
			if err != nil {
				return nil, err
			}
			cgroup = v.(*runctypes.CreateOptions).ShimCgroup
		}
		// exitHandler runs when the shim process dies unexpectedly (reaped
		// outside the normal Delete path) and cleans up the orphaned task.
		exitHandler := func() {
			log.G(ctx).WithField("id", id).Info("shim reaped")
			t, err := r.tasks.Get(ctx, id)
			if err != nil {
				// Task was never started or was already successfully deleted
				return
			}
			lc := t.(*Task)

			// Stop the monitor
			if err := r.monitor.Stop(lc); err != nil {
				log.G(ctx).WithError(err).WithFields(logrus.Fields{
					"id":        id,
					"namespace": namespace,
				}).Warn("failed to stop monitor")
			}

			log.G(ctx).WithFields(logrus.Fields{
				"id":        id,
				"namespace": namespace,
			}).Warn("cleaning up after killed shim")
			if err = r.cleanupAfterDeadShim(context.Background(), bundle, namespace, id, lc.pid); err != nil {
				log.G(ctx).WithError(err).WithFields(logrus.Fields{
					"id":        id,
					"namespace": namespace,
				}).Warn("failed to clean up after killed shim")
			}
		}
		shimopt = ShimRemote(r.config, r.address, cgroup, exitHandler)
	}

	s, err := bundle.NewShimClient(ctx, namespace, shimopt, ropts)
	if err != nil {
		return nil, err
	}
	// Kill the freshly started shim if task creation fails below.
	defer func() {
		if err != nil {
			if kerr := s.KillShim(ctx); kerr != nil {
				log.G(ctx).WithError(err).Error("failed to kill shim")
			}
		}
	}()

	// Per-container runtime binary overrides the plugin default.
	rt := r.config.Runtime
	if ropts != nil && ropts.Runtime != "" {
		rt = ropts.Runtime
	}
	sopts := &shim.CreateTaskRequest{
		ID:         id,
		Bundle:     bundle.path,
		Runtime:    rt,
		Stdin:      opts.IO.Stdin,
		Stdout:     opts.IO.Stdout,
		Stderr:     opts.IO.Stderr,
		Terminal:   opts.IO.Terminal,
		Checkpoint: opts.Checkpoint,
		Options:    opts.Options,
	}
	for _, m := range opts.Rootfs {
		sopts.Rootfs = append(sopts.Rootfs, &types.Mount{
			Type:    m.Type,
			Source:  m.Source,
			Options: m.Options,
		})
	}
	cr, err := s.Create(ctx, sopts)
	if err != nil {
		return nil, errdefs.FromGRPC(err)
	}
	t, err := newTask(id, namespace, int(cr.Pid), s, r.monitor, r.events,
		proc.NewRunc(ropts.RuntimeRoot, sopts.Bundle, namespace, rt, ropts.CriuPath, ropts.SystemdCgroup))
	if err != nil {
		return nil, err
	}
	if err := r.tasks.Add(ctx, t); err != nil {
		return nil, err
	}
	// after the task is created, add it to the monitor if it has a cgroup
	// this can be different on a checkpoint/restore
	if t.cg != nil {
		if err = r.monitor.Monitor(t); err != nil {
			if _, err := r.Delete(ctx, t); err != nil {
				log.G(ctx).WithError(err).Error("deleting task after failed monitor")
			}
			return nil, err
		}
	}
	r.events.Publish(ctx, runtime.TaskCreateEventTopic, &eventstypes.TaskCreate{
		ContainerID: sopts.ID,
		Bundle:      sopts.Bundle,
		Rootfs:      sopts.Rootfs,
		IO: &eventstypes.TaskIO{
			Stdin:    sopts.Stdin,
			Stdout:   sopts.Stdout,
			Stderr:   sopts.Stderr,
			Terminal: sopts.Terminal,
		},
		Checkpoint: sopts.Checkpoint,
		Pid:        uint32(t.pid),
	})
	return t, nil
}
// Delete a task removing all on disk state
func (r *Runtime) Delete(ctx context.Context, c runtime.Task) (*runtime.Exit, error) {
	namespace, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return nil, err
	}
	lc, ok := c.(*Task)
	if !ok {
		return nil, fmt.Errorf("task cannot be cast as *linux.Task")
	}
	// Stop cgroup monitoring before tearing the task down.
	if err := r.monitor.Stop(lc); err != nil {
		return nil, err
	}
	bundle := loadBundle(
		lc.id,
		filepath.Join(r.state, namespace, lc.id),
		filepath.Join(r.root, namespace, lc.id),
	)
	// Ask the shim to delete the task; if that fails (e.g. the shim is dead),
	// fall back to cleaning up the bundle and runc state directly.
	rsp, err := lc.shim.Delete(ctx, empty)
	if err != nil {
		if cerr := r.cleanupAfterDeadShim(ctx, bundle, namespace, c.ID(), lc.pid); cerr != nil {
			log.G(ctx).WithError(err).Error("unable to cleanup task")
		}
		return nil, errdefs.FromGRPC(err)
	}
	r.tasks.Delete(ctx, lc.id)
	// Best-effort teardown: failures to kill the shim or remove the bundle
	// are logged but do not fail the delete.
	if err := lc.shim.KillShim(ctx); err != nil {
		log.G(ctx).WithError(err).Error("failed to kill shim")
	}
	if err := bundle.Delete(); err != nil {
		log.G(ctx).WithError(err).Error("failed to delete bundle")
	}
	r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{
		ContainerID: lc.id,
		ExitStatus:  rsp.ExitStatus,
		ExitedAt:    rsp.ExitedAt,
		Pid:         rsp.Pid,
	})
	// Return the exit information reported by the shim.
	return &runtime.Exit{
		Status:    rsp.ExitStatus,
		Timestamp: rsp.ExitedAt,
		Pid:       rsp.Pid,
	}, nil
}
// Tasks returns all tasks known to the runtime
func (r *Runtime) Tasks(ctx context.Context) ([]runtime.Task, error) {
	// Served from the in-memory task list populated by Create and restoreTasks.
	return r.tasks.GetAll(ctx)
}
// restoreTasks scans every namespace directory under the runtime state root
// and reloads the tasks found in each one.
func (r *Runtime) restoreTasks(ctx context.Context) ([]*Task, error) {
	entries, err := ioutil.ReadDir(r.state)
	if err != nil {
		return nil, err
	}
	var restored []*Task
	for _, entry := range entries {
		// Only directories represent namespaces; skip stray files.
		if !entry.IsDir() {
			continue
		}
		ns := entry.Name()
		log.G(ctx).WithField("namespace", ns).Debug("loading tasks in namespace")
		nsTasks, err := r.loadTasks(ctx, ns)
		if err != nil {
			return nil, err
		}
		restored = append(restored, nsTasks...)
	}
	return restored, nil
}
// Get a specific task by task id
func (r *Runtime) Get(ctx context.Context, id string) (runtime.Task, error) {
	// Lookup is served entirely from the in-memory task list.
	return r.tasks.Get(ctx, id)
}
// loadTasks reconnects to the shim of every task found on disk under the
// given namespace. Tasks whose shim cannot be reached are cleaned up; other
// per-task failures are logged and skipped so that one broken task does not
// prevent the rest of the namespace from loading.
func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) {
	dir, err := ioutil.ReadDir(filepath.Join(r.state, ns))
	if err != nil {
		return nil, err
	}
	var o []*Task
	for _, path := range dir {
		// Each directory is one task id.
		if !path.IsDir() {
			continue
		}
		id := path.Name()
		bundle := loadBundle(
			id,
			filepath.Join(r.state, ns, id),
			filepath.Join(r.root, ns, id),
		)
		ctx = namespaces.WithNamespace(ctx, ns)
		// Best-effort pid read; a missing pid file leaves pid == 0.
		pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile))
		// Reconnect to the existing shim; the callback fires if the shim
		// connection dies later, cleaning up the orphaned task.
		s, err := bundle.NewShimClient(ctx, ns, ShimConnect(r.config, func() {
			err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid)
			if err != nil {
				log.G(ctx).WithError(err).WithField("bundle", bundle.path).
					Error("cleaning up after dead shim")
			}
		}), nil)
		if err != nil {
			// Shim is unreachable: tear down the task's on-disk state now.
			log.G(ctx).WithError(err).WithFields(logrus.Fields{
				"id":        id,
				"namespace": ns,
			}).Error("connecting to shim")
			err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid)
			if err != nil {
				log.G(ctx).WithError(err).WithField("bundle", bundle.path).
					Error("cleaning up after dead shim")
			}
			continue
		}
		ropts, err := r.getRuncOptions(ctx, id)
		if err != nil {
			log.G(ctx).WithError(err).WithField("id", id).
				Error("get runtime options")
			continue
		}

		t, err := newTask(id, ns, pid, s, r.monitor, r.events,
			proc.NewRunc(ropts.RuntimeRoot, bundle.path, ns, ropts.Runtime, ropts.CriuPath, ropts.SystemdCgroup))
		if err != nil {
			log.G(ctx).WithError(err).Error("loading task type")
			continue
		}
		o = append(o, t)
	}
	return o, nil
}
// cleanupAfterDeadShim removes all state for a task whose shim has died:
// it force-deletes the runc state, unmounts the rootfs, publishes synthetic
// exit/delete events, and removes the bundle from disk.
func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, id string, pid int) error {
	ctx = namespaces.WithNamespace(ctx, ns)
	// In shim debug mode the bundle is intentionally left on disk on failure
	// so it can be inspected post-mortem.
	if err := r.terminate(ctx, bundle, ns, id); err != nil {
		if r.config.ShimDebug {
			return errors.Wrap(err, "failed to terminate task, leaving bundle for debugging")
		}
		log.G(ctx).WithError(err).Warn("failed to terminate task")
	}

	// Notify Client
	exitedAt := time.Now().UTC()
	// The task did not exit on its own, so publish an exit with the status of
	// a SIGKILL-ed process (128 + signal number).
	r.events.Publish(ctx, runtime.TaskExitEventTopic, &eventstypes.TaskExit{
		ContainerID: id,
		ID:          id,
		Pid:         uint32(pid),
		ExitStatus:  128 + uint32(unix.SIGKILL),
		ExitedAt:    exitedAt,
	})

	r.tasks.Delete(ctx, id)
	if err := bundle.Delete(); err != nil {
		log.G(ctx).WithError(err).Error("delete bundle")
	}
	r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{
		ContainerID: id,
		Pid:         uint32(pid),
		ExitStatus:  128 + uint32(unix.SIGKILL),
		ExitedAt:    exitedAt,
	})

	return nil
}
// terminate force-deletes the runc state for the given task and unmounts the
// bundle rootfs. Both steps are best-effort: failures are logged as warnings
// and do not abort the cleanup.
func (r *Runtime) terminate(ctx context.Context, bundle *bundle, ns, id string) error {
	rr, err := r.getRuntime(ctx, ns, id)
	if err != nil {
		return err
	}

	deleteOpts := &runc.DeleteOpts{
		Force: true,
	}
	if err := rr.Delete(ctx, id, deleteOpts); err != nil {
		log.G(ctx).WithError(err).Warnf("delete runtime state %s", id)
	}

	rootfs := filepath.Join(bundle.path, "rootfs")
	if err := mount.Unmount(rootfs, 0); err != nil {
		log.G(ctx).WithError(err).WithFields(logrus.Fields{
			"path": bundle.path,
			"id":   id,
		}).Warnf("unmount task rootfs")
	}
	return nil
}
// getRuntime builds a runc client for the given namespace and task, honoring
// any per-container runtime command / root overrides stored in its options.
func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, error) {
	opts, err := r.getRuncOptions(ctx, id)
	if err != nil {
		return nil, err
	}

	// Start from the plugin defaults, then apply per-container overrides.
	command := r.config.Runtime
	runcRoot := proc.RuncRoot
	if opts != nil {
		if opts.Runtime != "" {
			command = opts.Runtime
		}
		if opts.RuntimeRoot != "" {
			runcRoot = opts.RuntimeRoot
		}
	}

	return &runc.Runc{
		// NOTE: Command is blank by default via the runtime toml config
		Command:      command,
		LogFormat:    runc.JSON,
		PdeathSignal: unix.SIGKILL,
		Root:         filepath.Join(runcRoot, ns),
	}, nil
}
// getRuncOptions loads the container record for id from the metadata store
// and decodes its runtime options. A container without options yields an
// empty (all-defaults) RuncOptions value rather than nil.
func (r *Runtime) getRuncOptions(ctx context.Context, id string) (*runctypes.RuncOptions, error) {
	var container containers.Container

	viewErr := r.db.View(func(tx *bolt.Tx) error {
		var err error
		container, err = metadata.NewContainerStore(tx).Get(ctx, id)
		return err
	})
	if viewErr != nil {
		return nil, viewErr
	}

	if container.Runtime.Options == nil {
		return &runctypes.RuncOptions{}, nil
	}

	v, err := typeurl.UnmarshalAny(container.Runtime.Options)
	if err != nil {
		return nil, err
	}
	opts, ok := v.(*runctypes.RuncOptions)
	if !ok {
		return nil, errors.New("invalid runtime options format")
	}
	return opts, nil
}
| mikebrow/cri-containerd | vendor/github.com/containerd/containerd/linux/runtime.go | GO | apache-2.0 | 14,104 |
package net.tcp.socket;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
/**
 * Minimal TCP greeting server example. The server must be started before any
 * client connects.
 * <ol>
 * <li>Create a server socket bound to a fixed port: ServerSocket(int port)</li>
 * <li>Accept client connections (blocking)</li>
 * <li>Send a greeting message to each client</li>
 * </ol>
 */
public class Server {

	/**
	 * Entry point: accepts clients forever and sends each one a greeting.
	 *
	 * @param args unused
	 * @throws IOException if the server socket cannot be opened
	 */
	public static void main(String[] args) throws IOException {
		// 1. Create the server on port 8888. try-with-resources closes it if
		// the accept loop ever exits exceptionally.
		try (ServerSocket server = new ServerSocket(8888)) {
			// 2. Accept client connections; accept() blocks until one arrives.
			while (true) {
				// Close the client socket and its stream after each client is
				// served -- the original leaked one socket per connection.
				try (Socket socket = server.accept();
						DataOutputStream dos = new DataOutputStream(socket.getOutputStream())) {
					System.out.println("一个客户端建立连接");
					// 3. Send the greeting.
					String msg = "欢迎使用";
					dos.writeUTF(msg);
					dos.flush();
				}
			}
		}
	}
}
| zhangxx0/Java_Topic_prictice | src/net/tcp/socket/Server.java | Java | apache-2.0 | 1,059 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.management.impl;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Hashtable;
import java.util.List;
import java.util.NoSuchElementException;
import javax.management.MBeanServerConnection;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import org.neo4j.jmx.ManagementInterface;
/**
 * Does not have any public methods - since the public interface of
 * {@link org.neo4j.management.Neo4jManager} should be defined completely in
 * that class.
 *
 * Does not have any (direct or transitive) dependencies on any part of the jmx
 * component - since this class is used in
 * {@link org.neo4j.management.impl.jconsole.Neo4jPlugin the JConsole plugin},
 * and the jmx component is not on the class path in JConsole.
 *
 * @author Tobias Ivarsson <tobias.ivarsson@neotechnology.com>
 */
public abstract class KernelProxy
{
    static final String KERNEL_BEAN_TYPE = "org.neo4j.jmx.Kernel";
    protected static final String KERNEL_BEAN_NAME = "Kernel";
    static final String MBEAN_QUERY = "MBeanQuery";
    protected final MBeanServerConnection server;
    protected final ObjectName kernel;

    /**
     * @param server connection to the MBean server that hosts the kernel bean
     * @param kernel object name of the Neo4j Kernel bean on that server
     * @throws IllegalArgumentException if {@code kernel} does not name a
     *             Neo4j Kernel bean on the given server
     */
    protected KernelProxy( MBeanServerConnection server, ObjectName kernel )
    {
        String className = null;
        try
        {
            className = server.getMBeanInfo( kernel ).getClassName();
        }
        catch ( Exception e )
        {
            // fall through
        }
        if ( !KERNEL_BEAN_TYPE.equals( className ) )
        {
            throw new IllegalArgumentException(
                    "The specified ObjectName does not represent a Neo4j Kernel bean in the specified MBean server." );
        }
        this.server = server;
        this.kernel = kernel;
    }

    /**
     * Returns proxies for all MBeans belonging to this kernel whose
     * implementation class is loadable locally. Beans that cannot be
     * resolved or proxied are silently skipped.
     */
    protected List<Object> allBeans()
    {
        List<Object> beans = new ArrayList<Object>();
        Iterable<ObjectInstance> mbeans;
        try
        {
            mbeans = server.queryMBeans( mbeanQuery(), null );
        }
        catch ( IOException handled )
        {
            return beans;
        }
        for ( ObjectInstance instance : mbeans )
        {
            String className = instance.getClassName();
            Class<?> beanType = null;
            try
            {
                if ( className != null ) beanType = Class.forName( className );
            }
            catch ( Exception ignored )
            {
                // fall through
            }
            catch ( LinkageError ignored )
            {
                // fall through
            }
            if ( beanType != null )
            {
                try
                {
                    beans.add( BeanProxy.load( server, beanType, instance.getObjectName() ) );
                }
                catch ( Exception ignored )
                {
                    // fall through
                }
            }
        }
        return beans;
    }

    /**
     * Returns {@code name} unchanged if at least one MBean matches it.
     *
     * @throws NoSuchElementException if no MBean matches {@code name}
     */
    private ObjectName assertExists( ObjectName name )
    {
        try
        {
            if ( !server.queryNames( name, null ).isEmpty() )
            {
                return name;
            }
        }
        catch ( IOException handled )
        {
            // fall through
        }
        throw new NoSuchElementException( "No MBeans matching " + name );
    }

    /** Loads a proxy for the single bean implementing the given interface. */
    protected <T> T getBean( Class<T> beanInterface )
    {
        return BeanProxy.load( server, beanInterface, createObjectName( beanInterface ) );
    }

    /** Loads proxies for all beans implementing the given interface. */
    protected <T> Collection<T> getBeans( Class<T> beanInterface )
    {
        return BeanProxy.loadAll( server, beanInterface, createObjectNameQuery( beanInterface ) );
    }

    private ObjectName createObjectNameQuery( Class<?> beanInterface )
    {
        return createObjectNameQuery( mbeanQuery(), beanInterface );
    }

    private ObjectName createObjectName( Class<?> beanInterface )
    {
        return assertExists( createObjectName( mbeanQuery(), beanInterface ) );
    }

    protected ObjectName createObjectName( String beanName )
    {
        return assertExists( createObjectName( mbeanQuery(), beanName, false ) );
    }

    /**
     * Asks the kernel bean for the ObjectName pattern that matches all MBeans
     * belonging to this kernel instance.
     */
    protected ObjectName mbeanQuery()
    {
        try
        {
            return (ObjectName) server.getAttribute( kernel, MBEAN_QUERY );
        }
        catch ( Exception cause )
        {
            throw new IllegalStateException( "Could not get MBean query.", cause );
        }
    }

    protected static ObjectName createObjectName( String kernelIdentifier, Class<?> beanInterface )
    {
        return createObjectName( kernelIdentifier, beanName( beanInterface ) );
    }

    protected static ObjectName createObjectName( String kernelIdentifier, String beanName, String... extraNaming )
    {
        Hashtable<String, String> properties = new Hashtable<String, String>();
        properties.put( "instance", "kernel#" + kernelIdentifier );
        return createObjectName( "org.neo4j", properties, beanName, false, extraNaming );
    }

    static ObjectName createObjectNameQuery( String kernelIdentifier, String beanName, String... extraNaming )
    {
        Hashtable<String, String> properties = new Hashtable<String, String>();
        properties.put( "instance", "kernel#" + kernelIdentifier );
        return createObjectName( "org.neo4j", properties, beanName, true, extraNaming );
    }

    static ObjectName createObjectName( ObjectName query, Class<?> beanInterface )
    {
        return createObjectName( query, beanName( beanInterface ), false );
    }

    static ObjectName createObjectNameQuery( ObjectName query, Class<?> beanInterface )
    {
        return createObjectName( query, beanName( beanInterface ), true );
    }

    private static ObjectName createObjectName( ObjectName query, String beanName, boolean isQuery )
    {
        Hashtable<String, String> properties = new Hashtable<String, String>(query.getKeyPropertyList());
        return createObjectName( query.getDomain(), properties, beanName, isQuery );
    }

    /**
     * Resolves the JMX bean name declared by the {@link ManagementInterface}
     * annotation on the given interface.
     *
     * @throws IllegalArgumentException if the class is not an annotated
     *             management interface
     */
    static String beanName( Class<?> beanInterface )
    {
        if ( beanInterface.isInterface() )
        {
            ManagementInterface management = beanInterface.getAnnotation( ManagementInterface.class );
            if ( management != null )
            {
                return management.name();
            }
        }
        // "Bean" was previously misspelled "Been" in this message.
        throw new IllegalArgumentException( beanInterface + " is not a Neo4j Management Bean interface" );
    }

    private static ObjectName createObjectName( String domain, Hashtable<String, String> properties, String beanName,
            boolean query, String... extraNaming )
    {
        properties.put( "name", beanName );
        for ( int i = 0; i < extraNaming.length; i++ )
        {
            properties.put( "name" + i, extraNaming[i] );
        }
        ObjectName result;
        try
        {
            result = new ObjectName( domain, properties );
            // A query pattern must end with ",*" to match additional keys.
            if ( query ) result = ObjectName.getInstance( result.toString() + ",*" );
        }
        catch ( MalformedObjectNameException e )
        {
            return null;
        }
        return result;
    }
}
| HuangLS/neo4j | advanced/management/src/main/java/org/neo4j/management/impl/KernelProxy.java | Java | apache-2.0 | 8,016 |
package de.mhus.cha.cao.action;
import java.io.File;
import de.mhus.lib.cao.CaoElement;
import de.mhus.lib.cao.CaoException;
import de.mhus.lib.cao.CaoList;
import de.mhus.lib.cao.CaoMonitor;
import de.mhus.lib.cao.CaoOperation;
import de.mhus.cap.core.Access;
import de.mhus.cha.cao.ChaConnection;
import de.mhus.cha.cao.ChaElement;
import de.mhus.lib.MFile;
import de.mhus.lib.form.MForm;
import de.mhus.lib.form.annotations.FormElement;
import de.mhus.lib.form.annotations.FormSortId;
@FormElement("name='cha_copy_to_folder' title='Copy'")
public class CopyToOperation extends CaoOperation implements MForm {

	private CaoList<Access> sources; // elements selected for copying
	private ChaElement target;       // destination folder element
	private ChaConnection connection;

	public CopyToOperation(ChaElement ChaElement) {
		target = ChaElement;
	}

	@Override
	public void dispose() throws CaoException {
	}

	/**
	 * Copies every source element recursively into the target folder.
	 * First counts all affected folders so progress can be reported, then
	 * performs the copy.
	 */
	@Override
	public void execute() throws CaoException {
		connection = (ChaConnection)target.getConnection();

		//collect all affected entries
		monitor.beginTask("count", CaoMonitor.UNKNOWN);
		int cnt = 0;
		for (CaoElement<Access> element : sources.getElements()) {
			cnt = count( ((ChaElement)element).getFile(), cnt );
		}

		monitor.beginTask("copy", cnt);
		cnt = 0;
		for (CaoElement<Access> element : sources.getElements()) {
			cnt = copy( target.getFile(), ((ChaElement)element).getFile(), cnt );
		}

	}

	/**
	 * Recursively copies the directory <code>file</code> into
	 * <code>target</code>: creates a new UID-named folder under
	 * <code>target</code>, registers it with the connection, then copies the
	 * source's files and sub folders into it.
	 *
	 * @param target parent directory to copy into
	 * @param file   source directory being copied
	 * @param cnt    running count of processed folders (progress reporting)
	 * @return updated count
	 */
	private int copy(File target, File file, int cnt) {

		// validate action
		if (monitor.isCanceled()) return cnt;
		if ( !file.isDirectory()) return cnt; // for secure

		// new path
		File newTarget = null;
		cnt++;
		monitor.worked(cnt);

		newTarget = new File(target,connection.createUID());
		monitor.log().debug("Create Dir: " + newTarget.getAbsolutePath());
		monitor.subTask(file.getAbsolutePath());

		// validate path
		if ( newTarget.exists() ) {
			monitor.log().warn("Folder already exists: " + newTarget.getAbsolutePath());
			return cnt;
		}

		// create
		if ( ! newTarget.mkdir() ) {
			newTarget = null;
			monitor.log().warn("Can't create folder: " + target.getAbsolutePath() + "/" + file.getName());
			return cnt;
		}

		// set id
		connection.addIdPath(newTarget.getName(), newTarget.getAbsolutePath());

		// events
		connection.fireElementCreated(newTarget.getName());
		connection.fireElementLink(target.getName(), newTarget.getName());

		// copy files into the newly created folder
		// BUGFIX: this loop previously copied "file" (the source directory
		// itself) into the old parent "target"; it must copy each file "sub"
		// into the freshly created "newTarget".
		for ( File sub : file.listFiles()) {
			if (sub.isFile()) {
				monitor.log().debug("Copy File: " + sub.getAbsolutePath());
				File targetFile = new File(newTarget,sub.getName());
				if (targetFile.exists()) {
					monitor.log().warn("Can't overwrite file: " + sub.getAbsolutePath());
				} else
				if ( !MFile.copyFile(sub, targetFile) ) {
					monitor.log().warn("Can't copy file: " + sub.getAbsolutePath());
				}
			}
		}

		// copy sub folders
		for ( File sub : file.listFiles(connection.getDefaultFileFilter())) {
			cnt = copy(newTarget, sub,cnt);
		}
		return cnt;
	}

	/**
	 * Counts the directories under <code>file</code> (including itself) that
	 * the copy will process, honoring cancellation.
	 */
	private int count(File file, int cnt) {
		if (monitor.isCanceled()) return cnt;
		if ( file.isDirectory() ) cnt++;
		if (!file.isDirectory()) return cnt; // for secure
		for ( File sub : file.listFiles(connection.getDefaultFileFilter())) {
			cnt = count(sub,cnt);
		}
		return cnt;
	}

	@Override
	public void initialize() throws CaoException {
	}

	public void setSources(CaoList<Access> list) {
		sources = list;
	}

}
| mhus/mhus-inka | de.mhus.hair/hair3/de.mhus.cha.app/src/de/mhus/cha/cao/action/CopyToOperation.java | Java | apache-2.0 | 3,411 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.util.csv;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.Base64;
import java.util.List;
import java.util.Locale;
import java.util.Properties;

import javax.annotation.Nullable;

import org.apache.commons.csv.CSVRecord;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.expression.function.EncodeFormat;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.IllegalDataException;
import org.apache.phoenix.schema.types.PBinary;
import org.apache.phoenix.schema.types.PBoolean;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PDataType.PDataCodec;
import org.apache.phoenix.schema.types.PTimestamp;
import org.apache.phoenix.schema.types.PVarbinary;
import org.apache.phoenix.util.ColumnInfo;
import org.apache.phoenix.util.DateUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.UpsertExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
/** {@link UpsertExecutor} over {@link CSVRecord}s. */
public class CsvUpsertExecutor extends UpsertExecutor<CSVRecord, String> {

    private static final Logger LOG = LoggerFactory.getLogger(CsvUpsertExecutor.class);

    /** Separator used to split array-typed CSV fields into elements. */
    protected final String arrayElementSeparator;

    /** Testing constructor. Do not use in prod. */
    @VisibleForTesting
    protected CsvUpsertExecutor(Connection conn, List<ColumnInfo> columnInfoList,
            PreparedStatement stmt, UpsertListener<CSVRecord> upsertListener,
            String arrayElementSeparator) {
        super(conn, columnInfoList, stmt, upsertListener);
        this.arrayElementSeparator = arrayElementSeparator;
        finishInit();
    }

    public CsvUpsertExecutor(Connection conn, String tableName,
            List<ColumnInfo> columnInfoList, UpsertListener<CSVRecord> upsertListener,
            String arrayElementSeparator) {
        super(conn, tableName, columnInfoList, upsertListener);
        this.arrayElementSeparator = arrayElementSeparator;
        finishInit();
    }

    /**
     * Binds one CSV record to the prepared UPSERT statement and executes it.
     * Conversion or execution failures are reported to the listener rather
     * than thrown, so a bad record does not abort the whole load.
     */
    @Override
    protected void execute(CSVRecord csvRecord) {
        try {
            if (csvRecord.size() < conversionFunctions.size()) {
                String message = String.format("CSV record does not have enough values (has %d, but needs %d)",
                        csvRecord.size(), conversionFunctions.size());
                throw new IllegalArgumentException(message);
            }
            for (int fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) {
                Object sqlValue = conversionFunctions.get(fieldIndex).apply(csvRecord.get(fieldIndex));
                // JDBC parameter indexes are 1-based.
                if (sqlValue != null) {
                    preparedStatement.setObject(fieldIndex + 1, sqlValue);
                } else {
                    preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType());
                }
            }
            preparedStatement.execute();
            upsertListener.upsertDone(++upsertCount);
        } catch (Exception e) {
            if (LOG.isDebugEnabled()) {
                // Even though this is an error we only log it with debug logging because we're notifying the
                // listener, and it can do its own logging if needed
                LOG.debug("Error on CSVRecord " + csvRecord, e);
            }
            upsertListener.errorOnRecord(csvRecord, e);
        }
    }

    /**
     * Returns the String-to-SQL-value converter for the given column type,
     * using an array converter for array types.
     */
    @Override
    protected Function<String, Object> createConversionFunction(PDataType dataType) {
        if (dataType.isArrayType()) {
            return new ArrayDatatypeConversionFunction(
                    new StringToArrayConverter(
                            conn,
                            arrayElementSeparator,
                            PDataType.fromTypeId(dataType.getSqlType() - PDataType.ARRAY_TYPE_BASE)));
        } else {
            return new SimpleDatatypeConversionFunction(dataType, this.conn);
        }
    }

    /**
     * Performs typed conversion from String values to a given column value type.
     */
    static class SimpleDatatypeConversionFunction implements Function<String, Object> {

        private final PDataType dataType;
        private final PDataCodec codec;
        private final DateUtil.DateTimeParser dateTimeParser;
        private final String binaryEncoding;

        SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) {
            ReadOnlyProps props;
            try {
                props = conn.unwrap(PhoenixConnection.class).getQueryServices().getProps();
            } catch (SQLException e) {
                throw new RuntimeException(e);
            }
            this.dataType = dataType;
            PDataCodec codec = dataType.getCodec();
            if(dataType.isCoercibleTo(PTimestamp.INSTANCE)) {
                codec = DateUtil.getCodecFor(dataType);
                // TODO: move to DateUtil
                // Pick the configured format string for the concrete temporal type.
                String dateFormat;
                int dateSqlType = dataType.getResultSetSqlType();
                if (dateSqlType == Types.DATE) {
                    dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB,
                            DateUtil.DEFAULT_DATE_FORMAT);
                } else if (dateSqlType == Types.TIME) {
                    dateFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB,
                            DateUtil.DEFAULT_TIME_FORMAT);
                } else {
                    dateFormat = props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
                            DateUtil.DEFAULT_TIMESTAMP_FORMAT);
                }
                String timeZoneId = props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
                        QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
                this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, dataType, timeZoneId);
            } else {
                this.dateTimeParser = null;
            }
            this.codec = codec;
            this.binaryEncoding = props.get(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING,
                    QueryServicesOptions.DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING);
        }

        @Nullable
        @Override
        public Object apply(@Nullable String input) {
            if (input == null || input.isEmpty()) {
                return null;
            }
            if (dataType == PTimestamp.INSTANCE) {
                return DateUtil.parseTimestamp(input);
            }
            if (dateTimeParser != null) {
                long epochTime = dateTimeParser.parseDateTime(input);
                byte[] byteValue = new byte[dataType.getByteSize()];
                codec.encodeLong(epochTime, byteValue, 0);
                return dataType.toObject(byteValue);
            } else if (dataType == PBoolean.INSTANCE) {
                // Locale.ROOT keeps the comparison locale-independent
                // (locale-sensitive toLowerCase() can mangle case mappings,
                // e.g. under the Turkish locale).
                switch (input.toLowerCase(Locale.ROOT)) {
                    case "true":
                    case "t":
                    case "1":
                        return Boolean.TRUE;
                    case "false":
                    case "f":
                    case "0":
                        return Boolean.FALSE;
                    default:
                        throw new RuntimeException("Invalid boolean value: '" + input
                                + "', must be one of ['true','t','1','false','f','0']");
                }
            }else if (dataType == PVarbinary.INSTANCE || dataType == PBinary.INSTANCE){
                // Locale.ROOT: "ascii".toUpperCase() in the Turkish locale
                // yields "ASCİİ" (dotted capital I), which would not match
                // EncodeFormat.ASCII and would throw from valueOf().
                EncodeFormat format = EncodeFormat.valueOf(binaryEncoding.toUpperCase(Locale.ROOT));
                Object object = null;
                switch (format) {
                    case BASE64:
                        object = Base64.getDecoder().decode(input);
                        if (object == null) { throw new IllegalDataException(
                                "Input: [" + input + "] is not base64 encoded"); }
                        break;
                    case ASCII:
                        object = Bytes.toBytes(input);
                        break;
                    default:
                        throw new IllegalDataException("Unsupported encoding \"" + binaryEncoding + "\"");
                }
                return object;
            }
            return dataType.toObject(input);
        }
    }

    /**
     * Converts string representations of arrays into Phoenix arrays of the correct type.
     */
    private static class ArrayDatatypeConversionFunction implements Function<String, Object> {

        private final StringToArrayConverter arrayConverter;

        private ArrayDatatypeConversionFunction(StringToArrayConverter arrayConverter) {
            this.arrayConverter = arrayConverter;
        }

        @Nullable
        @Override
        public Object apply(@Nullable String input) {
            try {
                return arrayConverter.toArray(input);
            } catch (SQLException e) {
                throw new RuntimeException(e);
            }
        }
    }
}
| ohadshacham/phoenix | phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java | Java | apache-2.0 | 10,075 |
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.reteoo.common;
import org.drools.core.SessionConfiguration;
import org.drools.core.WorkingMemoryEntryPoint;
import org.drools.core.base.DroolsQuery;
import org.drools.core.common.BaseNode;
import org.drools.core.common.InternalAgenda;
import org.drools.core.common.InternalFactHandle;
import org.drools.core.common.InternalWorkingMemory;
import org.drools.core.common.WorkingMemoryAction;
import org.drools.core.event.AgendaEventSupport;
import org.drools.core.event.RuleEventListenerSupport;
import org.drools.core.event.RuleRuntimeEventSupport;
import org.drools.core.impl.InternalKnowledgeBase;
import org.drools.core.impl.StatefulKnowledgeSessionImpl;
import org.drools.core.phreak.PropagationEntry;
import org.drools.core.reteoo.LIANodePropagation;
import org.drools.core.spi.FactHandleFactory;
import org.drools.core.spi.PropagationContext;
import org.kie.api.runtime.Environment;
import org.kie.api.runtime.rule.AgendaFilter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
public class ReteWorkingMemory extends StatefulKnowledgeSessionImpl {
    // Queued left-input-adapter propagations, replayed before firing in
    // sequential mode (see addLIANodePropagation / internalFireAllRules).
    private List<LIANodePropagation> liaPropagations;

    // Pending working memory actions; populated by queueWorkingMemoryAction.
    private Queue<WorkingMemoryAction> actionQueue;

    private AtomicBoolean evaluatingActionQueue = new AtomicBoolean(false);

    /** Flag to determine if a rule is currently being fired. */
    private volatile AtomicBoolean firing = new AtomicBoolean(false);

    // Serialization/default constructor.
    public ReteWorkingMemory() {
    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase) {
        super(id, kBase);
    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, boolean initInitFactHandle, SessionConfiguration config, Environment environment) {
        super(id, kBase, initInitFactHandle, config, environment);
    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, FactHandleFactory handleFactory, long propagationContext, SessionConfiguration config, InternalAgenda agenda, Environment environment) {
        super(id, kBase, handleFactory, propagationContext, config, agenda, environment);
    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, FactHandleFactory handleFactory, InternalFactHandle initialFactHandle, long propagationContext, SessionConfiguration config, Environment environment, RuleRuntimeEventSupport workingMemoryEventSupport, AgendaEventSupport agendaEventSupport, RuleEventListenerSupport ruleEventListenerSupport, InternalAgenda agenda) {
        super(id, kBase, handleFactory, false, propagationContext, config, environment, workingMemoryEventSupport, agendaEventSupport, ruleEventListenerSupport, agenda);
    }

    // Called by the superclass during construction to set up Rete-specific
    // collaborators: the action queue and the propagation list implementation.
    @Override
    protected void init() {
        this.actionQueue = new ConcurrentLinkedQueue<WorkingMemoryAction>();
        this.propagationList = new RetePropagationList(this);
    }
    /** Resets the session and discards any queued working memory actions. */
    @Override
    public void reset() {
        super.reset();
        actionQueue.clear();
    }

    /**
     * Resets the session with explicit counters, additionally clearing any
     * recorded LIA propagations and queued actions.
     */
    @Override
    public void reset(int handleId,
                      long handleCounter,
                      long propagationCounter) {
        super.reset(handleId, handleCounter, propagationCounter );
        if (liaPropagations != null) liaPropagations.clear();
        actionQueue.clear();
    }
@Override
public WorkingMemoryEntryPoint getWorkingMemoryEntryPoint(String name) {
WorkingMemoryEntryPoint ep = this.entryPoints.get(name);
return ep != null ? new ReteWorkingMemoryEntryPoint( this, ep ) : null;
}
public void addLIANodePropagation(LIANodePropagation liaNodePropagation) {
if (liaPropagations == null) liaPropagations = new ArrayList<LIANodePropagation>();
liaPropagations.add( liaNodePropagation );
}
    // Guards lazy creation of the initial fact handle.
    private final Object syncLock = new Object();

    /**
     * Lazily asserts the InitialFact on first use, using double-checked
     * locking on {@link #syncLock} so concurrent callers initialize it at
     * most once.
     * NOTE(review): initialFactHandle is declared in the superclass; the
     * safety of this double-checked locking depends on that field's
     * volatile/visibility semantics there -- verify.
     */
    public void initInitialFact() {
        if ( initialFactHandle == null ) {
            synchronized ( syncLock ) {
                if ( initialFactHandle == null ) {
                    // double check, inside of sync point incase some other thread beat us to it.
                    initInitialFact(kBase, null);
                }
            }
        }
    }
    /** Ensures the InitialFact exists before entering the fire-until-halt loop. */
    @Override
    public void fireUntilHalt(final AgendaFilter agendaFilter) {
        initInitialFact();
        super.fireUntilHalt( agendaFilter );
    }

    /**
     * Fires at most {@code fireLimit} matching activations.
     * The {@code firing} flag is compare-and-set so that re-entrant or
     * concurrent calls while a firing is already in progress return 0
     * instead of firing again.
     *
     * @param agendaFilter optional filter restricting which activations fire
     * @param fireLimit    maximum number of activations to fire
     * @return the number of activations fired (0 if already firing)
     */
    @Override
    public int fireAllRules(final AgendaFilter agendaFilter,
                            int fireLimit) {
        checkAlive();
        if ( this.firing.compareAndSet( false,
                                        true ) ) {
            initInitialFact();
            try {
                startOperation();
                return internalFireAllRules(agendaFilter, fireLimit);
            } finally {
                endOperation();
                this.firing.set( false );
            }
        }
        return 0;
    }
    /**
     * Fires rules under the knowledge base read lock: replays recorded LIA
     * propagations first (sequential mode only), flushes queued working
     * memory actions, then fires the agenda. If modifications were flushed
     * while firing, recurses so those changes are also evaluated.
     *
     * @param agendaFilter optional filter restricting which activations fire
     * @param fireLimit    maximum number of activations to fire
     * @return total number of activations fired, including recursive rounds
     */
    private int internalFireAllRules(AgendaFilter agendaFilter, int fireLimit) {
        int fireCount = 0;
        try {
            kBase.readLock();

            // If we're already firing a rule, then it'll pick up the firing for any other assertObject(..) that get
            // nested inside, avoiding concurrent-modification exceptions, depending on code paths of the actions.
            if ( liaPropagations != null && isSequential() ) {
                for ( LIANodePropagation liaPropagation : liaPropagations ) {
                    ( liaPropagation ).doPropagation( this );
                }
            }

            // do we need to call this in advance?
            executeQueuedActionsForRete();

            fireCount = this.agenda.fireAllRules( agendaFilter,
                                                  fireLimit );
        } finally {
            kBase.readUnlock();
            if (kBase.flushModifications()) {
                fireCount += internalFireAllRules(agendaFilter, fireLimit);
            }
        }
        return fireCount;
    }
/**
 * Closes a live query: retracts the query fact from the entry-point node,
 * evaluates any actions the retraction queued, then destroys the handle.
 * Lock ordering is significant — kbase read lock is taken before the session
 * lock, and both are released in reverse order in the finally block.
 *
 * @param factHandle the handle that was returned when the live query opened
 */
@Override
public void closeLiveQuery(final InternalFactHandle factHandle) {
        try {
            startOperation();
            this.kBase.readLock();
            this.lock.lock();
            final PropagationContext pCtx = pctxFactory.createPropagationContext(getNextPropagationIdCounter(), PropagationContext.INSERTION,
                                                                                 null, null, factHandle, getEntryPoint());
            getEntryPointNode().retractQuery( factHandle,
                                              pCtx,
                                              this );
            pCtx.evaluateActionQueue(this);
            getFactHandleFactory().destroyFactHandle( factHandle );
        } finally {
            this.lock.unlock();
            this.kBase.readUnlock();
            endOperation();
        }
    }
/**
 * Evaluates a query by asserting the query fact into the entry-point node
 * and returning the terminal nodes registered for the query name.
 * Ensures the InitialFact exists first.
 *
 * @return the terminal nodes for {@code queryName} (as resolved by the
 *         Rete builder); results are gathered through {@code pCtx}'s actions
 */
@Override
protected BaseNode[] evalQuery(String queryName, DroolsQuery queryObject, InternalFactHandle handle, PropagationContext pCtx) {
        initInitialFact();
        BaseNode[] tnodes = kBase.getReteooBuilder().getTerminalNodesForQuery( queryName );
        // no need to call retract, as no leftmemory used.
        getEntryPointNode().assertQuery( handle,
                                         pCtx,
                                         this );
        pCtx.evaluateActionQueue( this );
        return tnodes;
    }
/**
 * @return the live queue of pending working-memory actions (not a copy;
 *         callers share the session's internal queue)
 */
public Collection<WorkingMemoryAction> getActionQueue() {
        return actionQueue;
    }
/**
 * Enqueues a deferred working-memory action and wakes any thread parked in
 * a rest/wait state so it can process the queue. The start/endOperation
 * pairing brackets the enqueue for session bookkeeping.
 */
@Override
public void queueWorkingMemoryAction(final WorkingMemoryAction action) {
        try {
            startOperation();
            actionQueue.add(action);
            notifyWaitOnRest();
        } finally {
            endOperation();
        }
    }
/**
 * Routes a propagation entry: {@code WorkingMemoryAction}s are buffered on
 * the Rete action queue, everything else goes through the default mechanism.
 */
public void addPropagation(PropagationEntry propagationEntry) {
        if (!(propagationEntry instanceof WorkingMemoryAction)) {
            super.addPropagation(propagationEntry);
            return;
        }
        actionQueue.add((WorkingMemoryAction) propagationEntry);
    }
/**
 * Drains and executes all queued working-memory actions.
 * A CAS on {@code evaluatingActionQueue} makes the drain single-threaded and
 * re-entrancy safe: a nested call simply returns while the outer drain is in
 * progress. Any exception from an action is wrapped in a RuntimeException;
 * the CAS flag is always restored in the inner finally block.
 */
@Override
public void executeQueuedActionsForRete() {
        try {
            startOperation();
            if ( evaluatingActionQueue.compareAndSet( false,
                                                      true ) ) {
                try {
                    if ( actionQueue!= null && !actionQueue.isEmpty() ) {
                        WorkingMemoryAction action;
                        while ( (action = actionQueue.poll()) != null ) {
                            try {
                                action.execute( (InternalWorkingMemory) this );
                            } catch ( Exception e ) {
                                throw new RuntimeException( "Unexpected exception executing action " + action.toString(),
                                                            e );
                            }
                        }
                    }
                } finally {
                    evaluatingActionQueue.compareAndSet( true,
                                                         false );
                }
            }
        } finally {
            endOperation();
        }
    }
/**
 * @return an iterator over the currently queued working-memory actions
 *         (backed by the live queue)
 */
@Override
public Iterator<? extends PropagationEntry> getActionsIterator() {
        return actionQueue.iterator();
    }
}
| mrietveld/drools | drools-reteoo/src/main/java/org/drools/reteoo/common/ReteWorkingMemory.java | Java | apache-2.0 | 9,985 |
/*
Copyright (c) DataStax, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "integration.hpp"
/**
* Prepared metadata related tests
*/
class PreparedMetadataTests : public Integration {
public:
  // Per-test setup: create a two-column (key int, value int) table and seed
  // one row (key=1, value=99) so the SELECT below returns data.
  void SetUp() {
    Integration::SetUp();
    session_.execute(
        format_string(CASSANDRA_KEY_VALUE_TABLE_FORMAT, table_name_.c_str(), "int", "int"));
    session_.execute(
        format_string(CASSANDRA_KEY_VALUE_INSERT_FORMAT, table_name_.c_str(), "1", "99"));
  }
  /**
   * Check the column count of a bound statement before and after adding a
   * column to a table.
   *
   * The statement is prepared (and its result metadata captured) *before*
   * the ALTER, so the second execution shows whether the driver refreshes
   * prepared result metadata for the protocol version in use.
   *
   * @param session
   * @param expected_column_count_after_update
   */
  void prepared_check_column_count_after_alter(Session session,
                                               size_t expected_column_count_after_update) {
    Statement bound_statement =
        session.prepare(format_string("SELECT * FROM %s WHERE key = 1", table_name_.c_str()))
            .bind();
    // Verify that the table has two columns in the metadata
    {
      Result result = session.execute(bound_statement);
      EXPECT_EQ(2u, result.column_count());
    }
    // Add a column to the table
    session.execute(format_string("ALTER TABLE %s ADD value2 int", table_name_.c_str()));
    // The column count should match the expected after the alter
    {
      Result result = session.execute(bound_statement);
      EXPECT_EQ(expected_column_count_after_update, result.column_count());
    }
  }
};
/**
* Verify that the column count of a bound statement's result metadata doesn't
* change for older protocol versions (v4 and less) when a table's schema is altered.
*
* @since 2.8
*/
CASSANDRA_INTEGRATION_TEST_F(PreparedMetadataTests, AlterDoesntUpdateColumnCount) {
  CHECK_FAILURE;
  // Ensure beta protocol is not set
  // Pin the connection to protocol v4, where prepared result metadata is
  // fixed at prepare time.
  Session session = default_cluster()
                        .with_beta_protocol(false)
                        .with_protocol_version(CASS_PROTOCOL_VERSION_V4)
                        .connect(keyspace_name_);
  // The column count will stay the same even after the alter
  prepared_check_column_count_after_alter(session, 2u);
}
/**
* Verify that the column count of a bound statement's result metadata is
* properly updated for newer protocol versions (v5 and greater) when a table's
* schema is altered.
*
* @since 2.8
*/
CASSANDRA_INTEGRATION_TEST_F(PreparedMetadataTests, AlterProperlyUpdatesColumnCount) {
  CHECK_FAILURE;
  CHECK_VERSION(4.0.0);
  // Ensure protocol v5 or greater
  // (beta protocol negotiates the newest version the server offers)
  Session session = default_cluster().with_beta_protocol(true).connect(keyspace_name_);
  // The column count will properly update after the alter
  prepared_check_column_count_after_alter(session, 3u);
}
| datastax/cpp-driver | tests/src/integration/tests/test_prepared_metadata.cpp | C++ | apache-2.0 | 3,220 |
package jp.hashiwa.elasticsearch.authplugin;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.rest.*;
import java.util.*;
import java.util.regex.Pattern;
import java.util.stream.Stream;
/**
 * A {@link RestHandler} decorator enforcing a simple header-based auth check
 * before delegating to the wrapped handler.
 * <p>
 * Requests whose method/path combination matches a configured pattern are only
 * allowed through when they carry the header {@code user: admin}; everything
 * else is delegated unchanged. Rejected requests get an empty 401 response.
 */
public class AuthRestHandler implements RestHandler {
  private final Logger logger = Loggers.getLogger(AuthRestHandler.class);
  /** Handler that performs the actual work once the request is authorised. */
  private final RestHandler originalHandler;
  /** Shared, stateless 401 response sent for unauthorised requests. */
  private final RestResponse unauthorizedResponse = new RestResponse() {
    @Override
    public String contentType() {
      return "application/json";
    }
    @Override
    public BytesReference content() {
      return new BytesArray("");
    }
    @Override
    public RestStatus status() {
      return RestStatus.UNAUTHORIZED;
    }
  };
  /**
   * Path patterns requiring authentication, keyed by HTTP method; the
   * {@code null} key applies to every method.
   * <p>
   * FIX: these were previously stored as {@code Stream<Pattern>} values.
   * A stream is single-use — the second request matching the same method key
   * would have failed with IllegalStateException ("stream has already been
   * operated upon or closed") when {@code anyMatch} ran again. Plain lists
   * are reusable and behave identically otherwise.
   */
  private final Map<RestRequest.Method, List<Pattern>> authPatterns = createAuthPatterns();
  private static Map<RestRequest.Method, List<Pattern>> createAuthPatterns() {
    Map<RestRequest.Method, List<Pattern>> patterns = new HashMap<>();
    patterns.put(RestRequest.Method.POST, Collections.singletonList(
        Pattern.compile("^/testindex(/.*)?$")
    ));
    patterns.put(RestRequest.Method.PUT, Collections.singletonList(
        Pattern.compile("^/testindex(/.*)?$")
    ));
    // all methods
    patterns.put(null, Collections.singletonList(
        Pattern.compile("^/adminindex(/.*)?$")
    ));
    return patterns;
  }
  AuthRestHandler(RestHandler restHandler) {
    this.originalHandler = restHandler;
  }
  @Override
  public void handleRequest(RestRequest restRequest, RestChannel restChannel, NodeClient nodeClient) throws Exception {
    this.logger.debug(restRequest.path());
    this.logger.debug(restRequest.rawPath());
    if (isOk(restRequest)) {
      this.originalHandler.handleRequest(restRequest, restChannel, nodeClient);
    } else {
      restChannel.sendResponse(unauthorizedResponse);
    }
  }
  /**
   * @return true when the given method key has a pattern matching the path,
   *         i.e. the request requires authentication.
   */
  private boolean needAuth(RestRequest.Method method, String path) {
    List<Pattern> patterns = authPatterns.get(method);
    if (patterns == null) {
      return false;
    }
    for (Pattern pattern : patterns) {
      if (pattern.matcher(path).matches()) {
        return true;
      }
    }
    return false;
  }
  /**
   * @return true when the request either needs no authentication or carries
   *         a {@code user} header equal to {@code admin}.
   */
  private boolean isOk(RestRequest restRequest) {
    RestRequest.Method method = restRequest.method();
    String path = restRequest.path(); // use rawpath() ?
    boolean needAuth = needAuth(method, path)
        || needAuth(null, path);
    if (! needAuth) {
      return true;
    }
    for (java.util.Map.Entry<String, String> entry: restRequest.headers()) {
      String key = entry.getKey();
      String value = entry.getValue();
      if (key.equals("user") && value.equals("admin")) {
        return true;
      }
    }
    return false;
    // ES 5.4
    // return restRequest.getHeaders().get("user").equals("admin");
  }
}
| hashiwa000/Elasticsearch-Auth-Plugin | src/jp/hashiwa/elasticsearch/authplugin/AuthRestHandler.java | Java | apache-2.0 | 2,877 |
/*
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License. See accompanying LICENSE file.
*/
package io.s4.persist;
import io.s4.util.clock.Clock;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.log4j.Logger;
/**
 * An in-memory {@code Persister} backed by a {@link ConcurrentHashMap}.
 * Entries carry an optional time-to-live ({@code period}, in seconds, per the
 * injected {@code Clock}) and are lazily treated as absent on read once
 * expired; when {@code selfClean} is enabled, a low-priority daemon thread
 * periodically removes expired entries.
 */
public class ConMapPersister implements Persister {
    // Counts successful set() calls; exposed through getPersistCount().
    private AtomicInteger persistCount = new AtomicInteger(0);
    private boolean selfClean = false;
    private int cleanWaitTime = 40; // seconds between cleaning passes
    private String loggerName = "s4";
    ConcurrentHashMap<String, CacheEntry> cache;
    Clock s4Clock;
    private int startCapacity = 5000;

    public void setStartCapacity(int startCapacity) {
        this.startCapacity = startCapacity;
    }

    public int getStartCapacity() {
        return startCapacity;
    }

    public void setSelfClean(boolean selfClean) {
        this.selfClean = selfClean;
    }

    public void setCleanWaitTime(int cleanWaitTime) {
        this.cleanWaitTime = cleanWaitTime;
    }

    public void setLoggerName(String loggerName) {
        this.loggerName = loggerName;
    }

    public ConMapPersister(Clock s4Clock) {
        this.s4Clock = s4Clock;
    }

    public void setS4Clock(Clock s4Clock) {
        this.s4Clock = s4Clock;
    }

    public ConMapPersister() {
    }

    /**
     * Creates the backing map and, if {@code selfClean} is set, starts the
     * background cleaner thread. Must be called before the persister is used.
     */
    public void init() {
        cache = new ConcurrentHashMap<String, CacheEntry>(this.getStartCapacity());
        if (selfClean) {
            Runnable r = new Runnable() {
                public void run() {
                    while (!Thread.interrupted()) {
                        int cleanCount = ConMapPersister.this.cleanOutGarbage();
                        Logger.getLogger(loggerName).info("Cleaned out "
                                + cleanCount + " entries; Persister has "
                                + cache.size() + " entries");
                        try {
                            // 1000L: long arithmetic avoids int overflow for
                            // very large wait times
                            Thread.sleep(cleanWaitTime * 1000L);
                        } catch (InterruptedException ie) {
                            Thread.currentThread().interrupt();
                        }
                    }
                }
            };
            Thread t = new Thread(r, "ConMapPersister-cleaner");
            // Configure the thread *before* start(): the priority used to be
            // set after start(), which is racy. Marking it a daemon also keeps
            // this housekeeping loop from holding the JVM open on shutdown.
            t.setDaemon(true);
            t.setPriority(Thread.MIN_PRIORITY);
            t.start();
        }
    }

    /** @return always 0; this persister has no asynchronous write queue */
    public int getQueueSize() {
        return 0;
    }

    public int getPersistCount() {
        return persistCount.get();
    }

    public int getCacheEntryCount() {
        return cache.size();
    }

    public void setAsynch(String key, Object value, int period) {
        // there really is no asynch for the local cache
        set(key, value, period);
    }

    /**
     * Stores a value under {@code key} with a TTL of {@code period} seconds
     * (a period <= 0 means the entry never expires).
     */
    public void set(String key, Object value, int period) {
        persistCount.getAndIncrement();
        CacheEntry ce = new CacheEntry();
        ce.value = value;
        ce.period = period;
        ce.addTime = s4Clock.getCurrentTime();
        cache.put(key, ce);
    }

    /** @return the stored value, or null if absent or expired */
    public Object get(String key) {
        CacheEntry ce = cache.get(key);
        if (ce == null) {
            return null;
        }
        if (ce.isExpired()) {
            return null;
        }
        return ce.value;
    }

    /**
     * Fetches several keys at once; absent/expired keys are simply left out
     * of the returned map.
     */
    public Map<String, Object> getBulk(String[] keys) {
        // Typed map (was a raw HashMap, which generated unchecked warnings).
        Map<String, Object> map = new HashMap<String, Object>();
        for (String key : keys) {
            Object value = get(key);
            if (value != null) {
                map.put(key, value);
            }
        }
        return map;
    }

    public Object getObject(String key) {
        return get(key);
    }

    public Map<String, Object> getBulkObjects(String[] keys) {
        return getBulk(keys);
    }

    public void remove(String key) {
        cache.remove(key);
    }

    /**
     * Removes all expired entries.
     *
     * @return the number of entries removed in this pass
     */
    public int cleanOutGarbage() {
        int count = 0;
        // Typed Enumeration (was raw, requiring a cast on each element).
        for (Enumeration<String> en = cache.keys(); en.hasMoreElements();) {
            String key = en.nextElement();
            CacheEntry ce = cache.get(key);
            if (ce != null && ce.isExpired()) {
                count++;
                cache.remove(key);
            }
        }
        return count;
    }

    public Set<String> keySet() {
        return cache.keySet();
    }

    public class CacheEntry {
        Object value;
        long addTime; // clock time (per s4Clock) when the entry was stored
        int period;   // TTL in seconds; <= 0 means never expires

        public boolean isExpired() {
            if (period > 0) {
                if ((addTime + (1000 * (long) period)) <= s4Clock.getCurrentTime()) {
                    return true;
                }
            }
            return false;
        }
    }
}
| s4/core | src/main/java/io/s4/persist/ConMapPersister.java | Java | apache-2.0 | 5,403 |
#
# Copyright 2015, SUSE Linux GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
shared_examples "a request class" do |with_body|
before(:each) do
Crowbar::Client::Config.configure(
Crowbar::Client::Config.defaults.merge(
server: "http://crowbar:80"
)
)
end
it "provides a method value" do
expect(subject.method).to(
eq(method)
)
end
it "provides a specific url" do
expect(subject.url).to(
eq(url)
)
end
it "provides a valid payload" do
expect(subject.content).to(
eq(params)
)
end
it "submits payload to an API" do
content = if with_body
params
else
""
end
allow(Crowbar::Client::Request::Rest).to receive(:new).and_return(
Crowbar::Client::Request::Rest.new(
url: url,
auth_type: nil
)
)
stub_request(
method,
"http://crowbar:80/#{url}"
).to_return(
status: 200,
body: "",
headers: {}
)
subject.process
expect(
Crowbar::Client::Request::Rest.new(url: url).send(
method,
content
).code
).to eq(200)
end
end
| crowbar/crowbar-client | spec/support/request_examples.rb | Ruby | apache-2.0 | 1,652 |
--TEST--
swoole_server/ssl: dtls
--SKIPIF--
<?php require __DIR__ . '/../../include/skipif.inc'; ?>
--FILE--
<?php
require __DIR__ . '/../../include/bootstrap.php';
$pm = new SwooleTest\ProcessManager;
// Parent process: acts as the DTLS client and asserts the echoed payload.
$pm->parentFunc = function ($pid) use ($pm) {
    $client = new Swoole\Client(SWOOLE_SOCK_UDP | SWOOLE_SSL, SWOOLE_SOCK_SYNC); // synchronous blocking client
    if (!$client->connect('127.0.0.1', $pm->getFreePort()))
    {
        exit("connect failed\n");
    }
    $client->send("hello world");
    Assert::same($client->recv(), "Swoole hello world");
    $pm->kill();
};
// Child process: runs a UDP+SSL (DTLS) echo server that prefixes "Swoole ".
$pm->childFunc = function () use ($pm) {
    $serv = new Swoole\Server('127.0.0.1', $pm->getFreePort(), SWOOLE_BASE, SWOOLE_SOCK_UDP | SWOOLE_SSL);
    $serv->set([
        'log_file' => '/dev/null',
        'ssl_cert_file' => SSL_FILE_DIR . '/server.crt',
        'ssl_key_file' => SSL_FILE_DIR . '/server.key',
    ]);
    // Signal the parent that the server is ready to accept connections.
    $serv->on("workerStart", function ($serv) use ($pm) {
        $pm->wakeup();
    });
    $serv->on('receive', function ($serv, $fd, $tid, $data) {
        $serv->send($fd, "Swoole $data");
    });
    $serv->on('packet', function ($serv, $fd, $tid, $data) {
        $serv->send($fd, "Swoole $data");
    });
    $serv->start();
};
$pm->childFirst();
$pm->run();
?>
--EXPECT--
| swoole/swoole-src | tests/swoole_server/ssl/dtls.phpt | PHP | apache-2.0 | 1,266 |
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/cloud/orchestration/airflow/service/v1/environments.proto
package com.google.cloud.orchestration.airflow.service.v1;
/**
*
*
* <pre>
* The configuration of Cloud SQL instance that is used by the Apache Airflow
* software.
* </pre>
*
* Protobuf type {@code google.cloud.orchestration.airflow.service.v1.DatabaseConfig}
*/
public final class DatabaseConfig extends com.google.protobuf.GeneratedMessageV3
implements
// @@protoc_insertion_point(message_implements:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
DatabaseConfigOrBuilder {
private static final long serialVersionUID = 0L;
// Use DatabaseConfig.newBuilder() to construct.
private DatabaseConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DatabaseConfig() {
machineType_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
return new DatabaseConfig();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
return this.unknownFields;
}
private DatabaseConfig(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10:
{
java.lang.String s = input.readStringRequireUtf8();
machineType_ = s;
break;
}
default:
{
if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.class,
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.Builder.class);
}
public static final int MACHINE_TYPE_FIELD_NUMBER = 1;
private volatile java.lang.Object machineType_;
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The machineType.
*/
@java.lang.Override
public java.lang.String getMachineType() {
java.lang.Object ref = machineType_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
machineType_ = s;
return s;
}
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The bytes for machineType.
*/
@java.lang.Override
public com.google.protobuf.ByteString getMachineTypeBytes() {
java.lang.Object ref = machineType_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
machineType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(machineType_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, machineType_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(machineType_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, machineType_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig)) {
return super.equals(obj);
}
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig other =
(com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) obj;
if (!getMachineType().equals(other.getMachineType())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + MACHINE_TYPE_FIELD_NUMBER;
hash = (53 * hash) + getMachineType().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseDelimitedFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseDelimitedFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.CodedInputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() {
return newBuilder();
}
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
*
* <pre>
* The configuration of Cloud SQL instance that is used by the Apache Airflow
* software.
* </pre>
*
* Protobuf type {@code google.cloud.orchestration.airflow.service.v1.DatabaseConfig}
*/
public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
implements
// @@protoc_insertion_point(builder_implements:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfigOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.class,
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.Builder.class);
}
// Construct using com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
}
@java.lang.Override
public Builder clear() {
super.clear();
machineType_ = "";
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
}
@java.lang.Override
public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
getDefaultInstanceForType() {
return com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.getDefaultInstance();
}
@java.lang.Override
public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig build() {
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig buildPartial() {
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig result =
new com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig(this);
result.machineType_ = machineType_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) {
return mergeFrom((com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig other) {
if (other
== com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.getDefaultInstance())
return this;
if (!other.getMachineType().isEmpty()) {
machineType_ = other.machineType_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage =
(com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private java.lang.Object machineType_ = "";
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The machineType.
*/
public java.lang.String getMachineType() {
java.lang.Object ref = machineType_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
machineType_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The bytes for machineType.
*/
public com.google.protobuf.ByteString getMachineTypeBytes() {
java.lang.Object ref = machineType_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
machineType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @param value The machineType to set.
* @return This builder for chaining.
*/
public Builder setMachineType(java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
machineType_ = value;
onChanged();
return this;
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return This builder for chaining.
*/
public Builder clearMachineType() {
machineType_ = getDefaultInstance().getMachineType();
onChanged();
return this;
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @param value The bytes for machineType to set.
* @return This builder for chaining.
*/
public Builder setMachineTypeBytes(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
machineType_ = value;
onChanged();
return this;
}
    // Unknown-field bookkeeping is delegated unchanged to the generated base
    // class; overridden here only to narrow the return type to Builder.
    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }
    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }
// @@protoc_insertion_point(builder_scope:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
}
// @@protoc_insertion_point(class_scope:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
  // Shared immutable default instance; also used as the value of absent
  // message fields.
  private static final com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
      DEFAULT_INSTANCE;
  static {
    DEFAULT_INSTANCE = new com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig();
  }
  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
      getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }
  // Stateless singleton parser; delegates to the parsing constructor.
  private static final com.google.protobuf.Parser<DatabaseConfig> PARSER =
      new com.google.protobuf.AbstractParser<DatabaseConfig>() {
        @java.lang.Override
        public DatabaseConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new DatabaseConfig(input, extensionRegistry);
        }
      };
  public static com.google.protobuf.Parser<DatabaseConfig> parser() {
    return PARSER;
  }
  @java.lang.Override
  public com.google.protobuf.Parser<DatabaseConfig> getParserForType() {
    return PARSER;
  }
  @java.lang.Override
  public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
      getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
| googleapis/java-orchestration-airflow | proto-google-cloud-orchestration-airflow-v1/src/main/java/com/google/cloud/orchestration/airflow/service/v1/DatabaseConfig.java | Java | apache-2.0 | 23,250 |
package com.bagri.server.hazelcast.task.schema;
import static com.bagri.core.Constants.pn_schema_password;
import static com.bagri.server.hazelcast.serialize.TaskSerializationFactory.cli_UpdateSchemaTask;
import static com.bagri.support.security.Encryptor.encrypt;
import java.io.IOException;
import java.util.Properties;
import java.util.Map.Entry;
import com.bagri.core.system.Schema;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
public class SchemaUpdater extends SchemaProcessor implements IdentifiedDataSerializable {

    // true: replace the schema's property set wholesale;
    // false: merge the supplied properties key-by-key into the existing ones.
    private boolean override;
    // New property values to apply; a contained password is encrypted before storage.
    private Properties properties;

    public SchemaUpdater() {
        //
    }

    /**
     * @param version expected schema version; the update is applied only when it matches
     * @param admin name of the administrator performing the update
     * @param override replace (true) or merge (false) the schema properties
     * @param properties property values to apply
     */
    public SchemaUpdater(int version, String admin, boolean override, Properties properties) {
        super(version, admin);
        this.override = override;
        this.properties = properties;
    }

    /**
     * Entry-processor callback: applies the property update to the stored schema.
     *
     * @param entry cache entry holding the schema to update
     * @return the updated Schema, or null when the entry has no value or its
     *         version does not match the expected one
     */
    @Override
    public Object process(Entry<String, Schema> entry) {
        logger.debug("process.enter; entry: {}", entry);
        if (entry.getValue() != null) {
            Schema schema = entry.getValue();
            // Optimistic version check: only touch the schema the caller saw.
            if (schema.getVersion() == getVersion()) {
                //if (schema.isActive()) {
                //    if (denitSchemaInCluster(schema) > 0) {
                //        // don't go further
                //        return null;
                //    }
                //}
                if (override) {
                    // Wholesale replacement; encrypt the password before it is stored.
                    String pwd = properties.getProperty(pn_schema_password);
                    if (pwd != null) {
                        properties.setProperty(pn_schema_password, encrypt(pwd));
                    }
                    schema.setProperties(properties);
                } else {
                    // Merge: overwrite only the provided keys, encrypting any password.
                    for (String name: properties.stringPropertyNames()) {
                        String value = properties.getProperty(name);
                        if (pn_schema_password.equals(name)) {
                            value = encrypt(value);
                        }
                        schema.setProperty(name, value);
                    }
                }
                //if (schema.isActive()) {
                //    if (initSchemaInCluster(schema) == 0) {
                //        schema.setActive(false);
                //    }
                //}
                schema.updateVersion(getAdmin());
                // setValue() persists the mutated schema back into the map.
                entry.setValue(schema);
                auditEntity(AuditType.update, schema);
                return schema;
            }
        }
        return null;
    }

    @Override
    public int getId() {
        return cli_UpdateSchemaTask;
    }

    // NOTE: readData/writeData field order is the wire format — keep in sync.
    @Override
    public void readData(ObjectDataInput in) throws IOException {
        super.readData(in);
        override = in.readBoolean();
        properties = in.readObject();
    }

    @Override
    public void writeData(ObjectDataOutput out) throws IOException {
        super.writeData(out);
        out.writeBoolean(override);
        out.writeObject(properties);
    }
}
| dsukhoroslov/bagri | bagri-server/bagri-server-hazelcast/src/main/java/com/bagri/server/hazelcast/task/schema/SchemaUpdater.java | Java | apache-2.0 | 2,552 |
/*
* Copyright 2015-2016 DevCon5 GmbH, info@devcon5.ch
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.devcon5.cli;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsNot.not;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import org.junit.Test;
/**
*/
public class CLIExample {

    // Bound from the "-x" option (expects one argument).
    @CliOption(value = "x",
            hasArg = true)
    private String example;

    // Nested option group: CLI walks the Structured fields for more options.
    @CliOptionGroup
    private Structured credentials;

    // Derived in init() after injection; not bound to any option.
    private String postProcessed;

    // Invoked by the CLI framework after field injection completes.
    @PostInject
    private void init(){
        postProcessed = "an " + example;
    }

    @Test
    public void example() {
        //arrange
        String[] exampleArgs = {"-u", "hans", "-p", "wurst", "-x", "example"};

        //act
        CLI.parse(exampleArgs).into(this);
        run();

        //assert
        assertEquals("an example", postProcessed);
    }

    public void run() {
        // All injected fields must be populated after parsing.
        assertThat(example, is(not(nullValue())));
        assertThat(credentials.user, is(not(nullValue())));
        assertThat(credentials.password, is(not(nullValue())));
    }

    // Option group holder: "-u" and "-p" are injected into these fields.
    static class Structured {

        @CliOption(value = "u",
                hasArg = true)
        private String user;

        @CliOption(value = "p",
                hasArg = true)
        private String password;
    }
}
| devcon5io/common | cli/src/test/java/io/devcon5/cli/CLIExample.java | Java | apache-2.0 | 1,919 |
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2016
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/MOD17
Description:
This module downloads MOD17 GPP data from
http://e4ftl01.cr.usgs.gov/. Use the MOD17.GPP_8daily function to
download and create 8 daily GPP images in Gtiff format.
The data is available between 2000-02-18 till present.
Examples:
from wa.Collect import MOD17
MOD17.GPP_8daily(Dir='C:/Temp3/', Startdate='2003-12-01', Enddate='2003-12-20',
latlim=[41, 45], lonlim=[-8, -5])
MOD17.NPP_yearly(Dir='C:/Temp3/', Startdate='2003-12-01', Enddate='2003-12-20',
latlim=[41, 45], lonlim=[-8, -5])
"""
from .GPP_8daily import main as GPP_8daily
from .NPP_yearly import main as NPP_yearly

# Names exported for `from ...MOD17 import *`.
__all__ = ['GPP_8daily', 'NPP_yearly']

# Module version.
__version__ = '0.1'
| wateraccounting/wa | Collect/MOD17/__init__.py | Python | apache-2.0 | 860 |
/*
* Copyright 2015-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.docksidestage.app.web.product;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.docksidestage.dbflute.allcommon.CDef;
import org.hibernate.validator.constraints.Length;
/**
* @author jflute
*/
public class ProductSearchForm {

    /** Product name keyword to search by. */
    @Length(max = 10) // #simple_for_example just for validation example
    public String productName;

    /** Product status to filter by. */
    public CDef.ProductStatus productStatus;

    /** Purchase member name keyword to filter by. */
    @Length(max = 5) // #simple_for_example just for validation example
    public String purchaseMemberName;

    @Override
    public String toString() {
        // Reflection-based dump of all fields, for logging/debugging.
        return ToStringBuilder.reflectionToString(this);
    }
}
| dbflute-session/lastaflute-test-catalog | src/main/java/org/docksidestage/app/web/product/ProductSearchForm.java | Java | apache-2.0 | 1,252 |
/**
* @fileoverview Defines compressors utility methods.
*
* @see https://google.github.io/styleguide/javascriptguide.xml
* @see https://developers.google.com/closure/compiler/docs/js-for-compiler
* @module glize/compressors
*/
import * as lzw from 'lzw-compressor';
/**
* Enumeration of available compression types.
* @enum {string}
*/
export const TYPE = {
  LZW: 'lzw' // Lempel–Ziv–Welch; currently the only supported codec.
};
/**
* Compress data string using specified compression type.
* @param {string} data Data to compress.
* @param {string=} [opt_type=TYPE.LZW] Optional compression type.
* @return {string} Returns compressed data.
* @method
* @example
* const result = compress(
* 'Any string of any length. Any string of any length. Any string of any length.');
* console.log(result);
* //> Any string of aā leĈth. ĀĂĄĆĈĊČĎĂđēĕėďĚćĉċčďġgĔ.
*/
export const compress = (data, opt_type = TYPE.LZW) => {
  // Dispatch on the requested codec; unrecognized types yield ''.
  return TYPE.LZW === opt_type ? lzw.compress(data) : '';
};
/**
* Decompress data string using specified compression type.
* @param {string} data Data to compress.
* @param {string=} [opt_type=TYPE.LZW] Optional compression type.
* @return {string} Returns compressed data.
* @method
* @example
* const result = decompress('Any string of aā leĈth. ĀĂĄĆĈĊČĎĂđēĕėďĚćĉċčďġgĔ.');
* console.log(result);
* //> Any string of any length. Any string of any length. Any string of any length.
*/
export const decompress = (data, opt_type = TYPE.LZW) => {
  // Mirror of compress(): only the LZW codec is wired up.
  return TYPE.LZW === opt_type ? lzw.decompress(data) : '';
};
| Datamart/Glize | src/compressors/index.js | JavaScript | apache-2.0 | 1,646 |
package com.flora.support;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.util.Map;
import org.apache.velocity.VelocityContext;
import org.apache.velocity.app.VelocityEngine;
import org.apache.velocity.context.Context;
import com.flora.Config;
public class VelocityTemplate {

    private VelocityEngine velocityEngine;
    private Config config;

    public VelocityTemplate(){
    }

    /**
     * Merges the named template with the model and returns the rendered text.
     * The shared page tools are merged into the model before rendering.
     *
     * @param template template name/path resolved by the VelocityEngine
     * @param model values made available to the template (mutated: page tools
     *              are added to it)
     * @return the rendered output; empty or partial if rendering failed
     */
    public String parseTemplate(String template, Map model){
        model.putAll(Config.getPageTools());
        Context context = new VelocityContext(model);
        Writer writer = new StringWriter();
        try {
            velocityEngine.mergeTemplate(template, "UTF-8", context, writer);
        } catch (Exception e) {
            // Historical no-throw contract: rendering problems yield an
            // empty/partial result. NOTE(review): consider at least logging e.
        }
        return writer.toString();
    }

    /**
     * Renders the template into the supplied writer. The caller owns the
     * writer and is responsible for flushing/closing it.
     */
    public void parseTemplate(String template, Map model, Writer writer){
        model.putAll(Config.getPageTools());
        Context context = new VelocityContext(model);
        try {
            velocityEngine.mergeTemplate(template, "UTF-8", context, writer);
        } catch (Exception e) {
            // Swallowed to preserve this class's no-throw contract.
        }
    }

    /**
     * Renders the template into the supplied output stream.
     * NOTE(review): the wrapping OutputStreamWriter uses the platform default
     * charset while the template itself is read as UTF-8 — confirm intent.
     */
    public void parseTemplate(String template, Map model, OutputStream os){
        model.putAll(Config.getPageTools());
        Context context = new VelocityContext(model);
        Writer writer = new OutputStreamWriter(os);
        try {
            velocityEngine.mergeTemplate(template, "UTF-8", context, writer);
            // BUG FIX: OutputStreamWriter buffers internally; without this
            // flush the merged output may never reach the underlying stream.
            writer.flush();
        } catch (Exception e) {
            // Swallowed to preserve this class's no-throw contract.
        }
    }

    public void setVelocityEngine(VelocityEngine velocityEngine) {
        this.velocityEngine = velocityEngine;
    }

    public Config getConfig() {
        return config;
    }

    public void setConfig(Config config) {
        this.config = config;
    }
}
| liqilun/flora | src/main/java/com/flora/support/VelocityTemplate.java | Java | apache-2.0 | 1,644 |
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
/**
* Subclass of FilteredTermEnum for enumerating all terms that match the
* specified wildcard filter term.
* <p>
* Term enumerations are always ordered by Term.compareTo(). Each term in
* the enumeration is greater than all that precede it.
*
* @version $Id: WildcardTermEnum.java 329859 2005-10-31 17:05:36Z bmesser $
*/
public class WildcardTermEnum extends FilteredTermEnum {
  Term searchTerm;
  String field = "";
  // Wildcard pattern with the literal prefix stripped off.
  String text = "";
  // Literal (wildcard-free) prefix of the pattern; used to seed the enum.
  String pre = "";
  int preLen = 0;
  boolean endEnum = false;

  /**
   * Creates a new <code>WildcardTermEnum</code>. Passing in a
   * {@link org.apache.lucene.index.Term Term} that does not contain a
   * <code>WILDCARD_CHAR</code> will cause an exception to be thrown.
   * <p>
   * After calling the constructor the enumeration is already pointing to the first
   * valid term if such a term exists.
   */
  public WildcardTermEnum(IndexReader reader, Term term) throws IOException {
    super();
    searchTerm = term;
    field = searchTerm.field();
    text = searchTerm.text();

    // Position of the first wildcard ('*' or '?'), whichever comes first.
    // If the pattern contains no wildcard at all, idx stays -1 and the
    // substring below throws — matching the documented contract.
    int sidx = text.indexOf(WILDCARD_STRING);
    int cidx = text.indexOf(WILDCARD_CHAR);
    int idx = sidx;
    if (idx == -1) {
      idx = cidx;
    }
    else if (cidx >= 0) {
      idx = Math.min(idx, cidx);
    }

    // Split pattern into literal prefix and the remaining wildcard tail.
    pre = searchTerm.text().substring(0,idx);
    preLen = pre.length();
    text = text.substring(preLen);
    // Start enumerating at the first term that could share the prefix.
    setEnum(reader.terms(new Term(searchTerm.field(), pre)));
  }

  // Accepts terms in the same field that start with the literal prefix and
  // whose remainder matches the wildcard tail; the first non-matching field
  // or prefix ends the enumeration (terms are sorted).
  // NOTE(review): `field == term.field()` is reference equality — assumes
  // field names are interned (Lucene convention); confirm.
  protected final boolean termCompare(Term term) {
    if (field == term.field()) {
      String searchText = term.text();
      if (searchText.startsWith(pre)) {
        return wildcardEquals(text, 0, searchText, preLen);
      }
    }
    endEnum = true;
    return false;
  }

  // Wildcard matches are not scored differentially; every match is 1.0.
  public final float difference() {
    return 1.0f;
  }

  public final boolean endEnum() {
    return endEnum;
  }

  /********************************************
   * String equality with support for wildcards
   ********************************************/

  public static final char WILDCARD_STRING = '*';
  public static final char WILDCARD_CHAR = '?';

  /**
   * Determines if a word matches a wildcard pattern.
   * <small>Work released by Granta Design Ltd after originally being done on
   * company time.</small>
   */
  public static final boolean wildcardEquals(String pattern, int patternIdx,
    String string, int stringIdx)
  {
    int p = patternIdx;
    
    for (int s = stringIdx; ; ++p, ++s)
      {
        // End of string yet?
        boolean sEnd = (s >= string.length());
        // End of pattern yet?
        boolean pEnd = (p >= pattern.length());

        // If we're looking at the end of the string...
        if (sEnd)
        {
          // Assume the only thing left on the pattern is/are wildcards
          boolean justWildcardsLeft = true;

          // Current wildcard position
          int wildcardSearchPos = p;
          // While we haven't found the end of the pattern,
          // and haven't encountered any non-wildcard characters
          while (wildcardSearchPos < pattern.length() && justWildcardsLeft)
          {
            // Check the character at the current position
            char wildchar = pattern.charAt(wildcardSearchPos);
            
            // If it's not a wildcard character, then there is more
            // pattern information after this/these wildcards.
            if (wildchar != WILDCARD_CHAR && wildchar != WILDCARD_STRING)
            {
              justWildcardsLeft = false;
            }
            else
            {
              // to prevent "cat" matches "ca??"
              if (wildchar == WILDCARD_CHAR) {
                return false;
              }
              
              // Look at the next character
              wildcardSearchPos++;
            }
          }

          // This was a prefix wildcard search, and we've matched, so
          // return true.
          if (justWildcardsLeft)
          {
            return true;
          }
        }

        // If we've gone past the end of the string, or the pattern,
        // return false.
        if (sEnd || pEnd)
        {
          break;
        }

        // Match a single character, so continue.
        if (pattern.charAt(p) == WILDCARD_CHAR)
        {
          continue;
        }

        //
        if (pattern.charAt(p) == WILDCARD_STRING)
        {
          // Look at the character beyond the '*'.
          ++p;
          // Examine the string, starting at the last character.
          // (Greedy recursive search for any split that lets '*' absorb
          // the remainder.)
          for (int i = string.length(); i >= s; --i)
          {
            if (wildcardEquals(pattern, p, string, i))
            {
              return true;
            }
          }
          break;
        }
        if (pattern.charAt(p) != string.charAt(s))
        {
          break;
        }
      }
      return false;
  }

  // Releases references so a closed enum does not pin the index reader terms.
  public void close() throws IOException
  {
      super.close();
      searchTerm = null;
      field = null;
      text = null;
  }
}
| lpxz/grail-lucene358684 | src/java/org/apache/lucene/search/WildcardTermEnum.java | Java | apache-2.0 | 5,708 |
package com.github.ayltai.foscam.client;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import android.support.annotation.NonNull;
import android.support.annotation.VisibleForTesting;
import android.support.v4.util.Pair;
import rx.Subscriber;
import rx.Subscription;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
public /* final */ class RxBus {
    // NOTE(review): INSTANCE is a ThreadLocal, so getInstance() returns a
    // *per-thread* bus, not a process-wide singleton; events sent on one
    // thread are not delivered to subscribers registered on another.
    // Confirm this is intentional.
    private static final ThreadLocal<RxBus> INSTANCE = new ThreadLocal<>();

    // Active subscriptions keyed by (event type, subscriber) so they can be
    // found again for unregistration.
    private final Map<Pair<Class, Subscriber>, Subscription> subscriptions = new HashMap<>();

    // SerializedSubject serializes concurrent onNext() calls from send().
    private final Subject<Object, ?> bus = new SerializedSubject<>(PublishSubject.create());

    /** Returns this thread's bus, creating it on first access. */
    public static RxBus getInstance() {
        final RxBus instance = RxBus.INSTANCE.get();

        if (instance == null) {
            RxBus.INSTANCE.set(new RxBus());

            return RxBus.INSTANCE.get();
        }

        return instance;
    }

    @VisibleForTesting
    RxBus() {
    }

    /**
     * Routes events of exactly {@code eventType} to {@code subscriber}.
     * Note: the filter uses getClass().equals(), so subclasses of the event
     * type are NOT delivered.
     *
     * @throws IllegalArgumentException if the same (type, subscriber) pair is
     *         already registered
     */
    public <T> void register(@NonNull final Class<T> eventType, @NonNull final Subscriber<T> subscriber) {
        final Pair<Class, Subscriber> key = Pair.create(eventType, subscriber);

        if (this.subscriptions.containsKey(key)) throw new IllegalArgumentException("The given subscriber is already registered");

        this.subscriptions.put(key, this.bus.filter(event -> event != null && event.getClass().equals(eventType)).subscribe(value -> subscriber.onNext((T)value)));
    }

    /** Removes the subscription for the pair, if present, and unsubscribes it. */
    public <T> void unregister(@NonNull final Class<T> eventType, @NonNull final Subscriber<T> subscriber) {
        final Pair<Class, Subscriber> key = Pair.create(eventType, subscriber);

        if (this.subscriptions.containsKey(key)) this.subscriptions.remove(key).unsubscribe();
    }

    /** Unregisters everything; iterates over a copy to avoid concurrent modification. */
    public void unregisterAll() {
        for (final Pair<Class, Subscriber> pair : new HashSet<>(this.subscriptions.keySet())) {
            this.unregister(pair.first, pair.second);
        }
    }

    /** Publishes the event; skipped entirely when nobody is registered. */
    public <T> void send(@NonNull final T event) {
        if (!this.subscriptions.isEmpty()) this.bus.onNext(event);
    }
}
| ayltai/Foscam-CGI-Client | app/src/main/java/com/github/ayltai/foscam/client/RxBus.java | Java | apache-2.0 | 2,141 |
# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Term aggregations."""
from __future__ import unicode_literals
from timesketch.lib.aggregators import manager
from timesketch.lib.aggregators import interface
def get_spec(field, limit=10, query='', query_dsl=''):
    """Build an aggregation spec for a terms bucket over filtered events.

    Summarizes the values of ``field`` across the events matched by either
    a query string or a raw query DSL; exactly one of the two must be
    supplied (the query string wins when both are given).

    Args:
        field (str): event attribute to bucket on.
        limit (int): maximum number of buckets to return, defaults to 10.
        query (str): query string applied before aggregating.
        query_dsl (str): query DSL applied before aggregating.

    Returns:
        dict: a spec usable as an aggregation request body.

    Raises:
        ValueError: if neither ``query`` nor ``query_dsl`` is provided.
    """
    if query:
        event_filter = {
            'bool': {'must': [{'query_string': {'query': query}}]}}
    elif query_dsl:
        event_filter = query_dsl
    else:
        raise ValueError('Neither query nor query DSL provided.')

    terms_bucket = {'terms': {'field': field, 'size': limit}}
    return {'query': event_filter, 'aggs': {'aggregation': terms_bucket}}
class FilteredTermsAggregation(interface.BaseAggregator):
    """Query Filter Term Aggregation."""

    NAME = 'query_bucket'
    DISPLAY_NAME = 'Filtered Terms Aggregation'
    DESCRIPTION = 'Aggregating values of a field after applying a filter'

    # Chart types this aggregator is able to render.
    SUPPORTED_CHARTS = frozenset(
        ['barchart', 'circlechart', 'hbarchart', 'linechart', 'table'])

    # Form descriptors consumed by the UI to build the aggregation dialog.
    FORM_FIELDS = [
        {
            'type': 'ts-dynamic-form-select-input',
            'name': 'supported_charts',
            'label': 'Chart type to render',
            'options': list(SUPPORTED_CHARTS),
            'display': True
        },
        {
            'name': 'query_string',
            'type': 'ts-dynamic-form-text-input',
            'label': 'The filter query to narrow down the result set',
            'placeholder': 'Query',
            'default_value': '',
            'display': True
        },
        {
            'name': 'query_dsl',
            'type': 'ts-dynamic-form-text-input',
            'label': 'The filter query DSL to narrow down the result',
            'placeholder': 'Query DSL',
            'default_value': '',
            'display': False
        },
        {
            'name': 'field',
            'type': 'ts-dynamic-form-text-input',
            'label': 'What field to aggregate.',
            'display': True
        },
        {
            'type': 'ts-dynamic-form-datetime-input',
            'name': 'start_time',
            'label': (
                'ISO formatted timestamp for the start time '
                'of the aggregated data'),
            'placeholder': 'Enter a start date for the aggregation',
            'default_value': '',
            'display': True
        },
        {
            'type': 'ts-dynamic-form-datetime-input',
            'name': 'end_time',
            'label': 'ISO formatted end time for the aggregation',
            'placeholder': 'Enter an end date for the aggregation',
            'default_value': '',
            'display': True
        },
        {
            'type': 'ts-dynamic-form-text-input',
            'name': 'limit',
            'label': 'Number of results to return',
            'placeholder': 'Enter number of results to return',
            'default_value': '10',
            'display': True
        }
    ]

    @property
    def chart_title(self):
        """Returns a title for the chart."""
        # Note: self.field is assigned in run(); before run() executes the
        # attribute is absent and this property would raise AttributeError.
        if self.field:
            return 'Top filtered results for "{0:s}"'.format(self.field)
        return 'Top results for an unknown field after filtering'

    # pylint: disable=arguments-differ
    def run(
            self, field, query_string='', query_dsl='',
            supported_charts='table', start_time='', end_time='', limit=10):
        """Run the aggregation.

        Args:
            field (str): this denotes the event attribute that is used
                for aggregation.
            query_string (str): the query field to run on all documents prior to
                aggregating the results.
            query_dsl (str): the query DSL field to run on all documents prior
                to aggregating the results. Either a query string or a query
                DSL has to be present.
            supported_charts: Chart type to render. Defaults to table.
            start_time: Optional ISO formatted date string that limits the time
                range for the aggregation.
            end_time: Optional ISO formatted date string that limits the time
                range for the aggregation.
            limit (int): How many buckets to return, defaults to 10.

        Returns:
            Instance of interface.AggregationResult with aggregation result.

        Raises:
            ValueError: if neither query_string nor query_dsl is provided.
        """
        if not (query_string or query_dsl):
            raise ValueError('Both query_string and query_dsl are missing')

        # Remember the field for chart_title.
        self.field = field

        formatted_field_name = self.format_field_by_type(field)

        aggregation_spec = get_spec(
            field=formatted_field_name, limit=limit, query=query_string,
            query_dsl=query_dsl)

        aggregation_spec = self._add_query_to_aggregation_spec(
            aggregation_spec, start_time=start_time, end_time=end_time)

        # Encoding information for Vega-Lite.
        encoding = {
            'x': {
                'field': field,
                'type': 'nominal',
                'sort': {
                    'op': 'sum',
                    'field': 'count',
                    'order': 'descending'
                }
            },
            'y': {'field': 'count', 'type': 'quantitative'},
            'tooltip': [
                {'field': field, 'type': 'nominal'},
                {'field': 'count', 'type': 'quantitative'}],
        }

        response = self.opensearch_aggregation(aggregation_spec)
        aggregations = response.get('aggregations', {})
        aggregation = aggregations.get('aggregation', {})

        buckets = aggregation.get('buckets', [])

        values = []
        for bucket in buckets:
            d = {
                field: bucket.get('key', 'N/A'),
                'count': bucket.get('doc_count', 0)
            }
            values.append(d)

        if query_string:
            extra_query_url = 'AND {0:s}'.format(query_string)
        else:
            extra_query_url = ''

        return interface.AggregationResult(
            encoding=encoding, values=values, chart_type=supported_charts,
            sketch_url=self._sketch_url, field=field,
            extra_query_url=extra_query_url)
# Make the aggregator discoverable by name through the manager registry.
manager.AggregatorManager.register_aggregator(FilteredTermsAggregation)
| google/timesketch | timesketch/lib/aggregators/term.py | Python | apache-2.0 | 7,953 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.retail.v2;
/**
* Available OAuth 2.0 scopes for use with the Retail API.
*
* @since 1.4
*/
public class CloudRetailScopes {

  /** See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account.. */
  public static final String CLOUD_PLATFORM = "https://www.googleapis.com/auth/cloud-platform";

  /**
   * Returns an unmodifiable set that contains all scopes declared by this class.
   *
   * @since 1.16
   */
  public static java.util.Set<String> all() {
    java.util.Set<String> scopes = new java.util.HashSet<String>();
    scopes.add(CLOUD_PLATFORM);
    return java.util.Collections.unmodifiableSet(scopes);
  }

  // Static-only scope holder; never instantiated.
  private CloudRetailScopes() {
  }
}
| googleapis/google-api-java-client-services | clients/google-api-services-retail/v2/1.31.0/com/google/api/services/retail/v2/CloudRetailScopes.java | Java | apache-2.0 | 1,411 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from textwrap import dedent
import unittest
from eventlet.green import ssl
import mock
from six.moves.configparser import NoSectionError, NoOptionError
from swift.common.middleware import memcache
from swift.common.memcached import MemcacheRing
from swift.common.swob import Request
from swift.common.wsgi import loadapp
from test.unit import with_tempdir, patch_policies
class FakeApp(object):
    # Minimal WSGI app: returns the environ so tests can inspect what the
    # middleware injected into it.
    def __call__(self, env, start_response):
        return env


class ExcConfigParser(object):
    # Config parser stub whose read() always raises; proves that a given
    # middleware configuration never falls back to reading the config file.
    def read(self, path):
        raise Exception('read called with %r' % path)


class EmptyConfigParser(object):
    # Config parser stub that pretends the config file does not exist.
    def read(self, path):
        return False
def get_config_parser(memcache_servers='1.2.3.4:5',
                      memcache_serialization_support='1',
                      memcache_max_connections='4',
                      section='memcache'):
    """Build a ConfigParser-like stub class preloaded with memcache settings.

    Passing the sentinel string 'error' for a value makes get() raise
    NoOptionError for the corresponding option, emulating a missing entry.

    Returns:
        A class (not an instance) mimicking the ConfigParser interface.
    """
    _srvs = memcache_servers
    _sers = memcache_serialization_support
    _maxc = memcache_max_connections
    _section = section

    class SetConfigParser(object):
        def items(self, section_name):
            if section_name != section:
                raise NoSectionError(section_name)
            # NOTE: returns a dict rather than a list of (name, value) pairs;
            # callers that iterate or dict() the result see no difference.
            return {
                'memcache_servers': memcache_servers,
                'memcache_serialization_support':
                memcache_serialization_support,
                'memcache_max_connections': memcache_max_connections,
            }

        def read(self, path):
            # Pretend the config file was found and read successfully.
            return True

        def get(self, section, option):
            # The parameter shadows the factory's `section` argument, so the
            # comparison uses the captured _section instead.
            if _section == section:
                if option == 'memcache_servers':
                    if _srvs == 'error':
                        raise NoOptionError(option, section)
                    return _srvs
                elif option == 'memcache_serialization_support':
                    if _sers == 'error':
                        raise NoOptionError(option, section)
                    return _sers
                elif option in ('memcache_max_connections',
                                'max_connections'):
                    if _maxc == 'error':
                        raise NoOptionError(option, section)
                    return _maxc
                else:
                    raise NoOptionError(option, section)
            else:
                # BUG FIX: previously raised NoSectionError(option), which
                # misreported the option name as the missing section.
                raise NoSectionError(section)

    return SetConfigParser
def start_response(*args):
    # No-op WSGI start_response stub; the tests ignore status and headers.
    pass
class TestCacheMiddleware(unittest.TestCase):
    def setUp(self):
        # Middleware under test wrapping a trivial app, with an empty conf.
        self.app = memcache.MemcacheMiddleware(FakeApp(), {})

    def test_cache_middleware(self):
        # The middleware must inject a MemcacheRing under 'swift.cache'.
        req = Request.blank('/something', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertTrue('swift.cache' in resp)
        self.assertTrue(isinstance(resp['swift.cache'], MemcacheRing))

    def test_conf_default_read(self):
        # Any conf that leaves at least one memcache option unset must fall
        # back to reading /etc/swift/memcache.conf (ExcConfigParser raises
        # with the path to prove the read was attempted).
        with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
            for d in ({},
                      {'memcache_servers': '6.7.8.9:10'},
                      {'memcache_serialization_support': '0'},
                      {'memcache_max_connections': '30'},
                      {'memcache_servers': '6.7.8.9:10',
                       'memcache_serialization_support': '0'},
                      {'memcache_servers': '6.7.8.9:10',
                       'memcache_max_connections': '30'},
                      {'memcache_serialization_support': '0',
                       'memcache_max_connections': '30'}
                      ):
                with self.assertRaises(Exception) as catcher:
                    memcache.MemcacheMiddleware(FakeApp(), d)
                self.assertEqual(
                    str(catcher.exception),
                    "read called with '/etc/swift/memcache.conf'")

    def test_conf_set_no_read(self):
        # When every option is supplied inline, the config file must never
        # be read (ExcConfigParser would raise if it were).
        with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
            exc = None
            try:
                memcache.MemcacheMiddleware(
                    FakeApp(), {'memcache_servers': '1.2.3.4:5',
                                'memcache_serialization_support': '2',
                                'memcache_max_connections': '30'})
            except Exception as err:
                exc = err
            self.assertIsNone(exc)

    def test_conf_default(self):
        # No conf file and no inline conf: built-in defaults apply.
        with mock.patch.object(memcache, 'ConfigParser', EmptyConfigParser):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, False)
        self.assertEqual(
            app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)

    def test_conf_inline(self):
        # Inline conf wins over the (stubbed) config file values.
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_serialization_support': '0',
                 'memcache_max_connections': '5'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, True)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 5)

    def test_conf_inline_ratelimiting(self):
        # Error-suppression options propagate into the MemcacheRing.
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'error_suppression_limit': '5',
                 'error_suppression_interval': '2.5'})
        self.assertEqual(app.memcache._error_limit_count, 5)
        self.assertEqual(app.memcache._error_limit_time, 2.5)
        self.assertEqual(app.memcache._error_limit_duration, 2.5)

    def test_conf_inline_tls(self):
        # TLS options must produce an SSL context built from the CA file and
        # loaded with the client cert/key pair.
        fake_context = mock.Mock()
        with mock.patch.object(ssl, 'create_default_context',
                               return_value=fake_context):
            with mock.patch.object(memcache, 'ConfigParser',
                                   get_config_parser()):
                memcache.MemcacheMiddleware(
                    FakeApp(),
                    {'tls_enabled': 'true',
                     'tls_cafile': 'cafile',
                     'tls_certfile': 'certfile',
                     'tls_keyfile': 'keyfile'})
            ssl.create_default_context.assert_called_with(cafile='cafile')
            fake_context.load_cert_chain.assert_called_with('certfile',
                                                            'keyfile')

    def test_conf_extra_no_section(self):
        # A conf file without a [memcache] section behaves like no conf file:
        # defaults apply.
        with mock.patch.object(memcache, 'ConfigParser',
                               get_config_parser(section='foobar')):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, False)
        self.assertEqual(
            app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)
def test_conf_extra_no_option(self):
replacement_parser = get_config_parser(
memcache_servers='error', memcache_serialization_support='error',
memcache_max_connections='error')
with mock.patch.object(memcache, 'ConfigParser', replacement_parser):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, False)
self.assertEqual(
app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)
def test_conf_inline_other_max_conn(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_serialization_support': '0',
'max_connections': '5'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, True)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 5)
def test_conf_inline_bad_max_conn(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_serialization_support': '0',
'max_connections': 'bad42'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, True)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 4)
def test_conf_from_extra_conf(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '1.2.3.4:5')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['1.2.3.4:5'].max_size, 4)
def test_conf_from_extra_conf_bad_max_conn(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser(
memcache_max_connections='bad42')):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '1.2.3.4:5')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['1.2.3.4:5'].max_size, 2)
def test_conf_from_inline_and_maxc_from_extra_conf(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_serialization_support': '0'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, True)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 4)
def test_conf_from_inline_and_sers_from_extra_conf(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_max_connections': '42'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 42)
def test_filter_factory(self):
factory = memcache.filter_factory({'max_connections': '3'},
memcache_servers='10.10.10.10:10',
memcache_serialization_support='1')
thefilter = factory('myapp')
self.assertEqual(thefilter.app, 'myapp')
self.assertEqual(thefilter.memcache_servers, '10.10.10.10:10')
self.assertEqual(thefilter.memcache._allow_pickle, False)
self.assertEqual(thefilter.memcache._allow_unpickle, True)
self.assertEqual(
thefilter.memcache._client_cache['10.10.10.10:10'].max_size, 3)
@patch_policies
def _loadapp(self, proxy_config_path):
"""
Load a proxy from an app.conf to get the memcache_ring
:returns: the memcache_ring of the memcache middleware filter
"""
with mock.patch('swift.proxy.server.Ring'):
app = loadapp(proxy_config_path)
memcache_ring = None
while True:
memcache_ring = getattr(app, 'memcache', None)
if memcache_ring:
break
app = app.app
return memcache_ring
@with_tempdir
def test_real_config(self, tempdir):
config = """
[pipeline:main]
pipeline = cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
"""
config_path = os.path.join(tempdir, 'test.conf')
with open(config_path, 'w') as f:
f.write(dedent(config))
memcache_ring = self._loadapp(config_path)
# only one server by default
self.assertEqual(list(memcache_ring._client_cache.keys()),
['127.0.0.1:11211'])
# extra options
self.assertEqual(memcache_ring._connect_timeout, 0.3)
self.assertEqual(memcache_ring._pool_timeout, 1.0)
# tries is limited to server count
self.assertEqual(memcache_ring._tries, 1)
self.assertEqual(memcache_ring._io_timeout, 2.0)
@with_tempdir
def test_real_config_with_options(self, tempdir):
config = """
[pipeline:main]
pipeline = cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
memcache_servers = 10.0.0.1:11211,10.0.0.2:11211,10.0.0.3:11211,
10.0.0.4:11211
connect_timeout = 1.0
pool_timeout = 0.5
tries = 4
io_timeout = 1.0
tls_enabled = true
"""
config_path = os.path.join(tempdir, 'test.conf')
with open(config_path, 'w') as f:
f.write(dedent(config))
memcache_ring = self._loadapp(config_path)
self.assertEqual(sorted(memcache_ring._client_cache.keys()),
['10.0.0.%d:11211' % i for i in range(1, 5)])
# extra options
self.assertEqual(memcache_ring._connect_timeout, 1.0)
self.assertEqual(memcache_ring._pool_timeout, 0.5)
# tries is limited to server count
self.assertEqual(memcache_ring._tries, 4)
self.assertEqual(memcache_ring._io_timeout, 1.0)
self.assertEqual(memcache_ring._error_limit_count, 10)
self.assertEqual(memcache_ring._error_limit_time, 60)
self.assertEqual(memcache_ring._error_limit_duration, 60)
self.assertIsInstance(
list(memcache_ring._client_cache.values())[0]._tls_context,
ssl.SSLContext)
@with_tempdir
def test_real_memcache_config(self, tempdir):
proxy_config = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
connect_timeout = 1.0
""" % tempdir
proxy_config_path = os.path.join(tempdir, 'test.conf')
with open(proxy_config_path, 'w') as f:
f.write(dedent(proxy_config))
memcache_config = """
[memcache]
memcache_servers = 10.0.0.1:11211,10.0.0.2:11211,10.0.0.3:11211,
10.0.0.4:11211
connect_timeout = 0.5
io_timeout = 1.0
error_suppression_limit = 0
error_suppression_interval = 1.5
"""
memcache_config_path = os.path.join(tempdir, 'memcache.conf')
with open(memcache_config_path, 'w') as f:
f.write(dedent(memcache_config))
memcache_ring = self._loadapp(proxy_config_path)
self.assertEqual(sorted(memcache_ring._client_cache.keys()),
['10.0.0.%d:11211' % i for i in range(1, 5)])
# proxy option takes precedence
self.assertEqual(memcache_ring._connect_timeout, 1.0)
# default tries are not limited by servers
self.assertEqual(memcache_ring._tries, 3)
# memcache conf options are defaults
self.assertEqual(memcache_ring._io_timeout, 1.0)
self.assertEqual(memcache_ring._error_limit_count, 0)
self.assertEqual(memcache_ring._error_limit_time, 1.5)
self.assertEqual(memcache_ring._error_limit_duration, 1.5)
if __name__ == '__main__':
    # Run this module's tests directly with the stdlib unittest runner.
    unittest.main()
| swiftstack/swift | test/unit/common/middleware/test_memcache.py | Python | apache-2.0 | 17,061 |
<?php
/**
* HiPay fullservice Magento2
*
* NOTICE OF LICENSE
*
* This source file is subject to the Apache 2.0 Licence
* that is bundled with this package in the file LICENSE.md.
* It is also available through the world-wide-web at this URL:
* http://www.apache.org/licenses/LICENSE-2.0
*
* @copyright Copyright (c) 2019 - HiPay
* @license http://www.apache.org/licenses/LICENSE-2.0 Apache 2.0 Licence
*/
namespace HiPay\FullserviceMagento\Model\Request\ThreeDS;
use HiPay\FullserviceMagento\Model\Request\AbstractRequest;
use HiPay\Fullservice\Gateway\Model\Request\ThreeDSTwo\BrowserInfo;
/**
*
* @author HiPay <support@hipay.com>
* @copyright Copyright (c) 2019 - HiPay
* @license http://www.apache.org/licenses/LICENSE-2.0 Apache 2.0 Licence
* @link https://github.com/hipay/hipay-fullservice-sdk-magento2
*/
class BrowserInfoFormatter extends AbstractRequest
{
    /**
     * Helper for 3-D Secure 2 related logic.
     *
     * @var \HiPay\FullserviceMagento\Helper\ThreeDSTwo
     */
    protected $_threeDSHelper;
    /**
     * Order whose payment carries the client-side collected browser
     * fingerprint (additional information key 'browser_info').
     *
     * @var \Magento\Sales\Model\Order
     */
    protected $_order;
    /**
     * BrowserInfoFormatter constructor.
     *
     * @param \Psr\Log\LoggerInterface $logger
     * @param \Magento\Checkout\Helper\Data $checkoutData
     * @param \Magento\Customer\Model\Session $customerSession
     * @param \Magento\Checkout\Model\Session $checkoutSession
     * @param \Magento\Framework\Locale\ResolverInterface $localeResolver
     * @param \HiPay\FullserviceMagento\Model\Request\Type\Factory $requestFactory
     * @param \Magento\Framework\UrlInterface $urlBuilder
     * @param \HiPay\FullserviceMagento\Helper\Data $helper
     * @param \HiPay\FullserviceMagento\Helper\ThreeDSTwo $threeDSHelper
     * @param array $params Must contain an "order" entry with the order to format.
     * @throws \Magento\Framework\Exception\LocalizedException
     */
    public function __construct(
        \Psr\Log\LoggerInterface $logger,
        \Magento\Checkout\Helper\Data $checkoutData,
        \Magento\Customer\Model\Session $customerSession,
        \Magento\Checkout\Model\Session $checkoutSession,
        \Magento\Framework\Locale\ResolverInterface $localeResolver,
        \HiPay\FullserviceMagento\Model\Request\Type\Factory $requestFactory,
        \Magento\Framework\UrlInterface $urlBuilder,
        \HiPay\FullserviceMagento\Helper\Data $helper,
        \HiPay\FullserviceMagento\Helper\ThreeDSTwo $threeDSHelper,
        $params = []
    ) {
        parent::__construct(
            $logger,
            $checkoutData,
            $customerSession,
            $checkoutSession,
            $localeResolver,
            $requestFactory,
            $urlBuilder,
            $helper,
            $params
        );
        $this->_threeDSHelper = $threeDSHelper;
        // NOTE(review): assumes $params always contains an "order" entry;
        // an undefined-index notice is raised otherwise — confirm callers.
        $this->_order = $params["order"];
    }
    /**
     * Builds the BrowserInfo payload sent to the HiPay gateway for 3DSv2.
     *
     * Combines server-side data (client IP, Accept header) with the
     * JSON-encoded browser fingerprint stored on the payment under the
     * 'browser_info' additional-information key.
     *
     * {@inheritDoc}
     *
     * @return BrowserInfo
     * @see \HiPay\FullserviceMagento\Model\Request\AbstractRequest::mapRequest()
     */
    protected function mapRequest()
    {
        $browserInfo = new BrowserInfo();
        // Fingerprint collected client-side; json_decode() returns null on
        // missing or malformed data, which is handled below.
        $browserData = json_decode($this->_order->getPayment()->getAdditionalInformation('browser_info'));
        $browserInfo->ipaddr = $this->_order->getRemoteIp();
        $browserInfo->http_accept = isset($_SERVER['HTTP_ACCEPT']) ? $_SERVER['HTTP_ACCEPT'] : null;
        // The fingerprint itself is gathered via JavaScript, so this is
        // always reported as enabled.
        $browserInfo->javascript_enabled = true;
        if ($browserData !== null) {
            // Copy each optional field, defaulting to null when absent.
            $browserInfo->java_enabled = isset($browserData->java_enabled) ? $browserData->java_enabled : null;
            $browserInfo->language = isset($browserData->language) ? $browserData->language : null;
            $browserInfo->color_depth = isset($browserData->color_depth) ? $browserData->color_depth : null;
            $browserInfo->screen_height = isset($browserData->screen_height) ? $browserData->screen_height : null;
            $browserInfo->screen_width = isset($browserData->screen_width) ? $browserData->screen_width : null;
            $browserInfo->timezone = isset($browserData->timezone) ? $browserData->timezone : null;
            $browserInfo->http_user_agent = isset($browserData->http_user_agent) ? $browserData->http_user_agent : null;
        }
        return $browserInfo;
    }
}
| hipay/hipay-fullservice-sdk-magento2 | Model/Request/ThreeDS/BrowserInfoFormatter.php | PHP | apache-2.0 | 4,402 |
package com.github.andriell.collection;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
 * Exercises the basic contract of {@code HashThree}: add, replace, remove and
 * membership checks. Hash codes are chosen to collide in the high bits
 * (0x50000000/0x60000000/0x70000000) and in the low bits (5/6/7) so both ends
 * of the hash space are covered.
 *
 * Created by Andrey on 13.02.2016
 */
public class HashThreeTest {
    /** Entry point so the scenario can be run without a JUnit runner. */
    public static void main(String[] args) {
        HashThreeTest test = new HashThreeTest();
        test.test1();
    }
    @Test
    public void test1() {
        // Keys colliding in the top nibble of the hash code.
        ObjectTest test1 = new ObjectTest(0x50000000);
        ObjectTest test2 = new ObjectTest(0x60000000);
        ObjectTest test3 = new ObjectTest(0x70000000);
        // Keys colliding in the lowest bits.
        ObjectTest test4 = new ObjectTest(0x00000005);
        ObjectTest test5 = new ObjectTest(0x00000006);
        ObjectTest test6 = new ObjectTest(0x00000007);
        HashThree<ObjectTest> three = new HashThree<ObjectTest>();
        // Removing from an empty structure fails and leaves size at zero.
        assertEquals(0, three.getSize());
        assertEquals(false, three.remove(test1));
        // Each distinct object is added exactly once; size grows by one each time.
        assertEquals(true, three.add(test1));
        assertEquals(1, three.getSize());
        assertEquals(true, three.add(test2));
        assertEquals(2, three.getSize());
        assertEquals(true, three.add(test3));
        assertEquals(3, three.getSize());
        assertEquals(true, three.add(test4));
        assertEquals(4, three.getSize());
        assertEquals(true, three.add(test5));
        assertEquals(5, three.getSize());
        assertEquals(true, three.add(test6));
        assertEquals(6, three.getSize());
        // Duplicate adds are rejected...
        assertEquals(false, three.add(test1));
        assertEquals(false, three.add(test2));
        assertEquals(false, three.add(test3));
        assertEquals(false, three.add(test4));
        // ...but replace succeeds for elements already present.
        assertEquals(true, three.replace(test1));
        assertEquals(true, three.replace(test2));
        assertEquals(true, three.replace(test3));
        assertEquals(true, three.replace(test4));
        System.out.println(three);
        assertEquals(true, three.exist(test2));
        assertEquals(true, three.remove(test2));
        // TODO(review): the assertions below are disabled — confirm the
        // intended remove/exist semantics and re-enable or delete them.
        //assertEquals(false, three.remove(test2));
        //assertEquals(true, three.exist(test1));
        //assertEquals(false, three.exist(test2));
        //assertEquals(true, three.exist(test3));
        //assertEquals(true, three.exist(test4));
        System.out.println(three);
    }
    /**
     * Fixture with a fixed, caller-supplied hash code. {@code hashCode} is
     * deliberately overridden without {@code equals}: identity equality is
     * what distinguishes colliding keys in this test.
     */
    private class ObjectTest {
        // The constant hash code reported by this instance.
        private int hashCode;
        public ObjectTest(int hashCode) {
            this.hashCode = hashCode;
        }
        @Override
        public int hashCode() {
            return hashCode;
        }
        @Override
        public String toString() {
            return Integer.toString(hashCode);
        }
    }
}
| andriell/craftyfox | src/test/java/com/github/andriell/collection/HashThreeTest.java | Java | apache-2.0 | 2,540 |
package com.ryanharter.auto.value.moshi.example;
import com.google.auto.value.AutoValue;
import com.squareup.moshi.JsonAdapter;
import com.squareup.moshi.Moshi;
import java.lang.reflect.Type;
/**
 * Demonstrates an AutoValue value class with generic type parameters and the
 * static factory that auto-value-moshi uses to create its JSON adapter.
 *
 * @param <A> type of the first component
 * @param <B> type of the second component
 * @param <C> type of the third component
 */
@AutoValue public abstract class GenericsExample<A, B, C> {
  /** First component. */
  public abstract A a();
  /** Second component. */
  public abstract B b();
  /** Third component. */
  public abstract C c();
  /** Builder for {@link GenericsExample}; implemented by generated code. */
  @AutoValue.Builder
  public interface Builder<A, B, C> {
    Builder<A, B, C> a(A a);
    Builder<A, B, C> b(B b);
    Builder<A, B, C> c(C c);
    GenericsExample<A, B, C> build();
  }
  /** Creates a builder backed by the AutoValue-generated implementation. */
  public static <A, B, C> Builder<A, B, C> builder() {
    return new AutoValue_GenericsExample.Builder<A, B, C>();
  }
  /**
   * Factory discovered by auto-value-moshi at runtime.
   *
   * @param moshi the Moshi instance used to look up delegate adapters
   * @param types the concrete type arguments for A, B and C, in order
   */
  public static <A, B, C> JsonAdapter<GenericsExample<A, B, C>> jsonAdapter(Moshi moshi, Type[] types) {
    return new AutoValue_GenericsExample.MoshiJsonAdapter(moshi, types);
  }
}
| rharter/auto-value-moshi | example/src/main/java/com/ryanharter/auto/value/moshi/example/GenericsExample.java | Java | apache-2.0 | 870 |
# Copyright 2020 Department of Computational Biology for Infection Research - Helmholtz Centre for Infection Research
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from src.utils import labels as utils_labels
from src.utils import load_ncbi_taxinfo
from src import binning_classes
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
import numpy as np
import os, sys, inspect
import pandas as pd
from collections import OrderedDict
# Put this file's parent directory on sys.path so sibling packages (e.g.
# `src`) resolve when the module is executed directly rather than as a package.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
def create_colors_list():
    """Build the fixed palette used across all plots: the first nine tab10
    colors, then black, then all of Set2 and Set3 (30 entries total)."""
    palette = [tuple(color) for color in plt.cm.tab10(np.linspace(0, 1, 10))[:-1]]
    palette.append("black")
    palette.extend(tuple(color) for color in plt.cm.Set2(np.linspace(0, 1, 8)))
    palette.extend(tuple(color) for color in plt.cm.Set3(np.linspace(0, 1, 12)))
    return palette
def create_legend(color_indices, available_tools, output_dir):
    """Save a standalone legend — one colored marker per tool — to
    <output_dir>/genome/legend.pdf.

    :param color_indices: optional indices selecting/reordering palette colors
    :param available_tools: tool labels, in legend order
    :param output_dir: base output directory
    """
    palette = create_colors_list()
    if color_indices:
        palette = [palette[i] for i in color_indices]
    color_cycle = iter(palette)
    handles = []
    for _tool in available_tools:
        handles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None",
                              marker="o", markersize=10,
                              markerfacecolor=next(color_cycle)))
    fig = plt.figure(figsize=(0.5, 0.5))
    fig.legend(handles, available_tools, loc='center', frameon=False, ncol=5,
               handletextpad=0.1)
    fig.savefig(os.path.join(output_dir, 'genome', 'legend.pdf'), dpi=100,
                format='pdf', bbox_inches='tight')
    plt.close(fig)
def plot_precision_vs_bin_size(pd_bins, output_dir):
    """For every tool (gold standard excluded), scatter per-bin purity against
    log bin size and save it as <output_dir>/genome/<tool>/purity_vs_bin_size.png.

    :param pd_bins: DataFrame with per-bin rows including the tool label,
        'total_length' and 'precision_bp' columns
    :param output_dir: base output directory
    """
    without_gs = pd_bins[pd_bins[utils_labels.TOOL] != utils_labels.GS]
    for tool, bins_df in without_gs.groupby(utils_labels.TOOL):
        log_sizes = np.log(bins_df['total_length'])
        figure, axis = plt.subplots(figsize=(5, 4.5))
        axis.scatter(log_sizes, bins_df['precision_bp'], marker='o')
        # Right edge pinned to the largest bin; purity axis spans 0..1.
        axis.set_xlim([None, log_sizes.max()])
        axis.set_ylim([0.0, 1.0])
        axis.set_title(tool, fontsize=12)
        plt.ylabel('Purity per bin (%)', fontsize=12)
        plt.xlabel('Bin size [log(# bp)]', fontsize=12)
        figure.savefig(os.path.join(output_dir, 'genome', tool, 'purity_vs_bin_size.png'),
                       dpi=200, format='png', bbox_inches='tight')
        plt.close(figure)
def plot_by_genome_coverage(pd_bins, pd_target_column, available_tools, output_dir):
    """Scatter a per-bin/per-genome metric against (log) genome coverage, one
    color per tool, with a rolling-mean trend line per tool.

    :param pd_bins: DataFrame with 'genome_coverage' (already log-scaled),
        'genome_index' and the target metric column
    :param pd_target_column: 'precision_bp' or 'recall_bp'; selects labels and
        output file name
    :param available_tools: tool labels, in palette order
    :param output_dir: base output directory
    """
    colors_list = create_colors_list()
    if len(available_tools) > len(colors_list):
        raise RuntimeError("Plot only supports 29 colors")
    fig, axs = plt.subplots(figsize=(5, 4.5))
    for i, (color, tool) in enumerate(zip(colors_list, available_tools)):
        # Sort by genome rank so the rolling mean follows increasing coverage.
        pd_tool = pd_bins[pd_bins[utils_labels.TOOL] == tool].sort_values(by=['genome_index'])
        axs.scatter(pd_tool['genome_coverage'], pd_tool[pd_target_column], marker='o', color=colors_list[i], s=[3] * pd_tool.shape[0])
        # Trend line: rolling mean over 50 genomes (at least 10 for the edges).
        window = 50
        rolling_mean = pd_tool[pd_target_column].rolling(window=window, min_periods=10).mean()
        axs.plot(pd_tool['genome_coverage'], rolling_mean, color=colors_list[i])
    axs.set_ylim([-0.01, 1.01])
    # x values are log(coverage); relabel ticks with the actual coverage.
    axs.set_xticklabels(['{:,.1f}'.format(np.exp(x)) for x in axs.get_xticks()], fontsize=12)
    axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in axs.get_yticks()], fontsize=12)
    axs.tick_params(axis='x', labelsize=12)
    if pd_target_column == 'precision_bp':
        ylabel = 'Purity per bin (%)'
        file_name = 'purity_by_genome_coverage'
    else:
        ylabel = 'Completeness per genome (%)'
        file_name = 'completeness_by_genome_coverage'
    plt.ylabel(ylabel, fontsize=15)
    plt.xlabel('Average genome coverage', fontsize=15)
    # Legend with one marker per tool, placed outside the axes.
    colors_iter = iter(colors_list)
    circles = []
    for x in range(len(available_tools)):
        circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter)))
    lgd = plt.legend(circles, available_tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=14)
    fig.savefig(os.path.join(output_dir, 'genome', file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)
def get_pd_genomes_recall(sample_id_to_queries_list):
    """Collect per-genome recall rows from all genome-binning queries.

    :param sample_id_to_queries_list: dict mapping sample id -> list of query
        objects; only instances of binning_classes.GenomeQuery are used
    :return: DataFrame indexed by (sample_id, tool) with 'genome_id' and
        'recall_bp' columns; empty DataFrame if there are no genome queries
    """
    # Collect the frames first and concatenate once at the end — calling
    # pd.concat inside the loop is quadratic in the number of queries.
    recall_dfs = []
    for sample_id in sample_id_to_queries_list:
        for query in sample_id_to_queries_list[sample_id]:
            if not isinstance(query, binning_classes.GenomeQuery):
                continue
            recall_df = query.recall_df_cami1[['genome_id', 'recall_bp']].copy()
            recall_df[utils_labels.TOOL] = query.label
            recall_df['sample_id'] = sample_id
            recall_dfs.append(recall_df.reset_index().set_index(['sample_id', utils_labels.TOOL]))
    if not recall_dfs:
        return pd.DataFrame()
    return pd.concat(recall_dfs)
def plot_precision_recall_by_coverage(sample_id_to_queries_list, pd_bins_g, coverages_pd, available_tools, output_dir):
    """Create the completeness-by-coverage and purity-by-coverage plots.

    :param sample_id_to_queries_list: dict mapping sample id -> query objects
    :param pd_bins_g: per-bin DataFrame with tool, 'precision_bp', 'genome_id'
    :param coverages_pd: DataFrame with 'GENOMEID' and 'COVERAGE' columns,
        possibly one row per (genome, sample)
    :param available_tools: tool labels, in palette order
    :param output_dir: base output directory
    """
    # compute average genome coverage if coverages for multiple samples were provided
    coverages_pd = coverages_pd.groupby(['GENOMEID']).mean()
    # NOTE(review): the return value of rename() is discarded, so this line is
    # a no-op (after the groupby, GENOMEID is the index, not a column) —
    # confirm whether it can be removed.
    coverages_pd.rename(columns={'GENOMEID': 'genome_id'})
    coverages_pd = coverages_pd.sort_values(by=['COVERAGE'])
    # Rank genomes by coverage; used as a stable sort key in the scatter plots.
    coverages_pd['rank'] = coverages_pd['COVERAGE'].rank()
    # Completeness per genome vs. log coverage.
    pd_genomes_recall = get_pd_genomes_recall(sample_id_to_queries_list)
    pd_genomes_recall['genome_index'] = pd_genomes_recall['genome_id'].map(coverages_pd['rank'].to_dict())
    pd_genomes_recall = pd_genomes_recall.reset_index()
    pd_genomes_recall['genome_coverage'] = np.log(pd_genomes_recall['genome_id'].map(coverages_pd['COVERAGE'].to_dict()))
    plot_by_genome_coverage(pd_genomes_recall, 'recall_bp', available_tools, output_dir)
    # Purity per bin vs. log coverage (bins without a precision value dropped).
    pd_bins_precision = pd_bins_g[[utils_labels.TOOL, 'precision_bp', 'genome_id']].copy().dropna(subset=['precision_bp'])
    pd_bins_precision['genome_index'] = pd_bins_precision['genome_id'].map(coverages_pd['rank'].to_dict())
    pd_bins_precision['genome_coverage'] = np.log(pd_bins_precision['genome_id'].map(coverages_pd['COVERAGE'].to_dict()))
    plot_by_genome_coverage(pd_bins_precision, 'precision_bp', available_tools, output_dir)
def plot_heatmap(df_confusion, sample_id, output_dir, label, separate_bar=False, log_scale=False):
    """Render the bins-vs-genomes confusion matrix of one sample as a heatmap
    (PDF + PNG) under <output_dir>/genome/<label>/.

    :param df_confusion: confusion matrix, rows = predicted bins, columns = genomes
    :param sample_id: used in the output file names
    :param output_dir: base output directory
    :param label: tool label; also the output subdirectory and plot title
    :param separate_bar: additionally save the colorbar as its own figure
    :param log_scale: color by log10 of the base-pair counts
    """
    if log_scale:
        # NOTE(review): DataFrame.apply() has no 'inplace' parameter in modern
        # pandas — confirm the pandas version this is pinned to.
        df_confusion = df_confusion.apply(np.log10, inplace=True).replace(-np.inf, 0)
    fig, axs = plt.subplots(figsize=(10, 8))
    fontsize = 20
    # replace columns and rows labels by numbers
    d = {value: key for (key, value) in enumerate(df_confusion.columns.tolist(), 1)}
    df_confusion = df_confusion.rename(index=str, columns=d)
    df_confusion.index = range(1, len(df_confusion) + 1)
    # Show roughly every tenth tick label (rounded to a multiple of ten).
    xticklabels = int(round(df_confusion.shape[1] / 10, -1))
    yticklabels = int(round(df_confusion.shape[0] / 10, -1))
    sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=xticklabels, yticklabels=yticklabels, cbar=False, rasterized=True)
    # sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=False, yticklabels=False, cbar=True, rasterized=True)
    sns_plot.set_xlabel("Genomes", fontsize=fontsize)
    sns_plot.set_ylabel("Predicted bins", fontsize=fontsize)
    plt.yticks(fontsize=12, rotation=0)
    plt.xticks(fontsize=12)
    # Draw the colorbar manually in its own axes to control its placement.
    mappable = sns_plot.get_children()[0]
    cbar_ax = fig.add_axes([.915, .11, .017, .77])
    cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical')
    if log_scale:
        cbar.set_label(fontsize=fontsize, label='log$_{10}$(# bp)')
    else:
        # Linear scale: relabel colorbar ticks in millions of base pairs.
        fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)
        cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical', format=ticker.FuncFormatter(fmt))
        cbar.set_label(fontsize=fontsize, label='Millions of base pairs')
    cbar.ax.tick_params(labelsize=fontsize)
    cbar.outline.set_edgecolor(None)
    axs.set_title(label, fontsize=fontsize, pad=10)
    axs.set_ylim([len(df_confusion), 0])
    # plt.yticks(fontsize=14, rotation=0)
    # plt.xticks(fontsize=14)
    output_dir = os.path.join(output_dir, 'genome', label)
    fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.png'), dpi=200, format='png', bbox_inches='tight')
    plt.close(fig)
    if not separate_bar:
        return
    # create separate figure for bar
    fig = plt.figure(figsize=(6, 6))
    mappable = sns_plot.get_children()[0]
    fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)
    cbar = plt.colorbar(mappable, orientation='vertical', label='[millions of base pairs]', format=ticker.FuncFormatter(fmt))
    text = cbar.ax.yaxis.label
    font = matplotlib.font_manager.FontProperties(size=16)
    text.set_font_properties(font)
    cbar.outline.set_visible(False)
    cbar.ax.tick_params(labelsize=14)
    # store separate bar figure
    plt.gca().set_visible(False)
    fig.savefig(os.path.join(output_dir, 'heatmap_bar.pdf'), dpi=100, format='pdf', bbox_inches='tight')
    plt.close(fig)
def plot_boxplot(sample_id_to_queries_list, metric_name, output_dir, available_tools):
    """Draw horizontal box plots of a per-bin metric for every tool across all
    samples and save them (PDF + PNG) under <output_dir>/genome/.

    :param sample_id_to_queries_list: dict mapping sample id -> query objects
        exposing `label` and a `<metric>_df` DataFrame attribute
    :param metric_name: 'precision_bp' or 'recall_bp'; the DataFrame attribute
        is derived by replacing '_bp' with '_df'
    :param output_dir: base output directory
    :param available_tools: tool labels, top-to-bottom box order
    """
    # Collect per-query frames and concatenate once — pd.concat inside the
    # loop is quadratic in the number of queries.
    metric_dfs = []
    for sample_id in sample_id_to_queries_list:
        for query in sample_id_to_queries_list[sample_id]:
            metric_df = getattr(query, metric_name.replace('_bp', '_df')).copy()
            metric_df[utils_labels.TOOL] = query.label
            metric_df['sample_id'] = sample_id
            metric_dfs.append(metric_df.reset_index().set_index(['sample_id', utils_labels.TOOL]))
    pd_bins = pd.concat(metric_dfs) if metric_dfs else pd.DataFrame()
    # One list of metric values per tool, NaNs dropped, in plotting order.
    metric_all = []
    for tool in available_tools:
        pd_tool = pd_bins.iloc[pd_bins.index.get_level_values(utils_labels.TOOL) == tool]
        metric_all.append(pd_tool[metric_name][pd_tool[metric_name].notnull()].tolist())
    fig, axs = plt.subplots(figsize=(6, 5))
    medianprops = dict(linewidth=2.5, color='gold')
    bplot = axs.boxplot(metric_all, notch=0, vert=0, patch_artist=True, labels=available_tools, medianprops=medianprops, sym='k.')
    colors_iter = iter(create_colors_list())
    # turn on grid
    axs.grid(which='major', linestyle=':', linewidth='0.5', color='lightgrey')
    # force axes to be from 0 to 100%
    axs.set_xlim([-0.01, 1.01])
    # transform tick labels to percentages
    vals = axs.get_xticks()
    axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals])
    tick_labels = axs.get_yticklabels()
    plt.setp(tick_labels, fontsize=13)  # rotation=55 to slant the labels
    # color each box with the tool's palette color
    for box in bplot['boxes']:
        box.set(facecolor=next(colors_iter), linewidth=0.1)
    # first tool at the top
    plt.ylim(plt.ylim()[::-1])
    if metric_name == 'precision_bp':
        axs.set_xlabel('Purity per bin (%)', fontsize=13)
        metric_name = 'purity_bp'
    else:
        axs.set_xlabel('Completeness per genome (%)', fontsize=13)
        metric_name = 'completeness_bp'
    fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.png'), dpi=200, format='png', bbox_inches='tight')
    plt.close(fig)
def plot_summary(color_indices, df_results, labels, output_dir, rank, plot_type, file_name, xlabel, ylabel):
    """Generic scatter/errorbar summary over per-tool mean metrics.

    :param color_indices: optional indices selecting/reordering palette colors
    :param df_results: per-sample metrics incl. tool and binning-type columns
    :param labels: tool labels defining the plotting order
    :param output_dir: base output directory; files go to its binning-type subdir
    :param rank: optional taxonomic rank; prefixes the file name and the title
    :param plot_type: 'e' = bp errorbars, 'f' = seq errorbars,
        'w' = per-bp precision/recall, 'x' = per-seq precision/recall,
        'p' = ARI vs. percentage of assigned bps
    :param file_name: output file base name (without extension)
    :param xlabel: x axis label
    :param ylabel: y axis label ('genome' is replaced by 'taxon' when a rank is given)
    """
    available_tools = df_results[utils_labels.TOOL].unique()
    # Keep the requested label order, restricted to tools actually present.
    tools = [tool for tool in labels if tool in available_tools]
    colors_list = create_colors_list()
    if color_indices:
        colors_list = [colors_list[i] for i in color_indices]
    # One point per tool: metrics averaged over samples.
    df_mean = df_results.groupby(utils_labels.TOOL).mean().reindex(tools)
    binning_type = df_results[utils_labels.BINNING_TYPE].iloc[0]
    if len(df_mean) > len(colors_list):
        raise RuntimeError("Plot only supports 29 colors")
    fig, axs = plt.subplots(figsize=(5, 4.5))
    # force axes to be from 0 to 100%
    axs.set_xlim([0.0, 1.0])
    axs.set_ylim([0.0, 1.0])
    if plot_type == 'e':
        # Errorbars: average purity/completeness per bp with their variances.
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.errorbar(df_row[utils_labels.AVG_PRECISION_BP], df_row[utils_labels.AVG_RECALL_BP], xerr=df_row['avg_precision_bp_var'], yerr=df_row['avg_recall_bp_var'],
                         fmt='o',
                         ecolor=colors_list[i],
                         mec=colors_list[i],
                         mfc=colors_list[i],
                         capsize=3,
                         markersize=8)
    if plot_type == 'f':
        # Errorbars: sequence-based averages with their standard errors.
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.errorbar(df_row[utils_labels.AVG_PRECISION_SEQ], df_row[utils_labels.AVG_RECALL_SEQ], xerr=df_row[utils_labels.AVG_PRECISION_SEQ_SEM], yerr=df_row[utils_labels.AVG_RECALL_SEQ_SEM],
                         fmt='o',
                         ecolor=colors_list[i],
                         mec=colors_list[i],
                         mfc=colors_list[i],
                         capsize=3,
                         markersize=8)
    if plot_type == 'w':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.plot(df_row[utils_labels.PRECISION_PER_BP], df_row[utils_labels.RECALL_PER_BP], marker='o', color=colors_list[i], markersize=10)
    # NOTE(review): 'p' is only checked as an elif of 'x' while 'e'/'f'/'w' use
    # independent ifs — confirm this asymmetry is intentional.
    if plot_type == 'x':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.plot(df_row[utils_labels.PRECISION_PER_SEQ], df_row[utils_labels.RECALL_PER_SEQ], marker='o', color=colors_list[i], markersize=10)
    elif plot_type == 'p':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.plot(df_row[utils_labels.ARI_BY_BP], df_row[utils_labels.PERCENTAGE_ASSIGNED_BPS], marker='o', color=colors_list[i], markersize=10)
    # turn on grid
    # axs.minorticks_on()
    axs.grid(which='major', linestyle=':', linewidth='0.5')
    # axs.grid(which='minor', linestyle=':', linewidth='0.5')
    # transform plot_labels to percentages
    if plot_type != 'p':
        vals = axs.get_xticks()
        axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)
    else:
        axs.tick_params(axis='x', labelsize=12)
    vals = axs.get_yticks()
    axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)
    if rank:
        file_name = rank + '_' + file_name
        plt.title(rank)
        ylabel = ylabel.replace('genome', 'taxon')
    plt.xlabel(xlabel, fontsize=13)
    plt.ylabel(ylabel, fontsize=13)
    plt.tight_layout()
    # EPS version without the legend; PDF/PNG include the legend outside the axes.
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.eps'), dpi=100, format='eps', bbox_inches='tight')
    colors_iter = iter(colors_list)
    circles = []
    for x in range(len(df_mean)):
        circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter)))
    lgd = plt.legend(circles, tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=12)
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)
def plot_avg_precision_recall(colors, df_results, labels, output_dir, rank=None):
    """Create the two average purity-vs-completeness summary plots:
    bp-weighted ('e') and sequence-weighted ('f')."""
    for plot_type, file_name in (('e', 'avg_purity_completeness_bp'),
                                 ('f', 'avg_purity_completeness_seq')):
        plot_summary(colors,
                     df_results,
                     labels,
                     output_dir,
                     rank,
                     plot_type,
                     file_name,
                     'Average purity per bin (%)',
                     'Average completeness per genome (%)')
def plot_precision_recall(colors, summary_per_query, labels, output_dir, rank=None):
    """Create the per-sample purity-vs-completeness plots: bp-weighted ('w')
    and sequence-weighted ('x')."""
    for plot_type, file_name in (('w', 'purity_recall_bp'),
                                 ('x', 'purity_completeness_seq')):
        plot_summary(colors,
                     summary_per_query,
                     labels,
                     output_dir,
                     rank,
                     plot_type,
                     file_name,
                     'Purity for sample (%)',
                     'Completeness for sample (%)')
def plot_adjusted_rand_index_vs_assigned_bps(colors, summary_per_query, labels, output_dir, rank=None):
    """Plot the adjusted Rand index (per bp) against the percentage of binned
    base pairs, one point per tool (plot type 'p')."""
    xlabel = 'Adjusted Rand index'
    ylabel = 'Percentage of binned base pairs'
    plot_summary(colors, summary_per_query, labels, output_dir, rank,
                 'p', 'ari_vs_assigned_bps', xlabel, ylabel)
def plot_taxonomic_results(df_summary_t, metrics_list, errors_list, file_name, output_dir):
    """For each tool, plot one line per metric over all taxonomic ranks, with a
    shaded band per metric given by the corresponding error column, and save the
    figure as PNG and PDF under <output_dir>/taxonomic/<tool>/.

    metrics_list and errors_list must be parallel lists of column names in
    df_summary_t (one error column per metric column).
    """
    # One fixed color per metric line (at most 4 metrics supported).
    colors_list = ["#006cba", "#008000", "#ba9e00", "red"]
    for tool, pd_results in df_summary_t.groupby(utils_labels.TOOL):
        # One OrderedDict per metric, mapping every rank to its value (default 0).
        dict_metric_list = []
        for metric in metrics_list:
            rank_to_metric = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
            dict_metric_list.append(rank_to_metric)
        # Same structure for the per-metric error values.
        dict_error_list = []
        for error in errors_list:
            rank_to_metric_error = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
            dict_error_list.append(rank_to_metric_error)
        # Fill the dicts from the tool's rows; NaN becomes 0.
        for index, row in pd_results.iterrows():
            for rank_to_metric, metric in zip(dict_metric_list, metrics_list):
                rank_to_metric[row[utils_labels.RANK]] = .0 if np.isnan(row[metric]) else row[metric]
            for rank_to_metric_error, error in zip(dict_error_list, errors_list):
                rank_to_metric_error[row[utils_labels.RANK]] = .0 if np.isnan(row[error]) else row[error]
        fig, axs = plt.subplots(figsize=(6, 5))
        # force axes to be from 0 to 100%
        axs.set_xlim([0, 7])
        axs.set_ylim([0.0, 1.0])
        # One x position per taxonomic rank.
        x_values = range(len(load_ncbi_taxinfo.RANKS))
        y_values_list = []
        for rank_to_metric, color in zip(dict_metric_list, colors_list):
            y_values = list(rank_to_metric.values())
            axs.plot(x_values, y_values, color=color)
            y_values_list.append(y_values)
        # Shade +/- error around each metric line.
        for rank_to_metric_error, y_values, color in zip(dict_error_list, y_values_list, colors_list):
            sem = list(rank_to_metric_error.values())
            plt.fill_between(x_values, np.subtract(y_values, sem).tolist(), np.add(y_values, sem).tolist(), color=color, alpha=0.5)
        plt.xticks(x_values, load_ncbi_taxinfo.RANKS, rotation='vertical')
        # Relabel the y axis in percent.
        vals = axs.get_yticks()
        axs.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
        lgd = plt.legend(metrics_list, loc=1, borderaxespad=0., handlelength=2, frameon=False)
        plt.tight_layout()
        fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
        fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
        plt.close(fig)
def create_contamination_column(pd_tool_bins):
    """Add a 'newcolumn' holding per-bin contamination, i.e. 1 - purity (precision_bp)."""
    contamination = 1 - pd_tool_bins['precision_bp']
    pd_tool_bins['newcolumn'] = contamination
def create_completeness_minus_contamination_column(pd_tool_bins):
    """Add 'newcolumn' = completeness - contamination = recall_bp + precision_bp - 1."""
    score = pd_tool_bins['recall_bp'] + pd_tool_bins['precision_bp'] - 1
    pd_tool_bins['newcolumn'] = score
def plot_contamination(pd_bins, binning_type, title, xlabel, ylabel, create_column_function, output_dir):
    """Plot, for each tool, its bins sorted descending by a derived score and
    save the figure as PNG and PDF under <output_dir>/<binning_type>/.

    create_column_function must add a 'newcolumn' to the DataFrame it receives
    (e.g. create_contamination_column); that column is what gets plotted.
    The output file name is derived from the title.
    """
    if len(pd_bins) == 0:
        return
    # Work on a copy restricted to the needed columns; drop bins without purity.
    pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
    create_column_function(pd_bins_copy)
    colors_list = create_colors_list()
    fig, axs = plt.subplots(figsize=(6, 5))
    tools = pd_bins_copy[utils_labels.TOOL].unique().tolist()
    for color, tool in zip(colors_list, tools):
        pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
        # Sort bins from highest to lowest score; x axis is the bin's rank (1-based).
        pd_tool_bins = pd_tool_bins.sort_values(by='newcolumn', ascending=False).reset_index()
        pd_tool_bins = pd_tool_bins.drop(['index'], axis=1)
        axs.plot(list(range(1, len(pd_tool_bins) + 1)), pd_tool_bins['newcolumn'], color=color)
    min_value = pd_bins_copy['newcolumn'].min()
    axs.set_ylim(min_value if min_value < 1.0 else .9, 1.0)
    axs.set_xlim(1, None)
    axs.grid(which='major', linestyle='-', linewidth='0.5', color='lightgrey')
    # transform plot_labels to percentages
    vals = axs.get_yticks()
    axs.set_yticklabels(['{:3.0f}'.format(y * 100) for y in vals])
    plt.xlabel(xlabel, fontsize=14)
    plt.ylabel(ylabel + ' [%]', fontsize=14)
    lgd = plt.legend(tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=1, frameon=False, fontsize=12)
    plt.tight_layout()
    # Derive a filesystem-friendly file name from the plot title.
    file_name = title.lower().replace(' ', '_').replace('-', 'minus').replace('|', '')
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)
def get_number_of_hq_bins(tools, pd_bins):
    """Count high-quality bins per tool.

    A bin counts when its purity (precision_bp) exceeds 0.9 and its
    completeness (recall_bp) exceeds 0.5 / 0.7 / 0.9 respectively.

    :param tools: iterable of tool names (values of the TOOL column)
    :param pd_bins: DataFrame with TOOL, 'precision_bp' and 'recall_bp' columns
    :return: DataFrame indexed by tool with columns '>90%', '>70%', '>50%'
    """
    pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
    counts_per_tool = []
    for tool in tools:
        pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
        # Restrict to high-purity bins once, then apply the completeness cutoffs.
        high_purity = pd_tool_bins[pd_tool_bins['precision_bp'] > .9]
        x50 = high_purity[high_purity['recall_bp'] > .5].shape[0]
        x70 = high_purity[high_purity['recall_bp'] > .7].shape[0]
        x90 = high_purity[high_purity['recall_bp'] > .9].shape[0]
        counts_per_tool.append(pd.DataFrame([[x90, x70, x50]], columns=['>90%', '>70%', '>50%'], index=[tool]))
    # Bug fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # collect the per-tool frames and concatenate them once instead.
    return pd.concat(counts_per_tool) if counts_per_tool else pd.DataFrame()
def get_number_of_hq_bins_by_score(tools, pd_bins):
    """Count bins per tool in disjoint score brackets (>0.9, 0.7-0.9, 0.5-0.7).

    The score is recall_bp + 5 * (precision_bp - 1), i.e. completeness with a
    fivefold penalty on contamination.

    :param tools: iterable of tool names (values of the TOOL column)
    :param pd_bins: DataFrame with TOOL, 'precision_bp' and 'recall_bp' columns
    :return: DataFrame indexed by tool with columns '>90', '>70', '>50'
    """
    pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
    pd_bins_copy['newcolumn'] = pd_bins_copy['recall_bp'] + 5 * (pd_bins_copy['precision_bp'] - 1)
    counts_per_tool = []
    for tool in tools:
        pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
        x50 = pd_tool_bins[pd_tool_bins['newcolumn'] > .5].shape[0]
        x70 = pd_tool_bins[pd_tool_bins['newcolumn'] > .7].shape[0]
        x90 = pd_tool_bins[pd_tool_bins['newcolumn'] > .9].shape[0]
        # Make the brackets disjoint: '>70' excludes '>90', '>50' excludes '>70'.
        counts_per_tool.append(pd.DataFrame([[x90, x70 - x90, x50 - x70]], columns=['>90', '>70', '>50'], index=[tool]))
    # Bug fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # collect the per-tool frames and concatenate them once instead.
    return pd.concat(counts_per_tool) if counts_per_tool else pd.DataFrame()
def plot_counts(pd_bins, tools, output_dir, output_file, get_bin_counts_function):
    """Plot per-tool bin counts as a bar chart and save it as PDF and PNG
    under <output_dir>/genome/.

    get_bin_counts_function(tools, pd_bins) must return a DataFrame indexed by
    tool. For output_file == 'bin_counts' the bars are grouped; otherwise they
    are stacked and expected to have columns '>90', '>70', '>50'.
    """
    pd_counts = get_bin_counts_function(tools, pd_bins)
    fig, axs = plt.subplots(figsize=(11, 5))
    if output_file == 'bin_counts':
        fig = pd_counts.plot.bar(ax=axs, stacked=False, color=['#28334AFF', '#FBDE44FF', '#F65058FF'], width=.8, legend=None).get_figure()
    else:
        fig = pd_counts.plot.bar(ax=axs, stacked=True, color=['#9B4A97FF', '#FC766AFF', '#F9A12EFF'], width=.8, legend=None).get_figure()
    axs.tick_params(axis='x', labelrotation=45, length=0)
    axs.set_xticklabels(tools, horizontalalignment='right', fontsize=14)
    axs.set_xlabel(None)
    # axs.yaxis.set_major_locator(MaxNLocator(integer=True))
    h, l = axs.get_legend_handles_labels()
    axs.set_ylabel('#genome bins', fontsize=14)
    # axs.grid(which='major', linestyle=':', linewidth='0.5')
    # axs.grid(which='minor', linestyle=':', linewidth='0.5')
    # Empty "phantom" handle so the first legend entry acts as a title.
    ph = [plt.plot([], marker='', ls='')[0]]
    handles = ph + h
    if output_file == 'bin_counts':
        labels = ['Contamination < 10% Completeness '] + l
        bbox_to_anchor = (0.49, 1.02)
    else:
        labels = ['Score '] + l
        # Annotate each stacked bar with its total count.
        y_values = (pd_counts['>90'] + pd_counts['>70'] + pd_counts['>50']).tolist()
        for i, v in enumerate(y_values):
            axs.text(i - .25, v + 5, str(v), color='black', fontweight='bold')
        bbox_to_anchor = (0.47, 1.02)
    lgd = plt.legend(handles, labels, bbox_to_anchor=bbox_to_anchor, columnspacing=.5, loc=8, borderaxespad=0., handlelength=1, frameon=False, fontsize=14, ncol=5)
    # plt.subplots_adjust(hspace=0.6, wspace=0.2)
    fig.savefig(os.path.join(output_dir, 'genome', output_file + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, 'genome', output_file + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)
| CAMI-challenge/AMBER | src/plots.py | Python | apache-2.0 | 26,475 |
package web.magic.jvm;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.lang.reflect.UndeclaredThrowableException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.management.MBeanException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
class MBeanTyper {
    static final boolean DEBUG = Boolean.getBoolean("jboss.jmx.debug");

    /**
     * Creates a typed proxy for an MBean: invocations on the returned proxy are
     * forwarded to the given {@link MBeanServer} under the given {@link ObjectName}.
     *
     * @param server        MBean server hosting the bean
     * @param mbean         object name of the bean to proxy
     * @param mainInterface interface the proxy implements (its super-interfaces
     *                      are collected recursively and implemented too)
     * @return a {@link Proxy} implementing {@code mainInterface} and all of its
     *         super-interfaces, backed by an {@link MBeanTyperInvoker}
     * @throws Exception if the proxy cannot be created
     */
    public static final Object typeMBean(MBeanServer server, ObjectName mbean, Class<?> mainInterface) throws Exception {
        List<Class<?>> interfaces = new ArrayList<Class<?>>();
        if (mainInterface.isInterface()) {
            interfaces.add(mainInterface);
        }
        addInterfaces(mainInterface.getInterfaces(), interfaces);
        // toArray(new Class<?>[0]) is already correctly typed; the previous
        // raw (Class[]) cast was redundant.
        Class<?>[] cl = interfaces.toArray(new Class<?>[0]);
        if (DEBUG) {
            System.err.println("typeMean->server=" + server + ",mbean=" + mbean + ",mainInterface=" + mainInterface);
            for (Class<?> c : cl) {
                System.err.println(" :" + c);
            }
        }
        return Proxy.newProxyInstance(Thread.currentThread().getContextClassLoader(), cl, new MBeanTyperInvoker(server,
                mbean));
    }

    /**
     * Recursively collects every class in {@code cl} and all of its
     * super-interfaces into {@code list}.
     */
    private static final void addInterfaces(Class<?> cl[], List<Class<?>> list) {
        if (cl == null)
            return;
        for (int c = 0; c < cl.length; c++) {
            list.add(cl[c]);
            addInterfaces(cl[c].getInterfaces(), list);
        }
    }
}
/**
* MBeanTyperInvoker handles method invocations against the MBeanTyper target
* object and forwards them to the MBeanServer and ObjectName for invocation.
*
* @author <a href="mailto:jhaynie@vocalocity.net">Jeff Haynie</a>
*/
final class MBeanTyperInvoker implements java.lang.reflect.InvocationHandler {
private final MBeanServer server;
private final ObjectName mbean;
private final Map<Method, String[]> signatureCache = Collections.synchronizedMap(new HashMap<Method, String[]>());
MBeanTyperInvoker(MBeanServer server, ObjectName mbean) {
this.server = server;
this.mbean = mbean;
}
private boolean isJMXAttribute(Method m) {
String name = m.getName();
return (name.startsWith("get"));
}
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if (MBeanTyper.DEBUG) {
System.err.println(" ++ method=" + method.getName() + ",args=" + args);
}
try {
if (method.getDeclaringClass() == Object.class) {
String name = method.getName();
if (name.equals("hashCode")) {
return new Integer(this.hashCode());
} else if (name.equals("toString")) {
return this.toString();
} else if (name.equals("equals")) {
// FIXME: this needs to be reviewed - we should be
// smarter about this ...
return new Boolean(equals(args[0]));
}
} else if (isJMXAttribute(method) && (args == null || args.length <= 0)) {
String name = method.getName().substring(3);
return server.getAttribute(mbean, name);
}
String sig[] = (String[]) signatureCache.get(method);
if (sig == null) {
// get the method signature from the method argument directly
// vs. the arguments passed, since there may be primitives that
// are wrapped as objects in the arguments
Class<?> _args[] = method.getParameterTypes();
if (_args != null && _args.length > 0) {
sig = new String[_args.length];
for (int c = 0; c < sig.length; c++) {
if (_args[c] != null) {
sig[c] = _args[c].getName();
}
}
} else {
sig = new String[0];
}
signatureCache.put(method, sig);
}
return server.invoke(mbean, method.getName(), args, sig);
} catch (Throwable t) {
if (MBeanTyper.DEBUG) {
t.printStackTrace();
}
if (t instanceof UndeclaredThrowableException) {
UndeclaredThrowableException ut = (UndeclaredThrowableException) t;
throw ut.getUndeclaredThrowable();
} else if (t instanceof InvocationTargetException) {
InvocationTargetException it = (InvocationTargetException) t;
throw it.getTargetException();
} else if (t instanceof MBeanException) {
MBeanException me = (MBeanException) t;
throw me.getTargetException();
} else {
throw t;
}
}
}
} | liufeiit/WebMagic | WebMagic/src/test/java/web/magic/jvm/MBeanTyper.java | Java | apache-2.0 | 4,284 |
// ----------------------------------------------------------------------------
// Module initialization
var Config = require("config").config;
var utils = require("utils");
var validators = require("validators");
// ----------------------------------------------------------------------------
// Setting class.
/**
 * View-controller for a single settings row: shows the setting's localized
 * title and the current string value of the config property it is bound to.
 * Expects this.args to provide: title_id, propertyName, controllerName and
 * optionally top, width, validator.
 */
function Setting() {
    $.title_label.text_id = this.args.title_id;
    $.title_label.text = Alloy.Globals.L(this.args.title_id);
    // This will trigger UI update. Ugly solution I know.
    $.setting.top = this.args.top || 0;
    if (typeof this.args.width !== 'undefined') {
        $.setting.width = this.args.width;
    }
    // Listen to the "SettingChanges" event. It simply updates the string
    // representation of the property that the view shows.
    this.addSettingsChangedHandler(this.updateValue);
}
// Inherits from Controller...
Setting.prototype = new (require("controller"))(
    arguments[0], [$.title_label]
);
// Read the actual value of the property that this setting is responsible for
// and show its localized string representation in the value label.
Setting.prototype.updateValue = function() {
    $.setting_value.text =
        Alloy.Globals.L(Config.getProperty(this.args.propertyName).stringValue());
};
/**
 * Opens the editor window for this setting.
 *
 * @param initial   current value shown in the editor
 * @param use       callback(propertyName, value) applied when the new value is valid
 * @param validator name of a validator function exported by the validators module
 */
Setting.prototype.handleClick = function (initial, use, validator) {
    var self = this;
    var arg = {
        useValue: function(value) {
            // Bug fix / hardening: look the validator up by name instead of
            // eval()'ing a generated code string - same behavior, no dynamic
            // code evaluation.
            if (validators[validator](value)) {
                use(self.args.propertyName, value);
                self.updateValue();
            } else {
                alert(Alloy.Globals.L("illegal_value"));
            }
        },
        value: initial,
        validator: validator
    };
    utils.openWindowWithBottomClicksDisabled(this.args.controllerName, arg);
};
// Fetch the current value and validator name, then delegate to handleClick
// with a setter that writes the accepted value back into the configuration.
Setting.prototype.clickHandler = function() {
    var initial = Config.getProperty(this.args.propertyName).get();
    var validator = "ok";
    if (typeof this.args.validator !== 'undefined') {
        validator = this.args.validator;
    }
    this.handleClick(initial, function(name, value) {
        Config.getProperty(name).set(value);
    }, validator);
};
// ----------------------------------------------------------------------------
// Create the object representing this particular setting
var setting = new Setting();
// Handling button click event
// Forwards the view's click event to the Setting instance.
function onClick(e) {
    setting.clickHandler();
}
| garlictech/APIXMobil | app/controllers/setting.js | JavaScript | apache-2.0 | 2,314 |
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
    <!-- NOTE: $title must be set by the PHP page that includes this header. -->
    <title><?php print $title; ?></title>
    <!-- Bootstrap -->
    <link href="css/bootstrap.min.css" rel="stylesheet">
    <!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->
    <!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
    <!--[if lt IE 9]>
      <script src="https://oss.maxcdn.com/html5shiv/3.7.3/html5shiv.min.js"></script>
      <script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
    <![endif]-->
  </head>
| ak229/personalWebsite | old/pages/header.php | PHP | apache-2.0 | 830 |
package eu.dowsing.kolla.widget.brick.facade;
import javafx.scene.layout.Pane;
import javafx.scene.paint.Color;
import javafx.scene.shape.Circle;
import javafx.scene.shape.CircleBuilder;
import javafx.scene.shape.Rectangle;
import javafx.scene.shape.RectangleBuilder;
import com.leapmotion.leap.Hand;
import eu.dowsing.kolla.widget.brick.model.BrickModel;
import eu.dowsing.kolla.widget.brick.model.BrickModel.Position;
/**
 * Represents a complete hand including its fingers.
 *
 * Draws and updates the JavaFX shapes (bars, finger rectangles, hint circles)
 * that visualize one Leap Motion hand on a {@link Pane}.
 *
 * @author richardg
 */
public class BrickView {

    // port(left hand:red) and starboard(right hand:green)
    public enum Importance {
        PRIMARY, SECONDARY
    }

    // Bar shown when the hand is held horizontally.
    private Rectangle horizontal;
    // Bar shown when the hand is held vertically.
    private Rectangle vertical;
    // One small rectangle per potentially visible finger (5 in total).
    private Rectangle[] fingerRects;
    // Circle hinting where the hand can be placed.
    private Circle hint;

    /** Hints at where the gesture started. **/
    private Circle startHint;

    /**
     * Creates all shapes for one hand and adds them to the given pane.
     *
     * @param p              pane the shapes are added to
     * @param rectHeight     height of the horizontal hand bar
     * @param rectWidth      width of the horizontal hand bar
     * @param rectX          x position of the hand area
     * @param rectY          y position of the hand area
     * @param miniRectHeight height of one finger rectangle
     * @param miniRectWidth  width of one finger rectangle
     */
    public BrickView(Pane p, int rectHeight, int rectWidth, int rectX, int rectY, int miniRectHeight, int miniRectWidth) {
        drawIndicator(p, rectHeight, rectWidth, rectX, rectY, miniRectHeight, miniRectWidth);
    }

    // Builds the hint circles, hand bars and finger rectangles and adds them to the pane.
    private void drawIndicator(Pane p, int hHeight, int hWidth, int rectX, int rectY, int mHeight, int mWidth) {
        final int fingerCount = 5;
        fingerRects = new Rectangle[fingerCount];
        final int rectMargin = 10;
        final int hRealWidth = hWidth - (2 * rectMargin);
        // create the measure for the mini finger rectangles
        int miniRectMargin = rectMargin / 2;
        int mRealWidth = mWidth - miniRectMargin;
        int mRectX = rectX + (miniRectMargin / 2);
        int mRectY = rectY;
        // create measures for the vertical rectangle
        final int vWidth = hHeight;
        final int vHeight = hWidth / 2;
        // create the circle indicating where the hand can be
        this.hint = CircleBuilder.create().radius(hHeight / 2).centerX(rectX + (hWidth / 2) - (hHeight / 2))
                .centerY(rectY + (hHeight / 2)).fill(Color.web("grey", 0.1)).stroke(Color.BLACK).build();
        p.getChildren().add(hint);
        // create the circle indicating where the gesture started
        this.startHint = CircleBuilder.create().radius(hHeight / 2).centerX(rectX + (hWidth / 2) - (hHeight / 2))
                .centerY(rectY + (hHeight / 2)).fill(Color.web("grey", 0.1)).stroke(Color.BLACK).build();
        p.getChildren().add(startHint);
        // create the rectangle indicating position of the hand
        horizontal = RectangleBuilder.create().height(hHeight).width(hRealWidth).arcHeight(0).arcWidth(0)
                .stroke(Color.RED).fill(Color.web("blue", 0.1)).translateX(rectX).translateY(rectY).build();
        p.getChildren().add(horizontal);
        // create rectangle indicating if the hand is vertical
        vertical = RectangleBuilder.create().height(vHeight).width(vWidth).arcHeight(0).arcWidth(0).stroke(Color.RED)
                .fill(Color.web("blue", 0.1)).translateX(rectX + (vWidth / 2)).translateY(rectY - (vHeight / 2))
                .build();
        p.getChildren().add(vertical);
        // now create the rectangles indicating fingers found
        for (int i = 0; i < fingerRects.length; i++) {
            Rectangle mini = RectangleBuilder.create().height(mHeight).width(mRealWidth).arcHeight(0).arcWidth(0)
                    .stroke(Color.GREEN).fill(Color.web("blue", 0.1)).translateX(mRectX + (i * mWidth))
                    .translateY(mRectY).build();
            fingerRects[i] = mini;
            p.getChildren().add(mini);
        }
    }

    /**
     * Maps the hand's pitch angle to a hint color: roughly 0 degrees is blue,
     * around +90 green, around -90 yellow, and anything else red.
     */
    public Color getPitchColor(Hand h) {
        double direction = Math.toDegrees(h.direction().pitch());
        if (direction < 10 && direction > -10) {
            return Color.web("blue", 0.1);
        } else if (direction < 100 && direction > 80) {
            return Color.web("green", 0.1);
        } else if (direction < -80 && direction > -100) {
            return Color.web("yellow", 0.1);
        } else {
            return Color.web("red", 0.1);
        }
    }

    /**
     * Color for the given hand role: primary is green, secondary is red.
     * The yellow branch is unreachable for the current two-value enum but kept
     * as a safe default.
     */
    public Color getHandColor(Importance importance) {
        // port(left hand/secondary:red) and starboard(right hand/primary:green)
        if (importance == Importance.PRIMARY) {
            return Color.web("green", 1);
        } else if (importance == Importance.SECONDARY) {
            return Color.web("red", 1);
        } else {
            return Color.web("yellow", 1);
        }
    }

    // Shows the gesture-start hint colored by the hand's role.
    public void setShowGestureStart(Importance importance) {
        Color fill = getHandColor(importance);
        this.startHint.setVisible(true);
        this.startHint.setFill(fill);
    }

    /**
     * Show the hand
     *
     * @param importance
     * @param pos
     * @param fingerCount
     * @param handledGesture
     */
    public void showHand(Importance importance, Position pos, int fingerCount, boolean handledGesture) {
        // first all rectangles visible
        setVisible(true);

        // hide vertical or horizontal position
        Color fill = getHandColor(importance);
        if (pos == Position.HORIZONTAL) {
            vertical.setVisible(false);
        } else if (pos == Position.VERTICAL) {
            horizontal.setVisible(false);
        }

        // notify the user that the gesture was handled
        if (handledGesture) {
            fill = Color.web("yellow", 1);
        }

        // color the rectangles
        horizontal.setFill(fill);
        vertical.setFill(fill);

        // then we hide invisible fingers
        for (int i = fingerCount; i < fingerRects.length; i++) {
            fingerRects[i].setVisible(false);
        }
    }

    /**
     * Show or hide the complete hand with all indicators
     *
     * @param visible
     */
    public void setVisible(boolean visible) {
        hint.setVisible(visible);
        startHint.setVisible(visible);
        horizontal.setVisible(visible);
        vertical.setVisible(visible);
        for (Rectangle rect : this.fingerRects) {
            rect.setVisible(visible);
        }
    }

    /**
     * Show or hide only the hand hint.
     *
     * @param visible
     */
    public void setHintVisible(boolean visible) {
        this.hint.setVisible(visible);
    }
}
| N0rp/Snabb | src/main/java/eu/dowsing/kolla/widget/brick/facade/BrickView.java | Java | apache-2.0 | 6,215 |
import os
import datetime
from jinja2 import Environment,PackageLoader,TemplateNotFound
from hotzenplotz.openstack.common import cfg
from hotzenplotz.openstack.common import log as logging
from hotzenplotz.openstack.common import utils
from hotzenplotz.common import exception
from hotzenplotz.api import validator
LOG = logging.getLogger(__name__)
class CronHandler(object):
    """Handle cron resource requests (create/delete/update) by rendering
    puppet cron manifests from a Jinja2 template.
    """

    def __init__(self, **kwargs):
        env = Environment(loader=PackageLoader('hotzenplotz.worker', 'templates'))
        self.template = env.get_template('cron')
        # Target directory for generated manifests; resolved lazily in _create_cron.
        self.dir_path = None

    # @utils.synchronized('haproxy')
    def do_config(self, request):
        """Validate a request and dispatch it to the matching cron handler.

        :param request: dict with 'method' and 'cron_resource' keys
        :raises exception.CronConfigureError: on validation or handler failure
        """
        try:
            self._validate_request(request)
        except exception.BadRequest as e:
            LOG.warn('Bad request: %s' % e)
            raise exception.CronConfigureError(explanation=str(e))
        cmd = request['method']
        msg = request['cron_resource']
        if cmd == 'create_cron':
            try:
                self._create_cron(msg)
            except exception.CronCreateError as e:
                raise exception.CronConfigureError(explanation=str(e))
        elif cmd == 'delete_cron':
            try:
                self._delete_cron(msg)
            except exception.HaproxyDeleteError as e:
                raise exception.CronConfigureError(explanation=str(e))
        elif cmd == 'update_cron':
            try:
                self._update_cron(msg)
            except exception.CronUpdateError as e:
                raise exception.CronConfigureError(explanation=str(e))

    def _create_cron(self, msg, syntax_check=False):
        """Render the cron template and write it as a new puppet manifest.

        Optionally validates the generated file with 'puppet parser validate'.
        """
        try:
            output = self.template.render(cron_resource=msg)
        except TemplateNotFound as e:
            raise TemplateNotFound(str(e))
        try:
            if not self.dir_path:
                self.dir_path = '/etc/puppet/modules/cron/'
            cron_name = msg['title']
            file_path = self.dir_path + cron_name
            # Bug fix: 'path' was undefined (only 'os' is imported) -- use os.path.
            if not os.path.exists(file_path):
                with open(file_path, 'a') as f:
                    f.write(output)
        except (IOError, OSError) as e:
            # Bug fix: the file write raises IOError/OSError, never
            # CronCreateError; catch the real errors and wrap them.
            raise exception.CronCreateError(explanation=str(e))
        if syntax_check:
            try:
                self._test_syntax(file_path)
            except exception.ProcessExecutionError as e:
                raise exception.CronCreateError(explanation=str(e))
        LOG.debug("Created the new cron successfully")

    def _delete_cron(self, msg):
        """Delete a cron resource.

        NOTE(review): the helper methods called below
        (_create_lb_deleted_haproxy_cfg, _test_haproxy_config,
        _backup_original_cfg, _replace_original_cfg_with_new,
        _reload_haproxy_cfg) are not defined in this class -- they look like
        leftovers from the haproxy handler this module was derived from;
        confirm before relying on this code path.
        """
        LOG.debug("Deleting cron for NAME:%s USER: %s PROJECT:%s" %
                  (msg['id'], msg['user_id'], msg['project_id']))
        try:
            new_cfg_path = self._create_lb_deleted_haproxy_cfg(msg)
        except exception.HaproxyLBNotExists as e:
            LOG.warn('%s', e)
            return
            ##raise exception.HaproxyDeleteError(explanation=str(e))
        try:
            self._test_haproxy_config(new_cfg_path)
        except exception.ProcessExecutionError as e:
            raise exception.HaproxyDeleteError(explanation=str(e))
        rc, backup_path = self._backup_original_cfg()
        if rc != 0:
            raise exception.HaproxyDeleteError(explanation=backup_path)
        rc, strerror = self._replace_original_cfg_with_new(new_cfg_path)
        if rc != 0:
            raise exception.HaproxyDeleteError(explanation=strerror)
        if self._reload_haproxy_cfg(backup_path) != 0:
            e = 'Failed to reload haproxy'
            raise exception.HaproxyDeleteError(explanation=str(e))
        LOG.debug("Deleted the new load balancer successfully")

    def _update_cron(self, msg):
        """Update a cron resource.

        NOTE(review): like _delete_cron, this calls haproxy helper methods
        (_create_lb_deleted_haproxy_cfg, _create_lb_haproxy_cfg,
        _test_haproxy_config) that are not defined in this class.
        """
        LOG.debug("Updating the haproxy load "
                  "balancer for NAME:%s USER: %s PROJECT:%s" %
                  (msg['uuid'], msg['user_id'], msg['project_id']))
        try:
            lb_deleted_cfg_path = self._create_lb_deleted_haproxy_cfg(msg)
        except exception.HaproxyLBNotExists as e:
            LOG.warn('%s', e)
            raise exception.HaproxyUpdateError(explanation=str(e))
        try:
            new_cfg_path = self._create_lb_haproxy_cfg(
                msg, base_cfg_path=lb_deleted_cfg_path)
        except exception.HaproxyCreateCfgError as e:
            raise exception.HaproxyUpdateError(explanation=str(e))
        try:
            self._test_haproxy_config(new_cfg_path)
        except exception.ProcessExecutionError as e:
            raise exception.HaproxyUpdateError(explanation=str(e))
        LOG.debug("Updated the new load balancer successfully")

    def _validate_request(self, request):
        # Bug fix: the module is imported as 'validator'; the previous
        # 'validate.check_tcp_request' raised NameError at runtime.
        validator.check_tcp_request(request)

    def _get_lb_name(self, msg):
        # TODO(wenjianhn): utf-8 support, base64
        ##return "%s_%s" % (msg['project_id'],
        return "%s" % msg['uuid']

    def _is_lb_in_use(self, lb_name,
                      base_cfg_path='/etc/haproxy/haproxy.cfg'):
        """Return True if lb_name appears as a 'listen' section in the config."""
        with open(base_cfg_path) as cfg:
            lines = cfg.readlines()
        try:
            in_use_lb_name = [line.split()[1] for line in lines
                              if line.startswith('listen')]
        except IndexError:
            LOG.error("No item was found after listen directive,"
                      "is the haproxy configuraion file valid?")
            raise
        return lb_name in in_use_lb_name

    def _test_syntax(self, cfile_path):
        """Run 'puppet parser validate' on the generated manifest."""
        LOG.info('Testing the new puppet configuration file')
        cmd = "puppet parser validate %s" % cfile_path
        try:
            utils.execute(cmd)
        except exception.ProcessExecutionError as e:
            LOG.warn('Did not pass the configuration syntax test: %s', e)
            raise

    def _get_one_lb_info(self, line_all, line_index, line_total):
        """Collect the indented body lines of one 'listen' section.

        Returns (index_of_next_section_or_last_line, body_lines).
        """
        value = []
        for i in range(line_index, line_total):
            line = line_all[i]
            if line.startswith('\t'):
                value.append(line)
            elif line.startswith('listen'):
                return i, value
        return line_total - 1, value
| NewpTone/hotzenplotz | hotzenplotz/worker/driver/cron.py | Python | apache-2.0 | 6,103 |
/*
* Copyright (c) 2016, WSO2 Inc. (http://wso2.com) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wso2.msf4j;
import io.netty.buffer.ByteBuf;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
 * A responder for sending chunk-encoded response.
 */
public interface ChunkResponder extends Closeable {

    /**
     * Adds a chunk of data to the response. The content will be sent to the client asynchronously.
     *
     * @param chunk content to send
     * @throws IOException if the connection is already closed
     */
    void sendChunk(ByteBuffer chunk) throws IOException;

    /**
     * Adds a chunk of data to the response. The content will be sent to the client asynchronously.
     *
     * @param chunk content to send
     * @throws IOException if this {@link ChunkResponder} already closed or the connection is closed
     */
    void sendChunk(ByteBuf chunk) throws IOException;

    /**
     * Closes this responder which signals the end of the chunk response.
     *
     * @throws IOException if closing fails
     */
    @Override
    void close() throws IOException;
}
| taniamahanama/product-msf4j | core/src/main/java/org/wso2/msf4j/ChunkResponder.java | Java | apache-2.0 | 1,625 |
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/sagemaker/model/ListProcessingJobsResult.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/UnreferencedParam.h>
#include <utility>
using namespace Aws::SageMaker::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws;
// Default-constructed result: no summaries, no pagination token.
ListProcessingJobsResult::ListProcessingJobsResult()
{
}

// Constructs the result directly from a service response payload.
ListProcessingJobsResult::ListProcessingJobsResult(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
  *this = result;
}

// Deserializes the JSON payload: fills m_processingJobSummaries from the
// "ProcessingJobSummaries" array and m_nextToken from "NextToken" (if present).
ListProcessingJobsResult& ListProcessingJobsResult::operator =(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
  JsonView jsonValue = result.GetPayload().View();
  if(jsonValue.ValueExists("ProcessingJobSummaries"))
  {
    // Each array element is one ProcessingJobSummary object.
    Array<JsonView> processingJobSummariesJsonList = jsonValue.GetArray("ProcessingJobSummaries");
    for(unsigned processingJobSummariesIndex = 0; processingJobSummariesIndex < processingJobSummariesJsonList.GetLength(); ++processingJobSummariesIndex)
    {
      m_processingJobSummaries.push_back(processingJobSummariesJsonList[processingJobSummariesIndex].AsObject());
    }
  }

  if(jsonValue.ValueExists("NextToken"))
  {
    m_nextToken = jsonValue.GetString("NextToken");
  }

  return *this;
}
| cedral/aws-sdk-cpp | aws-cpp-sdk-sagemaker/source/model/ListProcessingJobsResult.cpp | C++ | apache-2.0 | 1,879 |
#region License Header
/*
* QUANTLER.COM - Quant Fund Development Platform
* Quantler Core Trading Engine. Copyright 2018 Quantler B.V.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#endregion License Header
using MessagePack;
using System;
using Quantler.Securities;
namespace Quantler.Data.Corporate
{
/// <summary>
/// Dividend amount
/// </summary>
[MessagePackObject]
public class Dividend : DataPointImpl
{
#region Public Constructors
/// <summary>
/// Initializes a new instance of the <see cref="Dividend"/> class.
/// </summary>
public Dividend() =>
DataType = DataType.Dividend;
/// <summary>
/// Initializes a new instance of the <see cref="Dividend"/> class.
/// </summary>
/// <param name="ticker">The ticker.</param>
/// <param name="date">The date.</param>
/// <param name="amount">The amount.</param>
public Dividend(TickerSymbol ticker, DateTime date, decimal amount)
: this()
{
Ticker = ticker;
Occured = date;
TimeZone = TimeZone.Utc;
Amount = amount;
}
#endregion Public Constructors
#region Public Properties
/// <summary>
/// Amount distribution
/// </summary>
[Key(6)]
public decimal Amount
{
get => Price;
set => Price = Math.Round(Price, 2);
}
#endregion Public Properties
}
} | Quantler/Core | Quantler/Data/Corporate/Dividend.cs | C# | apache-2.0 | 2,025 |
/*
* Copyright 2016-present Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Implementation of YANG node bgpVrfAf's children nodes.
*/
package org.onosproject.yang.gen.v1.ne.bgpcomm.rev20141225.nebgpcomm.bgpcomm.bgpvrfs.bgpvrf.bgpvrfafs.bgpvrfaf; | mengmoya/onos | apps/l3vpn/nel3vpn/nemgr/src/main/java/org/onosproject/yang/gen/v1/ne/bgpcomm/rev20141225/nebgpcomm/bgpcomm/bgpvrfs/bgpvrf/bgpvrfafs/bgpvrfaf/package-info.java | Java | apache-2.0 | 796 |
/// <reference path="./fabricPlugin.ts"/>
/// <reference path="./profileHelpers.ts"/>
/// <reference path="./containerHelpers.ts"/>
/// <reference path="../../helpers/js/storageHelpers.ts"/>
/// <reference path="../../helpers/js/controllerHelpers.ts"/>
/// <reference path="../../helpers/js/selectionHelpers.ts"/>
/// <reference path="../../helpers/js/filterHelpers.ts"/>
module Fabric {

  /**
   * Container view controller: polls the Fabric manager MBean for container and
   * profile data, groups containers by profile/location, and exposes the
   * filtering state used by the container view page.
   *
   * Fix: removed the dead `var versions = {}` / `var locations = {}`
   * declarations inside the jolokia response handler — both were immediately
   * shadowed by the real `groupByVersions(...)` / `groupByLocation(...)`
   * declarations below and never read.
   */
  export var ContainerViewController = _module.controller("Fabric.ContainerViewController", ["$scope", "jolokia", "$location", "localStorage", "$route", "workspace", "marked", "ProfileCart", "$dialog", ($scope, jolokia, $location, localStorage, $route, workspace:Workspace, marked, ProfileCart, $dialog) => {

    $scope.name = ContainerViewController.name;
    $scope.containers = <Array<Container>>[];
    $scope.selectedContainers = <Array<Container>>[];
    $scope.groupBy = 'none';
    $scope.filter = '';
    $scope.cartItems = [];
    // Cooperating filters; a container must match all non-blank filters to show.
    $scope.versionIdFilter = '';
    $scope.profileIdFilter = '';
    $scope.locationIdFilter = '';
    $scope.hasCounts = true;
    $scope.toString = Core.toString;
    $scope.filterContainersText = 'Filter Containers...';
    $scope.filterProfilesText = 'Filter Profiles...';
    $scope.filterLocationsText = 'Filter Locations...';
    $scope.filterBoxText = $scope.filterContainersText;
    $scope.selectedTags = [];
    $scope.createLocationDialog = ContainerHelpers.getCreateLocationDialog($scope, $dialog);

    // Attribute lists requested from the Fabric manager MBean for each
    // container and each profile.
    var containerFields = ['id', 'profileIds', 'profiles', 'versionId', 'location', 'alive', 'type', 'ensembleServer', 'provisionResult', 'root', 'jolokiaUrl', 'jmxDomains', 'metadata', 'parentId'];
    var profileFields = ['id', 'hidden', 'version', 'summaryMarkdown', 'iconURL', 'tags'];

    Fabric.initScope($scope, $location, jolokia, workspace);
    SelectionHelpers.decorate($scope);

    // when viewing profile boxes in container view, disable checkboxes
    $scope.viewOnly = true;

    // Persist grouping/filter choices across page reloads and in the URL.
    StorageHelpers.bindModelToLocalStorage({
      $scope: $scope,
      $location: $location,
      localStorage: localStorage,
      modelName: 'groupBy',
      paramName: 'groupBy',
      initialValue: $scope.groupBy
    });
    StorageHelpers.bindModelToLocalStorage({
      $scope: $scope,
      $location: $location,
      localStorage: localStorage,
      modelName: 'versionIdFilter',
      paramName: 'versionIdFilter',
      initialValue: $scope.versionIdFilter
    });
    StorageHelpers.bindModelToLocalStorage({
      $scope: $scope,
      $location: $location,
      localStorage: localStorage,
      modelName: 'profileIdFilter',
      paramName: 'profileIdFilter',
      initialValue: $scope.profileIdFilter
    });
    StorageHelpers.bindModelToLocalStorage({
      $scope: $scope,
      $location: $location,
      localStorage: localStorage,
      modelName: 'locationIdFilter',
      paramName: 'locationIdFilter',
      initialValue: $scope.locationIdFilter
    });

    $scope.groupByClass = ControllerHelpers.createClassSelector({
      'profileIds': 'btn-primary',
      'location': 'btn-primary',
      'none': 'btn-primary'
    });

    // Deep-watch so toggling a container's 'selected' flag refreshes the
    // selected-containers list.
    $scope.$watch('containers', (newValue, oldValue) => {
      if (newValue !== oldValue) {
        $scope.selectedContainers = $scope.containers.filter((container) => { return container['selected']; });
      }
    }, true);

    $scope.maybeShowLocation = () => {
      return ($scope.groupBy === 'location' || $scope.groupBy === 'none') && $scope.selectedContainers.length > 0;
    }

    // Drill down from a location (string) or a profile (object) to the flat
    // container list filtered to that thing.
    $scope.showContainersFor = (thing) => {
      if (angular.isString(thing)) {
        $scope.locationIdFilter = thing;
      } else {
        $scope.profileIdFilter = thing.id;
        $scope.versionIdFilter = thing.version;
      }
      $scope.groupBy = 'none';
    }

    $scope.filterLocation = (locationId) => {
      return FilterHelpers.searchObject(locationId, $scope.filter);
    }

    $scope.filterProfiles = (profile) => {
      return FilterHelpers.searchObject(profile.id, $scope.filter);
    }

    $scope.filterContainers = (container) => {
      if (!Core.isBlank($scope.versionIdFilter) && container.versionId !== $scope.versionIdFilter) {
        return false;
      }
      if (!Core.isBlank($scope.profileIdFilter) && !container.profileIds.any($scope.profileIdFilter)) {
        return false;
      }
      if (!Core.isBlank($scope.locationIdFilter) && container.location !== $scope.locationIdFilter) {
        return false;
      }
      return FilterHelpers.searchObject(container.id, $scope.filter);
    }

    $scope.filterContainer = $scope.filterContainers;

    $scope.viewProfile = (profile:Profile) => {
      Fabric.gotoProfile(workspace, jolokia, workspace.localStorage, $location, profile.version, profile.id);
    };

    // Adds 'thing' (or each element, if it's an array) to 'group' unless an
    // entry with the same value at key 'index' is already present.
    function maybeAdd(group: Array<any>, thing:any, index:string) {
      if (angular.isArray(thing)) {
        thing.forEach((i) => { maybeAdd(group, i, index); });
      } else {
        if (!group.any((item) => { return thing[index] === item[index] })) {
          group.add(thing);
        }
      }
    }

    // Buckets containers (and their deduplicated profiles) by version id.
    function groupByVersions(containers:Array<Container>) {
      var answer = {};
      containers.forEach((container) => {
        var versionId = container.versionId;
        var version = answer[versionId] || { containers: <Array<Container>>[], profiles: <Array<Profile>>[] };
        maybeAdd(version.containers, container, 'id');
        maybeAdd(version.profiles, container.profiles, 'id');
        answer[versionId] = version;
      });
      return answer;
    }

    // Buckets containers by their location string.
    function groupByLocation(containers:Array<Container>) {
      var answer = {};
      containers.forEach((container) => {
        var location = container.location;
        var loc = answer[location] || { containers: Array<Container>() };
        maybeAdd(loc.containers, container, 'id');
        answer[location] = loc;
      });
      return answer;
    }

    Fabric.loadRestApi(jolokia, workspace, undefined, (response) => {
      $scope.restApiUrl = UrlHelpers.maybeProxy(Core.injector.get('jolokiaUrl'), response.value);
      log.debug("Scope rest API: ", $scope.restApiUrl);
      Core.registerForChanges(jolokia, $scope, {
        type: 'exec',
        mbean: Fabric.managerMBean,
        operation: 'containers(java.util.List, java.util.List)',
        arguments:[containerFields, profileFields]
      }, (response) => {
        var containers = response.value;
        SelectionHelpers.sync($scope.selectedContainers, containers, 'id');
        // massage the returned data a bit first
        containers.forEach((container) => {
          if (Core.isBlank(container.location)) {
            container.location = ContainerHelpers.NO_LOCATION;
          }
          container.profiles = container.profiles.filter((p) => { return !p.hidden });
          container.icon = Fabric.getTypeIcon(container);
          container.services = Fabric.getServiceList(container);
        });
        var versions = groupByVersions(containers);
        // Decorate each profile with alive/dead counts and presentation fields.
        angular.forEach(versions, (version, versionId) => {
          version.profiles.forEach((profile) => {
            var containers = version.containers.filter((c) => { return c.profileIds.some(profile.id); });
            profile.aliveCount = containers.count((c) => { return c.alive; });
            profile.deadCount = containers.length - profile.aliveCount;
            profile.summary = profile.summaryMarkdown ? marked(profile.summaryMarkdown) : '';
            profile.iconURL = Fabric.toIconURL($scope, profile.iconURL);
            profile.tags = ProfileHelpers.getTags(profile);
          });
        });
        var locations = groupByLocation(containers);
        var locationIds = ContainerHelpers.extractLocations(containers);
        $scope.locationMenu = ContainerHelpers.buildLocationMenu($scope, jolokia, locationIds);
        // grouped by location
        $scope.locations = locations;
        // grouped by version/profile
        $scope.versions = versions;
        // Sort by id with child containers grouped under parents
        var sortedContainers = containers.sortBy('id');
        var rootContainers = sortedContainers.exclude((c) => { return !c.root; });
        var childContainers = sortedContainers.exclude((c) => { return c.root; });
        if (childContainers.length > 0) {
          var tmp = [];
          rootContainers.each((c) => {
            tmp.add(c);
            var children = childContainers.exclude((child) => { return child.parentId !== c.id });
            tmp.add(children);
          });
          containers = tmp;
        }
        $scope.containers = containers;
        Core.$apply($scope);
      });
      Core.registerForChanges(jolokia, $scope, {
        type: 'read',
        mbean: Fabric.clusterManagerMBean,
        attribute: 'EnsembleContainers'
      }, (response) => {
        $scope.ensembleContainerIds = response.value;
        Core.$apply($scope);
      });
    });
  }]);
}
| mposolda/hawtio | hawtio-web/src/main/webapp/app/fabric/js/containerView.ts | TypeScript | apache-2.0 | 8,985 |
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcClient.FailedServerException;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer;
import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.regionserver.SplitTransaction;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.ConfigUtil;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.KeyLocker;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Triple;
import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKTable;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.data.Stat;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.LinkedHashMultimap;
/**
* Manages and performs region assignment.
* <p>
* Monitors ZooKeeper for events related to regions in transition.
* <p>
* Handles existing regions in transition during master failover.
*/
@InterfaceAudience.Private
public class AssignmentManager extends ZooKeeperListener {
  private static final Log LOG = LogFactory.getLog(AssignmentManager.class);
  // Sentinel server name used to mark assignment operations driven by HBCK.
  public static final ServerName HBCK_CODE_SERVERNAME = ServerName.valueOf(HConstants.HBCK_CODE_NAME,
      -1, -1L);
  public static final String ASSIGNMENT_TIMEOUT = "hbase.master.assignment.timeoutmonitor.timeout";
  public static final int DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT = 600000;
  public static final String ASSIGNMENT_TIMEOUT_MANAGEMENT = "hbase.assignment.timeout.management";
  public static final boolean DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT = false;
  public static final String ALREADY_IN_TRANSITION_WAITTIME
    = "hbase.assignment.already.intransition.waittime";
  public static final int DEFAULT_ALREADY_IN_TRANSITION_WAITTIME = 60000; // 1 minute
  // Owning server (the master); supplies configuration and the zookeeper watcher.
  protected final Server server;
  private ServerManager serverManager;
  // True only when the configured load balancer is FavoredNodeLoadBalancer (see ctor).
  private boolean shouldAssignRegionsWithFavoredNodes;
  private CatalogTracker catalogTracker;
  // Null unless timeout management (tomActivated) is enabled in the ctor.
  protected final TimeoutMonitor timeoutMonitor;
  // Null unless timeout management (tomActivated) is enabled in the ctor.
  private final TimerUpdater timerUpdater;
  private LoadBalancer balancer;
  private final MetricsAssignmentManager metricsAssignmentManager;
  private final TableLockManager tableLockManager;
  private AtomicInteger numRegionsOpened = new AtomicInteger(0);
  // Per-encoded-region-name locks; see acquireRegionLock/processRegionInTransition.
  final private KeyLocker<String> locker = new KeyLocker<String>();
  /**
   * Map of regions to reopen after the schema of a table is changed. Key -
   * encoded region name, value - HRegionInfo
   */
  private final Map <String, HRegionInfo> regionsToReopen;
  /*
   * Maximum times we recurse an assignment/unassignment.
   * See below in {@link #assign()} and {@link #unassign()}.
   */
  private final int maximumAttempts;
  /**
   * Map of two merging regions from the region to be created.
   */
  private final Map<String, PairOfSameType<HRegionInfo>> mergingRegions
    = new HashMap<String, PairOfSameType<HRegionInfo>>();
  /**
   * The sleep time for which the assignment will wait before retrying in case of hbase:meta assignment
   * failure due to lack of availability of region plan
   */
  private final long sleepTimeBeforeRetryingMetaAssignment;
  /** Plans for region movement. Key is the encoded version of a region name*/
  // TODO: When do plans get cleaned out? Ever? In server open and in server
  // shutdown processing -- St.Ack
  // All access to this Map must be synchronized.
  final NavigableMap<String, RegionPlan> regionPlans =
    new TreeMap<String, RegionPlan>();
  private final ZKTable zkTable;
  /**
   * Contains the server which need to update timer, these servers will be
   * handled by {@link TimerUpdater}
   */
  private final ConcurrentSkipListSet<ServerName> serversInUpdatingTimer;
  // Shared executor used to hand region open/close events to handlers.
  private final ExecutorService executorService;
  // For unit tests, keep track of calls to ClosedRegionHandler
  private Map<HRegionInfo, AtomicBoolean> closedRegionHandlerCalled = null;
  // For unit tests, keep track of calls to OpenedRegionHandler
  private Map<HRegionInfo, AtomicBoolean> openedRegionHandlerCalled = null;
  //Thread pool executor service for timeout monitor
  private java.util.concurrent.ExecutorService threadPoolExecutorService;
  // A bunch of ZK events workers. Each is a single thread executor service
  private final java.util.concurrent.ExecutorService zkEventWorkers;
  // ZK events ignored when their region server is already offline.
  private List<EventType> ignoreStatesRSOffline = Arrays.asList(
      EventType.RS_ZK_REGION_FAILED_OPEN, EventType.RS_ZK_REGION_CLOSED);
  // Authoritative in-memory view of region -> state/server assignments.
  private final RegionStates regionStates;
  // The threshold to use bulk assigning. Using bulk assignment
  // only if assigning at least this many regions to at least this
  // many servers. If assigning fewer regions to fewer servers,
  // bulk assigning may be not as efficient.
  private final int bulkAssignThresholdRegions;
  private final int bulkAssignThresholdServers;
  // Should bulk assignment wait till all regions are assigned,
  // or it is timed out? This is useful to measure bulk assignment
  // performance, but not needed in most use cases.
  private final boolean bulkAssignWaitTillAllAssigned;
  /**
   * Indicator that AssignmentManager has recovered the region states so
   * that ServerShutdownHandler can be fully enabled and re-assign regions
   * of dead servers. So that when re-assignment happens, AssignmentManager
   * has proper region states.
   *
   * Protected to ease testing.
   */
  protected final AtomicBoolean failoverCleanupDone = new AtomicBoolean(false);
  /** Is the TimeOutManagement activated **/
  private final boolean tomActivated;
  /**
   * A map to track the count a region fails to open in a row.
   * So that we don't try to open a region forever if the failure is
   * unrecoverable. We don't put this information in region states
   * because we don't expect this to happen frequently; we don't
   * want to copy this information over during each state transition either.
   */
  private final ConcurrentHashMap<String, AtomicInteger>
    failedOpenTracker = new ConcurrentHashMap<String, AtomicInteger>();
  // A flag to indicate if we are using ZK for region assignment
  private final boolean useZKForAssignment;
  // In case not using ZK for region assignment, region states
  // are persisted in meta with a state store
  private final RegionStateStore regionStateStore;
  /**
   * For testing only! Set to true to skip handling of split.
   */
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_SHOULD_BE_FINAL")
  public static boolean TEST_SKIP_SPLIT_HANDLING = false;
  /** Listeners that are called on assignment events. */
  private List<AssignmentListener> listeners = new CopyOnWriteArrayList<AssignmentListener>();
/**
* Constructs a new assignment manager.
*
* @param server
* @param serverManager
* @param catalogTracker
* @param service
* @throws KeeperException
* @throws IOException
*/
  public AssignmentManager(Server server, ServerManager serverManager,
      CatalogTracker catalogTracker, final LoadBalancer balancer,
      final ExecutorService service, MetricsMaster metricsMaster,
      final TableLockManager tableLockManager) throws KeeperException, IOException {
    super(server.getZooKeeper());
    this.server = server;
    this.serverManager = serverManager;
    this.catalogTracker = catalogTracker;
    this.executorService = service;
    this.regionStateStore = new RegionStateStore(server);
    this.regionsToReopen = Collections.synchronizedMap
      (new HashMap<String, HRegionInfo> ());
    Configuration conf = server.getConfiguration();
    // Only read favored nodes if using the favored nodes load balancer.
    // Note: exact class equality check, not an instanceof-style check.
    this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
      HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals(
      FavoredNodeLoadBalancer.class);
    this.tomActivated = conf.getBoolean(
      ASSIGNMENT_TIMEOUT_MANAGEMENT, DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT);
    // Timeout management: the monitor/updater threads are only created when
    // enabled; the else branch deliberately leaves the fields null, so callers
    // (e.g. startTimeOutMonitor) must gate on tomActivated.
    if (tomActivated){
      this.serversInUpdatingTimer = new ConcurrentSkipListSet<ServerName>();
      this.timeoutMonitor = new TimeoutMonitor(
        conf.getInt("hbase.master.assignment.timeoutmonitor.period", 30000),
        server, serverManager,
        conf.getInt(ASSIGNMENT_TIMEOUT, DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT));
      this.timerUpdater = new TimerUpdater(conf.getInt(
        "hbase.master.assignment.timerupdater.period", 10000), server);
      Threads.setDaemonThreadRunning(timerUpdater.getThread(),
        server.getServerName() + ".timerUpdater");
    } else {
      this.serversInUpdatingTimer = null;
      this.timeoutMonitor = null;
      this.timerUpdater = null;
    }
    this.zkTable = new ZKTable(this.watcher);
    // This is the max attempts, not retries, so it should be at least 1.
    this.maximumAttempts = Math.max(1,
      this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10));
    this.sleepTimeBeforeRetryingMetaAssignment = this.server.getConfiguration().getLong(
        "hbase.meta.assignment.retry.sleeptime", 1000l);
    this.balancer = balancer;
    int maxThreads = conf.getInt("hbase.assignment.threads.max", 30);
    this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool(
      maxThreads, 60L, TimeUnit.SECONDS, Threads.newDaemonThreadFactory("AM."));
    this.regionStates = new RegionStates(server, serverManager, regionStateStore);
    this.bulkAssignWaitTillAllAssigned =
      conf.getBoolean("hbase.bulk.assignment.waittillallassigned", false);
    this.bulkAssignThresholdRegions = conf.getInt("hbase.bulk.assignment.threshold.regions", 7);
    this.bulkAssignThresholdServers = conf.getInt("hbase.bulk.assignment.threshold.servers", 3);
    int workers = conf.getInt("hbase.assignment.zkevent.workers", 20);
    ThreadFactory threadFactory = Threads.newDaemonThreadFactory("AM.ZK.Worker");
    zkEventWorkers = Threads.getBoundedCachedThreadPool(workers, 60L,
        TimeUnit.SECONDS, threadFactory);
    this.tableLockManager = tableLockManager;
    this.metricsAssignmentManager = new MetricsAssignmentManager();
    useZKForAssignment = ConfigUtil.useZKForAssignment(conf);
  }
void startTimeOutMonitor() {
if (tomActivated) {
Threads.setDaemonThreadRunning(timeoutMonitor.getThread(), server.getServerName()
+ ".timeoutMonitor");
}
}
/**
* Add the listener to the notification list.
* @param listener The AssignmentListener to register
*/
public void registerListener(final AssignmentListener listener) {
this.listeners.add(listener);
}
/**
* Remove the listener from the notification list.
* @param listener The AssignmentListener to unregister
*/
public boolean unregisterListener(final AssignmentListener listener) {
return this.listeners.remove(listener);
}
/**
* @return Instance of ZKTable.
*/
public ZKTable getZKTable() {
// These are 'expensive' to make involving trip to zk ensemble so allow
// sharing.
return this.zkTable;
}
/**
* This SHOULD not be public. It is public now
* because of some unit tests.
*
* TODO: make it package private and keep RegionStates in the master package
*/
public RegionStates getRegionStates() {
return regionStates;
}
/**
* Used in some tests to mock up region state in meta
*/
@VisibleForTesting
RegionStateStore getRegionStateStore() {
return regionStateStore;
}
public RegionPlan getRegionReopenPlan(HRegionInfo hri) {
return new RegionPlan(hri, null, regionStates.getRegionServerOfRegion(hri));
}
/**
* Add a regionPlan for the specified region.
* @param encodedName
* @param plan
*/
public void addPlan(String encodedName, RegionPlan plan) {
synchronized (regionPlans) {
regionPlans.put(encodedName, plan);
}
}
/**
* Add a map of region plans.
*/
public void addPlans(Map<String, RegionPlan> plans) {
synchronized (regionPlans) {
regionPlans.putAll(plans);
}
}
/**
* Set the list of regions that will be reopened
* because of an update in table schema
*
* @param regions
* list of regions that should be tracked for reopen
*/
public void setRegionsToReopen(List <HRegionInfo> regions) {
for(HRegionInfo hri : regions) {
regionsToReopen.put(hri.getEncodedName(), hri);
}
}
/**
* Used by the client to identify if all regions have the schema updates
*
* @param tableName
* @return Pair indicating the status of the alter command
* @throws IOException
*/
public Pair<Integer, Integer> getReopenStatus(TableName tableName)
throws IOException {
List <HRegionInfo> hris =
MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true);
Integer pending = 0;
for (HRegionInfo hri : hris) {
String name = hri.getEncodedName();
// no lock concurrent access ok: sequential consistency respected.
if (regionsToReopen.containsKey(name)
|| regionStates.isRegionInTransition(name)) {
pending++;
}
}
return new Pair<Integer, Integer>(pending, hris.size());
}
/**
* Used by ServerShutdownHandler to make sure AssignmentManager has completed
* the failover cleanup before re-assigning regions of dead servers. So that
* when re-assignment happens, AssignmentManager has proper region states.
*/
public boolean isFailoverCleanupDone() {
return failoverCleanupDone.get();
}
/**
* To avoid racing with AM, external entities may need to lock a region,
* for example, when SSH checks what regions to skip re-assigning.
*/
public Lock acquireRegionLock(final String encodedName) {
return locker.acquireLock(encodedName);
}
/**
* Now, failover cleanup is completed. Notify server manager to
* process queued up dead servers processing, if any.
*/
void failoverCleanupDone() {
failoverCleanupDone.set(true);
serverManager.processQueuedDeadServers();
}
/**
* Called on startup.
* Figures whether a fresh cluster start of we are joining extant running cluster.
* @throws IOException
* @throws KeeperException
* @throws InterruptedException
*/
  void joinCluster() throws IOException,
      KeeperException, InterruptedException {
    long startTime = System.currentTimeMillis();
    // Concurrency note: In the below the accesses on regionsInTransition are
    // outside of a synchronization block where usually all accesses to RIT are
    // synchronized. The presumption is that in this case it is safe since this
    // method is being played by a single thread on startup.
    // TODO: Regions that have a null location and are not in regionsInTransitions
    // need to be handled.
    // Scan hbase:meta to build list of existing regions, servers, and assignment
    // Returns servers who have not checked in (assumed dead) and their regions
    Map<ServerName, List<HRegionInfo>> deadServers = rebuildUserRegions();
    // This method will assign all user regions if a clean server startup or
    // it will reconstruct master state and cleanup any leftovers from
    // previous master process.
    boolean failover = processDeadServersAndRegionsInTransition(deadServers);
    if (!useZKForAssignment) {
      // Not use ZK for assignment any more, remove the ZNode
      ZKUtil.deleteNodeRecursively(watcher, watcher.assignmentZNode);
    }
    // Resume any table disable/enable that was interrupted by the restart.
    recoverTableInDisablingState();
    recoverTableInEnablingState();
    LOG.info("Joined the cluster in " + (System.currentTimeMillis()
      - startTime) + "ms, failover=" + failover);
  }
/**
* Process all regions that are in transition in zookeeper and also
* processes the list of dead servers by scanning the META.
* Used by master joining an cluster. If we figure this is a clean cluster
* startup, will assign all user regions.
* @param deadServers
* Map of dead servers and their regions. Can be null.
* @throws KeeperException
* @throws IOException
* @throws InterruptedException
*/
  boolean processDeadServersAndRegionsInTransition(
      final Map<ServerName, List<HRegionInfo>> deadServers)
      throws KeeperException, IOException, InterruptedException {
    // Failover detection is a cascade of checks; as soon as one check trips,
    // 'failover' is latched true and later checks are skipped.
    List<String> nodes = ZKUtil.listChildrenNoWatch(watcher,
      watcher.assignmentZNode);
    if (nodes == null && useZKForAssignment) {
      String errorMessage = "Failed to get the children from ZK";
      server.abort(errorMessage, new IOException(errorMessage));
      return true; // Doesn't matter in this case
    }
    // Check 1: any known dead servers means a prior master was running.
    boolean failover = !serverManager.getDeadServers().isEmpty();
    if (failover) {
      // This may not be a failover actually, especially if meta is on this master.
      if (LOG.isDebugEnabled()) {
        LOG.debug("Found dead servers out on cluster " + serverManager.getDeadServers());
      }
    } else {
      // If any one region except meta is assigned, it's a failover.
      Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
      for (Map.Entry<HRegionInfo, ServerName> en : regionStates.getRegionAssignments().entrySet()) {
        HRegionInfo hri = en.getKey();
        if (!hri.isMetaTable() && onlineServers.contains(en.getValue())) {
          LOG.debug("Found " + hri + " out on cluster");
          failover = true;
          break;
        }
      }
    }
    if (!failover && nodes != null) {
      // If any one region except meta is in transition, it's a failover.
      for (String encodedName : nodes) {
        RegionState regionState = regionStates.getRegionState(encodedName);
        if (regionState != null && !regionState.getRegion().isMetaRegion()) {
          LOG.debug("Found " + regionState + " in RITs");
          failover = true;
          break;
        }
      }
    }
    if (!failover && !useZKForAssignment) {
      // If any region except meta is in transition on a live server, it's a failover.
      Map<String, RegionState> regionsInTransition = regionStates.getRegionsInTransition();
      if (!regionsInTransition.isEmpty()) {
        Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
        for (RegionState regionState : regionsInTransition.values()) {
          if (!regionState.getRegion().isMetaRegion()
              && onlineServers.contains(regionState.getServerName())) {
            LOG.debug("Found " + regionState + " in RITs");
            failover = true;
            break;
          }
        }
      }
    }
    if (!failover) {
      // If we get here, we have a full cluster restart. It is a failover only
      // if there are some HLogs are not split yet. For meta HLogs, they should have
      // been split already, if any. We can walk through those queued dead servers,
      // if they don't have any HLogs, this restart should be considered as a clean one
      Set<ServerName> queuedDeadServers = serverManager.getRequeuedDeadServers().keySet();
      if (!queuedDeadServers.isEmpty()) {
        Configuration conf = server.getConfiguration();
        Path rootdir = FSUtils.getRootDir(conf);
        FileSystem fs = rootdir.getFileSystem(conf);
        for (ServerName serverName : queuedDeadServers) {
          // A leftover (or still-splitting) WAL directory means unrecovered edits.
          Path logDir = new Path(rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
          Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
          if (fs.exists(logDir) || fs.exists(splitDir)) {
            LOG.debug("Found queued dead server " + serverName);
            failover = true;
            break;
          }
        }
        if (!failover) {
          // We figured that it's not a failover, so no need to
          // work on these re-queued dead servers any more.
          LOG.info("AM figured that it's not a failover and cleaned up " + queuedDeadServers.size()
            + " queued dead servers");
          serverManager.removeRequeuedDeadServers();
        }
      }
    }
    Set<TableName> disabledOrDisablingOrEnabling = null;
    if (!failover) {
      disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(watcher);
      disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(watcher));
      // Clean re/start, mark all user regions closed before reassignment
      // TODO -Hbase-11319
      regionStates.closeAllUserRegions(disabledOrDisablingOrEnabling);
    }
    // Now region states are restored
    regionStateStore.start();
    // If we found user regions out on cluster, its a failover.
    if (failover) {
      LOG.info("Found regions out on cluster or in RIT; presuming failover");
      // Process list of dead servers and regions in RIT.
      // See HBASE-4580 for more information.
      processDeadServersAndRecoverLostRegions(deadServers);
    }
    if (!failover && useZKForAssignment) {
      // Cleanup any existing ZK nodes and start watching
      ZKAssign.deleteAllNodes(watcher);
      ZKUtil.listChildrenAndWatchForNewChildren(this.watcher, this.watcher.assignmentZNode);
    }
    // Now we can safely claim failover cleanup completed and enable
    // ServerShutdownHandler for further processing. The nodes (below)
    // in transition, if any, are for regions not related to those
    // dead servers at all, and can be done in parallel to SSH.
    failoverCleanupDone();
    if (!failover) {
      // Fresh cluster startup.
      LOG.info("Clean cluster startup. Assigning user regions");
      assignAllUserRegions(disabledOrDisablingOrEnabling);
    }
    return failover;
  }
/**
* If region is up in zk in transition, then do fixup and block and wait until
* the region is assigned and out of transition. Used on startup for
* catalog regions.
* @param hri Region to look for.
* @return True if we processed a region in transition else false if region
* was not up in zk in transition.
* @throws InterruptedException
* @throws KeeperException
* @throws IOException
*/
  boolean processRegionInTransitionAndBlockUntilAssigned(final HRegionInfo hri)
      throws InterruptedException, KeeperException, IOException {
    String encodedRegionName = hri.getEncodedName();
    if (!processRegionInTransition(encodedRegionName, hri)) {
      return false; // The region is not in transition
    }
    LOG.debug("Waiting on " + HRegionInfo.prettyPrint(encodedRegionName));
    // Poll region state (100ms granularity) until the region leaves transition,
    // the server stops, or the hosting server goes offline.
    while (!this.server.isStopped() &&
        this.regionStates.isRegionInTransition(encodedRegionName)) {
      RegionState state = this.regionStates.getRegionTransitionState(encodedRegionName);
      if (state == null || !serverManager.isServerOnline(state.getServerName())) {
        // The region is not in transition, or not in transition on an online
        // server. Doesn't help to block here any more. Caller need to
        // verify the region is actually assigned.
        break;
      }
      this.regionStates.waitForUpdate(100);
    }
    return true;
  }
/**
* Process failover of new master for region <code>encodedRegionName</code>
* up in zookeeper.
* @param encodedRegionName Region to process failover for.
* @param regionInfo If null we'll go get it from meta table.
* @return True if we processed <code>regionInfo</code> as a RIT.
* @throws KeeperException
* @throws IOException
*/
  boolean processRegionInTransition(final String encodedRegionName,
      final HRegionInfo regionInfo) throws KeeperException, IOException {
    // We need a lock here to ensure that we will not put the same region twice
    // It has no reason to be a lock shared with the other operations.
    // We can do the lock on the region only, instead of a global lock: what we want to ensure
    // is that we don't have two threads working on the same region.
    Lock lock = locker.acquireLock(encodedRegionName);
    try {
      Stat stat = new Stat();
      // Read the region's transition znode; no znode means no transition.
      byte [] data = ZKAssign.getDataAndWatch(watcher, encodedRegionName, stat);
      if (data == null) return false;
      RegionTransition rt;
      try {
        rt = RegionTransition.parseFrom(data);
      } catch (DeserializationException e) {
        // Unparseable znode data is treated as "not in transition".
        LOG.warn("Failed parse znode data", e);
        return false;
      }
      HRegionInfo hri = regionInfo;
      if (hri == null) {
        // The region info is not passed in. We will try to find the region
        // from region states map/meta based on the encoded region name. But we
        // may not be able to find it. This is valid for online merge that
        // the region may have not been created if the merge is not completed.
        // Therefore, it is not in meta at master recovery time.
        hri = regionStates.getRegionInfo(rt.getRegionName());
        EventType et = rt.getEventType();
        if (hri == null && et != EventType.RS_ZK_REGION_MERGING
            && et != EventType.RS_ZK_REQUEST_REGION_MERGE) {
          LOG.warn("Couldn't find the region in recovering " + rt);
          return false;
        }
      }
      // Delegate to the state-machine handler with the znode version for CAS.
      return processRegionsInTransition(
        rt, hri, stat.getVersion());
    } finally {
      lock.unlock();
    }
  }
/**
 * This call is invoked only (1) master assign meta;
 * (2) during failover mode startup, zk assignment node processing.
 * The locker is set in the caller. It returns true if the region
 * is in transition for sure, false otherwise.
 *
 * It should be private but it is used by some test too.
 */
boolean processRegionsInTransition(
    final RegionTransition rt, final HRegionInfo regionInfo,
    final int expectedVersion) throws KeeperException {
  EventType et = rt.getEventType();
  // Get ServerName.  Could not be null.
  final ServerName sn = rt.getServerName();
  final byte[] regionName = rt.getRegionName();
  final String encodedName = HRegionInfo.encodeRegionName(regionName);
  final String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
  LOG.info("Processing " + prettyPrintedRegionName + " in state: " + et);

  if (regionStates.isRegionInTransition(encodedName)
      && (regionInfo.isMetaRegion() || !useZKForAssignment)) {
    LOG.info("Processed region " + prettyPrintedRegionName + " in state: "
      + et + ", does nothing since the region is already in transition "
      + regionStates.getRegionTransitionState(encodedName));
    // Just return
    return true;
  }
  if (!serverManager.isServerOnline(sn)) {
    // It was transitioning on a dead server, so it's closed now.
    // Force to OFFLINE and put it in transition, but not assign it
    // since log splitting for the dead server is not done yet.
    LOG.debug("RIT " + encodedName + " in state=" + rt.getEventType() +
      " was on deadserver; forcing offline");
    if (regionStates.isRegionOnline(regionInfo)) {
      // Meta could still show the region is assigned to the previous
      // server. If that server is online, when we reload the meta, the
      // region is put back to online, we need to offline it.
      regionStates.regionOffline(regionInfo);
      sendRegionClosedNotification(regionInfo);
    }
    // Put it back in transition so that SSH can re-assign it
    regionStates.updateRegionState(regionInfo, State.OFFLINE, sn);

    if (regionInfo.isMetaRegion()) {
      // If it's meta region, reset the meta location.
      // So that master knows the right meta region server.
      MetaRegionTracker.setMetaLocation(watcher, sn);
    } else {
      // No matter the previous server is online or offline,
      // we need to reset the last region server of the region.
      regionStates.setLastRegionServerOfRegion(sn, encodedName);
      // Make sure we know the server is dead.
      if (!serverManager.isServerDead(sn)) {
        serverManager.expireServer(sn);
      }
    }
    return false;
  }
  // Dispatch on the znode event type; each case re-inserts the region into
  // RIT and resumes the interrupted transition on behalf of the dead master.
  switch (et) {
    case M_ZK_REGION_CLOSING:
      // Insert into RIT & resend the query to the region server: may be the previous master
      // died before sending the query the first time.
      final RegionState rsClosing = regionStates.updateRegionState(rt, State.CLOSING);
      this.executorService.submit(
        new EventHandler(server, EventType.M_MASTER_RECOVERY) {
          @Override
          public void process() throws IOException {
            ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName());
            try {
              unassign(regionInfo, rsClosing, expectedVersion, null, useZKForAssignment, null);
              if (regionStates.isRegionOffline(regionInfo)) {
                assign(regionInfo, true);
              }
            } finally {
              lock.unlock();
            }
          }
        });
      break;

    case RS_ZK_REGION_CLOSED:
    case RS_ZK_REGION_FAILED_OPEN:
      // Region is closed, insert into RIT and handle it
      regionStates.updateRegionState(regionInfo, State.CLOSED, sn);
      invokeAssign(regionInfo);
      break;

    case M_ZK_REGION_OFFLINE:
      // Insert in RIT and resend to the regionserver
      regionStates.updateRegionState(rt, State.PENDING_OPEN);
      final RegionState rsOffline = regionStates.getRegionState(regionInfo);
      this.executorService.submit(
        new EventHandler(server, EventType.M_MASTER_RECOVERY) {
          @Override
          public void process() throws IOException {
            ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName());
            try {
              RegionPlan plan = new RegionPlan(regionInfo, null, sn);
              addPlan(encodedName, plan);
              assign(rsOffline, false, false);
            } finally {
              lock.unlock();
            }
          }
        });
      break;

    case RS_ZK_REGION_OPENING:
      regionStates.updateRegionState(rt, State.OPENING);
      break;

    case RS_ZK_REGION_OPENED:
      // Region is opened, insert into RIT and handle it
      // This could be done asynchronously, we would need then to acquire the lock in the
      // handler.
      regionStates.updateRegionState(rt, State.OPEN);
      new OpenedRegionHandler(server, this, regionInfo, sn, expectedVersion).process();
      break;
    case RS_ZK_REQUEST_REGION_SPLIT:
    case RS_ZK_REGION_SPLITTING:
    case RS_ZK_REGION_SPLIT:
      // Splitting region should be online. We could have skipped it during
      // user region rebuilding since we may consider the split is completed.
      // Put it in SPLITTING state to avoid complications.
      regionStates.regionOnline(regionInfo, sn);
      regionStates.updateRegionState(rt, State.SPLITTING);
      if (!handleRegionSplitting(
          rt, encodedName, prettyPrintedRegionName, sn)) {
        // Couldn't resume the split; clean up the stale znode.
        deleteSplittingNode(encodedName, sn);
      }
      break;
    case RS_ZK_REQUEST_REGION_MERGE:
    case RS_ZK_REGION_MERGING:
    case RS_ZK_REGION_MERGED:
      if (!handleRegionMerging(
          rt, encodedName, prettyPrintedRegionName, sn)) {
        // Couldn't resume the merge; clean up the stale znode.
        deleteMergingNode(encodedName, sn);
      }
      break;
    default:
      throw new IllegalStateException("Received region in state:" + et + " is not valid.");
  }
  LOG.info("Processed region " + prettyPrintedRegionName + " in state "
    + et + ", on " + (serverManager.isServerOnline(sn) ? "" : "dead ")
    + "server: " + sn);
  return true;
}
/**
 * When a region is closed, it should be removed from the regionsToReopen
 * @param hri HRegionInfo of the region which was closed
 */
public void removeClosedRegion(HRegionInfo hri) {
  // remove() returns the previous mapping; non-null means a reopen was pending.
  boolean wasPendingReopen = regionsToReopen.remove(hri.getEncodedName()) != null;
  if (wasPendingReopen) {
    LOG.debug("Removed region from reopening regions because it was closed");
  }
}
/**
 * Handles various states an unassigned node can be in.
 * <p>
 * Method is called when a state change is suspected for an unassigned node.
 * <p>
 * This deals with skipped transitions (we got a CLOSED but didn't see CLOSING
 * yet).
 * @param rt
 * @param expectedVersion
 */
void handleRegion(final RegionTransition rt, int expectedVersion) {
  if (rt == null) {
    LOG.warn("Unexpected NULL input for RegionTransition rt");
    return;
  }
  final ServerName sn = rt.getServerName();
  // Check if this is a special HBCK transition
  if (sn.equals(HBCK_CODE_SERVERNAME)) {
    handleHBCK(rt);
    return;
  }
  final long createTime = rt.getCreateTime();
  final byte[] regionName = rt.getRegionName();
  String encodedName = HRegionInfo.encodeRegionName(regionName);
  String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
  // Verify this is a known server
  if (!serverManager.isServerOnline(sn)
      && !ignoreStatesRSOffline.contains(rt.getEventType())) {
    LOG.warn("Attempted to handle region transition for server but " +
      "it is not online: " + prettyPrintedRegionName + ", " + rt);
    return;
  }

  RegionState regionState =
    regionStates.getRegionState(encodedName);
  long startTime = System.currentTimeMillis();
  if (LOG.isDebugEnabled()) {
    boolean lateEvent = createTime < (startTime - 15000);
    LOG.debug("Handling " + rt.getEventType() +
      ", server=" + sn + ", region=" +
      (prettyPrintedRegionName == null ? "null" : prettyPrintedRegionName) +
      (lateEvent ? ", which is more than 15 seconds late" : "") +
      ", current_state=" + regionState);
  }

  // We don't do anything for this event,
  // so separate it out, no need to lock/unlock anything
  if (rt.getEventType() == EventType.M_ZK_REGION_OFFLINE) {
    return;
  }

  // We need a lock on the region as we could update it
  Lock lock = locker.acquireLock(encodedName);
  try {
    // Re-read the state: it may have changed while we waited for the lock.
    RegionState latestState =
      regionStates.getRegionState(encodedName);
    if ((regionState == null && latestState != null)
        || (regionState != null && latestState == null)
        || (regionState != null && latestState != null
          && latestState.getState() != regionState.getState())) {
      LOG.warn("Region state changed from " + regionState + " to "
        + latestState + ", while acquiring lock");
    }
    long waitedTime = System.currentTimeMillis() - startTime;
    if (waitedTime > 5000) {
      LOG.warn("Took " + waitedTime + "ms to acquire the lock");
    }
    regionState = latestState;
    // Dispatch on the znode event type. Cases that see an unexpected
    // in-memory state log a warning and drop the event.
    switch (rt.getEventType()) {
      case RS_ZK_REQUEST_REGION_SPLIT:
      case RS_ZK_REGION_SPLITTING:
      case RS_ZK_REGION_SPLIT:
        if (!handleRegionSplitting(
            rt, encodedName, prettyPrintedRegionName, sn)) {
          // Couldn't process the split; clean up the stale znode.
          deleteSplittingNode(encodedName, sn);
        }
        break;

      case RS_ZK_REQUEST_REGION_MERGE:
      case RS_ZK_REGION_MERGING:
      case RS_ZK_REGION_MERGED:
        // Merged region is a new region, we can't find it in the region states now.
        // However, the two merging regions are not new. They should be in state for merging.
        if (!handleRegionMerging(
            rt, encodedName, prettyPrintedRegionName, sn)) {
          // Couldn't process the merge; clean up the stale znode.
          deleteMergingNode(encodedName, sn);
        }
        break;

      case M_ZK_REGION_CLOSING:
        // Should see CLOSING after we have asked it to CLOSE or additional
        // times after already being in state of CLOSING
        if (regionState == null
            || !regionState.isPendingCloseOrClosingOnServer(sn)) {
          LOG.warn("Received CLOSING for " + prettyPrintedRegionName
            + " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: "
            + regionStates.getRegionState(encodedName));
          return;
        }
        // Transition to CLOSING (or update stamp if already CLOSING)
        regionStates.updateRegionState(rt, State.CLOSING);
        break;

      case RS_ZK_REGION_CLOSED:
        // Should see CLOSED after CLOSING but possible after PENDING_CLOSE
        if (regionState == null
            || !regionState.isPendingCloseOrClosingOnServer(sn)) {
          LOG.warn("Received CLOSED for " + prettyPrintedRegionName
            + " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: "
            + regionStates.getRegionState(encodedName));
          return;
        }
        // Handle CLOSED by assigning elsewhere or stopping if a disable
        // If we got here all is good.  Need to update RegionState -- else
        // what follows will fail because not in expected state.
        new ClosedRegionHandler(server, this, regionState.getRegion()).process();
        updateClosedRegionHandlerTracker(regionState.getRegion());
        break;

      case RS_ZK_REGION_FAILED_OPEN:
        if (regionState == null
            || !regionState.isPendingOpenOrOpeningOnServer(sn)) {
          LOG.warn("Received FAILED_OPEN for " + prettyPrintedRegionName
            + " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
            + regionStates.getRegionState(encodedName));
          return;
        }
        AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
        if (failedOpenCount == null) {
          failedOpenCount = new AtomicInteger();
          // No need to use putIfAbsent, or extra synchronization since
          // this whole handleRegion block is locked on the encoded region
          // name, and failedOpenTracker is updated only in this block
          failedOpenTracker.put(encodedName, failedOpenCount);
        }
        if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
          // Too many failures: give up on this region until operator action.
          regionStates.updateRegionState(rt, State.FAILED_OPEN);
          // remove the tracking info to save memory, also reset
          // the count for next open initiative
          failedOpenTracker.remove(encodedName);
        } else {
          // Handle this the same as if it were opened and then closed.
          regionState = regionStates.updateRegionState(rt, State.CLOSED);
          if (regionState != null) {
            // When there are more than one region server a new RS is selected as the
            // destination and the same is updated in the regionplan. (HBASE-5546)
            try {
              getRegionPlan(regionState.getRegion(), sn, true);
              new ClosedRegionHandler(server, this, regionState.getRegion()).process();
            } catch (HBaseIOException e) {
              LOG.warn("Failed to get region plan", e);
            }
          }
        }
        break;

      case RS_ZK_REGION_OPENING:
        // Should see OPENING after we have asked it to OPEN or additional
        // times after already being in state of OPENING
        if (regionState == null
            || !regionState.isPendingOpenOrOpeningOnServer(sn)) {
          LOG.warn("Received OPENING for " + prettyPrintedRegionName
            + " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
            + regionStates.getRegionState(encodedName));
          return;
        }
        // Transition to OPENING (or update stamp if already OPENING)
        regionStates.updateRegionState(rt, State.OPENING);
        break;

      case RS_ZK_REGION_OPENED:
        // Should see OPENED after OPENING but possible after PENDING_OPEN.
        if (regionState == null
            || !regionState.isPendingOpenOrOpeningOnServer(sn)) {
          LOG.warn("Received OPENED for " + prettyPrintedRegionName
            + " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
            + regionStates.getRegionState(encodedName));
          if (regionState != null) {
            // Close it without updating the internal region states,
            // so as not to create double assignments in unlucky scenarios
            // mentioned in OpenRegionHandler#process
            unassign(regionState.getRegion(), null, -1, null, false, sn);
          }
          return;
        }
        // Handle OPENED by removing from transition and deleted zk node
        regionState = regionStates.updateRegionState(rt, State.OPEN);
        if (regionState != null) {
          failedOpenTracker.remove(encodedName); // reset the count, if any
          new OpenedRegionHandler(
            server, this, regionState.getRegion(), sn, expectedVersion).process();
          updateOpenedRegionHandlerTracker(regionState.getRegion());
        }
        break;

      default:
        throw new IllegalStateException("Received event is not valid.");
    }
  } finally {
    lock.unlock();
  }
}
//For unit tests only
boolean wasClosedHandlerCalled(HRegionInfo hri) {
  AtomicBoolean flag = closedRegionHandlerCalled.get(hri);
  if (flag == null) {
    return false;
  }
  // compareAndSet so stale values are never observed by the tests: this
  // returns true exactly once until the handler sets the flag back to true.
  return flag.compareAndSet(true, false);
}
//For unit tests only
boolean wasOpenedHandlerCalled(HRegionInfo hri) {
  AtomicBoolean flag = openedRegionHandlerCalled.get(hri);
  if (flag == null) {
    return false;
  }
  // compareAndSet so stale values are never observed by the tests: this
  // returns true exactly once until the handler sets the flag back to true.
  return flag.compareAndSet(true, false);
}
//For unit tests only
// Switches on tracking of Closed/OpenedRegionHandler invocations; in
// production both maps stay null and the update methods are no-ops.
void initializeHandlerTrackers() {
  closedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>();
  openedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>();
}
void updateClosedRegionHandlerTracker(HRegionInfo hri) {
  // The tracker map is non-null only when unit tests initialized it.
  if (closedRegionHandlerCalled == null) {
    return;
  }
  closedRegionHandlerCalled.put(hri, new AtomicBoolean(true));
}
void updateOpenedRegionHandlerTracker(HRegionInfo hri) {
  // The tracker map is non-null only when unit tests initialized it.
  if (openedRegionHandlerCalled == null) {
    return;
  }
  openedRegionHandlerCalled.put(hri, new AtomicBoolean(true));
}
// TODO: processFavoredNodes might throw an exception, for e.g., if the
// meta could not be contacted/updated. We need to see how seriously to treat
// this problem as. Should we fail the current assignment. We should be able
// to recover from this problem eventually (if the meta couldn't be updated
// things should work normally and eventually get fixed up).
/**
 * Computes the favored-node list for each of the given regions and persists
 * the mapping into the meta table. No-op unless favored-node assignment is
 * enabled in configuration.
 * @param regions regions whose favored node info should be (re)computed
 * @throws IOException if updating meta with the favored nodes info fails
 */
void processFavoredNodes(List<HRegionInfo> regions) throws IOException {
  if (!shouldAssignRegionsWithFavoredNodes) return;
  // The AM gets the favored nodes info for each region and updates the meta
  // table with that info. Presize the map: exactly one entry per region.
  Map<HRegionInfo, List<ServerName>> regionToFavoredNodes =
    new HashMap<HRegionInfo, List<ServerName>>(regions.size());
  for (HRegionInfo region : regions) {
    // Cast assumes the balancer is a FavoredNodeLoadBalancer whenever
    // shouldAssignRegionsWithFavoredNodes is set -- TODO(review) confirm.
    regionToFavoredNodes.put(region,
      ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region));
  }
  FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(
    regionToFavoredNodes, catalogTracker);
}
/**
 * Handle a ZK unassigned node transition triggered by HBCK repair tool.
 * <p>
 * This is handled in a separate code path because it breaks the normal rules.
 * @param rt region transition written to ZK by HBCK (carries the special
 *   HBCK_CODE_SERVERNAME; see handleRegion)
 */
private void handleHBCK(RegionTransition rt) {
  String encodedName = HRegionInfo.encodeRegionName(rt.getRegionName());
  LOG.info("Handling HBCK triggered transition=" + rt.getEventType() +
    ", server=" + rt.getServerName() + ", region=" +
    HRegionInfo.prettyPrint(encodedName));
  RegionState regionState = regionStates.getRegionTransitionState(encodedName);
  switch (rt.getEventType()) {
    case M_ZK_REGION_OFFLINE:
      HRegionInfo regionInfo;
      if (regionState != null) {
        regionInfo = regionState.getRegion();
      } else {
        // Not in RIT; look the region up in meta instead.
        try {
          byte [] name = rt.getRegionName();
          Pair<HRegionInfo, ServerName> p = MetaReader.getRegion(catalogTracker, name);
          if (p == null || p.getFirst() == null) {
            // Guard against the region being absent from meta; previously
            // this would NPE and kill the ZK event worker thread.
            LOG.warn("Region " + HRegionInfo.prettyPrint(encodedName)
              + " not found in hbase:meta, skipping HBCK repair operation");
            return;
          }
          regionInfo = p.getFirst();
        } catch (IOException e) {
          LOG.info("Exception reading hbase:meta doing HBCK repair operation", e);
          return;
        }
      }
      LOG.info("HBCK repair is triggering assignment of region=" +
        regionInfo.getRegionNameAsString());
      // trigger assign, node is already in OFFLINE so don't need to update ZK
      assign(regionInfo, false);
      break;

    default:
      LOG.warn("Received unexpected region state from HBCK: " + rt.toString());
      break;
  }
}
// ZooKeeper events
/**
 * New unassigned node has been created.
 *
 * <p>This happens when an RS begins the OPENING or CLOSING of a region by
 * creating an unassigned node.
 *
 * <p>When this happens we must:
 * <ol>
 *   <li>Watch the node for further events</li>
 *   <li>Read and handle the state in the node</li>
 * </ol>
 */
@Override
public void nodeCreated(String path) {
  // Delegate to the common path shared with nodeDataChanged.
  handleAssignmentEvent(path);
}
/**
 * Existing unassigned node has had data changed.
 *
 * <p>This happens when an RS transitions from OFFLINE to OPENING, or between
 * OPENING/OPENED and CLOSING/CLOSED.
 *
 * <p>When this happens we must:
 * <ol>
 *   <li>Watch the node for further events</li>
 *   <li>Read and handle the state in the node</li>
 * </ol>
 */
@Override
public void nodeDataChanged(String path) {
  // Delegate to the common path shared with nodeCreated.
  handleAssignmentEvent(path);
}
// We don't want to have two events on the same region managed simultaneously.
// For this reason, we need to wait if an event on the same region is currently in progress.
// So we track the region names of the events in progress, and we keep a waiting list.
// Guarded by synchronizing on itself; see zkEventWorkersSubmit.
private final Set<String> regionsInProgress = new HashSet<String>();

// In a LinkedHashMultimap, the put order is kept when we retrieve the collection back. We need
// this as we want the events to be managed in the same order as we received them.
// Guarded by synchronizing on itself; see zkEventWorkersSubmit.
private final LinkedHashMultimap <String, RegionRunnable>
  zkEventWorkerWaitingList = LinkedHashMultimap.create();
/**
 * A specific runnable that works only on a region. Submitted through
 * zkEventWorkersSubmit so that at most one task per region runs at a time.
 */
private interface RegionRunnable extends Runnable{
  /**
   * @return - the name of the region it works on.
   */
  String getRegionName();
}
/**
 * Submit a task, ensuring that there is only one task at a time that working on a given region.
 * Order is respected.
 */
protected void zkEventWorkersSubmit(final RegionRunnable regRunnable) {

  synchronized (regionsInProgress) {
    // If there is already a task with this region, we add it to the
    // waiting list and return.
    if (regionsInProgress.contains(regRunnable.getRegionName())) {
      synchronized (zkEventWorkerWaitingList){
        zkEventWorkerWaitingList.put(regRunnable.getRegionName(), regRunnable);
      }
      return;
    }

    // No event in progress on this region => we can submit a new task immediately.
    regionsInProgress.add(regRunnable.getRegionName());
    zkEventWorkers.submit(new Runnable() {
      @Override
      public void run() {
        try {
          regRunnable.run();
        } finally {
          // now that we have finished, let's see if there is an event for the same region in the
          // waiting list. If it's the case, we can now submit it to the pool.
          synchronized (regionsInProgress) {
            regionsInProgress.remove(regRunnable.getRegionName());
            synchronized (zkEventWorkerWaitingList) {
              java.util.Set<RegionRunnable> waiting = zkEventWorkerWaitingList.get(
                regRunnable.getRegionName());
              if (!waiting.isEmpty()) {
                // We want the first object only. The only way to get it is through an iterator.
                RegionRunnable toSubmit = waiting.iterator().next();
                zkEventWorkerWaitingList.remove(toSubmit.getRegionName(), toSubmit);
                // Re-submit recursively; it will either run or be re-queued.
                zkEventWorkersSubmit(toSubmit);
              }
            }
          }
        }
      }
    });
  }
}
/**
 * Unassigned znode was deleted. For a region in transition (or a MergingNew
 * region), the deletion marks completion of the open/split/merge: bring the
 * region(s) online, and if the table is disabled/disabling, trigger a close.
 * @param path znode path that was deleted
 */
@Override
public void nodeDeleted(final String path) {
  if (path.startsWith(watcher.assignmentZNode)) {
    final String regionName = ZKAssign.getRegionName(watcher, path);
    zkEventWorkersSubmit(new RegionRunnable() {
      @Override
      public String getRegionName() {
        return regionName;
      }

      @Override
      public void run() {
        Lock lock = locker.acquireLock(regionName);
        try {
          RegionState rs = regionStates.getRegionTransitionState(regionName);
          if (rs == null) {
            rs = regionStates.getRegionState(regionName);
            if (rs == null || !rs.isMergingNew()) {
              // MergingNew is an offline state
              return;
            }
          }

          HRegionInfo regionInfo = rs.getRegion();
          String regionNameStr = regionInfo.getRegionNameAsString();
          LOG.debug("Znode " + regionNameStr + " deleted, state: " + rs);
          boolean disabled = getZKTable().isDisablingOrDisabledTable(regionInfo.getTable());
          ServerName serverName = rs.getServerName();
          if (serverManager.isServerOnline(serverName)) {
            if (rs.isOnServer(serverName)
                && (rs.isOpened() || rs.isSplitting())) {
              regionOnline(regionInfo, serverName);
              if (disabled) {
                // if server is offline, no hurt to unassign again
                // FIX: added the missing space before "but" in the log message.
                LOG.info("Opened " + regionNameStr
                  + " but this table is disabled, triggering close of region");
                unassign(regionInfo);
              }
            } else if (rs.isMergingNew()) {
              // The merged region's znode is gone: bring the two parent
              // regions back online (they were offlined for the merge).
              synchronized (regionStates) {
                String p = regionInfo.getEncodedName();
                PairOfSameType<HRegionInfo> regions = mergingRegions.get(p);
                if (regions != null) {
                  onlineMergingRegion(disabled, regions.getFirst(), serverName);
                  onlineMergingRegion(disabled, regions.getSecond(), serverName);
                }
              }
            }
          }
        } finally {
          lock.unlock();
        }
      }

      // Puts one parent of an aborted merge back online; closes it again
      // if its table is disabled/disabling.
      private void onlineMergingRegion(boolean disabled,
          final HRegionInfo hri, final ServerName serverName) {
        RegionState regionState = regionStates.getRegionState(hri);
        if (regionState != null && regionState.isMerging()
            && regionState.isOnServer(serverName)) {
          regionOnline(regionState.getRegion(), serverName);
          if (disabled) {
            unassign(hri);
          }
        }
      }
    });
  }
}
/**
 * New unassigned node has been created.
 *
 * <p>This happens when an RS begins the OPENING, SPLITTING or CLOSING of a
 * region by creating a znode.
 *
 * <p>When this happens we must:
 * <ol>
 *   <li>Watch the node for further children changed events</li>
 *   <li>Watch all new children for changed events</li>
 * </ol>
 */
@Override
public void nodeChildrenChanged(String path) {
  if (path.equals(watcher.assignmentZNode)) {
    zkEventWorkers.submit(new Runnable() {
      @Override
      public void run() {
        try {
          // Just make sure we see the changes for the new znodes
          List<String> children =
            ZKUtil.listChildrenAndWatchForNewChildren(
              watcher, watcher.assignmentZNode);
          if (children != null) {
            Stat stat = new Stat();
            for (String child : children) {
              // if region is in transition, we already have a watch
              // on it, so no need to watch it again. So, as I know for now,
              // this is needed to watch splitting nodes only.
              if (!regionStates.isRegionInTransition(child)) {
                ZKAssign.getDataAndWatch(watcher, child, stat);
              }
            }
          }
        } catch (KeeperException e) {
          server.abort("Unexpected ZK exception reading unassigned children", e);
        }
      }
    });
  }
}
/**
 * Marks the region as online.  Removes it from regions in transition and
 * updates the in-memory assignment information.
 * <p>
 * Used when a region has been successfully opened on a region server.
 * @param regionInfo
 * @param sn
 */
void regionOnline(HRegionInfo regionInfo, ServerName sn) {
  // Delegate with NO_SEQNUM when the open sequence number is unknown.
  regionOnline(regionInfo, sn, HConstants.NO_SEQNUM);
}
// Marks the region online with the given open sequence number, updating
// counters, in-memory state, the balancer, and notifying listeners.
void regionOnline(HRegionInfo regionInfo, ServerName sn, long openSeqNum) {
  numRegionsOpened.incrementAndGet();
  regionStates.regionOnline(regionInfo, sn, openSeqNum);

  // Remove plan if one.
  clearRegionPlan(regionInfo);
  // Add the server to serversInUpdatingTimer
  addToServersInUpdatingTimer(sn);
  balancer.regionOnline(regionInfo, sn);

  // Tell our listeners that a region was opened
  sendRegionOpenedNotification(regionInfo, sn);
}
/**
 * Pass the assignment event to a worker for processing.
 * Each worker is a single thread executor service.  The reason
 * for just one thread is to make sure all events for a given
 * region are processed in order.
 *
 * @param path
 */
private void handleAssignmentEvent(final String path) {
  if (path.startsWith(watcher.assignmentZNode)) {
    final String regionName = ZKAssign.getRegionName(watcher, path);

    zkEventWorkersSubmit(new RegionRunnable() {
      @Override
      public String getRegionName() {
        return regionName;
      }

      @Override
      public void run() {
        try {
          // Re-read the node data (sets a fresh watch at the same time).
          Stat stat = new Stat();
          byte [] data = ZKAssign.getDataAndWatch(watcher, path, stat);
          if (data == null) return; // Node already deleted; nothing to do.

          RegionTransition rt = RegionTransition.parseFrom(data);
          handleRegion(rt, stat.getVersion());
        } catch (KeeperException e) {
          server.abort("Unexpected ZK exception reading unassigned node data", e);
        } catch (DeserializationException e) {
          server.abort("Unexpected exception deserializing node data", e);
        }
      }
    });
  }
}
/**
 * Add the server to the set serversInUpdatingTimer, then {@link TimerUpdater}
 * will update timers for this server in background
 * @param sn
 */
private void addToServersInUpdatingTimer(final ServerName sn) {
  // Timeout monitoring disabled: nothing to track.
  if (!tomActivated) {
    return;
  }
  this.serversInUpdatingTimer.add(sn);
}
/**
 * Touch timers for all regions in transition that have the passed
 * <code>sn</code> in common.
 * Call this method whenever a server checks in.  Doing so helps the case where
 * a new regionserver has joined the cluster and its been given 1k regions to
 * open.  If this method is tickled every time the region reports in a
 * successful open then the 1k-th region won't be timed out just because its
 * sitting behind the open of 999 other regions.  This method is NOT used
 * as part of bulk assign -- there we have a different mechanism for extending
 * the regions in transition timer (we turn it off temporarily -- because
 * there is no regionplan involved when bulk assigning.
 * @param sn
 */
private void updateTimers(final ServerName sn) {
  Preconditions.checkState(tomActivated);
  if (sn == null) return;

  // This loop could be expensive.
  // First make a copy of current regionPlan rather than hold sync while
  // looping because holding sync can cause deadlock.  Its ok in this loop
  // if the Map we're going against is a little stale
  List<Map.Entry<String, RegionPlan>> rps;
  synchronized(this.regionPlans) {
    rps = new ArrayList<Map.Entry<String, RegionPlan>>(regionPlans.entrySet());
  }

  // Refresh the timestamp of every in-transition region destined for sn.
  for (Map.Entry<String, RegionPlan> e : rps) {
    if (e.getValue() != null && e.getKey() != null && sn.equals(e.getValue().getDestination())) {
      RegionState regionState = regionStates.getRegionTransitionState(e.getKey());
      if (regionState != null) {
        regionState.updateTimestampToNow();
      }
    }
  }
}
/**
 * Marks the region as offline.  Removes it from regions in transition and
 * removes in-memory assignment information.
 * <p>
 * Used when a region has been closed and should remain closed.
 * @param regionInfo
 */
public void regionOffline(final HRegionInfo regionInfo) {
  // Null state means: use the default OFFLINE target state.
  regionOffline(regionInfo, null);
}
// Offlines a region of a disabled/disabling table: the region must stay
// closed, so when ZK drives assignment, its CLOSED/OFFLINE znode is removed
// instead of being reassigned.
public void offlineDisabledRegion(HRegionInfo regionInfo) {
  if (useZKForAssignment) {
    final String encodedName = regionInfo.getEncodedName();
    LOG.debug("Table being disabled so deleting ZK node and removing from "
      + "regions in transition, skipping assignment of region "
      + regionInfo.getRegionNameAsString());
    deleteNodeInStates(encodedName, "closed", null,
      EventType.RS_ZK_REGION_CLOSED, EventType.M_ZK_REGION_OFFLINE);
  }
  regionOffline(regionInfo);
}
// Assignment methods

/**
 * Assigns the specified region.
 * <p>
 * If a RegionPlan is available with a valid destination then it will be used
 * to determine what server region is assigned to.  If no RegionPlan is
 * available, region will be assigned to a random available server.
 * <p>
 * Updates the RegionState and sends the OPEN RPC.
 * <p>
 * This will only succeed if the region is in transition and in a CLOSED or
 * OFFLINE state or not in transition (in-memory not zk), and of course, the
 * chosen server is up and running (It may have just crashed!).  If the
 * in-memory checks pass, the zk node is forced to OFFLINE before assigning.
 *
 * @param region server to be assigned
 * @param setOfflineInZK whether ZK node should be created/transitioned to an
 *                       OFFLINE state before assigning the region
 */
public void assign(HRegionInfo region, boolean setOfflineInZK) {
  // forceNewPlan=false: reuse an existing RegionPlan when there is one.
  assign(region, setOfflineInZK, false);
}
/**
 * Use care with forceNewPlan. It could cause double assignment.
 */
public void assign(HRegionInfo region,
    boolean setOfflineInZK, boolean forceNewPlan) {
  // Skip regions of disabled/disabling tables that are in transition.
  if (isDisabledorDisablingRegionInRIT(region)) {
    return;
  }
  if (this.serverManager.isClusterShutdown()) {
    LOG.info("Cluster shutdown is set; skipping assign of " +
      region.getRegionNameAsString());
    return;
  }
  String encodedName = region.getEncodedName();
  // Per-region lock: serialize with other operations on this same region.
  Lock lock = locker.acquireLock(encodedName);
  try {
    RegionState state = forceRegionStateToOffline(region, forceNewPlan);
    if (state != null) {
      if (regionStates.wasRegionOnDeadServer(encodedName)) {
        // SSH will re-assign once the dead server's logs are split.
        LOG.info("Skip assigning " + region.getRegionNameAsString()
          + ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
          + " is dead but not processed yet");
        return;
      }
      assign(state, setOfflineInZK && useZKForAssignment, forceNewPlan);
    }
  } finally {
    lock.unlock();
  }
}
/**
* Bulk assign regions to <code>destination</code>.
* @param destination
* @param regions Regions to assign.
* @return true if successful
*/
boolean assign(final ServerName destination, final List<HRegionInfo> regions) {
long startTime = EnvironmentEdgeManager.currentTimeMillis();
try {
int regionCount = regions.size();
if (regionCount == 0) {
return true;
}
LOG.debug("Assigning " + regionCount + " region(s) to " + destination.toString());
Set<String> encodedNames = new HashSet<String>(regionCount);
for (HRegionInfo region : regions) {
encodedNames.add(region.getEncodedName());
}
List<HRegionInfo> failedToOpenRegions = new ArrayList<HRegionInfo>();
Map<String, Lock> locks = locker.acquireLocks(encodedNames);
try {
AtomicInteger counter = new AtomicInteger(0);
Map<String, Integer> offlineNodesVersions = new ConcurrentHashMap<String, Integer>();
OfflineCallback cb = new OfflineCallback(
watcher, destination, counter, offlineNodesVersions);
Map<String, RegionPlan> plans = new HashMap<String, RegionPlan>(regions.size());
List<RegionState> states = new ArrayList<RegionState>(regions.size());
for (HRegionInfo region : regions) {
String encodedName = region.getEncodedName();
if (!isDisabledorDisablingRegionInRIT(region)) {
RegionState state = forceRegionStateToOffline(region, false);
boolean onDeadServer = false;
if (state != null) {
if (regionStates.wasRegionOnDeadServer(encodedName)) {
LOG.info("Skip assigning " + region.getRegionNameAsString()
+ ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
+ " is dead but not processed yet");
onDeadServer = true;
} else if (!useZKForAssignment
|| asyncSetOfflineInZooKeeper(state, cb, destination)) {
RegionPlan plan = new RegionPlan(region, state.getServerName(), destination);
plans.put(encodedName, plan);
states.add(state);
continue;
}
}
// Reassign if the region wasn't on a dead server
if (!onDeadServer) {
LOG.info("failed to force region state to offline or "
+ "failed to set it offline in ZK, will reassign later: " + region);
failedToOpenRegions.add(region); // assign individually later
}
}
// Release the lock, this region is excluded from bulk assign because
// we can't update its state, or set its znode to offline.
Lock lock = locks.remove(encodedName);
lock.unlock();
}
if (useZKForAssignment) {
// Wait until all unassigned nodes have been put up and watchers set.
int total = states.size();
for (int oldCounter = 0; !server.isStopped();) {
int count = counter.get();
if (oldCounter != count) {
LOG.info(destination.toString() + " unassigned znodes=" + count + " of total="
+ total);
oldCounter = count;
}
if (count >= total) break;
Threads.sleep(5);
}
}
if (server.isStopped()) {
return false;
}
// Add region plans, so we can updateTimers when one region is opened so
// that unnecessary timeout on RIT is reduced.
this.addPlans(plans);
List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos =
new ArrayList<Triple<HRegionInfo, Integer, List<ServerName>>>(states.size());
for (RegionState state: states) {
HRegionInfo region = state.getRegion();
String encodedRegionName = region.getEncodedName();
Integer nodeVersion = offlineNodesVersions.get(encodedRegionName);
if (useZKForAssignment && (nodeVersion == null || nodeVersion == -1)) {
LOG.warn("failed to offline in zookeeper: " + region);
failedToOpenRegions.add(region); // assign individually later
Lock lock = locks.remove(encodedRegionName);
lock.unlock();
} else {
regionStates.updateRegionState(
region, State.PENDING_OPEN, destination);
List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
if (this.shouldAssignRegionsWithFavoredNodes) {
favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
}
regionOpenInfos.add(new Triple<HRegionInfo, Integer, List<ServerName>>(
region, nodeVersion, favoredNodes));
}
}
// Move on to open regions.
try {
// Send OPEN RPC. If it fails on a IOE or RemoteException,
// regions will be assigned individually.
long maxWaitTime = System.currentTimeMillis() +
this.server.getConfiguration().
getLong("hbase.regionserver.rpc.startup.waittime", 60000);
for (int i = 1; i <= maximumAttempts && !server.isStopped(); i++) {
try {
// regionOpenInfos is empty if all regions are in failedToOpenRegions list
if (regionOpenInfos.isEmpty()) {
break;
}
List<RegionOpeningState> regionOpeningStateList = serverManager
.sendRegionOpen(destination, regionOpenInfos);
if (regionOpeningStateList == null) {
// Failed getting RPC connection to this server
return false;
}
for (int k = 0, n = regionOpeningStateList.size(); k < n; k++) {
RegionOpeningState openingState = regionOpeningStateList.get(k);
if (openingState != RegionOpeningState.OPENED) {
HRegionInfo region = regionOpenInfos.get(k).getFirst();
if (openingState == RegionOpeningState.ALREADY_OPENED) {
processAlreadyOpenedRegion(region, destination);
} else if (openingState == RegionOpeningState.FAILED_OPENING) {
// Failed opening this region, reassign it later
failedToOpenRegions.add(region);
} else {
LOG.warn("THIS SHOULD NOT HAPPEN: unknown opening state "
+ openingState + " in assigning region " + region);
}
}
}
break;
} catch (IOException e) {
if (e instanceof RemoteException) {
e = ((RemoteException)e).unwrapRemoteException();
}
if (e instanceof RegionServerStoppedException) {
LOG.warn("The region server was shut down, ", e);
// No need to retry, the region server is a goner.
return false;
} else if (e instanceof ServerNotRunningYetException) {
long now = System.currentTimeMillis();
if (now < maxWaitTime) {
LOG.debug("Server is not yet up; waiting up to " +
(maxWaitTime - now) + "ms", e);
Thread.sleep(100);
i--; // reset the try count
continue;
}
} else if (e instanceof java.net.SocketTimeoutException
&& this.serverManager.isServerOnline(destination)) {
// In case socket is timed out and the region server is still online,
// the openRegion RPC could have been accepted by the server and
// just the response didn't go through. So we will retry to
// open the region on the same server.
if (LOG.isDebugEnabled()) {
LOG.debug("Bulk assigner openRegion() to " + destination
+ " has timed out, but the regions might"
+ " already be opened on it.", e);
}
// wait and reset the re-try count, server might be just busy.
Thread.sleep(100);
i--;
continue;
}
throw e;
}
}
} catch (IOException e) {
// Can be a socket timeout, EOF, NoRouteToHost, etc
LOG.info("Unable to communicate with " + destination
+ " in order to assign regions, ", e);
return false;
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} finally {
for (Lock lock : locks.values()) {
lock.unlock();
}
}
if (!failedToOpenRegions.isEmpty()) {
for (HRegionInfo region : failedToOpenRegions) {
if (!regionStates.isRegionOnline(region)) {
invokeAssign(region);
}
}
}
LOG.debug("Bulk assigning done for " + destination);
return true;
} finally {
metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
}
}
/**
 * Send CLOSE RPC if the server is online, otherwise, offline the region.
 *
 * The RPC will be sent only to the region server found in the region state
 * if it is passed in, otherwise, to the src server specified. If region
 * state is not specified, we don't update region state at all, instead
 * we just send the RPC call. This is useful for some cleanup without
 * messing around the region states (see handleRegion, on region opened
 * on an unexpected server scenario, for an example)
 *
 * @param region region to close
 * @param state in-memory state of the region, or null to skip all state updates
 * @param versionOfClosingNode expected version of the region's CLOSING znode
 * @param dest intended destination server after the close (may be null)
 * @param transitionInZK whether the region server should transition the znode
 * @param src server to send the RPC to when {@code state} is null
 */
private void unassign(final HRegionInfo region,
    final RegionState state, final int versionOfClosingNode,
    final ServerName dest, final boolean transitionInZK,
    final ServerName src) {
  // Prefer the server recorded in the region state over the caller's hint.
  ServerName server = src;
  if (state != null) {
    server = state.getServerName();
  }
  long maxWaitTime = -1;
  for (int i = 1; i <= this.maximumAttempts; i++) {
    if (this.server.isStopped() || this.server.isAborted()) {
      LOG.debug("Server stopped/aborted; skipping unassign of " + region);
      return;
    }
    // ClosedRegionhandler can remove the server from this.regions
    if (!serverManager.isServerOnline(server)) {
      // Hosting server is dead: nothing to RPC; just clean up locally.
      LOG.debug("Offline " + region.getRegionNameAsString()
        + ", no need to unassign since it's on a dead server: " + server);
      if (transitionInZK) {
        // delete the node. if no node exists need not bother.
        deleteClosingOrClosedNode(region, server);
      }
      if (state != null) {
        regionOffline(region);
      }
      return;
    }
    try {
      // Send CLOSE RPC
      if (serverManager.sendRegionClose(server, region,
        versionOfClosingNode, dest, transitionInZK)) {
        LOG.debug("Sent CLOSE to " + server + " for region " +
          region.getRegionNameAsString());
        if (useZKForAssignment && !transitionInZK && state != null) {
          // Retry to make sure the region is
          // closed so as to avoid double assignment.
          unassign(region, state, versionOfClosingNode,
            dest, transitionInZK, src);
        }
        return;
      }
      // This never happens. Currently regionserver close always return true.
      // Todo; this can now happen (0.96) if there is an exception in a coprocessor
      LOG.warn("Server " + server + " region CLOSE RPC returned false for " +
        region.getRegionNameAsString());
    } catch (Throwable t) {
      if (t instanceof RemoteException) {
        t = ((RemoteException)t).unwrapRemoteException();
      }
      boolean logRetries = true;
      if (t instanceof NotServingRegionException
          || t instanceof RegionServerStoppedException
          || t instanceof ServerNotRunningYetException) {
        // Region is no longer (or not yet) hosted there; treat as closed.
        LOG.debug("Offline " + region.getRegionNameAsString()
          + ", it's not any more on " + server, t);
        if (transitionInZK) {
          deleteClosingOrClosedNode(region, server);
        }
        if (state != null) {
          regionOffline(region);
        }
        return;
      } else if ((t instanceof FailedServerException) || (state != null &&
          t instanceof RegionAlreadyInTransitionException)) {
        long sleepTime = 0;
        Configuration conf = this.server.getConfiguration();
        if(t instanceof FailedServerException) {
          // Back off until the failed-server cache entry expires.
          sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
            RpcClient.FAILED_SERVER_EXPIRY_DEFAULT);
        } else {
          // RS is already processing this region, only need to update the timestamp
          LOG.debug("update " + state + " the timestamp.");
          state.updateTimestampToNow();
          if (maxWaitTime < 0) {
            maxWaitTime =
              EnvironmentEdgeManager.currentTimeMillis()
                + conf.getLong(ALREADY_IN_TRANSITION_WAITTIME,
                  DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
          }
          long now = EnvironmentEdgeManager.currentTimeMillis();
          if (now < maxWaitTime) {
            LOG.debug("Region is already in transition; "
              + "waiting up to " + (maxWaitTime - now) + "ms", t);
            sleepTime = 100;
            i--; // reset the try count
            logRetries = false;
          }
        }
        try {
          if (sleepTime > 0) {
            Thread.sleep(sleepTime);
          }
        } catch (InterruptedException ie) {
          LOG.warn("Failed to unassign "
            + region.getRegionNameAsString() + " since interrupted", ie);
          // Restore the interrupt flag for callers up the stack.
          Thread.currentThread().interrupt();
          if (!tomActivated && state != null) {
            regionStates.updateRegionState(region, State.FAILED_CLOSE);
          }
          return;
        }
      }
      if (logRetries) {
        LOG.info("Server " + server + " returned " + t + " for "
          + region.getRegionNameAsString() + ", try=" + i
          + " of " + this.maximumAttempts, t);
        // Presume retry or server will expire.
      }
    }
  }
  // Run out of attempts
  if (!tomActivated && state != null) {
    regionStates.updateRegionState(region, State.FAILED_CLOSE);
  }
}
/**
 * Set region to OFFLINE unless it is opening and forceNewPlan is false.
 *
 * Note: the switch below relies on deliberate case fall-through; each
 * fall-through point is marked with a comment.
 *
 * @param region region whose state should be forced offline
 * @param forceNewPlan if true, in-transition regions are unassigned first
 * @return the (possibly updated) region state, or null when the region
 *         should not be assigned right now
 */
private RegionState forceRegionStateToOffline(
    final HRegionInfo region, final boolean forceNewPlan) {
  RegionState state = regionStates.getRegionState(region);
  if (state == null) {
    LOG.warn("Assigning a region not in region states: " + region);
    state = regionStates.createRegionState(region);
  }
  ServerName sn = state.getServerName();
  if (forceNewPlan && LOG.isDebugEnabled()) {
    LOG.debug("Force region state offline " + state);
  }
  switch (state.getState()) {
  case OPEN:
  case OPENING:
  case PENDING_OPEN:
  case CLOSING:
  case PENDING_CLOSE:
    if (!forceNewPlan) {
      LOG.debug("Skip assigning " +
        region + ", it is already " + state);
      return null;
    }
    // forceNewPlan is set: deliberate fall through to unassign below
  case FAILED_CLOSE:
  case FAILED_OPEN:
    unassign(region, state, -1, null, false, null);
    state = regionStates.getRegionState(region);
    if (state.isFailedClose()) {
      // If we can't close the region, we can't re-assign
      // it so as to avoid possible double assignment/data loss.
      LOG.info("Skip assigning " +
        region + ", we couldn't close it: " + state);
      return null;
    }
    // deliberate fall through: region is closed or offline now
  case OFFLINE:
    // This region could have been open on this server
    // for a while. If the server is dead and not processed
    // yet, we can move on only if the meta shows the
    // region is not on this server actually, or on a server
    // not dead, or dead and processed already.
    // In case not using ZK, we don't need this check because
    // we have the latest info in memory, and the caller
    // will do another round checking any way.
    if (useZKForAssignment
        && regionStates.isServerDeadAndNotProcessed(sn)
        && wasRegionOnDeadServerByMeta(region, sn)) {
      if (!regionStates.isRegionInTransition(region)) {
        LOG.info("Updating the state to " + State.OFFLINE + " to allow to be reassigned by SSH");
        regionStates.updateRegionState(region, State.OFFLINE);
      }
      LOG.info("Skip assigning " + region.getRegionNameAsString()
        + ", it is on a dead but not processed yet server: " + sn);
      return null;
    }
    // deliberate fall through
  case CLOSED:
    break;
  default:
    LOG.error("Trying to assign region " + region
      + ", which is " + state);
    return null;
  }
  return state;
}
/**
 * Consults hbase:meta (or the tracked meta location, for the meta region
 * itself) to decide whether the region's last host is a dead server whose
 * shutdown has not been processed yet.
 *
 * @param region region to look up
 * @param sn fallback server to check if meta cannot be consulted
 * @return true if the region's last known host is dead and unprocessed
 */
private boolean wasRegionOnDeadServerByMeta(
    final HRegionInfo region, final ServerName sn) {
  try {
    if (region.isMetaRegion()) {
      // Meta is not listed in hbase:meta; use the tracked location instead.
      // NOTE(review): assumes getMetaLocation() is non-null here -- confirm.
      ServerName server = catalogTracker.getMetaLocation();
      return regionStates.isServerDeadAndNotProcessed(server);
    }
    // 'server' here is the master's Server field; the locals named 'server'
    // above and inside the try below are in narrower scopes.
    while (!server.isStopped()) {
      try {
        catalogTracker.waitForMeta();
        Result r = MetaReader.getRegionResult(catalogTracker, region.getRegionName());
        if (r == null || r.isEmpty()) return false;
        ServerName server = HRegionInfo.getServerName(r);
        return regionStates.isServerDeadAndNotProcessed(server);
      } catch (IOException ioe) {
        // Meta read failed; loop and retry until the master stops.
        LOG.info("Received exception accessing hbase:meta during force assign "
          + region.getRegionNameAsString() + ", retrying", ioe);
      }
    }
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    LOG.info("Interrupted accessing hbase:meta", e);
  }
  // Call is interrupted or server is stopped.
  return regionStates.isServerDeadAndNotProcessed(sn);
}
/**
 * Assigns the region in {@code state} to a server, retrying up to
 * {@code maximumAttempts} times across plan failures, ZK failures and
 * RPC failures. Caller must hold lock on the passed <code>state</code>
 * object.
 *
 * @param state region state to assign (also supplies the region)
 * @param setOfflineInZK whether to push the region's znode to OFFLINE first
 * @param forceNewPlan whether to discard any existing assignment plan
 */
private void assign(RegionState state,
    final boolean setOfflineInZK, final boolean forceNewPlan) {
  long startTime = EnvironmentEdgeManager.currentTimeMillis();
  try {
    Configuration conf = server.getConfiguration();
    RegionState currentState = state;
    int versionOfOfflineNode = -1;
    RegionPlan plan = null;
    long maxWaitTime = -1;
    HRegionInfo region = state.getRegion();
    RegionOpeningState regionOpenState;
    Throwable previousException = null;
    for (int i = 1; i <= maximumAttempts; i++) {
      if (server.isStopped() || server.isAborted()) {
        LOG.info("Skip assigning " + region.getRegionNameAsString()
          + ", the server is stopped/aborted");
        return;
      }
      if (plan == null) { // Get a server for the region at first
        try {
          plan = getRegionPlan(region, forceNewPlan);
        } catch (HBaseIOException e) {
          LOG.warn("Failed to get region plan", e);
        }
      }
      if (plan == null) {
        LOG.warn("Unable to determine a plan to assign " + region);
        if (tomActivated){
          this.timeoutMonitor.setAllRegionServersOffline(true);
        } else {
          if (region.isMetaRegion()) {
            // Meta must be assigned; keep retrying indefinitely by
            // resetting the attempt counter after a sleep.
            try {
              Thread.sleep(this.sleepTimeBeforeRetryingMetaAssignment);
              if (i == maximumAttempts) i = 1;
              continue;
            } catch (InterruptedException e) {
              LOG.error("Got exception while waiting for hbase:meta assignment");
              Thread.currentThread().interrupt();
            }
          }
          regionStates.updateRegionState(region, State.FAILED_OPEN);
        }
        return;
      }
      if (setOfflineInZK && versionOfOfflineNode == -1) {
        // get the version of the znode after setting it to OFFLINE.
        // versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
        versionOfOfflineNode = setOfflineInZooKeeper(currentState, plan.getDestination());
        if (versionOfOfflineNode != -1) {
          if (isDisabledorDisablingRegionInRIT(region)) {
            return;
          }
          // In case of assignment from EnableTableHandler table state is ENABLING. Any how
          // EnableTableHandler will set ENABLED after assigning all the table regions. If we
          // try to set to ENABLED directly then client API may think table is enabled.
          // When we have a case such as all the regions are added directly into hbase:meta and we call
          // assignRegion then we need to make the table ENABLED. Hence in such case the table
          // will not be in ENABLING or ENABLED state.
          TableName tableName = region.getTable();
          if (!zkTable.isEnablingTable(tableName) && !zkTable.isEnabledTable(tableName)) {
            LOG.debug("Setting table " + tableName + " to ENABLED state.");
            setEnabledTable(tableName);
          }
        }
      }
      if (setOfflineInZK && versionOfOfflineNode == -1) {
        LOG.info("Unable to set offline in ZooKeeper to assign " + region);
        // Setting offline in ZK must have been failed due to ZK racing or some
        // exception which may make the server to abort. If it is ZK racing,
        // we should retry since we already reset the region state,
        // existing (re)assignment will fail anyway.
        if (!server.isAborted()) {
          continue;
        }
      }
      LOG.info("Assigning " + region.getRegionNameAsString() +
        " to " + plan.getDestination().toString());
      // Transition RegionState to PENDING_OPEN
      currentState = regionStates.updateRegionState(region,
        State.PENDING_OPEN, plan.getDestination());
      boolean needNewPlan;
      final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() +
        " to " + plan.getDestination();
      try {
        List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
        if (this.shouldAssignRegionsWithFavoredNodes) {
          favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
        }
        regionOpenState = serverManager.sendRegionOpen(
          plan.getDestination(), region, versionOfOfflineNode, favoredNodes);
        if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
          // Failed opening this region, looping again on a new server.
          needNewPlan = true;
          LOG.warn(assignMsg + ", regionserver says 'FAILED_OPENING', " +
            " trying to assign elsewhere instead; " +
            "try=" + i + " of " + this.maximumAttempts);
        } else {
          // we're done
          if (regionOpenState == RegionOpeningState.ALREADY_OPENED) {
            processAlreadyOpenedRegion(region, plan.getDestination());
          }
          return;
        }
      } catch (Throwable t) {
        if (t instanceof RemoteException) {
          t = ((RemoteException) t).unwrapRemoteException();
        }
        previousException = t;
        // Should we wait a little before retrying? If the server is starting it's yes.
        // If the region is already in transition, it's yes as well: we want to be sure that
        // the region will get opened but we don't want a double assignment.
        boolean hold = (t instanceof RegionAlreadyInTransitionException ||
          t instanceof ServerNotRunningYetException);
        // In case socket is timed out and the region server is still online,
        // the openRegion RPC could have been accepted by the server and
        // just the response didn't go through. So we will retry to
        // open the region on the same server to avoid possible
        // double assignment.
        boolean retry = !hold && (t instanceof java.net.SocketTimeoutException
          && this.serverManager.isServerOnline(plan.getDestination()));
        if (hold) {
          LOG.warn(assignMsg + ", waiting a little before trying on the same region server " +
            "try=" + i + " of " + this.maximumAttempts, t);
          if (maxWaitTime < 0) {
            if (t instanceof RegionAlreadyInTransitionException) {
              maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
                + this.server.getConfiguration().getLong(ALREADY_IN_TRANSITION_WAITTIME,
                  DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
            } else {
              maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
                + this.server.getConfiguration().getLong(
                  "hbase.regionserver.rpc.startup.waittime", 60000);
            }
          }
          try {
            needNewPlan = false;
            long now = EnvironmentEdgeManager.currentTimeMillis();
            if (now < maxWaitTime) {
              LOG.debug("Server is not yet up or region is already in transition; "
                + "waiting up to " + (maxWaitTime - now) + "ms", t);
              Thread.sleep(100);
              i--; // reset the try count
            } else if (!(t instanceof RegionAlreadyInTransitionException)) {
              LOG.debug("Server is not up for a while; try a new one", t);
              needNewPlan = true;
            }
          } catch (InterruptedException ie) {
            LOG.warn("Failed to assign "
              + region.getRegionNameAsString() + " since interrupted", ie);
            Thread.currentThread().interrupt();
            if (!tomActivated) {
              regionStates.updateRegionState(region, State.FAILED_OPEN);
            }
            return;
          }
        } else if (retry) {
          needNewPlan = false;
          i--; // we want to retry as many times as needed as long as the RS is not dead.
          LOG.warn(assignMsg + ", trying to assign to the same region server due ", t);
        } else {
          needNewPlan = true;
          LOG.warn(assignMsg + ", trying to assign elsewhere instead;" +
            " try=" + i + " of " + this.maximumAttempts, t);
        }
      }
      if (i == this.maximumAttempts) {
        // Don't reset the region state or get a new plan any more.
        // This is the last try.
        continue;
      }
      // If region opened on destination of present plan, reassigning to new
      // RS may cause double assignments. In case of RegionAlreadyInTransitionException
      // reassigning to same RS.
      if (needNewPlan) {
        // Force a new plan and reassign. Will return null if no servers.
        // The new plan could be the same as the existing plan since we don't
        // exclude the server of the original plan, which should not be
        // excluded since it could be the only server up now.
        RegionPlan newPlan = null;
        try {
          newPlan = getRegionPlan(region, true);
        } catch (HBaseIOException e) {
          LOG.warn("Failed to get region plan", e);
        }
        if (newPlan == null) {
          if (tomActivated) {
            this.timeoutMonitor.setAllRegionServersOffline(true);
          } else {
            regionStates.updateRegionState(region, State.FAILED_OPEN);
          }
          LOG.warn("Unable to find a viable location to assign region " +
            region.getRegionNameAsString());
          return;
        }
        if (plan != newPlan && !plan.getDestination().equals(newPlan.getDestination())) {
          // Clean out plan we failed execute and one that doesn't look like it'll
          // succeed anyways; we need a new plan!
          // Transition back to OFFLINE
          currentState = regionStates.updateRegionState(region, State.OFFLINE);
          versionOfOfflineNode = -1;
          plan = newPlan;
        } else if(plan.getDestination().equals(newPlan.getDestination()) &&
            previousException instanceof FailedServerException) {
          // Same destination and the server was in the failed-server cache:
          // wait for the cache entry to expire before retrying there.
          try {
            LOG.info("Trying to re-assign " + region.getRegionNameAsString() +
              " to the same failed server.");
            Thread.sleep(1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
              RpcClient.FAILED_SERVER_EXPIRY_DEFAULT));
          } catch (InterruptedException ie) {
            LOG.warn("Failed to assign "
              + region.getRegionNameAsString() + " since interrupted", ie);
            Thread.currentThread().interrupt();
            if (!tomActivated) {
              regionStates.updateRegionState(region, State.FAILED_OPEN);
            }
            return;
          }
        }
      }
    }
    // Run out of attempts
    if (!tomActivated) {
      regionStates.updateRegionState(region, State.FAILED_OPEN);
    }
  } finally {
    metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
  }
}
/**
 * Reconciles a region that a server reports as already opened: removes the
 * stale OFFLINE znode and records the region online on that server.
 *
 * @param region region the server reported as already open
 * @param sn server hosting the region
 */
private void processAlreadyOpenedRegion(HRegionInfo region, ServerName sn) {
  // Nothing to open (e.g. a table-enable raced with an earlier assignment);
  // just clean up ZK and the in-memory transition state.
  final String encoded = region.getEncodedName();
  LOG.debug("ALREADY_OPENED " + region.getRegionNameAsString() + " to " + sn);
  deleteNodeInStates(encoded, "offline", sn, EventType.M_ZK_REGION_OFFLINE);
  regionStates.regionOnline(region, sn);
}
/**
 * Checks whether the region belongs to a disabled or disabling table; if so,
 * offlines the region instead of letting it be assigned.
 *
 * @param region region about to be assigned
 * @return true when the table is disabled/disabling and the assign was skipped
 */
private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
  final TableName table = region.getTable();
  final boolean isDisabled = this.zkTable.isDisabledTable(table);
  if (!isDisabled && !this.zkTable.isDisablingTable(table)) {
    return false; // table is live; assignment may proceed
  }
  LOG.info("Table " + table + (isDisabled ? " disabled;" : " disabling;")
      + " skipping assign of " + region.getRegionNameAsString());
  offlineDisabledRegion(region);
  return true;
}
/**
 * Set region as OFFLINED up in zookeeper.
 *
 * @param state region state; must be CLOSED or OFFLINE
 * @param destination server the region is planned for
 * @return the version of the offline node if setting of the OFFLINE node was
 *         successful, -1 otherwise
 */
private int setOfflineInZooKeeper(final RegionState state, final ServerName destination) {
  // Only a CLOSED or OFFLINE region may be transitioned; anything else means
  // the master's bookkeeping is inconsistent, which is fatal.
  if (!state.isClosed() && !state.isOffline()) {
    String msg = "Unexpected state : " + state + " .. Cannot transit it to OFFLINE.";
    this.server.abort(msg, new IllegalStateException(msg));
    return -1;
  }
  regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
  try {
    // createOrForceNodeOffline returns the znode version after the OFFLINE
    // write, or -1 when it could not perform the transition.
    final int offlineVersion = ZKAssign.createOrForceNodeOffline(watcher,
        state.getRegion(), destination);
    if (offlineVersion == -1) {
      LOG.warn("Attempted to create/force node into OFFLINE state before "
        + "completing assignment but failed to do so for " + state);
      return -1;
    }
    return offlineVersion;
  } catch (KeeperException e) {
    server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
    return -1;
  }
}
/**
 * Returns the assignment plan for {@code region} with no server excluded.
 *
 * @param region the region to assign
 * @param forceNewPlan if true, any existing plan is discarded and a fresh
 *          one is generated
 * @return plan for the passed <code>region</code> (created on demand), or
 *         null when there is no server available to assign to
 * @throws HBaseIOException if plan computation fails
 */
private RegionPlan getRegionPlan(final HRegionInfo region,
    final boolean forceNewPlan) throws HBaseIOException {
  // Delegate to the general form, excluding no server.
  return getRegionPlan(region, null, forceNewPlan);
}
/**
 * @param region the region to assign
 * @param serverToExclude Server to exclude (we know its bad). Pass null if
 * all servers are thought to be assignable.
 * @param forceNewPlan If true, then if an existing plan exists, a new plan
 * will be generated.
 * @return Plan for passed <code>region</code> (If none currently, it creates one or
 * if no servers to assign, it returns null).
 */
private RegionPlan getRegionPlan(final HRegionInfo region,
    final ServerName serverToExclude, final boolean forceNewPlan) throws HBaseIOException {
  // Pickup existing plan or make a new one
  final String encodedName = region.getEncodedName();
  final List<ServerName> destServers =
    serverManager.createDestinationServersList(serverToExclude);
  if (destServers.isEmpty()){
    LOG.warn("Can't move " + encodedName +
      ", there is no destination server available.");
    return null;
  }
  RegionPlan randomPlan = null;
  boolean newPlan = false;
  RegionPlan existingPlan;
  // Lookup and (re)creation of a plan must be atomic with respect to other
  // threads consulting regionPlans.
  synchronized (this.regionPlans) {
    existingPlan = this.regionPlans.get(encodedName);
    if (existingPlan != null && existingPlan.getDestination() != null) {
      LOG.debug("Found an existing plan for " + region.getRegionNameAsString()
        + " destination server is " + existingPlan.getDestination() +
        " accepted as a dest server = " + destServers.contains(existingPlan.getDestination()));
    }
    // Make a new plan when forced, or when the existing one is missing,
    // destination-less, or points at a server no longer assignable.
    if (forceNewPlan
        || existingPlan == null
        || existingPlan.getDestination() == null
        || !destServers.contains(existingPlan.getDestination())) {
      newPlan = true;
      randomPlan = new RegionPlan(region, null,
        balancer.randomAssignment(region, destServers));
      if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) {
        List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
        regions.add(region);
        try {
          processFavoredNodes(regions);
        } catch (IOException ie) {
          // Favored nodes are an optimization; plan creation proceeds anyway.
          LOG.warn("Ignoring exception in processFavoredNodes " + ie);
        }
      }
      this.regionPlans.put(encodedName, randomPlan);
    }
  }
  if (newPlan) {
    if (randomPlan.getDestination() == null) {
      LOG.warn("Can't find a destination for " + encodedName);
      return null;
    }
    LOG.debug("No previous transition plan found (or ignoring " +
      "an existing plan) for " + region.getRegionNameAsString() +
      "; generated random plan=" + randomPlan + "; " +
      serverManager.countOfRegionServers() +
      " (online=" + serverManager.getOnlineServers().size() +
      ", available=" + destServers.size() + ") available servers" +
      ", forceNewPlan=" + forceNewPlan);
    return randomPlan;
  }
  LOG.debug("Using pre-existing plan for " +
    region.getRegionNameAsString() + "; plan=" + existingPlan);
  return existingPlan;
}
/**
 * Unassigns the specified region without forcing.
 * <p>
 * Updates the RegionState and sends the CLOSE RPC unless region is being
 * split by regionserver; then the unassign fails (silently) because we
 * presume the region being unassigned no longer exists (its been split out
 * of existence). TODO: What to do if split fails and is rolled back and
 * parent is revivified?
 * <p>
 * If a RegionPlan is already set, it will remain.
 *
 * @param region region to be unassigned
 */
public void unassign(HRegionInfo region) {
  // Non-forced: an in-flight close of this region is left undisturbed.
  unassign(region, false);
}
/**
 * Unassigns the specified region.
 * <p>
 * Updates the RegionState and sends the CLOSE RPC unless region is being
 * split by regionserver; then the unassign fails (silently) because we
 * presume the region being unassigned no longer exists (its been split out
 * of existence). TODO: What to do if split fails and is rolled back and
 * parent is revivified?
 * <p>
 * If a RegionPlan is already set, it will remain.
 *
 * @param region region to be unassigned
 * @param force if region should be closed even if already closing
 * @param dest preferred destination for the subsequent assignment (may be null)
 */
public void unassign(HRegionInfo region, boolean force, ServerName dest) {
  // TODO: Method needs refactoring. Ugly buried returns throughout. Beware!
  LOG.debug("Starting unassign of " + region.getRegionNameAsString()
    + " (offlining), current state: " + regionStates.getRegionState(region));
  String encodedName = region.getEncodedName();
  // Grab the state of this region and synchronize on it
  int versionOfClosingNode = -1;
  // We need a lock here as we're going to do a put later and we don't want multiple states
  // creation
  ReentrantLock lock = locker.acquireLock(encodedName);
  RegionState state = regionStates.getRegionTransitionState(encodedName);
  // reassign stays true unless we discover the unassign is moot (split,
  // merged, ZK failure); checked again in the finally block below.
  boolean reassign = true;
  try {
    if (state == null) {
      // Region is not in transition.
      // We can unassign it only if it's not SPLIT/MERGED.
      state = regionStates.getRegionState(encodedName);
      if (state != null && state.isUnassignable()) {
        LOG.info("Attempting to unassign " + state + ", ignored");
        // Offline region will be reassigned below
        return;
      }
      // Create the znode in CLOSING state
      try {
        if (state == null || state.getServerName() == null) {
          // We don't know where the region is, offline it.
          // No need to send CLOSE RPC
          LOG.warn("Attempting to unassign a region not in RegionStates"
            + region.getRegionNameAsString() + ", offlined");
          regionOffline(region);
          return;
        }
        if (useZKForAssignment) {
          versionOfClosingNode = ZKAssign.createNodeClosing(
            watcher, region, state.getServerName());
          if (versionOfClosingNode == -1) {
            LOG.info("Attempting to unassign " +
              region.getRegionNameAsString() + " but ZK closing node "
              + "can't be created.");
            reassign = false; // not unassigned at all
            return;
          }
        }
      } catch (KeeperException e) {
        if (e instanceof NodeExistsException) {
          // Handle race between master initiated close and regionserver
          // orchestrated splitting. See if existing node is in a
          // SPLITTING or SPLIT state. If so, the regionserver started
          // an op on node before we could get our CLOSING in. Deal.
          NodeExistsException nee = (NodeExistsException)e;
          String path = nee.getPath();
          try {
            if (isSplitOrSplittingOrMergedOrMerging(path)) {
              LOG.debug(path + " is SPLIT or SPLITTING or MERGED or MERGING; " +
                "skipping unassign because region no longer exists -- its split or merge");
              reassign = false; // no need to reassign for split/merged region
              return;
            }
          } catch (KeeperException.NoNodeException ke) {
            LOG.warn("Failed getData on SPLITTING/SPLIT at " + path +
              "; presuming split and that the region to unassign, " +
              encodedName + ", no longer exists -- confirm", ke);
            return;
          } catch (KeeperException ke) {
            LOG.error("Unexpected zk state", ke);
          } catch (DeserializationException de) {
            LOG.error("Failed parse", de);
          }
        }
        // If we get here, don't understand whats going on -- abort.
        server.abort("Unexpected ZK exception creating node CLOSING", e);
        reassign = false; // heading out already
        return;
      }
      state = regionStates.updateRegionState(region, State.PENDING_CLOSE);
    } else if (state.isFailedOpen()) {
      // The region is not open yet
      regionOffline(region);
      return;
    } else if (force && state.isPendingCloseOrClosing()) {
      LOG.debug("Attempting to unassign " + region.getRegionNameAsString() +
        " which is already " + state.getState() +
        " but forcing to send a CLOSE RPC again ");
      if (state.isFailedClose()) {
        state = regionStates.updateRegionState(region, State.PENDING_CLOSE);
      }
      state.updateTimestampToNow();
    } else {
      LOG.debug("Attempting to unassign " +
        region.getRegionNameAsString() + " but it is " +
        "already in transition (" + state.getState() + ", force=" + force + ")");
      return;
    }
    unassign(region, state, versionOfClosingNode, dest, useZKForAssignment, null);
  } finally {
    lock.unlock();
    // Region is expected to be reassigned afterwards
    if (reassign && regionStates.isRegionOffline(region)) {
      assign(region, true);
    }
  }
}
/**
 * Unassigns the specified region with no preferred destination.
 *
 * @param region region to be unassigned
 * @param force if region should be closed even if already closing
 */
public void unassign(HRegionInfo region, boolean force) {
  // Null destination: the follow-up assignment may pick any server.
  unassign(region, force, null);
}
/**
 * Deletes the region's znode if it is in the CLOSING or CLOSED state.
 *
 * @param region regioninfo of znode to be deleted
 * @param sn server the region was being closed on
 */
public void deleteClosingOrClosedNode(HRegionInfo region, ServerName sn) {
  deleteNodeInStates(region.getEncodedName(), "closing", sn,
      EventType.M_ZK_REGION_CLOSING, EventType.RS_ZK_REGION_CLOSED);
}
/**
 * @param path znode path to inspect
 * @return True if znode is in SPLIT or SPLITTING or MERGED or MERGING state.
 * @throws KeeperException Can happen if the znode went away in meantime.
 * @throws DeserializationException if the znode payload cannot be parsed
 */
private boolean isSplitOrSplittingOrMergedOrMerging(final String path)
    throws KeeperException, DeserializationException {
  // This may fail if the SPLIT or SPLITTING or MERGED or MERGING znode gets
  // cleaned up before we can get data from it.
  final byte [] data = ZKAssign.getData(watcher, path);
  if (data == null) {
    LOG.info("Node " + path + " is gone");
    return false;
  }
  final RegionTransition rt = RegionTransition.parseFrom(data);
  switch (rt.getEventType()) {
  case RS_ZK_REQUEST_REGION_SPLIT:
  case RS_ZK_REGION_SPLIT:
  case RS_ZK_REGION_SPLITTING:
  case RS_ZK_REQUEST_REGION_MERGE:
  case RS_ZK_REGION_MERGED:
  case RS_ZK_REGION_MERGING:
    // Any split/merge related event means the region is going away.
    return true;
  default:
    LOG.info("Node " + path + " is in " + rt.getEventType());
    return false;
  }
}
/**
 * Used by unit tests. Returns the number of regions opened so far in the
 * life of the master; increases by one every time the master opens a region.
 *
 * @return the counter value of the number of regions opened so far
 */
public int getNumRegionsOpened() {
  return this.numRegionsOpened.get();
}
/**
 * Waits until the specified region has completed assignment.
 * <p>
 * Returns immediately if the region is already assigned; otherwise blocks
 * until it comes online, the assignment fails, or the master stops.
 *
 * @param regionInfo region to wait on assignment for
 * @return true once the region is online; false on failed open or shutdown
 * @throws InterruptedException if the wait is interrupted
 */
public boolean waitForAssignment(HRegionInfo regionInfo)
    throws InterruptedException {
  for (;;) {
    if (regionStates.isRegionOnline(regionInfo)) {
      return true; // assignment completed
    }
    if (regionStates.isRegionInState(regionInfo, State.FAILED_OPEN)
        || this.server.isStopped()) {
      return false; // open failed, or the master is going down
    }
    // A notification should arrive, but the bounded wait re-checks the
    // condition periodically, shrinking the window of any missed update.
    regionStates.waitForUpdate(100);
  }
}
/**
 * Assigns the hbase:meta region.
 * <p>
 * Assumes that hbase:meta is currently closed and is not being actively served by
 * any RegionServer.
 * <p>
 * Forcibly unsets the current meta region location in ZooKeeper and assigns
 * hbase:meta to a random RegionServer.
 * @throws KeeperException
 */
public void assignMeta() throws KeeperException {
// Clear any stale meta location first so the new assignment is authoritative.
MetaRegionTracker.deleteMetaLocation(this.watcher);
assign(HRegionInfo.FIRST_META_REGIONINFO, true);
}
/**
 * Assigns specified regions retaining assignments, if any.
 * <p>
 * This is a synchronous call and will return once every region has been
 * assigned. If anything fails, an exception is thrown
 * @param regions map of region to its last known server; used by the
 *   balancer to retain existing placements where possible
 * @throws InterruptedException
 * @throws IOException if no destination server is available
 */
public void assign(Map<HRegionInfo, ServerName> regions)
throws IOException, InterruptedException {
if (regions == null || regions.isEmpty()) {
return;
}
List<ServerName> servers = serverManager.createDestinationServersList();
if (servers == null || servers.isEmpty()) {
throw new IOException("Found no destination server to assign region(s)");
}
// Reuse existing assignment info
Map<ServerName, List<HRegionInfo>> bulkPlan =
balancer.retainAssignment(regions, servers);
assign(regions.size(), servers.size(),
"retainAssignment=true", bulkPlan);
}
/**
 * Assigns specified regions round robin, if any.
 * <p>
 * This is a synchronous call and will return once every region has been
 * assigned. If anything fails, an exception is thrown
 * @param regions regions to assign; no placement history is consulted
 * @throws InterruptedException
 * @throws IOException if no destination server is available
 */
public void assign(List<HRegionInfo> regions)
throws IOException, InterruptedException {
if (regions == null || regions.isEmpty()) {
return;
}
List<ServerName> servers = serverManager.createDestinationServersList();
if (servers == null || servers.isEmpty()) {
throw new IOException("Found no destination server to assign region(s)");
}
// Generate a round-robin bulk assignment plan
Map<ServerName, List<HRegionInfo>> bulkPlan
= balancer.roundRobinAssignment(regions, servers);
// Populate favored-nodes info before executing the plan.
processFavoredNodes(regions);
assign(regions.size(), servers.size(),
"round-robin=true", bulkPlan);
}
// Execute a prepared bulk plan, choosing between one-by-one assignment
// (small clusters / few regions) and the bulk assigner (everything else).
// 'regions' and 'totalServers' are only used for the threshold decision
// and logging; the actual work is driven by 'bulkPlan'.
private void assign(int regions, int totalServers,
String message, Map<ServerName, List<HRegionInfo>> bulkPlan)
throws InterruptedException, IOException {
int servers = bulkPlan.size();
if (servers == 1 || (regions < bulkAssignThresholdRegions
&& servers < bulkAssignThresholdServers)) {
// Not use bulk assignment. This could be more efficient in small
// cluster, especially mini cluster for testing, so that tests won't time out
if (LOG.isTraceEnabled()) {
LOG.trace("Not using bulk assignment since we are assigning only " + regions +
" region(s) to " + servers + " server(s)");
}
for (Map.Entry<ServerName, List<HRegionInfo>> plan: bulkPlan.entrySet()) {
if (!assign(plan.getKey(), plan.getValue())) {
// Per-server assign failed; retry each still-offline region individually.
for (HRegionInfo region: plan.getValue()) {
if (!regionStates.isRegionOnline(region)) {
invokeAssign(region);
}
}
}
}
} else {
LOG.info("Bulk assigning " + regions + " region(s) across "
+ totalServers + " server(s), " + message);
// Use fixed count thread pool assigning.
BulkAssigner ba = new GeneralBulkAssigner(
this.server, bulkPlan, this, bulkAssignWaitTillAllAssigned);
ba.bulkAssign();
LOG.info("Bulk assigning done");
}
}
/**
 * Assigns all user regions, if any exist. Used during cluster startup.
 * <p>
 * This is a synchronous call and will return once every region has been
 * assigned. If anything fails, an exception is thrown and the cluster
 * should be shutdown.
 * @param disabledOrDisablingOrEnabling tables whose regions are skipped
 *   when snapshotting the region-to-server map from hbase:meta
 * @throws InterruptedException
 * @throws IOException
 * @throws KeeperException
 */
private void assignAllUserRegions(Set<TableName> disabledOrDisablingOrEnabling)
throws IOException, InterruptedException, KeeperException {
// Skip assignment for regions of tables in DISABLING state because during clean cluster startup
// no RS is alive and regions map also doesn't have any information about the regions.
// See HBASE-6281.
// Scan hbase:meta for all user regions, skipping any disabled tables
Map<HRegionInfo, ServerName> allRegions;
SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment =
new SnapshotOfRegionAssignmentFromMeta(catalogTracker, disabledOrDisablingOrEnabling, true);
snapshotOfRegionAssignment.initialize();
allRegions = snapshotOfRegionAssignment.getRegionToRegionServerMap();
if (allRegions == null || allRegions.isEmpty()) {
return;
}
// Determine what type of assignment to do on startup
boolean retainAssignment = server.getConfiguration().
getBoolean("hbase.master.startup.retainassign", true);
if (retainAssignment) {
// Keep regions on their previous servers where possible.
assign(allRegions);
} else {
// Fresh round-robin placement, ignoring previous locations.
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(allRegions.keySet());
assign(regions);
}
// Mark every touched table ENABLED in ZK if it isn't already.
for (HRegionInfo hri : allRegions.keySet()) {
TableName tableName = hri.getTable();
if (!zkTable.isEnabledTable(tableName)) {
setEnabledTable(tableName);
}
}
}
/**
 * Wait until no regions in transition.
 * Note: regions may re-enter transition immediately after this returns;
 * the guarantee only covers the in-memory master view at some instant.
 * @param timeout How long to wait.
 * @return True if nothing in regions in transition.
 * @throws InterruptedException
 */
boolean waitUntilNoRegionsInTransition(final long timeout)
throws InterruptedException {
final long deadline = System.currentTimeMillis() + timeout;
while (regionStates.isRegionsInTransition()) {
  // Give up on server stop or when the deadline has passed.
  if (this.server.isStopped() || System.currentTimeMillis() >= deadline) {
    break;
  }
  regionStates.waitForUpdate(100);
}
return !regionStates.isRegionsInTransition();
}
/**
 * Rebuild the list of user regions and assignment information.
 * <p>
 * Returns a map of servers that are not found to be online and the regions
 * they were hosting.
 * @return map of servers not online to their assigned regions, as stored
 * in META
 * @throws IOException
 * @throws KeeperException if the table-state znodes cannot be read
 */
Map<ServerName, List<HRegionInfo>> rebuildUserRegions() throws IOException, KeeperException {
// Build up the table-state sets used to decide how each region is handled.
Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
Set<TableName> disabledOrEnablingTables = ZKTable.getDisabledTables(watcher);
disabledOrEnablingTables.addAll(enablingTables);
Set<TableName> disabledOrDisablingOrEnabling = ZKTable.getDisablingTables(watcher);
disabledOrDisablingOrEnabling.addAll(disabledOrEnablingTables);
// Region assignment from META
List<Result> results = MetaReader.fullScan(this.catalogTracker);
// Get any new but slow to checkin region server that joined the cluster
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
// Map of offline servers and their regions to be returned
Map<ServerName, List<HRegionInfo>> offlineServers =
new TreeMap<ServerName, List<HRegionInfo>>();
// Iterate regions in META
for (Result result : results) {
HRegionInfo regionInfo = HRegionInfo.getHRegionInfo(result);
if (regionInfo == null) continue;
State state = RegionStateStore.getRegionState(result);
ServerName regionLocation = RegionStateStore.getRegionServer(result);
// Seed the in-memory region state from what META says.
regionStates.createRegionState(regionInfo, state, regionLocation);
if (!regionStates.isRegionInState(regionInfo, State.OPEN)) {
// Region is not open (either offline or in transition), skip
continue;
}
TableName tableName = regionInfo.getTable();
if (!onlineServers.contains(regionLocation)) {
// Region is located on a server that isn't online
List<HRegionInfo> offlineRegions = offlineServers.get(regionLocation);
if (offlineRegions == null) {
offlineRegions = new ArrayList<HRegionInfo>(1);
offlineServers.put(regionLocation, offlineRegions);
}
if (useZKForAssignment) {
regionStates.regionOffline(regionInfo);
}
offlineRegions.add(regionInfo);
} else if (!disabledOrEnablingTables.contains(tableName)) {
// Region is being served and on an active server
// add only if region not in disabled or enabling table
regionStates.updateRegionState(regionInfo, State.OPEN, regionLocation);
regionStates.regionOnline(regionInfo, regionLocation);
balancer.regionOnline(regionInfo, regionLocation);
} else if (useZKForAssignment) {
// Disabled/enabling table under ZK-based assignment: mark offline.
regionStates.regionOffline(regionInfo);
}
// need to enable the table if not disabled or disabling or enabling
// this will be used in rolling restarts
if (!disabledOrDisablingOrEnabling.contains(tableName)
&& !getZKTable().isEnabledTable(tableName)) {
setEnabledTable(tableName);
}
}
return offlineServers;
}
/**
 * Recover the tables that were not fully moved to DISABLED state. These
 * tables are in DISABLING state when the master restarted/switched.
 * Re-runs the DisableTableHandler for each such table to finish the disable.
 *
 * @throws KeeperException
 * @throws TableNotFoundException
 * @throws IOException
 */
private void recoverTableInDisablingState()
throws KeeperException, TableNotFoundException, IOException {
Set<TableName> disablingTables = ZKTable.getDisablingTables(watcher);
// Iterating an empty set is a no-op, so no explicit emptiness guard is needed.
for (TableName tableName : disablingTables) {
// Recover by calling DisableTableHandler
LOG.info("The table " + tableName
+ " is in DISABLING state.  Hence recovering by moving the table"
+ " to DISABLED state.");
new DisableTableHandler(this.server, tableName, catalogTracker,
this, tableLockManager, true).prepare().process();
}
}
/**
 * Recover the tables that are not fully moved to ENABLED state. These tables
 * are in ENABLING state when the master restarted/switched.
 * Re-runs the EnableTableHandler synchronously for each such table.
 *
 * @throws KeeperException
 * @throws org.apache.hadoop.hbase.TableNotFoundException
 * @throws IOException
 */
private void recoverTableInEnablingState()
throws KeeperException, TableNotFoundException, IOException {
Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
// Iterating an empty set is a no-op, so no explicit emptiness guard is needed.
for (TableName tableName : enablingTables) {
// Recover by calling EnableTableHandler
LOG.info("The table " + tableName
+ " is in ENABLING state.  Hence recovering by moving the table"
+ " to ENABLED state.");
// enableTable in sync way during master startup,
// no need to invoke coprocessor
EnableTableHandler eth = new EnableTableHandler(this.server, tableName,
catalogTracker, this, tableLockManager, true);
try {
eth.prepare();
} catch (TableNotFoundException e) {
// Table vanished from meta since the znode was written; skip it.
LOG.warn("Table " + tableName + " not found in hbase:meta to recover.");
continue;
}
eth.process();
}
}
/**
 * Processes list of dead servers from result of hbase:meta scan and regions in RIT
 * <p>
 * This is used for failover to recover the lost regions that belonged to
 * RegionServers which failed while there was no active master or regions
 * that were in RIT.
 * <p>
 *
 *
 * @param deadServers
 *          The list of dead servers which failed while there was no active
 *          master. Can be null.
 * @throws IOException
 * @throws KeeperException
 */
private void processDeadServersAndRecoverLostRegions(
Map<ServerName, List<HRegionInfo>> deadServers)
throws IOException, KeeperException {
if (deadServers != null) {
for (Map.Entry<ServerName, List<HRegionInfo>> server: deadServers.entrySet()) {
ServerName serverName = server.getKey();
// We need to keep such info even if the server is known dead
regionStates.setLastRegionServerOfRegions(serverName, server.getValue());
if (!serverManager.isServerDead(serverName)) {
serverManager.expireServer(serverName); // Let SSH do region re-assign
}
}
}
// With ZK assignment, also set watches on new children appearing.
List<String> nodes = useZKForAssignment ?
ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.assignmentZNode)
: ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode);
if (nodes != null && !nodes.isEmpty()) {
for (String encodedRegionName : nodes) {
processRegionInTransition(encodedRegionName, null);
}
} else if (!useZKForAssignment) {
// We need to send RPC call again for PENDING_OPEN/PENDING_CLOSE regions
// in case the RPC call is not sent out yet before the master was shut down
// since we update the state before we send the RPC call. We can't update
// the state after the RPC call. Otherwise, we don't know what's happened
// to the region if the master dies right after the RPC call is out.
Map<String, RegionState> rits = regionStates.getRegionsInTransition();
for (RegionState regionState: rits.values()) {
if (!serverManager.isServerOnline(regionState.getServerName())) {
continue; // SSH will handle it
}
State state = regionState.getState();
LOG.info("Processing " + regionState);
switch (state) {
case CLOSED:
// Already closed; just kick off a fresh assign.
invokeAssign(regionState.getRegion());
break;
case PENDING_OPEN:
retrySendRegionOpen(regionState);
break;
case PENDING_CLOSE:
retrySendRegionClose(regionState);
break;
default:
// No process for other states
}
}
}
}
/**
 * At master failover, for pending_open region, make sure
 * sendRegionOpen RPC call is sent to the target regionserver.
 * Runs asynchronously on the executor service; retries on transient
 * connection failures and falls back to a fresh assignment otherwise.
 */
private void retrySendRegionOpen(final RegionState regionState) {
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
HRegionInfo hri = regionState.getRegion();
ServerName serverName = regionState.getServerName();
// Serialize with other operations on this region.
ReentrantLock lock = locker.acquireLock(hri.getEncodedName());
try {
while (serverManager.isServerOnline(serverName)
&& !server.isStopped() && !server.isAborted()) {
try {
List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
if (shouldAssignRegionsWithFavoredNodes) {
favoredNodes = ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(hri);
}
RegionOpeningState regionOpenState = serverManager.sendRegionOpen(
serverName, hri, -1, favoredNodes);
if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
// Failed opening this region, this means the target server didn't get
// the original region open RPC, so re-assign it with a new plan
LOG.debug("Got failed_opening in retry sendRegionOpen for "
+ regionState + ", re-assign it");
invokeAssign(hri, true);
}
return; // Done.
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException) t).unwrapRemoteException();
}
// In case SocketTimeoutException/FailedServerException, we will retry
if (t instanceof java.net.SocketTimeoutException
|| t instanceof FailedServerException) {
Threads.sleep(100);
continue;
}
// For other exceptions, re-assign it
LOG.debug("Got exception in retry sendRegionOpen for "
+ regionState + ", re-assign it", t);
invokeAssign(hri);
return; // Done.
}
}
} finally {
lock.unlock();
}
}
});
}
/**
 * At master failover, for pending_close region, make sure
 * sendRegionClose RPC call is sent to the target regionserver.
 * Runs asynchronously on the executor service; retries on transient
 * connection failures and re-issues the unassign otherwise.
 */
private void retrySendRegionClose(final RegionState regionState) {
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
HRegionInfo hri = regionState.getRegion();
ServerName serverName = regionState.getServerName();
// Serialize with other operations on this region.
ReentrantLock lock = locker.acquireLock(hri.getEncodedName());
try {
while (serverManager.isServerOnline(serverName)
&& !server.isStopped() && !server.isAborted()) {
try {
if (!serverManager.sendRegionClose(serverName, hri, -1, null, false)) {
// This means the region is still on the target server
LOG.debug("Got false in retry sendRegionClose for "
+ regionState + ", re-close it");
invokeUnAssign(hri);
}
return; // Done.
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException) t).unwrapRemoteException();
}
// In case SocketTimeoutException/FailedServerException, we will retry
if (t instanceof java.net.SocketTimeoutException
|| t instanceof FailedServerException) {
Threads.sleep(100);
continue;
}
if (!(t instanceof NotServingRegionException
|| t instanceof RegionAlreadyInTransitionException)) {
// NotServingRegionException/RegionAlreadyInTransitionException
// means the target server got the original region close request.
// For other exceptions, re-close it
LOG.debug("Got exception in retry sendRegionClose for "
+ regionState + ", re-close it", t);
invokeUnAssign(hri);
}
return; // Done.
}
}
} finally {
lock.unlock();
}
}
});
}
/**
 * Set Regions in transitions metrics.
 * This takes an iterator on the RegionInTransition map (CLSM), and is not synchronized.
 * This iterator is not fail fast, which may lead to stale read; but that's better than
 * creating a copy of the map for metrics computation, as this method will be invoked
 * on a frequent interval.
 */
public void updateRegionsInTransitionMetrics() {
final long now = System.currentTimeMillis();
final int stuckThreshold = this.server.getConfiguration().
    getInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 60000);
int ritCount = 0;
int stuckCount = 0;
long oldestAge = 0;
for (RegionState rs : regionStates.getRegionsInTransition().values()) {
  ritCount++;
  final long age = now - rs.getStamp();
  if (age > stuckThreshold) { // transition has outlived the warning threshold
    stuckCount++;
  }
  oldestAge = Math.max(oldestAge, age);
}
if (this.metricsAssignmentManager != null) {
  this.metricsAssignmentManager.updateRITOldestAge(oldestAge);
  this.metricsAssignmentManager.updateRITCount(ritCount);
  this.metricsAssignmentManager.updateRITCountOverThreshold(stuckCount);
}
}
/**
 * Drop any pending assignment plan for the given region.
 * @param region Region whose plan we are to clear.
 */
void clearRegionPlan(final HRegionInfo region) {
final String encodedName = region.getEncodedName();
// regionPlans is guarded by its own monitor throughout this class.
synchronized (this.regionPlans) {
  this.regionPlans.remove(encodedName);
}
}
/**
 * Wait on region to clear regions-in-transition.
 * Convenience overload with no timeout (-1 means wait indefinitely).
 * @param hri Region to wait on.
 * @throws IOException
 */
public void waitOnRegionToClearRegionsInTransition(final HRegionInfo hri)
throws IOException, InterruptedException {
waitOnRegionToClearRegionsInTransition(hri, -1L);
}
/**
 * Wait on region to clear regions-in-transition or time out
 * @param hri region to wait on
 * @param timeOut Milliseconds to wait for current region to be out of transition state.
 *   A value {@code <= 0} means no deadline (wait until stopped or cleared).
 * @return True when a region clears regions-in-transition before timeout otherwise false
 * @throws InterruptedException
 */
public boolean waitOnRegionToClearRegionsInTransition(final HRegionInfo hri, long timeOut)
throws InterruptedException {
if (!regionStates.isRegionInTransition(hri)) return true;
long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTimeMillis()
+ timeOut;
// There is already a timeout monitor on regions in transition so I
// should not have to have one here too?
LOG.info("Waiting for " + hri.getEncodedName() +
" to leave regions-in-transition, timeOut=" + timeOut + " ms.");
while (!this.server.isStopped() && regionStates.isRegionInTransition(hri)) {
// Poll with a short wait rather than blocking indefinitely.
regionStates.waitForUpdate(100);
if (EnvironmentEdgeManager.currentTimeMillis() > end) {
LOG.info("Timed out on waiting for " + hri.getEncodedName() + " to be assigned.");
return false;
}
}
if (this.server.isStopped()) {
LOG.info("Giving up wait on regions in transition because stoppable.isStopped is set");
return false;
}
return true;
}
/**
 * Update timers for all regions in transition going against the server in the
 * serversInUpdatingTimer.
 * Chore that drains the serversInUpdatingTimer set in sorted order,
 * refreshing RIT timers per server. Only runs when the timeout monitor
 * is activated (tomActivated).
 */
public class TimerUpdater extends Chore {
public TimerUpdater(final int period, final Stoppable stopper) {
super("AssignmentTimerUpdater", period, stopper);
}
@Override
protected void chore() {
Preconditions.checkState(tomActivated);
ServerName serverToUpdateTimer = null;
// Walk the sorted set from the first element upward; new servers added
// behind the cursor are picked up on the next chore run.
while (!serversInUpdatingTimer.isEmpty() && !stopper.isStopped()) {
if (serverToUpdateTimer == null) {
serverToUpdateTimer = serversInUpdatingTimer.first();
} else {
serverToUpdateTimer = serversInUpdatingTimer
.higher(serverToUpdateTimer);
}
if (serverToUpdateTimer == null) {
break;
}
updateTimers(serverToUpdateTimer);
serversInUpdatingTimer.remove(serverToUpdateTimer);
}
}
}
/**
 * Monitor to check for time outs on region transition operations
 */
public class TimeoutMonitor extends Chore {
// Whether the previous chore run found no live destination servers.
private boolean allRegionServersOffline = false;
private ServerManager serverManager;
// Milliseconds a region may sit in one transition state before we act.
private final int timeout;
/**
 * Creates a periodic monitor to check for time outs on region transition
 * operations. This will deal with retries if for some reason something
 * doesn't happen within the specified timeout.
 * @param period
 * @param stopper When {@link Stoppable#isStopped()} is true, this thread will
 * cleanup and exit cleanly.
 * @param timeout
 */
public TimeoutMonitor(final int period, final Stoppable stopper,
ServerManager serverManager,
final int timeout) {
super("AssignmentTimeoutMonitor", period, stopper);
this.timeout = timeout;
this.serverManager = serverManager;
}
private synchronized void setAllRegionServersOffline(
boolean allRegionServersOffline) {
this.allRegionServersOffline = allRegionServersOffline;
}
@Override
protected void chore() {
Preconditions.checkState(tomActivated);
boolean noRSAvailable = this.serverManager.createDestinationServersList().isEmpty();
// Iterate all regions in transition checking for time outs
long now = System.currentTimeMillis();
// no lock concurrent access ok: we will be working on a copy, and it's java-valid to do
//  a copy while another thread is adding/removing items
for (String regionName : regionStates.getRegionsInTransition().keySet()) {
RegionState regionState = regionStates.getRegionTransitionState(regionName);
if (regionState == null) continue;
if (regionState.getStamp() + timeout <= now) {
// decide on action upon timeout
actOnTimeOut(regionState);
} else if (this.allRegionServersOffline && !noRSAvailable) {
RegionPlan existingPlan = regionPlans.get(regionName);
if (existingPlan == null
|| !this.serverManager.isServerOnline(existingPlan
.getDestination())) {
// if some RSs just came back online, we can start the
// the assignment right away
actOnTimeOut(regionState);
}
}
}
setAllRegionServersOffline(noRSAvailable);
}
// Takes the recovery action matching the timed-out region's current state:
// retry assign/unassign, refresh the timestamp, or leave split/merge alone.
private void actOnTimeOut(RegionState regionState) {
HRegionInfo regionInfo = regionState.getRegion();
LOG.info("Regions in transition timed out:  " + regionState);
// Expired! Do a retry.
switch (regionState.getState()) {
case CLOSED:
LOG.info("Region " + regionInfo.getEncodedName()
+ " has been CLOSED for too long, waiting on queued "
+ "ClosedRegionHandler to run or server shutdown");
// Update our timestamp.
regionState.updateTimestampToNow();
break;
case OFFLINE:
LOG.info("Region has been OFFLINE for too long, " + "reassigning "
+ regionInfo.getRegionNameAsString() + " to a random server");
invokeAssign(regionInfo);
break;
case PENDING_OPEN:
LOG.info("Region has been PENDING_OPEN for too "
+ "long, reassigning region=" + regionInfo.getRegionNameAsString());
invokeAssign(regionInfo);
break;
case OPENING:
processOpeningState(regionInfo);
break;
case OPEN:
LOG.error("Region has been OPEN for too long, " +
"we don't know where region was opened so can't do anything");
regionState.updateTimestampToNow();
break;
case PENDING_CLOSE:
LOG.info("Region has been PENDING_CLOSE for too "
+ "long, running forced unassign again on region="
+ regionInfo.getRegionNameAsString());
invokeUnassign(regionInfo);
break;
case CLOSING:
LOG.info("Region has been CLOSING for too " +
"long, this should eventually complete or the server will " +
"expire, send RPC again");
invokeUnassign(regionInfo);
break;
case SPLIT:
case SPLITTING:
case FAILED_OPEN:
case FAILED_CLOSE:
case MERGING:
// Nothing to do; these resolve through their own handlers.
break;
default:
throw new IllegalStateException("Received event is not valid.");
}
}
}
// Handle a region stuck in OPENING past the timeout: inspect its ZK node
// and re-assign unless it already transitioned to OPENED.
private void processOpeningState(HRegionInfo regionInfo) {
LOG.info("Region has been OPENING for too long, reassigning region="
+ regionInfo.getRegionNameAsString());
// Should have a ZK node in OPENING state
try {
String node = ZKAssign.getNodeName(watcher, regionInfo.getEncodedName());
Stat stat = new Stat();
byte [] data = ZKAssign.getDataNoWatch(watcher, node, stat);
if (data == null) {
LOG.warn("Data is null, node " + node + " no longer exists");
return;
}
RegionTransition rt = RegionTransition.parseFrom(data);
EventType et = rt.getEventType();
if (et == EventType.RS_ZK_REGION_OPENED) {
LOG.debug("Region has transitioned to OPENED, allowing "
+ "watched event handlers to process");
return;
} else if (et != EventType.RS_ZK_REGION_OPENING && et != EventType.RS_ZK_REGION_FAILED_OPEN ) {
LOG.warn("While timing out a region, found ZK node in unexpected state: " + et);
return;
}
// Still OPENING or FAILED_OPEN: re-assign.
invokeAssign(regionInfo);
} catch (KeeperException ke) {
LOG.error("Unexpected ZK exception timing out CLOSING region", ke);
} catch (DeserializationException e) {
LOG.error("Unexpected exception parsing CLOSING region", e);
}
}
// Queue an asynchronous assign with a new region plan.
void invokeAssign(HRegionInfo regionInfo) {
invokeAssign(regionInfo, true);
}
// Queue an asynchronous assign; newPlan controls whether a fresh
// region plan is computed or an existing one reused.
void invokeAssign(HRegionInfo regionInfo, boolean newPlan) {
threadPoolExecutorService.submit(new AssignCallable(this, regionInfo, newPlan));
}
// Queue an asynchronous unassign of the given region.
void invokeUnAssign(HRegionInfo regionInfo) {
threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo));
}
// Queue an asynchronous unassign of the given region.
// Identical to invokeUnAssign; delegate to keep a single submission path
// instead of duplicating the executor-submit code.
private void invokeUnassign(HRegionInfo regionInfo) {
invokeUnAssign(regionInfo);
}
// True if the given server is (per ZK/in-memory state) hosting hbase:meta.
public boolean isCarryingMeta(ServerName serverName) {
return isCarryingRegion(serverName, HRegionInfo.FIRST_META_REGIONINFO);
}
/**
 * Check if the shutdown server carries the specific region.
 * We have a bunch of places that store region location
 * Those values aren't consistent. There is a delay of notification.
 * The location from zookeeper unassigned node has the most recent data;
 * but the node could be deleted after the region is opened by AM.
 * The AM's info could be old when OpenedRegionHandler
 * processing hasn't finished yet when server shutdown occurs.
 * @return whether the serverName currently hosts the region
 */
private boolean isCarryingRegion(ServerName serverName, HRegionInfo hri) {
RegionTransition rt = null;
try {
byte [] data = ZKAssign.getData(watcher, hri.getEncodedName());
// This call can legitimately come by null
rt = data == null? null: RegionTransition.parseFrom(data);
} catch (KeeperException e) {
server.abort("Exception reading unassigned node for region=" + hri.getEncodedName(), e);
} catch (DeserializationException e) {
server.abort("Exception parsing unassigned node for region=" + hri.getEncodedName(), e);
}
ServerName addressFromZK = rt != null? rt.getServerName(): null;
if (addressFromZK != null) {
// if we get something from ZK, we will use the data
boolean matchZK = addressFromZK.equals(serverName);
LOG.debug("Checking region=" + hri.getRegionNameAsString() + ", zk server=" + addressFromZK +
" current=" + serverName + ", matches=" + matchZK);
return matchZK;
}
// No ZK data: fall back to the assignment manager's in-memory view.
ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
boolean matchAM = (addressFromAM != null &&
addressFromAM.equals(serverName));
LOG.debug("based on AM, current region=" + hri.getRegionNameAsString() +
" is on server=" + (addressFromAM != null ? addressFromAM : "null") +
" server being checked: " + serverName);
return matchAM;
}
/**
 * Process shutdown server removing any assignments.
 * Clears region plans destined for the dead server, marks its regions
 * offline, and returns the in-transition regions SSH must re-assign.
 * @param sn Server that went down.
 * @return list of regions in transition on this server
 */
public List<HRegionInfo> processServerShutdown(final ServerName sn) {
// Clean out any existing assignment plans for this server
synchronized (this.regionPlans) {
for  (Iterator <Map.Entry<String, RegionPlan>> i =
this.regionPlans.entrySet().iterator(); i.hasNext();) {
Map.Entry<String, RegionPlan> e = i.next();
ServerName otherSn = e.getValue().getDestination();
// The name will be null if the region is planned for a random assign.
if (otherSn != null && otherSn.equals(sn)) {
// Use iterator's remove else we'll get CME
i.remove();
}
}
}
List<HRegionInfo> regions = regionStates.serverOffline(watcher, sn);
for (Iterator<HRegionInfo> it = regions.iterator(); it.hasNext(); ) {
HRegionInfo hri = it.next();
String encodedName = hri.getEncodedName();
// We need a lock on the region as we could update it
Lock lock = locker.acquireLock(encodedName);
try {
RegionState regionState =
regionStates.getRegionTransitionState(encodedName);
if (regionState == null
|| (regionState.getServerName() != null && !regionState.isOnServer(sn))
|| !(regionState.isFailedClose() || regionState.isOffline()
|| regionState.isPendingOpenOrOpening())) {
// The region moved on (or never was here); drop it from the result.
LOG.info("Skip " + regionState + " since it is not opening/failed_close"
+ " on the dead server any more: " + sn);
it.remove();
} else {
try {
// Delete the ZNode if exists
ZKAssign.deleteNodeFailSilent(watcher, hri);
} catch (KeeperException ke) {
server.abort("Unexpected ZK exception deleting node " + hri, ke);
}
if (zkTable.isDisablingOrDisabledTable(hri.getTable())) {
// Table going down anyway; offline the region and skip re-assign.
regionStates.regionOffline(hri);
it.remove();
continue;
}
// Mark the region offline and assign it again by SSH
regionStates.updateRegionState(hri, State.OFFLINE);
}
} finally {
lock.unlock();
}
}
return regions;
}
/**
 * Move a region per the given balancer plan: records the plan and issues
 * an unassign targeted at the plan's destination server. Only acts on
 * regions that are currently online and not from disabling/disabled tables.
 * @param plan Plan to execute.
 */
public void balance(final RegionPlan plan) {
HRegionInfo hri = plan.getRegionInfo();
TableName tableName = hri.getTable();
if (zkTable.isDisablingOrDisabledTable(tableName)) {
LOG.info("Ignored moving region of disabling/disabled table "
+ tableName);
return;
}
// Move the region only if it's assigned
String encodedName = hri.getEncodedName();
ReentrantLock lock = locker.acquireLock(encodedName);
try {
if (!regionStates.isRegionOnline(hri)) {
RegionState state = regionStates.getRegionState(encodedName);
LOG.info("Ignored moving region not assigned: " + hri + ", "
+ (state == null ? "not in region states" : state));
return;
}
synchronized (this.regionPlans) {
this.regionPlans.put(plan.getRegionName(), plan);
}
// The close/open cycle with a destination performs the actual move.
unassign(hri, false, plan.getDestination());
} finally {
lock.unlock();
}
}
// Stop the assignment manager: shuts down executors and, when the
// timeout monitor was activated, interrupts its chore threads.
public void stop() {
shutdown(); // Stop executor service, etc
if (tomActivated){
this.timeoutMonitor.interrupt();
this.timerUpdater.interrupt();
}
}
/**
 * Shutdown the threadpool executor service.
 * Immediate shutdown: pending ZK-event work is discarded, both executor
 * pools are stopped, and the region state store is closed.
 */
public void shutdown() {
// It's an immediate shutdown, so we're clearing the remaining tasks.
synchronized (zkEventWorkerWaitingList){
zkEventWorkerWaitingList.clear();
}
threadPoolExecutorService.shutdownNow();
zkEventWorkers.shutdownNow();
regionStateStore.stop();
}
// Mark the table ENABLED in ZooKeeper; aborts the server on ZK failure
// (this is invoked during the startup flow, where aborting is acceptable).
protected void setEnabledTable(TableName tableName) {
try {
  this.zkTable.setEnabledTable(tableName);
} catch (KeeperException e) {
  // here we can abort as it is the start up flow
  final String msg = "Unable to ensure that the table " + tableName
      + " will be enabled because of a ZooKeeper issue";
  LOG.error(msg);
  this.server.abort(msg, e);
}
}
/**
 * Set region as OFFLINED up in zookeeper asynchronously.
 * @param state current region state; must be CLOSED or OFFLINE
 * @param cb callback invoked when the async znode creation completes
 * @param destination intended target server recorded in the znode
 * @return True if we succeeded, false otherwise (State was incorrect or failed
 * updating zk).
 */
private boolean asyncSetOfflineInZooKeeper(final RegionState state,
final AsyncCallback.StringCallback cb, final ServerName destination) {
if (!state.isClosed() && !state.isOffline()) {
this.server.abort("Unexpected state trying to OFFLINE; " + state,
new IllegalStateException());
return false;
}
// Update in-memory state first, then mirror it to ZK.
regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
try {
ZKAssign.asyncCreateNodeOffline(watcher, state.getRegion(),
destination, cb, state);
} catch (KeeperException e) {
if (e instanceof NodeExistsException) {
LOG.warn("Node for " + state.getRegion() + " already exists");
} else {
server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
}
return false;
}
return true;
}
// Try deleting the region's znode under each candidate event type, in
// order, stopping at the first success. 'desc' only labels log messages.
// Returns true if any delete succeeded; a missing node is treated as
// already deleted (debug log, return false).
private boolean deleteNodeInStates(String encodedName,
String desc, ServerName sn, EventType... types) {
try {
for (EventType et: types) {
if (ZKAssign.deleteNode(watcher, encodedName, et, sn)) {
return true;
}
}
LOG.info("Failed to delete the " + desc + " node for "
+ encodedName + ". The node type may not match");
} catch (NoNodeException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("The " + desc + " node for " + encodedName + " already deleted");
}
} catch (KeeperException ke) {
server.abort("Unexpected ZK exception deleting " + desc
+ " node for the region " + encodedName, ke);
}
return false;
}
// Delete the region's merge-related znode, whichever merge state it is in.
private void deleteMergingNode(String encodedName, ServerName sn) {
deleteNodeInStates(encodedName, "merging", sn, EventType.RS_ZK_REGION_MERGING,
EventType.RS_ZK_REQUEST_REGION_MERGE, EventType.RS_ZK_REGION_MERGED);
}
  /** Delete the split transition znode for {@code encodedName}, whichever split state it is in. */
  private void deleteSplittingNode(String encodedName, ServerName sn) {
    deleteNodeInStates(encodedName, "splitting", sn, EventType.RS_ZK_REGION_SPLITTING,
      EventType.RS_ZK_REQUEST_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT);
  }
private void onRegionFailedOpen(
final HRegionInfo hri, final ServerName sn) {
String encodedName = hri.getEncodedName();
AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
if (failedOpenCount == null) {
failedOpenCount = new AtomicInteger();
// No need to use putIfAbsent, or extra synchronization since
// this whole handleRegion block is locked on the encoded region
// name, and failedOpenTracker is updated only in this block
failedOpenTracker.put(encodedName, failedOpenCount);
}
if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
regionStates.updateRegionState(hri, State.FAILED_OPEN);
// remove the tracking info to save memory, also reset
// the count for next open initiative
failedOpenTracker.remove(encodedName);
} else {
// Handle this the same as if it were opened and then closed.
RegionState regionState = regionStates.updateRegionState(hri, State.CLOSED);
if (regionState != null) {
// When there are more than one region server a new RS is selected as the
// destination and the same is updated in the region plan. (HBASE-5546)
Set<TableName> disablingOrDisabled = null;
try {
disablingOrDisabled = ZKTable.getDisablingTables(watcher);
disablingOrDisabled.addAll(ZKTable.getDisabledTables(watcher));
} catch (KeeperException e) {
server.abort("Cannot retrieve info about disabling or disabled tables ", e);
}
if (disablingOrDisabled.contains(hri.getTable())) {
offlineDisabledRegion(hri);
return;
}
// ZK Node is in CLOSED state, assign it.
regionStates.updateRegionState(hri, RegionState.State.CLOSED);
// This below has to do w/ online enable/disable of a table
removeClosedRegion(hri);
try {
getRegionPlan(hri, sn, true);
} catch (HBaseIOException e) {
LOG.warn("Failed to get region plan", e);
}
invokeAssign(hri, false);
}
}
}
  /**
   * Handle a successful region open reported by server {@code sn}.
   * Marks the region online, cleans up its ZK node (when ZK-based assignment
   * is in use) and resets the failed-open counter.
   */
  private void onRegionOpen(
      final HRegionInfo hri, final ServerName sn, long openSeqNum) {
    regionOnline(hri, sn, openSeqNum);
    if (useZKForAssignment) {
      try {
        // Delete the ZNode if exists
        ZKAssign.deleteNodeFailSilent(watcher, hri);
      } catch (KeeperException ke) {
        server.abort("Unexpected ZK exception deleting node " + hri, ke);
      }
    }
    // reset the count, if any
    failedOpenTracker.remove(hri.getEncodedName());
    if (isTableDisabledOrDisabling(hri.getTable())) {
      // Table was disabled while the region was opening: close it again.
      invokeUnAssign(hri);
    }
  }
private void onRegionClosed(final HRegionInfo hri) {
if (isTableDisabledOrDisabling(hri.getTable())) {
offlineDisabledRegion(hri);
return;
}
regionStates.updateRegionState(hri, RegionState.State.CLOSED);
// This below has to do w/ online enable/disable of a table
removeClosedRegion(hri);
invokeAssign(hri, false);
}
  /**
   * Handle one step of a region split reported by server {@code sn}.
   * {@code p} is the parent, {@code a}/{@code b} the daughters.
   *
   * @return null on success, otherwise an error message explaining why the
   *         transition was rejected
   */
  private String onRegionSplit(ServerName sn, TransitionCode code,
      HRegionInfo p, HRegionInfo a, HRegionInfo b) {
    RegionState rs_p = regionStates.getRegionState(p);
    RegionState rs_a = regionStates.getRegionState(a);
    RegionState rs_b = regionStates.getRegionState(b);
    // NOTE(review): rs_p is dereferenced without a null check; presumably the
    // caller guarantees the parent is known — confirm, else this can NPE.
    if (!(rs_p.isOpenOrSplittingOnServer(sn)
        && (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn))
        && (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) {
      return "Not in state good for split";
    }
    // Record the in-flight split in memory before acting on the specific code.
    regionStates.updateRegionState(a, State.SPLITTING_NEW, sn);
    regionStates.updateRegionState(b, State.SPLITTING_NEW, sn);
    regionStates.updateRegionState(p, State.SPLITTING);
    if (code == TransitionCode.SPLIT) {
      if (TEST_SKIP_SPLIT_HANDLING) {
        return "Skipping split message, TEST_SKIP_SPLIT_HANDLING is set";
      }
      // Split completed: parent goes offline, daughters come online.
      regionOffline(p, State.SPLIT);
      regionOnline(a, sn, 1);
      regionOnline(b, sn, 1);
      // User could disable the table before master knows the new region.
      if (isTableDisabledOrDisabling(p.getTable())) {
        invokeUnAssign(a);
        invokeUnAssign(b);
      }
    } else if (code == TransitionCode.SPLIT_PONR) {
      // Point of no return: persist the split in meta.
      try {
        regionStateStore.splitRegion(p, a, b, sn);
      } catch (IOException ioe) {
        LOG.info("Failed to record split region " + p.getShortNameToLog());
        return "Failed to record the splitting in meta";
      }
    } else if (code == TransitionCode.SPLIT_REVERTED) {
      // Roll back: parent returns online, daughters are discarded.
      regionOnline(p, sn);
      regionOffline(a);
      regionOffline(b);
      if (isTableDisabledOrDisabling(p.getTable())) {
        invokeUnAssign(p);
      }
    }
    return null;
  }
private boolean isTableDisabledOrDisabling(TableName t) {
Set<TableName> disablingOrDisabled = null;
try {
disablingOrDisabled = ZKTable.getDisablingTables(watcher);
disablingOrDisabled.addAll(ZKTable.getDisabledTables(watcher));
} catch (KeeperException e) {
server.abort("Cannot retrieve info about disabling or disabled tables ", e);
}
return disablingOrDisabled.contains(t) ? true : false;
}
  /**
   * Handle one step of a region merge reported by server {@code sn}.
   * {@code p} is the merged region to be created, {@code a}/{@code b} the
   * regions being merged.
   *
   * @return null on success, otherwise an error message explaining why the
   *         transition was rejected
   */
  private String onRegionMerge(ServerName sn, TransitionCode code,
      HRegionInfo p, HRegionInfo a, HRegionInfo b) {
    RegionState rs_p = regionStates.getRegionState(p);
    RegionState rs_a = regionStates.getRegionState(a);
    RegionState rs_b = regionStates.getRegionState(b);
    // NOTE(review): rs_a/rs_b are dereferenced without null checks; presumably
    // the merging regions are always known to the master — confirm.
    if (!(rs_a.isOpenOrMergingOnServer(sn) && rs_b.isOpenOrMergingOnServer(sn)
        && (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) {
      return "Not in state good for merge";
    }
    // Record the in-flight merge in memory before acting on the specific code.
    regionStates.updateRegionState(a, State.MERGING);
    regionStates.updateRegionState(b, State.MERGING);
    regionStates.updateRegionState(p, State.MERGING_NEW, sn);
    String encodedName = p.getEncodedName();
    if (code == TransitionCode.READY_TO_MERGE) {
      mergingRegions.put(encodedName,
        new PairOfSameType<HRegionInfo>(a, b));
    } else if (code == TransitionCode.MERGED) {
      // Merge completed: sources go offline, merged region comes online.
      mergingRegions.remove(encodedName);
      regionOffline(a, State.MERGED);
      regionOffline(b, State.MERGED);
      regionOnline(p, sn, 1);
      // User could disable the table before master knows the new region.
      if (isTableDisabledOrDisabling(p.getTable())) {
        invokeUnAssign(p);
      }
    } else if (code == TransitionCode.MERGE_PONR) {
      // Point of no return: persist the merge in meta.
      try {
        regionStateStore.mergeRegions(p, a, b, sn);
      } catch (IOException ioe) {
        LOG.info("Failed to record merged region " + p.getShortNameToLog());
        return "Failed to record the merging in meta";
      }
    } else {
      // MERGE_REVERTED: roll back — sources return online, merged region dropped.
      mergingRegions.remove(encodedName);
      regionOnline(a, sn);
      regionOnline(b, sn);
      regionOffline(p);
      if (isTableDisabledOrDisabling(p.getTable())) {
        invokeUnAssign(a);
        invokeUnAssign(b);
      }
    }
    return null;
  }
/**
* A helper to handle region merging transition event.
* It transitions merging regions to MERGING state.
*/
  private boolean handleRegionMerging(final RegionTransition rt, final String encodedName,
      final String prettyPrintedRegionName, final ServerName sn) {
    // Ignore transitions from servers we no longer know about.
    if (!serverManager.isServerOnline(sn)) {
      LOG.warn("Dropped merging! ServerName=" + sn + " unknown.");
      return false;
    }
    // Payload carries [merged parent, region a, region b] serialized together.
    byte [] payloadOfMerging = rt.getPayload();
    List<HRegionInfo> mergingRegions;
    try {
      mergingRegions = HRegionInfo.parseDelimitedFrom(
        payloadOfMerging, 0, payloadOfMerging.length);
    } catch (IOException e) {
      LOG.error("Dropped merging! Failed reading " + rt.getEventType()
        + " payload for " + prettyPrintedRegionName);
      return false;
    }
    assert mergingRegions.size() == 3;
    HRegionInfo p = mergingRegions.get(0);
    HRegionInfo hri_a = mergingRegions.get(1);
    HRegionInfo hri_b = mergingRegions.get(2);
    RegionState rs_p = regionStates.getRegionState(p);
    RegionState rs_a = regionStates.getRegionState(hri_a);
    RegionState rs_b = regionStates.getRegionState(hri_b);
    // All three regions must be in a state compatible with merging on sn.
    if (!((rs_a == null || rs_a.isOpenOrMergingOnServer(sn))
        && (rs_b == null || rs_b.isOpenOrMergingOnServer(sn))
        && (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) {
      LOG.warn("Dropped merging! Not in state good for MERGING; rs_p="
        + rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b);
      return false;
    }
    EventType et = rt.getEventType();
    if (et == EventType.RS_ZK_REQUEST_REGION_MERGE) {
      try {
        // Master moves the pending_merge znode to MERGING on behalf of the RS.
        if (RegionMergeTransaction.transitionMergingNode(watcher, p,
            hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_MERGE,
            EventType.RS_ZK_REGION_MERGING) == -1) {
          byte[] data = ZKAssign.getData(watcher, encodedName);
          EventType currentType = null;
          if (data != null) {
            RegionTransition newRt = RegionTransition.parseFrom(data);
            currentType = newRt.getEventType();
          }
          // The transition may have raced with the RS; accept it if the node
          // already progressed to MERGING or MERGED by other means.
          if (currentType == null || (currentType != EventType.RS_ZK_REGION_MERGED
              && currentType != EventType.RS_ZK_REGION_MERGING)) {
            LOG.warn("Failed to transition pending_merge node "
              + encodedName + " to merging, it's now " + currentType);
            return false;
          }
        }
      } catch (Exception e) {
        LOG.warn("Failed to transition pending_merge node "
          + encodedName + " to merging", e);
        return false;
      }
    }
    synchronized (regionStates) {
      regionStates.updateRegionState(hri_a, State.MERGING);
      regionStates.updateRegionState(hri_b, State.MERGING);
      regionStates.updateRegionState(p, State.MERGING_NEW, sn);
      if (et != EventType.RS_ZK_REGION_MERGED) {
        this.mergingRegions.put(encodedName,
          new PairOfSameType<HRegionInfo>(hri_a, hri_b));
      } else {
        this.mergingRegions.remove(encodedName);
        regionOffline(hri_a, State.MERGED);
        regionOffline(hri_b, State.MERGED);
        regionOnline(p, sn);
      }
    }
    if (et == EventType.RS_ZK_REGION_MERGED) {
      LOG.debug("Handling MERGED event for " + encodedName + "; deleting node");
      // Remove region from ZK
      try {
        boolean successful = false;
        while (!successful) {
          // It's possible that the RS tickles in between the reading of the
          // znode and the deleting, so it's safe to retry.
          successful = ZKAssign.deleteNode(watcher, encodedName,
            EventType.RS_ZK_REGION_MERGED, sn);
        }
      } catch (KeeperException e) {
        if (e instanceof NoNodeException) {
          String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName);
          LOG.debug("The znode " + znodePath + " does not exist. May be deleted already.");
        } else {
          server.abort("Error deleting MERGED node " + encodedName, e);
        }
      }
      LOG.info("Handled MERGED event; merged=" + p.getRegionNameAsString()
        + ", region_a=" + hri_a.getRegionNameAsString() + ", region_b="
        + hri_b.getRegionNameAsString() + ", on " + sn);
      // User could disable the table before master knows the new region.
      if (zkTable.isDisablingOrDisabledTable(p.getTable())) {
        unassign(p);
      }
    }
    return true;
  }
/**
* A helper to handle region splitting transition event.
*/
  private boolean handleRegionSplitting(final RegionTransition rt, final String encodedName,
      final String prettyPrintedRegionName, final ServerName sn) {
    // Ignore transitions from servers we no longer know about.
    if (!serverManager.isServerOnline(sn)) {
      LOG.warn("Dropped splitting! ServerName=" + sn + " unknown.");
      return false;
    }
    // Payload carries the two daughter regions serialized together.
    byte [] payloadOfSplitting = rt.getPayload();
    List<HRegionInfo> splittingRegions;
    try {
      splittingRegions = HRegionInfo.parseDelimitedFrom(
        payloadOfSplitting, 0, payloadOfSplitting.length);
    } catch (IOException e) {
      LOG.error("Dropped splitting! Failed reading " + rt.getEventType()
        + " payload for " + prettyPrintedRegionName);
      return false;
    }
    assert splittingRegions.size() == 2;
    HRegionInfo hri_a = splittingRegions.get(0);
    HRegionInfo hri_b = splittingRegions.get(1);
    RegionState rs_p = regionStates.getRegionState(encodedName);
    RegionState rs_a = regionStates.getRegionState(hri_a);
    RegionState rs_b = regionStates.getRegionState(hri_b);
    // Parent and daughters must be in states compatible with splitting on sn.
    if (!((rs_p == null || rs_p.isOpenOrSplittingOnServer(sn))
        && (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn))
        && (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) {
      LOG.warn("Dropped splitting! Not in state good for SPLITTING; rs_p="
        + rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b);
      return false;
    }
    if (rs_p == null) {
      // Splitting region should be online
      rs_p = regionStates.updateRegionState(rt, State.OPEN);
      if (rs_p == null) {
        LOG.warn("Received splitting for region " + prettyPrintedRegionName
          + " from server " + sn + " but it doesn't exist anymore,"
          + " probably already processed its split");
        return false;
      }
      regionStates.regionOnline(rs_p.getRegion(), sn);
    }
    HRegionInfo p = rs_p.getRegion();
    EventType et = rt.getEventType();
    if (et == EventType.RS_ZK_REQUEST_REGION_SPLIT) {
      try {
        // Master moves the pending_split znode to SPLITTING on behalf of the RS.
        if (SplitTransaction.transitionSplittingNode(watcher, p,
            hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_SPLIT,
            EventType.RS_ZK_REGION_SPLITTING) == -1) {
          byte[] data = ZKAssign.getData(watcher, encodedName);
          EventType currentType = null;
          if (data != null) {
            RegionTransition newRt = RegionTransition.parseFrom(data);
            currentType = newRt.getEventType();
          }
          // The transition may have raced with the RS; accept it if the node
          // already progressed to SPLITTING or SPLIT by other means.
          if (currentType == null || (currentType != EventType.RS_ZK_REGION_SPLIT
              && currentType != EventType.RS_ZK_REGION_SPLITTING)) {
            LOG.warn("Failed to transition pending_split node "
              + encodedName + " to splitting, it's now " + currentType);
            return false;
          }
        }
      } catch (Exception e) {
        LOG.warn("Failed to transition pending_split node "
          + encodedName + " to splitting", e);
        return false;
      }
    }
    synchronized (regionStates) {
      regionStates.updateRegionState(hri_a, State.SPLITTING_NEW, sn);
      regionStates.updateRegionState(hri_b, State.SPLITTING_NEW, sn);
      regionStates.updateRegionState(rt, State.SPLITTING);
      // The below is for testing ONLY! We can't do fault injection easily, so
      // resort to this kinda uglyness -- St.Ack 02/25/2011.
      if (TEST_SKIP_SPLIT_HANDLING) {
        LOG.warn("Skipping split message, TEST_SKIP_SPLIT_HANDLING is set");
        return true; // return true so that the splitting node stays
      }
      if (et == EventType.RS_ZK_REGION_SPLIT) {
        regionOffline(p, State.SPLIT);
        regionOnline(hri_a, sn);
        regionOnline(hri_b, sn);
      }
    }
    if (et == EventType.RS_ZK_REGION_SPLIT) {
      LOG.debug("Handling SPLIT event for " + encodedName + "; deleting node");
      // Remove region from ZK
      try {
        boolean successful = false;
        while (!successful) {
          // It's possible that the RS tickles in between the reading of the
          // znode and the deleting, so it's safe to retry.
          successful = ZKAssign.deleteNode(watcher, encodedName,
            EventType.RS_ZK_REGION_SPLIT, sn);
        }
      } catch (KeeperException e) {
        if (e instanceof NoNodeException) {
          String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName);
          LOG.debug("The znode " + znodePath + " does not exist. May be deleted already.");
        } else {
          server.abort("Error deleting SPLIT node " + encodedName, e);
        }
      }
      LOG.info("Handled SPLIT event; parent=" + p.getRegionNameAsString()
        + ", daughter a=" + hri_a.getRegionNameAsString() + ", daughter b="
        + hri_b.getRegionNameAsString() + ", on " + sn);
      // User could disable the table before master knows the new region.
      if (zkTable.isDisablingOrDisabledTable(p.getTable())) {
        unassign(hri_a);
        unassign(hri_b);
      }
    }
    return true;
  }
/**
* A region is offline. The new state should be the specified one,
* if not null. If the specified state is null, the new state is Offline.
* The specified state can be Split/Merged/Offline/null only.
*/
  private void regionOffline(final HRegionInfo regionInfo, final State state) {
    // Record the offline (or Split/Merged) state, then clear all bookkeeping
    // tied to the region so it is not re-assigned by stale plans.
    regionStates.regionOffline(regionInfo, state);
    removeClosedRegion(regionInfo);
    // remove the region plan as well just in case.
    clearRegionPlan(regionInfo);
    balancer.regionOffline(regionInfo);
    // Tell our listeners that a region was closed
    sendRegionClosedNotification(regionInfo);
  }
private void sendRegionOpenedNotification(final HRegionInfo regionInfo,
final ServerName serverName) {
if (!this.listeners.isEmpty()) {
for (AssignmentListener listener : this.listeners) {
listener.regionOpened(regionInfo, serverName);
}
}
}
private void sendRegionClosedNotification(final HRegionInfo regionInfo) {
if (!this.listeners.isEmpty()) {
for (AssignmentListener listener : this.listeners) {
listener.regionClosed(regionInfo);
}
}
}
/**
* Try to update some region states. If the state machine prevents
* such update, an error message is returned to explain the reason.
*
* It's expected that in each transition there should have just one
* region for opening/closing, 3 regions for splitting/merging.
* These regions should be on the server that requested the change.
*
* Region state machine. Only these transitions
* are expected to be triggered by a region server.
*
* On the state transition:
* (1) Open/Close should be initiated by master
* (a) Master sets the region to pending_open/pending_close
* in memory and hbase:meta after sending the request
* to the region server
* (b) Region server reports back to the master
* after open/close is done (either success/failure)
* (c) If region server has problem to report the status
* to master, it must be because the master is down or some
* temporary network issue. Otherwise, the region server should
* abort since it must be a bug. If the master is not accessible,
* the region server should keep trying until the server is
* stopped or till the status is reported to the (new) master
* (d) If region server dies in the middle of opening/closing
* a region, SSH picks it up and finishes it
* (e) If master dies in the middle, the new master recovers
* the state during initialization from hbase:meta. Region server
* can report any transition that has not been reported to
* the previous active master yet
* (2) Split/merge is initiated by region servers
* (a) To split a region, a region server sends a request
* to master to try to set a region to splitting, together with
* two daughters (to be created) to splitting new. If approved
* by the master, the splitting can then move ahead
* (b) To merge two regions, a region server sends a request to
* master to try to set the new merged region (to be created) to
* merging_new, together with two regions (to be merged) to merging.
* If it is ok with the master, the merge can then move ahead
* (c) Once the splitting/merging is done, the region server
* reports the status back to the master either success/failure.
* (d) Other scenarios should be handled similarly as for
* region open/close
*/
protected String onRegionTransition(final ServerName serverName,
final RegionStateTransition transition) {
TransitionCode code = transition.getTransitionCode();
HRegionInfo hri = HRegionInfo.convert(transition.getRegionInfo(0));
RegionState current = regionStates.getRegionState(hri);
if (LOG.isDebugEnabled()) {
LOG.debug("Got transition " + code + " for "
+ (current != null ? current.toString() : hri.getShortNameToLog())
+ " from " + serverName);
}
String errorMsg = null;
switch (code) {
case OPENED:
if (current != null && current.isOpened() && current.isOnServer(serverName)) {
LOG.info("Region " + hri.getShortNameToLog() + " is already " + current.getState() + " on "
+ serverName);
break;
}
case FAILED_OPEN:
if (current == null
|| !current.isPendingOpenOrOpeningOnServer(serverName)) {
errorMsg = hri.getShortNameToLog()
+ " is not pending open on " + serverName;
} else if (code == TransitionCode.FAILED_OPEN) {
onRegionFailedOpen(hri, serverName);
} else {
long openSeqNum = HConstants.NO_SEQNUM;
if (transition.hasOpenSeqNum()) {
openSeqNum = transition.getOpenSeqNum();
}
if (openSeqNum < 0) {
errorMsg = "Newly opened region has invalid open seq num " + openSeqNum;
} else {
onRegionOpen(hri, serverName, openSeqNum);
}
}
break;
case CLOSED:
if (current == null
|| !current.isPendingCloseOrClosingOnServer(serverName)) {
errorMsg = hri.getShortNameToLog()
+ " is not pending close on " + serverName;
} else {
onRegionClosed(hri);
}
break;
case READY_TO_SPLIT:
case SPLIT_PONR:
case SPLIT:
case SPLIT_REVERTED:
errorMsg = onRegionSplit(serverName, code, hri,
HRegionInfo.convert(transition.getRegionInfo(1)),
HRegionInfo.convert(transition.getRegionInfo(2)));
break;
case READY_TO_MERGE:
case MERGE_PONR:
case MERGED:
case MERGE_REVERTED:
errorMsg = onRegionMerge(serverName, code, hri,
HRegionInfo.convert(transition.getRegionInfo(1)),
HRegionInfo.convert(transition.getRegionInfo(2)));
break;
default:
errorMsg = "Unexpected transition code " + code;
}
if (errorMsg != null) {
LOG.error("Failed to transtion region from " + current + " to "
+ code + " by " + serverName + ": " + errorMsg);
}
return errorMsg;
}
/**
* @return Instance of load balancer
*/
  public LoadBalancer getBalancer() {
    // Returns the live balancer instance used by this assignment manager.
    return this.balancer;
  }
}
| Jackygq1982/hbase_src | hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java | Java | apache-2.0 | 167,470 |
package com.p.service;
import java.util.Collection;
import java.util.Optional;
import java.util.Random;
import java.util.UUID;
import javax.annotation.Resource;
import org.apache.log4j.Logger;
import org.hibernate.SessionFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Isolation;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.Assert;
import com.p.model.Notificacion;
import com.p.model.Role;
import com.p.model.User;
import com.p.model.modelAux.RegisterUser;
import com.p.model.repositories.UserRepository;
@Service("usersService")
@Transactional(isolation = Isolation.READ_UNCOMMITTED)
public class UsersService {

    protected static Logger logger = Logger.getLogger("service");

    @Resource(name = "sessionFactory")
    private SessionFactory sessionFactory;

    @Autowired
    private UserRepository repository;

    @Autowired
    private NotificacionService notificacionService;

    @Autowired
    private EmailManager emailManager;

    @Autowired
    private PasswordEncoder passwordEncoder;

    /**
     * Deletes a user by id.
     *
     * @param id the id of an existing user; must be positive
     */
    @Transactional
    public void delete(Integer id) {
        Assert.notNull(id);
        Assert.isTrue(id > 0);
        repository.delete(id);
    }

    /**
     * Saves a new user or updates an existing one depending on whether the
     * id is already filled in. Also assigns a random avatar when missing and
     * triggers the welcome notification/email on first save.
     *
     * @param us the user to persist
     * @return the persisted entity
     */
    @Transactional()
    public User save(User us) {
        gestionarAvatar(us);
        gestionarAltaUsuario(us);
        User usr = repository.save(us);
        return usr;
    }

    /** Runs the sign-up side effects (notification + email) for brand-new users only. */
    protected void gestionarAltaUsuario(User us) {
        if (us.getId() == null || us.getId().equals(0)) {
            gestionarNotificacionAltaUsuario(us);
            gestionarEmailAltaUsuario(us);
        }
    }

    /** Sends the welcome email to a newly registered user. */
    protected void gestionarEmailAltaUsuario(User us) {
        emailManager.notify(us);
    }

    /**
     * Creates the in-app welcome notification for a new user, sent from the
     * first administrator found.
     *
     * @param us the newly registered user
     */
    protected void gestionarNotificacionAltaUsuario(User us) {
        // New user: send a welcome notification from an administrator account.
        Notificacion notificacion = notificacionService.create();
        Optional<User> admin = repository.findAdministradores().stream()
                .findFirst();
        Assert.isTrue(admin.isPresent());
        User administrador = admin.get();
        notificacion.setEmisor(administrador);
        notificacion.setReceptor(us);
        notificacion.setTitulo("Gracias por registrarte en Pachanga!");
        notificacion
                .setContenido("¿Porque no completas tu perfil? Quedará mucho más mono :)");
        notificacionService.save(notificacion);
    }

    /**
     * Assigns a random CSS avatar to the user when none is set yet.
     *
     * @param us the user to decorate
     */
    protected void gestionarAvatar(User us) {
        if (us.getAvatar() == null) {
            Random rd = new Random();
            us.setAvatar(User.avatarCss[rd.nextInt(User.avatarCss.length)]);
        }
    }

    /** Looks a user up by email (login); the email must be non-empty. */
    @Transactional
    public User getByEmail(String login) {
        Assert.notNull(login);
        Assert.isTrue(login.length() > 0);
        return repository.findByEmail(login);
    }

    /** Finds a user by id. */
    @Transactional
    public User findOne(Integer id) {
        Assert.notNull(id);
        Assert.isTrue(id > -1);
        return repository.findOne(id);
    }

    /** @return all users */
    @Transactional
    public Collection<User> findAll() {
        return repository.findAll();
    }

    /** @return all users except the one with the given email */
    @Transactional
    public Collection<User> findAllDifferent(String email) {
        return repository.findAllDifferent(email);
    }

    /**
     * Resolves the currently authenticated user from the Spring Security
     * context.
     *
     * @return the authenticated domain user, never null
     */
    @Transactional(readOnly = true)
    public User getPrincipal() {
        User result;
        SecurityContext context;
        Authentication authentication;
        Object principal;

        // If the asserts in this method fail, then you're
        // likely to have your Tomcat's working directory
        // corrupt. Please, clear your browser's cache, stop
        // Tomcat, update your Maven's project configuration,
        // clean your project, clean Tomcat's working directory,
        // republish your project, and start it over.
        context = SecurityContextHolder.getContext();
        Assert.notNull(context);
        authentication = context.getAuthentication();
        Assert.notNull(authentication);
        principal = authentication.getPrincipal();
        Assert.isTrue(principal instanceof org.springframework.security.core.userdetails.User);
        result = getByEmail(((org.springframework.security.core.userdetails.User) principal)
                .getUsername());
        Assert.notNull(result);
        Assert.isTrue(result.getId() != 0);

        return result;
    }

    /** Maps a registration form object onto a fresh domain user. */
    public User map(RegisterUser user) {
        User usr = create();
        usr.setEmail(user.getEmail());
        usr.setPassword(user.getPassword());
        return usr;
    }

    /** @return a blank user with the default ROLE_USER role */
    public User create() {
        User user = new User();
        user.setFirstName(" ");
        user.setLastName(" ");
        user.setRole(Role.ROLE_USER);
        return user;
    }

    /**
     * Generates a fresh random password for the user, stores only its encoded
     * form and emails the plain-text value to the user.
     *
     * Bug fix: the previous version encoded the password first and then
     * emailed the encoded hash, which the user cannot log in with.
     */
    @Transactional
    public void regenerarPassword(User user) {
        String plainPassword = UUID.randomUUID().toString();
        user.setPassword(passwordEncoder.encode(plainPassword));
        save(user);
        emailManager.notifyNewPassword(user, plainPassword);
    }

    /** Loads the avatar image bytes of the user with the given positive id. */
    @Transactional(isolation = Isolation.READ_UNCOMMITTED)
    public byte[] findImage(Integer id) {
        Assert.notNull(id);
        Assert.isTrue(id > 0);
        return repository.findImage(id);
    }

    /** Full-text search over users. */
    @Transactional(readOnly = true)
    public Collection<? extends User> find(String texto) {
        return repository.findFullText(texto);
    }
}
| david-romero/Pachanga | src/main/java/com/p/service/UsersService.java | Java | apache-2.0 | 5,489 |
from must import MustHavePatterns
from successor import Successor
class TestSuccessor(object):
    """Checks that creating a recursively-defined Successor blows the stack."""

    @classmethod
    def setup_class(cls):
        cls.test_patterns = MustHavePatterns(Successor)

    def test_successor(self):
        try:
            self.test_patterns.create(Successor)
        except RuntimeError as err:
            # The recursion limit error is the expected outcome.
            assert str(err).startswith("maximum recursion depth")
        else:
            raise Exception("Recursive structure did not explode.")
| umaptechnologies/must | examples/miscExamples/test_successor.py | Python | apache-2.0 | 457 |
(function() {
    'use strict';

    RequestResetController.$inject = ['$timeout', 'Auth'];

    angular
        .module('fitappApp')
        .controller('RequestResetController', RequestResetController);

    /**
     * Controller backing the "request password reset" form.
     * Exposes `requestReset()` plus success/error flags consumed by the view.
     */
    function RequestResetController ($timeout, Auth) {
        var vm = this;

        vm.error = null;
        vm.errorEmailNotExists = null;
        vm.requestReset = requestReset;
        vm.resetAccount = {};
        vm.success = null;

        // Focus the email input once the view has rendered.
        $timeout(function () {
            angular.element('#email').focus();
        });

        function requestReset () {
            vm.error = null;
            vm.errorEmailNotExists = null;
            Auth.resetPasswordInit(vm.resetAccount.email)
                .then(onResetOk)
                .catch(onResetFailed);
        }

        function onResetOk () {
            vm.success = 'OK';
        }

        function onResetFailed (response) {
            vm.success = null;
            if (response.status === 400 && response.data === 'e-mail address not registered') {
                vm.errorEmailNotExists = 'ERROR';
            } else {
                vm.error = 'ERROR';
            }
        }
    }
})();
| tomkasp/fitapp | src/main/webapp/app/account/reset/request/reset.request.controller.js | JavaScript | apache-2.0 | 1,089 |
#-*- encoding: utf-8 -*-
import csv, math, time, re, threading, sys
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
class ErAPI():
    # Constructor: basic configuration setup; starts the background data-loader
    # thread which in turn spawns the saver/updater threads.
    def __init__(self):
        self.data = {}
        # Data format: {'XXCiro|BNC': {'id': 123456, 'nick': 'XXCiro', 'level': 49, 'strength': 532.5, 'rank_points': 1233354, 'citizenship': 'Argentina'}}

        # Rank -> required rank points dictionary
        self.rank_required_points = {
            "Recruit": 0,
            "Private": 15,
            "Private*": 45,
            "Private**": 80,
            "Private***": 120,
            "Corporal": 170,
            "Corporal*": 250,
            "Corporal**": 350,
            "Corporal***": 450,
            "Sergeant": 600,
            "Sergeant*": 800,
            "Sergeant**": 1000,
            "Sergeant***": 1400,
            "Lieutenant": 1850,
            "Lieutenant*": 2350,
            "Lieutenant**": 3000,
            "Lieutenant***": 3750,
            "Captain": 5000,
            "Captain*": 6500,
            "Captain**": 9000,
            "Captain***": 12000,
            "Major": 15500,
            "Major*": 20000,
            "Major**": 25000,
            "Major***": 31000,
            "Commander": 40000,
            "Commander*": 52000,
            "Commander**": 67000,
            "Commander***": 85000,
            "Lt Colonel": 110000,
            "Lt Colonel*": 140000,
            "Lt Colonel**": 180000,
            "Lt Colonel***": 225000,
            "Colonel": 285000,
            "Colonel*": 355000,
            "Colonel**": 435000,
            "Colonel***": 540000,
            "General": 660000,
            "General*": 800000,
            "General**": 950000,
            "General***": 1140000,
            "Field Marshal": 1350000,
            "Field Marshal*": 1600000,
            "Field Marshal**": 1875000,
            "Field Marshal***": 2185000,
            "Supreme Marshal": 2550000,
            "Supreme Marshal*": 3000000,
            "Supreme Marshal**": 3500000,
            "Supreme Marshal***": 4150000,
            "National Force": 4900000,
            "National Force*": 5800000,
            "National Force**": 7000000,
            "National Force***": 9000000,
            "World Class Force": 11500000,
            "World Class Force*": 14500000,
            "World Class Force**": 18000000,
            "World Class Force***": 22000000,
            "Legendary Force": 26500000,
            "Legendary Force*": 31500000,
            "Legendary Force**": 37000000,
            "Legendary Force***": 42000000,
            "God of War": 50000000,
            "God of War*": 100000000 ,
            "God of War**": 200000000,
            "God of War***": 500000000,
            "Titan": 1000000000,
            "Titan*": 2000000000,
            "Titan**": 4000000000,
            "Titan***": 10000000000}

        # Ordered list of ranks by seniority (index = rank position)
        self.rank_to_pos = [
            "Recruit",
            "Private",
            "Private*",
            "Private**",
            "Private***",
            "Corporal",
            "Corporal*",
            "Corporal**",
            "Corporal***",
            "Sergeant",
            "Sergeant*",
            "Sergeant**",
            "Sergeant***",
            "Lieutenant",
            "Lieutenant*",
            "Lieutenant**",
            "Lieutenant***",
            "Captain",
            "Captain*",
            "Captain**",
            "Captain***",
            "Major",
            "Major*",
            "Major**",
            "Major***",
            "Commander",
            "Commander*",
            "Commander**",
            "Commander***",
            "Lt Colonel",
            "Lt Colonel*",
            "Lt Colonel**",
            "Lt Colonel***",
            "Colonel",
            "Colonel*",
            "Colonel**",
            "Colonel***",
            "General",
            "General*",
            "General**",
            "General***",
            "Field Marshal",
            "Field Marshal*",
            "Field Marshal**",
            "Field Marshal***",
            "Supreme Marshal",
            "Supreme Marshal*",
            "Supreme Marshal**",
            "Supreme Marshal***",
            "National Force",
            "National Force*",
            "National Force**",
            "National Force***",
            "World Class Force",
            "World Class Force*",
            "World Class Force**",
            "World Class Force***",
            "Legendary Force",
            "Legendary Force*",
            "Legendary Force**",
            "Legendary Force***",
            "God of War",
            "God of War*",
            "God of War**",
            "God of War***",
            "Titan",
            "Titan*",
            "Titan**",
            "Titan***",]

        # Run flag; useful if the update/save threads must be killed manually
        self.run = True

        # Data loading is parallelized in a new thread, daemonized so it dies
        # with the invoker in case of premature shutdown
        th = threading.Thread(target=self.data_loader)
        th.daemon = True
        th.start()

    # Bootstrap method: loads data and spawns the saver/updater threads;
    # only called from the constructor.
    def data_loader(self):
        self.load_data()

        self.data_saver_th = threading.Thread(target=self.data_saver)
        self.data_saver_th.daemon = True
        self.data_saver_th.start()

        self.data_updater_th = threading.Thread(target=self.data_updater)
        self.data_updater_th.daemon = True
        self.data_updater_th.start()

    # Periodically dumps data to the physical file; only called from data_loader.
    def data_saver(self):
        while self.run:
            self.save_data()
            time.sleep(60)

    # Periodically refreshes every tracked nick; only called from data_loader.
    def data_updater(self):
        while self.run:
            for irc_nick in self.data:
                self.update_data(irc_nick)
                time.sleep(30)
            time.sleep(600)

    # ---------------------------------------------------------------------------------- #
    # @ PUBLIC METHODS                                                                    #
    # ---------------------------------------------------------------------------------- #

    # Refreshes the object's in-memory data from the CSV file.
    # Errors are deliberately swallowed: a missing/corrupt file just means no data.
    def load_data(self):
        try:
            f = open('data/er_nick-data.csv', 'rt')
            reader = csv.reader(f)
            for nick_irc,id,nick_er,level,strength,rank_points,citizenship in reader:
                self.data[nick_irc] = {'id': int(id), 'nick': nick_er, 'level': int(level), 'strength': float(strength), 'rank_points': int(rank_points), 'citizenship': citizenship}
            f.close()
        except:
            pass

    # Persists the object's in-memory data to the CSV file (best-effort).
    def save_data(self):
        try:
            f = open('data/er_nick-data.csv', 'wt')
            writer = csv.writer(f)
            for u in self.data:
                writer.writerow([u, self.data[u]['id'], self.data[u]['nick'], self.data[u]['level'], self.data[u]['strength'], self.data[u]['rank_points'], self.data[u]['citizenship']])
            f.close()
        except:
            pass

    # Scraper: refreshes the cached profile data for the given IRC nick by
    # fetching and regex-parsing the citizen's public profile page.
    # NOTE(review): best-effort — any network/parse failure leaves stale data.
    def update_data(self, irc_nick):
        try:
            id = self.data[irc_nick]['id']
            c = urlopen('http://www.erepublik.com/es/citizen/profile/%d' % id)
            page = c.read()
            c.close()
            self.data[irc_nick]['nick'] = re.search('<meta name="title" content="(.+?) - Ciudadano del Nuevo Mundo" \/>', page.decode('utf-8')).group(1)
            self.data[irc_nick]['level'] = int(re.search('<strong class="citizen_level">(.+?)<\/strong>', page.decode('utf-8'), re.DOTALL).group(1))
            self.data[irc_nick]['strength'] = float(re.search('<span class="military_box_info mb_bottom">(.+?)</span>', page.decode('utf-8'), re.DOTALL).group(1).strip('\r\n\t ').replace(',',''))
            self.data[irc_nick]['rank_points'] = int(re.search('<span class="rank_numbers">(.+?) \/', page.decode('utf-8'), re.DOTALL).group(1).replace(',',''))
            self.data[irc_nick]['citizenship'] = re.search('<a href="http\:\/\/www.erepublik.com\/es\/country\/society\/([^ \t\n\x0B\f\r]+?)">', page.decode('utf-8')).group(1)
        except:
            pass

    # Registers (or re-points) an IRC nick to a citizen id and forces an
    # immediate refresh of its data.
    def reg_nick_write(self, nick, id):
        if(nick.lower() in self.data.keys()):
            self.data[nick.lower()]['id'] = int(id)
        else:
            self.data[nick.lower()] = {'id': int(id), 'nick': nick, 'level': 1, 'strength': 0, 'rank_points': 0, 'citizenship': ''}

        self.update_data(nick.lower())

    # Returns the citizen ID for the given IRC nick.
    def get_id(self, nick):
        return self.data[nick.lower()]['id']

    # Returns the LEVEL for the given IRC nick.
    def get_level(self, nick):
        return self.data[nick.lower()]['level']

    # Returns the STRENGTH for the given IRC nick.
    def get_strength(self, nick):
        return self.data[nick.lower()]['strength']

    # Returns the RANK POINTS for the given IRC nick.
    def get_rank_points(self, nick):
        return self.data[nick.lower()]['rank_points']

    # Returns the CITIZENSHIP for the given IRC nick.
    def get_citizenship(self, nick):
        return self.data[nick.lower()]['citizenship']

    # Returns the in-game NICK for the given IRC nick.
    def get_nick(self, nick):
        return self.data[nick.lower()]['nick']

    # Returns the rank name for a given amount of rank points: the highest
    # rank whose required points are below the given total.
    def calculate_rank_name(self, rank_points):
        index = 0
        for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]:
            if(self.rank_to_pos.index(k) > index):
                index = self.rank_to_pos.index(k)
        return self.rank_to_pos[index]

    # Computes hit damage from rank points, strength, weapon power, level and
    # bonus, using the in-game damage formula.
    # NOTE(review): the rank-index scan duplicates calculate_rank_name's loop;
    # a shared helper would avoid drift if the formula changes.
    def calculate_damage(self, rank_points, strength, weapon_power, level, bonus):
        index = 0
        for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]:
            if(self.rank_to_pos.index(k) > index):
                index = self.rank_to_pos.index(k)
return(math.trunc(((index / 20) + 0.3) * ((strength / 10) + 40) * (1 + (weapon_power / 100)) * (1.1 if level > 99 else 1) * bonus)) | CPedrini/TateTRES | erapi.py | Python | apache-2.0 | 11,009 |
package org.apache.solr.cloud;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.http.params.CoreConnectionPNames;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.Hash;
import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.update.DirectUpdateHandler2;
import org.apache.zookeeper.KeeperException;
import org.junit.After;
import org.junit.Before;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class ShardSplitTest extends BasicDistributedZkTest {
public static final String SHARD1_0 = SHARD1 + "_0";
public static final String SHARD1_1 = SHARD1 + "_1";
@Override
@Before
public void setUp() throws Exception {
super.setUp();
System.setProperty("numShards", Integer.toString(sliceCount));
System.setProperty("solr.xml.persist", "true");
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
if (VERBOSE || printLayoutOnTearDown) {
super.printLayout();
}
if (controlClient != null) {
controlClient.shutdown();
}
if (cloudClient != null) {
cloudClient.shutdown();
}
if (controlClientCloud != null) {
controlClientCloud.shutdown();
}
super.tearDown();
System.clearProperty("zkHost");
System.clearProperty("numShards");
System.clearProperty("solr.xml.persist");
// insurance
DirectUpdateHandler2.commitOnClose = true;
}
@Override
public void doTest() throws Exception {
waitForThingsToLevelOut(15);
ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
final int[] docCounts = new int[ranges.size()];
int numReplicas = shard1.getReplicas().size();
del("*:*");
for (int id = 0; id < 100; id++) {
indexAndUpdateCount(ranges, docCounts, id);
}
commit();
Thread indexThread = new Thread() {
@Override
public void run() {
for (int id = 101; id < atLeast(401); id++) {
try {
indexAndUpdateCount(ranges, docCounts, id);
Thread.sleep(atLeast(25));
} catch (Exception e) {
log.error("Exception while adding doc", e);
}
}
}
};
indexThread.start();
splitShard(SHARD1);
log.info("Layout after split: \n");
printLayout();
indexThread.join();
commit();
checkDocCountsAndShardStates(docCounts, numReplicas);
// todo can't call waitForThingsToLevelOut because it looks for jettys of all shards
// and the new sub-shards don't have any.
waitForRecoveriesToFinish(true);
//waitForThingsToLevelOut(15);
}
protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws SolrServerException, KeeperException, InterruptedException {
SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
query.set("distrib", false);
ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl());
QueryResponse response = shard1_0Server.query(query);
long shard10Count = response.getResults().getNumFound();
ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl());
QueryResponse response2 = shard1_1Server.query(query);
long shard11Count = response2.getResults().getNumFound();
logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);
assertEquals("Wrong doc count on shard1_0", docCounts[0], shard10Count);
assertEquals("Wrong doc count on shard1_1", docCounts[1], shard11Count);
ClusterState clusterState = null;
Slice slice1_0 = null, slice1_1 = null;
int i = 0;
for (i = 0; i < 10; i++) {
ZkStateReader zkStateReader = cloudClient.getZkStateReader();
zkStateReader.updateClusterState(true);
clusterState = zkStateReader.getClusterState();
slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0");
slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1");
if (Slice.ACTIVE.equals(slice1_0.getState()) && Slice.ACTIVE.equals(slice1_1.getState()))
break;
Thread.sleep(500);
}
log.info("ShardSplitTest waited for {} ms for shard state to be set to active", i * 500);
assertNotNull("Cluster state does not contain shard1_0", slice1_0);
assertNotNull("Cluster state does not contain shard1_0", slice1_1);
assertEquals("shard1_0 is not active", Slice.ACTIVE, slice1_0.getState());
assertEquals("shard1_1 is not active", Slice.ACTIVE, slice1_1.getState());
assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());
}
protected void splitShard(String shardId) throws SolrServerException, IOException {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString());
params.set("collection", "collection1");
params.set("shard", shardId);
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
.getBaseURL();
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
baseServer.setConnectionTimeout(15000);
baseServer.setSoTimeout((int) (CollectionsHandler.DEFAULT_ZK_TIMEOUT * 5));
baseServer.request(request);
}
protected void indexAndUpdateCount(List<DocRouter.Range> ranges, int[] docCounts, int id) throws Exception {
indexr("id", id);
// todo - hook in custom hashing
byte[] bytes = String.valueOf(id).getBytes("UTF-8");
int hash = Hash.murmurhash3_x86_32(bytes, 0, bytes.length, 0);
for (int i = 0; i < ranges.size(); i++) {
DocRouter.Range range = ranges.get(i);
if (range.includes(hash))
docCounts[i]++;
}
}
protected void logDebugHelp(int[] docCounts, QueryResponse response, long shard10Count, QueryResponse response2, long shard11Count) {
for (int i = 0; i < docCounts.length; i++) {
int docCount = docCounts[i];
log.info("Expected docCount for shard1_{} = {}", i, docCount);
}
log.info("Actual docCount for shard1_0 = {}", shard10Count);
log.info("Actual docCount for shard1_1 = {}", shard11Count);
Map<String, String> idVsVersion = new HashMap<String, String>();
Map<String, SolrDocument> shard10Docs = new HashMap<String, SolrDocument>();
Map<String, SolrDocument> shard11Docs = new HashMap<String, SolrDocument>();
for (int i = 0; i < response.getResults().size(); i++) {
SolrDocument document = response.getResults().get(i);
idVsVersion.put(document.getFieldValue("id").toString(), document.getFieldValue("_version_").toString());
SolrDocument old = shard10Docs.put(document.getFieldValue("id").toString(), document);
if (old != null) {
log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_0. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
}
}
for (int i = 0; i < response2.getResults().size(); i++) {
SolrDocument document = response2.getResults().get(i);
String value = document.getFieldValue("id").toString();
String version = idVsVersion.get(value);
if (version != null) {
log.error("DUPLICATE: ID: " + value + " , shard1_0Version: " + version + " shard1_1Version:" + document.getFieldValue("_version_"));
}
SolrDocument old = shard11Docs.put(document.getFieldValue("id").toString(), document);
if (old != null) {
log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_1. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
}
}
}
@Override
protected SolrServer createNewSolrServer(String collection, String baseUrl) {
HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(collection, baseUrl);
server.setSoTimeout(5 * 60 * 1000);
return server;
}
@Override
protected SolrServer createNewSolrServer(int port) {
HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(port);
server.setSoTimeout(5 * 60 * 1000);
return server;
}
@Override
protected CloudSolrServer createCloudClient(String defaultCollection) throws MalformedURLException {
CloudSolrServer client = super.createCloudClient(defaultCollection);
client.getLbServer().getHttpClient().getParams().setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000);
return client;
}
}
| halentest/solr | solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java | Java | apache-2.0 | 11,050 |
package org.zstack.header.identity;
import org.zstack.header.message.APICreateMessage;
import org.zstack.header.message.APIMessage;
import org.zstack.header.message.APIParam;
/**
 * API message that creates a new identity policy owned by the account bound to
 * the caller's session.  Requires {@code CREATE_POLICY_ROLE}.
 */
@NeedRoles(roles = {IdentityRoles.CREATE_POLICY_ROLE})
public class APICreatePolicyMsg extends APICreateMessage implements AccountMessage {
    /** Policy name (required API parameter). */
    @APIParam
    private String name;
    /** Optional human-readable description. */
    private String description;
    /** Policy statements payload (required API parameter). */
    @APIParam
    private String policyData;

    /** Returns the UUID of the account that owns the current session. */
    @Override
    public String getAccountUuid() {
        return this.getSession().getAccountUuid();
    }

    public String getPolicyData() {
        return policyData;
    }

    public void setPolicyData(String policyData) {
        this.policyData = policyData;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }
}
| SoftwareKing/zstack | header/src/main/java/org/zstack/header/identity/APICreatePolicyMsg.java | Java | apache-2.0 | 1,059 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Xml.Serialization;
namespace SAM.DTO
{
    /// <summary>Data-transfer object for a SAM user; XML-serialized as &lt;user&gt;.</summary>
    [XmlType(TypeName = "user")]
    public class User
    {
        /// <summary>Unique user identifier.</summary>
        public string id { get; set; }
        /// <summary>Display name of the user.</summary>
        public string name { get; set; }
        /// <summary>URL of the user's avatar image.</summary>
        public string avatar_url { get; set; }
    }

    /// <summary>Collection of <see cref="User"/> items; XML-serialized as &lt;users&gt;.</summary>
    [XmlType(TypeName = "users")]
    public class UserList : SamList<User>
    {
    }
}
| SAMdesk/sam-dotnet | src/SAM/DTO/User.cs | C# | apache-2.0 | 430 |
// Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.profiler.memory;
import com.google.common.base.Objects;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.MapMaker;
import com.google.devtools.build.lib.concurrent.ThreadSafety.ConditionallyThreadCompatible;
import com.google.devtools.build.lib.concurrent.ThreadSafety.ThreadSafe;
import com.google.devtools.build.lib.packages.AspectClass;
import com.google.devtools.build.lib.packages.RuleClass;
import com.google.devtools.build.lib.packages.RuleFunction;
import com.google.devtools.build.lib.syntax.Debug;
import com.google.devtools.build.lib.syntax.Location;
import com.google.devtools.build.lib.syntax.StarlarkCallable;
import com.google.devtools.build.lib.syntax.StarlarkThread;
import com.google.monitoring.runtime.instrumentation.Sampler;
import com.google.perftools.profiles.ProfileProto.Function;
import com.google.perftools.profiles.ProfileProto.Line;
import com.google.perftools.profiles.ProfileProto.Profile;
import com.google.perftools.profiles.ProfileProto.Sample;
import com.google.perftools.profiles.ProfileProto.ValueType;
import java.io.FileOutputStream;
import java.io.IOException;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.zip.GZIPOutputStream;
import javax.annotation.Nullable;
/**
 * Tracks allocations for memory reporting.
 *
 * <p>Installed as an allocation {@link Sampler}: {@link #sampleAllocation} is invoked
 * from instrumented allocation sites, records a weighted sample of live objects keyed
 * by the Starlark callstack and the rule/aspect under analysis, and the accumulated
 * samples are later aggregated into per-rule byte counts or a pprof profile.
 */
@ConditionallyThreadCompatible
@SuppressWarnings("ThreadLocalUsage") // the AllocationTracker is effectively a global
public final class AllocationTracker implements Sampler, Debug.ThreadHook {

  // A mapping from Java thread to StarlarkThread.
  // Used to effect a hidden StarlarkThread parameter to sampleAllocation.
  // TODO(adonovan): opt: merge the three different ThreadLocals in use here.
  private final ThreadLocal<StarlarkThread> starlarkThread = new ThreadLocal<>();

  @Override
  public void onPushFirst(StarlarkThread thread) {
    starlarkThread.set(thread);
  }

  @Override
  public void onPopLast(StarlarkThread thread) {
    starlarkThread.remove();
  }

  /** One recorded allocation: its analysis context, Starlark callstack and sampled size. */
  private static class AllocationSample {
    @Nullable final RuleClass ruleClass; // Current rule being analysed, if any
    @Nullable final AspectClass aspectClass; // Current aspect being analysed, if any
    final ImmutableList<Frame> callstack; // Starlark callstack, if any
    final long bytes;

    AllocationSample(
        @Nullable RuleClass ruleClass,
        @Nullable AspectClass aspectClass,
        ImmutableList<Frame> callstack,
        long bytes) {
      this.ruleClass = ruleClass;
      this.aspectClass = aspectClass;
      this.callstack = callstack;
      this.bytes = bytes;
    }
  }

  /** Snapshot of one Starlark stack frame (name + location), decoupled from the live thread. */
  private static class Frame {
    final String name;
    final Location loc;
    @Nullable final RuleFunction ruleFunction;

    Frame(String name, Location loc, @Nullable RuleFunction ruleFunction) {
      this.name = name;
      this.loc = loc;
      this.ruleFunction = ruleFunction;
    }
  }

  // Weak keys: a sample disappears when its object is garbage-collected,
  // so the map reflects live memory.
  private final Map<Object, AllocationSample> allocations = new MapMaker().weakKeys().makeMap();
  // Target number of bytes between samples, and the +/- jitter applied to it.
  private final int samplePeriod;
  private final int sampleVariance;
  // Set to false while aggregating/dumping so we don't track our own allocations.
  private boolean enabled = true;

  /**
   * Cheap wrapper class for a long. Avoids having to do two thread-local lookups per allocation.
   */
  private static final class LongValue {
    long value;
  }

  private final ThreadLocal<LongValue> currentSampleBytes = ThreadLocal.withInitial(LongValue::new);
  private final ThreadLocal<Long> nextSampleBytes = ThreadLocal.withInitial(this::getNextSample);
  private final Random random = new Random();

  AllocationTracker(int samplePeriod, int variance) {
    this.samplePeriod = samplePeriod;
    this.sampleVariance = variance;
  }

  // Called by instrumentation.recordAllocation, which is in turn called
  // by an instrumented version of the application assembled on the fly
  // by instrumentation.AllocationInstrumenter.
  // The instrumenter inserts a call to recordAllocation after every
  // memory allocation instruction in the original class.
  //
  // This function runs within 'new', so is not supposed to allocate memory;
  // see Sampler interface. In fact it allocates in nearly a dozen places.
  // TODO(adonovan): suppress reentrant calls by setting a thread-local flag.
  @Override
  @ThreadSafe
  public void sampleAllocation(int count, String desc, Object newObj, long size) {
    if (!enabled) {
      return;
    }

    @Nullable StarlarkThread thread = starlarkThread.get();

    // Calling Debug.getCallStack is a dubious operation here.
    // First it allocates memory, which breaks the Sampler contract.
    // Second, the allocation could in principle occur while the thread's
    // representation invariants are temporarily broken (that is, during
    // the call to ArrayList.add when pushing a new stack frame).
    // For now at least, the allocation done by ArrayList.add occurs before
    // the representation of the ArrayList is changed, so it is safe,
    // but this is a fragile assumption.
    ImmutableList<Debug.Frame> callstack =
        thread != null ? Debug.getCallStack(thread) : ImmutableList.of();

    RuleClass ruleClass = CurrentRuleTracker.getRule();
    AspectClass aspectClass = CurrentRuleTracker.getAspect();

    // Should we bother sampling?
    if (callstack.isEmpty() && ruleClass == null && aspectClass == null) {
      return;
    }

    // Convert the thread's stack right away to our internal form.
    // It is not safe to inspect Debug.Frame references once the thread resumes,
    // and keeping StarlarkCallable values live defeats garbage collection.
    ImmutableList.Builder<Frame> frames = ImmutableList.builderWithExpectedSize(callstack.size());
    for (Debug.Frame fr : callstack) {
      // The frame's PC location is currently not updated at every step,
      // only at function calls, so the leaf frame's line number may be
      // slightly off; see the tests.
      // TODO(b/149023294): remove comment when we move to a compiled representation.
      StarlarkCallable fn = fr.getFunction();
      frames.add(
          new Frame(
              fn.getName(),
              fr.getLocation(),
              fn instanceof RuleFunction ? (RuleFunction) fn : null));
    }

    // If we start getting stack overflows here, it's because the memory sampling
    // implementation has changed to call back into the sampling method immediately on
    // every allocation. Since thread locals can allocate, this can in this case lead
    // to infinite recursion. This method will then need to be rewritten to not
    // allocate, or at least not allocate to obtain its sample counters.
    LongValue bytesValue = currentSampleBytes.get();
    long bytes = bytesValue.value + size;
    if (bytes < nextSampleBytes.get()) {
      bytesValue.value = bytes;
      return;
    }
    bytesValue.value = 0;
    nextSampleBytes.set(getNextSample());
    allocations.put(newObj, new AllocationSample(ruleClass, aspectClass, frames.build(), bytes));
  }

  /** Returns the byte threshold for the next sample: samplePeriod +/- uniform jitter. */
  private long getNextSample() {
    return (long) samplePeriod
        + (sampleVariance > 0 ? (random.nextInt(sampleVariance * 2) - sampleVariance) : 0);
  }

  /** A pair of rule/aspect name and the bytes it consumes. */
  public static final class RuleBytes {
    private final String name;
    private long bytes;

    public RuleBytes(String name) {
      this.name = name;
    }

    /** The number of bytes total occupied by this rule or aspect class. */
    public long getBytes() {
      return bytes;
    }

    public RuleBytes addBytes(long bytes) {
      this.bytes += bytes;
      return this;
    }

    @Override
    public String toString() {
      return String.format("RuleBytes(%s, %d)", name, bytes);
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      RuleBytes ruleBytes = (RuleBytes) o;
      return bytes == ruleBytes.bytes && Objects.equal(name, ruleBytes.name);
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(name, bytes);
    }
  }

  // If the topmost stack entry is a call to a rule function, returns it.
  @Nullable
  private static RuleFunction getRule(AllocationSample sample) {
    Frame top = Iterables.getLast(sample.callstack, null);
    return top != null ? top.ruleFunction : null;
  }

  /**
   * Returns the total memory consumption for rules and aspects, keyed by {@link RuleClass#getKey}
   * or {@link AspectClass#getKey}.
   */
  public void getRuleMemoryConsumption(
      Map<String, RuleBytes> rules, Map<String, RuleBytes> aspects) {
    // Make sure we don't track our own allocations
    enabled = false;
    // Drop samples whose objects are no longer live (weak keys) before aggregating.
    System.gc();

    // Get loading phase memory for rules.
    for (AllocationSample sample : allocations.values()) {
      RuleFunction rule = getRule(sample);
      if (rule != null) {
        RuleClass ruleClass = rule.getRuleClass();
        String key = ruleClass.getKey();
        RuleBytes ruleBytes = rules.computeIfAbsent(key, k -> new RuleBytes(ruleClass.getName()));
        rules.put(key, ruleBytes.addBytes(sample.bytes));
      }
    }
    // Get analysis phase memory for rules and aspects
    for (AllocationSample sample : allocations.values()) {
      if (sample.ruleClass != null) {
        String key = sample.ruleClass.getKey();
        RuleBytes ruleBytes =
            rules.computeIfAbsent(key, k -> new RuleBytes(sample.ruleClass.getName()));
        rules.put(key, ruleBytes.addBytes(sample.bytes));
      }
      if (sample.aspectClass != null) {
        String key = sample.aspectClass.getKey();
        RuleBytes ruleBytes =
            aspects.computeIfAbsent(key, k -> new RuleBytes(sample.aspectClass.getName()));
        aspects.put(key, ruleBytes.addBytes(sample.bytes));
      }
    }

    enabled = true;
  }

  /** Dumps all Starlark analysis time allocations to a pprof-compatible file. */
  public void dumpSkylarkAllocations(String path) throws IOException {
    // Make sure we don't track our own allocations
    enabled = false;
    System.gc();
    Profile profile = buildMemoryProfile();
    try (GZIPOutputStream outputStream = new GZIPOutputStream(new FileOutputStream(path))) {
      profile.writeTo(outputStream);
      outputStream.finish();
    }
    enabled = true;
  }

  /** Builds a pprof {@link Profile} ("memory"/"bytes") from the recorded samples. */
  Profile buildMemoryProfile() {
    Profile.Builder profile = Profile.newBuilder();
    StringTable stringTable = new StringTable(profile);
    FunctionTable functionTable = new FunctionTable(profile, stringTable);
    LocationTable locationTable = new LocationTable(profile, functionTable);
    profile.addSampleType(
        ValueType.newBuilder()
            .setType(stringTable.get("memory"))
            .setUnit(stringTable.get("bytes"))
            .build());
    for (AllocationSample sample : allocations.values()) {
      // Skip empty callstacks
      if (sample.callstack.isEmpty()) {
        continue;
      }
      Sample.Builder b = Sample.newBuilder().addValue(sample.bytes);
      for (Frame fr : sample.callstack.reverse()) {
        b.addLocationId(locationTable.get(fr.loc.file(), fr.name, fr.loc.line()));
      }
      profile.addSample(b.build());
    }
    profile.setTimeNanos(Instant.now().getEpochSecond() * 1000000000);
    return profile.build();
  }

  /** Interning table for the pprof string section; index 0 is the empty string. */
  private static class StringTable {
    final Profile.Builder profile;
    final Map<String, Long> table = new HashMap<>();
    long index = 0;

    StringTable(Profile.Builder profile) {
      this.profile = profile;
      get(""); // 0 is reserved for the empty string
    }

    long get(String str) {
      return table.computeIfAbsent(
          str,
          key -> {
            profile.addStringTable(key);
            return index++;
          });
    }
  }

  /** Interning table for pprof Function records, keyed by file#function. */
  private static class FunctionTable {
    final Profile.Builder profile;
    final StringTable stringTable;
    final Map<String, Long> table = new HashMap<>();
    long index = 1; // 0 is reserved

    FunctionTable(Profile.Builder profile, StringTable stringTable) {
      this.profile = profile;
      this.stringTable = stringTable;
    }

    long get(String file, String function) {
      return table.computeIfAbsent(
          file + "#" + function,
          key -> {
            Function fn =
                Function.newBuilder()
                    .setId(index)
                    .setFilename(stringTable.get(file))
                    .setName(stringTable.get(function))
                    .build();
            profile.addFunction(fn);
            return index++;
          });
    }
  }

  /** Interning table for pprof Location records, keyed by file#function#line. */
  private static class LocationTable {
    final Profile.Builder profile;
    final FunctionTable functionTable;
    final Map<String, Long> table = new HashMap<>();
    long index = 1; // 0 is reserved

    LocationTable(Profile.Builder profile, FunctionTable functionTable) {
      this.profile = profile;
      this.functionTable = functionTable;
    }

    long get(String file, String function, long line) {
      return table.computeIfAbsent(
          file + "#" + function + "#" + line,
          key -> {
            com.google.perftools.profiles.ProfileProto.Location location =
                com.google.perftools.profiles.ProfileProto.Location.newBuilder()
                    .setId(index)
                    .addLine(
                        Line.newBuilder()
                            .setFunctionId(functionTable.get(file, function))
                            .setLine(line)
                            .build())
                    .build();
            profile.addLocation(location);
            return index++;
          });
    }
  }
}
| akira-baruah/bazel | src/main/java/com/google/devtools/build/lib/profiler/memory/AllocationTracker.java | Java | apache-2.0 | 14,364 |
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
from lxml import etree
import mock
import mox
from mox import IgnoreArg
from mox import IsA
import os
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import nfs as netapp_nfs
from cinder.volume.drivers.netapp import utils
from oslo.config import cfg
# Module-level handles: global oslo.config registry and this module's logger.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create_configuration():
    """Return a mox-mocked volume ``Configuration`` preset for the NFS tests.

    The mock expects a single ``append_config_values`` call and carries the
    NFS mount attributes the driver reads during setup.
    """
    configuration = mox.MockObject(conf.Configuration)
    configuration.append_config_values(mox.IgnoreArg())
    configuration.nfs_mount_point_base = '/mnt/test'
    configuration.nfs_mount_options = None
    return configuration
class FakeVolume(object):
    """Minimal stand-in for a Cinder volume supporting dict-style access."""

    def __init__(self, size=0):
        self.size = size
        # Synthetic unique id derived from the instance itself.
        self.id = hash(self)
        self.name = None

    def __getitem__(self, key):
        # Dict-style reads resolve against the instance attributes only.
        return vars(self)[key]

    def __setitem__(self, key, val):
        vars(self)[key] = val
class FakeSnapshot(object):
    """Minimal stand-in for a Cinder snapshot supporting dict-style reads."""

    def __init__(self, volume_size=0):
        self.volume_name = None
        self.name = None
        self.volume_id = None
        self.volume_size = volume_size
        self.user_id = None
        self.status = None

    def __getitem__(self, key):
        # Dict-style reads resolve against the instance attributes only.
        return vars(self)[key]
class FakeResponse(object):
    """Minimal stand-in for an API response object."""

    def __init__(self, status):
        """Initialize FakeResponse.

        :param status: Either 'failed' or 'passed'
        """
        self.Status = status
        # Only a failed response carries a Reason attribute, matching the
        # original shape (accessing Reason on a passed response raises).
        if status == 'failed':
            self.Reason = 'Sample error'
class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
def setUp(self):
super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
self._custom_setup()
def test_create_snapshot(self):
"""Test snapshot can be created and deleted."""
mox = self.mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_volume')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
volume = FakeVolume(1)
snapshot = FakeSnapshot(1)
location = '127.0.0.1:/nfs'
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._get_volume_location(IgnoreArg()).AndReturn(location)
drv.local_path(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all(IgnoreArg())
mox.ReplayAll()
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(loc, expected_result)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
drv._post_prov_deprov_in_ssc(IgnoreArg())
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
setattr(drv, 'ssc_enabled', False)
mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
netapp_nfs.NetAppDirectNfsDriver._check_flags()
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
    def test_do_setup(self):
        """do_setup() runs base setup, builds the client and custom setup."""
        mox = self.mox
        drv = self._driver
        mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
        mox.StubOutWithMock(drv, '_get_client')
        mox.StubOutWithMock(drv, '_do_custom_setup')
        netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
        drv._get_client()
        drv._do_custom_setup(IgnoreArg())
        mox.ReplayAll()
        drv.do_setup(IsA(context.RequestContext))
        mox.VerifyAll()
    def _prepare_clone_mock(self, status):
        """Record the mox expectations for a C-mode file clone.

        :param status: accepted for signature parity with the 7-mode
            override below; the C-mode expectations do not branch on it.
        :returns: the mox instance (caller must ReplayAll()).
        """
        drv = self._driver
        mox = self.mox
        volume = FakeVolume()
        setattr(volume, 'provider_location', '127.0.0.1:/nfs')
        mox.StubOutWithMock(drv, '_get_host_ip')
        mox.StubOutWithMock(drv, '_get_export_path')
        mox.StubOutWithMock(drv, '_get_if_info_by_ip')
        mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver')
        mox.StubOutWithMock(drv, '_clone_file')
        mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
        drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
        drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
        drv._get_if_info_by_ip('127.0.0.1').AndReturn(
            self._prepare_info_by_ip_response())
        drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol')
        drv._clone_file('nfsvol', 'volume_name', 'clone_name',
                        'openstack')
        drv._post_prov_deprov_in_ssc(IgnoreArg())
        return mox
    def _prepare_info_by_ip_response(self):
        """Build a canned ``net-interface-info`` API response.

        :returns: the children of the parsed ``attributes-list``
            element, mimicking what the NetApp API client returns for a
            lookup of the data LIF at 127.0.0.1 on vserver 'openstack'.
        """
        res = """<attributes-list>
                    <net-interface-info>
                    <address>127.0.0.1</address>
                    <administrative-status>up</administrative-status>
                    <current-node>fas3170rre-cmode-01</current-node>
                    <current-port>e1b-1165</current-port>
                    <data-protocols>
                      <data-protocol>nfs</data-protocol>
                    </data-protocols>
                    <dns-domain-name>none</dns-domain-name>
                    <failover-group/>
                    <failover-policy>disabled</failover-policy>
                    <firewall-policy>data</firewall-policy>
                    <home-node>fas3170rre-cmode-01</home-node>
                    <home-port>e1b-1165</home-port>
                    <interface-name>nfs_data1</interface-name>
                    <is-auto-revert>false</is-auto-revert>
                    <is-home>true</is-home>
                    <netmask>255.255.255.0</netmask>
                    <netmask-length>24</netmask-length>
                    <operational-status>up</operational-status>
                    <role>data</role>
                    <routing-group-name>c10.63.165.0/24</routing-group-name>
                    <use-failover-group>disabled</use-failover-group>
                    <vserver>openstack</vserver>
                    </net-interface-info></attributes-list>"""
        response_el = etree.XML(res)
        return api.NaElement(response_el).get_children()
def test_clone_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
share = 'ip:/share'
drv._clone_volume(volume_name, clone_name, volume_id, share)
mox.VerifyAll()
    def test_register_img_in_cache_noshare(self):
        """Registering an image clones it to the cache file on the share."""
        volume = {'id': '1', 'name': 'testvol'}
        volume['provider_location'] = '10.61.170.1:/share/path'
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
        drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
                                    '10.61.170.1:/share/path',
                                    'img-cache-12345')
        mox.ReplayAll()
        drv._register_image_in_cache(volume, '12345')
        mox.VerifyAll()
    def test_register_img_in_cache_with_share(self):
        """Register an image in the cache for a volume with a share.

        NOTE(review): this body is currently identical to
        test_register_img_in_cache_noshare above; the "with share"
        variation named by the test is not actually exercised — confirm
        whether a distinct share argument was intended.
        """
        volume = {'id': '1', 'name': 'testvol'}
        volume['provider_location'] = '10.61.170.1:/share/path'
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
        drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
                                    '10.61.170.1:/share/path',
                                    'img-cache-12345')
        mox.ReplayAll()
        drv._register_image_in_cache(volume, '12345')
        mox.VerifyAll()
def test_find_image_in_cache_no_shares(self):
drv = self._driver
drv._mounted_shares = []
result = drv._find_image_in_cache('image_id')
if not result:
pass
else:
self.fail('Return result is unexpected')
    def test_find_image_in_cache_shares(self):
        """A cached image file present on a mounted share is found."""
        drv = self._driver
        mox = self.mox
        drv._mounted_shares = ['testshare']
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(os.path, 'exists')
        drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
        os.path.exists('/mnt/img-cache-id').AndReturn(True)
        mox.ReplayAll()
        result = drv._find_image_in_cache('id')
        (share, file_name) = result[0]
        mox.VerifyAll()
        # Undo the driver state mutated for this test.
        drv._mounted_shares.remove('testshare')
        if (share == 'testshare' and file_name == 'img-cache-id'):
            pass
        else:
            LOG.warn(_("Share %(share)s and file name %(file_name)s")
                     % {'share': share, 'file_name': file_name})
            self.fail('Return result is unexpected')
def test_find_old_cache_files_notexists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', 720)
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == 0:
pass
else:
self.fail('No files expected but got return values.')
    def test_find_old_cache_files_exists(self):
        """Stale cache files found by 'find' are shortlisted and returned."""
        drv = self._driver
        mox = self.mox
        cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
               'img-cache*', '-amin', '+720']
        # NOTE(review): the threshold is a string here, while the
        # 'notexists' variant above uses an int — confirm both are
        # accepted by the driver.
        setattr(drv.configuration, 'expiry_thres_minutes', '720')
        files = '/mnt/img-id1\n/mnt/img-id2\n'
        r_files = ['img-id1', 'img-id2']
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_execute')
        mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
        drv._get_mount_point_for_share('share').AndReturn('/mnt')
        drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
        drv._shortlist_del_eligible_files(
            IgnoreArg(), r_files).AndReturn(r_files)
        mox.ReplayAll()
        res = drv._find_old_cache_files('share')
        mox.VerifyAll()
        # Every returned file must be one of the expected files, with
        # matching counts.
        if len(res) == len(r_files):
            for f in res:
                r_files.remove(f)
        else:
            self.fail('Returned files not same as expected.')
    def test_delete_files_till_bytes_free_success(self):
        """Cache files are deleted until the requested bytes are freed."""
        drv = self._driver
        mox = self.mox
        files = [('img-cache-1', 230), ('img-cache-2', 380)]
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_delete_file')
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        # The larger file (img-cache-2, 380) is expected to go first.
        drv._delete_file('/mnt/img-cache-2').AndReturn(True)
        drv._delete_file('/mnt/img-cache-1').AndReturn(True)
        mox.ReplayAll()
        drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
        mox.VerifyAll()
def test_clean_image_cache_exec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_find_old_cache_files')
mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 19, 81))
drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
drv._delete_files_till_bytes_free(
['f1', 'f2'], 'testshare', bytes_to_free=31)
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clean_image_cache_noexec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
    def test_clone_image_fromcache(self):
        """clone_image() reuses a compatible cached image file."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
        mox.StubOutWithMock(drv, '_post_clone_image')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn(
            [('share', 'file_name')])
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
        drv._post_clone_image(volume)
        mox.ReplayAll()
        drv.clone_image(volume, ('image_location', None), 'image_id', {})
        mox.VerifyAll()
def get_img_info(self, format):
class img_info(object):
def __init__(self, fmt):
self.file_format = fmt
return img_info(format)
    def test_clone_image_cloneableshare_nospace(self):
        """clone_image() declines when the share cannot hold the volume."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
        mox.ReplayAll()
        (prop, cloned) = drv. clone_image(
            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        # Not cloned and no provider location reported means a decline.
        if not cloned and not prop['provider_location']:
            pass
        else:
            self.fail('Expected not cloned, got cloned.')
    def test_clone_image_cloneableshare_raw(self):
        """A raw image on a cloneable share is cloned without conversion."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('raw'))
        drv._clone_volume(
            'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file({'name': 'vol'}, IgnoreArg())
        mox.ReplayAll()
        drv. clone_image(
            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
    def test_clone_image_cloneableshare_notraw(self):
        """A non-raw image is converted to raw and cached before cloning."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        # Post-conversion the image reads back as raw.
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file({'name': 'vol'}, IgnoreArg())
        mox.ReplayAll()
        drv. clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
    def test_clone_image_file_not_discovered(self):
        """An undiscovered cloned file is cleaned up and not reported."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(os.path, 'exists')
        mox.StubOutWithMock(drv, '_delete_file')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        # Discovery times out, so the driver deletes the stray file.
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        os.path.exists('/mnt/vol').AndReturn(True)
        drv._delete_file('/mnt/vol')
        mox.ReplayAll()
        vol_dict, result = drv. clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        self.assertFalse(result)
        self.assertFalse(vol_dict['bootable'])
        self.assertIsNone(vol_dict['provider_location'])
    def test_clone_image_resizefails(self):
        """A resize failure after cloning cleans up and reports no clone."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(os.path, 'exists')
        mox.StubOutWithMock(drv, '_delete_file')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        # The resize step blows up, triggering the cleanup path below.
        drv._resize_image_file(
            IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        os.path.exists('/mnt/vol').AndReturn(True)
        drv._delete_file('/mnt/vol')
        mox.ReplayAll()
        vol_dict, result = drv. clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        self.assertFalse(result)
        self.assertFalse(vol_dict['bootable'])
        self.assertIsNone(vol_dict['provider_location'])
def test_is_cloneable_share_badformats(self):
drv = self._driver
strgs = ['10.61.666.22:/share/img',
'nfs://10.61.666.22:/share/img',
'nfs://10.61.666.22//share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com//share/img',
'com.netapp.com://share/im\g',
'http://com.netapp.com://share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com:8080//share/img'
'nfs://com.netapp.com//img',
'nfs://[ae::sr::ty::po]/img']
for strg in strgs:
res = drv._is_cloneable_share(strg)
if res:
msg = 'Invalid format matched for url %s.' % strg
self.fail(msg)
def test_is_cloneable_share_goodformat1(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat2(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat3(self):
drv = self._driver
mox = self.mox
strg = 'nfs://com.netapp:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat4(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat5(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_check_share_in_use_no_conn(self):
drv = self._driver
share = drv._check_share_in_use(None, '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_invalid_conn(self):
drv = self._driver
share = drv._check_share_in_use(':8989', '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_incorrect_host(self):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_success(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
mox.VerifyAll()
if not share:
self.fail('Expected share not detected')
def test_construct_image_url_loc(self):
drv = self._driver
img_loc = (None,
[{'metadata':
{'share_location': 'nfs://host/path',
'mount_point': '/opt/stack/data/glance',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id'}])
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
def test_construct_image_url_direct(self):
drv = self._driver
img_loc = ("nfs://host/path/image-id", None)
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
    """Test direct NetApp C Mode driver only and not inherit."""

    def setUp(self):
        super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
        self._custom_setup()

    def _custom_setup(self):
        """Create the C-mode driver with SSC and copy-offload enabled."""
        kwargs = {}
        kwargs['netapp_mode'] = 'proxy'
        kwargs['configuration'] = create_configuration()
        self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
        self._driver.ssc_enabled = True
        self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'

    @mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
    def test_create_volume(self, mock_volume_extra_specs):
        """create_volume() reports the share the volume was placed on."""
        drv = self._driver
        drv.ssc_enabled = False
        extra_specs = {}
        mock_volume_extra_specs.return_value = extra_specs
        fake_share = 'localhost:myshare'
        with mock.patch.object(drv, '_ensure_shares_mounted'):
            with mock.patch.object(drv, '_find_shares',
                                   return_value=['localhost:myshare']):
                with mock.patch.object(drv, '_do_create_volume'):
                    volume_info = self._driver.create_volume(FakeVolume(1))
                    self.assertEqual(volume_info.get('provider_location'),
                                     fake_share)

    @mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
    def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
        """create_volume() applies the qos policy group extra spec."""
        drv = self._driver
        drv.ssc_enabled = False
        extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
        fake_volume = FakeVolume(1)
        fake_share = 'localhost:myshare'
        fake_qos_policy = 'qos_policy_1'
        mock_volume_extra_specs.return_value = extra_specs
        with mock.patch.object(drv, '_ensure_shares_mounted'):
            with mock.patch.object(drv, '_find_shares',
                                   return_value=['localhost:myshare']):
                with mock.patch.object(drv, '_do_create_volume'):
                    with mock.patch.object(drv,
                                           '_set_qos_policy_group_on_volume'
                                           ) as mock_set_qos:
                        volume_info = self._driver.create_volume(fake_volume)
                        self.assertEqual(volume_info.get('provider_location'),
                                         'localhost:myshare')
                        mock_set_qos.assert_called_once_with(fake_volume,
                                                             fake_share,
                                                             fake_qos_policy)

    def test_copy_img_to_vol_copyoffload_success(self):
        """copy_image_to_volume() uses copy offload when API >= (1, 20)."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._try_copyoffload = mock.Mock()
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._get_vol_for_share = mock.Mock(return_value='vol')
        drv._update_stale_vols = mock.Mock()
        drv.copy_image_to_volume(context, volume, image_service, image_id)
        drv._try_copyoffload.assert_called_once_with(context, volume,
                                                     image_service,
                                                     image_id)
        drv._update_stale_vols.assert_called_once_with('vol')

    def test_copy_img_to_vol_copyoffload_failure(self):
        """A copy offload failure falls back to the base implementation."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._try_copyoffload = mock.Mock(side_effect=Exception())
        netapp_nfs.NetAppNFSDriver.copy_image_to_volume = mock.Mock()
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._get_vol_for_share = mock.Mock(return_value='vol')
        drv._update_stale_vols = mock.Mock()
        drv.copy_image_to_volume(context, volume, image_service, image_id)
        drv._try_copyoffload.assert_called_once_with(context, volume,
                                                     image_service,
                                                     image_id)
        netapp_nfs.NetAppNFSDriver.copy_image_to_volume.\
            assert_called_once_with(context, volume, image_service, image_id)
        drv._update_stale_vols.assert_called_once_with('vol')

    def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
        """A missing copy offload binary propagates the original OSError."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = mock.Mock()
        image_service.get_location.return_value = (mock.Mock(), mock.Mock())
        image_service.show.return_value = {'size': 0}
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._find_image_in_cache = mock.Mock(return_value=[])
        drv._construct_image_nfs_url = mock.Mock(return_value="")
        drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
                                                               "dr"))
        drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1")
        drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
        drv._get_host_ip = mock.Mock()
        drv._get_provider_location = mock.Mock()
        drv._get_export_path = mock.Mock(return_value="dr")
        drv._check_share_can_hold_size = mock.Mock()
        # Raise error as if the copyoffload file can not be found
        drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
        # Verify the original error is propagated
        self.assertRaises(OSError, drv._try_copyoffload,
                          context, volume, image_service, image_id)

    def test_copyoffload_frm_cache_success(self):
        """_try_copyoffload() copies from the cache when an entry exists."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
        drv._copy_from_cache = mock.Mock(return_value=True)
        drv._try_copyoffload(context, volume, image_service, image_id)
        drv._copy_from_cache.assert_called_once_with(volume,
                                                     image_id,
                                                     [('share', 'img')])

    def test_copyoffload_frm_img_service_success(self):
        """_try_copyoffload() uses the image service on a cache miss."""
        drv = self._driver
        context = object()
        volume = {'id': 'vol_id', 'name': 'name'}
        image_service = object()
        image_id = 'image_id'
        drv._client = mock.Mock()
        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
        drv._find_image_in_cache = mock.Mock(return_value=[])
        drv._copy_from_img_service = mock.Mock()
        drv._try_copyoffload(context, volume, image_service, image_id)
        drv._copy_from_img_service.assert_called_once_with(context,
                                                           volume,
                                                           image_service,
                                                           image_id)

    def test_cache_copyoffload_workflow_success(self):
        """Copy offload from cache invokes the tool with expected args."""
        drv = self._driver
        volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
        image_id = 'image_id'
        cache_result = [('ip1:/openstack', 'img-cache-imgid')]
        drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
        drv._get_host_ip = mock.Mock(return_value='ip2')
        drv._get_export_path = mock.Mock(return_value='/exp_path')
        drv._execute = mock.Mock()
        drv._register_image_in_cache = mock.Mock()
        drv._get_provider_location = mock.Mock(return_value='/share')
        drv._post_clone_image = mock.Mock()
        copied = drv._copy_from_cache(volume, image_id, cache_result)
        self.assertTrue(copied)
        drv._get_ip_verify_on_cluster.assert_any_call('ip1')
        drv._get_export_path.assert_called_with('vol_id')
        drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
                                             '/openstack/img-cache-imgid',
                                             '/exp_path/name',
                                             run_as_root=False,
                                             check_exit_code=0)
        drv._post_clone_image.assert_called_with(volume)
        drv._get_provider_location.assert_called_with('vol_id')

    @mock.patch.object(image_utils, 'qemu_img_info')
    def test_img_service_raw_copyoffload_workflow_success(self,
                                                          mock_qemu_img_info):
        """Raw images from the image service are offloaded unconverted."""
        drv = self._driver
        volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
        image_id = 'image_id'
        context = object()
        image_service = mock.Mock()
        image_service.get_location.return_value = ('nfs://ip1/openstack/img',
                                                   None)
        image_service.show.return_value = {'size': 1,
                                           'disk_format': 'raw'}
        drv._check_get_nfs_path_segs = mock.Mock(return_value=
                                                 ('ip1', '/openstack'))
        drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
        drv._get_host_ip = mock.Mock(return_value='ip2')
        drv._get_export_path = mock.Mock(return_value='/exp_path')
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._execute = mock.Mock()
        drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
        drv._discover_file_till_timeout = mock.Mock(return_value=True)
        img_inf = mock.Mock()
        img_inf.file_format = 'raw'
        mock_qemu_img_info.return_value = img_inf
        drv._check_share_can_hold_size = mock.Mock()
        drv._move_nfs_file = mock.Mock(return_value=True)
        drv._delete_file = mock.Mock()
        drv._clone_file_dst_exists = mock.Mock()
        drv._post_clone_image = mock.Mock()
        drv._copy_from_img_service(context, volume, image_service, image_id)
        drv._get_ip_verify_on_cluster.assert_any_call('ip1')
        drv._get_export_path.assert_called_with('vol_id')
        drv._check_share_can_hold_size.assert_called_with('share', 1)
        assert drv._execute.call_count == 1
        drv._post_clone_image.assert_called_with(volume)

    @mock.patch.object(image_utils, 'convert_image')
    @mock.patch.object(image_utils, 'qemu_img_info')
    @mock.patch('os.path.exists')
    def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
                                                            mock_qemu_img_info,
                                                            mock_cvrt_image):
        """qcow2 images are converted to raw before the offload copy."""
        drv = self._driver
        volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
        image_id = 'image_id'
        context = object()
        image_service = mock.Mock()
        image_service.get_location.return_value = ('nfs://ip1/openstack/img',
                                                   None)
        image_service.show.return_value = {'size': 1,
                                           'disk_format': 'qcow2'}
        drv._check_get_nfs_path_segs = mock.Mock(return_value=
                                                 ('ip1', '/openstack'))
        drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
        drv._get_host_ip = mock.Mock(return_value='ip2')
        drv._get_export_path = mock.Mock(return_value='/exp_path')
        drv._get_provider_location = mock.Mock(return_value='share')
        drv._execute = mock.Mock()
        drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
        img_inf = mock.Mock()
        img_inf.file_format = 'raw'
        mock_qemu_img_info.return_value = img_inf
        drv._check_share_can_hold_size = mock.Mock()
        drv._move_nfs_file = mock.Mock(return_value=True)
        drv._delete_file = mock.Mock()
        drv._clone_file_dst_exists = mock.Mock()
        drv._post_clone_image = mock.Mock()
        drv._copy_from_img_service(context, volume, image_service, image_id)
        drv._get_ip_verify_on_cluster.assert_any_call('ip1')
        drv._get_export_path.assert_called_with('vol_id')
        drv._check_share_can_hold_size.assert_called_with('share', 1)
        assert mock_cvrt_image.call_count == 1
        assert drv._execute.call_count == 1
        assert drv._delete_file.call_count == 2
        # BUG FIX: this comparison previously lacked 'assert', making it
        # a no-op expression that never verified anything.
        assert drv._clone_file_dst_exists.call_count == 1
        drv._post_clone_image.assert_called_with(volume)
class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
    """Test the direct NetApp 7-mode NFS driver (overrides C-mode cases)."""

    def _custom_setup(self):
        # Build the 7-mode driver instead of the C-mode one.
        self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
            configuration=create_configuration())

    def _prepare_delete_snapshot_mock(self, snapshot_exists):
        # 7-mode variant: unlike the C-mode version above, there is no
        # SSC bookkeeping hook and only one provider-location lookup.
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_get_provider_location')
        mox.StubOutWithMock(drv, '_volume_not_present')
        if snapshot_exists:
            mox.StubOutWithMock(drv, '_execute')
            mox.StubOutWithMock(drv, '_get_volume_path')
        drv._get_provider_location(IgnoreArg())
        drv._volume_not_present(IgnoreArg(), IgnoreArg())\
            .AndReturn(not snapshot_exists)
        if snapshot_exists:
            drv._get_volume_path(IgnoreArg(), IgnoreArg())
            drv._execute('rm', None, run_as_root=True)
        mox.ReplayAll()
        return mox

    def test_check_for_setup_error_version(self):
        """Setup fails when the API version is missing or unsupported."""
        drv = self._driver
        drv._client = api.NaServer("127.0.0.1")
        # check exception raises when version not found
        self.assertRaises(exception.VolumeBackendAPIException,
                          drv.check_for_setup_error)
        drv._client.set_api_version(1, 8)
        # check exception raises when not supported version
        self.assertRaises(exception.VolumeBackendAPIException,
                          drv.check_for_setup_error)

    def test_check_for_setup_error(self):
        """Setup succeeds once flags are set and the API version is 1.9."""
        mox = self.mox
        drv = self._driver
        drv._client = api.NaServer("127.0.0.1")
        drv._client.set_api_version(1, 9)
        required_flags = [
            'netapp_transport_type',
            'netapp_login',
            'netapp_password',
            'netapp_server_hostname',
            'netapp_server_port']
        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, None)
        # check exception raises when flags are not set
        self.assertRaises(exception.CinderException,
                          drv.check_for_setup_error)
        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, 'val')
        mox.ReplayAll()
        drv.check_for_setup_error()
        mox.VerifyAll()
        # restore initial FLAGS
        for flag in required_flags:
            delattr(drv.configuration, flag)

    def test_do_setup(self):
        """do_setup() runs base setup, builds the client and custom setup."""
        mox = self.mox
        drv = self._driver
        mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
        mox.StubOutWithMock(drv, '_get_client')
        mox.StubOutWithMock(drv, '_do_custom_setup')
        netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
        drv._get_client()
        drv._do_custom_setup(IgnoreArg())
        mox.ReplayAll()
        drv.do_setup(IsA(context.RequestContext))
        mox.VerifyAll()

    def _prepare_clone_mock(self, status):
        """Record 7-mode clone expectations; 'fail' also expects cleanup."""
        drv = self._driver
        mox = self.mox
        volume = FakeVolume()
        setattr(volume, 'provider_location', '127.0.0.1:/nfs')
        mox.StubOutWithMock(drv, '_get_export_ip_path')
        mox.StubOutWithMock(drv, '_get_actual_path_for_export')
        mox.StubOutWithMock(drv, '_start_clone')
        mox.StubOutWithMock(drv, '_wait_for_clone_finish')
        if status == 'fail':
            mox.StubOutWithMock(drv, '_clear_clone')
        drv._get_export_ip_path(
            IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
        drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs')
        drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2'))
        if status == 'fail':
            drv._wait_for_clone_finish('1', '2').AndRaise(
                api.NaApiError('error', 'error'))
            drv._clear_clone('1')
        else:
            drv._wait_for_clone_finish('1', '2')
        return mox

    def test_clone_volume_clear(self):
        """A failed clone clears the clone op and surfaces NaApiError."""
        drv = self._driver
        mox = self._prepare_clone_mock('fail')
        mox.ReplayAll()
        volume_name = 'volume_name'
        clone_name = 'clone_name'
        volume_id = volume_name + str(hash(volume_name))
        try:
            drv._clone_volume(volume_name, clone_name, volume_id)
        except Exception as e:
            # Only the expected NetApp API error is tolerated here;
            # anything else is a genuine failure and is re-raised.
            if isinstance(e, api.NaApiError):
                pass
            else:
                raise
        mox.VerifyAll()
| github-borat/cinder | cinder/tests/test_netapp_nfs.py | Python | apache-2.0 | 47,799 |
/* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0 */
package net.sf.mmm.service.base.client;
import java.util.Objects;

import net.sf.mmm.service.api.RemoteInvocationCall;
import net.sf.mmm.util.lang.api.function.Consumer;
/**
* This is a simple container for the data corresponding to a {@link RemoteInvocationCall}.
*
* @param <RESULT> is the generic type of the method return-type.
* @param <CALL> is the generic type of the {@link #getCall() call} data.
* @author Joerg Hohwiller (hohwille at users.sourceforge.net)
* @since 1.0.0
*/
/**
 * A simple container for the data corresponding to a single {@link RemoteInvocationCall}:
 * the call payload itself plus the success/failure callbacks that will receive its outcome.
 *
 * @param <RESULT> is the generic type of the method return-type.
 * @param <CALL> is the generic type of the {@link #getCall() call} data.
 * @author Joerg Hohwiller (hohwille at users.sourceforge.net)
 * @since 1.0.0
 */
public class RemoteInvocationCallData<RESULT, CALL extends RemoteInvocationCall> {

  /** The callback to receive the service result on success. */
  private final Consumer<? extends RESULT> successCallback;

  /** The callback to receive a potential service failure. */
  private final Consumer<Throwable> failureCallback;

  /** @see #getCall() */
  private CALL call;

  /**
   * The constructor.
   *
   * @param successCallback is the callback that {@link Consumer#accept(Object) receives} the result on
   *        success.
   * @param failureCallback is the callback that {@link Consumer#accept(Object) receives} the failure on
   *        error.
   */
  public RemoteInvocationCallData(Consumer<? extends RESULT> successCallback, Consumer<Throwable> failureCallback) {

    super();
    this.successCallback = successCallback;
    this.failureCallback = failureCallback;
  }

  /**
   * @return the successCallback.
   */
  public Consumer<? extends RESULT> getSuccessCallback() {

    return this.successCallback;
  }

  /**
   * @return the failureCallback.
   */
  public Consumer<Throwable> getFailureCallback() {

    return this.failureCallback;
  }

  /**
   * @return the actual call data (either {@link net.sf.mmm.service.api.command.RemoteInvocationCommand}
   *         itself or {@link net.sf.mmm.service.base.rpc.GenericRemoteInvocationRpcCall}).
   */
  public CALL getCall() {

    return this.call;
  }

  /**
   * @param call is the new value of {@link #getCall()}. Must not be {@code null} and may only be set once.
   * @throws NullPointerException if {@code call} is {@code null}.
   * @throws IllegalStateException if the call has already been set.
   */
  public void setCall(CALL call) {

    // The original used `assert`, which is a no-op unless the JVM runs with -ea;
    // enforce the single-assignment contract unconditionally instead.
    if (this.call != null) {
      throw new IllegalStateException("call has already been set");
    }
    this.call = Objects.requireNonNull(call, "call");
  }

}
| m-m-m/service | base/src/main/java/net/sf/mmm/service/base/client/RemoteInvocationCallData.java | Java | apache-2.0 | 2,216 |
/**
*
*/
package jframe.core.plugin;
import java.util.EventListener;
/**
 * Listener interface for receiving notifications about plugin changes.
 * Implementations register with the plugin framework and are called back
 * through {@link #pluginChanged(PluginEvent)}.
 *
 * @author dzh
 * @date Sep 12, 2013 9:42:33 PM
 * @since 1.0
 */
public interface PluginListener extends EventListener {

	/**
	 * Invoked when a plugin changes state.
	 *
	 * @param event the event describing the plugin change
	 */
	void pluginChanged(PluginEvent event);
}
| dzh/jframe | jframe/jframe-core/src/main/java/jframe/core/plugin/PluginListener.java | Java | apache-2.0 | 244 |