code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9
values | license stringclasses 15
values | size int32 3 1.05M |
|---|---|---|---|---|---|
<?php
/**
* Template Name: Style Guide
*
* @package {{PROJECT NAME}}
* @since {{VERSION}}
*/
?>
<?php
// Pull in the shared site header markup.
include_once('_incs/html/header.php');
// Standard WordPress loop: this template renders a single "Style Guide" page.
if (have_posts()) : while (have_posts()) : the_post();
// Raw post content; parsed below for <pre><code> sample blocks.
$body = get_the_content();
// Rebuild a matched <pre><code> sample so its markup displays as literal text.
// $matches[1] = attribute string of the original <pre> tag, $matches[2] = inner markup.
// FIX: the original replacement mapped '<'/'>' to themselves (a no-op — the entity
// targets were evidently lost to HTML decoding); angle brackets are now entity-encoded
// so the snippet shows as source code, and the raw markup is repeated afterwards in a
// "render as:" blowout so the style guide shows both the code and its live result.
function cgy_codifyBody($matches){
	return '<pre'.$matches[1].'><code'.$matches[1].'>'.
	       trim( str_replace( array('<', '>'),
	                          array('&lt;', '&gt;'), $matches[2]
	       )).
	       '</code></pre>'.PHP_EOL.
	       '<div class="blowout" data-label="render as:">'.$matches[2].'</div>';
}
?>
<article role="main">
<div class="gridbase">
<h1 class="entry-title"><?php the_title(); ?></h1>
<?php
// Emit a generated table of contents followed by the page body, with every
// <pre><code> block run through cgy_codifyBody() so each sample shows its
// source code and its rendered result side by side.
echo '<h2>Table of Contents</h2>'.PHP_EOL.
cgy_generate_toc($body).PHP_EOL.
preg_replace_callback( '%<pre(.*?)>\s*?<code.*?>(.*?)<\/code>\s*?<\/pre>%ism', 'cgy_codifyBody', $body );
?>
</div>
</article>
<?php
// Close the WordPress loop and render the shared footer.
endwhile; endif;
include_once('_incs/html/footer.php');
?> | criography/spidersock-generator | app/source/{{THEME SLUG}}/page-styleGuide.php | PHP | apache-2.0 | 1,229 |
module FHIR
  class Client
    # Gem release version string for the fhir_client library.
    VERSION = '1.6.9'
  end
end
| tambling/fhir_client | lib/fhir_client/version.rb | Ruby | apache-2.0 | 59 |
/*
* Copyright 2002-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.context.event.test;
/**
 * Simple event used by the context-event tests: an {@link IdentifiableApplicationEvent}
 * carrying an immutable message payload, with convenience constructors that default
 * the message and the source.
 *
 * @author Stephane Nicoll
 */
@SuppressWarnings("serial")
public class TestEvent extends IdentifiableApplicationEvent {

	/** Message payload; fixed at construction time. */
	public final String msg;

	/** Create an event with an explicit identifier and message. */
	public TestEvent(Object source, String id, String msg) {
		super(source, id);
		this.msg = msg;
	}

	/** Create an event with the given message; id handling is delegated to the superclass. */
	public TestEvent(Object source, String msg) {
		super(source);
		this.msg = msg;
	}

	/** Create an event with the default message {@code "test"}. */
	public TestEvent(Object source) {
		this(source, "test");
	}

	/** Create an event with a synthetic source object and the default message. */
	public TestEvent() {
		this(new Object());
	}

}
| spring-projects/spring-framework | spring-context/src/test/java/org/springframework/context/event/test/TestEvent.java | Java | apache-2.0 | 1,125 |
/**
* Copyright (C) 2010-2013 Alibaba Group Holding Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.rocketmq.client.impl;
import com.alibaba.rocketmq.client.impl.factory.MQClientInstance;
import com.alibaba.rocketmq.client.impl.producer.MQProducerInner;
import com.alibaba.rocketmq.client.log.ClientLogger;
import com.alibaba.rocketmq.common.UtilAll;
import com.alibaba.rocketmq.common.message.MessageConst;
import com.alibaba.rocketmq.common.message.MessageDecoder;
import com.alibaba.rocketmq.common.message.MessageExt;
import com.alibaba.rocketmq.common.message.MessageQueue;
import com.alibaba.rocketmq.common.protocol.RequestCode;
import com.alibaba.rocketmq.common.protocol.ResponseCode;
import com.alibaba.rocketmq.common.protocol.body.ConsumeMessageDirectlyResult;
import com.alibaba.rocketmq.common.protocol.body.ConsumerRunningInfo;
import com.alibaba.rocketmq.common.protocol.body.GetConsumerStatusBody;
import com.alibaba.rocketmq.common.protocol.body.ResetOffsetBody;
import com.alibaba.rocketmq.common.protocol.header.*;
import com.alibaba.rocketmq.remoting.RpcContext;
import com.alibaba.rocketmq.remoting.common.RemotingHelper;
import com.alibaba.rocketmq.remoting.exception.RemotingCommandException;
import com.alibaba.rocketmq.remoting.netty.NettyRequestProcessor;
import com.alibaba.rocketmq.remoting.protocol.RemotingCommand;
import io.netty.channel.ChannelHandlerContext;
import org.slf4j.Logger;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/**
 * Handles callback requests that the broker sends to the client, such as
 * transaction state checks and administrative commands (offset reset, consumer
 * status queries, direct message consumption).
 *
 * @author shijia.wxr<vintage.wang@gmail.com>
 * @since 2013-7-24
 */
public class ClientRemotingProcessor implements NettyRequestProcessor {
    private final Logger log = ClientLogger.getLog();
    private final MQClientInstance mqClientFactory;


    public ClientRemotingProcessor(final MQClientInstance mqClientFactory) {
        this.mqClientFactory = mqClientFactory;
    }


    /**
     * Dispatches an incoming broker request to the matching handler.
     *
     * @return the response to send back, or {@code null} for one-way requests
     *         and for request codes this processor does not handle
     */
    @Override
    public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request, RpcContext rpcContext)
            throws RemotingCommandException {
        switch (request.getCode()) {
        case RequestCode.CHECK_TRANSACTION_STATE:
            return this.checkTransactionState(ctx, request);
        case RequestCode.NOTIFY_CONSUMER_IDS_CHANGED:
            return this.notifyConsumerIdsChanged(ctx, request);
        case RequestCode.RESET_CONSUMER_CLIENT_OFFSET:
            return this.resetOffset(ctx, request);
        case RequestCode.GET_CONSUMER_STATUS_FROM_CLIENT:
            return this.getConsumeStatus(ctx, request);
        case RequestCode.GET_CONSUMER_RUNNING_INFO:
            return this.getConsumerRunningInfo(ctx, request);
        case RequestCode.CONSUME_MESSAGE_DIRECTLY:
            return this.consumeMessageDirectly(ctx, request);
        default:
            break;
        }
        return null;
    }


    /**
     * Consumes a single message on this client immediately (used by admin
     * tooling) and reports the consumption result back to the broker.
     */
    private RemotingCommand consumeMessageDirectly(ChannelHandlerContext ctx, RemotingCommand request)
            throws RemotingCommandException {
        final RemotingCommand response = RemotingCommand.createResponseCommand(null);
        final ConsumeMessageDirectlyResultRequestHeader requestHeader =
                (ConsumeMessageDirectlyResultRequestHeader) request
                    .decodeCommandCustomHeader(ConsumeMessageDirectlyResultRequestHeader.class);
        final MessageExt msg = MessageDecoder.decode(ByteBuffer.wrap(request.getBody()));
        ConsumeMessageDirectlyResult result =
                this.mqClientFactory.consumeMessageDirectly(msg, requestHeader.getConsumerGroup(),
                    requestHeader.getBrokerName());
        if (null != result) {
            response.setCode(ResponseCode.SUCCESS);
            response.setBody(result.encode());
        }
        else {
            response.setCode(ResponseCode.SYSTEM_ERROR);
            response.setRemark(String.format("The Consumer Group <%s> not exist in this consumer",
                requestHeader.getConsumerGroup()));
        }
        return response;
    }


    /**
     * Collects this client's runtime information (and, optionally, a thread
     * dump) for the requested consumer group.
     */
    private RemotingCommand getConsumerRunningInfo(ChannelHandlerContext ctx, RemotingCommand request)
            throws RemotingCommandException {
        final RemotingCommand response = RemotingCommand.createResponseCommand(null);
        final GetConsumerRunningInfoRequestHeader requestHeader =
                (GetConsumerRunningInfoRequestHeader) request
                    .decodeCommandCustomHeader(GetConsumerRunningInfoRequestHeader.class);
        ConsumerRunningInfo consumerRunningInfo =
                this.mqClientFactory.consumerRunningInfo(requestHeader.getConsumerGroup());
        if (null != consumerRunningInfo) {
            // Taking a thread dump is expensive, so only do it when explicitly requested.
            if (requestHeader.isJstackEnable()) {
                String jstack = UtilAll.jstack();
                consumerRunningInfo.setJstack(jstack);
            }
            response.setCode(ResponseCode.SUCCESS);
            response.setBody(consumerRunningInfo.encode());
        }
        else {
            response.setCode(ResponseCode.SYSTEM_ERROR);
            response.setRemark(String.format("The Consumer Group <%s> not exist in this consumer",
                requestHeader.getConsumerGroup()));
        }
        return response;
    }


    /**
     * One-way call from the broker asking the producer to check the state of a
     * prepared transaction message. No response is returned.
     */
    public RemotingCommand checkTransactionState(ChannelHandlerContext ctx, RemotingCommand request)
            throws RemotingCommandException {
        final CheckTransactionStateRequestHeader requestHeader =
                (CheckTransactionStateRequestHeader) request
                    .decodeCommandCustomHeader(CheckTransactionStateRequestHeader.class);
        final ByteBuffer byteBuffer = ByteBuffer.wrap(request.getBody());
        final MessageExt messageExt = MessageDecoder.decode(byteBuffer);
        if (messageExt != null) {
            // The producer group stored in the message identifies which local
            // producer instance must perform the transaction check.
            final String group = messageExt.getProperty(MessageConst.PROPERTY_PRODUCER_GROUP);
            if (group != null) {
                MQProducerInner producer = this.mqClientFactory.selectProducer(group);
                if (producer != null) {
                    final String addr = RemotingHelper.parseChannelRemoteAddr(ctx.channel());
                    producer.checkTransactionState(addr, messageExt, requestHeader);
                }
                else {
                    log.debug("checkTransactionState, pick producer by group[{}] failed", group);
                }
            }
            else {
                log.warn("checkTransactionState, pick producer group failed");
            }
        }
        else {
            log.warn("checkTransactionState, decode message failed");
        }
        return null;
    }


    /**
     * One-way notification that the consumer-id list of a group changed;
     * triggers an immediate rebalance. No response is returned.
     */
    public RemotingCommand notifyConsumerIdsChanged(ChannelHandlerContext ctx, RemotingCommand request)
            throws RemotingCommandException {
        try {
            final NotifyConsumerIdsChangedRequestHeader requestHeader =
                    (NotifyConsumerIdsChangedRequestHeader) request
                        .decodeCommandCustomHeader(NotifyConsumerIdsChangedRequestHeader.class);
            log.info("receive broker's notification[{}], the consumer group: {} changed, rebalance immediately",
                RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
                requestHeader.getConsumerGroup());
            this.mqClientFactory.rebalanceImmediately();
        }
        catch (Exception e) {
            // FIX: the message previously had no "{}" placeholder, so SLF4J
            // silently dropped the exception description argument.
            log.error("notifyConsumerIdsChanged exception. {}", RemotingHelper.exceptionSimpleDesc(e));
        }
        return null;
    }


    /**
     * One-way request to reset this client's consume offsets for a topic/group.
     * No response is returned.
     */
    public RemotingCommand resetOffset(ChannelHandlerContext ctx, RemotingCommand request)
            throws RemotingCommandException {
        final ResetOffsetRequestHeader requestHeader =
                (ResetOffsetRequestHeader) request.decodeCommandCustomHeader(ResetOffsetRequestHeader.class);
        log.info("invoke reset offset operation from broker. brokerAddr={}, topic={}, group={}, timestamp={}",
            RemotingHelper.parseChannelRemoteAddr(ctx.channel()), requestHeader.getTopic(),
            requestHeader.getGroup(), requestHeader.getTimestamp());
        // An absent body means "reset with an empty offset table".
        Map<MessageQueue, Long> offsetTable = new HashMap<MessageQueue, Long>();
        if (request.getBody() != null) {
            ResetOffsetBody body = ResetOffsetBody.decode(request.getBody(), ResetOffsetBody.class);
            offsetTable = body.getOffsetTable();
        }
        this.mqClientFactory.resetOffset(requestHeader.getTopic(), requestHeader.getGroup(), offsetTable);
        return null;
    }


    /**
     * Returns this consumer's consume-offset table for a topic/group.
     *
     * @deprecated marked deprecated upstream; presumably kept for wire
     *             compatibility with older brokers — confirm before removal
     */
    @Deprecated
    public RemotingCommand getConsumeStatus(ChannelHandlerContext ctx, RemotingCommand request)
            throws RemotingCommandException {
        final RemotingCommand response = RemotingCommand.createResponseCommand(null);
        final GetConsumerStatusRequestHeader requestHeader =
                (GetConsumerStatusRequestHeader) request
                    .decodeCommandCustomHeader(GetConsumerStatusRequestHeader.class);
        Map<MessageQueue, Long> offsetTable =
                this.mqClientFactory.getConsumerStatus(requestHeader.getTopic(), requestHeader.getGroup());
        GetConsumerStatusBody body = new GetConsumerStatusBody();
        body.setMessageQueueTable(offsetTable);
        response.setBody(body.encode());
        response.setCode(ResponseCode.SUCCESS);
        return response;
    }
}
| lizhanhui/Alibaba_RocketMQ | rocketmq-client/src/main/java/com/alibaba/rocketmq/client/impl/ClientRemotingProcessor.java | Java | apache-2.0 | 10,234 |
package cav.pdst.ui.activity;
import android.app.AlertDialog;
import android.content.Intent;
import android.support.v7.app.ActionBar;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.EditText;
import android.widget.Spinner;
import android.widget.TextView;
import java.text.SimpleDateFormat;
import java.util.Date;
import cav.pdst.R;
import cav.pdst.data.managers.DataManager;
import cav.pdst.data.models.AbonementModel;
import cav.pdst.ui.fragments.DatePickerFragment;
import cav.pdst.ui.fragments.DateTimeFragment;
import cav.pdst.utils.ConstantManager;
import cav.pdst.utils.Utils;
/**
 * Screen for creating, viewing, or editing a client's abonement (subscription)
 * or a single-visit ticket. The mode (new / view / edit) arrives via
 * {@link ConstantManager#MODE_ABONEMENT}; the edited data is handed back to the
 * caller through the activity result intent.
 */
public class AbonementActivity extends AppCompatActivity implements View.OnClickListener, DatePickerFragment.OnDateGetListener {

    // FIX: the date/time pattern was previously duplicated as string literals, one of
    // which ("dd.MM.yyy HH:mm") did not match the pattern used to parse the same field.
    /** Pattern used to display and parse plain dates, e.g. "Mon 01.05.2017". */
    private static final String DATE_PATTERN = "E dd.MM.yyyy";
    /** Pattern used to display and parse the debit date/time button text. */
    private static final String DATE_TIME_PATTERN = "dd.MM.yyyy HH:mm";

    private TextView mStartDate;
    private TextView mCreateDate;
    private TextView mEndDate;
    private TextView mCountTraining;
    private TextView mComent;
    private TextView mPay;
    private EditText mDebit;
    private Button mDebitDate;

    /** Activity mode: NEW_ABONEMENT, VIEW_ABONEMENT or EDIT_ABONEMENT. */
    private int mode;
    private Spinner mSpinner;
    /** Which date field the picker dialog is editing: 0 = start, 1 = end, 2 = create. */
    private int dMode;

    private DataManager mDataManager;
    private AbonementModel mAbonementModel;
    /** Spinner position doubles as the type: 0 = abonement, 1 = single training. */
    private int mAbType = 0;
    private String[] ab_type = new String[]{"Абонемент","Разовое занятие"};

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_abonement);
        mDataManager = DataManager.getInstance();

        mCreateDate = (TextView) findViewById(R.id.et_create_date);
        mStartDate = (TextView) findViewById(R.id.et_start_date);
        mEndDate = (TextView) findViewById(R.id.et_end_date);
        mPay = (TextView) findViewById(R.id.et_price_ab);
        mCountTraining = (TextView) findViewById(R.id.et_count_tr);
        mComent = (TextView) findViewById(R.id.et_coment);
        mDebit = (EditText) findViewById(R.id.et_debit);
        mDebitDate = (Button) findViewById(R.id.button_debit_date);

        mSpinner = (Spinner) findViewById(R.id.tv_spiner_type);
        ArrayAdapter<String> spinerAdapter = new ArrayAdapter<String>(this, android.R.layout.simple_spinner_item, ab_type);
        spinerAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        mSpinner.setAdapter(spinerAdapter);
        mSpinner.setOnItemSelectedListener(mItemSelected);

        mCreateDate.setOnClickListener(this);
        mStartDate.setOnClickListener(this);
        mEndDate.setOnClickListener(this);
        mDebitDate.setOnClickListener(this);

        SimpleDateFormat format = new SimpleDateFormat(DATE_PATTERN);
        mode = getIntent().getIntExtra(ConstantManager.MODE_ABONEMENT, ConstantManager.NEW_ABONEMENT);
        if (mode == ConstantManager.NEW_ABONEMENT) {
            // A brand-new abonement starts with today's creation date.
            Date date = new Date();
            mCreateDate.setText(format.format(date));
        } else {
            // View/edit: populate every field from the model passed by the caller.
            mAbonementModel = getIntent().getParcelableExtra(ConstantManager.AB_DETAIL_DATA);
            mCreateDate.setText(format.format(mAbonementModel.getCreateDate()));
            mStartDate.setText(format.format(mAbonementModel.getStartDate()));
            mEndDate.setText(format.format(mAbonementModel.getEndDate()));
            mCountTraining.setText(String.valueOf(mAbonementModel.getCountTraining()));
            mPay.setText(String.valueOf(mAbonementModel.getPay()));
            mComent.setText(mAbonementModel.getComment());
            mDebit.setText(String.valueOf(mAbonementModel.getDebit()));
            if (mAbonementModel.getDebitDate() != null) {
                mDebitDate.setText(new SimpleDateFormat(DATE_TIME_PATTERN).format(mAbonementModel.getDebitDate()));
            }
        }
        if (mode == ConstantManager.VIEW_ABONEMENT) {
            // Read-only mode: lock every input.
            mCreateDate.setEnabled(false);
            mStartDate.setEnabled(false);
            mEndDate.setEnabled(false);
            mCountTraining.setEnabled(false);
            mPay.setEnabled(false);
            mComent.setEnabled(false);
            mSpinner.setEnabled(false);
            mDebit.setEnabled(false);
            mDebitDate.setEnabled(false);
        }
        setupToolBar();
    }

    /** Enables the toolbar's "up" (back) arrow when an action bar is present. */
    private void setupToolBar() {
        ActionBar actionBar = getSupportActionBar();
        if (actionBar != null) {
            actionBar.setDisplayHomeAsUpEnabled(true);
        }
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        MenuInflater inflater = getMenuInflater();
        inflater.inflate(R.menu.all_save_menu, menu);
        // Nothing to save in read-only mode, so hide the save action.
        if (mode == ConstantManager.VIEW_ABONEMENT) {
            menu.findItem(R.id.save_item).setVisible(false);
        }
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        if (item.getItemId() == android.R.id.home) {
            onBackPressed();
        }
        if (item.getItemId() == R.id.save_item) {
            saveResult();
            onBackPressed();
        }
        return true;
    }

    @Override
    public void onBackPressed() {
        super.onBackPressed();
    }

    /**
     * Validates the form and packs the result into the activity result intent.
     * Does nothing in view mode or when no start date has been chosen; in edit
     * mode it refuses a training count below what has already been used.
     */
    public void saveResult() {
        if (mStartDate.getText().toString().length() == 0) {
            return;
        }
        if (mode == ConstantManager.VIEW_ABONEMENT) return;
        Intent answerIntent = new Intent();
        if (mode == ConstantManager.NEW_ABONEMENT) {
            answerIntent.putExtra(ConstantManager.AB_CREATEDATE, mCreateDate.getText().toString());
            answerIntent.putExtra(ConstantManager.AB_STARTDATE, mStartDate.getText().toString());
            answerIntent.putExtra(ConstantManager.AB_ENDDATE, mEndDate.getText().toString());
            if (mCountTraining.getText().toString().length() != 0) {
                answerIntent.putExtra(ConstantManager.AB_COUNT_TR, Integer.parseInt(mCountTraining.getText().toString()));
            } else {
                answerIntent.putExtra(ConstantManager.AB_COUNT_TR, 0);
            }
            answerIntent.putExtra(ConstantManager.AB_COMMENT, mComent.getText().toString());
            if (mPay.getText().toString().length() != 0) {
                answerIntent.putExtra(ConstantManager.AB_PAY, Float.parseFloat(mPay.getText().toString()));
            } else {
                answerIntent.putExtra(ConstantManager.AB_PAY, 0.0f);
            }
            answerIntent.putExtra(ConstantManager.AB_TYPE, mAbType);
            if (mDebit.getText().toString().length() != 0) {
                answerIntent.putExtra(ConstantManager.AB_DEBIT, Float.parseFloat(mDebit.getText().toString()));
            } else {
                answerIntent.putExtra(ConstantManager.AB_DEBIT, 0.0f);
            }
            answerIntent.putExtra(ConstantManager.AB_DEBIT_DATETIME, mDebitDate.getText().toString());
        }
        if (mode == ConstantManager.EDIT_ABONEMENT) {
            mAbonementModel.setStartDate(Utils.getSteToDate(mStartDate.getText().toString(), DATE_PATTERN));
            mAbonementModel.setEndDate(Utils.getSteToDate(mEndDate.getText().toString(), DATE_PATTERN));
            mAbonementModel.setCreateDate(Utils.getSteToDate(mCreateDate.getText().toString(), DATE_PATTERN)); // creation date
            mAbonementModel.setCountTraining(Integer.parseInt(mCountTraining.getText().toString()));
            mAbonementModel.setComment(mComent.getText().toString());
            mAbonementModel.setPay(Float.parseFloat(mPay.getText().toString()));
            mAbonementModel.setDebit(Float.parseFloat(mDebit.getText().toString()));
            if (mDebitDate.getText().toString().length() != 0) {
                mAbonementModel.setDebitDate(Utils.getSteToDate(mDebitDate.getText().toString(), DATE_TIME_PATTERN));
            }
            // Refuse to shrink the training count below the number already used.
            if (mAbonementModel.getUsedTraining() > mAbonementModel.getCountTraining()) {
                showInfoDialog();
                return;
            }
            answerIntent.putExtra(ConstantManager.AB_DETAIL_DATA, mAbonementModel);
        }
        setResult(RESULT_OK, answerIntent);
    }

    /** Warns the user that the training count cannot go below the used count. */
    private void showInfoDialog() {
        AlertDialog.Builder dialog = new AlertDialog.Builder(this);
        dialog.setTitle("Внимание !")
                .setMessage("Нельзя поставить количество тренировок меньше использованного")
                .setIcon(android.R.drawable.ic_dialog_info)
                .setPositiveButton(R.string.dialog_yes, null);
        dialog.show();
    }

    AdapterView.OnItemSelectedListener mItemSelected = new AdapterView.OnItemSelectedListener() {
        @Override
        public void onItemSelected(AdapterView<?> adapterView, View view, int position, long id) {
            // The spinner position equals the type: 0 = abonement, 1 = single training.
            mAbType = position;
            if (position == 1) {
                // Single training: exactly one session, no end date, no debit fields.
                mCountTraining.setText("1");
                mCountTraining.setVisibility(View.GONE);
                findViewById(R.id.tv_end_date).setVisibility(View.GONE);
                findViewById(R.id.tv_count_tr).setVisibility(View.GONE);
                mEndDate.setVisibility(View.GONE);
                mDebit.setVisibility(View.GONE);
                mDebitDate.setVisibility(View.GONE);
                findViewById(R.id.tv_debit_ti).setVisibility(View.GONE);
                findViewById(R.id.tv_alarm_txt).setVisibility(View.GONE);
            } else {
                mCountTraining.setVisibility(View.VISIBLE);
                mEndDate.setVisibility(View.VISIBLE);
                findViewById(R.id.tv_end_date).setVisibility(View.VISIBLE);
                findViewById(R.id.tv_count_tr).setVisibility(View.VISIBLE);
                mDebit.setVisibility(View.VISIBLE);
                mDebitDate.setVisibility(View.VISIBLE);
                findViewById(R.id.tv_debit_ti).setVisibility(View.VISIBLE);
                findViewById(R.id.tv_alarm_txt).setVisibility(View.VISIBLE);
            }
        }

        @Override
        public void onNothingSelected(AdapterView<?> adapterView) {
        }
    };

    /** Callback from the date-picker dialog; dMode selects the target field. */
    @Override
    public void OnDateGet(Date date) {
        SimpleDateFormat format = new SimpleDateFormat(DATE_PATTERN);
        if (dMode == 0) {
            mStartDate.setText(format.format(date));
            // A single training starts and ends on the same day.
            if (mAbType == 1) {
                mEndDate.setText(format.format(date));
            }
        } else if (dMode == 1) {
            mEndDate.setText(format.format(date));
        } else if (dMode == 2) {
            mCreateDate.setText(format.format(date));
        }
    }

    @Override
    public void onClick(View view) {
        DatePickerFragment dialog = DatePickerFragment.newInstance();
        switch (view.getId()) {
            case R.id.et_start_date:
                dMode = 0;
                dialog.show(getSupportFragmentManager(), ConstantManager.DIALOG_DATE);
                break;
            case R.id.et_end_date:
                dMode = 1;
                dialog.show(getSupportFragmentManager(), ConstantManager.DIALOG_DATE);
                break;
            case R.id.et_create_date:
                dMode = 2;
                dialog.show(getSupportFragmentManager(), ConstantManager.DIALOG_DATE);
                break;
            case R.id.button_debit_date:
                DateTimeFragment dateTimeFragment = new DateTimeFragment();
                dateTimeFragment.setOnDateTimeChangeListener(new DateTimeFragment.OnDateTimeChangeListener() {
                    @Override
                    public void OnDateTimeChange(Date date) {
                        // FIX: previously formatted with "dd.MM.yyy HH:mm", which did not
                        // match the "dd.MM.yyyy HH:mm" pattern used to parse this field.
                        mDebitDate.setText(new SimpleDateFormat(DATE_TIME_PATTERN).format(date));
                    }
                });
                dateTimeFragment.show(getSupportFragmentManager(), "date_time");
                break;
        }
    }
}
| CavInc/PDANDSt | app/src/main/java/cav/pdst/ui/activity/AbonementActivity.java | Java | apache-2.0 | 12,221 |
using System;
using System.Globalization;
using System.Linq;
using System.Security.Claims;
using System.Threading.Tasks;
using System.Web;
using System.Web.Mvc;
using Microsoft.AspNet.Identity;
using Microsoft.AspNet.Identity.Owin;
using Microsoft.Owin.Security;
using WebApplicationWithInternallyScheduledWebJobs.Models;
namespace WebApplicationWithInternallyScheduledWebJobs.Controllers
{
[Authorize]
public class AccountController : Controller
{
private ApplicationSignInManager _signInManager;
private ApplicationUserManager _userManager;
public AccountController()
{
}
public AccountController(ApplicationUserManager userManager, ApplicationSignInManager signInManager )
{
UserManager = userManager;
SignInManager = signInManager;
}
public ApplicationSignInManager SignInManager
{
get
{
return _signInManager ?? HttpContext.GetOwinContext().Get<ApplicationSignInManager>();
}
private set
{
_signInManager = value;
}
}
public ApplicationUserManager UserManager
{
get
{
return _userManager ?? HttpContext.GetOwinContext().GetUserManager<ApplicationUserManager>();
}
private set
{
_userManager = value;
}
}
//
// GET: /Account/Login
[AllowAnonymous]
public ActionResult Login(string returnUrl)
{
ViewBag.ReturnUrl = returnUrl;
return View();
}
//
// POST: /Account/Login
[HttpPost]
[AllowAnonymous]
[ValidateAntiForgeryToken]
public async Task<ActionResult> Login(LoginViewModel model, string returnUrl)
{
if (!ModelState.IsValid)
{
return View(model);
}
// This doesn't count login failures towards account lockout
// To enable password failures to trigger account lockout, change to shouldLockout: true
var result = await SignInManager.PasswordSignInAsync(model.Email, model.Password, model.RememberMe, shouldLockout: false);
switch (result)
{
case SignInStatus.Success:
return RedirectToLocal(returnUrl);
case SignInStatus.LockedOut:
return View("Lockout");
case SignInStatus.RequiresVerification:
return RedirectToAction("SendCode", new { ReturnUrl = returnUrl, RememberMe = model.RememberMe });
case SignInStatus.Failure:
default:
ModelState.AddModelError("", "Invalid login attempt.");
return View(model);
}
}
//
// GET: /Account/VerifyCode
[AllowAnonymous]
public async Task<ActionResult> VerifyCode(string provider, string returnUrl, bool rememberMe)
{
// Require that the user has already logged in via username/password or external login
if (!await SignInManager.HasBeenVerifiedAsync())
{
return View("Error");
}
return View(new VerifyCodeViewModel { Provider = provider, ReturnUrl = returnUrl, RememberMe = rememberMe });
}
//
// POST: /Account/VerifyCode
[HttpPost]
[AllowAnonymous]
[ValidateAntiForgeryToken]
public async Task<ActionResult> VerifyCode(VerifyCodeViewModel model)
{
if (!ModelState.IsValid)
{
return View(model);
}
// The following code protects for brute force attacks against the two factor codes.
// If a user enters incorrect codes for a specified amount of time then the user account
// will be locked out for a specified amount of time.
// You can configure the account lockout settings in IdentityConfig
var result = await SignInManager.TwoFactorSignInAsync(model.Provider, model.Code, isPersistent: model.RememberMe, rememberBrowser: model.RememberBrowser);
switch (result)
{
case SignInStatus.Success:
return RedirectToLocal(model.ReturnUrl);
case SignInStatus.LockedOut:
return View("Lockout");
case SignInStatus.Failure:
default:
ModelState.AddModelError("", "Invalid code.");
return View(model);
}
}
//
// GET: /Account/Register
[AllowAnonymous]
public ActionResult Register()
{
return View();
}
//
// POST: /Account/Register
[HttpPost]
[AllowAnonymous]
[ValidateAntiForgeryToken]
public async Task<ActionResult> Register(RegisterViewModel model)
{
if (ModelState.IsValid)
{
var user = new ApplicationUser { UserName = model.Email, Email = model.Email };
var result = await UserManager.CreateAsync(user, model.Password);
if (result.Succeeded)
{
await SignInManager.SignInAsync(user, isPersistent:false, rememberBrowser:false);
// For more information on how to enable account confirmation and password reset please visit http://go.microsoft.com/fwlink/?LinkID=320771
// Send an email with this link
// string code = await UserManager.GenerateEmailConfirmationTokenAsync(user.Id);
// var callbackUrl = Url.Action("ConfirmEmail", "Account", new { userId = user.Id, code = code }, protocol: Request.Url.Scheme);
// await UserManager.SendEmailAsync(user.Id, "Confirm your account", "Please confirm your account by clicking <a href=\"" + callbackUrl + "\">here</a>");
return RedirectToAction("Index", "Home");
}
AddErrors(result);
}
// If we got this far, something failed, redisplay form
return View(model);
}
//
// GET: /Account/ConfirmEmail
[AllowAnonymous]
public async Task<ActionResult> ConfirmEmail(string userId, string code)
{
if (userId == null || code == null)
{
return View("Error");
}
var result = await UserManager.ConfirmEmailAsync(userId, code);
return View(result.Succeeded ? "ConfirmEmail" : "Error");
}
//
// GET: /Account/ForgotPassword
[AllowAnonymous]
public ActionResult ForgotPassword()
{
return View();
}
//
// POST: /Account/ForgotPassword
[HttpPost]
[AllowAnonymous]
[ValidateAntiForgeryToken]
public async Task<ActionResult> ForgotPassword(ForgotPasswordViewModel model)
{
if (ModelState.IsValid)
{
var user = await UserManager.FindByNameAsync(model.Email);
if (user == null || !(await UserManager.IsEmailConfirmedAsync(user.Id)))
{
// Don't reveal that the user does not exist or is not confirmed
return View("ForgotPasswordConfirmation");
}
// For more information on how to enable account confirmation and password reset please visit http://go.microsoft.com/fwlink/?LinkID=320771
// Send an email with this link
// string code = await UserManager.GeneratePasswordResetTokenAsync(user.Id);
// var callbackUrl = Url.Action("ResetPassword", "Account", new { userId = user.Id, code = code }, protocol: Request.Url.Scheme);
// await UserManager.SendEmailAsync(user.Id, "Reset Password", "Please reset your password by clicking <a href=\"" + callbackUrl + "\">here</a>");
// return RedirectToAction("ForgotPasswordConfirmation", "Account");
}
// If we got this far, something failed, redisplay form
return View(model);
}
//
// GET: /Account/ForgotPasswordConfirmation
[AllowAnonymous]
public ActionResult ForgotPasswordConfirmation()
{
return View();
}
//
// GET: /Account/ResetPassword
[AllowAnonymous]
public ActionResult ResetPassword(string code)
{
return code == null ? View("Error") : View();
}
//
// POST: /Account/ResetPassword
[HttpPost]
[AllowAnonymous]
[ValidateAntiForgeryToken]
public async Task<ActionResult> ResetPassword(ResetPasswordViewModel model)
{
if (!ModelState.IsValid)
{
return View(model);
}
var user = await UserManager.FindByNameAsync(model.Email);
if (user == null)
{
// Don't reveal that the user does not exist
return RedirectToAction("ResetPasswordConfirmation", "Account");
}
var result = await UserManager.ResetPasswordAsync(user.Id, model.Code, model.Password);
if (result.Succeeded)
{
return RedirectToAction("ResetPasswordConfirmation", "Account");
}
AddErrors(result);
return View();
}
//
// GET: /Account/ResetPasswordConfirmation
[AllowAnonymous]
public ActionResult ResetPasswordConfirmation()
{
return View();
}
//
// POST: /Account/ExternalLogin
[HttpPost]
[AllowAnonymous]
[ValidateAntiForgeryToken]
public ActionResult ExternalLogin(string provider, string returnUrl)
{
// Request a redirect to the external login provider
return new ChallengeResult(provider, Url.Action("ExternalLoginCallback", "Account", new { ReturnUrl = returnUrl }));
}
//
// GET: /Account/SendCode
[AllowAnonymous]
public async Task<ActionResult> SendCode(string returnUrl, bool rememberMe)
{
var userId = await SignInManager.GetVerifiedUserIdAsync();
if (userId == null)
{
return View("Error");
}
var userFactors = await UserManager.GetValidTwoFactorProvidersAsync(userId);
var factorOptions = userFactors.Select(purpose => new SelectListItem { Text = purpose, Value = purpose }).ToList();
return View(new SendCodeViewModel { Providers = factorOptions, ReturnUrl = returnUrl, RememberMe = rememberMe });
}
//
// POST: /Account/SendCode
[HttpPost]
[AllowAnonymous]
[ValidateAntiForgeryToken]
public async Task<ActionResult> SendCode(SendCodeViewModel model)
{
if (!ModelState.IsValid)
{
return View();
}
// Generate the token and send it
if (!await SignInManager.SendTwoFactorCodeAsync(model.SelectedProvider))
{
return View("Error");
}
return RedirectToAction("VerifyCode", new { Provider = model.SelectedProvider, ReturnUrl = model.ReturnUrl, RememberMe = model.RememberMe });
}
//
// GET: /Account/ExternalLoginCallback
[AllowAnonymous]
public async Task<ActionResult> ExternalLoginCallback(string returnUrl)
{
var loginInfo = await AuthenticationManager.GetExternalLoginInfoAsync();
if (loginInfo == null)
{
return RedirectToAction("Login");
}
// Sign in the user with this external login provider if the user already has a login
var result = await SignInManager.ExternalSignInAsync(loginInfo, isPersistent: false);
switch (result)
{
case SignInStatus.Success:
return RedirectToLocal(returnUrl);
case SignInStatus.LockedOut:
return View("Lockout");
case SignInStatus.RequiresVerification:
return RedirectToAction("SendCode", new { ReturnUrl = returnUrl, RememberMe = false });
case SignInStatus.Failure:
default:
// If the user does not have an account, then prompt the user to create an account
ViewBag.ReturnUrl = returnUrl;
ViewBag.LoginProvider = loginInfo.Login.LoginProvider;
return View("ExternalLoginConfirmation", new ExternalLoginConfirmationViewModel { Email = loginInfo.Email });
}
}
//
// POST: /Account/ExternalLoginConfirmation
[HttpPost]
[AllowAnonymous]
[ValidateAntiForgeryToken]
public async Task<ActionResult> ExternalLoginConfirmation(ExternalLoginConfirmationViewModel model, string returnUrl)
{
if (User.Identity.IsAuthenticated)
{
return RedirectToAction("Index", "Manage");
}
if (ModelState.IsValid)
{
// Get the information about the user from the external login provider
var info = await AuthenticationManager.GetExternalLoginInfoAsync();
if (info == null)
{
return View("ExternalLoginFailure");
}
var user = new ApplicationUser { UserName = model.Email, Email = model.Email };
var result = await UserManager.CreateAsync(user);
if (result.Succeeded)
{
result = await UserManager.AddLoginAsync(user.Id, info.Login);
if (result.Succeeded)
{
await SignInManager.SignInAsync(user, isPersistent: false, rememberBrowser: false);
return RedirectToLocal(returnUrl);
}
}
AddErrors(result);
}
ViewBag.ReturnUrl = returnUrl;
return View(model);
}
//
// POST: /Account/LogOff
[HttpPost]
[ValidateAntiForgeryToken]
public ActionResult LogOff()
{
AuthenticationManager.SignOut();
return RedirectToAction("Index", "Home");
}
//
// GET: /Account/ExternalLoginFailure
[AllowAnonymous]
public ActionResult ExternalLoginFailure()
{
return View();
}
protected override void Dispose(bool disposing)
{
if (disposing)
{
if (_userManager != null)
{
_userManager.Dispose();
_userManager = null;
}
if (_signInManager != null)
{
_signInManager.Dispose();
_signInManager = null;
}
}
base.Dispose(disposing);
}
#region Helpers
// Used for XSRF protection when adding external logins
private const string XsrfKey = "XsrfId";
        /// <summary>
        /// The OWIN authentication middleware for the current request.
        /// </summary>
        private IAuthenticationManager AuthenticationManager
        {
            get
            {
                return HttpContext.GetOwinContext().Authentication;
            }
        }
        /// <summary>
        /// Copies identity-result errors into model state so views can render them.
        /// </summary>
        private void AddErrors(IdentityResult result)
        {
            foreach (var error in result.Errors)
            {
                // Empty key: errors appear in the validation summary rather than next to a field.
                ModelState.AddModelError("", error);
            }
        }
private ActionResult RedirectToLocal(string returnUrl)
{
if (Url.IsLocalUrl(returnUrl))
{
return Redirect(returnUrl);
}
return RedirectToAction("Index", "Home");
}
        /// <summary>
        /// Action result that issues an OWIN authentication challenge for an
        /// external login provider, redirecting back to <see cref="RedirectUri"/>.
        /// </summary>
        internal class ChallengeResult : HttpUnauthorizedResult
        {
            public ChallengeResult(string provider, string redirectUri)
                : this(provider, redirectUri, null)
            {
            }
            public ChallengeResult(string provider, string redirectUri, string userId)
            {
                LoginProvider = provider;
                RedirectUri = redirectUri;
                UserId = userId;
            }
            public string LoginProvider { get; set; }
            public string RedirectUri { get; set; }
            public string UserId { get; set; }
            public override void ExecuteResult(ControllerContext context)
            {
                var properties = new AuthenticationProperties { RedirectUri = RedirectUri };
                if (UserId != null)
                {
                    // Stash the user id under XsrfKey so the callback can verify
                    // the login flow was initiated for this user (XSRF protection).
                    properties.Dictionary[XsrfKey] = UserId;
                }
                context.HttpContext.GetOwinContext().Authentication.Challenge(properties, LoginProvider);
            }
        }
#endregion
}
} | davidebbo-test/WebApplicationWithInternallyScheduledWebJobs | WebApplicationWithInternallyScheduledWebJobs/Controllers/AccountController.cs | C# | apache-2.0 | 17,416 |
/**
* Copyright (C) 2014-2015 LinkedIn Corp. (pinot-core@linkedin.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.linkedin.pinot.core.segment.creator;
/**
* An interface to read the column statistics from statistics collectors.
*
*/
public interface ColumnStatistics {

  /**
   * @return Minimum value of the column
   * @throws Exception
   */
  Object getMinValue() throws Exception;

  /**
   * @return Maximum value of the column
   * @throws Exception
   */
  Object getMaxValue() throws Exception;

  /**
   * @return An array of elements holding the unique values of this column, in sorted order.
   * @throws Exception
   */
  Object getUniqueValuesSet() throws Exception;

  /**
   * @return The number of unique values of this column.
   * @throws Exception
   */
  int getCardinality() throws Exception;

  /**
   * @return For string objects, the length of the longest string value; -1 for all other types.
   * @throws Exception
   */
  int getLengthOfLargestElement() throws Exception;

  /**
   * @return The number of null values in the input for this column.
   */
  int getNumInputNullValues();

  /**
   * @return The total number of entries observed for this column.
   */
  int getTotalNumberOfEntries();

  /**
   * @return For multi-valued columns, the max number of values in a single occurrence of the column, otherwise 0.
   */
  int getMaxNumberOfMultiValues();

  /**
   * @return Whether any of the values of this column in the segment are null.
   */
  boolean hasNull();
}
| tkao1000/pinot | pinot-core/src/main/java/com/linkedin/pinot/core/segment/creator/ColumnStatistics.java | Java | apache-2.0 | 2,150 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.quicksight.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* Display options related to tiles on a sheet.
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/TileStyle" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class TileStyle implements Serializable, Cloneable, StructuredPojo {

    /**
     * <p>
     * The border around a tile.
     * </p>
     */
    private BorderStyle border;

    /**
     * <p>
     * The border around a tile.
     * </p>
     *
     * @param border
     *        The border around a tile.
     */
    public void setBorder(BorderStyle border) {
        this.border = border;
    }

    /**
     * <p>
     * The border around a tile.
     * </p>
     *
     * @return The border around a tile.
     */
    public BorderStyle getBorder() {
        return this.border;
    }

    /**
     * <p>
     * The border around a tile.
     * </p>
     *
     * @param border
     *        The border around a tile.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public TileStyle withBorder(BorderStyle border) {
        setBorder(border);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getBorder() != null)
            sb.append("Border: ").append(getBorder());
        sb.append("}");
        return sb.toString();
    }

    // Value equality is defined solely on the border property.
    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (obj instanceof TileStyle == false)
            return false;
        TileStyle other = (TileStyle) obj;
        // XOR: exactly one side null means the objects differ.
        if (other.getBorder() == null ^ this.getBorder() == null)
            return false;
        if (other.getBorder() != null && other.getBorder().equals(this.getBorder()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        // 31 is the conventional odd prime for hash combination.
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + ((getBorder() == null) ? 0 : getBorder().hashCode());
        return hashCode;
    }

    @Override
    public TileStyle clone() {
        try {
            return (TileStyle) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        com.amazonaws.services.quicksight.model.transform.TileStyleMarshaller.getInstance().marshall(this, protocolMarshaller);
    }
}
| aws/aws-sdk-java | aws-java-sdk-quicksight/src/main/java/com/amazonaws/services/quicksight/model/TileStyle.java | Java | apache-2.0 | 3,886 |
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import six
import warnings
from cassandra.cqlengine import CQLEngineException, ValidationError
from cassandra.cqlengine import columns
from cassandra.cqlengine import connection
from cassandra.cqlengine import query
from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist
from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned
from cassandra.metadata import protect_name
from cassandra.util import OrderedDict
log = logging.getLogger(__name__)
class ModelException(CQLEngineException):
    """Base exception for model-level errors."""
    pass
class ModelDefinitionException(ModelException):
    """Raised when a model class is declared incorrectly."""
    pass
class PolymorphicModelException(ModelException):
    """Raised for misuse or misconfiguration of polymorphic (inherited) models."""
    pass
class UndefinedKeyspaceWarning(Warning):
    """Warning category relating to an undefined keyspace."""
    pass
DEFAULT_KEYSPACE = None
class hybrid_classmethod(object):
    """
    Descriptor combining a class-level and an instance-level callable:
    attribute access on the class binds ``clsmethod``, while access on
    an instance binds ``instmethod``.
    """

    def __init__(self, clsmethod, instmethod):
        self.clsmethod = clsmethod
        self.instmethod = instmethod

    def __get__(self, instance, owner):
        # Dispatch on how the attribute was reached: class access has no
        # instance, so bind the classmethod to the owning class instead.
        target = self.clsmethod if instance is None else self.instmethod
        bind_to = owner if instance is None else instance
        return target.__get__(bind_to, owner)

    def __call__(self, *args, **kwargs):
        """
        Just a hint to IDEs that it's ok to call this
        """
        raise NotImplementedError
class QuerySetDescriptor(object):
    """
    Descriptor backing ``Model.objects``: returns a fresh queryset for the
    given model every time it is accessed.
    """
    def __get__(self, obj, model):
        """ :rtype: ModelQuerySet """
        if model.__abstract__:
            raise CQLEngineException('cannot execute queries against abstract models')
        queryset = model.__queryset__(model)
        # if this is a concrete polymorphic model, and the discriminator
        # key is an indexed column, add a filter clause to only return
        # logical rows of the proper type
        if model._is_polymorphic and not model._is_polymorphic_base:
            name, column = model._discriminator_column_name, model._discriminator_column
            if column.partition_key or column.index:
                # look for existing poly types
                return queryset.filter(**{name: model.__discriminator_value__})
        return queryset
    def __call__(self, *args, **kwargs):
        """
        Just a hint to IDEs that it's ok to call this

        :rtype: ModelQuerySet
        """
        raise NotImplementedError
class TransactionDescriptor(object):
    """
    Descriptor backing ``Model.iff``: returns a setter that attaches
    lightweight-transaction (IF) clauses to an instance or to a queryset.
    """
    def __get__(self, instance, model):
        if instance:
            # Instance access: store the clauses on the instance and return
            # it so calls can be chained.
            def transaction_setter(*prepared_transaction, **unprepared_transactions):
                if len(prepared_transaction) > 0:
                    transactions = prepared_transaction[0]
                else:
                    transactions = instance.objects.iff(**unprepared_transactions)._transaction
                instance._transaction = transactions
                return instance
            return transaction_setter
        # Class access: attach the clauses to a fresh queryset instead.
        qs = model.__queryset__(model)
        def transaction_setter(**unprepared_transactions):
            transactions = model.objects.iff(**unprepared_transactions)._transaction
            qs._transaction = transactions
            return qs
        return transaction_setter
    def __call__(self, *args, **kwargs):
        raise NotImplementedError
class TTLDescriptor(object):
    """
    Descriptor backing ``Model.ttl``: returns a setter that records a TTL
    (in seconds) on an instance or on a fresh queryset.
    """
    def __get__(self, instance, model):
        if instance:
            # instance = copy.deepcopy(instance)
            # instance method
            def ttl_setter(ts):
                instance._ttl = ts
                return instance
            return ttl_setter
        qs = model.__queryset__(model)
        def ttl_setter(ts):
            qs._ttl = ts
            return qs
        return ttl_setter
    def __call__(self, *args, **kwargs):
        raise NotImplementedError
class TimestampDescriptor(object):
    """
    Descriptor backing ``Model.timestamp``: returns a setter that records a
    custom write timestamp (see ``USING TIMESTAMP X``) on an instance, or
    delegates to the queryset's ``timestamp`` for class access.
    """
    def __get__(self, instance, model):
        if instance:
            # instance method
            def timestamp_setter(ts):
                instance._timestamp = ts
                return instance
            return timestamp_setter
        return model.objects.timestamp
    def __call__(self, *args, **kwargs):
        raise NotImplementedError
class IfNotExistsDescriptor(object):
    """
    Descriptor backing ``Model.if_not_exists``: returns a setter that flags
    an instance's insert with IF NOT EXISTS, or delegates to the queryset's
    ``if_not_exists`` for class access.
    """
    def __get__(self, instance, model):
        if instance:
            # instance method
            def ifnotexists_setter(ife):
                instance._if_not_exists = ife
                return instance
            return ifnotexists_setter
        return model.objects.if_not_exists
    def __call__(self, *args, **kwargs):
        raise NotImplementedError
class ConsistencyDescriptor(object):
    """
    Descriptor backing ``Model.consistency``: returns a setter that records
    a consistency level on an instance (instance access) or on a fresh
    queryset (class access).
    """
    def __get__(self, instance, model):
        if instance:
            # instance = copy.deepcopy(instance)
            def consistency_setter(consistency):
                instance.__consistency__ = consistency
                return instance
            return consistency_setter
        qs = model.__queryset__(model)
        def consistency_setter(consistency):
            qs._consistency = consistency
            return qs
        return consistency_setter
    def __call__(self, *args, **kwargs):
        raise NotImplementedError
class ColumnQueryEvaluator(query.AbstractQueryableColumn):
    """
    Wraps a column and allows it to be used in comparator
    expressions, returning query operators

    ie:
        Model.column == 5
    """
    def __init__(self, column):
        self.column = column
    def __unicode__(self):
        # The column's database field name is its query representation.
        return self.column.db_field_name
    def _get_column(self):
        """ :rtype: ColumnQueryEvaluator """
        return self.column
class ColumnDescriptor(object):
    """
    Handles the reading and writing of column values to and from
    a model instance's value manager, as well as creating
    comparator queries
    """
    def __init__(self, column):
        """
        :param column: the column definition this descriptor manages
        :type column: columns.Column
        """
        self.column = column
        self.query_evaluator = ColumnQueryEvaluator(self.column)
    def __get__(self, instance, owner):
        """
        Returns either the value or column, depending
        on if an instance is provided or not

        :param instance: the model instance
        :type instance: Model
        """
        try:
            return instance._values[self.column.column_name].getval()
        except AttributeError:
            # Class-level access (instance is None): return the query
            # evaluator so expressions like ``Model.col == 5`` work.
            return self.query_evaluator
    def __set__(self, instance, value):
        """
        Sets the value on an instance, raises an exception with classes
        TODO: use None instance to create update statements
        """
        if instance:
            return instance._values[self.column.column_name].setval(value)
        else:
            raise AttributeError('cannot reassign column values')
    def __delete__(self, instance):
        """
        Sets the column value to None, if possible
        """
        if instance:
            if self.column.can_delete:
                instance._values[self.column.column_name].delval()
            else:
                raise AttributeError('cannot delete {0} columns'.format(self.column.column_name))
class BaseModel(object):
"""
The base model class, don't inherit from this, inherit from Model, defined below
"""
class DoesNotExist(_DoesNotExist):
pass
class MultipleObjectsReturned(_MultipleObjectsReturned):
pass
objects = QuerySetDescriptor()
ttl = TTLDescriptor()
consistency = ConsistencyDescriptor()
iff = TransactionDescriptor()
# custom timestamps, see USING TIMESTAMP X
timestamp = TimestampDescriptor()
if_not_exists = IfNotExistsDescriptor()
# _len is lazily created by __len__
__table_name__ = None
__keyspace__ = None
__discriminator_value__ = None
__options__ = None
# the queryset class used for this class
__queryset__ = query.ModelQuerySet
__dmlquery__ = query.DMLQuery
__consistency__ = None # can be set per query
_timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP)
_if_not_exists = False # optional if_not_exists flag to check existence before insertion
_table_name = None # used internally to cache a derived table name
def __init__(self, **values):
self._values = {}
self._ttl = self.__default_ttl__
self._timestamp = None
self._transaction = None
for name, column in self._columns.items():
value = values.get(name, None)
if value is not None or isinstance(column, columns.BaseContainerColumn):
value = column.to_python(value)
value_mngr = column.value_manager(self, column, value)
if name in values:
value_mngr.explicit = True
self._values[name] = value_mngr
# a flag set by the deserializer to indicate
# that update should be used when persisting changes
self._is_persisted = False
self._batch = None
self._timeout = connection.NOT_SET
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__,
', '.join('{0}={1!r}'.format(k, getattr(self, k))
for k in self._defined_columns.keys()
if k != self._discriminator_column_name))
def __str__(self):
"""
Pretty printing of models by their primary key
"""
return '{0} <{1}>'.format(self.__class__.__name__,
', '.join('{0}={1}'.format(k, getattr(self, k)) for k in self._primary_keys.keys()))
@classmethod
def _discover_polymorphic_submodels(cls):
if not cls._is_polymorphic_base:
raise ModelException('_discover_polymorphic_submodels can only be called on polymorphic base classes')
def _discover(klass):
if not klass._is_polymorphic_base and klass.__discriminator_value__ is not None:
cls._discriminator_map[klass.__discriminator_value__] = klass
for subklass in klass.__subclasses__():
_discover(subklass)
_discover(cls)
@classmethod
def _get_model_by_discriminator_value(cls, key):
if not cls._is_polymorphic_base:
raise ModelException('_get_model_by_discriminator_value can only be called on polymorphic base classes')
return cls._discriminator_map.get(key)
@classmethod
def _construct_instance(cls, values):
"""
method used to construct instances from query results
this is where polymorphic deserialization occurs
"""
# we're going to take the values, which is from the DB as a dict
# and translate that into our local fields
# the db_map is a db_field -> model field map
items = values.items()
field_dict = dict([(cls._db_map.get(k, k), v) for k, v in items])
if cls._is_polymorphic:
disc_key = field_dict.get(cls._discriminator_column_name)
if disc_key is None:
raise PolymorphicModelException('discriminator value was not found in values')
poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base
klass = poly_base._get_model_by_discriminator_value(disc_key)
if klass is None:
poly_base._discover_polymorphic_submodels()
klass = poly_base._get_model_by_discriminator_value(disc_key)
if klass is None:
raise PolymorphicModelException(
'unrecognized discriminator column {0} for class {1}'.format(disc_key, poly_base.__name__)
)
if not issubclass(klass, cls):
raise PolymorphicModelException(
'{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__)
)
field_dict = dict((k, v) for k, v in field_dict.items() if k in klass._columns.keys())
else:
klass = cls
instance = klass(**field_dict)
instance._is_persisted = True
return instance
def _can_update(self):
"""
Called by the save function to check if this should be
persisted with update or insert
:return:
"""
if not self._is_persisted:
return False
return all([not self._values[k].changed for k in self._primary_keys])
@classmethod
def _get_keyspace(cls):
"""
Returns the manual keyspace, if set, otherwise the default keyspace
"""
return cls.__keyspace__ or DEFAULT_KEYSPACE
@classmethod
def _get_column(cls, name):
"""
Returns the column matching the given name, raising a key error if
it doesn't exist
:param name: the name of the column to return
:rtype: Column
"""
return cls._columns[name]
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
# check attribute keys
keys = set(self._columns.keys())
other_keys = set(other._columns.keys())
if keys != other_keys:
return False
# check that all of the attributes match
for key in other_keys:
if getattr(self, key, None) != getattr(other, key, None):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def column_family_name(cls, include_keyspace=True):
"""
Returns the column family name if it's been defined
otherwise, it creates it from the module and class name
"""
cf_name = protect_name(cls._raw_column_family_name())
if include_keyspace:
return '{0}.{1}'.format(protect_name(cls._get_keyspace()), cf_name)
return cf_name
@classmethod
def _raw_column_family_name(cls):
if not cls._table_name:
if cls.__table_name__:
cls._table_name = cls.__table_name__.lower()
else:
if cls._is_polymorphic and not cls._is_polymorphic_base:
cls._table_name = cls._polymorphic_base._raw_column_family_name()
else:
camelcase = re.compile(r'([a-z])([A-Z])')
ccase = lambda s: camelcase.sub(lambda v: '{0}_{1}'.format(v.group(1), v.group(2).lower()), s)
cf_name = ccase(cls.__name__)
# trim to less than 48 characters or cassandra will complain
cf_name = cf_name[-48:]
cf_name = cf_name.lower()
cf_name = re.sub(r'^_+', '', cf_name)
cls._table_name = cf_name
return cls._table_name
def validate(self):
"""
Cleans and validates the field values
"""
for name, col in self._columns.items():
v = getattr(self, name)
if v is None and not self._values[name].explicit and col.has_default:
v = col.get_default()
val = col.validate(v)
setattr(self, name, val)
# Let an instance be used like a dict of its columns keys/values
def __iter__(self):
""" Iterate over column ids. """
for column_id in self._columns.keys():
yield column_id
def __getitem__(self, key):
""" Returns column's value. """
if not isinstance(key, six.string_types):
raise TypeError
if key not in self._columns.keys():
raise KeyError
return getattr(self, key)
def __setitem__(self, key, val):
""" Sets a column's value. """
if not isinstance(key, six.string_types):
raise TypeError
if key not in self._columns.keys():
raise KeyError
return setattr(self, key, val)
def __len__(self):
"""
Returns the number of columns defined on that model.
"""
try:
return self._len
except:
self._len = len(self._columns.keys())
return self._len
def keys(self):
""" Returns a list of column IDs. """
return [k for k in self]
def values(self):
""" Returns list of column values. """
return [self[k] for k in self]
def items(self):
""" Returns a list of column ID/value tuples. """
return [(k, self[k]) for k in self]
def _as_dict(self):
""" Returns a map of column names to cleaned values """
values = self._dynamic_columns or {}
for name, col in self._columns.items():
values[name] = col.to_database(getattr(self, name, None))
return values
@classmethod
def create(cls, **kwargs):
"""
Create an instance of this model in the database.
Takes the model column values as keyword arguments.
Returns the instance.
"""
extra_columns = set(kwargs.keys()) - set(cls._columns.keys())
if extra_columns:
raise ValidationError("Incorrect columns passed: {0}".format(extra_columns))
return cls.objects.create(**kwargs)
@classmethod
def all(cls):
"""
Returns a queryset representing all stored objects
This is a pass-through to the model objects().all()
"""
return cls.objects.all()
@classmethod
def filter(cls, *args, **kwargs):
"""
Returns a queryset based on filter parameters.
This is a pass-through to the model objects().:method:`~cqlengine.queries.filter`.
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def get(cls, *args, **kwargs):
"""
Returns a single object based on the passed filter constraints.
This is a pass-through to the model objects().:method:`~cqlengine.queries.get`.
"""
return cls.objects.get(*args, **kwargs)
def timeout(self, timeout):
"""
Sets a timeout for use in :meth:`~.save`, :meth:`~.update`, and :meth:`~.delete`
operations
"""
assert self._batch is None, 'Setting both timeout and batch is not supported'
self._timeout = timeout
return self
def save(self):
"""
Saves an object to the database.
.. code-block:: python
#create a person instance
person = Person(first_name='Kimberly', last_name='Eggleston')
#saves it to Cassandra
person.save()
"""
# handle polymorphic models
if self._is_polymorphic:
if self._is_polymorphic_base:
raise PolymorphicModelException('cannot save polymorphic base model')
else:
setattr(self, self._discriminator_column_name, self.__discriminator_value__)
self.validate()
self.__dmlquery__(self.__class__, self,
batch=self._batch,
ttl=self._ttl,
timestamp=self._timestamp,
consistency=self.__consistency__,
if_not_exists=self._if_not_exists,
transaction=self._transaction,
timeout=self._timeout).save()
# reset the value managers
for v in self._values.values():
v.reset_previous_value()
self._is_persisted = True
self._ttl = self.__default_ttl__
self._timestamp = None
return self
def update(self, **values):
"""
Performs an update on the model instance. You can pass in values to set on the model
for updating, or you can call without values to execute an update against any modified
fields. If no fields on the model have been modified since loading, no query will be
performed. Model validation is performed normally.
It is possible to do a blind update, that is, to update a field without having first selected the object out of the database.
See :ref:`Blind Updates <blind_updates>`
"""
for k, v in values.items():
col = self._columns.get(k)
# check for nonexistant columns
if col is None:
raise ValidationError("{0}.{1} has no column named: {2}".format(self.__module__, self.__class__.__name__, k))
# check for primary key update attempts
if col.is_primary_key:
raise ValidationError("Cannot apply update to primary key '{0}' for {1}.{2}".format(k, self.__module__, self.__class__.__name__))
setattr(self, k, v)
# handle polymorphic models
if self._is_polymorphic:
if self._is_polymorphic_base:
raise PolymorphicModelException('cannot update polymorphic base model')
else:
setattr(self, self._discriminator_column_name, self.__discriminator_value__)
self.validate()
self.__dmlquery__(self.__class__, self,
batch=self._batch,
ttl=self._ttl,
timestamp=self._timestamp,
consistency=self.__consistency__,
transaction=self._transaction,
timeout=self._timeout).update()
# reset the value managers
for v in self._values.values():
v.reset_previous_value()
self._is_persisted = True
self._ttl = self.__default_ttl__
self._timestamp = None
return self
def delete(self):
"""
Deletes the object from the database
"""
self.__dmlquery__(self.__class__, self,
batch=self._batch,
timestamp=self._timestamp,
consistency=self.__consistency__,
timeout=self._timeout).delete()
def get_changed_columns(self):
"""
Returns a list of the columns that have been updated since instantiation or save
"""
return [k for k, v in self._values.items() if v.changed]
@classmethod
def _class_batch(cls, batch):
return cls.objects.batch(batch)
def _inst_batch(self, batch):
assert self._timeout is connection.NOT_SET, 'Setting both timeout and batch is not supported'
self._batch = batch
return self
batch = hybrid_classmethod(_class_batch, _inst_batch)
class ModelMetaClass(type):
    """
    Metaclass for cqlengine models: collects declared columns, validates
    the model definition (primary keys, discriminator setup, counter
    restrictions) and installs the management attributes used by the
    query machinery.
    """

    def __new__(cls, name, bases, attrs):
        # move column definitions into columns dict
        # and set default column names
        column_dict = OrderedDict()
        primary_keys = OrderedDict()
        pk_name = None

        # get inherited properties
        inherited_columns = OrderedDict()
        for base in bases:
            for k, v in getattr(base, '_defined_columns', {}).items():
                inherited_columns.setdefault(k, v)

        # short circuit __abstract__ inheritance
        is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False)

        # short circuit __discriminator_value__ inheritance
        attrs['__discriminator_value__'] = attrs.get('__discriminator_value__')

        options = attrs.get('__options__') or {}
        attrs['__default_ttl__'] = options.get('default_time_to_live')

        def _transform_column(col_name, col_obj):
            column_dict[col_name] = col_obj
            if col_obj.primary_key:
                primary_keys[col_name] = col_obj
            col_obj.set_column_name(col_name)
            # set properties
            attrs[col_name] = ColumnDescriptor(col_obj)

        column_definitions = [(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)]
        column_definitions = sorted(column_definitions, key=lambda x: x[1].position)

        is_polymorphic_base = any([c[1].discriminator_column for c in column_definitions])

        # inherited columns come first so subclass columns keep their order
        column_definitions = [x for x in inherited_columns.items()] + column_definitions
        discriminator_columns = [c for c in column_definitions if c[1].discriminator_column]
        is_polymorphic = len(discriminator_columns) > 0
        if len(discriminator_columns) > 1:
            raise ModelDefinitionException('only one discriminator_column can be defined in a model, {0} found'.format(len(discriminator_columns)))

        if attrs['__discriminator_value__'] and not is_polymorphic:
            raise ModelDefinitionException('__discriminator_value__ specified, but no base columns defined with discriminator_column=True')

        discriminator_column_name, discriminator_column = discriminator_columns[0] if discriminator_columns else (None, None)

        if isinstance(discriminator_column, (columns.BaseContainerColumn, columns.Counter)):
            raise ModelDefinitionException('counter and container columns cannot be used as discriminator columns')

        # find polymorphic base class
        polymorphic_base = None
        if is_polymorphic and not is_polymorphic_base:
            def _get_polymorphic_base(bases):
                for base in bases:
                    if getattr(base, '_is_polymorphic_base', False):
                        return base
                    klass = _get_polymorphic_base(base.__bases__)
                    if klass:
                        return klass
            polymorphic_base = _get_polymorphic_base(bases)

        defined_columns = OrderedDict(column_definitions)

        # check for primary key
        if not is_abstract and not any([v.primary_key for k, v in column_definitions]):
            raise ModelDefinitionException("At least 1 primary key is required.")

        counter_columns = [c for c in defined_columns.values() if isinstance(c, columns.Counter)]
        data_columns = [c for c in defined_columns.values() if not c.primary_key and not isinstance(c, columns.Counter)]
        if counter_columns and data_columns:
            raise ModelDefinitionException('counter models may not have data columns')

        has_partition_keys = any(v.partition_key for (k, v) in column_definitions)

        # transform column definitions
        for k, v in column_definitions:
            # don't allow a column with the same name as a built-in attribute or method
            if k in BaseModel.__dict__:
                raise ModelDefinitionException("column '{0}' conflicts with built-in attribute/method".format(k))

            # counter column primary keys are not allowed
            if (v.primary_key or v.partition_key) and isinstance(v, (columns.Counter, columns.BaseContainerColumn)):
                raise ModelDefinitionException('counter columns and container columns cannot be used as primary keys')

            # this will mark the first primary key column as a partition
            # key, if one hasn't been set already
            if not has_partition_keys and v.primary_key:
                v.partition_key = True
                has_partition_keys = True
            _transform_column(k, v)

        partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key)
        clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key)

        # setup partition key shortcut
        if len(partition_keys) == 0:
            if not is_abstract:
                raise ModelException("at least one partition key must be defined")
        if len(partition_keys) == 1:
            pk_name = [x for x in partition_keys.keys()][0]
            attrs['pk'] = attrs[pk_name]
        else:
            # composite partition key case, get/set a tuple of values
            _get = lambda self: tuple(self._values[c].getval() for c in partition_keys.keys())
            _set = lambda self, val: tuple(self._values[c].setval(v) for (c, v) in zip(partition_keys.keys(), val))
            attrs['pk'] = property(_get, _set)

        # some validation
        col_names = set()
        for v in column_dict.values():
            # check for duplicate column names
            if v.db_field_name in col_names:
                raise ModelException("{0} defines the column {1} more than once".format(name, v.db_field_name))
            if v.clustering_order and not (v.primary_key and not v.partition_key):
                raise ModelException("clustering_order may be specified only for clustering primary keys")
            if v.clustering_order and v.clustering_order.lower() not in ('asc', 'desc'):
                raise ModelException("invalid clustering order {0} for column {1}".format(repr(v.clustering_order), v.db_field_name))
            col_names.add(v.db_field_name)

        # create db_name -> model name map for loading
        db_map = {}
        for field_name, col in column_dict.items():
            db_map[col.db_field_name] = field_name

        # add management members to the class
        attrs['_columns'] = column_dict
        attrs['_primary_keys'] = primary_keys
        attrs['_defined_columns'] = defined_columns

        # maps the database field to the models key
        attrs['_db_map'] = db_map

        attrs['_pk_name'] = pk_name
        attrs['_dynamic_columns'] = {}

        attrs['_partition_keys'] = partition_keys
        attrs['_clustering_keys'] = clustering_keys
        attrs['_has_counter'] = len(counter_columns) > 0

        # add polymorphic management attributes
        attrs['_is_polymorphic_base'] = is_polymorphic_base
        attrs['_is_polymorphic'] = is_polymorphic
        attrs['_polymorphic_base'] = polymorphic_base
        attrs['_discriminator_column'] = discriminator_column
        attrs['_discriminator_column_name'] = discriminator_column_name
        attrs['_discriminator_map'] = {} if is_polymorphic_base else None

        # setup class exceptions
        DoesNotExistBase = None
        for base in bases:
            DoesNotExistBase = getattr(base, 'DoesNotExist', None)
            if DoesNotExistBase is not None:
                break

        DoesNotExistBase = DoesNotExistBase or attrs.pop('DoesNotExist', BaseModel.DoesNotExist)
        attrs['DoesNotExist'] = type('DoesNotExist', (DoesNotExistBase,), {})

        MultipleObjectsReturnedBase = None
        for base in bases:
            MultipleObjectsReturnedBase = getattr(base, 'MultipleObjectsReturned', None)
            if MultipleObjectsReturnedBase is not None:
                break

        # BUGFIX: this previously read ``DoesNotExistBase or ...``; since
        # DoesNotExistBase is always truthy at this point, every model's
        # MultipleObjectsReturned exception wrongly derived from the
        # DoesNotExist base class.
        MultipleObjectsReturnedBase = MultipleObjectsReturnedBase or attrs.pop('MultipleObjectsReturned', BaseModel.MultipleObjectsReturned)
        attrs['MultipleObjectsReturned'] = type('MultipleObjectsReturned', (MultipleObjectsReturnedBase,), {})

        # create the class and add a QuerySet to it
        klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs)

        udts = []
        for col in column_dict.values():
            columns.resolve_udts(col, udts)

        # register any user-defined types referenced by this model's columns
        for user_type in set(udts):
            user_type.register_for_keyspace(klass._get_keyspace())

        return klass
@six.add_metaclass(ModelMetaClass)
class Model(BaseModel):
    """Base class for user-defined cqlengine models.

    Subclasses declare their CQL columns as class attributes; the
    ``ModelMetaClass`` metaclass inspects those attributes, together with
    the dunder settings below, once at subclass-creation time.
    """

    __abstract__ = True
    """
    *Optional.* Indicates that this model is only intended to be used as a base class for other models.
    You can't create tables for abstract models, but checks around schema validity are skipped during class construction.
    """

    __table_name__ = None
    """
    *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be the name of the model, with it's module name as it's prefix. Manually defined table names are not inherited.
    """

    __keyspace__ = None
    """
    Sets the name of the keyspace used by this model.
    """

    __options__ = None
    """
    *Optional* Table options applied with this model
    (e.g. compaction, default ttl, cache settings, tec.)
    """

    __discriminator_value__ = None
    """
    *Optional* Specifies a value for the discriminator column when using model inheritance.
    """
| jfelectron/python-driver | cassandra/cqlengine/models.py | Python | apache-2.0 | 33,196 |
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openehealth.ipf.commons.ihe.xds.core.transform.requests.query;
import org.openehealth.ipf.commons.ihe.xds.core.ebxml.EbXMLAdhocQueryRequest;
import org.openehealth.ipf.commons.ihe.xds.core.requests.query.FindDocumentsForMultiplePatientsQuery;
import static org.openehealth.ipf.commons.ihe.xds.core.transform.requests.QueryParameter.*;
/**
* Transforms between a {@link FindDocumentsForMultiplePatientsQuery} and {@link EbXMLAdhocQueryRequest}.
* @author Michael Ottati
*/
public class FindDocumentsForMultiplePatientsQueryTransformer extends DocumentsQueryTransformer<FindDocumentsForMultiplePatientsQuery> {

    /**
     * Transforms the query into its ebXML representation, adding the
     * multi-patient-specific slots on top of the base document query slots.
     * <p>
     * Does nothing if one of the parameters is <code>null</code>.
     *
     * @param query
     *          the query to transform. Can be <code>null</code>.
     * @param ebXML
     *          the ebXML request to populate. Can be <code>null</code>.
     */
    @Override
    public void toEbXML(FindDocumentsForMultiplePatientsQuery query, EbXMLAdhocQueryRequest ebXML) {
        if (query == null || ebXML == null) {
            return;
        }
        super.toEbXML(query, ebXML);
        QuerySlotHelper slots = new QuerySlotHelper(ebXML);
        slots.fromPatientIdList(DOC_ENTRY_PATIENT_ID, query.getPatientIds());
        slots.fromDocumentEntryType(DOC_ENTRY_TYPE, query.getDocumentEntryTypes());
        slots.fromStatus(DOC_ENTRY_STATUS, query.getStatus());
    }

    /**
     * Populates the query from its ebXML representation, reading back the
     * multi-patient-specific slots in addition to the base document query slots.
     * <p>
     * Does nothing if one of the parameters is <code>null</code>.
     *
     * @param query
     *          the query to populate. Can be <code>null</code>.
     * @param ebXML
     *          the ebXML request to read from. Can be <code>null</code>.
     */
    // Fix: @Override was missing here, although present on toEbXML; the
    // super.fromEbXML call below shows the parent declares this method.
    @Override
    public void fromEbXML(FindDocumentsForMultiplePatientsQuery query, EbXMLAdhocQueryRequest ebXML) {
        if (query == null || ebXML == null) {
            return;
        }
        super.fromEbXML(query, ebXML);
        QuerySlotHelper slots = new QuerySlotHelper(ebXML);
        query.setPatientIds(slots.toPatientIdList(DOC_ENTRY_PATIENT_ID));
        query.setDocumentEntryTypes(slots.toDocumentEntryType(DOC_ENTRY_TYPE));
        query.setStatus(slots.toStatus(DOC_ENTRY_STATUS));
    }
}
| krasserm/ipf | commons/ihe/xds/src/main/java/org/openehealth/ipf/commons/ihe/xds/core/transform/requests/query/FindDocumentsForMultiplePatientsQueryTransformer.java | Java | apache-2.0 | 2,267 |
/*
* Copyright (c) 2001-2007, Inversoft Inc., All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package org.primeframework.mvc.util;
import java.lang.reflect.Array;
import java.lang.reflect.GenericArrayType;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.primeframework.mvc.parameter.el.CollectionExpressionException;
/**
* This is a toolkit that assists with generics.
*
* @author Brian Pontarelli
*/
public class TypeTools {
  /**
   * Determines the final component type. This continues to loop over Collections until it hits a non-parameterized
   * type.
   *
   * @param type The parametrized type.
   * @param path The path to the type, used in exception message.
   * @return The final component type.
   */
  public static Class<?> componentFinalType(Type type, String path) {
    // Peel off one generic/array layer per iteration until only a raw Class remains.
    while (!(type instanceof Class)) {
      type = componentType(type, path);
    }
    return (Class<?>) type;
  }

  /**
   * Determines the component type. Lists is the first type, Map is the second type, etc.
   *
   * @param type The parametrized type.
   * @param path The path to the type, used in exception message.
   * @return The component type (for maps: the value type; for collections/arrays: the element type).
   */
  public static Type componentType(Type type, String path) {
    if (type instanceof ParameterizedType) {
      ParameterizedType parameterizedType = (ParameterizedType) type;
      Class<?> rawType = (Class<?>) parameterizedType.getRawType();
      // Maps yield their value type argument; collections yield their element type argument.
      if (Map.class.isAssignableFrom(rawType)) {
        return parameterizedType.getActualTypeArguments()[1];
      } else if (Collection.class.isAssignableFrom(rawType)) {
        return parameterizedType.getActualTypeArguments()[0];
      } else {
        throw new CollectionExpressionException("Unknown collection type [" + type + "]");
      }
    } else if (type instanceof GenericArrayType) {
      // Generic arrays (e.g. T[]) expose their component type directly.
      return ((GenericArrayType) type).getGenericComponentType();
    }
    // NOTE(review): this cast assumes 'type' is a Class at this point; a TypeVariable or
    // WildcardType would raise a ClassCastException — presumably callers never pass those. TODO confirm.
    Class<?> rawType = (Class<?>) type;
    if (Map.class == type || Collection.class == type) {
      // A raw Map/Collection carries no generic information, so the component type is unknowable.
      throw new CollectionExpressionException("The method or member [" + path + "] returns a simple " +
          "Map or Collection. Unable to determine the type of the Map or Collection. " +
          "Please make this method generic so that the correct type can be determined.");
    } else if (rawType.isArray()) {
      return rawType.getComponentType();
    }
    return rawType;
  }

  /**
   * Returns true if the given type is a Parameterized type with a raw type of Map.
   *
   * @param t The type.
   * @return True or false.
   */
  public static boolean isGenericMap(Type t) {
    return t instanceof ParameterizedType && Map.class.isAssignableFrom((Class) ((ParameterizedType) t).getRawType());
  }

  /**
   * Determines the key and value types for a Map.
   *
   * @param type The parametrized type.
   * @param path The path to the type, used in exception message (currently unused here).
   * @return A two-element array of {key type, value type}; defaults to {String, Object}
   *         when no generic Map declaration can be found.
   */
  public static Type[] mapTypes(Type type, String path) {
    if (type instanceof Class) {
      Class<?> c = (Class<?>) type;
      // Climb the type hierarchy (declared interfaces first, then the superclass) until a
      // generic Map declaration such as "implements Map<K, V>" is found, or the top is reached.
      while (c != null && !isGenericMap(type)) {
        Type[] types = c.getGenericInterfaces();
        if (types != null && types.length > 0) {
          for (Type t : types) {
            if (isGenericMap(t)) {
              type = t;
              break;
            }
          }
        }
        // Go up to the next parent and check
        if (!isGenericMap(type)) {
          type = c.getGenericSuperclass();
          if (type instanceof Class) {
            c = (Class<?>) type;
          } else if (type instanceof ParameterizedType) {
            c = (Class) ((ParameterizedType) type).getRawType();
          } else {
            // Reached the top of the hierarchy (or an unclimbable type); stop the loop.
            c = null;
          }
        }
      }
    }
    if (type instanceof ParameterizedType) {
      ParameterizedType parameterizedType = (ParameterizedType) type;
      Class<?> rawType = (Class<?>) parameterizedType.getRawType();
      if (Map.class.isAssignableFrom(rawType)) {
        return parameterizedType.getActualTypeArguments();
      }
    }
    // Default to a string object map.
    return new Type[]{String.class, Object.class};
  }

  /**
   * Determines the raw type of the type given.
   *
   * @param type The type.
   * @return The raw type.
   */
  public static Class<?> rawType(Type type) {
    if (type instanceof ParameterizedType) {
      type = ((ParameterizedType) type).getRawType();
    } else if (type instanceof GenericArrayType) {
      // Create a zero-length array of the component type just to obtain its runtime array class.
      Class<?> componentType = (Class<?>) ((GenericArrayType) type).getGenericComponentType();
      type = Array.newInstance(componentType, 0).getClass();
    }
    return (Class<?>) type;
  }

  /**
   * Resolves the generic types of a field or method by matching up the generics in the class definition to the ones
   * used in the method/field. This also works for methods/fields that use other types that are generic. For example,
   * Map&lt;T, U&gt; can be resolved.
   *
   * NOTE(review): the initial walk uses getSuperclass(), so declaringClassGeneric is
   * presumably always a superclass of currentClass (not an interface) — TODO confirm.
   *
   * @param declaringClassGeneric The class where the generic method/field was defined that uses the generic.
   * @param currentClass          The current class that has completely defined all the generic information to satisfy
   *                              the method/field.
   * @param typeVariable          The generic type variable from the method/field.
   * @return The type of the generic method/field, or the original type variable if it
   *         could not be resolved to a concrete Class.
   */
  public static Type resolveGenericType(Class<?> declaringClassGeneric, Class<?> currentClass,
                                        final TypeVariable<?> typeVariable) {
    List<Class<?>> classes = new ArrayList<>();
    while (currentClass != declaringClassGeneric) {
      classes.add(currentClass);
      currentClass = currentClass.getSuperclass();
    }
    classes.add(declaringClassGeneric);
    // Reverse it to work from base class to child class
    Collections.reverse(classes);
    int position = -1;
    TypeVariable<?> currentTypeVariable = typeVariable;
    for (Class<?> klass : classes) {
      TypeVariable<?>[] genericTypes = klass.getTypeParameters();
      if (genericTypes == null) {
        break;
      }
      if (position != -1) {
        // A previous class bound the variable at 'position'; look up what the current
        // class substituted for it in its "extends Parent<...>" declaration.
        Type genericSuperclass = klass.getGenericSuperclass();
        if (!(genericSuperclass instanceof ParameterizedType)) {
          throw new IllegalStateException("Something bad happened while trying to resolve generic types. The class [" + genericSuperclass +
              "] was encountered but didn't have generic types. It probably should have.");
        }
        Type[] inheritedGenericTypes = ((ParameterizedType) genericSuperclass).getActualTypeArguments();
        Type nextParameterType = inheritedGenericTypes[position];
        if (nextParameterType instanceof TypeVariable<?>) {
          // Still a variable; keep chasing it down the hierarchy.
          currentTypeVariable = (TypeVariable<?>) nextParameterType;
        } else if (nextParameterType instanceof Class<?>) {
          // Fully resolved to a concrete class; done.
          return nextParameterType;
        }
      }
      // Find the declaration position of the current variable in this class's parameters.
      String name = currentTypeVariable.getName();
      for (int i = 0; i < genericTypes.length; i++) {
        if (genericTypes[i].getName().equals(name)) {
          position = i;
          currentTypeVariable = genericTypes[i];
          break;
        }
      }
    }
    // Could not resolve to a concrete class; return the original variable unchanged.
    return typeVariable;
  }
}
| prime-framework/prime-mvc | src/main/java/org/primeframework/mvc/util/TypeTools.java | Java | apache-2.0 | 7,911 |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
import os
class TestVersion(TestCase):
  """
  Class that tests the method of the version.py file used to format and compare version numbers
  of both Ambari (which use 3 digits separated by dots) and stacks (which use 4 digits separated by dots).
  """

  def setUp(self):
    # Load the version module under test directly from its source file, since it
    # lives outside this test package.
    import imp
    self.test_directory = os.path.dirname(os.path.abspath(__file__))

    test_file_path = os.path.join(self.test_directory, '../../../../ambari-common/src/main/python/resource_management/libraries/functions/version.py')
    with open(test_file_path, 'rb') as fp:
      self.version_module = imp.load_module('version', fp, test_file_path, ('.py', 'rb', imp.PY_SOURCE))

  def test_format(self):
    # Pairs of (input, expected): versions are padded out to four dot-separated digits.
    l = [("2.2", "2.2.0.0"),
         ("2.2.1", "2.2.1.0"),
         ("2.2.1.3", "2.2.1.3")]

    for input, expected in l:
      actual = self.version_module.format_stack_version(input)
      self.assertEqual(expected, actual)

    # Non-numeric stack names yield an empty string.
    gluster_fs_actual = self.version_module.format_stack_version("GlusterFS")
    self.assertEqual("", gluster_fs_actual)

  def test_format_with_hyphens(self):
    # Hyphenated prefixes/suffixes (stack name, build number) are stripped before formatting.
    actual = self.version_module.format_stack_version("FOO-1.0")
    self.assertEqual("1.0.0.0", actual)

    actual = self.version_module.format_stack_version("1.0.0-1234")
    self.assertEqual("1.0.0.0", actual)

    actual = self.version_module.format_stack_version("FOO-1.0-9999")
    self.assertEqual("1.0.0.0", actual)

  def test_comparison(self):
    # All versions to compare, from 1.0.0.0 to 3.0.0.0, and only include elements that are a multiple of 7.
    versions = range(1000, 3000, 7)
    versions = [".".join(list(str(elem))) for elem in versions]

    for idx, x in enumerate(versions):
      for idy, y in enumerate(versions):
        # Expected value will either be -1, 0, 1, and it relies on the fact
        # that an increasing index implies a greater version number.
        # Fix: the builtin cmp() was removed in Python 3; this expression is its
        # documented replacement and behaves identically on Python 2.
        expected_value = (idx > idy) - (idx < idy)
        actual_value = self.version_module.compare_versions(x, y)
        self.assertEqual(expected_value, actual_value)

    # Try something fancier
    self.assertEqual(0, self.version_module.compare_versions("2.10", "2.10.0"))
    self.assertEqual(0, self.version_module.compare_versions("2.10", "2.10.0.0"))
    self.assertEqual(0, self.version_module.compare_versions("2.10.0", "2.10.0.0"))

    # Comparing against a non-numeric version must raise ValueError.
    try:
      self.version_module.compare_versions("", "GlusterFS")
    except ValueError:
      pass
    else:
      self.fail("Did not raise exception")
/*
* Copyright 2005-2017 Dozer Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dozer.config;
import org.dozer.AbstractDozerTest;
import org.dozer.util.DozerConstants;
import org.junit.Before;
import org.junit.Test;
/**
* @author tierney.matt
*/
public class GlobalSettingsTest extends AbstractDozerTest {

  @Before
  public void setUp() {
    // Clear any previously configured custom config file so each test starts clean.
    System.setProperty(DozerConstants.CONFIG_FILE_SYS_PROP, "");
  }

  @Test
  public void testLoadDefaultPropFile_Default() {
    GlobalSettings settings = GlobalSettings.createNew();
    String loadedBy = settings.getLoadedByFileName();

    assertNotNull("loaded by name should not be null", loadedBy);
    assertEquals("invalid loaded by file name", DozerConstants.DEFAULT_CONFIG_FILE, loadedBy);
  }

  @Test
  public void testLoadDefaultPropFile_NotFound() {
    // Point the system property at a file name that cannot exist on the classpath.
    String missingFileName = String.valueOf(System.currentTimeMillis());
    System.setProperty(DozerConstants.CONFIG_FILE_SYS_PROP, missingFileName);

    GlobalSettings settings = GlobalSettings.createNew();

    // Every global setting must fall back to its documented default value.
    assertNull("loaded by file name should be null", settings.getLoadedByFileName());
    assertEquals("invalid stats enabled value", DozerConstants.DEFAULT_STATISTICS_ENABLED, settings.isStatisticsEnabled());
    assertEquals("invalid converter cache max size value", DozerConstants.DEFAULT_CONVERTER_BY_DEST_TYPE_CACHE_MAX_SIZE,
        settings.getConverterByDestTypeCacheMaxSize());
    assertEquals("invalid super type cache max size value", DozerConstants.DEFAULT_SUPER_TYPE_CHECK_CACHE_MAX_SIZE,
        settings.getSuperTypesCacheMaxSize());
    assertEquals("invalid autoregister jmx beans", DozerConstants.DEFAULT_AUTOREGISTER_JMX_BEANS,
        settings.isAutoregisterJMXBeans());
    assertEquals(DozerConstants.DEFAULT_PROXY_RESOLVER_BEAN, settings.getProxyResolverName());
    assertEquals(DozerConstants.DEFAULT_CLASS_LOADER_BEAN, settings.getClassLoaderName());
    assertEquals(DozerConstants.DEFAULT_EL_ENABLED, settings.isElEnabled());
  }

  @Test
  public void testLoadPropFile_SpecifiedViaSysProp() {
    String customFileName = "samplecustomdozer.properties";
    System.setProperty(DozerConstants.CONFIG_FILE_SYS_PROP, customFileName);

    GlobalSettings settings = GlobalSettings.createNew();

    // The custom file must be the one actually loaded, and its values must win.
    assertNotNull("loaded by name should not be null", settings.getLoadedByFileName());
    assertEquals("invalid load by file name", customFileName, settings.getLoadedByFileName());
    assertEquals("invalid stats enabled value", true, settings.isStatisticsEnabled());
    assertEquals("invalid converter cache max size value", 25000, settings.getConverterByDestTypeCacheMaxSize());
    assertEquals("invalid super type cache max size value", 10000, settings.getSuperTypesCacheMaxSize());
    assertEquals("invalid autoregister jmx beans", false, settings.isAutoregisterJMXBeans());
    assertEquals("org.dozer.CustomLoader", settings.getClassLoaderName());
    assertEquals("org.dozer.CustomResolver", settings.getProxyResolverName());
    assertEquals(true, settings.isElEnabled());
  }
}
| STRiDGE/dozer | core/src/test/java/org/dozer/config/GlobalSettingsTest.java | Java | apache-2.0 | 3,741 |
package org.wrl.spring.tx.service.impl.required;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.TransactionCallbackWithoutResult;
import org.springframework.transaction.support.TransactionTemplate;
import org.wrl.spring.tx.dao.IUserDao;
import org.wrl.spring.tx.model.UserModel;
import org.wrl.spring.tx.service.IAddressService;
import org.wrl.spring.tx.service.IUserService;
import org.wrl.spring.tx.util.TransactionTemplateUtils;
public class RequiredUserServiceImplWithSuccess implements IUserService {

    /** DAO that persists users. */
    private IUserDao userDao;

    /** Address service whose save must join the surrounding transaction. */
    private IAddressService addressService;

    /** Transaction manager backing the programmatic transaction template. */
    private PlatformTransactionManager txManager;

    public void setUserDao(IUserDao userDao) {
        this.userDao = userDao;
    }

    public void setTxManager(PlatformTransactionManager txManager) {
        this.txManager = txManager;
    }

    public void setAddressService(IAddressService addressService) {
        this.addressService = addressService;
    }

    /**
     * Saves the user and its address programmatically inside one transaction,
     * so both inserts commit or roll back together.
     */
    @Override
    public void save(final UserModel user) {
        final TransactionTemplate template =
                TransactionTemplateUtils.getDefaultTransactionTemplate(txManager);

        template.execute(new TransactionCallbackWithoutResult() {
            @Override
            protected void doInTransactionWithoutResult(TransactionStatus status) {
                // Persist the user first so its generated id can be copied onto the address.
                userDao.save(user);
                user.getAddress().setUserId(user.getId());
                // Executes within the same transaction as the user insert.
                addressService.save(user.getAddress());
            }
        });
    }

    /** Returns the total number of persisted users. */
    @Override
    public int countAll() {
        return userDao.countAll();
    }
}
| wangruiling/samples | spring/src/main/java/org/wrl/spring/tx/service/impl/required/RequiredUserServiceImplWithSuccess.java | Java | apache-2.0 | 1,788 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.acmpca.waiters;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.annotation.SdkInternalApi;
import com.amazonaws.waiters.WaiterAcceptor;
import com.amazonaws.waiters.WaiterState;
import com.amazonaws.services.acmpca.model.*;
import com.amazonaws.jmespath.*;
import javax.annotation.Generated;
@SdkInternalApi
@Generated("com.amazonaws:aws-java-sdk-code-generator")
class CertificateAuthorityCSRCreated {

    /**
     * Acceptor that keeps the waiter polling while the acmpca service reports
     * the CSR request as still in progress.
     */
    static class IsRequestInProgressExceptionMatcher extends WaiterAcceptor<GetCertificateAuthorityCsrResult> {

        /**
         * Compares the error code of the service exception against the expected
         * "RequestInProgressException" code.
         *
         * @param e
         *        the exception returned by the polled operation
         * @return true when the error codes match, false otherwise
         */
        @Override
        public boolean matches(AmazonServiceException e) {
            String errorCode = e.getErrorCode();
            return "RequestInProgressException".equals(errorCode);
        }

        /**
         * @return the waiter state entered when this acceptor matches — RETRY,
         *         i.e. keep polling
         */
        @Override
        public WaiterState getState() {
            return WaiterState.RETRY;
        }
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-acmpca/src/main/java/com/amazonaws/services/acmpca/waiters/CertificateAuthorityCSRCreated.java | Java | apache-2.0 | 1,934 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.pinpointemail.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
* <p>
* A request to enable or disable tracking of reputation metrics for a configuration set.
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutConfigurationSetReputationOptions"
* target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class PutConfigurationSetReputationOptionsRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {

    /** Name of the configuration set whose reputation metric tracking is being toggled. */
    private String configurationSetName;

    /**
     * Whether reputation metric tracking is enabled (<code>true</code>) or disabled
     * (<code>false</code>) for the configuration set.
     */
    private Boolean reputationMetricsEnabled;

    /**
     * Sets the name of the configuration set that you want to enable or disable reputation metric tracking for.
     *
     * @param configurationSetName
     *        the configuration set name
     */
    public void setConfigurationSetName(String configurationSetName) {
        this.configurationSetName = configurationSetName;
    }

    /**
     * Returns the name of the configuration set that you want to enable or disable reputation metric tracking for.
     *
     * @return the configuration set name
     */
    public String getConfigurationSetName() {
        return this.configurationSetName;
    }

    /**
     * Fluent variant of {@link #setConfigurationSetName(String)}.
     *
     * @param configurationSetName
     *        the configuration set name
     * @return this request, for method chaining
     */
    public PutConfigurationSetReputationOptionsRequest withConfigurationSetName(String configurationSetName) {
        setConfigurationSetName(configurationSetName);
        return this;
    }

    /**
     * Sets whether tracking of reputation metrics is enabled for the configuration set.
     *
     * @param reputationMetricsEnabled
     *        <code>true</code> to enable tracking, <code>false</code> to disable it
     */
    public void setReputationMetricsEnabled(Boolean reputationMetricsEnabled) {
        this.reputationMetricsEnabled = reputationMetricsEnabled;
    }

    /**
     * Returns whether tracking of reputation metrics is enabled for the configuration set.
     *
     * @return <code>true</code> when tracking is enabled, <code>false</code> when disabled
     */
    public Boolean getReputationMetricsEnabled() {
        return this.reputationMetricsEnabled;
    }

    /**
     * Fluent variant of {@link #setReputationMetricsEnabled(Boolean)}.
     *
     * @param reputationMetricsEnabled
     *        <code>true</code> to enable tracking, <code>false</code> to disable it
     * @return this request, for method chaining
     */
    public PutConfigurationSetReputationOptionsRequest withReputationMetricsEnabled(Boolean reputationMetricsEnabled) {
        setReputationMetricsEnabled(reputationMetricsEnabled);
        return this;
    }

    /**
     * Boolean-style accessor equivalent to {@link #getReputationMetricsEnabled()}.
     *
     * @return <code>true</code> when tracking is enabled, <code>false</code> when disabled
     */
    public Boolean isReputationMetricsEnabled() {
        return this.reputationMetricsEnabled;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        // Output format is kept stable: "{Field: value,Field: value}".
        StringBuilder builder = new StringBuilder("{");
        if (getConfigurationSetName() != null)
            builder.append("ConfigurationSetName: ").append(getConfigurationSetName()).append(",");
        if (getReputationMetricsEnabled() != null)
            builder.append("ReputationMetricsEnabled: ").append(getReputationMetricsEnabled());
        return builder.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (!(obj instanceof PutConfigurationSetReputationOptionsRequest))
            return false;

        PutConfigurationSetReputationOptionsRequest that = (PutConfigurationSetReputationOptionsRequest) obj;
        if (getConfigurationSetName() == null ? that.getConfigurationSetName() != null
                : !getConfigurationSetName().equals(that.getConfigurationSetName()))
            return false;
        if (getReputationMetricsEnabled() == null ? that.getReputationMetricsEnabled() != null
                : !getReputationMetricsEnabled().equals(that.getReputationMetricsEnabled()))
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        // Same 31-based accumulation as before so existing hash values are preserved.
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + (getConfigurationSetName() == null ? 0 : getConfigurationSetName().hashCode());
        hashCode = prime * hashCode + (getReputationMetricsEnabled() == null ? 0 : getReputationMetricsEnabled().hashCode());
        return hashCode;
    }

    @Override
    public PutConfigurationSetReputationOptionsRequest clone() {
        return (PutConfigurationSetReputationOptionsRequest) super.clone();
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-pinpointemail/src/main/java/com/amazonaws/services/pinpointemail/model/PutConfigurationSetReputationOptionsRequest.java | Java | apache-2.0 | 7,982 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.frauddetector.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.frauddetector.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
* GetModelVersionResult JSON Unmarshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class GetModelVersionResultJsonUnmarshaller implements Unmarshaller<GetModelVersionResult, JsonUnmarshallerContext> {

    /**
     * Walks the JSON token stream and populates a GetModelVersionResult from the
     * fields found at the current nesting depth.
     *
     * @param context the unmarshalling context wrapping the JSON parser
     * @return the populated result (empty when the current value is JSON null)
     * @throws Exception if the underlying parser fails
     */
    public GetModelVersionResult unmarshall(JsonUnmarshallerContext context) throws Exception {
        GetModelVersionResult getModelVersionResult = new GetModelVersionResult();
        // Remember where parsing started so we know when we have left this object's scope.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        // Fields of this object live exactly one level below the starting depth.
        int targetDepth = originalDepth + 1;
        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        // A JSON null for the whole object yields an empty result.
        if (token == VALUE_NULL) {
            return getModelVersionResult;
        }
        while (true) {
            if (token == null)
                break;
            if (token == FIELD_NAME || token == START_OBJECT) {
                // Dispatch on the field name; only fields at the expected depth are consumed.
                if (context.testExpression("modelId", targetDepth)) {
                    context.nextToken();
                    getModelVersionResult.setModelId(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("modelType", targetDepth)) {
                    context.nextToken();
                    getModelVersionResult.setModelType(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("modelVersionNumber", targetDepth)) {
                    context.nextToken();
                    getModelVersionResult.setModelVersionNumber(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("trainingDataSource", targetDepth)) {
                    context.nextToken();
                    getModelVersionResult.setTrainingDataSource(context.getUnmarshaller(String.class).unmarshall(context));
                }
                // Nested structures are delegated to their own unmarshallers.
                if (context.testExpression("trainingDataSchema", targetDepth)) {
                    context.nextToken();
                    getModelVersionResult.setTrainingDataSchema(TrainingDataSchemaJsonUnmarshaller.getInstance().unmarshall(context));
                }
                if (context.testExpression("externalEventsDetail", targetDepth)) {
                    context.nextToken();
                    getModelVersionResult.setExternalEventsDetail(ExternalEventsDetailJsonUnmarshaller.getInstance().unmarshall(context));
                }
                if (context.testExpression("ingestedEventsDetail", targetDepth)) {
                    context.nextToken();
                    getModelVersionResult.setIngestedEventsDetail(IngestedEventsDetailJsonUnmarshaller.getInstance().unmarshall(context));
                }
                if (context.testExpression("status", targetDepth)) {
                    context.nextToken();
                    getModelVersionResult.setStatus(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("arn", targetDepth)) {
                    context.nextToken();
                    getModelVersionResult.setArn(context.getUnmarshaller(String.class).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Stop once we pop back out to (or above) the depth where we started.
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }
        return getModelVersionResult;
    }

    // Lazily created shared instance; see note on getInstance().
    private static GetModelVersionResultJsonUnmarshaller instance;

    /**
     * Returns the shared unmarshaller instance.
     * NOTE(review): the lazy initialization is not synchronized, so a race could create
     * more than one instance — presumably harmless since the unmarshaller holds no state.
     */
    public static GetModelVersionResultJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new GetModelVersionResultJsonUnmarshaller();
        return instance;
    }
}
| aws/aws-sdk-java | aws-java-sdk-frauddetector/src/main/java/com/amazonaws/services/frauddetector/model/transform/GetModelVersionResultJsonUnmarshaller.java | Java | apache-2.0 | 4,920 |
package cbedoy.cblibrary.services;
import android.app.Activity;
import android.app.AlarmManager;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.pm.PackageInfo;
import android.content.pm.PackageManager;
import android.content.res.Configuration;
import android.graphics.Typeface;
import android.os.Handler;
import android.support.multidex.MultiDexApplication;
import android.view.LayoutInflater;
import android.view.inputmethod.InputMethodManager;
import java.util.HashMap;
import java.util.Map;
import cbedoy.cblibrary.utils.ImageLoaderService;
/**
* Created by Carlos Bedoy on 28/12/2014.
*
* Mobile App Developer
* CBLibrary
*
* E-mail: carlos.bedoy@gmail.com
* Facebook: https://www.facebook.com/carlos.bedoy
* Github: https://github.com/cbedoy
*/
public class ApplicationLoader extends MultiDexApplication
{
    // Process-wide singletons initialized once in onCreate(); volatile so reads
    // from other threads observe the initialized values.
    public static volatile Handler mainHandler;
    public static volatile Context mainContext;
    public static volatile LayoutInflater mainLayoutInflater;
    // NOTE(review): never assigned in this class — presumably set by application code elsewhere. TODO confirm.
    public static volatile String urlProject;
    // Typefaces loaded from assets/fonts in onCreate() and shared app-wide.
    public static volatile Typeface boldFont;
    public static volatile Typeface regularFont;
    public static volatile Typeface thinFont;
    public static volatile Typeface lightFont;
    public static volatile Typeface mediumFont;
    public static volatile Typeface cardFont;
    // NOTE(review): never assigned or read in this class — presumably used by UI code elsewhere. TODO confirm.
    public static Integer DISMISS_LOADER;
    /**
     * Application entry point: caches the application context, a main-looper Handler,
     * the LayoutInflater and the custom typefaces, then warms up the image loader.
     */
    @Override
    public void onCreate()
    {
        super.onCreate();
        mainContext = getApplicationContext();
        mainHandler = new Handler(getMainLooper());
        mainLayoutInflater = (LayoutInflater) getSystemService(Context.LAYOUT_INFLATER_SERVICE);
        boldFont = Typeface.createFromAsset(mainContext.getAssets(), "fonts/Roboto-Bold.ttf");
        regularFont = Typeface.createFromAsset(mainContext.getAssets(), "fonts/Roboto-Regular.ttf");
        thinFont = Typeface.createFromAsset(mainContext.getAssets(), "fonts/Roboto-Thin.ttf");
        lightFont = Typeface.createFromAsset(mainContext.getAssets(), "fonts/Roboto-Light.ttf");
        cardFont = Typeface.createFromAsset(mainContext.getAssets(), "fonts/CardType.ttf");
        mediumFont = Typeface.createFromAsset(mainContext.getAssets(), "fonts/Roboto-Medium.ttf");
        // Eagerly initializes the image loader singleton.
        ImageLoaderService.getInstance();
    }
    @Override
    public void onConfigurationChanged(Configuration newConfig) {
        super.onConfigurationChanged(newConfig);
    }
    /**
     * Returns this app's version as "versionName (versionCode)", or "" if the
     * package info cannot be resolved.
     */
    public static String getAppVersion() {
        try {
            PackageInfo packageInfo = mainContext.getPackageManager().getPackageInfo(mainContext.getPackageName(), 0);
            return packageInfo.versionName + " ("+packageInfo.versionCode+")";
        } catch (PackageManager.NameNotFoundException e) {
            e.printStackTrace();
            return "";
        }
    }
    /**
     * Persists every entry of {@code information} as a String (via toString())
     * into the SharedPreferences file named {@code keyShared}.
     * NOTE(review): a null map value would throw NPE on toString() — callers presumably never pass nulls. TODO confirm.
     */
    public static void savePreferences(String keyShared, HashMap<String, Object> information){
        SharedPreferences sharedPreferences = mainContext.getSharedPreferences(keyShared, Context.MODE_PRIVATE);
        SharedPreferences.Editor edit = sharedPreferences.edit();
        for(String key : information.keySet()){
            edit.putString(key, information.get(key).toString());
        }
        edit.commit();
    }
    /**
     * Loads the whole SharedPreferences file named {@code key} into a HashMap.
     * Values come back with whatever type SharedPreferences stored (here: Strings,
     * given savePreferences writes only strings).
     */
    public static HashMap<String, Object> getSharedFromKey(String key){
        HashMap<String, Object> information = new HashMap<String, Object>();
        SharedPreferences sharedPreferences = mainContext.getSharedPreferences(key, Context.MODE_PRIVATE);
        Map<String, ?> all = sharedPreferences.getAll();
        for(Map.Entry<String,?> entry : all.entrySet()){
            information.put(entry.getKey(), entry.getValue());
        }
        return information;
    }
    /** Hides the soft keyboard for the given activity's window, if any is showing. */
    public static void hideKeyboard(Activity activity) {
        if (activity != null && activity.getWindow() != null && activity.getWindow().getDecorView() != null) {
            InputMethodManager imm = (InputMethodManager) activity.getSystemService(Context.INPUT_METHOD_SERVICE);
            imm.hideSoftInputFromWindow(activity.getWindow().getDecorView().getWindowToken(), 0);
        }
    }
    /** Returns true if a package with the given id ({@code uri}) is installed on the device. */
    private boolean verifyIfApplicationInstalled(String uri) {
        PackageManager pm = getPackageManager();
        boolean app_installed = false;
        try {
            pm.getPackageInfo(uri, PackageManager.GET_ACTIVITIES);
            app_installed = true;
        }
        catch (PackageManager.NameNotFoundException e) {
            app_installed = false;
        }
        return app_installed ;
    }
}
| SelfD3veloper/CBLibrary | app/src/main/java/cbedoy/cblibrary/services/ApplicationLoader.java | Java | apache-2.0 | 4,741 |
describe('sandbox library - xml2Json', function () {
    this.timeout(1000 * 60);

    var Sandbox = require('../../../'),
        sandbox;

    // Fresh sandbox context for every test case.
    beforeEach(function (done) {
        Sandbox.createContext({}, function (err, createdContext) {
            sandbox = createdContext;
            done(err);
        });
    });

    // Release sandbox resources after each test case.
    afterEach(function () {
        sandbox.dispose();
        sandbox = null;
    });

    it('should exist', function (done) {
        sandbox.execute(`
            var assert = require('assert');
            assert.strictEqual(typeof xml2Json, 'function', 'typeof xml2Json must be function');
        `, done);
    });

    it('should have basic functionality working', function (done) {
        sandbox.execute(`
            var assert = require('assert'),
                xml = '<food><key>Homestyle Breakfast</key><value>950</value></food>',
                object = xml2Json(xml).food;

            assert.strictEqual(object.key, 'Homestyle Breakfast', 'xml2Json conversion must be valid');
            assert.strictEqual(object.value, '950', 'xml2Json conversion must be valid');
        `, done);
    });
});
| postmanlabs/postman-sandbox | test/unit/sandbox-libraries/xml2Json.test.js | JavaScript | apache-2.0 | 1,119 |
package org.nutz.plugins.quartz;
import java.util.Date;
import org.nutz.ioc.impl.PropertiesProxy;
import org.nutz.lang.Lang;
import org.nutz.lang.Strings;
import org.nutz.log.Log;
import org.nutz.log.Logs;
import org.nutz.plugins.quartz.annotation.Scheduled;
import org.nutz.resource.Scans;
import org.quartz.CronScheduleBuilder;
import org.quartz.CronTrigger;
import org.quartz.Job;
import org.quartz.JobBuilder;
import org.quartz.JobDetail;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.SimpleScheduleBuilder;
import org.quartz.SimpleTrigger;
import org.quartz.TriggerBuilder;
public class NutQuartzCronJobFactory {

    private static final Log log = Logs.get();

    /** Configuration holding the "cron.*" job definitions (injected). */
    protected PropertiesProxy conf;
    /** Quartz scheduler the jobs are registered with (injected). */
    protected Scheduler scheduler;

    /**
     * Entry point: reads every "cron.&lt;name&gt;=&lt;cron expression&gt;" key from the
     * configuration and schedules the corresponding job class. The special key
     * "cron.pkgs" holds a comma-separated list of packages scanned for classes
     * annotated with {@link Scheduled}. A bare name (no dot) is resolved against
     * the sibling "job" package.
     */
    public void init() throws Exception {
        String prefix = "cron.";
        for (String key : conf.getKeys()) {
            // Skip keys that are not of the form "cron.<name>".
            if (key.length() < prefix.length() + 1 || !key.startsWith(prefix))
                continue;
            String name = key.substring(prefix.length());
            if ("pkgs".equals(name)) {
                log.debug("found cron job packages = " + conf.get(key));
                for (String pkg : Strings.splitIgnoreBlank(conf.get(key), ",")) {
                    addPackage(pkg);
                }
                continue;
            }
            String cron = conf.get(key);
            log.debugf("job define name=%s cron=%s", name, cron);
            Class<?> klass = null;
            if (name.contains(".")) {
                klass = Lang.loadClass(name);
            } else {
                // Short names live in the "job" sub-package next to this factory.
                klass = Lang.loadClass(getClass().getPackage().getName() + ".job." + name);
            }
            cron(cron, klass);
        }
    }

    /** Scans the given package and schedules every class annotated with {@link Scheduled}. */
    public void addPackage(String pkg) {
        for (Class<?> klass : Scans.me().scanPackage(pkg)) {
            Scheduled scheduled = klass.getAnnotation(Scheduled.class);
            if (scheduled != null) {
                try {
                    add(klass, scheduled);
                }
                catch (SchedulerException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    }

    /**
     * Schedules a single annotated job class. A non-blank {@code cron()} expression
     * wins over {@code fixedRate()}; otherwise a simple repeating trigger is built
     * from {@code fixedRate()}/{@code count()}/{@code initialDelay()} (all in seconds,
     * count 0 or less means repeat forever). Does nothing when neither is set.
     */
    @SuppressWarnings("unchecked")
    public void add(Class<?> klass, Scheduled scheduled) throws SchedulerException {
        String name = klass.getName();
        if (!Strings.isBlank(scheduled.cron())) {
            try {
                log.debugf("job define name=%s cron=%s", name, scheduled.cron());
                cron(scheduled.cron(), klass);
                return;
            }
            catch (SchedulerException e) {
                // Kept for backward compatibility: callers historically see a RuntimeException here.
                throw new RuntimeException(e);
            }
        }
        if (scheduled.fixedRate() > 0) {
            log.debugf("job define name=%s fixedRate=%s count=%s initialDelay=%s",
                       name, scheduled.fixedRate(), scheduled.count(), scheduled.initialDelay());
            // fixedRate() is known positive here, so the interval is set unconditionally
            // (the original duplicated the outer check).
            SimpleScheduleBuilder schedule = SimpleScheduleBuilder.simpleSchedule()
                                                                  .withIntervalInSeconds(scheduled.fixedRate());
            if (scheduled.count() > 0) {
                schedule.withRepeatCount(scheduled.count());
            } else {
                schedule.repeatForever();
            }
            TriggerBuilder<SimpleTrigger> trigger = TriggerBuilder.newTrigger().withIdentity(name).withSchedule(schedule);
            if (scheduled.initialDelay() > 0)
                // 1000L forces long arithmetic: the previous int multiply overflowed
                // for delays longer than ~24 days.
                trigger.startAt(new Date(System.currentTimeMillis() + scheduled.initialDelay() * 1000L));
            JobDetail job = JobBuilder.newJob((Class<? extends Job>) klass).withIdentity(name).build();
            scheduler.scheduleJob(job, trigger.build());
        }
    }

    /** Registers the given job class with a cron trigger; the job identity is the class name. */
    @SuppressWarnings("unchecked")
    public void cron(String cron, Class<?> klass) throws SchedulerException {
        String name = klass.getName();
        JobDetail job = JobBuilder.newJob((Class<? extends Job>) klass).withIdentity(name).build();
        CronTrigger trigger = TriggerBuilder.newTrigger().withIdentity(name)
                                            .withSchedule(CronScheduleBuilder.cronSchedule(cron))
                                            .build();
        scheduler.scheduleJob(job, trigger);
    }
}
| lusparioTT/fks | src/org/nutz/plugins/quartz/NutQuartzCronJobFactory.java | Java | apache-2.0 | 4,260 |
package ru.stqa.pft.addressbook.tests;
import org.testng.Assert;
import org.testng.annotations.Test;
import ru.stqa.pft.addressbook.model.GroupData;
import java.util.List;
public class GroupDeletionTests extends TestBase {

  /**
   * Deletes the last group in the list and verifies that the remaining groups
   * are exactly the previous list minus the deleted element.
   */
  @Test
  public void groupDeletion() {
    app.getNavigationHelper().gotoGroupPage();
    // Precondition: there must be at least one group to delete.
    if (!app.getGroupHelper().isThereGroup()) {
      app.getGroupHelper().createGroup(new GroupData("test135", null, null));
    }
    app.getNavigationHelper().gotoGroupPage();
    List<GroupData> before = app.getGroupHelper().getGroupList();
    app.getGroupHelper().selectGroup(before.size() - 1);
    app.getGroupHelper().deleteSelectedGroups();
    app.getGroupHelper().returnGroupsPage();
    List<GroupData> after = app.getGroupHelper().getGroupList();
    // The list shrinks by exactly one ...
    Assert.assertEquals(after.size(), before.size() - 1);
    // ... and element-wise the result equals the old list without its last item.
    before.remove(before.size() - 1);
    Assert.assertEquals(before, after);
  }
}
| evg08/MyJava | addressbook-tests/src/test/java/ru/stqa/pft/addressbook/tests/GroupDeletionTests.java | Java | apache-2.0 | 1,201 |
import click
import webbrowser
import sys
import floyd
from floyd.client.auth import AuthClient
from floyd.manager.auth_config import AuthConfigManager
from floyd.model.access_token import AccessToken
from floyd.model.credentials import Credentials
from floyd.log import logger as floyd_logger
@click.command()
@click.option('--token', is_flag=True, default=False, help='Just enter token')
@click.option('--username', '-u', help='FloydHub username')
@click.option('--password', '-p', help='FloydHub password')
def login(token, username, password):
    """
    Log into Floyd via Auth0.

    Two flows are supported:

    * username/password: exchanges the credentials for an access code via
      ``AuthClient().login`` (prompting for a hidden password if not given);
    * browser token (default): opens the settings page (skipped when
      ``--token`` is passed) and prompts the user to paste the token.

    In both cases the access code is resolved to a user and persisted
    locally through ``AuthConfigManager``.
    """
    if username:
        # Use username / password login
        if not password:
            # hide_input keeps the password off the terminal.
            password = click.prompt('Please enter your password', type=str, hide_input=True)
        password = password.strip()
        if not password:
            floyd_logger.info('You entered an empty string. Please make sure you enter your password correctly.')
            sys.exit(1)
        login_credentials = Credentials(username=username,
                                        password=password)
        access_code = AuthClient().login(login_credentials)
        if not access_code:
            floyd_logger.info("Failed to login")
            return
    else:
        # Fallback to the access token from the browser login
        if not token:
            cli_info_url = "{}/settings/security".format(floyd.floyd_web_host)
            click.confirm('Authentication token page will now open in your browser. Continue?',
                          abort=True,
                          default=True)
            webbrowser.open(cli_info_url)
        floyd_logger.info("Please copy and paste the authentication token.")
        # Hidden input: the pasted token should not be echoed either.
        access_code = click.prompt('This is an invisible field. Paste token and press ENTER', type=str, hide_input=True)
        access_code = access_code.strip()
        if not access_code:
            floyd_logger.info("Empty token received. Make sure your shell is handling the token appropriately.")
            floyd_logger.info("See docs for help: http://docs.floydhub.com/faqs/authentication/")
            return
    # Defensive strip for the username/password branch (no-op for the token branch).
    access_code = access_code.strip(" ")
    user = AuthClient().get_user(access_code)
    access_token = AccessToken(username=user.username,
                               token=access_code)
    AuthConfigManager.set_access_token(access_token)
    floyd_logger.info("Login Successful as %s", user.username)
@click.command()
def logout():
    """
    Logout of Floyd.

    Clears the locally stored access token via ``AuthConfigManager``.
    """
    AuthConfigManager.purge_access_token()
| mckayward/floyd-cli | floyd/cli/auth.py | Python | apache-2.0 | 2,580 |
var querystring = require('querystring');
var http = require('http');
var config = require("../conf/conf.js");
// Session cookie captured from the most recent API response; replayed on
// subsequent requests so the server-side session is preserved across calls.
var static_cookie = "";

/**
 * Performs an HTTP request against the NS API and parses the JSON response.
 *
 * @param {object} opt options for http.request (host/port/path/...); the
 *     headers object is created/augmented in place.
 * @param {?string} post URL-encoded request body, or null/undefined for no body.
 * @param {function(?Error|string, ?object|string)} cb called with
 *     (err, parsedJson); a payload carrying an "error" field is reported as
 *     cb(payload.error, rawBody).
 */
var callNsApi = function(opt, post, cb) {
    if (!opt.headers)
        opt.headers = {};
    if (static_cookie && opt) {
        // FIX: "cookie" was assigned without var, leaking an implicit global.
        // Only the "name=value" part of the Set-Cookie header is replayed.
        var cookie = (static_cookie + "").split(";").shift();
        opt.headers.Cookie = cookie;
    }
    if (post) {
        opt.headers['Content-Type'] = 'application/x-www-form-urlencoded';
        // FIX: Content-Length must be the byte length of the body; String#length
        // counts UTF-16 code units and under-reports for non-ASCII form data.
        opt.headers['Content-Length'] = Buffer.byteLength(post);
    }
    var post_req = http.request(opt, function(res) {
        res.setEncoding('utf8');
        // Remember the session cookie for subsequent requests.
        if (res.headers["set-cookie"])
            static_cookie = res.headers["set-cookie"];
        var data = "";
        res.on('data', function (chunk) {
            data += chunk;
        });
        res.on('error', function(err) {
            cb(err, null);
        });
        res.on('end', function () {
            try {
                var json_data = JSON.parse(data);
                if (json_data.error)
                    cb(json_data.error, data);
                else
                    cb(null, json_data);
            } catch (e) {
                // Invalid JSON: dump the raw body for diagnosis, report the parse error.
                console.log(data);
                cb(e, null);
            }
        });
    });
    post_req.on('error', function(err) {
        cb(err, null);
    });
    if (post) {
        post_req.write(post);
    }
    post_req.end();
};
/**
 * Sends a request to the NS API endpoint "/api/1/<key><uri>".
 * When a "post" object is given, it is form-encoded and sent as a POST body;
 * otherwise a plain GET is issued. The callback receives (err, parsedJson).
 */
module.exports.pushUri = function (uri, post, cb) {
    var payload = post ? querystring.stringify(post) : null;
    var options = {
        host: config.api.url,
        port: 80,
        path: '/api/1/' + config.api.key + uri,
        headers: null
    };
    if (post) {
        options.method = 'POST';
    }
    callNsApi(options, payload, cb);
};
| Iragne/NSAPIUnitTest | libs/http.js | JavaScript | apache-2.0 | 1,754 |
// Copyright 2000-2021 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.vcs.log.data.index;
import com.intellij.concurrency.ConcurrentCollectionFactory;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.progress.ProcessCanceledException;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.CheckedDisposable;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.Pair;
import com.intellij.openapi.util.objectTree.ThrowableInterner;
import com.intellij.openapi.util.registry.Registry;
import com.intellij.openapi.vcs.VcsException;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.util.Consumer;
import com.intellij.util.EmptyConsumer;
import com.intellij.util.ThrowableRunnable;
import com.intellij.util.containers.ConcurrentIntObjectMap;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.indexing.StorageException;
import com.intellij.util.io.*;
import com.intellij.vcs.log.VcsLogProperties;
import com.intellij.vcs.log.VcsLogProvider;
import com.intellij.vcs.log.VcsUserRegistry;
import com.intellij.vcs.log.data.SingleTaskController;
import com.intellij.vcs.log.data.VcsLogProgress;
import com.intellij.vcs.log.data.VcsLogStorage;
import com.intellij.vcs.log.data.VcsLogStorageImpl;
import com.intellij.vcs.log.impl.FatalErrorHandler;
import com.intellij.vcs.log.impl.HeavyAwareExecutor;
import com.intellij.vcs.log.impl.VcsIndexableLogProvider;
import com.intellij.vcs.log.impl.VcsLogIndexer;
import com.intellij.vcs.log.statistics.VcsLogIndexCollector;
import com.intellij.vcs.log.util.*;
import it.unimi.dsi.fastutil.ints.IntSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.TestOnly;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.*;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.IntStream;
import static com.intellij.vcs.log.data.index.VcsLogFullDetailsIndex.INDEX;
import static com.intellij.vcs.log.util.PersistentUtil.calcIndexId;
public class VcsLogPersistentIndex implements VcsLogModifiableIndex, Disposable {
  private static final Logger LOG = Logger.getInstance(VcsLogPersistentIndex.class);
  // On-disk index format version; combined with VcsLogStorageImpl.VERSION when versioning storage files.
  private static final int VERSION = 18;
  public static final VcsLogProgress.ProgressKey INDEXING = new VcsLogProgress.ProgressKey("index");

  @NotNull private final Project myProject;
  @NotNull private final FatalErrorHandler myFatalErrorsConsumer;
  @NotNull private final VcsLogProgress myProgress;
  // One indexer per root; only roots whose provider supports indexing are present.
  @NotNull private final Map<VirtualFile, VcsLogIndexer> myIndexers;
  @NotNull private final VcsLogStorage myStorage;
  @NotNull private final Set<VirtualFile> myRoots;
  // Repositories excluded from indexing (too big / too many errors).
  @NotNull private final VcsLogBigRepositoriesList myBigRepositoriesList;
  @NotNull private final VcsLogIndexCollector myIndexCollector;
  @NotNull private final CheckedDisposable myDisposableFlag = Disposer.newCheckedDisposable();
  // Null when the index storage could not be created (see createIndexStorage); the index is then disabled.
  @Nullable private final IndexStorage myIndexStorage;
  @Nullable private final IndexDataGetter myDataGetter;

  @NotNull private final SingleTaskController<IndexingRequest, Void> mySingleTaskController;
  // Per-root bookkeeping: number of in-flight tasks, accumulated indexing time,
  // indexing time limit (minutes, from the registry) and error counts keyed by stack-trace hash.
  @NotNull private final Map<VirtualFile, AtomicInteger> myNumberOfTasks = new HashMap<>();
  @NotNull private final Map<VirtualFile, AtomicLong> myIndexingTime = new HashMap<>();
  @NotNull private final Map<VirtualFile, AtomicInteger> myIndexingLimit = new HashMap<>();
  @NotNull private final Map<VirtualFile, ConcurrentIntObjectMap<Integer>> myIndexingErrors = new HashMap<>();

  @NotNull private final List<IndexingFinishedListener> myListeners = ContainerUtil.createLockFreeCopyOnWriteList();

  // Commits waiting to be indexed, per root; accessed only from synchronized methods of this class.
  @NotNull private Map<VirtualFile, IntSet> myCommitsToIndex = new HashMap<>();
  /**
   * Creates the persistent index over the given roots. Only roots whose provider
   * supports indexing (see {@link #getAvailableIndexers}) participate. If index
   * storage cannot be opened (even after a reset), the failure is reported to
   * {@code fatalErrorsConsumer} and the index runs disabled ({@code myIndexStorage == null}).
   */
  public VcsLogPersistentIndex(@NotNull Project project,
                               @NotNull VcsLogStorage storage,
                               @NotNull VcsLogProgress progress,
                               @NotNull Map<VirtualFile, VcsLogProvider> providers,
                               @NotNull FatalErrorHandler fatalErrorsConsumer,
                               @NotNull Disposable disposableParent) {
    myStorage = storage;
    myProject = project;
    myProgress = progress;
    myFatalErrorsConsumer = fatalErrorsConsumer;
    myBigRepositoriesList = VcsLogBigRepositoriesList.getInstance();
    myIndexCollector = VcsLogIndexCollector.getInstance(myProject);

    myIndexers = getAvailableIndexers(providers);
    myRoots = new LinkedHashSet<>(myIndexers.keySet());

    VcsUserRegistry userRegistry = myProject.getService(VcsUserRegistry.class);

    myIndexStorage = createIndexStorage(fatalErrorsConsumer, myProject.getName(), calcIndexId(myProject, myIndexers), userRegistry);
    if (myIndexStorage != null) {
      myDataGetter = new IndexDataGetter(myProject, myRoots, myIndexStorage, myStorage, myFatalErrorsConsumer);
    }
    else {
      myDataGetter = null;
    }

    // Per-root counters used to track indexing progress, time and errors.
    for (VirtualFile root : myRoots) {
      myNumberOfTasks.put(root, new AtomicInteger());
      myIndexingTime.put(root, new AtomicLong());
      myIndexingLimit.put(root, new AtomicInteger(getIndexingLimit()));
      myIndexingErrors.put(root, ConcurrentCollectionFactory.createConcurrentIntObjectMap());
    }

    mySingleTaskController = new MySingleTaskController(project, myIndexStorage != null ? myIndexStorage : this);

    Disposer.register(disposableParent, this);
    Disposer.register(this, myDisposableFlag);
  }
  // Indexing time limit per root, in minutes, from the registry; never below one minute.
  private static int getIndexingLimit() {
    return Math.max(1, Registry.intValue("vcs.log.index.limit.minutes"));
  }

  /**
   * Opens (or creates) the on-disk index storage, resetting it first when it is broken.
   * Returns {@code null} and reports a fatal error when the storage cannot be opened at all.
   */
  protected IndexStorage createIndexStorage(@NotNull FatalErrorHandler fatalErrorHandler,
                                            @NotNull String projectName, @NotNull String logId, @NotNull VcsUserRegistry registry) {
    try {
      return IOUtil.openCleanOrResetBroken(() -> new IndexStorage(projectName, logId, myStorage, registry,
                                                                  myRoots, fatalErrorHandler, this),
                                           () -> IndexStorage.cleanup(projectName, logId));
    }
    catch (IOException e) {
      myFatalErrorsConsumer.consume(this, e);
    }
    return null;
  }
  // Queues indexing requests; they are executed on a background task (see MySingleTaskController).
  @Override
  public void scheduleIndex(boolean full) {
    doScheduleIndex(full, request -> mySingleTaskController.request(request));
  }

  // Test-only synchronous variant: runs the requests in place instead of queueing them.
  @TestOnly
  void indexNow(boolean full) {
    doScheduleIndex(full, request -> request.run(myProgress.createProgressIndicator(INDEXING)));
  }

  private synchronized void doScheduleIndex(boolean full, @NotNull Consumer<IndexingRequest> requestConsumer) {
    if (myDisposableFlag.isDisposed()) return;
    if (myCommitsToIndex.isEmpty() || myIndexStorage == null) return;
    // for fresh index, wait for complete log to load and index everything in one command
    if (myIndexStorage.isFresh() && !full) return;

    Map<VirtualFile, IntSet> commitsToIndex = myCommitsToIndex;
    // Swap out the pending map so commits scheduled while indexing runs are collected separately.
    myCommitsToIndex = new HashMap<>();

    boolean isFull = full && myIndexStorage.isFresh();
    if (isFull) LOG.debug("Index storage for project " + myProject.getName() + " is fresh, scheduling full reindex");
    for (VirtualFile root : commitsToIndex.keySet()) {
      IntSet commits = commitsToIndex.get(root);
      if (commits.isEmpty()) continue;

      if (myBigRepositoriesList.isBig(root)) {
        myCommitsToIndex.put(root, commits); // put commits back in order to be able to reindex
        LOG.info("Indexing repository " + root.getName() + " is skipped");
        continue;
      }

      requestConsumer.consume(new IndexingRequest(root, myIndexStorage.paths.getPathsEncoder(), commits, isFull));
    }

    if (isFull) {
      myIndexCollector.reportFreshIndex();
      myIndexStorage.unmarkFresh();
    }
  }
  // Writes one commit's details into every index map, keyed by the commit's integer index.
  private void storeDetail(@NotNull VcsLogIndexer.CompressedDetails detail) {
    if (myIndexStorage == null) return;
    try {
      int index = myStorage.getCommitIndex(detail.getId(), detail.getRoot());

      myIndexStorage.messages.put(index, detail.getFullMessage());
      myIndexStorage.trigrams.update(index, detail);
      myIndexStorage.users.update(index, detail);
      myIndexStorage.paths.update(index, detail);
      myIndexStorage.parents.put(index, ContainerUtil.map(detail.getParents(), p -> myStorage.getCommitIndex(p, detail.getRoot())));
      // we know the whole graph without timestamps now
      // The committer is stored only when it differs from the author (saves space).
      if (!detail.getAuthor().equals(detail.getCommitter())) {
        myIndexStorage.committers.put(index, myIndexStorage.users.getUserId(detail.getCommitter()));
      }
      myIndexStorage.timestamps.put(index, Pair.create(detail.getAuthorTime(), detail.getCommitTime()));

      // Adding to the "commits" set last marks the commit as fully indexed.
      myIndexStorage.commits.put(index);
    }
    catch (IOException e) {
      myFatalErrorsConsumer.consume(this, e);
    }
  }

  // Flushes every index map to disk; storage failures are reported as fatal errors.
  private void flush() {
    try {
      if (myIndexStorage != null) {
        myIndexStorage.messages.force();
        myIndexStorage.trigrams.flush();
        myIndexStorage.users.flush();
        myIndexStorage.paths.flush();
        myIndexStorage.parents.force();
        myIndexStorage.commits.flush();
        myIndexStorage.committers.force();
        myIndexStorage.timestamps.force();
      }
    }
    catch (StorageException e) {
      myFatalErrorsConsumer.consume(this, e);
    }
  }
  // Marks the commits set corrupted so the index is rebuilt on the next start.
  @Override
  public void markCorrupted() {
    if (myIndexStorage != null) myIndexStorage.commits.markCorrupted();
  }

  // True when the commit is present in the "commits" set (or when the index is disabled).
  @Override
  public boolean isIndexed(int commit) {
    try {
      return myIndexStorage == null || myIndexStorage.commits.contains(commit);
    }
    catch (IOException e) {
      myFatalErrorsConsumer.consume(this, e);
    }
    return false;
  }

  // A root is fully indexed when nothing is pending for it and no indexing task is running.
  @Override
  public synchronized boolean isIndexed(@NotNull VirtualFile root) {
    return isIndexingEnabled(root) &&
           (!myCommitsToIndex.containsKey(root) && myNumberOfTasks.get(root).get() == 0);
  }

  // Indexing is enabled for a root when storage exists, the root has an indexer
  // and the root is not on the "big repositories" exclusion list.
  @Override
  public boolean isIndexingEnabled(@NotNull VirtualFile root) {
    if (myIndexStorage == null) return false;
    return myRoots.contains(root) && !(myBigRepositoriesList.isBig(root));
  }

  // Queues a commit for indexing unless it is already indexed or the root is not ours.
  @Override
  public synchronized void markForIndexing(int index, @NotNull VirtualFile root) {
    if (isIndexed(index) || !myRoots.contains(root)) return;
    IntCollectionUtil.add(myCommitsToIndex, root, index);
  }

  // Null when the index is disabled (no storage).
  @Nullable
  @Override
  public IndexDataGetter getDataGetter() {
    if (myIndexStorage == null) return null;
    return myDataGetter;
  }

  @Override
  public void addListener(@NotNull IndexingFinishedListener l) {
    myListeners.add(l);
  }

  @Override
  public void removeListener(@NotNull IndexingFinishedListener l) {
    myListeners.remove(l);
  }

  // Nothing to do here: child disposables registered in the constructor are handled by Disposer.
  @Override
  public void dispose() {
  }
  // Collects the indexer for every root whose provider both declares SUPPORTS_INDEXING
  // and implements VcsIndexableLogProvider; iteration order of "providers" is preserved.
  @NotNull
  private static Map<VirtualFile, VcsLogIndexer> getAvailableIndexers(@NotNull Map<VirtualFile, VcsLogProvider> providers) {
    Map<VirtualFile, VcsLogIndexer> indexers = new LinkedHashMap<>();
    for (Map.Entry<VirtualFile, VcsLogProvider> entry : providers.entrySet()) {
      VirtualFile root = entry.getKey();
      VcsLogProvider provider = entry.getValue();
      if (VcsLogProperties.SUPPORTS_INDEXING.getOrDefault(provider) && provider instanceof VcsIndexableLogProvider) {
        indexers.put(root, ((VcsIndexableLogProvider)provider).getIndexer());
      }
    }
    return indexers;
  }

  // Public view of the same computation: the set of roots that can be indexed.
  @NotNull
  public static Set<VirtualFile> getRootsForIndexing(@NotNull Map<VirtualFile, VcsLogProvider> providers) {
    return getAvailableIndexers(providers).keySet();
  }
  /**
   * Bundle of all on-disk maps making up the index: the set of indexed commits,
   * full messages, parents, committers, timestamps, plus the trigram/user/path
   * inverted indexes. Each map registers its own close with Disposer, so disposing
   * this object closes everything.
   */
  static class IndexStorage implements Disposable {
    private static final String COMMITS = "commits";
    private static final String MESSAGES = "messages";
    private static final String PARENTS = "parents";
    private static final String COMMITTERS = "committers";
    private static final String TIMESTAMPS = "timestamps";
    @NotNull public final PersistentSet<Integer> commits;
    @NotNull public final PersistentMap<Integer, String> messages;
    @NotNull public final PersistentMap<Integer, List<Integer>> parents;
    @NotNull public final PersistentMap<Integer, Integer> committers;
    @NotNull public final PersistentMap<Integer, Pair<Long, Long>> timestamps;
    @NotNull public final VcsLogMessagesTrigramIndex trigrams;
    @NotNull public final VcsLogUserIndex users;
    @NotNull public final VcsLogPathsIndex paths;
    // True when the storage was just created (no commits file existed yet).
    private volatile boolean myIsFresh;

    IndexStorage(@NotNull String projectName,
                 @NotNull String logId,
                 @NotNull VcsLogStorage storage,
                 @NotNull VcsUserRegistry userRegistry,
                 @NotNull Set<VirtualFile> roots,
                 @NotNull FatalErrorHandler fatalErrorHandler,
                 @NotNull Disposable parentDisposable)
      throws IOException {
      Disposer.register(parentDisposable, this);

      try {
        StorageId storageId = indexStorageId(projectName, logId);
        StorageLockContext storageLockContext = new StorageLockContext(true);

        Path commitsStorage = storageId.getStorageFile(COMMITS);
        // Absence of the commits file means the index is brand new.
        myIsFresh = !Files.exists(commitsStorage);
        commits = new PersistentSetImpl<>(commitsStorage, EnumeratorIntegerDescriptor.INSTANCE, Page.PAGE_SIZE, storageLockContext,
                                          storageId.getVersion());
        Disposer.register(this, () -> catchAndWarn(commits::close));

        messages = new PersistentHashMap<>(storageId.getStorageFile(MESSAGES), EnumeratorIntegerDescriptor.INSTANCE,
                                           EnumeratorStringDescriptor.INSTANCE, Page.PAGE_SIZE, storageId.getVersion(),
                                           storageLockContext);
        Disposer.register(this, () -> catchAndWarn(messages::close));

        trigrams = new VcsLogMessagesTrigramIndex(storageId, storageLockContext, fatalErrorHandler, this);
        users = new VcsLogUserIndex(storageId, storageLockContext, userRegistry, fatalErrorHandler, this);
        paths = new VcsLogPathsIndex(storageId, storage, roots, storageLockContext, fatalErrorHandler, this);

        Path parentsStorage = storageId.getStorageFile(PARENTS);
        parents = new PersistentHashMap<>(parentsStorage, EnumeratorIntegerDescriptor.INSTANCE,
                                          new IntListDataExternalizer(), Page.PAGE_SIZE, storageId.getVersion(), storageLockContext);
        Disposer.register(this, () -> catchAndWarn(parents::close));

        Path committersStorage = storageId.getStorageFile(COMMITTERS);
        committers = new PersistentHashMap<>(committersStorage, EnumeratorIntegerDescriptor.INSTANCE, EnumeratorIntegerDescriptor.INSTANCE,
                                             Page.PAGE_SIZE, storageId.getVersion(), storageLockContext);
        Disposer.register(this, () -> catchAndWarn(committers::close));

        Path timestampsStorage = storageId.getStorageFile(TIMESTAMPS);
        timestamps = new PersistentHashMap<>(timestampsStorage, EnumeratorIntegerDescriptor.INSTANCE, new LongPairDataExternalizer(),
                                             Page.PAGE_SIZE, storageId.getVersion(), storageLockContext);
        Disposer.register(this, () -> catchAndWarn(timestamps::close));

        checkConsistency();
      }
      catch (Throwable t) {
        // Close whatever maps were opened before rethrowing.
        Disposer.dispose(this);
        throw t;
      }
    }

    // Sanity check: a non-empty commits set with empty trigram/user indexes means
    // the storage is broken and must be reset (the thrown IOException triggers that).
    private void checkConsistency() throws IOException {
      if (!commits.isEmpty()) {
        boolean trigramsEmpty = trigrams.isEmpty();
        boolean usersEmpty = users.isEmpty();
        boolean pathsEmpty = paths.isEmpty();
        if (trigramsEmpty || usersEmpty) {
          IOException exception = new IOException("Broken index maps:\n" +
                                                  "trigrams empty " + trigramsEmpty + "\n" +
                                                  "users empty " + usersEmpty + "\n" +
                                                  "paths empty " + pathsEmpty);
          LOG.error(exception);
          throw exception;
        }
        if (pathsEmpty) {
          LOG.warn("Paths map is empty");
        }
      }
    }

    void markCorrupted() {
      catchAndWarn(commits::markCorrupted);
    }

    public void unmarkFresh() {
      myIsFresh = false;
    }

    public boolean isFresh() {
      return myIsFresh;
    }

    // Nothing to do directly: each map registered its close action with Disposer.
    @Override
    public void dispose() {
    }

    // Runs the action, downgrading IOException to a warning (used for close/corrupt calls).
    private static void catchAndWarn(@NotNull ThrowableRunnable<IOException> runnable) {
      try {
        runnable.run();
      }
      catch (IOException e) {
        LOG.warn(e);
      }
    }

    // Deletes all storage files for the given project/log id (used to reset broken storage).
    private static void cleanup(@NotNull String projectName, @NotNull String logId) {
      StorageId storageId = indexStorageId(projectName, logId);
      if (!storageId.cleanupAllStorageFiles()) {
        LOG.error("Could not clean up storage files in " + storageId.getProjectStorageDir());
      }
    }

    @NotNull
    private static StorageId indexStorageId(@NotNull String projectName, @NotNull String logId) {
      return new StorageId(projectName, INDEX, logId, VcsLogStorageImpl.VERSION + VERSION);
    }
  }
  /**
   * Runs queued {@link IndexingRequest}s one at a time on a background thread with
   * minimum priority, pausing while the IDE is busy or in power-save mode
   * (via {@link HeavyAwareExecutor}).
   */
  private class MySingleTaskController extends SingleTaskController<IndexingRequest, Void> {
    private static final int LOW_PRIORITY = Thread.MIN_PRIORITY;
    @NotNull private final HeavyAwareExecutor myHeavyAwareExecutor;

    MySingleTaskController(@NotNull Project project, @NotNull Disposable parent) {
      super("index", EmptyConsumer.getInstance(), parent);
      myHeavyAwareExecutor = new HeavyAwareExecutor(project, 50, 100, VcsLogPersistentIndex.this);
    }

    @NotNull
    @Override
    protected SingleTask startNewBackgroundTask() {
      ProgressIndicator indicator = myProgress.createProgressIndicator(true, INDEXING);
      Consumer<ProgressIndicator> task = progressIndicator -> {
        // Drop thread priority while indexing so the IDE stays responsive.
        int previousPriority = setMinimumPriority();
        try {
          IndexingRequest request;
          while ((request = popRequest()) != null) {
            try {
              request.run(progressIndicator);
              progressIndicator.checkCanceled();
            }
            catch (ProcessCanceledException reThrown) {
              throw reThrown;
            }
            catch (Throwable t) {
              // Any other failure is accounted per-root; processing continues with the next request.
              request.processException(t);
            }
          }
        }
        finally {
          taskCompleted(null);
          resetPriority(previousPriority);
        }
      };
      Future<?> future = myHeavyAwareExecutor.executeOutOfHeavyOrPowerSave(task, indicator);
      return new SingleTaskImpl(future, indicator);
    }

    // Restores the priority only if it is still at our lowered value.
    public void resetPriority(int previousPriority) {
      if (Thread.currentThread().getPriority() == LOW_PRIORITY) Thread.currentThread().setPriority(previousPriority);
    }

    // Lowers the current thread's priority, tolerating a SecurityException; returns the old priority.
    public int setMinimumPriority() {
      int previousPriority = Thread.currentThread().getPriority();
      try {
        Thread.currentThread().setPriority(LOW_PRIORITY);
      }
      catch (SecurityException e) {
        LOG.debug("Could not set indexing thread priority", e);
      }
      return previousPriority;
    }
  }
private class IndexingRequest {
private static final int BATCH_SIZE = 20000;
private static final int FLUSHED_COMMITS_NUMBER = 15000;
private static final int LOGGED_ERRORS_COUNT = 10;
private static final int STOPPING_ERROR_COUNT = 100;
@NotNull private final VirtualFile myRoot;
@NotNull private final IntSet myCommits;
@NotNull private final VcsLogIndexer.PathsEncoder myPathsEncoder;
private final boolean myFull;
@NotNull private final AtomicInteger myNewIndexedCommits = new AtomicInteger();
@NotNull private final AtomicInteger myOldCommits = new AtomicInteger();
private volatile long myStartTime;
IndexingRequest(@NotNull VirtualFile root,
@NotNull VcsLogIndexer.PathsEncoder encoder,
@NotNull IntSet commits,
boolean full) {
myRoot = root;
myPathsEncoder = encoder;
myCommits = commits;
myFull = full;
myNumberOfTasks.get(root).incrementAndGet();
}
public void run(@NotNull ProgressIndicator indicator) {
if (myBigRepositoriesList.isBig(myRoot)) {
LOG.info("Indexing repository " + myRoot.getName() + " is skipped");
markCommits();
myNumberOfTasks.get(myRoot).decrementAndGet();
return;
}
indicator.setIndeterminate(false);
indicator.setFraction(0);
myStartTime = getCurrentTimeMillis();
LOG.info("Indexing " + (myFull ? "full repository" : myCommits.size() + " commits") + " in " + myRoot.getName());
try {
try {
if (myFull) {
indexAll(indicator);
}
else {
IntStream commits = myCommits.intStream().filter(c -> {
if (isIndexed(c)) {
myOldCommits.incrementAndGet();
return false;
}
return true;
});
indexOneByOne(commits, indicator);
}
}
catch (ProcessCanceledException e) {
scheduleReindex();
throw e;
}
catch (VcsException e) {
processException(e);
scheduleReindex();
}
}
finally {
myNumberOfTasks.get(myRoot).decrementAndGet();
myIndexingTime.get(myRoot).updateAndGet(t -> t + (getCurrentTimeMillis() - myStartTime));
if (isIndexed(myRoot)) {
long time = myIndexingTime.get(myRoot).getAndSet(0);
myIndexCollector.reportIndexingTime(time);
myListeners.forEach(listener -> listener.indexingFinished(myRoot));
}
report();
flush();
}
}
    /**
     * Counts identical errors (keyed by stack-trace hash) per root: the first
     * {@link #LOGGED_ERRORS_COUNT} occurrences are logged, and after
     * {@link #STOPPING_ERROR_COUNT} occurrences the repository is marked as
     * "big", which stops further indexing of it.
     */
    private void processException(@NotNull Throwable e) {
      int errorHash = ThrowableInterner.computeTraceHashCode(e);
      int errors = myIndexingErrors.get(myRoot).cacheOrGet(errorHash, 0);
      myIndexingErrors.get(myRoot).put(errorHash, errors + 1);
      if (errors <= LOGGED_ERRORS_COUNT) {
        LOG.error("Error while indexing " + myRoot.getName(), e);
      }
      else if (errors >= STOPPING_ERROR_COUNT) {
        myBigRepositoriesList.addRepository(myRoot);
        LOG.error("Stopping indexing of " + myRoot.getName() + " due to the large amount of exceptions.", e);
      }
    }
private long getCurrentTimeMillis() {
return TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
}
private void report() {
String formattedTime = StopWatch.formatTime(getCurrentTimeMillis() - myStartTime);
if (myFull) {
LOG.info(formattedTime +
" for indexing " +
myNewIndexedCommits + " commits in " + myRoot.getName());
}
else {
int leftCommits = myCommits.size() - myNewIndexedCommits.get() - myOldCommits.get();
String leftCommitsMessage = (leftCommits > 0) ? ". " + leftCommits + " commits left" : "";
LOG.info(formattedTime +
" for indexing " +
myNewIndexedCommits +
" new commits out of " +
myCommits.size() + " in " + myRoot.getName() + leftCommitsMessage);
}
}
private void scheduleReindex() {
LOG.debug("Schedule reindexing of " +
(myCommits.size() - myNewIndexedCommits.get() - myOldCommits.get()) +
" commits in " +
myRoot.getName());
markCommits();
scheduleIndex(false);
}
private void markCommits() {
myCommits.forEach(value -> {
markForIndexing(value, myRoot);
});
}
    /**
     * Reads and stores full details for the given commits, passing hashes to the
     * indexer in batches of {@link #BATCH_SIZE}. Flushes the storage every
     * {@link #FLUSHED_COMMITS_NUMBER} stored commits and honors cancellation both
     * between batches and after each stored commit.
     */
    private void indexOneByOne(@NotNull IntStream commits, @NotNull ProgressIndicator indicator) throws VcsException {
      // We pass hashes to VcsLogProvider#readFullDetails in batches
      // in order to avoid allocating too much memory for these hashes
      // a batch of 20k will occupy ~2.4Mb
      IntCollectionUtil.processBatches(commits, BATCH_SIZE, batch -> {
        indicator.checkCanceled();
        List<String> hashes = IntCollectionUtil.map2List(batch, value -> myStorage.getCommitId(value).getHash().asString());
        myIndexers.get(myRoot).readFullDetails(myRoot, hashes, myPathsEncoder, detail -> {
          storeDetail(detail);
          if (myNewIndexedCommits.incrementAndGet() % FLUSHED_COMMITS_NUMBER == 0) flush();
          checkShouldCancel(indicator);
        });
      });
    }
    /**
     * Reads and stores full details for every commit in the repository, flushing
     * the storage every {@link #FLUSHED_COMMITS_NUMBER} stored commits and
     * checking for cancellation after each one.
     */
    public void indexAll(@NotNull ProgressIndicator indicator) throws VcsException {
      myIndexers.get(myRoot).readAllFullDetails(myRoot, myPathsEncoder, details -> {
        storeDetail(details);
        if (myNewIndexedCommits.incrementAndGet() % FLUSHED_COMMITS_NUMBER == 0) flush();
        checkShouldCancel(indicator);
      });
    }
    /**
     * Cancels the indicator when the accumulated indexing time for the root has
     * exceeded its limit — marking the root as "big" and raising the limit for a
     * possible future retry — or when the root was marked as "big" elsewhere.
     */
    private void checkShouldCancel(@NotNull ProgressIndicator indicator) {
      // Total time = previously accumulated runs plus the current run so far.
      long time = myIndexingTime.get(myRoot).get() + (getCurrentTimeMillis() - myStartTime);
      int limit = myIndexingLimit.get(myRoot).get();
      // Limit is in minutes; Math.max guards against a non-positive setting.
      boolean isOvertime = time >= (Math.max(limit, 1L) * 60 * 1000) && !myBigRepositoriesList.isBig(myRoot);
      if (isOvertime || (myBigRepositoriesList.isBig(myRoot) && !indicator.isCanceled())) {
        LOG.warn("Indexing " + myRoot.getName() + " was cancelled after " + StopWatch.formatTime(time));
        if (isOvertime) {
          myBigRepositoriesList.addRepository(myRoot);
          // Raise the limit to at least one default-limit step beyond the time
          // already spent so that a manual retry has room to make progress.
          myIndexingLimit.get(myRoot).compareAndSet(limit,
                                                    Math.max(limit + getIndexingLimit(),
                                                             (int)((time / (getIndexingLimit() * 60000) + 1) * getIndexingLimit())));
        }
        indicator.cancel();
      }
    }
@Override
public String toString() {
return "IndexingRequest of " + myCommits.size() + " commits in " + myRoot.getName() + (myFull ? " (full)" : "");
}
}
} | smmribeiro/intellij-community | platform/vcs-log/impl/src/com/intellij/vcs/log/data/index/VcsLogPersistentIndex.java | Java | apache-2.0 | 26,215 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.redshift.model;
import javax.annotation.Generated;
/**
*
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public enum TableRestoreStatusType {
    PENDING("PENDING"),
    IN_PROGRESS("IN_PROGRESS"),
    SUCCEEDED("SUCCEEDED"),
    FAILED("FAILED"),
    CANCELED("CANCELED");
    // Wire representation of this status, as sent/received by the service.
    private final String value;
    TableRestoreStatusType(String value) {
        this.value = value;
    }
    @Override
    public String toString() {
        return value;
    }
    /**
     * Use this in place of valueOf.
     *
     * @param value
     *        real value
     * @return TableRestoreStatusType corresponding to the value
     *
     * @throws IllegalArgumentException
     *         If the specified value does not map to one of the known values in this enum.
     */
    public static TableRestoreStatusType fromValue(String value) {
        if (value == null || value.isEmpty()) {
            throw new IllegalArgumentException("Value cannot be null or empty!");
        }
        for (TableRestoreStatusType candidate : values()) {
            if (candidate.toString().equals(value)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException("Cannot create enum from " + value + " value!");
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-redshift/src/main/java/com/amazonaws/services/redshift/model/TableRestoreStatusType.java | Java | apache-2.0 | 1,910 |
<?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
/**
* Strings for component 'groupselect', language 'es_mx', branch 'MOODLE_21_STABLE'
*
* @package groupselect
* @copyright 1999 onwards Martin Dougiamas {@link http://moodle.com}
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
defined('MOODLE_INTERNAL') || die();
// Spanish (Mexico) strings for the "groupselect" activity module.
// Keys must match the English language pack; placeholders such as {$a}
// are substituted by Moodle at runtime and must be preserved verbatim.
$string['action'] = 'Acción';
$string['cannotselectclosed'] = 'Usted ya no puede volverse miembro del grupo.';
$string['cannotselectmaxed'] = 'Usted no puede unirse al grupo {$a} - se alcanzó el número máximo de miembros.';
$string['cannotselectnocap'] = 'Usted no tiene permitido seleccionar grupo.';
$string['cannotselectnoenrol'] = 'Usted necesita estar inscrito al curso para poder volverse miembro del grupo.';
$string['cannotunselectclosed'] = 'Usted ya no puede dejar el grupo';
$string['fromallgroups'] = 'Todos los grupos';
$string['groupselect:select'] = 'Permitir volverse miembro del grupo';
$string['groupselect:unselect'] = 'Permitir abandonar el grupo';
$string['incorrectpassword'] = 'Contraseña incorrecta';
$string['managegroups'] = 'Gestionar grupos';
$string['maxlimitreached'] = 'Número máximo alcanzado';
$string['maxmembers'] = 'Miembros máx por grupo';
$string['membercount'] = 'Número';
$string['membershidden'] = 'Lista de miembros no disponible';
$string['memberslist'] = 'Miembros';
$string['modulename'] = 'Auto-selección de grupo';
$string['modulenameplural'] = 'Auto-selecciones de grupo';
$string['nogroups'] = 'Lo siento, no hay grupos disponibles para elegir.';
$string['notavailableanymore'] = 'Lo siento, ya no está disponible la selección de grupo (desde {$a}).';
$string['notavailableyet'] = 'La selección de grupo estará disponible en {$a}.';
$string['password'] = 'Requiere contraseña';
$string['pluginadministration'] = 'Administración del módulo';
$string['pluginname'] = 'Auto-selección de grupo';
$string['select'] = 'Volverse miembro de {$a}';
$string['selectconfirm'] = '¿Realmente desea volverse miembro del grupo <em>{$a}</em>?';
$string['targetgrouping'] = 'Seleccionar grupo de agrupamiento';
$string['timeavailable'] = 'Abierto desde';
$string['timedue'] = 'Abierto hasta antes de';
$string['unselect'] = 'Dejar grupo {$a}';
$string['unselectconfirm'] = '¿Realmente desea dejar el grupo <em>{$a}</em>?';
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h"
#include "grpcpp/generic/generic_stub.h"
#include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_client_cq_tag.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_state.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/protobuf/eager_service.pb.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace eager {
namespace {
/*
* Setting environment variable "TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE" to
* true will turn on asynchronous execution of remote op. It means that when
* executing an op on a remote worker, client will not block on waiting
* for the response anymore. Using follow code as example:
*
* with tf.device('worker:0'):
* a = tf.matmul(...)
* b = tf.matmul(...)
* logging.into('Requests sent') # Probably not executed yet
* logging.info('b: %s', b.numpy()) # Block until 'b' finished.
*
* Streaming RPC will preserve order as well. So 'a' must be executed before
* 'b' on 'worker:0'.
*
* When turning on this feature, you should explicitly wait for some result
* from remote workers at the end of you python program. Otherwise, client may
* shutdown remote workers without waiting all pending ops.
*
* TODO(fishx): When exiting client, make sure all pending ops on remote workers
* are finished.
*
* TODO(b/139210648): Move this comment to eager/execute.py when this feature is
* on by default.
*/
// Streaming enqueue is on unless the environment variable explicitly turns
// it off (see the feature description above for semantics).
bool EnableStreaming() {
  bool streaming_enabled;
  TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE",
                                 /*default_val=*/true, &streaming_enabled));
  return streaming_enabled;
}
// gRPC client for the EagerService of a single remote worker. Unary calls are
// issued through a generic stub; StreamingEnqueue keeps one streaming
// dispatcher per remote eager context so that requests are delivered in order.
class GrpcEagerClient : public EagerClient {
 public:
  GrpcEagerClient(const tensorflow::SharedGrpcChannelPtr& channel,
                  ::grpc::CompletionQueue* cq)
      : stub_(channel), cq_(cq) {}
  ~GrpcEagerClient() override {}
// Generates a unary async method <method>Async. The RPCState object manages
// its own lifetime (it deletes itself when the call completes), hence the
// bare `new` with no matching delete here.
#define CLIENT_METHOD(method) \
  void method##Async(const method##Request* request, \
                     method##Response* response, StatusCallback done) \
      override { \
    new RPCState<protobuf::Message>( \
        &stub_, cq_, "/tensorflow.eager.EagerService/" #method, *request, \
        response, std::move(done), nullptr, nullptr, /*max_retries=*/0); \
  }
  CLIENT_METHOD(CreateContext);
  CLIENT_METHOD(UpdateContext);
  CLIENT_METHOD(Enqueue);
  CLIENT_METHOD(WaitQueueDone);
  CLIENT_METHOD(KeepAlive);
#undef CLIENT_METHOD
  // Closes the remote eager context and cancels/removes the streaming
  // dispatcher associated with it, if any.
  void CloseContextAsync(const CloseContextRequest* request,
                         CloseContextResponse* response,
                         StatusCallback done) override {
    new RPCState<protobuf::Message>(
        &stub_, cq_, "/tensorflow.eager.EagerService/CloseContext", *request,
        response, std::move(done), nullptr, nullptr);
    VLOG(1) << "Sending RPC to close remote eager context "
            << request->DebugString();
    mutex_lock l(mu_);
    const auto& it = enqueue_dispatchers_.find(request->context_id());
    if (it != enqueue_dispatchers_.end()) {
      it->second.CancelCall();
      enqueue_dispatchers_.erase(request->context_id());
    } else if (EnableStreaming()) {
      LOG(ERROR) << "Remote EagerContext with id " << request->context_id()
                 << " does not seem to exist.";
    }
  }
  // When streaming is enabled, routes the request through the per-context
  // streaming dispatcher (creating it on first use); otherwise falls back to
  // a blocking unary Enqueue.
  // NOTE(review): the dispatcher map is emplaced into while holding
  // tf_shared_lock (a reader lock); concurrent first-time calls for the same
  // or different contexts look racy — confirm external synchronization or
  // whether an exclusive lock is needed here.
  void StreamingEnqueueAsync(const EnqueueRequest* request,
                             EnqueueResponse* response,
                             StatusCallback done) override {
    if (EnableStreaming()) {
      tf_shared_lock l(mu_);
      auto it = enqueue_dispatchers_.find(request->context_id());
      if (enqueue_dispatchers_.find(request->context_id()) ==
          enqueue_dispatchers_.end()) {
        auto it_and_bool = enqueue_dispatchers_.emplace(
            std::piecewise_construct,
            std::forward_as_tuple(request->context_id()),
            std::forward_as_tuple(
                &stub_, cq_,
                "/tensorflow.eager.EagerService/StreamingEnqueue"));
        it = it_and_bool.first;
      }
      it->second.SendNextRequest(*request, response, std::move(done));
    } else {
      // Synchronous fallback: issue the unary call and block until it is done.
      Notification n;
      Status status;
      EnqueueAsync(request, response, [&n, &status](const Status& s) {
        status.Update(s);
        n.Notify();
      });
      n.WaitForNotification();
      done(status);
    }
  }
 private:
  ::grpc::GenericStub stub_;
  ::grpc::CompletionQueue* cq_;
  mutable mutex mu_;
  // One streaming dispatcher per remote eager context id.
  std::unordered_map<uint64, StreamingRPCDispatcher<EnqueueResponse>>
      enqueue_dispatchers_ GUARDED_BY(mu_);
};
// Caches one GrpcEagerClient per target worker. Each client is pinned to one
// of a small fixed pool of completion-queue polling threads; targets are
// assigned to threads round-robin (but stably, see AssignClientToThread).
class GrpcEagerClientCache : public EagerClientCache {
 public:
  explicit GrpcEagerClientCache(
      std::shared_ptr<tensorflow::GrpcChannelCache> cache)
      : next_round_robin_assignment_(0), cache_(cache), threads_(4) {}
  ~GrpcEagerClientCache() override { threads_.clear(); }
  // Returns (creating on first use) the client for `target`. The returned
  // pointer is owned by this cache and stays valid for the cache's lifetime.
  Status GetClient(const string& target, EagerClient** client) override {
    auto it = clients_.find(target);
    if (it == clients_.end()) {
      tensorflow::SharedGrpcChannelPtr shared =
          cache_->FindWorkerChannel(target);
      if (shared == nullptr) {
        return errors::InvalidArgument("Client for target ", target,
                                       " not found.");
      }
      auto worker = std::unique_ptr<EagerClient>(new GrpcEagerClient(
          shared, threads_[AssignClientToThread(target)].completion_queue()));
      it = clients_.emplace(target, std::move(worker)).first;
    }
    *client = it->second.get();
    return Status::OK();
  }
 private:
  mutex assignment_mu_;
  std::unordered_map<std::string, size_t> target_assignments_
      GUARDED_BY(assignment_mu_);
  size_t next_round_robin_assignment_ GUARDED_BY(assignment_mu_);
  size_t AssignClientToThread(const string& target) {
    // Round-robin target assignment, but keeps the same target on the same
    // polling thread always, as this is important for gRPC performance
    mutex_lock lock(assignment_mu_);
    auto it = target_assignments_.find(target);
    if (it == target_assignments_.end()) {
      it = target_assignments_
               .insert(std::make_pair(
                   target, (next_round_robin_assignment_++) % threads_.size()))
               .first;
    }
    return it->second;
  }
  // Owns a completion queue and a thread that polls it, dispatching completed
  // call tags until the queue is shut down in the destructor.
  class GrpcEagerClientThread {
   public:
    GrpcEagerClientThread() {
      thread_.reset(Env::Default()->StartThread(
          ThreadOptions(), "eager_client_thread", [this]() {
            void* tag;
            bool ok;
            while (completion_queue_.Next(&tag, &ok)) {
              VLOG(4) << "GrpcEagerClientThread got next tag";
              GrpcClientCQTag* callback_tag =
                  static_cast<GrpcClientCQTag*>(tag);
              callback_tag->OnCompleted(ok);
              VLOG(4) << "GrpcEagerClientThread blocking for next tag";
            }
            VLOG(4) << "GrpcEagerClientThread exiting";
          }));
    }
    ~GrpcEagerClientThread() {
      // Shutdown drains the queue, which makes Next() return false and lets
      // the polling thread exit before it is joined via thread_.reset().
      completion_queue_.Shutdown();
      thread_.reset();
    }
    ::grpc::CompletionQueue* completion_queue() { return &completion_queue_; }
   private:
    ::grpc::CompletionQueue completion_queue_;
    std::unique_ptr<Thread> thread_;
  };  // GrpcEagerClientThread
  std::shared_ptr<tensorflow::GrpcChannelCache> cache_;
  std::unordered_map<string, std::unique_ptr<EagerClient>> clients_;
  std::vector<GrpcEagerClientThread> threads_;
};
} // namespace
// Factory: builds an EagerClientCache whose clients talk gRPC over channels
// resolved by the given channel cache. Caller takes ownership.
EagerClientCache* NewGrpcEagerClientCache(
    std::shared_ptr<tensorflow::GrpcChannelCache> channel) {
  auto* client_cache = new GrpcEagerClientCache(channel);
  return client_cache;
}
} // namespace eager
} // namespace tensorflow
| DavidNorman/tensorflow | tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc | C++ | apache-2.0 | 8,617 |
using AcceptFramework.Domain.Evaluation;
using FluentNHibernate.Mapping;
namespace AcceptFramework.Mapping.Evaluation
{
public class EvaluationCategoryMap : ClassMap<EvaluationCategory>
{
public EvaluationCategoryMap()
{
Table("EvaluationCategory");
Id(e => e.Id);
Map(e => e.CategoryName).Length(50);
}
}
}
| accept-project/accept-api | AcceptFramework/Mapping/Evaluation/EvaluationCategoryMap.cs | C# | apache-2.0 | 385 |
package me.zbl.fullstack.entity.vo;
import javax.validation.constraints.NotNull;

import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;

import me.zbl.fullstack.entity.Article;

import org.hibernate.validator.constraints.NotEmpty;
/**
* 修改博客表单
* <p>
*
* @author James
* @date 17-12-4
*/
@Setter
@Getter
@NoArgsConstructor
public class BlogModifyModel {
  // @NotEmpty has no validator for Number types and would throw
  // UnexpectedTypeException at validation time; @NotNull is the correct
  // presence check for an Integer id.
  @NotNull
  private Integer id;
  // Title must be present and non-empty.
  @NotEmpty
  private String title;
  // Markdown source of the article body; optional.
  private String mdMaterial;
  // Short description; populated from the article's introduction.
  private String description;
  /**
   * Builds the form model from an existing article.
   *
   * @param article source entity; its introduction becomes {@code description}
   */
  public BlogModifyModel(Article article) {
    this.id = article.getId();
    this.title = article.getTitle();
    this.mdMaterial = article.getMdMaterial();
    this.description = article.getIntroduction();
  }
}
| LeFullStack/FullStack | src/main/java/me/zbl/fullstack/entity/vo/BlogModifyModel.java | Java | apache-2.0 | 697 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
import static org.apache.hadoop.util.Time.now;
import java.io.BufferedWriter;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.lang.management.ManagementFactory;
import java.net.InetAddress;
import java.net.URI;
import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;
import javax.management.StandardMBean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.RetryCache;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.util.ChunkedArrayList;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Logger;
import org.codehaus.jackson.map.ObjectMapper;
import org.mortbay.util.ajax.JSON;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
/**
* FSNamesystem is a container of both transient
* and persisted name-space state, and does all the book-keeping
* work on a NameNode.
*
* Its roles are briefly described below:
*
* 1) Is the container for BlockManager, DatanodeManager,
* DelegationTokens, LeaseManager, etc. services.
* 2) RPC calls that modify or inspect the name-space
* should get delegated here.
* 3) Anything that touches only blocks (eg. block reports),
* it delegates to BlockManager.
* 4) Anything that touches only file information (eg. permissions, mkdirs),
* it delegates to FSDirectory.
* 5) Anything that crosses two of the above components should be
* coordinated here.
* 6) Logs mutations to FSEditLog.
*
* This class and its contents keep:
*
* 1) Valid fsname --> blocklist (kept on disk, logged)
* 2) Set of all valid blocks (inverted #1)
* 3) block --> machinelist (kept in memory, rebuilt dynamically from reports)
* 4) machine --> blocklist (inverted #2)
* 5) LRU cache of updated-heartbeat machines
*/
@InterfaceAudience.Private
@Metrics(context="dfs")
public class FSNamesystem implements Namesystem, FSNamesystemMBean,
NameNodeMXBean {
/** General-purpose logger for this class. */
public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
// Per-thread StringBuilder reused when formatting audit records, avoiding an
// allocation per audit event. NOTE(review): consumers of this buffer are
// outside this chunk — presumably the default audit logger; confirm there.
private static final ThreadLocal<StringBuilder> auditBuffer =
  new ThreadLocal<StringBuilder>() {
    @Override
    protected StringBuilder initialValue() {
      return new StringBuilder();
    }
  };
// Allocator for block IDs and generation stamps of this namespace
// (constructed over blockManager in the constructor; cleared in clear()).
private final BlockIdManager blockIdManager;
/**
 * Whether audit events should be emitted at all.
 *
 * With a custom audit logger configured, auditing is unconditionally on;
 * when only the default logger is installed, it tracks the INFO level of
 * the underlying audit log so disabled logging skips event formatting.
 */
@VisibleForTesting
public boolean isAuditEnabled() {
  if (!isDefaultAuditLogger) {
    return true;
  }
  return auditLog.isInfoEnabled();
}
/**
 * Convenience overload for audit events that have neither a destination
 * path nor a file status to report.
 */
private void logAuditEvent(boolean succeeded, String cmd, String src)
    throws IOException {
  final String dst = null;
  final HdfsFileStatus stat = null;
  logAuditEvent(succeeded, cmd, src, dst, stat);
}
/**
 * Records an audit event for an externally-invoked command, resolving the
 * caller identity and remote address from the current RPC context. Events
 * are dropped when auditing is disabled or the call is internal.
 */
private void logAuditEvent(boolean succeeded, String cmd, String src,
    String dst, HdfsFileStatus stat) throws IOException {
  if (!isAuditEnabled() || !isExternalInvocation()) {
    return;
  }
  logAuditEvent(succeeded, getRemoteUser(), getRemoteIp(), cmd, src, dst,
      stat);
}
/**
 * Fans one audit event out to every configured {@link AuditLogger}.
 * HDFS-aware loggers additionally receive the caller's UGI and the
 * delegation token secret manager; plain loggers get the basic fields.
 * The optional {@link HdfsFileStatus} is converted to a {@link FileStatus}
 * keyed on the destination path when present, else the source path.
 */
private void logAuditEvent(boolean succeeded,
    UserGroupInformation ugi, InetAddress addr, String cmd, String src,
    String dst, HdfsFileStatus stat) {
  FileStatus status = null;
  if (stat != null) {
    Path symlink = null;
    if (stat.isSymlink()) {
      symlink = new Path(stat.getSymlink());
    }
    Path path;
    if (dst != null) {
      path = new Path(dst);
    } else {
      path = new Path(src);
    }
    status = new FileStatus(stat.getLen(), stat.isDir(),
        stat.getReplication(), stat.getBlockSize(),
        stat.getModificationTime(), stat.getAccessTime(),
        stat.getPermission(), stat.getOwner(), stat.getGroup(),
        symlink, path);
  }
  for (AuditLogger logger : auditLoggers) {
    if (logger instanceof HdfsAuditLogger) {
      ((HdfsAuditLogger) logger).logAuditEvent(succeeded, ugi.toString(),
          addr, cmd, src, dst, status, ugi, dtSecretManager);
    } else {
      logger.logAuditEvent(succeeded, ugi.toString(), addr,
          cmd, src, dst, status);
    }
  }
}
/**
 * Logger for audit events, noting successful FSNamesystem operations. Emits
 * to FSNamesystem.audit at INFO. Each event causes a set of tab-separated
 * <code>key=value</code> pairs to be written for the following properties:
 * <code>
 * ugi=&lt;ugi in RPC&gt;
 * ip=&lt;remote IP&gt;
 * cmd=&lt;command&gt;
 * src=&lt;src path&gt;
 * dst=&lt;dst path (optional)&gt;
 * perm=&lt;permissions (optional)&gt;
 * </code>
 */
public static final Log auditLog = LogFactory.getLog(
    FSNamesystem.class.getName() + ".audit");
// Upper bound on corrupt file blocks returned per listing request.
static final int DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED = 100;
// Non-final so tests can tune it. NOTE(review): presumably the batch size
// used when incrementally deleting blocks — the consumer is outside this
// chunk; confirm before relying on this description.
static int BLOCK_DELETION_INCREMENT = 1000;
// Whether permission checking is enabled (dfs.permissions.enabled).
private final boolean isPermissionEnabled;
// The UGI the NameNode process is running as (UserGroupInformation
// .getCurrentUser() at construction time).
private final UserGroupInformation fsOwner;
// Configured superuser group name (dfs.permissions.superusergroup).
private final String supergroup;
// Whether a standby NN should run the StandbyCheckpointer.
private final boolean standbyShouldCheckpoint;
// Scan interval is not configurable.
private static final long DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL =
    TimeUnit.MILLISECONDS.convert(1, TimeUnit.HOURS);
// Issues, renews and cancels delegation tokens for this namesystem.
final DelegationTokenSecretManager dtSecretManager;
// Test hook: run the delegation token secret manager even when security
// is disabled (see startSecretManagerIfNecessary()).
private final boolean alwaysUseDelegationTokensForTests;
// Startup-progress step used while safe mode awaits block reports.
private static final Step STEP_AWAITING_REPORTED_BLOCKS =
    new Step(StepType.AWAITING_REPORTED_BLOCKS);
// Tracks whether the default audit logger is the only configured audit
// logger; this allows isAuditEnabled() to return false in case the
// underlying logger is disabled, and avoid some unnecessary work.
private final boolean isDefaultAuditLogger;
// Immutable list built by initAuditLoggers(Configuration).
private final List<AuditLogger> auditLoggers;
/** The namespace tree. */
FSDirectory dir;
// Delegate for everything block-related (reports, replication, etc.).
private final BlockManager blockManager;
// Manages snapshottable directories and snapshots.
private final SnapshotManager snapshotManager;
// Manages cache directives and pools.
private final CacheManager cacheManager;
// Aggregated datanode statistics, obtained from the DatanodeManager.
private final DatanodeStatistics datanodeStatistics;
// Nameservice ID of this NN, or null when not federated/HA-configured.
private String nameserviceId;
// Info about an in-progress rolling upgrade; null when none is active.
private volatile RollingUpgradeInfo rollingUpgradeInfo = null;
/**
 * A flag that indicates whether the checkpointer should checkpoint a rollback
 * fsimage. The edit log tailer sets this flag. The checkpoint will create a
 * rollback fsimage if the flag is true, and then change the flag to false.
 */
private volatile boolean needRollbackFsImage;
// Block pool ID used by this namenode
private String blockPoolId;
// Tracks leases for files under construction.
final LeaseManager leaseManager = new LeaseManager(this);
volatile Daemon smmthread = null; // SafeModeMonitor thread
Daemon nnrmthread = null; // NamenodeResourceMonitor thread
Daemon nnEditLogRoller = null; // NameNodeEditLogRoller thread
// A daemon to periodically clean up corrupt lazyPersist files
// from the name space.
Daemon lazyPersistFileScrubber = null;
/**
 * When an active namenode will roll its own edit log, in # edits
 */
private final long editLogRollerThreshold;
/**
 * Check interval of an active namenode's edit log roller thread
 */
private final int editLogRollerInterval;
/**
 * How frequently we scan and unlink corrupt lazyPersist files.
 * (In seconds)
 */
private final int lazyPersistFileScrubIntervalSec;
// Updated by the NameNodeResourceMonitor; false when disk space is low.
private volatile boolean hasResourcesAvailable = false;
// Cleared on shutdown so daemon threads can observe termination.
private volatile boolean fsRunning = true;
/** The start time of the namesystem. */
private final long startTime = now();
/** The interval of namenode checking for the disk space availability */
private final long resourceRecheckInterval;
// The actual resource checker instance.
NameNodeResourceChecker nnResourceChecker;
// Server defaults (block size, replication, checksum, ...) handed to clients.
private final FsServerDefaults serverDefaults;
// Pipeline-recovery policy for replacing failed datanodes on write.
private final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
private volatile SafeModeInfo safeMode; // safe mode information
private final long maxFsObjects; // maximum number of fs objects
private final long minBlockSize; // minimum block size
private final long maxBlocksPerFile; // maximum # of blocks per file
// precision of access times.
private final long accessTimePrecision;
/** Lock to protect FSNamesystem. */
private final FSNamesystemLock fsLock;
/**
 * Checkpoint lock to protect FSNamesystem modification on standby NNs.
 * Unlike fsLock, it does not affect block updates. On active NNs, this lock
 * does not provide proper protection, because there are operations that
 * modify both block and name system state. Even on standby, fsLock is
 * used when block state changes need to be blocked.
 */
private final ReentrantLock cpLock;
/**
 * Used when this NN is in standby state to read from the shared edit log.
 */
private EditLogTailer editLogTailer = null;
/**
 * Used when this NN is in standby state to perform checkpoints.
 */
private StandbyCheckpointer standbyCheckpointer;
/**
 * Reference to the NN's HAContext object. This is only set once
 * {@link #startCommonServices(Configuration, HAContext)} is called.
 */
private HAContext haContext;
// True when HA is configured for this nameservice.
private final boolean haEnabled;
/** flag indicating whether replication queues have been initialized */
boolean initializedReplQueues = false;
/**
 * Whether the namenode is in the middle of starting the active service
 */
private volatile boolean startingActiveService = false;
// RPC retry cache; null when disabled or for secondary NNs.
private final RetryCache retryCache;
// Key provider for encryption zones; null when none is configured.
private KeyProviderCryptoExtension provider = null;
// Set once the fsimage has been loaded; guarded by fsLock + cond.
private volatile boolean imageLoaded = false;
// Condition on the write lock, signalled when imageLoaded flips to true.
private final Condition cond;
// The FSImage this namesystem was constructed around.
private final FSImage fsImage;
// nntop configuration (top-users metrics).
private final TopConf topConf;
// Metrics sink for nntop; created only when topConf.isEnabled.
private TopMetrics topMetrics;
/**
 * Marks loading of this FSDirectory as complete so the namesystem can be
 * used. Must be called exactly once; a second call fails fast.
 *
 * @throws IllegalStateException if the image was already flagged as loaded
 */
void imageLoadComplete() {
  // Equivalent of Preconditions.checkState: same exception type and message.
  if (imageLoaded) {
    throw new IllegalStateException("FSDirectory already loaded");
  }
  setImageLoaded();
}
/**
 * Flags the image as loaded under the write lock, initializes the
 * directory's name cache, and wakes every thread blocked in
 * {@link #waitForLoadingFSImage()}. No-op when already loaded.
 */
void setImageLoaded() {
  if (imageLoaded) {
    return;
  }
  writeLock();
  try {
    setImageLoaded(true);
    dir.markNameCacheInitialized();
    cond.signalAll();
  } finally {
    writeUnlock();
  }
}
// This is for testing purposes only.
@VisibleForTesting
boolean isImageLoaded() {
  return imageLoaded;
}
// Exposed for unit tests; production code flips the flag via
// setImageLoaded() so waiters on cond are signalled.
protected void setImageLoaded(boolean flag) {
  imageLoaded = flag;
}
/**
 * Block until the object is imageLoaded to be used.
 */
void waitForLoadingFSImage() {
  if (!imageLoaded) {
    writeLock();
    try {
      while (!imageLoaded) {
        try {
          // Bounded wait so a missed signal cannot block forever; the
          // loop re-checks the flag on every wake-up.
          cond.await(5000, TimeUnit.MILLISECONDS);
        } catch (InterruptedException ignored) {
          // NOTE(review): the interrupt is swallowed and the wait simply
          // resumes. Re-asserting the interrupt here would make the next
          // await() throw immediately and busy-spin, so any fix must be
          // coordinated with callers — confirm none rely on interruption.
        }
      }
    } finally {
      writeUnlock();
    }
  }
}
/**
 * Clear all loaded data
 */
void clear() {
  // Resets every in-memory subsystem and marks the image as not loaded.
  dir.reset();
  dtSecretManager.reset();
  blockIdManager.clear();
  leaseManager.removeAllLeases();
  snapshotManager.clearSnapshottableDirs();
  cacheManager.clear();
  setImageLoaded(false);
  blockManager.clear();
}
/** @return the lease manager (exposed for tests). */
@VisibleForTesting
LeaseManager getLeaseManager() {
  return leaseManager;
}
/** @return true when HA is configured for this nameservice. */
boolean isHaEnabled() {
  return haEnabled;
}
/**
 * Check the supplied configuration for correctness.
 * @param conf Supplies the configuration to validate.
 * @throws IOException if the configuration could not be queried.
 * @throws IllegalArgumentException if the configuration is invalid.
 */
private static void checkConfiguration(Configuration conf)
    throws IOException {
  final Collection<URI> namespaceDirs =
      FSNamesystem.getNamespaceDirs(conf);
  final Collection<URI> editsDirs =
      FSNamesystem.getNamespaceEditsDirs(conf);
  final Collection<URI> requiredEditsDirs =
      FSNamesystem.getRequiredNamespaceEditsDirs(conf);
  final Collection<URI> sharedEditsDirs =
      FSNamesystem.getSharedEditsDirs(conf);
  for (URI u : requiredEditsDirs) {
    // The built-in default edits dir is always acceptable as "required".
    if (u.toString().compareTo(
        DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT) == 0) {
      continue;
    }
    // Each required directory must also be in editsDirs or in
    // sharedEditsDirs.
    if (!editsDirs.contains(u) &&
        !sharedEditsDirs.contains(u)) {
      throw new IllegalArgumentException(
          "Required edits directory " + u.toString() + " not present in " +
          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + ". " +
          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + "=" +
          editsDirs.toString() + "; " +
          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY + "=" +
          requiredEditsDirs.toString() + ". " +
          DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY + "=" +
          sharedEditsDirs.toString() + ".");
    }
  }
  // A single storage dir is legal but risky — warn about lost redundancy.
  if (namespaceDirs.size() == 1) {
    LOG.warn("Only one image storage directory ("
        + DFS_NAMENODE_NAME_DIR_KEY + ") configured. Beware of data loss"
        + " due to lack of redundant storage directories!");
  }
  if (editsDirs.size() == 1) {
    LOG.warn("Only one namespace edits storage directory ("
        + DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of data loss"
        + " due to lack of redundant storage directories!");
  }
}
/**
 * Instantiates an FSNamesystem loaded from the image and edits
 * directories specified in the passed Configuration.
 *
 * @param conf the Configuration which specifies the storage directories
 * from which to load
 * @return an FSNamesystem which contains the loaded namespace
 * @throws IOException if loading fails
 */
static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
  checkConfiguration(conf);
  FSImage fsImage = new FSImage(conf,
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));
  FSNamesystem namesystem = new FSNamesystem(conf, fsImage, false);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  // Recovery mode runs with safe mode forced on.
  if (startOpt == StartupOption.RECOVER) {
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  }
  long loadStart = now();
  try {
    namesystem.loadFSImage(startOpt);
  } catch (IOException ioe) {
    // Close the image to release storage locks before propagating.
    LOG.warn("Encountered exception loading fsimage", ioe);
    fsImage.close();
    throw ioe;
  }
  long timeTakenToLoadFSImage = now() - loadStart;
  LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
  NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();
  if (nnMetrics != null) {
    nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
  }
  return namesystem;
}
/** Convenience constructor that does not skip retry-cache setup. */
FSNamesystem(Configuration conf, FSImage fsImage) throws IOException {
  this(conf, fsImage, false);
}
/**
 * Create an FSNamesystem associated with the specified image.
 *
 * Note that this does not load any data off of disk -- if you would
 * like that behavior, use {@link #loadFromDisk(Configuration)}
 *
 * @param conf configuration
 * @param fsImage The FSImage to associate with
 * @param ignoreRetryCache Whether or not should ignore the retry cache setup
 * step. For Secondary NN this should be set to true.
 * @throws IOException on bad configuration
 */
FSNamesystem(Configuration conf, FSImage fsImage, boolean ignoreRetryCache)
    throws IOException {
  // Optional key provider for encryption zones; absence is normal.
  provider = DFSUtil.createKeyProviderCryptoExtension(conf);
  if (provider == null) {
    LOG.info("No KeyProvider found.");
  } else {
    LOG.info("Found KeyProvider: " + provider.toString());
  }
  if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY,
                      DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) {
    LOG.info("Enabling async auditlog");
    enableAsyncAuditLog();
  }
  // The namesystem lock may be fair or unfair; fair by default.
  boolean fair = conf.getBoolean("dfs.namenode.fslock.fair", true);
  LOG.info("fsLock is fair:" + fair);
  fsLock = new FSNamesystemLock(fair);
  // cond is signalled by setImageLoaded() once the image is in memory.
  cond = fsLock.writeLock().newCondition();
  cpLock = new ReentrantLock();
  this.fsImage = fsImage;
  try {
    resourceRecheckInterval = conf.getLong(
        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
    this.blockManager = new BlockManager(this, conf);
    this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
    this.blockIdManager = new BlockIdManager(blockManager);
    this.fsOwner = UserGroupInformation.getCurrentUser();
    this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
                               DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
    this.isPermissionEnabled = conf.getBoolean(DFS_PERMISSIONS_ENABLED_KEY,
                                               DFS_PERMISSIONS_ENABLED_DEFAULT);
    LOG.info("fsOwner             = " + fsOwner);
    LOG.info("supergroup          = " + supergroup);
    LOG.info("isPermissionEnabled = " + isPermissionEnabled);
    // block allocation has to be persisted in HA using a shared edits directory
    // so that the standby has up-to-date namespace information
    nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
    this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);
    // Sanity check the HA-related config.
    if (nameserviceId != null) {
      LOG.info("Determined nameservice ID: " + nameserviceId);
    }
    LOG.info("HA Enabled: " + haEnabled);
    if (!haEnabled && HAUtil.usesSharedEditsDir(conf)) {
      LOG.warn("Configured NNs:\n" + DFSUtil.nnAddressesAsString(conf));
      throw new IOException("Invalid configuration: a shared edits dir " +
          "must not be specified if HA is not enabled.");
    }
    // Get the checksum type from config
    String checksumTypeStr = conf.get(DFS_CHECKSUM_TYPE_KEY, DFS_CHECKSUM_TYPE_DEFAULT);
    DataChecksum.Type checksumType;
    try {
      checksumType = DataChecksum.Type.valueOf(checksumTypeStr);
    } catch (IllegalArgumentException iae) {
      throw new IOException("Invalid checksum type in "
          + DFS_CHECKSUM_TYPE_KEY + ": " + checksumTypeStr);
    }
    // Defaults handed out to clients (block size, replication, etc.).
    this.serverDefaults = new FsServerDefaults(
        conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
        conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
        conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT),
        (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
        conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT),
        conf.getBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY, DFS_ENCRYPT_DATA_TRANSFER_DEFAULT),
        conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT),
        checksumType);
    this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY,
                                     DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
    this.minBlockSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT);
    this.maxBlocksPerFile = conf.getLong(DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT);
    this.accessTimePrecision = conf.getLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
        DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT);
    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
    this.standbyShouldCheckpoint = conf.getBoolean(
        DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT);
    // # edit autoroll threshold is a multiple of the checkpoint threshold
    this.editLogRollerThreshold = (long)
        (conf.getFloat(
            DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD,
            DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT) *
         conf.getLong(
            DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
            DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT));
    this.editLogRollerInterval = conf.getInt(
        DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS,
        DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT);
    this.lazyPersistFileScrubIntervalSec = conf.getInt(
        DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC,
        DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC_DEFAULT);
    // Zero is rejected; negative values disable the scrubber (see
    // startActiveServices, which only starts it when > 0).
    if (this.lazyPersistFileScrubIntervalSec == 0) {
      throw new IllegalArgumentException(
          DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC + " must be non-zero.");
    }
    // For testing purposes, allow the DT secret manager to be started regardless
    // of whether security is enabled.
    alwaysUseDelegationTokensForTests = conf.getBoolean(
        DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
        DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT);
    this.dtSecretManager = createDelegationTokenSecretManager(conf);
    this.dir = new FSDirectory(this, conf);
    this.snapshotManager = new SnapshotManager(dir);
    this.cacheManager = new CacheManager(this, conf, blockManager);
    this.safeMode = new SafeModeInfo(conf);
    this.topConf = new TopConf(conf);
    this.auditLoggers = initAuditLoggers(conf);
    this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
      auditLoggers.get(0) instanceof DefaultAuditLogger;
    this.retryCache = ignoreRetryCache ? null : initRetryCache(conf);
  } catch(IOException e) {
    // Release any partially-initialized resources before rethrowing.
    LOG.error(getClass().getSimpleName() + " initialization failed.", e);
    close();
    throw e;
  } catch (RuntimeException re) {
    LOG.error(getClass().getSimpleName() + " initialization failed.", re);
    close();
    throw re;
  }
}
/** @return the configured audit loggers (exposed for tests). */
@VisibleForTesting
public List<AuditLogger> getAuditLoggers() {
  return auditLoggers;
}
/** @return the RPC retry cache, or null when disabled. */
@VisibleForTesting
public RetryCache getRetryCache() {
  return retryCache;
}
/** Acquires the retry-cache lock; no-op when the cache is disabled. */
void lockRetryCache() {
  if (retryCache != null) {
    retryCache.lock();
  }
}
/** Releases the retry-cache lock; no-op when the cache is disabled. */
void unlockRetryCache() {
  if (retryCache != null) {
    retryCache.unlock();
  }
}
/** Whether or not retry cache is enabled */
boolean hasRetryCache() {
  return retryCache != null;
}
/** Records a completed call plus its payload; no-op when cache disabled. */
void addCacheEntryWithPayload(byte[] clientId, int callId, Object payload) {
  if (retryCache != null) {
    retryCache.addCacheEntryWithPayload(clientId, callId, payload);
  }
}
/** Records a completed call; no-op when the cache is disabled. */
void addCacheEntry(byte[] clientId, int callId) {
  if (retryCache != null) {
    retryCache.addCacheEntry(clientId, callId);
  }
}
/** @return the encryption key provider, or null when none configured. */
@VisibleForTesting
public KeyProviderCryptoExtension getProvider() {
  return provider;
}
/**
 * Builds the RPC retry cache from configuration.
 *
 * @param conf source of the enable flag, heap percentage and entry expiry
 * @return a configured {@link RetryCache}, or {@code null} when disabled
 */
@VisibleForTesting
static RetryCache initRetryCache(Configuration conf) {
  final boolean enable = conf.getBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY,
      DFS_NAMENODE_ENABLE_RETRY_CACHE_DEFAULT);
  LOG.info("Retry cache on namenode is " + (enable ? "enabled" : "disabled"));
  if (!enable) {
    return null;
  }
  final float heapPercent = conf.getFloat(
      DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_KEY,
      DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT);
  final long entryExpiryMillis = conf.getLong(
      DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY,
      DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT);
  LOG.info("Retry cache will use " + heapPercent
      + " of total heap and retry cache entry expiry time is "
      + entryExpiryMillis + " millis");
  // RetryCache takes nanoseconds.
  final long entryExpiryNanos = entryExpiryMillis * 1000 * 1000;
  return new RetryCache("NameNodeRetryCache", heapPercent, entryExpiryNanos);
}
/**
 * Builds the immutable list of audit loggers: any custom classes named in
 * configuration, the default logger as a fallback, and (when nntop is
 * enabled) a TopAuditLogger feeding top-user metrics.
 */
private List<AuditLogger> initAuditLoggers(Configuration conf) {
  // Initialize the custom access loggers if configured.
  Collection<String> alClasses = conf.getStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY);
  List<AuditLogger> auditLoggers = Lists.newArrayList();
  if (alClasses != null && !alClasses.isEmpty()) {
    for (String className : alClasses) {
      try {
        AuditLogger logger;
        // The reserved name "default" selects the built-in logger.
        if (DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME.equals(className)) {
          logger = new DefaultAuditLogger();
        } else {
          logger = (AuditLogger) Class.forName(className).newInstance();
        }
        logger.initialize(conf);
        auditLoggers.add(logger);
      } catch (RuntimeException re) {
        throw re;
      } catch (Exception e) {
        // Reflection/initialization failures are fatal misconfiguration.
        throw new RuntimeException(e);
      }
    }
  }
  // Make sure there is at least one logger installed.
  if (auditLoggers.isEmpty()) {
    auditLoggers.add(new DefaultAuditLogger());
  }
  // Add audit logger to calculate top users
  if (topConf.isEnabled) {
    topMetrics = new TopMetrics(conf, topConf.nntopReportingPeriodsMs);
    auditLoggers.add(new TopAuditLogger(topMetrics));
  }
  return Collections.unmodifiableList(auditLoggers);
}
/**
 * Loads the namespace from the FSImage according to the startup option:
 * optionally formats first, recovers the transition state, saves a new
 * image when needed, and opens the edit log for write unless coming up
 * in (non-upgrade) standby state.
 */
private void loadFSImage(StartupOption startOpt) throws IOException {
  final FSImage fsImage = getFSImage();
  // format before starting up if requested
  if (startOpt == StartupOption.FORMAT) {
    fsImage.format(this, fsImage.getStorage().determineClusterId());// reuse current id
    startOpt = StartupOption.REGULAR;
  }
  boolean success = false;
  writeLock();
  try {
    // We shouldn't be calling saveNamespace if we've come up in standby state.
    MetaRecoveryContext recovery = startOpt.createRecoveryContext();
    final boolean staleImage
        = fsImage.recoverTransitionRead(startOpt, this, recovery);
    // Rolling-upgrade rollback/downgrade discards any in-progress info.
    if (RollingUpgradeStartupOption.ROLLBACK.matches(startOpt) ||
        RollingUpgradeStartupOption.DOWNGRADE.matches(startOpt)) {
      rollingUpgradeInfo = null;
    }
    final boolean needToSave = staleImage && !haEnabled && !isRollingUpgrade();
    LOG.info("Need to save fs image? " + needToSave
        + " (staleImage=" + staleImage + ", haEnabled=" + haEnabled
        + ", isRollingUpgrade=" + isRollingUpgrade() + ")");
    if (needToSave) {
      fsImage.saveNamespace(this);
    } else {
      updateStorageVersionForRollingUpgrade(fsImage.getLayoutVersion(),
          startOpt);
      // No need to save, so mark the phase done.
      StartupProgress prog = NameNode.getStartupProgress();
      prog.beginPhase(Phase.SAVING_CHECKPOINT);
      prog.endPhase(Phase.SAVING_CHECKPOINT);
    }
    // This will start a new log segment and write to the seen_txid file, so
    // we shouldn't do it when coming up in standby state
    if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE)
        || (haEnabled && startOpt == StartupOption.UPGRADEONLY)) {
      fsImage.openEditLogForWrite();
    }
    success = true;
  } finally {
    if (!success) {
      // Close to release storage directories on failure.
      fsImage.close();
    }
    writeUnlock();
  }
  imageLoadComplete();
}
/**
 * Persists the storage version when a rolling upgrade is being started
 * (with an older on-disk layout) or rolled back.
 */
private void updateStorageVersionForRollingUpgrade(final long layoutVersion,
    StartupOption startOpt) throws IOException {
  final boolean rollingStarted =
      RollingUpgradeStartupOption.STARTED.matches(startOpt)
          && layoutVersion > HdfsConstants.NAMENODE_LAYOUT_VERSION;
  final boolean rollingRollback =
      RollingUpgradeStartupOption.ROLLBACK.matches(startOpt);
  if (rollingStarted || rollingRollback) {
    fsImage.updateStorageVersion();
  }
}
/** Starts the delegation-token secret manager threads (if one exists). */
private void startSecretManager() {
  if (dtSecretManager != null) {
    try {
      dtSecretManager.startThreads();
    } catch (IOException e) {
      // Inability to start secret manager
      // can't be recovered from.
      throw new RuntimeException(e);
    }
  }
}
/**
 * Starts the secret manager when tokens are in use, the NN is out of safe
 * mode, the edit log is writable, and it is not already running.
 */
private void startSecretManagerIfNecessary() {
  boolean shouldRun = shouldUseDelegationTokens() &&
    !isInSafeMode() && getEditLog().isOpenForWrite();
  boolean running = dtSecretManager.isRunning();
  if (shouldRun && !running) {
    startSecretManager();
  }
}
/** Stops the secret manager threads; safe to call when never started. */
private void stopSecretManager() {
  if (dtSecretManager != null) {
    dtSecretManager.stopThreads();
  }
}
/**
 * Start services common to both active and standby states
 */
void startCommonServices(Configuration conf, HAContext haContext) throws IOException {
  this.registerMBean(); // register the MBean for the FSNamesystemState
  writeLock();
  // haContext is published while holding the write lock.
  this.haContext = haContext;
  try {
    nnResourceChecker = new NameNodeResourceChecker(conf);
    checkAvailableResources();
    assert safeMode != null && !isPopulatingReplQueues();
    // Safe mode progress is sized by the number of complete blocks.
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.SAFEMODE);
    prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS,
      getCompleteBlocksTotal());
    setBlockTotal();
    blockManager.activate(conf);
  } finally {
    writeUnlock();
  }
  registerMXBean();
  DefaultMetricsSystem.instance().register(this);
  snapshotManager.registerMXBean();
}
/**
 * Stop services common to both active and standby states
 */
void stopCommonServices() {
  writeLock();
  try {
    if (blockManager != null) blockManager.close();
  } finally {
    writeUnlock();
  }
  RetryCache.clear(retryCache);
}
/**
 * Start services required in active state
 * @throws IOException
 */
void startActiveServices() throws IOException {
  startingActiveService = true;
  LOG.info("Starting services required for active state");
  writeLock();
  try {
    FSEditLog editLog = getFSImage().getEditLog();
    // The edit log is already writable on a fresh (non-failover) startup;
    // this branch runs only on a standby-to-active transition.
    if (!editLog.isOpenForWrite()) {
      // During startup, we're already open for write during initialization.
      editLog.initJournalsForWrite();
      // May need to recover
      editLog.recoverUnclosedStreams();
      LOG.info("Catching up to latest edits from old active before " +
          "taking over writer role in edits logs");
      editLogTailer.catchupDuringFailover();
      blockManager.setPostponeBlocksFromFuture(false);
      // All datanode state is suspect until re-confirmed by heartbeats.
      blockManager.getDatanodeManager().markAllDatanodesStale();
      blockManager.clearQueues();
      blockManager.processAllPendingDNMessages();
      // Only need to re-process the queue, If not in SafeMode.
      if (!isInSafeMode()) {
        LOG.info("Reprocessing replication and invalidation queues");
        initializeReplQueues();
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("NameNode metadata after re-processing " +
            "replication and invalidation queues during failover:\n" +
            metaSaveAsString());
      }
      long nextTxId = getFSImage().getLastAppliedTxId() + 1;
      LOG.info("Will take over writing edit logs at txnid " +
          nextTxId);
      editLog.setNextTxId(nextTxId);
      getFSImage().editLog.openForWrite();
    }
    // Enable quota checks.
    dir.enableQuotaChecks();
    if (haEnabled) {
      // Renew all of the leases before becoming active.
      // This is because, while we were in standby mode,
      // the leases weren't getting renewed on this NN.
      // Give them all a fresh start here.
      leaseManager.renewAllLeases();
    }
    leaseManager.startMonitor();
    startSecretManagerIfNecessary();
    //ResourceMonitor required only at ActiveNN. See HDFS-2914
    this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
    nnrmthread.start();
    // Roller periodically rolls the edit log once it grows past threshold.
    nnEditLogRoller = new Daemon(new NameNodeEditLogRoller(
        editLogRollerThreshold, editLogRollerInterval));
    nnEditLogRoller.start();
    // Negative interval disables the lazyPersist scrubber (zero is
    // rejected at construction time).
    if (lazyPersistFileScrubIntervalSec > 0) {
      lazyPersistFileScrubber = new Daemon(new LazyPersistFileScrubber(
          lazyPersistFileScrubIntervalSec));
      lazyPersistFileScrubber.start();
    }
    cacheManager.startMonitorThread();
    blockManager.getDatanodeManager().setShouldSendCachingCommands(true);
  } finally {
    startingActiveService = false;
    checkSafeMode();
    writeUnlock();
  }
}
/** @return true when an HA context exists and reports the ACTIVE state. */
private boolean inActiveState() {
  return haContext != null &&
      haContext.getState().getServiceState() == HAServiceState.ACTIVE;
}
/**
 * Initialize replication queues.
 */
private void initializeReplQueues() {
  LOG.info("initializing replication queues");
  blockManager.processMisReplicatedBlocks();
  initializedReplQueues = true;
}
/**
 * @return Whether the namenode is transitioning to active state and is in the
 *         middle of the {@link #startActiveServices()}
 */
public boolean inTransitionToActive() {
  return haEnabled && inActiveState() && startingActiveService;
}
/**
 * Whether delegation tokens should be in use: security is on, or the
 * test-only override flag is set.
 */
private boolean shouldUseDelegationTokens() {
  return UserGroupInformation.isSecurityEnabled() ||
    alwaysUseDelegationTokensForTests;
}
/**
 * Stop services required in active state
 */
void stopActiveServices() {
  LOG.info("Stopping services started for active state");
  writeLock();
  try {
    stopSecretManager();
    leaseManager.stopMonitor();
    // Each daemon is asked to stop, then interrupted to break out of waits.
    if (nnrmthread != null) {
      ((NameNodeResourceMonitor) nnrmthread.getRunnable()).stopMonitor();
      nnrmthread.interrupt();
    }
    if (nnEditLogRoller != null) {
      ((NameNodeEditLogRoller)nnEditLogRoller.getRunnable()).stop();
      nnEditLogRoller.interrupt();
    }
    if (lazyPersistFileScrubber != null) {
      ((LazyPersistFileScrubber) lazyPersistFileScrubber.getRunnable()).stop();
      lazyPersistFileScrubber.interrupt();
    }
    if (dir != null && getFSImage() != null) {
      if (getFSImage().editLog != null) {
        getFSImage().editLog.close();
      }
      // Update the fsimage with the last txid that we wrote
      // so that the tailer starts from the right spot.
      getFSImage().updateLastAppliedTxIdFromWritten();
    }
    if (cacheManager != null) {
      cacheManager.stopMonitorThread();
      cacheManager.clearDirectiveStats();
    }
    blockManager.getDatanodeManager().clearPendingCachingCommands();
    blockManager.getDatanodeManager().setShouldSendCachingCommands(false);
    // Don't want to keep replication queues when not in Active.
    blockManager.clearQueues();
    initializedReplQueues = false;
  } finally {
    writeUnlock();
  }
}
  /**
   * Start services required in standby state
   *
   * @param conf configuration used to construct the tailer and checkpointer
   * @throws IOException if the shared journals cannot be opened for read
   */
  void startStandbyServices(final Configuration conf) throws IOException {
    LOG.info("Starting services required for standby state");
    if (!getFSImage().editLog.isOpenForRead()) {
      // During startup, we're already open for read.
      getFSImage().editLog.initSharedJournalsForRead();
    }
    // Blocks with future generation stamps are postponed until the edits
    // that created them have been tailed.
    blockManager.setPostponeBlocksFromFuture(true);
    // Disable quota checks while in standby.
    dir.disableQuotaChecks();
    editLogTailer = new EditLogTailer(this, conf);
    editLogTailer.start();
    if (standbyShouldCheckpoint) {
      standbyCheckpointer = new StandbyCheckpointer(conf, this);
      standbyCheckpointer.start();
    }
  }
  /**
   * Called when the NN is in Standby state and the editlog tailer tails the
   * OP_ROLLING_UPGRADE_START.
   */
  void triggerRollbackCheckpoint() {
    // Mark that a rollback fsimage is needed before asking the checkpointer
    // (which may not exist if checkpointing is disabled) to produce one.
    setNeedRollbackFsImage(true);
    if (standbyCheckpointer != null) {
      standbyCheckpointer.triggerRollbackCheckpoint();
    }
  }
  /**
   * Called while the NN is in Standby state, but just about to be
   * asked to enter Active state. This cancels any checkpoints
   * currently being taken.
   */
  void prepareToStopStandbyServices() throws ServiceFailedException {
    if (standbyCheckpointer != null) {
      standbyCheckpointer.cancelAndPreventCheckpoints(
          "About to leave standby state");
    }
  }
  /** Stop services required in standby state */
  void stopStandbyServices() throws IOException {
    LOG.info("Stopping services started for standby state");
    if (standbyCheckpointer != null) {
      standbyCheckpointer.stop();
    }
    if (editLogTailer != null) {
      editLogTailer.stop();
    }
    // Close the edit log opened for read by startStandbyServices().
    if (dir != null && getFSImage() != null && getFSImage().editLog != null) {
      getFSImage().editLog.close();
    }
  }
  @Override
  public void checkOperation(OperationCategory op) throws StandbyException {
    // Delegate the HA-state check to the context, which rejects operations
    // not permitted in the current (e.g. standby) state.
    if (haContext != null) {
      // null in some unit tests
      haContext.checkOperation(op);
    }
  }
  /**
   * @throws RetriableException
   *           If 1) The NameNode is in SafeMode, 2) HA is enabled, and 3)
   *           NameNode is in active state
   * @throws SafeModeException
   *           Otherwise if NameNode is in SafeMode.
   */
  void checkNameNodeSafeMode(String errorMsg)
      throws RetriableException, SafeModeException {
    if (isInSafeMode()) {
      SafeModeException se = new SafeModeException(errorMsg, safeMode);
      // An HA-active NN in automatic safe mode is expected to leave it on
      // its own, so tell the client to retry rather than fail outright.
      if (haEnabled && haContext != null
          && haContext.getState().getServiceState() == HAServiceState.ACTIVE
          && shouldRetrySafeMode(this.safeMode)) {
        throw new RetriableException(se);
      } else {
        throw se;
      }
    }
  }
  /** @return whether permission checking is enabled for this namesystem. */
  boolean isPermissionEnabled() {
    return isPermissionEnabled;
  }
/**
* We already know that the safemode is on. We will throw a RetriableException
* if the safemode is not manual or caused by low resource.
*/
private boolean shouldRetrySafeMode(SafeModeInfo safeMode) {
if (safeMode == null) {
return false;
} else {
return !safeMode.isManual() && !safeMode.areResourcesLow();
}
}
  /** @return the configured fsimage (name) storage directories as URIs. */
  public static Collection<URI> getNamespaceDirs(Configuration conf) {
    return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY);
  }
/**
* Get all edits dirs which are required. If any shared edits dirs are
* configured, these are also included in the set of required dirs.
*
* @param conf the HDFS configuration.
* @return all required dirs.
*/
public static Collection<URI> getRequiredNamespaceEditsDirs(Configuration conf) {
Set<URI> ret = new HashSet<URI>();
ret.addAll(getStorageDirs(conf, DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY));
ret.addAll(getSharedEditsDirs(conf));
return ret;
}
  /**
   * Read a list of storage directories from configuration, converting the
   * configured strings to URIs. In IMPORT startup mode, directories that come
   * from the default configuration files are removed so that the NameNode can
   * start with only the explicitly configured (hdfs-site.xml) directories.
   */
  private static Collection<URI> getStorageDirs(Configuration conf,
                                                String propertyName) {
    Collection<String> dirNames = conf.getTrimmedStringCollection(propertyName);
    StartupOption startOpt = NameNode.getStartupOption(conf);
    if(startOpt == StartupOption.IMPORT) {
      // In case of IMPORT this will get rid of default directories 
      // but will retain directories specified in hdfs-site.xml
      // When importing image from a checkpoint, the name-node can
      // start with empty set of storage directories.
      // Load only the default resources to learn which dirs are defaults.
      Configuration cE = new HdfsConfiguration(false);
      cE.addResource("core-default.xml");
      cE.addResource("core-site.xml");
      cE.addResource("hdfs-default.xml");
      Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
      dirNames.removeAll(dirNames2);
      if(dirNames.isEmpty())
        LOG.warn("!!! WARNING !!!" +
          "\n\tThe NameNode currently runs without persistent storage." +
          "\n\tAny changes to the file system meta-data may be lost." +
          "\n\tRecommended actions:" +
          "\n\t\t- shutdown and restart NameNode with configured \""
          + propertyName + "\" in hdfs-site.xml;" +
          "\n\t\t- use Backup Node as a persistent and up-to-date storage " +
          "of the file system meta-data.");
    } else if (dirNames.isEmpty()) {
      // Nothing configured: fall back to the default edits dir.
      dirNames = Collections.singletonList(
          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT);
    }
    return Util.stringCollectionAsURIs(dirNames);
  }
  /**
   * Return an ordered list of edits directories to write to.
   * The list is ordered such that all shared edits directories
   * are ordered before non-shared directories, and any duplicates
   * are removed. The order they are specified in the configuration
   * is retained.
   * @return Collection of shared edits directories.
   * @throws IOException if multiple shared edits directories are configured
   */
  public static List<URI> getNamespaceEditsDirs(Configuration conf)
      throws IOException {
    // Convenience overload: include the shared edits dirs.
    return getNamespaceEditsDirs(conf, true);
  }
  /**
   * Return the ordered, de-duplicated list of edits directories, optionally
   * including the shared edits dirs (which, when included, always come first).
   *
   * @param conf the HDFS configuration
   * @param includeShared whether to include shared edits directories
   * @return ordered list of edits URIs; falls back to the image dirs when no
   *         edits dirs are configured at all
   * @throws IOException if multiple shared edits directories are configured
   */
  public static List<URI> getNamespaceEditsDirs(Configuration conf,
      boolean includeShared)
      throws IOException {
    // Use a LinkedHashSet so that order is maintained while we de-dup
    // the entries.
    LinkedHashSet<URI> editsDirs = new LinkedHashSet<URI>();
    
    if (includeShared) {
      List<URI> sharedDirs = getSharedEditsDirs(conf);
  
      // Fail until multiple shared edits directories are supported (HDFS-2782)
      if (sharedDirs.size() > 1) {
        throw new IOException(
            "Multiple shared edits directories are not yet supported");
      }
  
      // First add the shared edits dirs. It's critical that the shared dirs
      // are added first, since JournalSet syncs them in the order they are listed,
      // and we need to make sure all edits are in place in the shared storage
      // before they are replicated locally. See HDFS-2874.
      for (URI dir : sharedDirs) {
        if (!editsDirs.add(dir)) {
          LOG.warn("Edits URI " + dir + " listed multiple times in " + 
              DFS_NAMENODE_SHARED_EDITS_DIR_KEY + ". Ignoring duplicates.");
        }
      }
    }    
    // Now add the non-shared dirs.
    for (URI dir : getStorageDirs(conf, DFS_NAMENODE_EDITS_DIR_KEY)) {
      if (!editsDirs.add(dir)) {
        LOG.warn("Edits URI " + dir + " listed multiple times in " + 
            DFS_NAMENODE_SHARED_EDITS_DIR_KEY + " and " +
            DFS_NAMENODE_EDITS_DIR_KEY + ". Ignoring duplicates.");
      }
    }

    if (editsDirs.isEmpty()) {
      // If this is the case, no edit dirs have been explicitly configured.
      // Image dirs are to be used for edits too.
      return Lists.newArrayList(getNamespaceDirs(conf));
    } else {
      return Lists.newArrayList(editsDirs);
    }
  }
  /**
   * Returns edit directories that are shared between primary and secondary.
   * @param conf configuration
   * @return collection of edit directories from {@code conf}
   */
  public static List<URI> getSharedEditsDirs(Configuration conf) {
    // don't use getStorageDirs here, because we want an empty default
    // rather than the dir in /tmp
    Collection<String> dirNames = conf.getTrimmedStringCollection(
        DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
    return Util.stringCollectionAsURIs(dirNames);
  }
  @Override
  public void readLock() {
    // Acquire the namesystem-wide read lock; blocks until available.
    this.fsLock.readLock().lock();
  }
  @Override
  public void readUnlock() {
    // Release one hold of the namesystem read lock.
    this.fsLock.readLock().unlock();
  }
  @Override
  public void writeLock() {
    // Acquire the namesystem-wide write (exclusive) lock.
    this.fsLock.writeLock().lock();
  }
  @Override
  public void writeLockInterruptibly() throws InterruptedException {
    // Like writeLock(), but responsive to thread interruption while waiting.
    this.fsLock.writeLock().lockInterruptibly();
  }
  @Override
  public void writeUnlock() {
    // Release one hold of the namesystem write lock.
    this.fsLock.writeLock().unlock();
  }
  @Override
  public boolean hasWriteLock() {
    // True only if the *current thread* holds the write lock.
    return this.fsLock.isWriteLockedByCurrentThread();
  }
  @Override
  public boolean hasReadLock() {
    // Holding the write lock also counts as holding the read lock.
    return this.fsLock.getReadHoldCount() > 0 || hasWriteLock();
  }
  /** @return the current thread's reentrant read-lock hold count. */
  public int getReadHoldCount() {
    return this.fsLock.getReadHoldCount();
  }
  /** @return the current thread's reentrant write-lock hold count. */
  public int getWriteHoldCount() {
    return this.fsLock.getWriteHoldCount();
  }
  /** Lock the checkpoint lock */
  public void cpLock() {
    this.cpLock.lock();
  }
  /** Lock the checkpoint lock interruptibly */
  public void cpLockInterruptibly() throws InterruptedException {
    this.cpLock.lockInterruptibly();
  }
  /** Unlock the checkpoint lock */
  public void cpUnlock() {
    this.cpLock.unlock();
  }
  /** @return the namespace info (namespace ID, cluster ID, block pool ID,
   *  cTime), read under the namesystem read lock. */
  NamespaceInfo getNamespaceInfo() {
    readLock();
    try {
      return unprotectedGetNamespaceInfo();
    } finally {
      readUnlock();
    }
  }
  /**
   * Version of @see #getNamespaceInfo() that is not protected by a lock.
   */
  NamespaceInfo unprotectedGetNamespaceInfo() {
    // Assemble the identifiers directly from the FSImage storage.
    return new NamespaceInfo(getFSImage().getStorage().getNamespaceID(),
        getClusterId(), getBlockPoolId(),
        getFSImage().getStorage().getCTime());
  }
  /**
   * Close down this file system manager.
   * Causes heartbeat and lease daemons to stop; waits briefly for
   * them to finish, but a short timeout returns control back to caller.
   */
  void close() {
    fsRunning = false;
    try {
      stopCommonServices();
      if (smmthread != null) smmthread.interrupt();
    } finally {
      // using finally to ensure we also wait for lease daemon
      try {
        // Stop both active and standby services; whichever was not running
        // is expected to be a no-op.
        stopActiveServices();
        stopStandbyServices();
      } catch (IOException ie) {
        // Best-effort shutdown: stop failures are deliberately ignored.
      } finally {
        IOUtils.cleanup(LOG, dir);
        IOUtils.cleanup(LOG, fsImage);
      }
    }
  }
  @Override
  public boolean isRunning() {
    // Cleared by close(); indicates the namesystem is shutting down.
    return fsRunning;
  }
  @Override
  public boolean isInStandbyState() {
    if (haContext == null || haContext.getState() == null) {
      // We're still starting up. In this case, if HA is
      // on for the cluster, we always start in standby. Otherwise
      // start in active.
      return haEnabled;
    }

    return HAServiceState.STANDBY == haContext.getState().getServiceState();
  }
/**
* Dump all metadata into specified file
*/
void metaSave(String filename) throws IOException {
checkSuperuserPrivilege();
checkOperation(OperationCategory.UNCHECKED);
writeLock();
try {
checkOperation(OperationCategory.UNCHECKED);
File file = new File(System.getProperty("hadoop.log.dir"), filename);
PrintWriter out = new PrintWriter(new BufferedWriter(
new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8)));
metaSave(out);
out.flush();
out.close();
} finally {
writeUnlock();
}
}
  /**
   * Write the metadata dump (inode/block totals plus the block manager's
   * report) to the given writer. Caller must hold the write lock.
   */
  private void metaSave(PrintWriter out) {
    assert hasWriteLock();
    long totalInodes = this.dir.totalInodes();
    long totalBlocks = this.getBlocksTotal();
    out.println(totalInodes + " files and directories, " + totalBlocks
        + " blocks = " + (totalInodes + totalBlocks)
        + " total filesystem objects");

    blockManager.metaSave(out);
  }
private String metaSaveAsString() {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
metaSave(pw);
pw.flush();
return sw.toString();
}
  /** @return the server default values (block size, replication, etc.). */
  FsServerDefaults getServerDefaults() throws StandbyException {
    checkOperation(OperationCategory.READ);
    return serverDefaults;
  }
  /** @return the configured access-time precision in milliseconds. */
  long getAccessTimePrecision() {
    return accessTimePrecision;
  }
  /** @return true if access times are tracked (precision > 0). */
  private boolean isAccessTimeSupported() {
    return accessTimePrecision > 0;
  }
/////////////////////////////////////////////////////////
//
// These methods are called by HadoopFS clients
//
/////////////////////////////////////////////////////////
  /**
   * Set permissions for an existing file.
   *
   * Performed under the write lock; access failures are audit-logged before
   * being rethrown, successes after the edit-log sync.
   *
   * @throws IOException
   */
  void setPermission(String src, FsPermission permission) throws IOException {
    HdfsFileStatus auditStat;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot set permission for " + src);
      auditStat = FSDirAttrOp.setPermission(dir, src, permission);
    } catch (AccessControlException e) {
      logAuditEvent(false, "setPermission", src);
      throw e;
    } finally {
      writeUnlock();
    }
    // Sync outside the lock to avoid holding it during the log flush.
    getEditLog().logSync();
    logAuditEvent(true, "setPermission", src, null, auditStat);
  }
  /**
   * Set owner for an existing file.
   *
   * Performed under the write lock; access failures are audit-logged before
   * being rethrown, successes after the edit-log sync.
   *
   * @throws IOException
   */
  void setOwner(String src, String username, String group)
      throws IOException {
    HdfsFileStatus auditStat;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot set owner for " + src);
      auditStat = FSDirAttrOp.setOwner(dir, src, username, group);
    } catch (AccessControlException e) {
      logAuditEvent(false, "setOwner", src);
      throw e;
    } finally {
      writeUnlock();
    }
    // Sync outside the lock to avoid holding it during the log flush.
    getEditLog().logSync();
    logAuditEvent(true, "setOwner", src, null, auditStat);
  }
  /**
   * Result of an internal block-location lookup: the located blocks plus,
   * when the file's access time needs updating, the resolved path.
   */
  static class GetBlockLocationsResult {
    // Non-null only when the caller should update the inode's access time.
    final INodesInPath iip;
    final LocatedBlocks blocks;
    boolean updateAccessTime() {
      return iip != null;
    }
    private GetBlockLocationsResult(INodesInPath iip, LocatedBlocks blocks) {
      this.iip = iip;
      this.blocks = blocks;
    }
  }
  /**
   * Get block locations within the specified range.
   *
   * Reads the locations under the read lock, then (if needed) re-acquires the
   * write lock to update the file's access time, and finally sorts block
   * locations by proximity to the client.
   *
   * @see ClientProtocol#getBlockLocations(String, long, long)
   */
  LocatedBlocks getBlockLocations(String clientMachine, String src,
      long offset, long length) throws IOException {
    checkOperation(OperationCategory.READ);
    GetBlockLocationsResult res = null;
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      res = getBlockLocations(src, offset, length, true, true);
    } catch (AccessControlException e) {
      logAuditEvent(false, "open", src);
      throw e;
    } finally {
      readUnlock();
    }

    logAuditEvent(true, "open", src);

    if (res.updateAccessTime()) {
      writeLock();
      final long now = now();
      try {
        checkOperation(OperationCategory.WRITE);
        INode inode = res.iip.getLastINode();
        // Only persist a new access time if it advanced past the
        // configured precision and we are not in safe mode.
        boolean updateAccessTime = now > inode.getAccessTime() +
            getAccessTimePrecision();
        if (!isInSafeMode() && updateAccessTime) {
          boolean changed = FSDirAttrOp.setTimes(dir,
              inode, -1, now, false, res.iip.getLatestSnapshotId());
          if (changed) {
            getEditLog().logTimes(src, -1, now);
          }
        }
      } catch (Throwable e) {
        // Access-time update is best effort; never fail the read for it.
        LOG.warn("Failed to update the access time of " + src, e);
      } finally {
        writeUnlock();
      }
    }

    LocatedBlocks blocks = res.blocks;
    if (blocks != null) {
      blockManager.getDatanodeManager().sortLocatedBlocks(
          clientMachine, blocks.getLocatedBlocks());

      // lastBlock is not part of getLocatedBlocks(), might need to sort it too
      LocatedBlock lastBlock = blocks.getLastLocatedBlock();
      if (lastBlock != null) {
        ArrayList<LocatedBlock> lastBlockList = Lists.newArrayList(lastBlock);
        blockManager.getDatanodeManager().sortLocatedBlocks(
            clientMachine, lastBlockList);
      }
    }
    return blocks;
  }
  /**
   * Get block locations within the specified range.
   * @see ClientProtocol#getBlockLocations(String, long, long)
   * @throws IOException
   */
  GetBlockLocationsResult getBlockLocations(
      String src, long offset, long length, boolean needBlockToken,
      boolean checkSafeMode) throws IOException {
    // Validate range arguments before doing any work.
    if (offset < 0) {
      throw new HadoopIllegalArgumentException(
          "Negative offset is not supported. File: " + src);
    }
    if (length < 0) {
      throw new HadoopIllegalArgumentException(
          "Negative length is not supported. File: " + src);
    }
    final GetBlockLocationsResult ret = getBlockLocationsInt(
        src, offset, length, needBlockToken);

    if (checkSafeMode && isInSafeMode()) {
      for (LocatedBlock b : ret.blocks.getLocatedBlocks()) {
        // if safemode & no block locations yet then throw safemodeException
        if ((b.getLocations() == null) || (b.getLocations().length == 0)) {
          SafeModeException se = new SafeModeException(
              "Zero blocklocations for " + src, safeMode);
          // An HA-active NN tells the client to retry; otherwise fail.
          if (haEnabled && haContext != null &&
              haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
            throw new RetriableException(se);
          } else {
            throw se;
          }
        }
      }
    }
    return ret;
  }
  /**
   * Resolve {@code srcArg} to an INodeFile, check permissions, and build the
   * located-blocks result. Also attaches caching info and encryption info,
   * and decides whether the caller should update the access time.
   */
  private GetBlockLocationsResult getBlockLocationsInt(
      final String srcArg, long offset, long length, boolean needBlockToken)
      throws IOException {
    String src = srcArg;
    FSPermissionChecker pc = getPermissionChecker();
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    src = dir.resolvePath(pc, src, pathComponents);
    final INodesInPath iip = dir.getINodesInPath(src, true);
    final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
    if (isPermissionEnabled) {
      dir.checkPathAccess(pc, iip, FsAction.READ);
      checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId());
    }

    // For a snapshot path, size is fixed at the snapshot's file size; for a
    // live path, the last under-construction block is excluded.
    final long fileSize = iip.isSnapshot()
        ? inode.computeFileSize(iip.getPathSnapshotId())
        : inode.computeFileSizeNotIncludingLastUcBlock();
    boolean isUc = inode.isUnderConstruction();
    if (iip.isSnapshot()) {
      // if src indicates a snapshot file, we need to make sure the returned
      // blocks do not exceed the size of the snapshot file.
      length = Math.min(length, fileSize - offset);
      isUc = false;
    }

    // Raw-path reads skip encryption info on purpose.
    final FileEncryptionInfo feInfo =
        FSDirectory.isReservedRawName(srcArg) ? null
            : dir.getFileEncryptionInfo(inode, iip.getPathSnapshotId(), iip);

    final LocatedBlocks blocks = blockManager.createLocatedBlocks(
        inode.getBlocks(iip.getPathSnapshotId()), fileSize,
        isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo);

    // Set caching information for the located blocks.
    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
      cacheManager.setCachedLocations(lb);
    }

    final long now = now();
    boolean updateAccessTime = isAccessTimeSupported() && !isInSafeMode()
        && !iip.isSnapshot()
        && now > inode.getAccessTime() + getAccessTimePrecision();
    return new GetBlockLocationsResult(updateAccessTime ? iip : null, blocks);
  }
  /**
   * Moves all the blocks from {@code srcs} and appends them to {@code target}
   * To avoid rollbacks we will verify validity of ALL of the args
   * before we start actual move.
   * 
   * This does not support ".inodes" relative path
   * @param target target to concat into
   * @param srcs file that will be concatenated
   * @throws IOException on error
   */
  void concat(String target, String [] srcs, boolean logRetryCache)
      throws IOException {
    checkOperation(OperationCategory.WRITE);
    waitForLoadingFSImage();
    HdfsFileStatus stat = null;
    boolean success = false;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot concat " + target);
      stat = FSDirConcatOp.concat(dir, target, srcs, logRetryCache);
      success = true;
    } finally {
      writeUnlock();
      // Sync the edit log only on success; audit-log either way.
      if (success) {
        getEditLog().logSync();
      }
      logAuditEvent(success, "concat", Arrays.toString(srcs), target, stat);
    }
  }
  /**
   * stores the modification and access time for this inode. 
   * The access time is precise up to an hour. The transaction, if needed, is
   * written to the edits log but is not flushed.
   */
  void setTimes(String src, long mtime, long atime) throws IOException {
    HdfsFileStatus auditStat;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot set times " + src);
      auditStat = FSDirAttrOp.setTimes(dir, src, mtime, atime);
    } catch (AccessControlException e) {
      logAuditEvent(false, "setTimes", src);
      throw e;
    } finally {
      writeUnlock();
    }
    // Sync outside the lock to avoid holding it during the log flush.
    getEditLog().logSync();
    logAuditEvent(true, "setTimes", src, null, auditStat);
  }
  /**
   * Truncate file to a lower length.
   * Truncate cannot be reverted / recovered from as it causes data loss.
   * Truncation at block boundary is atomic, otherwise it requires
   * block recovery to truncate the last block of the file.
   *
   * @return true if client does not need to wait for block recovery,
   * false if client needs to wait for block recovery.
   */
  boolean truncate(String src, long newLength,
                   String clientName, String clientMachine,
                   long mtime)
      throws IOException, UnresolvedLinkException {
    // Thin wrapper that audit-logs access failures before rethrowing.
    boolean ret;
    try {
      ret = truncateInt(src, newLength, clientName, clientMachine, mtime);
    } catch (AccessControlException e) {
      logAuditEvent(false, "truncate", src);
      throw e;
    }
    return ret;
  }
  /**
   * Validate the truncate request, perform it under the write lock, then
   * sync the edit log and release any blocks freed by the truncation.
   *
   * @return true if no block recovery is needed (truncate was at a block
   *         boundary), false if the client must wait for recovery
   */
  boolean truncateInt(String srcArg, long newLength,
                      String clientName, String clientMachine,
                      long mtime)
      throws IOException, UnresolvedLinkException {
    String src = srcArg;
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src="
          + src + " newLength=" + newLength);
    }
    if (newLength < 0) {
      throw new HadoopIllegalArgumentException(
          "Cannot truncate to a negative file size: " + newLength + ".");
    }
    HdfsFileStatus stat = null;
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);
    boolean res;
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    writeLock();
    // Blocks cut off by the truncation are collected here and removed
    // after the lock is released.
    BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot truncate for " + src);
      src = dir.resolvePath(pc, src, pathComponents);
      res = truncateInternal(src, newLength, clientName,
          clientMachine, mtime, pc, toRemoveBlocks);
      stat = dir.getAuditFileInfo(dir.getINodesInPath4Write(src, false));
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    if (!toRemoveBlocks.getToDeleteList().isEmpty()) {
      removeBlocks(toRemoveBlocks);
      toRemoveBlocks.clear();
    }
    logAuditEvent(true, "truncate", src, null, stat);
    return res;
  }
  /**
   * Truncate a file to a given size
   * Update the count at each ancestor directory with quota
   *
   * Caller must hold the write lock. Lazy-persist files cannot be truncated.
   * If the new length is not on a block boundary, the last block is prepared
   * for recovery via {@link #prepareFileForTruncate}.
   */
  boolean truncateInternal(String src, long newLength,
                           String clientName, String clientMachine,
                           long mtime, FSPermissionChecker pc,
                           BlocksMapUpdateInfo toRemoveBlocks)
      throws IOException, UnresolvedLinkException {
    assert hasWriteLock();
    INodesInPath iip = dir.getINodesInPath4Write(src, true);
    if (isPermissionEnabled) {
      dir.checkPathAccess(pc, iip, FsAction.WRITE);
    }
    INodeFile file = INodeFile.valueOf(iip.getLastINode(), src);
    final BlockStoragePolicy lpPolicy =
        blockManager.getStoragePolicy("LAZY_PERSIST");

    if (lpPolicy != null &&
        lpPolicy.getId() == file.getStoragePolicyID()) {
      throw new UnsupportedOperationException(
          "Cannot truncate lazy persist file " + src);
    }
    // Opening an existing file for truncate. May need lease recovery.
    recoverLeaseInternal(RecoverLeaseOp.TRUNCATE_FILE,
        iip, src, clientName, clientMachine, false);
    // Truncate length check.
    long oldLength = file.computeFileSize();
    // Truncating to the current length is a no-op that succeeds immediately.
    if(oldLength == newLength) {
      return true;
    }
    if(oldLength < newLength) {
      throw new HadoopIllegalArgumentException(
          "Cannot truncate to a larger file size. Current size: " + oldLength +
              ", truncate size: " + newLength + ".");
    }
    // Perform INodeFile truncation.
    boolean onBlockBoundary = dir.truncate(iip, newLength,
        toRemoveBlocks, mtime);
    Block truncateBlock = null;
    if(! onBlockBoundary) {
      // Open file for write, but don't log into edits
      long lastBlockDelta = file.computeFileSize() -  newLength;
      assert lastBlockDelta > 0 : "delta is 0 only if on block bounday";
      truncateBlock = prepareFileForTruncate(iip, clientName, clientMachine,
          lastBlockDelta, null);
    }

    // Log the truncate op (with the recovery block, if any) to the edits.
    getEditLog().logTruncate(src, clientName, clientMachine, newLength, mtime,
        truncateBlock);
    return onBlockBoundary;
  }
  /**
   * Convert current INode to UnderConstruction.
   * Recreate lease.
   * Create new block for the truncated copy.
   * Schedule truncation of the replicas.
   *
   * Two recovery modes: copy-on-truncate allocates a brand new block and
   * keeps the old one as the copy source; in-place recovery reuses the last
   * block with a new generation stamp.
   *
   * @return the returned block will be written to editLog and passed back into
   *         this method upon loading.
   */
  Block prepareFileForTruncate(INodesInPath iip,
                               String leaseHolder,
                               String clientMachine,
                               long lastBlockDelta,
                               Block newBlock)
      throws IOException {
    INodeFile file = iip.getLastINode().asFile();
    String src = iip.getPath();
    file.recordModification(iip.getLatestSnapshotId());
    file.toUnderConstruction(leaseHolder, clientMachine);
    assert file.isUnderConstruction() : "inode should be under construction.";
    leaseManager.addLease(
        file.getFileUnderConstructionFeature().getClientName(), src);
    // A null newBlock means this is a fresh call (not an edit-log replay),
    // so recovery should be initiated immediately below.
    boolean shouldRecoverNow = (newBlock == null);
    BlockInfoContiguous oldBlock = file.getLastBlock();
    boolean shouldCopyOnTruncate = shouldCopyOnTruncate(file, oldBlock);
    if(newBlock == null) {
      newBlock = (shouldCopyOnTruncate) ? createNewBlock() :
          new Block(oldBlock.getBlockId(), oldBlock.getNumBytes(),
              nextGenerationStamp(blockIdManager.isLegacyBlock(oldBlock)));
    }

    BlockInfoContiguousUnderConstruction truncatedBlockUC;
    if(shouldCopyOnTruncate) {
      // Add new truncateBlock into blocksMap and
      // use oldBlock as a source for copy-on-truncate recovery
      truncatedBlockUC = new BlockInfoContiguousUnderConstruction(newBlock,
          file.getBlockReplication());
      truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
      truncatedBlockUC.setTruncateBlock(oldBlock);
      file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock));
      getBlockManager().addBlockCollection(truncatedBlockUC, file);

      NameNode.stateChangeLog.info("BLOCK* prepareFileForTruncate: "
          + "Scheduling copy-on-truncate to new size "
          + truncatedBlockUC.getNumBytes() + " new block " + newBlock
          + " old block " + truncatedBlockUC.getTruncateBlock());
    } else {
      // Use new generation stamp for in-place truncate recovery
      blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta);
      oldBlock = file.getLastBlock();
      assert !oldBlock.isComplete() : "oldBlock should be under construction";
      truncatedBlockUC = (BlockInfoContiguousUnderConstruction) oldBlock;
      truncatedBlockUC.setTruncateBlock(new Block(oldBlock));
      truncatedBlockUC.getTruncateBlock().setNumBytes(
          oldBlock.getNumBytes() - lastBlockDelta);
      truncatedBlockUC.getTruncateBlock().setGenerationStamp(
          newBlock.getGenerationStamp());

      NameNode.stateChangeLog.debug("BLOCK* prepareFileForTruncate: "
          + "Scheduling in-place block truncate to new size "
          + truncatedBlockUC.getTruncateBlock().getNumBytes()
          + " block=" + truncatedBlockUC);
    }
    if(shouldRecoverNow)
      truncatedBlockUC.initializeBlockRecovery(newBlock.getGenerationStamp());

    // update the quota: use the preferred block size for UC block
    final long diff =
        file.getPreferredBlockSize() - truncatedBlockUC.getNumBytes();
    dir.updateSpaceConsumed(iip, 0, diff, file.getBlockReplication());
    return newBlock;
  }
/**
* Defines if a replica needs to be copied on truncate or
* can be truncated in place.
*/
boolean shouldCopyOnTruncate(INodeFile file, BlockInfoContiguous blk) {
if(!isUpgradeFinalized()) {
return true;
}
return file.isBlockInLatestSnapshot(blk);
}
  /**
   * Create a symbolic link.
   *
   * Performed under the write lock; access failures are audit-logged before
   * being rethrown, successes after the edit-log sync.
   */
  void createSymlink(String target, String link,
      PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
      throws IOException {
    waitForLoadingFSImage();
    HdfsFileStatus auditStat = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot create symlink " + link);
      auditStat = FSDirSymlinkOp.createSymlinkInt(this, target, link, dirPerms,
                                                  createParent, logRetryCache);
    } catch (AccessControlException e) {
      logAuditEvent(false, "createSymlink", link, target, null);
      throw e;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "createSymlink", link, target, auditStat);
  }
  /**
   * Set replication for an existing file.
   * 
   * The NameNode sets new replication and schedules either replication of 
   * under-replicated data blocks or removal of the excessive block copies 
   * if the blocks are over-replicated.
   * 
   * @see ClientProtocol#setReplication(String, short)
   * @param src file name
   * @param replication new replication
   * @return true if successful; 
   *         false if file does not exist or is a directory
   */
  boolean setReplication(final String src, final short replication)
      throws IOException {
    boolean success = false;
    waitForLoadingFSImage();
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot set replication for " + src);
      success = FSDirAttrOp.setReplication(dir, blockManager, src, replication);
    } catch (AccessControlException e) {
      logAuditEvent(false, "setReplication", src);
      throw e;
    } finally {
      writeUnlock();
    }
    // Sync and audit only when the replication was actually changed.
    if (success) {
      getEditLog().logSync();
      logAuditEvent(true, "setReplication", src);
    }
    return success;
  }
  /**
   * Set the storage policy for a file or a directory.
   *
   * @param src file/directory path
   * @param policyName storage policy name
   */
  void setStoragePolicy(String src, String policyName) throws IOException {
    HdfsFileStatus auditStat;
    waitForLoadingFSImage();
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot set storage policy for " + src);
      auditStat = FSDirAttrOp.setStoragePolicy(
          dir, blockManager, src, policyName);
    } catch (AccessControlException e) {
      logAuditEvent(false, "setStoragePolicy", src);
      throw e;
    } finally {
      writeUnlock();
    }
    // Sync outside the lock to avoid holding it during the log flush.
    getEditLog().logSync();
    logAuditEvent(true, "setStoragePolicy", src, null, auditStat);
  }
  /**
   * @return All the existing block storage policies
   */
  BlockStoragePolicy[] getStoragePolicies() throws IOException {
    checkOperation(OperationCategory.READ);
    waitForLoadingFSImage();
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      return FSDirAttrOp.getStoragePolicies(blockManager);
    } finally {
      readUnlock();
    }
  }
  /** @return the preferred block size of the file at {@code src}, read
   *  under the namesystem read lock. */
  long getPreferredBlockSize(String src) throws IOException {
    checkOperation(OperationCategory.READ);
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      return FSDirAttrOp.getPreferredBlockSize(dir, src);
    } finally {
      readUnlock();
    }
  }
  /**
   * If the file is within an encryption zone, select the appropriate 
   * CryptoProtocolVersion from the list provided by the client. Since the
   * client may be newer, we need to handle unknown versions.
   *
   * @param zone EncryptionZone of the file
   * @param supportedVersions List of supported protocol versions
   * @return chosen protocol version
   * @throws IOException
   */
  private CryptoProtocolVersion chooseProtocolVersion(EncryptionZone zone,
      CryptoProtocolVersion[] supportedVersions)
      throws UnknownCryptoProtocolVersionException, UnresolvedLinkException,
        SnapshotAccessControlException {
    Preconditions.checkNotNull(zone);
    Preconditions.checkNotNull(supportedVersions);
    // Right now, we only support a single protocol version,
    // so simply look for it in the list of provided options
    final CryptoProtocolVersion required = zone.getVersion();

    for (CryptoProtocolVersion c : supportedVersions) {
      // Skip versions the client sent that this NN does not recognize.
      if (c.equals(CryptoProtocolVersion.UNKNOWN)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Ignoring unknown CryptoProtocolVersion provided by " +
              "client: " + c.getUnknownValue());
        }
        continue;
      }
      if (c.equals(required)) {
        return c;
      }
    }
    throw new UnknownCryptoProtocolVersionException(
        "No crypto protocol versions provided by the client are supported."
            + " Client provided: " + Arrays.toString(supportedVersions)
            + " NameNode supports: " + Arrays.toString(CryptoProtocolVersion
            .values()));
  }
/**
* Invoke KeyProvider APIs to generate an encrypted data encryption key for an
* encryption zone. Should not be called with any locks held.
*
* @param ezKeyName key name of an encryption zone
* @return New EDEK, or null if ezKeyName is null
* @throws IOException
*/
private EncryptedKeyVersion generateEncryptedDataEncryptionKey(String
ezKeyName) throws IOException {
if (ezKeyName == null) {
return null;
}
EncryptedKeyVersion edek = null;
try {
edek = provider.generateEncryptedKey(ezKeyName);
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
Preconditions.checkNotNull(edek);
return edek;
}
  /**
   * Create a new file entry in the namespace.
   * 
   * For description of parameters and exceptions thrown see
   * {@link ClientProtocol#create}, except it returns valid file status upon
   * success
   *
   * Thin wrapper over {@link #startFileInt} that audit-logs access failures
   * before rethrowing them.
   */
  HdfsFileStatus startFile(String src, PermissionStatus permissions,
      String holder, String clientMachine, EnumSet<CreateFlag> flag,
      boolean createParent, short replication, long blockSize, 
      CryptoProtocolVersion[] supportedVersions, boolean logRetryCache)
      throws AccessControlException, SafeModeException,
      FileAlreadyExistsException, UnresolvedLinkException,
      FileNotFoundException, ParentNotDirectoryException, IOException {

    HdfsFileStatus status = null;
    try {
      status = startFileInt(src, permissions, holder, clientMachine, flag,
          createParent, replication, blockSize, supportedVersions,
          logRetryCache);
    } catch (AccessControlException e) {
      logAuditEvent(false, "create", src);
      throw e;
    }
    return status;
  }
  /**
   * Internal implementation of {@link #startFile}: validates the request,
   * optimistically pre-generates an EDEK when the target is inside an
   * encryption zone (dropping all locks for the KeyProvider RPC), and then
   * performs the actual create while holding the namesystem write lock.
   *
   * @param srcArg path of the file to create
   * @param permissions permissions to apply to the new file
   * @param holder lease holder (client name)
   * @param clientMachine identifier of the client machine
   * @param flag create flags (CREATE / OVERWRITE / LAZY_PERSIST, ...)
   * @param createParent whether missing parent directories may be created
   * @param replication target replication factor
   * @param blockSize preferred block size for the new file
   * @param supportedVersions crypto protocol versions the client supports
   * @param logRetryCache whether to record RPC ids in the edit log for
   *        retry-cache rebuilding
   * @return file status of the newly created file
   */
  private HdfsFileStatus startFileInt(final String srcArg,
      PermissionStatus permissions, String holder, String clientMachine,
      EnumSet<CreateFlag> flag, boolean createParent, short replication,
      long blockSize, CryptoProtocolVersion[] supportedVersions,
      boolean logRetryCache)
      throws AccessControlException, SafeModeException,
      FileAlreadyExistsException, UnresolvedLinkException,
      FileNotFoundException, ParentNotDirectoryException, IOException {
    String src = srcArg;
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      StringBuilder builder = new StringBuilder();
      builder.append("DIR* NameSystem.startFile: src=" + src
          + ", holder=" + holder
          + ", clientMachine=" + clientMachine
          + ", createParent=" + createParent
          + ", replication=" + replication
          + ", createFlag=" + flag.toString()
          + ", blockSize=" + blockSize);
      builder.append(", supportedVersions=");
      if (supportedVersions != null) {
        builder.append(Arrays.toString(supportedVersions));
      } else {
        builder.append("null");
      }
      NameNode.stateChangeLog.debug(builder.toString());
    }
    if (!DFSUtil.isValidName(src)) {
      throw new InvalidPathException(src);
    }
    blockManager.verifyReplication(src, replication, clientMachine);
    boolean skipSync = false;
    HdfsFileStatus stat = null;
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);
    // Reject block sizes below the configured minimum before taking locks.
    if (blockSize < minBlockSize) {
      throw new IOException("Specified block size is less than configured" +
          " minimum value (" + DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY
          + "): " + blockSize + " < " + minBlockSize);
    }
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    boolean create = flag.contains(CreateFlag.CREATE);
    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
    boolean isLazyPersist = flag.contains(CreateFlag.LAZY_PERSIST);
    waitForLoadingFSImage();
    /**
     * If the file is in an encryption zone, we optimistically create an
     * EDEK for the file by calling out to the configured KeyProvider.
     * Since this typically involves doing an RPC, we take the readLock
     * initially, then drop it to do the RPC.
     *
     * Since the path can flip-flop between being in an encryption zone and not
     * in the meantime, we need to recheck the preconditions when we retake the
     * lock to do the create. If the preconditions are not met, we throw a
     * special RetryStartFileException to ask the DFSClient to try the create
     * again later.
     */
    CryptoProtocolVersion protocolVersion = null;
    CipherSuite suite = null;
    String ezKeyName = null;
    EncryptedKeyVersion edek = null;
    if (provider != null) {
      readLock();
      try {
        src = dir.resolvePath(pc, src, pathComponents);
        INodesInPath iip = dir.getINodesInPath4Write(src);
        // Nothing to do if the path is not within an EZ
        final EncryptionZone zone = dir.getEZForPath(iip);
        if (zone != null) {
          protocolVersion = chooseProtocolVersion(zone, supportedVersions);
          suite = zone.getSuite();
          ezKeyName = zone.getKeyName();
          Preconditions.checkNotNull(protocolVersion);
          Preconditions.checkNotNull(suite);
          Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN),
              "Chose an UNKNOWN CipherSuite!");
          Preconditions.checkNotNull(ezKeyName);
        }
      } finally {
        readUnlock();
      }
      Preconditions.checkState(
          (suite == null && ezKeyName == null) ||
            (suite != null && ezKeyName != null),
          "Both suite and ezKeyName should both be null or not null");
      // Generate EDEK if necessary while not holding the lock
      edek = generateEncryptedDataEncryptionKey(ezKeyName);
      EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
    }
    // Proceed with the create, using the computed cipher suite and
    // generated EDEK
    BlocksMapUpdateInfo toRemoveBlocks = null;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot create file" + src);
      dir.writeLock();
      try {
        // Re-resolve under the write lock: the namespace may have changed
        // while the EDEK RPC was in flight.
        src = dir.resolvePath(pc, src, pathComponents);
        final INodesInPath iip = dir.getINodesInPath4Write(src);
        toRemoveBlocks = startFileInternal(
            pc, iip, permissions, holder,
            clientMachine, create, overwrite,
            createParent, replication, blockSize,
            isLazyPersist, suite, protocolVersion, edek,
            logRetryCache);
        stat = FSDirStatAndListingOp.getFileInfo(
            dir, src, false, FSDirectory.isReservedRawName(srcArg), true);
      } finally {
        dir.writeUnlock();
      }
    } catch (StandbyException se) {
      skipSync = true;
      throw se;
    } finally {
      writeUnlock();
      // There might be transactions logged while trying to recover the lease.
      // They need to be sync'ed even when an exception was thrown.
      if (!skipSync) {
        getEditLog().logSync();
        if (toRemoveBlocks != null) {
          removeBlocks(toRemoveBlocks);
          toRemoveBlocks.clear();
        }
      }
    }
    logAuditEvent(true, "create", srcArg, null, stat);
    return stat;
  }
  /**
   * Create a new file or overwrite an existing file<br>
   *
   * Once the file is created the client then allocates a new block with the
   * next call using {@link ClientProtocol#addBlock}.
   * <p>
   * For description of parameters and exceptions thrown see
   * {@link ClientProtocol#create}
   *
   * @return blocks scheduled for removal when an existing file was
   *         overwritten, or null if nothing needs to be removed
   */
  private BlocksMapUpdateInfo startFileInternal(FSPermissionChecker pc, 
      INodesInPath iip, PermissionStatus permissions, String holder,
      String clientMachine, boolean create, boolean overwrite, 
      boolean createParent, short replication, long blockSize, 
      boolean isLazyPersist, CipherSuite suite, CryptoProtocolVersion version,
      EncryptedKeyVersion edek, boolean logRetryEntry)
      throws IOException {
    assert hasWriteLock();
    // Verify that the destination does not exist as a directory already.
    final INode inode = iip.getLastINode();
    final String src = iip.getPath();
    if (inode != null && inode.isDirectory()) {
      throw new FileAlreadyExistsException(src +
          " already exists as a directory");
    }
    final INodeFile myFile = INodeFile.valueOf(inode, src, true);
    if (isPermissionEnabled) {
      if (overwrite && myFile != null) {
        dir.checkPathAccess(pc, iip, FsAction.WRITE);
      }
      /*
       * To overwrite existing file, need to check 'w' permission 
       * of parent (equals to ancestor in this case)
       */
      dir.checkAncestorAccess(pc, iip, FsAction.WRITE);
    }
    if (!createParent) {
      dir.verifyParentDir(iip, src);
    }
    FileEncryptionInfo feInfo = null;
    final EncryptionZone zone = dir.getEZForPath(iip);
    if (zone != null) {
      // The path is now within an EZ, but we're missing encryption parameters
      // (the caller generated the EDEK before retaking the lock, so the path
      // may have moved into an EZ in the meantime). Ask the client to retry.
      if (suite == null || edek == null) {
        throw new RetryStartFileException();
      }
      // Path is within an EZ and we have provided encryption parameters.
      // Make sure that the generated EDEK matches the settings of the EZ.
      final String ezKeyName = zone.getKeyName();
      if (!ezKeyName.equals(edek.getEncryptionKeyName())) {
        throw new RetryStartFileException();
      }
      feInfo = new FileEncryptionInfo(suite, version,
          edek.getEncryptedKeyVersion().getMaterial(),
          edek.getEncryptedKeyIv(),
          ezKeyName, edek.getEncryptionKeyVersionName());
    }
    try {
      BlocksMapUpdateInfo toRemoveBlocks = null;
      if (myFile == null) {
        if (!create) {
          throw new FileNotFoundException("Can't overwrite non-existent " +
              src + " for client " + clientMachine);
        }
      } else {
        if (overwrite) {
          // Delete the existing file; its blocks are collected and returned
          // to the caller for removal outside the lock.
          toRemoveBlocks = new BlocksMapUpdateInfo();
          List<INode> toRemoveINodes = new ChunkedArrayList<INode>();
          long ret = FSDirDeleteOp.delete(dir, iip, toRemoveBlocks,
                                          toRemoveINodes, now());
          if (ret >= 0) {
            iip = INodesInPath.replace(iip, iip.length() - 1, null);
            FSDirDeleteOp.incrDeletedFileCount(ret);
            removeLeasesAndINodes(src, toRemoveINodes, true);
          }
        } else {
          // If lease soft limit time is expired, recover the lease
          recoverLeaseInternal(RecoverLeaseOp.CREATE_FILE,
              iip, src, holder, clientMachine, false);
          throw new FileAlreadyExistsException(src + " for client " +
              clientMachine + " already exists");
        }
      }
      checkFsObjectLimit();
      INodeFile newNode = null;
      // Always do an implicit mkdirs for parent directory tree.
      Map.Entry<INodesInPath, String> parent = FSDirMkdirOp
          .createAncestorDirectories(dir, iip, permissions);
      if (parent != null) {
        iip = dir.addFile(parent.getKey(), parent.getValue(), permissions,
            replication, blockSize, holder, clientMachine);
        newNode = iip != null ? iip.getLastINode().asFile() : null;
      }
      if (newNode == null) {
        throw new IOException("Unable to add " + src +  " to namespace");
      }
      leaseManager.addLease(newNode.getFileUnderConstructionFeature()
          .getClientName(), src);
      // Set encryption attributes if necessary
      if (feInfo != null) {
        dir.setFileEncryptionInfo(src, feInfo);
        // Re-fetch the inode: setting the xattr may have replaced it.
        newNode = dir.getInode(newNode.getId()).asFile();
      }
      setNewINodeStoragePolicy(newNode, iip, isLazyPersist);
      // record file record in log, record new generation stamp
      getEditLog().logOpenFile(src, newNode, overwrite, logRetryEntry);
      if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: added " +
            src + " inode " + newNode.getId() + " " + holder);
      }
      return toRemoveBlocks;
    } catch (IOException ie) {
      NameNode.stateChangeLog.warn("DIR* NameSystem.startFile: " + src + " " +
          ie.getMessage());
      throw ie;
    }
  }
private void setNewINodeStoragePolicy(INodeFile inode,
INodesInPath iip,
boolean isLazyPersist)
throws IOException {
if (isLazyPersist) {
BlockStoragePolicy lpPolicy =
blockManager.getStoragePolicy("LAZY_PERSIST");
// Set LAZY_PERSIST storage policy if the flag was passed to
// CreateFile.
if (lpPolicy == null) {
throw new HadoopIllegalArgumentException(
"The LAZY_PERSIST storage policy has been disabled " +
"by the administrator.");
}
inode.setStoragePolicyID(lpPolicy.getId(),
iip.getLatestSnapshotId());
} else {
BlockStoragePolicy effectivePolicy =
blockManager.getStoragePolicy(inode.getStoragePolicyID());
if (effectivePolicy != null &&
effectivePolicy.isCopyOnCreateFile()) {
// Copy effective policy from ancestor directory to current file.
inode.setStoragePolicyID(effectivePolicy.getId(),
iip.getLatestSnapshotId());
}
}
}
  /**
   * Append to an existing file for append.
   * <p>
   *
   * The method returns the last block of the file if this is a partial block,
   * which can still be used for writing more data. The client uses the returned
   * block locations to form the data pipeline for this block.<br>
   * The method returns null if the last block is full. The client then
   * allocates a new block with the next call using
   * {@link ClientProtocol#addBlock}.
   * <p>
   *
   * For description of parameters and exceptions thrown see
   * {@link ClientProtocol#append(String, String, EnumSetWritable)}
   *
   * @return the last block locations if the block is partial or null otherwise
   */
  private LocatedBlock appendFileInternal(FSPermissionChecker pc,
      INodesInPath iip, String holder, String clientMachine, boolean newBlock,
      boolean logRetryCache) throws IOException {
    assert hasWriteLock();
    // Verify that the destination does not exist as a directory already.
    final INode inode = iip.getLastINode();
    final String src = iip.getPath();
    if (inode != null && inode.isDirectory()) {
      throw new FileAlreadyExistsException("Cannot append to directory " + src
          + "; already exists as a directory.");
    }
    if (isPermissionEnabled) {
      dir.checkPathAccess(pc, iip, FsAction.WRITE);
    }
    try {
      if (inode == null) {
        throw new FileNotFoundException("failed to append to non-existent file "
          + src + " for client " + clientMachine);
      }
      INodeFile myFile = INodeFile.valueOf(inode, src, true);
      // Appending to LAZY_PERSIST files is not supported.
      final BlockStoragePolicy lpPolicy =
          blockManager.getStoragePolicy("LAZY_PERSIST");
      if (lpPolicy != null &&
          lpPolicy.getId() == myFile.getStoragePolicyID()) {
        throw new UnsupportedOperationException(
            "Cannot append to lazy persist file " + src);
      }
      // Opening an existing file for append - may need to recover lease.
      recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE,
          iip, src, holder, clientMachine, false);
      final BlockInfoContiguous lastBlock = myFile.getLastBlock();
      // Check that the block has at least minimum replication.
      if(lastBlock != null && lastBlock.isComplete() &&
          !getBlockManager().isSufficientlyReplicated(lastBlock)) {
        throw new IOException("append: lastBlock=" + lastBlock +
            " of src=" + src + " is not sufficiently replicated yet.");
      }
      return prepareFileForAppend(src, iip, holder, clientMachine, newBlock,
          true, logRetryCache);
    } catch (IOException ie) {
      NameNode.stateChangeLog.warn("DIR* NameSystem.append: " +ie.getMessage());
      throw ie;
    }
  }
  /**
   * Convert current node to under construction.
   * Recreate in-memory lease record.
   *
   * @param src path to the file
   * @param leaseHolder identifier of the lease holder on this file
   * @param clientMachine identifier of the client machine
   * @param newBlock if the data is appended to a new block
   * @param writeToEditLog whether to persist this change to the edit log
   * @param logRetryCache whether to record RPC ids in editlog for retry cache
   *                      rebuilding
   * @return the last block locations if the block is partial or null otherwise
   * @throws UnresolvedLinkException
   * @throws IOException
   */
  LocatedBlock prepareFileForAppend(String src, INodesInPath iip,
      String leaseHolder, String clientMachine, boolean newBlock,
      boolean writeToEditLog, boolean logRetryCache) throws IOException {
    final INodeFile file = iip.getLastINode().asFile();
    // Record the modification against the latest snapshot, then flip the
    // file into under-construction state under the new lease holder.
    file.recordModification(iip.getLatestSnapshotId());
    file.toUnderConstruction(leaseHolder, clientMachine);
    leaseManager.addLease(
        file.getFileUnderConstructionFeature().getClientName(), src);
    LocatedBlock ret = null;
    if (!newBlock) {
      // Reuse the last block if it is partial.
      ret = blockManager.convertLastBlockToUnderConstruction(file, 0);
      if (ret != null) {
        // update the quota: use the preferred block size for UC block
        final long diff = file.getPreferredBlockSize() - ret.getBlockSize();
        dir.updateSpaceConsumed(iip, 0, diff, file.getBlockReplication());
      }
    } else {
      // Client asked to append into a fresh block; hand back the current
      // last block (with no locations) so it can confirm the boundary.
      BlockInfoContiguous lastBlock = file.getLastBlock();
      if (lastBlock != null) {
        ExtendedBlock blk = new ExtendedBlock(this.getBlockPoolId(), lastBlock);
        ret = new LocatedBlock(blk, new DatanodeInfo[0]);
      }
    }
    if (writeToEditLog) {
      getEditLog().logAppendFile(src, file, newBlock, logRetryCache);
    }
    return ret;
  }
  /**
   * Recover lease;
   * Immediately revoke the lease of the current lease holder and start lease
   * recovery so that the file can be forced to be closed.
   *
   * @param src the path of the file to start lease recovery
   * @param holder the lease holder's name
   * @param clientMachine the client machine's name
   * @return true if the file is already closed
   * @throws IOException
   */
  boolean recoverLease(String src, String holder, String clientMachine)
      throws IOException {
    if (!DFSUtil.isValidName(src)) {
      throw new IOException("Invalid file name: " + src);
    }
  
    boolean skipSync = false;
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot recover the lease of " + src);
      src = dir.resolvePath(pc, src, pathComponents);
      final INodesInPath iip = dir.getINodesInPath4Write(src);
      final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
      // A file that is no longer under construction is already closed.
      if (!inode.isUnderConstruction()) {
        return true;
      }
      if (isPermissionEnabled) {
        dir.checkPathAccess(pc, iip, FsAction.WRITE);
      }
  
      // force=true: revoke the current holder's lease immediately.
      recoverLeaseInternal(RecoverLeaseOp.RECOVER_LEASE,
          iip, src, holder, clientMachine, true);
    } catch (StandbyException se) {
      // Do not sync the edit log when failing over; the standby made no
      // namespace change here.
      skipSync = true;
      throw se;
    } finally {
      writeUnlock();
      // There might be transactions logged while trying to recover the lease.
      // They need to be sync'ed even when an exception was thrown.
      if (!skipSync) {
        getEditLog().logSync();
      }
    }
    return false;
  }
private enum RecoverLeaseOp {
CREATE_FILE,
APPEND_FILE,
TRUNCATE_FILE,
RECOVER_LEASE;
private String getExceptionMessage(String src, String holder,
String clientMachine, String reason) {
return "Failed to " + this + " " + src + " for " + holder +
" on " + clientMachine + " because " + reason;
}
}
  /**
   * Decide how to handle an operation that collides with a file that is
   * under construction: either force-release the current lease (force=true),
   * start soft-limit recovery, or throw the appropriate exception telling
   * the caller the file is busy.
   *
   * @param op the operation requesting the lease (used in error messages)
   * @param iip resolved path to the file
   * @param src path of the file
   * @param holder the lease holder requesting the operation
   * @param clientMachine the requesting client machine (for error messages)
   * @param force if true, revoke the current lease immediately
   * @throws IOException if the file is already leased or recovery is
   *         in progress
   */
  void recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip,
      String src, String holder, String clientMachine, boolean force)
      throws IOException {
    assert hasWriteLock();
    // NOTE(review): asFile() is invoked before the null check below, so the
    // "file != null" guard appears unreachable when getLastINode() is null —
    // confirm against INode.asFile() semantics.
    INodeFile file = iip.getLastINode().asFile();
    if (file != null && file.isUnderConstruction()) {
      //
      // If the file is under construction , then it must be in our
      // leases. Find the appropriate lease record.
      //
      Lease lease = leaseManager.getLease(holder);
      //
      // We found the lease for this file. And surprisingly the original
      // holder is trying to recreate this file. This should never occur.
      //
      if (!force && lease != null) {
        Lease leaseFile = leaseManager.getLeaseByPath(src);
        if (leaseFile != null && leaseFile.equals(lease)) {
          // We found the lease for this file but the original
          // holder is trying to obtain it again.
          throw new AlreadyBeingCreatedException(
              op.getExceptionMessage(src, holder, clientMachine,
                  holder + " is already the current lease holder."));
        }
      }
      //
      // Find the original holder.
      //
      FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
      String clientName = uc.getClientName();
      lease = leaseManager.getLease(clientName);
      if (lease == null) {
        throw new AlreadyBeingCreatedException(
            op.getExceptionMessage(src, holder, clientMachine,
                "the file is under construction but no leases found."));
      }
      if (force) {
        // close now: no need to wait for soft lease expiration and 
        // close only the file src
        LOG.info("recoverLease: " + lease + ", src=" + src +
            " from client " + clientName);
        internalReleaseLease(lease, src, iip, holder);
      } else {
        assert lease.getHolder().equals(clientName) :
            "Current lease holder " + lease.getHolder() +
            " does not match file creator " + clientName;
        //
        // If the original holder has not renewed in the last SOFTLIMIT 
        // period, then start lease recovery.
        //
        if (lease.expiredSoftLimit()) {
          LOG.info("startFile: recover " + lease + ", src=" + src + " client "
              + clientName);
          boolean isClosed = internalReleaseLease(lease, src, iip, null);
          if(!isClosed)
            throw new RecoveryInProgressException(
                op.getExceptionMessage(src, holder, clientMachine,
                    "lease recovery is in progress. Try again later."));
        } else {
          final BlockInfoContiguous lastBlock = file.getLastBlock();
          if (lastBlock != null
              && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
            throw new RecoveryInProgressException(
                op.getExceptionMessage(src, holder, clientMachine,
                    "another recovery is in progress by "
                        + clientName + " on " + uc.getClientMachine()));
          } else {
            throw new AlreadyBeingCreatedException(
                op.getExceptionMessage(src, holder, clientMachine,
                    "this file lease is currently owned by "
                        + clientName + " on " + uc.getClientMachine()));
          }
        }
      }
    }
  }
/**
* Append to an existing file in the namespace.
*/
LastBlockWithStatus appendFile(String src, String holder,
String clientMachine, EnumSet<CreateFlag> flag, boolean logRetryCache)
throws IOException {
try {
return appendFileInt(src, holder, clientMachine,
flag.contains(CreateFlag.NEW_BLOCK), logRetryCache);
} catch (AccessControlException e) {
logAuditEvent(false, "append", src);
throw e;
}
}
  /**
   * Internal implementation of {@link #appendFile}: under the write lock,
   * reopens the file for append (recovering the lease if needed) and returns
   * the reusable last block (if partial) plus the file's status.
   *
   * @param srcArg path of the file to append to
   * @param holder lease holder (client name)
   * @param clientMachine identifier of the client machine
   * @param newBlock whether data should go to a fresh block
   * @param logRetryCache whether to record RPC ids for retry-cache rebuild
   * @return the last partial block (or null) together with the file status
   */
  private LastBlockWithStatus appendFileInt(final String srcArg, String holder,
      String clientMachine, boolean newBlock, boolean logRetryCache)
      throws IOException {
    String src = srcArg;
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src
          + ", holder=" + holder
          + ", clientMachine=" + clientMachine);
    }
    boolean skipSync = false;
    LocatedBlock lb = null;
    HdfsFileStatus stat = null;
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot append to file" + src);
      src = dir.resolvePath(pc, src, pathComponents);
      final INodesInPath iip = dir.getINodesInPath4Write(src);
      lb = appendFileInternal(pc, iip, holder, clientMachine, newBlock,
          logRetryCache);
      stat = FSDirStatAndListingOp.getFileInfo(dir, src, false,
          FSDirectory.isReservedRawName(srcArg), true);
    } catch (StandbyException se) {
      // Failing over: nothing was logged, so skip the edit log sync.
      skipSync = true;
      throw se;
    } finally {
      writeUnlock();
      // There might be transactions logged while trying to recover the lease.
      // They need to be sync'ed even when an exception was thrown.
      if (!skipSync) {
        getEditLog().logSync();
      }
    }
    if (lb != null) {
      if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file "
            +src+" for "+holder+" at "+clientMachine
            +" block " + lb.getBlock()
            +" block size " + lb.getBlock().getNumBytes());
      }
    }
    logAuditEvent(true, "append", srcArg);
    return new LastBlockWithStatus(lb, stat);
  }
ExtendedBlock getExtendedBlock(Block blk) {
return new ExtendedBlock(blockPoolId, blk);
}
void setBlockPoolId(String bpid) {
blockPoolId = bpid;
blockManager.setBlockPoolId(blockPoolId);
}
  /**
   * The client would like to obtain an additional block for the indicated
   * filename (which is being written-to).  Return an array that consists
   * of the block, plus a set of machines.  The first on this list should
   * be where the client writes data.  Subsequent items in the list must
   * be provided in the connection to the first datanode.
   *
   * Make sure the previous blocks have been reported by datanodes and
   * are replicated.  Will return an empty 2-elt array if we want the
   * client to "try again later".
   */
  LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
      ExtendedBlock previous, Set<Node> excludedNodes, 
      List<String> favoredNodes) throws IOException {
    final long blockSize;
    final int replication;
    final byte storagePolicyID;
    Node clientNode = null;
    String clientMachine = null;

    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: "
          + src + " inodeId " +  fileId  + " for " + clientName);
    }

    // Part I. Analyze the state of the file with respect to the input data.
    // Done under the read lock only: target selection below is expensive
    // and must not block other namesystem operations.
    checkOperation(OperationCategory.READ);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    FSPermissionChecker pc = getPermissionChecker();
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      src = dir.resolvePath(pc, src, pathComponents);
      LocatedBlock[] onRetryBlock = new LocatedBlock[1];
      FileState fileState = analyzeFileState(
          src, fileId, clientName, previous, onRetryBlock);
      final INodeFile pendingFile = fileState.inode;
      src = fileState.path;

      if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
        // This is a retry. Just return the last block if having locations.
        return onRetryBlock[0];
      }
      if (pendingFile.getBlocks().length >= maxBlocksPerFile) {
        throw new IOException("File has reached the limit on maximum number of"
            + " blocks (" + DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY
            + "): " + pendingFile.getBlocks().length + " >= "
            + maxBlocksPerFile);
      }
      blockSize = pendingFile.getPreferredBlockSize();
      clientMachine = pendingFile.getFileUnderConstructionFeature()
          .getClientMachine();
      clientNode = blockManager.getDatanodeManager().getDatanodeByHost(
          clientMachine);
      replication = pendingFile.getFileReplication();
      storagePolicyID = pendingFile.getStoragePolicyID();
    } finally {
      readUnlock();
    }

    if (clientNode == null) {
      clientNode = getClientNode(clientMachine);
    }

    // choose targets for the new block to be allocated.
    // Runs without any namesystem lock held.
    final DatanodeStorageInfo targets[] = getBlockManager().chooseTarget4NewBlock( 
        src, replication, clientNode, excludedNodes, blockSize, favoredNodes,
        storagePolicyID);

    // Part II.
    // Allocate a new block, add it to the INode and the BlocksMap. 
    Block newBlock = null;
    long offset;
    checkOperation(OperationCategory.WRITE);
    waitForLoadingFSImage();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      // Run the full analysis again, since things could have changed
      // while chooseTarget() was executing.
      LocatedBlock[] onRetryBlock = new LocatedBlock[1];
      FileState fileState = 
          analyzeFileState(src, fileId, clientName, previous, onRetryBlock);
      final INodeFile pendingFile = fileState.inode;
      src = fileState.path;

      if (onRetryBlock[0] != null) {
        if (onRetryBlock[0].getLocations().length > 0) {
          // This is a retry. Just return the last block if having locations.
          return onRetryBlock[0];
        } else {
          // add new chosen targets to already allocated block and return
          BlockInfoContiguous lastBlockInFile = pendingFile.getLastBlock();
          ((BlockInfoContiguousUnderConstruction) lastBlockInFile)
              .setExpectedLocations(targets);
          offset = pendingFile.computeFileSize();
          return makeLocatedBlock(lastBlockInFile, targets, offset);
        }
      }

      // commit the last block and complete it if it has minimum replicas
      commitOrCompleteLastBlock(pendingFile, fileState.iip,
                                ExtendedBlock.getLocalBlock(previous));

      // allocate new block, record block locations in INode.
      newBlock = createNewBlock();
      INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
      saveAllocatedBlock(src, inodesInPath, newBlock, targets);

      persistNewBlock(src, pendingFile);
      offset = pendingFile.computeFileSize();
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();

    // Return located block
    return makeLocatedBlock(newBlock, targets, offset);
  }
/*
* Resolve clientmachine address to get a network location path
*/
private Node getClientNode(String clientMachine) {
List<String> hosts = new ArrayList<String>(1);
hosts.add(clientMachine);
List<String> rName = getBlockManager().getDatanodeManager()
.resolveNetworkLocation(hosts);
Node clientNode = null;
if (rName != null) {
// Able to resolve clientMachine mapping.
// Create a temp node to findout the rack local nodes
clientNode = new NodeBase(rName.get(0) + NodeBase.PATH_SEPARATOR_STR
+ clientMachine);
}
return clientNode;
}
  /**
   * Immutable triple returned by {@link #analyzeFileState}: the file under
   * construction, its (possibly re-resolved) path, and the resolved
   * INodesInPath used for subsequent operations.
   */
  static class FileState {
    // The file being written to (always under construction).
    public final INodeFile inode;
    // Resolved full path of the file.
    public final String path;
    // Resolved path components for the file.
    public final INodesInPath iip;

    public FileState(INodeFile inode, String fullPath, INodesInPath iip) {
      this.inode = inode;
      this.path = fullPath;
      this.iip = iip;
    }
  }
  /**
   * Validate the state of a file under construction before allocating a new
   * block: checks safe mode and fs-object limits, resolves the inode (by id
   * for new clients, by path for old ones), verifies the lease, and detects
   * retried allocateBlock RPCs (see the case analysis in the body).
   *
   * @param src path of the file
   * @param fileId inode id, or GRANDFATHER_INODE_ID for legacy clients
   * @param clientName lease holder
   * @param previous the block the client believes is the current last block
   * @param onRetryBlock out-parameter; set to the previously allocated block
   *        when this call is recognized as a retry
   * @return the resolved file state
   * @throws IOException on lease mismatch, bogus 'previous' block, or
   *         under-replicated penultimate block
   */
  FileState analyzeFileState(String src,
                                long fileId,
                                String clientName,
                                ExtendedBlock previous,
                                LocatedBlock[] onRetryBlock)
          throws IOException  {
    assert hasReadLock();

    checkBlock(previous);
    onRetryBlock[0] = null;
    checkNameNodeSafeMode("Cannot add block to " + src);

    // have we exceeded the configured limit of fs objects.
    checkFsObjectLimit();

    Block previousBlock = ExtendedBlock.getLocalBlock(previous);
    final INode inode;
    final INodesInPath iip;
    if (fileId == INodeId.GRANDFATHER_INODE_ID) {
      // Older clients may not have given us an inode ID to work with.
      // In this case, we have to try to resolve the path and hope it
      // hasn't changed or been deleted since the file was opened for write.
      iip = dir.getINodesInPath4Write(src);
      inode = iip.getLastINode();
    } else {
      // Newer clients pass the inode ID, so we can just get the inode
      // directly.
      inode = dir.getInode(fileId);
      iip = INodesInPath.fromINode(inode);
      if (inode != null) {
        src = iip.getPath();
      }
    }
    final INodeFile pendingFile = checkLease(src, clientName, inode, fileId);
    BlockInfoContiguous lastBlockInFile = pendingFile.getLastBlock();
    if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
      // The block that the client claims is the current last block
      // doesn't match up with what we think is the last block. There are
      // four possibilities:
      // 1) This is the first block allocation of an append() pipeline
      //    which started appending exactly at a block boundary.
      //    In this case, the client isn't passed the previous block,
      //    so it makes the allocateBlock() call with previous=null.
      //    We can distinguish this since the last block of the file
      //    will be exactly a full block.
      // 2) This is a retry from a client that missed the response of a
      //    prior getAdditionalBlock() call, perhaps because of a network
      //    timeout, or because of an HA failover. In that case, we know
      //    by the fact that the client is re-issuing the RPC that it
      //    never began to write to the old block. Hence it is safe to
      //    to return the existing block.
      // 3) This is an entirely bogus request/bug -- we should error out
      //    rather than potentially appending a new block with an empty
      //    one in the middle, etc
      // 4) This is a retry from a client that timed out while
      //    the prior getAdditionalBlock() is still being processed,
      //    currently working on chooseTarget(). 
      //    There are no means to distinguish between the first and 
      //    the second attempts in Part I, because the first one hasn't
      //    changed the namesystem state yet.
      //    We run this analysis again in Part II where case 4 is impossible.

      BlockInfoContiguous penultimateBlock = pendingFile.getPenultimateBlock();
      if (previous == null &&
          lastBlockInFile != null &&
          lastBlockInFile.getNumBytes() == pendingFile.getPreferredBlockSize() &&
          lastBlockInFile.isComplete()) {
        // Case 1
        if (NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug(
               "BLOCK* NameSystem.allocateBlock: handling block allocation" +
               " writing to a file with a complete previous block: src=" +
               src + " lastBlock=" + lastBlockInFile);
        }
      } else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
        if (lastBlockInFile.getNumBytes() != 0) {
          throw new IOException(
              "Request looked like a retry to allocate block " +
              lastBlockInFile + " but it already contains " +
              lastBlockInFile.getNumBytes() + " bytes");
        }

        // Case 2
        // Return the last block.
        NameNode.stateChangeLog.info("BLOCK* allocateBlock: " +
            "caught retry for allocation of a new block in " +
            src + ". Returning previously allocated block " + lastBlockInFile);
        long offset = pendingFile.computeFileSize();
        onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
            ((BlockInfoContiguousUnderConstruction)lastBlockInFile).getExpectedStorageLocations(),
            offset);
        return new FileState(pendingFile, src, iip);
      } else {
        // Case 3
        throw new IOException("Cannot allocate block in " + src + ": " +
            "passed 'previous' block " + previous + " does not match actual " +
            "last block in file " + lastBlockInFile);
      }
    }

    // Check if the penultimate block is minimally replicated
    if (!checkFileProgress(src, pendingFile, false)) {
      throw new NotReplicatedYetException("Not replicated yet: " + src);
    }
    return new FileState(pendingFile, src, iip);
  }
LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
long offset) throws IOException {
LocatedBlock lBlk = new LocatedBlock(
getExtendedBlock(blk), locs, offset, false);
getBlockManager().setBlockToken(
lBlk, BlockTokenSecretManager.AccessMode.WRITE);
return lBlk;
}
  /**
   * Choose additional datanodes for an existing pipeline (datanode
   * replacement on write failure). Reads the file state under the read lock,
   * then picks new targets without holding any namesystem lock.
   *
   * @see ClientProtocol#getAdditionalDatanode
   */
  LocatedBlock getAdditionalDatanode(String src, long fileId,
      final ExtendedBlock blk, final DatanodeInfo[] existings,
      final String[] storageIDs,
      final Set<Node> excludes,
      final int numAdditionalNodes, final String clientName
      ) throws IOException {
    //check if the feature is enabled
    dtpReplaceDatanodeOnFailure.checkEnabled();

    Node clientnode = null;
    String clientMachine;
    final long preferredblocksize;
    final byte storagePolicyID;
    final List<DatanodeStorageInfo> chosen;
    checkOperation(OperationCategory.READ);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    FSPermissionChecker pc = getPermissionChecker();
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      //check safe mode
      checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
      src = dir.resolvePath(pc, src, pathComponents);

      //check lease
      final INode inode;
      if (fileId == INodeId.GRANDFATHER_INODE_ID) {
        // Older clients may not have given us an inode ID to work with.
        // In this case, we have to try to resolve the path and hope it
        // hasn't changed or been deleted since the file was opened for write.
        inode = dir.getINode(src);
      } else {
        inode = dir.getInode(fileId);
        if (inode != null) src = inode.getFullPathName();
      }
      final INodeFile file = checkLease(src, clientName, inode, fileId);
      clientMachine = file.getFileUnderConstructionFeature().getClientMachine();
      clientnode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
      preferredblocksize = file.getPreferredBlockSize();
      storagePolicyID = file.getStoragePolicyID();

      //find datanode storages
      final DatanodeManager dm = blockManager.getDatanodeManager();
      chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs));
    } finally {
      readUnlock();
    }

    if (clientnode == null) {
      clientnode = getClientNode(clientMachine);
    }

    // choose new datanodes.
    // Done outside the lock: placement decisions can be expensive.
    final DatanodeStorageInfo[] targets = blockManager.chooseTarget4AdditionalDatanode(
        src, numAdditionalNodes, clientnode, chosen, 
        excludes, preferredblocksize, storagePolicyID);
    final LocatedBlock lb = new LocatedBlock(blk, targets);
    blockManager.setBlockToken(lb, AccessMode.COPY);
    return lb;
  }
  /**
   * The client would like to let go of the given block: remove it from the
   * file's pending-creates list and log the change. Used when a client gives
   * up on a block (e.g. after pipeline setup failure) before writing data.
   *
   * @param b      the block being abandoned
   * @param fileId inode id of the file, or GRANDFATHER_INODE_ID for old clients
   * @param src    path of the file the block belongs to
   * @param holder lease holder (client name) that opened the file
   * @return true always; the call either succeeds or throws
   * @throws IOException if the lease check fails or the NN is in safe mode
   */
  boolean abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
      throws IOException {
    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b
          + "of file " + src);
    }
    checkOperation(OperationCategory.WRITE);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    FSPermissionChecker pc = getPermissionChecker();
    waitForLoadingFSImage();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot abandon block " + b + " for file" + src);
      src = dir.resolvePath(pc, src, pathComponents);
      final INode inode;
      final INodesInPath iip;
      if (fileId == INodeId.GRANDFATHER_INODE_ID) {
        // Older clients may not have given us an inode ID to work with.
        // In this case, we have to try to resolve the path and hope it
        // hasn't changed or been deleted since the file was opened for write.
        iip = dir.getINodesInPath(src, true);
        inode = iip.getLastINode();
      } else {
        inode = dir.getInode(fileId);
        iip = INodesInPath.fromINode(inode);
        if (inode != null) {
          src = iip.getPath();
        }
      }
      final INodeFile file = checkLease(src, holder, inode, fileId);
      // Remove the block from the pending creates list
      boolean removed = dir.removeBlock(src, iip, file,
          ExtendedBlock.getLocalBlock(b));
      if (!removed) {
        // Block was already gone; treat as success (possible RPC retry).
        return true;
      }
      if(NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: "
                                      + b + " is removed from pendingCreates");
      }
      persistBlocks(src, file, false);
    } finally {
      writeUnlock();
    }
    // Sync the edit log outside the write lock.
    getEditLog().logSync();
    return true;
  }
private INodeFile checkLease(String src, String holder, INode inode,
long fileId) throws LeaseExpiredException, FileNotFoundException {
assert hasReadLock();
final String ident = src + " (inode " + fileId + ")";
if (inode == null) {
Lease lease = leaseManager.getLease(holder);
throw new LeaseExpiredException(
"No lease on " + ident + ": File does not exist. "
+ (lease != null ? lease.toString()
: "Holder " + holder + " does not have any open files."));
}
if (!inode.isFile()) {
Lease lease = leaseManager.getLease(holder);
throw new LeaseExpiredException(
"No lease on " + ident + ": INode is not a regular file. "
+ (lease != null ? lease.toString()
: "Holder " + holder + " does not have any open files."));
}
final INodeFile file = inode.asFile();
if (!file.isUnderConstruction()) {
Lease lease = leaseManager.getLease(holder);
throw new LeaseExpiredException(
"No lease on " + ident + ": File is not open for writing. "
+ (lease != null ? lease.toString()
: "Holder " + holder + " does not have any open files."));
}
// No further modification is allowed on a deleted file.
// A file is considered deleted, if it is not in the inodeMap or is marked
// as deleted in the snapshot feature.
if (isFileDeleted(file)) {
throw new FileNotFoundException(src);
}
String clientName = file.getFileUnderConstructionFeature().getClientName();
if (holder != null && !clientName.equals(holder)) {
throw new LeaseExpiredException("Lease mismatch on " + ident +
" owned by " + clientName + " but is accessed by " + holder);
}
return file;
}
  /**
   * Complete in-progress write to the given file.
   * @param srcArg path of the file being completed
   * @param holder lease holder (client name)
   * @param last   the client's view of the last block of the file
   * @param fileId inode id, or GRANDFATHER_INODE_ID for old clients
   * @return true if successful, false if the client should continue to retry
   *         (e.g if not all blocks have reached minimum replication yet)
   * @throws IOException on error (eg lease mismatch, file not open, file deleted)
   */
  boolean completeFile(final String srcArg, String holder,
                       ExtendedBlock last, long fileId)
    throws SafeModeException, UnresolvedLinkException, IOException {
    String src = srcArg;
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* NameSystem.completeFile: " +
          src + " for " + holder);
    }
    // Validate the block belongs to this cluster's block pool.
    checkBlock(last);
    boolean success = false;
    checkOperation(OperationCategory.WRITE);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    FSPermissionChecker pc = getPermissionChecker();
    waitForLoadingFSImage();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot complete file " + src);
      src = dir.resolvePath(pc, src, pathComponents);
      success = completeFileInternal(src, holder,
        ExtendedBlock.getLocalBlock(last), fileId);
    } finally {
      writeUnlock();
    }
    // Sync edits outside the lock; log success only after the sync.
    getEditLog().logSync();
    if (success) {
      NameNode.stateChangeLog.info("DIR* completeFile: " + srcArg
          + " is closed by " + holder);
    }
    return success;
  }
  /**
   * Core of {@link #completeFile}: verify the lease, commit/complete the last
   * block, and finalize the file if all blocks are sufficiently replicated.
   * Must be called with the write lock held.
   *
   * @return true if the file was closed; false if the client should retry
   *         because minimum replication has not been reached yet
   * @throws IOException on lease mismatch or other errors
   */
  private boolean completeFileInternal(String src, String holder, Block last,
      long fileId) throws IOException {
    assert hasWriteLock();
    final INodeFile pendingFile;
    final INodesInPath iip;
    INode inode = null;
    try {
      if (fileId == INodeId.GRANDFATHER_INODE_ID) {
        // Older clients may not have given us an inode ID to work with.
        // In this case, we have to try to resolve the path and hope it
        // hasn't changed or been deleted since the file was opened for write.
        iip = dir.getINodesInPath(src, true);
        inode = iip.getLastINode();
      } else {
        inode = dir.getInode(fileId);
        iip = INodesInPath.fromINode(inode);
        if (inode != null) {
          src = iip.getPath();
        }
      }
      pendingFile = checkLease(src, holder, inode, fileId);
    } catch (LeaseExpiredException lee) {
      if (inode != null && inode.isFile() &&
          !inode.asFile().isUnderConstruction()) {
        // This could be a retry RPC - i.e the client tried to close
        // the file, but missed the RPC response. Thus, it is trying
        // again to close the file. If the file still exists and
        // the client's view of the last block matches the actual
        // last block, then we'll treat it as a successful close.
        // See HDFS-3031.
        final Block realLastBlock = inode.asFile().getLastBlock();
        if (Block.matchingIdAndGenStamp(last, realLastBlock)) {
          NameNode.stateChangeLog.info("DIR* completeFile: " +
              "request from " + holder + " to complete inode " + fileId +
              "(" + src + ") which is already closed. But, it appears to be " +
              "an RPC retry. Returning success");
          return true;
        }
      }
      throw lee;
    }
    // Check the state of the penultimate block. It should be completed
    // before attempting to complete the last one.
    if (!checkFileProgress(src, pendingFile, false)) {
      return false;
    }
    // commit the last block and complete it if it has minimum replicas
    commitOrCompleteLastBlock(pendingFile, iip, last);
    // Re-check all blocks now that the last one has been committed.
    if (!checkFileProgress(src, pendingFile, true)) {
      return false;
    }
    finalizeINodeFileUnderConstruction(src, pendingFile,
        Snapshot.CURRENT_STATE_ID);
    return true;
  }
  /**
   * Save allocated block at the given pending filename
   *
   * @param src path to the file
   * @param inodesInPath representing each of the components of src.
   *                     The last INode is the INode for {@code src} file.
   * @param newBlock newly allocated block to be save
   * @param targets target datanodes where replicas of the new block is placed
   * @return the stored {@link BlockInfoContiguous} for the new block
   * @throws QuotaExceededException If addition of block exceeds space quota
   */
  BlockInfoContiguous saveAllocatedBlock(String src, INodesInPath inodesInPath,
      Block newBlock, DatanodeStorageInfo[] targets)
          throws IOException {
    assert hasWriteLock();
    BlockInfoContiguous b = dir.addBlock(src, inodesInPath, newBlock, targets);
    NameNode.stateChangeLog.info("BLOCK* allocate " + b + " for " + src);
    // Track the pending replica writes so scheduling accounts for them.
    DatanodeStorageInfo.incrementBlocksScheduled(targets);
    return b;
  }
  /**
   * Create new block with a unique block id and a new generation stamp.
   * Must be called with the write lock held.
   *
   * @return a zero-length {@link Block} with fresh id and generation stamp
   * @throws IOException if a new block id or generation stamp cannot be issued
   */
  Block createNewBlock() throws IOException {
    assert hasWriteLock();
    Block b = new Block(nextBlockId(), 0, 0);
    // Increment the generation stamp for every new block.
    b.setGenerationStamp(nextGenerationStamp(false));
    return b;
  }
/**
* Check that the indicated file's blocks are present and
* replicated. If not, return false. If checkall is true, then check
* all blocks, otherwise check only penultimate block.
*/
private boolean checkFileProgress(String src, INodeFile v, boolean checkall) {
readLock();
try {
if (checkall) {
// check all blocks of the file.
for (BlockInfoContiguous block: v.getBlocks()) {
if (!isCompleteBlock(src, block, blockManager.minReplication)) {
return false;
}
}
} else {
// check the penultimate block of this file
BlockInfoContiguous b = v.getPenultimateBlock();
if (b != null
&& !isCompleteBlock(src, b, blockManager.minReplication)) {
return false;
}
}
return true;
} finally {
readUnlock();
}
}
private static boolean isCompleteBlock(String src, BlockInfoContiguous b, int minRepl) {
if (!b.isComplete()) {
final BlockInfoContiguousUnderConstruction uc = (BlockInfoContiguousUnderConstruction)b;
final int numNodes = b.numNodes();
LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = "
+ uc.getBlockUCState() + ", replication# = " + numNodes
+ (numNodes < minRepl? " < ": " >= ")
+ " minimum = " + minRepl + ") in file " + src);
return false;
}
return true;
}
////////////////////////////////////////////////////////////////
// Here's how to handle block-copy failure during client write:
// -- As usual, the client's write should result in a streaming
// backup write to a k-machine sequence.
// -- If one of the backup machines fails, no worries. Fail silently.
// -- Before client is allowed to close and finalize file, make sure
// that the blocks are backed up. Namenode may have to issue specific backup
// commands to make up for earlier datanode failures. Once all copies
// are made, edit namespace and return to client.
////////////////////////////////////////////////////////////////
  /**
   * Change the indicated filename.
   * @param src           current path of the file or directory
   * @param dst           new path
   * @param logRetryCache whether to record this op in the retry cache
   * @return true if the rename succeeded
   * @deprecated Use {@link #renameTo(String, String, boolean,
   * Options.Rename...)} instead.
   */
  @Deprecated
  boolean renameTo(String src, String dst, boolean logRetryCache)
      throws IOException {
    waitForLoadingFSImage();
    checkOperation(OperationCategory.WRITE);
    FSDirRenameOp.RenameOldResult ret = null;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot rename " + src);
      ret = FSDirRenameOp.renameToInt(dir, src, dst, logRetryCache);
    } catch (AccessControlException e) {
      // Audit the denial, then propagate to the caller.
      logAuditEvent(false, "rename", src, dst, null);
      throw e;
    } finally {
      writeUnlock();
    }
    boolean success = ret != null && ret.success;
    if (success) {
      // Sync edits outside the write lock.
      getEditLog().logSync();
    }
    logAuditEvent(success, "rename", src, dst,
        ret == null ? null : ret.auditStat);
    return success;
  }
  /**
   * Rename {@code src} to {@code dst} honoring the given {@link Options.Rename}
   * flags (e.g. OVERWRITE). Blocks belonging to an overwritten destination are
   * removed incrementally after the lock is released.
   *
   * @throws IOException on failure (safe mode, permission, invalid paths, ...)
   */
  void renameTo(final String src, final String dst,
                boolean logRetryCache, Options.Rename... options)
      throws IOException {
    waitForLoadingFSImage();
    checkOperation(OperationCategory.WRITE);
    Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> res = null;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot rename " + src);
      res = FSDirRenameOp.renameToInt(dir, src, dst, logRetryCache, options);
    } catch (AccessControlException e) {
      logAuditEvent(false, "rename (options=" + Arrays.toString(options) +
          ")", src, dst, null);
      throw e;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    BlocksMapUpdateInfo collectedBlocks = res.getKey();
    HdfsFileStatus auditStat = res.getValue();
    if (!collectedBlocks.getToDeleteList().isEmpty()) {
      // Blocks of an overwritten destination are deleted incrementally,
      // outside the namesystem write lock.
      removeBlocks(collectedBlocks);
      collectedBlocks.clear();
    }
    logAuditEvent(true, "rename (options=" + Arrays.toString(options) +
        ")", src, dst, auditStat);
  }
  /**
   * Remove the indicated file from namespace.
   *
   * @param src           path to delete
   * @param recursive     whether to delete non-empty directories
   * @param logRetryCache whether to record this op in the retry cache
   * @return true if the path was deleted
   * @see ClientProtocol#delete(String, boolean) for detailed description and
   *      description of exceptions
   */
  boolean delete(String src, boolean recursive, boolean logRetryCache)
      throws IOException {
    waitForLoadingFSImage();
    checkOperation(OperationCategory.WRITE);
    BlocksMapUpdateInfo toRemovedBlocks = null;
    writeLock();
    boolean ret = false;
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot delete " + src);
      toRemovedBlocks = FSDirDeleteOp.delete(
          this, src, recursive, logRetryCache);
      ret = toRemovedBlocks != null;
    } catch (AccessControlException e) {
      logAuditEvent(false, "delete", src);
      throw e;
    } finally {
      writeUnlock();
    }
    if (toRemovedBlocks != null) {
      removeBlocks(toRemovedBlocks); // Incremental deletion of blocks
    }
    logAuditEvent(true, "delete", src);
    return ret;
  }
  /**
   * Returns a permission checker for the current caller, delegating to the
   * directory layer.
   *
   * @throws AccessControlException if the caller's identity cannot be resolved
   */
  FSPermissionChecker getPermissionChecker()
      throws AccessControlException {
    return dir.getPermissionChecker();
  }
  /**
   * From the given list, incrementally remove the blocks from blockManager
   * Writelock is dropped and reacquired every BLOCK_DELETION_INCREMENT to
   * ensure that other waiters on the lock can get in. See HDFS-2938
   *
   * @param blocks
   *          An instance of {@link BlocksMapUpdateInfo} which contains a list
   *          of blocks that need to be removed from blocksMap
   */
  void removeBlocks(BlocksMapUpdateInfo blocks) {
    List<Block> toDeleteList = blocks.getToDeleteList();
    Iterator<Block> iter = toDeleteList.iterator();
    while (iter.hasNext()) {
      writeLock();
      try {
        // Delete at most BLOCK_DELETION_INCREMENT blocks per lock hold so
        // other operations are not starved during large deletions.
        for (int i = 0; i < BLOCK_DELETION_INCREMENT && iter.hasNext(); i++) {
          blockManager.removeBlock(iter.next());
        }
      } finally {
        writeUnlock();
      }
    }
  }
  /**
   * Remove leases and inodes related to a given path
   * @param src The given path
   * @param removedINodes Containing the list of inodes to be removed from
   *                      inodesMap; cleared after processing. May be null.
   * @param acquireINodeMapLock Whether to acquire the lock for inode removal
   */
  void removeLeasesAndINodes(String src, List<INode> removedINodes,
      final boolean acquireINodeMapLock) {
    assert hasWriteLock();
    // Drop all leases whose path starts with src.
    leaseManager.removeLeaseWithPrefixPath(src);
    // remove inodes from inodesMap
    if (removedINodes != null) {
      if (acquireINodeMapLock) {
        dir.writeLock();
      }
      try {
        dir.removeFromInodeMap(removedINodes);
      } finally {
        if (acquireINodeMapLock) {
          dir.writeUnlock();
        }
      }
      // The caller's list is consumed; clear it to avoid double removal.
      removedINodes.clear();
    }
  }
/**
* Removes the blocks from blocksmap and updates the safemode blocks total
*
* @param blocks
* An instance of {@link BlocksMapUpdateInfo} which contains a list
* of blocks that need to be removed from blocksMap
*/
void removeBlocksAndUpdateSafemodeTotal(BlocksMapUpdateInfo blocks) {
assert hasWriteLock();
// In the case that we are a Standby tailing edits from the
// active while in safe-mode, we need to track the total number
// of blocks and safe blocks in the system.
boolean trackBlockCounts = isSafeModeTrackingBlocks();
int numRemovedComplete = 0, numRemovedSafe = 0;
for (Block b : blocks.getToDeleteList()) {
if (trackBlockCounts) {
BlockInfoContiguous bi = getStoredBlock(b);
if (bi.isComplete()) {
numRemovedComplete++;
if (bi.numNodes() >= blockManager.minReplication) {
numRemovedSafe++;
}
}
}
blockManager.removeBlock(b);
}
if (trackBlockCounts) {
if (LOG.isDebugEnabled()) {
LOG.debug("Adjusting safe-mode totals for deletion."
+ "decreasing safeBlocks by " + numRemovedSafe
+ ", totalBlocks by " + numRemovedComplete);
}
adjustSafeModeBlockTotals(-numRemovedSafe, -numRemovedComplete);
}
}
/**
* @see SafeModeInfo#shouldIncrementallyTrackBlocks
*/
private boolean isSafeModeTrackingBlocks() {
if (!haEnabled) {
// Never track blocks incrementally in non-HA code.
return false;
}
SafeModeInfo sm = this.safeMode;
return sm != null && sm.shouldIncrementallyTrackBlocks();
}
  /**
   * Get the file info for a specific file.
   *
   * @param src The string representation of the path to the file
   * @param resolveLink whether to throw UnresolvedLinkException
   *        if src refers to a symlink
   *
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if a symlink is encountered.
   *
   * @return object containing information regarding the file
   *         or null if file not found
   * @throws StandbyException
   */
  HdfsFileStatus getFileInfo(final String src, boolean resolveLink)
    throws IOException {
    checkOperation(OperationCategory.READ);
    HdfsFileStatus stat = null;
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      stat = FSDirStatAndListingOp.getFileInfo(dir, src, resolveLink);
    } catch (AccessControlException e) {
      // Audit the denial, then propagate.
      logAuditEvent(false, "getfileinfo", src);
      throw e;
    } finally {
      readUnlock();
    }
    logAuditEvent(true, "getfileinfo", src);
    return stat;
  }
  /**
   * Returns true if the file is closed (i.e. not under construction).
   *
   * @param src path of the file to check
   * @throws IOException if access is denied or the file does not exist
   */
  boolean isFileClosed(final String src) throws IOException {
    checkOperation(OperationCategory.READ);
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      return FSDirStatAndListingOp.isFileClosed(dir, src);
    } catch (AccessControlException e) {
      // Only access denials are audited for this op.
      logAuditEvent(false, "isFileClosed", src);
      throw e;
    } finally {
      readUnlock();
    }
  }
  /**
   * Create all the necessary directories
   *
   * @param src          path of the directory to create
   * @param permissions  permissions to apply to the new directories
   * @param createParent whether missing parent directories may be created
   * @return true on success
   * @throws IOException if in safe mode, access is denied, or creation fails
   */
  boolean mkdirs(String src, PermissionStatus permissions,
      boolean createParent) throws IOException {
    HdfsFileStatus auditStat = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot create directory " + src);
      auditStat = FSDirMkdirOp.mkdirs(this, src, permissions, createParent);
    } catch (AccessControlException e) {
      logAuditEvent(false, "mkdirs", src);
      throw e;
    } finally {
      writeUnlock();
    }
    // Sync edits outside the write lock.
    getEditLog().logSync();
    logAuditEvent(true, "mkdirs", src, null, auditStat);
    return true;
  }
  /**
   * Get the content summary for a specific file/dir.
   *
   * @param src The string representation of the path to the file
   *
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if a symlink is encountered.
   * @throws FileNotFoundException if no file exists
   * @throws StandbyException
   * @throws IOException for issues with writing to the audit log
   *
   * @return object containing information regarding the file
   *         or null if file not found
   */
  ContentSummary getContentSummary(final String src) throws IOException {
    readLock();
    // NOTE(review): only AccessControlException flips the audit flag to
    // false; other failures (e.g. FileNotFoundException) are audited as
    // success. Presumably intentional — confirm against audit-log policy.
    boolean success = true;
    try {
      return FSDirStatAndListingOp.getContentSummary(dir, src);
    } catch (AccessControlException ace) {
      success = false;
      throw ace;
    } finally {
      readUnlock();
      logAuditEvent(success, "contentSummary", src);
    }
  }
  /**
   * Set the namespace quota and diskspace quota for a directory.
   * See {@link ClientProtocol#setQuota(String, long, long, StorageType)} for the
   * contract.
   *
   * Note: This does not support ".inodes" relative path.
   *
   * @param src     directory whose quota is being set
   * @param nsQuota namespace (file/dir count) quota
   * @param dsQuota disk space quota
   * @param type    storage type the space quota applies to, or null
   */
  void setQuota(String src, long nsQuota, long dsQuota, StorageType type)
      throws IOException {
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot set quota on " + src);
      FSDirAttrOp.setQuota(dir, src, nsQuota, dsQuota, type);
    } finally {
      writeUnlock();
    }
    // Sync edits outside the write lock.
    getEditLog().logSync();
  }
  /** Persist all metadata about this file.
   * @param src The string representation of the path
   * @param fileId The inode ID that we're fsyncing.  Older clients will pass
   *               INodeId.GRANDFATHER_INODE_ID here.
   * @param clientName The string representation of the client
   * @param lastBlockLength The length of the last block
   *                        under construction reported from client.
   * @throws IOException if path does not exist
   */
  void fsync(String src, long fileId, String clientName, long lastBlockLength)
      throws IOException {
    NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
    checkOperation(OperationCategory.WRITE);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    FSPermissionChecker pc = getPermissionChecker();
    waitForLoadingFSImage();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot fsync file " + src);
      src = dir.resolvePath(pc, src, pathComponents);
      final INode inode;
      if (fileId == INodeId.GRANDFATHER_INODE_ID) {
        // Older clients may not have given us an inode ID to work with.
        // In this case, we have to try to resolve the path and hope it
        // hasn't changed or been deleted since the file was opened for write.
        inode = dir.getINode(src);
      } else {
        inode = dir.getInode(fileId);
        if (inode != null) src = inode.getFullPathName();
      }
      final INodeFile pendingFile = checkLease(src, clientName, inode, fileId);
      if (lastBlockLength > 0) {
        // Record the client-reported length of the in-progress last block.
        pendingFile.getFileUnderConstructionFeature().updateLengthOfLastBlock(
            pendingFile, lastBlockLength);
      }
      persistBlocks(src, pendingFile, false);
    } finally {
      writeUnlock();
    }
    // Sync edits outside the write lock.
    getEditLog().logSync();
  }
  /**
   * Move a file that is being written to be immutable.
   * @param lease The lease for the client creating the file
   * @param src The filename
   * @param iip the resolved path components for {@code src}
   * @param recoveryLeaseHolder reassign lease to this holder if the last block
   *        needs recovery; keep current holder if null.
   * @throws AlreadyBeingCreatedException if file is waiting to achieve minimal
   *         replication;<br>
   *         RecoveryInProgressException if lease recovery is in progress.<br>
   *         IOException in case of an error.
   * @return true  if file has been successfully finalized and closed or
   *         false if block recovery has been initiated. Since the lease owner
   *         has been changed and logged, caller should call logSync().
   */
  boolean internalReleaseLease(Lease lease, String src, INodesInPath iip,
      String recoveryLeaseHolder) throws IOException {
    LOG.info("Recovering " + lease + ", src=" + src);
    assert !isInSafeMode();
    assert hasWriteLock();
    final INodeFile pendingFile = iip.getLastINode().asFile();
    int nrBlocks = pendingFile.numBlocks();
    BlockInfoContiguous[] blocks = pendingFile.getBlocks();
    // Count the leading run of COMPLETE blocks; curBlock ends up as the
    // first non-complete block (if any).
    int nrCompleteBlocks;
    BlockInfoContiguous curBlock = null;
    for(nrCompleteBlocks = 0; nrCompleteBlocks < nrBlocks; nrCompleteBlocks++) {
      curBlock = blocks[nrCompleteBlocks];
      if(!curBlock.isComplete())
        break;
      assert blockManager.checkMinReplication(curBlock) :
              "A COMPLETE block is not minimally replicated in " + src;
    }
    // If there are no incomplete blocks associated with this file,
    // then reap lease immediately and close the file.
    if(nrCompleteBlocks == nrBlocks) {
      finalizeINodeFileUnderConstruction(src, pendingFile,
          iip.getLatestSnapshotId());
      NameNode.stateChangeLog.warn("BLOCK*"
        + " internalReleaseLease: All existing blocks are COMPLETE,"
        + " lease removed, file closed.");
      return true;  // closed!
    }
    // Only the last and the penultimate blocks may be in non COMPLETE state.
    // If the penultimate block is not COMPLETE, then it must be COMMITTED.
    if(nrCompleteBlocks < nrBlocks - 2 ||
       nrCompleteBlocks == nrBlocks - 2 &&
         curBlock != null &&
         curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
      final String message = "DIR* NameSystem.internalReleaseLease: "
        + "attempt to release a create lock on "
        + src + " but file is already closed.";
      NameNode.stateChangeLog.warn(message);
      throw new IOException(message);
    }
    // The last block is not COMPLETE, and
    // that the penultimate block if exists is either COMPLETE or COMMITTED
    final BlockInfoContiguous lastBlock = pendingFile.getLastBlock();
    BlockUCState lastBlockState = lastBlock.getBlockUCState();
    BlockInfoContiguous penultimateBlock = pendingFile.getPenultimateBlock();
    // If penultimate block doesn't exist then its minReplication is met
    boolean penultimateBlockMinReplication = penultimateBlock == null ? true :
        blockManager.checkMinReplication(penultimateBlock);
    switch(lastBlockState) {
    case COMPLETE:
      assert false : "Already checked that the last block is incomplete";
      break;
    case COMMITTED:
      // Close file if committed blocks are minimally replicated
      if(penultimateBlockMinReplication &&
          blockManager.checkMinReplication(lastBlock)) {
        finalizeINodeFileUnderConstruction(src, pendingFile,
            iip.getLatestSnapshotId());
        NameNode.stateChangeLog.warn("BLOCK*"
          + " internalReleaseLease: Committed blocks are minimally replicated,"
          + " lease removed, file closed.");
        return true;  // closed!
      }
      // Cannot close file right now, since some blocks
      // are not yet minimally replicated.
      // This may potentially cause infinite loop in lease recovery
      // if there are no valid replicas on data-nodes.
      String message = "DIR* NameSystem.internalReleaseLease: " +
          "Failed to release lease for file " + src +
          ". Committed blocks are waiting to be minimally replicated." +
          " Try again later.";
      NameNode.stateChangeLog.warn(message);
      throw new AlreadyBeingCreatedException(message);
    case UNDER_CONSTRUCTION:
    case UNDER_RECOVERY:
      final BlockInfoContiguousUnderConstruction uc = (BlockInfoContiguousUnderConstruction)lastBlock;
      // determine if last block was intended to be truncated
      Block recoveryBlock = uc.getTruncateBlock();
      boolean truncateRecovery = recoveryBlock != null;
      // copy-on-truncate recovery uses a different block id for the new copy
      boolean copyOnTruncate = truncateRecovery &&
          recoveryBlock.getBlockId() != uc.getBlockId();
      assert !copyOnTruncate ||
          recoveryBlock.getBlockId() < uc.getBlockId() &&
          recoveryBlock.getGenerationStamp() < uc.getGenerationStamp() &&
          recoveryBlock.getNumBytes() > uc.getNumBytes() :
            "wrong recoveryBlock";
      // setup the last block locations from the blockManager if not known
      if (uc.getNumExpectedLocations() == 0) {
        uc.setExpectedLocations(blockManager.getStorages(lastBlock));
      }
      if (uc.getNumExpectedLocations() == 0 && uc.getNumBytes() == 0) {
        // There is no datanode reported to this block.
        // may be client have crashed before writing data to pipeline.
        // This blocks doesn't need any recovery.
        // We can remove this block and close the file.
        pendingFile.removeLastBlock(lastBlock);
        finalizeINodeFileUnderConstruction(src, pendingFile,
            iip.getLatestSnapshotId());
        NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: "
            + "Removed empty last block and closed file.");
        return true;
      }
      // start recovery of the last block for this file
      long blockRecoveryId = nextGenerationStamp(blockIdManager.isLegacyBlock(uc));
      lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
      if(copyOnTruncate) {
        uc.setGenerationStamp(blockRecoveryId);
      } else if(truncateRecovery) {
        recoveryBlock.setGenerationStamp(blockRecoveryId);
      }
      uc.initializeBlockRecovery(blockRecoveryId);
      leaseManager.renewLease(lease);
      // Cannot close file right now, since the last block requires recovery.
      // This may potentially cause infinite loop in lease recovery
      // if there are no valid replicas on data-nodes.
      NameNode.stateChangeLog.warn(
                "DIR* NameSystem.internalReleaseLease: " +
                "File " + src + " has not been closed." +
               " Lease recovery is in progress. " +
                "RecoveryId = " + blockRecoveryId + " for block " + lastBlock);
      break;
    }
    return false;
  }
  /**
   * Reassigns the lease on {@code src} to {@code newHolder}, logging the
   * reassignment to the edit log first. No-op if {@code newHolder} is null.
   *
   * @return the (possibly new) lease for the file
   */
  private Lease reassignLease(Lease lease, String src, String newHolder,
      INodeFile pendingFile) {
    assert hasWriteLock();
    if(newHolder == null)
      return lease;
    // The following transaction is not synced. Make sure it's sync'ed later.
    logReassignLease(lease.getHolder(), src, newHolder);
    return reassignLeaseInternal(lease, src, newHolder, pendingFile);
  }
  /**
   * Performs the in-memory lease reassignment: updates the file's
   * under-construction client name and the lease manager's records.
   * Does not write to the edit log; see {@link #reassignLease}.
   */
  Lease reassignLeaseInternal(Lease lease, String src, String newHolder,
      INodeFile pendingFile) {
    assert hasWriteLock();
    pendingFile.getFileUnderConstructionFeature().setClientName(newHolder);
    return leaseManager.reassignLease(lease, src, newHolder);
  }
  /**
   * Commits (and completes, if minimally replicated) the last block of an
   * under-construction file, then reconciles the file's space accounting
   * against the block's actual length.
   *
   * @param fileINode   the under-construction file
   * @param iip         resolved path for space-quota accounting
   * @param commitBlock the client's view of the last block
   */
  private void commitOrCompleteLastBlock(final INodeFile fileINode,
      final INodesInPath iip, final Block commitBlock) throws IOException {
    assert hasWriteLock();
    Preconditions.checkArgument(fileINode.isUnderConstruction());
    if (!blockManager.commitOrCompleteLastBlock(fileINode, commitBlock)) {
      return;
    }
    // Adjust disk space consumption if required
    // (the last block may be shorter than the preferred block size).
    final long diff = fileINode.getPreferredBlockSize() - commitBlock.getNumBytes();
    if (diff > 0) {
      try {
        dir.updateSpaceConsumed(iip, 0, -diff, fileINode.getFileReplication());
      } catch (IOException e) {
        // Accounting drift is tolerated; the commit itself succeeded.
        LOG.warn("Unexpected exception while updating disk space.", e);
      }
    }
  }
  /**
   * Converts an under-construction file into a finalized, immutable one:
   * releases the lease, records the modification in the latest snapshot,
   * strips the under-construction feature, and persists the close.
   */
  private void finalizeINodeFileUnderConstruction(String src,
      INodeFile pendingFile, int latestSnapshot) throws IOException {
    assert hasWriteLock();
    FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
    Preconditions.checkArgument(uc != null);
    leaseManager.removeLease(uc.getClientName(), src);
    pendingFile.recordModification(latestSnapshot);
    // The file is no longer pending.
    // Create permanent INode, update blocks. No need to replace the inode here
    // since we just remove the uc feature from pendingFile
    pendingFile.toCompleteFile(now());
    waitForLoadingFSImage();
    // close file and persist block allocations for this file
    closeFile(src, pendingFile);
    // Schedule replication work if the file is under-replicated.
    blockManager.checkReplication(pendingFile);
  }
  /**
   * Looks up the stored block info for the given block in the blocks map.
   * May return null if the block is not present (see BlockManager).
   */
  @VisibleForTesting
  BlockInfoContiguous getStoredBlock(Block block) {
    return blockManager.getStoredBlock(block);
  }
@Override
public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC) {
assert hasReadLock();
final BlockCollection bc = blockUC.getBlockCollection();
if (bc == null || !(bc instanceof INodeFile)
|| !bc.isUnderConstruction()) {
return false;
}
String fullName = bc.getName();
try {
if (fullName != null && fullName.startsWith(Path.SEPARATOR)
&& dir.getINode(fullName) == bc) {
// If file exists in normal path then no need to look in snapshot
return false;
}
} catch (UnresolvedLinkException e) {
LOG.error("Error while resolving the link : " + fullName, e);
return false;
}
/*
* 1. if bc is under construction and also with snapshot, and
* bc is not in the current fsdirectory tree, bc must represent a snapshot
* file.
* 2. if fullName is not an absolute path, bc cannot be existent in the
* current fsdirectory tree.
* 3. if bc is not the current node associated with fullName, bc must be a
* snapshot inode.
*/
return true;
}
void commitBlockSynchronization(ExtendedBlock oldBlock,
long newgenerationstamp, long newlength,
boolean closeFile, boolean deleteblock, DatanodeID[] newtargets,
String[] newtargetstorages) throws IOException {
LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock
+ ", newgenerationstamp=" + newgenerationstamp
+ ", newlength=" + newlength
+ ", newtargets=" + Arrays.asList(newtargets)
+ ", closeFile=" + closeFile
+ ", deleteBlock=" + deleteblock
+ ")");
checkOperation(OperationCategory.WRITE);
String src = "";
waitForLoadingFSImage();
writeLock();
try {
checkOperation(OperationCategory.WRITE);
// If a DN tries to commit to the standby, the recovery will
// fail, and the next retry will succeed on the new NN.
checkNameNodeSafeMode(
"Cannot commitBlockSynchronization while in safe mode");
final BlockInfoContiguous storedBlock = getStoredBlock(
ExtendedBlock.getLocalBlock(oldBlock));
if (storedBlock == null) {
if (deleteblock) {
// This may be a retry attempt so ignore the failure
// to locate the block.
if (LOG.isDebugEnabled()) {
LOG.debug("Block (=" + oldBlock + ") not found");
}
return;
} else {
throw new IOException("Block (=" + oldBlock + ") not found");
}
}
//
// The implementation of delete operation (see @deleteInternal method)
// first removes the file paths from namespace, and delays the removal
// of blocks to later time for better performance. When
// commitBlockSynchronization (this method) is called in between, the
// blockCollection of storedBlock could have been assigned to null by
// the delete operation, throw IOException here instead of NPE; if the
// file path is already removed from namespace by the delete operation,
// throw FileNotFoundException here, so not to proceed to the end of
// this method to add a CloseOp to the edit log for an already deleted
// file (See HDFS-6825).
//
BlockCollection blockCollection = storedBlock.getBlockCollection();
if (blockCollection == null) {
throw new IOException("The blockCollection of " + storedBlock
+ " is null, likely because the file owning this block was"
+ " deleted and the block removal is delayed");
}
INodeFile iFile = ((INode)blockCollection).asFile();
if (isFileDeleted(iFile)) {
throw new FileNotFoundException("File not found: "
+ iFile.getFullPathName() + ", likely due to delayed block"
+ " removal");
}
if ((!iFile.isUnderConstruction() || storedBlock.isComplete()) &&
iFile.getLastBlock().isComplete()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Unexpected block (=" + oldBlock
+ ") since the file (=" + iFile.getLocalName()
+ ") is not under construction");
}
return;
}
BlockInfoContiguousUnderConstruction truncatedBlock =
(BlockInfoContiguousUnderConstruction) iFile.getLastBlock();
long recoveryId = truncatedBlock.getBlockRecoveryId();
boolean copyTruncate =
truncatedBlock.getBlockId() != storedBlock.getBlockId();
if(recoveryId != newgenerationstamp) {
throw new IOException("The recovery id " + newgenerationstamp
+ " does not match current recovery id "
+ recoveryId + " for block " + oldBlock);
}
if (deleteblock) {
Block blockToDel = ExtendedBlock.getLocalBlock(oldBlock);
boolean remove = iFile.removeLastBlock(blockToDel);
if (remove) {
blockManager.removeBlock(storedBlock);
}
}
else {
// update last block
if(!copyTruncate) {
storedBlock.setGenerationStamp(newgenerationstamp);
storedBlock.setNumBytes(newlength);
}
// find the DatanodeDescriptor objects
// There should be no locations in the blockManager till now because the
// file is underConstruction
ArrayList<DatanodeDescriptor> trimmedTargets =
new ArrayList<DatanodeDescriptor>(newtargets.length);
ArrayList<String> trimmedStorages =
new ArrayList<String>(newtargets.length);
if (newtargets.length > 0) {
for (int i = 0; i < newtargets.length; ++i) {
// try to get targetNode
DatanodeDescriptor targetNode =
blockManager.getDatanodeManager().getDatanode(newtargets[i]);
if (targetNode != null) {
trimmedTargets.add(targetNode);
trimmedStorages.add(newtargetstorages[i]);
} else if (LOG.isDebugEnabled()) {
LOG.debug("DatanodeDescriptor (=" + newtargets[i] + ") not found");
}
}
}
if ((closeFile) && !trimmedTargets.isEmpty()) {
// the file is getting closed. Insert block locations into blockManager.
// Otherwise fsck will report these blocks as MISSING, especially if the
// blocksReceived from Datanodes take a long time to arrive.
for (int i = 0; i < trimmedTargets.size(); i++) {
DatanodeStorageInfo storageInfo =
trimmedTargets.get(i).getStorageInfo(trimmedStorages.get(i));
if (storageInfo != null) {
if(copyTruncate) {
storageInfo.addBlock(truncatedBlock);
} else {
storageInfo.addBlock(storedBlock);
}
}
}
}
// add pipeline locations into the INodeUnderConstruction
DatanodeStorageInfo[] trimmedStorageInfos =
blockManager.getDatanodeManager().getDatanodeStorageInfos(
trimmedTargets.toArray(new DatanodeID[trimmedTargets.size()]),
trimmedStorages.toArray(new String[trimmedStorages.size()]));
if(copyTruncate) {
iFile.setLastBlock(truncatedBlock, trimmedStorageInfos);
} else {
iFile.setLastBlock(storedBlock, trimmedStorageInfos);
}
}
if (closeFile) {
if(copyTruncate) {
src = closeFileCommitBlocks(iFile, truncatedBlock);
if(!iFile.isBlockInLatestSnapshot(storedBlock)) {
blockManager.removeBlock(storedBlock);
}
} else {
src = closeFileCommitBlocks(iFile, storedBlock);
}
} else {
// If this commit does not want to close the file, persist blocks
src = iFile.getFullPathName();
persistBlocks(src, iFile, false);
}
} finally {
writeUnlock();
}
getEditLog().logSync();
if (closeFile) {
LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock
+ ", file=" + src
+ ", newgenerationstamp=" + newgenerationstamp
+ ", newlength=" + newlength
+ ", newtargets=" + Arrays.asList(newtargets) + ") successful");
} else {
LOG.info("commitBlockSynchronization(" + oldBlock + ") successful");
}
}
/**
* @param pendingFile open file that needs to be closed
* @param storedBlock last block
* @return Path of the file that was closed.
* @throws IOException on error
*/
@VisibleForTesting
String closeFileCommitBlocks(INodeFile pendingFile, BlockInfoContiguous storedBlock)
throws IOException {
final INodesInPath iip = INodesInPath.fromINode(pendingFile);
final String src = iip.getPath();
// commit the last block and complete it if it has minimum replicas
commitOrCompleteLastBlock(pendingFile, iip, storedBlock);
//remove lease, close file
finalizeINodeFileUnderConstruction(src, pendingFile,
Snapshot.findLatestSnapshot(pendingFile, Snapshot.CURRENT_STATE_ID));
return src;
}
  /**
   * Renew the lease(s) held by the given client
   *
   * @param holder the client (lease holder) whose leases are renewed
   * @throws IOException if the NN is in safe mode or in an invalid HA state
   */
  void renewLease(String holder) throws IOException {
    // NOTE(review): classified as a WRITE operation (so it is rejected on a
    // standby NN) but guarded only by the read lock — presumably the lease
    // manager synchronizes its own bookkeeping; confirm before changing.
    checkOperation(OperationCategory.WRITE);
    readLock();
    try {
      // Re-check after acquiring the lock in case of an HA state transition.
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot renew lease for " + holder);
      leaseManager.renewLease(holder);
    } finally {
      readUnlock();
    }
  }
  /**
   * Get a partial listing of the indicated directory
   *
   * @param src the directory name
   * @param startAfter the name to start after
   * @param needLocation if blockLocations need to be returned
   * @return a partial listing starting after startAfter
   *
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if symbolic link is encountered
   * @throws IOException if other I/O error occurred
   */
  DirectoryListing getListing(String src, byte[] startAfter,
      boolean needLocation)
      throws IOException {
    checkOperation(OperationCategory.READ);
    DirectoryListing dl = null;
    readLock();
    try {
      // Re-check after acquiring the lock in case of an HA state transition.
      checkOperation(NameNode.OperationCategory.READ);
      dl = FSDirStatAndListingOp.getListingInt(dir, src, startAfter,
          needLocation);
    } catch (AccessControlException e) {
      // Audit the denied access before propagating it to the caller.
      logAuditEvent(false, "listStatus", src);
      throw e;
    } finally {
      readUnlock();
    }
    // Success audit is logged after the read lock is released.
    logAuditEvent(true, "listStatus", src);
    return dl;
  }
/////////////////////////////////////////////////////////
//
// These methods are called by datanodes
//
/////////////////////////////////////////////////////////
  /**
   * Register Datanode.
   * <p>
   * The purpose of registration is to identify whether the new datanode
   * serves a new data storage, and will report new data block copies,
   * which the namenode was not aware of; or the datanode is a replacement
   * node for the data storage that was previously served by a different
   * or the same (in terms of host:port) datanode.
   * The data storages are distinguished by their storageIDs. When a new
   * data storage is reported the namenode issues a new unique storageID.
   * <p>
   * Finally, the namenode returns its namespaceID as the registrationID
   * for the datanodes.
   * namespaceID is a persistent attribute of the name space.
   * The registrationID is checked every time the datanode is communicating
   * with the namenode.
   * Datanodes with inappropriate registrationID are rejected.
   * If the namenode stops, and then restarts it can restore its
   * namespaceID and will continue serving the datanodes that has previously
   * registered with the namenode without restarting the whole cluster.
   *
   * @see org.apache.hadoop.hdfs.server.datanode.DataNode
   */
  void registerDatanode(DatanodeRegistration nodeReg) throws IOException {
    writeLock();
    try {
      getBlockManager().getDatanodeManager().registerDatanode(nodeReg);
      // Registration can change cluster state relevant to safe mode
      // (e.g. the live datanode count), so re-evaluate it here.
      checkSafeMode();
    } finally {
      writeUnlock();
    }
  }
  /**
   * Get registrationID for datanodes based on the namespaceID.
   *
   * @see #registerDatanode(DatanodeRegistration)
   * @return registration ID
   */
  String getRegistrationID() {
    // Derived from the persistent storage info (namespaceID etc.).
    return Storage.getRegistrationID(getFSImage().getStorage());
  }
  /**
   * The given node has reported in. This method should:
   * 1) Record the heartbeat, so the datanode isn't timed out
   * 2) Adjust usage stats for future block allocation
   *
   * If a substantial amount of time passed since the last datanode
   * heartbeat then request an immediate block report.
   *
   * @return an array of datanode commands
   * @throws IOException
   */
  HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
      StorageReport[] reports, long cacheCapacity, long cacheUsed,
      int xceiverCount, int xmitsInProgress, int failedVolumes)
      throws IOException {
    // NOTE(review): only the read lock is taken — presumably the datanode
    // manager synchronizes its own heartbeat bookkeeping; confirm.
    readLock();
    try {
      //get datanode commands
      // Cap new replication work by what the DN is already transferring.
      final int maxTransfer = blockManager.getMaxReplicationStreams()
          - xmitsInProgress;
      DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
          nodeReg, reports, blockPoolId, cacheCapacity, cacheUsed,
          xceiverCount, maxTransfer, failedVolumes);
      //create ha status
      final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
          haContext.getState().getServiceState(),
          getFSImage().getLastAppliedOrWrittenTxId());
      return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo);
    } finally {
      readUnlock();
    }
  }
  /**
   * Returns whether or not there were available resources at the last check of
   * resources.
   *
   * @return true if there were sufficient resources available, false otherwise.
   */
  boolean nameNodeHasResourcesAvailable() {
    // Cached by checkAvailableResources(); no disk access here.
    return hasResourcesAvailable;
  }
  /**
   * Perform resource checks and cache the results.
   */
  void checkAvailableResources() {
    Preconditions.checkState(nnResourceChecker != null,
        "nnResourceChecker not initialized");
    // Cache the answer; readers use nameNodeHasResourcesAvailable().
    hasResourcesAvailable = nnResourceChecker.hasAvailableDiskSpace();
  }
  /**
   * Persist the block list for the inode.
   * @param path full path of the file
   * @param file under-construction file whose block list is logged
   * @param logRetryCache passed through to the edit log (retry-cache tracking)
   */
  private void persistBlocks(String path, INodeFile file,
      boolean logRetryCache) {
    // Caller must hold the namesystem write lock.
    assert hasWriteLock();
    Preconditions.checkArgument(file.isUnderConstruction());
    getEditLog().logUpdateBlocks(path, file, logRetryCache);
    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("persistBlocks: " + path
          + " with " + file.getBlocks().length + " blocks is persisted to" +
          " the file system");
    }
  }
  /**
   * Close file: record a CloseOp for the file in the edit log.
   * @param path full path of the file
   * @param file the inode being closed
   */
  private void closeFile(String path, INodeFile file) {
    // Caller must hold the namesystem write lock.
    assert hasWriteLock();
    waitForLoadingFSImage();
    // file is closed
    getEditLog().logCloseFile(path, file);
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("closeFile: "
          +path+" with "+ file.getBlocks().length
          +" blocks is persisted to the file system");
    }
  }
  /**
   * Periodically calls hasAvailableResources of NameNodeResourceChecker, and if
   * there are found to be insufficient resources available, causes the NN to
   * enter safe mode. If resources are later found to have returned to
   * acceptable levels, this daemon will cause the NN to exit safe mode.
   */
  class NameNodeResourceMonitor implements Runnable {
    // Loop-termination flag flipped by stopMonitor().
    // NOTE(review): not volatile — the monitor may observe the stop late.
    boolean shouldNNRmRun = true;
    @Override
    public void run () {
      try {
        while (fsRunning && shouldNNRmRun) {
          checkAvailableResources();
          if(!nameNodeHasResourcesAvailable()) {
            String lowResourcesMsg = "NameNode low on available disk space. ";
            if (!isInSafeMode()) {
              LOG.warn(lowResourcesMsg + "Entering safe mode.");
            } else {
              LOG.warn(lowResourcesMsg + "Already in safe mode.");
            }
            // The argument presumably marks this as a low-resource
            // safemode entry (see SafeModeInfo.resourcesLow) — confirm.
            enterSafeMode(true);
          }
          try {
            Thread.sleep(resourceRecheckInterval);
          } catch (InterruptedException ie) {
            // Deliberately ignore
          }
        }
      } catch (Exception e) {
        FSNamesystem.LOG.error("Exception in NameNodeResourceMonitor: ", e);
      }
    }
    /** Request the monitor loop to exit after its current iteration. */
    public void stopMonitor() {
      shouldNNRmRun = false;
    }
  }
class NameNodeEditLogRoller implements Runnable {
private boolean shouldRun = true;
private final long rollThreshold;
private final long sleepIntervalMs;
public NameNodeEditLogRoller(long rollThreshold, int sleepIntervalMs) {
this.rollThreshold = rollThreshold;
this.sleepIntervalMs = sleepIntervalMs;
}
@Override
public void run() {
while (fsRunning && shouldRun) {
try {
FSEditLog editLog = getFSImage().getEditLog();
long numEdits =
editLog.getLastWrittenTxId() - editLog.getCurSegmentTxId();
if (numEdits > rollThreshold) {
FSNamesystem.LOG.info("NameNode rolling its own edit log because"
+ " number of edits in open segment exceeds threshold of "
+ rollThreshold);
rollEditLog();
}
Thread.sleep(sleepIntervalMs);
} catch (InterruptedException e) {
FSNamesystem.LOG.info(NameNodeEditLogRoller.class.getSimpleName()
+ " was interrupted, exiting");
break;
} catch (Exception e) {
FSNamesystem.LOG.error("Swallowing exception in "
+ NameNodeEditLogRoller.class.getSimpleName() + ":", e);
}
}
}
public void stop() {
shouldRun = false;
}
}
/**
* Daemon to periodically scan the namespace for lazyPersist files
* with missing blocks and unlink them.
*/
class LazyPersistFileScrubber implements Runnable {
private volatile boolean shouldRun = true;
final int scrubIntervalSec;
public LazyPersistFileScrubber(final int scrubIntervalSec) {
this.scrubIntervalSec = scrubIntervalSec;
}
/**
* Periodically go over the list of lazyPersist files with missing
* blocks and unlink them from the namespace.
*/
private void clearCorruptLazyPersistFiles()
throws SafeModeException, AccessControlException,
UnresolvedLinkException, IOException {
BlockStoragePolicy lpPolicy = blockManager.getStoragePolicy("LAZY_PERSIST");
List<BlockCollection> filesToDelete = new ArrayList<BlockCollection>();
writeLock();
try {
final Iterator<Block> it = blockManager.getCorruptReplicaBlockIterator();
while (it.hasNext()) {
Block b = it.next();
BlockInfoContiguous blockInfo = blockManager.getStoredBlock(b);
if (blockInfo.getBlockCollection().getStoragePolicyID() == lpPolicy.getId()) {
filesToDelete.add(blockInfo.getBlockCollection());
}
}
for (BlockCollection bc : filesToDelete) {
LOG.warn("Removing lazyPersist file " + bc.getName() + " with no replicas.");
BlocksMapUpdateInfo toRemoveBlocks =
FSDirDeleteOp.deleteInternal(
FSNamesystem.this, bc.getName(),
INodesInPath.fromINode((INodeFile) bc), false);
if (toRemoveBlocks != null) {
removeBlocks(toRemoveBlocks); // Incremental deletion of blocks
}
}
} finally {
writeUnlock();
}
}
@Override
public void run() {
while (fsRunning && shouldRun) {
try {
clearCorruptLazyPersistFiles();
Thread.sleep(scrubIntervalSec * 1000);
} catch (InterruptedException e) {
FSNamesystem.LOG.info(
"LazyPersistFileScrubber was interrupted, exiting");
break;
} catch (Exception e) {
FSNamesystem.LOG.error(
"Ignoring exception in LazyPersistFileScrubber:", e);
}
}
}
public void stop() {
shouldRun = false;
}
}
  /** @return the FSImage (namespace image + edit log container) in use. */
  public FSImage getFSImage() {
    return fsImage;
  }
  /** @return the edit log owned by this namesystem's FSImage. */
  public FSEditLog getEditLog() {
    return getFSImage().getEditLog();
  }
private void checkBlock(ExtendedBlock block) throws IOException {
if (block != null && !this.blockPoolId.equals(block.getBlockPoolId())) {
throw new IOException("Unexpected BlockPoolId " + block.getBlockPoolId()
+ " - expected " + blockPoolId);
}
}
  /** Number of blocks with no remaining replicas (lock-free read). */
  @Metric({"MissingBlocks", "Number of missing blocks"})
  public long getMissingBlocksCount() {
    // not locking
    return blockManager.getMissingBlocksCount();
  }
  /** Number of missing blocks whose replication factor is 1 (lock-free read). */
  @Metric({"MissingReplOneBlocks", "Number of missing blocks " +
      "with replication factor 1"})
  public long getMissingReplOneBlocksCount() {
    // not locking
    return blockManager.getMissingReplOneBlocksCount();
  }
  /** Number of datanode heartbeats that have expired. */
  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
  public int getExpiredHeartbeats() {
    // Delegates to the aggregated datanode statistics.
    return datanodeStatistics.getExpiredHeartbeats();
  }
  /**
   * Difference between the last txid written to the edit log and the txid
   * of the most recent checkpoint.
   */
  @Metric({"TransactionsSinceLastCheckpoint",
      "Number of transactions since last checkpoint"})
  public long getTransactionsSinceLastCheckpoint() {
    return getEditLog().getLastWrittenTxId() -
        getFSImage().getStorage().getMostRecentCheckpointTxId();
  }
@Metric({"TransactionsSinceLastLogRoll",
"Number of transactions since last edit log roll"})
public long getTransactionsSinceLastLogRoll() {
if (isInStandbyState() || !getEditLog().isSegmentOpen()) {
return 0;
} else {
return getEditLog().getLastWrittenTxId() -
getEditLog().getCurSegmentTxId() + 1;
}
}
  /** Last transaction id written to the edit log. */
  @Metric({"LastWrittenTransactionId", "Transaction ID written to the edit log"})
  public long getLastWrittenTransactionId() {
    return getEditLog().getLastWrittenTxId();
  }
  /** Wall-clock time (epoch millis) of the most recent checkpoint. */
  @Metric({"LastCheckpointTime",
      "Time in milliseconds since the epoch of the last checkpoint"})
  public long getLastCheckpointTime() {
    return getFSImage().getStorage().getMostRecentCheckpointTime();
  }
  /** @see ClientProtocol#getStats() */
  long[] getStats() {
    // Base array comes from the datanode statistics; the block-level slots
    // are then overwritten with counters from the block manager.
    final long[] stats = datanodeStatistics.getStats();
    stats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = getUnderReplicatedBlocks();
    stats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = getCorruptReplicaBlocks();
    stats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = getMissingBlocksCount();
    stats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX] =
        getMissingReplOneBlocksCount();
    return stats;
  }
  /** Total raw datanode capacity, in bytes (delegates to datanode stats). */
  @Override // FSNamesystemMBean
  @Metric({"CapacityTotal",
      "Total raw capacity of data nodes in bytes"})
  public long getCapacityTotal() {
    return datanodeStatistics.getCapacityTotal();
  }
  /** Same as {@link #getCapacityTotal()}, rounded to GB via DFSUtil. */
  @Metric({"CapacityTotalGB",
      "Total raw capacity of data nodes in GB"})
  public float getCapacityTotalGB() {
    return DFSUtil.roundBytesToGB(getCapacityTotal());
  }
  /** Bytes used for DFS data across all datanodes. */
  @Override // FSNamesystemMBean
  @Metric({"CapacityUsed",
      "Total used capacity across all data nodes in bytes"})
  public long getCapacityUsed() {
    return datanodeStatistics.getCapacityUsed();
  }
  /** Same as {@link #getCapacityUsed()}, rounded to GB via DFSUtil. */
  @Metric({"CapacityUsedGB",
      "Total used capacity across all data nodes in GB"})
  public float getCapacityUsedGB() {
    return DFSUtil.roundBytesToGB(getCapacityUsed());
  }
  /** Remaining datanode capacity, in bytes. */
  @Override // FSNamesystemMBean
  @Metric({"CapacityRemaining", "Remaining capacity in bytes"})
  public long getCapacityRemaining() {
    return datanodeStatistics.getCapacityRemaining();
  }
  /** Same as {@link #getCapacityRemaining()}, rounded to GB via DFSUtil. */
  @Metric({"CapacityRemainingGB", "Remaining capacity in GB"})
  public float getCapacityRemainingGB() {
    return DFSUtil.roundBytesToGB(getCapacityRemaining());
  }
  /** Bytes consumed on datanodes by non-DFS data. */
  @Metric({"CapacityUsedNonDFS",
      "Total space used by data nodes for non DFS purposes in bytes"})
  public long getCapacityUsedNonDFS() {
    return datanodeStatistics.getCapacityUsedNonDFS();
  }
  /**
   * Total number of connections.
   */
  @Override // FSNamesystemMBean
  @Metric
  public int getTotalLoad() {
    // Aggregate xceiver (data-transfer thread) count across datanodes.
    return datanodeStatistics.getXceiverCount();
  }
  /** Number of directories that allow snapshots. */
  @Metric({ "SnapshottableDirectories", "Number of snapshottable directories" })
  public int getNumSnapshottableDirs() {
    return this.snapshotManager.getNumSnapshottableDirs();
  }
  /** Number of snapshots currently in the namespace. */
  @Metric({ "Snapshots", "The number of snapshots" })
  public int getNumSnapshots() {
    return this.snapshotManager.getNumSnapshots();
  }
@Override
public String getSnapshotStats() {
Map<String, Object> info = new HashMap<String, Object>();
info.put("SnapshottableDirectories", this.getNumSnapshottableDirs());
info.put("Snapshots", this.getNumSnapshots());
return JSON.toString(info);
}
int getNumberOfDatanodes(DatanodeReportType type) {
readLock();
try {
return getBlockManager().getDatanodeManager().getDatanodeListForReport(
type).size();
} finally {
readUnlock();
}
}
DatanodeInfo[] datanodeReport(final DatanodeReportType type
) throws AccessControlException, StandbyException {
checkSuperuserPrivilege();
checkOperation(OperationCategory.UNCHECKED);
readLock();
try {
checkOperation(OperationCategory.UNCHECKED);
final DatanodeManager dm = getBlockManager().getDatanodeManager();
final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);
DatanodeInfo[] arr = new DatanodeInfo[results.size()];
for (int i=0; i<arr.length; i++) {
arr[i] = new DatanodeInfo(results.get(i));
}
return arr;
} finally {
readUnlock();
}
}
  /**
   * Build a per-datanode storage report for the requested node category.
   * Requires superuser privilege.
   *
   * @param type which class of datanodes to report on
   * @return one DatanodeStorageReport per matching datanode
   * @throws AccessControlException if superuser privilege is violated
   * @throws StandbyException if this NN cannot serve the request
   */
  DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
      ) throws AccessControlException, StandbyException {
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.UNCHECKED);
    readLock();
    try {
      // Re-check after acquiring the lock in case of an HA state transition.
      checkOperation(OperationCategory.UNCHECKED);
      final DatanodeManager dm = getBlockManager().getDatanodeManager();
      final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
      DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
      for (int i = 0; i < reports.length; i++) {
        final DatanodeDescriptor d = datanodes.get(i);
        // Wrap in DatanodeInfo so internal descriptors don't escape.
        reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
            d.getStorageReports());
      }
      return reports;
    } finally {
      readUnlock();
    }
  }
  /**
   * Save namespace image.
   * This will save current namespace into fsimage file and empty edits file.
   * Requires superuser privilege and safe mode.
   *
   * @throws AccessControlException if superuser privilege is violated.
   * @throws IOException if the namesystem is not in safe mode, or on
   *         other I/O error
   */
  void saveNamespace() throws AccessControlException, IOException {
    checkOperation(OperationCategory.UNCHECKED);
    checkSuperuserPrivilege();
    cpLock(); // Block if a checkpointing is in progress on standby.
    readLock();
    try {
      checkOperation(OperationCategory.UNCHECKED);
      // Safe mode is required so the namespace is quiescent while saving.
      if (!isInSafeMode()) {
        throw new IOException("Safe mode should be turned ON "
            + "in order to create namespace image.");
      }
      getFSImage().saveNamespace(this);
    } finally {
      readUnlock();
      cpUnlock();
    }
    LOG.info("New namespace image has been created");
  }
/**
* Enables/Disables/Checks restoring failed storage replicas if the storage becomes available again.
* Requires superuser privilege.
*
* @throws AccessControlException if superuser privilege is violated.
*/
boolean restoreFailedStorage(String arg) throws AccessControlException,
StandbyException {
checkSuperuserPrivilege();
checkOperation(OperationCategory.UNCHECKED);
cpLock(); // Block if a checkpointing is in progress on standby.
writeLock();
try {
checkOperation(OperationCategory.UNCHECKED);
// if it is disabled - enable it and vice versa.
if(arg.equals("check"))
return getFSImage().getStorage().getRestoreFailedStorage();
boolean val = arg.equals("true"); // false if not
getFSImage().getStorage().setRestoreFailedStorage(val);
return val;
} finally {
writeUnlock();
cpUnlock();
}
}
  /** @return a fresh Date for the namesystem start time (defensive copy). */
  Date getStartTime() {
    return new Date(startTime);
  }
  /**
   * Finalize a previously started upgrade, discarding the pre-upgrade state.
   * Requires superuser privilege.
   */
  void finalizeUpgrade() throws IOException {
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.UNCHECKED);
    cpLock(); // Block if a checkpointing is in progress on standby.
    writeLock();
    try {
      checkOperation(OperationCategory.UNCHECKED);
      // Tell the image whether this is an active NN in an HA pair.
      getFSImage().finalizeUpgrade(this.isHaEnabled() && inActiveState());
    } finally {
      writeUnlock();
      cpUnlock();
    }
  }
  /**
   * Ask the datanode manager to refresh its node list from a freshly
   * loaded configuration. Requires superuser privilege.
   */
  void refreshNodes() throws IOException {
    checkOperation(OperationCategory.UNCHECKED);
    checkSuperuserPrivilege();
    getBlockManager().getDatanodeManager().refreshNodes(new HdfsConfiguration());
  }
  /**
   * Push a new balancer bandwidth value to the datanode manager.
   * Requires superuser privilege.
   * @param bandwidth new bandwidth value
   */
  void setBalancerBandwidth(long bandwidth) throws IOException {
    checkOperation(OperationCategory.UNCHECKED);
    checkSuperuserPrivilege();
    getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
  }
/**
* Persist the new block (the last block of the given file).
* @param path
* @param file
*/
private void persistNewBlock(String path, INodeFile file) {
Preconditions.checkArgument(file.isUnderConstruction());
getEditLog().logAddBlock(path, file);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("persistNewBlock: "
+ path + " with new block " + file.getLastBlock().toString()
+ ", current total block count is " + file.getBlocks().length);
}
}
/**
* SafeModeInfo contains information related to the safe mode.
* <p>
* An instance of {@link SafeModeInfo} is created when the name node
* enters safe mode.
* <p>
* During name node startup {@link SafeModeInfo} counts the number of
* <em>safe blocks</em>, those that have at least the minimal number of
* replicas, and calculates the ratio of safe blocks to the total number
* of blocks in the system, which is the size of blocks in
* {@link FSNamesystem#blockManager}. When the ratio reaches the
* {@link #threshold} it starts the SafeModeMonitor daemon in order
* to monitor whether the safe mode {@link #extension} is passed.
* Then it leaves safe mode and destroys itself.
* <p>
* If safe mode is turned on manually then the number of safe blocks is
* not tracked because the name node is not intended to leave safe mode
* automatically in the case.
*
* @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction, boolean)
*/
public class SafeModeInfo {
// configuration fields
/** Safe mode threshold condition %.*/
private final double threshold;
/** Safe mode minimum number of datanodes alive */
private final int datanodeThreshold;
/**
* Safe mode extension after the threshold.
* Make it volatile so that getSafeModeTip can read the latest value
* without taking a lock.
*/
private volatile int extension;
/** Min replication required by safe mode. */
private final int safeReplication;
/** threshold for populating needed replication queues */
private final double replQueueThreshold;
// internal fields
/** Time when threshold was reached.
* <br> -1 safe mode is off
* <br> 0 safe mode is on, and threshold is not reached yet
* <br> >0 safe mode is on, but we are in extension period
*/
private long reached = -1;
/** Total number of blocks. */
int blockTotal;
/** Number of safe blocks. */
int blockSafe;
/** Number of blocks needed to satisfy safe mode threshold condition */
private int blockThreshold;
/** Number of blocks needed before populating replication queues */
private int blockReplQueueThreshold;
/** time of the last status printout */
private long lastStatusReport = 0;
/**
* Was safemode entered automatically because available resources were low.
* Make it volatile so that getSafeModeTip can read the latest value
* without taking a lock.
*/
private volatile boolean resourcesLow = false;
/** Should safemode adjust its block totals as blocks come in */
private boolean shouldIncrementallyTrackBlocks = false;
/** counter for tracking startup progress of reported blocks */
private Counter awaitingReportedBlocksCounter;
/**
* Creates SafeModeInfo when the name node enters
* automatic safe mode at startup.
*
* @param conf configuration
*/
private SafeModeInfo(Configuration conf) {
this.threshold = conf.getFloat(DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,
DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT);
if(threshold > 1.0) {
LOG.warn("The threshold value should't be greater than 1, threshold: " + threshold);
}
this.datanodeThreshold = conf.getInt(
DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,
DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT);
this.extension = conf.getInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
this.safeReplication = conf.getInt(DFS_NAMENODE_REPLICATION_MIN_KEY,
DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
LOG.info(DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY + " = " + threshold);
LOG.info(DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY + " = " + datanodeThreshold);
LOG.info(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + " = " + extension);
// default to safe mode threshold (i.e., don't populate queues before leaving safe mode)
this.replQueueThreshold =
conf.getFloat(DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,
(float) threshold);
this.blockTotal = 0;
this.blockSafe = 0;
}
    /**
     * In the HA case, the StandbyNode can be in safemode while the namespace
     * is modified by the edit log tailer. In this case, the number of total
     * blocks changes as edits are processed (eg blocks are added and deleted).
     * However, we don't want to do the incremental tracking during the
     * startup-time loading process -- only once the initial total has been
     * set after the image has been loaded.
     *
     * @return true once setBlockTotal() has enabled incremental tracking
     */
    private boolean shouldIncrementallyTrackBlocks() {
      return shouldIncrementallyTrackBlocks;
    }
    /**
     * Creates SafeModeInfo when safe mode is entered manually, or because
     * available resources are low.
     *
     * The {@link #threshold} is set to 1.5 so that it could never be reached.
     * {@link #blockTotal} is set to -1 to indicate that safe mode is manual.
     *
     * @param resourcesLow true when entered because NN resources are low
     * @see SafeModeInfo
     */
    private SafeModeInfo(boolean resourcesLow) {
      this.threshold = 1.5f; // this threshold can never be reached
      this.datanodeThreshold = Integer.MAX_VALUE;
      // extension == Integer.MAX_VALUE doubles as the "manual" marker
      // (see isManual()).
      this.extension = Integer.MAX_VALUE;
      this.safeReplication = Short.MAX_VALUE + 1; // more than maxReplication
      this.replQueueThreshold = 1.5f; // can never be reached
      this.blockTotal = -1;
      this.blockSafe = -1;
      this.resourcesLow = resourcesLow;
      enter();
      reportStatus("STATE* Safe mode is ON.", true);
    }
    /**
     * Check if safe mode is on.
     * @return true if in safe mode
     */
    private synchronized boolean isOn() {
      doConsistencyCheck();
      // reached >= 0 means safe mode is on (see the reached field doc).
      return this.reached >= 0;
    }
    /**
     * Enter safe mode.
     */
    private void enter() {
      // reached == 0: safe mode on, threshold not yet reached.
      this.reached = 0;
    }
    /**
     * Leave safe mode.
     * <p>
     * Check for invalid, under- & over-replicated blocks in the end of startup.
     */
    private synchronized void leave() {
      // if not done yet, initialize replication queues.
      // In the standby, do not populate repl queues
      if (!isPopulatingReplQueues() && shouldPopulateReplQueues()) {
        initializeReplQueues();
      }
      long timeInSafemode = now() - startTime;
      NameNode.stateChangeLog.info("STATE* Leaving safe mode after "
          + timeInSafemode/1000 + " secs");
      NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);
      //Log the following only once (when transitioning from ON -> OFF)
      if (reached >= 0) {
        NameNode.stateChangeLog.info("STATE* Safe mode is OFF");
      }
      // reached = -1 marks safe mode off; clearing the enclosing
      // namesystem's safeMode reference publishes the exit.
      reached = -1;
      safeMode = null;
      final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
      NameNode.stateChangeLog.info("STATE* Network topology has "
          + nt.getNumOfRacks() + " racks and "
          + nt.getNumOfLeaves() + " datanodes");
      NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
          + blockManager.numOfUnderReplicatedBlocks() + " blocks");
      startSecretManagerIfNecessary();
      // If startup has not yet completed, end safemode phase.
      StartupProgress prog = NameNode.getStartupProgress();
      if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
        prog.endStep(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS);
        prog.endPhase(Phase.SAFEMODE);
      }
    }
    /**
     * Check whether we have reached the threshold for
     * initializing replication queues.
     *
     * @return true when enough blocks are safe to start populating queues
     */
    private synchronized boolean canInitializeReplQueues() {
      return shouldPopulateReplQueues()
          && blockSafe >= blockReplQueueThreshold;
    }
    /**
     * Safe mode can be turned off iff
     * the threshold is reached and
     * the extension time have passed.
     * @return true if can leave or false otherwise.
     */
    private synchronized boolean canLeave() {
      // reached == 0: thresholds were never met, so the extension timer
      // has not even started.
      if (reached == 0) {
        return false;
      }
      // Still inside the configured extension window.
      if (now() - reached < extension) {
        reportStatus("STATE* Safe mode ON, in safe mode extension.", false);
        return false;
      }
      // Thresholds may have regressed (e.g. datanodes lost) since reached.
      if (needEnter()) {
        reportStatus("STATE* Safe mode ON, thresholds not met.", false);
        return false;
      }
      return true;
    }
/**
* There is no need to enter safe mode
* if DFS is empty or {@link #threshold} == 0
*/
private boolean needEnter() {
return (threshold != 0 && blockSafe < blockThreshold) ||
(datanodeThreshold != 0 && getNumLiveDataNodes() < datanodeThreshold) ||
(!nameNodeHasResourcesAvailable());
}
    /**
     * Check and trigger safe mode if needed.
     */
    private void checkMode() {
      // Have to have write-lock since leaving safemode initializes
      // repl queues, which requires write lock
      assert hasWriteLock();
      // Defer all safemode decisions while transitioning to active.
      if (inTransitionToActive()) {
        return;
      }
      // if smmthread is already running, the block threshold must have been
      // reached before, there is no need to enter the safe mode again
      if (smmthread == null && needEnter()) {
        enter();
        // check if we are ready to initialize replication queues
        if (canInitializeReplQueues() && !isPopulatingReplQueues()
            && !haEnabled) {
          initializeReplQueues();
        }
        reportStatus("STATE* Safe mode ON.", false);
        return;
      }
      // the threshold is reached or was reached before
      if (!isOn() ||                           // safe mode is off
          extension <= 0 || threshold <= 0) {  // don't need to wait
        this.leave(); // leave safe mode
        return;
      }
      if (reached > 0) {  // threshold has already been reached before
        reportStatus("STATE* Safe mode ON.", false);
        return;
      }
      // start monitor
      // Record when the threshold was first met; the extension window is
      // measured from this timestamp (see canLeave()).
      reached = now();
      if (smmthread == null) {
        smmthread = new Daemon(new SafeModeMonitor());
        smmthread.start();
        reportStatus("STATE* Safe mode extension entered.", true);
      }
      // check if we are ready to initialize replication queues
      if (canInitializeReplQueues() && !isPopulatingReplQueues() && !haEnabled) {
        initializeReplQueues();
      }
    }
    /**
     * Set total number of blocks.
     * Recomputes the derived thresholds from the new total.
     */
    private synchronized void setBlockTotal(int total) {
      this.blockTotal = total;
      // Blocks needed to satisfy the safe-mode threshold condition.
      this.blockThreshold = (int) (blockTotal * threshold);
      // Blocks needed before replication queues may be populated.
      this.blockReplQueueThreshold =
        (int) (blockTotal * replQueueThreshold);
      if (haEnabled) {
        // After we initialize the block count, any further namespace
        // modifications done while in safe mode need to keep track
        // of the number of total blocks in the system.
        this.shouldIncrementallyTrackBlocks = true;
      }
      // blockSafe is -1 in manual/low-resource safemode; normalize to 0
      // now that automatic counting applies.
      if(blockSafe < 0)
        this.blockSafe = 0;
      checkMode();
    }
    /**
     * Increment number of safe blocks if current block has
     * reached minimal replication.
     * @param replication current replication
     */
    private synchronized void incrementSafeBlockCount(short replication) {
      // Increment only when replication is exactly the safe minimum, i.e.
      // at the transition from below-minimum to minimally replicated.
      if (replication == safeReplication) {
        this.blockSafe++;
        // Report startup progress only if we haven't completed startup yet.
        StartupProgress prog = NameNode.getStartupProgress();
        if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
          if (this.awaitingReportedBlocksCounter == null) {
            // Lazily fetch and cache the startup-progress counter.
            this.awaitingReportedBlocksCounter = prog.getCounter(Phase.SAFEMODE,
                STEP_AWAITING_REPORTED_BLOCKS);
          }
          this.awaitingReportedBlocksCounter.increment();
        }
        checkMode();
      }
    }
    /**
     * Decrement number of safe blocks if current block has
     * fallen below minimal replication.
     * @param replication current replication
     */
    private synchronized void decrementSafeBlockCount(short replication) {
      // replication == safeReplication-1 means the block just dropped
      // below the safe minimum.
      if (replication == safeReplication-1) {
        this.blockSafe--;
        //blockSafe is set to -1 in manual / low resources safemode
        assert blockSafe >= 0 || isManual() || areResourcesLow();
        checkMode();
      }
    }
    /**
     * Check if safe mode was entered manually
     */
    private boolean isManual() {
      // extension == Integer.MAX_VALUE is the sentinel set by setManual()
      // and by the manual/low-resource constructor.
      return extension == Integer.MAX_VALUE;
    }
    /**
     * Set manual safe mode.
     */
    private synchronized void setManual() {
      extension = Integer.MAX_VALUE;
    }
    /**
     * Check if safe mode was entered due to resources being low.
     */
    private boolean areResourcesLow() {
      return resourcesLow;
    }
    /**
     * Set that resources are low for this instance of safe mode.
     */
    private void setResourcesLow() {
      resourcesLow = true;
    }
    /**
     * A tip on how safe mode is to be turned off: manually or automatically.
     * @return a human-readable explanation of why safe mode is on and how it
     * will (or must) be turned off; "Safe mode is OFF." when it is off.
     */
    String getTurnOffTip() {
      if(!isOn()) {
        return "Safe mode is OFF.";
      }
      //Manual OR low-resource safemode. (Admin intervention required)
      String adminMsg = "It was turned on manually. ";
      if (areResourcesLow()) {
        adminMsg = "Resources are low on NN. Please add or free up more "
          + "resources then turn off safe mode manually. NOTE: If you turn off"
          + " safe mode before adding resources, "
          + "the NN will immediately return to safe mode. ";
      }
      if (isManual() || areResourcesLow()) {
        return adminMsg
          + "Use \"hdfs dfsadmin -safemode leave\" to turn safe mode off.";
      }
      boolean thresholdsMet = true;
      int numLive = getNumLiveDataNodes();
      String msg = "";
      // Block threshold: enough blocks must have reached safe replication.
      if (blockSafe < blockThreshold) {
        msg += String.format(
          "The reported blocks %d needs additional %d"
          + " blocks to reach the threshold %.4f of total blocks %d.%n",
          blockSafe, (blockThreshold - blockSafe) + 1, threshold, blockTotal);
        thresholdsMet = false;
      } else {
        msg += String.format("The reported blocks %d has reached the threshold"
            + " %.4f of total blocks %d. ", blockSafe, threshold, blockTotal);
      }
      // Datanode threshold: a minimum number of live datanodes is required.
      if (numLive < datanodeThreshold) {
        msg += String.format(
          "The number of live datanodes %d needs an additional %d live "
          + "datanodes to reach the minimum number %d.%n",
          numLive, (datanodeThreshold - numLive), datanodeThreshold);
        thresholdsMet = false;
      } else {
        msg += String.format("The number of live datanodes %d has reached "
            + "the minimum number %d. ",
            numLive, datanodeThreshold);
      }
      msg += (reached > 0) ? "In safe mode extension. " : "";
      msg += "Safe mode will be turned off automatically ";
      if (!thresholdsMet) {
        msg += "once the thresholds have been reached.";
      } else if (reached + extension - now() > 0) {
        // Still within the configured extension window.
        msg += ("in " + (reached + extension - now()) / 1000 + " seconds.");
      } else {
        msg += "soon.";
      }
      return msg;
    }
/**
* Print status every 20 seconds.
*/
private void reportStatus(String msg, boolean rightNow) {
long curTime = now();
if(!rightNow && (curTime - lastStatusReport < 20 * 1000))
return;
NameNode.stateChangeLog.info(msg + " \n" + getTurnOffTip());
lastStatusReport = curTime;
}
@Override
public String toString() {
String resText = "Current safe blocks = "
+ blockSafe
+ ". Target blocks = " + blockThreshold + " for threshold = %" + threshold
+ ". Minimal replication = " + safeReplication + ".";
if (reached > 0)
resText += " Threshold was reached " + new Date(reached) + ".";
return resText;
}
    /**
     * Checks consistency of the class state.
     * This is costly so only runs if asserts are enabled.
     */
    private void doConsistencyCheck() {
      boolean assertsOn = false;
      assert assertsOn = true; // set to true if asserts are on
      if (!assertsOn) return;
      
      // blockTotal and blockSafe are both -1 in manual safe mode; nothing to
      // cross-check against the BlockManager in that case.
      if (blockTotal == -1 && blockSafe == -1) {
        return; // manual safe mode
      }
      int activeBlocks = blockManager.getActiveBlockCount();
      // A total/active mismatch is tolerated as long as blockSafe is still
      // within [0, blockTotal]; only fail when both invariants are broken.
      if ((blockTotal != activeBlocks) &&
          !(blockSafe >= 0 && blockSafe <= blockTotal)) {
        throw new AssertionError(
            " SafeMode: Inconsistent filesystem state: "
            + "SafeMode data: blockTotal=" + blockTotal
            + " blockSafe=" + blockSafe + "; "
            + "BlockManager data: active=" + activeBlocks);
      }
    }
    /**
     * Adjust the number of safe and expected blocks while in safe mode.
     * Only applies once incremental tracking has been enabled (HA mode,
     * after the initial block total is set).
     * @param deltaSafe change in the number of safe blocks
     * @param deltaTotal change in the number of total expected blocks
     */
    private synchronized void adjustBlockTotals(int deltaSafe, int deltaTotal) {
      if (!shouldIncrementallyTrackBlocks) {
        return;
      }
      assert haEnabled;
      
      if (LOG.isDebugEnabled()) {
        LOG.debug("Adjusting block totals from " +
            blockSafe + "/" + blockTotal + " to " +
            (blockSafe + deltaSafe) + "/" + (blockTotal + deltaTotal));
      }
      assert blockSafe + deltaSafe >= 0 : "Can't reduce blockSafe " +
        blockSafe + " by " + deltaSafe + ": would be negative";
      assert blockTotal + deltaTotal >= 0 : "Can't reduce blockTotal " +
        blockTotal + " by " + deltaTotal + ": would be negative";
      
      blockSafe += deltaSafe;
      // setBlockTotal also recomputes the thresholds and re-checks the mode.
      setBlockTotal(blockTotal + deltaTotal);
    }
}
  /**
   * Periodically check whether it is time to leave safe mode.
   * This thread starts when the threshold level is reached.
   *
   */
  class SafeModeMonitor implements Runnable {
    /** interval in msec for checking safe mode: {@value} */
    private static final long recheckInterval = 1000;
      
    /**
     * Poll under the write lock until safe mode can be left, or the
     * namesystem stops running.
     */
    @Override
    public void run() {
      while (fsRunning) {
        writeLock();
        try {
          if (safeMode == null) { // Not in safe mode.
            break;
          }
          if (safeMode.canLeave()) {
            // Leave safe mode.
            safeMode.leave();
            smmthread = null;
            break;
          }
        } finally {
          writeUnlock();
        }

        try {
          Thread.sleep(recheckInterval);
        } catch (InterruptedException ie) {
          // Ignored; simply re-check on the next loop iteration.
        }
      }
      if (!fsRunning) {
        LOG.info("NameNode is being shutdown, exit SafeModeMonitor thread");
      }
    }
  }
boolean setSafeMode(SafeModeAction action) throws IOException {
if (action != SafeModeAction.SAFEMODE_GET) {
checkSuperuserPrivilege();
switch(action) {
case SAFEMODE_LEAVE: // leave safe mode
leaveSafeMode();
break;
case SAFEMODE_ENTER: // enter safe mode
enterSafeMode(false);
break;
default:
LOG.error("Unexpected safe mode action");
}
}
return isInSafeMode();
}
@Override
public void checkSafeMode() {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
if (safeMode != null) {
safeMode.checkMode();
}
}
@Override
public boolean isInSafeMode() {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null)
return false;
return safeMode.isOn();
}
@Override
public boolean isInStartupSafeMode() {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null)
return false;
// If the NN is in safemode, and not due to manual / low resources, we
// assume it must be because of startup. If the NN had low resources during
// startup, we assume it came out of startup safemode and it is now in low
// resources safemode
return !safeMode.isManual() && !safeMode.areResourcesLow()
&& safeMode.isOn();
}
  /**
   * Check if replication queues are to be populated
   * @return true when node is HAState.Active and not in the very first safemode
   */
  @Override
  public boolean isPopulatingReplQueues() {
    if (!shouldPopulateReplQueues()) {
      return false;
    }
    // The HA state allows it; also require that the queues were initialized.
    return initializedReplQueues;
  }

  /**
   * @return whether the current HA state wants replication queues populated;
   * false when no HA context or state is available yet.
   */
  private boolean shouldPopulateReplQueues() {
    if(haContext == null || haContext.getState() == null)
      return false;
    return haContext.getState().shouldPopulateReplQueues();
  }
@Override
public void incrementSafeBlockCount(int replication) {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null)
return;
safeMode.incrementSafeBlockCount((short)replication);
}
@Override
public void decrementSafeBlockCount(Block b) {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null) // mostly true
return;
BlockInfoContiguous storedBlock = getStoredBlock(b);
if (storedBlock.isComplete()) {
safeMode.decrementSafeBlockCount((short)blockManager.countNodes(b).liveReplicas());
}
}
  /**
   * Adjust the total number of blocks safe and expected during safe mode.
   * If safe mode is not currently on, this is a no-op.
   * @param deltaSafe the change in number of safe blocks
   * @param deltaTotal the change in number of total blocks expected
   */
  @Override
  public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal) {
    // safeMode is volatile, and may be set to null at any time
    SafeModeInfo safeMode = this.safeMode;
    if (safeMode == null)
      return;
    safeMode.adjustBlockTotals(deltaSafe, deltaTotal);
  }
  /**
   * Set the total number of blocks in the system (COMPLETE blocks only).
   * No-op when safe mode is off.
   */
  public void setBlockTotal() {
    // safeMode is volatile, and may be set to null at any time
    SafeModeInfo safeMode = this.safeMode;
    if (safeMode == null)
      return;
    safeMode.setBlockTotal((int)getCompleteBlocksTotal());
  }
  /**
   * Get the total number of blocks in the system.
   * @return total block count as tracked by the BlockManager
   */
  @Override // FSNamesystemMBean
  @Metric
  public long getBlocksTotal() {
    return blockManager.getTotalBlocks();
  }
/**
* Get the total number of COMPLETE blocks in the system.
* For safe mode only complete blocks are counted.
*/
private long getCompleteBlocksTotal() {
// Calculate number of blocks under construction
long numUCBlocks = 0;
readLock();
numUCBlocks = leaseManager.getNumUnderConstructionBlocks();
try {
return getBlocksTotal() - numUCBlocks;
} finally {
readUnlock();
}
}
  /**
   * Enter safe mode. If resourcesLow is false, then we assume it is manual
   * @param resourcesLow true when triggered by the resource checker,
   *        false for an explicit (manual) request
   * @throws IOException
   */
  void enterSafeMode(boolean resourcesLow) throws IOException {
    writeLock();
    try {
      // Stop the secret manager, since rolling the master key would
      // try to write to the edit log
      stopSecretManager();

      // Ensure that any concurrent operations have been fully synced
      // before entering safe mode. This ensures that the FSImage
      // is entirely stable on disk as soon as we're in safe mode.
      boolean isEditlogOpenForWrite = getEditLog().isOpenForWrite();
      // Before Editlog is in OpenForWrite mode, editLogStream will be null. So,
      // logSyncAll call can be called only when Edlitlog is in OpenForWrite mode
      if (isEditlogOpenForWrite) {
        getEditLog().logSyncAll();
      }
      if (!isInSafeMode()) {
        safeMode = new SafeModeInfo(resourcesLow);
        return;
      }
      // Already in safe mode: just record the (possibly new) reason on the
      // existing SafeModeInfo rather than replacing it.
      if (resourcesLow) {
        safeMode.setResourcesLow();
      } else {
        safeMode.setManual();
      }
      if (isEditlogOpenForWrite) {
        getEditLog().logSyncAll();
      }
      NameNode.stateChangeLog.info("STATE* Safe mode is ON"
          + safeMode.getTurnOffTip());
    } finally {
      writeUnlock();
    }
  }
/**
* Leave safe mode.
*/
void leaveSafeMode() {
writeLock();
try {
if (!isInSafeMode()) {
NameNode.stateChangeLog.info("STATE* Safe mode is already OFF");
return;
}
safeMode.leave();
} finally {
writeUnlock();
}
}
String getSafeModeTip() {
// There is no need to take readLock.
// Don't use isInSafeMode as this.safeMode might be set to null.
// after isInSafeMode returns.
boolean inSafeMode;
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null) {
inSafeMode = false;
} else {
inSafeMode = safeMode.isOn();
}
if (!inSafeMode) {
return "";
} else {
return safeMode.getTurnOffTip();
}
}
  /**
   * Roll the edit log: finalize the current segment and open a new one.
   * Requires superuser privilege; disallowed in safe mode.
   * @return a signature describing the resulting image/edits state
   */
  CheckpointSignature rollEditLog() throws IOException {
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.JOURNAL);
    writeLock();
    try {
      checkOperation(OperationCategory.JOURNAL);
      checkNameNodeSafeMode("Log not rolled");
      if (Server.isRpcInvocation()) {
        LOG.info("Roll Edit Log from " + Server.getRemoteAddress());
      }
      return getFSImage().rollEditLog();
    } finally {
      writeUnlock();
    }
  }

  /**
   * Begin a checkpoint on behalf of the given backup node.
   * @param backupNode registration of the node taking the checkpoint
   * @param activeNamenode registration of this NameNode
   * @return the command the backup node should execute next
   */
  NamenodeCommand startCheckpoint(NamenodeRegistration backupNode,
      NamenodeRegistration activeNamenode) throws IOException {
    checkOperation(OperationCategory.CHECKPOINT);
    writeLock();
    try {
      checkOperation(OperationCategory.CHECKPOINT);
      checkNameNodeSafeMode("Checkpoint not started");
      
      LOG.info("Start checkpoint for " + backupNode.getAddress());
      NamenodeCommand cmd = getFSImage().startCheckpoint(backupNode,
          activeNamenode);
      getEditLog().logSync();
      return cmd;
    } finally {
      writeUnlock();
    }
  }

  /**
   * Forward an incremental (received/deleted) block report from a datanode
   * to the BlockManager.
   */
  public void processIncrementalBlockReport(final DatanodeID nodeID,
      final StorageReceivedDeletedBlocks srdb)
      throws IOException {
    writeLock();
    try {
      blockManager.processIncrementalBlockReport(nodeID, srdb);
    } finally {
      writeUnlock();
    }
  }
  
  /**
   * Complete a checkpoint previously started by the given registration.
   * Only the read lock is taken here.
   */
  void endCheckpoint(NamenodeRegistration registration,
                     CheckpointSignature sig) throws IOException {
    checkOperation(OperationCategory.CHECKPOINT);
    readLock();
    try {
      checkOperation(OperationCategory.CHECKPOINT);
      checkNameNodeSafeMode("Checkpoint not ended");
      LOG.info("End checkpoint for " + registration.getAddress());
      getFSImage().endCheckpoint(sig);
    } finally {
      readUnlock();
    }
  }
  /** Build a PermissionStatus owned by the NN's fsOwner and supergroup. */
  PermissionStatus createFsOwnerPermissions(FsPermission permission) {
    return new PermissionStatus(fsOwner.getShortUserName(), supergroup, permission);
  }

  /**
   * Deny access by the superuser to inodes carrying the
   * unreadable-by-superuser xattr.
   * @throws AccessControlException when the caller is the superuser and the
   *         inode carries SECURITY_XATTR_UNREADABLE_BY_SUPERUSER
   */
  private void checkUnreadableBySuperuser(FSPermissionChecker pc,
      INode inode, int snapshotId)
      throws IOException {
    if (pc.isSuperUser()) {
      for (XAttr xattr : FSDirXAttrOp.getXAttrs(dir, inode, snapshotId)) {
        if (XAttrHelper.getPrefixName(xattr).
            equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
          throw new AccessControlException("Access is denied for " +
              pc.getUser() + " since the superuser is not allowed to " +
              "perform this operation.");
        }
      }
    }
  }

  /**
   * Verify the caller is the superuser; no-op when permission checking is
   * disabled.
   */
  @Override
  public void checkSuperuserPrivilege()
      throws AccessControlException {
    if (isPermissionEnabled) {
      FSPermissionChecker pc = getPermissionChecker();
      pc.checkSuperuserPrivilege();
    }
  }

  /**
   * Check to see if we have exceeded the limit on the number
   * of inodes.
   * @throws IOException when the configured max object count is exceeded
   */
  void checkFsObjectLimit() throws IOException {
    // maxFsObjects == 0 means no limit is configured.
    if (maxFsObjects != 0 &&
        maxFsObjects <= dir.totalInodes() + getBlocksTotal()) {
      throw new IOException("Exceeded the configured number of objects " +
                             maxFsObjects + " in the filesystem.");
    }
  }
  /**
   * Get the total number of objects in the system.
   */
  @Override // FSNamesystemMBean
  public long getMaxObjects() {
    return maxFsObjects;
  }

  /** @return total number of inodes (files and directories). */
  @Override // FSNamesystemMBean
  @Metric
  public long getFilesTotal() {
    // There is no need to take fSNamesystem's lock as
    // FSDirectory has its own lock.
    return this.dir.totalInodes();
  }

  /** @return number of blocks pending replication. */
  @Override // FSNamesystemMBean
  @Metric
  public long getPendingReplicationBlocks() {
    return blockManager.getPendingReplicationBlocksCount();
  }

  /** @return number of blocks below their target replication. */
  @Override // FSNamesystemMBean
  @Metric
  public long getUnderReplicatedBlocks() {
    return blockManager.getUnderReplicatedBlocksCount();
  }

  /** Returns number of blocks with corrupt replicas */
  @Metric({"CorruptBlocks", "Number of blocks with corrupt replicas"})
  public long getCorruptReplicaBlocks() {
    return blockManager.getCorruptReplicaBlocksCount();
  }

  /** @return number of blocks with replication currently scheduled. */
  @Override // FSNamesystemMBean
  @Metric
  public long getScheduledReplicationBlocks() {
    return blockManager.getScheduledReplicationBlocksCount();
  }

  /** @return number of blocks queued for deletion. */
  @Override
  @Metric
  public long getPendingDeletionBlocks() {
    return blockManager.getPendingDeletionBlocksCount();
  }

  /** @return time at which block deletion may begin after startup. */
  @Override
  public long getBlockDeletionStartTime() {
    return startTime + blockManager.getStartupDelayBlockDeletionInMs();
  }

  /** @return number of over-replicated ("excess") block replicas. */
  @Metric
  public long getExcessBlocks() {
    return blockManager.getExcessBlocksCount();
  }
  
  // HA-only metric
  @Metric
  public long getPostponedMisreplicatedBlocks() {
    return blockManager.getPostponedMisreplicatedBlocksCount();
  }

  // HA-only metric
  @Metric
  public int getPendingDataNodeMessageCount() {
    return blockManager.getPendingDataNodeMessageCount();
  }
  
  // HA-only metric
  @Metric
  public String getHAState() {
    return haContext.getState().toString();
  }

  // HA-only metric: milliseconds since the standby last applied edits;
  // 0 when not in standby state or no edit-log tailer is present.
  @Metric
  public long getMillisSinceLastLoadedEdits() {
    if (isInStandbyState() && editLogTailer != null) {
      return now() - editLogTailer.getLastLoadTimestamp();
    } else {
      return 0;
    }
  }
  
  /** @return current capacity of the BlockManager's block map. */
  @Metric
  public int getBlockCapacity() {
    return blockManager.getCapacity();
  }

  /** @return "safeMode" or "Operational" for the FSNamesystemState MBean. */
  @Override // FSNamesystemMBean
  public String getFSState() {
    return isInSafeMode() ? "safeMode" : "Operational";
  }
  
  // JMX object names; non-null only while the beans are registered.
  private ObjectName mbeanName;
  private ObjectName mxbeanName;
  /**
   * Register the FSNamesystem MBean using the name
   * "hadoop:service=NameNode,name=FSNamesystemState"
   */
  private void registerMBean() {
    // We can only implement one MXBean interface, so we keep the old one.
    try {
      // StandardMBean exposes the FSNamesystemMBean interface directly.
      StandardMBean bean = new StandardMBean(this, FSNamesystemMBean.class);
      mbeanName = MBeans.register("NameNode", "FSNamesystemState", bean);
    } catch (NotCompliantMBeanException e) {
      throw new RuntimeException("Bad MBean setup", e);
    }

    LOG.info("Registered FSNamesystemState MBean");
  }
  /**
   * shutdown FSNamesystem: stop the snapshot manager, unregister the JMX
   * beans, and shut down the directory and the block manager.
   */
  void shutdown() {
    if (snapshotManager != null) {
      snapshotManager.shutdown();
    }
    if (mbeanName != null) {
      MBeans.unregister(mbeanName);
      mbeanName = null;
    }
    if (mxbeanName != null) {
      MBeans.unregister(mxbeanName);
      mxbeanName = null;
    }
    if (dir != null) {
      dir.shutdown();
    }
    if (blockManager != null) {
      blockManager.shutdown();
    }
  }
  /** @return number of live datanodes. */
  @Override // FSNamesystemMBean
  public int getNumLiveDataNodes() {
    return getBlockManager().getDatanodeManager().getNumLiveDataNodes();
  }

  /** @return number of dead datanodes. */
  @Override // FSNamesystemMBean
  public int getNumDeadDataNodes() {
    return getBlockManager().getDatanodeManager().getNumDeadDataNodes();
  }
  
  /** @return number of live datanodes that are decommissioned. */
  @Override // FSNamesystemMBean
  public int getNumDecomLiveDataNodes() {
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
    int liveDecommissioned = 0;
    for (DatanodeDescriptor node : live) {
      liveDecommissioned += node.isDecommissioned() ? 1 : 0;
    }
    return liveDecommissioned;
  }

  /** @return number of dead datanodes that are decommissioned. */
  @Override // FSNamesystemMBean
  public int getNumDecomDeadDataNodes() {
    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
    getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true);
    int deadDecommissioned = 0;
    for (DatanodeDescriptor node : dead) {
      deadDecommissioned += node.isDecommissioned() ? 1 : 0;
    }
    return deadDecommissioned;
  }

  /** @return number of datanodes currently decommissioning. */
  @Override // FSNamesystemMBean
  public int getNumDecommissioningDataNodes() {
    return getBlockManager().getDatanodeManager().getDecommissioningNodes()
        .size();
  }

  @Override // FSNamesystemMBean
  @Metric({"StaleDataNodes", 
    "Number of datanodes marked stale due to delayed heartbeat"})
  public int getNumStaleDataNodes() {
    return getBlockManager().getDatanodeManager().getNumStaleNodes();
  }

  /**
   * Storages are marked as "content stale" after NN restart or fails over and
   * before NN receives the first Heartbeat followed by the first Blockreport.
   */
  @Override // FSNamesystemMBean
  public int getNumStaleStorages() {
    return getBlockManager().getDatanodeManager().getNumStaleStorages();
  }
  /**
   * Serialize the nntop per-window top-user operation counts as JSON.
   * @return a JSON document, or null when nntop is disabled or
   * serialization fails
   */
  @Override // FSNamesystemMBean
  public String getTopUserOpCounts() {
    if (!topConf.isEnabled) {
      return null;
    }

    Date now = new Date();
    final List<RollingWindowManager.TopWindow> topWindows =
        topMetrics.getTopWindows();
    Map<String, Object> topMap = new TreeMap<String, Object>();
    topMap.put("windows", topWindows);
    topMap.put("timestamp", DFSUtil.dateToIso8601String(now));
    ObjectMapper mapper = new ObjectMapper();
    try {
      return mapper.writeValueAsString(topMap);
    } catch (IOException e) {
      LOG.warn("Failed to fetch TopUser metrics", e);
    }
    return null;
  }
  /**
   * Increments, logs and then returns the stamp
   * @param legacyBlock true to draw from the legacy (v1) stamp sequence
   */
  long nextGenerationStamp(boolean legacyBlock)
      throws IOException, SafeModeException {
    assert hasWriteLock();
    checkNameNodeSafeMode("Cannot get next generation stamp");

    long gs = blockIdManager.nextGenerationStamp(legacyBlock);
    if (legacyBlock) {
      getEditLog().logGenerationStampV1(gs);
    } else {
      getEditLog().logGenerationStampV2(gs);
    }

    // NB: callers sync the log
    return gs;
  }

  /**
   * Increments, logs and then returns the block ID
   */
  private long nextBlockId() throws IOException {
    assert hasWriteLock();
    checkNameNodeSafeMode("Cannot get next block ID");
    final long blockId = blockIdManager.nextBlockId();
    getEditLog().logAllocateBlockId(blockId);
    // NB: callers sync the log
    return blockId;
  }
  /**
   * Determine whether a file has effectively been deleted: gone from the
   * inode map, detached from the directory tree by a recursive ancestor
   * delete, or marked deleted in its snapshot feature.
   */
  private boolean isFileDeleted(INodeFile file) {
    // Not in the inodeMap or in the snapshot but marked deleted.
    if (dir.getInode(file.getId()) == null) {
      return true;
    }

    // look at the path hierarchy to see if one parent is deleted by recursive
    // deletion
    INode tmpChild = file;
    INodeDirectory tmpParent = file.getParent();
    while (true) {
      if (tmpParent == null) {
        return true;
      }

      INode childINode = tmpParent.getChild(tmpChild.getLocalNameBytes(),
          Snapshot.CURRENT_STATE_ID);
      if (childINode == null || !childINode.equals(tmpChild)) {
        // a newly created INode with the same name as an already deleted one
        // would be a different INode than the deleted one
        return true;
      }

      if (tmpParent.isRoot()) {
        break;
      }

      tmpChild = tmpParent;
      tmpParent = tmpParent.getParent();
    }

    if (file.isWithSnapshot() &&
        file.getFileWithSnapshotFeature().isCurrentFileDeleted()) {
      return true;
    }
    return false;
  }
  /**
   * Verify that a block is under construction and leased by the caller.
   * @param block the block being written
   * @param clientName the client claiming the lease
   * @return the INodeFile the block belongs to
   * @throws IOException if the block or its file does not exist or is not
   *         under construction
   * @throws LeaseExpiredException if {@code clientName} does not hold the
   *         lease on the file
   */
  private INodeFile checkUCBlock(ExtendedBlock block,
      String clientName) throws IOException {
    assert hasWriteLock();
    checkNameNodeSafeMode("Cannot get a new generation stamp and an "
        + "access token for block " + block);
    
    // check stored block state
    BlockInfoContiguous storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
    if (storedBlock == null || 
        storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
        throw new IOException(block + 
            " does not exist or is not under Construction" + storedBlock);
    }
    
    // check file inode
    final INodeFile file = ((INode)storedBlock.getBlockCollection()).asFile();
    if (file == null || !file.isUnderConstruction() || isFileDeleted(file)) {
      throw new IOException("The file " + storedBlock + 
          " belonged to does not exist or it is not under construction.");
    }
    
    // check lease
    if (clientName == null
        || !clientName.equals(file.getFileUnderConstructionFeature()
            .getClientName())) {
      throw new LeaseExpiredException("Lease mismatch: " + block + 
          " is accessed by a non lease holder " + clientName); 
    }

    return file;
  }
  /**
   * Client is reporting some bad block locations.
   * Marks each reported replica as corrupt in the BlockManager.
   */
  void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
    checkOperation(OperationCategory.WRITE);
    NameNode.stateChangeLog.info("*DIR* reportBadBlocks");
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      for (int i = 0; i < blocks.length; i++) {
        ExtendedBlock blk = blocks[i].getBlock();
        DatanodeInfo[] nodes = blocks[i].getLocations();
        String[] storageIDs = blocks[i].getStorageIDs();
        for (int j = 0; j < nodes.length; j++) {
          // storageIDs may be null; pass a null storage in that case.
          blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j],
              storageIDs == null ? null: storageIDs[j],
              "client machine reported it");
        }
      }
    } finally {
      writeUnlock();
    }
  }
  /**
   * Get a new generation stamp together with an access token for 
   * a block under construction
   * 
   * This method is called for recovering a failed pipeline or setting up
   * a pipeline to append to a block.
   * 
   * @param block a block
   * @param clientName the name of a client
   * @return a located block with a new generation stamp and an access token
   * @throws IOException if any error occurs
   */
  LocatedBlock updateBlockForPipeline(ExtendedBlock block, 
      String clientName) throws IOException {
    LocatedBlock locatedBlock;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);

      // check validity of parameters
      checkUCBlock(block, clientName);
  
      // get a new generation stamp and an access token
      block.setGenerationStamp(nextGenerationStamp(blockIdManager.isLegacyBlock(block.getLocalBlock())));
      locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
      blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
    } finally {
      writeUnlock();
    }
    // Ensure we record the new generation stamp
    getEditLog().logSync();
    return locatedBlock;
  }
  /**
   * Update a pipeline for a block under construction
   * 
   * @param clientName the name of the client
   * @param oldBlock and old block
   * @param newBlock a new block with a new generation stamp and length
   * @param newNodes datanodes in the pipeline
   * @param newStorageIDs storage IDs matching {@code newNodes}
   * @param logRetryCache whether to record this call in the retry cache
   * @throws IOException if any error occurs
   */
  void updatePipeline(
      String clientName, ExtendedBlock oldBlock, ExtendedBlock newBlock,
      DatanodeID[] newNodes, String[] newStorageIDs, boolean logRetryCache)
      throws IOException {
    checkOperation(OperationCategory.WRITE);

    LOG.info("updatePipeline(" + oldBlock.getLocalBlock()
             + ", newGS=" + newBlock.getGenerationStamp()
             + ", newLength=" + newBlock.getNumBytes()
             + ", newNodes=" + Arrays.asList(newNodes)
             + ", client=" + clientName
             + ")");
    waitForLoadingFSImage();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Pipeline not updated");
      assert newBlock.getBlockId()==oldBlock.getBlockId() : newBlock + " and "
        + oldBlock + " has different block identifier";
      updatePipelineInternal(clientName, oldBlock, newBlock, newNodes,
          newStorageIDs, logRetryCache);
    } finally {
      writeUnlock();
    }
    // Persist the pipeline change before reporting success.
    getEditLog().logSync();
    LOG.info("updatePipeline(" + oldBlock.getLocalBlock() + " => "
        + newBlock.getLocalBlock() + ") success");
  }
  /** Apply a pipeline update after validation; caller holds the write lock. */
  private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock,
      ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs,
      boolean logRetryCache)
      throws IOException {
    assert hasWriteLock();
    // check the validity of the block and lease holder name
    final INodeFile pendingFile = checkUCBlock(oldBlock, clientName);
    final BlockInfoContiguousUnderConstruction blockinfo
        = (BlockInfoContiguousUnderConstruction)pendingFile.getLastBlock();

    // check new GS & length: this is not expected
    if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() ||
        newBlock.getNumBytes() < blockinfo.getNumBytes()) {
      String msg = "Update " + oldBlock + " (len = " + 
        blockinfo.getNumBytes() + ") to an older state: " + newBlock + 
        " (len = " + newBlock.getNumBytes() +")";
      LOG.warn(msg);
      throw new IOException(msg);
    }

    // Update old block with the new generation stamp and new length
    blockinfo.setNumBytes(newBlock.getNumBytes());
    blockinfo.setGenerationStampAndVerifyReplicas(newBlock.getGenerationStamp());

    // find the DatanodeDescriptor objects
    final DatanodeStorageInfo[] storages = blockManager.getDatanodeManager()
        .getDatanodeStorageInfos(newNodes, newStorageIDs);
    blockinfo.setExpectedLocations(storages);

    String src = pendingFile.getFullPathName();
    persistBlocks(src, pendingFile, logRetryCache);
  }
  // rename was successful. If any part of the renamed subtree had
  // files that were being written to, update with new filename.
  // "Unprotected": caller must hold the write lock; no edit-log entry is
  // written here.
  void unprotectedChangeLease(String src, String dst) {
    assert hasWriteLock();
    leaseManager.changeLease(src, dst);
  }
  /**
   * Serializes leases.
   * @param out stream to write the under-construction file section to
   * @param snapshotUCMap under-construction files captured in snapshots,
   *        keyed by inode id; entries also present in the live lease map
   *        are removed from it before serialization
   */
  void saveFilesUnderConstruction(DataOutputStream out,
      Map<Long, INodeFile> snapshotUCMap) throws IOException {
    // This is run by an inferior thread of saveNamespace, which holds a read
    // lock on our behalf. If we took the read lock here, we could block
    // for fairness if a writer is waiting on the lock.
    synchronized (leaseManager) {
      Map<String, INodeFile> nodes = leaseManager.getINodesUnderConstruction();
      for (Map.Entry<String, INodeFile> entry : nodes.entrySet()) {
        // TODO: for HDFS-5428, because of rename operations, some
        // under-construction files that are
        // in the current fs directory can also be captured in the
        // snapshotUCMap. We should remove them from the snapshotUCMap.
        snapshotUCMap.remove(entry.getValue().getId());
      }

      out.writeInt(nodes.size() + snapshotUCMap.size()); // write the size
      for (Map.Entry<String, INodeFile> entry : nodes.entrySet()) {
        FSImageSerialization.writeINodeUnderConstruction(
            out, entry.getValue(), entry.getKey());
      }
      for (Map.Entry<Long, INodeFile> entry : snapshotUCMap.entrySet()) {
        // for those snapshot INodeFileUC, we use "/.reserved/.inodes/<inodeid>"
        // as their paths
        StringBuilder b = new StringBuilder();
        b.append(FSDirectory.DOT_RESERVED_PATH_PREFIX)
            .append(Path.SEPARATOR).append(FSDirectory.DOT_INODES_STRING)
            .append(Path.SEPARATOR).append(entry.getValue().getId());
        FSImageSerialization.writeINodeUnderConstruction(
            out, entry.getValue(), b.toString());
      }
    }
  }
  /**
   * @return all the under-construction files in the lease map
   */
  Map<String, INodeFile> getFilesUnderConstruction() {
    // Synchronize on leaseManager for a consistent view of the lease map.
    synchronized (leaseManager) {
      return leaseManager.getINodesUnderConstruction();
    }
  }
  /**
   * Register a Backup name-node, verifying that it belongs
   * to the correct namespace, and adding it to the set of
   * active journals if necessary.
   * 
   * @param bnReg registration of the new BackupNode
   * @param nnReg registration of this NameNode
   * @throws IOException if the namespace IDs do not match
   */
  void registerBackupNode(NamenodeRegistration bnReg,
      NamenodeRegistration nnReg) throws IOException {
    writeLock();
    try {
      if(getFSImage().getStorage().getNamespaceID() 
         != bnReg.getNamespaceID())
        throw new IOException("Incompatible namespaceIDs: "
            + " Namenode namespaceID = "
            + getFSImage().getStorage().getNamespaceID() + "; "
            + bnReg.getRole() +
            " node namespaceID = " + bnReg.getNamespaceID());
      // Only BACKUP-role registrations get a backup journal stream.
      if (bnReg.getRole() == NamenodeRole.BACKUP) {
        getFSImage().getEditLog().registerBackupNode(
            bnReg, nnReg);
      }
    } finally {
      writeUnlock();
    }
  }
  /**
   * Release (unregister) backup node.
   * <p>
   * Find and remove the backup stream corresponding to the node.
   * @throws IOException if the registration's namespace ID does not match
   */
  void releaseBackupNode(NamenodeRegistration registration)
    throws IOException {
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      if(getFSImage().getStorage().getNamespaceID()
         != registration.getNamespaceID())
        throw new IOException("Incompatible namespaceIDs: "
            + " Namenode namespaceID = "
            + getFSImage().getStorage().getNamespaceID() + "; "
            + registration.getRole() +
            " node namespaceID = " + registration.getNamespaceID());
      getEditLog().releaseBackupStream(registration);
    } finally {
      writeUnlock();
    }
  }
  /**
   * Pairs a corrupt block with the full path of the file it belongs to,
   * as returned by listCorruptFileBlocks.
   */
  static class CorruptFileBlockInfo {
    // Full path of the file containing the corrupt block.
    final String path;
    // The corrupt block itself.
    final Block block;
    
    public CorruptFileBlockInfo(String p, Block b) {
      path = p;
      block = b;
    }
    
    @Override
    public String toString() {
      return block.getBlockName() + "\t" + path;
    }
  }
  /**
   * @param path Restrict corrupt files to this portion of namespace.
   * @param cookieTab Support for continuation; cookieTab tells where
   *                  to start from
   * @return a list in which each entry describes a corrupt file/block
   * @throws IOException
   */
  Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
      String[] cookieTab) throws IOException {
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.READ);

    int count = 0;
    ArrayList<CorruptFileBlockInfo> corruptFiles =
        new ArrayList<CorruptFileBlockInfo>();
    if (cookieTab == null) {
      cookieTab = new String[] { null };
    }

    // Do a quick check if there are any corrupt files without taking the lock
    if (blockManager.getMissingBlocksCount() == 0) {
      if (cookieTab[0] == null) {
        cookieTab[0] = String.valueOf(getIntCookie(cookieTab[0]));
      }
      LOG.info("there are no corrupt file blocks.");
      return corruptFiles;
    }

    readLock();
    try {
      checkOperation(OperationCategory.READ);
      if (!isPopulatingReplQueues()) {
        throw new IOException("Cannot run listCorruptFileBlocks because " +
                              "replication queues have not been initialized.");
      }
      // print a limited # of corrupt files per call
      final Iterator<Block> blkIterator = blockManager.getCorruptReplicaBlockIterator();

      // Skip the entries already returned by previous calls (continuation).
      int skip = getIntCookie(cookieTab[0]);
      for (int i = 0; i < skip && blkIterator.hasNext(); i++) {
        blkIterator.next();
      }

      while (blkIterator.hasNext()) {
        Block blk = blkIterator.next();
        final INode inode = (INode)blockManager.getBlockCollection(blk);
        skip++;
        // Only report blocks that still belong to a file and have no live
        // replicas under the requested path prefix.
        if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
          String src = FSDirectory.getFullPathName(inode);
          if (src.startsWith(path)){
            corruptFiles.add(new CorruptFileBlockInfo(src, blk));
            count++;
            if (count >= DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED)
              break;
          }
        }
      }
      // Record the new continuation cookie for the next call.
      cookieTab[0] = String.valueOf(skip);
      LOG.info("list corrupt file blocks returned: " + count);
      return corruptFiles;
    } finally {
      readUnlock();
    }
  }
/**
* Convert string cookie to integer.
*/
private static int getIntCookie(String cookie){
int c;
if(cookie == null){
c = 0;
} else {
try{
c = Integer.parseInt(cookie);
}catch (NumberFormatException e) {
c = 0;
}
}
c = Math.max(0, c);
return c;
}
  /**
   * Create delegation token secret manager
   * Key-update interval, token max lifetime and renew interval are read
   * from configuration, falling back to the documented defaults.
   */
  private DelegationTokenSecretManager createDelegationTokenSecretManager(
      Configuration conf) {
    return new DelegationTokenSecretManager(conf.getLong(
        DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY,
        DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT),
        conf.getLong(DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY,
            DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT),
        conf.getLong(DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
            DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT),
        DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL,
        conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY,
            DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT),
        this);
  }
/**
* Returns the DelegationTokenSecretManager instance in the namesystem.
* @return delegation token secret manager object
*/
DelegationTokenSecretManager getDelegationTokenSecretManager() {
return dtSecretManager;
}
  /**
   * Issue a new delegation token for the current remote user.
   * @param renewer Renewer information
   * @return delegation token, or null if the secret manager is not running
   * @throws IOException on error, including when the connection is not
   *         authenticated via kerberos or web authentication
   */
  Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
      throws IOException {
    Token<DelegationTokenIdentifier> token;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot issue delegation token");
      if (!isAllowedDelegationTokenOp()) {
        throw new IOException(
          "Delegation Token can be issued only with kerberos or web authentication");
      }
      if (dtSecretManager == null || !dtSecretManager.isRunning()) {
        LOG.warn("trying to get DT with no secret manager running");
        return null;
      }
      UserGroupInformation ugi = getRemoteUser();
      String user = ugi.getUserName();
      Text owner = new Text(user);
      Text realUser = null;
      // For proxy users, record the real (authenticating) user in the token.
      if (ugi.getRealUser() != null) {
        realUser = new Text(ugi.getRealUser().getUserName());
      }
      DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner,
        renewer, realUser);
      token = new Token<DelegationTokenIdentifier>(
        dtId, dtSecretManager);
      long expiryTime = dtSecretManager.getTokenExpiryTime(dtId);
      getEditLog().logGetDelegationToken(dtId, expiryTime);
    } finally {
      writeUnlock();
    }
    // Sync outside the lock; the edit was logged while holding it.
    getEditLog().logSync();
    return token;
  }
  /**
   * Renew a delegation token on behalf of the current remote user.
   * @param token token to renew
   * @return new expiryTime of the token
   * @throws InvalidToken if {@code token} is invalid
   * @throws IOException on other errors
   */
  long renewDelegationToken(Token<DelegationTokenIdentifier> token)
      throws InvalidToken, IOException {
    long expiryTime;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot renew delegation token");
      if (!isAllowedDelegationTokenOp()) {
        throw new IOException(
            "Delegation Token can be renewed only with kerberos or web authentication");
      }
      String renewer = getRemoteUser().getShortUserName();
      expiryTime = dtSecretManager.renewToken(token, renewer);
      // Decode the identifier from the token bytes so the renewal can be
      // recorded in the edit log.
      DelegationTokenIdentifier id = new DelegationTokenIdentifier();
      ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
      DataInputStream in = new DataInputStream(buf);
      id.readFields(in);
      getEditLog().logRenewDelegationToken(id, expiryTime);
    } finally {
      writeUnlock();
    }
    // Sync outside the lock; the edit was logged while holding it.
    getEditLog().logSync();
    return expiryTime;
  }
  /**
   * Cancel a delegation token on behalf of the current remote user.
   * @param token token to cancel
   * @throws IOException on error
   */
  void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
      throws IOException {
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot cancel delegation token");
      String canceller = getRemoteUser().getUserName();
      DelegationTokenIdentifier id = dtSecretManager
        .cancelToken(token, canceller);
      getEditLog().logCancelDelegationToken(id);
    } finally {
      writeUnlock();
    }
    // Sync outside the lock; the edit was logged while holding it.
    getEditLog().logSync();
  }
  /**
   * Save the state of the delegation token secret manager in the legacy
   * (pre-protobuf) fsimage format.
   * @param out save state of the secret manager
   * @param sdPath String storage directory path
   */
  void saveSecretManagerStateCompat(DataOutputStream out, String sdPath)
      throws IOException {
    dtSecretManager.saveSecretManagerStateCompat(out, sdPath);
  }
  // Snapshot of the secret manager state for the protobuf fsimage writer.
  SecretManagerState saveSecretManagerState() {
    return dtSecretManager.saveSecretManagerState();
  }
  /**
   * Restore secret manager state from the legacy fsimage format.
   * @param in load the state of secret manager from input stream
   */
  void loadSecretManagerStateCompat(DataInput in) throws IOException {
    dtSecretManager.loadSecretManagerStateCompat(in);
  }
  // Restore secret manager state from the protobuf fsimage sections.
  void loadSecretManagerState(SecretManagerSection s,
      List<SecretManagerSection.DelegationKey> keys,
      List<SecretManagerSection.PersistToken> tokens) throws IOException {
    dtSecretManager.loadSecretManagerState(new SecretManagerState(s, keys, tokens));
  }
  /**
   * Log the updateMasterKey operation to edit logs
   *
   * @param key new delegation key.
   */
  public void logUpdateMasterKey(DelegationKey key) {
    assert !isInSafeMode() :
      "this should never be called while in safemode, since we stop " +
      "the DT manager before entering safemode!";
    // No need to hold FSN lock since we don't access any internal
    // structures, and this is stopped before the FSN shuts itself
    // down, etc.
    getEditLog().logUpdateMasterKey(key);
    getEditLog().logSync();
  }
  /**
   * Log the cancellation of expired tokens to edit logs
   *
   * @param id token identifier to cancel
   */
  public void logExpireDelegationToken(DelegationTokenIdentifier id) {
    assert !isInSafeMode() :
      "this should never be called while in safemode, since we stop " +
      "the DT manager before entering safemode!";
    // No need to hold FSN lock since we don't access any internal
    // structures, and this is stopped before the FSN shuts itself
    // down, etc.
    // NOTE: no logSync() here -- expired-token cancellations are batched
    // and synced by the caller.
    getEditLog().logCancelDelegationToken(id);
  }
  // Record a lease reassignment in the edit log; caller must hold the
  // write lock.
  private void logReassignLease(String leaseHolder, String src,
      String newHolder) {
    assert hasWriteLock();
    getEditLog().logReassignLease(leaseHolder, src, newHolder);
  }
  /**
   * Whether a delegation-token operation (get/renew) is permitted over the
   * current connection. With security enabled, only kerberos-derived or
   * certificate-based authentication may manipulate delegation tokens.
   * @return true if delegation token operation is allowed
   */
  private boolean isAllowedDelegationTokenOp() throws IOException {
    AuthenticationMethod authMethod = getConnectionAuthenticationMethod();
    if (UserGroupInformation.isSecurityEnabled()
        && (authMethod != AuthenticationMethod.KERBEROS)
        && (authMethod != AuthenticationMethod.KERBEROS_SSL)
        && (authMethod != AuthenticationMethod.CERTIFICATE)) {
      return false;
    }
    return true;
  }
  /**
   * Returns authentication method used to establish the connection.
   * For proxy users, the real (authenticating) user's method is returned.
   * @return AuthenticationMethod used to establish connection
   * @throws IOException
   */
  private AuthenticationMethod getConnectionAuthenticationMethod()
      throws IOException {
    UserGroupInformation ugi = getRemoteUser();
    AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
    if (authMethod == AuthenticationMethod.PROXY) {
      authMethod = ugi.getRealUser().getAuthenticationMethod();
    }
    return authMethod;
  }
  /**
   * Client invoked methods are invoked over RPC and will be in
   * RPC call context even if the client exits.
   */
  boolean isExternalInvocation() {
    return Server.isRpcInvocation() || NamenodeWebHdfsMethods.isWebHdfsInvocation();
  }
  // Address of the remote caller: prefer the RPC layer, fall back to WebHDFS.
  private static InetAddress getRemoteIp() {
    InetAddress ip = Server.getRemoteIp();
    if (ip != null) {
      return ip;
    }
    return NamenodeWebHdfsMethods.getRemoteIp();
  }
  // optimize ugi lookup for RPC operations to avoid a trip through
  // UGI.getCurrentUser which is synch'ed
  private static UserGroupInformation getRemoteUser() throws IOException {
    return NameNode.getRemoteUser();
  }
  /**
   * Log fsck event in the audit log
   */
  void logFsckEvent(String src, InetAddress remoteAddress) throws IOException {
    if (isAuditEnabled()) {
      logAuditEvent(true, getRemoteUser(),
                    remoteAddress,
                    "fsck", src, null, null);
    }
  }
  /**
   * Register NameNodeMXBean
   */
  private void registerMXBean() {
    mxbeanName = MBeans.register("NameNode", "NameNodeInfo", this);
  }
  // --- NameNodeMXBean implementation: simple metric/status accessors ---
  @Override // NameNodeMXBean
  public String getVersion() {
    return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
  }
  @Override // NameNodeMXBean
  public long getUsed() {
    return this.getCapacityUsed();
  }
  @Override // NameNodeMXBean
  public long getFree() {
    return this.getCapacityRemaining();
  }
  @Override // NameNodeMXBean
  public long getTotal() {
    return this.getCapacityTotal();
  }
  @Override // NameNodeMXBean
  public String getSafemode() {
    // Empty string signals "not in safe mode" to JMX consumers.
    if (!this.isInSafeMode())
      return "";
    return "Safe mode is ON. " + this.getSafeModeTip();
  }
  @Override // NameNodeMXBean
  public boolean isUpgradeFinalized() {
    return this.getFSImage().isUpgradeFinalized();
  }
  @Override // NameNodeMXBean
  public long getNonDfsUsedSpace() {
    return datanodeStatistics.getCapacityUsedNonDFS();
  }
  @Override // NameNodeMXBean
  public float getPercentUsed() {
    return datanodeStatistics.getCapacityUsedPercent();
  }
  @Override // NameNodeMXBean
  public long getBlockPoolUsedSpace() {
    return datanodeStatistics.getBlockPoolUsed();
  }
  @Override // NameNodeMXBean
  public float getPercentBlockPoolUsed() {
    return datanodeStatistics.getPercentBlockPoolUsed();
  }
  @Override // NameNodeMXBean
  public float getPercentRemaining() {
    return datanodeStatistics.getCapacityRemainingPercent();
  }
  @Override // NameNodeMXBean
  public long getCacheCapacity() {
    return datanodeStatistics.getCacheCapacity();
  }
  @Override // NameNodeMXBean
  public long getCacheUsed() {
    return datanodeStatistics.getCacheUsed();
  }
  @Override // NameNodeMXBean
  public long getTotalBlocks() {
    return getBlocksTotal();
  }
  @Override // NameNodeMXBean
  @Metric
  public long getTotalFiles() {
    return getFilesTotal();
  }
  @Override // NameNodeMXBean
  public long getNumberOfMissingBlocks() {
    return getMissingBlocksCount();
  }
  @Override // NameNodeMXBean
  public long getNumberOfMissingBlocksWithReplicationFactorOne() {
    return getMissingReplOneBlocksCount();
  }
  @Override // NameNodeMXBean
  public int getThreads() {
    return ManagementFactory.getThreadMXBean().getThreadCount();
  }
  /**
   * Returned information is a JSON representation of map with host name as the
   * key and value is a map of live node attribute keys to its values
   */
  @Override // NameNodeMXBean
  public String getLiveNodes() {
    final Map<String, Map<String,Object>> info =
      new HashMap<String, Map<String,Object>>();
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
    for (DatanodeDescriptor node : live) {
      // One immutable attribute map per live datanode.
      Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
          .put("infoAddr", node.getInfoAddr())
          .put("infoSecureAddr", node.getInfoSecureAddr())
          .put("xferaddr", node.getXferAddr())
          .put("lastContact", getLastContact(node))
          .put("usedSpace", getDfsUsed(node))
          .put("adminState", node.getAdminState().toString())
          .put("nonDfsUsedSpace", node.getNonDfsUsed())
          .put("capacity", node.getCapacity())
          .put("numBlocks", node.numBlocks())
          .put("version", node.getSoftwareVersion())
          .put("used", node.getDfsUsed())
          .put("remaining", node.getRemaining())
          .put("blockScheduled", node.getBlocksScheduled())
          .put("blockPoolUsed", node.getBlockPoolUsed())
          .put("blockPoolUsedPercent", node.getBlockPoolUsedPercent())
          .put("volfails", node.getVolumeFailures())
          .build();
      // Key is "hostname:xferPort" to disambiguate multiple DNs per host.
      info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo);
    }
    return JSON.toString(info);
  }
  /**
   * Returned information is a JSON representation of map with host name as the
   * key and value is a map of dead node attribute keys to its values
   */
  @Override // NameNodeMXBean
  public String getDeadNodes() {
    final Map<String, Map<String, Object>> info =
        new HashMap<String, Map<String, Object>>();
    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
    blockManager.getDatanodeManager().fetchDatanodes(null, dead, true);
    for (DatanodeDescriptor node : dead) {
      Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
          .put("lastContact", getLastContact(node))
          .put("decommissioned", node.isDecommissioned())
          .put("xferaddr", node.getXferAddr())
          .build();
      // Key is "hostname:xferPort" to disambiguate multiple DNs per host.
      info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo);
    }
    return JSON.toString(info);
  }
  /**
   * Returned information is a JSON representation of map with host name as the
   * key and value is a map of decommissioning node attribute keys to its
   * values
   */
  @Override // NameNodeMXBean
  public String getDecomNodes() {
    final Map<String, Map<String, Object>> info =
        new HashMap<String, Map<String, Object>>();
    final List<DatanodeDescriptor> decomNodeList = blockManager.getDatanodeManager(
        ).getDecommissioningNodes();
    for (DatanodeDescriptor node : decomNodeList) {
      // Expose the decommissioning progress counters for each node.
      Map<String, Object> innerinfo = ImmutableMap
          .<String, Object> builder()
          .put("xferaddr", node.getXferAddr())
          .put("underReplicatedBlocks",
              node.decommissioningStatus.getUnderReplicatedBlocks())
          .put("decommissionOnlyReplicas",
              node.decommissioningStatus.getDecommissionOnlyReplicas())
          .put("underReplicateInOpenFiles",
              node.decommissioningStatus.getUnderReplicatedInOpenFiles())
          .build();
      info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo);
    }
    return JSON.toString(info);
  }
  // Seconds since the datanode's last heartbeat.
  private long getLastContact(DatanodeDescriptor alivenode) {
    return (Time.now() - alivenode.getLastUpdate())/1000;
  }
  // Bytes of DFS storage used on the given datanode.
  private long getDfsUsed(DatanodeDescriptor alivenode) {
    return alivenode.getDfsUsed();
  }
  @Override // NameNodeMXBean
  public String getClusterId() {
    return getFSImage().getStorage().getClusterID();
  }
  @Override // NameNodeMXBean
  public String getBlockPoolId() {
    return blockPoolId;
  }
@Override // NameNodeMXBean
public String getNameDirStatuses() {
Map<String, Map<File, StorageDirType>> statusMap =
new HashMap<String, Map<File, StorageDirType>>();
Map<File, StorageDirType> activeDirs = new HashMap<File, StorageDirType>();
for (Iterator<StorageDirectory> it
= getFSImage().getStorage().dirIterator(); it.hasNext();) {
StorageDirectory st = it.next();
activeDirs.put(st.getRoot(), st.getStorageDirType());
}
statusMap.put("active", activeDirs);
List<Storage.StorageDirectory> removedStorageDirs
= getFSImage().getStorage().getRemovedStorageDirs();
Map<File, StorageDirType> failedDirs = new HashMap<File, StorageDirType>();
for (StorageDirectory st : removedStorageDirs) {
failedDirs.put(st.getRoot(), st.getStorageDirType());
}
statusMap.put("failed", failedDirs);
return JSON.toString(statusMap);
}
@Override // NameNodeMXBean
public String getNodeUsage() {
float median = 0;
float max = 0;
float min = 0;
float dev = 0;
final Map<String, Map<String,Object>> info =
new HashMap<String, Map<String,Object>>();
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
if (live.size() > 0) {
float totalDfsUsed = 0;
float[] usages = new float[live.size()];
int i = 0;
for (DatanodeDescriptor dn : live) {
usages[i++] = dn.getDfsUsedPercent();
totalDfsUsed += dn.getDfsUsedPercent();
}
totalDfsUsed /= live.size();
Arrays.sort(usages);
median = usages[usages.length / 2];
max = usages[usages.length - 1];
min = usages[0];
for (i = 0; i < usages.length; i++) {
dev += (usages[i] - totalDfsUsed) * (usages[i] - totalDfsUsed);
}
dev = (float) Math.sqrt(dev / usages.length);
}
final Map<String, Object> innerInfo = new HashMap<String, Object>();
innerInfo.put("min", StringUtils.format("%.2f%%", min));
innerInfo.put("median", StringUtils.format("%.2f%%", median));
innerInfo.put("max", StringUtils.format("%.2f%%", max));
innerInfo.put("stdDev", StringUtils.format("%.2f%%", dev));
info.put("nodeUsage", innerInfo);
return JSON.toString(info);
}
  @Override // NameNodeMXBean
  public String getNameJournalStatus() {
    // One JSON entry per configured journal, describing its manager and
    // the state of its current output stream.
    List<Map<String, String>> jasList = new ArrayList<Map<String, String>>();
    FSEditLog log = getFSImage().getEditLog();
    if (log != null) {
      boolean openForWrite = log.isOpenForWrite();
      for (JournalAndStream jas : log.getJournals()) {
        final Map<String, String> jasMap = new HashMap<String, String>();
        String manager = jas.getManager().toString();
        jasMap.put("required", String.valueOf(jas.isRequired()));
        jasMap.put("disabled", String.valueOf(jas.isDisabled()));
        jasMap.put("manager", manager);
        if (jas.isDisabled()) {
          jasMap.put("stream", "Failed");
        } else if (openForWrite) {
          EditLogOutputStream elos = jas.getCurrentStream();
          if (elos != null) {
            jasMap.put("stream", elos.generateReport());
          } else {
            jasMap.put("stream", "not currently writing");
          }
        } else {
          jasMap.put("stream", "open for read");
        }
        jasList.add(jasMap);
      }
    }
    return JSON.toString(jasList);
  }
  @Override // NameNodeMXBean
  public String getJournalTransactionInfo() {
    // Last applied/written vs. last checkpointed transaction ids.
    Map<String, String> txnIdMap = new HashMap<String, String>();
    txnIdMap.put("LastAppliedOrWrittenTxId",
        Long.toString(this.getFSImage().getLastAppliedOrWrittenTxId()));
    txnIdMap.put("MostRecentCheckpointTxId",
        Long.toString(this.getFSImage().getMostRecentCheckpointTxId()));
    return JSON.toString(txnIdMap);
  }
  @Override // NameNodeMXBean
  public String getNNStarted() {
    // Human-readable NameNode start time.
    return getStartTime().toString();
  }
  @Override // NameNodeMXBean
  public String getCompileInfo() {
    return VersionInfo.getDate() + " by " + VersionInfo.getUser() +
        " from " + VersionInfo.getBranch();
  }
  /** @return the block manager. */
  public BlockManager getBlockManager() {
    return blockManager;
  }
  /** @return the block id manager. */
  public BlockIdManager getBlockIdManager() {
    return blockIdManager;
  }
  /** @return the FSDirectory. */
  public FSDirectory getFSDirectory() {
    return dir;
  }
  /** Set the FSDirectory. */
  @VisibleForTesting
  public void setFSDirectory(FSDirectory dir) {
    this.dir = dir;
  }
  /** @return the cache manager. */
  public CacheManager getCacheManager() {
    return cacheManager;
  }
@Override // NameNodeMXBean
public String getCorruptFiles() {
List<String> list = new ArrayList<String>();
Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks;
try {
corruptFileBlocks = listCorruptFileBlocks("/", null);
int corruptFileCount = corruptFileBlocks.size();
if (corruptFileCount != 0) {
for (FSNamesystem.CorruptFileBlockInfo c : corruptFileBlocks) {
list.add(c.toString());
}
}
} catch (IOException e) {
LOG.warn("Get corrupt file blocks returned error: " + e.getMessage());
}
return JSON.toString(list);
}
  @Override //NameNodeMXBean
  public int getDistinctVersionCount() {
    // Number of distinct datanode software versions in the cluster.
    return blockManager.getDatanodeManager().getDatanodesSoftwareVersions()
      .size();
  }
  @Override //NameNodeMXBean
  public Map<String, Integer> getDistinctVersions() {
    // Map of datanode software version -> number of nodes running it.
    return blockManager.getDatanodeManager().getDatanodesSoftwareVersions();
  }
  @Override //NameNodeMXBean
  public String getSoftwareVersion() {
    return VersionInfo.getVersion();
  }
  /**
   * Verifies that the given identifier and password are valid and match.
   * @param identifier Token identifier.
   * @param password Password in the token.
   * @throws InvalidToken if the token does not verify
   * @throws RetriableException if verification fails while this NameNode
   *         is still transitioning to active (the client should retry)
   */
  public synchronized void verifyToken(DelegationTokenIdentifier identifier,
      byte[] password) throws InvalidToken, RetriableException {
    try {
      getDelegationTokenSecretManager().verifyToken(identifier, password);
    } catch (InvalidToken it) {
      // During failover the secret manager may not be caught up yet;
      // ask the client to retry rather than failing outright.
      if (inTransitionToActive()) {
        throw new RetriableException(it);
      }
      throw it;
    }
  }
  @Override
  public boolean isGenStampInFuture(Block block) {
    return blockIdManager.isGenStampInFuture(block);
  }
  // --- Accessors below are primarily for tests ---
  @VisibleForTesting
  public EditLogTailer getEditLogTailer() {
    return editLogTailer;
  }
  @VisibleForTesting
  public void setEditLogTailerForTests(EditLogTailer tailer) {
    this.editLogTailer = tailer;
  }
  @VisibleForTesting
  void setFsLockForTests(ReentrantReadWriteLock lock) {
    this.fsLock.coarseLock = lock;
  }
  @VisibleForTesting
  public ReentrantReadWriteLock getFsLockForTests() {
    return fsLock.coarseLock;
  }
  @VisibleForTesting
  public ReentrantLock getCpLockForTests() {
    return cpLock;
  }
  @VisibleForTesting
  public SafeModeInfo getSafeModeInfoForTests() {
    return safeMode;
  }
  @VisibleForTesting
  public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) {
    this.nnResourceChecker = nnResourceChecker;
  }
  public SnapshotManager getSnapshotManager() {
    return snapshotManager;
  }
  /** Allow snapshot on a directory (superuser only). */
  void allowSnapshot(String path) throws IOException {
    checkOperation(OperationCategory.WRITE);
    boolean success = false;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot allow snapshot for " + path);
      checkSuperuserPrivilege();
      FSDirSnapshotOp.allowSnapshot(dir, snapshotManager, path);
      success = true;
    } finally {
      writeUnlock();
    }
    // Sync outside the lock; audit regardless of outcome.
    getEditLog().logSync();
    logAuditEvent(success, "allowSnapshot", path, null, null);
  }
  /** Disallow snapshot on a directory (superuser only). */
  void disallowSnapshot(String path) throws IOException {
    checkOperation(OperationCategory.WRITE);
    boolean success = false;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot disallow snapshot for " + path);
      checkSuperuserPrivilege();
      FSDirSnapshotOp.disallowSnapshot(dir, snapshotManager, path);
      success = true;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(success, "disallowSnapshot", path, null, null);
  }
  /**
   * Create a snapshot
   * @param snapshotRoot The directory path where the snapshot is taken
   * @param snapshotName The name of the snapshot
   * @param logRetryCache whether to record this call in the retry cache
   * @return the full path of the created snapshot, or null on failure
   */
  String createSnapshot(String snapshotRoot, String snapshotName,
                        boolean logRetryCache) throws IOException {
    String snapshotPath = null;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot create snapshot for " + snapshotRoot);
      snapshotPath = FSDirSnapshotOp.createSnapshot(dir,
          snapshotManager, snapshotRoot, snapshotName, logRetryCache);
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    // A null snapshotPath means the operation did not complete.
    logAuditEvent(snapshotPath != null, "createSnapshot", snapshotRoot,
        snapshotPath, null);
    return snapshotPath;
  }
  /**
   * Rename a snapshot
   * @param path The directory path where the snapshot was taken
   * @param snapshotOldName Old snapshot name
   * @param snapshotNewName New snapshot name
   * @param logRetryCache whether to record this call in the retry cache
   * @throws SafeModeException
   * @throws IOException
   */
  void renameSnapshot(
      String path, String snapshotOldName, String snapshotNewName,
      boolean logRetryCache) throws IOException {
    checkOperation(OperationCategory.WRITE);
    boolean success = false;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot rename snapshot for " + path);
      FSDirSnapshotOp.renameSnapshot(dir, snapshotManager, path,
          snapshotOldName, snapshotNewName, logRetryCache);
      success = true;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    // Audit with both the old and new fully-qualified snapshot paths.
    String oldSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotOldName);
    String newSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotNewName);
    logAuditEvent(success, "renameSnapshot", oldSnapshotRoot,
        newSnapshotRoot, null);
  }
  /**
   * Get the list of snapshottable directories that are owned
   * by the current user. Return all the snapshottable directories if the
   * current user is a super user.
   * @return The list of all the current snapshottable directories
   * @throws IOException
   */
  public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
      throws IOException {
    SnapshottableDirectoryStatus[] status = null;
    checkOperation(OperationCategory.READ);
    boolean success = false;
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      status = FSDirSnapshotOp.getSnapshottableDirListing(dir, snapshotManager);
      success = true;
    } finally {
      readUnlock();
    }
    logAuditEvent(success, "listSnapshottableDirectory", null, null, null);
    return status;
  }
  /**
   * Get the difference between two snapshots (or between a snapshot and the
   * current status) of a snapshottable directory.
   *
   * @param path The full path of the snapshottable directory.
   * @param fromSnapshot Name of the snapshot to calculate the diff from. Null
   *          or empty string indicates the current tree.
   * @param toSnapshot Name of the snapshot to calculated the diff to. Null or
   *          empty string indicates the current tree.
   * @return A report about the difference between {@code fromSnapshot} and
   *         {@code toSnapshot}. Modified/deleted/created/renamed files and
   *         directories belonging to the snapshottable directories are listed
   *         and labeled as M/-/+/R respectively.
   * @throws IOException
   */
  SnapshotDiffReport getSnapshotDiffReport(String path,
      String fromSnapshot, String toSnapshot) throws IOException {
    SnapshotDiffReport diffs = null;
    checkOperation(OperationCategory.READ);
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      diffs = FSDirSnapshotOp.getSnapshotDiffReport(dir, snapshotManager,
          path, fromSnapshot, toSnapshot);
    } finally {
      readUnlock();
    }
    logAuditEvent(diffs != null, "computeSnapshotDiff", null, null, null);
    return diffs;
  }
  /**
   * Delete a snapshot of a snapshottable directory
   * @param snapshotRoot The snapshottable directory
   * @param snapshotName The name of the to-be-deleted snapshot
   * @param logRetryCache whether to record this call in the retry cache
   * @throws SafeModeException
   * @throws IOException
   */
  void deleteSnapshot(String snapshotRoot, String snapshotName,
      boolean logRetryCache) throws IOException {
    checkOperation(OperationCategory.WRITE);
    boolean success = false;
    writeLock();
    BlocksMapUpdateInfo blocksToBeDeleted = null;
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot delete snapshot for " + snapshotRoot);
      blocksToBeDeleted = FSDirSnapshotOp.deleteSnapshot(dir, snapshotManager,
          snapshotRoot, snapshotName, logRetryCache);
      success = true;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    // Breaking the pattern as removing blocks have to happen outside of the
    // global lock
    if (blocksToBeDeleted != null) {
      removeBlocks(blocksToBeDeleted);
    }
    String rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
    logAuditEvent(success, "deleteSnapshot", rootPath, null, null);
  }
  /**
   * Remove a list of INodeDirectorySnapshottable from the SnapshotManager
   * @param toRemove the list of INodeDirectorySnapshottable to be removed
   */
  void removeSnapshottableDirs(List<INodeDirectory> toRemove) {
    // snapshotManager may be null during shutdown.
    if (snapshotManager != null) {
      snapshotManager.removeSnapshottable(toRemove);
    }
  }
  /**
   * Query the current rolling upgrade status (superuser only).
   * @return the current rolling upgrade info, or null if none in progress
   */
  RollingUpgradeInfo queryRollingUpgrade() throws IOException {
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.READ);
    readLock();
    try {
      if (rollingUpgradeInfo != null) {
        // Refresh the rollback-image flag before reporting.
        boolean hasRollbackImage = this.getFSImage().hasRollbackFSImage();
        rollingUpgradeInfo.setCreatedRollbackImages(hasRollbackImage);
      }
      return rollingUpgradeInfo;
    } finally {
      readUnlock();
    }
  }
  /**
   * Start a rolling upgrade (superuser only). Idempotent: if one is already
   * in progress, its info is returned unchanged.
   */
  RollingUpgradeInfo startRollingUpgrade() throws IOException {
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      if (isRollingUpgrade()) {
        return rollingUpgradeInfo;
      }
      long startTime = now();
      if (!haEnabled) { // for non-HA, we require NN to be in safemode
        startRollingUpgradeInternalForNonHA(startTime);
      } else { // for HA, NN cannot be in safemode
        checkNameNodeSafeMode("Failed to start rolling upgrade");
        startRollingUpgradeInternal(startTime);
      }
      getEditLog().logStartRollingUpgrade(rollingUpgradeInfo.getStartTime());
      if (haEnabled) {
        // roll the edit log to make sure the standby NameNode can tail
        getFSImage().rollEditLog();
      }
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
      logAuditEvent(true, "startRollingUpgrade", null, null, null);
    }
    return rollingUpgradeInfo;
  }
  /**
   * Update internal state to indicate that a rolling upgrade is in progress.
   * @param startTime rolling upgrade start time
   */
  void startRollingUpgradeInternal(long startTime)
      throws IOException {
    checkRollingUpgrade("start rolling upgrade");
    getFSImage().checkUpgrade(this);
    setRollingUpgradeInfo(false, startTime);
  }
  /**
   * Update internal state to indicate that a rolling upgrade is in progress for
   * non-HA setup. This requires the namesystem is in SafeMode and after doing a
   * checkpoint for rollback the namesystem will quit the safemode automatically
   */
  private void startRollingUpgradeInternalForNonHA(long startTime)
      throws IOException {
    Preconditions.checkState(!haEnabled);
    if (!isInSafeMode()) {
      throw new IOException("Safe mode should be turned ON "
          + "in order to create namespace image.");
    }
    checkRollingUpgrade("start rolling upgrade");
    getFSImage().checkUpgrade(this);
    // in non-HA setup, we do an extra checkpoint to generate a rollback image
    getFSImage().saveNamespace(this, NameNodeFile.IMAGE_ROLLBACK, null);
    LOG.info("Successfully saved namespace for preparing rolling upgrade.");
    // leave SafeMode automatically
    setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    setRollingUpgradeInfo(true, startTime);
  }
  // Replace the in-memory rolling upgrade state (finalizeTime 0 = ongoing).
  void setRollingUpgradeInfo(boolean createdRollbackImages, long startTime) {
    rollingUpgradeInfo = new RollingUpgradeInfo(blockPoolId,
        createdRollbackImages, startTime, 0L);
  }
  public void setCreatedRollbackImages(boolean created) {
    if (rollingUpgradeInfo != null) {
      rollingUpgradeInfo.setCreatedRollbackImages(created);
    }
  }
  public RollingUpgradeInfo getRollingUpgradeInfo() {
    return rollingUpgradeInfo;
  }
  public boolean isNeedRollbackFsImage() {
    return needRollbackFsImage;
  }
  public void setNeedRollbackFsImage(boolean needRollbackFsImage) {
    this.needRollbackFsImage = needRollbackFsImage;
  }
  @Override // NameNodeMXBean
  public RollingUpgradeInfo.Bean getRollingUpgradeStatus() {
    // JMX bean view of the rolling upgrade state; null when none in progress.
    RollingUpgradeInfo upgradeInfo = getRollingUpgradeInfo();
    if (upgradeInfo != null) {
      return new RollingUpgradeInfo.Bean(upgradeInfo);
    }
    return null;
  }
  /** Is rolling upgrade in progress? */
  public boolean isRollingUpgrade() {
    return rollingUpgradeInfo != null;
  }
  // Reject starting a new rolling upgrade while one is already in progress.
  void checkRollingUpgrade(String action) throws RollingUpgradeException {
    if (isRollingUpgrade()) {
      throw new RollingUpgradeException("Failed to " + action
          + " since a rolling upgrade is already in progress."
          + " Existing rolling upgrade info:\n" + rollingUpgradeInfo);
    }
  }
void finalizeRollingUpgrade() throws IOException {
checkSuperuserPrivilege();
checkOperation(OperationCategory.WRITE);
writeLock();
final RollingUpgradeInfo returnInfo;
try {
checkOperation(OperationCategory.WRITE);
if (!isRollingUpgrade()) {
return;
}
checkNameNodeSafeMode("Failed to finalize rolling upgrade");
returnInfo = finalizeRollingUpgradeInternal(now());
getEditLog().logFinalizeRollingUpgrade(returnInfo.getFinalizeTime());
if (haEnabled) {
// roll the edit log to make sure the standby NameNode can tail
getFSImage().rollEditLog();
}
getFSImage().updateStorageVersion();
getFSImage().renameCheckpoint(NameNodeFile.IMAGE_ROLLBACK,
NameNodeFile.IMAGE);
} finally {
writeUnlock();
}
if (!haEnabled) {
// Sync not needed for ha since the edit was rolled after logging.
getEditLog().logSync();
}
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(true, "finalizeRollingUpgrade", null, null, null);
}
return;
}
RollingUpgradeInfo finalizeRollingUpgradeInternal(long finalizeTime)
throws RollingUpgradeException {
final long startTime = rollingUpgradeInfo.getStartTime();
rollingUpgradeInfo = null;
return new RollingUpgradeInfo(blockPoolId, false, startTime, finalizeTime);
}
  /**
   * Add a new cache directive.
   * @param directive the directive to add
   * @param flags FORCE skips the wait for a cache rescan
   * @param logRetryCache whether to record this call in the retry cache
   * @return the id of the added directive, or 0 on failure
   */
  long addCacheDirective(CacheDirectiveInfo directive,
                         EnumSet<CacheFlag> flags, boolean logRetryCache)
      throws IOException {
    checkOperation(OperationCategory.WRITE);
    CacheDirectiveInfo effectiveDirective = null;
    if (!flags.contains(CacheFlag.FORCE)) {
      cacheManager.waitForRescanIfNeeded();
    }
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      if (isInSafeMode()) {
        throw new SafeModeException(
            "Cannot add cache directive", safeMode);
      }
      effectiveDirective = FSNDNCacheOp.addCacheDirective(this, cacheManager,
          directive, flags, logRetryCache);
    } finally {
      writeUnlock();
      // Audit and sync happen in finally so failures are recorded too.
      boolean success = effectiveDirective != null;
      if (success) {
        getEditLog().logSync();
      }
      String effectiveDirectiveStr = effectiveDirective != null ?
          effectiveDirective.toString() : null;
      logAuditEvent(success, "addCacheDirective", effectiveDirectiveStr,
          null, null);
    }
    return effectiveDirective != null ? effectiveDirective.getId() : 0;
  }
void modifyCacheDirective(CacheDirectiveInfo directive,
EnumSet<CacheFlag> flags, boolean logRetryCache) throws IOException {
checkOperation(OperationCategory.WRITE);
boolean success = false;
if (!flags.contains(CacheFlag.FORCE)) {
cacheManager.waitForRescanIfNeeded();
}
writeLock();
try {
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot add cache directive", safeMode);
}
FSNDNCacheOp.modifyCacheDirective(this, cacheManager, directive, flags,
logRetryCache);
success = true;
} finally {
writeUnlock();
if (success) {
getEditLog().logSync();
}
String idStr = "{id: " + directive.getId().toString() + "}";
logAuditEvent(success, "modifyCacheDirective", idStr,
directive.toString(), null);
}
}
  /**
   * Removes the cache directive with the given id under the write lock.
   * The outcome is audit-logged in the finally block; the edit log sync
   * happens after the lock is released (only reached on success, since a
   * failure propagates its exception).
   *
   * @param id id of the directive to remove
   * @param logRetryCache whether to record the call in the retry cache
   * @throws IOException including SafeModeException while in safe mode
   */
  void removeCacheDirective(long id, boolean logRetryCache) throws IOException {
    checkOperation(OperationCategory.WRITE);
    boolean success = false;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      if (isInSafeMode()) {
        throw new SafeModeException(
            "Cannot remove cache directives", safeMode);
      }
      FSNDNCacheOp.removeCacheDirective(this, cacheManager, id, logRetryCache);
      success = true;
    } finally {
      writeUnlock();
      String idStr = "{id: " + Long.toString(id) + "}";
      logAuditEvent(success, "removeCacheDirective", idStr, null,
          null);
    }
    getEditLog().logSync();
  }
  /**
   * Lists cache directives matching the given filter, starting after
   * {@code startId}, under the read lock. Waits for a pending cache rescan
   * before reading so results reflect the latest scan.
   *
   * @param startId cursor: entries with ids after this value are returned
   * @param filter restricts which directives are listed
   * @return one batch of matching directive entries
   */
  BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(
      long startId, CacheDirectiveInfo filter) throws IOException {
    checkOperation(OperationCategory.READ);
    BatchedListEntries<CacheDirectiveEntry> results;
    cacheManager.waitForRescanIfNeeded();
    readLock();
    boolean success = false;
    try {
      checkOperation(OperationCategory.READ);
      results = FSNDNCacheOp.listCacheDirectives(this, cacheManager, startId,
          filter);
      success = true;
    } finally {
      readUnlock();
      logAuditEvent(success, "listCacheDirectives", filter.toString(), null,
          null);
    }
    return results;
  }
  /**
   * Creates a new cache pool under the write lock. The outcome is
   * audit-logged with the resulting pool info; the edit log is synced after
   * the lock is released (only reached on success).
   *
   * @param req the pool to create
   * @param logRetryCache whether to record the call in the retry cache
   * @throws IOException including SafeModeException while in safe mode
   */
  void addCachePool(CachePoolInfo req, boolean logRetryCache)
      throws IOException {
    checkOperation(OperationCategory.WRITE);
    writeLock();
    boolean success = false;
    String poolInfoStr = null;
    try {
      checkOperation(OperationCategory.WRITE);
      if (isInSafeMode()) {
        throw new SafeModeException(
            "Cannot add cache pool " + req.getPoolName(), safeMode);
      }
      CachePoolInfo info = FSNDNCacheOp.addCachePool(this, cacheManager, req,
          logRetryCache);
      poolInfoStr = info.toString();
      success = true;
    } finally {
      writeUnlock();
      logAuditEvent(success, "addCachePool", poolInfoStr, null, null);
    }
    getEditLog().logSync();
  }
  /**
   * Modifies an existing cache pool under the write lock. The outcome is
   * audit-logged (the audit string is null-guarded against a null request);
   * the edit log is synced after the lock is released (success path only).
   *
   * @param req the pool modification request
   * @param logRetryCache whether to record the call in the retry cache
   * @throws IOException including SafeModeException while in safe mode
   */
  void modifyCachePool(CachePoolInfo req, boolean logRetryCache)
      throws IOException {
    checkOperation(OperationCategory.WRITE);
    writeLock();
    boolean success = false;
    try {
      checkOperation(OperationCategory.WRITE);
      if (isInSafeMode()) {
        throw new SafeModeException(
            "Cannot modify cache pool " + req.getPoolName(), safeMode);
      }
      FSNDNCacheOp.modifyCachePool(this, cacheManager, req, logRetryCache);
      success = true;
    } finally {
      writeUnlock();
      String poolNameStr = "{poolName: " +
          (req == null ? null : req.getPoolName()) + "}";
      logAuditEvent(success, "modifyCachePool", poolNameStr,
          req == null ? null : req.toString(), null);
    }
    getEditLog().logSync();
  }
  /**
   * Removes the named cache pool under the write lock. The outcome is
   * audit-logged; the edit log is synced after the lock is released
   * (success path only, since failures propagate their exception).
   *
   * @param cachePoolName name of the pool to remove
   * @param logRetryCache whether to record the call in the retry cache
   * @throws IOException including SafeModeException while in safe mode
   */
  void removeCachePool(String cachePoolName, boolean logRetryCache)
      throws IOException {
    checkOperation(OperationCategory.WRITE);
    writeLock();
    boolean success = false;
    try {
      checkOperation(OperationCategory.WRITE);
      if (isInSafeMode()) {
        throw new SafeModeException(
            "Cannot remove cache pool " + cachePoolName, safeMode);
      }
      FSNDNCacheOp.removeCachePool(this, cacheManager, cachePoolName,
          logRetryCache);
      success = true;
    } finally {
      writeUnlock();
      String poolNameStr = "{poolName: " + cachePoolName + "}";
      logAuditEvent(success, "removeCachePool", poolNameStr, null, null);
    }
    getEditLog().logSync();
  }
  /**
   * Lists cache pools starting after {@code prevKey} under the read lock,
   * first waiting for a pending cache rescan. The outcome is audit-logged.
   *
   * @param prevKey pagination cursor; pools after this key are returned
   * @return one batch of cache pool entries
   */
  BatchedListEntries<CachePoolEntry> listCachePools(String prevKey)
      throws IOException {
    BatchedListEntries<CachePoolEntry> results;
    checkOperation(OperationCategory.READ);
    boolean success = false;
    cacheManager.waitForRescanIfNeeded();
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      results = FSNDNCacheOp.listCachePools(this, cacheManager, prevKey);
      success = true;
    } finally {
      readUnlock();
      logAuditEvent(success, "listCachePools", null, null, null);
    }
    return results;
  }
  /**
   * Applies the given ACL entries to {@code src}, merging them with the
   * existing ACL, under the write lock. Access-control failures are
   * audit-logged and rethrown; success is audit-logged after the edit log
   * sync.
   *
   * @param src path whose ACL is modified
   * @param aclSpec entries to merge into the existing ACL
   */
  void modifyAclEntries(final String src, List<AclEntry> aclSpec)
      throws IOException {
    HdfsFileStatus auditStat = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
      auditStat = FSDirAclOp.modifyAclEntries(dir, src, aclSpec);
    } catch (AccessControlException e) {
      logAuditEvent(false, "modifyAclEntries", src);
      throw e;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "modifyAclEntries", src, null, auditStat);
  }
  /**
   * Removes the given ACL entries from {@code src} under the write lock.
   * Access-control failures are audit-logged and rethrown; success is
   * audit-logged after the edit log sync.
   *
   * @param src path whose ACL entries are removed
   * @param aclSpec entries to remove from the existing ACL
   */
  void removeAclEntries(final String src, List<AclEntry> aclSpec)
      throws IOException {
    checkOperation(OperationCategory.WRITE);
    HdfsFileStatus auditStat = null;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot remove ACL entries on " + src);
      auditStat = FSDirAclOp.removeAclEntries(dir, src, aclSpec);
    } catch (AccessControlException e) {
      logAuditEvent(false, "removeAclEntries", src);
      throw e;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "removeAclEntries", src, null, auditStat);
  }
  /**
   * Removes the default ACL entries from {@code src} under the write lock.
   * Access-control failures are audit-logged and rethrown; success is
   * audit-logged after the edit log sync.
   *
   * @param src path whose default ACL is removed
   */
  void removeDefaultAcl(final String src) throws IOException {
    HdfsFileStatus auditStat = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
      auditStat = FSDirAclOp.removeDefaultAcl(dir, src);
    } catch (AccessControlException e) {
      logAuditEvent(false, "removeDefaultAcl", src);
      throw e;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "removeDefaultAcl", src, null, auditStat);
  }
  /**
   * Removes the entire ACL from {@code src} under the write lock.
   * Access-control failures are audit-logged and rethrown; success is
   * audit-logged after the edit log sync.
   *
   * @param src path whose ACL is removed
   */
  void removeAcl(final String src) throws IOException {
    HdfsFileStatus auditStat = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot remove ACL on " + src);
      auditStat = FSDirAclOp.removeAcl(dir, src);
    } catch (AccessControlException e) {
      logAuditEvent(false, "removeAcl", src);
      throw e;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "removeAcl", src, null, auditStat);
  }
  /**
   * Replaces the ACL of {@code src} with the given entries under the write
   * lock. Access-control failures are audit-logged and rethrown; success is
   * audit-logged after the edit log sync.
   *
   * @param src path whose ACL is replaced
   * @param aclSpec the new ACL entries
   */
  void setAcl(final String src, List<AclEntry> aclSpec) throws IOException {
    HdfsFileStatus auditStat = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot set ACL on " + src);
      auditStat = FSDirAclOp.setAcl(dir, src, aclSpec);
    } catch (AccessControlException e) {
      logAuditEvent(false, "setAcl", src);
      throw e;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "setAcl", src, null, auditStat);
  }
  /**
   * Returns the ACL status of {@code src} under the read lock. The outcome
   * (success or failure) is audit-logged in the finally block.
   *
   * @param src path to query
   * @return the ACL status of the path
   */
  AclStatus getAclStatus(String src) throws IOException {
    checkOperation(OperationCategory.READ);
    boolean success = false;
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      final AclStatus ret = FSDirAclOp.getAclStatus(dir, src);
      success = true;
      return ret;
    } finally {
      readUnlock();
      logAuditEvent(success, "getAclStatus", src);
    }
  }
  /**
   * Create an encryption zone on directory src using the specified key.
   *
   * @param src the path of a directory which will be the root of the
   * encryption zone. The directory must be empty.
   * @param keyName name of a key which must be present in the configured
   * KeyProvider.
   * @throws AccessControlException if the caller is not the superuser.
   * @throws UnresolvedLinkException if the path can't be resolved.
   * @throws SafeModeException if the Namenode is in safe mode.
   */
  void createEncryptionZone(final String src, final String keyName,
      boolean logRetryCache)
    throws IOException, UnresolvedLinkException,
        SafeModeException, AccessControlException {
    try {
      // Both a provider and a key name are mandatory preconditions.
      if (provider == null) {
        throw new IOException(
            "Can't create an encryption zone for " + src +
            " since no key provider is available.");
      }
      if (keyName == null || keyName.isEmpty()) {
        throw new IOException("Must specify a key name when creating an " +
            "encryption zone");
      }
      KeyProvider.Metadata metadata = provider.getMetadata(keyName);
      if (metadata == null) {
        /*
         * It would be nice if we threw something more specific than
         * IOException when the key is not found, but the KeyProvider API
         * doesn't provide for that. If that API is ever changed to throw
         * something more specific (e.g. UnknownKeyException) then we can
         * update this to match it, or better yet, just rethrow the
         * KeyProvider's exception.
         */
        throw new IOException("Key " + keyName + " doesn't exist.");
      }
      // If the provider supports pool for EDEKs, this will fill in the pool
      generateEncryptedDataEncryptionKey(keyName);
      createEncryptionZoneInt(src, metadata.getCipher(),
          keyName, logRetryCache);
    } catch (AccessControlException e) {
      logAuditEvent(false, "createEncryptionZone", src);
      throw e;
    }
  }
  /**
   * Internal worker for {@link #createEncryptionZone}: validates superuser
   * privilege, marks {@code srcArg} as an encryption zone via an xattr,
   * records the change in the edit log and audit-logs the success (the
   * original, unresolved path is used in the audit entry).
   *
   * @param srcArg the directory to turn into an encryption zone
   * @param cipher the cipher string taken from the key's metadata
   * @param keyName the encryption key name
   * @param logRetryCache whether to record the call in the retry cache
   */
  private void createEncryptionZoneInt(final String srcArg, String cipher,
      String keyName, final boolean logRetryCache) throws IOException {
    String src = srcArg;
    HdfsFileStatus resultingStat = null;
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.WRITE);
    final byte[][] pathComponents =
      FSDirectory.getPathComponentsForReservedPath(src);
    FSPermissionChecker pc = getPermissionChecker();
    writeLock();
    try {
      checkSuperuserPrivilege();
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot create encryption zone on " + src);
      src = dir.resolvePath(pc, src, pathComponents);

      final CipherSuite suite = CipherSuite.convert(cipher);
      // For now this is hardcoded, as we only support one method.
      final CryptoProtocolVersion version =
          CryptoProtocolVersion.ENCRYPTION_ZONES;

      final XAttr ezXAttr = dir.createEncryptionZone(src, suite,
          version, keyName);
      List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
      xAttrs.add(ezXAttr);
      getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
      final INodesInPath iip = dir.getINodesInPath4Write(src, false);
      resultingStat = dir.getAuditFileInfo(iip);
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "createEncryptionZone", srcArg, null, resultingStat);
  }
  /**
   * Get the encryption zone for the specified path.
   *
   * @param srcArg the path of a file or directory to get the EZ for.
   * @return the EZ of the of the path or null if none.
   * @throws AccessControlException if the caller is not the superuser.
   * @throws UnresolvedLinkException if the path can't be resolved.
   */
  EncryptionZone getEZForPath(final String srcArg)
    throws AccessControlException, UnresolvedLinkException, IOException {
    String src = srcArg;
    HdfsFileStatus resultingStat = null;
    final byte[][] pathComponents =
        FSDirectory.getPathComponentsForReservedPath(src);
    boolean success = false;
    final FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.READ);
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      src = dir.resolvePath(pc, src, pathComponents);
      final INodesInPath iip = dir.getINodesInPath(src, true);
      // READ access on the path is required when permissions are enabled.
      if (isPermissionEnabled) {
        dir.checkPathAccess(pc, iip, FsAction.READ);
      }
      final EncryptionZone ret = dir.getEZForPath(iip);
      resultingStat = dir.getAuditFileInfo(iip);
      success = true;
      return ret;
    } finally {
      readUnlock();
      logAuditEvent(success, "getEZForPath", srcArg, null, resultingStat);
    }
  }
  /**
   * Lists encryption zones starting after {@code prevId}. Requires superuser
   * privilege (checked both before and while holding the read lock). The
   * outcome is audit-logged in the finally block.
   *
   * @param prevId pagination cursor; zones after this id are returned
   * @return one batch of encryption zones
   */
  BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
      throws IOException {
    boolean success = false;
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.READ);
    readLock();
    try {
      checkSuperuserPrivilege();
      checkOperation(OperationCategory.READ);
      final BatchedListEntries<EncryptionZone> ret =
          dir.listEncryptionZones(prevId);
      success = true;
      return ret;
    } finally {
      readUnlock();
      logAuditEvent(success, "listEncryptionZones", null);
    }
  }
  /**
   * Sets an extended attribute on {@code src} under the write lock.
   * Access-control failures are audit-logged and rethrown; success is
   * audit-logged after the edit log sync.
   *
   * @param src path to set the xattr on
   * @param xAttr the attribute (name and value) to set
   * @param flag create/replace semantics for the operation
   * @param logRetryCache whether to record the call in the retry cache
   */
  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
                boolean logRetryCache)
      throws IOException {
    checkOperation(OperationCategory.WRITE);
    HdfsFileStatus auditStat = null;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot set XAttr on " + src);
      auditStat = FSDirXAttrOp.setXAttr(dir, src, xAttr, flag, logRetryCache);
    } catch (AccessControlException e) {
      logAuditEvent(false, "setXAttr", src);
      throw e;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "setXAttr", src, null, auditStat);
  }
  /**
   * Returns the values of the requested extended attributes of {@code src}
   * under the read lock. Access-control failures are audit-logged and
   * rethrown.
   *
   * @param src path to query
   * @param xAttrs the attributes to look up
   * @return the resolved attributes
   */
  List<XAttr> getXAttrs(final String src, List<XAttr> xAttrs)
      throws IOException {
    checkOperation(OperationCategory.READ);
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      return FSDirXAttrOp.getXAttrs(dir, src, xAttrs);
    } catch (AccessControlException e) {
      logAuditEvent(false, "getXAttrs", src);
      throw e;
    } finally {
      readUnlock();
    }
  }
  /**
   * Lists the extended attributes of {@code src} under the read lock.
   * Access-control failures are audit-logged and rethrown.
   *
   * @param src path to query
   * @return the attributes present on the path
   */
  List<XAttr> listXAttrs(String src) throws IOException {
    checkOperation(OperationCategory.READ);
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      return FSDirXAttrOp.listXAttrs(dir, src);
    } catch (AccessControlException e) {
      logAuditEvent(false, "listXAttrs", src);
      throw e;
    } finally {
      readUnlock();
    }
  }
  /**
   * Removes an extended attribute from {@code src} under the write lock.
   * Access-control failures are audit-logged and rethrown; success is
   * audit-logged after the edit log sync.
   *
   * @param src path to remove the xattr from
   * @param xAttr the attribute to remove
   * @param logRetryCache whether to record the call in the retry cache
   */
  void removeXAttr(String src, XAttr xAttr, boolean logRetryCache)
      throws IOException {
    checkOperation(OperationCategory.WRITE);
    HdfsFileStatus auditStat = null;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot remove XAttr entry on " + src);
      auditStat = FSDirXAttrOp.removeXAttr(dir, src, xAttr, logRetryCache);
    } catch (AccessControlException e) {
      logAuditEvent(false, "removeXAttr", src);
      throw e;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "removeXAttr", src, null, auditStat);
  }
  /**
   * Checks whether the current caller has the given access {@code mode} on
   * {@code src}. Throws FileNotFoundException if the path does not exist;
   * access-control failures are audit-logged and rethrown. Succeeds silently
   * (no audit entry) when access is allowed.
   *
   * @param src path to check
   * @param mode requested access (e.g. READ, WRITE, EXECUTE)
   */
  void checkAccess(String src, FsAction mode) throws IOException {
    checkOperation(OperationCategory.READ);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      src = FSDirectory.resolvePath(src, pathComponents, dir);
      final INodesInPath iip = dir.getINodesInPath(src, true);
      INode inode = iip.getLastINode();
      if (inode == null) {
        throw new FileNotFoundException("Path not found");
      }
      if (isPermissionEnabled) {
        FSPermissionChecker pc = getPermissionChecker();
        dir.checkPathAccess(pc, iip, mode);
      }
    } catch (AccessControlException e) {
      logAuditEvent(false, "checkAccess", src);
      throw e;
    } finally {
      readUnlock();
    }
  }
  /**
   * Default AuditLogger implementation; used when no access logger is
   * defined in the config file. It can also be explicitly listed in the
   * config file.
   */
  private static class DefaultAuditLogger extends HdfsAuditLogger {

    // Whether to append the delegation-token tracking id to each entry;
    // read from configuration in initialize().
    private boolean logTokenTrackingId;

    @Override
    public void initialize(Configuration conf) {
      logTokenTrackingId = conf.getBoolean(
          DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY,
          DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT);
    }

    /**
     * Formats one tab-separated audit record
     * (allowed/ugi/ip/cmd/src/dst/perm, optionally trackingId, proto)
     * into the shared buffer and emits it via {@link #logAuditMessage}.
     * No-op when the audit log is not at INFO level.
     */
    @Override
    public void logAuditEvent(boolean succeeded, String userName,
        InetAddress addr, String cmd, String src, String dst,
        FileStatus status, UserGroupInformation ugi,
        DelegationTokenSecretManager dtSecretManager) {
      if (auditLog.isInfoEnabled()) {
        // NOTE(review): auditBuffer is declared elsewhere in this file;
        // it appears to be a reusable per-call StringBuilder — confirm.
        final StringBuilder sb = auditBuffer.get();
        sb.setLength(0);
        sb.append("allowed=").append(succeeded).append("\t");
        sb.append("ugi=").append(userName).append("\t");
        sb.append("ip=").append(addr).append("\t");
        sb.append("cmd=").append(cmd).append("\t");
        sb.append("src=").append(src).append("\t");
        sb.append("dst=").append(dst).append("\t");
        if (null == status) {
          sb.append("perm=null");
        } else {
          sb.append("perm=");
          sb.append(status.getOwner()).append(":");
          sb.append(status.getGroup()).append(":");
          sb.append(status.getPermission());
        }
        if (logTokenTrackingId) {
          sb.append("\t").append("trackingId=");
          String trackingId = null;
          // Only token-authenticated callers have a delegation token to
          // derive a tracking id from.
          if (ugi != null && dtSecretManager != null
              && ugi.getAuthenticationMethod() == AuthenticationMethod.TOKEN) {
            for (TokenIdentifier tid: ugi.getTokenIdentifiers()) {
              if (tid instanceof DelegationTokenIdentifier) {
                DelegationTokenIdentifier dtid =
                    (DelegationTokenIdentifier)tid;
                trackingId = dtSecretManager.getTokenTrackingId(dtid);
                break;
              }
            }
          }
          sb.append(trackingId);
        }
        sb.append("\t").append("proto=");
        sb.append(NamenodeWebHdfsMethods.isWebHdfsInvocation() ? "webhdfs" : "rpc");
        logAuditMessage(sb.toString());
      }
    }

    /** Writes one formatted audit record at INFO level. */
    public void logAuditMessage(String message) {
      auditLog.info(message);
    }
  }
  /**
   * Rewires the audit logger so that all of its existing appenders sit
   * behind a single log4j {@link AsyncAppender}, making audit writes
   * asynchronous. Requires log4j; logs a warning and does nothing otherwise.
   * Idempotent: skipped if the first appender is already an AsyncAppender.
   */
  private static void enableAsyncAuditLog() {
    if (!(auditLog instanceof Log4JLogger)) {
      LOG.warn("Log4j is required to enable async auditlog");
      return;
    }
    Logger logger = ((Log4JLogger)auditLog).getLogger();
    @SuppressWarnings("unchecked")
    List<Appender> appenders = Collections.list(logger.getAllAppenders());
    // failsafe against trying to async it more than once
    if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
      AsyncAppender asyncAppender = new AsyncAppender();
      // change logger to have an async appender containing all the
      // previously configured appenders
      for (Appender appender : appenders) {
        logger.removeAppender(appender);
        asyncAppender.addAppender(appender);
      }
      logger.addAppender(asyncAppender);
    }
  }
}
| zhe-thoughts/hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | Java | apache-2.0 | 291,024 |
/**
* Copyright 2014 Fernando Rincon Martin <frm.rincon@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openeos.services.ui.vaadin.internal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.openeos.services.ui.CustomActionManagerService;
import org.openeos.services.ui.WindowManagerService;
import org.openeos.services.ui.action.CustomAction;
import org.openeos.services.ui.menu.MenuContributor;
import org.openeos.services.ui.model.IMenuDefinition;
import org.openeos.services.ui.model.IWindowDefinition;
import org.openeos.services.ui.vaadin.IVaadinERPWindowFactory;
import org.openeos.vaadin.main.IUnoVaadinApplication;
import org.openeos.vaadin.main.IVaadinMenuContributor;
import com.vaadin.ui.MenuBar;
import com.vaadin.ui.MenuBar.Command;
import com.vaadin.ui.MenuBar.MenuItem;
/**
 * Builds the application's main {@link MenuBar} from all registered
 * {@link MenuContributor}s, ordered by {@link MenuContributor#getOrder()}.
 * WINDOW entries open an ERP window, CUSTOM entries run a registered
 * {@link CustomAction}; MENU and SEPARATOR entries carry no command.
 */
public class VaadinMenuContributor implements IVaadinMenuContributor {

	private static final Logger LOG = LoggerFactory.getLogger(VaadinMenuContributor.class);

	//TODO Need to unbind
	private WindowManagerService windowManagerService;
	private CustomActionManagerService customActionManagerService;
	private IVaadinERPWindowFactory windowFactory;
	private UIVaadinApplicationFactory applicationFactory;

	private List<MenuContributor> menuList = new ArrayList<MenuContributor>();

	public VaadinMenuContributor(WindowManagerService windowManagerService, CustomActionManagerService customActionManagerService,
			IVaadinERPWindowFactory windowFactory, UIVaadinApplicationFactory applicationFactory) {
		this.windowManagerService = windowManagerService;
		this.customActionManagerService = customActionManagerService;
		this.windowFactory = windowFactory;
		this.applicationFactory = applicationFactory;
	}

	@Override
	public void contribute(MenuBar menuBar, IUnoVaadinApplication application) {
		// Sort contributors by declared order. Integer.compare replaces the
		// deprecated, allocating "new Integer(i).compareTo(...)" idiom.
		Collections.sort(menuList, new Comparator<MenuContributor>() {
			@Override
			public int compare(MenuContributor o1, MenuContributor o2) {
				return Integer.compare(o1.getOrder(), o2.getOrder());
			}
		});
		for (MenuContributor contributor : menuList) {
			for (IMenuDefinition menu : contributor.getRootMenuDefinitionList()) {
				MenuItem menuItem = menuBar.addItem(menu.getName(), createAction(menu, application));
				addRecursiveMenus(contributor, contributor.getChildMenuDefinitionList(menu), menuItem, application);
			}
		}
	}

	/** Maps a menu definition to its click command, or null for plain menus/separators. */
	private Command createAction(IMenuDefinition menu, IUnoVaadinApplication application) {
		switch (menu.getType()) {
		case MENU:
		case SEPARATOR:
			return null;
		case WINDOW:
			return createWindowAction(menu, application);
		case CUSTOM:
			return createCustomAction(menu, application);
		}
		return null;
	}

	/** Resolves the window id to a command that opens it; logs and returns null if unknown. */
	private Command createWindowAction(IMenuDefinition menu, IUnoVaadinApplication application) {
		IWindowDefinition window = windowManagerService.getWindowDefinition(menu.getWindowId());
		if (window == null) {
			LOG.warn("Window not found: '{}'", menu.getWindowId());
			return null;
		} else {
			// TODO Make with service factory or prototype bean blueprint
			return new OpenWindowCommand(window, applicationFactory.createApplication(application), windowFactory);
		}
	}

	/** Resolves the custom-action id to a command; logs and returns null if unknown. */
	private Command createCustomAction(IMenuDefinition menu, IUnoVaadinApplication application) {
		CustomAction customAction = customActionManagerService.getCustomAction(menu.getCustomActionId());
		if (customAction == null) {
			LOG.warn("Custom action not found: '{}'", menu.getCustomActionId());
			return null;
		} else {
			return new CustomActionCommand(customAction, applicationFactory.createApplication(application));
		}
	}

	/** Depth-first construction of the submenu tree beneath {@code parent}. */
	private void addRecursiveMenus(MenuContributor contributor, List<? extends IMenuDefinition> subMenuList, MenuItem parent,
			IUnoVaadinApplication application) {
		if (subMenuList == null)
			return;
		for (IMenuDefinition subMenu : subMenuList) {
			MenuItem child = parent.addItem(subMenu.getName(), createAction(subMenu, application));
			addRecursiveMenus(contributor, contributor.getChildMenuDefinitionList(subMenu), child, application);
		}
	}

	public void bindMenuContributor(MenuContributor menuContributor) {
		menuList.add(menuContributor);
	}

	public void unbindMenuContributor(MenuContributor menuContributor) {
		if (menuContributor != null) {
			menuList.remove(menuContributor);
		}
	}
}
| frincon/openeos | modules/org.openeos.services.ui.vaadin/src/main/java/org/openeos/services/ui/vaadin/internal/VaadinMenuContributor.java | Java | apache-2.0 | 4,933 |
// ASM: a very small and fast Java bytecode manipulation framework
// Copyright (c) 2000-2011 INRIA, France Telecom
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. Neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
package org.springframework.asm;
/**
* An entry of the constant pool, of the BootstrapMethods attribute, or of the (ASM specific) type
* table of a class.
*
* @see <a href="https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.4">JVMS
* 4.4</a>
* @see <a href="https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.7.23">JVMS
* 4.7.23</a>
* @author Eric Bruneton
*/
abstract class Symbol {

  // Tag values for the constant pool entries (using the same order as in the JVMS).

  /** The tag value of CONSTANT_Class_info JVMS structures. */
  static final int CONSTANT_CLASS_TAG = 7;

  /** The tag value of CONSTANT_Fieldref_info JVMS structures. */
  static final int CONSTANT_FIELDREF_TAG = 9;

  /** The tag value of CONSTANT_Methodref_info JVMS structures. */
  static final int CONSTANT_METHODREF_TAG = 10;

  /** The tag value of CONSTANT_InterfaceMethodref_info JVMS structures. */
  static final int CONSTANT_INTERFACE_METHODREF_TAG = 11;

  /** The tag value of CONSTANT_String_info JVMS structures. */
  static final int CONSTANT_STRING_TAG = 8;

  /** The tag value of CONSTANT_Integer_info JVMS structures. */
  static final int CONSTANT_INTEGER_TAG = 3;

  /** The tag value of CONSTANT_Float_info JVMS structures. */
  static final int CONSTANT_FLOAT_TAG = 4;

  /** The tag value of CONSTANT_Long_info JVMS structures. */
  static final int CONSTANT_LONG_TAG = 5;

  /** The tag value of CONSTANT_Double_info JVMS structures. */
  static final int CONSTANT_DOUBLE_TAG = 6;

  /** The tag value of CONSTANT_NameAndType_info JVMS structures. */
  static final int CONSTANT_NAME_AND_TYPE_TAG = 12;

  /** The tag value of CONSTANT_Utf8_info JVMS structures. */
  static final int CONSTANT_UTF8_TAG = 1;

  /** The tag value of CONSTANT_MethodHandle_info JVMS structures. */
  static final int CONSTANT_METHOD_HANDLE_TAG = 15;

  /** The tag value of CONSTANT_MethodType_info JVMS structures. */
  static final int CONSTANT_METHOD_TYPE_TAG = 16;

  /** The tag value of CONSTANT_Dynamic_info JVMS structures. */
  static final int CONSTANT_DYNAMIC_TAG = 17;

  /** The tag value of CONSTANT_InvokeDynamic_info JVMS structures. */
  static final int CONSTANT_INVOKE_DYNAMIC_TAG = 18;

  /** The tag value of CONSTANT_Module_info JVMS structures. */
  static final int CONSTANT_MODULE_TAG = 19;

  /** The tag value of CONSTANT_Package_info JVMS structures. */
  static final int CONSTANT_PACKAGE_TAG = 20;

  // Tag values for the BootstrapMethods attribute entries (ASM specific tag).

  /** The tag value of the BootstrapMethods attribute entries. */
  static final int BOOTSTRAP_METHOD_TAG = 64;

  // Tag values for the type table entries (ASM specific tags).

  /** The tag value of a normal type entry in the (ASM specific) type table of a class. */
  static final int TYPE_TAG = 128;

  /**
   * The tag value of an {@link Frame#ITEM_UNINITIALIZED} type entry in the type table of a class.
   */
  static final int UNINITIALIZED_TYPE_TAG = 129;

  /** The tag value of a merged type entry in the (ASM specific) type table of a class. */
  static final int MERGED_TYPE_TAG = 130;

  // Instance fields.

  /**
   * The index of this symbol in the constant pool, in the BootstrapMethods attribute, or in the
   * (ASM specific) type table of a class (depending on the {@link #tag} value).
   */
  final int index;

  /**
   * A tag indicating the type of this symbol. Must be one of the static tag values defined in this
   * class.
   */
  final int tag;

  /**
   * The internal name of the owner class of this symbol. Only used for {@link
   * #CONSTANT_FIELDREF_TAG}, {@link #CONSTANT_METHODREF_TAG}, {@link
   * #CONSTANT_INTERFACE_METHODREF_TAG}, and {@link #CONSTANT_METHOD_HANDLE_TAG} symbols.
   */
  final String owner;

  /**
   * The name of the class field or method corresponding to this symbol. Only used for {@link
   * #CONSTANT_FIELDREF_TAG}, {@link #CONSTANT_METHODREF_TAG}, {@link
   * #CONSTANT_INTERFACE_METHODREF_TAG}, {@link #CONSTANT_NAME_AND_TYPE_TAG}, {@link
   * #CONSTANT_METHOD_HANDLE_TAG}, {@link #CONSTANT_DYNAMIC_TAG} and {@link
   * #CONSTANT_INVOKE_DYNAMIC_TAG} symbols.
   */
  final String name;

  /**
   * The string value of this symbol. This is:
   *
   * <ul>
   *   <li>a field or method descriptor for {@link #CONSTANT_FIELDREF_TAG}, {@link
   *       #CONSTANT_METHODREF_TAG}, {@link #CONSTANT_INTERFACE_METHODREF_TAG}, {@link
   *       #CONSTANT_NAME_AND_TYPE_TAG}, {@link #CONSTANT_METHOD_HANDLE_TAG}, {@link
   *       #CONSTANT_METHOD_TYPE_TAG}, {@link #CONSTANT_DYNAMIC_TAG} and {@link
   *       #CONSTANT_INVOKE_DYNAMIC_TAG} symbols,
   *   <li>an arbitrary string for {@link #CONSTANT_UTF8_TAG} and {@link #CONSTANT_STRING_TAG}
   *       symbols,
   *   <li>an internal class name for {@link #CONSTANT_CLASS_TAG}, {@link #TYPE_TAG} and {@link
   *       #UNINITIALIZED_TYPE_TAG} symbols,
   *   <li>{@literal null} for the other types of symbol.
   * </ul>
   */
  final String value;

  /**
   * The numeric value of this symbol. This is:
   *
   * <ul>
   *   <li>the symbol's value for {@link #CONSTANT_INTEGER_TAG},{@link #CONSTANT_FLOAT_TAG}, {@link
   *       #CONSTANT_LONG_TAG}, {@link #CONSTANT_DOUBLE_TAG},
   *   <li>the CONSTANT_MethodHandle_info reference_kind field value for {@link
   *       #CONSTANT_METHOD_HANDLE_TAG} symbols,
   *   <li>the CONSTANT_InvokeDynamic_info bootstrap_method_attr_index field value for {@link
   *       #CONSTANT_INVOKE_DYNAMIC_TAG} symbols,
   *   <li>the offset of a bootstrap method in the BootstrapMethods bootstrap_methods array, for
   *       {@link #CONSTANT_DYNAMIC_TAG} or {@link #BOOTSTRAP_METHOD_TAG} symbols,
   *   <li>the bytecode offset of the NEW instruction that created an {@link
   *       Frame#ITEM_UNINITIALIZED} type for {@link #UNINITIALIZED_TYPE_TAG} symbols,
   *   <li>the indices (in the class' type table) of two {@link #TYPE_TAG} source types for {@link
   *       #MERGED_TYPE_TAG} symbols,
   *   <li>0 for the other types of symbol.
   * </ul>
   */
  final long data;

  /**
   * Additional information about this symbol, generally computed lazily. <i>Warning: the value of
   * this field is ignored when comparing Symbol instances</i> (to avoid duplicate entries in a
   * SymbolTable). Therefore, this field should only contain data that can be computed from the
   * other fields of this class. It contains:
   *
   * <ul>
   *   <li>the {@link Type#getArgumentsAndReturnSizes} of the symbol's method descriptor for {@link
   *       #CONSTANT_METHODREF_TAG}, {@link #CONSTANT_INTERFACE_METHODREF_TAG} and {@link
   *       #CONSTANT_INVOKE_DYNAMIC_TAG} symbols,
   *   <li>the index in the InnerClasses_attribute 'classes' array (plus one) corresponding to this
   *       class, for {@link #CONSTANT_CLASS_TAG} symbols,
   *   <li>the index (in the class' type table) of the merged type of the two source types for
   *       {@link #MERGED_TYPE_TAG} symbols,
   *   <li>0 for the other types of symbol, or if this field has not been computed yet.
   * </ul>
   */
  int info;

  /**
   * Constructs a new Symbol. This constructor can't be used directly because the Symbol class is
   * abstract. Instead, use the factory methods of the {@link SymbolTable} class.
   *
   * @param index the symbol index in the constant pool, in the BootstrapMethods attribute, or in
   *     the (ASM specific) type table of a class (depending on 'tag').
   * @param tag the symbol type. Must be one of the static tag values defined in this class.
   * @param owner The internal name of the symbol's owner class. May be {@literal null}.
   * @param name The name of the symbol's corresponding class field or method. May be {@literal
   *     null}.
   * @param value The string value of this symbol. May be {@literal null}.
   * @param data The numeric value of this symbol.
   */
  Symbol(
      final int index,
      final int tag,
      final String owner,
      final String name,
      final String value,
      final long data) {
    this.index = index;
    this.tag = tag;
    this.owner = owner;
    this.name = name;
    this.value = value;
    this.data = data;
  }

  /**
   * Returns the result {@link Type#getArgumentsAndReturnSizes} on {@link #value}.
   *
   * @return the result {@link Type#getArgumentsAndReturnSizes} on {@link #value} (memoized in
   *     {@link #info} for efficiency). This should only be used for {@link
   *     #CONSTANT_METHODREF_TAG}, {@link #CONSTANT_INTERFACE_METHODREF_TAG} and {@link
   *     #CONSTANT_INVOKE_DYNAMIC_TAG} symbols.
   */
  int getArgumentsAndReturnSizes() {
    if (info == 0) {
      info = Type.getArgumentsAndReturnSizes(value);
    }
    return info;
  }
}
| spring-projects/spring-framework | spring-core/src/main/java/org/springframework/asm/Symbol.java | Java | apache-2.0 | 10,365 |
package com.iss.gms.entity;
import java.util.Date;
public class EmployeeRelegationInfo {
	// Employee ID
	private String employeeId;
	// Employee ID (used when applying modifications)
	private String employeeIdNew;
	// Employee name
	private String employeeName;
	// Project director
	private String projectDirector;
	// Project manager
	private String projectManager;
	// Project team leader
	private String projectLeader;
	// Grade / level
	private String grade;
	// Work status
	private String workstaus;
	// Business line
	private String businessLine;
	// TS project the employee belongs to
	private String belongedTSproject;
	// ID of the TS project the employee belongs to
	private String belongedTSprojectId;
	// Free-of-charge admission date
	private Date admittancefreeDate;
	// Billable admission date
	private Date admittancebillingDate;
	// Estimated departure date
	private Date outsceneestimateDate;
	// Actual departure date
	private Date outscenerealityDate;
	// Start date
	private Date estimateLDateS;
	// End date
	private Date estimateLDateE;
	// For modification - new project name
	private String newProName;
	// For modification - new status
	private String newWorkstatus;
	// Personnel change
	private String empOperate;
	// Company the employee belongs to
	private String company;
	// Skill
	private String skill;
	// Rate
	private String rate;
public String getEmployeeIdNew() {
return employeeIdNew;
}
public void setEmployeeIdNew(String employeeIdNew) {
this.employeeIdNew = employeeIdNew;
}
public String getCompany() {
return company;
}
public void setCompany(String company) {
this.company = company;
}
public String getSkill() {
return skill;
}
public void setSkill(String skill) {
this.skill = skill;
}
public String getRate() {
return rate;
}
public void setRate(String rate) {
this.rate = rate;
}
public String getEmpOperate() {
return empOperate;
}
public void setEmpOperate(String empOperate) {
this.empOperate = empOperate;
}
public String getNewProName() {
return newProName;
}
public void setNewProName(String newProName) {
this.newProName = newProName;
}
public String getNewWorkstatus() {
return newWorkstatus;
}
public void setNewWorkstatus(String newWorkstatus) {
this.newWorkstatus = newWorkstatus;
}
private Float standardTotalHours;
private Float customerSureTotalHours;
private Float psaWelfareHoliaysTotalHours;
private Float psaNotWelfareHoliaysTotalHours;
private Float lastSwoppedTotalHours;
private Float workoTimeAhughTotalHours;
private Float workoTimeSubsidiesTotalHours;
private Date byTheStatisticalDate;
public String getBelongedTSprojectId() {
return belongedTSprojectId;
}
public void setBelongedTSprojectId(String belongedTSprojectId) {
this.belongedTSprojectId = belongedTSprojectId;
}
public Float getStandardTotalHours() {
return standardTotalHours;
}
public void setStandardTotalHours(Float standardTotalHours) {
this.standardTotalHours = standardTotalHours;
}
public Float getCustomerSureTotalHours() {
return customerSureTotalHours;
}
public void setCustomerSureTotalHours(Float customerSureTotalHours) {
this.customerSureTotalHours = customerSureTotalHours;
}
public Float getPsaWelfareHoliaysTotalHours() {
return psaWelfareHoliaysTotalHours;
}
public void setPsaWelfareHoliaysTotalHours(Float psaWelfareHoliaysTotalHours) {
this.psaWelfareHoliaysTotalHours = psaWelfareHoliaysTotalHours;
}
public Float getPsaNotWelfareHoliaysTotalHours() {
return psaNotWelfareHoliaysTotalHours;
}
public void setPsaNotWelfareHoliaysTotalHours(
Float psaNotWelfareHoliaysTotalHours) {
this.psaNotWelfareHoliaysTotalHours = psaNotWelfareHoliaysTotalHours;
}
public Float getLastSwoppedTotalHours() {
return lastSwoppedTotalHours;
}
public void setLastSwoppedTotalHours(Float lastSwoppedTotalHours) {
this.lastSwoppedTotalHours = lastSwoppedTotalHours;
}
public Float getWorkoTimeAhughTotalHours() {
return workoTimeAhughTotalHours;
}
public void setWorkoTimeAhughTotalHours(Float workoTimeAhughTotalHours) {
this.workoTimeAhughTotalHours = workoTimeAhughTotalHours;
}
public Float getWorkoTimeSubsidiesTotalHours() {
return workoTimeSubsidiesTotalHours;
}
public void setWorkoTimeSubsidiesTotalHours(Float workoTimeSubsidiesTotalHours) {
this.workoTimeSubsidiesTotalHours = workoTimeSubsidiesTotalHours;
}
public Date getByTheStatisticalDate() {
return byTheStatisticalDate;
}
public void setByTheStatisticalDate(Date byTheStatisticalDate) {
this.byTheStatisticalDate = byTheStatisticalDate;
}
public Date getEstimateLDateS() {
return estimateLDateS;
}
public void setEstimateLDateS(Date estimateLDateS) {
this.estimateLDateS = estimateLDateS;
}
public Date getEstimateLDateE() {
return estimateLDateE;
}
public void setEstimateLDateE(Date estimateLDateE) {
this.estimateLDateE = estimateLDateE;
}
public String getEmployeeId() {
return employeeId;
}
public void setEmployeeId(String employeeId) {
this.employeeId = employeeId;
}
public String getEmployeeName() {
return employeeName;
}
public void setEmployeeName(String employeeName) {
this.employeeName = employeeName;
}
public String getProjectDirector() {
return projectDirector;
}
public void setProjectDirector(String projectDirector) {
this.projectDirector = projectDirector;
}
public String getProjectManager() {
return projectManager;
}
public void setProjectManager(String projectManager) {
this.projectManager = projectManager;
}
public String getProjectLeader() {
return projectLeader;
}
public void setProjectLeader(String projectLeader) {
this.projectLeader = projectLeader;
}
public String getGrade() {
return grade;
}
public void setGrade(String grade) {
this.grade = grade;
}
public Date getAdmittancefreeDate() {
return admittancefreeDate;
}
public void setAdmittancefreeDate(Date admittancefreeDate) {
this.admittancefreeDate = admittancefreeDate;
}
public Date getAdmittancebillingDate() {
return admittancebillingDate;
}
public void setAdmittancebillingDate(Date admittancebillingDate) {
this.admittancebillingDate = admittancebillingDate;
}
public Date getOutsceneestimateDate() {
return outsceneestimateDate;
}
public void setOutsceneestimateDate(Date outsceneestimateDate) {
this.outsceneestimateDate = outsceneestimateDate;
}
public Date getOutscenerealityDate() {
return outscenerealityDate;
}
public void setOutscenerealityDate(Date outscenerealityDate) {
this.outscenerealityDate = outscenerealityDate;
}
public String getWorkstaus() {
return workstaus;
}
public void setWorkstaus(String workstaus) {
this.workstaus = workstaus;
}
public String getBusinessLine() {
return businessLine;
}
public void setBusinessLine(String businessLine) {
this.businessLine = businessLine;
}
public String getBelongedTSproject() {
return belongedTSproject;
}
public void setBelongedTSproject(String belongedTSproject) {
this.belongedTSproject = belongedTSproject;
}
}
| yhb612/EETMS | GMS/src/com/iss/gms/entity/EmployeeRelegationInfo.java | Java | apache-2.0 | 6,777 |
package dtos.conversion.converters;
import dtos.conversion.AbstractConverter;
import dtos.generic.RemoteDto;
import models.generic.RemoteModel;
/**
* Created by daniel on 10.08.15.
*/
/**
 * Base converter for entities that live on a remote cloud provider: copies the
 * remote-identifying fields from the model to the DTO under identical names.
 */
public abstract class RemoteConverter<T extends RemoteModel, S extends RemoteDto>
    extends AbstractConverter<T, S> {

    protected RemoteConverter(Class<T> t, Class<S> s) {
        super(t, s);
    }

    @Override public void configure() {
        // Each field maps one-to-one between model and DTO.
        for (String field : new String[] {"remoteId", "cloudProviderId"}) {
            binding().fromField(field).toField(field);
        }
    }
}
| cha87de/colosseum | app/dtos/conversion/converters/RemoteConverter.java | Java | apache-2.0 | 577 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.sql.planner.iterative.rule;
import com.facebook.presto.Session;
import com.facebook.presto.matching.Pattern;
import com.facebook.presto.sql.planner.PlanNodeIdAllocator;
import com.facebook.presto.sql.planner.Symbol;
import com.facebook.presto.sql.planner.SymbolAllocator;
import com.facebook.presto.sql.planner.iterative.Lookup;
import com.facebook.presto.sql.planner.iterative.Rule;
import com.facebook.presto.sql.planner.plan.PlanNode;
import com.facebook.presto.sql.planner.plan.ProjectNode;
import com.facebook.presto.sql.planner.plan.TableScanNode;
import java.util.List;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;
import static com.facebook.presto.sql.planner.iterative.rule.Util.pruneInputs;
/**
 * Removes table-scan output columns that are not referenced by the project node
 * directly above the scan, so the connector only produces the columns in use.
 */
public class PruneTableScanColumns
        implements Rule
{
    private static final Pattern PATTERN = Pattern.matchByClass(ProjectNode.class);

    @Override
    public Pattern getPattern()
    {
        return PATTERN;
    }

    @Override
    public Optional<PlanNode> apply(PlanNode node, Lookup lookup, PlanNodeIdAllocator idAllocator, SymbolAllocator symbolAllocator, Session session)
    {
        ProjectNode project = (ProjectNode) node;

        // The rule only fires when the project sits directly on a table scan.
        PlanNode resolvedSource = lookup.resolve(project.getSource());
        if (!(resolvedSource instanceof TableScanNode)) {
            return Optional.empty();
        }
        TableScanNode tableScan = (TableScanNode) resolvedSource;

        // pruneInputs is empty when nothing can be pruned; in that case leave the plan untouched.
        return pruneInputs(tableScan.getOutputSymbols(), project.getAssignments().getExpressions())
                .map(prunedOutputs -> new ProjectNode(
                        project.getId(),
                        new TableScanNode(
                                tableScan.getId(),
                                tableScan.getTable(),
                                prunedOutputs,
                                // Keep only the column-handle assignments for the surviving symbols.
                                prunedOutputs.stream()
                                        .collect(Collectors.toMap(symbol -> symbol, symbol -> tableScan.getAssignments().get(symbol))),
                                tableScan.getLayout(),
                                tableScan.getCurrentConstraint(),
                                tableScan.getOriginalConstraint()),
                        project.getAssignments()));
    }
}
| troels/nz-presto | presto-main/src/main/java/com/facebook/presto/sql/planner/iterative/rule/PruneTableScanColumns.java | Java | apache-2.0 | 2,980 |
using Sitecore.Data.Items;
using Sitecore.Security.Accounts;
namespace AdvancedContentSecurity.Core.ItemSecurity
{
    /// <summary>
    /// Abstraction over Sitecore item-level security checks.
    /// </summary>
    public interface IItemSecurityRepository
    {
        /// <summary>
        /// Determines whether the given account has the named permission on the given item.
        /// </summary>
        /// <param name="permissionName">Name of the access right to check.</param>
        /// <param name="item">The Sitecore item the permission applies to.</param>
        /// <param name="account">The user or role being checked.</param>
        /// <returns><c>true</c> when the account has the permission; otherwise <c>false</c>.</returns>
        bool HasPermission(string permissionName, Item item, Account account);
    }
}
| cardinal252/Sitecore.AdvancedContentSecurity | AdvancedContentSecurity.Core/ItemSecurity/IItemSecurityRepository.cs | C# | apache-2.0 | 257 |
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Linq;
using CSharpGuidelinesAnalyzer.Extensions;
using JetBrains.Annotations;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.Diagnostics;
using Microsoft.CodeAnalysis.Operations;
namespace CSharpGuidelinesAnalyzer.Rules.MiscellaneousDesign
{
    /// <summary>
    /// Analyzer for rule AV1250: public members should not return LINQ queries that use
    /// deferred execution; callers should receive materialized results instead.
    /// </summary>
    [DiagnosticAnalyzer(LanguageNames.CSharp)]
    public sealed class EvaluateQueryBeforeReturnAnalyzer : DiagnosticAnalyzer
    {
        private const string Title = "Evaluate LINQ query before returning it";
        private const string OperationMessageFormat = "{0} '{1}' returns the result of a call to '{2}', which uses deferred execution";
        private const string QueryMessageFormat = "{0} '{1}' returns the result of a query, which uses deferred execution";
        private const string QueryableMessageFormat = "{0} '{1}' returns an IQueryable, which uses deferred execution";
        private const string Description = "Evaluate the result of a LINQ expression before returning it.";

        // Sentinel "operation names" used to pick the query/queryable message variants.
        private const string QueryOperationName = "<*>Query";
        private const string QueryableOperationName = "<*>Queryable";

        public const string DiagnosticId = "AV1250";

        [NotNull]
        private static readonly AnalyzerCategory Category = AnalyzerCategory.MiscellaneousDesign;

        [NotNull]
        private static readonly DiagnosticDescriptor OperationRule = new DiagnosticDescriptor(DiagnosticId, Title, OperationMessageFormat, Category.DisplayName,
            DiagnosticSeverity.Warning, true, Description, Category.GetHelpLinkUri(DiagnosticId));

        [NotNull]
        private static readonly DiagnosticDescriptor QueryRule = new DiagnosticDescriptor(DiagnosticId, Title, QueryMessageFormat, Category.DisplayName,
            DiagnosticSeverity.Warning, true, Description, Category.GetHelpLinkUri(DiagnosticId));

        [NotNull]
        private static readonly DiagnosticDescriptor QueryableRule = new DiagnosticDescriptor(DiagnosticId, Title, QueryableMessageFormat, Category.DisplayName,
            DiagnosticSeverity.Warning, true, Description, Category.GetHelpLinkUri(DiagnosticId));

        // LINQ operators whose execution is deferred until enumeration.
        [ItemNotNull]
        private static readonly ImmutableArray<string> LinqOperatorsDeferred = ImmutableArray.Create("Aggregate", "All", "Any", "Cast", "Concat", "Contains",
            "DefaultIfEmpty", "Except", "GroupBy", "GroupJoin", "Intersect", "Join", "OfType", "OrderBy", "OrderByDescending", "Range", "Repeat", "Reverse",
            "Select", "SelectMany", "SequenceEqual", "Skip", "SkipWhile", "Take", "TakeWhile", "ThenBy", "ThenByDescending", "Union", "Where", "Zip");

        // LINQ operators that force evaluation at the call site.
        [ItemNotNull]
        private static readonly ImmutableArray<string> LinqOperatorsImmediate = ImmutableArray.Create("Average", "Count", "Distinct", "ElementAt",
            "ElementAtOrDefault", "Empty", "First", "FirstOrDefault", "Last", "LastOrDefault", "LongCount", "Max", "Min", "Single", "SingleOrDefault", "Sum",
            "ToArray", "ToImmutableArray", "ToDictionary", "ToList", "ToLookup");

        // Operators that neither defer nor force evaluation; the analysis looks through them.
        [ItemNotNull]
        private static readonly ImmutableArray<string> LinqOperatorsTransparent = ImmutableArray.Create("AsEnumerable", "AsQueryable");

        [NotNull]
        private static readonly Action<CompilationStartAnalysisContext> RegisterCompilationStartAction = RegisterCompilationStart;

        [NotNull]
        private static readonly Action<OperationBlockAnalysisContext, SequenceTypeInfo> AnalyzeCodeBlockAction = (context, sequenceTypeInfo) =>
            context.SkipInvalid(_ => AnalyzeCodeBlock(context, sequenceTypeInfo));

        [ItemNotNull]
        public override ImmutableArray<DiagnosticDescriptor> SupportedDiagnostics => ImmutableArray.Create(OperationRule, QueryRule, QueryableRule);

        public override void Initialize([NotNull] AnalysisContext context)
        {
            context.EnableConcurrentExecution();
            context.ConfigureGeneratedCodeAnalysis(GeneratedCodeAnalysisFlags.None);

            context.RegisterCompilationStartAction(RegisterCompilationStartAction);
        }

        private static void RegisterCompilationStart([NotNull] CompilationStartAnalysisContext startContext)
        {
            // Resolve the well-known sequence types once per compilation and reuse them.
            var sequenceTypeInfo = new SequenceTypeInfo(startContext.Compilation);

            startContext.RegisterOperationBlockAction(context => AnalyzeCodeBlockAction(context, sequenceTypeInfo));
        }

        private static void AnalyzeCodeBlock(OperationBlockAnalysisContext context, [NotNull] SequenceTypeInfo sequenceTypeInfo)
        {
            // Only public members whose declared return type is IEnumerable(-of-T) can leak
            // deferred queries to external callers; everything else is skipped up front.
            if (context.OwningSymbol.DeclaredAccessibility != Accessibility.Public || !IsInMethodThatReturnsEnumerable(context.OwningSymbol, sequenceTypeInfo))
            {
                return;
            }

            var collector = new ReturnStatementCollector(sequenceTypeInfo, context);
            collector.VisitBlocks(context.OperationBlocks);

            AnalyzeReturnStatements(collector.ReturnStatements, context);
        }

        private static bool IsInMethodThatReturnsEnumerable([NotNull] ISymbol owningSymbol, [NotNull] SequenceTypeInfo sequenceTypeInfo)
        {
            return owningSymbol is IMethodSymbol { ReturnsVoid: false } method && sequenceTypeInfo.IsEnumerable(method.ReturnType);
        }

        private static void AnalyzeReturnStatements([NotNull] [ItemNotNull] IList<IReturnOperation> returnStatements, OperationBlockAnalysisContext context)
        {
            if (returnStatements.Any())
            {
                foreach (IReturnOperation returnStatement in returnStatements)
                {
                    context.CancellationToken.ThrowIfCancellationRequested();

                    var analyzer = new ReturnValueAnalyzer(context);
                    analyzer.Analyze(returnStatement);
                }
            }
        }

        private static void ReportDiagnosticAt([NotNull] IReturnOperation returnStatement, [NotNull] string operationName,
            OperationBlockAnalysisContext context)
        {
            Location location = returnStatement.TryGetLocationForKeyword();

            if (location != null)
            {
                ISymbol containingMember = context.OwningSymbol.GetContainingMember();
                string memberName = containingMember.ToDisplayString(SymbolDisplayFormat.CSharpShortErrorMessageFormat);

                (DiagnosticDescriptor rule, object[] messageArguments) = GetArgumentsForReport(operationName, containingMember, memberName);

                var diagnostic = Diagnostic.Create(rule, location, messageArguments);
                context.ReportDiagnostic(diagnostic);
            }
        }

        // Maps the sentinel operation names to the matching descriptor and message arguments.
        private static (DiagnosticDescriptor rule, object[] messageArguments) GetArgumentsForReport([NotNull] string operationName,
            [NotNull] ISymbol containingMember, [NotNull] string memberName)
        {
            switch (operationName)
            {
                case QueryOperationName:
                {
                    return (QueryRule, new object[]
                    {
                        containingMember.GetKind(),
                        memberName
                    });
                }
                case QueryableOperationName:
                {
                    return (QueryableRule, new object[]
                    {
                        containingMember.GetKind(),
                        memberName
                    });
                }
                default:
                {
                    return (OperationRule, new object[]
                    {
                        containingMember.GetKind(),
                        memberName,
                        operationName
                    });
                }
            }
        }

        /// <summary>
        /// Scans for return statements, skipping over anonymous methods and local functions, whose compile-time type allows for deferred execution.
        /// </summary>
        private sealed class ReturnStatementCollector : ExplicitOperationWalker
        {
            [NotNull]
            private readonly SequenceTypeInfo sequenceTypeInfo;

            private readonly OperationBlockAnalysisContext context;

            // Tracks nesting inside lambdas/local functions; only depth 0 returns belong
            // to the member being analyzed.
            private int scopeDepth;

            [NotNull]
            [ItemNotNull]
            public IList<IReturnOperation> ReturnStatements { get; } = new List<IReturnOperation>();

            public ReturnStatementCollector([NotNull] SequenceTypeInfo sequenceTypeInfo, OperationBlockAnalysisContext context)
            {
                Guard.NotNull(sequenceTypeInfo, nameof(sequenceTypeInfo));

                this.sequenceTypeInfo = sequenceTypeInfo;
                this.context = context;
            }

            public void VisitBlocks([ItemNotNull] ImmutableArray<IOperation> blocks)
            {
                foreach (IOperation block in blocks)
                {
                    Visit(block);
                }
            }

            public override void VisitLocalFunction([NotNull] ILocalFunctionOperation operation)
            {
                scopeDepth++;
                base.VisitLocalFunction(operation);
                scopeDepth--;
            }

            public override void VisitAnonymousFunction([NotNull] IAnonymousFunctionOperation operation)
            {
                scopeDepth++;
                base.VisitAnonymousFunction(operation);
                scopeDepth--;
            }

            public override void VisitReturn([NotNull] IReturnOperation operation)
            {
                if (scopeDepth == 0 && operation.ReturnedValue != null && !ReturnsConstant(operation.ReturnedValue) &&
                    MethodSignatureTypeIsEnumerable(operation.ReturnedValue))
                {
                    ITypeSymbol returnValueType = operation.ReturnedValue.SkipTypeConversions().Type;

                    // IQueryable returns are reported immediately; other sequence types need
                    // flow analysis to decide whether the value is deferred.
                    if (sequenceTypeInfo.IsQueryable(returnValueType))
                    {
                        ReportDiagnosticAt(operation, QueryableOperationName, context);
                    }
                    else if (sequenceTypeInfo.IsNonQueryableSequenceType(returnValueType))
                    {
                        ReturnStatements.Add(operation);
                    }
                    // ReSharper disable once RedundantIfElseBlock
                    else
                    {
                        // No action required.
                    }
                }

                base.VisitReturn(operation);
            }

            private static bool ReturnsConstant([NotNull] IOperation returnValue)
            {
                return returnValue.ConstantValue.HasValue;
            }

            private bool MethodSignatureTypeIsEnumerable([NotNull] IOperation returnValue)
            {
                return sequenceTypeInfo.IsEnumerable(returnValue.Type);
            }
        }

        /// <summary>
        /// Analyzes the filtered set of return values in a method.
        /// </summary>
        private sealed class ReturnValueAnalyzer
        {
            private readonly OperationBlockAnalysisContext context;

            // Memoizes the evaluation outcome per local variable to avoid re-walking
            // the member body for repeated references.
            [NotNull]
            private readonly IDictionary<ILocalSymbol, EvaluationResult> variableEvaluationCache = new Dictionary<ILocalSymbol, EvaluationResult>();

            public ReturnValueAnalyzer(OperationBlockAnalysisContext context)
            {
                this.context = context;
            }

            public void Analyze([NotNull] IReturnOperation returnStatement)
            {
                EvaluationResult result = AnalyzeExpression(returnStatement.ReturnedValue);

                if (result.IsConclusive && result.IsDeferred)
                {
                    ReportDiagnosticAt(returnStatement, result.DeferredOperationName, context);
                }
            }

            [NotNull]
            private EvaluationResult AnalyzeExpression([NotNull] IOperation expression)
            {
                Guard.NotNull(expression, nameof(expression));

                context.CancellationToken.ThrowIfCancellationRequested();

                var walker = new ExpressionWalker(this);
                walker.Visit(expression);

                return walker.Result;
            }

            /// <summary>
            /// Runs flow analysis on the return value expression of a return statement.
            /// </summary>
            private sealed class ExpressionWalker : AbstractEvaluatingOperationWalker
            {
                [NotNull]
                private readonly ReturnValueAnalyzer owner;

                public ExpressionWalker([NotNull] ReturnValueAnalyzer owner)
                {
                    Guard.NotNull(owner, nameof(owner));
                    this.owner = owner;
                }

                public override void VisitConversion([NotNull] IConversionOperation operation)
                {
                    // Conversions are transparent for this analysis; inspect the operand.
                    Visit(operation.Operand);
                }

                public override void VisitInvocation([NotNull] IInvocationOperation operation)
                {
                    base.VisitInvocation(operation);

                    if (operation.Instance == null)
                    {
                        if (IsExecutionDeferred(operation) || IsExecutionImmediate(operation) || IsExecutionTransparent(operation))
                        {
                            return;
                        }
                    }

                    Result.SetUnknown();
                }

                private bool IsExecutionDeferred([NotNull] IInvocationOperation operation)
                {
                    if (LinqOperatorsDeferred.Contains(operation.TargetMethod.Name))
                    {
                        // string also has members named like LINQ operators (e.g. Contains);
                        // those execute immediately and must not be flagged.
                        if (operation.TargetMethod.ContainingType.SpecialType != SpecialType.System_String)
                        {
                            Result.SetDeferred(operation.TargetMethod.Name);
                            return true;
                        }
                    }

                    return false;
                }

                private bool IsExecutionImmediate([NotNull] IInvocationOperation operation)
                {
                    if (LinqOperatorsImmediate.Contains(operation.TargetMethod.Name))
                    {
                        Result.SetImmediate();
                        return true;
                    }

                    return false;
                }

                private bool IsExecutionTransparent([NotNull] IInvocationOperation operation)
                {
                    return LinqOperatorsTransparent.Contains(operation.TargetMethod.Name);
                }

                public override void VisitLocalReference([NotNull] ILocalReferenceOperation operation)
                {
                    if (IsInvokingDelegateVariable(operation))
                    {
                        return;
                    }

                    // Trace back through all assignments to this local to evaluate its state.
                    var assignmentWalker = new VariableAssignmentWalker(operation.Local, operation.Syntax.GetLocation(), owner);
                    assignmentWalker.VisitBlockBody();

                    Result.CopyFrom(assignmentWalker.Result);
                }

                private static bool IsInvokingDelegateVariable([NotNull] ILocalReferenceOperation operation)
                {
                    return operation.Parent is IInvocationOperation;
                }

                public override void VisitConditional([NotNull] IConditionalOperation operation)
                {
                    EvaluationResult trueResult = owner.AnalyzeExpression(operation.WhenTrue);

                    if (operation.WhenFalse == null || trueResult.IsDeferred)
                    {
                        Result.CopyFrom(trueResult);
                    }
                    else
                    {
                        EvaluationResult falseResult = owner.AnalyzeExpression(operation.WhenFalse);
                        Result.CopyFrom(EvaluationResult.Unify(trueResult, falseResult));
                    }
                }

                public override void VisitCoalesce([NotNull] ICoalesceOperation operation)
                {
                    EvaluationResult valueResult = owner.AnalyzeExpression(operation.Value);

                    if (valueResult.IsDeferred)
                    {
                        Result.CopyFrom(valueResult);
                    }
                    else
                    {
                        EvaluationResult alternativeResult = owner.AnalyzeExpression(operation.WhenNull);
                        Result.CopyFrom(EvaluationResult.Unify(valueResult, alternativeResult));
                    }
                }

                public override void VisitTranslatedQuery([NotNull] ITranslatedQueryOperation operation)
                {
                    // Query-expression syntax (from ... select ...) is always deferred.
                    Result.CopyFrom(EvaluationResult.Query);
                }

                public override void VisitObjectCreation([NotNull] IObjectCreationOperation operation)
                {
                    Result.SetImmediate();
                }

                public override void VisitDynamicObjectCreation([NotNull] IDynamicObjectCreationOperation operation)
                {
                    Result.SetImmediate();
                }

                public override void VisitArrayCreation([NotNull] IArrayCreationOperation operation)
                {
                    Result.SetImmediate();
                }

                public override void VisitArrayElementReference([NotNull] IArrayElementReferenceOperation operation)
                {
                    Result.SetUnknown();
                }

                public override void VisitAnonymousObjectCreation([NotNull] IAnonymousObjectCreationOperation operation)
                {
                    Result.SetUnknown();
                }

                public override void VisitObjectOrCollectionInitializer([NotNull] IObjectOrCollectionInitializerOperation operation)
                {
                    Result.SetImmediate();
                }

                public override void VisitCollectionElementInitializer([NotNull] ICollectionElementInitializerOperation operation)
                {
                    Result.SetImmediate();
                }

                public override void VisitDefaultValue([NotNull] IDefaultValueOperation operation)
                {
                    Result.SetImmediate();
                }

                public override void VisitDynamicInvocation([NotNull] IDynamicInvocationOperation operation)
                {
                    Result.SetUnknown();
                }

                public override void VisitDynamicMemberReference([NotNull] IDynamicMemberReferenceOperation operation)
                {
                    Result.SetUnknown();
                }

                public override void VisitNameOf([NotNull] INameOfOperation operation)
                {
                    Result.SetUnknown();
                }

                public override void VisitLiteral([NotNull] ILiteralOperation operation)
                {
                    Result.SetImmediate();
                }

                public override void VisitThrow([NotNull] IThrowOperation operation)
                {
                    Result.SetImmediate();
                }
            }

            /// <summary>
            /// Evaluates all assignments to a specific variable in a code block, storing its intermediate states.
            /// </summary>
            private sealed class VariableAssignmentWalker : AbstractEvaluatingOperationWalker
            {
                [NotNull]
                private readonly ILocalSymbol currentLocal;

                // Only assignments that occur before this source location are relevant.
                [NotNull]
                private readonly Location maxLocation;

                [NotNull]
                private readonly ReturnValueAnalyzer owner;

                public VariableAssignmentWalker([NotNull] ILocalSymbol local, [NotNull] Location maxLocation, [NotNull] ReturnValueAnalyzer owner)
                {
                    Guard.NotNull(local, nameof(local));
                    Guard.NotNull(maxLocation, nameof(maxLocation));
                    Guard.NotNull(owner, nameof(owner));

                    currentLocal = local;
                    this.maxLocation = maxLocation;
                    this.owner = owner;
                }

                public void VisitBlockBody()
                {
                    if (owner.variableEvaluationCache.ContainsKey(currentLocal))
                    {
                        EvaluationResult resultFromCache = owner.variableEvaluationCache[currentLocal];
                        Result.CopyFrom(resultFromCache);
                    }
                    else
                    {
                        foreach (IOperation operation in owner.context.OperationBlocks)
                        {
                            Visit(operation);
                        }
                    }
                }

                public override void VisitVariableDeclarator([NotNull] IVariableDeclaratorOperation operation)
                {
                    base.VisitVariableDeclarator(operation);

                    if (currentLocal.IsEqualTo(operation.Symbol) && EndsBeforeMaxLocation(operation))
                    {
                        IVariableInitializerOperation initializer = operation.GetVariableInitializer();

                        if (initializer != null)
                        {
                            AnalyzeAssignmentValue(initializer.Value);
                        }
                    }
                }

                public override void VisitSimpleAssignment([NotNull] ISimpleAssignmentOperation operation)
                {
                    base.VisitSimpleAssignment(operation);

                    if (operation.Target is ILocalReferenceOperation targetLocal && currentLocal.IsEqualTo(targetLocal.Local) &&
                        EndsBeforeMaxLocation(operation))
                    {
                        AnalyzeAssignmentValue(operation.Value);
                    }
                }

                public override void VisitDeconstructionAssignment([NotNull] IDeconstructionAssignmentOperation operation)
                {
                    base.VisitDeconstructionAssignment(operation);

                    if (operation.Target is ITupleOperation tupleOperation && EndsBeforeMaxLocation(operation))
                    {
                        foreach (IOperation element in tupleOperation.Elements)
                        {
                            // A tuple deconstruction can assign the tracked local; its value
                            // cannot be traced here, so the state becomes unknown.
                            if (element is ILocalReferenceOperation targetLocal && currentLocal.IsEqualTo(targetLocal.Local))
                            {
                                UpdateResult(EvaluationResult.Unknown);
                            }
                        }
                    }
                }

                private bool EndsBeforeMaxLocation([NotNull] IOperation operation)
                {
                    return operation.Syntax.GetLocation().SourceSpan.End < maxLocation.SourceSpan.Start;
                }

                private void AnalyzeAssignmentValue([NotNull] IOperation assignedValue)
                {
                    Guard.NotNull(assignedValue, nameof(assignedValue));

                    EvaluationResult result = owner.AnalyzeExpression(assignedValue);
                    UpdateResult(result);
                }

                private void UpdateResult([NotNull] EvaluationResult result)
                {
                    if (result.IsConclusive)
                    {
                        Result.CopyFrom(result);

                        owner.variableEvaluationCache[currentLocal] = Result;
                    }
                }
            }
        }

        private abstract class AbstractEvaluatingOperationWalker : OperationWalker
        {
            [NotNull]
            public EvaluationResult Result { get; } = new EvaluationResult();
        }

        // Mutable tri-state outcome of evaluating an expression: immediate, deferred
        // (with the operator name), or unknown; Initial means "not yet decided".
        private sealed class EvaluationResult
        {
            [NotNull]
            public static readonly EvaluationResult Query = new EvaluationResult(EvaluationState.Deferred, QueryOperationName);

            [NotNull]
            public static readonly EvaluationResult Unknown = new EvaluationResult(EvaluationState.Unknown, null);

            private EvaluationState evaluationState;

            [CanBeNull]
            private string deferredOperationNameOrNull;

            [NotNull]
            public string DeferredOperationName
            {
                get
                {
                    if (evaluationState != EvaluationState.Deferred)
                    {
                        throw new InvalidOperationException("Operation name is not available in non-deferred states.");
                    }

                    // ReSharper disable once AssignNullToNotNullAttribute
                    return deferredOperationNameOrNull;
                }
            }

            public bool IsConclusive => evaluationState != EvaluationState.Initial;

            public bool IsDeferred => evaluationState == EvaluationState.Deferred;

            public EvaluationResult()
            {
            }

            private EvaluationResult(EvaluationState state, [CanBeNull] string deferredOperationNameOrNull)
            {
                evaluationState = state;
                this.deferredOperationNameOrNull = deferredOperationNameOrNull;
            }

            public void SetImmediate()
            {
                evaluationState = EvaluationState.Immediate;
            }

            public void SetUnknown()
            {
                evaluationState = EvaluationState.Unknown;
            }

            public void SetDeferred([NotNull] string operationName)
            {
                Guard.NotNullNorWhiteSpace(operationName, nameof(operationName));

                evaluationState = EvaluationState.Deferred;
                deferredOperationNameOrNull = operationName;
            }

            public void CopyFrom([NotNull] EvaluationResult result)
            {
                Guard.NotNull(result, nameof(result));

                evaluationState = result.evaluationState;
                deferredOperationNameOrNull = result.deferredOperationNameOrNull;
            }

            // Deferred results win over anything else; otherwise prefer the first
            // conclusive result.
            [NotNull]
            public static EvaluationResult Unify([NotNull] EvaluationResult first, [NotNull] EvaluationResult second)
            {
                Guard.NotNull(first, nameof(first));
                Guard.NotNull(second, nameof(second));

                if (first.IsConclusive && first.IsDeferred)
                {
                    return first;
                }

                if (second.IsConclusive && second.IsDeferred)
                {
                    return second;
                }

                return first.IsConclusive ? first : second;
            }

            public override string ToString()
            {
                return evaluationState.ToString();
            }

            private enum EvaluationState
            {
                Initial,
                Unknown,
                Immediate,
                Deferred
            }
        }

        // Per-compilation cache of the well-known queryable/sequence type symbols.
        private sealed class SequenceTypeInfo
        {
            [ItemNotNull]
            private readonly ImmutableArray<INamedTypeSymbol> queryableTypes;

            [ItemNotNull]
            private readonly ImmutableArray<INamedTypeSymbol> otherSequenceTypes;

            public SequenceTypeInfo([NotNull] Compilation compilation)
            {
                Guard.NotNull(compilation, nameof(compilation));

                queryableTypes = GetQueryableTypes(compilation);
                otherSequenceTypes = GetOtherSequenceTypes(compilation);
            }

            [ItemNotNull]
            private ImmutableArray<INamedTypeSymbol> GetQueryableTypes([NotNull] Compilation compilation)
            {
                ImmutableArray<INamedTypeSymbol>.Builder builder = ImmutableArray.CreateBuilder<INamedTypeSymbol>(4);

                AddTypeToBuilder(KnownTypes.SystemLinqIQueryableT(compilation), builder);
                AddTypeToBuilder(KnownTypes.SystemLinqIOrderedQueryableT(compilation), builder);
                AddTypeToBuilder(KnownTypes.SystemLinqIQueryable(compilation), builder);
                AddTypeToBuilder(KnownTypes.SystemLinqIOrderedQueryable(compilation), builder);

                return !builder.Any() ? ImmutableArray<INamedTypeSymbol>.Empty : builder.ToImmutable();
            }

            [ItemNotNull]
            private ImmutableArray<INamedTypeSymbol> GetOtherSequenceTypes([NotNull] Compilation compilation)
            {
                ImmutableArray<INamedTypeSymbol>.Builder builder = ImmutableArray.CreateBuilder<INamedTypeSymbol>(3);

                AddTypeToBuilder(KnownTypes.SystemLinqIOrderedEnumerableT(compilation), builder);
                AddTypeToBuilder(KnownTypes.SystemLinqIGroupingTKeyTElement(compilation), builder);
                AddTypeToBuilder(KnownTypes.SystemLinqILookupTKeyTElement(compilation), builder);

                return !builder.Any() ? ImmutableArray<INamedTypeSymbol>.Empty : builder.ToImmutable();
            }

            private void AddTypeToBuilder([CanBeNull] INamedTypeSymbol type, [NotNull] [ItemNotNull] ImmutableArray<INamedTypeSymbol>.Builder builder)
            {
                if (type != null)
                {
                    builder.Add(type);
                }
            }

            public bool IsEnumerable([NotNull] ITypeSymbol type)
            {
                return type.OriginalDefinition.SpecialType == SpecialType.System_Collections_Generic_IEnumerable_T ||
                    type.SpecialType == SpecialType.System_Collections_IEnumerable;
            }

            public bool IsQueryable([NotNull] ITypeSymbol type)
            {
                Guard.NotNull(type, nameof(type));

                return queryableTypes.Contains(type.OriginalDefinition);
            }

            public bool IsNonQueryableSequenceType([NotNull] ITypeSymbol type)
            {
                Guard.NotNull(type, nameof(type));

                return IsEnumerable(type) || otherSequenceTypes.Contains(type.OriginalDefinition);
            }
        }
    }
}
| bkoelman/CSharpGuidelinesAnalyzer | src/CSharpGuidelinesAnalyzer/CSharpGuidelinesAnalyzer/Rules/MiscellaneousDesign/EvaluateQueryBeforeReturnAnalyzer.cs | C# | apache-2.0 | 30,491 |
package dk.itu.pervasive.mobile.socket;
import android.os.AsyncTask;
/**
* Created by centos on 4/7/14.
*/
/**
 * AsyncTask that performs no background work and invokes
 * {@link RequestDelegate#onRequestFailure()} once it completes.
 *
 * NOTE(review): onPostExecute runs on the UI thread, so this task appears to be
 * used to marshal a failure callback onto the main thread — confirm against callers.
 */
public class RestartCycleTask extends AsyncTask<Void,Void,Void>{
    // Delegate to notify when the task finishes.
    RequestDelegate _delegate;

    public RestartCycleTask( RequestDelegate d ){
        _delegate = d;
    }

    @Override
    protected Void doInBackground(Void... params) {
        // No background work is performed.
        return null;
    }

    @Override
    protected void onPostExecute(Void aVoid) {
        // Always signals failure, unconditionally.
        _delegate.onRequestFailure();
    }
}
| tonybeltramelli/Ubiquitous-Media-Sharing-Surface | dk.itu.pervasive.mobile.android/src/dk/itu/pervasive/mobile/socket/RestartCycleTask.java | Java | apache-2.0 | 490 |
<?php
include './config/config.php';
include './classes/Player.php';
include './classes/Playtime.php';
class Database {
public $mysqli;
function __construct() {
global $config;
$this->mysqli = new mysqli($config['dbhost'], $config['dbuser'], $config['dbpassword'], $config['dbname']);
if (!mysqli_select_db($this->mysqli, $config['dbname'])) {
$this->error();
}
}
	/**
	 * Prints the current mysqli error number and message, then terminates the
	 * script. Used as the shared failure handler for all database operations.
	 */
	function error() {
		printf('Datenbank-Fehler: Nummer %d, Meldung %s', $this->mysqli->errno, $this->mysqli->error);
		die;
	}
function getPlayers() {
$statement = $this->mysqli->prepare('SELECT * FROM Player');
if (!$statement) {
$this->error();
}
$statement->execute();
$result = $statement->get_result();
$players = array();
while($row = $result->fetch_object()) {
$player = new Player();
$player->login = $row->login;
$player->email = $row->email;
$player->administrator = $row->administrator;
$player->maxPerDay = $row->max_per_day;
$player->maxPerWeek = $row->max_per_week;
$player->extraDay = Time::getTimestamp($row->extra_day);
$player->extraMaxPerDay = $row->extra_max_per_day;
$player->extraMaxPerWeek = $row->extra_max_per_week;
$player->extraComment = $row->extra_comment;
$player->lockedUntil = $row->locked_until;
$player->loginSource = $row->login_source;
$player->loginIp = $row->login_ip;
$player->loginDate = Time::getTimestamp($row->login_date);
$player->logoutDate = Time::getTimestamp($row->logout_date);
$player->confirmDate = Time::getTimestamp($row->confirm_date);
$player->playedDay = $row->played_day;
$player->playedWeek = $row->played_week;
$player->passwordHash = $row->password_hash;
$players[$player->login] = $player;
}
$statement->close();
return $players;
}
function createPlayer($player) {
$statement = $this->mysqli->prepare('INSERT INTO Player (login, max_per_day, max_per_week VALUES (?, ?, ?)');
if (!$statement) {
$this->error();
}
$statement->bind_param('sii', $player->login, $player->maxPerDay, $player->maxPerWeek);
$statement->execute();
$statement->close();
}
function updatePlayerEmail($player) {
$statement = $this->mysqli->prepare('UPDATE Player SET email = ? WHERE login = ?');
if (!$statement) {
$this->error();
}
$statement->bind_param('ss', $player->email, $player->login);
$statement->execute();
$statement->close();
}
function updatePlayerPassword($player) {
$statement = $this->mysqli->prepare('UPDATE Player SET password_hash = ? WHERE login = ?');
if (!$statement) {
$this->error();
}
$statement->bind_param('ss', $player->passwordHash, $player->login);
$statement->execute();
$statement->close();
}
function updatePlayerQuota($player) {
$statement = $this->mysqli->prepare('UPDATE Player SET max_per_day = ?, max_per_week = ? WHERE login = ?');
if (!$statement) {
$this->error();
}
$statement->bind_param('iis', $player->maxPerDay, $player->maxPerWeek, $player->login);
$statement->execute();
$statement->close();
}
function updatePlayerExtra($player) {
$statement = $this->mysqli->prepare('UPDATE Player SET extra_day = FROM_UNIXTIME(?), extra_max_per_day = ?, extra_max_per_week = ?, extra_comment = ? WHERE login = ?');
if (!$statement) {
$this->error();
}
$statement->bind_param('iiiss', $player->extraDay, $player->extraMaxPerDay, $player->extraMaxPerWeek, $player->extraComment, $player->login);
$statement->execute();
$statement->close();
}
function updatePlayerLoginData($player) {
$statement = $this->mysqli->prepare('UPDATE Player SET login_date = FROM_UNIXTIME(?), login_source = ?, login_ip = ?, confirm_date = FROM_UNIXTIME(?), logout_date = FROM_UNIXTIME(?), played_day = ?, played_week = ? WHERE login = ?');
if (!$statement) {
$this->error();
}
$statement->bind_param('issiiiis', $player->loginDate, $player->loginSource, $player->loginIp, $player->confirmDate, $player->logoutDate, $player->playedDay, $player->playedWeek, $player->login);
$statement->execute();
$statement->close();
}
function updatePlayerLock($player) {
$statement = $this->mysqli->prepare('UPDATE Player SET locked_until = FROM_UNIXTIME(?) WHERE login = ?');
if (!$statement) {
$this->error();
}
$statement->bind_param('i', $player->lockedUntil, $player->login);
$statement->execute();
$statement->close();
}
function getPlaydays($login, $start, $end) {
$statement = $this->mysqli->prepare('SELECT * FROM Playtime WHERE login = ? AND login_date >= FROM_UNIXTIME(?) AND login_date <= FROM_UNIXTIME(?)');
if ( !$statement ) {
$this->error();
}
$statement->bind_param('sii', $login, $start, $end);
$statement->execute();
$result = $statement->get_result();
$playdays = new Playdays();
while($row = $result->fetch_object()) {
$playtime = new Playtime();
$playtime->login = $row->login;
$playtime->loginSource = $row->login_source;
$playtime->loginIp = $row->login_ip;
$playtime->loginDate = Time::getTimestamp($row->login_date);
$playtime->logoutDate = Time::getTimestamp($row->logout_date);
$playtime->logoutConfirmed = $row->logout_confirmed;
$playdays->add($playtime);
}
$statement->close();
return $playdays;
}
function savePlaytime($playtime) {
$statement = $this->mysqli->prepare('INSERT INTO Playtime (login, login_source, login_ip, login_date, logout_date, logout_confirmed) VALUES (?, ?, ?, FROM_UNIXTIME(?), FROM_UNIXTIME(?), ?)');
if ( !$statement ) {
$this->error();
}
$statement->bind_param('sssiii', $playtime->login, $playtime->loginSource, $playtime->loginIp, $playtime->loginDate, $playtime->logoutDate, $playtime->logoutConfirmed);
$statement->execute();
$statement->close();
}
}
?> | hohwille/playguard | server/php/playguard/classes/Database.php | PHP | apache-2.0 | 6,011 |
package com.hs.mail.imap.dao.test;
import java.sql.SQLException;
import javax.sql.DataSource;
import org.apache.commons.lang3.ArrayUtils;
import org.dbunit.database.DatabaseDataSourceConnection;
import org.dbunit.database.IDatabaseConnection;
import org.dbunit.dataset.xml.XmlDataSet;
import org.dbunit.operation.DatabaseOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.ClassPathResource;
/**
 * Base class for DbUnit tests that seed the database from XML data sets
 * located on the class path, using an injected {@link DataSource}.
 */
public abstract class AbstractDataSourceDatabaseTest {

    @Autowired
    DataSource dataSource;

    /**
     * Performs a DbUnit CLEAN_INSERT for every given class-path resource.
     * Does nothing when the resource array is null or empty.
     */
    protected void setUpDatabase(String[] resources) throws Exception {
        if (ArrayUtils.isEmpty(resources)) {
            return;
        }
        IDatabaseConnection connection = getConnection(dataSource);
        try {
            for (String resource : resources) {
                XmlDataSet dataSet = new XmlDataSet(new ClassPathResource(resource).getInputStream());
                DatabaseOperation.CLEAN_INSERT.execute(connection, dataSet);
            }
        } finally {
            releaseConnection(connection);
        }
    }

    /** Wraps the given data source in a DbUnit connection. */
    protected IDatabaseConnection getConnection(DataSource dataSource)
            throws SQLException {
        return new DatabaseDataSourceConnection(dataSource);
    }

    /** Closes the given connection; a failure to close is deliberately ignored. */
    protected void releaseConnection(IDatabaseConnection connection) {
        if (connection == null) {
            return;
        }
        try {
            connection.close();
        } catch (SQLException ignored) {
            // Best-effort cleanup during test teardown.
        }
    }
}
| svn2github/hwmail-mirror | hedwig-api/src/test/java/com/hs/mail/imap/dao/test/AbstractDataSourceDatabaseTest.java | Java | apache-2.0 | 1,368 |
package de.rwth.idsg.bikeman.service;
import de.rwth.idsg.bikeman.domain.BookedTariff;
import de.rwth.idsg.bikeman.domain.CardAccount;
import de.rwth.idsg.bikeman.domain.CustomerType;
import de.rwth.idsg.bikeman.domain.Manager;
import de.rwth.idsg.bikeman.domain.OperationState;
import de.rwth.idsg.bikeman.domain.TariffType;
import de.rwth.idsg.bikeman.repository.CardAccountRepository;
import de.rwth.idsg.bikeman.repository.ManagerRepository;
import de.rwth.idsg.bikeman.repository.TariffRepository;
import de.rwth.idsg.bikeman.web.rest.dto.modify.CreateEditManagerDTO;
import de.rwth.idsg.bikeman.web.rest.exception.DatabaseException;
import org.joda.time.LocalDateTime;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
/**
* Created by Wolfgang Kluth on 16/02/16.
*/
@Service
@Transactional
public class ManagerService {

    @Autowired private ManagerRepository managerRepository;
    @Autowired private TariffRepository tariffRepository;
    @Autowired private CardAccountRepository cardAccountRepository;

    /**
     * Creates a fleet manager together with an operative card account that is
     * booked on the test-system tariff starting now.
     *
     * @throws DatabaseException if persisting the card account fails
     *         (e.g. the card id already exists)
     */
    public void createManager(CreateEditManagerDTO managerDTO) {
        Manager manager = managerRepository.create(managerDTO);

        BookedTariff bookedTariff = new BookedTariff();
        bookedTariff.setTariff(tariffRepository.findByName(TariffType.TestSystemTariff));
        bookedTariff.setBookedFrom(LocalDateTime.now());

        // Look up the subscription term once instead of issuing the same
        // repository query again in every branch below.
        Integer term = tariffRepository.findByName(TariffType.TestSystemTariff).getTerm();

        // Leave bookedUntil open-ended when the tariff declares no term.
        if (term == null) {
            bookedTariff.setBookedUntil(null);
        } else {
            bookedTariff.setBookedUntil(LocalDateTime.now().plusDays(term));
        }

        CardAccount cardAccount = new CardAccount();
        if (managerDTO.getCardId() != null) {
            cardAccount.setCardId(managerDTO.getCardId());
            cardAccount.setCardPin(managerDTO.getCardPin());
        }
        cardAccount.setOperationState(OperationState.OPERATIVE);
        cardAccount.setOwnerType(CustomerType.FLEET_MANAGER);
        cardAccount.setUser(manager);
        cardAccount.setCurrentTariff(bookedTariff);

        try {
            cardAccountRepository.save(cardAccount);
        } catch (Throwable e) {
            // NOTE(review): the save can fail for reasons other than a
            // duplicate card id; consider narrowing this catch and
            // preserving the cause in the thrown exception.
            throw new DatabaseException("CardId already exists.");
        }
    }

    /**
     * Updates a fleet manager and the card data of the associated account.
     */
    public void updateManager(CreateEditManagerDTO managerDTO) {
        Manager manager = managerRepository.update(managerDTO);

        CardAccount cardAccount = manager.getCardAccount();
        // NOTE(review): this demotes the owner type from FLEET_MANAGER
        // (set in createManager) to CUSTOMER -- confirm this is intentional.
        cardAccount.setOwnerType(CustomerType.CUSTOMER);

        if (managerDTO.getCardId() != null) {
            cardAccount.setCardId(managerDTO.getCardId());
            cardAccount.setCardPin(managerDTO.getCardPin());
        }
    }
}
| RWTH-i5-IDSG/BikeMan | src/main/java/de/rwth/idsg/bikeman/service/ManagerService.java | Java | apache-2.0 | 2,961 |
/*
* Copyright(c) 2017 lizhaotailang
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.marktony.espresso.zxing.camera;
import android.graphics.Point;
import android.hardware.Camera;
import android.os.Handler;
import android.os.Message;
import android.util.Log;
/**
 * Receives camera preview frames and forwards exactly one frame per
 * {@link #setHandler} call to the registered handler, attaching the
 * current camera resolution to the message.
 */
public class PreviewCallback implements Camera.PreviewCallback {

    private static final String TAG = PreviewCallback.class.getSimpleName();

    private final CameraConfigurationManager configManager;
    private Handler previewHandler;
    private int previewMessage;

    public PreviewCallback(CameraConfigurationManager configManager) {
        this.configManager = configManager;
    }

    /** Registers the handler/message pair that should receive the next frame. */
    public void setHandler(Handler previewHandler, int previewMessage) {
        this.previewHandler = previewHandler;
        this.previewMessage = previewMessage;
    }

    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        Point resolution = configManager.getCameraResolution();
        Handler handler = previewHandler;
        if (resolution == null || handler == null) {
            Log.d(TAG, "Got preview callback, but no handler or resolution available");
            return;
        }
        // arg1/arg2 carry the frame dimensions, obj carries the raw pixel data.
        handler.obtainMessage(previewMessage, resolution.x, resolution.y, data).sendToTarget();
        // Clear the handler so only one frame is delivered per registration.
        previewHandler = null;
    }
}
| TonnyL/Espresso | app/src/main/java/io/github/marktony/espresso/zxing/camera/PreviewCallback.java | Java | apache-2.0 | 1,832 |
package de.hftstuttgart.projectindoorweb.web.internal.requests.building;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Request payload for registering a new building: name, floor count, the
 * pixel dimensions of the floor-plan image, the four geo-referenced corner
 * anchors plus the center point, and the rotation/scale of the image.
 */
public class AddNewBuilding {

    private String buildingName;
    private int numberOfFloors;
    private int imagePixelWidth;
    private int imagePixelHeight;
    private BuildingPositionAnchor northWestAnchor;
    private BuildingPositionAnchor northEastAnchor;
    private BuildingPositionAnchor southEastAnchor;
    private BuildingPositionAnchor southWestAnchor;
    private BuildingPositionAnchor centerAnchor;
    private double rotationAngle;
    private double metersPerPixel;

    @JsonCreator
    public AddNewBuilding(@JsonProperty("buildingName") String buildingName,
                          @JsonProperty("numberOfFloors") int numberOfFloors,
                          @JsonProperty("imagePixelWidth") int imagePixelWidth,
                          @JsonProperty("imagePixelHeight") int imagePixelHeight,
                          @JsonProperty("northWestAnchor") BuildingPositionAnchor northWestAnchor,
                          @JsonProperty("northEastAnchor") BuildingPositionAnchor northEastAnchor,
                          @JsonProperty("southEastAnchor") BuildingPositionAnchor southEastAnchor,
                          @JsonProperty("southWestAnchor") BuildingPositionAnchor southWestAnchor,
                          @JsonProperty("buildingCenterPoint") BuildingPositionAnchor buildingCenterPoint,
                          @JsonProperty("rotationAngle") double rotationAngle,
                          @JsonProperty("metersPerPixel") double metersPerPixel) {
        this.buildingName = buildingName;
        this.numberOfFloors = numberOfFloors;
        this.imagePixelWidth = imagePixelWidth;
        this.imagePixelHeight = imagePixelHeight;
        this.northWestAnchor = northWestAnchor;
        this.northEastAnchor = northEastAnchor;
        this.southEastAnchor = southEastAnchor;
        this.southWestAnchor = southWestAnchor;
        this.centerAnchor = buildingCenterPoint;
        this.rotationAngle = rotationAngle;
        this.metersPerPixel = metersPerPixel;
    }

    public String getBuildingName() {
        return buildingName;
    }

    public void setBuildingName(String buildingName) {
        this.buildingName = buildingName;
    }

    public int getNumberOfFloors() {
        return numberOfFloors;
    }

    public void setNumberOfFloors(int numberOfFloors) {
        this.numberOfFloors = numberOfFloors;
    }

    public int getImagePixelWidth() {
        return imagePixelWidth;
    }

    public void setImagePixelWidth(int imagePixelWidth) {
        this.imagePixelWidth = imagePixelWidth;
    }

    public int getImagePixelHeight() {
        return imagePixelHeight;
    }

    public void setImagePixelHeight(int imagePixelHeight) {
        this.imagePixelHeight = imagePixelHeight;
    }

    public BuildingPositionAnchor getNorthWest() {
        return northWestAnchor;
    }

    public void setNorthWest(BuildingPositionAnchor anchor) {
        this.northWestAnchor = anchor;
    }

    public BuildingPositionAnchor getNorthEast() {
        return northEastAnchor;
    }

    public void setNorthEast(BuildingPositionAnchor anchor) {
        this.northEastAnchor = anchor;
    }

    public BuildingPositionAnchor getSouthEast() {
        return southEastAnchor;
    }

    public void setSouthEast(BuildingPositionAnchor anchor) {
        this.southEastAnchor = anchor;
    }

    public BuildingPositionAnchor getSouthWest() {
        return southWestAnchor;
    }

    public void setSouthWest(BuildingPositionAnchor anchor) {
        this.southWestAnchor = anchor;
    }

    public BuildingPositionAnchor getBuildingCenterPoint() {
        return centerAnchor;
    }

    public void setBuildingCenterPoint(BuildingPositionAnchor anchor) {
        this.centerAnchor = anchor;
    }

    public double getRotationAngle() {
        return rotationAngle;
    }

    public void setRotationAngle(double rotationAngle) {
        this.rotationAngle = rotationAngle;
    }

    public double getMetersPerPixel() {
        return metersPerPixel;
    }

    public void setMetersPerPixel(double metersPerPixel) {
        this.metersPerPixel = metersPerPixel;
    }
}
| ProjectIndoor/projectindoorweb | src/main/java/de/hftstuttgart/projectindoorweb/web/internal/requests/building/AddNewBuilding.java | Java | apache-2.0 | 4,305 |
/*
* Copyright (C) 2011-2014 GUIGUI Simon, fyhertz@gmail.com
*
* This file is part of libstreaming (https://github.com/fyhertz/libstreaming)
*
* Spydroid is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This source code is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this source code; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package com.jjcamera.apps.iosched.streaming.rtp;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import android.annotation.SuppressLint;
import android.util.Log;
import com.jjcamera.apps.iosched.streaming.mp4.MP4Muxer;
import com.jjcamera.apps.iosched.streaming.video.VideoStream;
/**
*
* RFC 3984.
*
* H.264 streaming over RTP.
*
* Must be fed with an InputStream containing H.264 NAL units preceded by their length (4 bytes).
* The stream must start with mpeg4 or 3gpp header, it will be skipped.
*
*/
public class H264Packetizer extends AbstractPacketizer implements Runnable {

	public final static String TAG = "H264Packetizer";

	/** Worker thread that reads NAL units from the input and emits RTP packets. */
	private Thread t = null;
	/** Cooperative stop flag checked by the worker loop. */
	private volatile boolean mStopped = false;
	private int naluLength = 0;
	private long delay = 0, oldtime = 0;
	private Statistics stats = new Statistics();
	private byte[] sps = null, pps = null, stapa = null;
	byte[] header = new byte[5];
	private int count = 0;
	// 0: NAL units prefixed with their 4-byte length, 1: Annex-B 0x00000001
	// prefixed (MediaCodec), 2: bare NAL units with no prefix.
	private int streamType = 1;
	private final int keyFrameSync = 1;

	private final static int MAX_NALU_LENGTH = 10000000;	//10MB for 720p
	private final static byte START_CODE[] = new byte[] { 0, 0, 0, 1 };		// H264 AnnexB format

	public enum NalUnitType {
		NAL_UNKNOWN	(0),
		NAL_SLICE   (1),
		NAL_SLICE_DPA   (2),
		NAL_SLICE_DPB	(3),
		NAL_SLICE_DPC	(4),
		NAL_SLICE_IDR	(5),    /* ref_idc != 0 */	/*key frame!!!!*/
		NAL_SEI	(6),     /* ref_idc == 0 */
		NAL_SPS	(7),
		NAL_PPS	(8),
		NAL_AU_DELIMITER(9);
		/* ref_idc == 0 for 6,9,10,11,12 */

		private int type;

		private NalUnitType(int type){
			this.type = type;
		}

		public int getType(){
			return this.type;
		}
	}

	public H264Packetizer() {
		super();
		socket.setClockFrequency(90000);
		socket.setPayloadType((byte)96);
	}

	/** Starts the packetizer thread; no-op if it is already running. */
	public void start() {
		if (t == null) {
			mStopped = false;
			t = new Thread(this);
			t.setPriority(Thread.MAX_PRIORITY);
			t.start();
		}
	}

	/**
	 * Requests the worker loop to stop, waits for it to finish and closes the
	 * streams. NOTE(review): if the thread is blocked in a read, join() waits
	 * until that read returns because the input stream is only closed after
	 * the join -- confirm this cannot stall shutdown.
	 */
	public void stop() {
		if (t != null) {
			//t.interrupt();
			mStopped = true;
			try {
				t.join();
			} catch (InterruptedException e) {}
			t = null;
			try {
				is.close();
				if(os!=null) {
					os.flush();
					os.close();
				}
			} catch (IOException e) {}
		}
	}

	/**
	 * Supplies the SPS/PPS of the stream and precomputes the STAP-A packet
	 * (NAL type 24, RFC 3984) that aggregates both, which is sent before
	 * every IDR frame so decoders can join mid-stream without an SDP.
	 */
	public void setStreamParameters(byte[] pps, byte[] sps) {
		this.pps = pps;
		this.sps = sps;

		// A STAP-A NAL (NAL type 24) containing the sps and pps of the stream
		if (pps != null && sps != null) {
			// STAP-A NAL header + NALU 1 (SPS) size + NALU 2 (PPS) size + 5 bytes
			stapa = new byte[sps.length + pps.length + 5];

			// STAP-A NAL header is 24
			stapa[0] = 24;

			// Write NALU 1 size into the array (NALU 1 is the SPS).
			stapa[1] = (byte) (sps.length >> 8);
			stapa[2] = (byte) (sps.length & 0xFF);

			// Write NALU 2 size into the array (NALU 2 is the PPS).
			stapa[sps.length + 3] = (byte) (pps.length >> 8);
			stapa[sps.length + 4] = (byte) (pps.length & 0xFF);

			// Write NALU 1 into the array, then write NALU 2 into the array.
			System.arraycopy(sps, 0, stapa, 3, sps.length);
			System.arraycopy(pps, 0, stapa, 5 + sps.length, pps.length);
		}
	}

	/** Worker loop: repeatedly reads one NAL unit and sends it over RTP. */
	public void run() {
		int run = 0;
		long duration = 0;

		Log.d(TAG,"H264 packetizer started !");
		stats.reset();
		count = 0;

		if (is instanceof MediaCodecInputStream) {
			streamType = 1;
			socket.setCacheSize(0);
		} else {
			streamType = 0;
			socket.setCacheSize(10);
		}

		try {
			//socket.createSendThread();

			while (!Thread.interrupted() && !mStopped) {

				oldtime = System.nanoTime();
				// We read a NAL units from the input stream and we send them
				send();
				if(endstream) break;
				// We measure how long it took to receive NAL units from the phone
				duration = System.nanoTime() - oldtime;

				stats.push(duration);
				// Computes the average duration of a NAL unit
				delay = stats.average();

				if(run++ < 10 || delay > 500000000)
					Log.d(TAG,"duration: "+duration/1000000+" delay: "+delay/1000000 + " ts: " + ts);
			}
		} catch (IOException e) {
			Log.e(TAG,"H264 packetizer IOException ! " + e.getMessage());
		} catch (InterruptedException e) {}

		Log.d(TAG,"H264 packetizer stopped !");
	}

	/**
	 * Reads a NAL unit in the FIFO and sends it.
	 * If it is too big, we split it in FU-A units (RFC 3984).
	 */
	@SuppressLint("NewApi")
	private void send() throws IOException, InterruptedException {
		int sum = 1, len = 0, type;

		if (streamType == 0) {
			// NAL units are preceeded by their length, we parse the length
			fill(header,0,5);
			ts += delay;
			naluLength = header[3]&0xFF | (header[2]&0xFF)<<8 | (header[1]&0xFF)<<16 | (header[0]&0xFF)<<24;
			if (naluLength>MAX_NALU_LENGTH || naluLength<0) resync();
		} else if (streamType == 1) {
			// NAL units are preceeded with 0x00000001
			fill(header,0,5);
			ts = ((MediaCodecInputStream)is).getLastBufferInfo().presentationTimeUs*1000L;
			//ts += delay;
			naluLength = is.available()+1;
			if (!(header[0]==0 && header[1]==0 && header[2]==0)) {
				// Turns out, the NAL units are not preceeded with 0x00000001
				Log.e(TAG, "NAL units are not preceeded by 0x00000001");
				streamType = 2;
				return;
			}
		} else {
			// Nothing preceededs the NAL units
			fill(header,0,1);
			header[4] = header[0];
			ts = ((MediaCodecInputStream)is).getLastBufferInfo().presentationTimeUs*1000L;
			//ts += delay;
			naluLength = is.available()+1;
		}

		// Parses the NAL unit type
		type = header[4]&0x1F;

		if(streamType == 0 && type == 0){	//unknown, will end of stream?
			Log.d(TAG,"get 0 NAL type in the stream! Length = " + naluLength);

			int nBufLen = Math.min(8, naluLength);
			byte pBuffer[] = new byte[nBufLen];

			if(naluLength >= 8){
				fill(pBuffer, 0, nBufLen);

				if(pBuffer[3] == 'm' && pBuffer[4] == 'o' && pBuffer[5] == 'o' && pBuffer[6] == 'v'){
					Log.d(TAG, "This is moov which is for MP4 info, ignore it and end stream");
					endstream = true;
					return;
				}
				else{
					resync();
					type = header[4]&0x1F;
				}
			}
			else{
				fill(pBuffer, 0, naluLength - 1);
				return;
			}
		}

		// The stream already contains NAL unit type 7 or 8, we don't need
		// to add them to the stream ourselves
		if (type == 7 || type == 8) {
			Log.v(TAG,"SPS or PPS present in the stream.");
			count++;
			if (count>4) {
				sps = null;
				pps = null;
			}
		}

		// On each key frame, rotate the local MP4 recording file once a minute.
		if(type == 5 && os!=null){
			long time = MP4Muxer.getInstance().getVideoStartTime();
			long now = System.nanoTime();

			if(time == 0){
				MP4Muxer.getInstance().setVideoStartTime(now);
			}
			else if((now - time) > 60000000000L){	// 1 min
				os.flush();
				os.close();
				MP4Muxer.getInstance().setVideoReady();

				FileOutputStream fop = VideoStream.createTempRecorder();
				setOutputStream(fop);

				MP4Muxer.getInstance().setVideoStartTime(now);
			}
		}

		// We send two packets containing NALU type 7 (SPS) and 8 (PPS)
		// Those should allow the H264 stream to be decoded even if no SDP was sent to the decoder.
		if (type == 5 && sps != null && pps != null) {
			buffer = socket.requestBuffer();
			socket.markNextPacket(buffer.mBuffers);
			socket.updateTimestamp(buffer, ts);
			System.arraycopy(stapa, 0, buffer.mBuffers, rtphl, stapa.length);

			streamWrite(START_CODE, 0, 4);
			streamWrite(stapa, 3, sps.length);
			streamWrite(START_CODE, 0, 4);
			streamWrite(stapa, sps.length + 5, pps.length);

			super.send(buffer, rtphl+stapa.length, keyFrameSync);
		}

		streamWrite(START_CODE, 0, 4);

		//Log.d(TAG,"- Nal unit length: " + naluLength + " delay: "+delay/1000000+" type: "+type);

		// Small NAL unit => Single NAL unit
		if (naluLength<=MAXPACKETSIZE-rtphl-2) {
			buffer = socket.requestBuffer();
			buffer.mBuffers[rtphl] = header[4];
			len = fill(buffer.mBuffers, rtphl+1,  naluLength-1);
			socket.updateTimestamp(buffer, ts);
			socket.markNextPacket(buffer.mBuffers);

			streamWrite(buffer.mBuffers, rtphl, naluLength);

			super.send(buffer, naluLength+rtphl, 0);
			//Log.d(TAG,"----- Single NAL unit - len:"+len+" delay: "+delay);
		}
		// Large NAL unit => Split nal unit
		else {
			streamWrite(header, 4, 1);

			// Set FU-A header
			header[1] = (byte) (header[4] & 0x1F);  // FU header type
			header[1] += 0x80; // Start bit
			// Set FU-A indicator
			header[0] = (byte) ((header[4] & 0x60) & 0xFF); // FU indicator NRI
			header[0] += 28;

			while (sum < naluLength) {
				buffer = socket.requestBuffer();
				buffer.mBuffers[rtphl] = header[0];
				buffer.mBuffers[rtphl+1] = header[1];
				socket.updateTimestamp(buffer, ts);
				if ((len = fill(buffer.mBuffers, rtphl+2,  naluLength-sum > MAXPACKETSIZE-rtphl-2 ? MAXPACKETSIZE-rtphl-2 : naluLength-sum  ))<0) return;
				sum += len;
				// Last packet before next NAL
				if (sum >= naluLength) {
					// End bit on
					buffer.mBuffers[rtphl+1] += 0x40;
					socket.markNextPacket(buffer.mBuffers);
				}

				streamWrite(buffer.mBuffers, rtphl + 2, len);

				super.send(buffer, len+rtphl+2, 0);
				// Switch start bit
				header[1] = (byte) (header[1] & 0x7F);
				//Log.d(TAG,"----- FU-A unit, sum:"+sum);
			}
		}
	}

	/**
	 * Blocks until exactly {@code length} bytes have been read into
	 * {@code buffer} at {@code offset}; throws IOException on end of stream.
	 */
	private int fill(byte[] buffer, int offset,int length) throws IOException {
		int sum = 0, len;
		while (sum<length) {
			len = is.read(buffer, offset+sum, length-sum);
			if (len<0) {
				throw new IOException("End of stream for fill");
			}
			else sum+=len;
		}
		return sum;
	}

	/**
	 * Scans the byte stream one byte at a time until a plausible
	 * length-prefixed slice/IDR NAL unit header is found again.
	 */
	private void resync() throws IOException {
		int type;
		int len;

		Log.e(TAG,"Packetizer out of sync ! Let's try to fix that...(NAL length: "+naluLength+")");

		while (true) {

			header[0] = header[1];
			header[1] = header[2];
			header[2] = header[3];
			header[3] = header[4];
			header[4] = (byte) is.read();

			if (header[4]<0) {
				throw new IOException("End of stream for resync");
			}

			type = header[4]&0x1F;

			if (type == 5 || type == 1) {
				naluLength = header[3]&0xFF | (header[2]&0xFF)<<8 | (header[1]&0xFF)<<16 | (header[0]&0xFF)<<24;
				if (naluLength>0 && naluLength<=MAX_NALU_LENGTH) {
					oldtime = System.nanoTime();
					Log.e(TAG,"A NAL unit may have been found in the bit stream !");
					break;
				}
				if (naluLength==0) {
					Log.e(TAG,"NAL unit with NULL size found...");
				// BUGFIX: a Java byte can never equal the int literal 0xFF (255)
				// after numeric promotion, so this diagnostic branch was dead;
				// compare against (byte)0xFF instead.
				} else if (header[3]==(byte)0xFF && header[2]==(byte)0xFF && header[1]==(byte)0xFF && header[0]==(byte)0xFF) {
					Log.e(TAG,"NAL unit with 0xFFFFFFFF size found...");
				}
			}
		}
	}
}
| xunboo/JJCamera | android/src/main/java/com/jjcamera/apps/iosched/streaming/rtp/H264Packetizer.java | Java | apache-2.0 | 11,342 |
package com.planet_ink.coffee_mud.Races;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2004-2016 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
 * Moose: a fantasy-theme equine animal race (skill/NPC only). Supplies the
 * physical stat tables, a natural horn weapon, age/gender-aware naming,
 * health descriptions and butcherable resources for moose mobs.
 */
public class Moose extends StdRace
{
	@Override
	public String ID()
	{
		return "Moose";
	}

	// Cached localized display name; built once at class load.
	private final static String localizedStaticName = CMLib.lang().L("Moose");

	@Override
	public String name()
	{
		return localizedStaticName;
	}

	// Height/weight generation tables (inches/pounds plus random variance).
	@Override
	public int shortestMale()
	{
		return 80;
	}

	@Override
	public int shortestFemale()
	{
		return 75;
	}

	@Override
	public int heightVariance()
	{
		return 6;
	}

	@Override
	public int lightestWeight()
	{
		return 150;
	}

	@Override
	public int weightVariance()
	{
		return 50;
	}

	/**
	 * Wear locations this race can NOT use: everything except feet, neck,
	 * ears and eyes (the complement of the allowed-slot mask).
	 */
	@Override
	public long forbiddenWornBits()
	{
		return ~(Wearable.WORN_FEET | Wearable.WORN_NECK | Wearable.WORN_EARS | Wearable.WORN_EYES);
	}

	// Cached localized racial category; built once at class load.
	private final static String localizedStaticRacialCat = CMLib.lang().L("Equine");

	@Override
	public String racialCategory()
	{
		return localizedStaticRacialCat;
	}

	// Innate racial abilities: parallel arrays of name/level/proficiency and
	// whether the ability must be qualified for.
	private final String[] racialAbilityNames = { "MooseSpeak" };
	private final int[] racialAbilityLevels = { 1 };
	private final int[] racialAbilityProficiencies = { 100 };
	private final boolean[] racialAbilityQuals = { false };

	@Override
	protected String[] racialAbilityNames()
	{
		return racialAbilityNames;
	}

	@Override
	protected int[] racialAbilityLevels()
	{
		return racialAbilityLevels;
	}

	@Override
	protected int[] racialAbilityProficiencies()
	{
		return racialAbilityProficiencies;
	}

	@Override
	protected boolean[] racialAbilityQuals()
	{
		return racialAbilityQuals;
	}

	// Body part counts, indexed by the standard Race body-part codes:
	//         an ey ea he ne ar ha to le fo no gi mo wa ta wi
	private static final int[] parts={0 ,2 ,2 ,1 ,1 ,0 ,0 ,1 ,4 ,4 ,1 ,0 ,1 ,1 ,1 ,0 };

	@Override
	public int[] bodyMask()
	{
		return parts;
	}

	// Age (in years) at which each Race.AGE_* life stage begins.
	private final int[] agingChart = { 0, 1, 2, 4, 7, 15, 20, 21, 22 };

	@Override
	public int[] getAgingChart()
	{
		return agingChart;
	}

	// Lazily-built, class-wide list of butcherable resources (see myResources).
	protected static Vector<RawMaterial> resources = new Vector<RawMaterial>();

	@Override
	public int availabilityCode()
	{
		return Area.THEME_FANTASY | Area.THEME_SKILLONLYMASK;
	}

	/** Applies the fixed racial ability scores on top of the base stats. */
	@Override
	public void affectCharStats(MOB affectedMOB, CharStats affectableStats)
	{
		super.affectCharStats(affectedMOB, affectableStats);
		affectableStats.setRacialStat(CharStats.STAT_STRENGTH,16);
		affectableStats.setRacialStat(CharStats.STAT_DEXTERITY,18);
		affectableStats.setRacialStat(CharStats.STAT_INTELLIGENCE,1);
	}

	/** Lazily creates and caches the race's natural attack (piercing horns). */
	@Override
	public Weapon myNaturalWeapon()
	{
		if(naturalWeapon==null)
		{
			naturalWeapon=CMClass.getWeapon("StdWeapon");
			naturalWeapon.setName(L("a set of sharp horns"));
			naturalWeapon.setMaterial(RawMaterial.RESOURCE_BONE);
			naturalWeapon.setUsesRemaining(1000);
			naturalWeapon.setWeaponDamageType(Weapon.TYPE_PIERCING);
		}
		return naturalWeapon;
	}

	/** Builds a display name appropriate to the mob's age stage and gender. */
	@Override
	public String makeMobName(char gender, int age)
	{
		switch(age)
		{
		case Race.AGE_INFANT:
		case Race.AGE_TODDLER:
		case Race.AGE_CHILD:
			return name().toLowerCase()+" calf";
		case Race.AGE_YOUNGADULT:
			switch(gender)
			{
			case 'M':
			case 'm':
				return "young " + name().toLowerCase() + " bull";
			case 'F':
			case 'f':
				return "young " + name().toLowerCase() + " cow";
			default:
				return "young " + name().toLowerCase();
			}
		case Race.AGE_MATURE:
		case Race.AGE_MIDDLEAGED:
		default:
			switch(gender)
			{
			case 'M':
			case 'm':
				return name().toLowerCase() + " bull";
			case 'F':
			case 'f':
				return name().toLowerCase() + " cow";
			default:
				return name().toLowerCase();
			}
		case Race.AGE_OLD:
		case Race.AGE_VENERABLE:
		case Race.AGE_ANCIENT:
			switch(gender)
			{
			case 'M':
			case 'm':
				return "old male " + name().toLowerCase();
			case 'F':
			case 'f':
				return "old female " + name().toLowerCase();
			default:
				return "old " + name().toLowerCase();
			}
		}
	}

	/**
	 * Returns a color-coded health description based on the mob's remaining
	 * hit-point percentage, from deaths door (<10%) to perfect health.
	 */
	@Override
	public String healthText(MOB viewer, MOB mob)
	{
		final double pct=(CMath.div(mob.curState().getHitPoints(),mob.maxState().getHitPoints()));

		if(pct<.10)
			return L("^r@x1^r is hovering on deaths door!^N",mob.name(viewer));
		else
		if(pct<.20)
			return L("^r@x1^r is covered in blood and matted hair.^N",mob.name(viewer));
		else
		if(pct<.30)
			return L("^r@x1^r is bleeding badly from lots of wounds.^N",mob.name(viewer));
		else
		if(pct<.40)
			return L("^y@x1^y has large patches of bloody matted fur.^N",mob.name(viewer));
		else
		if(pct<.50)
			return L("^y@x1^y has some bloody matted fur.^N",mob.name(viewer));
		else
		if(pct<.60)
			return L("^p@x1^p has a lot of cuts and gashes.^N",mob.name(viewer));
		else
		if(pct<.70)
			return L("^p@x1^p has a few cut patches.^N",mob.name(viewer));
		else
		if(pct<.80)
			return L("^g@x1^g has a cut patch of fur.^N",mob.name(viewer));
		else
		if(pct<.90)
			return L("^g@x1^g has some disheveled fur.^N",mob.name(viewer));
		else
		if(pct<.99)
			return L("^g@x1^g has some misplaced hairs.^N",mob.name(viewer));
		else
			return L("^c@x1^c is in perfect health.^N",mob.name(viewer));
	}

	/**
	 * Lazily builds (once, class-wide) the list of resources produced when a
	 * moose is butchered: horns, leather, meat, blood and bones.
	 */
	@Override
	public List<RawMaterial> myResources()
	{
		synchronized(resources)
		{
			if(resources.size()==0)
			{
				resources.addElement(makeResource
					(L("a pair of @x1 horns",name().toLowerCase()),RawMaterial.RESOURCE_BONE));
				for(int i=0;i<7;i++)
				{
					resources.addElement(makeResource
						(L("a strip of @x1 leather",name().toLowerCase()),RawMaterial.RESOURCE_LEATHER));
				}
				for(int i=0;i<3;i++)
				{
					resources.addElement(makeResource
						(L("a pound of @x1 meat",name().toLowerCase()),RawMaterial.RESOURCE_BEEF));
				}
				resources.addElement(makeResource
					(L("some @x1 blood",name().toLowerCase()),RawMaterial.RESOURCE_BLOOD));
				resources.addElement(makeResource
					(L("a pile of @x1 bones",name().toLowerCase()),RawMaterial.RESOURCE_BONE));
			}
		}
		return resources;
	}
}
| oriontribunal/CoffeeMud | com/planet_ink/coffee_mud/Races/Moose.java | Java | apache-2.0 | 7,510 |
package com.modinfodesigns.ontology;
import com.modinfodesigns.app.ApplicationManager;
import com.modinfodesigns.app.ObjectFactoryCreator;
import com.modinfodesigns.ontology.TaxonomyIndexMatcherFactory;
import com.modinfodesigns.classify.Classifier;
import com.modinfodesigns.property.DataObject;
import com.modinfodesigns.property.IProperty;
import com.modinfodesigns.property.string.StringProperty;
import com.modinfodesigns.property.transform.PropertyTransformException;
import com.modinfodesigns.utils.FileMethods;
import junit.framework.TestCase;
// Needs to use ITaxonomyBuilder ---
public class TestTaxonomyIndexMatcherFactory extends TestCase
{
  private static String textFile = "resources/TestTaxonomyClassification/TestData/TestFile.txt";
  private static String configFile = "resources/TestTaxonomyClassification/ObjectFactoryCreator.xml";

  /**
   * Classifies a sample text file against the software-concepts taxonomy and
   * prints the resulting DataObject as XML.
   */
  public void testTaxonomyIndexMatcherFactory( ) throws PropertyTransformException
  {
    ObjectFactoryCreator.initialize( configFile );

    // Look up the pre-configured matcher factory from the application registry.
    final ApplicationManager applicationManager = ApplicationManager.getInstance( );
    final TaxonomyIndexMatcherFactory matcherFactory =
        (TaxonomyIndexMatcherFactory)applicationManager.getApplicationObject( "SoftwareConceptsIndexMatcherFactory", "IndexMatcherFactory" );

    final Classifier classifier = new Classifier( );
    classifier.addIndexMatcherFactory( matcherFactory );
    classifier.addClassifyField( "text" );

    // Wrap the file contents in a DataObject and run it through the classifier.
    final DataObject dobj = new DataObject( );
    dobj.addProperty( new StringProperty( "text", FileMethods.readFile( textFile ) ));
    classifier.transformPropertyHolder( dobj );

    System.out.println( dobj.getValue( IProperty.XML_FORMAT ));
  }
}
| detnavillus/modular-informatic-designs | pipeline/src/notReadyYet/com/modinfodesigns/ontology/TestTaxonomyIndexMatcherFactory.java | Java | apache-2.0 | 1,785 |
/*
* Copyright 2014 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dashbuilder.client.widgets.dataset.editor.workflow.edit;
import com.google.gwt.editor.client.SimpleBeanEditorDriver;
import org.dashbuilder.client.widgets.dataset.editor.driver.SQLDataSetDefDriver;
import org.dashbuilder.client.widgets.dataset.event.CancelRequestEvent;
import org.dashbuilder.client.widgets.dataset.event.SaveRequestEvent;
import org.dashbuilder.client.widgets.dataset.event.TestDataSetRequestEvent;
import org.dashbuilder.dataprovider.DataSetProviderType;
import org.dashbuilder.dataset.client.DataSetClientServices;
import org.dashbuilder.dataset.client.editor.SQLDataSetDefEditor;
import org.dashbuilder.dataset.def.SQLDataSetDef;
import org.dashbuilder.validations.dataset.DataSetDefValidator;
import org.jboss.errai.ioc.client.container.SyncBeanManager;
import javax.enterprise.context.Dependent;
import javax.enterprise.event.Event;
import javax.inject.Inject;
import javax.validation.ConstraintViolation;
/**
 * <p>Editing workflow presenter for SQL data set definitions.</p>
 * <p>Main entry point for editing an existing SQL data set definition instance: it links the GWT
 * editor hierarchy with its bean editor driver so the definition can be flushed and validated.</p>
 *
 * @since 0.4.0
 */
@Dependent
public class SQLDataSetEditWorkflow extends DataSetEditWorkflow<SQLDataSetDef, SQLDataSetDefEditor> {

    @Inject
    public SQLDataSetEditWorkflow(final DataSetClientServices clientServices,
                                  final DataSetDefValidator dataSetDefValidator,
                                  final SyncBeanManager beanManager,
                                  final Event<SaveRequestEvent> saveRequestEvent,
                                  final Event<TestDataSetRequestEvent> testDataSetEvent,
                                  final Event<CancelRequestEvent> cancelRequestEvent,
                                  final View view) {
        super(clientServices, dataSetDefValidator, beanManager, saveRequestEvent, testDataSetEvent, cancelRequestEvent, view);
    }

    @Override
    protected Class<? extends SimpleBeanEditorDriver<SQLDataSetDef, SQLDataSetDefEditor>> getDriverClass() {
        return SQLDataSetDefDriver.class;
    }

    @Override
    protected Class<? extends SQLDataSetDefEditor> getEditorClass() {
        return org.dashbuilder.client.widgets.dataset.editor.sql.SQLDataSetEditor.class;
    }

    @Override
    protected Iterable<ConstraintViolation<?>> validate(boolean isCacheEnabled, boolean isPushEnabled, boolean isRefreshEnabled) {
        // Validation rules differ depending on whether the editor is in query or table mode.
        return dataSetDefValidator.validatorFor(DataSetProviderType.SQL)
                .validate(dataSetDef, isCacheEnabled, isPushEnabled, isRefreshEnabled, editor.isUsingQuery());
    }

    @Override
    protected void afterFlush() {
        super.afterFlush();
        // Table and SQL query are mutually exclusive; blank out whichever one is unused.
        if (!editor.isUsingQuery()) {
            dataSetDef.setDbSQL(null);
        } else {
            dataSetDef.setDbTable(null);
        }
    }
} | dgutierr/dashbuilder | dashbuilder-client/dashbuilder-widgets/src/main/java/org/dashbuilder/client/widgets/dataset/editor/workflow/edit/SQLDataSetEditWorkflow.java | Java | apache-2.0 | 3,663 |
package com.hediapps.authentication.service;
import com.hediapps.authentication.dto.UserDto;
import org.springframework.security.core.userdetails.UserDetailsService;
/**
 * Authentication service contract.
 *
 * <p>Extends Spring Security's {@link UserDetailsService} so implementations can be plugged
 * directly into an authentication provider, and additionally exposes persistence of new users.</p>
 */
public interface AuthenticationService extends UserDetailsService {

    /**
     * Persists the given user.
     *
     * @param user the user data to store; presumably the implementation encodes the
     *             password before saving — TODO confirm in the implementation
     */
    void save(UserDto user);
}
| HediBenAhmed/hediApps | hediapps-authentication/src/main/java/com/hediapps/authentication/service/AuthenticationService.java | Java | apache-2.0 | 275 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.cron;
import org.apache.camel.FailedToCreateRouteException;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.test.junit5.CamelTestSupport;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class CronPatternsTest extends CamelTestSupport {

    /** Registers a single route from the given cron endpoint URI to a mock endpoint. */
    private void addCronRoute(final String uri) throws Exception {
        context.addRoutes(new RouteBuilder() {
            @Override
            public void configure() {
                from(uri).to("mock:result");
            }
        });
    }

    @Test
    void testTooManyParts() throws Exception {
        // Eight space-separated schedule fields is one more than cron allows.
        addCronRoute("cron:tab?schedule=0/1 * * * * ? 1 2");
        assertThrows(FailedToCreateRouteException.class, () -> context.start());
    }

    @Test
    void testTooLittleParts() throws Exception {
        addCronRoute("cron:tab?schedule=wrong pattern");
        assertThrows(FailedToCreateRouteException.class, () -> context.start());
    }

    @Test
    void testPlusInURI() throws Exception {
        // '+' is accepted as the separator between schedule parts in the URI.
        addCronRoute("cron://name?schedule=0+0/5+12-18+?+*+MON-FRI");
        context.start();
    }

    @Test
    void testPlusInURINok() throws Exception {
        // Same '+' syntax but with too many parts: route creation must fail on start.
        addCronRoute("cron://name?schedule=0+0/5+12-18+?+*+MON-FRI+2019+1");
        assertThrows(FailedToCreateRouteException.class, () -> context.start());
    }

    @Override
    public boolean isUseRouteBuilder() {
        // Routes are registered explicitly inside each test, not via createRouteBuilder().
        return false;
    }
}
| DariusX/camel | components/camel-cron/src/test/java/org/apache/camel/component/cron/CronPatternsTest.java | Java | apache-2.0 | 2,808 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spinnaker.echo.config;
import com.netflix.spinnaker.echo.rest.RestService;
import java.util.ArrayList;
import java.util.List;
import lombok.Builder;
import lombok.Data;
/**
 * Wrapper class for a collection of rest clients.
 *
 * <p>Each {@link Service} entry pairs a generated {@link RestService} client with the
 * endpoint configuration it was built from.</p>
 */
public class RestUrls {

  // Mutable on purpose: configuration code replaces or extends this list at startup.
  private List<Service> services = new ArrayList<>();

  public List<Service> getServices() {
    return services;
  }

  public void setServices(List<Service> services) {
    this.services = services;
  }

  /** A single configured REST endpoint: the client plus its source configuration. */
  @Data
  @Builder
  public static class Service {
    private RestService client;
    private RestProperties.RestEndpointConfiguration config;
  }
}
| spinnaker/echo | echo-rest/src/main/java/com/netflix/spinnaker/echo/config/RestUrls.java | Java | apache-2.0 | 1,222 |
<?php
/**
* Created by PhpStorm.
* User: meathill
* Date: 14-6-26
* Time: 下午5:17
*/
require_once "Base.class.php";
/**
 * Data-access model for the 4399 <-> ptbus game-id mapping table.
 */
class Source extends \gamepop\Base {
  /** Backtick-quoted name of the mapping table. */
  const VS = '`t_4399_vs_ptbus`';
  /** Column holding the ptbus-side id. */
  static $VS_ptbus = 'ptbusid';
  /** Column holding the 4399-side id. */
  static $VS_4399 = '4399id';

  /**
   * Returns the table this model operates on.
   *
   * @param mixed $fields Unused here; presumably part of the \gamepop\Base
   *                      getTable() contract — TODO confirm against the base class.
   * @return string backtick-quoted table name
   */
  public function getTable($fields) {
    return self::VS;
  }
} | Dianjoy/gamepop-inc | Source.class.php | PHP | apache-2.0 | 325 |
"""Unit tests for the Azure Devops Server issues collector."""
from .base import AzureDevopsTestCase
class AzureDevopsIssuesTest(AzureDevopsTestCase):
    """Unit tests for the Azure Devops Server issues metric."""

    METRIC_TYPE = "issues"

    async def test_nr_of_issues(self):
        """Test that the number of issues is returned."""
        response = await self.collect(
            get_request_json_return_value={"value": [self.work_item, self.work_item]},
            post_request_json_return_value={"workItems": [{"id": "id1"}, {"id": "id2"}]},
        )
        self.assert_measurement(response, value="2")

    async def test_no_issues(self):
        """Test zero issues."""
        response = await self.collect(post_request_json_return_value={"workItems": []})
        self.assert_measurement(response, value="0", entities=[])

    async def test_issues(self):
        """Test that the issues are returned."""
        response = await self.collect(
            get_request_json_return_value={"value": [self.work_item]},
            post_request_json_return_value={"workItems": [{"id": "id"}]},
        )
        expected_entity = {
            "key": "id",
            "project": "Project",
            "title": "Title",
            "work_item_type": "Task",
            "state": "New",
            "url": self.work_item_url,
        }
        self.assert_measurement(response, entities=[expected_entity])
| ICTU/quality-time | components/collector/tests/source_collectors/azure_devops/test_issues.py | Python | apache-2.0 | 1,498 |
({
    // Browsers this Aura test suite runs against.
    browsers: ['GOOGLECHROME', "IE11"],
    // CSS selectors into the interop component's shadow DOM, shared by the tests below.
    selector: {
        literal: '.m-literal span',
        expression: '.m-expr span',
        changeValuesBtn: '.change-values'
    },
    // A falsy literal bound in the template must render as the string 'false'.
    // NOTE(review): $A.test.assertEquals is called as (actual, expected) throughout this
    // file, while the convention elsewhere is (expected, actual) — harmless for the
    // equality check itself, but failure messages would label the values the wrong way
    // round. TODO confirm intended argument order against the $A.test API.
    testFalsy: {
        test: [
            function(cmp) {
                var expected = 'false';
                var element = cmp
                    .getElement()
                    .shadowRoot
                    .querySelector(this.selector.literal);
                return new Promise(function(resolve) {
                    var actual = element.textContent;
                    $A.test.assertEquals(actual, expected, 'Wrong literal');
                    resolve();
                });
            }
        ]
    },
    // A bound expression must resolve and render ('Renderer' is the expected text;
    // presumably set by the component under test — confirm in the cmp markup).
    testGVPexpression: {
        test: [
            function(cmp) {
                var expected = 'Renderer';
                var element = cmp
                    .getElement()
                    .shadowRoot
                    .querySelector(this.selector.expression);
                return new Promise(function(resolve) {
                    var actual = element.textContent;
                    $A.test.assertEquals(actual, expected, 'Wrong expression result');
                    resolve();
                });
            }
        ]
    },
    // $A.createComponent with an aura:id must produce a module element in the DOM.
    testProgrammaticInstantiation: {
        test: [
            function (cmp) {
                var done = false;
                $A.createComponent('markup://moduleTest:simpleCmp', {
                    'aura:id': 'programmatic'
                }, $A.getCallback(function (simpleCmp) {
                    cmp.set('v.programmatic', simpleCmp);
                    done = true;
                }));
                // Block the test until the async creation callback has fired.
                $A.test.addWaitFor(true, function () {
                    return done;
                });
            },
            function (cmp) {
                var el = document
                    .querySelector('.programmatic')
                    .querySelector('moduletest-simple-cmp');
                var message = 'Should support programmatic instantiation with an aura:id';
                $A.test.assertTrue(el !== null, message);
            }
        ]
    },
    // Setting 'v.items' on the owner must be visible both through the interop
    // component wrapper and directly on the underlying custom element, and re-render.
    testAttributesAreReflectedOnInteropComponent: {
        test: [
            function defaultProps(cmp) {
                var list = cmp.find('list');
                $A.test.assertEquals(
                    list.get('v.items').length,
                    0,
                    'Wrong number of items on InteropComponent'
                );
                $A.test.assertEquals(
                    list.getElement().items.length,
                    0,
                    'Wrong number of items on Element'
                );
            },
            function updateProps(cmp) {
                var list = cmp.find('list');
                cmp.set('v.items', [{ label: 'item1', id: "1" }, { label: 'item2', id: "2" }]);
                $A.test.assertEquals(
                    list.get('v.items').length,
                    2,
                    'Wrong number of items on InteropComponent'
                );
                $A.test.assertEquals(
                    list.getElement().items.length,
                    2,
                    'Wrong number of items on Element'
                );
            },
            function renderUpdatedProps(cmp) {
                var itemElement = cmp
                    .find('list')
                    .getElement()
                    .shadowRoot
                    .querySelectorAll('li');
                $A.test.assertEquals(
                    itemElement.length,
                    2,
                    'Wrong number of items has been rendered'
                );
            }
        ]
    },
    // When the attribute is not bound in the template, reads must fall back to
    // the module's default value (an empty list here).
    testReturnsDefaultFromInteropComponent: {
        test: [
            function defaultProps(cmp) {
                var list = cmp.find('list-without-items');
                // The default value held by the InteropComponent element shouldn't be retrievable using the cmp.get
                $A.test.assertEquals(
                    0,
                    list.get('v.items').length,
                    'Wrong number of items on InteropComponent'
                );
                $A.test.assertEquals(
                    0,
                    list.getElement().items.length,
                    'Wrong number of items on Element'
                );
            }
        ]
    },
    // Setting an attribute directly on the interop component (no template binding)
    // must propagate to the element and trigger a re-render.
    testUpdateAttributeWhenNotBoundInTheTemplate: {
        test: [
            function updateProps(cmp) {
                var list = cmp.find('list-without-items');
                list.set('v.items', [{ label: 'item1', id: 1 }, { label: 'item2', id: 2 }]);
                $A.test.assertEquals(
                    2,
                    list.get('v.items').length,
                    'Wrong number of items on InteropComponent'
                );
                $A.test.assertEquals(
                    2,
                    list.getElement().items.length,
                    'Wrong number of items on Element'
                );
            },
            function renderUpdatedProps(cmp) {
                var itemElement = cmp
                    .find('list-without-items')
                    .getElement()
                    .shadowRoot
                    .querySelectorAll('li');
                $A.test.assertEquals(
                    2,
                    itemElement.length,
                    'Wrong number of items has been rendered'
                );
            }
        ]
    },
    // Public getter/setter (accessor) properties on the module must be readable via cmp.get.
    testCanReadPublicAccessors: {
        test: [
            function (cmp) {
                var interopCmp = cmp.find('main');
                $A.test.assertEquals('accessor-test-value', interopCmp.get('v.myAccessor'), 'should be able to read public accessor');
            }
        ]
    },
    // Clicking the module's button mutates its internal state; the accessor must reflect it.
    testCanReadUpdatedAccessorValue: {
        test: [
            function (cmp) {
                var interopCmp = cmp.find('main');
                interopCmp.getElement().shadowRoot.querySelector(this.selector.changeValuesBtn).click();
                $A.test.assertEquals('modified-accessor-value', interopCmp.get('v.myAccessor'), 'should be able to read accessor modified value');
            }
        ]
    },
    // Accessor values must flow back through a PropertyReferenceValue bound in the template.
    testCanPassPRV: {
        test: [
            function (cmp) {
                $A.test.assertEquals('accessor-test-value', cmp.get('v.accessorValue'), 'accessor value should be reflected on the PRV.');
                var interopCmp = cmp.find('main');
                interopCmp.getElement().shadowRoot.querySelector(this.selector.changeValuesBtn).click();
                $A.test.assertEquals('modified-accessor-value', cmp.get('v.accessorValue'), 'should be able to read accessor modified value from the bound template');
            }
        ]
    },
    // A primitive passed into an accessor-backed attribute must be ignored in favor of the getter.
    testAccessorIgnoresPassedPrimitiveValue: {
        test: [
            function (cmp) {
                var interopCmp = cmp.find('accessor-primitive-value');
                $A.test.assertEquals('accessor-test-value', interopCmp.get('v.myAccessor'), 'accessor should ignore passed primitive value.');
                // NOTE(review): 'interopCmp' is re-declared here with the same lookup;
                // the duplicate 'var' is redundant (same binding) but harmless.
                var interopCmp = cmp.find('accessor-primitive-value');
                interopCmp.getElement().shadowRoot.querySelector(this.selector.changeValuesBtn).click();
                $A.test.assertEquals('modified-accessor-value', interopCmp.get('v.myAccessor'), 'should be able to read accessor modified value');
            }
        ]
    },
    // Interop: Cannot get value of attribute that's not bound to parent cmp #784
    testCanGetUnboundAttributes: {
        test: [
            function(cmp) {
                var unboundChild = cmp.find('unbound');
                var attributes = ['literal', 'bound', 'unbound', 'expression', 'nested'];
                attributes.forEach(function(attribute) {
                    $A.test.assertDefined(unboundChild.get('v.' + attribute), 'attribute [' + attribute + '] should be defined');
                });
            }
        ]
    },
    // Helper shared by the null/undefined tests: text rendered inside the nullTest module.
    getNullValueText: function (cmp) {
        return cmp
            .find('nullTest')
            .getElement()
            .shadowRoot
            .querySelector('.null-test')
            .innerText;
    },
    // Setting a bound attribute to null must clear the rendered text.
    testNullValue: {
        attributes: {
            'nullValueTest': 'John',
        },
        test: [
            function(cmp) {
                var actual = this.getNullValueText(cmp);
                $A.test.assertEquals('John', actual, 'The bound value should be John');
                cmp.set("v.nullValueTest", null);
            },
            function (cmp) {
                var actual = this.getNullValueText(cmp);
                $A.test.assertEquals('', actual, 'After setting the nullTest attribute to null the rendered text should be empty');
            }
        ]
    },
    // Same as above but with undefined instead of null.
    testUndefinedValue: {
        attributes: {
            'nullValueTest': 'John',
        },
        test: [
            function(cmp) {
                var actual = this.getNullValueText(cmp);
                $A.test.assertEquals('John', actual, 'The bound value should be John');
                cmp.set("v.nullValueTest", undefined);
            },
            function (cmp) {
                var actual = this.getNullValueText(cmp);
                $A.test.assertEquals('', actual, 'After setting the nullTest attribute to undefined the rendered text should be empty');
            }
        ]
    },
    // A read-only (derived) attribute must recompute when the value it depends on changes.
    testReadOnlyAttrUpdatesWhenItsDependentChanged: {
        test: [
            function(cmp) {
                var target = cmp.find('input');
                var validity = target.get('v.validity');
                $A.test.assertEquals('', validity);
                target.set('v.value', 'foo');
                validity = target.get('v.validity');
                $A.test.assertEquals('foo', validity);
            }
        ]
    },
    // Same as above, but observed through a template binding on the owner component.
    testReadOnlyBoundAttrUpdatesWhenItsDependentChanged: {
        test: [
            function(cmp) {
                var target = cmp.find('input');
                var myValidity = cmp.get('v.myValidity');
                $A.test.assertEquals('', myValidity);
                target.set('v.value', 'foo');
                myValidity = cmp.get('v.myValidity');
                $A.test.assertEquals('foo', myValidity);
            }
        ]
    },
    // An aura:action handler bound to the module's onchange must run when the
    // module dispatches a composed/bubbling 'change' event.
    testAuraActionAttributeIsCalledWhenEventIsFired: {
        attributes: {
            'result': '',
        },
        test: [
            function(cmp) {
                var target = cmp.find('input1');
                target.set('v.value', 'foo');
                var detail = {
                    value: 'foo'
                };
                target.getElement().shadowRoot.querySelector('input').dispatchEvent(
                    new CustomEvent('change', {
                        composed: true,
                        bubbles: true,
                        detail: detail,
                    })
                );
                $A.test.assertEquals(cmp.get('v.result'), 'foo');
            }
        ]
    },
    // Same handler wiring, but for a component created via $A.createComponent.
    testCreateComponentWithAuraActionAttribute: {
        test: [
            function(cmp) {
                $A.createComponent(
                    "moduleTest:simpleInput",
                    {
                        "onchange": cmp.get('v.onChange'),
                        "aura:id": "input2"
                    },
                    function(newCmp) {
                        var body = cmp.get("v.body");
                        body.push(newCmp);
                        cmp.set("v.body", body);
                    }
                );
            },
            function(cmp) {
                var target = cmp.find('input2');
                target.set('v.value', 'bar');
                var detail = {
                    value: 'bar'
                };
                target.getElement().shadowRoot.querySelector('input').dispatchEvent(
                    new CustomEvent('change', {
                        composed: true,
                        bubbles: true,
                        detail: detail,
                    })
                );
                $A.test.assertEquals(cmp.get('v.result'), 'bar');
            }
        ]
    },
    // A user click inside the module must propagate the boolean state to the owner binding.
    testBooleanAttributeUpdatesWhenChangeHappenedInElement: {
        test: [
            function(cmp) {
                var target = cmp.find('inputRadio');
                target.set('v.checked', false);
                target.getElement().shadowRoot.querySelector('input').click();
                $A.test.assertTrue(cmp.get('v.radioChecked'));
            }
        ]
    },
    // Compat mode: a getter-backed attribute ('inputValidity') must be readable via cmp.get.
    testCompatGetAttribute: {
        test: [
            function(cmp) {
                var target = cmp.find('input');
                target.set('v.value', 'foo');
                var validity = target.get('v.inputValidity');
                $A.test.assertTrue(validity.valid);
            }
        ]
    },
    // Passing an attribute the module does not declare must not break creation.
    testDynamicCreationNonExistentAttr: {
        test: [
            function(cmp) {
                var createdCmp;
                $A.createComponent("moduletest:simpleCmp", { nonExistent: "foo" }, function(newCmp) {
                    createdCmp = newCmp;
                });
                $A.test.addWaitFor(true, function() { return createdCmp !== undefined; }, function() {
                    $A.test.assertNotNull(createdCmp, "No component returned from $A.createComponent");
                    var qualifiedName = createdCmp.getDef().getDescriptor().getQualifiedName();
                    $A.test.assertEquals("markup://moduleTest:simpleCmp", qualifiedName, "Unexpected component returned from $A.createComponent");
                });
            }
        ]
    }
})
| forcedotcom/aura | aura-modules/src/test/components/moduleTest/interopAttrTest/interopAttrTestTest.js | JavaScript | apache-2.0 | 13,703 |
/*
* Copyright 2005-2017 Dozer Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dozer.vo;
/**
* @author garsombke.franz
* @author sullins.ben
* @author tierney.matt
*
*/
public class TheFirstSubClassPrime extends BaseTestObject {
private String s;
public TheFirstSubClassPrime() {
}
public String getS() {
return s;
}
public void setS(String s) {
this.s = s;
}
public TheFirstSubClassPrime(String s) {
this.s = s;
}
} | STRiDGE/dozer | core/src/test/java/org/dozer/vo/TheFirstSubClassPrime.java | Java | apache-2.0 | 993 |
# Copyright 2020 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
class TestUtils(unittest.TestCase):
    """Tests for the helper utilities in google.cloud.spanner_dbapi.utils."""

    # subTest() only exists on Python 3's TestCase.
    skip_condition = sys.version_info[0] < 3
    skip_message = "Subtests are not supported in Python 2"

    @unittest.skipIf(skip_condition, skip_message)
    def test_PeekIterator(self):
        from google.cloud.spanner_dbapi.utils import PeekIterator

        cases = [
            ("list", [1, 2, 3, 4, 6, 7], [1, 2, 3, 4, 6, 7]),
            ("iter_from_list", iter([1, 2, 3, 4, 6, 7]), [1, 2, 3, 4, 6, 7]),
            ("tuple", ("a", 12, 0xFF), ["a", 12, 0xFF]),
            ("iter_from_tuple", iter(("a", 12, 0xFF)), ["a", 12, 0xFF]),
            ("no_args", (), []),
        ]
        for name, data_in, expected in cases:
            with self.subTest(name=name):
                self.assertEqual(list(PeekIterator(data_in)), expected)

    @unittest.skipIf(skip_condition, "Python 2 has an outdated iterator definition")
    def test_peekIterator_list_rows_converted_to_tuples(self):
        from google.cloud.spanner_dbapi.utils import PeekIterator

        # Cloud Spanner returns each row as a list; PeekIterator (used by
        # BaseCursor's fetch* methods) must hand every list row back as a tuple.
        rows = PeekIterator([["a"], ["b"], ["c"], ["d"], ["e"]])
        self.assertEqual(
            list(rows),
            [("a",), ("b",), ("c",), ("d",), ("e",)],
            "Rows of type list must be returned as tuples",
        )

        self.assertEqual(list(PeekIterator([[17]])), [(17,)])
        self.assertEqual(next(PeekIterator([["%", "%d"]])), ("%", "%d"))
        self.assertEqual(next(PeekIterator([("Clark", "Kent")])), ("Clark", "Kent"))

    @unittest.skipIf(skip_condition, "Python 2 has an outdated iterator definition")
    def test_peekIterator_nonlist_rows_unconverted(self):
        from google.cloud.spanner_dbapi.utils import PeekIterator

        # Non-list rows must pass through untouched.
        expected = ["a", "b", "c", "d", "e"]
        self.assertEqual(
            list(PeekIterator(["a", "b", "c", "d", "e"])),
            expected,
            "Values should be returned unchanged",
        )

    @unittest.skipIf(skip_condition, skip_message)
    def test_backtick_unicode(self):
        from google.cloud.spanner_dbapi.utils import backtick_unicode

        cases = [
            ("SELECT (1) as foo WHERE 1=1", "SELECT (1) as foo WHERE 1=1"),
            ("SELECT (1) as föö", "SELECT (1) as `föö`"),
            ("SELECT (1) as `föö`", "SELECT (1) as `föö`"),
            ("SELECT (1) as `föö` `umläut", "SELECT (1) as `föö` `umläut"),
            ("SELECT (1) as `föö", "SELECT (1) as `föö"),
        ]
        for sql, want in cases:
            with self.subTest(sql=sql):
                self.assertEqual(backtick_unicode(sql), want)

    @unittest.skipIf(skip_condition, skip_message)
    def test_StreamedManyResultSets(self):
        from google.cloud.spanner_dbapi.utils import StreamedManyResultSets

        cases = [
            ("iter_from_list", iter([1, 2, 3, 4, 6, 7]), [1, 2, 3, 4, 6, 7]),
            ("iter_from_tuple", iter(("a", 12, 0xFF)), ["a", 12, 0xFF]),
        ]
        for name, data_in, expected in cases:
            with self.subTest(name=name):
                streamed = StreamedManyResultSets()
                streamed._iterators.append(data_in)
                self.assertEqual(list(streamed), expected)
| googleapis/python-spanner | tests/unit/spanner_dbapi/test_utils.py | Python | apache-2.0 | 4,189 |
/*
* This file is part of "lunisolar-magma".
*
* (C) Copyright 2014-2022 Lunisolar (http://lunisolar.eu/).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.lunisolar.magma.func.predicate;
import javax.annotation.Nonnull; // NOSONAR
import javax.annotation.Nullable; // NOSONAR
import javax.annotation.concurrent.NotThreadSafe; // NOSONAR
import java.util.Comparator; // NOSONAR
import java.util.Objects; // NOSONAR
import eu.lunisolar.magma.basics.*; //NOSONAR
import eu.lunisolar.magma.basics.builder.*; // NOSONAR
import eu.lunisolar.magma.basics.exceptions.*; // NOSONAR
import eu.lunisolar.magma.basics.meta.*; // NOSONAR
import eu.lunisolar.magma.basics.meta.aType.*; // NOSONAR
import eu.lunisolar.magma.basics.meta.functional.*; // NOSONAR
import eu.lunisolar.magma.basics.meta.functional.type.*; // NOSONAR
import eu.lunisolar.magma.basics.meta.functional.domain.*; // NOSONAR
import eu.lunisolar.magma.func.IA;
import eu.lunisolar.magma.func.SA;
import eu.lunisolar.magma.func.*; // NOSONAR
import eu.lunisolar.magma.func.tuple.*; // NOSONAR
import java.util.concurrent.*; // NOSONAR
import java.util.function.*; // NOSONAR
import java.util.*; // NOSONAR
import java.lang.reflect.*; // NOSONAR
import eu.lunisolar.magma.func.action.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.bi.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.obj.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.tri.*; // NOSONAR
import eu.lunisolar.magma.func.function.*; // NOSONAR
import eu.lunisolar.magma.func.function.conversion.*; // NOSONAR
import eu.lunisolar.magma.func.function.from.*; // NOSONAR
import eu.lunisolar.magma.func.function.to.*; // NOSONAR
import eu.lunisolar.magma.func.operator.binary.*; // NOSONAR
import eu.lunisolar.magma.func.operator.ternary.*; // NOSONAR
import eu.lunisolar.magma.func.operator.unary.*; // NOSONAR
import eu.lunisolar.magma.func.predicate.*; // NOSONAR
import eu.lunisolar.magma.func.supplier.*; // NOSONAR
/**
* Non-throwing functional interface (lambda) LBiObjDblPredicate for Java 8.
*
* Type: predicate
*
* Domain (lvl: 3): T1 a1,T2 a2,double a3
*
* Co-domain: boolean
*
*/
@FunctionalInterface
@SuppressWarnings("UnusedDeclaration")
public interface LBiObjDblPredicate<T1, T2> extends MetaPredicate, MetaInterface.NonThrowing, Codomain<aBool>, Domain3<a<T1>, a<T2>, aDouble> { // NOSONAR
	String DESCRIPTION = "LBiObjDblPredicate: boolean test(T1 a1,T2 a2,double a3)";

	// boolean test(T1 a1,T2 a2,double a3) ;
	/** Evaluates the predicate; checked exceptions thrown by {@link #testX} are nested into runtime exceptions. */
	default boolean test(T1 a1, T2 a2, double a3) {
		// return nestingTest(a1,a2,a3);
		try {
			return this.testX(a1, a2, a3);
		} catch (Throwable e) { // NOSONAR
			throw Handling.nestCheckedAndThrow(e);
		}
	}

	/**
	 * Implement this, but call test(T1 a1,T2 a2,double a3)
	 */
	boolean testX(T1 a1, T2 a2, double a3) throws Throwable;

	/** Evaluates the predicate against the three values packed in the tuple. */
	default boolean tupleTest(LBiObjDblTriple<T1, T2> args) {
		return test(args.first(), args.second(), args.third());
	}
	/** Function call that handles exceptions according to the instructions. */
	default boolean handlingTest(T1 a1, T2 a2, double a3, HandlingInstructions<Throwable, RuntimeException> handling) {
		try {
			return this.testX(a1, a2, a3);
		} catch (Throwable e) { // NOSONAR
			throw Handler.handleOrNest(e, handling);
		}
	}

	/** Returns a predicate whose failures are processed according to the given handling instructions. */
	default LBiObjDblPredicate<T1, T2> handling(HandlingInstructions<Throwable, RuntimeException> handling) {
		return (a1, a2, a3) -> handlingTest(a1, a2, a3, handling);
	}

	// The 'test' overloads below wrap any failure into the exception produced by
	// 'factory', using 'newMessage' with zero to three message parameters; the
	// matching 'trying' overloads return a predicate that applies the same wrapping.
	default boolean test(T1 a1, T2 a2, double a3, @Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage) {
		try {
			return this.testX(a1, a2, a3);
		} catch (Throwable e) { // NOSONAR
			throw Handling.wrap(e, factory, newMessage);
		}
	}

	default boolean test(T1 a1, T2 a2, double a3, @Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage, @Nullable Object param1) {
		try {
			return this.testX(a1, a2, a3);
		} catch (Throwable e) { // NOSONAR
			throw Handling.wrap(e, factory, newMessage, param1);
		}
	}

	default boolean test(T1 a1, T2 a2, double a3, @Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage, @Nullable Object param1, @Nullable Object param2) {
		try {
			return this.testX(a1, a2, a3);
		} catch (Throwable e) { // NOSONAR
			throw Handling.wrap(e, factory, newMessage, param1, param2);
		}
	}

	default boolean test(T1 a1, T2 a2, double a3, @Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage, @Nullable Object param1, @Nullable Object param2, @Nullable Object param3) {
		try {
			return this.testX(a1, a2, a3);
		} catch (Throwable e) { // NOSONAR
			throw Handling.wrap(e, factory, newMessage, param1, param2, param3);
		}
	}

	default LBiObjDblPredicate<T1, T2> trying(@Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage) {
		return (a1, a2, a3) -> test(a1, a2, a3, factory, newMessage);
	}

	default LBiObjDblPredicate<T1, T2> trying(@Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage, @Nullable Object param1) {
		return (a1, a2, a3) -> test(a1, a2, a3, factory, newMessage, param1);
	}
default LBiObjDblPredicate<T1, T2> trying(@Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage, @Nullable Object param1, @Nullable Object param2) {
return (a1, a2, a3) -> test(a1, a2, a3, factory, newMessage, param1, param1);
}
	default LBiObjDblPredicate<T1, T2> trying(@Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage, @Nullable Object param1, @Nullable Object param2, @Nullable Object param3) {
		return (a1, a2, a3) -> test(a1, a2, a3, factory, newMessage, param1, param2, param3);
	}

	/** Wraps any failure into the exception produced by 'factory' (no message). */
	default boolean test(T1 a1, T2 a2, double a3, @Nonnull ExWF<RuntimeException> factory) {
		try {
			return this.testX(a1, a2, a3);
		} catch (Throwable e) { // NOSONAR
			throw Handling.wrap(e, factory);
		}
	}

	default LBiObjDblPredicate<T1, T2> trying(@Nonnull ExWF<RuntimeException> factory) {
		return (a1, a2, a3) -> test(a1, a2, a3, factory);
	}

	/** Evaluates the predicate; on failure, delegates the decision to 'handler' (errors are still rethrown). */
	default boolean testThen(T1 a1, T2 a2, double a3, @Nonnull LPredicate<Throwable> handler) {
		try {
			return this.testX(a1, a2, a3);
		} catch (Throwable e) { // NOSONAR
			Handling.handleErrors(e);
			return handler.test(e);
		}
	}

	default LBiObjDblPredicate<T1, T2> tryingThen(@Nonnull LPredicate<Throwable> handler) {
		return (a1, a2, a3) -> testThen(a1, a2, a3, handler);
	}

	/** Function call that handles exceptions by always nesting checked exceptions and propagating the others as is. */
	default boolean nestingTest(T1 a1, T2 a2, double a3) {
		try {
			return this.testX(a1, a2, a3);
		} catch (Throwable e) { // NOSONAR
			throw Handling.nestCheckedAndThrow(e);
		}
	}

	/** Function call that handles exceptions by always propagating them as is, even when they are undeclared checked ones. */
	default boolean shovingTest(T1 a1, T2 a2, double a3) {
		try {
			return this.testX(a1, a2, a3);
		} catch (Throwable e) { // NOSONAR
			throw Handling.shoveIt(e);
		}
	}
	// Static convenience variants: null-check the predicate argument, then delegate
	// to the corresponding instance method above.
	static <T1, T2> boolean shovingTest(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> func) {
		Null.nonNullArg(func, "func");
		return func.shovingTest(a1, a2, a3);
	}

	static <T1, T2> boolean handlingTest(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> func, HandlingInstructions<Throwable, RuntimeException> handling) { // <-
		Null.nonNullArg(func, "func");
		return func.handlingTest(a1, a2, a3, handling);
	}

	static <T1, T2> boolean tryTest(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> func) {
		Null.nonNullArg(func, "func");
		return func.nestingTest(a1, a2, a3);
	}

	static <T1, T2> boolean tryTest(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> func, @Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage) {
		Null.nonNullArg(func, "func");
		return func.test(a1, a2, a3, factory, newMessage);
	}

	static <T1, T2> boolean tryTest(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> func, @Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage, @Nullable Object param1) {
		Null.nonNullArg(func, "func");
		return func.test(a1, a2, a3, factory, newMessage, param1);
	}

	static <T1, T2> boolean tryTest(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> func, @Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage, @Nullable Object param1, @Nullable Object param2) {
		Null.nonNullArg(func, "func");
		return func.test(a1, a2, a3, factory, newMessage, param1, param2);
	}

	static <T1, T2> boolean tryTest(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> func, @Nonnull ExWMF<RuntimeException> factory, @Nonnull String newMessage, @Nullable Object param1, @Nullable Object param2, @Nullable Object param3) {
		Null.nonNullArg(func, "func");
		return func.test(a1, a2, a3, factory, newMessage, param1, param2, param3);
	}

	static <T1, T2> boolean tryTest(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> func, @Nonnull ExWF<RuntimeException> factory) {
		Null.nonNullArg(func, "func");
		return func.test(a1, a2, a3, factory);
	}
static <T1, T2> boolean tryTestThen(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> func, @Nonnull LPredicate<Throwable> handler) {
Null.nonNullArg(func, "func");
return func.testThen(a1, a2, a3, handler);
}
	/** Tests; if this predicate throws (after error filtering), the {@code failSafe} predicate is evaluated instead. */
	default boolean failSafeTest(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<T1, T2> failSafe) {
		try {
			return test(a1, a2, a3);
		} catch (Throwable e) { // NOSONAR
			Handling.handleErrors(e);
			return failSafe.test(a1, a2, a3);
		}
	}
	/** Static variant: a null {@code func} falls straight through to {@code failSafe}. */
	static <T1, T2> boolean failSafeTest(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> func, @Nonnull LBiObjDblPredicate<T1, T2> failSafe) {
		Null.nonNullArg(failSafe, "failSafe");
		if (func == null) {
			return failSafe.test(a1, a2, a3);
		} else {
			return func.failSafeTest(a1, a2, a3, failSafe);
		}
	}
	/** Returns a predicate with the fail-safe behavior of {@code failSafeTest} baked in. */
	static <T1, T2> LBiObjDblPredicate<T1, T2> failSafe(LBiObjDblPredicate<T1, T2> func, @Nonnull LBiObjDblPredicate<T1, T2> failSafe) {
		Null.nonNullArg(failSafe, "failSafe");
		return (a1, a2, a3) -> failSafeTest(a1, a2, a3, func, failSafe);
	}
	/** Executes {@code action} when the predicate holds; returns whether the predicate held. */
	default boolean doIf(T1 a1, T2 a2, double a3, LAction action) {
		Null.nonNullArg(action, "action");
		if (test(a1, a2, a3)) {
			action.execute();
			return true;
		} else {
			return false;
		}
	}
	static <T1, T2> boolean doIf(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<T1, T2> predicate, @Nonnull LAction action) {
		Null.nonNullArg(predicate, "predicate");
		return predicate.doIf(a1, a2, a3, action);
	}
	static <T1, T2> boolean doIf(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<T1, T2> predicate, @Nonnull LBiObjDblConsumer<? super T1, ? super T2> consumer) {
		Null.nonNullArg(predicate, "predicate");
		return predicate.doIf(a1, a2, a3, consumer);
	}
	/** Passes the arguments to {@code consumer} when the predicate holds; returns whether the predicate held. */
	default boolean doIf(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblConsumer<? super T1, ? super T2> consumer) {
		Null.nonNullArg(consumer, "consumer");
		if (test(a1, a2, a3)) {
			consumer.accept(a1, a2, a3);
			return true;
		} else {
			return false;
		}
	}
	/** Just to mirror the method: Ensures the result is not null (trivially true for a primitive boolean). */
	default boolean nonNullTest(T1 a1, T2 a2, double a3) {
		return test(a1, a2, a3);
	}
	/** For convenience, where "test()" makes things more confusing than "applyAsBoolean()". */
	default boolean doApplyAsBoolean(T1 a1, T2 a2, double a3) {
		return test(a1, a2, a3);
	}
	/** Returns description of the functional interface. */
	@Nonnull
	default String functionalInterfaceDescription() {
		return LBiObjDblPredicate.DESCRIPTION;
	}
	/** From-To (inclusive on both ends; iterates downward when min_i > max_i). Intended to be used with non-capturing lambda. */
	public static <T1, T2> void fromTo(int min_i, int max_i, T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<T1, T2> func) {
		Null.nonNullArg(func, "func");
		if (min_i <= max_i) {
			for (int i = min_i; i <= max_i; i++) {
				func.test(a1, a2, a3);
			}
		} else {
			for (int i = min_i; i >= max_i; i--) {
				func.test(a1, a2, a3);
			}
		}
	}
	/** From-Till (exclusive end; iterates downward when min_i > max_i). Intended to be used with non-capturing lambda. */
	public static <T1, T2> void fromTill(int min_i, int max_i, T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<T1, T2> func) {
		Null.nonNullArg(func, "func");
		if (min_i <= max_i) {
			for (int i = min_i; i < max_i; i++) {
				func.test(a1, a2, a3);
			}
		} else {
			for (int i = min_i; i > max_i; i--) {
				func.test(a1, a2, a3);
			}
		}
	}
	/** Repeats the call max_i times (no-op for negative counts). Intended to be used with non-capturing lambda. */
	public static <T1, T2> void times(int max_i, T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<T1, T2> func) {
		if (max_i < 0)
			return;
		fromTill(0, max_i, a1, a2, a3, func);
	}
/** Extract and apply function. */
public static <M, K, V, T2> boolean from(@Nonnull M container, LBiFunction<M, K, V> extractor, K key, T2 a2, double a3, @Nonnull LBiObjDblPredicate<V, T2> function) {
Null.nonNullArg(container, "container");
Null.nonNullArg(function, "function");
V value = extractor.apply(container, key);
if (value != null) {
return function.test(value, a2, a3);
}
return false;
}
	// "Shrink" family: drops one argument by deriving it from the remaining ones (or fixing it to a constant).
	/** Derives the first argument from (a2, a3) via {@code left}, yielding a two-argument predicate. */
	default LObjDblPredicate<T2> lShrink(@Nonnull LObjDblFunction<T2, T1> left) {
		Null.nonNullArg(left, "left");
		return (a2, a3) -> test(left.apply(a2, a3), a2, a3);
	}
	/** Fixes the first argument to the constant {@code a1}. */
	default LObjDblPredicate<T2> lShrink_(T1 a1) {
		return (a2, a3) -> test(a1, a2, a3);
	}
	public static <T2, T1> LObjDblPredicate<T2> lShrunken(@Nonnull LObjDblFunction<T2, T1> left, @Nonnull LBiObjDblPredicate<T1, T2> func) {
		Null.nonNullArg(left, "left");
		Null.nonNullArg(func, "func");
		return func.lShrink(left);
	}
	public static <T2, T1> LObjDblPredicate<T2> lShrunken_(T1 a1, @Nonnull LBiObjDblPredicate<T1, T2> func) {
		Null.nonNullArg(func, "func");
		return func.lShrink_(a1);
	}
	/** Derives the last (double) argument from (a1, a2) via {@code right}, yielding a two-argument predicate. */
	default LBiPredicate<T1, T2> rShrink(@Nonnull LToDblBiFunction<T1, T2> right) {
		Null.nonNullArg(right, "right");
		return (a1, a2) -> test(a1, a2, right.applyAsDbl(a1, a2));
	}
	/** Fixes the last (double) argument to the constant {@code a3}. */
	default LBiPredicate<T1, T2> rShrink_(double a3) {
		return (a1, a2) -> test(a1, a2, a3);
	}
	public static <T1, T2> LBiPredicate<T1, T2> rShrunken(@Nonnull LToDblBiFunction<T1, T2> right, @Nonnull LBiObjDblPredicate<T1, T2> func) {
		Null.nonNullArg(right, "right");
		Null.nonNullArg(func, "func");
		return func.rShrink(right);
	}
	public static <T1, T2> LBiPredicate<T1, T2> rShrunken_(double a3, @Nonnull LBiObjDblPredicate<T1, T2> func) {
		Null.nonNullArg(func, "func");
		return func.rShrink_(a3);
	}
	/** Converts a curried chain of functions into this three-argument predicate. */
	public static <T1, T2> LBiObjDblPredicate<T1, T2> uncurry(@Nonnull LFunction<T1, LFunction<T2, LDblPredicate>> func) {
		Null.nonNullArg(func, "func");
		return (T1 a1, T2 a2, double a3) -> func.apply(a1).apply(a2).test(a3);
	}
}
/** Cast that removes generics. */
default LBiObjDblPredicate untyped() {
return this;
}
/** Cast that replace generics. */
default <V2, V3> LBiObjDblPredicate<V2, V3> cast() {
return untyped();
}
/** Cast that replace generics. */
public static <V2, V3> LBiObjDblPredicate<V2, V3> cast(LBiObjDblPredicate<?, ?> function) {
return (LBiObjDblPredicate) function;
}
/** Change function to consumer that ignores output. */
default LBiObjDblConsumer<T1, T2> toConsumer() {
return this::test;
}
	/** Calls domain consumer before main function (arguments are observed, result unchanged). */
	default LBiObjDblPredicate<T1, T2> beforeDo(@Nonnull LBiObjDblConsumer<T1, T2> before) {
		Null.nonNullArg(before, "before");
		return (T1 a1, T2 a2, double a3) -> {
			before.accept(a1, a2, a3);
			return test(a1, a2, a3);
		};
	}
	/** Calls codomain consumer after main function (result is observed, then returned unchanged). */
	default LBiObjDblPredicate<T1, T2> afterDo(@Nonnull LBoolConsumer after) {
		Null.nonNullArg(after, "after");
		return (T1 a1, T2 a2, double a3) -> {
			final boolean retval = test(a1, a2, a3);
			after.accept(retval);
			return retval;
		};
	}
	// Validation helpers: throwIf raises when the predicate HOLDS, throwIfNot when it does NOT.
	// The parameterized message variants format via String.format with the given params only;
	// the plain-msg variants format with (a1, a2, a3). All return a1 on success for fluent use.
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExMF<X> factory, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> msgFunc) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(msgFunc, "msgFunc");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(factory, msgFunc.apply(a1, a2, a3));
		}
		return a1;
	}
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExMF<X> factory, @Nonnull String msg) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(msg, "msg");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(msg, a1, a2, a3));
		}
		return a1;
	}
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1));
		}
		return a1;
	}
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1, @Nullable Object param2) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1, param2));
		}
		return a1;
	}
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1, @Nullable Object param2,
			@Nullable Object param3) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1, param2, param3));
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExMF<X> factory, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> msgFunc)
			throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(msgFunc, "msgFunc");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(factory, msgFunc.apply(a1, a2, a3));
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExMF<X> factory, @Nonnull String msg) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(msg, "msg");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(msg, a1, a2, a3));
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1));
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1, @Nullable Object param2) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1, param2));
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1, @Nullable Object param2,
			@Nullable Object param3) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1, param2, param3));
		}
		return a1;
	}
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExF<X> noArgFactory) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(noArgFactory, "noArgFactory");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(noArgFactory);
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, @Nonnull ExF<X> noArgFactory) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(noArgFactory, "noArgFactory");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(noArgFactory);
		}
		return a1;
	}
	// throwIf/throwIfNot variants taking the predicate as the second argument (fluent order);
	// behavior matches the (a1, a2, a3, pred, ...) forms above this group in the original file.
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> msgFunc) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(msgFunc, "msgFunc");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(factory, msgFunc.apply(a1, a2, a3));
		}
		return a1;
	}
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String msg) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(msg, "msg");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(msg, a1, a2, a3));
		}
		return a1;
	}
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1));
		}
		return a1;
	}
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1, @Nullable Object param2) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1, param2));
		}
		return a1;
	}
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1, @Nullable Object param2,
			@Nullable Object param3) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1, param2, param3));
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> msgFunc)
			throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(msgFunc, "msgFunc");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(factory, msgFunc.apply(a1, a2, a3));
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String msg) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(msg, "msg");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(msg, a1, a2, a3));
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1));
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1, @Nullable Object param2) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1, param2));
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1, @Nullable Object param2,
			@Nullable Object param3) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(factory, String.format(message, param1, param2, param3));
		}
		return a1;
	}
	/** Throws new exception if condition is met. */
	public static <T1, T2, X extends Throwable> T1 throwIf(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExF<X> noArgFactory) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(noArgFactory, "noArgFactory");
		if (pred.test(a1, a2, a3)) {
			throw Handling.create(noArgFactory);
		}
		return a1;
	}
	/** Throws new exception if condition is NOT met. */
	public static <T1, T2, X extends Throwable> T1 throwIfNot(T1 a1, @Nonnull LBiObjDblPredicate<? super T1, ? super T2> pred, T2 a2, double a3, @Nonnull ExF<X> noArgFactory) throws X {
		Null.nonNullArg(pred, "pred");
		Null.nonNullArg(noArgFactory, "noArgFactory");
		if (!pred.test(a1, a2, a3)) {
			throw Handling.create(noArgFactory);
		}
		return a1;
	}
	// throwIfNot$ family: the "special predicate" returns a non-null message when validation FAILS;
	// that message is appended (after a space) to the formatted text of the thrown exception.
	/** Throws new exception if condition is not met (non null message is returned by 'predicate') */
	public static <T1, T2, X extends Throwable> T1 throwIfNot$(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> specialPredicate, @Nonnull ExMF<X> factory) throws X {
		Null.nonNullArg(specialPredicate, "specialPredicate");
		Null.nonNullArg(factory, "factory");
		var m = specialPredicate.apply(a1, a2, a3);
		if (m != null) {
			throw Handling.create(factory, m);
		}
		return a1;
	}
	/** Throws new exception if condition is not met (non null message is returned by 'predicate') */
	public static <T1, T2, X extends Throwable> T1 throwIfNot$(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> specialPredicate, @Nonnull ExMF<X> factory, @Nonnull String msg) throws X {
		Null.nonNullArg(specialPredicate, "specialPredicate");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(msg, "msg");
		var m = specialPredicate.apply(a1, a2, a3);
		if (m != null) {
			throw Handling.create(factory, String.format(msg, a1, a2, a3) + ' ' + m);
		}
		return a1;
	}
	/** Throws new exception if condition is not met (non null message is returned by 'predicate') */
	public static <T1, T2, X extends Throwable> T1 throwIfNot$(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> specialPredicate, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1)
			throws X {
		Null.nonNullArg(specialPredicate, "specialPredicate");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		var m = specialPredicate.apply(a1, a2, a3);
		if (m != null) {
			throw Handling.create(factory, String.format(message, param1) + ' ' + m);
		}
		return a1;
	}
	/** Throws new exception if condition is not met (non null message is returned by 'predicate') */
	public static <T1, T2, X extends Throwable> T1 throwIfNot$(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> specialPredicate, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1,
			@Nullable Object param2) throws X {
		Null.nonNullArg(specialPredicate, "specialPredicate");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		var m = specialPredicate.apply(a1, a2, a3);
		if (m != null) {
			throw Handling.create(factory, String.format(message, param1, param2) + ' ' + m);
		}
		return a1;
	}
	/** Throws new exception if condition is not met (non null message is returned by 'predicate') */
	public static <T1, T2, X extends Throwable> T1 throwIfNot$(T1 a1, T2 a2, double a3, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> specialPredicate, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1,
			@Nullable Object param2, @Nullable Object param3) throws X {
		Null.nonNullArg(specialPredicate, "specialPredicate");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		var m = specialPredicate.apply(a1, a2, a3);
		if (m != null) {
			throw Handling.create(factory, String.format(message, param1, param2, param3) + ' ' + m);
		}
		return a1;
	}
	/** Throws new exception if condition is not met (non null message is returned by 'predicate') */
	public static <T1, T2, X extends Throwable> T1 throwIfNot$(T1 a1, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> specialPredicate, T2 a2, double a3, @Nonnull ExMF<X> factory) throws X {
		Null.nonNullArg(specialPredicate, "specialPredicate");
		Null.nonNullArg(factory, "factory");
		var m = specialPredicate.apply(a1, a2, a3);
		if (m != null) {
			throw Handling.create(factory, m);
		}
		return a1;
	}
	/** Throws new exception if condition is not met (non null message is returned by 'predicate') */
	public static <T1, T2, X extends Throwable> T1 throwIfNot$(T1 a1, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> specialPredicate, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String msg) throws X {
		Null.nonNullArg(specialPredicate, "specialPredicate");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(msg, "msg");
		var m = specialPredicate.apply(a1, a2, a3);
		if (m != null) {
			throw Handling.create(factory, String.format(msg, a1, a2, a3) + ' ' + m);
		}
		return a1;
	}
	/** Throws new exception if condition is not met (non null message is returned by 'predicate') */
	public static <T1, T2, X extends Throwable> T1 throwIfNot$(T1 a1, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> specialPredicate, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1)
			throws X {
		Null.nonNullArg(specialPredicate, "specialPredicate");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		var m = specialPredicate.apply(a1, a2, a3);
		if (m != null) {
			throw Handling.create(factory, String.format(message, param1) + ' ' + m);
		}
		return a1;
	}
	/** Throws new exception if condition is not met (non null message is returned by 'predicate') */
	public static <T1, T2, X extends Throwable> T1 throwIfNot$(T1 a1, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> specialPredicate, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1,
			@Nullable Object param2) throws X {
		Null.nonNullArg(specialPredicate, "specialPredicate");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		var m = specialPredicate.apply(a1, a2, a3);
		if (m != null) {
			throw Handling.create(factory, String.format(message, param1, param2) + ' ' + m);
		}
		return a1;
	}
	/** Throws new exception if condition is not met (non null message is returned by 'predicate') */
	public static <T1, T2, X extends Throwable> T1 throwIfNot$(T1 a1, @Nonnull LBiObjDblFunction<? super T1, ? super T2, ? extends String> specialPredicate, T2 a2, double a3, @Nonnull ExMF<X> factory, @Nonnull String message, @Nullable Object param1,
			@Nullable Object param2, @Nullable Object param3) throws X {
		Null.nonNullArg(specialPredicate, "specialPredicate");
		Null.nonNullArg(factory, "factory");
		Null.nonNullArg(message, "message");
		var m = specialPredicate.apply(a1, a2, a3);
		if (m != null) {
			throw Handling.create(factory, String.format(message, param1, param2, param3) + ' ' + m);
		}
		return a1;
	}
	/** Captures arguments but delays the evaluation. */
	default LBoolSupplier capture(T1 a1, T2 a2, double a3) {
		return () -> this.test(a1, a2, a3);
	}
	/** Creates function that always returns the same value. */
	static <T1, T2> LBiObjDblPredicate<T1, T2> constant(boolean r) {
		return (a1, a2, a3) -> r;
	}
	/** Captures single parameter function into this interface where only 1st parameter will be used. */
	@Nonnull
	static <T1, T2> LBiObjDblPredicate<T1, T2> test1st(@Nonnull LPredicate<T1> func) {
		return (a1, a2, a3) -> func.test(a1);
	}
	/** Captures single parameter function into this interface where only 2nd parameter will be used. */
	@Nonnull
	static <T1, T2> LBiObjDblPredicate<T1, T2> test2nd(@Nonnull LPredicate<T2> func) {
		return (a1, a2, a3) -> func.test(a2);
	}
	/** Captures single parameter function into this interface where only 3rd parameter will be used. */
	@Nonnull
	static <T1, T2> LBiObjDblPredicate<T1, T2> test3rd(@Nonnull LDblPredicate func) {
		return (a1, a2, a3) -> func.test(a3);
	}
	/** Convenient method in case lambda expression is ambiguous for the compiler (that might happen for overloaded methods accepting different interfaces). */
	@Nonnull
	static <T1, T2> LBiObjDblPredicate<T1, T2> biObjDblPred(final @Nonnull LBiObjDblPredicate<T1, T2> lambda) {
		Null.nonNullArg(lambda, "lambda");
		return lambda;
	}
	/** A completely inconvenient method in case lambda expression and generic arguments are ambiguous for the compiler (class arguments only pin the type parameters). */
	@Nonnull
	static <T1, T2> LBiObjDblPredicate<T1, T2> biObjDblPred(@Nullable Class<T1> c1, @Nullable Class<T2> c2, final @Nonnull LBiObjDblPredicate<T1, T2> lambda) {
		Null.nonNullArg(lambda, "lambda");
		return lambda;
	}
final class S<T1, T2> implements LBiObjDblPredicate<T1, T2> {
private LBiObjDblPredicate<T1, T2> target = null;
@Override
public boolean testX(T1 a1, T2 a2, double a3) throws Throwable {
return target.testX(a1, a2, a3);
}
}
@Nonnull
static <T1, T2> LBiObjDblPredicate<T1, T2> recursive(final @Nonnull LFunction<LBiObjDblPredicate<T1, T2>, LBiObjDblPredicate<T1, T2>> selfLambda) {
final S<T1, T2> single = new S();
LBiObjDblPredicate<T1, T2> func = selfLambda.apply(single);
single.target = func;
return func;
}
public static <T1, T2> M<T1, T2> mementoOf(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> function) {
var initialValue = function.test(a1, a2, a3);
return initializedMementoOf(initialValue, function);
}
public static <T1, T2> M<T1, T2> initializedMementoOf(boolean initialValue, LBiObjDblPredicate<T1, T2> function) {
return memento(initialValue, initialValue, function, (m, x1, x2) -> x2);
}
public static <T1, T2> M<T1, T2> deltaOf(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> function, LLogicalBinaryOperator deltaFunction) {
var initialValue = function.test(a1, a2, a3);
return initializedDeltaOf(initialValue, function, deltaFunction);
}
public static <T1, T2> M<T1, T2> deltaOf(T1 a1, T2 a2, double a3, LBiObjDblPredicate<T1, T2> function) {
var initialValue = function.test(a1, a2, a3);
return initializedDeltaOf(initialValue, function, (x1, x2) -> x1 != x2);
}
public static <T1, T2> M<T1, T2> initializedDeltaOf(boolean initialValue, LBiObjDblPredicate<T1, T2> function, LLogicalBinaryOperator deltaFunction) {
return memento(initialValue, deltaFunction.apply(initialValue, initialValue), function, (m, x1, x2) -> deltaFunction.apply(x1, x2));
}
public static <T1, T2> M<T1, T2> memento(boolean initialBaseValue, boolean initialValue, LBiObjDblPredicate<T1, T2> baseFunction, LLogicalTernaryOperator mementoFunction) {
return new M(initialBaseValue, initialValue, baseFunction, mementoFunction);
}
	/**
	 * Implementation that allows to create derivative functions (do not confuse it with math concepts). Very short name is intended to be used with parent (LBiObjDblPredicate.M)
	 */
	@NotThreadSafe
	final class M<T1, T2> implements LBiObjDblPredicate<T1, T2> {
		private final LBiObjDblPredicate<T1, T2> baseFunction;
		// Mutable state: last raw result of baseFunction and last combined (memento) result.
		private boolean lastBaseValue;
		private boolean lastValue;
		private final LLogicalTernaryOperator mementoFunction;
		private M(boolean lastBaseValue, boolean lastValue, LBiObjDblPredicate<T1, T2> baseFunction, LLogicalTernaryOperator mementoFunction) {
			this.baseFunction = baseFunction;
			this.lastBaseValue = lastBaseValue;
			this.lastValue = lastValue;
			this.mementoFunction = mementoFunction;
		}
		@Override
		public boolean testX(T1 a1, T2 a2, double a3) throws Throwable {
			// Combine (previous combined value, previous base value, current base value) into the new result.
			boolean x1 = lastBaseValue;
			boolean x2 = lastBaseValue = baseFunction.testX(a1, a2, a3);
			return lastValue = mementoFunction.apply(lastValue, x1, x2);
		}
		public boolean lastValue() {
			return lastValue;
		};
		public boolean lastBaseValue() {
			return lastBaseValue;
		};
	}
	/** Creates a predicate that always throws the exception produced by {@code exF}. */
	@Nonnull
	static <T1, T2> LBiObjDblPredicate<T1, T2> biObjDblPredThrowing(final @Nonnull ExF<Throwable> exF) {
		Null.nonNullArg(exF, "exF");
		return (a1, a2, a3) -> {
			throw exF.produce();
		};
	}
	/** Creates a predicate that always throws the exception produced by {@code exF} from {@code message}. */
	@Nonnull
	static <T1, T2> LBiObjDblPredicate<T1, T2> biObjDblPredThrowing(final String message, final @Nonnull ExMF<Throwable> exF) {
		Null.nonNullArg(exF, "exF");
		return (a1, a2, a3) -> {
			throw exF.produce(message);
		};
	}
// <editor-fold desc="wrap variants">
/** Convenient method in case lambda expression is ambiguous for the compiler (that might happen for overloaded methods accepting different interfaces). */
@Nonnull
static <T1, T2> LBiObjDblPredicate.LObj0Dbl2Obj1Pred<T1, T2> obj0Dbl2Obj1Pred(final @Nonnull LBiObjDblPredicate.LObj0Dbl2Obj1Pred<T1, T2> lambda) {
Null.nonNullArg(lambda, "lambda");
return lambda;
}
/** Convenient method in case lambda expression is ambiguous for the compiler (that might happen for overloaded methods accepting different interfaces). */
@Nonnull
static <T2, T1> LBiObjDblPredicate.LObj1Obj0Dbl2Pred<T2, T1> obj1Obj0Dbl2Pred(final @Nonnull LBiObjDblPredicate.LObj1Obj0Dbl2Pred<T2, T1> lambda) {
Null.nonNullArg(lambda, "lambda");
return lambda;
}
/** Convenient method in case lambda expression is ambiguous for the compiler (that might happen for overloaded methods accepting different interfaces). */
@Nonnull
static <T2, T1> LBiObjDblPredicate.LObj1Dbl2Obj0Pred<T2, T1> obj1Dbl2Obj0Pred(final @Nonnull LBiObjDblPredicate.LObj1Dbl2Obj0Pred<T2, T1> lambda) {
	Null.nonNullArg(lambda, "lambda");
	return lambda; // identity: only fixes the target type for the compiler
}
/** Convenient method in case lambda expression is ambiguous for the compiler (that might happen for overloaded methods accepting different interfaces). */
@Nonnull
static <T1, T2> LBiObjDblPredicate.LDbl2Obj0Obj1Pred<T1, T2> dbl2Obj0Obj1Pred(final @Nonnull LBiObjDblPredicate.LDbl2Obj0Obj1Pred<T1, T2> lambda) {
	Null.nonNullArg(lambda, "lambda");
	return lambda; // identity: only fixes the target type for the compiler
}
/** Convenient method in case lambda expression is ambiguous for the compiler (that might happen for overloaded methods accepting different interfaces). */
@Nonnull
static <T2, T1> LBiObjDblPredicate.LDbl2Obj1Obj0Pred<T2, T1> dbl2Obj1Obj0Pred(final @Nonnull LBiObjDblPredicate.LDbl2Obj1Obj0Pred<T2, T1> lambda) {
	Null.nonNullArg(lambda, "lambda");
	return lambda; // identity: only fixes the target type for the compiler
}
// </editor-fold>
/** Null-checked static invocation helper: evaluates {@code lambda} with the given arguments. */
static <T1, T2> boolean call(T1 a1, T2 a2, double a3, final @Nonnull LBiObjDblPredicate<T1, T2> lambda) {
	Null.nonNullArg(lambda, "lambda");
	return lambda.test(a1, a2, a3);
}
// <editor-fold desc="wrap">
// </editor-fold>
// <editor-fold desc="predicate">
/**
 * Returns a predicate that represents the logical negation of this predicate.
 *
 * @return a new predicate evaluating {@code !test(a1, a2, a3)}
 * @see java.util.function.Predicate#negate
 */
@Nonnull
default LBiObjDblPredicate<T1, T2> negate() {
	return (a1, a2, a3) -> !test(a1, a2, a3);
}
/** Static counterpart of {@code negate()}: returns the logical negation of the given predicate. */
@Nonnull
static <T1, T2> LBiObjDblPredicate<T1, T2> not(@Nonnull LBiObjDblPredicate<T1, T2> pred) {
	Null.nonNullArg(pred, "pred");
	return pred.negate();
}
/**
 * Returns a predicate that represents the short-circuiting logical AND of this predicate and the argument one.
 *
 * @see java.util.function.Predicate#and
 */
@Nonnull
default LBiObjDblPredicate<T1, T2> and(@Nonnull LBiObjDblPredicate<? super T1, ? super T2> other) {
	Null.nonNullArg(other, "other");
	return (a1, a2, a3) -> test(a1, a2, a3) && other.test(a1, a2, a3);
}
/**
 * Returns a predicate that is TRUE only if every given predicate is TRUE (short-circuits on the
 * first FALSE). An empty array yields a predicate that is always TRUE.
 */
@Nonnull
public static <T1, T2> LBiObjDblPredicate<T1, T2> and(@Nonnull LBiObjDblPredicate<? super T1, ? super T2>... predicates) {
	Null.nonNullArg(predicates, "predicates");
	return (a1, a2, a3) -> {
		for (LBiObjDblPredicate<? super T1, ? super T2> p : predicates) {
			if (!p.test(a1, a2, a3)) {
				return false;
			}
		}
		return true;
	};
}
/**
 * Returns a predicate that represents the short-circuiting logical OR of this predicate and the argument one.
 *
 * @see java.util.function.Predicate#or
 */
@Nonnull
default LBiObjDblPredicate<T1, T2> or(@Nonnull LBiObjDblPredicate<? super T1, ? super T2> other) {
	Null.nonNullArg(other, "other");
	return (a1, a2, a3) -> test(a1, a2, a3) || other.test(a1, a2, a3);
}
/**
 * Returns a predicate that is TRUE if any given predicate is TRUE (short-circuits on the
 * first TRUE). An empty array yields a predicate that is always FALSE.
 */
@Nonnull
public static <T1, T2> LBiObjDblPredicate<T1, T2> or(@Nonnull LBiObjDblPredicate<? super T1, ? super T2>... predicates) {
	Null.nonNullArg(predicates, "predicates");
	return (a1, a2, a3) -> {
		for (LBiObjDblPredicate<? super T1, ? super T2> p : predicates) {
			if (p.test(a1, a2, a3)) {
				return true;
			}
		}
		return false;
	};
}
/**
 * Returns a predicate that represents the logical XOR of evaluation of this predicate and the
 * argument one. Both predicates are always evaluated (XOR cannot short-circuit).
 *
 * @see java.util.function.Predicate#or
 */
@Nonnull
default LBiObjDblPredicate<T1, T2> xor(@Nonnull LBiObjDblPredicate<? super T1, ? super T2> other) {
	Null.nonNullArg(other, "other");
	return (a1, a2, a3) -> test(a1, a2, a3) ^ other.test(a1, a2, a3);
}
/**
 * Creates a predicate that evaluates whether the arguments equal the given reference values.
 * Objects are compared with {@code equals} (null-safe); the double is compared with {@code ==},
 * so {@code NaN} reference values never match.
 *
 * @see java.util.function.Predicate#isEqual(Object)
 */
@Nonnull
static <T1, T2> LBiObjDblPredicate<T1, T2> isEqual(T1 v1, T2 v2, double v3) {
	return (a1, a2, a3) -> (a1 == null ? v1 == null : a1.equals(v1)) && (a2 == null ? v2 == null : a2.equals(v2)) && (a3 == v3);
}
// </editor-fold>
// <editor-fold desc="compose (functional)">
/** Allows to manipulate the domain of the function: adapts each incoming argument with the corresponding {@code before} function. */
@Nonnull
default <V1, V2> LBiObjDblPredicate<V1, V2> compose(@Nonnull final LFunction<? super V1, ? extends T1> before1, @Nonnull final LFunction<? super V2, ? extends T2> before2, @Nonnull final LDblUnaryOperator before3) {
	Null.nonNullArg(before1, "before1");
	Null.nonNullArg(before2, "before2");
	Null.nonNullArg(before3, "before3");
	return (v1, v2, v3) -> this.test(before1.apply(v1), before2.apply(v2), before3.applyAsDbl(v3));
}
/** Static variant of {@code compose} with the target predicate passed last. */
public static <V1, V2, T1, T2> LBiObjDblPredicate<V1, V2> composed(@Nonnull final LFunction<? super V1, ? extends T1> before1, @Nonnull final LFunction<? super V2, ? extends T2> before2, @Nonnull final LDblUnaryOperator before3,
		LBiObjDblPredicate<T1, T2> after) {
	return after.compose(before1, before2, before3);
}
/** Allows to manipulate the domain of the function: like {@code compose}, but the double argument is produced from an arbitrary object, yielding a fully generic {@code LTriPredicate}. */
@Nonnull
default <V1, V2, V3> LTriPredicate<V1, V2, V3> biObjDblPredCompose(@Nonnull final LFunction<? super V1, ? extends T1> before1, @Nonnull final LFunction<? super V2, ? extends T2> before2, @Nonnull final LToDblFunction<? super V3> before3) {
	Null.nonNullArg(before1, "before1");
	Null.nonNullArg(before2, "before2");
	Null.nonNullArg(before3, "before3");
	return (v1, v2, v3) -> this.test(before1.apply(v1), before2.apply(v2), before3.applyAsDbl(v3));
}
/** Static variant of {@code biObjDblPredCompose} with the target predicate passed last. */
public static <V1, V2, V3, T1, T2> LTriPredicate<V1, V2, V3> composed(@Nonnull final LFunction<? super V1, ? extends T1> before1, @Nonnull final LFunction<? super V2, ? extends T2> before2, @Nonnull final LToDblFunction<? super V3> before3,
		LBiObjDblPredicate<T1, T2> after) {
	return after.biObjDblPredCompose(before1, before2, before3);
}
// </editor-fold>
// <editor-fold desc="then (functional)">
/** Combines two functions together in a order: feeds the boolean result of this predicate into {@code after}, producing a function. */
@Nonnull
default <V> LBiObjDblFunction<T1, T2, V> boolToBiObjDblFunc(@Nonnull LBoolFunction<? extends V> after) {
	Null.nonNullArg(after, "after");
	return (a1, a2, a3) -> after.apply(this.test(a1, a2, a3));
}
/** Combines two functions together in a order: post-processes the boolean result of this predicate with {@code after}. */
@Nonnull
default LBiObjDblPredicate<T1, T2> boolToBiObjDblPred(@Nonnull LLogicalOperator after) {
	Null.nonNullArg(after, "after");
	return (a1, a2, a3) -> after.apply(this.test(a1, a2, a3));
}
// </editor-fold>
// <editor-fold desc="variant conversions">
// </editor-fold>
// <editor-fold desc="interface variants">
/** Permutation of LBiObjDblPredicate for method references: argument order (a1, a3, a2). */
@FunctionalInterface
interface LObj0Dbl2Obj1Pred<T1, T2> extends LBiObjDblPredicate<T1, T2> {
	/**
	 * Bridges the canonical argument order to the permuted one; callers use
	 * {@code test(T1 a1, T2 a2, double a3)} as usual.
	 */
	default boolean testX(T1 a1, T2 a2, double a3) {
		return this.testObj0Dbl2Obj1(a1, a3, a2);
	}

	// boolean testObj0Dbl2Obj1(T1 a1,double a3,T2 a2) ;
	/** Non-throwing facade: wraps checked exceptions from {@code testObj0Dbl2Obj1X}. */
	default boolean testObj0Dbl2Obj1(T1 a1, double a3, T2 a2) {
		// return nestingTestObj0Dbl2Obj1(a1,a3,a2);
		try {
			return this.testObj0Dbl2Obj1X(a1, a3, a2);
		} catch (Throwable e) { // NOSONAR
			throw Handling.nestCheckedAndThrow(e);
		}
	}

	/**
	 * Implement this, but call testObj0Dbl2Obj1(T1 a1,double a3,T2 a2)
	 */
	boolean testObj0Dbl2Obj1X(T1 a1, double a3, T2 a2) throws Throwable;
}
/** Permutation of LBiObjDblPredicate for method references: argument order (a2, a1, a3). */
@FunctionalInterface
interface LObj1Obj0Dbl2Pred<T2, T1> extends LBiObjDblPredicate<T1, T2> {
	/**
	 * Bridges the canonical argument order to the permuted one; callers use
	 * {@code test(T1 a1, T2 a2, double a3)} as usual.
	 * (Original generated comment referenced testObj0Dbl2Obj1, which belongs to a sibling interface.)
	 */
	default boolean testX(T1 a1, T2 a2, double a3) {
		return this.testObj1Obj0Dbl2(a2, a1, a3);
	}

	// boolean testObj1Obj0Dbl2(T2 a2,T1 a1,double a3) ;
	/** Non-throwing facade: wraps checked exceptions from {@code testObj1Obj0Dbl2X}. */
	default boolean testObj1Obj0Dbl2(T2 a2, T1 a1, double a3) {
		// return nestingTestObj1Obj0Dbl2(a2,a1,a3);
		try {
			return this.testObj1Obj0Dbl2X(a2, a1, a3);
		} catch (Throwable e) { // NOSONAR
			throw Handling.nestCheckedAndThrow(e);
		}
	}

	/**
	 * Implement this, but call testObj1Obj0Dbl2(T2 a2,T1 a1,double a3)
	 */
	boolean testObj1Obj0Dbl2X(T2 a2, T1 a1, double a3) throws Throwable;
}
/** Permutation of LBiObjDblPredicate for method references: argument order (a2, a3, a1). */
@FunctionalInterface
interface LObj1Dbl2Obj0Pred<T2, T1> extends LBiObjDblPredicate<T1, T2> {
	/**
	 * Bridges the canonical argument order to the permuted one; callers use
	 * {@code test(T1 a1, T2 a2, double a3)} as usual.
	 * (Original generated comment referenced testObj1Obj0Dbl2, which belongs to a sibling interface.)
	 */
	default boolean testX(T1 a1, T2 a2, double a3) {
		return this.testObj1Dbl2Obj0(a2, a3, a1);
	}

	// boolean testObj1Dbl2Obj0(T2 a2,double a3,T1 a1) ;
	/** Non-throwing facade: wraps checked exceptions from {@code testObj1Dbl2Obj0X}. */
	default boolean testObj1Dbl2Obj0(T2 a2, double a3, T1 a1) {
		// return nestingTestObj1Dbl2Obj0(a2,a3,a1);
		try {
			return this.testObj1Dbl2Obj0X(a2, a3, a1);
		} catch (Throwable e) { // NOSONAR
			throw Handling.nestCheckedAndThrow(e);
		}
	}

	/**
	 * Implement this, but call testObj1Dbl2Obj0(T2 a2,double a3,T1 a1)
	 */
	boolean testObj1Dbl2Obj0X(T2 a2, double a3, T1 a1) throws Throwable;
}
/** Permutation of LBiObjDblPredicate for method references: argument order (a3, a1, a2). */
@FunctionalInterface
interface LDbl2Obj0Obj1Pred<T1, T2> extends LBiObjDblPredicate<T1, T2> {
	/**
	 * Bridges the canonical argument order to the permuted one; callers use
	 * {@code test(T1 a1, T2 a2, double a3)} as usual.
	 * (Original generated comment referenced testObj1Dbl2Obj0, which belongs to a sibling interface.)
	 */
	default boolean testX(T1 a1, T2 a2, double a3) {
		return this.testDbl2Obj0Obj1(a3, a1, a2);
	}

	// boolean testDbl2Obj0Obj1(double a3,T1 a1,T2 a2) ;
	/** Non-throwing facade: wraps checked exceptions from {@code testDbl2Obj0Obj1X}. */
	default boolean testDbl2Obj0Obj1(double a3, T1 a1, T2 a2) {
		// return nestingTestDbl2Obj0Obj1(a3,a1,a2);
		try {
			return this.testDbl2Obj0Obj1X(a3, a1, a2);
		} catch (Throwable e) { // NOSONAR
			throw Handling.nestCheckedAndThrow(e);
		}
	}

	/**
	 * Implement this, but call testDbl2Obj0Obj1(double a3,T1 a1,T2 a2)
	 */
	boolean testDbl2Obj0Obj1X(double a3, T1 a1, T2 a2) throws Throwable;
}
/** Permutation of LBiObjDblPredicate for method references: argument order (a3, a2, a1). */
@FunctionalInterface
interface LDbl2Obj1Obj0Pred<T2, T1> extends LBiObjDblPredicate<T1, T2> {
	/**
	 * Bridges the canonical argument order to the permuted one; callers use
	 * {@code test(T1 a1, T2 a2, double a3)} as usual.
	 * (Original generated comment referenced testDbl2Obj0Obj1, which belongs to a sibling interface.)
	 */
	default boolean testX(T1 a1, T2 a2, double a3) {
		return this.testDbl2Obj1Obj0(a3, a2, a1);
	}

	// boolean testDbl2Obj1Obj0(double a3,T2 a2,T1 a1) ;
	/** Non-throwing facade: wraps checked exceptions from {@code testDbl2Obj1Obj0X}. */
	default boolean testDbl2Obj1Obj0(double a3, T2 a2, T1 a1) {
		// return nestingTestDbl2Obj1Obj0(a3,a2,a1);
		try {
			return this.testDbl2Obj1Obj0X(a3, a2, a1);
		} catch (Throwable e) { // NOSONAR
			throw Handling.nestCheckedAndThrow(e);
		}
	}

	/**
	 * Implement this, but call testDbl2Obj1Obj0(double a3,T2 a2,T1 a1)
	 */
	boolean testDbl2Obj1Obj0X(double a3, T2 a2, T1 a1) throws Throwable;
}
// </editor-fold>
// >>> LBiObjDblPredicate<T1,T2>
/** Returns TRUE regardless of the arguments (constant predicate for method references). */
public static <T1, T2> boolean alwaysTrue(T1 a1, T2 a2, double a3) {
	return true;
}
/** Returns FALSE regardless of the arguments (constant predicate for method references). */
public static <T1, T2> boolean alwaysFalse(T1 a1, T2 a2, double a3) {
	return false;
}
/**
 * For each element (or tuple) from arguments, calls the consumer if predicate test passes.
 * Iteration stops at the size of the shortest of the three indexed sources.
 * Thread safety, fail-fast, fail-safety of this method is not expected.
 */
default <C1, C2, C3> void filterForEach(IndexedRead<C1, a<T1>> ia1, C1 source1, IndexedRead<C2, a<T2>> ia2, C2 source2, IndexedRead<C3, aDouble> ia3, C3 source3, LBiObjDblConsumer<? super T1, ? super T2> consumer) {
	// Effective size = min of all three sources; getters are looked up once before the loop.
	int size = ia1.size(source1);
	LOiFunction<Object, T1> oiFunc1 = (LOiFunction) ia1.getter();
	size = Integer.min(size, ia2.size(source2));
	LOiFunction<Object, T2> oiFunc2 = (LOiFunction) ia2.getter();
	size = Integer.min(size, ia3.size(source3));
	LOiToDblFunction<Object> oiFunc3 = (LOiToDblFunction) ia3.getter();
	int i = 0;
	for (; i < size; i++) {
		T1 a1 = oiFunc1.apply(source1, i);
		T2 a2 = oiFunc2.apply(source2, i);
		double a3 = oiFunc3.applyAsDbl(source3, i);
		doIf(a1, a2, a3, consumer); // consumer invoked only when this predicate passes
	}
}
/**
 * For each element (or tuple) from arguments, calls the consumer if predicate test passes.
 * Source 1 is consumed sequentially (iterator-style); sources 2 and 3 are indexed.
 * Thread safety, fail-fast, fail-safety of this method is not expected.
 */
default <C1, I1, C2, C3> void filterIterate(SequentialRead<C1, I1, a<T1>> sa1, C1 source1, IndexedRead<C2, a<T2>> ia2, C2 source2, IndexedRead<C3, aDouble> ia3, C3 source3, LBiObjDblConsumer<? super T1, ? super T2> consumer) {
	Object iterator1 = ((LFunction) sa1.adapter()).apply(source1);
	LPredicate<Object> testFunc1 = (LPredicate) sa1.tester();
	LFunction<Object, T1> nextFunc1 = (LFunction) sa1.supplier();
	int size = ia2.size(source2);
	LOiFunction<Object, T2> oiFunc2 = (LOiFunction) ia2.getter();
	size = Integer.min(size, ia3.size(source3));
	LOiToDblFunction<Object> oiFunc3 = (LOiToDblFunction) ia3.getter();
	int i = 0;
	// Loop ends when the sequential source is exhausted or the shorter indexed source runs out.
	while (testFunc1.test(iterator1) && i < size) {
		T1 a1 = nextFunc1.apply(iterator1);
		T2 a2 = oiFunc2.apply(source2, i);
		double a3 = oiFunc3.applyAsDbl(source3, i);
		doIf(a1, a2, a3, consumer);
		i++;
	}
}
/**
 * For each element (or tuple) from arguments, calls the consumer if predicate test passes.
 * Source 2 is consumed sequentially (iterator-style); sources 1 and 3 are indexed.
 * Thread safety, fail-fast, fail-safety of this method is not expected.
 */
default <C1, C2, I2, C3> void filterIterate(IndexedRead<C1, a<T1>> ia1, C1 source1, SequentialRead<C2, I2, a<T2>> sa2, C2 source2, IndexedRead<C3, aDouble> ia3, C3 source3, LBiObjDblConsumer<? super T1, ? super T2> consumer) {
	int size = ia1.size(source1);
	LOiFunction<Object, T1> oiFunc1 = (LOiFunction) ia1.getter();
	Object iterator2 = ((LFunction) sa2.adapter()).apply(source2);
	LPredicate<Object> testFunc2 = (LPredicate) sa2.tester();
	LFunction<Object, T2> nextFunc2 = (LFunction) sa2.supplier();
	size = Integer.min(size, ia3.size(source3));
	LOiToDblFunction<Object> oiFunc3 = (LOiToDblFunction) ia3.getter();
	int i = 0;
	// Loop ends when the sequential source is exhausted or the shorter indexed source runs out.
	while (i < size && testFunc2.test(iterator2)) {
		T1 a1 = oiFunc1.apply(source1, i);
		T2 a2 = nextFunc2.apply(iterator2);
		double a3 = oiFunc3.applyAsDbl(source3, i);
		doIf(a1, a2, a3, consumer);
		i++;
	}
}
/**
 * For each element (or tuple) from arguments, calls the consumer if predicate test passes.
 * Sources 1 and 2 are consumed sequentially; source 3 is indexed.
 * Thread safety, fail-fast, fail-safety of this method is not expected.
 */
default <C1, I1, C2, I2, C3> void filterIterate(SequentialRead<C1, I1, a<T1>> sa1, C1 source1, SequentialRead<C2, I2, a<T2>> sa2, C2 source2, IndexedRead<C3, aDouble> ia3, C3 source3, LBiObjDblConsumer<? super T1, ? super T2> consumer) {
	Object iterator1 = ((LFunction) sa1.adapter()).apply(source1);
	LPredicate<Object> testFunc1 = (LPredicate) sa1.tester();
	LFunction<Object, T1> nextFunc1 = (LFunction) sa1.supplier();
	Object iterator2 = ((LFunction) sa2.adapter()).apply(source2);
	LPredicate<Object> testFunc2 = (LPredicate) sa2.tester();
	LFunction<Object, T2> nextFunc2 = (LFunction) sa2.supplier();
	int size = ia3.size(source3);
	LOiToDblFunction<Object> oiFunc3 = (LOiToDblFunction) ia3.getter();
	int i = 0;
	// Loop ends when any source is exhausted.
	while (testFunc1.test(iterator1) && testFunc2.test(iterator2) && i < size) {
		T1 a1 = nextFunc1.apply(iterator1);
		T2 a2 = nextFunc2.apply(iterator2);
		double a3 = oiFunc3.applyAsDbl(source3, i);
		doIf(a1, a2, a3, consumer);
		i++;
	}
}
/**
 * For each element (or tuple) from arguments, calls the consumer if predicate test passes.
 * Source 3 is consumed sequentially; sources 1 and 2 are indexed.
 * Thread safety, fail-fast, fail-safety of this method is not expected.
 */
default <C1, C2, C3, I3> void filterIterate(IndexedRead<C1, a<T1>> ia1, C1 source1, IndexedRead<C2, a<T2>> ia2, C2 source2, SequentialRead<C3, I3, aDouble> sa3, C3 source3, LBiObjDblConsumer<? super T1, ? super T2> consumer) {
	int size = ia1.size(source1);
	LOiFunction<Object, T1> oiFunc1 = (LOiFunction) ia1.getter();
	size = Integer.min(size, ia2.size(source2));
	LOiFunction<Object, T2> oiFunc2 = (LOiFunction) ia2.getter();
	Object iterator3 = ((LFunction) sa3.adapter()).apply(source3);
	LPredicate<Object> testFunc3 = (LPredicate) sa3.tester();
	LToDblFunction<Object> nextFunc3 = (LToDblFunction) sa3.supplier();
	int i = 0;
	// Loop ends when the sequential source is exhausted or the shorter indexed source runs out.
	while (i < size && testFunc3.test(iterator3)) {
		T1 a1 = oiFunc1.apply(source1, i);
		T2 a2 = oiFunc2.apply(source2, i);
		double a3 = nextFunc3.applyAsDbl(iterator3);
		doIf(a1, a2, a3, consumer);
		i++;
	}
}
/**
 * For each element (or tuple) from arguments, calls the consumer if predicate test passes.
 * Sources 1 and 3 are consumed sequentially; source 2 is indexed.
 * Thread safety, fail-fast, fail-safety of this method is not expected.
 */
default <C1, I1, C2, C3, I3> void filterIterate(SequentialRead<C1, I1, a<T1>> sa1, C1 source1, IndexedRead<C2, a<T2>> ia2, C2 source2, SequentialRead<C3, I3, aDouble> sa3, C3 source3, LBiObjDblConsumer<? super T1, ? super T2> consumer) {
	Object iterator1 = ((LFunction) sa1.adapter()).apply(source1);
	LPredicate<Object> testFunc1 = (LPredicate) sa1.tester();
	LFunction<Object, T1> nextFunc1 = (LFunction) sa1.supplier();
	int size = ia2.size(source2);
	LOiFunction<Object, T2> oiFunc2 = (LOiFunction) ia2.getter();
	Object iterator3 = ((LFunction) sa3.adapter()).apply(source3);
	LPredicate<Object> testFunc3 = (LPredicate) sa3.tester();
	LToDblFunction<Object> nextFunc3 = (LToDblFunction) sa3.supplier();
	int i = 0;
	// Loop ends when any source is exhausted.
	while (testFunc1.test(iterator1) && i < size && testFunc3.test(iterator3)) {
		T1 a1 = nextFunc1.apply(iterator1);
		T2 a2 = oiFunc2.apply(source2, i);
		double a3 = nextFunc3.applyAsDbl(iterator3);
		doIf(a1, a2, a3, consumer);
		i++;
	}
}
/**
 * For each element (or tuple) from arguments, calls the consumer if predicate test passes.
 * Sources 2 and 3 are consumed sequentially; source 1 is indexed.
 * Thread safety, fail-fast, fail-safety of this method is not expected.
 */
default <C1, C2, I2, C3, I3> void filterIterate(IndexedRead<C1, a<T1>> ia1, C1 source1, SequentialRead<C2, I2, a<T2>> sa2, C2 source2, SequentialRead<C3, I3, aDouble> sa3, C3 source3, LBiObjDblConsumer<? super T1, ? super T2> consumer) {
	int size = ia1.size(source1);
	LOiFunction<Object, T1> oiFunc1 = (LOiFunction) ia1.getter();
	Object iterator2 = ((LFunction) sa2.adapter()).apply(source2);
	LPredicate<Object> testFunc2 = (LPredicate) sa2.tester();
	LFunction<Object, T2> nextFunc2 = (LFunction) sa2.supplier();
	Object iterator3 = ((LFunction) sa3.adapter()).apply(source3);
	LPredicate<Object> testFunc3 = (LPredicate) sa3.tester();
	LToDblFunction<Object> nextFunc3 = (LToDblFunction) sa3.supplier();
	int i = 0;
	// Loop ends when any source is exhausted.
	while (i < size && testFunc2.test(iterator2) && testFunc3.test(iterator3)) {
		T1 a1 = oiFunc1.apply(source1, i);
		T2 a2 = nextFunc2.apply(iterator2);
		double a3 = nextFunc3.applyAsDbl(iterator3);
		doIf(a1, a2, a3, consumer);
		i++;
	}
}
/**
 * For each element (or tuple) from arguments, calls the consumer if predicate test passes.
 * All three sources are consumed sequentially; iteration ends with the first exhausted one.
 * Thread safety, fail-fast, fail-safety of this method depends highly on the arguments.
 */
default <C1, I1, C2, I2, C3, I3> void filterIterate(SequentialRead<C1, I1, a<T1>> sa1, C1 source1, SequentialRead<C2, I2, a<T2>> sa2, C2 source2, SequentialRead<C3, I3, aDouble> sa3, C3 source3, LBiObjDblConsumer<? super T1, ? super T2> consumer) {
	Object iterator1 = ((LFunction) sa1.adapter()).apply(source1);
	LPredicate<Object> testFunc1 = (LPredicate) sa1.tester();
	LFunction<Object, T1> nextFunc1 = (LFunction) sa1.supplier();
	Object iterator2 = ((LFunction) sa2.adapter()).apply(source2);
	LPredicate<Object> testFunc2 = (LPredicate) sa2.tester();
	LFunction<Object, T2> nextFunc2 = (LFunction) sa2.supplier();
	Object iterator3 = ((LFunction) sa3.adapter()).apply(source3);
	LPredicate<Object> testFunc3 = (LPredicate) sa3.tester();
	LToDblFunction<Object> nextFunc3 = (LToDblFunction) sa3.supplier();
	while (testFunc1.test(iterator1) && testFunc2.test(iterator2) && testFunc3.test(iterator3)) {
		T1 a1 = nextFunc1.apply(iterator1);
		T2 a2 = nextFunc2.apply(iterator2);
		double a3 = nextFunc3.applyAsDbl(iterator3);
		doIf(a1, a2, a3, consumer);
	}
}
}
| lunisolar/magma | magma-func/src/main/java/eu/lunisolar/magma/func/predicate/LBiObjDblPredicate.java | Java | apache-2.0 | 60,642 |
package com.HQHub.DAO;
import java.util.List;
import org.hibernate.Query;
import org.hibernate.Session;
import com.HQHub.Util.Connection;
import com.HQHub.pojo.F1DynFlight;
import com.HQHub.pojo.F3Airport;
import com.HQHub.pojo.RRdt;
/**
 * Read-only DAO for querying flight dynamics ({@link F1DynFlight}) from the
 * "oracle" Hibernate session.
 */
public class FlightQueryDAO {

	/**
	 * Finds flights between two airports on the given operation date, ordered by
	 * scheduled departure (std). Airport arguments are display names that are
	 * translated to IATA codes via {@link #toAirportAbb}.
	 *
	 * @param origin_airport_iata origin airport display name
	 * @param dest_airport_iata destination airport display name
	 * @param operation_date date string in yyyy-MM-dd format
	 * @return matching flights, possibly empty
	 */
	@SuppressWarnings("unchecked")
	public static List<F1DynFlight> getFlight(String origin_airport_iata, String dest_airport_iata, String operation_date) {
		Session session = Connection.getSession("oracle");
		String hql = "select f"
				+ " from F1DynFlight f"
				+ " where f.origin_airport_iata =:origin_airport_iata"
				+ " and f.dest_airport_iata =:dest_airport_iata"
				+ " and to_char(f.operation_date,'yyyy-MM-dd') = to_char(:operation_date)"
				+ " order by f.std"
				+ " asc";
		Query query = session.createQuery(hql);
		// NOTE(review): toAirportAbb may return null when the name is unknown;
		// the query then matches nothing. Confirm this is the intended behavior.
		query.setString("origin_airport_iata", toAirportAbb(session, origin_airport_iata));
		query.setString("dest_airport_iata", toAirportAbb(session, dest_airport_iata));
		query.setString("operation_date", operation_date);
		List<F1DynFlight> list = query.list();
		return list;
	}

	/**
	 * Finds flights by flight number on the given operation date, ordered by
	 * scheduled departure (std).
	 *
	 * @param flight_no flight number, e.g. "MU5100"
	 * @param operation_date date string in yyyy-MM-dd format
	 * @return matching flights, possibly empty
	 */
	@SuppressWarnings("unchecked")
	public static List<F1DynFlight> getFlight(String flight_no, String operation_date) {
		Session session = Connection.getSession("oracle");
		String hql = "select f"
				+ " from F1DynFlight f "
				+ " where f.flight_no =:flight_no"
				+ " and to_char(f.operation_date,'yyyy-MM-dd') = to_char(:operation_date)"
				+ " order by f.std"
				+ " asc";
		Query query = session.createQuery(hql);
		query.setString("flight_no", flight_no);
		query.setString("operation_date", operation_date);
		List<F1DynFlight> list = query.list();
		// Removed a leftover empty System.out.println() debug statement here.
		return list;
	}

	/**
	 * Resolves an airport display name to its IATA code by scanning the airport
	 * table and comparing the zh_cn element of each name_xml document.
	 *
	 * @return the IATA code, or null when no airport matches
	 */
	@SuppressWarnings("unchecked")
	private static String toAirportAbb(Session session, String airport) {
		String hql = "select f from F3Airport as f where instr(f.name_xml,:airport)<>0";
		Query query = session.createQuery(hql);
		query.setString("airport", airport);
		List<F3Airport> list = query.list();
		for (F3Airport candidate : list) {
			if (airport.equals(parseXML(candidate.getName_xml()))) {
				return candidate.getAirport_iata(); // first exact match wins
			}
		}
		return null;
	}

	/**
	 * Returns up to 20 upcoming departures from SHA, ordered by scheduled
	 * departure time.
	 */
	@SuppressWarnings("unchecked")
	public static List<F1DynFlight> getFlight() {
		Session session = Connection.getSession("oracle");
		String hql = "select f from F1DynFlight f"
				+ " where f.origin_airport_iata = 'SHA' and to_char(f.std,'yyyy-MM-dd HH24:MI:SS') >= to_char(sysdate, 'yyyy-MM-dd HH24:MI:SS')"
				+ " order by f.std asc ";
		Query query = session.createQuery(hql);
		List<F1DynFlight> list = query.list();
		return list.subList(0, 20);
	}

	/** Extracts the text between the &lt;zh_cn&gt; and &lt;/zh_cn&gt; tags of the given XML snippet. */
	private static String parseXML(String xml) {
		String result = xml.substring(xml.indexOf("<zh_cn>") + 7, xml.indexOf("</zh_cn>"));
		return result;
	}
}
| ahhbzyz/HQHubAndroid | src/com/HQHub/DAO/FlightQueryDAO.java | Java | apache-2.0 | 2,823 |
#
# Cookbook Name:: turbobil
# Recipe:: iptables
#
# unicorn redirect
# Pulls in the simple_iptables cookbook so the resource below is available.
include_recipe "simple_iptables"

# NAT PREROUTING rule: transparently redirect inbound HTTP (port 80) to the
# unicorn app server listening on port 8080.
simple_iptables_rule "unicorn-iptables" do
  table "nat"
  direction "PREROUTING"
  rule [ "--protocol tcp --dport 80 --jump REDIRECT --to-port 8080"]
  jump false # the --jump target is embedded in the rule string itself
end
| roramirez/turbobil-kitchen | my-cookbooks/turbobil/recipes/iptables.rb | Ruby | apache-2.0 | 272 |
// ---------------------------------------------------------------------------------------------------------------------
// <copyright file="IMsSqlTableStreamWriterAdapterMapper.cs" company="">
//
// </copyright>
// <summary>
// Defines the IMsSqlTableStreamWriterAdapterMapper type.
// </summary>
// ---------------------------------------------------------------------------------------------------------------------
namespace DbFriend.Core.Provider.MsSql.Mappers
{
    /// <summary>
    /// Marker contract for mappers that feed MS SQL table objects to a stream
    /// writer adapter. Adds no members of its own; all behavior comes from
    /// <see cref="IMsSqlObjectStreamWriterAdapterMapper"/>.
    /// </summary>
    public interface IMsSqlTableStreamWriterAdapterMapper : IMsSqlObjectStreamWriterAdapterMapper
    {
    }
}
package in.ejava.rtc.plugin.rest.api;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Map;

import in.ejava.rest.plugin.core.DataType;
import in.ejava.rest.plugin.core.HttpMethod;
import in.ejava.rest.plugin.core.client.Response;
import in.ejava.rest.plugin.core.client.RestClientFactory;
import in.ejava.rest.plugin.core.session.SessionCookieHolder;
/**
 * Performs form-based (j_security_check) authentication against an RTC server.
 * The username is the session identifier supplied to the constructor.
 */
public class AuthenticationAPI extends BaseAPI {

	private static final String REST_FORM_URI = "authenticated/identity";
	private static final String REST_URI = "authenticated/j_security_check";

	public AuthenticationAPI(String username) {
		super(username);
	}

	/**
	 * Authenticates with the given password and returns the identity document
	 * fetched after login, or null if any step fails.
	 *
	 * @param password the user's password; URL-encoded before being posted
	 * @return identity response body on success, null on failure
	 */
	public String authenticate(String password) {
		try {
			RestClientFactory.getInstance().getRestClient();
			// Drop any stale session cookie for this user before logging in again.
			SessionCookieHolder.getSessionCookieMap().remove(getSessionIdentifier());

			Map<String, String> headers = new HashMap<String, String>();
			// Initial GET triggers the login form and establishes the session.
			makeCall(buildUrl(REST_FORM_URI), HttpMethod.GET, headers, null, "", DataType.RAW, Response.class);

			// URL-encode credentials so reserved characters ('&', '=', '%', ...)
			// survive the application/x-www-form-urlencoded POST intact.
			String payload = "j_username=" + URLEncoder.encode(getSessionIdentifier(), "UTF-8")
					+ "&j_password=" + URLEncoder.encode(password, "UTF-8");
			headers.put("Content-Type", "application/x-www-form-urlencoded");
			headers.put("Content-Length", Integer.toString(payload.length()));
			// Form login POST; performed for its session side effect only.
			makeCall(buildUrl(REST_URI), HttpMethod.POST, headers, null, payload, DataType.RAW, Response.class, false);

			// Fetch the identity document now that the session is authenticated.
			return (String) makeCall(buildUrl(REST_FORM_URI), HttpMethod.GET, headers, null, null, DataType.STRING, String.class);
		} catch (Exception e) {
			// Preserves the original contract: log the failure and return null.
			e.printStackTrace();
		}
		return null;
	}
}
| spranab/rest-plugin | rtc-plugin-rest/src/main/java/in/ejava/rtc/plugin/rest/api/AuthenticationAPI.java | Java | apache-2.0 | 1,743 |
#! /usr/bin/env ruby
require 'spec_helper'
require 'puppet/util/nimsoft_section.rb'
describe Puppet::Util::NimsoftSection do
describe "when creating a new section" do
it "should be possible to create a section without a parent" do
section = described_class.new('root')
expect(section.parent).to be_nil
expect(section.children).to be_empty
end
it "should add the update the children list of the parent section" do
rootsection = described_class.new('root')
subsection1 = described_class.new('s1', rootsection)
subsection2 = described_class.new('s2', rootsection)
expect(rootsection.children).to eq([ subsection1, subsection2 ])
expect(subsection1.parent).to eq(rootsection)
expect(subsection1.children).to be_empty
expect(subsection2.parent).to eq(rootsection)
expect(subsection2.children).to be_empty
end
end
describe "to_cfg" do
it "should list the attributes in the order they were inserted" do
section = described_class.new('root')
section[:a] = 'value1'
section[:z] = 'value2'
section[:b] = 'value3'
expect(section.to_cfg).to eq(<<'EOS')
<root>
a = value1
z = value2
b = value3
</root>
EOS
end
it "should use the specified tabsize for indention" do
section = described_class.new('root')
section[:a] = 'value1'
expect(section.to_cfg(2, 0)).to eq(<<'EOS')
<root>
a = value1
</root>
EOS
end
it "should indent the section when specified" do
section = described_class.new('root')
section[:a] = 'value1'
expect(section.to_cfg(3, 1)).to eq(<<'EOS')
<root>
a = value1
</root>
EOS
expect(section.to_cfg(3, 2)).to eq(<<'EOS')
<root>
a = value1
</root>
EOS
end
it "should print all subcategories" do
section = described_class.new('root')
subsection1 = described_class.new('s1', section)
subsection2 = described_class.new('s2', section)
subsection1[:s1a] = 'some_value'
subsection1[:s2a] = 'some_other_value'
subsection2[:key3] = 'next key'
section[:foo] = 'bar'
expect(section.to_cfg).to eq(<<'EOS')
<root>
foo = bar
<s1>
s1a = some_value
s2a = some_other_value
</s1>
<s2>
key3 = next key
</s2>
</root>
EOS
end
end
describe "del_attr" do
it "should remove an attribute" do
section = described_class.new('root')
section[:a] = 'value1'
section[:b] = 'value2'
section.del_attr(:a)
expect(section[:a]).to be_nil
expect(section.to_cfg).to eq(<<'EOS')
<root>
b = value2
</root>
EOS
end
it "should do nothing if attribute does not exist" do
section = described_class.new('root')
section[:a] = 'value1'
section[:b] = 'value2'
section.del_attr(:c)
expect(section[:a]).to eq('value1')
expect(section[:b]).to eq('value2')
expect(section[:c]).to be_nil
expect(section.to_cfg).to eq(<<'EOS')
<root>
a = value1
b = value2
</root>
EOS
end
end
describe "keys_in_order" do
it "should return an empty array if section has no attributes" do
section = described_class.new('root')
expect(section.keys_in_order).to be_empty
end
it "should return the keys in the correct order" do
section = described_class.new('root')
section[:first] = 'foo'
section[:second] = 'bar'
section[:remove_later] = '123'
section[:third] = 'baz'
section.del_attr(:remove_later)
expect(section.keys_in_order).to eq([:first, :second, :third ])
end
end
describe "values_in_order" do
it "should return an empty array if section has no attributes" do
section = described_class.new('root')
expect(section.values_in_order).to be_empty
end
it "should return the values in the correct order" do
section = described_class.new('root')
section[:first] = 'foo'
section[:second] = 'bar'
section[:remove_later] = '123'
section[:third] = 'baz'
section.del_attr(:remove_later)
expect(section.values_in_order).to eq(%w{foo bar baz})
end
end
describe "path" do
it "should return self when no path is given" do
section = described_class.new('root')
expect(section.path(nil)).to eq(section)
end
it "should return the corresponding subsection" do
section = described_class.new('root')
s1 = described_class.new('level1_child1', section)
expect(section.path('level1_child1')).to eq(s1)
end
it "should be possible to access a nested subsection" do
section = described_class.new('root')
l1c1 = described_class.new('level1_child1', section)
l1c2 = described_class.new('level1_child2', section)
l2c1 = described_class.new('level2_child1', l1c1)
l3c1 = described_class.new('level3_child1', l2c1)
expect(section.path('level1_child1/level2_child1/level3_child1')).to eq(l3c1)
end
it "should create missing sections" do
section = described_class.new('root')
expect(section.children).to be_empty
child = section.path('level1')
expect(child.parent).to eq(section)
expect(section.children[0]).to eq(child)
end
end
end
| stschulte/puppet-nimsoft | spec/unit/util/nimsoft_section_spec.rb | Ruby | apache-2.0 | 5,267 |
package org.nutz.plugins.cache.dao.impl.convert;
import org.nutz.plugins.cache.dao.api.CacheSerializer;
/**
 * Base class for cache serializers that encode a cached null value as an empty
 * byte array sentinel.
 */
public abstract class AbstractCacheSerializer implements CacheSerializer {

	/** Sentinel representing a cached null value. */
	public static final byte[] NULL_OBJ = new byte[0];

	/**
	 * Returns true when {@code obj} represents the null sentinel: either a
	 * literal null reference or an empty byte array.
	 *
	 * @param obj candidate value, expected to be null or a byte[]
	 * @throws IllegalArgumentException if obj is non-null but not a byte[]
	 */
	protected boolean isNULL_OBJ(Object obj) {
		if (obj == null)
			return true;
		if (!(obj instanceof byte[]))
			throw new IllegalArgumentException("Not byte[] --> " + obj.getClass());
		// Collapsed the original if/return-true/return-false into one expression.
		return ((byte[]) obj).length == 0;
	}
}
| lusparioTT/fks | src/org/nutz/plugins/cache/dao/impl/convert/AbstractCacheSerializer.java | Java | apache-2.0 | 577 |
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* VideoPositionType.java
*
* This file was auto-generated from WSDL
* by the Apache Axis 1.4 Mar 02, 2009 (07:08:06 PST) WSDL2Java emitter.
*/
package com.google.api.ads.admanager.axis.v202202;
/**
 * Axis-generated typesafe enum for VideoPosition.Type (UNKNOWN/ALL/PREROLL/
 * MIDROLL/POSTROLL). Instances are interned in {@code _table_} so that
 * {@code fromValue} returns the canonical constant; do not edit by hand.
 */
public class VideoPositionType implements java.io.Serializable {
    private java.lang.String _value_;
    // Interning table mapping wire value -> canonical instance; populated by the constructor.
    private static java.util.HashMap _table_ = new java.util.HashMap();

    // Constructor
    protected VideoPositionType(java.lang.String value) {
        _value_ = value;
        _table_.put(_value_,this);
    }

    public static final java.lang.String _UNKNOWN = "UNKNOWN";
    public static final java.lang.String _ALL = "ALL";
    public static final java.lang.String _PREROLL = "PREROLL";
    public static final java.lang.String _MIDROLL = "MIDROLL";
    public static final java.lang.String _POSTROLL = "POSTROLL";
    public static final VideoPositionType UNKNOWN = new VideoPositionType(_UNKNOWN);
    public static final VideoPositionType ALL = new VideoPositionType(_ALL);
    public static final VideoPositionType PREROLL = new VideoPositionType(_PREROLL);
    public static final VideoPositionType MIDROLL = new VideoPositionType(_MIDROLL);
    public static final VideoPositionType POSTROLL = new VideoPositionType(_POSTROLL);

    public java.lang.String getValue() { return _value_;}

    /** Returns the canonical constant for the given wire value, or throws if unknown. */
    public static VideoPositionType fromValue(java.lang.String value)
          throws java.lang.IllegalArgumentException {
        VideoPositionType enumeration = (VideoPositionType)
            _table_.get(value);
        if (enumeration==null) throw new java.lang.IllegalArgumentException();
        return enumeration;
    }

    public static VideoPositionType fromString(java.lang.String value)
          throws java.lang.IllegalArgumentException {
        return fromValue(value);
    }

    // Identity comparison is safe because instances are interned.
    public boolean equals(java.lang.Object obj) {return (obj == this);}
    public int hashCode() { return toString().hashCode();}
    public java.lang.String toString() { return _value_;}
    // Deserialization resolves to the canonical interned instance.
    public java.lang.Object readResolve() throws java.io.ObjectStreamException { return fromValue(_value_);}

    public static org.apache.axis.encoding.Serializer getSerializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new org.apache.axis.encoding.ser.EnumSerializer(
            _javaType, _xmlType);
    }

    public static org.apache.axis.encoding.Deserializer getDeserializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new org.apache.axis.encoding.ser.EnumDeserializer(
            _javaType, _xmlType);
    }

    // Type metadata
    private static org.apache.axis.description.TypeDesc typeDesc =
        new org.apache.axis.description.TypeDesc(VideoPositionType.class);

    static {
        typeDesc.setXmlType(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v202202", "VideoPosition.Type"));
    }

    /**
     * Return type metadata object
     */
    public static org.apache.axis.description.TypeDesc getTypeDesc() {
        return typeDesc;
    }
}
| googleads/googleads-java-lib | modules/dfp_axis/src/main/java/com/google/api/ads/admanager/axis/v202202/VideoPositionType.java | Java | apache-2.0 | 3,790 |
#Exercise 6
import yaml
import json
def main():
    """Write a sample list to both a YAML file and a JSON file.

    The files are created (or overwritten) in the current working directory.
    The network-device dictionary is built for illustration only and is not
    serialized, matching the original exercise behavior.
    """
    yaml_file = 'test_file.yml'
    json_file = 'jsontest.json'

    # Renamed from `dict` / `list`: the originals shadowed the built-in types.
    network_device = {
        'ip_add': '192.168.1.100',
        'vendor': 'cisco'
    }
    sample_list = [
        'week one',
        99,
        18
    ]

    # default_flow_style=False emits block-style YAML (one item per line).
    with open(yaml_file, "w") as f:
        f.write(yaml.dump(sample_list, default_flow_style=False))

    with open(json_file, "w") as f:
        json.dump(sample_list, f)


if __name__ == "__main__":
    main()
/*
* Copyright 2014 Basis Technology Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.basistech.tclre;
import com.google.common.base.MoreObjects;
/**
* Store information about a capturing pattern.
*/
class RegMatch {
final int start;
final int end;
RegMatch(int start, int end) {
this.start = start;
this.end = end;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
RegMatch regMatch = (RegMatch)o;
if (end != regMatch.end) {
return false;
}
return start == regMatch.start;
}
@Override
public int hashCode() {
int result = start;
result = 31 * result + end;
return result;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("start", start)
.add("end", end)
.toString();
}
}
| basis-technology-corp/tcl-regex-java | src/main/java/com/basistech/tclre/RegMatch.java | Java | apache-2.0 | 1,573 |
"""Base OpHandler for ops that use group lasso regularizer.
This OpHandler should not be called directly. It is a virtual base class
for regularization source OpHandlers that use Group Lasso as their regularizer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from morph_net.framework import op_handler
from morph_net.framework import op_handler_util
from morph_net.framework import tpu_util
from morph_net.op_regularizers import group_lasso_regularizer
class GroupLassoBaseSourceOpHandler(op_handler.OpHandler):
  """Base OpHandler for source ops that use Group Lasso.

  Virtual base class: subclasses choose which weight-tensor axes are reduced
  when computing each output activation's group norm (see `_reduce_dims`).
  """
  __metaclass__ = abc.ABCMeta
  def __init__(self, threshold, l1_fraction=0.0):
    """Instantiate an instance.
    Args:
      threshold: Float scalar used as threshold for GroupLassoRegularizer.
      l1_fraction: Float scalar used as l1_fraction for GroupLassoRegularizer.
    """
    self._threshold = threshold
    self._l1_fraction = l1_fraction
  @abc.abstractmethod
  def _reduce_dims(self, op):
    # Reduction dimensions for Group Lasso: the weight-tensor axes summed over
    # when computing each group's norm.  Exactly one axis (the output axis)
    # must be excluded -- create_regularizer enforces rank == len(dims) + 1.
    pass
  @property
  def is_source_op(self):
    # Source ops create their own regularizer rather than passing one through.
    return True
  @property
  def is_passthrough(self):
    return False
  def assign_grouping(self, op, op_reg_manager):
    """Assign grouping to the given op and updates the manager.
    Args:
      op: tf.Operation to assign grouping to.
      op_reg_manager: OpRegularizerManager to keep track of the grouping.
    """
    # This is a source op so begin by getting the OpGroup or creating one.
    op_slices = op_reg_manager.get_op_slices(op)
    for op_slice in op_slices:
      op_group = op_reg_manager.get_op_group(op_slice)
      if op_group is None:
        op_reg_manager.create_op_group_for_op_slice(op_slice)
    # Check if all input ops have groups, or tell the manager to process them.
    input_ops = op_handler_util.get_input_ops(op, op_reg_manager)
    input_ops_without_group = op_handler_util.get_ops_without_groups(
        input_ops, op_reg_manager)
    # Check if all output ops have groups, or tell the manager to process them.
    output_ops = op_handler_util.get_output_ops(op, op_reg_manager)
    output_ops_without_group = op_handler_util.get_ops_without_groups(
        output_ops, op_reg_manager)
    # Remove non-passthrough ops from outputs ops to group with.
    output_ops = op_handler_util.remove_non_passthrough_ops(
        output_ops, op_reg_manager)
    # Only group with ops that have the same size. Process the ops that have
    # mismatched size.
    output_ops_to_group, output_ops_to_process = (
        op_handler_util.separate_same_size_ops(op, output_ops))
    # Also process ungrouped ops.
    input_ops_to_process = input_ops_without_group
    output_ops_to_process.extend(output_ops_without_group)
    # Align op slice sizes if needed.
    output_op_slices = op_handler_util.get_op_slices(
        output_ops_to_group, op_reg_manager)
    aligned_op_slice_sizes = op_handler_util.get_aligned_op_slice_sizes(
        op_slices, [], output_op_slices)
    op_handler_util.reslice_ops([op] + output_ops_to_group,
                                aligned_op_slice_sizes, op_reg_manager)
    # Repopulate OpSlice data, as ops may have been resliced.
    output_op_slices = op_handler_util.get_op_slices(
        output_ops_to_group, op_reg_manager)
    # Group with outputs only: as a source op there are no input slices to
    # merge, hence the empty input-slice list.
    op_handler_util.group_op_with_inputs_and_outputs(
        op, [], output_op_slices, aligned_op_slice_sizes,
        op_reg_manager)
    # Reprocess ops.
    op_reg_manager.process_ops(output_ops_to_process + input_ops_to_process)
  def create_regularizer(self, op_slice):
    """Create a Group Lasso regularizer for this OpSlice.
    Args:
      op_slice: op_regularizer_manager.OpSlice for a source op (e.g. Conv2D
        or MatMul) whose weights are its input at index 1.
    Returns:
      A GroupLassoRegularizer for this op.
    Raises:
      ValueError: If the weight rank is incompatible with the reduce dims.
    """
    start_index = op_slice.slice.start_index
    size = op_slice.slice.size
    weights = op_slice.op.inputs[1]  # Input 1 are the weights.
    weights = tpu_util.maybe_convert_to_variable(weights)
    reduce_dims = self._reduce_dims(op_slice.op)
    rank = len(weights.shape.as_list())
    if rank != len(reduce_dims) + 1:
      raise ValueError('Rank %d incompatible with reduce_dims %s for op %s' %
                       (rank, reduce_dims, op_slice.op.name))
    def _slice_weights():
      """Slices the weight tensor according to op_slice information."""
      # Exactly one axis is not reduced; that axis indexes the output
      # activations, so the [start_index, start_index + size) range is
      # applied along it.
      if rank == 2:
        if reduce_dims[0] == 0:
          return weights[:, start_index:start_index + size]
        else:
          return weights[start_index:start_index + size, :]
      if rank == 3:
        if 2 not in reduce_dims:
          return weights[:, :, start_index:start_index + size]
        if 1 not in reduce_dims:
          return weights[:, start_index:start_index + size, :]
        if 0 not in reduce_dims:
          return weights[start_index:start_index + size, :, :]
      if rank == 4:
        if 3 not in reduce_dims:
          return weights[:, :, :, start_index:start_index + size]
        if 2 not in reduce_dims:
          return weights[:, :, start_index:start_index + size, :]
        if 1 not in reduce_dims:
          return weights[:, start_index:start_index + size, :, :]
        if 0 not in reduce_dims:
          return weights[start_index:start_index + size, :, :, :]
      if rank == 5:
        if 4 not in reduce_dims:
          return weights[:, :, :, :, start_index:start_index + size]
        raise ValueError('Unsupported reduce_dim for rank 5 tensors (Conv3D)')
      raise ValueError('Unsupported rank or bad reduce_dim')
    # If OpSlice size matches tensor size, the slice below covers the entire
    # tensor. Otherwise, only the OpSlice's range along the output axis is
    # regularized.
    weight_tensor = _slice_weights()
    return group_lasso_regularizer.GroupLassoRegularizer(
        weight_tensor=weight_tensor,
        reduce_dims=self._reduce_dims(op_slice.op),
        threshold=self._threshold,
        l1_fraction=self._l1_fraction)
| google-research/morph-net | morph_net/framework/group_lasso_base_op_handler.py | Python | apache-2.0 | 5,974 |
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.powertac.logtool;
/**
 * Top level of the top-down version of the Power TAC logtool analyzer.
 * Invoke with a state-log filename followed by the class names of the
 * Analyzers to run.
 *
 * @author John Collins
 */
public class Logtool extends LogtoolContext
{
  /**
   * Entry point. All real work is delegated to the shared LogtoolCore;
   * the process exit code reflects command-line processing success.
   */
  public static void main (String[] args)
  {
    LogtoolCore core = new Logtool().getCore();
    String error = core.processCmdLine(args);
    if (error == null) {
      System.exit(0);
    }
    System.out.println(error);
    System.exit(-1);
  }
}
| powertac/powertac-server | logtool-core/src/main/java/org/powertac/logtool/Logtool.java | Java | apache-2.0 | 1,307 |
/*
* Copyright 2014 ElecEntertainment
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.larry1123.elec.util.logger;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.text.SimpleDateFormat;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import java.util.logging.SimpleFormatter;
/**
 * Log formatter producing lines of the form
 * {@code date [Level] [Prefix] message}, where the prefix segment appears
 * only for {@link LoggerLevel} levels with a non-empty prefix.
 */
public class UtilsLogFormat extends SimpleFormatter {

    // SimpleDateFormat is NOT thread-safe; all use is serialized below because
    // a single Formatter instance may be shared by several handlers/threads.
    private final SimpleDateFormat dateform = new SimpleDateFormat("dd-MM-yyyy HH:mm:ss");
    private final String linesep = System.getProperty("line.separator");

    /**
     * {@inheritDoc}
     * <p/>
     * [Level] [Prefix] Message
     */
    @Override
    public final String format(LogRecord rec) {
        Level level = rec.getLevel();
        StringBuilder message = new StringBuilder();
        // Guard the non-thread-safe SimpleDateFormat (fixes corrupted
        // timestamps under concurrent logging).
        synchronized (dateform) {
            message.append(dateform.format(rec.getMillis())).append(" ");
        }
        message.append("[").append(level.getName()).append("] ");
        if (level instanceof LoggerLevel) {
            LoggerLevel handle = (LoggerLevel) level;
            if (!handle.getPrefix().isEmpty()) {
                message.append("[").append(handle.getPrefix()).append("] ");
            }
        }
        // NOTE(review): the raw message is appended; {0}-style parameter
        // substitution would require formatMessage(rec). Left unchanged to
        // preserve existing output - confirm before changing.
        message.append(rec.getMessage());
        message.append(linesep);
        if (rec.getThrown() != null) {
            // Append the full stack trace of any attached throwable.
            StringWriter stringwriter = new StringWriter();
            rec.getThrown().printStackTrace(new PrintWriter(stringwriter));
            message.append(stringwriter.toString());
        }
        return message.toString();
    }
}
| ElecEntertainment/EEUtils | src/main/java/net/larry1123/elec/util/logger/UtilsLogFormat.java | Java | apache-2.0 | 2,042 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.myfaces.ov2021.lifecycle;
import java.util.HashMap;
import java.util.Map;
import javax.faces.context.FacesContext;
import javax.faces.event.ExceptionQueuedEvent;
import javax.faces.event.ExceptionQueuedEventContext;
import javax.faces.event.PhaseEvent;
import javax.faces.event.PhaseId;
import javax.faces.event.PhaseListener;
import javax.faces.lifecycle.Lifecycle;
/**
 * This class encapsulates the logic used to call PhaseListeners. It was needed because of issue 9 of the JSF 1.2 spec.
 * See section 11.3 for more details: a listener whose beforePhase() was not
 * invoked (or threw) must not have its afterPhase() invoked.
 *
 * @author Stan Silvert
 */
class PhaseListenerManager
{
    private Lifecycle lifecycle;
    private FacesContext facesContext;
    private PhaseListener[] phaseListeners;
    // Tracks success in the beforePhase. Listeners that throw an exception
    // in beforePhase or were never called because a previous listener threw
    // an exception should not have its afterPhase called
    private Map<PhaseId, boolean[]> listenerSuccessMap = new HashMap<PhaseId, boolean[]>();
    /** Creates a new instance of PhaseListenerManager */
    PhaseListenerManager(Lifecycle lifecycle, FacesContext facesContext, PhaseListener[] phaseListeners)
    {
        this.lifecycle = lifecycle;
        this.facesContext = facesContext;
        this.phaseListeners = phaseListeners;
    }
    /** Returns true if the listener subscribes to this phase or to ANY_PHASE. */
    private boolean isListenerForThisPhase(PhaseListener phaseListener, PhaseId phaseId)
    {
        int listenerPhaseId = phaseListener.getPhaseId().getOrdinal();
        return (listenerPhaseId == PhaseId.ANY_PHASE.getOrdinal() || listenerPhaseId == phaseId.getOrdinal());
    }
    /**
     * Calls beforePhase() on each registered listener for the given phase, in
     * registration order, recording which calls completed normally. The first
     * listener that throws stops processing; its exception is published as an
     * ExceptionQueuedEvent and the remaining entries stay marked unsuccessful.
     */
    void informPhaseListenersBefore(PhaseId phaseId)
    {
        boolean[] beforePhaseSuccess = new boolean[phaseListeners.length];
        listenerSuccessMap.put(phaseId, beforePhaseSuccess);
        PhaseEvent event = new PhaseEvent(facesContext, phaseId, lifecycle);
        for (int i = 0; i < phaseListeners.length; i++)
        {
            PhaseListener phaseListener = phaseListeners[i];
            if (isListenerForThisPhase(phaseListener, phaseId))
            {
                try
                {
                    phaseListener.beforePhase(event);
                    beforePhaseSuccess[i] = true;
                }
                catch (Throwable e)
                {
                    beforePhaseSuccess[i] = false; // redundant - for clarity
                    // JSF 2.0: publish exceptions instead of logging them.
                    publishException (e, phaseId, ExceptionQueuedEventContext.IN_BEFORE_PHASE_KEY);
                    return;
                }
            }
        }
    }
    /**
     * Calls afterPhase() in reverse registration order, but only on listeners
     * whose beforePhase() completed normally (per the success map recorded by
     * informPhaseListenersBefore). Exceptions are published, not rethrown, so
     * every eligible listener still runs.
     */
    void informPhaseListenersAfter(PhaseId phaseId)
    {
        boolean[] beforePhaseSuccess = listenerSuccessMap.get(phaseId);
        if (beforePhaseSuccess == null)
        {
            // informPhaseListenersBefore method was not called : maybe an exception in LifecycleImpl.executePhase
            return;
        }
        PhaseEvent event = null;
        for (int i = phaseListeners.length - 1; i >= 0; i--)
        {
            PhaseListener phaseListener = phaseListeners[i];
            if (isListenerForThisPhase(phaseListener, phaseId) && beforePhaseSuccess[i])
            {
                if (event == null)
                {
                    // Lazily created: no event is needed if no listener ran.
                    event = new PhaseEvent(facesContext, phaseId, lifecycle);
                }
                try
                {
                    phaseListener.afterPhase(event);
                }
                catch (Throwable e)
                {
                    // JSF 2.0: publish exceptions instead of logging them.
                    publishException (e, phaseId, ExceptionQueuedEventContext.IN_AFTER_PHASE_KEY);
                }
            }
        }
    }
    /** Queues the throwable as an ExceptionQueuedEvent tagged with the given phase key. */
    private void publishException (Throwable e, PhaseId phaseId, String key)
    {
        ExceptionQueuedEventContext context = new ExceptionQueuedEventContext (facesContext, e, null, phaseId);
        context.getAttributes().put (key, Boolean.TRUE);
        facesContext.getApplication().publishEvent (facesContext, ExceptionQueuedEvent.class, context);
    }
}
| lu4242/ext-myfaces-2.0.2-patch | trunk/myfaces-impl-2021override/src/main/java/org/apache/myfaces/ov2021/lifecycle/PhaseListenerManager.java | Java | apache-2.0 | 5,062 |
package com.activiti.asyncTest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.scheduling.annotation.Async;
import org.springframework.scheduling.annotation.AsyncResult;
import org.springframework.stereotype.Component;
import java.util.concurrent.Future;
@Component
public class AsyncTask {

    protected final Logger logger = LoggerFactory.getLogger(this.getClass());

    /** Simulates ~10s of work on the "mySimpleAsync" executor. */
    @Async("mySimpleAsync")
    public Future<String> doTask1() throws InterruptedException {
        logger.info("Task1 started.");
        long startedAt = System.currentTimeMillis();
        Thread.sleep(10000);
        logger.info("Task1 finished, time elapsed: {} ms.", System.currentTimeMillis() - startedAt);
        return new AsyncResult<>("Task1 accomplished!");
    }

    /** Simulates ~30s of work on the "myAsync" executor. */
    @Async("myAsync")
    public Future<String> doTask2() throws InterruptedException {
        logger.info("Task2 started.");
        long startedAt = System.currentTimeMillis();
        Thread.sleep(30000);
        logger.info("Task2 finished, time elapsed: {} ms.", System.currentTimeMillis() - startedAt);
        return new AsyncResult<>("Task2 accomplished!");
    }
}
| oyidZh/SpringcloudConfig | src/test/java/com/activiti/asyncTest/AsyncTask.java | Java | apache-2.0 | 1,195 |
import { Injectable } from '@angular/core';
import { Subject, Observable } from 'rxjs';
@Injectable()
export class GroupMemberInteractionService {
    // Internal event bus carrying group-member list changes.
    private readonly groupMemberSubject = new Subject<string[]>();
    // Read-only stream that consumers subscribe to.
    onChangeGroupMember$: Observable<string[]> = this.groupMemberSubject.asObservable();

    setChangeGroupMember(groupMemberList: string[]): void {
        this.groupMemberSubject.next(groupMemberList);
    }
}
| denzelsN/pinpoint | web/src/main/webapp/v2/src/app/core/components/group-member/group-member-interaction.service.ts | TypeScript | apache-2.0 | 487 |
using System;
using System.Xml.Serialization;
namespace Aop.Api.Domain
{
    /// <summary>
    /// AlipayOpenMiniPayeeUnbindModel Data Structure.
    /// Request model for unbinding a payee account from a mini-program.
    /// </summary>
    [Serializable]
    public class AlipayOpenMiniPayeeUnbindModel : AopObject
    {
        /// <summary>
        /// Alipay login account of the payee. Exactly one of this field and
        /// <see cref="Pid"/> is required. If the payee PID and the mini-program
        /// PID belong to different principals, unbinding is only supported via
        /// <see cref="Pid"/>.
        /// </summary>
        [XmlElement("logonid")]
        public string Logonid { get; set; }

        /// <summary>
        /// Alipay account id of the payee. Exactly one of this field and
        /// <see cref="Logonid"/> is required. If the payee PID and the
        /// mini-program PID belong to different principals, unbinding is only
        /// supported via this field.
        /// </summary>
        [XmlElement("pid")]
        public string Pid { get; set; }
    }
| 329277920/Snail | Snail.Pay.Ali.Sdk/Domain/AlipayOpenMiniPayeeUnbindModel.cs | C# | apache-2.0 | 774 |
package com.cloudoa.framework.flow.entity;
// Generated by Hibernate Tools
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.Table;
import javax.persistence.Temporal;
import javax.persistence.TemporalType;
/**
 * BpmInstance - a runtime process instance.
 *
 * @author Lingo
 */
@Entity
@Table(name = "BPM_INSTANCE")
public class BpmInstance implements java.io.Serializable {
    private static final long serialVersionUID = 0L;
    /** Primary key. */
    private Long id;
    /** Foreign key: the process definition this instance belongs to. */
    private BpmProcess bpmProcess;
    /** Display name. */
    private String name;
    /** Business key linking the instance to a domain object. */
    private String businessKey;
    /** External reference. */
    private String ref;
    /** Creation timestamp. */
    private Date createTime;
    /** Initiator (the user who started the instance). */
    private String initiator;
    /** Priority. */
    private Integer priority;
    /** Tenant identifier. */
    private String tenantId;
    /** No-arg constructor required by JPA. */
    public BpmInstance() {
    }
    /** Convenience constructor setting only the primary key. */
    public BpmInstance(Long id) {
        this.id = id;
    }
    /** Full constructor initializing every persistent field. */
    public BpmInstance(Long id, BpmProcess bpmProcess, String name,
            String businessKey, String ref, Date createTime, String initiator,
            Integer priority, String tenantId) {
        this.id = id;
        this.bpmProcess = bpmProcess;
        this.name = name;
        this.businessKey = businessKey;
        this.ref = ref;
        this.createTime = createTime;
        this.initiator = initiator;
        this.priority = priority;
        this.tenantId = tenantId;
    }
    /** @return primary key. */
    @Id
    @Column(name = "ID", unique = true, nullable = false)
    public Long getId() {
        return this.id;
    }
    /**
     * @param id
     *            primary key.
     */
    public void setId(Long id) {
        this.id = id;
    }
    /** @return foreign key: the owning process definition (lazily loaded). */
    @ManyToOne(fetch = FetchType.LAZY)
    @JoinColumn(name = "PROCESS_ID")
    public BpmProcess getBpmProcess() {
        return this.bpmProcess;
    }
    /**
     * @param bpmProcess
     *            foreign key: the owning process definition.
     */
    public void setBpmProcess(BpmProcess bpmProcess) {
        this.bpmProcess = bpmProcess;
    }
    /** @return display name. */
    @Column(name = "NAME", length = 200)
    public String getName() {
        return this.name;
    }
    /**
     * @param name
     *            display name.
     */
    public void setName(String name) {
        this.name = name;
    }
    /** @return business key. */
    @Column(name = "BUSINESS_KEY", length = 64)
    public String getBusinessKey() {
        return this.businessKey;
    }
    /**
     * @param businessKey
     *            business key.
     */
    public void setBusinessKey(String businessKey) {
        this.businessKey = businessKey;
    }
    /** @return external reference. */
    @Column(name = "REF", length = 64)
    public String getRef() {
        return this.ref;
    }
    /**
     * @param ref
     *            external reference.
     */
    public void setRef(String ref) {
        this.ref = ref;
    }
    /** @return creation timestamp. */
    @Temporal(TemporalType.TIMESTAMP)
    @Column(name = "CREATE_TIME", length = 26)
    public Date getCreateTime() {
        return this.createTime;
    }
    /**
     * @param createTime
     *            creation timestamp.
     */
    public void setCreateTime(Date createTime) {
        this.createTime = createTime;
    }
    /** @return initiator. */
    @Column(name = "INITIATOR", length = 64)
    public String getInitiator() {
        return this.initiator;
    }
    /**
     * @param initiator
     *            initiator.
     */
    public void setInitiator(String initiator) {
        this.initiator = initiator;
    }
    /** @return priority. */
    @Column(name = "PRIORITY")
    public Integer getPriority() {
        return this.priority;
    }
    /**
     * @param priority
     *            priority.
     */
    public void setPriority(Integer priority) {
        this.priority = priority;
    }
    /** @return tenant identifier. */
    @Column(name = "TENANT_ID", length = 64)
    public String getTenantId() {
        return this.tenantId;
    }
    /**
     * @param tenantId
     *            tenant identifier.
     */
    public void setTenantId(String tenantId) {
        this.tenantId = tenantId;
    }
}
| lufei1344/cloud-oa | src/main/java/com/cloudoa/framework/flow/entity/BpmInstance.java | Java | apache-2.0 | 4,453 |
package com.nanyang.app.main.home.sport.cricket;
import com.nanyang.app.main.home.sport.basketball.BasketballMixInfo;
import com.nanyang.app.main.home.sport.main.SportContract;
import com.nanyang.app.main.home.sport.tennis.TennisState;
import com.nanyang.app.main.home.sportInterface.IBetHelper;
/**
 * Cricket-specific sport state: reuses the tennis state flow but supplies a
 * cricket bet helper.
 */
public abstract class CricketState extends TennisState {

    public CricketState(SportContract.View baseView) {
        super(baseView);
    }

    @Override
    protected IBetHelper<BasketballMixInfo> onSetBetHelper() {
        SportContract.View view = getBaseView();
        return new CricketBetHelper(view);
    }
}
| q197585312/testApp | Afb88/src/main/java/com/nanyang/app/main/home/sport/cricket/CricketState.java | Java | apache-2.0 | 629 |
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package v1
import (
"context"
"time"
"github.com/rancher/wrangler/pkg/apply"
"github.com/rancher/wrangler/pkg/condition"
"github.com/rancher/wrangler/pkg/generic"
"github.com/rancher/wrangler/pkg/kv"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
informers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes/typed/core/v1"
listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
)
// NamespaceHandler is the callback signature used by OnChange/OnRemove: it
// receives the cache key and the Namespace, and returns the object that
// should be persisted.
type NamespaceHandler func(string, *v1.Namespace) (*v1.Namespace, error)

// NamespaceController combines the typed client, event registration,
// enqueueing and cached access for core/v1 Namespace objects.
type NamespaceController interface {
	generic.ControllerMeta
	NamespaceClient
	OnChange(ctx context.Context, name string, sync NamespaceHandler)
	OnRemove(ctx context.Context, name string, sync NamespaceHandler)
	Enqueue(name string)
	EnqueueAfter(name string, duration time.Duration)
	Cache() NamespaceCache
}

// NamespaceClient is the CRUD surface, backed by the generated clientset.
type NamespaceClient interface {
	Create(*v1.Namespace) (*v1.Namespace, error)
	Update(*v1.Namespace) (*v1.Namespace, error)
	UpdateStatus(*v1.Namespace) (*v1.Namespace, error)
	Delete(name string, options *metav1.DeleteOptions) error
	Get(name string, options metav1.GetOptions) (*v1.Namespace, error)
	List(opts metav1.ListOptions) (*v1.NamespaceList, error)
	Watch(opts metav1.ListOptions) (watch.Interface, error)
	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error)
}

// NamespaceCache reads Namespaces from the informer's local store.
type NamespaceCache interface {
	Get(name string) (*v1.Namespace, error)
	List(selector labels.Selector) ([]*v1.Namespace, error)
	AddIndexer(indexName string, indexer NamespaceIndexer)
	GetByIndex(indexName, key string) ([]*v1.Namespace, error)
}

// NamespaceIndexer computes the index keys under which a Namespace is stored.
type NamespaceIndexer func(obj *v1.Namespace) ([]string, error)
// namespaceController is the concrete NamespaceController, backed by a typed
// client getter and a shared informer.
type namespaceController struct {
	controllerManager *generic.ControllerManager
	clientGetter      clientset.NamespacesGetter
	informer          informers.NamespaceInformer
	gvk               schema.GroupVersionKind
}

// NewNamespaceController wires a controller for the given GroupVersionKind.
func NewNamespaceController(gvk schema.GroupVersionKind, controllerManager *generic.ControllerManager, clientGetter clientset.NamespacesGetter, informer informers.NamespaceInformer) NamespaceController {
	return &namespaceController{
		controllerManager: controllerManager,
		clientGetter:      clientGetter,
		informer:          informer,
		gvk:               gvk,
	}
}
// FromNamespaceHandlerToHandler adapts a typed NamespaceHandler to the
// untyped generic.Handler signature used by the controller manager.
func FromNamespaceHandlerToHandler(sync NamespaceHandler) generic.Handler {
	return func(key string, obj runtime.Object) (ret runtime.Object, err error) {
		var v *v1.Namespace
		if obj == nil {
			v, err = sync(key, nil)
		} else {
			v, err = sync(key, obj.(*v1.Namespace))
		}
		if v == nil {
			return nil, err
		}
		return v, err
	}
}

// Updater returns a generic.Updater that persists a modified Namespace via
// the typed client.
func (c *namespaceController) Updater() generic.Updater {
	return func(obj runtime.Object) (runtime.Object, error) {
		newObj, err := c.Update(obj.(*v1.Namespace))
		if newObj == nil {
			return nil, err
		}
		return newObj, err
	}
}

// UpdateNamespaceDeepCopyOnChange deep-copies obj, runs handler on the copy,
// and issues an Update only when the handler mutated the copy in place
// (same ResourceVersion but no longer deeply equal to the original).
func UpdateNamespaceDeepCopyOnChange(client NamespaceClient, obj *v1.Namespace, handler func(obj *v1.Namespace) (*v1.Namespace, error)) (*v1.Namespace, error) {
	if obj == nil {
		return obj, nil
	}

	copyObj := obj.DeepCopy()
	newObj, err := handler(copyObj)
	if newObj != nil {
		copyObj = newObj
	}
	if obj.ResourceVersion == copyObj.ResourceVersion && !equality.Semantic.DeepEqual(obj, copyObj) {
		return client.Update(copyObj)
	}

	return copyObj, err
}
// AddGenericHandler registers an untyped handler for this controller's GVK.
func (c *namespaceController) AddGenericHandler(ctx context.Context, name string, handler generic.Handler) {
	c.controllerManager.AddHandler(ctx, c.gvk, c.informer.Informer(), name, handler)
}

// AddGenericRemoveHandler registers a handler that only fires on deletion.
func (c *namespaceController) AddGenericRemoveHandler(ctx context.Context, name string, handler generic.Handler) {
	removeHandler := generic.NewRemoveHandler(name, c.Updater(), handler)
	c.controllerManager.AddHandler(ctx, c.gvk, c.informer.Informer(), name, removeHandler)
}

// OnChange runs sync on every add/update of a Namespace.
func (c *namespaceController) OnChange(ctx context.Context, name string, sync NamespaceHandler) {
	c.AddGenericHandler(ctx, name, FromNamespaceHandlerToHandler(sync))
}

// OnRemove runs sync when a Namespace is deleted.
func (c *namespaceController) OnRemove(ctx context.Context, name string, sync NamespaceHandler) {
	removeHandler := generic.NewRemoveHandler(name, c.Updater(), FromNamespaceHandlerToHandler(sync))
	c.AddGenericHandler(ctx, name, removeHandler)
}

// Enqueue schedules the named (cluster-scoped) Namespace for processing.
func (c *namespaceController) Enqueue(name string) {
	c.controllerManager.Enqueue(c.gvk, c.informer.Informer(), "", name)
}

// EnqueueAfter schedules the named Namespace after the given delay.
func (c *namespaceController) EnqueueAfter(name string, duration time.Duration) {
	c.controllerManager.EnqueueAfter(c.gvk, c.informer.Informer(), "", name, duration)
}

// Informer exposes the underlying shared index informer.
func (c *namespaceController) Informer() cache.SharedIndexInformer {
	return c.informer.Informer()
}

// GroupVersionKind reports the GVK this controller was built for.
func (c *namespaceController) GroupVersionKind() schema.GroupVersionKind {
	return c.gvk
}

// Cache returns a read-only view backed by the informer's local store.
func (c *namespaceController) Cache() NamespaceCache {
	return &namespaceCache{
		lister:  c.informer.Lister(),
		indexer: c.informer.Informer().GetIndexer(),
	}
}
// Create persists a new Namespace through the typed client.
func (c *namespaceController) Create(obj *v1.Namespace) (*v1.Namespace, error) {
	return c.clientGetter.Namespaces().Create(context.TODO(), obj, metav1.CreateOptions{})
}

// Update persists changes to the Namespace's spec/metadata.
func (c *namespaceController) Update(obj *v1.Namespace) (*v1.Namespace, error) {
	return c.clientGetter.Namespaces().Update(context.TODO(), obj, metav1.UpdateOptions{})
}

// UpdateStatus persists changes to the Namespace's status subresource.
func (c *namespaceController) UpdateStatus(obj *v1.Namespace) (*v1.Namespace, error) {
	return c.clientGetter.Namespaces().UpdateStatus(context.TODO(), obj, metav1.UpdateOptions{})
}

// Delete removes the named Namespace; a nil options is treated as defaults.
func (c *namespaceController) Delete(name string, options *metav1.DeleteOptions) error {
	if options == nil {
		options = &metav1.DeleteOptions{}
	}
	return c.clientGetter.Namespaces().Delete(context.TODO(), name, *options)
}

// Get fetches the named Namespace directly from the API server.
func (c *namespaceController) Get(name string, options metav1.GetOptions) (*v1.Namespace, error) {
	return c.clientGetter.Namespaces().Get(context.TODO(), name, options)
}

// List fetches Namespaces directly from the API server.
func (c *namespaceController) List(opts metav1.ListOptions) (*v1.NamespaceList, error) {
	return c.clientGetter.Namespaces().List(context.TODO(), opts)
}

// Watch opens a watch stream for Namespace events.
func (c *namespaceController) Watch(opts metav1.ListOptions) (watch.Interface, error) {
	return c.clientGetter.Namespaces().Watch(context.TODO(), opts)
}

// Patch applies a patch of the given type to the named Namespace.
func (c *namespaceController) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) {
	return c.clientGetter.Namespaces().Patch(context.TODO(), name, pt, data, metav1.PatchOptions{}, subresources...)
}
// namespaceCache serves reads from the informer's local store.
type namespaceCache struct {
	lister  listers.NamespaceLister
	indexer cache.Indexer
}

// Get returns the cached Namespace with the given name.
func (c *namespaceCache) Get(name string) (*v1.Namespace, error) {
	return c.lister.Get(name)
}

// List returns all cached Namespaces matching the selector.
func (c *namespaceCache) List(selector labels.Selector) ([]*v1.Namespace, error) {
	return c.lister.List(selector)
}

// AddIndexer registers a typed index function with the underlying store.
func (c *namespaceCache) AddIndexer(indexName string, indexer NamespaceIndexer) {
	utilruntime.Must(c.indexer.AddIndexers(map[string]cache.IndexFunc{
		indexName: func(obj interface{}) (strings []string, e error) {
			return indexer(obj.(*v1.Namespace))
		},
	}))
}

// GetByIndex returns all cached Namespaces stored under the given index key.
func (c *namespaceCache) GetByIndex(indexName, key string) (result []*v1.Namespace, err error) {
	objs, err := c.indexer.ByIndex(indexName, key)
	if err != nil {
		return nil, err
	}
	result = make([]*v1.Namespace, 0, len(objs))
	for _, obj := range objs {
		result = append(result, obj.(*v1.Namespace))
	}
	return result, nil
}
// NamespaceStatusHandler computes a new status for the object.
type NamespaceStatusHandler func(obj *v1.Namespace, status v1.NamespaceStatus) (v1.NamespaceStatus, error)

// NamespaceGeneratingHandler computes a new status plus a set of child
// objects to be applied alongside the owner.
type NamespaceGeneratingHandler func(obj *v1.Namespace, status v1.NamespaceStatus) ([]runtime.Object, v1.NamespaceStatus, error)

// RegisterNamespaceStatusHandler wraps handler so that the returned status is
// persisted (with the given condition recording any error) on every change.
func RegisterNamespaceStatusHandler(ctx context.Context, controller NamespaceController, condition condition.Cond, name string, handler NamespaceStatusHandler) {
	statusHandler := &namespaceStatusHandler{
		client:    controller,
		condition: condition,
		handler:   handler,
	}
	controller.AddGenericHandler(ctx, name, FromNamespaceHandlerToHandler(statusHandler.sync))
}

// RegisterNamespaceGeneratingHandler wires a generating handler: child
// objects are applied via apply on change and pruned on removal.
func RegisterNamespaceGeneratingHandler(ctx context.Context, controller NamespaceController, apply apply.Apply,
	condition condition.Cond, name string, handler NamespaceGeneratingHandler, opts *generic.GeneratingHandlerOptions) {
	statusHandler := &namespaceGeneratingHandler{
		NamespaceGeneratingHandler: handler,
		apply:                      apply,
		name:                       name,
		gvk:                        controller.GroupVersionKind(),
	}
	if opts != nil {
		statusHandler.opts = *opts
	}
	controller.OnChange(ctx, name, statusHandler.Remove)
	RegisterNamespaceStatusHandler(ctx, controller, condition, name, statusHandler.Handle)
}
// namespaceStatusHandler runs a status handler and persists the resulting
// status when it differs from the stored one.
type namespaceStatusHandler struct {
	client    NamespaceClient
	condition condition.Cond
	handler   NamespaceStatusHandler
}

// sync invokes the wrapped handler on a deep copy; on handler error the
// original status is restored before the error is recorded on the condition,
// and UpdateStatus is called only if the status actually changed.
func (a *namespaceStatusHandler) sync(key string, obj *v1.Namespace) (*v1.Namespace, error) {
	if obj == nil {
		return obj, nil
	}

	origStatus := obj.Status.DeepCopy()
	obj = obj.DeepCopy()
	newStatus, err := a.handler(obj, obj.Status)
	if err != nil {
		// Revert to old status on error
		newStatus = *origStatus.DeepCopy()
	}

	if a.condition != "" {
		// Conflicts are transient, so they are not surfaced on the condition.
		if errors.IsConflict(err) {
			a.condition.SetError(&newStatus, "", nil)
		} else {
			a.condition.SetError(&newStatus, "", err)
		}
	}
	if !equality.Semantic.DeepEqual(origStatus, &newStatus) {
		var newErr error
		obj.Status = newStatus
		obj, newErr = a.client.UpdateStatus(obj)
		// Preserve the handler's error if there was one; otherwise report
		// the UpdateStatus error.
		if err == nil {
			err = newErr
		}
	}
	return obj, err
}
// namespaceGeneratingHandler runs a NamespaceGeneratingHandler and applies
// the objects it returns, tagged with this handler's name as the apply set
// ID so they can be tracked and pruned together.
type namespaceGeneratingHandler struct {
	NamespaceGeneratingHandler
	apply apply.Apply
	opts  generic.GeneratingHandlerOptions
	gvk   schema.GroupVersionKind
	name  string
}
// Remove is registered OnChange; it only acts when obj is nil (the
// Namespace was deleted). It then reconstructs a stub owner from the key and
// applies an empty object set for this owner/set ID, which presumably prunes
// any previously applied generated objects -- confirm against wrangler's
// apply semantics.
func (a *namespaceGeneratingHandler) Remove(key string, obj *v1.Namespace) (*v1.Namespace, error) {
	if obj != nil {
		return obj, nil
	}
	obj = &v1.Namespace{}
	obj.Namespace, obj.Name = kv.RSplit(key, "/")
	obj.SetGroupVersionKind(a.gvk)
	return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
		WithOwner(obj).
		WithSetID(a.name).
		ApplyObjects()
}
// Handle runs the generating handler and, on success, applies the objects it
// returned, owned by obj and grouped under this handler's set ID.
func (a *namespaceGeneratingHandler) Handle(obj *v1.Namespace, status v1.NamespaceStatus) (v1.NamespaceStatus, error) {
	objs, newStatus, err := a.NamespaceGeneratingHandler(obj, status)
	if err != nil {
		return newStatus, err
	}
	return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
		WithOwner(obj).
		WithSetID(a.name).
		ApplyObjects(objs...)
}
| rancher/k3s | vendor/github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1/namespace.go | GO | apache-2.0 | 11,182 |
/**
 * Demo entry point for the observer pattern: registers three observers with
 * an {@code Office} subject and broadcasts a single message to them.
 */
public class App {
    public static void main(String[] args) {
        final Office office = new Office();
        final Person[] people = {
            new Person("p1"),
            new Person("p2"),
            new Person("p3"),
        };
        for (Person person : people) {
            office.registerObserver(person);
        }
        office.notifyAllObservers("Hello world");
    }
}
| nishtahir/MektoryJava2 | Module5/ObserverPattern/src/main/java/com/example/App.java | Java | apache-2.0 | 349 |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A user-friendly interface for program.py."""
import collections
import logging
import os
import socket
import sys
import tempfile
import time
import gflags
import makani
from makani.avionics.bootloader import generate_image
from makani.avionics.common import aio
from makani.avionics.common import aio_version
from makani.avionics.linux.provision import process_helper
from makani.avionics.linux.provision import ui_helper
from makani.avionics.network import network_config
from makani.lib.bazel import bazel_util
from makani.lib.python import c_helpers
import program
_NETWORK_CONFIG = network_config.NetworkConfig()
def IpToAioNode(ip):
  """Maps an AIO-range IPv4 address string to its AioNode.

  Args:
    ip: Dotted-quad address string; must lie in the AIO subnet 192.168.1.0/24.

  Returns:
    The network_config.AioNode whose final octet matches ip.

  Raises:
    ValueError: If ip is not in the AIO range 192.168.1.0/24 (or the final
        octet is not an integer).
  """
  # Validate explicitly rather than with assert, which is silently stripped
  # when Python runs with optimizations (-O) enabled.
  if not ip.startswith('192.168.1.'):
    raise ValueError('Ip {} not in AIO range 192.168.1.0/24.'.format(ip))
  final_octet = int(ip.split('.')[-1])
  node_num = aio.aio_node_to_ip_address.FinalOctetToAioNode(final_octet)
  return _NETWORK_CONFIG.aio_nodes[node_num]
def DetectAio(timeout=1.1):
  """Detect AIO nodes on the network, present all options if none detected.

  Listens for AIO traffic for up to `timeout` seconds, recording the source
  IP and reported AIO version of every message received.

  Args:
    timeout: Seconds to listen for traffic.

  Returns:
    A pair (nodes, versions) of parallel, de-duplicated tuples; both empty
    when no traffic was seen.
  """
  sources = aio.aio_node_helper.Names()
  types = aio.message_type_helper.Names()
  client = aio.AioClient(types, timeout=0.1, allowed_sources=sources)
  ip_list = []
  version_list = []
  timer_start = time.time()
  # try/finally guarantees the client socket is closed even if Recv raises
  # something other than socket.error (the previous code leaked it then).
  try:
    while time.time() - timer_start < timeout:
      try:
        ip, header, _ = client.Recv(accept_invalid=True)
        ip_list.append(ip)
        version_list.append(header.version)
      except socket.error:
        # Per-receive timeouts are expected while waiting for traffic.
        pass
  finally:
    client.Close()
  if ip_list and version_list:
    # De-duplication using set conversion.
    ip_tuple, version_tuple = zip(*set(zip(ip_list, version_list)))
    return tuple([IpToAioNode(ip) for ip in ip_tuple]), version_tuple
  return tuple(), tuple()
def Tms570NodeDict():
  """Groups TMS570 AIO nodes (plus 'unknown'-labeled nodes) by label name."""
  nodes_by_label = collections.defaultdict(list)
  for aio_node in _NETWORK_CONFIG.aio_nodes:
    if aio_node.tms570_node or aio_node.label_name == 'unknown':
      nodes_by_label[aio_node.label_name].append(aio_node)
  return nodes_by_label
def NodeSelectMenu(dialog_ui, dialog_kwargs=None):
  """Prompts the user for an AIO node via a two-level dialog menu.

  Args:
    dialog_ui: The ui_helper.UserInterface used to display menus.
    dialog_kwargs: Optional extra keyword args forwarded to dialog_ui.Menu.

  Returns:
    The selected node's enum value as a string, or None if the user
    cancelled either menu.
  """
  kwargs = dialog_kwargs or {}
  nodes_by_label = Tms570NodeDict()
  label_menu = sorted([(label, label) for label in nodes_by_label])
  ok, label = dialog_ui.Menu('Select AIO node label:', label_menu, **kwargs)
  if not ok:
    return None
  node_menu = [(str(node.enum_value), node.snake_name)
               for node in nodes_by_label[label]]
  ok, node = dialog_ui.Menu('Select AIO node:', node_menu, **kwargs)
  if not ok:
    return None
  return node
def GetSerialRevisions(serial_type='aio'):
  """Returns the revision names defined in serial_type's serial params file.

  Args:
    serial_type: Hardware type whose serial parameter YAML file to read.

  Returns:
    The revision keys of the decoded YAML mapping.
  """
  yaml_file = program.SERIAL_PARAM_FILE_TEMPLATE.format(serial_type)
  # Use a context manager so the file handle is closed deterministically
  # instead of relying on garbage collection.
  with open(yaml_file) as f:
    return program.codec.DecodeYaml(f.read()).keys()
def GetSerialTypes():
  """Lists the hardware types that have a serial params file on disk."""
  # Formatting the template with an empty type yields the generic filename
  # suffix shared by every serial params file.
  suffix = os.path.basename(program.SERIAL_PARAM_FILE_TEMPLATE.format(''))
  directory = os.path.dirname(program.SERIAL_PARAM_FILE_TEMPLATE)
  return [name.replace(suffix, '')
          for name in os.listdir(directory)
          if name.endswith(suffix)]
class MenuFunction(object):
  """Maps dialog menu entries to callables.

  Each Register() call appends a (tag, name, func, args) entry; Run()
  presents the entries as a dialog menu and invokes the callable whose tag
  the user selected.
  """

  def __init__(self, dialog_ui, title=None):
    self.dialog_ui = dialog_ui
    self.title = title
    self.tag = []
    self.name = []
    self.func = []
    self.args = []

  def Register(self, name, func, func_args=None, tag=None):
    """Adds a menu entry that invokes func(*func_args) when selected.

    When tag is falsy, a sequential numeric tag is generated.
    """
    entry_tag = tag if tag else str(len(self.tag))
    assert isinstance(entry_tag, str), 'Invalid parameter: tag must be a str.'
    assert isinstance(name, str), 'Invalid parameter: name must be a str.'
    assert hasattr(func, '__call__'), ('Invalid parameter: func must have '
                                       '__call__ attribute.')
    self.tag.append(entry_tag)
    self.name.append(name)
    self.func.append(func)
    self.args.append(func_args or [])

  def Run(self, menu_text, dialog_kwargs=None):
    """Shows the menu and runs the entry the user picks.

    Returns:
      (ok, result): ok is the dialog status; result is the selected
      callable's return value, or None when the user cancelled.
    """
    kwargs = dialog_kwargs or {}
    if self.title:
      kwargs['title'] = self.title
    assert isinstance(menu_text, str), ('Invalid parameter: menu_text must be '
                                        'a str.')
    ok, chosen = self.dialog_ui.Menu(menu_text, zip(self.tag, self.name),
                                     **kwargs)
    if not ok:
      return ok, None
    idx = self.tag.index(chosen)
    return ok, self.func[idx](*self.args[idx])
class ProgramWrapper(object):
  """A wrapper for program.py operations which provides a UI using dialog."""
  # Base command line for invoking program.py; ToggleJlink appends or
  # removes the --jlink / --force_hardware flags on this attribute.
  program_py_command = [
      'python',
      os.path.join(makani.HOME, 'avionics', 'bootloader', 'program.py')]
  def __init__(self):
    # Builds the dialog UI and immediately runs node detection, which sets
    # self.select_node / self.version on success.
    self.dialog_ui = ui_helper.UserInterface()
    self.AutoDetect()
  def AutoDetect(self):
    """Detects a node, falling back to manual selection or a longer scan."""
    timeout = 1.1
    while True:
      if self.DetectNodeAndVersion(detection_timeout=timeout):
        break
      else:
        ok = self.dialog_ui.YesNo('Please select a node from the list.',
                                  title='Node Scanner',
                                  yes_label='OK', no_label='Quit')
        if not ok:
          break
        if self.SelectNode():
          break
        # User selected cancel in SelectNode menu, try harder to detect.
        else:
          timeout = 3.3
  def DetectNodeAndVersion(self, detection_timeout=1.1):
    """Detect an AIO nodes, offer selection if more than one is found."""
    ok = False
    while not ok:
      self.dialog_ui.Info('Scanning for AIO nodes...')
      try:
        detected_nodes, versions = DetectAio(timeout=detection_timeout)
        if detected_nodes:
          select_node_menu = [(str(node.enum_value), str(node.camel_name)) for
                              node in detected_nodes]
          # Sentinel entry lets the user fall through to the full manual
          # selection menu instead of one of the detected nodes.
          sentinel = str(detected_nodes[-1].enum_value + 1)
          select_node_menu.append((sentinel, 'Select from complete list'))
          ok, node_ind = self.dialog_ui.Menu('Please select an AIO node:',
                                             options=select_node_menu,
                                             cancel_label='Rescan')
          if node_ind == sentinel:
            return False
        else:
          return False
      except socket.error, e:
        self.dialog_ui.Message('Socket error: {}'.format(e),
                               title='Node Scanner')
        return False
      # NOTE(review): these assignments also run when the user pressed
      # 'Rescan' (ok is False); node_ind is then whatever Menu returned on
      # cancel -- confirm int(node_ind) cannot fail in that case.
      self.select_node = detected_nodes[int(node_ind)]
      self.version = versions[int(node_ind)]
    return detected_nodes and ok
  def SelectNode(self):
    """Manually picks a node; returns True iff one was selected."""
    node_ind = NodeSelectMenu(self.dialog_ui, dialog_kwargs={'cancel_label':
                                                             'Re-scan'})
    if node_ind:
      self.select_node = _NETWORK_CONFIG.aio_nodes[int(node_ind)]
      # Version is unknown until traffic is observed; see UpdateVersion.
      self.version = None
      return True
    else:
      return False
  def HasCarrierBoard(self):
    # cs and motor nodes are the only node types without a carrier board.
    node_prefix = self.select_node.snake_name.partition('_')[0]
    return node_prefix not in ('cs', 'motor')
  def SerialParamSelect(self, hardware_type):
    """Present menu to select serial parameter revision, and serial_number.

    Returns (rev, serial_number), or (None, None) if the user cancelled.
    """
    rev_list = sorted(GetSerialRevisions(hardware_type))
    if 'common' in rev_list:
      rev_list.remove('common') # 'common' is not to be used on hardware.
    sub_ok, rev = self.dialog_ui.Menu('Select the hardware revision:',
                                      zip(rev_list, rev_list))
    if not sub_ok:
      return None, None
    sub_ok, serial_number = self.dialog_ui.Input('Please enter the'
                                                 ' serial number:')
    if not sub_ok:
      return None, None
    return rev, serial_number
  def RunDialogProcess(self, process, title):
    """Run process in an dialog progress-box with munged stdout & stderr.

    Returns the (return_code, stdout, stderr) triple from RunProcess.
    """
    with tempfile.NamedTemporaryFile() as temp_file:
      def WriteAndDisplay(line):
        # Append each output line to the temp file and re-render the dialog
        # progress box from the file's current contents.
        temp_file.write(line)
        temp_file.flush()
        self.dialog_ui.dialog_instance.progressbox(file_path=temp_file.name,
                                                   title=title, width=100,
                                                   height=30)
      return process_helper.RunProcess(process, parse_stdout=WriteAndDisplay,
                                       parse_stderr=WriteAndDisplay)
  def GetFirmwareFiles(self, file_suffix):
    """Returns a list of firmware files with file_suffix removed."""
    assert isinstance(self.select_node, network_config.AioNode)
    firmware_path = os.path.join(
        bazel_util.GetTms570BinDirectory(), 'avionics',
        os.path.dirname(self.select_node.application_path))
    matching_files = []
    for filename in os.listdir(firmware_path):
      if filename.endswith(file_suffix):
        matching_files.append(filename.replace(file_suffix, ''))
    return matching_files
  def GetConfigNames(self):
    """Returns a list of config file short-names compiled for select_node."""
    return self.GetFirmwareFiles('_config_params.bin')
  def GetCalibNames(self):
    """Returns a list of calib file short-names compiled for select_node."""
    return self.GetFirmwareFiles('_calib_params.bin')
  def RenameNode(self):
    """Rename the node from node to a user-selected node type.

    Returns a (return_code, stdout, stderr) triple.
    """
    rename_to = NodeSelectMenu(self.dialog_ui)
    if rename_to:
      rename_to = _NETWORK_CONFIG.aio_nodes[int(rename_to)]
      rc, stdout, stderr = self.RunDialogProcess(
          self.program_py_command + [self.select_node.snake_name,
                                     '--rename_to', rename_to.snake_name],
          title='Renaming node...')
      if rc == 0:
        # Track the new identity and re-read the AIO version from traffic.
        self.select_node = rename_to
        self.UpdateVersion(timeout=3)
      return rc, stdout, stderr
    return -1, '', 'User cancelled.'
  def ProgramApplication(self):
    """Program the application on select_node."""
    rc, stdout, stderr = self.RunDialogProcess(
        self.program_py_command + [self.select_node.snake_name],
        title='Programming application...')
    return rc, stdout, stderr
  def ProgramBootloader(self):
    """Program the bootloader on select_node."""
    rc, stdout, stderr = self.RunDialogProcess(
        self.program_py_command + [self.select_node.snake_name, '--bootloader'],
        title='Programming bootloader...')
    return rc, stdout, stderr
  def ProgramSerial(self):
    """Program the serial number into select_node."""
    hardware_type = self.SerialTypeMenu(is_carrier=False)
    if not hardware_type:
      return -1, '', 'User cancelled'
    rev, serial_number = self.SerialParamSelect(hardware_type)
    if not rev or not serial_number:
      return -1, '', 'User cancelled.'
    rc, stdout, stderr = self.RunDialogProcess(
        self.program_py_command + [self.select_node.snake_name, '--serial',
                                   hardware_type, rev, serial_number],
        title='Programming serial...')
    return rc, stdout, stderr
  def ProgramCarrierSerial(self):
    """Program the carrier serial board onto select_node."""
    node_prefix = self.select_node.snake_name.partition('_')[0]
    if node_prefix in ['cs', 'motor']:
      # NOTE(review): this branch falls through and returns None rather than
      # a (rc, stdout, stderr) triple like every other path; main() indexes
      # result[0] when the menu reports success -- confirm this cannot crash.
      self.dialog_ui.Message('{} type nodes do not have carriers.'.format(
          node_prefix), title='Carrier Serial Programmer')
    else:
      carrier_type = self.SerialTypeMenu(is_carrier=True)
      if not carrier_type:
        return -1, '', 'User cancelled.'
      rev, serial_number = self.SerialParamSelect(carrier_type)
      if not rev or not serial_number:
        return -1, '', 'User cancelled.'
      rc, stdout, stderr = self.RunDialogProcess(
          self.program_py_command + [self.select_node.snake_name,
                                     '--carrier_serial', carrier_type, rev,
                                     serial_number],
          title='Programming carrier serial...')
      return rc, stdout, stderr
  def ProgramCalib(self):
    """Program the calib params into select_node."""
    calibs = self.GetCalibNames()
    if not calibs:
      return (-1, '', 'No valid calibrations for '
              '{}.'.format(self.select_node.snake_name))
    sub_ok, calib_name = self.dialog_ui.Menu('Select calibration:',
                                             zip(calibs, calibs))
    if sub_ok:
      return self.RunDialogProcess(
          self.program_py_command + [self.select_node.snake_name, '--calib',
                                     calib_name],
          title='Programming calib...')
    return -1, '', 'User cancelled.'
  def ProgramConfig(self):
    """Program the config params into select_node."""
    configs = self.GetConfigNames()
    if not configs:
      return (-1, '',
              'No valid configs for {}.'.format(self.select_node.snake_name))
    sub_ok, config_name = self.dialog_ui.Menu('Select config:',
                                              zip(configs, configs))
    if sub_ok:
      return self.RunDialogProcess(
          self.program_py_command + [self.select_node.snake_name, '--config',
                                     config_name],
          title='Programming config...')
    return -1, '', 'User cancelled.'
  def UpgradeBootloader(self):
    """Upgrade the bootloader on select_node."""
    return self.RunDialogProcess(
        self.program_py_command + [self.select_node.snake_name,
                                   '--upgrade_bootloader'],
        title='Upgrading bootloader...')
  def CheckConsole(self):
    """Display the Stdio and SelfTest messages from select_node using dialog."""
    sources = [self.select_node.enum_name]
    types = ['kMessageTypeStdio', 'kMessageTypeSelfTest']
    self.dialog_ui.Info('Watching for Stdio messages...(3s)',
                        title='Console Checker')
    try:
      client = aio.AioClient(types, timeout=3, allowed_sources=sources)
      _, _, message = client.Recv(accept_invalid=True)
      self.dialog_ui.Message(getattr(message, 'text', repr(message)),
                             title='Console Message')
    except socket.timeout, e:
      return -1, '', 'No Stdio messages found.'
    except socket.error, e:
      # Some platforms report a receive timeout as a plain socket.error.
      if str(e) == 'timed out':
        return -1, '', 'No Stdio messages found.'
      raise
    finally:
      # NOTE(review): if the AioClient constructor itself raises, 'client'
      # is unbound here and this line raises NameError -- confirm.
      client.Close()
    return 0, '', ''
  def UpdateVersion(self, timeout=1):
    """Get AIO version from header of any packet from select_node."""
    sources = [self.select_node.enum_name]
    types = aio.message_type_helper.Names()
    self.dialog_ui.Info('Watching for messages...({}s)'.format(timeout),
                        title='Version Checker')
    try:
      client = aio.AioClient(types, timeout=timeout, allowed_sources=sources)
      _, header, _ = client.Recv(accept_invalid=True)
    except socket.timeout as e:
      # No traffic seen; forget any previously detected version.
      self.version = None
      return -1, '', str(e)
    except socket.error as e:
      if str(e) == 'timed out':
        self.version = None
        return -1, '', str(e)
      else:
        raise
    else:
      self.version = str(header.version)
    finally:
      client.Close()
    return 0, '', ''
  def SerialTypeMenu(self, is_carrier=False):
    """Ask the user to select a hardware type of select_node.
    The menu options are derived from serial_params files.
    Args:
      is_carrier: Set to True if flashing serial params to a carrier board.
    Returns:
      Selected hardware type or None if the user cancelled.
    """
    default = 'aio'
    serial_types = GetSerialTypes()
    node_prefix = self.select_node.snake_name.partition('_')[0]
    if is_carrier:
      menu_text = 'Select carrier hardware type:'
      serial_types.remove('aio')  # Carrier cannot be AIO node.
      serial_types.remove('motor')
      serial_types.remove('cs')
      if node_prefix in serial_types:
        default = node_prefix
    else:
      # Nodes without a carrier board flash their own type directly.
      if (node_prefix in serial_types and not
          self.HasCarrierBoard()):
        default = node_prefix
      menu_text = 'Select hardware type:'
    serial_types.sort()
    sub_ok, serial_type = self.dialog_ui.Menu(menu_text, zip(serial_types,
                                                             serial_types),
                                              default_item=default)
    if sub_ok:
      return serial_type
  def HardwareIdentityMenu(self):
    """Ask the user to select a hardware identity of select_node.
    The menu options are derived from identity_types.h.
    Returns:
      Selected hardware identity or None if the user cancelled.
    """
    default = 'aio'
    node_prefix = self.select_node.snake_name.partition('_')[0]
    hardware_types = [c_helpers.CamelToSnake(camel) for camel in
                      generate_image.hardware_type_helper.ShortNames()]
    hardware_types.remove('unknown')
    if (node_prefix in hardware_types and not
        self.HasCarrierBoard()):
      default = node_prefix
    rc, hardware_type = self.dialog_ui.Menu(
        'Select hardware type:', zip(hardware_types, hardware_types),
        default_item=default)
    if rc:
      return hardware_type
  def ToggleJlink(self):
    """Toggles the --jlink flag for program.py, creates a new menu."""
    if '--jlink' not in self.program_py_command:
      hardware_type = self.HardwareIdentityMenu()
      if hardware_type:
        # NOTE(review): append mutates the shared class-level list the first
        # time, while the else branch rebinds an instance attribute; with a
        # single ProgramWrapper instance this is harmless, but confirm if
        # more instances are ever created.
        self.program_py_command.append('--jlink')
        self.program_py_command += ['--force_hardware', hardware_type]
    else:
      # Reset to the pristine base command line without jlink flags.
      self.program_py_command = [
          'python',
          os.path.join(makani.HOME, 'avionics', 'bootloader', 'program.py')]
    return 0, '', ''
def CreateMenu(program_wrapper):
  """Creates a MenuFunction instance and returns it.

  Entries that only apply to bootloader-network programming are omitted when
  program.py is running in jlink mode; registration order (and therefore the
  auto-generated menu tags) matches the non-table-driven original.
  """
  jlink_mode = '--jlink' in program_wrapper.program_py_command
  entries = [
      ('Program Application', program_wrapper.ProgramApplication, True),
      ('Program Bootloader', program_wrapper.ProgramBootloader, True),
      ('Rename Node', program_wrapper.RenameNode, not jlink_mode),
      ('Program Serial', program_wrapper.ProgramSerial, True),
      ('Program Carrier Serial', program_wrapper.ProgramCarrierSerial,
       not jlink_mode and program_wrapper.HasCarrierBoard()),
      ('Program Config', program_wrapper.ProgramConfig, True),
      ('Program Calib', program_wrapper.ProgramCalib, True),
      ('Upgrade Bootloader', program_wrapper.UpgradeBootloader,
       not jlink_mode),
      ('Check Console', program_wrapper.CheckConsole, True),
      ('Check Version', program_wrapper.UpdateVersion, True),
      ('Toggle Jlink', program_wrapper.ToggleJlink, True),
  ]
  menu = MenuFunction(program_wrapper.dialog_ui, 'Program.py Menu')
  for entry_name, entry_func, include in entries:
    if include:
      menu.Register(entry_name, entry_func)
  return menu
def main():
  """Runs the interactive program.py menu loop until the user quits."""
  logging.basicConfig(level=logging.WARNING)
  flags = gflags.FLAGS
  try:
    _ = flags(sys.argv)
  except gflags.FlagsError, e:
    print ('%s\nUsage: %s ARGS\n%s'
           % (e, sys.argv[0], flags))
    sys.exit(1)
  # Constructing the wrapper runs node auto-detection; without a selected
  # node there is nothing to program.
  program_py = ProgramWrapper()
  assert hasattr(program_py, 'select_node'), 'No node was selected.'
  ok = True
  while ok:
    # Rebuild the menu each pass so it reflects the current node, detected
    # version, and jlink state.
    menu_title = 'Node: {}\n'.format(program_py.select_node.snake_name)
    if program_py.version:
      menu_title += 'Detected AIO Version: {}\n'.format(program_py.version)
    menu_title += 'Compiled AIO Version: {}\n'.format(aio_version.AIO_VERSION)
    menu_title += 'Using jlink: {}'.format('--jlink' in
                                           program_py.program_py_command)
    ok, result = CreateMenu(program_py).Run(menu_title,
                                            dialog_kwargs={'cancel_label':
                                                           'Quit'})
    if ok and result[0] != 0:
      # Replace \\n with spaces, or dialog will assume that all newlines are
      # escaped twice and the output won't have newlines where it should.
      program_py.dialog_ui.Message(('Failure (process returned {}):\n'
                                    '{}').format(result[0],
                                                 result[2].replace('\\n', ' ')),
                                   title='Error')
if __name__ == '__main__':
  main()
| google/makani | avionics/bootloader/program_ui.py | Python | apache-2.0 | 20,711 |
<?php
// Persists edits to an existing product row and redirects back to the
// product list.

// configuration
include('../connect.php');

// New data posted from the edit-product form.
$id = $_POST['memi'];            // product_id of the row being updated
$a = $_POST['code'];             // product code
$z = $_POST['gen'];              // generic name
$g = $_POST['o_price'];          // original (cost) price
$k = $_POST['price'];            // retail price
$m = $_POST['level'];            // reorder level
$qtyl = $_POST['qtyl'];          // quantity
$l = $_POST['wholesaleprice'];   // wholesale price
$maxr = $_POST['maxr'];          // maximum retail discount (percent)
// Note: the previous version also read 'sold' and 'maxws' and derived
// $maxpricer/$maxpricews, but none of those values were ever used, so the
// dead code has been removed.

// query -- a prepared statement keeps the user-supplied values out of the
// SQL text.
$sql = "UPDATE products
		SET product_code=?,gen_name=?,o_price=?,price=?,wholesaleprice=?,level=?,maxdiscre=?,qty=?
		WHERE product_id=?";
$q = $db->prepare($sql);
$q->execute(array($a,$z,$g,$k,$l,$m,$maxr,$qtyl,$id));

// Redirect and stop: without exit, any code after header() would still run.
header("location: products.php?");
exit;
?>
| mathunjoroge/pharmacy | main/saveeditproduct.php | PHP | apache-2.0 | 692 |
package org.adligo.fabricate_tests.common.system;
import org.adligo.fabricate.common.log.I_FabLog;
import org.adligo.fabricate.common.system.AlreadyLoggedException;
import org.adligo.fabricate.common.system.I_FabSystem;
import org.adligo.fabricate.common.system.I_LocatableRunnable;
import org.adligo.fabricate.common.system.RunMonitor;
import org.adligo.tests4j.system.shared.trials.BeforeTrial;
import org.adligo.tests4j.system.shared.trials.SourceFileScope;
import org.adligo.tests4j.system.shared.trials.Test;
import org.adligo.tests4j_4mockito.MockMethod;
import org.adligo.tests4j_4mockito.MockitoSourceFileTrial;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
@SourceFileScope (sourceClass=RunMonitor.class, minCoverage=96.0)
public class RunMonitorTrial extends MockitoSourceFileTrial {
  // Single-threaded pool shared by the tests; each test submits one
  // RunMonitor to it and then waits for completion.
  private static ExecutorService THREAD_POOL_;
  @BeforeTrial
  public static void beforeTrial(Map<String,Object> params) {
    THREAD_POOL_ = Executors.newFixedThreadPool(1);
  }
  @SuppressWarnings("boxing")
  @Test
  public void testConstructorAndRun() {
    // Happy path: the delegate runs to completion, the monitor records the
    // current thread and reports finished with no failure.
    I_FabSystem system = mock(I_FabSystem.class);
    Thread thread = mock(Thread.class);
    when(system.currentThread()).thenReturn(thread);
    when(system.newArrayBlockingQueue(Boolean.class, 1)).thenReturn(
        new ArrayBlockingQueue<Boolean>(1));
    AtomicBoolean ran = new AtomicBoolean(false);
    I_LocatableRunnable run = new I_LocatableRunnable() {
      @Override
      public String getCurrentLocation() {
        return "cl";
      }
      @Override
      public void run() {
        // Sleep slightly longer than the first wait below so the first
        // waitUntilFinished(200) is expected to elapse before completion.
        try {
          Thread.sleep(201);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
        ran.set(true);
      }
      @Override
      public String getAdditionalDetail() {
        // TODO Auto-generated method stub
        return null;
      }
    };
    RunMonitor rm = new RunMonitor(system, run, 0);
    // Pre-run state: no thread, not finished, nothing caught.
    assertNull(rm.getThread());
    assertEquals(0, rm.getSequence());
    assertFalse(rm.isFinished());
    assertNull(rm.getCaught());
    THREAD_POOL_.submit(rm);
    long time = System.currentTimeMillis();
    try {
      rm.waitUntilFinished(200);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    long dur = System.currentTimeMillis() - time;
    // The wait must have blocked for at least its full timeout.
    assertGreaterThanOrEquals(200, dur);
    try {
      rm.waitUntilFinished(200);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    assertSame(thread, rm.getThread());
    assertTrue(ran.get());
    assertFalse(rm.hasFailure());
    assertTrue(rm.isFinished());
    assertNull(rm.getCaught());
    assertSame(run, rm.getDelegate());
  }
  @SuppressWarnings("boxing")
  @Test
  public void testConstructorAndRunWithThrown() {
    // A RuntimeException from the delegate must be caught, recorded, and
    // printed exactly once via the system log.
    I_FabSystem system = mock(I_FabSystem.class);
    I_FabLog log = mock(I_FabLog.class);
    MockMethod<Void> printTraceMethod = new MockMethod<Void>();
    doAnswer(printTraceMethod).when(log).printTrace(any());
    when(system.getLog()).thenReturn(log);
    when(system.newArrayBlockingQueue(Boolean.class, 1)).thenReturn(
        new ArrayBlockingQueue<Boolean>(1));
    RuntimeException toThrow = new RuntimeException("x");
    I_LocatableRunnable run = new I_LocatableRunnable() {
      @Override
      public String getCurrentLocation() {
        return "cl";
      }
      @Override
      public void run() {
        try {
          Thread.sleep(201);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
        throw toThrow;
      }
      @Override
      public String getAdditionalDetail() {
        // TODO Auto-generated method stub
        return null;
      }
    };
    RunMonitor rm = new RunMonitor(system, run, 1);
    assertEquals(1, rm.getSequence());
    assertFalse(rm.isFinished());
    assertNull(rm.getCaught());
    THREAD_POOL_.submit(rm);
    long time = System.currentTimeMillis();
    try {
      rm.waitUntilFinished(200);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    long dur = System.currentTimeMillis() - time;
    assertGreaterThanOrEquals(200, dur);
    try {
      rm.waitUntilFinished(2000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    assertTrue(rm.isFinished());
    assertTrue(rm.hasFailure());
    // The thrown exception is logged once and retrievable via getCaught().
    assertSame(toThrow, printTraceMethod.getArg(0));
    assertEquals(1, printTraceMethod.count());
    Throwable caught = rm.getCaught();
    assertSame(toThrow, caught);
    assertEquals("x", caught.getMessage());
  }
  @SuppressWarnings("boxing")
  @Test
  public void testConstructorAndRunWithThrownAlreadLoggedException() {
    // An AlreadyLoggedException is still recorded as a failure but must NOT
    // be logged a second time (printTrace is never called).
    I_FabSystem system = mock(I_FabSystem.class);
    I_FabLog log = mock(I_FabLog.class);
    when(system.getLog()).thenReturn(log);
    MockMethod<Void> printTraceMethod = new MockMethod<Void>();
    doAnswer(printTraceMethod).when(log).printTrace(any());
    when(system.newArrayBlockingQueue(Boolean.class, 1)).thenReturn(
        new ArrayBlockingQueue<Boolean>(1));
    AlreadyLoggedException toThrow = new AlreadyLoggedException(new IllegalStateException("x"));
    I_LocatableRunnable run = new I_LocatableRunnable() {
      @Override
      public String getCurrentLocation() {
        return "cl";
      }
      @Override
      public void run() {
        try {
          Thread.sleep(201);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
        throw toThrow;
      }
      @Override
      public String getAdditionalDetail() {
        // TODO Auto-generated method stub
        return null;
      }
    };
    RunMonitor rm = new RunMonitor(system, run, 1);
    assertEquals(1, rm.getSequence());
    assertFalse(rm.isFinished());
    assertNull(rm.getCaught());
    THREAD_POOL_.submit(rm);
    long time = System.currentTimeMillis();
    try {
      rm.waitUntilFinished(200);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    long dur = System.currentTimeMillis() - time;
    assertGreaterThanOrEquals(200, dur);
    try {
      rm.waitUntilFinished(2000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    assertTrue(rm.isFinished());
    assertTrue(rm.hasFailure());
    assertEquals(0, printTraceMethod.count());
    Throwable caught = rm.getCaught();
    assertSame(toThrow, caught);
  }
}
| adligo/fabricate_tests.adligo.org | src/org/adligo/fabricate_tests/common/system/RunMonitorTrial.java | Java | apache-2.0 | 6,655 |
/*
* Copyright 2015-16, Yahoo! Inc.
* Licensed under the terms of the Apache License 2.0. See LICENSE file at the project root for terms.
*/
package com.yahoo.sketches.theta;
import static com.yahoo.sketches.theta.ForwardCompatibilityTest.convertSerV3toSerV1;
import static com.yahoo.sketches.theta.ForwardCompatibilityTest.convertSerV3toSerV2;
import static com.yahoo.sketches.theta.HeapUnionTest.testAllCompactForms;
import static com.yahoo.sketches.theta.PreambleUtil.SER_VER_BYTE;
import static com.yahoo.sketches.theta.SetOperation.getMaxUnionBytes;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import java.util.Arrays;
import org.testng.annotations.Test;
import com.yahoo.memory.Memory;
import com.yahoo.memory.NativeMemory;
import com.yahoo.sketches.Family;
import com.yahoo.sketches.SketchesArgumentException;
import com.yahoo.sketches.Util;
/**
* @author Lee Rhodes
*/
public class DirectUnionTest {
  @Test
  public void checkExactUnionNoOverlap() {
    // Two exact-mode sketches over disjoint key ranges; the direct union
    // must report the exact combined count.
    int lgK = 9; //512
    int k = 1 << lgK;
    int u = k;
    UpdateSketch usk1 = UpdateSketch.builder().build(k);
    UpdateSketch usk2 = UpdateSketch.builder().build(k);
    for (int i=0; i<u/2; i++) usk1.update(i); //256
    for (int i=u/2; i<u; i++) usk2.update(i); //256 no overlap
    assertEquals(u, usk1.getEstimate() + usk2.getEstimate(), 0.0); //exact, no overlap
    Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]);
    Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
    union.update(usk1); //update with heap UpdateSketch
    union.update(usk2); //update with heap UpdateSketch
    testAllCompactForms(union, u, 0.0);
  }
  @Test
  public void checkEstUnionNoOverlap() {
    // Estimation mode: both inputs exceed k, so the union estimate is only
    // required to be within 5% of the true union count.
    int lgK = 12; //4096
    int k = 1 << lgK;
    int u = 4*k;
    UpdateSketch usk1 = UpdateSketch.builder().build(k);
    UpdateSketch usk2 = UpdateSketch.builder().build(k);
    for (int i=0; i<u/2; i++) usk1.update(i); //2*k
    for (int i=u/2; i<u; i++) usk2.update(i); //2*k no overlap
    Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]);
    Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
    union.update(usk1); //update with heap UpdateSketch
    union.update(usk2); //update with heap UpdateSketch
    testAllCompactForms(union, u, 0.05);
  }
  @Test
  public void checkExactUnionWithOverlap() {
    // The second sketch fully contains the first's range, so the union of
    // the two must equal the larger sketch's exact count.
    int lgK = 9; //512
    int k = 1 << lgK;
    int u = k;
    UpdateSketch usk1 = UpdateSketch.builder().build(k);
    UpdateSketch usk2 = UpdateSketch.builder().build(k);
    for (int i=0; i<u/2; i++) usk1.update(i); //256
    for (int i=0; i<u  ; i++) usk2.update(i); //512, 256 overlapped
    assertEquals(u, usk1.getEstimate() + usk2.getEstimate()/2, 0.0); //exact, overlapped
    Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]);
    Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
    union.update(usk1); //update with heap UpdateSketch
    union.update(usk2); //update with heap UpdateSketch
    testAllCompactForms(union, u, 0.0);
  }
  @Test
  public void checkHeapifyExact() {
    // Serializes a direct union and heapifies it back; the round-tripped
    // union must produce the same exact results.
    int lgK = 9; //512
    int k = 1 << lgK;
    int u = k;
    UpdateSketch usk1 = UpdateSketch.builder().build(k);
    UpdateSketch usk2 = UpdateSketch.builder().build(k);
    for (int i=0; i<u/2; i++) usk1.update(i); //256
    for (int i=u/2; i<u; i++) usk2.update(i); //256 no overlap
    assertEquals(u, usk1.getEstimate() + usk2.getEstimate(), 0.0); //exact, no overlap
    Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]);
    Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
    union.update(usk1); //update with heap UpdateSketch
    union.update(usk2); //update with heap UpdateSketch
    testAllCompactForms(union, u, 0.0);
    Union union2 = (Union)SetOperation.heapify(new NativeMemory(union.toByteArray()));
    testAllCompactForms(union2, u, 0.0);
  }
//these parallel the checkHeapifyExact, etc.
  @Test
  public void checkWrapExact() {
    // Same as checkHeapifyExact but wraps the serialized union in place
    // instead of heapifying it.
    int lgK = 9; //512
    int k = 1 << lgK;
    int u = k;
    UpdateSketch usk1 = UpdateSketch.builder().build(k);
    UpdateSketch usk2 = UpdateSketch.builder().build(k);
    for (int i=0; i<u/2; i++) usk1.update(i); //256
    for (int i=u/2; i<u; i++) usk2.update(i); //256 no overlap
    assertEquals(u, usk1.getEstimate() + usk2.getEstimate(), 0.0); //exact, no overlap
    Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]);
    Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
    union.update(usk1); //update with heap UpdateSketch
    union.update(usk2); //update with heap UpdateSketch
    testAllCompactForms(union, u, 0.0);
    Union union2 = Sketches.wrapUnion(new NativeMemory(union.toByteArray()));
    testAllCompactForms(union2, u, 0.0);
  }
  @Test
  public void checkWrapEstNoOverlap() {
    // Estimation mode with one estimating and one exact input; wrapping the
    // serialized union must preserve the (within 5%) estimate.
    int lgK = 12; //4096
    int k = 1 << lgK;
    int u = 4*k;
    UpdateSketch usk1 = UpdateSketch.builder().build(k);   //2k estimating
    UpdateSketch usk2 = UpdateSketch.builder().build(2*k); //2k exact
    for (int i=0; i<u/2; i++) usk1.update(i); //2k
    for (int i=u/2; i<u; i++) usk2.update(i); //2k no overlap, exact
    Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]);
    Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
    union.update(usk1); //update with heap UpdateSketch
    union.update(usk2); //update with heap UpdateSketch, early stop not possible
    testAllCompactForms(union, u, 0.05);
    Union union2 = Sketches.wrapUnion(new NativeMemory(union.toByteArray()));
    testAllCompactForms(union2, u, 0.05);
  }
  @Test
  public void checkWrapEstNoOverlapOrderedIn() {
    // Feeds an ordered compact sketch (enabling the union's early-stop
    // path) plus empty and null inputs; also verifies reset() empties the
    // wrapped union.
    int lgK = 12; //4096
    int k = 1 << lgK;
    int u = 4*k;
    UpdateSketch usk1 = UpdateSketch.builder().build(k);   //2k estimating
    UpdateSketch usk2 = UpdateSketch.builder().build(2*k); //2k exact for early stop test
    for (int i=0; i<u/2; i++) usk1.update(i);  //2k estimating
    for (int i=u/2; i<u; i++) usk2.update(i);  //2k no overlap, exact, will force early stop
    CompactSketch cosk2 = usk2.compact(true, null);
    Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]);
    Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
    union.update(usk1);  //update with heap UpdateSketch
    union.update(cosk2); //update with heap Compact, Ordered input, early stop
    UpdateSketch emptySketch = UpdateSketch.builder().build(k);
    union.update(emptySketch); //updates with empty
    emptySketch = null;
    union.update(emptySketch); //updates with null
    testAllCompactForms(union, u, 0.05);
    Union union2 = Sketches.wrapUnion(new NativeMemory(union.toByteArray()));
    testAllCompactForms(union2, u, 0.05);
    union2.reset();
    assertEquals(union2.getResult(true, null).getEstimate(), 0.0, 0.0);
  }
  @Test
  public void checkWrapEstNoOverlapOrderedDirectIn() {
    // Same as the OrderedIn variant but the ordered compact input lives in
    // off-heap (direct) memory rather than on the heap.
    int lgK = 12; //4096
    int k = 1 << lgK;
    int u = 4*k;
    UpdateSketch usk1 = UpdateSketch.builder().build(k);   //2k estimating
    UpdateSketch usk2 = UpdateSketch.builder().build(2*k); //2k exact for early stop test
    for (int i=0; i<u/2; i++) usk1.update(i);  //2k estimating
    for (int i=u/2; i<u; i++) usk2.update(i);  //2k no overlap, exact, will force early stop
    NativeMemory cskMem2 = new NativeMemory(new byte[usk2.getCurrentBytes(true)]);
    CompactSketch cosk2 = usk2.compact(true, cskMem2); //ordered, loads the cskMem2 as ordered
    Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
    Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
    union.update(usk1);  //updates with heap UpdateSketch
    union.update(cosk2); //updates with direct CompactSketch, ordered, use early stop
    UpdateSketch emptySketch = UpdateSketch.builder().build(k);
    union.update(emptySketch); //updates with empty sketch
    emptySketch = null;
    union.update(emptySketch); //updates with null sketch
    testAllCompactForms(union, u, 0.05);
    Union union2 = Sketches.wrapUnion(new NativeMemory(union.toByteArray()));
    testAllCompactForms(union2, u, 0.05);
    union2.reset();
    assertEquals(union2.getResult(true, null).getEstimate(), 0.0, 0.0);
  }
/** Feeds the union a raw Memory image of an ordered compact sketch instead of a sketch object. */
@Test
public void checkWrapEstNoOverlapOrderedMemIn() {
  int lgK = 12; //4096
  int k = 1 << lgK;
  int u = 4*k;
  UpdateSketch usk1 = UpdateSketch.builder().build(k); //2k estimating
  UpdateSketch usk2 = UpdateSketch.builder().build(2*k); //2k exact for early stop test
  for (int i=0; i<u/2; i++) usk1.update(i); //2k estimating
  for (int i=u/2; i<u; i++) usk2.update(i); //2k no overlap, exact, will force early stop
  NativeMemory cskMem2 = new NativeMemory(new byte[usk2.getCurrentBytes(true)]);
  usk2.compact(true, cskMem2); //ordered, loads the cskMem2 as ordered
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(usk1); //updates with heap UpdateSketch
  union.update(cskMem2); //updates with Memory image of ordered CompactSketch, use early stop
  UpdateSketch emptySketch = UpdateSketch.builder().build(k);
  union.update(emptySketch); //updates with empty sketch
  emptySketch = null;
  union.update(emptySketch); //updates with null sketch
  testAllCompactForms(union, u, 0.05);
  Union union2 = Sketches.wrapUnion(new NativeMemory(union.toByteArray()));
  testAllCompactForms(union2, u, 0.05);
  union2.reset();
  //After reset the wrapped union must report a zero (empty) estimate.
  assertEquals(union2.getResult(true, null).getEstimate(), 0.0, 0.0);
}
/** Same as the OrderedMemIn case but the Memory image holds an UNORDERED compact sketch, so early stop cannot apply. */
@Test
public void checkWrapEstNoOverlapUnorderedMemIn() {
  int lgK = 12; //4096
  int k = 1 << lgK;
  int u = 4*k;
  UpdateSketch usk1 = UpdateSketch.builder().build(k); //2k estimating
  UpdateSketch usk2 = UpdateSketch.builder().build(2*k); //2k exact for early stop test
  for (int i=0; i<u/2; i++) usk1.update(i); //2k estimating
  for (int i=u/2; i<u; i++) usk2.update(i); //2k no overlap, exact, will force early stop
  NativeMemory cskMem2 = new NativeMemory(new byte[usk2.getCurrentBytes(true)]);
  usk2.compact(false, cskMem2); //unordered, loads the cskMem2 as unordered
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(usk1); //updates with heap UpdateSketch
  union.update(cskMem2); //updates with Memory image of UNORDERED CompactSketch; early stop not possible
  UpdateSketch emptySketch = UpdateSketch.builder().build(k);
  union.update(emptySketch); //updates with empty sketch
  emptySketch = null;
  union.update(emptySketch); //updates with null sketch
  testAllCompactForms(union, u, 0.05);
  Union union2 = Sketches.wrapUnion(new NativeMemory(union.toByteArray()));
  testAllCompactForms(union2, u, 0.05);
  union2.reset();
  //After reset the wrapped union must report a zero (empty) estimate.
  assertEquals(union2.getResult(true, null).getEstimate(), 0.0, 0.0);
}
/** Merges four sketches built over contiguous, disjoint key ranges; the union estimate must match the grand total. */
@Test
public void checkMultiUnion() {
  int lgK = 13; //8192
  int k = 1 << lgK;
  UpdateSketch usk1 = UpdateSketch.builder().build(k);
  UpdateSketch usk2 = UpdateSketch.builder().build(k);
  UpdateSketch usk3 = UpdateSketch.builder().build(k);
  UpdateSketch usk4 = UpdateSketch.builder().build(k);
  int v=0; //running base key; ranges never overlap
  int u = 1000000;
  for (int i=0; i<u; i++) usk1.update(i+v);
  v += u;
  u = 26797;
  for (int i=0; i<u; i++) usk2.update(i+v);
  v += u;
  for (int i=0; i<u; i++) usk3.update(i+v);
  v += u;
  for (int i=0; i<u; i++) usk4.update(i+v);
  v += u;
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(usk1); //updates with heap UpdateSketch
  union.update(usk2); //updates with heap UpdateSketch
  union.update(usk3); //updates with heap UpdateSketch
  union.update(usk4); //updates with heap UpdateSketch
  CompactSketch csk = union.getResult(true, null);
  double est = csk.getEstimate();
  //v is now the total number of distinct keys fed across all four sketches.
  assertEquals(est, v, .01*v);
}
/** Unions two compact sketches wrapped from off-heap Memory (one unordered, one ordered to allow early stop). */
@Test
public void checkDirectMemoryIn() {
  int lgK = 12; //4096
  int k = 1 << lgK;
  int u1 = 2*k;
  int u2 = 1024; //smaller exact sketch forces early stop
  int totU = u1+u2;
  UpdateSketch usk1 = UpdateSketch.builder().build(k);
  UpdateSketch usk2 = UpdateSketch.builder().build(k);
  for (int i=0; i<u1; i++) usk1.update(i); //2*k
  for (int i=u1; i<totU; i++) usk2.update(i); //2*k + 1024 no overlap
  NativeMemory skMem1 = new NativeMemory(usk1.compact(false, null).toByteArray()); //unordered image
  NativeMemory skMem2 = new NativeMemory(usk2.compact(true, null).toByteArray()); //ordered image
  CompactSketch csk1 = (CompactSketch)Sketch.wrap(skMem1);
  CompactSketch csk2 = (CompactSketch)Sketch.wrap(skMem2);
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(csk1);
  union.update(csk2);
  CompactSketch cOut = union.getResult(true, null);
  assertEquals(cOut.getEstimate(), totU, .05*k);
}
/** Verifies the union accepts legacy serialization-version-1 sketch images. */
@Test
public void checkSerVer1Handling() {
  int lgK = 12; //4096
  int k = 1 << lgK;
  int u1 = 2*k;
  int u2 = 1024; //smaller exact sketch forces early stop
  int totU = u1+u2;
  UpdateSketch usk1 = UpdateSketch.builder().build(k);
  UpdateSketch usk2 = UpdateSketch.builder().build(k);
  for (int i=0; i<u1; i++) usk1.update(i); //2*k
  for (int i=u1; i<totU; i++) usk2.update(i); //2*k + 1024 no overlap
  NativeMemory skMem1 = new NativeMemory(usk1.compact(true, null).toByteArray());
  NativeMemory skMem2 = new NativeMemory(usk2.compact(true, null).toByteArray());
  //Down-convert the modern (V3) images to the legacy V1 format.
  Memory v1mem1 = convertSerV3toSerV1(skMem1);
  Memory v1mem2 = convertSerV3toSerV1(skMem2);
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(v1mem1);
  union.update(v1mem2);
  CompactSketch cOut = union.getResult(true, null);
  assertEquals(cOut.getEstimate(), totU, .05*k);
}
/** Verifies the union accepts legacy serialization-version-2 sketch images. */
@Test
public void checkSerVer2Handling() {
  int lgK = 12; //4096
  int k = 1 << lgK;
  int u1 = 2*k;
  int u2 = 1024; //smaller exact sketch forces early stop
  int totU = u1+u2;
  UpdateSketch usk1 = UpdateSketch.builder().build(k);
  UpdateSketch usk2 = UpdateSketch.builder().build(k);
  for (int i=0; i<u1; i++) usk1.update(i); //2*k
  for (int i=u1; i<totU; i++) usk2.update(i); //2*k + 1024 no overlap
  NativeMemory skMem1 = new NativeMemory(usk1.compact(true, null).toByteArray());
  NativeMemory skMem2 = new NativeMemory(usk2.compact(true, null).toByteArray());
  //Down-convert the modern (V3) images to the legacy V2 format.
  Memory v2mem1 = convertSerV3toSerV2(skMem1);
  Memory v2mem2 = convertSerV3toSerV2(skMem2);
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(v2mem1);
  union.update(v2mem2);
  CompactSketch cOut = union.getResult(true, null);
  assertEquals(cOut.getEstimate(), totU, .05*k);
}
/** Updates a union with EMPTY sketch images in V1, V2 and V3 formats, and with a null Memory; estimate must stay 0. */
@Test
public void checkUpdateMemorySpecialCases() {
  int lgK = 12; //4096
  int k = 1 << lgK;
  UpdateSketch usk1 = UpdateSketch.builder().build(k); //never updated => empty
  CompactSketch usk1c = usk1.compact(true, null);
  NativeMemory v3mem1 = new NativeMemory(usk1c.toByteArray());
  //Case 1: empty legacy V1 image.
  Memory v1mem1 = convertSerV3toSerV1(v3mem1);
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(v1mem1);
  CompactSketch cOut = union.getResult(true, null);
  assertEquals(cOut.getEstimate(), 0.0, 0.0);
  //Case 2: empty legacy V2 image.
  Memory v2mem1 = convertSerV3toSerV2(v3mem1);
  uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(v2mem1);
  cOut = union.getResult(true, null);
  assertEquals(cOut.getEstimate(), 0.0, 0.0);
  //Case 3: empty current (V3) image.
  uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(v3mem1);
  cOut = union.getResult(true, null);
  assertEquals(cOut.getEstimate(), 0.0, 0.0);
  //Case 4: null Memory must be a harmless no-op.
  uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  v3mem1 = null;
  union.update(v3mem1);
  cOut = union.getResult(true, null);
  assertEquals(cOut.getEstimate(), 0.0, 0.0);
}
/** Updates a union with a non-empty estimating-mode compact image (3 preamble longs); must not throw. */
@Test
public void checkUpdateMemorySpecialCases2() {
  int lgK = 12; //4096
  int k = 1 << lgK;
  int u = 2*k;
  UpdateSketch usk1 = UpdateSketch.builder().build(k);
  for (int i=0; i<u; i++) usk1.update(i); //force prelongs to 3
  CompactSketch usk1c = usk1.compact(true, null);
  NativeMemory v3mem1 = new NativeMemory(usk1c.toByteArray());
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(v3mem1);
}
/** A sketch image with a corrupted serialization-version byte must be rejected with SketchesArgumentException. */
@Test(expectedExceptions = SketchesArgumentException.class)
public void checkMemBadSerVer() {
  int lgK = 12; //4096
  int k = 1 << lgK;
  UpdateSketch usk1 = UpdateSketch.builder().build(k);
  CompactSketch usk1c = usk1.compact(true, null);
  NativeMemory v3mem1 = new NativeMemory(usk1c.toByteArray());
  //corrupt SerVer
  v3mem1.putByte(SER_VER_BYTE, (byte)0);
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(v3mem1); //must throw
}
/** Empty V3 image in an oversized buffer, and an empty V2 image truncated to its preamble, must both be accepted. */
@Test
//where the granted mem is larger than required
public void checkEmptySerVer2and3() {
  int lgK = 12; //4096
  int k = 1 << lgK;
  UpdateSketch usk1 = UpdateSketch.builder().build(k); //never updated => empty
  CompactSketch usk1c = usk1.compact(true, null);
  byte[] skArr = usk1c.toByteArray();
  byte[] skArr2 = Arrays.copyOf(skArr, skArr.length * 2); //double-sized buffer, trailing zeros
  NativeMemory v3mem1 = new NativeMemory(skArr2);
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(v3mem1);
  Memory v2mem1 = convertSerV3toSerV2(v3mem1);
  Memory v2mem2 = new NativeMemory(new byte[16]);
  NativeMemory.copy(v2mem1, 0, v2mem2, 0, 8); //only the first preamble long survives
  uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(v2mem2);
}
//Special DirectUnion cases
/** Re-wrapping the same backing Memory between updates must preserve previously accumulated union state. */
@Test //Himanshu's issue
public void checkDirectWrap() {
  int nomEntries = 16;
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(nomEntries)]);
  SetOperation.builder().initMemory(uMem).buildUnion(nomEntries); //initializes uMem; instance discarded
  UpdateSketch sk1 = UpdateSketch.builder().build(nomEntries);
  sk1.update("a");
  sk1.update("b");
  UpdateSketch sk2 = UpdateSketch.builder().build(nomEntries);
  sk2.update("c");
  sk2.update("d");
  Union union = Sketches.wrapUnion(uMem);
  union.update(sk1);
  union = Sketches.wrapUnion(uMem); //fresh wrap of the same memory
  union.update(sk2);
  CompactSketch sketch = union.getResult(true, null);
  assertEquals(4.0, sketch.getEstimate(), 0.0); //all four keys retained across wraps
}
/** Unordered compact result of an untouched union, written to provided Memory, must be empty. */
@Test
public void checkEmptyUnionCompactResult() {
  int k = 64;
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  Memory mem = new NativeMemory(new byte[Sketch.getMaxCompactSketchBytes(0)]);
  CompactSketch csk = union.getResult(false, mem); //DirectCompactSketch
  assertTrue(csk.isEmpty());
}
/** Ordered compact result of an untouched union, written to provided Memory, must be empty. */
@Test
public void checkEmptyUnionCompactOrderedResult() {
  int k = 64;
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  Memory mem = new NativeMemory(new byte[Sketch.getMaxCompactSketchBytes(0)]);
  CompactSketch csk = union.getResult(true, mem); //DirectCompactSketch
  assertTrue(csk.isEmpty());
}
/** Smoke test: building a memory-backed union must not throw.
 * NOTE(review): despite the name, no toString() is exercised here — confirm whether that call was removed. */
@Test
public void checkUnionMemToString() {
  int k = 64;
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]); //union memory
  SetOperation.builder().initMemory(uMem).buildUnion(k);
}
/** getResult() of a union updated only with an empty sketch yields the minimal 8-byte compact image. */
@Test
public void checkGetResult() {
  int k = 1024;
  UpdateSketch sk = Sketches.updateSketchBuilder().build(); //never updated => empty
  int memBytes = getMaxUnionBytes(k);
  byte[] memArr = new byte[memBytes];
  Memory iMem = new NativeMemory(memArr);
  Union union = Sketches.setOperationBuilder().initMemory(iMem).buildUnion(k);
  union.update(sk);
  CompactSketch csk = union.getResult();
  assertEquals(csk.getCurrentBytes(true), 8); //empty compact sketch = single preamble long
}
/** Exercises every primitive/array update overload, including null and empty inputs (no-ops).
 * Expected distinct count is 7: 1L, 1.5, 0.0 (with -0.0 canonicalized to it), "String",
 * "Byte Array" bytes, the int[] and the long[]. */
@Test
public void checkPrimitiveUpdates() {
  int k = 32;
  Memory uMem = new NativeMemory(new byte[getMaxUnionBytes(k)]);
  Union union = SetOperation.builder().initMemory(uMem).buildUnion(k);
  union.update(1L);
  union.update(1.5); //#1 double
  union.update(0.0);
  union.update(-0.0);
  String s = null;
  union.update(s); //null string
  s = "";
  union.update(s); //empty string
  s = "String";
  union.update(s); //#2 actual string
  byte[] byteArr = null;
  union.update(byteArr); //null byte[]
  byteArr = new byte[0];
  union.update(byteArr); //empty byte[]
  byteArr = "Byte Array".getBytes(UTF_8);
  union.update(byteArr); //#3 actual byte[]
  int[] intArr = null;
  union.update(intArr); //null int[]
  intArr = new int[0];
  union.update(intArr); //empty int[]
  int[] intArr2 = { 1, 2, 3, 4, 5 };
  union.update(intArr2); //#4 actual int[]
  long[] longArr = null;
  union.update(longArr); //null long[]
  longArr = new long[0];
  union.update(longArr); //empty long[]
  long[] longArr2 = { 6, 7, 8, 9 };
  union.update(longArr2); //#5 actual long[]
  CompactSketch comp = union.getResult();
  double est = comp.getEstimate();
  boolean empty = comp.isEmpty();
  assertEquals(est, 7.0, 0.0);
  assertFalse(empty);
}
/** A SetOperation built with Family.UNION must report that family. */
@Test
public void checkGetFamily() {
  int k = 16;
  Memory mem = new NativeMemory(new byte[k*16 +32]);
  SetOperation setOp = new SetOperationBuilder().initMemory(mem).build(k,Family.UNION);
  assertEquals(setOp.getFamily(), Family.UNION);
}
/** Corrupting the preamble-longs field of a union image must make wrapInstance throw. */
@Test(expectedExceptions = SketchesArgumentException.class)
public void checkPreambleLongsCorruption() {
  int k = 16;
  Memory mem = new NativeMemory(new byte[k*16 +32]);
  Object memObj = mem.array(); //may be null
  long memAdd = mem.getCumulativeOffset(0L);
  SetOperation setOp = new SetOperationBuilder().initMemory(mem).build(k,Family.UNION);
  println(setOp.toString());
  //Sanity-check the preamble before corrupting it.
  int familyID = PreambleUtil.extractFamilyID(memObj, memAdd);
  int preLongs = PreambleUtil.extractPreLongs(memObj, memAdd);
  assertEquals(familyID, Family.UNION.getID());
  assertEquals(preLongs, Family.UNION.getMaxPreLongs());
  PreambleUtil.insertPreLongs(memObj, memAdd, 3); //Corrupt with 3; correct value is 4
  DirectQuickSelectSketch.wrapInstance(mem, Util.DEFAULT_UPDATE_SEED); //must throw
}
/** Wrapping a memory region that only holds the 32-byte preamble (no data) must throw. */
@Test(expectedExceptions = SketchesArgumentException.class)
public void checkSizeTooSmall() {
  int k = 16;
  Memory mem = new NativeMemory(new byte[k*16 +32]); //initialized
  SetOperation setOp = new SetOperationBuilder().initMemory(mem).build(k,Family.UNION);
  println(setOp.toString());
  Memory mem2 = new NativeMemory(new byte[32]); //for just preamble
  NativeMemory.copy(mem, 0, mem2, 0, 32); //too small
  DirectQuickSelectSketch.wrapInstance(mem2, Util.DEFAULT_UPDATE_SEED); //must throw
}
/** Emits the class name via the local println helper (a no-op unless debugging is enabled). */
@Test
public void printlnTest() {
  println("PRINTING: "+this.getClass().getName());
}
/**
 * Local debug print helper; output is disabled for normal test runs.
 * Uncomment the body line to enable console output while debugging.
 * @param s value to print
 */
static void println(String s) {
  //System.out.println(s); //Disable here
}
}
| pjain1/sketches-core | sketches/src/test/java/com/yahoo/sketches/theta/DirectUnionTest.java | Java | apache-2.0 | 24,164 |
"""Unit tests for the OWASP ZAP security warnings collector."""
from collector_utilities.functions import md5_hash
from .base import OWASPZAPTestCase
class OWASPZAPSecurityWarningsTest(OWASPZAPTestCase):
    """Unit tests for the OWASP ZAP security warnings collector."""

    # Metric under test; the base test case uses this to build the collector.
    METRIC_TYPE = "security_warnings"

    # Minimal ZAP XML report: one alert type with two instances at different URIs.
    OWASP_ZAP_XML = """<?xml version="1.0"?>
<OWASPZAPReport version="2.7.0" generated="Thu, 28 Mar 2019 13:20:20">
<site name="http://www.hackazon.com" host="www.hackazon.com" port="80" ssl="false">
<alerts>
<alertitem>
<pluginid>10021</pluginid>
<alert>X-Content-Type-Options Header Missing</alert>
<name>X-Content-Type-Options Header Missing</name>
<riskcode>1</riskcode>
<confidence>2</confidence>
<riskdesc>Low (Medium)</riskdesc>
<desc><p>The Anti-MIME-Sniffing header X-Content-Type-Options was not set to 'nosniff'.</desc>
<instances>
<instance>
<uri>http://www.hackazon.com/products_pictures/Ray_Ban.jpg</uri>
<method>GET</method>
<param>X-Content-Type-Options</param>
</instance>
<instance>
<uri>http://www.hackazon.com/products_pictures/How_to_Marry_a_Millionaire.jpg</uri>
<method>GET</method>
<param>X-Content-Type-Options</param>
</instance>
</instances>
<count>759</count>
<solution><p>Ensure that the application/web server sets the Content-Type header appropriately</solution>
<otherinfo><p>This issue still applies to error type pages</otherinfo>
<reference><p>http://msdn.microsoft.com/en-us/library/ie/gg622941%28v</reference>
<cweid>16</cweid>
<wascid>15</wascid>
<sourceid>3</sourceid>
</alertitem>
</alerts>
</site>
</OWASPZAPReport>"""

    # Expected attributes of the single alert type in the fixture above.
    WARNING_NAME = "X-Content-Type-Options Header Missing"
    WARNING_DESCRIPTION = "The Anti-MIME-Sniffing header X-Content-Type-Options was not set to 'nosniff'."
    WARNING_RISK = "Low (Medium)"

    async def test_alert_instances(self):
        """Test that the number of alert instances is returned."""
        response = await self.collect(get_request_text=self.OWASP_ZAP_XML)
        url1 = "http://www.hackazon.com/products_pictures/Ray_Ban.jpg"
        url2 = "http://www.hackazon.com/products_pictures/How_to_Marry_a_Millionaire.jpg"
        # Entity keys are the md5 of "<name>:<pluginid>:<cweid>:<wascid>:<method>:<uri>".
        expected_entities = [
            dict(
                key=md5_hash(f"X-Content-Type-Options Header Missing:10021:16:15:GET:{url1}"),
                name=self.WARNING_NAME,
                description=self.WARNING_DESCRIPTION,
                location=f"GET {url1}",
                uri=url1,
                risk=self.WARNING_RISK,
            ),
            dict(
                key=md5_hash(f"X-Content-Type-Options Header Missing:10021:16:15:GET:{url2}"),
                name=self.WARNING_NAME,
                description=self.WARNING_DESCRIPTION,
                location=f"GET {url2}",
                uri=url2,
                risk=self.WARNING_RISK,
            ),
        ]
        self.assert_measurement(response, value="2", entities=expected_entities)

    async def test_alert_types(self):
        """Test that the number of alert types is returned."""
        # With "alert types" the two instances collapse into one entity and the
        # key omits the method/uri components.
        self.set_source_parameter("alerts", "alert types")
        response = await self.collect(get_request_text=self.OWASP_ZAP_XML)
        expected_entities = [
            dict(
                key=md5_hash("X-Content-Type-Options Header Missing:10021:16:15"),
                name=self.WARNING_NAME,
                description=self.WARNING_DESCRIPTION,
                risk=self.WARNING_RISK,
            ),
        ]
        self.assert_measurement(response, value="1", entities=expected_entities)

    async def test_variable_url_regexp(self):
        """Test that parts of URLs can be ignored."""
        # Both fixture URIs match the regexp, so they collapse to the same
        # stable URL and a single entity remains.
        self.set_source_parameter("variable_url_regexp", ["[A-Za-z_]+.jpg"])
        response = await self.collect(get_request_text=self.OWASP_ZAP_XML)
        stable_url = "http://www.hackazon.com/products_pictures/variable-part-removed"
        expected_entities = [
            dict(
                key=md5_hash(f"X-Content-Type-Options Header Missing:10021:16:15:GET:{stable_url}"),
                name=self.WARNING_NAME,
                uri=stable_url,
                description=self.WARNING_DESCRIPTION,
                location=f"GET {stable_url}",
                risk=self.WARNING_RISK,
            )
        ]
        self.assert_measurement(response, value="1", entities=expected_entities)
| ICTU/quality-time | components/collector/tests/source_collectors/owasp_zap/test_security_warnings.py | Python | apache-2.0 | 5,009 |
package com.gmail.thelimeglass.Expressions;
import java.util.ArrayList;
import org.bukkit.entity.Entity;
import org.bukkit.event.Event;
import org.eclipse.jdt.annotation.Nullable;
import com.gmail.thelimeglass.Utils.Annotations.Config;
import com.gmail.thelimeglass.Utils.Annotations.PropertyType;
import com.gmail.thelimeglass.Utils.Annotations.Syntax;
import ch.njol.skript.lang.Expression;
import ch.njol.skript.lang.ExpressionType;
import ch.njol.skript.lang.SkriptParser.ParseResult;
import ch.njol.skript.lang.util.SimpleExpression;
import ch.njol.util.Kleenean;
@Syntax({"[the] [entity] [number] id (of|from) %entities%", "%entities%'s [entity] [number] id"})
@Config("EntityID")
@PropertyType(ExpressionType.COMBINED)
public class ExprEntityID extends SimpleExpression<Number>{
private Expression<Entity> entity;
@Override
public Class<? extends Number> getReturnType() {
  //Entity ids are exposed to scripts as generic Numbers.
  return Number.class;
}
@Override
public boolean isSingle() {
  //One id is produced per source entity, so this expression is single exactly
  //when the source entity expression is single. The previous hard-coded
  //"return true" mis-reported plurality for %entities% inputs even though
  //get(Event) returns one id per entity.
  return entity.isSingle();
}
@SuppressWarnings("unchecked")
@Override
public boolean init(Expression<?>[] e, int arg1, Kleenean arg2, ParseResult arg3) {
  //Capture the %entities% expression; always accepts the parse.
  entity = (Expression<Entity>) e[0];
  return true;
}
@Override
public String toString(@Nullable Event e, boolean arg1) {
  //Returns the raw syntax pattern rather than a rendered description of the
  //actual source expression.
  return "[the] [entity] [number] id (of|from) %entities%";
}
@Override
@Nullable
protected Number[] get(Event e) {
  //Collect the numeric id of every entity produced by the source expression,
  //preserving the source order.
  Entity[] sourceEntities = entity.getAll(e);
  Number[] ids = new Number[sourceEntities.length];
  for (int i = 0; i < sourceEntities.length; i++) {
    ids[i] = sourceEntities[i].getEntityId();
  }
  return ids;
}
} | TheLimeGlass/Skellett | src/main/java/com/gmail/thelimeglass/Expressions/ExprEntityID.java | Java | apache-2.0 | 1,548 |
package com.cardpay.pccredit.intopieces.model;
import java.util.Date;
import com.wicresoft.jrad.base.database.model.BusinessModel;
//Shared by the quota-freeze workflow and the credit-limit info maintenance flow.
/**
 * Model holding the customer, contract, and workflow attributes of a quota
 * freeze request. Plain accessors only; no business logic.
 */
public class QuotaFreezeInfo extends BusinessModel{
  private static final long serialVersionUID = 1L;
  private String clientNo; //customer number
  private Date startDate; //freeze period start
  private Date endDate; //freeze period end
  private String globalType; //presumably the workflow global-id type — verify against workflow engine usage
  private String globalId;
  private String retContno; //presumably the returned contract number — verify against caller
  private String loanStatus;
  private String productId;
  private String clientName;
  private String contractAmt;
  private String cardNo;
  private String circleId;
  private String processId; //workflow process instance id
  private String processStatus;
  private String operateType;
  private String nodeName; //current workflow node name
  private String applyReason;
  public String getClientNo() {
    return clientNo;
  }
  public void setClientNo(String clientNo) {
    this.clientNo = clientNo;
  }
  public Date getStartDate() {
    return startDate;
  }
  public void setStartDate(Date startDate) {
    this.startDate = startDate;
  }
  public Date getEndDate() {
    return endDate;
  }
  public void setEndDate(Date endDate) {
    this.endDate = endDate;
  }
  public String getGlobalType() {
    return globalType;
  }
  public void setGlobalType(String globalType) {
    this.globalType = globalType;
  }
  public String getGlobalId() {
    return globalId;
  }
  public void setGlobalId(String globalId) {
    this.globalId = globalId;
  }
  public String getRetContno() {
    return retContno;
  }
  public void setRetContno(String retContno) {
    this.retContno = retContno;
  }
  public String getLoanStatus() {
    return loanStatus;
  }
  public void setLoanStatus(String loanStatus) {
    this.loanStatus = loanStatus;
  }
  public String getProductId() {
    return productId;
  }
  public void setProductId(String productId) {
    this.productId = productId;
  }
  public String getClientName() {
    return clientName;
  }
  public void setClientName(String clientName) {
    this.clientName = clientName;
  }
  public String getContractAmt() {
    return contractAmt;
  }
  public void setContractAmt(String contractAmt) {
    this.contractAmt = contractAmt;
  }
  public String getCardNo() {
    return cardNo;
  }
  public void setCardNo(String cardNo) {
    this.cardNo = cardNo;
  }
  public String getProcessId() {
    return processId;
  }
  public void setProcessId(String processId) {
    this.processId = processId;
  }
  public String getCircleId() {
    return circleId;
  }
  public void setCircleId(String circleId) {
    this.circleId = circleId;
  }
  public String getProcessStatus() {
    return processStatus;
  }
  public void setProcessStatus(String processStatus) {
    this.processStatus = processStatus;
  }
  public String getOperateType() {
    return operateType;
  }
  public void setOperateType(String operateType) {
    this.operateType = operateType;
  }
  public String getNodeName() {
    return nodeName;
  }
  public void setNodeName(String nodeName) {
    this.nodeName = nodeName;
  }
  public String getApplyReason() {
    return applyReason;
  }
  public void setApplyReason(String applyReason) {
    this.applyReason = applyReason;
  }
}
| JohnCny/PCCREDIT_QZ | src/java/com/cardpay/pccredit/intopieces/model/QuotaFreezeInfo.java | Java | apache-2.0 | 3,171 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
using System.IO;
using WpfApplicationLauncher.StaticData;
using System.Collections.Concurrent;
namespace WpfApplicationLauncher.DataSourcing.FileSearchProvider
{
/// <summary>
/// Simple in-memory file database with real-time updates.
/// </summary>
/// <summary>
/// Simple in-memory file database with real-time updates. The database is
/// seeded by a recursive scan of the configured base directories and then kept
/// current through <see cref="FileSystemWatcher"/> events.
/// </summary>
public class FileDB : IDisposable, ISearchProvider
{
    #region Static

    /// <summary>
    /// Result codes for incremental database updates.
    /// NOTE(review): not referenced by any member of this class; kept in case
    /// external or generated code relies on it — confirm before removal.
    /// </summary>
    private enum FileDBDeltaResult
    {
        Success,
        FileAlreadyExists,
        FileDoesNotExist,
        Failure
    }

    /// <summary>
    /// The last database handed out by <see cref="CreateFileDB"/>.
    /// NOTE(review): static mutable state with no locking — assumes all
    /// CreateFileDB calls happen on a single (UI) thread; confirm.
    /// </summary>
    private static FileDB lastDB = null;

    /// <summary>
    /// ISearchProvider helper. Shared between FileDB and FileDBSearchQuery to search FileDB lists.
    /// A file matches only when every search word occurs (case-insensitively) in its full path.
    /// </summary>
    /// <param name="provider">ISearchProvider providing the search.</param>
    /// <param name="sourceData">Data to search.</param>
    /// <param name="words">Search words.</param>
    /// <returns>FileDBSearchQuery representing relevent search results.</returns>
    internal static FileDBSearchQuery Search(FileDB provider, ICollection<StaticFileInfo> sourceData, params string[] words)
    {
        return new FileDBSearchQuery(
            provider,
            sourceData.Where(fi => words.All(w => fi.FullName.ToLower().IndexOf(w.ToLower()) > -1)).ToList());
    }

    /// <summary>
    /// Factory to ensure that FileDBs are not unnecessarily created.
    /// Only one FileDB worth of history will be maintained as only one should be used at a time.
    /// The cached DB is reused only when the requested paths and extensions are
    /// already covered (subsets) by it; otherwise it is disposed and rebuilt.
    /// </summary>
    /// <param name="cfg">Application configuration to base the DB on.</param>
    /// <returns>Either the existing or a new FileDB.</returns>
    public static FileDB CreateFileDB(Configuration cfg)
    {
        HashSet<string> fileExts = new HashSet<string>(cfg.SearchExtensions);
        HashSet<StaticDirectoryInfo> paths = new HashSet<StaticDirectoryInfo>(cfg.SearchPaths.Select(s => new StaticDirectoryInfo(s)));
        if (lastDB != null
            && !(paths.IsSubsetOf(lastDB.baseDirs)
            && fileExts.IsSubsetOf(lastDB.extensions)))
        {
            lastDB.Dispose();
            lastDB = null;
        }
        if (lastDB == null)
        {
            lastDB = new FileDB(paths, fileExts);
        }
        return lastDB;
    }

    #endregion
    #region Variables

    protected List<FileSystemWatcher> watchers = new List<FileSystemWatcher>();
    protected ConcurrentDictionary<string, StaticFileInfo> database = new ConcurrentDictionary<string, StaticFileInfo>();
    protected HashSet<StaticDirectoryInfo> baseDirs = null;
    protected HashSet<string> extensions = null;
    protected bool initialScanDone = false; //set after the first full scan; not read by this class

    #endregion
    #region Properties

    public String Title
    {
        get
        {
            return "File System Database";
        }
    }

    #endregion
    #region Construction

    /// <summary>
    /// Single, private constructor.
    /// </summary>
    /// <param name="baseDirList">Collection of base directories to database.</param>
    /// <param name="extensionList">Collection of file extensions to match within the base directories; null matches all files.</param>
    private FileDB(IEnumerable<StaticDirectoryInfo> baseDirList, IEnumerable<string> extensionList = null)
    {
        if (baseDirList == null)
            throw new ArgumentException("baseDirList must be non-null");
        this.baseDirs = new HashSet<StaticDirectoryInfo>(baseDirList);
        if (extensionList != null)
        {
            this.extensions = new HashSet<string>(extensionList);
        }
        else
        {
            //No extension filter supplied: "*" means match everything.
            this.extensions = new HashSet<string>();
            this.extensions.Add("*");
        }
        ProcessAndWatchAll();
    }

    public void Dispose()
    {
        if (watchers != null)
        {
            foreach (FileSystemWatcher fsw in watchers)
            {
                fsw.EnableRaisingEvents = false;
                fsw.Dispose();
            }
            watchers.Clear();
            watchers = null;
        }
    }

    ~FileDB()
    {
        //NOTE(review): the finalizer touches managed members (the watcher
        //list); consider the full dispose pattern with GC.SuppressFinalize.
        Dispose();
    }

    #endregion
    #region Event Handlers

    /// <summary>
    /// Handle a file system rename event.
    /// </summary>
    void FileRenamedHandler(object sender, RenamedEventArgs e)
    {
        ReplaceDatabaseEntry(e.OldFullPath, e.FullPath);
    }

    /// <summary>
    /// Handle a file system delete event.
    /// </summary>
    void FileDeletedHandler(object sender, FileSystemEventArgs e)
    {
        RemoveDatabaseEntry(e.FullPath);
    }

    /// <summary>
    /// Handle a file system create event.
    /// </summary>
    void FileCreatedHandler(object sender, FileSystemEventArgs e)
    {
        if (SatisfiesCriteria(e.FullPath))
        {
            StaticFileInfo fi = new StaticFileInfo(e.FullPath);
            database[fi.FullName] = fi;
        }
    }

    #endregion
    #region Methods

    /// <summary>
    /// Initialization is part of construction as this is not a Plugin.
    /// </summary>
    /// <returns>true</returns>
    bool ISearchProvider.Initialize()
    {
        return true;
    }

    /// <summary>
    /// Perform a query on the file database.
    /// </summary>
    /// <param name="words">Search words to use.</param>
    /// <returns>Search results.</returns>
    ISearchQuery ISearchProvider.Query(params string[] words)
    {
        return Search(this, this.database.Values, words);
    }

    /// <summary>
    /// Start scan of all files beneath the base dirs and watch for file system changes.
    /// Non-existent directories are pruned before scanning.
    /// </summary>
    private void ProcessAndWatchAll()
    {
        baseDirs.RemoveWhere(d => d == null);
        baseDirs.RemoveWhere(d => !d.Exists);
        if (baseDirs.Count < 1)
            return;
        foreach (StaticDirectoryInfo dir in baseDirs)
        {
            ProcessDir(dir);
        }
        //Watchers are only enabled after the initial scan so scan results and
        //events do not interleave.
        foreach (FileSystemWatcher watcher in watchers)
        {
            watcher.EnableRaisingEvents = true;
        }
        this.initialScanDone = true;
    }

    /// <summary>
    /// Scan base directory based on configured file extensions.
    /// </summary>
    /// <param name="dir">Base directory info.</param>
    /// <returns>Update watcher for this base dir.</returns>
    private FileSystemWatcher ProcessDir(StaticDirectoryInfo dir)
    {
        extensions
            .SelectMany(ext => dir.GetFiles("*" + ext, SearchOption.AllDirectories))
            .ToList()
            .ForEach(fi => database[fi.FullName] = fi);
        FileSystemWatcher watcher = new FileSystemWatcher(dir.FullName);
        watcher.IncludeSubdirectories = true;
        watcher.Created += FileCreatedHandler;
        watcher.Deleted += FileDeletedHandler;
        watcher.Renamed += FileRenamedHandler;
        watchers.Add(watcher);
        return watcher;
    }

    /// <summary>
    /// Replace an existing file entry in the file db after a rename.
    /// The new path is only added when it still satisfies the extension
    /// criteria. (Previously the add was short-circuited whenever the old path
    /// was not tracked, and non-matching new names were added unconditionally.)
    /// </summary>
    /// <param name="oldEntry">Old path.</param>
    /// <param name="newEntry">New path.</param>
    /// <returns>True or false that both remove and add were successful.</returns>
    private bool ReplaceDatabaseEntry(string oldEntry, string newEntry)
    {
        StaticFileInfo fi;
        bool removed = database.TryRemove(oldEntry, out fi);
        bool added = false;
        if (SatisfiesCriteria(newEntry))
        {
            added = database.TryAdd(newEntry, new StaticFileInfo(newEntry));
        }
        return removed && added;
    }

    /// <summary>
    /// Remove an entry from the file db.
    /// </summary>
    /// <param name="entry">File path.</param>
    /// <returns>True of false that it was removed.</returns>
    private bool RemoveDatabaseEntry(string entry)
    {
        StaticFileInfo fi;
        return database.TryRemove(entry, out fi);
    }

    /// <summary>
    /// Ensure that file updates satisfy the file extension criteria.
    /// Directories never match. The "*" wildcard (no extension filter) matches
    /// every file; previously it was compared with EndsWith and never matched.
    /// </summary>
    /// <param name="path">File path.</param>
    /// <returns>Should path be in the database.</returns>
    private bool SatisfiesCriteria(string path)
    {
        if (Directory.Exists(path))
        {
            return false;
        }
        else
        {
            if (extensions != null)
            {
                if (extensions.Any(ext => ext == "*" || path.EndsWith(ext)))
                {
                    return true;
                }
            }
            else
            {
                return true;
            }
        }
        return false;
    }

    #endregion
}
}
| tlmorgen/WpfApplicationLauncher | WpfApplicationLauncher/WpfApplicationLauncher/DataSourcing/FileSearchProvider/FileDB.cs | C# | apache-2.0 | 9,370 |
package springfox.documentation.grails;
public class DefaultGeneratedClassNamingStrategy implements GeneratedClassNamingStrategy {
  @Override
  public String name(Class clazz) {
    //Insert a "generated" sub-package between the package name and the simple
    //class name; identical result to String.format("%s.generated.%s", ...).
    return clazz.getPackage().getName() + ".generated." + clazz.getSimpleName();
  }
}
| springfox/springfox-grails-integration | springfox-grails/src/main/java/springfox/documentation/grails/DefaultGeneratedClassNamingStrategy.java | Java | apache-2.0 | 284 |
/**
* RDeviceOutput.java 2010/09/06
*/
package com.ycsoft.beans.device;
import java.io.Serializable;
import java.util.Date;
import com.ycsoft.commons.constants.DictKey;
import com.ycsoft.commons.store.MemoryDict;
import com.ycsoft.daos.config.POJO;
/**
 * RDeviceOutput -> R_DEVICE_OUTPUT mapping.
 * One row of the device stock-out (output) table.
 */
@POJO(
	tn="R_DEVICE_OUTPUT",
	sn="",
	pk="")
public class RDeviceOutput extends RDeviceDoneDetail implements Serializable {
	// RDeviceOutput all properties

	/**
	 * Serialization version.
	 */
	private static final long serialVersionUID = -2290684723998068616L;
	private Integer device_done_code ;
	private String output_no ;
	private String depot_id ;
	private String supplier_id ;
	private String output_type ;
	private Date create_time ;
	private String optr_id ;
	private String remark ;
	private String supplier_name ;
	// Display text for output_type, resolved from the dictionary inside
	// setOutput_type(); not a persistent column of its own.
	private String output_type_text;

	/**
	 * default empty constructor
	 */
	public RDeviceOutput() {}

	// device_done_code getter and setter
	public Integer getDevice_done_code(){
		return device_done_code ;
	}

	public void setDevice_done_code(Integer device_done_code){
		this.device_done_code = device_done_code ;
	}

	// output_no getter and setter
	public String getOutput_no(){
		return output_no ;
	}

	public void setOutput_no(String output_no){
		this.output_no = output_no ;
	}

	// depot_id getter and setter
	public String getDepot_id(){
		return depot_id ;
	}

	public void setDepot_id(String depot_id){
		this.depot_id = depot_id ;
	}

	// supplier_id getter and setter
	public String getSupplier_id(){
		return supplier_id ;
	}

	public void setSupplier_id(String supplier_id){
		this.supplier_id = supplier_id ;
	}

	// output_type getter and setter
	public String getOutput_type(){
		return output_type ;
	}

	public void setOutput_type(String output_type){
		// Side effect: also refreshes the human-readable text from the
		// DEVICE_OUT_TYPE dictionary before storing the raw code.
		output_type_text = MemoryDict.getDictName(DictKey.DEVICE_OUT_TYPE, output_type);
		this.output_type = output_type ;
	}

	// create_time getter and setter
	public Date getCreate_time(){
		return create_time ;
	}

	public void setCreate_time(Date create_time){
		this.create_time = create_time ;
	}

	// optr_id getter and setter
	public String getOptr_id(){
		return optr_id ;
	}

	public void setOptr_id(String optr_id){
		this.optr_id = optr_id ;
	}

	// remark getter and setter
	public String getRemark(){
		return remark ;
	}

	public void setRemark(String remark){
		this.remark = remark ;
	}

	/**
	 * @return the supplier_name
	 */
	public String getSupplier_name() {
		return supplier_name;
	}

	/**
	 * @param supplier_name the supplier_name to set
	 */
	public void setSupplier_name(String supplier_name) {
		this.supplier_name = supplier_name;
	}

	/**
	 * @return the output_type_text (derived display text; see setOutput_type)
	 */
	public String getOutput_type_text() {
		return output_type_text;
	}
}
} | leopardoooo/cambodia | ycsoft-lib/src/main/java/com/ycsoft/beans/device/RDeviceOutput.java | Java | apache-2.0 | 2,913 |
package com.natpryce.jnirn.examples;
/**
 * Example class declaring several native methods, including one whose
 * signature uses nested classes for its argument and return types.
 */
public class AnotherClassWithNativeMethods {
    // Nested marker types used in the native method signature below.
    public static class ArgClass {}
    public static class RetClass {}

    public native int nativeMethodOne();
    public native int nativeMethodTwo();
    public native RetClass nativeMethodThree(ArgClass a);
}
| npryce/jnirn | test-input/com/natpryce/jnirn/examples/AnotherClassWithNativeMethods.java | Java | apache-2.0 | 299 |
package jdepend.metadata;
import java.util.ArrayList;
import java.util.List;
public class CandidateUtil {

	/** Separator between the place part and the name part of an id. */
	public final static String IDDecollator = "$";

	/**
	 * Builds an id from a place and a name.
	 *
	 * @param place optional place prefix; when null the id is the bare name
	 * @param name the candidate name
	 * @return "place$name" or just "name"
	 */
	public final static String getId(String place, String name) {
		return place == null ? name : place + IDDecollator + name;
	}

	/** Builds the id of a candidate from its place and name. */
	public final static String getId(Candidate candidate) {
		return getId(candidate.getPlace(), candidate.getName());
	}

	/**
	 * @param id a candidate id
	 * @return the place part, or null when the id carries no place
	 */
	public final static String getPlace(String id) {
		return containPlace(id) ? id.substring(0, id.indexOf(IDDecollator)) : null;
	}

	/**
	 * @param id a candidate id
	 * @return the name part (the whole id when it carries no place)
	 */
	public final static String getName(String id) {
		return containPlace(id) ? id.substring(id.indexOf(IDDecollator) + 1) : id;
	}

	/** Maps a list of ids to their name parts, preserving order. */
	public final static List<String> getNames(List<String> ids) {
		// Presize to avoid intermediate growth.
		List<String> names = new ArrayList<String>(ids.size());
		for (String id : ids) {
			names.add(getName(id));
		}
		return names;
	}

	/** @return true when the id contains the place/name separator. */
	public final static boolean containPlace(String id) {
		return id.contains(IDDecollator);
	}
}
| jdepend/cooper | cooper-source/cooper-metadata/src/main/java/jdepend/metadata/CandidateUtil.java | Java | apache-2.0 | 1,106 |
define(['backbone', 'backbone.paginator'], function(Backbone, PageableCollection) {
    // One raw log line; the text itself is the model id, which also
    // de-duplicates identical lines within the collection.
    var LogLine = Backbone.Model.extend({
        idAttribute: 'LINE',
    })
    
    // Infinitely-pageable collection of log lines for one beamline that
    // re-fetches itself every 5 seconds while "running".
    return PageableCollection.extend({
        model: LogLine,
        url: function() { return '/status/log/'+this.bl },
        mode: 'infinite',
        
        initialize: function(attrs, options){
            // options.bl: beamline identifier used to build the url.
            this.bl = options.bl
            this.running = true
            if (options && options.running == false) this.running = false
            this.refresh_thread = null
        },
        
        // Cancel the poll timer and stop scheduling further fetches.
        stop: function() {
            clearTimeout(this.refresh_thread)
            this.running = false
        },
        
        parseRecords: function(r, options) {
            // Re-arm the poll timer (only while running).
            clearTimeout(this.refresh_thread)
            if (this.running) this.refresh_thread = setTimeout(this.fetch.bind(this), 5000)
            
            // The server payload is reversed before wrapping each raw line —
            // presumably the server returns newest-first; confirm against the
            // /status/log endpoint.
            // NOTE(review): relies on a global '_' (underscore/lodash) loaded
            // elsewhere — confirm it is always present.
            var lines = []
            _.each(r.reverse(), function(l) {
                lines.push({ LINE: l })
            })
            return lines
        },
    })
})
function manualInput(){
    // Show the manual-entry panel only while its checkbox is ticked.
    const isChecked = document.getElementById("checkBoxManual").checked;
    document.getElementById("manual").style.display = isChecked ? "block" : "none";
}
function printArray(arrayString){
    // Concatenate each element followed by a single space (same accumulation
    // as before, including the trailing space), log it, then show it in the
    // "result" element prefixed with "Result: ".
    const result = arrayString.map(function(item){ return item + " "; }).join("");
    console.log(result);
    const resultID = document.getElementById("result");
    resultID.innerHTML = "Result: " + result;
    resultID.style.display = "block";
}
function animateBubbleSort(arrayString){
    // Bubble sort in place, then hand the array to printArray for display.
    // Elements are compared numerically so string input such as "10" and "9"
    // sorts by value rather than lexicographically ("10" < "9" as strings).
    const length = arrayString.length;
    for(let i = 0; i < length; i++){
        // Count swaps per pass; a pass with no swaps means the array is
        // already sorted. (The original compared the never-reset counter to
        // < 0, so the early exit was dead code.)
        let swapsThisPass = 0;
        // After pass i the last i elements are in place; skip them.
        for(let j = 1; j < length - i; j++){
            if(Number(arrayString[j-1]) > Number(arrayString[j])){
                const temp = arrayString[j-1];
                arrayString[j-1] = arrayString[j];
                arrayString[j] = temp;
                swapsThisPass++;
            }
        }
        if(swapsThisPass === 0) break; // early exit: sorted
    }
    printArray(arrayString);
}
function startAlgorithm(){
    // Run the selected algorithm on either the user-supplied numbers or a
    // randomly generated array.
    const bubbleSort = document.getElementById("bubbleSort").checked;
    if(bubbleSort !== true){
        console.log("Please select a algorithm");
        return;
    }
    const manualinput = document.getElementById("checkBoxManual").checked;
    if(manualinput === true){
        // Space-separated numbers typed by the user.
        const numberString = document.getElementById("numberString").value.trim();
        animateBubbleSort(numberString.split(" "));
    }else{
        const length = document.getElementById("length").value;
        // Build the array explicitly. The original "let array = [length]"
        // created a ONE-element array containing the length itself, which
        // leaked through as a junk element whenever the loop body did not
        // overwrite it.
        const array = [];
        for(let i = 0; i < length; i++){
            array.push(Math.floor(Math.random()*50));
        }
        animateBubbleSort(array);
    }
}
using MassTransit;
using Microsoft.Practices.Unity;
using Miles.MassTransit.AspNet;
using Miles.MassTransit.MessageDispatch;
using Miles.Sample.Infrastructure.Unity;
using System;
namespace Miles.Sample.Web.App_Start
{
    /// <summary>
    /// Specifies the Unity configuration for the main container.
    /// </summary>
    public class UnityConfig
    {
        #region Unity Container
        // Lazily created singleton container; RegisterTypes runs exactly once,
        // on first access.
        private static Lazy<IUnityContainer> container = new Lazy<IUnityContainer>(() =>
        {
            var container = new UnityContainer();
            RegisterTypes(container);
            return container;
        });

        /// <summary>
        /// Gets the configured Unity container.
        /// </summary>
        public static IUnityContainer GetConfiguredContainer()
        {
            return container.Value;
        }
        #endregion

        /// <summary>Registers the type mappings with the Unity container.</summary>
        /// <param name="container">The unity container to configure.</param>
        /// <remarks>There is no need to register concrete types such as controllers or API controllers (unless you want to
        /// change the defaults), as Unity allows resolving a concrete type even if it was not previously registered.</remarks>
        public static void RegisterTypes(IUnityContainer container)
        {
            container.ConfigureSample(t => new PerRequestLifetimeManager())

                // Miles.MassTransit: per-request activity context, app-wide dispatcher.
                .RegisterType<IActivityContext, RequestActivityContext>(new PerRequestLifetimeManager())
                .RegisterType<IMessageDispatchProcess, HostingEnvrionmentMessageDispatchProcess<IBus>>(new ContainerControlledLifetimeManager())

                // MassTransit: the bus is exposed under all three interfaces.
                // NOTE(review): presumably MassTransitBusConfig.GetBus() returns a
                // cached instance so all three registrations share one bus — confirm.
                .RegisterInstance<IBus>(MassTransitBusConfig.GetBus())
                .RegisterInstance<IPublishEndpoint>(MassTransitBusConfig.GetBus())
                .RegisterInstance<ISendEndpointProvider>(MassTransitBusConfig.GetBus());
        }
    }
}
| adz21c/miles | Miles.Sample.Web/App_Start/UnityConfig.cs | C# | apache-2.0 | 2,002 |
package jp.sourceforge.ea2ddl.dao.cbean.cq;
import jp.sourceforge.ea2ddl.dao.cbean.cq.bs.BsTGenoptCQ;
import org.seasar.dbflute.cbean.ConditionQuery;
import org.seasar.dbflute.cbean.sqlclause.SqlClause;
/**
 * The condition-query of t_genopt.
 * <p>
 * You can implement your original methods here.
 * This class is NOT overridden when re-generating.
 * </p>
 * @author DBFlute(AutoGenerator)
 */
@SuppressWarnings("unchecked")
public class TGenoptCQ extends BsTGenoptCQ {

    // ===================================================================================
    //                                                                         Constructor
    //                                                                         ===========
    /**
     * Constructor.
     * @param childQuery Child query as abstract class. (Nullable: If null, this is base instance.)
     * @param sqlClause SQL clause instance. (NotNull)
     * @param aliasName My alias name. (NotNull)
     * @param nestLevel Nest level.
     */
    public TGenoptCQ(ConditionQuery childQuery, SqlClause sqlClause, String aliasName, int nestLevel) {
        super(childQuery, sqlClause, aliasName, nestLevel);
    }

    // ===================================================================================
    //                                                                      Arrange Method
    //                                                                      ==============
    // You can make original arrange query methods here.
    // public void arrangeXxx() {
    //     ...
    // }
}
| taktos/ea2ddl | ea2ddl-dao/src/main/java/jp/sourceforge/ea2ddl/dao/cbean/cq/TGenoptCQ.java | Java | apache-2.0 | 1,617 |
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.sql;
import java.util.Collection;
import java.util.Map;
import com.orientechnologies.orient.core.command.OCommandRequest;
import com.orientechnologies.orient.core.record.impl.ODocument;
/**
 * Explains the execution of a command returning profiling information.
 * 
 * @author Luca Garulli
 */
public class OCommandExecutorSQLExplain extends OCommandExecutorSQLDelegate {
  public static final String KEYWORD_EXPLAIN = "EXPLAIN";

  /**
   * Strips the leading EXPLAIN keyword and hands the remaining statement text
   * to the delegate for parsing.
   *
   * @param iCommand command whose text starts with the EXPLAIN keyword
   * @return this executor, for chaining
   */
  @SuppressWarnings("unchecked")
  @Override
  public OCommandExecutorSQLExplain parse(OCommandRequest iCommand) {
    String cmd = ((OCommandSQL) iCommand).getText();
    super.parse(new OCommandSQL(cmd.substring(KEYWORD_EXPLAIN.length())));
    return this;
  }

  /**
   * Executes the underlying command with metric recording enabled and returns
   * a report document (built from the context variables) instead of the
   * command's own result.
   */
  @Override
  public Object execute(Map<Object, Object> iArgs) {
    // Enable per-command metric recording; the metrics land in the context variables.
    delegate.getContext().setRecordingMetrics(true);
    final long startTime = System.nanoTime();
    final Object result = super.execute(iArgs);

    final ODocument report = new ODocument(delegate.getContext().getVariables());
    // Elapsed wall-clock time in milliseconds.
    report.field("elapsed", (System.nanoTime() - startTime) / 1000000f);

    // Summarize the shape of the (discarded) result in the report.
    if (result instanceof Collection<?>) {
      report.field("resultType", "collection");
      report.field("resultSize", ((Collection<?>) result).size());
    } else if (result instanceof ODocument) {
      report.field("resultType", "document");
      report.field("resultSize", 1);
    } else if (result instanceof Number) {
      report.field("resultType", "number");
    }
    return report;
  }
}
| delebash/orientdb-parent | core/src/main/java/com/orientechnologies/orient/core/sql/OCommandExecutorSQLExplain.java | Java | apache-2.0 | 2,109 |
package com.suscipio_solutions.consecro_mud.WebMacros;
import java.util.Enumeration;
import com.suscipio_solutions.consecro_mud.Common.interfaces.PlayerAccount;
import com.suscipio_solutions.consecro_mud.core.CMLib;
import com.suscipio_solutions.consecro_mud.core.CMProps;
import com.suscipio_solutions.consecro_web.interfaces.HTTPRequest;
public class AccountNext extends StdWebMacro
{
	@Override public String name() { return "AccountNext"; }

	/**
	 * Advances the fake url parameter "ACCOUNT" to the next account in the
	 * (optionally sorted) account enumeration, clearing it when the end of the
	 * enumeration is reached.
	 */
	@Override
	public String runMacro(HTTPRequest httpReq, String parm)
	{
		// While the MUD is still starting up, just report its status text.
		if(!CMProps.getBoolVar(CMProps.Bool.MUDSTARTED))
			return CMProps.getVar(CMProps.Str.MUDSTATUS);
		final java.util.Map<String,String> parms=parseParms(parm);
		final String last=httpReq.getUrlParameter("ACCOUNT");
		// RESET forgets the current position in the iteration.
		if(parms.containsKey("RESET"))
		{
			if(last!=null) httpReq.removeUrlParameter("ACCOUNT");
			return "";
		}
		String lastID="";
		String sort=httpReq.getUrlParameter("SORTBY");
		if(sort==null) sort="";
		final Enumeration<PlayerAccount> pe=CMLib.players().accounts(sort,httpReq.getRequestObjects());
		for(;pe.hasMoreElements();)
		{
			final PlayerAccount account=pe.nextElement();
			// Take the first account when no position is set, or the account
			// immediately following the one currently named by "ACCOUNT".
			if((last==null)||((last.length()>0)&&(last.equals(lastID))&&(!account.getAccountName().equals(lastID))))
			{
				httpReq.addFakeUrlParameter("ACCOUNT",account.getAccountName());
				return "";
			}
			lastID=account.getAccountName();
		}
		// Ran off the end: clear the parameter; " @break@" stops the enclosing
		// template loop unless EMPTYOK asked for a marker instead.
		httpReq.addFakeUrlParameter("ACCOUNT","");
		if(parms.containsKey("EMPTYOK"))
			return "<!--EMPTY-->";
		return " @break@";
	}
}
| ConsecroMUD/ConsecroMUD | com/suscipio_solutions/consecro_mud/WebMacros/AccountNext.java | Java | apache-2.0 | 1,502 |
package kr.dogfoot.hwplib.reader.bodytext.paragraph.control;
import kr.dogfoot.hwplib.object.bodytext.control.ControlEquation;
import kr.dogfoot.hwplib.object.etc.HWPTag;
import kr.dogfoot.hwplib.reader.bodytext.paragraph.control.eqed.ForEQEdit;
import kr.dogfoot.hwplib.reader.bodytext.paragraph.control.gso.part.ForCaption;
import kr.dogfoot.hwplib.reader.bodytext.paragraph.control.gso.part.ForCtrlHeaderGso;
import kr.dogfoot.hwplib.util.compoundFile.reader.StreamReader;
import java.io.IOException;
/**
 * Reader for an equation control.
 *
 * @author neolord
 */
public class ForControlEquation {
    /**
     * Equation control being populated.
     */
    private ControlEquation eqed;
    /**
     * Stream reader.
     */
    private StreamReader sr;
    /**
     * Record level of the control header record.
     */
    private int ctrlHeaderLevel;

    /**
     * Constructor.
     */
    public ForControlEquation() {
    }

    /**
     * Reads the equation control.
     *
     * @param eqed equation control to fill
     * @param sr   stream reader
     * @throws Exception
     */
    public void read(ControlEquation eqed, StreamReader sr) throws Exception {
        this.eqed = eqed;
        this.sr = sr;
        this.ctrlHeaderLevel = sr.getCurrentRecordHeader().getLevel();

        ctrlHeader();
        caption();

        // Keep consuming child records until the stream ends or a record at
        // (or above) the control header's level marks the end of this control.
        while (sr.isEndOfStream() == false) {
            if (sr.isImmediatelyAfterReadingHeader() == false) {
                sr.readRecordHeder();
            }
            if (ctrlHeaderLevel >= sr.getCurrentRecordHeader().getLevel()) {
                break;
            }
            readBody();
        }
    }

    /**
     * Reads the control header record of the equation control.
     *
     * @throws IOException
     */
    private void ctrlHeader() throws IOException {
        ForCtrlHeaderGso.read(eqed.getHeader(), sr);
    }

    /**
     * Reads the caption information, when present.
     *
     * @throws Exception
     */
    private void caption() throws Exception {
        sr.readRecordHeder();
        // A LIST_HEADER record at this point means the control has a caption.
        if (sr.getCurrentRecordHeader().getTagID() == HWPTag.LIST_HEADER) {
            eqed.createCaption();
            ForCaption.read(eqed.getCaption(), sr);
        }
    }

    /**
     * Dispatches on the already-read record header and reads the record body.
     *
     * @throws IOException
     */
    private void readBody() throws IOException {
        switch (sr.getCurrentRecordHeader().getTagID()) {
            case HWPTag.EQEDIT:
                eqEdit();
                break;
        }
    }

    /**
     * Reads the equation information record.
     *
     * @throws IOException
     */
    private void eqEdit() throws IOException {
        ForEQEdit.read(eqed.getEQEdit(), sr);
    }
}
| neolord0/hwplib | src/main/java/kr/dogfoot/hwplib/reader/bodytext/paragraph/control/ForControlEquation.java | Java | apache-2.0 | 2,741 |
/*
* Copyright (c) 2017. tangzx(love.tangzx@qq.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tang.intellij.lua.debugger;
/**
*
* Created by tangzx on 2017/5/7.
*/
public enum DebuggerType {
    Attach(1, "Attach Debugger(Not available)"), Mob(2, "Remote Debugger(Mobdebug)");

    // Numeric code persisted/exchanged for this type, and its display text.
    private final int v;
    private final String desc;

    DebuggerType(int code, String description) {
        this.v = code;
        this.desc = description;
    }

    /**
     * Looks up the debugger type whose numeric code matches.
     *
     * @param v numeric code
     * @return the matching type, or null when the code is unknown
     */
    public static DebuggerType valueOf(int v) {
        for (DebuggerType type : values()) {
            if (type.v == v) {
                return type;
            }
        }
        return null;
    }

    /** @return the numeric code of this type */
    public int value() { return v; }

    /** @return the human-readable description */
    public String toString() { return desc; }
}
| EmmyLua/IntelliJ-EmmyLua | src/main/java/com/tang/intellij/lua/debugger/DebuggerType.java | Java | apache-2.0 | 1,217 |
module XDR::Concerns::ConvertsToXDR
  include XDR::Concerns::ReadsBytes

  # Serializes `val` to XDR, writing the bytes onto `io`.
  # Implementing classes must override this.
  #
  # @param val [Object] the object to serialize
  # @param io [IO] destination stream
  def write(val, io)
    raise NotImplementedError, "implement in including class"
  end

  # Reads one instance of the implementing type from `io`.
  # Implementing classes must override this.
  #
  # @param io [IO] the stream to read from
  # @return [Object] the deserialized value
  def read(io)
    raise NotImplementedError, "implement in including class"
  end

  # Reports whether `value` can be serialized by this serializer.
  # Implementing classes must override this.
  #
  # @param value [Object] the value to test
  # @return [Boolean] true if valid, false otherwise
  def valid?(value)
    raise NotImplementedError, "implement in including class"
  end

  # Serializes `val` to XDR and returns the raw bytes.
  #
  # @param val [Object] the value to serialize
  # @return [String] the produced bytes (binary / ASCII-8BIT encoding)
  def to_xdr(val)
    buffer = StringIO.new
    write(val, buffer)
    buffer.string.force_encoding("ASCII-8BIT")
  end

  # Deserializes one value from the provided byte string.
  #
  # @param string [String] the bytes to read from
  # @return [Object] the deserialized value
  def from_xdr(string)
    read(StringIO.new(string))
  end

  private

  # Number of zero bytes needed to pad `length` up to a 4-byte boundary.
  def padding_for(length)
    (4 - (length % 4)) % 4
  end
end
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.codedeploy.model;
import javax.annotation.Generated;
/**
 * <p>
 * The trigger was specified in an invalid format.
 * </p>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class InvalidTriggerConfigException extends com.amazonaws.services.codedeploy.model.AmazonCodeDeployException {
    // Serialization version for this exception type.
    private static final long serialVersionUID = 1L;

    /**
     * Constructs a new InvalidTriggerConfigException with the specified error message.
     * 
     * @param message
     *        Describes the error encountered.
     */
    public InvalidTriggerConfigException(String message) {
        super(message);
    }
}
| aws/aws-sdk-java | aws-java-sdk-codedeploy/src/main/java/com/amazonaws/services/codedeploy/model/InvalidTriggerConfigException.java | Java | apache-2.0 | 1,242 |
package com.orionplatform.math.statistics.classes;
import java.util.Map;
import com.orionplatform.core.exception.Assert;
import com.orionplatform.data.data_structures.list.OrionList;
import com.orionplatform.math.MathRule;
import com.orionplatform.math.geometry.vector.Vector;
import com.orionplatform.math.geometry.vector.VectorRules;
import com.orionplatform.math.number.ANumber;
import com.orionplatform.math.statistics.classes.aclass.StatisticalClass;
import com.orionplatform.math.statistics.classes.aclass.StatisticalClassRules;
public class StatisticalClassesRules extends MathRule
{
    /**
     * Validates a list of statistical classes: the list must be non-empty and
     * every class in it must itself be valid.
     */
    public static synchronized void isValid(OrionList<StatisticalClass> statisticalClasses)
    {
        Assert.notEmpty(statisticalClasses, "The statisticalClasses input cannot be null/empty.");
        statisticalClasses.forAll(sc -> StatisticalClassRules.isValid(sc));
    }

    /**
     * Validates the classes and additionally requires a non-empty
     * value-to-class membership mapper.
     */
    public static synchronized void isValid(OrionList<StatisticalClass> statisticalClasses, Map<ANumber, StatisticalClass> valuesAndClassMembershipMapper)
    {
        isValid(statisticalClasses);
        Assert.notEmpty(valuesAndClassMembershipMapper, "The valuesAndClassMembershipMapper input cannot be null/empty.");
    }

    /**
     * Validates a values vector against the membership mapper: the vector must
     * be valid and every component must appear as a key in the mapper.
     */
    public static synchronized void isValid(Vector values, Map<ANumber, StatisticalClass> valuesAndClassMembershipMapper)
    {
        VectorRules.isValid(values);
        Assert.notNull(valuesAndClassMembershipMapper, "The valuesAndClassMembershipMapper input cannot be null.");
        Assert.areEqual(values.filter(value -> valuesAndClassMembershipMapper.containsKey(value)).count(), values.getDimensions(), "valuesAndClassMembershipMapper does not have all values as keys.");
    }

    /**
     * Validates a StatisticalClasses aggregate by checking its classes and its
     * membership mapper together.
     */
    public static synchronized void isValid(StatisticalClasses statisticalClasses)
    {
        Assert.notNull(statisticalClasses, "The statisticalClasses input cannot be null.");
        isValid(statisticalClasses.getStatisticalClasses(), statisticalClasses.getValuesAndClassMembershipMapper());
    }
}
} | orioncode/orionplatform | orion_math/orion_math_core/src/main/java/com/orionplatform/math/statistics/classes/StatisticalClassesRules.java | Java | apache-2.0 | 2,021 |
package me.tatarka.fakeartist.api.listeners;
import com.shephertz.app42.gaming.multiplayer.client.events.AllRoomsEvent;
import com.shephertz.app42.gaming.multiplayer.client.events.AllUsersEvent;
import com.shephertz.app42.gaming.multiplayer.client.events.LiveUserInfoEvent;
import com.shephertz.app42.gaming.multiplayer.client.events.MatchedRoomsEvent;
import com.shephertz.app42.gaming.multiplayer.client.events.RoomEvent;
import com.shephertz.app42.gaming.multiplayer.client.listener.ZoneRequestListener;
/**
 * No-op implementation of {@link ZoneRequestListener}. Extend this class and
 * override only the callbacks you care about instead of implementing every
 * method of the interface.
 */
public abstract class ZoneRequestAdapter implements ZoneRequestListener {
    // All callbacks below are intentionally empty.

    @Override
    public void onDeleteRoomDone(RoomEvent roomEvent) {
    }

    @Override
    public void onGetAllRoomsDone(AllRoomsEvent allRoomsEvent) {
    }

    @Override
    public void onCreateRoomDone(RoomEvent roomEvent) {
    }

    @Override
    public void onGetOnlineUsersDone(AllUsersEvent allUsersEvent) {
    }

    @Override
    public void onGetLiveUserInfoDone(LiveUserInfoEvent liveUserInfoEvent) {
    }

    @Override
    public void onSetCustomUserDataDone(LiveUserInfoEvent liveUserInfoEvent) {
    }

    @Override
    public void onGetMatchedRoomsDone(MatchedRoomsEvent matchedRoomsEvent) {
    }

    @Override
    public void onGetAllRoomsCountDone(AllRoomsEvent allRoomsEvent) {
    }

    @Override
    public void onGetOnlineUsersCountDone(AllUsersEvent allUsersEvent) {
    }

    @Override
    public void onGetUserStatusDone(LiveUserInfoEvent liveUserInfoEvent) {
    }
}
| evant/fake-artist | app/src/main/java/me/tatarka/fakeartist/api/listeners/ZoneRequestAdapter.java | Java | apache-2.0 | 1,508 |