repo_name stringlengths 4 116 | path stringlengths 4 379 | size stringlengths 1 7 | content stringlengths 3 1.05M | license stringclasses 15
values |
|---|---|---|---|---|
rven/odoo | addons/partner_autocomplete/models/res_partner.py | 7687 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import json
from odoo import api, fields, models, exceptions, _
from odoo.addons.iap.tools import iap_tools
# TDE FIXME: check those errors at iap level ?
from requests.exceptions import ConnectionError, HTTPError
_logger = logging.getLogger(__name__)
DEFAULT_ENDPOINT = 'https://partner-autocomplete.odoo.com'
class ResPartner(models.Model):
    """Partner extension backed by the IAP partner-autocomplete service.

    Adds the remote company id (``partner_gid``) and a transient JSON blob
    (``additional_info``) used to post an enrichment note on creation.
    """
    _name = 'res.partner'
    _inherit = 'res.partner'

    partner_gid = fields.Integer('Company database ID')
    additional_info = fields.Char('Additional info')

    @api.model
    def _replace_location_code_by_id(self, record):
        """Replace the state/country codes and names popped from ``record``
        by ``country_id`` / ``state_id`` entries (dict with ``id`` and
        ``display_name``, or False when unresolved). Mutates and returns
        ``record``."""
        record['country_id'], record['state_id'] = self._find_country_data(
            state_code=record.pop('state_code', False),
            state_name=record.pop('state_name', False),
            country_code=record.pop('country_code', False),
            country_name=record.pop('country_name', False),
        )
        return record

    @api.model
    def _format_data_company(self, company):
        """Normalize a company payload received from the IAP service.

        Resolves location codes on the company and each of its children,
        and serializes ``additional_info`` to JSON text (it is stored in a
        Char field). Mutates and returns ``company``."""
        self._replace_location_code_by_id(company)
        if company.get('child_ids'):
            company['child_ids'] = [
                self._replace_location_code_by_id(child)
                for child in company['child_ids']
            ]
        if company.get('additional_info'):
            company['additional_info'] = json.dumps(company['additional_info'])
        return company

    @api.model
    def _find_country_data(self, state_code, state_name, country_code, country_name):
        """Resolve a country and optional state from codes and/or names.

        :return: tuple ``(country_id, state_id)`` where each element is
            either False or ``{'id': ..., 'display_name': ...}``.
        """
        country = self.env['res.country'].search([('code', '=ilike', country_code)])
        if not country:
            country = self.env['res.country'].search([('name', '=ilike', country_name)])

        country_id = False
        state_id = False
        if country:
            country_id = {
                'id': country.id,
                'display_name': country.display_name,
            }
            if state_name or state_code:
                state = self.env['res.country.state'].search([
                    ('country_id', '=', country.id),
                    '|',
                    ('name', '=ilike', state_name),
                    ('code', '=ilike', state_code),
                ], limit=1)
                if state:
                    state_id = {
                        'id': state.id,
                        'display_name': state.display_name,
                    }
        else:
            _logger.info('Country code not found: %s', country_code)
        return country_id, state_id

    @api.model
    def get_endpoint(self):
        """Return the base URL of the partner-autocomplete IAP endpoint."""
        url = self.env['ir.config_parameter'].sudo().get_param(
            'iap.partner_autocomplete.endpoint', DEFAULT_ENDPOINT)
        return url + '/iap/partner_autocomplete'

    @api.model
    def _rpc_remote_api(self, action, params, timeout=15):
        """Call the remote autocomplete service.

        :param action: endpoint suffix ('search', 'enrich', 'search_vat')
        :param params: payload dict; enriched in place with account/db info
        :return: tuple ``(result, error)`` — ``result`` is the JSON-RPC
            answer or False, ``error`` is False or an error message string.
        """
        if self.env.registry.in_test_mode():
            # Never hit the network (nor consume IAP credits) during tests.
            return False, 'Insufficient Credit'
        url = '%s/%s' % (self.get_endpoint(), action)
        account = self.env['iap.account'].get('partner_autocomplete')
        if not account.account_token:
            return False, 'No Account Token'
        params.update({
            'db_uuid': self.env['ir.config_parameter'].sudo().get_param('database.uuid'),
            'account_token': account.account_token,
            'country_code': self.env.company.country_id.code,
            'zip': self.env.company.zip,
        })
        try:
            return iap_tools.iap_jsonrpc(url=url, params=params, timeout=timeout), False
        except iap_tools.InsufficientCreditError as exception:
            _logger.warning('Insufficient Credits for Autocomplete Service: %s', exception)
            return False, 'Insufficient Credit'
        except (ConnectionError, HTTPError, exceptions.AccessError, exceptions.UserError) as exception:
            _logger.error('Autocomplete API error: %s', exception)
            return False, str(exception)

    @api.model
    def autocomplete(self, query):
        """Return a (possibly empty) list of formatted company suggestions
        matching ``query``. Errors degrade silently to an empty list."""
        suggestions, _error = self._rpc_remote_api('search', {
            'query': query,
        })
        if not suggestions:
            return []
        return [self._format_data_company(suggestion) for suggestion in suggestions]

    @api.model
    def enrich_company(self, company_domain, partner_gid, vat):
        """Fetch enrichment data for a company.

        :return: formatted company dict, possibly empty; on failure the dict
            carries ``error``/``error_message`` keys.
        """
        response, error = self._rpc_remote_api('enrich', {
            'domain': company_domain,
            'partner_gid': partner_gid,
            'vat': vat,
        })
        if response and response.get('company_data'):
            result = self._format_data_company(response.get('company_data'))
        else:
            result = {}
        if response and response.get('credit_error'):
            result.update({
                'error': True,
                'error_message': 'Insufficient Credit',
            })
        elif error:
            result.update({
                'error': True,
                'error_message': error,
            })
        return result

    @api.model
    def read_by_vat(self, vat):
        """Return a list with at most one formatted company found by VAT
        lookup against the VIES service."""
        vies_vat_data, _error = self._rpc_remote_api('search_vat', {
            'vat': vat,
        })
        if not vies_vat_data:
            return []
        return [self._format_data_company(vies_vat_data)]

    @api.model
    def _is_company_in_europe(self, country_code):
        """Return True if ``country_code`` belongs to the Europe country group.

        Bugfix: the previous implementation only bound ``country_id`` when a
        country was found and then used it unconditionally, raising a
        NameError for unknown codes; we now bail out early instead.
        """
        country = self.env['res.country'].search([('code', '=ilike', country_code)])
        if not country:
            return False
        # ``env.ref`` raises by default, which made the search() fallback
        # unreachable; disable the raise so the fallback actually applies.
        europe = self.env.ref('base.europe', raise_if_not_found=False)
        if not europe:
            europe = self.env["res.country.group"].search([('name', '=', 'Europe')], limit=1)
        return bool(europe) and country.id in europe.country_ids.ids

    def _is_vat_syncable(self, vat):
        """Return True when the VAT's 2-letter country prefix is European and
        matches the partner's country (or the partner has no country)."""
        vat_country_code = vat[:2]
        partner_country_code = self.country_id.code if self.country_id else ''
        return self._is_company_in_europe(vat_country_code) and (
            partner_country_code == vat_country_code or not partner_country_code)

    def _is_synchable(self):
        """Return True if this company partner has a remote id and was not
        already synchronized."""
        already_synched = self.env['res.partner.autocomplete.sync'].search(
            [('partner_id', '=', self.id), ('synched', '=', True)])
        return self.is_company and self.partner_gid and not already_synched

    def _update_autocomplete_data(self, vat):
        """Queue this partner for autocomplete synchronization when eligible."""
        self.ensure_one()
        if vat and self._is_synchable() and self._is_vat_syncable(vat):
            self.env['res.partner.autocomplete.sync'].sudo().add_to_queue(self.id)

    @api.model_create_multi
    def create(self, vals_list):
        """Create partners; for single creations, queue autocomplete sync and
        post the enrichment note carried in ``additional_info``."""
        partners = super(ResPartner, self).create(vals_list)
        if len(vals_list) == 1:
            partners._update_autocomplete_data(vals_list[0].get('vat', False))
            if partners.additional_info:
                # Post the enrichment data as a note, then drop the
                # transient JSON payload.
                template_values = json.loads(partners.additional_info)
                template_values['flavor_text'] = _("Partner created by Odoo Partner Autocomplete Service")
                partners.message_post_with_view(
                    'iap_mail.enrich_company',
                    values=template_values,
                    subtype_id=self.env.ref('mail.mt_note').id,
                )
                partners.write({'additional_info': False})
        return partners

    def write(self, values):
        """Write values; for single-record writes, re-queue autocomplete sync
        when the VAT changes."""
        res = super(ResPartner, self).write(values)
        if len(self) == 1:
            self._update_autocomplete_data(values.get('vat', False))
        return res
| agpl-3.0 |
exercitussolus/yolo | src/main/java/org/elasticsearch/transport/local/LocalTransportModule.java | 1391 | /*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport.local;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.Transport;
/**
*
*/
public class LocalTransportModule extends AbstractModule {
private final Settings settings;
public LocalTransportModule(Settings settings) {
this.settings = settings;
}
@Override
protected void configure() {
bind(LocalTransport.class).asEagerSingleton();
bind(Transport.class).to(LocalTransport.class).asEagerSingleton();
}
} | agpl-3.0 |
FireWalkerX/eyeOS-FOSS-V.2.0 | eyeos/system/Frameworks/NetSync/cometlib/interfaces/AbstractChannel.php | 229 | <?php
/*
* Abstract object representig a channel
*/
abstract class AbstractChannel {
    /** @var string Channel identifier. */
    protected $name;

    /**
     * Assign the channel name.
     *
     * @param string $name new channel identifier
     */
    public function setName($name) {
        $this->name = $name;
    }

    /**
     * Retrieve the channel name.
     *
     * @return string the channel identifier
     */
    public function getName() {
        return $this->name;
    }
}
?>
| agpl-3.0 |
vanSabben/java-iso-tools | sabre/src/main/java/com/github/stephenc/javaisotools/udflib/structures/LogicalVolumeDescriptor.java | 7219 | /*
* Copyright (c) 2010. Stephen Connolly.
* Copyright (c) 2006. Björn Stickler <bjoern@stickler.de>.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package com.github.stephenc.javaisotools.udflib.structures;
import java.io.IOException;
import java.io.RandomAccessFile;
import com.github.stephenc.javaisotools.udflib.tools.BinaryTools;
import com.github.stephenc.javaisotools.udflib.tools.OSTAUnicode;
/**
 * In-memory model of a UDF Logical Volume Descriptor (tag identifier 6).
 *
 * Field names and the trailing comments mirror the on-disk structure names
 * from the UDF/ECMA-167 specification; {@link #read} and
 * {@link #getBytesWithoutDescriptorTag} must keep the exact field order and
 * byte offsets, so do not reorder members casually.
 */
public class LogicalVolumeDescriptor extends VolumeDescriptorSequenceItem {

    public CharSpec DescriptorCharacterSet; // struct charspec
    public byte LogicalVolumeIdentifier[]; // dstring[128]
    public long LogicalBlockSize; // Uint32
    public EntityID DomainIdentifier; // struct EntityID
    //public byte LogicalVolumeContentsUse[]; // dstring[16]
    public Long_ad LogicalVolumeContentsUse;
    public long MapTableLength; // Uint32
    public long NumberofPartitionMaps; // Uint32
    public EntityID ImplementationIdentifier; // struct EntityID
    public byte ImplementationUse[]; // byte[128]
    public Extend_ad IntegritySequenceExtent; // extend_ad
    public byte PartitionMaps[]; // byte[]

    /**
     * Creates an empty descriptor with tag identifier 6 (Logical Volume
     * Descriptor) and zero-filled fixed-size fields.
     */
    public LogicalVolumeDescriptor() {
        DescriptorTag = new Tag();
        DescriptorTag.TagIdentifier = 6;
        DescriptorCharacterSet = new CharSpec();
        LogicalVolumeIdentifier = new byte[128];
        DomainIdentifier = new EntityID();
        LogicalVolumeContentsUse = new Long_ad();
        ImplementationIdentifier = new EntityID();
        ImplementationUse = new byte[128];
        IntegritySequenceExtent = new Extend_ad();
        PartitionMaps = new byte[0];
    }

    /**
     * Stores {@code volumeIdentifier} as an OSTA-compressed dstring in the
     * 128-byte {@link #LogicalVolumeIdentifier} field (last byte = length).
     *
     * @param volumeIdentifier identifier text, at most 126 characters
     * @throws Exception if the identifier is longer than 126 characters
     */
    public void setLogicalVolumeIdentifier(String volumeIdentifier)
            throws Exception {
        if (volumeIdentifier.length() > 126) {
            throw new Exception("error: logical volume identifier length > 126 characters");
        }
        LogicalVolumeIdentifier = new byte[128];
        try {
            byte volumeIdentifierBytes[] = volumeIdentifier.getBytes("UTF-16");
            int compId = OSTAUnicode.getBestCompressionId(volumeIdentifierBytes);
            byte tmpIdentifier[] = OSTAUnicode.CompressUnicodeByte(volumeIdentifierBytes, compId);
            // Cap at 127 bytes: byte 127 holds the dstring length marker.
            int length = (tmpIdentifier.length < 127) ? tmpIdentifier.length : 127;
            System.arraycopy(tmpIdentifier, 0, LogicalVolumeIdentifier, 0, length);
            LogicalVolumeIdentifier[LogicalVolumeIdentifier.length - 1] = (byte) length;
        }
        catch (Exception ex) { /* never happens */ }
    }

    /**
     * Populates this descriptor from the file's current position, reading
     * the fields in on-disk order (tag, sequence number, charset, identifier,
     * block size, domain id, contents use, map table, implementation data,
     * integrity extent, then {@code MapTableLength} bytes of partition maps).
     *
     * @param myRandomAccessFile source positioned at the descriptor start
     * @throws IOException on any read failure
     */
    public void read(RandomAccessFile myRandomAccessFile)
            throws IOException {
        DescriptorTag = new Tag();
        DescriptorTag.read(myRandomAccessFile);
        VolumeDescriptorSequenceNumber = BinaryTools.readUInt32AsLong(myRandomAccessFile);
        DescriptorCharacterSet = new CharSpec();
        DescriptorCharacterSet.read(myRandomAccessFile);
        LogicalVolumeIdentifier = new byte[128];
        myRandomAccessFile.read(LogicalVolumeIdentifier);
        LogicalBlockSize = BinaryTools.readUInt32AsLong(myRandomAccessFile);
        DomainIdentifier = new EntityID();
        DomainIdentifier.read(myRandomAccessFile);
        //LogicalVolumeContentsUse = new byte[16];
        //myRandomAccessFile.read( LogicalVolumeContentsUse );
        LogicalVolumeContentsUse = new Long_ad();
        LogicalVolumeContentsUse.read(myRandomAccessFile);
        MapTableLength = BinaryTools.readUInt32AsLong(myRandomAccessFile);
        NumberofPartitionMaps = BinaryTools.readUInt32AsLong(myRandomAccessFile);
        ImplementationIdentifier = new EntityID();
        ImplementationIdentifier.read(myRandomAccessFile);
        ImplementationUse = new byte[128];
        myRandomAccessFile.read(ImplementationUse);
        IntegritySequenceExtent = new Extend_ad();
        IntegritySequenceExtent.read(myRandomAccessFile);
        PartitionMaps = new byte[(int) MapTableLength];
        myRandomAccessFile.read(PartitionMaps);
    }

    /**
     * Serializes every field after the descriptor tag into a new byte array,
     * in on-disk order.
     *
     * @return the descriptor body bytes (tag excluded)
     */
    public byte[] getBytesWithoutDescriptorTag() {
        byte DescriptorCharacterSetBytes[] = DescriptorCharacterSet.getBytes();
        byte DomainIdentifierBytes[] = DomainIdentifier.getBytes();
        byte LogicalVolumeContentsUseBytes[] = LogicalVolumeContentsUse.getBytes();
        byte ImplementationIdentifierBytes[] = ImplementationIdentifier.getBytes();
        byte IntegritySequenceExtentBytes[] = IntegritySequenceExtent.getBytes();

        // 272 = sum of the fixed-size members serialized inline below
        // (sequence number 4 + identifier 128 + block size 4 + map table
        // length 4 + partition-map count 4 + implementation use 128).
        byte rawBytes[] = new byte[272
                + DescriptorCharacterSetBytes.length
                + DomainIdentifierBytes.length
                + LogicalVolumeContentsUseBytes.length
                + ImplementationIdentifierBytes.length
                + IntegritySequenceExtentBytes.length
                + PartitionMaps.length];

        int pos = 0;
        pos = BinaryTools.getUInt32BytesFromLong(VolumeDescriptorSequenceNumber, rawBytes, pos);
        System.arraycopy(DescriptorCharacterSetBytes, 0, rawBytes, pos, DescriptorCharacterSetBytes.length);
        pos += DescriptorCharacterSetBytes.length;
        System.arraycopy(LogicalVolumeIdentifier, 0, rawBytes, pos, LogicalVolumeIdentifier.length);
        pos += LogicalVolumeIdentifier.length;
        pos = BinaryTools.getUInt32BytesFromLong(LogicalBlockSize, rawBytes, pos);
        System.arraycopy(DomainIdentifierBytes, 0, rawBytes, pos, DomainIdentifierBytes.length);
        pos += DomainIdentifierBytes.length;
        System.arraycopy(LogicalVolumeContentsUseBytes, 0, rawBytes, pos, LogicalVolumeContentsUseBytes.length);
        pos += LogicalVolumeContentsUseBytes.length;
        pos = BinaryTools.getUInt32BytesFromLong(MapTableLength, rawBytes, pos);
        pos = BinaryTools.getUInt32BytesFromLong(NumberofPartitionMaps, rawBytes, pos);
        System.arraycopy(ImplementationIdentifierBytes, 0, rawBytes, pos, ImplementationIdentifierBytes.length);
        pos += ImplementationIdentifierBytes.length;
        System.arraycopy(ImplementationUse, 0, rawBytes, pos, ImplementationUse.length);
        pos += ImplementationUse.length;
        System.arraycopy(IntegritySequenceExtentBytes, 0, rawBytes, pos, IntegritySequenceExtentBytes.length);
        pos += IntegritySequenceExtentBytes.length;
        System.arraycopy(PartitionMaps, 0, rawBytes, pos, PartitionMaps.length);
        pos += PartitionMaps.length;

        return rawBytes;
    }
}
| lgpl-2.1 |
oregional/tiki | lib/sheet/excel/reader.php | 24301 | <?php
//this script may only be included - so its better to die if called directly.
if (strpos($_SERVER["SCRIPT_NAME"],basename(__FILE__)) !== false) {
header("location: index.php");
exit;
}
define('Spreadsheet_Excel_Reader_HAVE_ICONV', function_exists('iconv'));
define('Spreadsheet_Excel_Reader_BIFF8', 0x600);
define('Spreadsheet_Excel_Reader_BIFF7', 0x500);
define('Spreadsheet_Excel_Reader_WorkbookGlobals', 0x5);
define('Spreadsheet_Excel_Reader_Worksheet', 0x10);
define('Spreadsheet_Excel_Reader_Type_BOF', 0x809);
define('Spreadsheet_Excel_Reader_Type_EOF', 0x0a);
define('Spreadsheet_Excel_Reader_Type_BOUNDSHEET', 0x85);
define('Spreadsheet_Excel_Reader_Type_DIMENSION', 0x200);
define('Spreadsheet_Excel_Reader_Type_ROW', 0x208);
define('Spreadsheet_Excel_Reader_Type_DBCELL', 0xd7);
define('Spreadsheet_Excel_Reader_Type_FILEPASS', 0x2f);
define('Spreadsheet_Excel_Reader_Type_NOTE', 0x1c);
define('Spreadsheet_Excel_Reader_Type_TXO', 0x1b6);
define('Spreadsheet_Excel_Reader_Type_RK', 0x7e);
define('Spreadsheet_Excel_Reader_Type_RK2', 0x27e);
define('Spreadsheet_Excel_Reader_Type_MULRK', 0xbd);
define('Spreadsheet_Excel_Reader_Type_MULBLANK', 0xbe);
define('Spreadsheet_Excel_Reader_Type_INDEX', 0x20b);
define('Spreadsheet_Excel_Reader_Type_SST', 0xfc);
define('Spreadsheet_Excel_Reader_Type_EXTSST', 0xff);
define('Spreadsheet_Excel_Reader_Type_CONTINUE', 0x3c);
define('Spreadsheet_Excel_Reader_Type_LABEL', 0x204);
define('Spreadsheet_Excel_Reader_Type_LABELSST', 0xfd);
define('Spreadsheet_Excel_Reader_Type_NUMBER', 0x203);
define('Spreadsheet_Excel_Reader_Type_NAME', 0x18);
define('Spreadsheet_Excel_Reader_Type_ARRAY', 0x221);
define('Spreadsheet_Excel_Reader_Type_STRING', 0x207);
define('Spreadsheet_Excel_Reader_Type_FORMULA', 0x406);
define('Spreadsheet_Excel_Reader_Type_FORMULA2', 0x6);
define('Spreadsheet_Excel_Reader_Type_FORMAT', 0x41e);
define('Spreadsheet_Excel_Reader_Type_XF', 0xe0);
define('Spreadsheet_Excel_Reader_Type_BOOLERR', 0x205);
define('Spreadsheet_Excel_Reader_Type_UNKNOWN', 0xffff);
define('Spreadsheet_Excel_Reader_Type_NINETEENFOUR', 0x22);
define('Spreadsheet_Excel_Reader_Type_MERGEDCELLS', 0xE5);
define('Spreadsheet_Excel_Reader_utcOffsetDays' , 25569);
define('Spreadsheet_Excel_Reader_utcOffsetDays1904', 24107);
define('Spreadsheet_Excel_Reader_msInADay', 24 * 60 * 60);
//define('Spreadsheet_Excel_Reader_DEF_NUM_FORMAT', "%.2f");
define('Spreadsheet_Excel_Reader_DEF_NUM_FORMAT', "%s");
// function file_get_contents for PHP < 4.3.0
// Thanks Marian Steinbach for this function
if (!function_exists('file_get_contents')) {
function file_get_contents($filename, $use_include_path = 0) {
$data = '';
$file = @fopen($filename, "rb", $use_include_path);
if ($file) {
while (!feof($file)) $data .= fread($file, 1024);
fclose($file);
} else {
// There was a problem opening the file
$data = FALSE;
}
return $data;
}
}
//class Spreadsheet_Excel_Reader extends PEAR {
class Spreadsheet_Excel_Reader
{
var $boundsheets = array();
var $formatRecords = array();
var $sst = array();
var $sheets = array();
var $data;
var $pos;
var $_ole;
var $_defaultEncoding;
var $_defaultFormat = Spreadsheet_Excel_Reader_DEF_NUM_FORMAT;
var $_columnsFormat = array();
var $dateFormats = array (
0xe => "d/m/Y",
0xf => "d-M-Y",
0x10 => "d-M",
0x11 => "M-Y",
0x12 => "h:i a",
0x13 => "h:i:s a",
0x14 => "H:i",
0x15 => "H:i:s",
0x16 => "d/m/Y H:i",
0x2d => "i:s",
0x2e => "H:i:s",
0x2f => "i:s.S");
var $numberFormats = array(
0x1 => "%1.0f", // "0"
0x2 => "%1.2f", // "0.00",
0x3 => "%1.0f", //"#,##0",
0x4 => "%1.2f", //"#,##0.00",
0x5 => "%1.0f", /*"$#,##0;($#,##0)",*/
0x6 => '$%1.0f', /*"$#,##0;($#,##0)",*/
0x7 => '$%1.2f', //"$#,##0.00;($#,##0.00)",
0x8 => '$%1.2f', //"$#,##0.00;($#,##0.00)",
0x9 => '%1.0f%%', // "0%"
0xa => '%1.2f%%', // "0.00%"
0xb => '%1.2f', // 0.00E00",
0x25 => '%1.0f', // "#,##0;(#,##0)",
0x26 => '%1.0f', //"#,##0;(#,##0)",
0x27 => '%1.2f', //"#,##0.00;(#,##0.00)",
0x28 => '%1.2f', //"#,##0.00;(#,##0.00)",
0x29 => '%1.0f', //"#,##0;(#,##0)",
0x2a => '$%1.0f', //"$#,##0;($#,##0)",
0x2b => '%1.2f', //"#,##0.00;(#,##0.00)",
0x2c => '$%1.2f', //"$#,##0.00;($#,##0.00)",
0x30 => '%1.0f'); //"##0.0E0";
/**
 * Create a reader with a fresh OLE container parser.
 */
function __construct(){
    $this->_ole = new R_OLE();
}
/**
 * Set the target encoding used when converting UTF-16 cell text for output.
 *
 * @param string $Encoding iconv-style encoding name
 */
function setOutputEncoding($Encoding){
    $this->_defaultEncoding = $Encoding;
}
/**
 * Set the default sprintf() format applied to numeric cells.
 *
 * @param string $sFormat sprintf-style format string
 */
function setDefaultFormat($sFormat){
    $this->_defaultFormat = $sFormat;
}
/**
 * Override the sprintf() format for a single column.
 *
 * @param int    $column  1-based column number
 * @param string $sFormat sprintf-style format string
 */
function setColumnFormat($column, $sFormat){
    $this->_columnsFormat[$column] = $sFormat;
}
/**
 * Read an XLS file: let the OLE layer extract the Workbook stream into
 * $this->data, then parse the BIFF records.
 *
 * @param string $sFileName path to the .xls file
 * @return mixed true on success; false / negative code on malformed input
 */
function read($sFileName) {
    $res = $this->_ole->read($sFileName);
    $this->data = $this->_ole->getWorkBook();
    /*
    // Legacy stream-location path, kept for reference: scanned every OLE
    // PPS entry for a BIFF BOF signature instead of asking for the
    // workbook directly.
    $res = $this->_ole->read($sFileName);
    if ($this->isError($res)) {
    // var_dump($res);
    return $this->raiseError($res);
    }
    $total = $this->_ole->ppsTotal();
    for ($i = 0; $i < $total; $i++) {
    if ($this->_ole->isFile($i)) {
    $type = unpack("v", $this->_ole->getData($i, 0, 2));
    if ($type[''] == 0x0809) { // check if it's a BIFF stream
    $this->_index = $i;
    $this->data = $this->_ole->getData($i, 0, $this->_ole->getDataLength($i));
    break;
    }
    }
    }
    if ($this->_index === null) {
    return $this->raiseError("$file doesn't seem to be an Excel file");
    }
    */
    //var_dump($this->data);
    $this->pos = 0;
    //$this->readRecords();
    return $this->_parse();
}
/**
 * Parse the workbook-globals substream of the BIFF stream in $this->data.
 *
 * Walks records until EOF, collecting:
 *  - SST shared strings (with CONTINUE-record handling) into $this->sst
 *  - FORMAT strings and XF cell formats into $this->formatRecords
 *  - the 1904 date-system flag into $this->nineteenFour
 *  - BOUNDSHEET name/offset pairs into $this->boundsheets
 * then parses every bounded sheet via _parsesheet().
 *
 * @return mixed true on success; false for non-BIFF7/8 or password-protected
 *               files; -1 on a malformed SST/CONTINUE sequence
 */
function _parse(){
    $pos = 0;
    // Every BIFF record: 2-byte type code, 2-byte payload length, payload.
    $code = ord($this->data[$pos]) | ord($this->data[$pos+1])<<8;
    $length = ord($this->data[$pos+2]) | ord($this->data[$pos+3])<<8;
    $version = ord($this->data[$pos + 4]) | ord($this->data[$pos + 5])<<8;
    $substreamType = ord($this->data[$pos + 6]) | ord($this->data[$pos + 7])<<8;
    //echo "Start parse code=".base_convert($code,10,16)." version=".base_convert($version,10,16)." substreamType=".base_convert($substreamType,10,16).""."\n";
    if (($version != Spreadsheet_Excel_Reader_BIFF8) && ($version != Spreadsheet_Excel_Reader_BIFF7)) {
        return false;
    }
    if ($substreamType != Spreadsheet_Excel_Reader_WorkbookGlobals){
        return false;
    }
    //print_r($rec);
    $pos += $length + 4;
    $code = ord($this->data[$pos]) | ord($this->data[$pos+1])<<8;
    $length = ord($this->data[$pos+2]) | ord($this->data[$pos+3])<<8;
    while ($code != Spreadsheet_Excel_Reader_Type_EOF){
        switch ($code) {
            case Spreadsheet_Excel_Reader_Type_SST:
                //echo "Type_SST\n";
                $spos = $pos + 4;
                $limitpos = $spos + $length;
                $uniqueStrings = $this->_GetInt4d($this->data, $spos+4);
                $spos += 8;
                for ($i = 0; $i < $uniqueStrings; $i++) {
                    // Read in the number of characters
                    if ($spos == $limitpos) {
                        // Record exhausted exactly at a string boundary:
                        // the next record must be a CONTINUE (0x3c).
                        $opcode = ord($this->data[$spos]) | ord($this->data[$spos+1])<<8;
                        $conlength = ord($this->data[$spos+2]) | ord($this->data[$spos+3])<<8;
                        if ($opcode != 0x3c) {
                            return -1;
                        }
                        $spos += 4;
                        $limitpos = $spos + $conlength;
                    }
                    $numChars = ord($this->data[$spos]) | (ord($this->data[$spos+1]) << 8);
                    //echo "i = $i pos = $pos numChars = $numChars ";
                    $spos += 2;
                    $optionFlags = ord($this->data[$spos]);
                    $spos++;
                    // Flag bits: 0x01 = 16-bit chars, 0x04 = extended (phonetic)
                    // data present, 0x08 = rich-text formatting runs present.
                    $asciiEncoding = (($optionFlags & 0x01) == 0) ;
                    $extendedString = ( ($optionFlags & 0x04) != 0);
                    // See if string contains formatting information
                    $richString = ( ($optionFlags & 0x08) != 0);
                    if ($richString) {
                        // Read in the crun
                        $formattingRuns = ord($this->data[$spos]) | (ord($this->data[$spos+1]) << 8);
                        $spos += 2;
                    }
                    if ($extendedString) {
                        // Read in cchExtRst
                        $extendedRunLength = $this->_GetInt4d($this->data, $spos);
                        $spos += 4;
                    }
                    $len = ($asciiEncoding)? $numChars : $numChars*2;
                    if ($spos + $len < $limitpos) {
                        $retstr = substr($this->data, $spos, $len);
                        $spos += $len;
                    }else{
                        // found countinue
                        // String spans into CONTINUE record(s): read the rest
                        // chunk by chunk; each chunk re-declares its encoding.
                        $retstr = substr($this->data, $spos, $limitpos - $spos);
                        $bytesRead = $limitpos - $spos;
                        $charsLeft = $numChars - (($asciiEncoding) ? $bytesRead : ($bytesRead / 2));
                        $spos = $limitpos;
                        while ($charsLeft > 0){
                            $opcode = ord($this->data[$spos]) | ord($this->data[$spos+1])<<8;
                            $conlength = ord($this->data[$spos+2]) | ord($this->data[$spos+3])<<8;
                            if ($opcode != 0x3c) {
                                return -1;
                            }
                            $spos += 4;
                            $limitpos = $spos + $conlength;
                            $option = ord($this->data[$spos]);
                            $spos += 1;
                            if ($asciiEncoding && ($option == 0)) {
                                $len = min($charsLeft, $limitpos - $spos); // min($charsLeft, $conlength);
                                $retstr .= substr($this->data, $spos, $len);
                                $charsLeft -= $len;
                                $asciiEncoding = true;
                            }elseif (!$asciiEncoding && ($option != 0)){
                                $len = min($charsLeft * 2, $limitpos - $spos); // min($charsLeft, $conlength);
                                $retstr .= substr($this->data, $spos, $len);
                                $charsLeft -= $len/2;
                                $asciiEncoding = false;
                            }elseif (!$asciiEncoding && ($option == 0)) {
                                // Bummer - the string starts off as Unicode, but after the
                                // continuation it is in straightforward ASCII encoding
                                $len = min($charsLeft, $limitpos - $spos); // min($charsLeft, $conlength);
                                for ($j = 0; $j < $len; $j++) {
                                    $retstr .= $this->data[$spos + $j].chr(0);
                                }
                                $charsLeft -= $len;
                                $asciiEncoding = false;
                            }else{
                                // ASCII so far, continuation switches to UTF-16:
                                // widen the already-read part to 2 bytes/char.
                                // NOTE(review): foreach over $retstr only iterates
                                // arrays — for a plain string this widening loop
                                // looks broken; verify against a file that hits
                                // this branch.
                                $newstr = '';
                                foreach( $retstr as $ret ) {
                                    $newstr = $ret.chr(0);
                                }
                                $retstr = $newstr;
                                $len = min($charsLeft * 2, $limitpos - $spos); // min($charsLeft, $conlength);
                                $retstr .= substr($this->data, $spos, $len);
                                $charsLeft -= $len/2;
                                $asciiEncoding = false;
                                //echo "Izavrat\n";
                            }
                            $spos += $len;
                        }
                    }
                    $retstr = ($asciiEncoding) ? $retstr : $this->_encodeUTF16($retstr);
                    // echo "Str $i = $retstr\n";
                    if ($richString){
                        $spos += 4 * $formattingRuns;
                    }
                    // For extended strings, skip over the extended string data
                    if ($extendedString) {
                        $spos += $extendedRunLength;
                    }
                    //if ($retstr == 'Derby'){
                    // echo "bb\n";
                    //}
                    $this->sst[]=$retstr;
                }
                /*$continueRecords = array();
                while ($this->getNextCode() == Type_CONTINUE) {
                    $continueRecords[] = &$this->nextRecord();
                }
                //echo " 1 Type_SST\n";
                $this->shareStrings = new SSTRecord($r, $continueRecords);
                //print_r($this->shareStrings->strings);
                */
                // echo 'SST read: '.($time_end-$time_start)."\n";
                break;
            case Spreadsheet_Excel_Reader_Type_FILEPASS:
                // Password-protected workbook: unsupported.
                return false;
                break;
            case Spreadsheet_Excel_Reader_Type_NAME:
                //echo "Type_NAME\n";
                break;
            case Spreadsheet_Excel_Reader_Type_FORMAT:
                $indexCode = ord($this->data[$pos+4]) | ord($this->data[$pos+5]) << 8;
                if ($version == Spreadsheet_Excel_Reader_BIFF8) {
                    $numchars = ord($this->data[$pos+6]) | ord($this->data[$pos+7]) << 8;
                    if (ord($this->data[$pos+8]) == 0){
                        $formatString = substr($this->data, $pos+9, $numchars);
                    } else {
                        $formatString = substr($this->data, $pos+9, $numchars*2);
                    }
                } else {
                    $numchars = ord($this->data[$pos+6]);
                    $formatString = substr($this->data, $pos+7, $numchars*2);
                }
                $this->formatRecords[$indexCode] = $formatString;
                // echo "Type.FORMAT\n";
                break;
            case Spreadsheet_Excel_Reader_Type_XF:
                //global $dateFormats, $numberFormats;
                // Classify each cell format as date / number / other; cells
                // reference these by XF index (see isDate()).
                $indexCode = ord($this->data[$pos+6]) | ord($this->data[$pos+7]) << 8;
                //echo "\nType.XF $indexCode ";
                if (array_key_exists($indexCode, $this->dateFormats)) {
                    //echo "isdate ".$dateFormats[$indexCode];
                    $this->formatRecords['xfrecords'][] = array(
                        'type' => 'date',
                        'format' => $this->dateFormats[$indexCode]
                    );
                }elseif (array_key_exists($indexCode, $this->numberFormats)) {
                    //echo "isnumber ".$this->numberFormats[$indexCode];
                    $this->formatRecords['xfrecords'][] = array(
                        'type' => 'number',
                        'format' => $this->numberFormats[$indexCode]
                    );
                }else{
                    $isdate = FALSE;
                    if ($indexCode > 0){
                        $formatstr = $this->formatRecords[$indexCode];
                        //echo "\ndate-time $formatstr \n";
                        if (preg_match("/[^hmsday\/\-:\s]/i", $formatstr) == 0) { // found day and time format
                            $isdate = TRUE;
                            // Translate Excel tokens to PHP date() tokens.
                            $formatstr = str_replace('mm', 'i', $formatstr);
                            $formatstr = str_replace('h', 'H', $formatstr);
                            //echo "\ndate-time $formatstr \n";
                        }
                    }
                    if ($isdate){
                        $this->formatRecords['xfrecords'][] = array(
                            'type' => 'date',
                            'format' => $formatstr,
                        );
                    }else{
                        $this->formatRecords['xfrecords'][] = array(
                            'type' => 'other',
                            'format' => '',
                            'code' => $indexCode
                        );
                    }
                }
                //echo "\n";
                break;
            case Spreadsheet_Excel_Reader_Type_NINETEENFOUR:
                //echo "Type.NINETEENFOUR\n";
                // Workbook uses the 1904 (Mac) date epoch when set.
                $this->nineteenFour = (ord($this->data[$pos+4]) == 1);
                break;
            case Spreadsheet_Excel_Reader_Type_BOUNDSHEET:
                //echo "Type.BOUNDSHEET\n";
                $rec_offset = $this->_GetInt4d($this->data, $pos+4);
                $rec_typeFlag = ord($this->data[$pos+8]);
                $rec_visibilityFlag = ord($this->data[$pos+9]);
                $rec_length = ord($this->data[$pos+10]);
                if ($version == Spreadsheet_Excel_Reader_BIFF8){
                    $chartype = ord($this->data[$pos+11]);
                    if ($chartype == 0){
                        $rec_name = substr($this->data, $pos+12, $rec_length);
                    } else {
                        $rec_name = $this->_encodeUTF16(substr($this->data, $pos+12, $rec_length*2));
                    }
                }elseif ($version == Spreadsheet_Excel_Reader_BIFF7){
                    $rec_name = substr($this->data, $pos+11, $rec_length);
                }
                $this->boundsheets[] = array('name'=>$rec_name,
                    'offset'=>$rec_offset);
                break;
        }
        //echo "Code = ".base_convert($r['code'],10,16)."\n";
        $pos += $length + 4;
        $code = ord($this->data[$pos]) | ord($this->data[$pos+1])<<8;
        $length = ord($this->data[$pos+2]) | ord($this->data[$pos+3])<<8;
        //$r = &$this->nextRecord();
        //echo "1 Code = ".base_convert($r['code'],10,16)."\n";
    }
    foreach ($this->boundsheets as $key=>$val){
        $this->sn = $key;
        $this->_parsesheet($val['offset']);
    }
    return true;
}
/**
 * Parse one worksheet substream starting at byte offset $spos.
 *
 * Fills $this->sheets[$this->sn] with dimensions, merged-cell spans and
 * cell values (via addcell()) from DIMENSION, MERGEDCELLS, RK, LABELSST,
 * MULRK, NUMBER, BOOLERR and LABEL records, stopping at the sheet EOF.
 *
 * @param int $spos byte offset of the sheet's BOF record in $this->data
 * @return mixed void on success; -1 for a non-BIFF7/8 stream, -2 when the
 *               substream is not a worksheet
 */
function _parsesheet($spos){
    $cont = true;
    // read BOF
    $code = ord($this->data[$spos]) | ord($this->data[$spos+1])<<8;
    $length = ord($this->data[$spos+2]) | ord($this->data[$spos+3])<<8;
    $version = ord($this->data[$spos + 4]) | ord($this->data[$spos + 5])<<8;
    $substreamType = ord($this->data[$spos + 6]) | ord($this->data[$spos + 7])<<8;
    if (($version != Spreadsheet_Excel_Reader_BIFF8) && ($version != Spreadsheet_Excel_Reader_BIFF7)) {
        return -1;
    }
    if ($substreamType != Spreadsheet_Excel_Reader_Worksheet){
        return -2;
    }
    //echo "Start parse code=".base_convert($code,10,16)." version=".base_convert($version,10,16)." substreamType=".base_convert($substreamType,10,16).""."\n";
    $spos += $length + 4;
    while($cont) {
        //echo "mem= ".memory_get_usage()."\n";
        // $r = &$this->file->nextRecord();
        $lowcode = ord($this->data[$spos]);
        if ($lowcode == Spreadsheet_Excel_Reader_Type_EOF) break;
        $code = $lowcode | ord($this->data[$spos+1])<<8;
        $length = ord($this->data[$spos+2]) | ord($this->data[$spos+3])<<8;
        $spos += 4;
        //echo "Code=".base_convert($code,10,16)."\n";
        unset($this->rectype);
        $this->multiplier = 1; // need for format with %
        switch ($code) {
            case Spreadsheet_Excel_Reader_Type_DIMENSION:
                //echo 'Type_DIMENSION ';
                if (!isset($this->numRows)) {
                    if (($length == 10) || ($version == Spreadsheet_Excel_Reader_BIFF7)){
                        $this->sheets[$this->sn]['numRows'] = ord($this->data[$spos+2]) | ord($this->data[$spos+3]) << 8;
                        $this->sheets[$this->sn]['numCols'] = ord($this->data[$spos+6]) | ord($this->data[$spos+7]) << 8;
                    } else {
                        $this->sheets[$this->sn]['numRows'] = ord($this->data[$spos+4]) | ord($this->data[$spos+5]) << 8;
                        $this->sheets[$this->sn]['numCols'] = ord($this->data[$spos+10]) | ord($this->data[$spos+11]) << 8;
                    }
                }
                //echo 'numRows '.$this->numRows.' '.$this->numCols."\n";
                break;
            case Spreadsheet_Excel_Reader_Type_MERGEDCELLS:
                // Each 8-byte range: first/last row, first/last column.
                $cellRanges = ord($this->data[$spos]) | ord($this->data[$spos+1])<<8;
                for ($i = 0; $i < $cellRanges; $i++) {
                    $fr = ord($this->data[$spos + 8*$i + 2]) | ord($this->data[$spos + 8*$i + 3])<<8;
                    $lr = ord($this->data[$spos + 8*$i + 4]) | ord($this->data[$spos + 8*$i + 5])<<8;
                    $fc = ord($this->data[$spos + 8*$i + 6]) | ord($this->data[$spos + 8*$i + 7])<<8;
                    $lc = ord($this->data[$spos + 8*$i + 8]) | ord($this->data[$spos + 8*$i + 9])<<8;
                    //$this->sheets[$this->sn]['mergedCells'][] = array($fr + 1, $fc + 1, $lr + 1, $lc + 1);
                    if ($lr - $fr > 0) {
                        $this->sheets[$this->sn]['cellsInfo'][$fr+1][$fc+1]['rowspan'] = $lr - $fr + 1;
                    }
                    if ($lc - $fc > 0) {
                        $this->sheets[$this->sn]['cellsInfo'][$fr+1][$fc+1]['colspan'] = $lc - $fc + 1;
                    }
                }
                //echo "Merged Cells $cellRanges $lr $fr $lc $fc\n";
                break;
            case Spreadsheet_Excel_Reader_Type_RK:
            case Spreadsheet_Excel_Reader_Type_RK2:
                // RK: compactly encoded number; may represent a date.
                $row = ord($this->data[$spos]) | ord($this->data[$spos+1])<<8;
                $column = ord($this->data[$spos+2]) | ord($this->data[$spos+3])<<8;
                $rknum = $this->_GetInt4d($this->data, $spos + 6);
                $numValue = $this->_GetIEEE754($rknum);
                if ($this->isDate($spos)) {
                    list($string, $raw) = $this->createDate($numValue);
                }else{
                    $raw = $numValue;
                    if (isset($this->_columnsFormat[$column + 1])){
                        $this->curformat = $this->_columnsFormat[$column + 1];
                    }
                    $string = sprintf($this->curformat, $numValue * $this->multiplier);
                    //$this->addcell(RKRecord($r));
                }
                $this->addcell($row, $column, $string, $raw);
                //echo "Type_RK $row $column $string $raw {$this->curformat}\n";
                break;
            case Spreadsheet_Excel_Reader_Type_LABELSST:
                // Cell text stored as an index into the shared-string table.
                $row = ord($this->data[$spos]) | ord($this->data[$spos+1])<<8;
                $column = ord($this->data[$spos+2]) | ord($this->data[$spos+3])<<8;
                $xfindex = ord($this->data[$spos+4]) | ord($this->data[$spos+5])<<8;
                $index = $this->_GetInt4d($this->data, $spos + 6);
                $this->addcell($row, $column, $this->sst[$index]);
                //echo "LabelSST $row $column $string\n";
                break;
            case Spreadsheet_Excel_Reader_Type_MULRK:
                // Run of consecutive RK values on one row.
                $row = ord($this->data[$spos]) | ord($this->data[$spos+1])<<8;
                $colFirst = ord($this->data[$spos+2]) | ord($this->data[$spos+3])<<8;
                $colLast = ord($this->data[$spos + $length - 2]) | ord($this->data[$spos + $length - 1])<<8;
                $columns = $colLast - $colFirst + 1;
                $tmppos = $spos+4;
                for ($i = 0; $i < $columns; $i++) {
                    $numValue = $this->_GetIEEE754($this->_GetInt4d($this->data, $tmppos + 2));
                    if ($this->isDate($tmppos-4)) {
                        list($string, $raw) = $this->createDate($numValue);
                    }else{
                        $raw = $numValue;
                        if (isset($this->_columnsFormat[$colFirst + $i + 1])){
                            $this->curformat = $this->_columnsFormat[$colFirst + $i + 1];
                        }
                        $string = sprintf($this->curformat, $numValue * $this->multiplier);
                    }
                    //$rec['rknumbers'][$i]['xfindex'] = ord($rec['data'][$pos]) | ord($rec['data'][$pos+1]) << 8;
                    $tmppos += 6;
                    $this->addcell($row, $colFirst + $i, $string, $raw);
                    //echo "MULRK $row ".($colFirst + $i)." $string\n";
                }
                //MulRKRecord($r);
                // Get the individual cell records from the multiple record
                //$num = ;
                break;
            case Spreadsheet_Excel_Reader_Type_NUMBER:
                $row = ord($this->data[$spos]) | ord($this->data[$spos+1])<<8;
                $column = ord($this->data[$spos+2]) | ord($this->data[$spos+3])<<8;
                $tmp = unpack("d", substr($this->data, $spos + 6, 8)); // It machine machine dependent
                if ($this->isDate($spos)) {
                    list($string, $raw) = $this->createDate($tmp['']);
                    // $this->addcell(DateRecord($r, 1));
                }else{
                    $raw = $tmp[''];
                    if (isset($this->_columnsFormat[$column + 1])){
                        $this->curformat = $this->_columnsFormat[$column + 1];
                    }
                    $string = sprintf($this->curformat, $tmp[''] * $this->multiplier);
                    // $this->addcell(NumberRecord($r));
                }
                $this->addcell($row, $column, $string, $raw);
                //echo "Number $row $column $string\n";
                break;
            case Spreadsheet_Excel_Reader_Type_BOOLERR:
            // NOTE(review): bare Type_BOOLERR below is an undefined constant
            // (the prefixed case above already matches) — confirm and remove.
            case Type_BOOLERR:
                $row = ord($this->data[$spos]) | ord($this->data[$spos+1])<<8;
                $column = ord($this->data[$spos+2]) | ord($this->data[$spos+3])<<8;
                $string = ord($this->data[$spos+6]);
                $this->addcell($row, $column, $string);
                //echo 'Type_BOOLERR '."\n";
                break;
            case Spreadsheet_Excel_Reader_Type_ROW:
            case Spreadsheet_Excel_Reader_Type_DBCELL:
            case Spreadsheet_Excel_Reader_Type_MULBLANK:
                // Structural records carrying no cell values: skipped.
                break;
            case Spreadsheet_Excel_Reader_Type_LABEL:
                $row = ord($this->data[$spos]) | ord($this->data[$spos+1])<<8;
                $column = ord($this->data[$spos+2]) | ord($this->data[$spos+3])<<8;
                $this->addcell($row, $column, substr($this->data, $spos + 8, ord($this->data[$spos + 6]) | ord($this->data[$spos + 7])<<8));
                // $this->addcell(LabelRecord($r));
                break;
            case Spreadsheet_Excel_Reader_Type_EOF:
                $cont = false;
                break;
            default:
                //echo ' unknown :'.base_convert($r['code'],10,16)."\n";
                break;
        }
        $spos += $length;
    }
}
// Return true when the cell record starting at byte offset $spos is
// formatted as a date, per the XF record table parsed earlier.
// Side effects: always sets $this->curformat and $this->rectype; for the
// built-in XF indices 0x9/0xa it also sets $this->multiplier to 100
// (presumably the built-in percent formats -- TODO confirm).
function isDate($spos){
    //$xfindex = GetInt2d(, 4);
    // Little-endian 16-bit XF (extended format) index at offset 4 of the record.
    $xfindex = ord($this->data[$spos+4]) | ord($this->data[$spos+5]) << 8;
    //echo 'check is date '.$xfindex.' '.$this->formatRecords['xfrecords'][$xfindex]['type']."\n";
    //var_dump($this->formatRecords['xfrecords'][]);
    if ($this->formatRecords['xfrecords'][$xfindex]['type'] == 'date') {
        $this->curformat = $this->formatRecords['xfrecords'][$xfindex]['format'];
        $this->rectype = 'date';
        return true;
    } else {
        if ($this->formatRecords['xfrecords'][$xfindex]['type'] == 'number') {
            $this->curformat = $this->formatRecords['xfrecords'][$xfindex]['format'];
            $this->rectype = 'number';
            if (($xfindex == 0x9) || ($xfindex == 0xa)){
                $this->multiplier = 100;
            }
        }else{
            // Unknown format type: fall back to the default sprintf format.
            $this->curformat = $this->_defaultFormat;
            $this->rectype = 'unknown';
        }
        return false;
    }
}
// Convert an Excel serial date/time value into array($string, $raw).
// Values > 1 are treated as whole dates: shifted by the 1900- or
// 1904-epoch offset constant (selected by $this->nineteenFour) and scaled
// by Spreadsheet_Excel_Reader_msInADay, then formatted with date() using
// the current cell format. Values <= 1 are pure time-of-day fractions.
// NOTE(review): the constant names suggest milliseconds while date()
// expects a Unix timestamp in seconds -- confirm the constant's actual value.
function createDate($numValue){
    if ($numValue > 1){
        $utcDays = $numValue - ($this->nineteenFour ? Spreadsheet_Excel_Reader_utcOffsetDays1904 : Spreadsheet_Excel_Reader_utcOffsetDays);
        $utcValue = round($utcDays * Spreadsheet_Excel_Reader_msInADay);
        $string = date ($this->curformat, $utcValue);
        $raw = $utcValue;
    }else{
        // Fraction of a day: raw keeps the fraction, string gets h:m:s today.
        $raw = $numValue;
        $hours = floor($numValue * 24);
        $mins = floor($numValue * 24 * 60) - $hours * 60;
        $secs = floor($numValue * Spreadsheet_Excel_Reader_msInADay) - $hours * 60 * 60 - $mins * 60;
        $string = date ($this->curformat, mktime($hours, $mins, $secs));
    }
    return array($string, $raw);
}
// Store a formatted cell value in the current sheet ($this->sn), shifting
// to 1-based row/column indices, with optional raw value and record-type
// metadata in 'cellsInfo'.
// NOTE(review): "if ($raw)" skips falsy raw values such as 0, 0.0 or "0"
// -- confirm that dropping raw zeros is acceptable to callers.
function addcell($row, $col, $string, $raw = ''){
    //echo "ADD cel $row-$col $string\n";
    $this->sheets[$this->sn]['cells'][$row+1][$col+1] = $string;
    if ($raw)
        $this->sheets[$this->sn]['cellsInfo'][$row+1][$col+1]['raw'] = $raw;
    if (isset($this->rectype))
        $this->sheets[$this->sn]['cellsInfo'][$row+1][$col+1]['type'] = $this->rectype;
}
// Decode a BIFF "RK"-encoded number into a PHP number.
// Bit 1 of $rknum selects the payload kind: set = a 30-bit integer in the
// upper bits; clear = the upper 30 bits are the high half of an IEEE754
// double whose low 32 bits are zero. Bit 0 requests a final division by 100.
function _GetIEEE754($rknum){
    $isIntegerPayload = (bool) ($rknum & 0x02);
    if ($isIntegerPayload) {
        // Integer payload: shift the two flag bits away.
        $value = $rknum >> 2;
    } else {
        // Rebuild the double: zero low word, masked payload as high word.
        $packed = pack("VV", 0, ($rknum & 0xfffffffc));
        $decoded = unpack("d", $packed);
        $value = $decoded[''];
    }
    if ($rknum & 0x01) {
        $value /= 100;
    }
    return $value;
}
// Convert a UTF-16LE string to the configured output encoding via iconv,
// when a target encoding is set and iconv support was detected
// (Spreadsheet_Excel_Reader_HAVE_ICONV); otherwise return the input
// unchanged.
function _encodeUTF16($string){
    if ($this->_defaultEncoding){
        return (Spreadsheet_Excel_Reader_HAVE_ICONV) ? iconv('UTF-16LE', $this->_defaultEncoding, $string): $string;
    }else{
        return $string;
    }
}
// Read a little-endian 32-bit integer from $data at byte offset $pos.
// NOTE(review): on 32-bit PHP builds a value with the top bit set wraps
// negative -- confirm whether callers rely on that.
function _GetInt4d($data, $pos) {
    $b0 = ord($data[$pos]);
    $b1 = ord($data[$pos + 1]);
    $b2 = ord($data[$pos + 2]);
    $b3 = ord($data[$pos + 3]);
    return ($b3 << 24) | ($b2 << 16) | ($b1 << 8) | $b0;
}
}
| lgpl-2.1 |
xy515258/moose | framework/src/timesteppers/IterationAdaptiveDT.C | 12670 | /****************************************************************/
/* DO NOT MODIFY THIS HEADER */
/* MOOSE - Multiphysics Object Oriented Simulation Environment */
/* */
/* (c) 2010 Battelle Energy Alliance, LLC */
/* ALL RIGHTS RESERVED */
/* */
/* Prepared by Battelle Energy Alliance, LLC */
/* Under Contract No. DE-AC07-05ID14517 */
/* With the U. S. Department of Energy */
/* */
/* See COPYRIGHT for full restrictions */
/****************************************************************/
#include "IterationAdaptiveDT.h"
#include "Function.h"
#include "Piecewise.h"
#include "Transient.h"
// Declare all input-file parameters understood by IterationAdaptiveDT,
// on top of those inherited from TimeStepper.
template<>
InputParameters validParams<IterationAdaptiveDT>()
{
  InputParameters params = validParams<TimeStepper>();
  params.addClassDescription("Adjust the timestep based on the number of iterations");
  params.addParam<int>("optimal_iterations", "The target number of nonlinear iterations for adaptive timestepping");
  params.addParam<int>("iteration_window", "Attempt to grow/shrink timestep if the iteration count is below/above 'optimal_iterations plus/minus iteration_window' (default = optimal_iterations/5).");
  params.addParam<unsigned>("linear_iteration_ratio", "The ratio of linear to nonlinear iterations to determine target linear iterations and window for adaptive timestepping (default = 25)");
  params.addParam<FunctionName>("timestep_limiting_function", "A 'Piecewise' type function used to control the timestep by limiting the change in the function over a timestep");
  params.addParam<Real>("max_function_change", "The absolute value of the maximum change in timestep_limiting_function over a timestep");
  params.addParam<bool>("force_step_every_function_point", false, "Forces the timestepper to take a step that is consistent with points defined in the function");
  params.addRequiredParam<Real>("dt", "The default timestep size between solves");
  params.addParam<std::vector<Real> >("time_t", "The values of t");
  params.addParam<std::vector<Real> >("time_dt", "The values of dt");
  params.addParam<Real>("growth_factor", 2.0, "Factor to apply to timestep if easy convergence (if 'optimal_iterations' is specified) or if recovering from failed solve");
  params.addParam<Real>("cutback_factor", 0.5, "Factor to apply to timestep if difficult convergence (if 'optimal_iterations' is specified) or if solution failed");
  return params;
}
// Construct the stepper. Several members are declared "restartable" so
// the adaptive state survives restart/recover.
// NOTE(review): _optimal_iterations and _iteration_window are only
// assigned when 'optimal_iterations' is given -- confirm they are never
// read when adaptive timestepping is disabled.
IterationAdaptiveDT::IterationAdaptiveDT(const InputParameters & parameters) :
    TimeStepper(parameters),
    _dt_old(declareRestartableData<Real>("dt_old", 0.0)),
    _input_dt(getParam<Real>("dt")),
    _tfunc_last_step(declareRestartableData<bool>("tfunc_last_step", false)),
    _sync_last_step(declareRestartableData<bool>("sync_last_step", false)),
    _linear_iteration_ratio(isParamValid("linear_iteration_ratio") ? getParam<unsigned>("linear_iteration_ratio") : 25), // Default to 25
    _adaptive_timestepping(false),
    _timestep_limiting_function(NULL),
    _piecewise_timestep_limiting_function(NULL),
    _times(0),
    _max_function_change(-1),
    _force_step_every_function_point(getParam<bool>("force_step_every_function_point")),
    _tfunc_times(getParam<std::vector<Real> >("time_t").begin(), getParam<std::vector<Real> >("time_t").end()),
    _time_ipol(getParam<std::vector<Real> >("time_t"),
               getParam<std::vector<Real> >("time_dt")),
    _use_time_ipol(_time_ipol.getSampleSize() > 0),
    _growth_factor(getParam<Real>("growth_factor")),
    _cutback_factor(getParam<Real>("cutback_factor")),
    _nl_its(declareRestartableData<unsigned int>("nl_its", 0)),
    _l_its(declareRestartableData<unsigned int>("l_its", 0)),
    _cutback_occurred(declareRestartableData<bool>("cutback_occurred", false)),
    _at_function_point(false)
{
  if (isParamValid("optimal_iterations"))
  {
    // Adaptive mode: the window defaults to optimal_iterations/5, rounded up.
    _adaptive_timestepping = true;
    _optimal_iterations = getParam<int>("optimal_iterations");

    if (isParamValid("iteration_window"))
      _iteration_window = getParam<int>("iteration_window");
    else
      _iteration_window = ceil(_optimal_iterations / 5.0);
  }
  else
  {
    // Reject options that only make sense with adaptivity enabled.
    if (isParamValid("iteration_window"))
      mooseError("'optimal_iterations' must be used for 'iteration_window' to be used");
    if (isParamValid("linear_iteration_ratio"))
      mooseError("'optimal_iterations' must be used for 'linear_iteration_ratio' to be used");
  }

  // max_function_change requires the limiting function; -1 means "no limit".
  if (isParamValid("timestep_limiting_function"))
    _max_function_change = isParamValid("max_function_change") ?
      getParam<Real>("max_function_change") : -1;
  else if (isParamValid("max_function_change"))
    mooseError("'timestep_limiting_function' must be used for 'max_function_change' to be used");
}
// Nothing to release: no manually-managed resources are visible in this class.
IterationAdaptiveDT::~IterationAdaptiveDT()
{
}
// Resolve the optional timestep-limiting function. Only Piecewise-derived
// functions are accepted, because their domain (time) points are cached in
// _times for use by force_step_every_function_point.
void
IterationAdaptiveDT::init()
{
  if (isParamValid("timestep_limiting_function"))
  {
    _timestep_limiting_function = &_fe_problem.getFunction(getParam<FunctionName>("timestep_limiting_function"), isParamValid("_tid") ? getParam<THREAD_ID>("_tid") : 0);
    _piecewise_timestep_limiting_function = dynamic_cast<Piecewise*>(_timestep_limiting_function);

    if (_piecewise_timestep_limiting_function)
    {
      // Cache the function's domain points.
      unsigned int time_size = _piecewise_timestep_limiting_function->functionSize();
      _times.resize(time_size);
      for (unsigned int i = 0; i < time_size; ++i)
        _times[i] = _piecewise_timestep_limiting_function->domain(i);
    }
    else
      mooseError("timestep_limiting_function must be a Piecewise function");
  }
}
// Called once before time stepping begins: discard dt-function times that
// lie at or before the simulation start time.
void
IterationAdaptiveDT::preExecute()
{
  TimeStepper::preExecute();

  // Delete all tfunc times that are at or before the begin time
  while (!_tfunc_times.empty() && _time + _timestep_tolerance >= *_tfunc_times.begin())
    _tfunc_times.erase(_tfunc_times.begin());
}
// The first step always uses the user-supplied 'dt' parameter.
Real
IterationAdaptiveDT::computeInitialDT()
{
  return _input_dt;
}
// Compute the next timestep, starting from the previous dt and dispatching
// on how the last step ended: cutback, dt-function hit, sync-point hit,
// adaptive mode, interpolation table, or plain growth.
Real
IterationAdaptiveDT::computeDT()
{
  Real dt = _dt_old;

  if (_cutback_occurred)
  {
    _cutback_occurred = false;
    if (_adaptive_timestepping)
    {
      // Don't allow it to grow this step, but shrink if needed
      bool allowToGrow = false;
      computeAdaptiveDT(dt, allowToGrow);
    }
  }
  else if (_tfunc_last_step)
  {
    // Last step landed on a dt-function time: sample the new dt there.
    _tfunc_last_step = false;
    _sync_last_step = false;
    dt = _time_ipol.sample(_time_old);

    if (_verbose)
    {
      _console << "Setting dt to value specified by dt function: "
               << std::setw(9) << dt
               << '\n';
    }
  }
  else if (_sync_last_step)
  {
    // Last step was shortened to hit a sync point: resume the pre-sync dt.
    _sync_last_step = false;
    dt = _dt_old;

    if (_verbose)
    {
      _console << "Setting dt to value used before sync: "
               << std::setw(9) << dt
               << '\n';
    }
  }
  else if (_adaptive_timestepping)
    computeAdaptiveDT(dt);
  else if (_use_time_ipol)
    dt = computeInterpolationDT();
  else
  {
    dt *= _growth_factor;
    // NOTE(review): dt started at _dt_old, so this cap can never trigger here.
    if (dt > _dt_old * _growth_factor)
      dt = _dt_old * _growth_factor;
  }

  return dt;
}
// Apply the base-class constraints, then the function-change limit, then
// shorten dt so the step lands exactly on the next dt-function time.
// Returns the base class's "at sync point" flag.
bool
IterationAdaptiveDT::constrainStep(Real & dt)
{
  bool at_sync_point = TimeStepper::constrainStep(dt);

  // Limit the timestep to limit change in the function
  limitDTByFunction(dt);

  // Adjust to the next tfunc time if needed
  if (!_tfunc_times.empty() && _time + dt + _timestep_tolerance >= *_tfunc_times.begin())
  {
    dt = *_tfunc_times.begin() - _time;

    if (_verbose)
    {
      _console << "Limiting dt to sync with dt function time: "
               << std::setw(9) << *_tfunc_times.begin()
               << " dt: "
               << std::setw(9) << dt
               << '\n';
    }
  }

  return at_sync_point;
}
// Called after a failed solve: record that a cutback occurred and return a
// reduced dt for the retry. Errors out when dt is already at the minimum.
Real
IterationAdaptiveDT::computeFailedDT()
{
  _cutback_occurred = true;

  // Can't cut back any more
  if (_dt <= _dt_min)
    mooseError("Solve failed and timestep already at dtmin, cannot continue!");

  if (_verbose)
  {
    _console << "\nSolve failed with dt: "
             << std::setw(9) << _dt
             << "\nRetrying with reduced dt: "
             << std::setw(9) << _dt * _cutback_factor
             << '\n';
  }
  else
    _console << "\nSolve failed, cutting timestep.\n";

  return _dt * _cutback_factor;
}
// Shrink limitedDT (in place) until the limiting function changes by no
// more than _max_function_change across the step; then, if requested, clip
// the step so it lands exactly on the next Piecewise domain point.
// Side effect: sets _at_function_point for acceptStep()'s bookkeeping.
void
IterationAdaptiveDT::limitDTByFunction(Real & limitedDT)
{
  Real orig_dt = limitedDT;

  if (_timestep_limiting_function)
  {
    Point dummyPoint;
    Real oldValue = _timestep_limiting_function->value(_time_old, dummyPoint);
    Real newValue = _timestep_limiting_function->value(_time_old + limitedDT, dummyPoint);
    Real change = std::abs(newValue - oldValue);

    if (_max_function_change > 0.0 &&
        change > _max_function_change)
    {
      // Halve dt until the function change falls within the limit.
      do
      {
        limitedDT /= 2.0;
        newValue = _timestep_limiting_function->value(_time_old + limitedDT, dummyPoint);
        change = std::abs(newValue - oldValue);
      }
      while (change > _max_function_change);
    }
  }

  _at_function_point = false;
  if (_piecewise_timestep_limiting_function && _force_step_every_function_point)
  {
    // Find the domain interval containing the current time and do not step
    // past its right endpoint.
    for (unsigned int i = 0; i + 1 < _times.size(); ++i)
    {
      if (_time >= _times[i] && _time < _times[i+1])
      {
        if (limitedDT > _times[i+1] - _time - _timestep_tolerance)
        {
          limitedDT = _times[i+1] - _time;
          _at_function_point = true;
        }
        break;
      }
    }
  }

  if (_verbose && limitedDT != orig_dt)
  {
    if (_at_function_point)
      _console << "Limiting dt to match function point. dt = ";
    else
      _console << "Limiting dt to limit change in function. dt = ";
    _console << limitedDT << '\n';
  }
}
// Adjust dt based on the last step's iteration counts: grow (by
// _growth_factor) only when BOTH the nonlinear and linear counts fell below
// the lower window bound; shrink (by _cutback_factor) when EITHER exceeded
// the upper bound. The linear bounds scale by _linear_iteration_ratio.
void
IterationAdaptiveDT::computeAdaptiveDT(Real & dt, bool allowToGrow, bool allowToShrink)
{
  // Guard against unsigned underflow when the window exceeds the optimum.
  const unsigned int growth_nl_its(_optimal_iterations > _iteration_window ? _optimal_iterations - _iteration_window : 0);
  const unsigned int shrink_nl_its(_optimal_iterations + _iteration_window);
  const unsigned int growth_l_its(_optimal_iterations > _iteration_window ? _linear_iteration_ratio * (_optimal_iterations - _iteration_window) : 0);
  const unsigned int shrink_l_its(_linear_iteration_ratio*(_optimal_iterations + _iteration_window));

  if (allowToGrow && (_nl_its < growth_nl_its && _l_its < growth_l_its))
  {
    // Grow the timestep
    dt *= _growth_factor;

    if (_verbose)
    {
      _console << "Growing dt: nl its = " << _nl_its << " < " << growth_nl_its
               << " && lin its = " << _l_its << " < " << growth_l_its
               << " old dt: " << std::setw(9) << _dt_old
               << " new dt: " << std::setw(9) << dt
               << '\n';
    }
  }
  else if (allowToShrink && (_nl_its > shrink_nl_its || _l_its > shrink_l_its))
  {
    // Shrink the timestep
    dt *= _cutback_factor;

    if (_verbose)
    {
      _console << "Shrinking dt: nl its = " << _nl_its << " > " << shrink_nl_its
               << " || lin its = " << _l_its << " > " << shrink_l_its
               << " old dt: " << std::setw(9) << _dt_old
               << " new dt: " << std::setw(9) << dt
               << '\n';
    }
  }
}
// Sample the next dt from the user's (time_t, time_dt) table, capped at
// _growth_factor times the previous dt.
Real
IterationAdaptiveDT::computeInterpolationDT()
{
  Real dt = _time_ipol.sample(_time_old);

  if (dt > _dt_old * _growth_factor)
  {
    dt = _dt_old * _growth_factor;

    if (_verbose)
    {
      // NOTE(review): the message says "Growing dt" but this branch is
      // capping growth -- confirm the intended wording.
      _console << "Growing dt to recover from cutback. "
               << " old dt: " << std::setw(9) << _dt_old
               << " new dt: " << std::setw(9) << dt
               << '\n';
    }
  }

  return dt;
}
// No extra work on rejection beyond the base-class behavior.
void
IterationAdaptiveDT::rejectStep()
{
  TimeStepper::rejectStep();
}
// Bookkeeping after a converged step: consume dt-function times that were
// reached, record iteration counts for the adaptive logic, and remember
// which dt to reuse when this step was shortened by a sync/function point.
void
IterationAdaptiveDT::acceptStep()
{
  TimeStepper::acceptStep();

  // Flag an exact hit on a dt-function time and drop all times now passed.
  while (!_tfunc_times.empty() && _time + _timestep_tolerance >= *_tfunc_times.begin())
  {
    if (std::abs(_time - *_tfunc_times.begin()) <= _timestep_tolerance)
      _tfunc_last_step = true;
    _tfunc_times.erase(_tfunc_times.begin());
  }

  // Iteration counts drive computeAdaptiveDT() on the next step.
  _nl_its = _fe_problem.getNonlinearSystem().nNonlinearIterations();
  _l_its = _fe_problem.getNonlinearSystem().nLinearIterations();

  // If this step was constrained below the unconstrained dt, keep the
  // previous dt as "old dt" so the constraint doesn't shrink future steps.
  if ((_at_function_point || _executioner.atSyncPoint()) &&
      _dt + _timestep_tolerance < _executioner.unconstrainedDT())
  {
    _dt_old = _fe_problem.dtOld();
    _sync_last_step = true;

    if (_verbose)
    {
      _console << "Sync point hit in current step, using previous dt for old dt: "
               << std::setw(9) << _dt_old
               << '\n';
    }
  }
  else
    _dt_old = _dt;
}
| lgpl-2.1 |
evolvedmicrobe/beast-mcmc | src/dr/inference/distribution/ParametricMultivariateDistributionModel.java | 1378 | /*
* ParametricMultivariateDistributionModel.java
*
* Copyright (c) 2002-2012 Alexei Drummond, Andrew Rambaut and Marc Suchard
*
* This file is part of BEAST.
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership and licensing.
*
* BEAST is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* BEAST is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with BEAST; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301 USA
*/
package dr.inference.distribution;
import dr.inference.model.Model;
import dr.math.distributions.MultivariateDistribution;
/**
* A class that describes a parametric multivariate distribution
*
* @author Marc Suchard
*/
/**
 * Combines {@link MultivariateDistribution} with {@link Model}: a
 * multivariate distribution whose parameters are part of the model state.
 */
public interface ParametricMultivariateDistributionModel extends MultivariateDistribution, Model {

    /**
     * Draws one random vector from the distribution under its current
     * parameter values.
     *
     * @return a sampled vector; the length is implementation-specific
     */
    public double[] nextRandom();
}
trejkaz/swingx | swingx-core/src/test/java/org/jdesktop/swingx/JXLabelVisualCheck.java | 6627 | /*
* $Id$
*
* Copyright 2006 Sun Microsystems, Inc., 4150 Network Circle,
* Santa Clara, California 95054, U.S.A. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
package org.jdesktop.swingx;
import java.awt.Color;
import java.awt.Font;
import java.awt.event.ActionEvent;
import java.awt.font.TextAttribute;
import java.util.HashMap;
import java.util.Map;
import java.util.logging.Logger;
import javax.swing.AbstractAction;
import javax.swing.Action;
import javax.swing.Box;
import javax.swing.JComponent;
import org.jdesktop.swingx.action.AbstractActionExt;
import org.jdesktop.swingx.painter.AbstractPainter;
import org.jdesktop.swingx.painter.AlphaPainter;
import org.jdesktop.swingx.painter.CompoundPainter;
import org.jdesktop.swingx.painter.MattePainter;
import org.jdesktop.swingx.painter.ShapePainter;
import com.jhlabs.image.BlurFilter;
/**
* Base test class for JXLabel related code and issues.
*
* @author rah003
*/
@SuppressWarnings("nls")
public class JXLabelVisualCheck extends InteractiveTestCase {

    static Logger log = Logger.getAnonymousLogger();

    /**
     * Entry point: runs the interactive check named below; swap the name to
     * exercise a different check.
     */
    public static void main(String[] args) {
        JXLabelVisualCheck test = new JXLabelVisualCheck();
        try {
            test.runInteractiveTests("interactiveUnderlinedFontWithWrapping");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Example of how-to apply filters to the label's foreground.
     */
    @SuppressWarnings("unchecked")
    public void interactiveFancyFilter() {
        JXLabel label = new JXLabel("that's the real text");
        label.setFont(new Font("SansSerif", Font.BOLD, 80));
        // Blur the solid-red foreground painter.
        AbstractPainter<?> fg = new MattePainter(Color.RED);
        fg.setFilters(new BlurFilter());
        label.setForegroundPainter(fg);
        JXFrame frame = wrapInFrame(label, "fancy filter");
        show(frame,400, 400);
    }

    /**
     * Issue #??-swingx: default foreground painter not guaranteed after change.
     *
     * JXLabel restore default foreground painter.
     * Sequence:
     *   compose the default with a transparent overlay
     *   try to reset to default
     *   try to compose the overlay again.
     */
    public void interactiveRestoreDefaultForegroundPainter() {
        JComponent box = Box.createVerticalBox();
        final JXLabel foreground = new JXLabel(
                "setup: compound - default and overlay ");
        ShapePainter shapePainter = new ShapePainter();
        final AlphaPainter<?> alpha = new AlphaPainter<Object>();
        alpha.setAlpha(0.2f);
        alpha.setPainters(shapePainter);
        CompoundPainter<?> compound = new CompoundPainter<Object>(foreground
                .getForegroundPainter(), alpha);
        foreground.setForegroundPainter(compound);
        box.add(foreground);
        // Toggles between resetting to the default painter and re-composing
        // the overlay on top of whatever painter is current.
        Action action = new AbstractActionExt("reset default foreground") {
            boolean reset;
            public void actionPerformed(ActionEvent e) {
                if (reset) {
                    CompoundPainter<?> painter = new CompoundPainter<Object>(alpha, foreground.getForegroundPainter());
                    foreground.setForegroundPainter(painter);
                } else {
                    // try to reset to default
                    foreground.setForegroundPainter(null);
                }
                reset = !reset;

            }

        };
        JXFrame frame = wrapInFrame(box, "foreground painters");
        addAction(frame, action);
        frame.pack();
        frame.setVisible(true);
    }

    /**
     * Issue #1330-swingx: underlined font does not retain underline during wrapping.
     */
    public void interactiveUnderlinedFontWithWrapping() {
        final JXLabel label = new JXLabel("A really long sentence to display the text wrapping features of JXLabel.");
        // when lineWrap is true, can't see underline effects
        // when lineWrap is false, underline is ok
        label.setLineWrap(true);
        label.setBounds(31, 48, 91, 18);
        // set font underline
        Map<TextAttribute, Integer> map = new HashMap<TextAttribute, Integer>();
        map.put(TextAttribute.UNDERLINE, TextAttribute.UNDERLINE_ON);
        label.setFont(label.getFont().deriveFont(map));
        final JXFrame frame = wrapInFrame(label, "Underlined Font with wrapping");
        addAction(frame, new AbstractAction("Toggle wrapping") {
            @Override
            public void actionPerformed(ActionEvent e) {
                label.setLineWrap(!label.isLineWrap());
                frame.repaint();
            }
        });
        frame.pack();
        frame.setVisible(true);
    }

    /**
     * Issue #978: Setting background color has no effect
     */
    public void interactiveBackgroundColorSetting() {
        final JXLabel label = new JXLabel("A simple label.");
        label.setOpaque(true);
        label.setBackground(Color.CYAN);
        showInFrame(label, "Background Color Check");
    }

    /**
     * Ensure background painter is always painted.
     */
    public void interactiveBackgroundPainter() {
        JComponent box = Box.createVerticalBox();
        ShapePainter shapePainter = new ShapePainter();
        // Same painter under three opacity settings for visual comparison.
        JXLabel opaqueTrue = new JXLabel("setup: backgroundPainter, opaque = true");
        opaqueTrue.setOpaque(true);
        opaqueTrue.setBackgroundPainter(shapePainter);
        box.add(opaqueTrue);
        JXLabel opaqueFalse = new JXLabel("setup: backgroundPainter, opaque = false");
        opaqueFalse.setOpaque(false);
        opaqueFalse.setBackgroundPainter(shapePainter);
        box.add(opaqueFalse);
        JXLabel opaqueUnchanged = new JXLabel("setup: backgroundPainter, opaque = unchanged");
        opaqueUnchanged.setBackgroundPainter(shapePainter);
        box.add(opaqueUnchanged);
        showInFrame(box, "background painters");
    }

}
| lgpl-2.1 |
gaoxiaojun/QtRIA | src/plugins/texteditor/generichighlighter/itemdata.cpp | 3771 | /****************************************************************************
**
** Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
** Contact: http://www.qt-project.org/legal
**
** This file is part of Qt Creator.
**
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Digia. For licensing terms and
** conditions see http://qt.digia.com/licensing. For further information
** use the contact form at http://qt.digia.com/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 2.1 requirements
** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Digia gives you certain additional
** rights. These rights are described in the Digia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
****************************************************************************/
#include "itemdata.h"
#include "reuse.h"
using namespace TextEditor;
using namespace Internal;
// All font attributes start "off" and unspecified; m_isCustomized only
// becomes true once a setter receives a non-empty value.
ItemData::ItemData() :
    m_italic(false),
    m_italicSpecified(false),
    m_bold(false),
    m_boldSpecified(false),
    m_underlined(false),
    m_underlinedSpecified(false),
    m_strikedOut(false),
    m_strikeOutSpecified(false),
    m_isCustomized(false)
{}
// Style name is stored verbatim (an empty value is allowed).
void ItemData::setStyle(const QString &style)
{ m_style = style; }

const QString &ItemData::style() const
{ return m_style; }

// Color setters ignore empty strings; any non-empty value is parsed via
// QColor::setNamedColor and marks the item data as customized.
void ItemData::setColor(const QString &color)
{
    if (!color.isEmpty()) {
        m_color.setNamedColor(color);
        m_isCustomized = true;
    }
}

const QColor &ItemData::color() const
{ return m_color; }

void ItemData::setSelectionColor(const QString &color)
{
    if (!color.isEmpty()) {
        m_selectionColor.setNamedColor(color);
        m_isCustomized = true;
    }
}

const QColor &ItemData::selectionColor() const
{ return m_selectionColor; }
// Font-attribute setters share one pattern: an empty string leaves the
// attribute unspecified; otherwise the value is parsed as a boolean via
// toBool(), the corresponding "specified" flag is raised, and the item
// data is marked customized.
void ItemData::setItalic(const QString &italic)
{
    if (!italic.isEmpty()) {
        m_italic = toBool(italic);
        m_italicSpecified = true;
        m_isCustomized = true;
    }
}

bool ItemData::isItalic() const
{ return m_italic; }

bool ItemData::isItalicSpecified() const
{ return m_italicSpecified; }

void ItemData::setBold(const QString &bold)
{
    if (!bold.isEmpty()) {
        m_bold = toBool(bold);
        m_boldSpecified = true;
        m_isCustomized = true;
    }
}

bool ItemData::isBold() const
{ return m_bold; }

bool ItemData::isBoldSpecified() const
{ return m_boldSpecified; }
// Underline/strike-out follow the same empty-means-unspecified pattern as
// the italic/bold setters above.
void ItemData::setUnderlined(const QString &underlined)
{
    if (!underlined.isEmpty()) {
        m_underlined = toBool(underlined);
        m_underlinedSpecified = true;
        m_isCustomized = true;
    }
}

bool ItemData::isUnderlined() const
{ return m_underlined; }

bool ItemData::isUnderlinedSpecified() const
{ return m_underlinedSpecified; }

void ItemData::setStrikeOut(const QString &strike)
{
    if (!strike.isEmpty()) {
        m_strikedOut = toBool(strike);
        m_strikeOutSpecified = true;
        m_isCustomized = true;
    }
}

bool ItemData::isStrikeOut() const
{ return m_strikedOut; }

bool ItemData::isStrikeOutSpecified() const
{ return m_strikeOutSpecified; }

// True once any setter received a non-empty value.
bool ItemData::isCustomized() const
{ return m_isCustomized; }
| lgpl-2.1 |
joshua-cogliati-inl/moose | unit/src/BrentsMethodTest.C | 3068 | /****************************************************************/
/* DO NOT MODIFY THIS HEADER */
/* MOOSE - Multiphysics Object Oriented Simulation Environment */
/* */
/* (c) 2010 Battelle Energy Alliance, LLC */
/* ALL RIGHTS RESERVED */
/* */
/* Prepared by Battelle Energy Alliance, LLC */
/* Under Contract No. DE-AC07-05ID14517 */
/* With the U. S. Department of Energy */
/* */
/* See COPYRIGHT for full restrictions */
/****************************************************************/
#include "BrentsMethodTest.h"
#include "BrentsMethod.h"
CPPUNIT_TEST_SUITE_REGISTRATION(BrentsMethodTest);

// Test function whose root is sought by the bracket()/root() checks:
//   f(x) = ln(1 + x) * tanh(x / 3) + x / 4 - 3
Real
BrentsMethodTest::f(Real x) const
{
  const Real log_term = std::log(1.0 + x);
  const Real tanh_term = std::tanh(x / 3.0);
  const Real linear_term = x / 4.0;
  return log_term * tanh_term + linear_term - 3;
}
// Verify that BrentsMethod::bracket() expands an initial guess into an
// interval that brackets a root, and that bad inputs raise the expected
// error messages.
// NOTE(review): if the error-triggering calls do not throw, the catch
// blocks never run and those sub-checks pass vacuously -- confirm that is
// intended (a CPPUNIT_FAIL after each call would make them strict).
void
BrentsMethodTest::bracket()
{
  // Initial guess for bracketing interval does not bracket root
  Real x1 = 0.5;
  Real x2 = 1.0;

  auto func = [this](Real x)
  {return this->f(x);};

  // Call bracket to determine the bracketing interval
  BrentsMethod::bracket(func, x1, x2);

  // The product of the function f(x) at the bracketing interval (x1, x2) must
  // be negative to bracket a root
  CPPUNIT_ASSERT(f(x1) * f(x2) < 0.0);

  // Test that a warning is thrown if the initial guesses are equal
  try
  {
    // Trigger identical initial guess error
    BrentsMethod::bracket(func, x1, x1);
  }
  catch(const std::exception & e)
  {
    std::string msg(e.what());
    CPPUNIT_ASSERT(msg.find("Bad initial range (0) used in BrentsMethod::bracket") != std::string::npos);
  }

  // Test that a warning is thrown if no bracketing interval is found after 50 iterations.
  try
  {
    // Trigger no bracketing interval warning by adding 4 to f(x), whereby no real root exists
    auto func2 = [this](Real x)
    {return this->f(x) + 4.0;};

    BrentsMethod::bracket(func2, x1, x2);
  }
  catch(const std::exception & e)
  {
    std::string msg(e.what());
    CPPUNIT_ASSERT(msg.find("No bracketing interval found by BrentsMethod::bracket after 50 iterations") != std::string::npos);
  }
}
void
BrentsMethodTest::root()
{
// Bracketing interval that does bracket root
Real x1 = 0.5;
Real x2 = 10.0;
auto func = [this](Real x)
{return this->f(x);};
// Check that the root is 5.170302597
CPPUNIT_ASSERT_DOUBLES_EQUAL(5.170302597, BrentsMethod::root(func, x1, x2), 1.0E-8);
// Test that a warning is thrown if the supplied interval does not bracket the root
try
{
// Trigger no bracketing interval error
x2 = 1.0;
Real x = BrentsMethod::root(func, x1, x2);
}
catch(const std::exception & e)
{
std::string msg(e.what());
CPPUNIT_ASSERT(msg.find("Root must be bracketed in BrentsMethod::root") != std::string::npos);
}
}
| lgpl-2.1 |
sebastienhupin/qxrad | qooxdoo/framework/source/class/qx/test/bom/__init__.js | 35 | /**
* Test classes for qx.bom
*/
| lgpl-3.0 |
fevangelista/psi4 | psi4/src/psi4/fnocc/lowmemory_triples.cc | 22514 | /*
* @BEGIN LICENSE
*
* Psi4: an open-source quantum chemistry software package
*
* Copyright (c) 2007-2021 The Psi4 Developers.
*
* The copyrights for code used from other parties are included in
* the corresponding files.
*
* This file is part of Psi4.
*
* Psi4 is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, version 3.
*
* Psi4 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License along
* with Psi4; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* @END LICENSE
*/
#include <ctime>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "psi4/libmints/wavefunction.h"
#include "psi4/liboptions/liboptions.h"
#include "psi4/libpsi4util/process.h"
#include "psi4/libqt/qt.h"
#include "blas.h"
#include "ccsd.h"
namespace psi {
namespace fnocc {
PsiReturnType CoupledCluster::lowmemory_triples() {
auto *name = new char[10];
auto *space = new char[10];
double fac;
if (ccmethod == 0) {
sprintf(name, "CCSD");
sprintf(space, " ");
fac = 1.0;
} else if (ccmethod == 1) {
sprintf(name, "QCISD");
sprintf(space, " ");
fac = 2.0;
} else {
sprintf(name, "MP4");
sprintf(space, " ");
fac = 0.0;
}
outfile->Printf("\n");
outfile->Printf(" *******************************************************\n");
outfile->Printf(" * *\n");
outfile->Printf(" * %8s(T) *\n", name);
outfile->Printf(" * *\n");
outfile->Printf(" *******************************************************\n");
outfile->Printf("\n");
outfile->Printf("\n");
outfile->Printf(" Using low-memory algorithm.\n");
outfile->Printf("\n");
long int o = ndoccact;
long int v = nvirt_no;
long int oo = o * o;
long int vo = v * o;
long int ooo = o * o * o;
long int voo = v * o * o;
long int vvo = v * v * o;
long int vooo = v * o * o * o;
long int vvoo = v * v * o * o;
double *F = eps;
double *E2ijak, **E2abci;
// CDS // E2ijak = (double*)malloc(o*o*o*v*sizeof(double));
E2ijak = (double *)malloc(vooo * sizeof(double));
int nthreads = 1;
#ifdef _OPENMP
nthreads = Process::environment.get_n_threads();
#endif
long int memory = Process::environment.get_memory();
if (options_["MEMORY"].has_changed()) {
memory = options_.get_int("MEMORY");
memory *= (long int)1024 * 1024;
}
// CDS // memory -= 8L*(2L*o*o*v*v+o*o*o*v+o*v+5L*nthreads*o*o*o);
long int memory_reqd = 8L * (2L * vvoo + vooo + vo + 5L * nthreads * ooo);
outfile->Printf(" num_threads: %9i\n", nthreads);
outfile->Printf(" available memory: %9.2lf mb\n", (double)memory / 1024. / 1024.);
outfile->Printf(" memory requirements: %9.2lf mb\n", (double)memory_reqd / 1024. / 1024.);
outfile->Printf("\n");
bool threaded = true;
/*
if (memory_reqd > memory){
// CDS // memory += (nthreads-1)*8L*5L*ooo;
if (nthreads==1){
outfile->Printf(" Error: not enough memory.\n");
outfile->Printf("\n");
outfile->Printf(" (T) requires at least %7.2lf mb\n",
(double)(2.*o*o*v*v+1.*o*o*o*v+5.*o*o*o+1.*o*v)/1024./1024.);
outfile->Printf("\n");
return Failure;
}
threaded = false;
nthreads = 1;
outfile->Printf(" Not enough memory for explicit threading ... \n");
outfile->Printf("\n");
outfile->Printf(" memory requirements = %9.2lf mb\n",
8.*(2.*o*o*v*v+1.*o*o*o*v+(5.)*o*o*o+1.*o*v)/1024./1024.);
outfile->Printf("\n");
}
*/
// CDS updated
if (memory_reqd > memory) {
outfile->Printf(" Not enough memory for requested threading ...\n");
outfile->Printf("\n");
long int min_memory_reqd = 8L * (2L * vvoo + vooo + vo + 5L * ooo);
if (min_memory_reqd > memory) {
outfile->Printf(" Sorry, not even enough memory for 1 thread.\n");
delete[] name;
delete[] space;
free(E2ijak);
return Failure;
}
long int mem_leftover = memory - min_memory_reqd;
auto extra_threads = (int)(mem_leftover / 5L * ooo);
nthreads = 1 + extra_threads;
outfile->Printf(" Attempting to proceed with %d threads\n", nthreads);
}
E2abci = (double **)malloc(nthreads * sizeof(double *));
// some o^3 intermediates
double **Z = (double **)malloc(nthreads * sizeof(double *));
double **Z2 = (double **)malloc(nthreads * sizeof(double *));
double **Z3 = (double **)malloc(nthreads * sizeof(double *));
double **Z4 = (double **)malloc(nthreads * sizeof(double *));
auto psio = std::make_shared<PSIO>();
double *tempE2 = (double *)malloc(vooo * sizeof(double));
psio->open(PSIF_DCC_IJAK, PSIO_OPEN_OLD);
psio->read_entry(PSIF_DCC_IJAK, "E2ijak", (char *)&tempE2[0], vooo * sizeof(double));
psio->close(PSIF_DCC_IJAK, 1);
for (long int i = 0; i < ooo; i++) {
for (long int a = 0; a < v; a++) {
E2ijak[a * ooo + i] = tempE2[i * v + a];
}
}
free(tempE2);
long int dim = ooo > vo ? ooo : vo;
for (int i = 0; i < nthreads; i++) {
E2abci[i] = (double *)malloc(dim * sizeof(double));
Z[i] = (double *)malloc(ooo * sizeof(double));
Z2[i] = (double *)malloc(ooo * sizeof(double));
Z3[i] = (double *)malloc(ooo * sizeof(double));
Z4[i] = (double *)malloc(ooo * sizeof(double));
}
double *tempt = (double *)malloc(vvoo * sizeof(double));
if (t2_on_disk) {
// CDS // tb = (double*)malloc((long int)o*o*v*v*sizeof(double));
tb = (double *)malloc(vvoo * sizeof(double));
psio->open(PSIF_DCC_T2, PSIO_OPEN_OLD);
psio->read_entry(PSIF_DCC_T2, "t2", (char *)&tb[0], vvoo * sizeof(double));
psio->close(PSIF_DCC_T2, 1);
}
if (ccmethod == 2) {
psio->open(PSIF_DCC_T2, PSIO_OPEN_OLD);
psio->read_entry(PSIF_DCC_T2, "first", (char *)&tb[0], vvoo * sizeof(double));
psio->close(PSIF_DCC_T2, 1);
}
C_DCOPY(vvoo, tb, 1, tempt, 1);
// might as well use t2's memory
double *E2klcd = tb;
psio->open(PSIF_DCC_IAJB, PSIO_OPEN_OLD);
psio->read_entry(PSIF_DCC_IAJB, "E2iajb", (char *)&E2klcd[0], vvoo * sizeof(double));
psio->close(PSIF_DCC_IAJB, 1);
double *etrip = (double *)malloc(nthreads * sizeof(double));
for (int i = 0; i < nthreads; i++) etrip[i] = 0.0;
std::time_t stop, start = std::time(nullptr);
int pct10, pct20, pct30, pct40, pct50, pct60, pct70, pct80, pct90;
pct10 = pct20 = pct30 = pct40 = pct50 = pct60 = pct70 = pct80 = pct90 = 0;
long int nabc = 0;
for (long int a = 0; a < v; a++) {
for (long int b = 0; b <= a; b++) {
for (long int c = 0; c <= b; c++) {
nabc++;
}
}
}
long int **abc = (long int **)malloc(nabc * sizeof(long int *));
nabc = 0;
for (long int a = 0; a < v; a++) {
for (long int b = 0; b <= a; b++) {
for (long int c = 0; c <= b; c++) {
abc[nabc] = (long int *)malloc(3 * sizeof(long int));
abc[nabc][0] = a;
abc[nabc][1] = b;
abc[nabc][2] = c;
nabc++;
}
}
}
outfile->Printf(" Number of abc combinations: %i\n", nabc);
outfile->Printf("\n");
for (int i = 0; i < nthreads; i++) etrip[i] = 0.0;
outfile->Printf(" Computing (T) correction...\n");
outfile->Printf("\n");
outfile->Printf(" %% complete total time\n");
/**
* if there is enough memory to explicitly thread, do so
*/
std::vector<std::shared_ptr<PSIO> > mypsio;
for (int i = 0; i < nthreads; i++) {
mypsio.push_back(std::make_shared<PSIO>());
mypsio[i]->open(PSIF_DCC_ABCI4, PSIO_OPEN_OLD);
}
if (threaded) {
#pragma omp parallel for schedule(dynamic) num_threads(nthreads)
for (long int ind = 0; ind < nabc; ind++) {
long int a = abc[ind][0];
long int b = abc[ind][1];
long int c = abc[ind][2];
int thread = 0;
#ifdef _OPENMP
thread = omp_get_thread_num();
#endif
// auto mypsio = std::make_shared<PSIO>();
// mypsio->open(PSIF_DCC_ABCI4,PSIO_OPEN_OLD);
psio_address addr = psio_get_address(PSIO_ZERO, (b * vvo + c * vo) * sizeof(double));
mypsio[thread]->read(PSIF_DCC_ABCI4, "E2abci4", (char *)&E2abci[thread][0], vo * sizeof(double), addr,
&addr);
// (1)
F_DGEMM('t', 't', o, oo, v, 1.0, E2abci[thread], v, tempt + a * voo, oo, 0.0, Z[thread], o);
// (ikj)(acb)
F_DGEMM('t', 'n', o, oo, o, -1.0, tempt + c * voo + a * oo, o, E2ijak + b * ooo, o, 1.0, Z[thread], o);
addr = psio_get_address(PSIO_ZERO, (a * vvo + c * vo) * sizeof(double));
mypsio[thread]->read(PSIF_DCC_ABCI4, "E2abci4", (char *)&E2abci[thread][0], vo * sizeof(double), addr,
&addr);
//(ab)(ij)
F_DGEMM('t', 't', o, oo, v, 1.0, E2abci[thread], v, tempt + b * voo, oo, 0.0, Z2[thread], o);
//(ab)(ij)
F_DGEMM('t', 'n', o * o, o, o, -1.0, E2ijak + c * ooo, o, tempt + b * voo + a * oo, o, 1.0, Z2[thread], oo);
for (long int i = 0; i < o; i++) {
for (long int j = 0; j < o; j++) {
C_DAXPY(o, 1.0, Z2[thread] + j * oo + i * o, 1, Z[thread] + i * oo + j * o, 1);
}
}
addr = psio_get_address(PSIO_ZERO, (c * vvo + b * vo) * sizeof(double));
mypsio[thread]->read(PSIF_DCC_ABCI4, "E2abci4", (char *)&E2abci[thread][0], vo * sizeof(double), addr,
&addr);
//(bc)(jk)
F_DGEMM('t', 't', o, oo, v, 1.0, E2abci[thread], v, tempt + a * voo, oo, 0.0, Z2[thread], o);
//(bc)(jk)
F_DGEMM('t', 'n', oo, o, o, -1.0, E2ijak + b * ooo, o, tempt + a * voo + c * oo, o, 1.0, Z2[thread], oo);
for (long int i = 0; i < o; i++) {
for (long int j = 0; j < o; j++) {
C_DAXPY(o, 1.0, Z2[thread] + i * oo + j, o, Z[thread] + i * oo + j * o, 1);
}
}
addr = psio_get_address(PSIO_ZERO, (b * vvo + a * vo) * sizeof(double));
mypsio[thread]->read(PSIF_DCC_ABCI4, "E2abci4", (char *)&E2abci[thread][0], vo * sizeof(double), addr,
&addr);
//(ac)(ik)
F_DGEMM('t', 't', o, oo, v, 1.0, E2abci[thread], v, tempt + c * voo, oo, 0.0, Z2[thread], o);
//(ac)(ik)
F_DGEMM('t', 'n', oo, o, o, -1.0, E2ijak + a * ooo, o, tempt + c * voo + b * oo, o, 1.0, Z2[thread], oo);
//(1)
F_DGEMM('t', 't', o, oo, o, -1.0, tempt + a * voo + b * oo, o, E2ijak + c * ooo, oo, 1.0, Z2[thread], o);
for (long int i = 0; i < o; i++) {
for (long int j = 0; j < o; j++) {
for (long int k = 0; k < o; k++) {
Z[thread][i * oo + j * o + k] += Z2[thread][k * oo + j * o + i];
}
}
}
addr = psio_get_address(PSIO_ZERO, (c * vvo + a * vo) * sizeof(double));
mypsio[thread]->read(PSIF_DCC_ABCI4, "E2abci4", (char *)&E2abci[thread][0], vo * sizeof(double), addr,
&addr);
//(ijk)(abc)
F_DGEMM('t', 't', o, oo, v, 1.0, E2abci[thread], v, tempt + b * voo, oo, 0.0, Z2[thread], o);
F_DGEMM('t', 'n', oo, o, o, -1.0, E2ijak + a * ooo, o, tempt + b * voo + c * oo, o, 1.0, Z2[thread], oo);
//(ijk)(abc)
//(ikj)(acb)
addr = psio_get_address(PSIO_ZERO, (a * vvo + b * vo) * sizeof(double));
mypsio[thread]->read(PSIF_DCC_ABCI4, "E2abci4", (char *)&E2abci[thread][0], o * v * sizeof(double), addr,
&addr);
F_DGEMM('n', 'n', oo, o, v, 1.0, tempt + c * voo, oo, E2abci[thread], v, 1.0, Z2[thread], oo);
for (long int i = 0; i < o; i++) {
for (long int j = 0; j < o; j++) {
for (long int k = 0; k < o; k++) {
Z[thread][i * oo + j * o + k] += Z2[thread][j * oo + k * o + i];
}
}
}
C_DCOPY(ooo, Z[thread], 1, Z2[thread], 1);
double dabc = -F[a + o] - F[b + o] - F[c + o];
for (long int i = 0; i < o; i++) {
double dabci = dabc + F[i];
for (long int j = 0; j < o; j++) {
double dabcij = dabci + F[j];
for (long int k = 0; k < o; k++) {
double denom = dabcij + F[k];
Z[thread][i * oo + j * o + k] /= denom;
}
}
}
for (long int i = 0; i < o; i++) {
double tai = t1[a * o + i];
for (long int j = 0; j < o; j++) {
double tbj = t1[b * o + j];
double E2iajb = E2klcd[i * vvo + a * vo + j * v + b];
for (long int k = 0; k < o; k++) {
Z2[thread][i * oo + j * o + k] +=
fac * (tai * E2klcd[j * vvo + b * vo + k * v + c] +
tbj * E2klcd[i * vvo + a * vo + k * v + c] + t1[c * o + k] * E2iajb);
}
}
}
C_DCOPY(ooo, Z[thread], 1, Z3[thread], 1);
for (long int i = 0; i < o; i++) {
for (long int j = 0; j < o; j++) {
for (long int k = 0; k < o; k++) {
Z3[thread][i * oo + j * o + k] *= (1.0 + 0.5 * (i == j) * (j == k));
}
}
}
long int abcfac = (2 - ((a == b) + (b == c) + (a == c)));
// contribute to energy:
double tripval = 0.0;
for (long int i = 0; i < o; i++) {
double dum = 0.0;
for (long int j = 0; j < o; j++) {
for (long int k = 0; k < o; k++) {
long int ijk = i * oo + j * o + k;
dum += Z3[thread][ijk] * Z2[thread][ijk];
}
}
tripval += dum;
}
etrip[thread] += 3.0 * tripval * abcfac;
// Z3(ijk) = -2(Z(ijk) + jki + kij) + ikj + jik + kji
for (long int i = 0; i < o; i++) {
for (long int j = 0; j < o; j++) {
for (long int k = 0; k < o; k++) {
long int ijk = i * oo + j * o + k;
long int jki = j * oo + k * o + i;
long int kij = k * oo + i * o + j;
long int ikj = i * oo + k * o + j;
long int jik = j * oo + i * o + k;
long int kji = k * oo + j * o + i;
Z3[thread][ijk] = -2.0 * (Z[thread][ijk] + Z[thread][jki] + Z[thread][kij]) + Z[thread][ikj] +
Z[thread][jik] + Z[thread][kji];
}
}
}
for (long int i = 0; i < o; i++) {
for (long int j = 0; j < o; j++) {
for (long int k = 0; k < o; k++) {
long int ijk = i * oo + j * o + k;
long int ikj = i * oo + k * o + j;
E2abci[thread][ijk] = Z2[thread][ikj] * 0.5 * (1.0 + 0.5 * (i == j) * (j == k));
}
}
}
// contribute to energy:
tripval = 0.0;
for (long int i = 0; i < o; i++) {
double dum = 0.0;
for (long int j = 0; j < o; j++) {
for (long int k = 0; k < o; k++) {
long int ijk = i * oo + j * o + k;
dum += E2abci[thread][ijk] * Z3[thread][ijk];
}
}
tripval += dum;
}
etrip[thread] += tripval * abcfac;
// the second bit
for (long int i = 0; i < o; i++) {
for (long int j = 0; j < o; j++) {
for (long int k = 0; k < o; k++) {
long int ijk = i * oo + j * o + k;
E2abci[thread][ijk] = Z2[thread][ijk] * 0.5 * (1.0 + 0.5 * (i == j) * (j == k));
}
}
}
// Z4 = Z(ijk)+jki+kij - 2( (ikj)+(jik)+(kji) )
for (long int i = 0; i < o; i++) {
for (long int j = 0; j < o; j++) {
for (long int k = 0; k < o; k++) {
long int ijk = i * oo + j * o + k;
long int jki = j * oo + k * o + i;
long int kij = k * oo + i * o + j;
long int ikj = i * oo + k * o + j;
long int jik = j * oo + i * o + k;
long int kji = k * oo + j * o + i;
Z4[thread][ijk] = Z[thread][ijk] + Z[thread][jki] + Z[thread][kij] -
2.0 * (Z[thread][ikj] + Z[thread][jik] + Z[thread][kji]);
}
}
}
// contribute to energy:
tripval = 0.0;
for (long int i = 0; i < o; i++) {
double dum = 0.0;
for (long int j = 0; j < o; j++) {
for (long int k = 0; k < o; k++) {
long int ijk = i * oo + j * o + k;
dum += Z4[thread][ijk] * E2abci[thread][ijk];
}
}
tripval += dum;
}
etrip[thread] += tripval * abcfac;
// print out update
if (thread == 0) {
int print = 0;
stop = std::time(nullptr);
if ((double)ind / nabc >= 0.1 && !pct10) {
pct10 = 1;
print = 1;
} else if ((double)ind / nabc >= 0.2 && !pct20) {
pct20 = 1;
print = 1;
} else if ((double)ind / nabc >= 0.3 && !pct30) {
pct30 = 1;
print = 1;
} else if ((double)ind / nabc >= 0.4 && !pct40) {
pct40 = 1;
print = 1;
} else if ((double)ind / nabc >= 0.5 && !pct50) {
pct50 = 1;
print = 1;
} else if ((double)ind / nabc >= 0.6 && !pct60) {
pct60 = 1;
print = 1;
} else if ((double)ind / nabc >= 0.7 && !pct70) {
pct70 = 1;
print = 1;
} else if ((double)ind / nabc >= 0.8 && !pct80) {
pct80 = 1;
print = 1;
} else if ((double)ind / nabc >= 0.9 && !pct90) {
pct90 = 1;
print = 1;
}
if (print) {
outfile->Printf(" %3.1lf %8d s\n", 100.0 * ind / nabc, (int)stop - (int)start);
}
}
// mypsio->close(PSIF_DCC_ABCI4,1);
// mypsio.reset();
}
} else {
outfile->Printf("on the to do pile!\n");
delete[] name;
delete[] space;
free(E2ijak);
for (int i = 0; i < nthreads; i++) free(Z4[i]);
free(Z4);
free(etrip);
nabc = 0;
for (long int a = 0; a < v; a++)
for (long int b = 0; b <= a; b++)
for (long int c = 0; c <= b; c++) free(abc[nabc++]);
free(abc);
return Failure;
}
for (int i = 0; i < nthreads; i++) {
mypsio[i]->close(PSIF_DCC_ABCI4, 1);
}
double myet = 0.0;
for (int i = 0; i < nthreads; i++) myet += etrip[i];
// ccsd(t) or qcisd(t)
if (ccmethod <= 1) {
et = myet;
outfile->Printf("\n");
outfile->Printf(" (T) energy %s %20.12lf\n", space, et);
outfile->Printf("\n");
outfile->Printf(" %s(T) correlation energy %20.12lf\n", name, eccsd + et);
outfile->Printf(" * %s(T) total energy %20.12lf\n", name, eccsd + et + escf);
outfile->Printf("\n");
} else {
emp4_t = myet;
outfile->Printf("\n");
outfile->Printf(" MP4(T) correlation energy: %20.12lf\n", emp4_t);
outfile->Printf("\n");
outfile->Printf(" MP4(SDTQ) correlation energy: %20.12lf\n",
emp2 + emp3 + emp4_sd + emp4_q + emp4_t);
outfile->Printf(" * MP4(SDTQ) total energy: %20.12lf\n",
emp2 + emp3 + emp4_sd + emp4_q + emp4_t + escf);
outfile->Printf("\n");
}
delete[] name;
delete[] space;
// free memory:
free(E2ijak);
for (int i = 0; i < nthreads; i++) {
free(E2abci[i]);
free(Z[i]);
free(Z2[i]);
free(Z3[i]);
free(Z4[i]);
}
free(Z);
free(Z2);
free(Z3);
free(Z4);
free(E2abci);
free(etrip);
nabc = 0;
for (long int a = 0; a < v; a++)
for (long int b = 0; b <= a; b++)
for (long int c = 0; c <= b; c++) free(abc[nabc++]);
free(abc);
return Success;
}
}
} // end of namespaces
| lgpl-3.0 |
jstrobl/rts2 | src/pluto/out_comp.cpp | 1564 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
/* Returns 1 if 'buff' looks like an ephemeris position line:  it must be
longer than 73 characters, have a control character (line terminator) at
column 73, and decimal points at the fixed x/y/z coordinate columns. */
static int is_position_line( const char *buff)
{
   if( strlen( buff) <= 73)
      return( 0);
   if( buff[73] >= ' ')          /* column 73 must be a line terminator */
      return( 0);
   return( buff[29] == '.' && buff[46] == '.' && buff[63] == '.');
}
/* Compares two ephemeris output files line by line.  For each pair of
lines that both look like position lines, the squared distance between
the (x, y, z) coordinates is computed;  the program reports how many
lines were read, how many held positions, and the line number with the
largest positional difference (in km).  Exits with -1 on usage or
file-open errors. */
int main( const int argc, const char **argv)
{
   FILE *ifile1, *ifile2;
   char buff1[80], buff2[80];
   int line = 0, n_valid = 0, worst_line = -1;
   double max_diff2 = 0.;

   if( argc < 3)        /* fix: argv[1]/argv[2] were dereferenced unchecked */
      {
      printf( "Usage: out_comp <file1> <file2>\n");
      exit( -1);
      }
   ifile1 = fopen( argv[1], "rb");
   ifile2 = fopen( argv[2], "rb");
   if( !ifile1)
      printf( "%s not opened\n", argv[1]);
   if( !ifile2)
      printf( "%s not opened\n", argv[2]);
   if( !ifile1 || !ifile2)
      exit( -1);
   while( fgets( buff1, sizeof( buff1), ifile1) &&
          fgets( buff2, sizeof( buff2), ifile2))
      {
      line++;
      if( is_position_line( buff1) && is_position_line( buff2))
         {
         double diff2 = 0., delta;
         int i;

         n_valid++;
                  /* x, y, z fields start at fixed columns 22, 39, 56 */
         for( i = 22; i < 60; i += 17)
            {
            delta = atof( buff1 + i) - atof( buff2 + i);
            diff2 += delta * delta;
            }
         if( diff2 > max_diff2)
            {
            max_diff2 = diff2;
            worst_line = line;
            }
         }
      }
   fclose( ifile1);
   fclose( ifile2);
   printf( "%d lines read in; %d had positions\n", line, n_valid);
   printf( "Max difference: %.8lf km at line %d\n",
               sqrt( max_diff2), worst_line);
   return( 0);
}
| lgpl-3.0 |
ninazeina/SXP | src/main/java/crypt/base/AbstractAsymKey.java | 649 | package crypt.base;
import java.util.HashMap;
import crypt.api.key.AsymKey;
/**
 * Skeletal implementation of {@link AsymKey}: stores the public/private
 * key pair plus a map of named algorithm parameters, and exposes them
 * through the interface's plain accessors.  Subclasses supply the actual
 * cryptographic behavior.
 *
 * @param <T> concrete key-material type
 */
public abstract class AbstractAsymKey<T> implements AsymKey<T> {

    /** Public half of the key pair (null until set). */
    protected T publicKey = null;
    /** Private half of the key pair (null until set). */
    protected T privateKey = null;
    /** Named algorithm parameters (e.g. modulus, generator). */
    protected HashMap<String, T> params = new HashMap<>();

    @Override
    public T getPublicKey() {
        return this.publicKey;
    }

    @Override
    public void setPublicKey(T pbk) {
        this.publicKey = pbk;
    }

    @Override
    public T getPrivateKey() {
        return this.privateKey;
    }

    @Override
    public void setPrivateKey(T pk) {
        this.privateKey = pk;
    }

    @Override
    public T getParam(String p) {
        return this.params.get(p);
    }
}
| lgpl-3.0 |
djw8605/condor | src/condor_contrib/mgmt/qmf/plugins/MgmtStartdPlugin.cpp | 4747 | /*
* Copyright 2009-2011 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "condor_common.h"
#include "../condor_startd.V6/StartdPlugin.h"
#include "hashkey.h"
#include "../condor_daemon_core.V6/condor_daemon_core.h"
#include "condor_config.h"
#include "get_daemon_name.h"
#include "SlotObject.h"
#include "broker_utils.h"
// Global from the condor_startd, it's name
extern char * Name;
using namespace std;
using namespace com::redhat::grid;
// QMF management plugin for the condor_startd: mirrors each startd
// classad as a QMF "Slot" object on a message broker and services
// broker-originated method calls via DaemonCore.
struct MgmtStartdPlugin : public Service, StartdPlugin
{
    // ManagementAgent::Singleton cleans up the ManagementAgent
    // instance if there are no ManagementAgent::Singleton's in
    // scope!
    ManagementAgent::Singleton *singleton;

    // Maps a startd ad's hash key to the QMF object mirroring it.
    typedef HashTable<AdNameHashKey, SlotObject *> SlotHashTable;

    SlotHashTable *startdAds;

    // Reads broker connection settings from the condor configuration,
    // connects the management agent, and registers the agent's signal fd
    // with DaemonCore so pending method calls can be serviced.
    void
    initialize()
    {
        char *host;
        char *username;
        char *password;
        char *mechanism;
        int port;
        char *tmp;
        string storefile;

        dprintf(D_FULLDEBUG, "MgmtStartdPlugin: Initializing...\n");

        singleton = new ManagementAgent::Singleton();

        startdAds = new SlotHashTable(4096, &adNameHashFunction);

        ManagementAgent *agent = singleton->getInstance();

        Slot::registerSelf(agent);

        // Config knobs, each with a sensible fallback.
        port = param_integer("QMF_BROKER_PORT", 5672);
        if (NULL == (host = param("QMF_BROKER_HOST"))) {
            host = strdup("localhost");
        }
        tmp = param("QMF_STOREFILE");
        if (NULL == tmp) {
            storefile = ".startd_storefile";
        } else {
            storefile = tmp;
            free(tmp); tmp = NULL;
        }
        if (NULL == (username = param("QMF_BROKER_USERNAME")))
        {
            username = strdup("");
        }
        if (NULL == (mechanism = param("QMF_BROKER_AUTH_MECH")))
        {
            mechanism = strdup("ANONYMOUS");
        }

        password = getBrokerPassword();

        // Prefer the explicitly configured startd name (multi-startd hosts).
        std::string startd_name = default_daemon_name();
        if( Name ) {
            startd_name = Name;
        }

        agent->setName("com.redhat.grid","slot",startd_name.c_str());
        agent->init(string(host), port,
                    param_integer("QMF_UPDATE_INTERVAL", 10),
                    true,
                    storefile,
                    username,
                    password,
                    mechanism);

        // agent->init copied what it needed; release our config strings.
        free(host);
        free(username);
        free(password);
        free(mechanism);

        // Wrap the agent's signal fd in a ReliSock so DaemonCore can call
        // us back whenever the broker has queued work for this agent.
        ReliSock *sock = new ReliSock;
        if (!sock) {
            EXCEPT("Failed to allocate Mgmt socket");
        }
        if (!sock->assign(agent->getSignalFd())) {
            EXCEPT("Failed to bind Mgmt socket");
        }
        int index;
        if (-1 == (index =
                   daemonCore->Register_Socket((Stream *) sock,
                                               "Mgmt Method Socket",
                                               (SocketHandlercpp)
                                               &MgmtStartdPlugin::HandleMgmtSocket,
                                               "Handler for Mgmt Methods.",
                                               this))) {
            EXCEPT("Failed to register Mgmt socket");
        }
    }

    void
    shutdown()
    {
        // QMF_DELETE_ON_SHUTDOWN=false keeps the published objects alive
        // across a restart; in that case leave the agent untouched.
        if (!param_boolean("QMF_DELETE_ON_SHUTDOWN", true)) {
            return;
        }

        dprintf(D_FULLDEBUG, "MgmtStartdPlugin: shutting down...\n");

        // invalidate will clean up qmf SlotObjects
        if (singleton) {
            delete singleton;
            singleton = NULL;
        }
    }

    // Called on every startd classad update: create the matching
    // SlotObject on first sight, then push the fresh ad into it.
    void
    update(const ClassAd *ad, const ClassAd *)
    {
        AdNameHashKey hashKey;
        SlotObject *slotObject;

        if (!makeStartdAdHashKey(hashKey, ((ClassAd *) ad))) {
            dprintf(D_FULLDEBUG, "Could not make hashkey -- ignoring ad\n");
            return;
        }

        // NB: lookup() returns non-zero when the key is absent.
        if (startdAds->lookup(hashKey, slotObject)) {
            // Key doesn't exist
            slotObject = new SlotObject(singleton->getInstance(), hashKey.name.Value());

            // Ignore old value, if it existed (returned)
            startdAds->insert(hashKey, slotObject);
        }

        slotObject->update(*ad);
    }

    // Called when a startd ad is invalidated: drop and delete the
    // corresponding SlotObject, if one was being tracked.
    void
    invalidate(const ClassAd *ad)
    {
        AdNameHashKey hashKey;
        SlotObject *slotObject;

        if (!makeStartdAdHashKey(hashKey, ((ClassAd *) ad))) {
            dprintf(D_FULLDEBUG, "Could not make hashkey -- ignoring ad\n");
            return;
        }

        // find it and throw it away
        if (0 == startdAds->lookup(hashKey, slotObject)) {
            startdAds->remove(hashKey);
            delete slotObject;
        }
        else {
            dprintf(D_FULLDEBUG, "%s startd key not found for removal\n",HashString(hashKey).Value());
        }
    }

    // DaemonCore socket handler: lets the agent service pending broker
    // method calls; KEEP_STREAM keeps the socket registered.
    // this needs an exact match signature for VC++
#ifdef WIN32
    int
    HandleMgmtSocket(Stream *)
#else
    int
    HandleMgmtSocket(Service *, Stream *)
#endif
    {
        singleton->getInstance()->pollCallbacks();
        return KEEP_STREAM;
    }
};
// Plugin self-registration: constructing this static instance hooks the
// plugin into the startd's plugin list at shared-library load time.
static MgmtStartdPlugin instance;

#if defined(WIN32)
// Windows entry point required by the plugin loader; registration itself
// happens via the static instance above, so there is nothing to do here.
int load_startd_mgmt(void) {
    return 0;
}
#endif
| apache-2.0 |
citrix-openstack-build/heat | heat/openstack/common/threadgroup.py | 3868 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from eventlet import greenpool
from eventlet import greenthread
from heat.openstack.common import log as logging
from heat.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
"""Callback function to be passed to GreenThread.link() when we spawn()
Calls the :class:`ThreadGroup` to notify if.
"""
kwargs['group'].thread_done(kwargs['thread'])
class Thread(object):
    """Wrapper around a greenthread.

    Holds a reference back to the owning :class:`ThreadGroup` (via the
    linked ``_thread_done`` callback) so the group is notified when the
    greenthread finishes and can remove it from its threads list.
    """

    def __init__(self, thread, group):
        self.thread = thread
        # Arrange for the group to be told when this thread completes.
        thread.link(_thread_done, group=group, thread=self)

    def stop(self):
        """Kill the underlying greenthread."""
        self.thread.kill()

    def wait(self):
        """Block until the greenthread finishes; return its result."""
        return self.thread.wait()
class ThreadGroup(object):
    """The point of the ThreadGroup class is to:

    * keep track of timers and greenthreads (making it easier to stop them
      when need be).
    * provide an easy API to add timers.
    """

    def __init__(self, thread_pool_size=10):
        self.pool = greenpool.GreenPool(thread_pool_size)
        self.threads = []
        self.timers = []

    def add_dynamic_timer(self, callback, initial_delay=None,
                          periodic_interval_max=None, *args, **kwargs):
        """Start a DynamicLoopingCall for `callback` and track it."""
        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
        timer.start(initial_delay=initial_delay,
                    periodic_interval_max=periodic_interval_max)
        self.timers.append(timer)

    def add_timer(self, interval, callback, initial_delay=None,
                  *args, **kwargs):
        """Start a FixedIntervalLoopingCall firing every `interval` secs."""
        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
        pulse.start(interval=interval,
                    initial_delay=initial_delay)
        self.timers.append(pulse)

    def add_thread(self, callback, *args, **kwargs):
        """Spawn `callback` in the pool and track the resulting thread."""
        gt = self.pool.spawn(callback, *args, **kwargs)
        th = Thread(gt, self)
        self.threads.append(th)

    def thread_done(self, thread):
        """Callback from Thread: drop a finished thread from tracking."""
        self.threads.remove(thread)

    def stop(self):
        """Kill all managed threads (except the caller) and stop timers."""
        current = greenthread.getcurrent()
        # Iterate over a copy: killing a thread fires its link() callback,
        # which calls thread_done() and mutates self.threads mid-loop,
        # causing entries to be skipped.
        for x in self.threads[:]:
            if x is current:
                # don't kill the current thread.
                continue
            try:
                x.stop()
            except Exception as ex:
                LOG.exception(ex)
        for x in self.timers:
            try:
                x.stop()
            except Exception as ex:
                LOG.exception(ex)
        self.timers = []

    def wait(self):
        """Block until all timers and managed threads have finished."""
        for x in self.timers:
            try:
                x.wait()
            except eventlet.greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)
        current = greenthread.getcurrent()
        # Copy for the same reason as in stop(): finished threads remove
        # themselves from self.threads while we are iterating.
        for x in self.threads[:]:
            if x is current:
                continue
            try:
                x.wait()
            except eventlet.greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)
| apache-2.0 |
yeeunshim/tajo_test | tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/MiniCatalogServer.java | 1498 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tajo.catalog;
import org.apache.tajo.conf.TajoConf;
import java.io.IOException;
/**
 * In-process catalog server for tests: either boots a fresh
 * {@link CatalogServer} from a configuration or wraps an existing one,
 * and hands out server/client views of it.
 */
public class MiniCatalogServer {

    private CatalogServer catalogServers;

    /**
     * Boots a brand-new catalog server with the given configuration.
     *
     * @param conf configuration used to initialize and start the server
     * @throws IOException if the server fails to start
     */
    public MiniCatalogServer(TajoConf conf) throws IOException {
        CatalogServer server = new CatalogServer();
        server.init(conf);
        server.start();
        this.catalogServers = server;
    }

    /** Wraps an already-running catalog server instead of booting one. */
    public MiniCatalogServer(CatalogServer server) {
        this.catalogServers = server;
    }

    /** Stops the underlying catalog server. */
    public void shutdown() {
        this.catalogServers.stop();
    }

    /** @return the raw server instance */
    public CatalogServer getCatalogServer() {
        return this.catalogServers;
    }

    /** @return a local, in-process client view of the catalog */
    public CatalogService getCatalog() {
        return new LocalCatalogWrapper(this.catalogServers);
    }
}
| apache-2.0 |
haowu80s/spark | core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala | 4227 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui
import javax.servlet.http.HttpServletRequest
import scala.xml.Node
import org.mockito.Mockito.{mock, when, RETURNS_SMART_NULLS}
import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler._
import org.apache.spark.ui.jobs.{JobProgressListener, StagePage, StagesTab}
import org.apache.spark.ui.scope.RDDOperationGraphListener
// UI test: renders a StagePage against a mocked StagesTab and checks the
// produced HTML for the "peak execution memory" metric behavior.
class StagePageSuite extends SparkFunSuite with LocalSparkContext {

  test("peak execution memory only displayed if unsafe is enabled") {
    val unsafeConf = "spark.sql.unsafe.enabled"
    val conf = new SparkConf(false).set(unsafeConf, "true")
    val html = renderStagePage(conf).toString().toLowerCase
    val targetString = "peak execution memory"
    assert(html.contains(targetString))
    // Disable unsafe and make sure it's not there
    val conf2 = new SparkConf(false).set(unsafeConf, "false")
    val html2 = renderStagePage(conf2).toString().toLowerCase
    assert(!html2.contains(targetString))
    // Avoid setting anything; it should be displayed by default
    val conf3 = new SparkConf(false)
    val html3 = renderStagePage(conf3).toString().toLowerCase
    assert(html3.contains(targetString))
  }

  test("SPARK-10543: peak execution memory should be per-task rather than cumulative") {
    val unsafeConf = "spark.sql.unsafe.enabled"
    val conf = new SparkConf(false).set(unsafeConf, "true")
    val html = renderStagePage(conf).toString().toLowerCase
    // verify min/25/50/75/max show task value not cumulative values
    assert(html.contains("<td>10.0 b</td>" * 5))
  }

  /**
   * Render a stage page started with the given conf and return the HTML.
   * This also runs a dummy stage to populate the page with useful content.
   */
  private def renderStagePage(conf: SparkConf): Seq[Node] = {
    val jobListener = new JobProgressListener(conf)
    val graphListener = new RDDOperationGraphListener(conf)
    // Mock the tab/request so the page can be rendered without a live UI.
    val tab = mock(classOf[StagesTab], RETURNS_SMART_NULLS)
    val request = mock(classOf[HttpServletRequest])
    when(tab.conf).thenReturn(conf)
    when(tab.progressListener).thenReturn(jobListener)
    when(tab.operationGraphListener).thenReturn(graphListener)
    when(tab.appName).thenReturn("testing")
    when(tab.headerTabs).thenReturn(Seq.empty)
    when(request.getParameter("id")).thenReturn("0")
    when(request.getParameter("attempt")).thenReturn("0")
    val page = new StagePage(tab)

    // Simulate a stage in job progress listener
    val stageInfo = new StageInfo(0, 0, "dummy", 1, Seq.empty, Seq.empty, "details")
    // Simulate two tasks to test PEAK_EXECUTION_MEMORY correctness
    (1 to 2).foreach {
      taskId =>
        val taskInfo = new TaskInfo(taskId, taskId, 0, 0, "0", "localhost", TaskLocality.ANY, false)
        val peakExecutionMemory = 10
        // Cumulative accumulator value is peakExecutionMemory * taskId; the
        // page must display the per-task value (10), not the running total.
        taskInfo.accumulables += new AccumulableInfo(0, InternalAccumulator.PEAK_EXECUTION_MEMORY,
          Some(peakExecutionMemory.toString), (peakExecutionMemory * taskId).toString, true)
        jobListener.onStageSubmitted(SparkListenerStageSubmitted(stageInfo))
        jobListener.onTaskStart(SparkListenerTaskStart(0, 0, taskInfo))
        taskInfo.markSuccessful()
        jobListener.onTaskEnd(
          SparkListenerTaskEnd(0, 0, "result", Success, taskInfo, TaskMetrics.empty))
    }
    jobListener.onStageCompleted(SparkListenerStageCompleted(stageInfo))
    page.render(request)
  }
}
| apache-2.0 |
winklerm/drools | kie-api/src/main/java/org/kie/api/internal/utils/ServiceRegistry.java | 3216 | /*
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.api.internal.utils;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;
import org.kie.api.Service;
import static org.kie.api.internal.utils.ServiceUtil.instanceFromNames;
/**
* Internal Interface
*
*/
public interface ServiceRegistry extends Service {

    /** Convenience shortcut: look up {@code cls} on the singleton registry. */
    static <T> T getService(Class<T> cls) {
        return getInstance().get( cls );
    }

    /** Returns the lazily-initialized, process-wide registry instance. */
    static ServiceRegistry getInstance() {
        return ServiceRegistryHolder.serviceRegistry;
    }

    /** Returns the first registered implementation of {@code cls}, or null. */
    <T> T get(Class<T> cls);

    /** Returns all registered implementations of {@code cls} (possibly empty). */
    <T> List<T> getAll(Class<T> cls);

    // Initialization-on-demand holder: JVM class loading guarantees the
    // singleton is created exactly once, without explicit synchronization.
    class ServiceRegistryHolder {
        private static ServiceRegistry serviceRegistry = Impl.getServiceRegistry();
    }

    class Impl implements ServiceRegistry {

        // Two alternative registry suppliers, tried in order: the dynamic
        // one discovers services at runtime; the static one is the fallback.
        private static final String DYNAMIC_IMPL = "org.drools.dynamic.DynamicServiceRegistrySupplier";
        private static final String STATIC_IMPL = "org.drools.statics.StaticServiceRegistrySupplier";

        private static Supplier<ServiceRegistry> supplier;

        // Service interface FQN -> discovered implementation instances.
        private Map<String, List<Object>> registry;

        public Impl() {
            registry = ServiceDiscoveryImpl.getInstance().getServices();
        }

        public synchronized void reset() {
            ServiceDiscoveryImpl.getInstance().reset();
        }

        public synchronized void reload() {
            registry = ServiceDiscoveryImpl.getInstance().getServices();
        }

        public <T> T get(Class<T> cls) {
            for ( Object service : getAll( cls ) ) {
                if ( cls.isInstance( service ) ) {
                    return (T) service;
                }
            }
            return null;
        }

        public <T> List<T> getAll(Class<T> cls) {
            return (List<T>) this.registry.getOrDefault( cls.getCanonicalName(), Collections.emptyList() );
        }

        public static ServiceRegistry getServiceRegistry() {
            if (supplier == null) {
                supplier = instanceFromNames(DYNAMIC_IMPL, STATIC_IMPL);
            }
            return supplier.get();
        }

        public static void setSupplier( Supplier<ServiceRegistry> supplier ) {
            Impl.supplier = supplier;
        }
    }

    /** True when an implementation of {@code clazz} is registered. */
    static boolean isSupported(Class<?> clazz) {
        return getInstance().get(clazz) != null;
    }

    /**
     * Applies {@code executed} to the registered {@code clazz} service if
     * one exists; otherwise returns {@link Optional#empty()}.
     */
    static <R,A> Optional<R> ifSupported(Class<A> clazz, Function<A, R> executed) {
        return isSupported(clazz) ? Optional.of(executed.apply(getInstance().get(clazz))) : Optional.empty();
    }
}
| apache-2.0 |
morningman/palo | be/src/gutil/ref_counted_memory.cc | 2435 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gutil/ref_counted_memory.h"
#include <stdlib.h>
#include <common/logging.h>
namespace kudu {
bool RefCountedMemory::Equals(
const scoped_refptr<RefCountedMemory>& other) const {
return other.get() &&
size() == other->size() &&
(memcmp(front(), other->front(), size()) == 0);
}
RefCountedMemory::RefCountedMemory() {}
RefCountedMemory::~RefCountedMemory() {}
const unsigned char* RefCountedStaticMemory::front() const {
return data_;
}
size_t RefCountedStaticMemory::size() const {
return length_;
}
RefCountedStaticMemory::~RefCountedStaticMemory() {}
RefCountedBytes::RefCountedBytes() {}
// Takes the vector by value and moves it into place (no copy for rvalues).
RefCountedBytes::RefCountedBytes(std::vector<unsigned char> initializer)
    : data_(std::move(initializer)) {}
// Copies `size` bytes starting at `p`.
RefCountedBytes::RefCountedBytes(const unsigned char* p, size_t size)
    : data_(p, p + size) {}
// Steals the contents of `to_destroy` via swap (left empty) -- no copy.
RefCountedBytes* RefCountedBytes::TakeVector(
    std::vector<unsigned char>* to_destroy) {
  auto bytes = new RefCountedBytes;
  bytes->data_.swap(*to_destroy);
  return bytes;
}
const unsigned char* RefCountedBytes::front() const {
  // STL will assert if we do front() on an empty vector, but calling code
  // expects a NULL.
  return size() ? &data_.front() : nullptr;
}
// Number of bytes held.
size_t RefCountedBytes::size() const {
  return data_.size();
}
RefCountedBytes::~RefCountedBytes() {}
RefCountedString::RefCountedString() {}
RefCountedString::~RefCountedString() {}
// static
// Steals the contents of `to_destroy` via swap (left empty) -- no copy.
RefCountedString* RefCountedString::TakeString(std::string* to_destroy) {
  auto self = new RefCountedString;
  to_destroy->swap(self->data_);
  return self;
}
// Returns NULL for an empty string, matching the other RefCounted* classes'
// convention that callers expect NULL rather than a zero-length pointer.
const unsigned char* RefCountedString::front() const {
  return data_.empty() ? nullptr :
      reinterpret_cast<const unsigned char*>(data_.data());
}
// Number of bytes held.
size_t RefCountedString::size() const {
  return data_.size();
}
// Wraps a caller-malloc'ed buffer and takes ownership of it.
RefCountedMallocedMemory::RefCountedMallocedMemory(
    void* data, size_t length)
    : data_(reinterpret_cast<unsigned char*>(data)), length_(length) {
  // A null buffer is only legal when it is empty.
  DCHECK(data || length == 0);
}
// NULL for an empty buffer, consistent with the other RefCounted* classes.
const unsigned char* RefCountedMallocedMemory::front() const {
  return length_ ? data_ : nullptr;
}
size_t RefCountedMallocedMemory::size() const {
  return length_;
}
RefCountedMallocedMemory::~RefCountedMallocedMemory() {
  // We own the buffer; free(nullptr) is a no-op for the empty case.
  free(data_);
}
} // namespace kudu
| apache-2.0 |
graydon/rust | library/alloc/src/macros.rs | 4126 | /// Creates a [`Vec`] containing the arguments.
///
/// `vec!` allows `Vec`s to be defined with the same syntax as array expressions.
/// There are two forms of this macro:
///
/// - Create a [`Vec`] containing a given list of elements:
///
/// ```
/// let v = vec![1, 2, 3];
/// assert_eq!(v[0], 1);
/// assert_eq!(v[1], 2);
/// assert_eq!(v[2], 3);
/// ```
///
/// - Create a [`Vec`] from a given element and size:
///
/// ```
/// let v = vec![1; 3];
/// assert_eq!(v, [1, 1, 1]);
/// ```
///
/// Note that unlike array expressions this syntax supports all elements
/// which implement [`Clone`] and the number of elements doesn't have to be
/// a constant.
///
/// This will use `clone` to duplicate an expression, so one should be careful
/// using this with types having a nonstandard `Clone` implementation. For
/// example, `vec![Rc::new(1); 5]` will create a vector of five references
/// to the same boxed integer value, not five references pointing to independently
/// boxed integers.
///
/// Also, note that `vec![expr; 0]` is allowed, and produces an empty vector.
/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
/// be mindful of side effects.
///
/// [`Vec`]: crate::vec::Vec
#[cfg(not(test))]
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[allow_internal_unstable(box_syntax, liballoc_internals)]
macro_rules! vec {
    // `vec![]`: empty vector.
    () => (
        $crate::__rust_force_expr!($crate::vec::Vec::new())
    );
    // `vec![elem; n]`: `n` clones of `elem`.
    ($elem:expr; $n:expr) => (
        $crate::__rust_force_expr!($crate::vec::from_elem($elem, $n))
    );
    // `vec![a, b, c]` (optional trailing comma): box the elements as a slice,
    // then convert in place via the inherent `[T]::into_vec`.
    ($($x:expr),+ $(,)?) => (
        $crate::__rust_force_expr!(<[_]>::into_vec(box [$($x),+]))
    );
}
// HACK(japaric): with cfg(test) the inherent `[T]::into_vec` method, which is
// required for this macro definition, is not available. Instead use the
// `slice::into_vec` function which is only available with cfg(test)
// NB see the slice::hack module in slice.rs for more information
#[cfg(test)]
macro_rules! vec {
    // Same arms as the non-test macro, but routed through the free function
    // `slice::into_vec` because the inherent method is unavailable here.
    () => (
        $crate::vec::Vec::new()
    );
    ($elem:expr; $n:expr) => (
        $crate::vec::from_elem($elem, $n)
    );
    ($($x:expr),*) => (
        $crate::slice::into_vec(box [$($x),*])
    );
    // Trailing-comma form forwards to the list form above.
    ($($x:expr,)*) => (vec![$($x),*])
}
/// Creates a `String` using interpolation of runtime expressions.
///
/// The first argument `format!` receives is a format string. This must be a string
/// literal. The power of the formatting string is in the `{}`s contained.
///
/// Additional parameters passed to `format!` replace the `{}`s within the
/// formatting string in the order given unless named or positional parameters
/// are used; see [`std::fmt`] for more information.
///
/// A common use for `format!` is concatenation and interpolation of strings.
/// The same convention is used with [`print!`] and [`write!`] macros,
/// depending on the intended destination of the string.
///
/// To convert a single value to a string, use the [`to_string`] method. This
/// will use the [`Display`] formatting trait.
///
/// [`std::fmt`]: ../std/fmt/index.html
/// [`print!`]: ../std/macro.print.html
/// [`write!`]: core::write
/// [`to_string`]: crate::string::ToString
/// [`Display`]: core::fmt::Display
///
/// # Panics
///
/// `format!` panics if a formatting trait implementation returns an error.
/// This indicates an incorrect implementation
/// since `fmt::Write for String` never returns an error itself.
///
/// # Examples
///
/// ```
/// format!("test");
/// format!("hello {}", "world!");
/// format!("x = {}, y = {y}", 10, y = 30);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "format_macro")]
macro_rules! format {
    ($($arg:tt)*) => {{
        // NOTE: the intermediate `let` binding appears deliberate (it affects
        // how temporaries from `format_args!` are scoped) -- confirm before
        // collapsing this into a single expression.
        let res = $crate::fmt::format($crate::__export::format_args!($($arg)*));
        res
    }}
}
/// Force AST node to an expression to improve diagnostics in pattern position.
///
/// Wrapping a macro expansion in this no-op makes the compiler treat it as an
/// expression, so misuse of `vec![...]` in a pattern reports a clearer error.
#[doc(hidden)]
#[macro_export]
#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
macro_rules! __rust_force_expr {
    ($e:expr) => {
        $e
    };
}
| apache-2.0 |
ya7lelkom/googleads-java-lib | modules/adwords_appengine/src/main/java/com/google/api/ads/adwords/jaxws/v201502/cm/AdGroupCriterionExperimentBidMultiplier.java | 2261 |
package com.google.api.ads.adwords.jaxws.v201502.cm;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlSeeAlso;
import javax.xml.bind.annotation.XmlType;
/**
*
* Bid multiplier used to modify the bid of a criterion while running
* an experiment.
* <span class="constraint AdxEnabled">This is disabled for AdX.</span>
*
*
* <p>Java class for AdGroupCriterionExperimentBidMultiplier complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="AdGroupCriterionExperimentBidMultiplier">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="AdGroupCriterionExperimentBidMultiplier.Type" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
// JAXB binding for the AdGroupCriterionExperimentBidMultiplier complex type
// (presumably generated from the AdWords v201502 WSDL -- avoid hand edits).
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "AdGroupCriterionExperimentBidMultiplier", propOrder = {
    "adGroupCriterionExperimentBidMultiplierType"
})
@XmlSeeAlso({
    ManualCPCAdGroupCriterionExperimentBidMultiplier.class
})
public abstract class AdGroupCriterionExperimentBidMultiplier {

    // Subtype discriminator, mapped from the
    // "AdGroupCriterionExperimentBidMultiplier.Type" XML element.
    @XmlElement(name = "AdGroupCriterionExperimentBidMultiplier.Type")
    protected String adGroupCriterionExperimentBidMultiplierType;

    /**
     * Gets the value of the adGroupCriterionExperimentBidMultiplierType property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getAdGroupCriterionExperimentBidMultiplierType() {
        return adGroupCriterionExperimentBidMultiplierType;
    }

    /**
     * Sets the value of the adGroupCriterionExperimentBidMultiplierType property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setAdGroupCriterionExperimentBidMultiplierType(String value) {
        this.adGroupCriterionExperimentBidMultiplierType = value;
    }

}
| apache-2.0 |
tensorflow/tensorflow | tensorflow/compiler/xla/service/scatter_expander.cc | 18921 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/scatter_expander.h"
#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/while_util.h"
#include "tensorflow/compiler/xla/statusor.h"
namespace xla {
// Transposes `scatter_indices` so that `index_vector_dim` becomes the
// most-minor (last) dimension. Returns the input unchanged when the index
// vector dimension is already last, or equals the rank (implicitly scalar
// indices, so there is nothing to move).
static StatusOr<HloInstruction*> TransposeIndexVectorDimToLast(
    HloInstruction* scatter_indices, int64_t index_vector_dim) {
  const Shape& indices_shape = scatter_indices->shape();
  const int64_t rank = indices_shape.dimensions_size();
  if (index_vector_dim == rank || index_vector_dim == rank - 1) {
    return scatter_indices;
  }
  // Permutation: every dimension except index_vector_dim in order, then
  // index_vector_dim at the end.
  std::vector<int64_t> permutation;
  permutation.reserve(rank);
  for (int64_t dim = 0; dim < rank; ++dim) {
    if (dim != index_vector_dim) {
      permutation.push_back(dim);
    }
  }
  permutation.push_back(index_vector_dim);
  return MakeTransposeHlo(scatter_indices, permutation);
}
// Canonicalizes the scatter_indices tensor in order to keep them uniform while
// performing the scatter operation: after this, the most-major dimension
// enumerates the indices and (when present) the most-minor dimension holds
// each index vector.
static StatusOr<HloInstruction*> CanonicalizeScatterIndices(
    HloInstruction* scatter_indices, int64_t index_vector_dim) {
  // Transpose the non-index-vector dimensions to the front.
  TF_ASSIGN_OR_RETURN(
      HloInstruction * transposed_scatter_indices,
      TransposeIndexVectorDimToLast(scatter_indices, index_vector_dim));
  // A trailing index-vector dimension of size 1 carries no information:
  // reshape it away so the indices are treated as scalars below.
  if (scatter_indices->shape().rank() == index_vector_dim + 1 &&
      scatter_indices->shape().dimensions(index_vector_dim) == 1) {
    auto new_shape =
        ShapeUtil::DeleteDimension(index_vector_dim, scatter_indices->shape());
    TF_ASSIGN_OR_RETURN(scatter_indices,
                        MakeReshapeHlo(new_shape, scatter_indices));
  }
  // NOTE: `scatter_indices` may have been reassigned above, so this compares
  // against the possibly-reshaped rank.
  bool indices_are_scalar =
      index_vector_dim == scatter_indices->shape().dimensions_size();
  // The number of dimensions in scatter_indices that are index dimensions.
  const int64_t index_dims_in_scatter_indices = indices_are_scalar ? 0 : 1;
  // If there is only one index (i.e. scatter_indices has rank 1 and this
  // scatter is really just a dynamic update slice) add a leading degenerate
  // dimension for uniformity. Otherwise create a "collapsed" leading dimension
  // that subsumes all of the non-index-vector dimensions.
  const Shape& shape = transposed_scatter_indices->shape();
  if (shape.dimensions_size() == index_dims_in_scatter_indices) {
    return PrependDegenerateDims(transposed_scatter_indices, 1);
  } else {
    // Collapse all but the dimensions (0 or 1) in scatter_indices containing
    // the index vectors.
    return CollapseFirstNDims(
        transposed_scatter_indices,
        shape.dimensions_size() - index_dims_in_scatter_indices);
  }
}
// Permutes `updates` so that all scatter dimensions come first (major) and
// all window dimensions come last (minor), preserving the relative order
// within each group. `update_window_dims` is required to be sorted (it is
// consumed via binary search).
static StatusOr<HloInstruction*> PermuteScatterAndWindowDims(
    HloInstruction* updates, absl::Span<const int64_t> update_window_dims) {
  const int64_t rank = updates->shape().rank();
  std::vector<int64_t> permutation;
  permutation.reserve(rank);
  // Scatter dims: every dimension not listed in update_window_dims.
  for (int64_t dim = 0; dim < rank; ++dim) {
    if (!absl::c_binary_search(update_window_dims, dim)) {
      permutation.push_back(dim);
    }
  }
  // Window dims follow, in their given order.
  permutation.insert(permutation.end(), update_window_dims.begin(),
                     update_window_dims.end());
  return MakeTransposeHlo(updates, permutation);
}
// Reshapes `updates` so that its most-major dimension enumerates scatter
// indices: collapse all scatter dims into one, or — when there are no scatter
// dims at all (the dynamic-update-slice-like case) — prepend a degenerate
// dimension so the while loop can iterate uniformly.
static StatusOr<HloInstruction*> AdjustScatterDims(
    const Shape& scatter_indices_shape, HloInstruction* updates,
    int64_t index_vector_dim) {
  int64_t num_scatter_dims = scatter_indices_shape.dimensions_size();
  // When index_vector_dim names a real dimension, that dimension holds index
  // vectors rather than enumerating indices, so it does not count.
  if (index_vector_dim < scatter_indices_shape.dimensions_size()) {
    --num_scatter_dims;
  }
  return num_scatter_dims == 0
             ? PrependDegenerateDims(updates, 1)
             : CollapseFirstNDims(updates, num_scatter_dims);
}
// Expands an index vector from the scatter_indices tensor into a vector that
// can be used to dynamic-update-slice to perform the scatter update. Operand
// dimensions not named in scatter_dims_to_operand_dims get component 0.
static StatusOr<HloInstruction*> ExpandIndexVectorIntoOperandSpace(
    HloInstruction* index_vector, const ScatterDimensionNumbers& dim_numbers,
    int64_t operand_rank) {
  HloComputation* computation = index_vector->parent();
  const Shape& index_shape = index_vector->shape();
  // Scatter of a scalar. Return a zero-sized vector of indices.
  if (operand_rank == 0) {
    return computation->AddInstruction(HloInstruction::CreateConstant(
        LiteralUtil::CreateFromDimensions(index_shape.element_type(), {0})));
  }
  // One-element literal of the index element type; presumably
  // CreateFromDimensions default-initializes to zero, making this a [0]
  // vector -- confirm.
  HloInstruction* zero =
      computation->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateFromDimensions(index_shape.element_type(), {1})));
  // We extract out individual components from the smaller index and concatenate
  // them (interspersing zeros as needed) into the larger index.
  std::vector<HloInstruction*> expanded_index_components;
  for (int i = 0; i < operand_rank; i++) {
    int64_t index_vector_dim_index =
        FindIndex(dim_numbers.scatter_dims_to_operand_dims(), i);
    if (index_vector_dim_index !=
        dim_numbers.scatter_dims_to_operand_dims_size()) {
      // Operand dim i is addressed by component index_vector_dim_index of
      // the scatter index vector.
      TF_ASSIGN_OR_RETURN(
          HloInstruction * component_to_concat,
          MakeSliceHlo(index_vector, /*start_indices=*/{index_vector_dim_index},
                       /*limit_indices=*/{index_vector_dim_index + 1},
                       /*strides=*/{1}));
      expanded_index_components.push_back(component_to_concat);
    } else {
      expanded_index_components.push_back(zero);
    }
  }
  return MakeConcatHlo(expanded_index_components, /*dimension=*/0);
}
// Emits HLO that computes whether `index` is fully in bounds for a window of
// `window_sizes` inside an operand of `operand_dims`, and returns the scalar
// verdict broadcast to the window shape (so it can drive a select).
static StatusOr<HloInstruction*> CheckIndexValidity(
    HloComputation* computation, HloInstruction* index,
    absl::Span<const int64_t> operand_dims,
    absl::Span<const int64_t> window_sizes, HloModule* module) {
  DCHECK_NE(nullptr, module);
  DCHECK_EQ(operand_dims.size(), window_sizes.size());
  // Valid range for the index: [0, operand_dims - window_sizes]
  // Check if the index has any negative values.
  HloInstruction* zero_index = BroadcastZeros(
      computation, index->shape().element_type(), index->shape().dimensions());
  TF_ASSIGN_OR_RETURN(
      HloInstruction * negative_index_check,
      MakeCompareHlo(ComparisonDirection::kLe, zero_index, index));
  // Check if the index is OOB w.r.t. the operand dimensions and window sizes.
  std::vector<int64_t> max_valid_index(operand_dims.size());
  for (int i = 0; i < operand_dims.size(); ++i) {
    max_valid_index[i] = operand_dims[i] - window_sizes[i];
  }
  TF_ASSIGN_OR_RETURN(
      HloInstruction * max_valid_index_constant,
      MakeR1ConstantHlo<int64_t>(computation, index->shape().element_type(),
                                 max_valid_index));
  TF_ASSIGN_OR_RETURN(HloInstruction * oob_index_check,
                      MakeCompareHlo(ComparisonDirection::kGe,
                                     max_valid_index_constant, index));
  // Combine the results of the two checks above.
  TF_ASSIGN_OR_RETURN(
      HloInstruction * valid_index,
      MakeBinaryHlo(HloOpcode::kAnd, negative_index_check, oob_index_check));
  // Reduce the index validity check vector into a scalar predicate.
  auto reduction_init = computation->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  TF_ASSIGN_OR_RETURN(
      HloInstruction * valid_index_reduced,
      MakeReduceHlo(valid_index, reduction_init, HloOpcode::kAnd, module));
  // Return a broadcasted value of the scalar predicate to the same size as the
  // window.
  return MakeBroadcastHlo(valid_index_reduced, {}, window_sizes);
}
// Body of the while loop that performs the scatter operation using other HLOs.
// loop_state is (operand, canonical scatter_indices, canonical updates); each
// iteration applies the update selected by induction_var and threads the
// (possibly) updated operand through to the next iteration.
static StatusOr<std::vector<HloInstruction*>> ScatterLoopBody(
    HloInstruction* scatter, HloInstruction* induction_var,
    const std::vector<HloInstruction*>& loop_state) {
  const ScatterDimensionNumbers& dim_numbers =
      scatter->scatter_dimension_numbers();
  CHECK_EQ(loop_state.size(), 3);
  HloInstruction* operand = loop_state[0];
  HloInstruction* scatter_indices = loop_state[1];
  HloInstruction* updates = loop_state[2];
  // Rank-1 canonical indices mean each index is a single scalar per row.
  bool has_scalar_indices = scatter_indices->shape().dimensions_size() == 1;
  // Build a vector form of the induction variable of the while loop.
  HloInstruction* induction_var_as_vector =
      MakeBroadcastHlo(induction_var, /*broadcast_dimensions=*/{},
                       /*result_shape_bounds=*/{1});
  // Pick the index to scatter from scatter_indices based on the induction_var
  // and transform that to an index into the `operand` space.
  HloInstruction* index_vector;
  if (has_scalar_indices) {
    TF_ASSIGN_OR_RETURN(
        index_vector,
        MakeDynamicSliceHlo(scatter_indices, induction_var_as_vector, {1}));
  } else {
    // Slice row `induction_var` out of the (num_indices, index_vector_size)
    // index matrix, then drop the leading degenerate dimension.
    TF_ASSIGN_OR_RETURN(
        HloInstruction * index_into_scatter_indices,
        PadVectorWithZeros(induction_var_as_vector,
                           /*zeros_to_prepend=*/0, /*zeros_to_append=*/1));
    int index_vector_size = scatter_indices->shape().dimensions(1);
    TF_ASSIGN_OR_RETURN(
        HloInstruction * index_vector_2d,
        MakeDynamicSliceHlo(scatter_indices, index_into_scatter_indices,
                            {1, index_vector_size}));
    TF_ASSIGN_OR_RETURN(index_vector,
                        ElideDegenerateDims(index_vector_2d, {0}));
  }
  TF_ASSIGN_OR_RETURN(
      HloInstruction * scatter_slice_start,
      ExpandIndexVectorIntoOperandSpace(index_vector, dim_numbers,
                                        operand->shape().dimensions_size()));
  // Extract the slice to be used to update from `updates` tensor for the
  // induction_var corresponding to this iteration of the while loop.
  TF_ASSIGN_OR_RETURN(
      HloInstruction * index_into_updates,
      PadVectorWithZeros(
          induction_var_as_vector, /*zeros_to_prepend=*/0,
          /*zeros_to_append=*/updates->shape().dimensions_size() - 1));
  std::vector<int64_t> update_slice_bounds(
      updates->shape().dimensions().begin(),
      updates->shape().dimensions().end());
  update_slice_bounds[0] = 1;
  TF_ASSIGN_OR_RETURN(
      HloInstruction * update_slice,
      MakeDynamicSliceHlo(updates, index_into_updates, update_slice_bounds));
  TF_ASSIGN_OR_RETURN(HloInstruction * update_slice_for_scatter,
                      ElideDegenerateDims(update_slice, {0}));
  // Re-insert the window dimensions that were elided from the update shape so
  // it lines up with the operand slice extracted below.
  TF_ASSIGN_OR_RETURN(HloInstruction * update_slice_with_dims_inserted,
                      InsertDegenerateDims(update_slice_for_scatter,
                                           dim_numbers.inserted_window_dims()));
  // Note that the following transformation assumes that both DynamicSlice and
  // DynamicUpdateSlice follow the same semantics for OOB indices. For example,
  // if there are negative indices and DynamicSlice uses "clamping" semantics,
  // then the extracted data will be "shifted". Since DynamicUpdateSlice also
  // follows the same "clamping" semantics, writing the update will also be
  // "shifted" by exactly the same amount. So, this transformation is correct as
  // long as the semantics of handling OOB indices remain the same in
  // DynamicSlice and DynamicUpdateSlice.
  // Extract the slice to update from `operand` tensor.
  const Shape& update_slice_shape = update_slice_with_dims_inserted->shape();
  TF_ASSIGN_OR_RETURN(HloInstruction * operand_slice_to_update,
                      MakeDynamicSliceHlo(operand, scatter_slice_start,
                                          update_slice_shape.dimensions()));
  // Compute the new value for the slice to be updated in `operand` tensor by
  // combining the existing value and the update value using the update
  // computation.
  TF_ASSIGN_OR_RETURN(
      HloInstruction * updated_operand_slice,
      MakeMapHlo({operand_slice_to_update, update_slice_with_dims_inserted},
                 scatter->to_apply()));
  TF_ASSIGN_OR_RETURN(
      HloInstruction * is_index_valid,
      CheckIndexValidity(operand->parent(), scatter_slice_start,
                         operand->shape().dimensions(),
                         update_slice_with_dims_inserted->shape().dimensions(),
                         scatter->GetModule()));
  // Select the updated operand only if the index is valid. If not, select the
  // original value.
  TF_ASSIGN_OR_RETURN(HloInstruction * update_to_apply,
                      MakeSelectHlo(is_index_valid, updated_operand_slice,
                                    operand_slice_to_update));
  // Write the updated value of the slice into `operand` tensor.
  TF_ASSIGN_OR_RETURN(
      HloInstruction * updated_operand,
      MakeDynamicUpdateSliceHlo(operand, update_to_apply, scatter_slice_start));
  // Indices and updates are loop-invariant; only the operand changes.
  return StatusOr<std::vector<HloInstruction*>>{
      {updated_operand, scatter_indices, updates}};
}
// Number of iterations for the scatter while loop: the number of indices
// scattered into the operand, i.e. the product of all scatter_indices
// dimensions except the index-vector dimension.
static int64_t ScatterTripCount(const HloInstruction* scatter) {
  const HloInstruction* scatter_indices = scatter->operand(1);
  const Shape& indices_shape = scatter_indices->shape();
  const int64_t index_vector_dim =
      scatter->scatter_dimension_numbers().index_vector_dim();
  int64_t trip_count = 1;
  for (int64_t dim = 0; dim < indices_shape.dimensions_size(); ++dim) {
    if (dim != index_vector_dim) {
      trip_count *= indices_shape.dimensions(dim);
    }
  }
  return trip_count;
}
// High Level Algorithm.
//
// 1. Canonicalize the scatter_indices tensor such that it has rank 2, where
//    each row is an index into the operand.
// 2. Canonicalize the updates tensor such that is has rank `num_window_dims+1`
//    and the scatter dim is the most-major dimension.
// 3. Iterate over the set of indices in the canonicalized scatter_indices
//    tensor using a while loop, updating the operand for each such index. Each
//    iteration of this while loop performs the following:
//      a. Pick the index from scatter_indices for this iteration.
//      b. Transfrom this index into an index into the operand space.
//      c. Extract the slice to be used to update from the updates tensor.
//      d. Extract the slice to update from the operand tensor.
//      e. Compute the new value for the slice to update by combining the slices
//         from c. and d. using the update_computation of scatter.
//      f. Write the updated value of the slice into the operand tensor.
StatusOr<HloInstruction*> ScatterExpander::ExpandInstruction(
    HloInstruction* scatter) {
  HloInstruction* operand = scatter->mutable_operand(0);
  HloInstruction* scatter_indices = scatter->mutable_operand(1);
  HloInstruction* updates = scatter->mutable_operand(2);
  const ScatterDimensionNumbers& dim_numbers =
      scatter->scatter_dimension_numbers();
  // If the updates tensor is empty, there is no need to update the operand. We
  // can return the operand as is.
  if (ShapeUtil::IsZeroElementArray(updates->shape())) {
    return operand;
  }
  // Compute the trip count for the while loop to be used for scatter. This
  // should be the number of indices we should scatter into the operand.
  int64_t scatter_loop_trip_count = ScatterTripCount(scatter);
  if (!IsInt32(scatter_loop_trip_count)) {
    return Unimplemented(
        "Scatter operations with more than 2147483647 scatter indices are not "
        "supported. This error occurred for %s.",
        scatter->ToString());
  }
  // Canonicalize the scatter_indices, after which the size of its most-major
  // dimension must be same as the while loop trip count.
  TF_ASSIGN_OR_RETURN(HloInstruction * canonical_scatter_indices,
                      CanonicalizeScatterIndices(
                          scatter_indices, dim_numbers.index_vector_dim()));
  CHECK_EQ(scatter_loop_trip_count,
           canonical_scatter_indices->shape().dimensions(0));
  // Canonicalize the updates, after which the size of its most-major dimension
  // must be same as the while loop trip count.
  TF_ASSIGN_OR_RETURN(
      HloInstruction * canonical_updates,
      PermuteScatterAndWindowDims(updates, dim_numbers.update_window_dims()));
  TF_ASSIGN_OR_RETURN(
      HloInstruction * adjusted_canonical_updates,
      AdjustScatterDims(scatter_indices->shape(), canonical_updates,
                        dim_numbers.index_vector_dim()));
  CHECK_EQ(scatter_loop_trip_count,
           adjusted_canonical_updates->shape().dimensions(0));
  // The while loop that implements the scatter operation.
  StatusOr<std::vector<HloInstruction*>> scatter_loop_result_status =
      WhileUtil::MakeCountedLoop(
          scatter->parent(), scatter_loop_trip_count,
          {operand, canonical_scatter_indices, adjusted_canonical_updates},
          [&](HloInstruction* induction_var,
              const std::vector<HloInstruction*>& loop_state) {
            return ScatterLoopBody(scatter, induction_var, loop_state);
          },
          scatter->metadata());
  TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> scatter_loop_result,
                      scatter_loop_result_status);
  // The loop state is (operand, indices, updates); the updated operand is the
  // scatter result.
  return scatter_loop_result.front();
}
// Expand every scatter when in kEliminateAllScatters mode; otherwise only the
// trivial single-index scatters (trip count 1).
bool ScatterExpander::InstructionMatchesPattern(HloInstruction* inst) {
  return inst->opcode() == HloOpcode::kScatter &&
         (mode_ == kEliminateAllScatters || ScatterTripCount(inst) == 1);
}
} // namespace xla
| apache-2.0 |
janstey/fabric8 | fabric/fabric-dosgi/src/main/java/io/fabric8/dosgi/tcp/AsyncInvocationStrategy.java | 7480 | /**
* Copyright 2005-2014 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version
* 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package io.fabric8.dosgi.tcp;
import java.lang.reflect.Method;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.rmi.RemoteException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import io.fabric8.dosgi.api.AsyncCallback;
import io.fabric8.dosgi.api.SerializationStrategy;
import org.fusesource.hawtbuf.DataByteArrayInputStream;
import org.fusesource.hawtbuf.DataByteArrayOutputStream;
import org.fusesource.hawtdispatch.Dispatch;
import org.fusesource.hawtdispatch.DispatchQueue;
/**
* <p>
* </p>
*
*/
// Invocation strategy for "async" RPC methods: such a method returns its
// result through a trailing AsyncCallback parameter instead of a return value.
public class AsyncInvocationStrategy implements InvocationStrategy {

    public static final AsyncInvocationStrategy INSTANCE = new AsyncInvocationStrategy();

    // A method is async when its last parameter type is exactly AsyncCallback.
    static public boolean isAsyncMethod(Method method) {
        Class<?>[] types = method.getParameterTypes();
        return types.length != 0 && types[types.length - 1] == AsyncCallback.class;
    }

    // Client-side handle for a pending async invocation: decodes the response
    // bytes and hands the result to the caller's callback.
    private class AsyncResponseFuture implements ResponseFuture {
        private final ClassLoader loader;
        private final Method method;
        private final AsyncCallback callback;
        private final SerializationStrategy serializationStrategy;
        // Queue the request was issued from; the decode is dispatched back onto
        // it (may be null when the request was not made from a dispatch queue).
        private final DispatchQueue queue;

        public AsyncResponseFuture(ClassLoader loader, Method method, AsyncCallback callback, SerializationStrategy serializationStrategy, DispatchQueue queue) {
            this.loader = loader;
            this.method = method;
            this.callback = callback;
            this.serializationStrategy = serializationStrategy;
            this.queue = queue;
        }

        // Called when the raw response arrives; decodes on the original queue
        // when one is available, otherwise inline on the calling thread.
        public void set(final DataByteArrayInputStream source) {
            if( queue!=null ) {
                queue.execute(new Runnable() {
                    public void run() {
                        decodeIt(source);
                    }
                });
            } else {
                decodeIt(source);
            }
        }

        private void decodeIt(DataByteArrayInputStream source) {
            try {
                serializationStrategy.decodeResponse(loader, getResultType(method), source, callback);
            } catch (Throwable e) {
                // NOTE(review): a decode failure is only printed and the
                // callback never fires in that case -- confirm intentional.
                e.printStackTrace();
            }
        }

        // The async result is delivered via the callback, so blocking get()
        // has nothing to return.
        public Object get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
            // TODO: we could store the timeout so we can time out the async request...
            return null;
        }

        @Override
        public void fail(Throwable throwable) {
            callback.onFailure(throwable);
        }
    }

    // Encodes an async request: every argument except the trailing callback is
    // serialized into `target`; the callback is wired into the returned future.
    public ResponseFuture request(SerializationStrategy serializationStrategy, ClassLoader loader, Method method, Object[] args, DataByteArrayOutputStream target) throws Exception {
        if(!isAsyncMethod(method)) {
            throw new IllegalArgumentException("Invalid async method declaration: last argument is not a RequestCallback");
        }
        Class[] new_types = payloadTypes(method);
        Object[] new_args = new Object[args.length-1];
        System.arraycopy(args, 0, new_args, 0, new_args.length);
        serializationStrategy.encodeRequest(loader, new_types, new_args, target);
        return new AsyncResponseFuture(loader, method, (AsyncCallback) args[args.length-1], serializationStrategy, Dispatch.getCurrentQueue());
    }

    // Parameter types minus the trailing AsyncCallback.
    static private Class<?>[] payloadTypes(Method method) {
        Class<?>[] types = method.getParameterTypes();
        Class<?>[] new_types = new Class<?>[types.length-1];
        System.arraycopy(types, 0, new_types, 0, new_types.length);
        return new_types;
    }

    // Result type = the type argument T of the trailing AsyncCallback<T>
    // parameter (requires the parameter to be declared with a concrete
    // parameterized type).
    static private Class getResultType(Method method) {
        Type[] types = method.getGenericParameterTypes();
        ParameterizedType t = (ParameterizedType) types[types.length-1];
        return (Class) t.getActualTypeArguments()[0];
    }

    // Server-side helper that writes exactly one response (success or error)
    // into the response stream, then runs the completion hook.
    class ServiceResponse {
        private final ClassLoader loader;
        private final Method method;
        private final DataByteArrayOutputStream responseStream;
        private final Runnable onComplete;
        private final SerializationStrategy serializationStrategy;
        // Stream position captured before any response bytes; used to rewind
        // and overwrite a partially-encoded response on failure.
        private final int pos;
        // Used to protect against sending multiple responses.
        final AtomicBoolean responded = new AtomicBoolean(false);

        public ServiceResponse(ClassLoader loader, Method method, DataByteArrayOutputStream responseStream, Runnable onComplete, SerializationStrategy serializationStrategy) {
            this.loader = loader;
            this.method = method;
            this.responseStream = responseStream;
            this.onComplete = onComplete;
            this.serializationStrategy = serializationStrategy;
            pos = responseStream.position();
        }

        public void send(Throwable error, Object value) {
            // First caller wins; later send() invocations are ignored.
            if( responded.compareAndSet(false, true) ) {
                Class resultType = getResultType(method);
                try {
                    serializationStrategy.encodeResponse(loader, resultType, value, error, responseStream);
                } catch (Exception e) {
                    // we failed to encode the response.. reposition and write that error.
                    try {
                        responseStream.position(pos);
                        serializationStrategy.encodeResponse(loader, resultType, value, new RemoteException(e.toString()), responseStream);
                    } catch (Exception unexpected) {
                        unexpected.printStackTrace();
                    }
                } finally {
                    onComplete.run();
                }
            }
        }
    }

    // Decodes the request, invokes the target method with a synthesized
    // callback that relays the async result (or failure) back as the response.
    // Any decode/invoke failure is reported through the same response path.
    public void service(SerializationStrategy serializationStrategy, ClassLoader loader, Method method, Object target, DataByteArrayInputStream requestStream, final DataByteArrayOutputStream responseStream, final Runnable onComplete) {
        final ServiceResponse helper = new ServiceResponse(loader, method, responseStream, onComplete, serializationStrategy);
        try {
            Object[] new_args = new Object[method.getParameterTypes().length];
            serializationStrategy.decodeRequest(loader, payloadTypes(method), requestStream, new_args);
            new_args[new_args.length-1] = new AsyncCallback<Object>() {
                public void onSuccess(Object result) {
                    helper.send(null, result);
                }
                public void onFailure(Throwable failure) {
                    helper.send(failure, null);
                }
            };
            method.invoke(target, new_args);
        } catch (Throwable t) {
            helper.send(t, null);
        }
    }
}
| apache-2.0 |
gro-mar/flowable-engine | modules/flowable-dmn-engine/src/main/java/org/flowable/dmn/engine/impl/el/ELConditionExpressionPreParser.java | 2442 | /* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.dmn.engine.impl.el;
import org.apache.commons.lang3.StringUtils;
/**
* @author Yvo Swillens
*/
/**
 * Turns a DMN decision-table condition fragment into a complete JUEL
 * expression of the form {@code #{inputVariable <op> value}}. Expressions
 * that are already wrapped in {@code #{...}} or {@code ${...}} pass through
 * untouched (after the fn_* function-name mapping).
 */
public class ELConditionExpressionPreParser {

    protected static String[] OPERATORS = new String[]{"==", "!=", "<", ">", ">=", "<="};

    public static String parse(String expression, String inputVariable, String inputVariableType) {
        // Map the fn_* convenience names onto the registered date: functions.
        // The tokens are plain literals, so String.replace suffices (behaves
        // the same as the regex-based replaceAll for these patterns).
        expression = expression
                .replace("fn_date", "date:toDate")
                .replace("fn_subtractDate", "date:subtractDate")
                .replace("fn_addDate", "date:addDate")
                .replace("fn_now", "date:now");

        // Already a full JUEL expression: return it as-is.
        if (expression.startsWith("#{") || expression.startsWith("${")) {
            return expression;
        }

        StringBuilder juelExpression = new StringBuilder("#{").append(inputVariable);

        boolean comparableType = "date".equals(inputVariableType) || "number".equals(inputVariableType);
        if (!comparableType && expression.startsWith(".")) {
            // e.g. ".startsWith('x')": a method invocation on the input variable.
            juelExpression.append(expression);
        } else {
            juelExpression.append(parseSegmentWithOperator(expression));
        }

        return juelExpression.append("}").toString();
    }

    protected static String parseSegmentWithOperator(String expression) {
        // Bare values get an implicit equality comparison; fragments that
        // already start with a comparison operator are kept as written.
        if (expression.length() < 2 || !StringUtils.startsWithAny(expression, OPERATORS)) {
            return " == " + expression;
        }
        return " " + expression;
    }
}
| apache-2.0 |
apache/incubator-trafodion | dcs/src/main/java/org/trafodion/dcs/servermt/serverHandler/ServerApiSqlEndTransact.java | 7063 | /**
* @@@ START COPYRIGHT @@@
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* @@@ END COPYRIGHT @@@
*/
package org.trafodion.dcs.servermt.serverHandler;
import java.net.*;
import java.io.*;
import java.nio.*;
import java.nio.channels.*;
import java.nio.channels.spi.*;
import java.sql.SQLException;
import org.trafodion.dcs.Constants;
import org.trafodion.dcs.util.*;
import org.trafodion.dcs.servermt.ServerConstants;
import org.trafodion.dcs.servermt.ServerUtils;
import org.trafodion.dcs.servermt.serverDriverInputOutput.*;
import org.trafodion.dcs.servermt.serverSql.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
 * Handles the odbc_SQLSvc_EndTransaction API call: decodes the request
 * (dialogue id + transaction option), commits or rolls back the client's
 * Trafodion connection, and encodes the result/exception back into the
 * client's byte buffers.
 */
public class ServerApiSqlEndTransact {

    // Exception codes defined by the odbc_SQLSvc_EndTransaction protocol.
    // Only TransactionError is raised by this class; the others are kept
    // for protocol completeness.
    private static final int odbc_SQLSvc_EndTransaction_ParamError_exn_ = 1;
    private static final int odbc_SQLSvc_EndTransaction_InvalidConnection_exn_ = 2;
    private static final int odbc_SQLSvc_EndTransaction_SQLError_exn_ = 3;
    private static final int odbc_SQLSvc_EndTransaction_SQLInvalidHandle_exn_ = 4;
    private static final int odbc_SQLSvc_EndTransaction_TransactionError_exn_ = 5;

    private static final Log LOG = LogFactory.getLog(ServerApiSqlEndTransact.class);

    private int instance;           // server instance this worker belongs to
    private int serverThread;       // worker thread index within the instance
    private String serverWorkerName; // "<SERVER_WORKER_NAME>_<instance>_<thread>", used for logging
    private ClientData clientData;

    // Per-request state, cleared by init()/reset() around each call.
    private int dialogueId;          // client dialogue id, must match clientData's
    private short transactionOpt;    // 1 = rollback, any other value = commit
    private ServerException serverException;

    ServerApiSqlEndTransact(int instance, int serverThread) {
        this.instance = instance;
        this.serverThread = serverThread;
        serverWorkerName = ServerConstants.SERVER_WORKER_NAME + "_" + instance + "_" + serverThread;
    }

    // Clears per-request fields before processing a new request.
    void init(){
        dialogueId = 0;
        transactionOpt = 0;
        serverException = null;
    }

    // Clears per-request fields after the response has been built.
    void reset(){
        dialogueId = 0;
        transactionOpt = 0;
        serverException = null;
    }

    /**
     * Processes one EndTransaction request held in {@code clientData}'s
     * header/body buffers and replaces their contents with the response.
     *
     * @param clientData connection state carrying the request buffers
     * @return the same {@code clientData}, updated with the response
     *         buffers and the next requested I/O action
     */
    ClientData processApi(ClientData clientData) {
        this.clientData = clientData;
        init();
        //
        // ==============process input ByteBuffer===========================
        //
        ByteBuffer bbHeader = clientData.bbHeader;
        ByteBuffer bbBody = clientData.bbBody;
        Header hdr = clientData.hdr;
        // Switch both buffers from write mode to read mode.
        bbHeader.flip();
        bbBody.flip();
        try {
            hdr.extractFromByteArray(bbHeader);
            // Wire order: 4-byte dialogue id followed by 2-byte transaction option.
            dialogueId = bbBody.getInt();
            transactionOpt = bbBody.getShort();
            if(LOG.isDebugEnabled()){
                LOG.debug(serverWorkerName + ". dialogueId :" + dialogueId);
                LOG.debug(serverWorkerName + ". transactionOpt :" + transactionOpt);
            }
            // Validate the dialogue id before touching the connection.
            if (dialogueId < 1 ) {
                throw new SQLException(serverWorkerName + ". Wrong dialogueId :" + dialogueId);
            }
            if (dialogueId != clientData.getDialogueId() ) {
                throw new SQLException(serverWorkerName + ". Wrong dialogueId sent by the Client [sent/expected] : [" + dialogueId + "/" + clientData.getDialogueId() + "]");
            }
            //=====================Process ServerApiEndTransaction==============
            //
            serverException = new ServerException();
            try {
                // transactionOpt == 1 requests a rollback; anything else commits.
                if (transactionOpt == 1)
                    clientData.getTrafConnection().rollback();
                else
                    clientData.getTrafConnection().commit();
            } catch (SQLException ex){
                // Transaction failures are reported to the client as a
                // TransactionError exception rather than dropping the connection.
                LOG.error(serverWorkerName + ". SQLException :" + ex);
                serverException.setServerException (odbc_SQLSvc_EndTransaction_TransactionError_exn_, 0, ex);
            }
            //
            //===================calculate length of output ByteBuffer========================
            //
            bbHeader.clear();
            bbBody.clear();
            //
            // check if ByteBuffer is big enough for output
            //
            int dataLength = 0;
            int availableBuffer = 0;
            dataLength = serverException.lengthOfData();
            availableBuffer = bbBody.capacity() - bbBody.position();
            // If there is no exception, serverException.lengthOfData() returns 8
            // (two ints). The driver still reads one more int indicating the
            // number of exceptions (which should be 0), so reserve an extra
            // 4 bytes for it here.
            if (dataLength == 2 * ServerConstants.INT_FIELD_SIZE) {
                dataLength = dataLength + ServerConstants.INT_FIELD_SIZE;
            }
            if(LOG.isDebugEnabled())
                LOG.debug(serverWorkerName + ". dataLength :" + dataLength + " availableBuffer :" + availableBuffer);
            if (dataLength > availableBuffer ) {
                // Grow the body buffer and publish the replacement back into
                // clientData so subsequent requests reuse it.
                bbBody = ByteBufferUtils.increaseCapacity(bbBody, dataLength > ServerConstants.BODY_SIZE ? dataLength : ServerConstants.BODY_SIZE );
                ByteBufferUtils.printBBInfo(bbBody);
                clientData.bbBuf[1] = bbBody;
            }
            //===================== build output ==============================================
            serverException.insertIntoByteBuffer(bbBody);
            // If there is no exception, append the extra 4-byte count of
            // exceptions (0) the driver expects — see the sizing note above.
            if (serverException.lengthOfData() == 2 * ServerConstants.INT_FIELD_SIZE) {
                bbBody.putInt(0);
            }
            bbBody.flip();
            //=========================Update header================================
            hdr.setTotalLength(bbBody.limit());
            hdr.insertIntoByteBuffer(bbHeader);
            bbHeader.flip();
            clientData.setByteBufferArray(bbHeader, bbBody);
            clientData.setHdr(hdr);
            // Ask the framework to write the response and then read the next request.
            clientData.setRequest(ServerConstants.REQUST_WRITE_READ);
        } catch (SQLException se){
            // Protocol-level validation failure: drop the client connection.
            LOG.error(serverWorkerName + ". SQLException :" + se);
            clientData.setRequestAndDisconnect();
        } catch (UnsupportedEncodingException ue){
            LOG.error(serverWorkerName + ". UnsupportedEncodingException :" + ue);
            clientData.setRequestAndDisconnect();
        } catch (Exception e){
            LOG.error(serverWorkerName + ". Exception :" + e);
            clientData.setRequestAndDisconnect();
        }
        reset();
        return clientData;
    }
}
| apache-2.0 |
sekikn/ambari | contrib/views/pig/src/main/resources/ui/pig-web/vendor/emacs.js | 2466 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
(function() {
    // Like String.indexOf, but matches a regular expression.
    // Returns the absolute index of the first match at or after startpos,
    // or -1 when there is no match.
    String.prototype.regexIndexOf = function(regex, startpos) {
        var indexOf = this.substring(startpos || 0).search(regex);
        return (indexOf >= 0) ? (indexOf + (startpos || 0)) : indexOf;
    };

    // Collects the start offsets of every %placeholder% occurrence in curLine.
    function regFindPosition(curLine) {
        var posArr = [];
        var pos = curLine.regexIndexOf(/\%\w*\%/);
        while (pos > -1) {
            posArr.push(pos);
            pos = curLine.regexIndexOf(/\%\w*\%/, pos + 1);
        }
        return posArr;
    }

    CodeMirror.keyMap.emacs = {
        fallthrough: ["basic", "default"],
        // Tab cycles the selection through the %param% placeholders on the
        // current line; with fewer than two placeholders it falls back to
        // the default indentation behavior.
        "Tab": function(cm) {
            var cursor = cm.getCursor();
            var curLine = cm.getLine(cursor.line);
            var vArr = regFindPosition(curLine);
            var paramRanges = [];
            for (var i = 0; i < vArr.length; i++) {
                // BUGFIX: closeparam was assigned without `var`, leaking an
                // implicit global (and breaking under strict mode); it is the
                // offset of the placeholder's closing '%'.
                var closeparam = curLine.regexIndexOf(/\%/, vArr[i] + 1);
                paramRanges.push({s: vArr[i], e: closeparam});
            }
            if (vArr.length > 1) {
                if (cursor.ch < paramRanges[0].s) {
                    CodeMirror.commands.defaultTab(cm);
                } else {
                    var thisrange, nextrange;
                    for (var j = 0; j < paramRanges.length; j++) {
                        thisrange = paramRanges[j];
                        // Wrap around to the first placeholder after the last one.
                        nextrange = paramRanges[j + 1] || paramRanges[0];
                        if (cursor.ch > thisrange.s && (cursor.ch < nextrange.s || nextrange.s < thisrange.s)) {
                            cm.setSelection({line: cursor.line, ch: nextrange.s}, {line: cursor.line, ch: nextrange.e + 1});
                        } else if (cursor.ch == thisrange.s) {
                            cm.setSelection({line: cursor.line, ch: thisrange.s}, {line: cursor.line, ch: thisrange.e + 1});
                        }
                    }
                }
            } else {
                CodeMirror.commands.defaultTab(cm);
            }
        }
    };
})();
| apache-2.0 |
mikebrow/cri-o | vendor/github.com/docker/docker/api/types/types.go | 20130 | package types // import "github.com/docker/docker/api/types"
import (
"errors"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/go-connections/nat"
)
// RootFS returns Image's RootFS description including the layer IDs.
type RootFS struct {
Type string
Layers []string `json:",omitempty"`
BaseLayer string `json:",omitempty"`
}
// ImageInspect contains response of Engine API:
// GET "/images/{name:.*}/json"
type ImageInspect struct {
ID string `json:"Id"`
RepoTags []string
RepoDigests []string
Parent string
Comment string
Created string
Container string
ContainerConfig *container.Config
DockerVersion string
Author string
Config *container.Config
Architecture string
Variant string `json:",omitempty"`
Os string
OsVersion string `json:",omitempty"`
Size int64
VirtualSize int64
GraphDriver GraphDriverData
RootFS RootFS
Metadata ImageMetadata
}
// ImageMetadata contains engine-local data about the image
type ImageMetadata struct {
LastTagTime time.Time `json:",omitempty"`
}
// Container contains response of Engine API:
// GET "/containers/json"
type Container struct {
ID string `json:"Id"`
Names []string
Image string
ImageID string
Command string
Created int64
Ports []Port
SizeRw int64 `json:",omitempty"`
SizeRootFs int64 `json:",omitempty"`
Labels map[string]string
State string
Status string
HostConfig struct {
NetworkMode string `json:",omitempty"`
}
NetworkSettings *SummaryNetworkSettings
Mounts []MountPoint
}
// CopyConfig contains request body of Engine API:
// POST "/containers/"+containerID+"/copy"
type CopyConfig struct {
Resource string
}
// ContainerPathStat is used to encode the header from
// GET "/containers/{name:.*}/archive"
// "Name" is the file or directory name.
type ContainerPathStat struct {
Name string `json:"name"`
Size int64 `json:"size"`
Mode os.FileMode `json:"mode"`
Mtime time.Time `json:"mtime"`
LinkTarget string `json:"linkTarget"`
}
// ContainerStats contains response of Engine API:
// GET "/stats"
type ContainerStats struct {
Body io.ReadCloser `json:"body"`
OSType string `json:"ostype"`
}
// Ping contains response of Engine API:
// GET "/_ping"
type Ping struct {
APIVersion string
OSType string
Experimental bool
BuilderVersion BuilderVersion
}
// ComponentVersion describes the version information for a specific component.
type ComponentVersion struct {
Name string
Version string
Details map[string]string `json:",omitempty"`
}
// Version contains response of Engine API:
// GET "/version"
type Version struct {
Platform struct{ Name string } `json:",omitempty"`
Components []ComponentVersion `json:",omitempty"`
// The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility
Version string
APIVersion string `json:"ApiVersion"`
MinAPIVersion string `json:"MinAPIVersion,omitempty"`
GitCommit string
GoVersion string
Os string
Arch string
KernelVersion string `json:",omitempty"`
Experimental bool `json:",omitempty"`
BuildTime string `json:",omitempty"`
}
// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
// in the version-string of external tools, such as containerd, or runC.
type Commit struct {
ID string // ID is the actual commit ID of external tool.
Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
}
// Info contains response of Engine API:
// GET "/info"
type Info struct {
ID string
Containers int
ContainersRunning int
ContainersPaused int
ContainersStopped int
Images int
Driver string
DriverStatus [][2]string
SystemStatus [][2]string
Plugins PluginsInfo
MemoryLimit bool
SwapLimit bool
KernelMemory bool
KernelMemoryTCP bool
CPUCfsPeriod bool `json:"CpuCfsPeriod"`
CPUCfsQuota bool `json:"CpuCfsQuota"`
CPUShares bool
CPUSet bool
PidsLimit bool
IPv4Forwarding bool
BridgeNfIptables bool
BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
Debug bool
NFd int
OomKillDisable bool
NGoroutines int
SystemTime string
LoggingDriver string
CgroupDriver string
NEventsListener int
KernelVersion string
OperatingSystem string
OSVersion string
OSType string
Architecture string
IndexServerAddress string
RegistryConfig *registry.ServiceConfig
NCPU int
MemTotal int64
GenericResources []swarm.GenericResource
DockerRootDir string
HTTPProxy string `json:"HttpProxy"`
HTTPSProxy string `json:"HttpsProxy"`
NoProxy string
Name string
Labels []string
ExperimentalBuild bool
ServerVersion string
ClusterStore string
ClusterAdvertise string
Runtimes map[string]Runtime
DefaultRuntime string
Swarm swarm.Info
// LiveRestoreEnabled determines whether containers should be kept
// running when the daemon is shutdown or upon daemon start if
// running containers are detected
LiveRestoreEnabled bool
Isolation container.Isolation
InitBinary string
ContainerdCommit Commit
RuncCommit Commit
InitCommit Commit
SecurityOptions []string
ProductLicense string `json:",omitempty"`
Warnings []string
}
// KeyValue holds a key/value pair
type KeyValue struct {
Key, Value string
}
// SecurityOpt contains the name and options of a security option
type SecurityOpt struct {
Name string
Options []KeyValue
}
// DecodeSecurityOptions decodes a security options string slice to a type safe
// SecurityOpt. Each entry is either a bare name (pre-1.13 daemon output) or a
// comma-separated list of key=value fields, where the "name" key populates
// SecurityOpt.Name and every other pair is collected into Options.
func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
	decoded := make([]SecurityOpt, 0, len(opts))
	for _, opt := range opts {
		// support output from a < 1.13 docker daemon
		if !strings.Contains(opt, "=") {
			decoded = append(decoded, SecurityOpt{Name: opt})
			continue
		}
		var secopt SecurityOpt
		for _, field := range strings.Split(opt, ",") {
			kv := strings.SplitN(field, "=", 2)
			if len(kv) != 2 {
				return nil, fmt.Errorf("invalid security option %q", field)
			}
			key, value := kv[0], kv[1]
			if key == "" || value == "" {
				return nil, errors.New("invalid empty security option")
			}
			if key == "name" {
				secopt.Name = value
				continue
			}
			secopt.Options = append(secopt.Options, KeyValue{Key: key, Value: value})
		}
		decoded = append(decoded, secopt)
	}
	return decoded, nil
}
// PluginsInfo is a temp struct holding Plugins name
// registered with docker daemon. It is used by Info struct
type PluginsInfo struct {
// List of Volume plugins registered
Volume []string
// List of Network plugins registered
Network []string
// List of Authorization plugins registered
Authorization []string
// List of Log plugins registered
Log []string
}
// ExecStartCheck is a temp struct used by execStart
// Config fields is part of ExecConfig in runconfig package
type ExecStartCheck struct {
// ExecStart will first check if it's detached
Detach bool
// Check if there's a tty
Tty bool
}
// HealthcheckResult stores information about a single run of a healthcheck probe
type HealthcheckResult struct {
Start time.Time // Start is the time this check started
End time.Time // End is the time this check ended
ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
Output string // Output from last check
}
// Health states
const (
NoHealthcheck = "none" // Indicates there is no healthcheck
Starting = "starting" // Starting indicates that the container is not yet ready
Healthy = "healthy" // Healthy indicates that the container is running correctly
Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
)
// Health stores information about the container's healthcheck results
type Health struct {
Status string // Status is one of Starting, Healthy or Unhealthy
FailingStreak int // FailingStreak is the number of consecutive failures
Log []*HealthcheckResult // Log contains the last few results (oldest first)
}
// ContainerState stores container's running state
// it's part of ContainerJSONBase and will return by "inspect" command
type ContainerState struct {
Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
Running bool
Paused bool
Restarting bool
OOMKilled bool
Dead bool
Pid int
ExitCode int
Error string
StartedAt string
FinishedAt string
Health *Health `json:",omitempty"`
}
// ContainerNode stores information about the node that a container
// is running on. It's only available in Docker Swarm
type ContainerNode struct {
ID string
IPAddress string `json:"IP"`
Addr string
Name string
Cpus int
Memory int64
Labels map[string]string
}
// ContainerJSONBase contains response of Engine API:
// GET "/containers/{name:.*}/json"
type ContainerJSONBase struct {
ID string `json:"Id"`
Created string
Path string
Args []string
State *ContainerState
Image string
ResolvConfPath string
HostnamePath string
HostsPath string
LogPath string
Node *ContainerNode `json:",omitempty"`
Name string
RestartCount int
Driver string
Platform string
MountLabel string
ProcessLabel string
AppArmorProfile string
ExecIDs []string
HostConfig *container.HostConfig
GraphDriver GraphDriverData
SizeRw *int64 `json:",omitempty"`
SizeRootFs *int64 `json:",omitempty"`
}
// ContainerJSON is newly used struct along with MountPoint
type ContainerJSON struct {
*ContainerJSONBase
Mounts []MountPoint
Config *container.Config
NetworkSettings *NetworkSettings
}
// NetworkSettings exposes the network settings in the api
type NetworkSettings struct {
NetworkSettingsBase
DefaultNetworkSettings
Networks map[string]*network.EndpointSettings
}
// SummaryNetworkSettings provides a summary of container's networks
// in /containers/json
type SummaryNetworkSettings struct {
Networks map[string]*network.EndpointSettings
}
// NetworkSettingsBase holds basic information about networks
type NetworkSettingsBase struct {
Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`)
SandboxID string // SandboxID uniquely represents a container's network stack
HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
SandboxKey string // SandboxKey identifies the sandbox
SecondaryIPAddresses []network.Address
SecondaryIPv6Addresses []network.Address
}
// DefaultNetworkSettings holds network information
// during the 2 release deprecation period.
// It will be removed in Docker 1.11.
type DefaultNetworkSettings struct {
EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
Gateway string // Gateway holds the gateway address for the network
GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
IPAddress string // IPAddress holds the IPv4 address for the network
IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
MacAddress string // MacAddress holds the MAC address for the network
}
// MountPoint represents a mount point configuration inside the container.
// This is used for reporting the mountpoints in use by a container.
type MountPoint struct {
Type mount.Type `json:",omitempty"`
Name string `json:",omitempty"`
Source string
Destination string
Driver string `json:",omitempty"`
Mode string
RW bool
Propagation mount.Propagation
}
// NetworkResource is the body of the "get network" http response message
type NetworkResource struct {
Name string // Name is the requested name of the network
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
Created time.Time // Created is the time the network created
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
IPAM network.IPAM // IPAM is the network's IP Address Management
Internal bool // Internal represents if the network is used internal only
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
Options map[string]string // Options holds the network specific options to use for when creating the network
Labels map[string]string // Labels holds metadata specific to the network being created
Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
Services map[string]network.ServiceInfo `json:",omitempty"`
}
// EndpointResource contains network resources allocated and used for a container in a network
type EndpointResource struct {
Name string
EndpointID string
MacAddress string
IPv4Address string
IPv6Address string
}
// NetworkCreate is the expected body of the "create network" http request message
type NetworkCreate struct {
// Check for networks with duplicate names.
// Network is primarily keyed based on a random ID and not on the name.
// Network name is strictly a user-friendly alias to the network
// which is uniquely identified using ID.
// And there is no guaranteed way to check for duplicates.
// Option CheckDuplicate is there to provide a best effort checking of any networks
// which has the same name but it is not guaranteed to catch all name collisions.
CheckDuplicate bool
Driver string
Scope string
EnableIPv6 bool
IPAM *network.IPAM
Internal bool
Attachable bool
Ingress bool
ConfigOnly bool
ConfigFrom *network.ConfigReference
Options map[string]string
Labels map[string]string
}
// NetworkCreateRequest is the request message sent to the server for network create call.
type NetworkCreateRequest struct {
NetworkCreate
Name string
}
// NetworkCreateResponse is the response message sent by the server for network create call
type NetworkCreateResponse struct {
ID string `json:"Id"`
Warning string
}
// NetworkConnect represents the data to be used to connect a container to the network
type NetworkConnect struct {
Container string
EndpointConfig *network.EndpointSettings `json:",omitempty"`
}
// NetworkDisconnect represents the data to be used to disconnect a container from the network
type NetworkDisconnect struct {
Container string
Force bool
}
// NetworkInspectOptions holds parameters to inspect network
type NetworkInspectOptions struct {
Scope string
Verbose bool
}
// Checkpoint represents the details of a checkpoint
type Checkpoint struct {
Name string // Name is the name of the checkpoint
}
// Runtime describes an OCI runtime
type Runtime struct {
Path string `json:"path"`
Args []string `json:"runtimeArgs,omitempty"`
}
// DiskUsage contains response of Engine API:
// GET "/system/df"
type DiskUsage struct {
LayersSize int64
Images []*ImageSummary
Containers []*Container
Volumes []*Volume
BuildCache []*BuildCache
BuilderSize int64 // deprecated
}
// ContainersPruneReport contains the response for Engine API:
// POST "/containers/prune"
type ContainersPruneReport struct {
ContainersDeleted []string
SpaceReclaimed uint64
}
// VolumesPruneReport contains the response for Engine API:
// POST "/volumes/prune"
type VolumesPruneReport struct {
VolumesDeleted []string
SpaceReclaimed uint64
}
// ImagesPruneReport contains the response for Engine API:
// POST "/images/prune"
type ImagesPruneReport struct {
ImagesDeleted []ImageDeleteResponseItem
SpaceReclaimed uint64
}
// BuildCachePruneReport contains the response for Engine API:
// POST "/build/prune"
type BuildCachePruneReport struct {
CachesDeleted []string
SpaceReclaimed uint64
}
// NetworksPruneReport contains the response for Engine API:
// POST "/networks/prune"
type NetworksPruneReport struct {
NetworksDeleted []string
}
// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
type SecretCreateResponse struct {
// ID is the id of the created secret.
ID string
}
// SecretListOptions holds parameters to list secrets
type SecretListOptions struct {
Filters filters.Args
}
// ConfigCreateResponse contains the information returned to a client
// on the creation of a new config.
type ConfigCreateResponse struct {
// ID is the id of the created config.
ID string
}
// ConfigListOptions holds parameters to list configs
type ConfigListOptions struct {
Filters filters.Args
}
// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
type PushResult struct {
Tag string
Digest string
Size int
}
// BuildResult contains the image id of a successful build
type BuildResult struct {
ID string
}
// BuildCache contains information about a build cache record
type BuildCache struct {
ID string
Parent string
Type string
Description string
InUse bool
Shared bool
Size int64
CreatedAt time.Time
LastUsedAt *time.Time
UsageCount int
}
// BuildCachePruneOptions hold parameters to prune the build cache
type BuildCachePruneOptions struct {
All bool
KeepStorage int64
Filters filters.Args
}
| apache-2.0 |
hgschmie/presto | presto-matching/src/main/java/io/prestosql/matching/DefaultPrinter.java | 2255 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.matching;
import io.prestosql.matching.pattern.CapturePattern;
import io.prestosql.matching.pattern.EqualsPattern;
import io.prestosql.matching.pattern.FilterPattern;
import io.prestosql.matching.pattern.TypeOfPattern;
import io.prestosql.matching.pattern.WithPattern;
import static java.lang.String.format;
/**
 * PatternVisitor that renders a pattern chain as an indented, line-per-node
 * textual description; nested {@code with(...)} sub-patterns are indented one
 * tab deeper than their parent.
 */
public class DefaultPrinter
        implements PatternVisitor
{
    private final StringBuilder output = new StringBuilder();
    private int depth;

    /** Returns the accumulated textual representation. */
    public String result()
    {
        return output.toString();
    }

    @Override
    public void visitTypeOf(TypeOfPattern<?> pattern)
    {
        visitPrevious(pattern);
        appendLine("typeOf(%s)", pattern.expectedClass().getSimpleName());
    }

    @Override
    public void visitWith(WithPattern<?> pattern)
    {
        visitPrevious(pattern);
        appendLine("with(%s)", pattern.getProperty().getName());
        // Render the nested pattern one level deeper.
        depth++;
        pattern.getPattern().accept(this);
        depth--;
    }

    @Override
    public void visitCapture(CapturePattern<?> pattern)
    {
        visitPrevious(pattern);
        appendLine("capturedAs(%s)", pattern.capture().description());
    }

    @Override
    public void visitEquals(EqualsPattern<?> pattern)
    {
        visitPrevious(pattern);
        appendLine("equals(%s)", pattern.expectedValue());
    }

    @Override
    public void visitFilter(FilterPattern<?> pattern)
    {
        visitPrevious(pattern);
        appendLine("filter(%s)", pattern.predicate());
    }

    // Appends one formatted line, indented with one tab per nesting level.
    private void appendLine(String template, Object... arguments)
    {
        output.append("\t".repeat(depth));
        output.append(format(template + "\n", arguments));
    }
}
| apache-2.0 |
blakebarnett/kops | vendor/gopkg.in/square/go-jose.v2/signing.go | 11463 | /*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"crypto/ecdsa"
"crypto/rsa"
"encoding/base64"
"errors"
"fmt"
"golang.org/x/crypto/ed25519"
"gopkg.in/square/go-jose.v2/json"
)
// NonceSource represents a source of random nonces to go into JWS objects
type NonceSource interface {
Nonce() (string, error)
}
// Signer represents a signer which takes a payload and produces a signed JWS object.
type Signer interface {
Sign(payload []byte) (*JSONWebSignature, error)
Options() SignerOptions
}
// SigningKey represents an algorithm/key used to sign a message.
type SigningKey struct {
Algorithm SignatureAlgorithm
Key interface{}
}
// SignerOptions represents options that can be set when creating signers.
type SignerOptions struct {
NonceSource NonceSource
EmbedJWK bool
// Optional map of additional keys to be inserted into the protected header
// of a JWS object. Some specifications which make use of JWS like to insert
// additional values here. All values must be JSON-serializable.
ExtraHeaders map[HeaderKey]interface{}
}
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
// if necessary. It returns itself and so can be used in a fluent style.
func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions {
	if so.ExtraHeaders == nil {
		// Lazily allocate the map on first use.
		so.ExtraHeaders = make(map[HeaderKey]interface{})
	}
	so.ExtraHeaders[k] = v
	return so
}
// WithContentType adds a content type ("cty") header and returns the updated
// SignerOptions, so calls can be chained fluently.
func (so *SignerOptions) WithContentType(contentType ContentType) *SignerOptions {
	return so.WithHeader(HeaderContentType, contentType)
}
// WithType adds a type ("typ") header and returns the updated SignerOptions.
func (so *SignerOptions) WithType(typ ContentType) *SignerOptions {
return so.WithHeader(HeaderType, typ)
}
// payloadSigner produces a signature over a payload using a concrete
// algorithm implementation.
type payloadSigner interface {
	signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error)
}

// payloadVerifier checks a signature over a payload using a concrete
// algorithm implementation.
type payloadVerifier interface {
	verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
}

// genericSigner is the Signer implementation returned by NewSigner /
// NewMultiSigner; it signs a payload once per configured recipient.
type genericSigner struct {
	recipients   []recipientSigInfo
	nonceSource  NonceSource
	embedJWK     bool
	extraHeaders map[HeaderKey]interface{}
}

// recipientSigInfo ties together the algorithm, the (optional) public key
// used for the jwk/kid header, and the signer implementation for one recipient.
type recipientSigInfo struct {
	sigAlg    SignatureAlgorithm
	publicKey *JSONWebKey
	signer    payloadSigner
}

// NewSigner creates an appropriate signer based on the key type
func NewSigner(sig SigningKey, opts *SignerOptions) (Signer, error) {
	return NewMultiSigner([]SigningKey{sig}, opts)
}
// NewMultiSigner creates a signer that produces one signature per recipient
// key. Options (nonce source, JWK embedding, extra headers) apply to all
// recipients.
func NewMultiSigner(sigs []SigningKey, opts *SignerOptions) (Signer, error) {
	s := &genericSigner{recipients: make([]recipientSigInfo, 0)}

	if opts != nil {
		s.nonceSource = opts.NonceSource
		s.embedJWK = opts.EmbedJWK
		s.extraHeaders = opts.ExtraHeaders
	}

	for _, sig := range sigs {
		if err := s.addRecipient(sig.Algorithm, sig.Key); err != nil {
			return nil, err
		}
	}

	return s, nil
}
// newVerifier creates a verifier based on the key type. Supported key types
// are Ed25519/RSA/ECDSA public keys, raw []byte HMAC secrets, and JSONWebKey
// wrappers around any of those.
func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
	switch verificationKey := verificationKey.(type) {
	case ed25519.PublicKey:
		return &edEncrypterVerifier{
			publicKey: verificationKey,
		}, nil
	case *rsa.PublicKey:
		return &rsaEncrypterVerifier{
			publicKey: verificationKey,
		}, nil
	case *ecdsa.PublicKey:
		return &ecEncrypterVerifier{
			publicKey: verificationKey,
		}, nil
	case []byte:
		return &symmetricMac{
			key: verificationKey,
		}, nil
	case JSONWebKey:
		// Unwrap JWK containers and recurse on the embedded key.
		return newVerifier(verificationKey.Key)
	case *JSONWebKey:
		return newVerifier(verificationKey.Key)
	default:
		return nil, ErrUnsupportedKeyType
	}
}
// addRecipient wraps the given key in a recipientSigInfo and appends it to
// the signer's recipient list; nothing is appended when wrapping fails.
func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
	info, err := makeJWSRecipient(alg, signingKey)
	if err == nil {
		ctx.recipients = append(ctx.recipients, info)
	}
	return err
}
// makeJWSRecipient wraps a signing key of any supported concrete type
// (Ed25519/RSA/ECDSA private key, raw []byte secret, or JWK) in a
// recipientSigInfo.
func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) {
	switch signingKey := signingKey.(type) {
	case ed25519.PrivateKey:
		return newEd25519Signer(alg, signingKey)
	case *rsa.PrivateKey:
		return newRSASigner(alg, signingKey)
	case *ecdsa.PrivateKey:
		return newECDSASigner(alg, signingKey)
	case []byte:
		return newSymmetricSigner(alg, signingKey)
	case JSONWebKey:
		return newJWKSigner(alg, signingKey)
	case *JSONWebKey:
		return newJWKSigner(alg, *signingKey)
	default:
		return recipientSigInfo{}, ErrUnsupportedKeyType
	}
}

// newJWKSigner builds a recipient from a JSON Web Key, carrying the JWK's
// metadata (e.g. key ID) over onto the public key used for headers.
func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) {
	recipient, err := makeJWSRecipient(alg, signingKey.Key)
	if err != nil {
		return recipientSigInfo{}, err
	}
	if recipient.publicKey != nil {
		// recipient.publicKey is a JWK synthesized for embedding when recipientSigInfo
		// was created for the inner key (such as a RSA or ECDSA public key). It contains
		// the pub key for embedding, but doesn't have extra params like key id.
		publicKey := signingKey
		publicKey.Key = recipient.publicKey.Key
		recipient.publicKey = &publicKey

		// This should be impossible, but let's check anyway.
		if !recipient.publicKey.IsPublic() {
			return recipientSigInfo{}, errors.New("square/go-jose: public key was unexpectedly not public")
		}
	}
	return recipient, nil
}
// Sign signs the payload once per configured recipient, assembling each
// signature's protected header (alg, jwk/kid, optional nonce, extra headers)
// and returning the populated JWS object.
func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) {
	obj := &JSONWebSignature{}
	obj.payload = payload
	obj.Signatures = make([]Signature, len(ctx.recipients))

	for i, recipient := range ctx.recipients {
		// Every recipient gets its own protected header, starting with "alg".
		protected := map[HeaderKey]interface{}{
			headerAlgorithm: string(recipient.sigAlg),
		}

		if recipient.publicKey != nil {
			// We want to embed the JWK or set the kid header, but not both. Having a protected
			// header that contains an embedded JWK while also simultaneously containing the kid
			// header is confusing, and at least in ACME the two are considered to be mutually
			// exclusive. The fact that both can exist at the same time is a somewhat unfortunate
			// result of the JOSE spec. We've decided that this library will only include one or
			// the other to avoid this confusion.
			//
			// See https://github.com/square/go-jose/issues/157 for more context.
			if ctx.embedJWK {
				protected[headerJWK] = recipient.publicKey
			} else {
				protected[headerKeyID] = recipient.publicKey.KeyID
			}
		}

		if ctx.nonceSource != nil {
			nonce, err := ctx.nonceSource.Nonce()
			if err != nil {
				return nil, fmt.Errorf("square/go-jose: Error generating nonce: %v", err)
			}
			protected[headerNonce] = nonce
		}

		// Extra headers are applied last and may therefore override the
		// values set above.
		for k, v := range ctx.extraHeaders {
			protected[k] = v
		}

		serializedProtected := mustSerializeJSON(protected)

		// Signing input: base64url(protected) '.' base64url(payload).
		input := []byte(fmt.Sprintf("%s.%s",
			base64.RawURLEncoding.EncodeToString(serializedProtected),
			base64.RawURLEncoding.EncodeToString(payload)))

		signatureInfo, err := recipient.signer.signPayload(input, recipient.sigAlg)
		if err != nil {
			return nil, err
		}

		// Re-marshal each header value individually into the rawHeader map.
		signatureInfo.protected = &rawHeader{}
		for k, v := range protected {
			b, err := json.Marshal(v)
			if err != nil {
				return nil, fmt.Errorf("square/go-jose: Error marshalling item %#v: %v", k, err)
			}
			(*signatureInfo.protected)[k] = makeRawMessage(b)
		}
		obj.Signatures[i] = signatureInfo
	}

	return obj, nil
}
// Options reconstructs the SignerOptions this signer was configured with.
func (ctx *genericSigner) Options() SignerOptions {
	opts := SignerOptions{}
	opts.NonceSource = ctx.nonceSource
	opts.EmbedJWK = ctx.embedJWK
	opts.ExtraHeaders = ctx.extraHeaders
	return opts
}
// Verify validates the signature on the object and returns the payload.
// This function does not support multi-signature, if you desire multi-sig
// verification use VerifyMulti instead.
//
// Be careful when verifying signatures based on embedded JWKs inside the
// payload header. You cannot assume that the key received in a payload is
// trusted.
func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
	// Delegates to DetachedVerify using the object's own attached payload.
	err := obj.DetachedVerify(obj.payload, verificationKey)
	if err != nil {
		return nil, err
	}
	return obj.payload, nil
}
// DetachedVerify validates a detached signature on the given payload. In
// most cases, you will probably want to use Verify instead. DetachedVerify
// is only useful if you have a payload and signature that are separated from
// each other.
func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
	verifier, err := newVerifier(verificationKey)
	if err != nil {
		return err
	}

	// This method requires exactly one signature. The previous check only
	// rejected len > 1, so an object carrying zero signatures would panic on
	// the index access below; reject that case explicitly as well.
	if len(obj.Signatures) != 1 {
		return errors.New("square/go-jose: too many signatures in payload; expecting only one")
	}

	signature := obj.Signatures[0]
	headers := signature.mergedHeaders()
	critical, err := headers.getCritical()
	if err != nil {
		return err
	}
	if len(critical) > 0 {
		// Unsupported crit header
		return ErrCryptoFailure
	}

	input := obj.computeAuthData(payload, &signature)
	alg := headers.getSignatureAlgorithm()
	err = verifier.verifyPayload(input, signature.Signature, alg)
	if err == nil {
		return nil
	}
	return ErrCryptoFailure
}
// VerifyMulti validates (one of the multiple) signatures on the object and
// returns the index of the signature that was verified, along with the signature
// object and the payload. We return the signature and index to guarantee that
// callers are getting the verified value.
func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
	idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey)
	if err != nil {
		return -1, Signature{}, nil, err
	}
	return idx, sig, obj.payload, nil
}

// DetachedVerifyMulti validates a detached signature on the given payload with
// a signature/object that has potentially multiple signers. This returns the index
// of the signature that was verified, along with the signature object. We return
// the signature and index to guarantee that callers are getting the verified value.
//
// In most cases, you will probably want to use Verify or VerifyMulti instead.
// DetachedVerifyMulti is only useful if you have a payload and signature that are
// separated from each other, and the signature can have multiple signers at the
// same time.
func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
	verifier, err := newVerifier(verificationKey)
	if err != nil {
		return -1, Signature{}, err
	}

	for i, signature := range obj.Signatures {
		headers := signature.mergedHeaders()

		// Signatures with malformed headers or unsupported "crit" entries
		// are skipped rather than failed: a later signature may still verify.
		critical, err := headers.getCritical()
		if err != nil {
			continue
		}
		if len(critical) > 0 {
			// Unsupported crit header
			continue
		}

		input := obj.computeAuthData(payload, &signature)
		alg := headers.getSignatureAlgorithm()
		err = verifier.verifyPayload(input, signature.Signature, alg)
		if err == nil {
			return i, signature, nil
		}
	}

	// No signature verified with the given key.
	return -1, Signature{}, ErrCryptoFailure
}
| apache-2.0 |
linyiqun/minos | owl/machine/admin.py | 333 | from django.contrib import admin
from models import Machine
class MachineAdmin(admin.ModelAdmin):
    """Django admin configuration for the Machine model."""

    # Columns shown on the machine change-list page.
    list_display = ('hostname', 'ip', 'idc', 'rack', 'cores', 'ram',
        'disks', 'disk_capacity', 'ssds', 'ssd_capacity', )
    # Sidebar filters: data center (idc) and rack.
    list_filter = ('idc', 'rack', )
    # Default sort order for the list page.
    ordering = ('hostname', )

# Register the model so it appears in the Django admin site.
admin.site.register(Machine, MachineAdmin)
| apache-2.0 |
nelsonsilva/wave-protocol | src/org/waveprotocol/wave/model/account/ObservableIndexability.java | 1135 | /**
* Copyright 2010 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.waveprotocol.wave.model.account;
import org.waveprotocol.wave.model.wave.ParticipantId;
import org.waveprotocol.wave.model.wave.SourcesEvents;
/**
* Indexability that you can listen to.
*
*
*/
public interface ObservableIndexability extends Indexability,
    SourcesEvents<ObservableIndexability.Listener> {

  /**
   * Receives notification of indexability changes.
   */
  interface Listener {
    /**
     * Called when a participant's indexability decision has changed.
     *
     * @param participant the participant whose indexability changed
     * @param newValue the new index decision for that participant
     */
    void onChanged(ParticipantId participant, IndexDecision newValue);
  }
}
| apache-2.0 |
bmanc2000/aws-sdk-cpp | aws-cpp-sdk-cloudfront/source/model/GetCloudFrontOriginAccessIdentity2015_04_17Result.cpp | 2145 | /*
* Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/cloudfront/model/GetCloudFrontOriginAccessIdentity2015_04_17Result.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <utility>
using namespace Aws::CloudFront::Model;
using namespace Aws::Utils::Xml;
using namespace Aws::Utils;
using namespace Aws;
// Default constructor: members are left default-initialized (empty).
GetCloudFrontOriginAccessIdentity2015_04_17Result::GetCloudFrontOriginAccessIdentity2015_04_17Result()
{
}

// Parsing constructor: delegates to operator= to populate from the response.
GetCloudFrontOriginAccessIdentity2015_04_17Result::GetCloudFrontOriginAccessIdentity2015_04_17Result(const AmazonWebServiceResult<XmlDocument>& result)
{
  *this = result;
}

// Populates this result from the XML payload and HTTP headers of a CloudFront
// GetCloudFrontOriginAccessIdentity response.
GetCloudFrontOriginAccessIdentity2015_04_17Result& GetCloudFrontOriginAccessIdentity2015_04_17Result::operator =(const AmazonWebServiceResult<XmlDocument>& result)
{
  const XmlDocument& xmlDocument = result.GetPayload();
  XmlNode resultNode = xmlDocument.GetRootElement();

  if(!resultNode.IsNull())
  {
    // The identity element may appear as a "CloudFrontOriginAccessIdentity"
    // child node or be the root element itself; handle both layouts.
    XmlNode cloudFrontOriginAccessIdentityNode = resultNode.FirstChild("CloudFrontOriginAccessIdentity");
    if(cloudFrontOriginAccessIdentityNode.IsNull())
    {
      cloudFrontOriginAccessIdentityNode = resultNode;
    }
    if(!cloudFrontOriginAccessIdentityNode.IsNull())
    {
      m_cloudFrontOriginAccessIdentity = cloudFrontOriginAccessIdentityNode;
    }
  }

  // The ETag comes back as an HTTP header rather than in the XML body.
  const auto& headers = result.GetHeaderValueCollection();
  const auto& eTagIter = headers.find("etag");
  if(eTagIter != headers.end())
  {
    m_eTag = eTagIter->second;
  }

  return *this;
}
| apache-2.0 |
damifan/damifan.github.io | js/main.js | 2293 | (function($){
var toTop = ($('#sidebar').height() - $(window).height()) + 60;
// Caption
$('.article-entry').each(function(i) {
$(this).find('img').each(function() {
if (this.alt && !(!!$.prototype.justifiedGallery && $(this).parent('.justified-gallery').length)) {
$(this).after('<span class="caption">' + this.alt + '</span>');
}
// 对于已经包含在链接内的图片不适用lightGallery
if ($(this).parent().prop("tagName") !== 'A') {
$(this).wrap('<a href="' + ($(this).attr("data-imgbig") ? $(this).attr("data-imgbig") : this.src) + '" title="' + this.alt + '" class="gallery-item"></a>');
}
});
});
if (typeof lightGallery != 'undefined') {
var options = {
selector: '.gallery-item'
};
$('.article-entry').each(function(i, entry) {
lightGallery(entry, options);
});
lightGallery($('.article-gallery')[0], options);
}
if (!!$.prototype.justifiedGallery) { // if justifiedGallery method is defined
var options = {
rowHeight: 140,
margins: 4,
lastRow: 'justify'
};
$('.justified-gallery').justifiedGallery(options);
}
// Profile card
$(document).on('click', function () {
$('#profile').removeClass('card');
}).on('click', '#profile-anchor', function (e) {
e.stopPropagation();
$('#profile').toggleClass('card');
}).on('click', '.profile-inner', function (e) {
e.stopPropagation();
});
// To Top
if ($('#sidebar').length) {
$(document).on('scroll', function () {
if ($(document).width() >= 800) {
if(($(this).scrollTop() > toTop) && ($(this).scrollTop() > 0)) {
$('#toTop').fadeIn();
$('#toTop').css('left', $('#sidebar').offset().left);
} else {
$('#toTop').fadeOut();
}
} else {
$('#toTop').fadeIn();
$('#toTop').css('right', 20);
}
}).on('click', '#toTop', function () {
$('body, html').animate({ scrollTop: 0 }, 600);
});
}
})(jQuery);
| apache-2.0 |
kunallimaye/apiman-plugins | transformation-policy/src/main/java/io/apiman/plugins/transformation_policy/transformer/DataTransformerFactory.java | 921 | package io.apiman.plugins.transformation_policy.transformer;
import io.apiman.plugins.transformation_policy.beans.DataFormat;
import java.util.HashMap;
import java.util.Map;
/**
 * Factory that looks up the {@link DataTransformer} able to convert a payload
 * between two {@link DataFormat}s. Supported conversions are registered once
 * in a static table: JSON to XML and XML to JSON.
 */
public class DataTransformerFactory {

    /** Lookup table: input format -> (output format -> transformer). */
    private static final Map<DataFormat, Map<DataFormat, DataTransformer>> dataTransformers = new HashMap<>();

    static {
        dataTransformers.put(DataFormat.JSON, new HashMap<DataFormat, DataTransformer>());
        dataTransformers.get(DataFormat.JSON).put(DataFormat.XML, new JsonToXmlTransformer());
        dataTransformers.put(DataFormat.XML, new HashMap<DataFormat, DataTransformer>());
        dataTransformers.get(DataFormat.XML).put(DataFormat.JSON, new XmlToJsonTransformer());
    }

    /**
     * Returns the transformer for the given conversion, or {@code null} when
     * the combination is not supported. The previous implementation threw a
     * {@link NullPointerException} when the input format had no registered
     * entry; an unsupported conversion now uniformly yields {@code null}.
     *
     * @param inputFormat format of the incoming payload
     * @param outputFormat desired format of the outgoing payload
     * @return the matching transformer, or {@code null} if unsupported
     */
    public static DataTransformer getDataTransformer(DataFormat inputFormat, DataFormat outputFormat) {
        final Map<DataFormat, DataTransformer> byOutput = dataTransformers.get(inputFormat);
        return byOutput == null ? null : byOutput.get(outputFormat);
    }
}
| apache-2.0 |
Addepar/buck | src/com/facebook/buck/core/model/platform/Platform.java | 1253 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.core.model.platform;
import com.facebook.buck.core.exceptions.DependencyStack;
import java.util.Collection;
/**
* A platform is defined as a set of properties (constraints).
*
* <p>The platform constraints can be defined in different ways but the representation should not
* matter as long as a platform can figure out whether it's matching a set of given constraints or
* not.
*/
public interface Platform {

  /**
   * Checks this platform against a set of constraint values.
   *
   * @param constraintValues the constraints that must all be satisfied
   * @param dependencyStack the dependency stack of the current resolution
   * @return {@code true} if the current platform matches the provided constraints.
   */
  boolean matchesAll(Collection<ConstraintValue> constraintValues, DependencyStack dependencyStack);
}
| apache-2.0 |
shankarh/geode | geode-core/src/main/java/org/apache/geode/admin/GemFireHealthConfig.java | 2039 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.admin;
/**
* Provides configuration information relating to all of the components of a GemFire distributed
* system.
*
*
* @since GemFire 3.5
* @deprecated as of 7.0 use the <code><a href=
* "{@docRoot}/org/apache/geode/management/package-summary.html">management</a></code>
* package instead
*/
public interface GemFireHealthConfig extends MemberHealthConfig, CacheHealthConfig {

  /**
   * The default number of seconds between assessments of the health of the GemFire components.
   */
  public static final int DEFAULT_HEALTH_EVALUATION_INTERVAL = 30;

  ////////////////////// Instance Methods //////////////////////

  /**
   * Returns the name of the host to which this configuration applies. If this is the "default"
   * configuration, then <code>null</code> is returned.
   *
   * @return the host name, or <code>null</code> for the default configuration
   * @see GemFireHealth#getGemFireHealthConfig
   */
  public String getHostName();

  /**
   * Sets the number of seconds between assessments of the health of the GemFire components.
   *
   * @param interval the evaluation interval, in seconds
   */
  public void setHealthEvaluationInterval(int interval);

  /**
   * Returns the number of seconds between assessments of the health of the GemFire components.
   *
   * @return the evaluation interval, in seconds
   */
  public int getHealthEvaluationInterval();
}
| apache-2.0 |
stevekuznetsov/origin | pkg/authorization/generated/listers/authorization/v1/role.go | 2425 | // This file was automatically generated by lister-gen
package v1
import (
api "github.com/openshift/origin/pkg/authorization/api"
v1 "github.com/openshift/origin/pkg/authorization/api/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// NOTE(review): the file header states this file was generated by lister-gen;
// hand edits here will be lost on regeneration.

// RoleLister helps list Roles.
type RoleLister interface {
	// List lists all Roles in the indexer.
	List(selector labels.Selector) (ret []*v1.Role, err error)
	// Roles returns an object that can list and get Roles.
	Roles(namespace string) RoleNamespaceLister
	RoleListerExpansion
}

// roleLister implements the RoleLister interface.
type roleLister struct {
	// indexer is the shared informer cache backing all list/get operations.
	indexer cache.Indexer
}

// NewRoleLister returns a new RoleLister.
func NewRoleLister(indexer cache.Indexer) RoleLister {
	return &roleLister{indexer: indexer}
}

// List lists all Roles in the indexer.
func (s *roleLister) List(selector labels.Selector) (ret []*v1.Role, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*v1.Role))
	})
	return ret, err
}

// Roles returns an object that can list and get Roles.
func (s *roleLister) Roles(namespace string) RoleNamespaceLister {
	return roleNamespaceLister{indexer: s.indexer, namespace: namespace}
}

// RoleNamespaceLister helps list and get Roles.
type RoleNamespaceLister interface {
	// List lists all Roles in the indexer for a given namespace.
	List(selector labels.Selector) (ret []*v1.Role, err error)
	// Get retrieves the Role from the indexer for a given namespace and name.
	Get(name string) (*v1.Role, error)
	RoleNamespaceListerExpansion
}

// roleNamespaceLister implements the RoleNamespaceLister
// interface.
type roleNamespaceLister struct {
	indexer   cache.Indexer
	namespace string
}

// List lists all Roles in the indexer for a given namespace.
func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1.Role, err error) {
	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(*v1.Role))
	})
	return ret, err
}

// Get retrieves the Role from the indexer for a given namespace and name.
// Returns a NotFound API error when no such Role exists in the cache.
func (s roleNamespaceLister) Get(name string) (*v1.Role, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(api.Resource("role"), name)
	}
	return obj.(*v1.Role), nil
}
joewitt/incubator-nifi | nifi-bootstrap/src/main/java/org/apache/nifi/bootstrap/RunNiFi.java | 57337 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.bootstrap;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Reader;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.bootstrap.notification.NotificationType;
import org.apache.nifi.bootstrap.util.OSUtils;
import org.apache.nifi.util.file.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
* The class which bootstraps Apache NiFi. This class looks for the
* bootstrap.conf file by looking in the following places (in order):</p>
* <ol>
* <li>Java System Property named
* {@code org.apache.nifi.bootstrap.config.file}</li>
* <li>${NIFI_HOME}/./conf/bootstrap.conf, where ${NIFI_HOME} references an
* environment variable {@code NIFI_HOME}</li>
* <li>./conf/bootstrap.conf, where {@code ./} represents the working
* directory.</li>
* </ol>
* <p>
* If the {@code bootstrap.conf} file cannot be found, throws a {@code FileNotFoundException}.
*/
public class RunNiFi {
    // Default locations, used when not overridden by system properties or env.
    public static final String DEFAULT_CONFIG_FILE = "./conf/bootstrap.conf";
    public static final String DEFAULT_JAVA_CMD = "java";
    public static final String DEFAULT_PID_DIR = "bin";
    public static final String DEFAULT_LOG_DIR = "./logs";

    // bootstrap.conf property controlling graceful-shutdown wait time.
    public static final String GRACEFUL_SHUTDOWN_PROP = "graceful.shutdown.seconds";
    public static final String DEFAULT_GRACEFUL_SHUTDOWN_VALUE = "20";

    // bootstrap.conf properties wiring up the notification services.
    public static final String NOTIFICATION_SERVICES_FILE_PROP = "notification.services.file";
    public static final String NOTIFICATION_ATTEMPTS_PROP = "notification.max.attempts";
    public static final String NIFI_START_NOTIFICATION_SERVICE_IDS_PROP = "nifi.start.notification.services";
    public static final String NIFI_STOP_NOTIFICATION_SERVICE_IDS_PROP = "nifi.stop.notification.services";
    public static final String NIFI_DEAD_NOTIFICATION_SERVICE_IDS_PROP = "nifi.dead.notification.services";

    // System property naming the directory that holds the pid/status/lock files.
    public static final String NIFI_PID_DIR_PROP = "org.apache.nifi.bootstrap.config.pid.dir";

    public static final String NIFI_PID_FILE_NAME = "nifi.pid";
    public static final String NIFI_STATUS_FILE_NAME = "nifi.status";
    public static final String NIFI_LOCK_FILE_NAME = "nifi.lock";
    public static final String NIFI_BOOTSTRAP_SENSITIVE_KEY = "nifi.bootstrap.sensitive.key";

    public static final String PID_KEY = "pid";

    public static final int STARTUP_WAIT_SECONDS = 60;

    // Commands exchanged with the NiFi process over the bootstrap port.
    public static final String SHUTDOWN_CMD = "SHUTDOWN";
    public static final String PING_CMD = "PING";
    public static final String DUMP_CMD = "DUMP";

    // Mutable state about the monitored NiFi process; volatile because it is
    // touched from the shutdown hook and monitoring threads.
    private volatile boolean autoRestartNiFi = true;
    private volatile int ccPort = -1;
    private volatile long nifiPid = -1L;
    private volatile String secretKey;
    private volatile ShutdownHook shutdownHook;
    private volatile boolean nifiStarted;

    private final Lock startedLock = new ReentrantLock();
    private final Lock lock = new ReentrantLock();
    private final Condition startupCondition = lock.newCondition();

    private final File bootstrapConfigFile;

    // used for logging initial info; these will be logged to console by default when the app is started
    private final Logger cmdLogger = LoggerFactory.getLogger("org.apache.nifi.bootstrap.Command");
    // used for logging all info. These by default will be written to the log file
    private final Logger defaultLogger = LoggerFactory.getLogger(RunNiFi.class);

    private final ExecutorService loggingExecutor;
    private volatile Set<Future<?>> loggingFutures = new HashSet<>(2);
    private final NotificationServiceManager serviceManager;
    /**
     * Creates the bootstrapper.
     *
     * @param bootstrapConfigFile the bootstrap.conf file to read configuration from
     * @param verbose whether verbose output was requested; not used by the
     *        constructor itself, retained for the command-line contract
     * @throws IOException if the notification services configuration cannot be loaded
     */
    public RunNiFi(final File bootstrapConfigFile, final boolean verbose) throws IOException {
        this.bootstrapConfigFile = bootstrapConfigFile;

        // Daemon threads so the logging handlers never keep the bootstrap JVM alive.
        loggingExecutor = Executors.newFixedThreadPool(2, new ThreadFactory() {
            @Override
            public Thread newThread(final Runnable runnable) {
                final Thread t = Executors.defaultThreadFactory().newThread(runnable);
                t.setDaemon(true);
                t.setName("NiFi logging handler");
                return t;
            }
        });

        serviceManager = loadServices();
    }
    /** Prints the command-line usage summary to standard out. */
    private static void printUsage() {
        System.out.println("Usage:");
        System.out.println();
        System.out.println("java org.apache.nifi.bootstrap.RunNiFi [<-verbose>] <command> [options]");
        System.out.println();
        System.out.println("Valid commands include:");
        System.out.println("");
        System.out.println("Start : Start a new instance of Apache NiFi");
        System.out.println("Stop : Stop a running instance of Apache NiFi");
        System.out.println("Restart : Stop Apache NiFi, if it is running, and then start a new instance");
        System.out.println("Status : Determine if there is a running instance of Apache NiFi");
        System.out.println("Dump : Write a Thread Dump to the file specified by [options], or to the log if no file is given");
        System.out.println("Run : Start a new instance of Apache NiFi and monitor the Process, restarting if the instance dies");
        System.out.println();
    }

    /**
     * Returns a copy of the given args with the first element removed; used to
     * consume the optional "-verbose" flag before command dispatch.
     */
    private static String[] shift(final String[] orig) {
        return Arrays.copyOfRange(orig, 1, orig.length);
    }
public static void main(String[] args) throws IOException, InterruptedException {
if (args.length < 1 || args.length > 3) {
printUsage();
return;
}
File dumpFile = null;
boolean verbose = false;
if (args[0].equals("-verbose")) {
verbose = true;
args = shift(args);
}
final String cmd = args[0];
if (cmd.equals("dump")) {
if (args.length > 1) {
dumpFile = new File(args[1]);
} else {
dumpFile = null;
}
}
switch (cmd.toLowerCase()) {
case "start":
case "run":
case "stop":
case "status":
case "dump":
case "restart":
case "env":
break;
default:
printUsage();
return;
}
final File configFile = getDefaultBootstrapConfFile();
final RunNiFi runNiFi = new RunNiFi(configFile, verbose);
Integer exitStatus = null;
switch (cmd.toLowerCase()) {
case "start":
runNiFi.start();
break;
case "run":
runNiFi.start();
break;
case "stop":
runNiFi.stop();
break;
case "status":
exitStatus = runNiFi.status();
break;
case "restart":
runNiFi.stop();
runNiFi.start();
break;
case "dump":
runNiFi.dump(dumpFile);
break;
case "env":
runNiFi.env();
break;
}
if (exitStatus != null) {
System.exit(exitStatus);
}
}
private static File getDefaultBootstrapConfFile() {
String configFilename = System.getProperty("org.apache.nifi.bootstrap.config.file");
if (configFilename == null) {
final String nifiHome = System.getenv("NIFI_HOME");
if (nifiHome != null) {
final File nifiHomeFile = new File(nifiHome.trim());
final File configFile = new File(nifiHomeFile, DEFAULT_CONFIG_FILE);
configFilename = configFile.getAbsolutePath();
}
}
if (configFilename == null) {
configFilename = DEFAULT_CONFIG_FILE;
}
final File configFile = new File(configFilename);
return configFile;
}
    /**
     * Builds the NotificationServiceManager from bootstrap.conf: reads the
     * max-attempts setting, loads the notification-services XML file (if
     * configured), and registers service IDs for the started/stopped/died
     * notification types. Missing or broken configuration is logged and
     * tolerated rather than failing the bootstrap.
     *
     * @throws IOException if bootstrap.conf itself cannot be read
     */
    private NotificationServiceManager loadServices() throws IOException {
        final File bootstrapConfFile = this.bootstrapConfigFile;
        final Properties properties = new Properties();
        try (final FileInputStream fis = new FileInputStream(bootstrapConfFile)) {
            properties.load(fis);
        }

        final NotificationServiceManager manager = new NotificationServiceManager();
        final String attemptProp = properties.getProperty(NOTIFICATION_ATTEMPTS_PROP);
        if (attemptProp != null) {
            try {
                final int maxAttempts = Integer.parseInt(attemptProp.trim());
                if (maxAttempts >= 0) {
                    manager.setMaxNotificationAttempts(maxAttempts);
                }
            } catch (final NumberFormatException nfe) {
                // Keep the manager's default on a malformed value.
                defaultLogger.error("Maximum number of attempts to send notification email is set to an invalid value of {}; will use default value", attemptProp);
            }
        }

        final String notificationServicesXmlFilename = properties.getProperty(NOTIFICATION_SERVICES_FILE_PROP);
        if (notificationServicesXmlFilename == null) {
            defaultLogger.info("No Bootstrap Notification Services configured.");
            return manager;
        }

        // A relative path is resolved against the NiFi home directory
        // (the parent of the conf directory that holds bootstrap.conf).
        final File xmlFile = new File(notificationServicesXmlFilename);
        final File servicesFile;

        if (xmlFile.isAbsolute()) {
            servicesFile = xmlFile;
        } else {
            final File confDir = bootstrapConfigFile.getParentFile();
            final File nifiHome = confDir.getParentFile();
            servicesFile = new File(nifiHome, notificationServicesXmlFilename);
        }

        if (!servicesFile.exists()) {
            defaultLogger.error("Bootstrap Notification Services file configured as " + servicesFile + " but could not find file; will not load notification services");
            return manager;
        }

        try {
            manager.loadNotificationServices(servicesFile);
        } catch (final Exception e) {
            defaultLogger.error("Bootstrap Notification Services file configured as " + servicesFile + " but failed to load notification services", e);
        }

        registerNotificationServices(manager, NotificationType.NIFI_STARTED, properties.getProperty(NIFI_START_NOTIFICATION_SERVICE_IDS_PROP));
        registerNotificationServices(manager, NotificationType.NIFI_STOPPED, properties.getProperty(NIFI_STOP_NOTIFICATION_SERVICE_IDS_PROP));
        registerNotificationServices(manager, NotificationType.NIFI_DIED, properties.getProperty(NIFI_DEAD_NOTIFICATION_SERVICE_IDS_PROP));

        return manager;
    }
private void registerNotificationServices(final NotificationServiceManager manager, final NotificationType type, final String serviceIds) {
if (serviceIds == null) {
defaultLogger.info("Registered no Notification Services for Notification Type {}", type);
return;
}
int registered = 0;
for (final String id : serviceIds.split(",")) {
final String trimmed = id.trim();
if (trimmed.isEmpty()) {
continue;
}
try {
manager.registerNotificationService(type, trimmed);
registered++;
} catch (final Exception e) {
defaultLogger.warn("Failed to register Notification Service with ID {} for Notifications of type {} due to {}", trimmed, type, e.toString());
defaultLogger.error("", e);
}
}
defaultLogger.info("Registered {} Notification Services for Notification Type {}", registered, type);
}
protected File getBootstrapFile(final Logger logger, String directory, String defaultDirectory, String fileName) throws IOException {
final File confDir = bootstrapConfigFile.getParentFile();
final File nifiHome = confDir.getParentFile();
String confFileDir = System.getProperty(directory);
final File fileDir;
if (confFileDir != null) {
fileDir = new File(confFileDir.trim());
} else {
fileDir = new File(nifiHome, defaultDirectory);
}
FileUtils.ensureDirectoryExistAndCanAccess(fileDir);
final File statusFile = new File(fileDir, fileName);
logger.debug("Status File: {}", statusFile);
return statusFile;
}
    // Resolves the file that records the NiFi process id; location is resolved by
    // getBootstrapFile using the NIFI_PID_DIR_PROP override or DEFAULT_PID_DIR.
    protected File getPidFile(final Logger logger) throws IOException {
        return getBootstrapFile(logger, NIFI_PID_DIR_PROP, DEFAULT_PID_DIR, NIFI_PID_FILE_NAME);
    }
    // Resolves the status file (stores port, PID, and secret.key), kept in the
    // same directory as the pid file.
    protected File getStatusFile(final Logger logger) throws IOException {
        return getBootstrapFile(logger, NIFI_PID_DIR_PROP, DEFAULT_PID_DIR, NIFI_STATUS_FILE_NAME);
    }
    // Resolves the lock file; its existence marks that a stop command is in
    // progress (created in stop(), checked by the restart logic in start()).
    protected File getLockFile(final Logger logger) throws IOException {
        return getBootstrapFile(logger, NIFI_PID_DIR_PROP, DEFAULT_PID_DIR, NIFI_LOCK_FILE_NAME);
    }
    // Convenience overload that logs via the default (bootstrap log file) logger.
    protected File getStatusFile() throws IOException {
        return getStatusFile(defaultLogger);
    }
private Properties loadProperties(final Logger logger) throws IOException {
final Properties props = new Properties();
final File statusFile = getStatusFile(logger);
if (statusFile == null || !statusFile.exists()) {
logger.debug("No status file to load properties from");
return props;
}
try (final FileInputStream fis = new FileInputStream(getStatusFile(logger))) {
props.load(fis);
}
final Map<Object, Object> modified = new HashMap<>(props);
modified.remove("secret.key");
logger.debug("Properties: {}", modified);
return props;
}
private synchronized void savePidProperties(final Properties pidProperties, final Logger logger) throws IOException {
final String pid = pidProperties.getProperty(PID_KEY);
if (!StringUtils.isBlank(pid)) {
writePidFile(pid, logger);
}
final File statusFile = getStatusFile(logger);
if (statusFile.exists() && !statusFile.delete()) {
logger.warn("Failed to delete {}", statusFile);
}
if (!statusFile.createNewFile()) {
throw new IOException("Failed to create file " + statusFile);
}
try {
final Set<PosixFilePermission> perms = new HashSet<>();
perms.add(PosixFilePermission.OWNER_READ);
perms.add(PosixFilePermission.OWNER_WRITE);
Files.setPosixFilePermissions(statusFile.toPath(), perms);
} catch (final Exception e) {
logger.warn("Failed to set permissions so that only the owner can read status file {}; "
+ "this may allows others to have access to the key needed to communicate with NiFi. "
+ "Permissions should be changed so that only the owner can read this file", statusFile);
}
try (final FileOutputStream fos = new FileOutputStream(statusFile)) {
pidProperties.store(fos, null);
fos.getFD().sync();
}
logger.debug("Saved Properties {} to {}", new Object[]{pidProperties, statusFile});
}
private synchronized void writePidFile(final String pid, final Logger logger) throws IOException {
final File pidFile = getPidFile(logger);
if (pidFile.exists() && !pidFile.delete()) {
logger.warn("Failed to delete {}", pidFile);
}
if (!pidFile.createNewFile()) {
throw new IOException("Failed to create file " + pidFile);
}
try {
final Set<PosixFilePermission> perms = new HashSet<>();
perms.add(PosixFilePermission.OWNER_WRITE);
perms.add(PosixFilePermission.OWNER_READ);
perms.add(PosixFilePermission.GROUP_READ);
perms.add(PosixFilePermission.OTHERS_READ);
Files.setPosixFilePermissions(pidFile.toPath(), perms);
} catch (final Exception e) {
logger.warn("Failed to set permissions so that only the owner can read pid file {}; "
+ "this may allows others to have access to the key needed to communicate with NiFi. "
+ "Permissions should be changed so that only the owner can read this file", pidFile);
}
try (final FileOutputStream fos = new FileOutputStream(pidFile)) {
fos.write(pid.getBytes(StandardCharsets.UTF_8));
fos.getFD().sync();
}
logger.debug("Saved Pid {} to {}", new Object[]{pid, pidFile});
}
private boolean isPingSuccessful(final int port, final String secretKey, final Logger logger) {
logger.debug("Pinging {}", port);
try (final Socket socket = new Socket("localhost", port)) {
final OutputStream out = socket.getOutputStream();
out.write((PING_CMD + " " + secretKey + "\n").getBytes(StandardCharsets.UTF_8));
out.flush();
logger.debug("Sent PING command");
socket.setSoTimeout(5000);
final InputStream in = socket.getInputStream();
final BufferedReader reader = new BufferedReader(new InputStreamReader(in));
final String response = reader.readLine();
logger.debug("PING response: {}", response);
out.close();
reader.close();
return PING_CMD.equals(response);
} catch (final IOException ioe) {
return false;
}
}
private Integer getCurrentPort(final Logger logger) throws IOException {
final Properties props = loadProperties(logger);
final String portVal = props.getProperty("port");
if (portVal == null) {
logger.debug("No Port found in status file");
return null;
} else {
logger.debug("Port defined in status file: {}", portVal);
}
final int port = Integer.parseInt(portVal);
final boolean success = isPingSuccessful(port, props.getProperty("secret.key"), logger);
if (success) {
logger.debug("Successful PING on port {}", port);
return port;
}
final String pid = props.getProperty(PID_KEY);
logger.debug("PID in status file is {}", pid);
if (pid != null) {
final boolean procRunning = isProcessRunning(pid, logger);
if (procRunning) {
return port;
} else {
return null;
}
}
return null;
}
private boolean isProcessRunning(final String pid, final Logger logger) {
try {
// We use the "ps" command to check if the process is still running.
final ProcessBuilder builder = new ProcessBuilder();
builder.command("ps", "-p", pid);
final Process proc = builder.start();
// Look for the pid in the output of the 'ps' command.
boolean running = false;
String line;
try (final InputStream in = proc.getInputStream();
final Reader streamReader = new InputStreamReader(in);
final BufferedReader reader = new BufferedReader(streamReader)) {
while ((line = reader.readLine()) != null) {
if (line.trim().startsWith(pid)) {
running = true;
}
}
}
// If output of the ps command had our PID, the process is running.
if (running) {
logger.debug("Process with PID {} is running", pid);
} else {
logger.debug("Process with PID {} is not running", pid);
}
return running;
} catch (final IOException ioe) {
System.err.println("Failed to determine if Process " + pid + " is running; assuming that it is not");
return false;
}
}
private Status getStatus(final Logger logger) {
final Properties props;
try {
props = loadProperties(logger);
} catch (final IOException ioe) {
return new Status(null, null, false, false);
}
if (props == null) {
return new Status(null, null, false, false);
}
final String portValue = props.getProperty("port");
final String pid = props.getProperty(PID_KEY);
final String secretKey = props.getProperty("secret.key");
if (portValue == null && pid == null) {
return new Status(null, null, false, false);
}
Integer port = null;
boolean pingSuccess = false;
if (portValue != null) {
try {
port = Integer.parseInt(portValue);
pingSuccess = isPingSuccessful(port, secretKey, logger);
} catch (final NumberFormatException nfe) {
return new Status(null, null, false, false);
}
}
if (pingSuccess) {
return new Status(port, pid, true, true);
}
final boolean alive = pid != null && isProcessRunning(pid, logger);
return new Status(port, pid, pingSuccess, alive);
}
public int status() throws IOException {
final Logger logger = cmdLogger;
final Status status = getStatus(logger);
if (status.isRespondingToPing()) {
logger.info("Apache NiFi is currently running, listening to Bootstrap on port {}, PID={}",
new Object[]{status.getPort(), status.getPid() == null ? "unknown" : status.getPid()});
return 0;
}
if (status.isProcessRunning()) {
logger.info("Apache NiFi is running at PID {} but is not responding to ping requests", status.getPid());
return 4;
}
if (status.getPort() == null) {
logger.info("Apache NiFi is not running");
return 3;
}
if (status.getPid() == null) {
logger.info("Apache NiFi is not responding to Ping requests. The process may have died or may be hung");
} else {
logger.info("Apache NiFi is not running");
}
return 3;
}
    /**
     * Logs (at INFO level) the system properties of the running NiFi process by
     * attaching to it via the JDK Attach API. Does nothing if NiFi is not running
     * or if the Attach API classes are not on the classpath.
     */
    public void env() {
        final Logger logger = cmdLogger;
        final Status status = getStatus(logger);
        if (status.getPid() == null) {
            logger.info("Apache NiFi is not running");
            return;
        }
        // The Attach API is loaded reflectively: com.sun.tools.attach is only
        // available when running on a JDK (tools.jar / classes.jar), not a JRE.
        final Class<?> virtualMachineClass;
        try {
            virtualMachineClass = Class.forName("com.sun.tools.attach.VirtualMachine");
        } catch (final ClassNotFoundException cnfe) {
            logger.error("Seems tools.jar (Linux / Windows JDK) or classes.jar (Mac OS) is not available in classpath");
            return;
        }
        final Method attachMethod;
        final Method detachMethod;
        try {
            attachMethod = virtualMachineClass.getMethod("attach", String.class);
            detachMethod = virtualMachineClass.getDeclaredMethod("detach");
        } catch (final Exception e) {
            logger.error("Methods required for getting environment not available", e);
            return;
        }
        // VirtualMachine.attach is static, hence the null receiver.
        final Object virtualMachine;
        try {
            virtualMachine = attachMethod.invoke(null, status.getPid());
        } catch (final Throwable t) {
            logger.error("Problem attaching to NiFi", t);
            return;
        }
        try {
            final Method getSystemPropertiesMethod = virtualMachine.getClass().getMethod("getSystemProperties");
            final Properties sysProps = (Properties) getSystemPropertiesMethod.invoke(virtualMachine);
            for (Entry<Object, Object> syspropEntry : sysProps.entrySet()) {
                logger.info(syspropEntry.getKey().toString() + " = " + syspropEntry.getValue().toString());
            }
        } catch (Throwable t) {
            throw new RuntimeException(t);
        } finally {
            // Always detach from the target VM, even when reading the properties failed.
            try {
                detachMethod.invoke(virtualMachine);
            } catch (final Exception e) {
                logger.warn("Caught exception detaching from process", e);
            }
        }
    }
/**
* Writes a NiFi thread dump to the given file; if file is null, logs at
* INFO level instead.
*
* @param dumpFile the file to write the dump content to
* @throws IOException if any issues occur while writing the dump file
*/
public void dump(final File dumpFile) throws IOException {
final Logger logger = defaultLogger; // dump to bootstrap log file by default
final Integer port = getCurrentPort(logger);
if (port == null) {
logger.info("Apache NiFi is not currently running");
return;
}
final Properties nifiProps = loadProperties(logger);
final String secretKey = nifiProps.getProperty("secret.key");
final StringBuilder sb = new StringBuilder();
try (final Socket socket = new Socket()) {
logger.debug("Connecting to NiFi instance");
socket.setSoTimeout(60000);
socket.connect(new InetSocketAddress("localhost", port));
logger.debug("Established connection to NiFi instance.");
socket.setSoTimeout(60000);
logger.debug("Sending DUMP Command to port {}", port);
final OutputStream out = socket.getOutputStream();
out.write((DUMP_CMD + " " + secretKey + "\n").getBytes(StandardCharsets.UTF_8));
out.flush();
final InputStream in = socket.getInputStream();
try (final BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
String line;
while ((line = reader.readLine()) != null) {
sb.append(line).append("\n");
}
}
}
final String dump = sb.toString();
if (dumpFile == null) {
logger.info(dump);
} else {
try (final FileOutputStream fos = new FileOutputStream(dumpFile)) {
fos.write(dump.getBytes(StandardCharsets.UTF_8));
}
// we want to log to the console (by default) that we wrote the thread dump to the specified file
cmdLogger.info("Successfully wrote thread dump to {}", dumpFile.getAbsolutePath());
}
}
public void notifyStop() {
final String hostname = getHostname();
final SimpleDateFormat sdf = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss.SSS");
final String now = sdf.format(System.currentTimeMillis());
String user = System.getProperty("user.name");
if (user == null || user.trim().isEmpty()) {
user = "Unknown User";
}
serviceManager.notify(NotificationType.NIFI_STOPPED, "NiFi Stopped on Host " + hostname,
"Hello,\n\nApache NiFi has been told to initiate a shutdown on host " + hostname + " at " + now + " by user " + user);
}
    /**
     * Initiates a graceful shutdown of the running NiFi instance by sending the
     * SHUTDOWN command to its bootstrap port. If the process is still running
     * after the configured graceful-shutdown period, its process tree is killed.
     * Cleans up the status and pid files on success, and always removes the lock
     * file (the "stop in progress" marker) when finished.
     *
     * @throws IOException if the lock file cannot be created or the status/bootstrap
     *         configuration files cannot be read
     */
    public void stop() throws IOException {
        final Logger logger = cmdLogger;
        final Integer port = getCurrentPort(logger);
        if (port == null) {
            logger.info("Apache NiFi is not currently running");
            return;
        }
        // indicate that a stop command is in progress
        final File lockFile = getLockFile(logger);
        if (!lockFile.exists()) {
            lockFile.createNewFile();
        }
        final Properties nifiProps = loadProperties(logger);
        final String secretKey = nifiProps.getProperty("secret.key");
        final String pid = nifiProps.getProperty(PID_KEY);
        final File statusFile = getStatusFile(logger);
        final File pidFile = getPidFile(logger);
        try (final Socket socket = new Socket()) {
            logger.debug("Connecting to NiFi instance");
            socket.setSoTimeout(10000);
            socket.connect(new InetSocketAddress("localhost", port));
            logger.debug("Established connection to NiFi instance.");
            socket.setSoTimeout(10000);
            logger.debug("Sending SHUTDOWN Command to port {}", port);
            final OutputStream out = socket.getOutputStream();
            out.write((SHUTDOWN_CMD + " " + secretKey + "\n").getBytes(StandardCharsets.UTF_8));
            out.flush();
            socket.shutdownOutput();
            // Read the entire response until the stream is exhausted
            final InputStream in = socket.getInputStream();
            int lastChar;
            final StringBuilder sb = new StringBuilder();
            while ((lastChar = in.read()) > -1) {
                sb.append((char) lastChar);
            }
            final String response = sb.toString().trim();
            logger.debug("Received response to SHUTDOWN command: {}", response);
            if (SHUTDOWN_CMD.equals(response)) {
                logger.info("Apache NiFi has accepted the Shutdown Command and is shutting down now");
                if (pid != null) {
                    final Properties bootstrapProperties = new Properties();
                    try (final FileInputStream fis = new FileInputStream(bootstrapConfigFile)) {
                        bootstrapProperties.load(fis);
                    }
                    String gracefulShutdown = bootstrapProperties.getProperty(GRACEFUL_SHUTDOWN_PROP, DEFAULT_GRACEFUL_SHUTDOWN_VALUE);
                    int gracefulShutdownSeconds;
                    try {
                        gracefulShutdownSeconds = Integer.parseInt(gracefulShutdown);
                    } catch (final NumberFormatException nfe) {
                        // Fall back to the default when the configured value is not a number
                        gracefulShutdownSeconds = Integer.parseInt(DEFAULT_GRACEFUL_SHUTDOWN_VALUE);
                    }
                    notifyStop();
                    // Poll every 2 seconds until the process exits; once the graceful
                    // period (if positive) has elapsed, kill the process tree.
                    final long startWait = System.nanoTime();
                    while (isProcessRunning(pid, logger)) {
                        logger.info("Waiting for Apache NiFi to finish shutting down...");
                        final long waitNanos = System.nanoTime() - startWait;
                        final long waitSeconds = TimeUnit.NANOSECONDS.toSeconds(waitNanos);
                        if (waitSeconds >= gracefulShutdownSeconds && gracefulShutdownSeconds > 0) {
                            if (isProcessRunning(pid, logger)) {
                                logger.warn("NiFi has not finished shutting down after {} seconds. Killing process.", gracefulShutdownSeconds);
                                try {
                                    killProcessTree(pid, logger);
                                } catch (final IOException ioe) {
                                    logger.error("Failed to kill Process with PID {}", pid);
                                }
                            }
                            break;
                        } else {
                            try {
                                Thread.sleep(2000L);
                            } catch (final InterruptedException ie) {
                            }
                        }
                    }
                    if (statusFile.exists() && !statusFile.delete()) {
                        logger.error("Failed to delete status file {}; this file should be cleaned up manually", statusFile);
                    }
                    if (pidFile.exists() && !pidFile.delete()) {
                        logger.error("Failed to delete pid file {}; this file should be cleaned up manually", pidFile);
                    }
                    logger.info("NiFi has finished shutting down.");
                }
            } else {
                logger.error("When sending SHUTDOWN command to NiFi, got unexpected response {}", response);
            }
        } catch (final IOException ioe) {
            // Could not talk to NiFi at all; kill the process directly if we know its PID
            if (pid == null) {
                logger.error("Failed to send shutdown command to port {} due to {}. No PID found for the NiFi process, so unable to kill process; "
                    + "the process should be killed manually.", new Object[]{port, ioe.toString()});
            } else {
                logger.error("Failed to send shutdown command to port {} due to {}. Will kill the NiFi Process with PID {}.", port, ioe.toString(), pid);
                notifyStop();
                killProcessTree(pid, logger);
                if (statusFile.exists() && !statusFile.delete()) {
                    logger.error("Failed to delete status file {}; this file should be cleaned up manually", statusFile);
                }
            }
        } finally {
            // Always remove the "stop in progress" marker so later commands are not blocked
            if (lockFile.exists() && !lockFile.delete()) {
                logger.error("Failed to delete lock file {}; this file should be cleaned up manually", lockFile);
            }
        }
    }
private static List<String> getChildProcesses(final String ppid) throws IOException {
final Process proc = Runtime.getRuntime().exec(new String[]{"ps", "-o", "pid", "--no-headers", "--ppid", ppid});
final List<String> childPids = new ArrayList<>();
try (final InputStream in = proc.getInputStream();
final BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
String line;
while ((line = reader.readLine()) != null) {
childPids.add(line.trim());
}
}
return childPids;
}
private void killProcessTree(final String pid, final Logger logger) throws IOException {
logger.debug("Killing Process Tree for PID {}", pid);
final List<String> children = getChildProcesses(pid);
logger.debug("Children of PID {}: {}", new Object[]{pid, children});
for (final String childPid : children) {
killProcessTree(childPid, logger);
}
Runtime.getRuntime().exec(new String[]{"kill", "-9", pid});
}
public static boolean isAlive(final Process process) {
try {
process.exitValue();
return false;
} catch (final IllegalStateException | IllegalThreadStateException itse) {
return true;
}
}
private String getHostname() {
String hostname = "Unknown Host";
String ip = "Unknown IP Address";
try {
final InetAddress localhost = InetAddress.getLocalHost();
hostname = localhost.getHostName();
ip = localhost.getHostAddress();
} catch (final Exception e) {
defaultLogger.warn("Failed to obtain hostname for notification due to:", e);
}
return hostname + " (" + ip + ")";
}
    /**
     * Launches the NiFi process based on the bootstrap configuration file, then
     * monitors it for the lifetime of this bootstrap process: if the process dies
     * while auto-restart is enabled and no shutdown was requested, it is
     * relaunched. Returns when NiFi is already running, or when NiFi exits and is
     * not restarted.
     *
     * @throws IOException if the bootstrap configuration cannot be read or the process cannot be launched
     * @throws InterruptedException declared for interface compatibility
     */
    @SuppressWarnings({"rawtypes", "unchecked"})
    public void start() throws IOException, InterruptedException {
        final Integer port = getCurrentPort(cmdLogger);
        if (port != null) {
            cmdLogger.info("Apache NiFi is already running, listening to Bootstrap on port " + port);
            return;
        }
        // A leftover lock file would suppress auto-restart below; remove it up front
        final File prevLockFile = getLockFile(cmdLogger);
        if (prevLockFile.exists() && !prevLockFile.delete()) {
            cmdLogger.warn("Failed to delete previous lock file {}; this file should be cleaned up manually", prevLockFile);
        }
        final ProcessBuilder builder = new ProcessBuilder();
        if (!bootstrapConfigFile.exists()) {
            throw new FileNotFoundException(bootstrapConfigFile.getAbsolutePath());
        }
        final Properties properties = new Properties();
        try (final FileInputStream fis = new FileInputStream(bootstrapConfigFile)) {
            properties.load(fis);
        }
        final Map<String, String> props = new HashMap<>();
        props.putAll((Map) properties);
        final String specifiedWorkingDir = props.get("working.dir");
        if (specifiedWorkingDir != null) {
            builder.directory(new File(specifiedWorkingDir));
        }
        // NiFi home is the parent of the directory holding the bootstrap config file
        final File bootstrapConfigAbsoluteFile = bootstrapConfigFile.getAbsoluteFile();
        final File binDir = bootstrapConfigAbsoluteFile.getParentFile();
        final File workingDir = binDir.getParentFile();
        if (specifiedWorkingDir == null) {
            builder.directory(workingDir);
        }
        final String nifiLogDir = replaceNull(System.getProperty("org.apache.nifi.bootstrap.config.log.dir"), DEFAULT_LOG_DIR).trim();
        final String libFilename = replaceNull(props.get("lib.dir"), "./lib").trim();
        File libDir = getFile(libFilename, workingDir);
        final String confFilename = replaceNull(props.get("conf.dir"), "./conf").trim();
        File confDir = getFile(confFilename, workingDir);
        String nifiPropsFilename = props.get("props.file");
        if (nifiPropsFilename == null) {
            if (confDir.exists()) {
                nifiPropsFilename = new File(confDir, "nifi.properties").getAbsolutePath();
            } else {
                nifiPropsFilename = DEFAULT_CONFIG_FILE;
            }
        }
        nifiPropsFilename = nifiPropsFilename.trim();
        // Every bootstrap property named java.arg* is passed through as a JVM argument
        final List<String> javaAdditionalArgs = new ArrayList<>();
        for (final Map.Entry<String, String> entry : props.entrySet()) {
            final String key = entry.getKey();
            final String value = entry.getValue();
            if (key.startsWith("java.arg")) {
                javaAdditionalArgs.add(value);
            }
        }
        final File[] libFiles = libDir.listFiles(new FilenameFilter() {
            @Override
            public boolean accept(final File dir, final String filename) {
                return filename.toLowerCase().endsWith(".jar");
            }
        });
        if (libFiles == null || libFiles.length == 0) {
            throw new RuntimeException("Could not find lib directory at " + libDir.getAbsolutePath());
        }
        final File[] confFiles = confDir.listFiles();
        if (confFiles == null || confFiles.length == 0) {
            throw new RuntimeException("Could not find conf directory at " + confDir.getAbsolutePath());
        }
        // Classpath is the conf directory plus every jar in the lib directory
        final List<String> cpFiles = new ArrayList<>(confFiles.length + libFiles.length);
        cpFiles.add(confDir.getAbsolutePath());
        for (final File file : libFiles) {
            cpFiles.add(file.getAbsolutePath());
        }
        final StringBuilder classPathBuilder = new StringBuilder();
        for (int i = 0; i < cpFiles.size(); i++) {
            final String filename = cpFiles.get(i);
            classPathBuilder.append(filename);
            if (i < cpFiles.size() - 1) {
                classPathBuilder.append(File.pathSeparatorChar);
            }
        }
        final String classPath = classPathBuilder.toString();
        String javaCmd = props.get("java");
        if (javaCmd == null) {
            javaCmd = DEFAULT_JAVA_CMD;
        }
        if (javaCmd.equals(DEFAULT_JAVA_CMD)) {
            // Prefer the JVM under JAVA_HOME when only the bare "java" command is configured
            String javaHome = System.getenv("JAVA_HOME");
            if (javaHome != null) {
                String fileExtension = isWindows() ? ".exe" : "";
                File javaFile = new File(javaHome + File.separatorChar + "bin"
                    + File.separatorChar + "java" + fileExtension);
                if (javaFile.exists() && javaFile.canExecute()) {
                    javaCmd = javaFile.getAbsolutePath();
                }
            }
        }
        // Listener through which the launched NiFi process reports its command/control port back
        final NiFiListener listener = new NiFiListener();
        final int listenPort = listener.start(this);
        final List<String> cmd = new ArrayList<>();
        cmd.add(javaCmd);
        cmd.add("-classpath");
        cmd.add(classPath);
        cmd.addAll(javaAdditionalArgs);
        cmd.add("-Dnifi.properties.file.path=" + nifiPropsFilename);
        cmd.add("-Dnifi.bootstrap.listen.port=" + listenPort);
        cmd.add("-Dapp=NiFi");
        cmd.add("-Dorg.apache.nifi.bootstrap.config.log.dir=" + nifiLogDir);
        if (!System.getProperty("java.version").startsWith("1.")) {
            // running on Java 9+, java.xml.bind module must be made available
            cmd.add("--add-modules=java.xml.bind");
        }
        cmd.add("org.apache.nifi.NiFi");
        if (isSensitiveKeyPresent(props)) {
            Path sensitiveKeyFile = createSensitiveKeyFile(confDir);
            writeSensitiveKeyFile(props, sensitiveKeyFile);
            cmd.add("-K " + sensitiveKeyFile.toFile().getAbsolutePath());
        }
        builder.command(cmd);
        final StringBuilder cmdBuilder = new StringBuilder();
        for (final String s : cmd) {
            cmdBuilder.append(s).append(" ");
        }
        cmdLogger.info("Starting Apache NiFi...");
        cmdLogger.info("Working Directory: {}", workingDir.getAbsolutePath());
        cmdLogger.info("Command: {}", cmdBuilder.toString());
        String gracefulShutdown = props.get(GRACEFUL_SHUTDOWN_PROP);
        if (gracefulShutdown == null) {
            gracefulShutdown = DEFAULT_GRACEFUL_SHUTDOWN_VALUE;
        }
        final int gracefulShutdownSeconds;
        try {
            gracefulShutdownSeconds = Integer.parseInt(gracefulShutdown);
        } catch (final NumberFormatException nfe) {
            throw new NumberFormatException("The '" + GRACEFUL_SHUTDOWN_PROP + "' property in Bootstrap Config File "
                + bootstrapConfigAbsoluteFile.getAbsolutePath() + " has an invalid value. Must be a non-negative integer");
        }
        if (gracefulShutdownSeconds < 0) {
            throw new NumberFormatException("The '" + GRACEFUL_SHUTDOWN_PROP + "' property in Bootstrap Config File "
                + bootstrapConfigAbsoluteFile.getAbsolutePath() + " has an invalid value. Must be a non-negative integer");
        }
        Process process = builder.start();
        handleLogging(process);
        Long pid = OSUtils.getProcessId(process, cmdLogger);
        if (pid == null) {
            cmdLogger.warn("Launched Apache NiFi but could not determined the Process ID");
        } else {
            nifiPid = pid;
            final Properties pidProperties = new Properties();
            pidProperties.setProperty(PID_KEY, String.valueOf(nifiPid));
            savePidProperties(pidProperties, cmdLogger);
            cmdLogger.info("Launched Apache NiFi with Process ID " + pid);
        }
        shutdownHook = new ShutdownHook(process, this, secretKey, gracefulShutdownSeconds, loggingExecutor);
        final Runtime runtime = Runtime.getRuntime();
        runtime.addShutdownHook(shutdownHook);
        final String hostname = getHostname();
        final SimpleDateFormat sdf = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss.SSS");
        String now = sdf.format(System.currentTimeMillis());
        String user = System.getProperty("user.name");
        if (user == null || user.trim().isEmpty()) {
            user = "Unknown User";
        }
        serviceManager.notify(NotificationType.NIFI_STARTED, "NiFi Started on Host " + hostname, "Hello,\n\nApache NiFi has been started on host " + hostname + " at " + now + " by user " + user);
        // Monitoring loop: poll once per second; when the process dies, decide
        // whether to relaunch it or return.
        while (true) {
            final boolean alive = isAlive(process);
            if (alive) {
                try {
                    Thread.sleep(1000L);
                } catch (final InterruptedException ie) {
                }
            } else {
                try {
                    runtime.removeShutdownHook(shutdownHook);
                } catch (final IllegalStateException ise) {
                    // happens when already shutting down
                }
                now = sdf.format(System.currentTimeMillis());
                if (autoRestartNiFi) {
                    // A missing status file or an existing lock file means a shutdown
                    // was requested, so the death is intentional: do not restart.
                    final File statusFile = getStatusFile(defaultLogger);
                    if (!statusFile.exists()) {
                        defaultLogger.info("Status File no longer exists. Will not restart NiFi");
                        return;
                    }
                    final File lockFile = getLockFile(defaultLogger);
                    if (lockFile.exists()) {
                        defaultLogger.info("A shutdown was initiated. Will not restart NiFi");
                        return;
                    }
                    final boolean previouslyStarted = getNifiStarted();
                    if (!previouslyStarted) {
                        defaultLogger.info("NiFi never started. Will not restart NiFi");
                        return;
                    } else {
                        setNiFiStarted(false);
                    }
                    // Re-create the sensitive key file for the relaunched process
                    if (isSensitiveKeyPresent(props)) {
                        Path sensitiveKeyFile = createSensitiveKeyFile(confDir);
                        writeSensitiveKeyFile(props, sensitiveKeyFile);
                    }
                    defaultLogger.warn("Apache NiFi appears to have died. Restarting...");
                    process = builder.start();
                    handleLogging(process);
                    pid = OSUtils.getProcessId(process, defaultLogger);
                    if (pid == null) {
                        cmdLogger.warn("Launched Apache NiFi but could not obtain the Process ID");
                    } else {
                        nifiPid = pid;
                        final Properties pidProperties = new Properties();
                        pidProperties.setProperty(PID_KEY, String.valueOf(nifiPid));
                        savePidProperties(pidProperties, defaultLogger);
                        cmdLogger.info("Launched Apache NiFi with Process ID " + pid);
                    }
                    shutdownHook = new ShutdownHook(process, this, secretKey, gracefulShutdownSeconds, loggingExecutor);
                    runtime.addShutdownHook(shutdownHook);
                    final boolean started = waitForStart();
                    if (started) {
                        defaultLogger.info("Successfully started Apache NiFi{}", (pid == null ? "" : " with PID " + pid));
                        // We are expected to restart nifi, so send a notification that it died. If we are not restarting nifi,
                        // then this means that we are intentionally stopping the service.
                        serviceManager.notify(NotificationType.NIFI_DIED, "NiFi Died on Host " + hostname,
                            "Hello,\n\nIt appears that Apache NiFi has died on host " + hostname + " at " + now + "; automatically restarting NiFi");
                    } else {
                        defaultLogger.error("Apache NiFi does not appear to have started");
                        // We are expected to restart nifi, so send a notification that it died. If we are not restarting nifi,
                        // then this means that we are intentionally stopping the service.
                        serviceManager.notify(NotificationType.NIFI_DIED, "NiFi Died on Host " + hostname,
                            "Hello,\n\nIt appears that Apache NiFi has died on host " + hostname + " at " + now +
                            ". Attempted to restart NiFi but the services does not appear to have restarted!");
                    }
                } else {
                    return;
                }
            }
        }
    }
private void writeSensitiveKeyFile(Map<String, String> props, Path sensitiveKeyFile) throws IOException {
BufferedWriter sensitiveKeyWriter = Files.newBufferedWriter(sensitiveKeyFile, StandardCharsets.UTF_8);
sensitiveKeyWriter.write(props.get(NIFI_BOOTSTRAP_SENSITIVE_KEY));
sensitiveKeyWriter.close();
}
private Path createSensitiveKeyFile(File confDir) {
Path sensitiveKeyFile = Paths.get(confDir+"/sensitive.key");
final boolean isPosixSupported = FileSystems.getDefault().supportedFileAttributeViews().contains("posix");
try {
if (isPosixSupported) {
// Initially create file with the empty permission set (so nobody can get a file descriptor on it):
Set<PosixFilePermission> perms = new HashSet<PosixFilePermission>();
FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms);
sensitiveKeyFile = Files.createFile(sensitiveKeyFile, attr);
// Then, once created, add owner-only rights:
perms.add(PosixFilePermission.OWNER_WRITE);
perms.add(PosixFilePermission.OWNER_READ);
attr = PosixFilePermissions.asFileAttribute(perms);
Files.setPosixFilePermissions(sensitiveKeyFile, perms);
} else {
// If Posix is not supported (e.g. Windows) then create the key file without permission settings.
cmdLogger.info("Current file system does not support Posix, using default permission settings.");
sensitiveKeyFile = Files.createFile(sensitiveKeyFile);
}
} catch (final FileAlreadyExistsException faee) {
cmdLogger.error("The sensitive.key file {} already exists. That shouldn't have been. Aborting.", sensitiveKeyFile);
System.exit(1);
} catch (final Exception e) {
cmdLogger.error("Other failure relating to setting permissions on {}. "
+ "(so that only the owner can read it). "
+ "This is fatal to the bootstrap process for security reasons. Exception was: {}", sensitiveKeyFile, e);
System.exit(1);
}
return sensitiveKeyFile;
}
private boolean isSensitiveKeyPresent(Map<String, String> props) {
return props.containsKey(NIFI_BOOTSTRAP_SENSITIVE_KEY) && !StringUtils.isBlank(props.get(NIFI_BOOTSTRAP_SENSITIVE_KEY));
}
private void handleLogging(final Process process) {
final Set<Future<?>> existingFutures = loggingFutures;
if (existingFutures != null) {
for (final Future<?> future : existingFutures) {
future.cancel(false);
}
}
final Future<?> stdOutFuture = loggingExecutor.submit(new Runnable() {
@Override
public void run() {
final Logger stdOutLogger = LoggerFactory.getLogger("org.apache.nifi.StdOut");
final InputStream in = process.getInputStream();
try (final BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
String line;
while ((line = reader.readLine()) != null) {
stdOutLogger.info(line);
}
} catch (IOException e) {
defaultLogger.error("Failed to read from NiFi's Standard Out stream", e);
}
}
});
final Future<?> stdErrFuture = loggingExecutor.submit(new Runnable() {
@Override
public void run() {
final Logger stdErrLogger = LoggerFactory.getLogger("org.apache.nifi.StdErr");
final InputStream in = process.getErrorStream();
try (final BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
String line;
while ((line = reader.readLine()) != null) {
stdErrLogger.error(line);
}
} catch (IOException e) {
defaultLogger.error("Failed to read from NiFi's Standard Error stream", e);
}
}
});
final Set<Future<?>> futures = new HashSet<>();
futures.add(stdOutFuture);
futures.add(stdErrFuture);
this.loggingFutures = futures;
}
private boolean isWindows() {
final String osName = System.getProperty("os.name");
return osName != null && osName.toLowerCase().contains("win");
}
    /**
     * Blocks until the launched NiFi process reports its command/control port
     * (ccPort becomes >= 1 via setNiFiCommandControlPort), or until the startup
     * timeout elapses.
     *
     * @return true if NiFi reported its port within STARTUP_WAIT_SECONDS;
     *         false when the wait timed out or the waiting thread was interrupted
     */
    private boolean waitForStart() {
        lock.lock();
        try {
            final long startTime = System.nanoTime();
            while (ccPort < 1) {
                try {
                    // Wake up at least once per second to re-check the timeout
                    startupCondition.await(1, TimeUnit.SECONDS);
                } catch (final InterruptedException ie) {
                    return false;
                }
                // Re-check elapsed time after every wakeup (including spurious ones)
                final long waitNanos = System.nanoTime() - startTime;
                final long waitSeconds = TimeUnit.NANOSECONDS.toSeconds(waitNanos);
                if (waitSeconds > STARTUP_WAIT_SECONDS) {
                    return false;
                }
            }
        } finally {
            lock.unlock();
        }
        return true;
    }
private File getFile(final String filename, final File workingDir) {
File file = new File(filename);
if (!file.isAbsolute()) {
file = new File(workingDir, filename);
}
return file;
}
private String replaceNull(final String value, final String replacement) {
return (value == null) ? replacement : value;
}
    // Controls whether the bootstrap automatically relaunches NiFi when the
    // child process dies.
    void setAutoRestartNiFi(final boolean restart) {
        this.autoRestartNiFi = restart;
    }
    /**
     * Records the command &amp; control port and secret key reported by the
     * NiFi process, propagates the key to the shutdown hook, and persists the
     * PID/port/key properties to the status file so later bootstrap
     * invocations can communicate with this NiFi instance.
     *
     * @param port the command &amp; control port NiFi is listening on
     * @param secretKey the shared secret used to authenticate bootstrap requests
     * @throws IOException declared for API compatibility; persistence failures
     *         are caught and logged as a warning rather than propagated
     */
    void setNiFiCommandControlPort(final int port, final String secretKey) throws IOException {
        this.ccPort = port;
        this.secretKey = secretKey;
        if (shutdownHook != null) {
            shutdownHook.setSecretKey(secretKey);
        }
        final File statusFile = getStatusFile(defaultLogger);
        final Properties nifiProps = new Properties();
        // A pid of -1 means the process id is unknown; omit it from the file.
        if (nifiPid != -1) {
            nifiProps.setProperty(PID_KEY, String.valueOf(nifiPid));
        }
        nifiProps.setProperty("port", String.valueOf(ccPort));
        nifiProps.setProperty("secret.key", secretKey);
        try {
            savePidProperties(nifiProps, defaultLogger);
        } catch (final IOException ioe) {
            // Non-fatal: NiFi is running; we just could not persist the port info.
            defaultLogger.warn("Apache NiFi has started but failed to persist NiFi Port information to {} due to {}", new Object[]{statusFile.getAbsolutePath(), ioe});
        }
        defaultLogger.info("Apache NiFi now running and listening for Bootstrap requests on port {}", port);
    }
    // Returns the last command & control port reported via
    // setNiFiCommandControlPort (values < 1 indicate none was reported yet,
    // per the waitForStart loop condition).
    int getNiFiCommandControlPort() {
        return this.ccPort;
    }
    // Updates the started flag under startedLock so readers (getNifiStarted)
    // always observe a consistent value.
    void setNiFiStarted(final boolean nifiStarted) {
        startedLock.lock();
        try {
            this.nifiStarted = nifiStarted;
        } finally {
            startedLock.unlock();
        }
    }
    // Reads the started flag under startedLock, pairing with setNiFiStarted.
    boolean getNifiStarted() {
        startedLock.lock();
        try {
            return nifiStarted;
        } finally {
            startedLock.unlock();
        }
    }
    /**
     * Immutable snapshot of the NiFi process state: the command &amp; control
     * port, the process id, whether NiFi answered a ping, and whether the OS
     * process appears to be running. Any field may be null when unknown.
     */
    private static class Status {
        private final Integer port;
        private final String pid;
        private final Boolean respondingToPing;
        private final Boolean processRunning;
        public Status(final Integer port, final String pid, final Boolean respondingToPing, final Boolean processRunning) {
            this.port = port;
            this.pid = pid;
            this.respondingToPing = respondingToPing;
            this.processRunning = processRunning;
        }
        public String getPid() {
            return pid;
        }
        public Integer getPort() {
            return port;
        }
        public boolean isRespondingToPing() {
            // Null-safe: null (unknown) is treated as "not responding".
            return Boolean.TRUE.equals(respondingToPing);
        }
        public boolean isProcessRunning() {
            // Null-safe: null (unknown) is treated as "not running".
            return Boolean.TRUE.equals(processRunning);
        }
    }
}
| apache-2.0 |
unsiloai/syntaxnet-ops-hack | tensorflow/python/training/moving_averages.py | 20099 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Maintain moving averages of parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import slot_creator
# TODO(touts): switch to variables.Variable.
def assign_moving_average(variable, value, decay, zero_debias=True, name=None):
  """Compute the moving average of a variable.
  The moving average of 'variable' updated with 'value' is:
    variable * decay + value * (1 - decay)
  The returned Operation sets 'variable' to the newly computed moving average.
  The new value of 'variable' can be set with the 'AssignSub' op as:
     variable -= (1 - decay) * (variable - value)
  Since variables that are initialized to a `0` value will be `0` biased,
  `zero_debias` optionally enables scaling by the mathematically correct
  debiasing factor of
    1 - decay ** num_updates
  See `ADAM: A Method for Stochastic Optimization` Section 3 for more details
  (https://arxiv.org/abs/1412.6980).
  Args:
    variable: A Variable.
    value: A tensor with the same shape as 'variable'.
    decay: A float Tensor or float value.  The moving average decay.
    zero_debias: A python bool. If true, assume the variable is 0-initialized
      and unbias it, as in https://arxiv.org/abs/1412.6980. See docstring in
      `_zero_debias` for more details.
    name: Optional name of the returned operation.
  Returns:
    A reference to the input 'variable' tensor with the newly computed
    moving average.
  """
  with ops.name_scope(name, "AssignMovingAvg",
                      [variable, value, decay]) as scope:
    # Keep the update op on the same device as the variable to avoid
    # cross-device traffic for every averaging step.
    with ops.colocate_with(variable):
      # NOTE: from this point on `decay` holds (1 - decay), matching the
      # update rule `variable -= (1 - decay) * (variable - value)`.
      decay = ops.convert_to_tensor(1.0 - decay, name="decay")
      if decay.dtype != variable.dtype.base_dtype:
        decay = math_ops.cast(decay, variable.dtype.base_dtype)
      if zero_debias:
        update_delta = _zero_debias(variable, value, decay)
      else:
        update_delta = (variable - value) * decay
      return state_ops.assign_sub(variable, update_delta, name=scope)
def weighted_moving_average(value,
                            decay,
                            weight,
                            truediv=True,
                            collections=None,
                            name=None):
  """Compute the weighted moving average of `value`.
  Conceptually, the weighted moving average is:
    `moving_average(value * weight) / moving_average(weight)`,
  where a moving average updates by the rule
    `new_value = decay * old_value + (1 - decay) * update`
  Internally, this Op keeps moving average variables of both `value * weight`
  and `weight`.
  Args:
    value: A numeric `Tensor`.
    decay: A float `Tensor` or float value.  The moving average decay.
    weight: `Tensor` that keeps the current value of a weight.
      Shape should be able to multiply `value`.
    truediv: Boolean, if `True`, dividing by `moving_average(weight)` is
      floating point division.  If `False`, use division implied by dtypes.
    collections:  List of graph collections keys to add the internal variables
      `value * weight` and `weight` to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    name: Optional name of the returned operation.
      Defaults to "WeightedMovingAvg".
  Returns:
    An Operation that updates and returns the weighted moving average.
  """
  # Unlike assign_moving_average, the weighted moving average doesn't modify
  # user-visible variables. It is the ratio of two internal variables, which are
  # moving averages of the updates.  Thus, the signature of this function is
  # quite different than assign_moving_average.
  if collections is None:
    collections = [ops.GraphKeys.GLOBAL_VARIABLES]
  with variable_scope.variable_scope(name, "WeightedMovingAvg",
                                     [value, weight, decay]) as scope:
    value_x_weight_var = variable_scope.get_variable(
        "value_x_weight",
        shape=value.get_shape(),
        dtype=value.dtype,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=collections)
    weight_var = variable_scope.get_variable(
        "weight",
        shape=weight.get_shape(),
        dtype=weight.dtype,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=collections)
    # zero_debias is disabled for both averages: the 0-initialization bias
    # appears in numerator and denominator alike and cancels in the ratio.
    numerator = assign_moving_average(
        value_x_weight_var, value * weight, decay, zero_debias=False)
    denominator = assign_moving_average(
        weight_var, weight, decay, zero_debias=False)
    if truediv:
      return math_ops.truediv(numerator, denominator, name=scope.name)
    else:
      return math_ops.div(numerator, denominator, name=scope.name)
def _zero_debias(unbiased_var, value, decay):
  """Compute the delta required for a debiased Variable.
  All exponential moving averages initialized with Tensors are initialized to 0,
  and therefore are biased to 0. Variables initialized to 0 and used as EMAs are
  similarly biased. This function creates the debias updated amount according to
  a scale factor, as in https://arxiv.org/abs/1412.6980.
  To demonstrate the bias the results from 0-initialization, take an EMA that
  was initialized to `0` with decay `b`. After `t` timesteps of seeing the
  constant `c`, the variable have the following value:
  ```
  EMA = 0*b^(t) + c*(1 - b)*b^(t-1) + c*(1 - b)*b^(t-2) + ...
      = c*(1 - b^t)
  ```
  To have the true value `c`, we would divide by the scale factor `1 - b^t`.
  In order to perform debiasing, we use two shadow variables. One keeps track of
  the biased estimate, and the other keeps track of the number of updates that
  have occurred.
  Args:
    unbiased_var: A Variable representing the current value of the unbiased EMA.
    value: A Tensor representing the most recent value.
    decay: A Tensor representing `1-decay` for the EMA.
  Returns:
    The amount that the unbiased variable should be updated. Computing this
    tensor will also update the shadow variables appropriately.
  """
  with variable_scope.variable_scope(
      unbiased_var.op.name, values=[unbiased_var, value, decay]) as scope:
    with ops.colocate_with(unbiased_var):
      # Clear any surrounding control dependencies so the shadow-variable
      # initializers are created unconditionally.
      with ops.control_dependencies(None):
        biased_initializer = init_ops.zeros_initializer(
            dtype=unbiased_var.dtype)(unbiased_var.get_shape())
        local_step_initializer = init_ops.zeros_initializer()
      biased_var = variable_scope.get_variable(
          "biased", initializer=biased_initializer, trainable=False)
      local_step = variable_scope.get_variable(
          "local_step",
          shape=[],
          dtype=unbiased_var.dtype,
          initializer=local_step_initializer,
          trainable=False)
      # Get an update ops for both shadow variables.
      update_biased = state_ops.assign_sub(biased_var,
                                           (biased_var - value) * decay,
                                           name=scope.name)
      update_local_step = local_step.assign_add(1)
      # Compute the value of the delta to update the unbiased EMA. Make sure to
      # use the new values of the biased variable and the local step.
      with ops.control_dependencies([update_biased, update_local_step]):
        # This function gets `1 - decay`, so use `1.0 - decay` in the exponent.
        unbiased_ema_delta = (unbiased_var - biased_var.read_value() /
                              (1 - math_ops.pow(
                                  1.0 - decay, local_step.read_value())))
      return unbiased_ema_delta
class ExponentialMovingAverage(object):
  """Maintains moving averages of variables by employing an exponential decay.
  When training a model, it is often beneficial to maintain moving averages of
  the trained parameters.  Evaluations that use averaged parameters sometimes
  produce significantly better results than the final trained values.
  The `apply()` method adds shadow copies of trained variables and add ops that
  maintain a moving average of the trained variables in their shadow copies.
  It is used when building the training model.  The ops that maintain moving
  averages are typically run after each training step.
  The `average()` and `average_name()` methods give access to the shadow
  variables and their names.  They are useful when building an evaluation
  model, or when restoring a model from a checkpoint file.  They help use the
  moving averages in place of the last trained values for evaluations.
  The moving averages are computed using exponential decay.  You specify the
  decay value when creating the `ExponentialMovingAverage` object.  The shadow
  variables are initialized with the same initial values as the trained
  variables.  When you run the ops to maintain the moving averages, each
  shadow variable is updated with the formula:
    `shadow_variable -= (1 - decay) * (shadow_variable - variable)`
  This is mathematically equivalent to the classic formula below, but the use
  of an `assign_sub` op (the `"-="` in the formula) allows concurrent lockless
  updates to the variables:
    `shadow_variable = decay * shadow_variable + (1 - decay) * variable`
  Reasonable values for `decay` are close to 1.0, typically in the
  multiple-nines range: 0.999, 0.9999, etc.
  Example usage when creating a training model:
  ```python
  # Create variables.
  var0 = tf.Variable(...)
  var1 = tf.Variable(...)
  # ... use the variables to build a training model...
  ...
  # Create an op that applies the optimizer.  This is what we usually
  # would use as a training op.
  opt_op = opt.minimize(my_loss, [var0, var1])
  # Create an ExponentialMovingAverage object
  ema = tf.train.ExponentialMovingAverage(decay=0.9999)
  # Create the shadow variables, and add ops to maintain moving averages
  # of var0 and var1.
  maintain_averages_op = ema.apply([var0, var1])
  # Create an op that will update the moving averages after each training
  # step.  This is what we will use in place of the usual training op.
  with tf.control_dependencies([opt_op]):
      training_op = tf.group(maintain_averages_op)
  ...train the model by running training_op...
  ```
  There are two ways to use the moving averages for evaluations:
  *  Build a model that uses the shadow variables instead of the variables.
     For this, use the `average()` method which returns the shadow variable
     for a given variable.
  *  Build a model normally but load the checkpoint files to evaluate by using
     the shadow variable names.  For this use the `average_name()` method.  See
     the @{tf.train.Saver} for more
     information on restoring saved variables.
  Example of restoring the shadow variable values:
  ```python
  # Create a Saver that loads variables from their saved shadow values.
  shadow_var0_name = ema.average_name(var0)
  shadow_var1_name = ema.average_name(var1)
  saver = tf.train.Saver({shadow_var0_name: var0, shadow_var1_name: var1})
  saver.restore(...checkpoint filename...)
  # var0 and var1 now hold the moving average values
  ```
  """
  def __init__(self, decay, num_updates=None, zero_debias=False,
               name="ExponentialMovingAverage"):
    """Creates a new ExponentialMovingAverage object.
    The `apply()` method has to be called to create shadow variables and add
    ops to maintain moving averages.
    The optional `num_updates` parameter allows one to tweak the decay rate
    dynamically. It is typical to pass the count of training steps, usually
    kept in a variable that is incremented at each step, in which case the
    decay rate is lower at the start of training.  This makes moving averages
    move faster.  If passed, the actual decay rate used is:
      `min(decay, (1 + num_updates) / (10 + num_updates))`
    Args:
      decay: Float.  The decay to use.
      num_updates: Optional count of number of updates applied to variables.
      zero_debias: If `True`, zero debias moving-averages that are initialized
        with tensors.
      name: String. Optional prefix name to use for the name of ops added in
        `apply()`.
    """
    self._decay = decay
    self._num_updates = num_updates
    self._zero_debias = zero_debias
    self._name = name
    # Maps each tracked variable/tensor to its shadow (average) variable.
    self._averages = {}
  def apply(self, var_list=None):
    """Maintains moving averages of variables.
    `var_list` must be a list of `Variable` or `Tensor` objects.  This method
    creates shadow variables for all elements of `var_list`.  Shadow variables
    for `Variable` objects are initialized to the variable's initial value.
    They will be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
    For `Tensor` objects, the shadow variables are initialized to 0 and zero
    debiased (see docstring in `assign_moving_average` for more details).
    shadow variables are created with `trainable=False` and added to the
    `GraphKeys.ALL_VARIABLES` collection.  They will be returned by calls to
    `tf.global_variables()`.
    Returns an op that updates all shadow variables as described above.
    Note that `apply()` can be called multiple times with different lists of
    variables.
    Args:
      var_list: A list of Variable or Tensor objects. The variables
        and Tensors must be of types float16, float32, or float64.
    Returns:
      An Operation that updates the moving averages.
    Raises:
      TypeError: If the arguments are not all float16, float32, or float64.
      ValueError: If the moving average of one of the variables is already
        being computed.
    """
    # TODO(touts): op_scope
    if var_list is None:
      var_list = variables.trainable_variables()
    zero_debias_true = set()  # set of vars to set `zero_debias=True`
    for var in var_list:
      if var.dtype.base_dtype not in [dtypes.float16, dtypes.float32,
                                      dtypes.float64]:
        raise TypeError("The variables must be half, float, or double: %s" %
                        var.name)
      if var in self._averages:
        raise ValueError("Moving average already computed for: %s" % var.name)
      # For variables: to lower communication bandwidth across devices we keep
      # the moving averages on the same device as the variables. For other
      # tensors, we rely on the existing device allocation mechanism.
      with ops.control_dependencies(None):
        if isinstance(var, variables.Variable):
          avg = slot_creator.create_slot(var,
                                         var.initialized_value(),
                                         self._name,
                                         colocate_with_primary=True)
          # NOTE(mrry): We only add `tf.Variable` objects to the
          # `MOVING_AVERAGE_VARIABLES` collection.
          ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
        else:
          avg = slot_creator.create_zeros_slot(
              var,
              self._name,
              colocate_with_primary=(var.op.type in ["Variable", "VariableV2"]))
          if self._zero_debias:
            zero_debias_true.add(avg)
      self._averages[var] = avg
    with ops.name_scope(self._name) as scope:
      decay = ops.convert_to_tensor(self._decay, name="decay")
      if self._num_updates is not None:
        # With num_updates the effective decay starts small and ramps up:
        # min(decay, (1 + num_updates) / (10 + num_updates)).
        num_updates = math_ops.cast(self._num_updates,
                                    dtypes.float32,
                                    name="num_updates")
        decay = math_ops.minimum(decay,
                                 (1.0 + num_updates) / (10.0 + num_updates))
      updates = []
      for var in var_list:
        zero_debias = self._averages[var] in zero_debias_true
        updates.append(assign_moving_average(
            self._averages[var], var, decay, zero_debias=zero_debias))
      return control_flow_ops.group(*updates, name=scope)
  def average(self, var):
    """Returns the `Variable` holding the average of `var`.
    Args:
      var: A `Variable` object.
    Returns:
      A `Variable` object or `None` if the moving average of `var`
      is not maintained.
    """
    return self._averages.get(var, None)
  def average_name(self, var):
    """Returns the name of the `Variable` holding the average for `var`.
    The typical scenario for `ExponentialMovingAverage` is to compute moving
    averages of variables during training, and restore the variables from the
    computed moving averages during evaluations.
    To restore variables, you have to know the name of the shadow variables.
    That name and the original variable can then be passed to a `Saver()` object
    to restore the variable from the moving average value with:
      `saver = tf.train.Saver({ema.average_name(var): var})`
    `average_name()` can be called whether or not `apply()` has been called.
    Args:
      var: A `Variable` object.
    Returns:
      A string: The name of the variable that will be used or was used
      by the `ExponentialMovingAverage class` to hold the moving average of
      `var`.
    """
    if var in self._averages:
      return self._averages[var].op.name
    return ops.get_default_graph().unique_name(
        var.op.name + "/" + self._name, mark_as_used=False)
  def variables_to_restore(self, moving_avg_variables=None):
    """Returns a map of names to `Variables` to restore.
    If a variable has a moving average, use the moving average variable name as
    the restore name; otherwise, use the variable name.
    For example,
    ```python
      variables_to_restore = ema.variables_to_restore()
      saver = tf.train.Saver(variables_to_restore)
    ```
    Below is an example of such mapping:
    ```
      conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
      conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
      global_step: global_step
    ```
    Args:
      moving_avg_variables: a list of variables that require to use of the
        moving variable name to be restored. If None, it will default to
        variables.moving_average_variables() + variables.trainable_variables()
    Returns:
      A map from restore_names to variables. The restore_name can be the
      moving_average version of the variable name if it exist, or the original
      variable name.
    """
    name_map = {}
    if moving_avg_variables is None:
      # Include trainable variables and variables which have been explicitly
      # added to the moving_average_variables collection.
      moving_avg_variables = variables.trainable_variables()
      moving_avg_variables += variables.moving_average_variables()
    # Remove duplicates
    moving_avg_variables = set(moving_avg_variables)
    # Collect all the variables with moving average,
    for v in moving_avg_variables:
      name_map[self.average_name(v)] = v
    # Make sure we restore variables without moving average as well.
    for v in list(set(variables.global_variables()) - moving_avg_variables):
      if v.op.name not in name_map:
        name_map[v.op.name] = v
    return name_map
| apache-2.0 |
droolsjbpm/kie-wb-common | kie-wb-common-stunner/kie-wb-common-stunner-sets/kie-wb-common-stunner-bpmn/kie-wb-common-stunner-bpmn-api/src/main/java/org/kie/workbench/common/stunner/bpmn/definition/IntermediateCompensationEvent.java | 5060 | /*
* Copyright 2018 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.stunner.bpmn.definition;
import java.util.Objects;
import javax.validation.Valid;
import org.jboss.errai.common.client.api.annotations.MapsTo;
import org.jboss.errai.common.client.api.annotations.Portable;
import org.jboss.errai.databinding.client.api.Bindable;
import org.kie.workbench.common.forms.adf.definitions.annotations.FieldParam;
import org.kie.workbench.common.forms.adf.definitions.annotations.FormDefinition;
import org.kie.workbench.common.forms.adf.definitions.annotations.FormField;
import org.kie.workbench.common.forms.adf.definitions.settings.FieldPolicy;
import org.kie.workbench.common.stunner.bpmn.definition.property.background.BackgroundSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.dataio.DataIOSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.dimensions.CircleDimensionSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.dimensions.Radius;
import org.kie.workbench.common.stunner.bpmn.definition.property.event.BaseCancellingEventExecutionSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.font.FontSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.general.BPMNGeneralSet;
import org.kie.workbench.common.stunner.core.definition.annotation.Definition;
import org.kie.workbench.common.stunner.core.definition.annotation.Property;
import org.kie.workbench.common.stunner.core.definition.annotation.morph.Morph;
import org.kie.workbench.common.stunner.core.util.HashUtil;
import static org.kie.workbench.common.forms.adf.engine.shared.formGeneration.processing.fields.fieldInitializers.nestedForms.AbstractEmbeddedFormsInitializer.COLLAPSIBLE_CONTAINER;
import static org.kie.workbench.common.forms.adf.engine.shared.formGeneration.processing.fields.fieldInitializers.nestedForms.AbstractEmbeddedFormsInitializer.FIELD_CONTAINER_PARAM;
/**
 * Stunner definition for the BPMN intermediate compensation (catching) event.
 * Extends the base catching intermediate event with a cancelling execution
 * set, and adjusts the node labels so this event type is identified as an
 * "IntermediateCompensationEvent" and cannot act as a sequence start.
 */
@Portable
@Bindable
@Definition
@Morph(base = BaseCatchingIntermediateEvent.class)
@FormDefinition(
        startElement = "general",
        policy = FieldPolicy.ONLY_MARKED,
        defaultFieldSettings = {@FieldParam(name = FIELD_CONTAINER_PARAM, value = COLLAPSIBLE_CONTAINER)}
)
public class IntermediateCompensationEvent extends BaseCatchingIntermediateEvent {
    // Execution properties (e.g. cancelling behavior) rendered after the
    // "general" form section.
    @Property
    @FormField(afterElement = "general")
    @Valid
    protected BaseCancellingEventExecutionSet executionSet;
    // No-arg constructor required by Errai/Portable; delegates with defaults.
    public IntermediateCompensationEvent() {
        this(new BPMNGeneralSet(""),
             new BackgroundSet(),
             new FontSet(),
             new CircleDimensionSet(new Radius()),
             new DataIOSet(),
             new BaseCancellingEventExecutionSet());
    }
    public IntermediateCompensationEvent(final @MapsTo("general") BPMNGeneralSet general,
                                         final @MapsTo("backgroundSet") BackgroundSet backgroundSet,
                                         final @MapsTo("fontSet") FontSet fontSet,
                                         final @MapsTo("dimensionsSet") CircleDimensionSet dimensionsSet,
                                         final @MapsTo("dataIOSet") DataIOSet dataIOSet,
                                         final @MapsTo("executionSet") BaseCancellingEventExecutionSet executionSet) {
        super(general,
              backgroundSet,
              fontSet,
              dimensionsSet,
              dataIOSet);
        this.executionSet = executionSet;
    }
    @Override
    protected void initLabels() {
        super.initLabels();
        // Identify the node type and forbid using it to start a sequence.
        labels.add("IntermediateCompensationEvent");
        labels.remove("sequence_start");
    }
    public BaseCancellingEventExecutionSet getExecutionSet() {
        return executionSet;
    }
    public void setExecutionSet(BaseCancellingEventExecutionSet executionSet) {
        this.executionSet = executionSet;
    }
    @Override
    public int hashCode() {
        // Combine the parent's hash with the execution set's, mirroring equals().
        return HashUtil.combineHashCodes(super.hashCode(),
                                         Objects.hashCode(executionSet));
    }
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o instanceof IntermediateCompensationEvent) {
            IntermediateCompensationEvent other = (IntermediateCompensationEvent) o;
            return super.equals(other) &&
                    Objects.equals(executionSet, other.executionSet);
        }
        return false;
    }
}
fengshao0907/jstorm | jstorm-client-extension/src/main/java/com/alibaba/jstorm/metric/UserDefMetric.java | 2814 | package com.alibaba.jstorm.metric;
import java.util.Map;
import java.util.HashMap;
import java.util.Map.Entry;
import java.io.Serializable;
import com.codahale.metrics.Metric;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Sampling;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.alibaba.jstorm.client.metric.MetricCallback;
import com.alibaba.jstorm.metric.MetricInfo;
/**
* /storm-zk-root/Monitor/{topologyid}/UserDefMetrics/{workerid} data
*/
public class UserDefMetric {
private static final long serialVersionUID = 4547327064057659279L;
private Map<String, Gauge<?>> gaugeMap = new HashMap<String, Gauge<?>>();
private Map<String, Counter> counterMap = new HashMap<String, Counter>();
private Map<String, Histogram> histogramMap = new HashMap<String, Histogram>();
private Map<String, Timer> timerMap = new HashMap<String, Timer>();
private Map<String, Meter> meterMap = new HashMap<String, Meter>();
private Map<String, MetricCallback> callbacks = new HashMap<String, MetricCallback>();
public UserDefMetric() {
}
public Map<String, Gauge<?>> getGauge() {
return this.gaugeMap;
}
public void registerCallback(MetricCallback callback, String name) {
if (callbacks.containsKey(name) != true) {
callbacks.put(name, callback);
}
}
public void unregisterCallback(String name) {
callbacks.remove(name);
}
public Map<String, MetricCallback> getCallbacks() {
return callbacks;
}
public void addToGauge(String name, Gauge<?> gauge) {
gaugeMap.put(name, gauge);
}
public Map<String, Counter> getCounter() {
return this.counterMap;
}
public void addToCounter(String name, Counter counter) {
counterMap.put(name, counter);
}
public Map<String, Histogram> getHistogram() {
return this.histogramMap;
}
public void addToHistogram(String name, Histogram histogram) {
histogramMap.put(name, histogram);
}
public Map<String, Timer> getTimer() {
return this.timerMap;
}
public void addToTimer(String name, Timer timer) {
timerMap.put(name, timer);
}
public Map<String, Meter> getMeter() {
return this.meterMap;
}
public void addToMeter(String name, Meter meter) {
meterMap.put(name, meter);
}
public void remove(String name) {
if (gaugeMap.containsKey(name)) {
gaugeMap.remove(name);
} else if (counterMap.containsKey(name)) {
counterMap.remove(name);
} else if (histogramMap.containsKey(name)) {
histogramMap.remove(name);
} else if (timerMap.containsKey(name)) {
timerMap.remove(name);
} else if (meterMap.containsKey(name)) {
meterMap.remove(name);
}
if (callbacks.containsKey(name)) {
callbacks.remove(name);
}
}
} | apache-2.0 |
baslr/ArangoDB | 3rdParty/V8/V8-5.0.71.39/test/test262/data/test/built-ins/JSON/parse/S15.12.2_A1.js | 572 | // Copyright 2011 the Sputnik authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
info: JSON.parse must create a property with the given property name
es5id: 15.12.2_A1
description: Tests that JSON.parse treats "__proto__" as a regular property name
---*/
// Per ES5 15.12.2, parsing '{"__proto__":[]}' must create an own data
// property named "__proto__" on the result, not mutate its [[Prototype]].
var x = JSON.parse('{"__proto__":[]}');
// The prototype must remain Object.prototype...
if (Object.getPrototypeOf(x) !== Object.prototype) {
  $ERROR('#1: JSON.parse confused by "__proto__"');
}
// ...and "__proto__" must be readable as an ordinary property holding the array.
if (!Array.isArray(x.__proto__)) {
  $ERROR('#2: JSON.parse did not set "__proto__" as a regular property');
}
Microsoft/TypeScript | tests/baselines/reference/specializedInheritedConstructors1.js | 1809 | //// [specializedInheritedConstructors1.ts]
interface ViewOptions<TModel> {
model: TModel;
}
class View<TModel> {
constructor(options: ViewOptions<TModel>) { }
model: TModel;
}
class Model { }
class MyView extends View<Model> { }
var m: ViewOptions<Model> = { model: new Model() };
var aView = new View({ model: new Model() });
var aView2 = new View(m);
var myView = new MyView(m); // was error
//// [specializedInheritedConstructors1.js]
// NOTE(review): this is compiler-emitted baseline output for the TypeScript
// test above; the baseline must match the compiler's emit byte-for-byte, so
// only review comments are added here -- do not reformat the code.
var __extends = (this && this.__extends) || (function () {
    var extendStatics = function (d, b) {
        extendStatics = Object.setPrototypeOf ||
            ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
            function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
        return extendStatics(d, b);
    };
    return function (d, b) {
        if (typeof b !== "function" && b !== null)
            throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
        extendStatics(d, b);
        function __() { this.constructor = d; }
        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
    };
})();
var View = /** @class */ (function () {
    function View(options) {
    }
    return View;
}());
var Model = /** @class */ (function () {
    function Model() {
    }
    return Model;
}());
var MyView = /** @class */ (function (_super) {
    __extends(MyView, _super);
    function MyView() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    return MyView;
}(View));
var m = { model: new Model() };
var aView = new View({ model: new Model() });
var aView2 = new View(m);
var myView = new MyView(m); // was error
Dhandapani/gluster-ovirt | backend/manager/modules/vdsbroker/src/main/java/org/ovirt/engine/core/vdsbroker/vdsbroker/GetAllVmStatsVDSCommand.java | 1709 | package org.ovirt.engine.core.vdsbroker.vdsbroker;
import org.ovirt.engine.core.common.businessentities.VmDynamic;
import org.ovirt.engine.core.common.businessentities.VmStatistics;
import org.ovirt.engine.core.common.vdscommands.VdsIdAndVdsVDSCommandParametersBase;
import org.ovirt.engine.core.compat.Guid;
import org.ovirt.engine.core.compat.KeyValuePairCompat;
import org.ovirt.engine.core.utils.log.Logged;
import org.ovirt.engine.core.utils.log.Logged.LogLevel;
/**
 * VDS broker command that fetches runtime statistics for all VMs on a host
 * and returns them as a map from VM id to a (dynamic data, statistics) pair.
 * Logged at DEBUG level since it runs frequently as part of monitoring.
 */
@Logged(executionLevel = LogLevel.DEBUG)
public class GetAllVmStatsVDSCommand<P extends VdsIdAndVdsVDSCommandParametersBase> extends VmStatsVdsBrokerCommand<P> {
    public GetAllVmStatsVDSCommand(P parameters) {
        super(parameters, parameters.getVds());
    }
    @Override
    protected void ExecuteVdsBrokerCommand() {
        mVmListReturn = getBroker().getAllVmStats();
        // Validate the broker's return status before translating the payload.
        ProceedProxyReturnValue();
        java.util.HashMap<Guid, java.util.Map.Entry<VmDynamic, VmStatistics>> returnVMs =
                new java.util.HashMap<Guid, java.util.Map.Entry<VmDynamic, VmStatistics>>();
        for (int idx = 0; idx < mVmListReturn.mInfoList.length; ++idx) {
            // Build the dynamic data and statistics entities from each raw entry.
            VmDynamic vmDynamic = new ExtendedVmDynamic(getVds());
            VdsBrokerObjectsBuilder.updateVMDynamicData(vmDynamic, mVmListReturn.mInfoList[idx]);
            VmStatistics vmStatistics = VdsBrokerObjectsBuilder.buildVMStatisticsData(mVmListReturn.mInfoList[idx]);
            returnVMs.put(vmDynamic.getId(), new KeyValuePairCompat<VmDynamic, VmStatistics>(vmDynamic,
                    vmStatistics));
        }
        setReturnValue(returnVMs);
    }
    @Override
    protected boolean getIsPrintReturnValue() {
        // The per-VM statistics map is large; suppress it from command logging.
        return false;
    }
}
wuranbo/elasticsearch | core/src/main/java/org/elasticsearch/index/IndexSettings.java | 24691 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.MergePolicy;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.mapper.AllFieldMapper;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.node.Node;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
/**
* This class encapsulates all index level settings and handles settings updates.
* It's created per index and available to all index level classes and allows them to retrieve
* the latest updated settings instance. Classes that need to listen to settings updates can register
* a settings consumer at index creation via {@link IndexModule#addSettingsUpdateConsumer(Setting, Consumer)} that will
* be called for each settings update.
*/
public final class IndexSettings {
public static final Setting<String> DEFAULT_FIELD_SETTING =
new Setting<>("index.query.default_field", AllFieldMapper.NAME, Function.identity(), Property.IndexScope);
public static final Setting<Boolean> QUERY_STRING_LENIENT_SETTING =
Setting.boolSetting("index.query_string.lenient", false, Property.IndexScope);
public static final Setting<Boolean> QUERY_STRING_ANALYZE_WILDCARD =
Setting.boolSetting("indices.query.query_string.analyze_wildcard", false, Property.NodeScope);
public static final Setting<Boolean> QUERY_STRING_ALLOW_LEADING_WILDCARD =
Setting.boolSetting("indices.query.query_string.allowLeadingWildcard", true, Property.NodeScope);
public static final Setting<Boolean> ALLOW_UNMAPPED =
Setting.boolSetting("index.query.parse.allow_unmapped_fields", true, Property.IndexScope);
public static final Setting<TimeValue> INDEX_TRANSLOG_SYNC_INTERVAL_SETTING =
Setting.timeSetting("index.translog.sync_interval", TimeValue.timeValueSeconds(5), TimeValue.timeValueMillis(100),
Property.IndexScope);
public static final Setting<Translog.Durability> INDEX_TRANSLOG_DURABILITY_SETTING =
new Setting<>("index.translog.durability", Translog.Durability.REQUEST.name(),
(value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), Property.Dynamic, Property.IndexScope);
public static final Setting<Boolean> INDEX_WARMER_ENABLED_SETTING =
Setting.boolSetting("index.warmer.enabled", true, Property.Dynamic, Property.IndexScope);
public static final Setting<Boolean> INDEX_TTL_DISABLE_PURGE_SETTING =
Setting.boolSetting("index.ttl.disable_purge", false, Property.Dynamic, Property.IndexScope);
public static final Setting<String> INDEX_CHECK_ON_STARTUP = new Setting<>("index.shard.check_on_startup", "false", (s) -> {
switch(s) {
case "false":
case "true":
case "fix":
case "checksum":
return s;
default:
throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, fix, checksum] but was: " + s);
}
}, Property.IndexScope);
/**
* Index setting describing the maximum value of from + size on a query.
* The Default maximum value of from + size on a query is 10,000. This was chosen as
* a conservative default as it is sure to not cause trouble. Users can
* certainly profile their cluster and decide to set it to 100,000
* safely. 1,000,000 is probably way to high for any cluster to set
* safely.
*/
public static final Setting<Integer> MAX_RESULT_WINDOW_SETTING =
Setting.intSetting("index.max_result_window", 10000, 1, Property.Dynamic, Property.IndexScope);
/**
* Index setting describing the maximum size of the rescore window. Defaults to {@link #MAX_RESULT_WINDOW_SETTING}
* because they both do the same thing: control the size of the heap of hits.
*/
public static final Setting<Integer> MAX_RESCORE_WINDOW_SETTING =
Setting.intSetting("index.max_rescore_window", MAX_RESULT_WINDOW_SETTING, 1, Property.Dynamic, Property.IndexScope);
public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS);
public static final Setting<TimeValue> INDEX_REFRESH_INTERVAL_SETTING =
Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS),
Property.Dynamic, Property.IndexScope);
public static final Setting<ByteSizeValue> INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING =
Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), Property.Dynamic,
Property.IndexScope);
public static final Setting<TimeValue> INDEX_SEQ_NO_CHECKPOINT_SYNC_INTERVAL =
Setting.timeSetting("index.seq_no.checkpoint_sync_interval", new TimeValue(30, TimeUnit.SECONDS),
new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, Property.IndexScope);
/**
* Index setting to enable / disable deletes garbage collection.
* This setting is realtime updateable
*/
public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
public static final Setting<TimeValue> INDEX_GC_DELETES_SETTING =
Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic,
Property.IndexScope);
/**
* The maximum number of refresh listeners allows on this shard.
*/
public static final Setting<Integer> MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", 1000, 0,
Property.Dynamic, Property.IndexScope);
/**
* The maximum number of slices allowed in a scroll request
*/
public static final Setting<Integer> MAX_SLICES_PER_SCROLL = Setting.intSetting("index.max_slices_per_scroll",
1024, 1, Property.Dynamic, Property.IndexScope);
private final Index index;
private final Version version;
private final Logger logger;
private final String nodeName;
private final Settings nodeSettings;
private final int numberOfShards;
private final boolean isShadowReplicaIndex;
private final ParseFieldMatcher parseFieldMatcher;
// volatile fields are updated via #updateIndexMetaData(IndexMetaData) under lock
private volatile Settings settings;
private volatile IndexMetaData indexMetaData;
private final String defaultField;
private final boolean queryStringLenient;
private final boolean queryStringAnalyzeWildcard;
private final boolean queryStringAllowLeadingWildcard;
private final boolean defaultAllowUnmappedFields;
private volatile Translog.Durability durability;
private final TimeValue syncInterval;
private volatile TimeValue refreshInterval;
private final TimeValue globalCheckpointInterval;
private volatile ByteSizeValue flushThresholdSize;
private final MergeSchedulerConfig mergeSchedulerConfig;
private final MergePolicyConfig mergePolicyConfig;
private final IndexScopedSettings scopedSettings;
private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis();
private volatile boolean warmerEnabled;
private volatile int maxResultWindow;
private volatile int maxRescoreWindow;
private volatile boolean TTLPurgeDisabled;
/**
* The maximum number of refresh listeners allows on this shard.
*/
private volatile int maxRefreshListeners;
/**
* The maximum number of slices allowed in a scroll request.
*/
private volatile int maxSlicesPerScroll;
/**
* Returns the default search field for this index.
*/
public String getDefaultField() {
return defaultField;
}
/**
* Returns <code>true</code> if query string parsing should be lenient. The default is <code>false</code>
*/
public boolean isQueryStringLenient() {
return queryStringLenient;
}
/**
* Returns <code>true</code> if the query string should analyze wildcards. The default is <code>false</code>
*/
public boolean isQueryStringAnalyzeWildcard() {
return queryStringAnalyzeWildcard;
}
/**
* Returns <code>true</code> if the query string parser should allow leading wildcards. The default is <code>true</code>
*/
public boolean isQueryStringAllowLeadingWildcard() {
return queryStringAllowLeadingWildcard;
}
/**
* Returns <code>true</code> if queries should be lenient about unmapped fields. The default is <code>true</code>
*/
public boolean isDefaultAllowUnmappedFields() {
return defaultAllowUnmappedFields;
}
/**
* Creates a new {@link IndexSettings} instance. The given node settings will be merged with the settings in the metadata
* while index level settings will overwrite node settings.
*
* @param indexMetaData the index metadata this settings object is associated with
* @param nodeSettings the nodes settings this index is allocated on.
*/
public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings) {
this(indexMetaData, nodeSettings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
}
/**
* Creates a new {@link IndexSettings} instance. The given node settings will be merged with the settings in the metadata
* while index level settings will overwrite node settings.
*
* @param indexMetaData the index metadata this settings object is associated with
* @param nodeSettings the nodes settings this index is allocated on.
*/
public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings, IndexScopedSettings indexScopedSettings) {
    // Per-index copy of the scoped settings so dynamic updates are isolated to this index.
    scopedSettings = indexScopedSettings.copy(nodeSettings, indexMetaData);
    this.nodeSettings = nodeSettings;
    // Merged view: index-level settings override node-level ones.
    this.settings = Settings.builder().put(nodeSettings).put(indexMetaData.getSettings()).build();
    this.index = indexMetaData.getIndex();
    version = Version.indexCreated(settings);
    logger = Loggers.getLogger(getClass(), settings, index);
    nodeName = Node.NODE_NAME_SETTING.get(settings);
    this.indexMetaData = indexMetaData;
    numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
    isShadowReplicaIndex = IndexMetaData.isIndexUsingShadowReplicas(settings);
    // Cache the static query settings. NOTE(review): the two QUERY_STRING_* wildcard
    // settings are read from nodeSettings (they are Property.NodeScope), unlike the rest.
    this.defaultField = DEFAULT_FIELD_SETTING.get(settings);
    this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings);
    this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings);
    this.queryStringAllowLeadingWildcard = QUERY_STRING_ALLOW_LEADING_WILDCARD.get(nodeSettings);
    this.parseFieldMatcher = new ParseFieldMatcher(settings);
    this.defaultAllowUnmappedFields = scopedSettings.get(ALLOW_UNMAPPED);
    // Seed the volatile/dynamic fields with their current values; the consumers
    // registered below keep them up to date on settings updates.
    this.durability = scopedSettings.get(INDEX_TRANSLOG_DURABILITY_SETTING);
    syncInterval = INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.get(settings);
    refreshInterval = scopedSettings.get(INDEX_REFRESH_INTERVAL_SETTING);
    globalCheckpointInterval = scopedSettings.get(INDEX_SEQ_NO_CHECKPOINT_SYNC_INTERVAL);
    flushThresholdSize = scopedSettings.get(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING);
    mergeSchedulerConfig = new MergeSchedulerConfig(this);
    gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis();
    warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING);
    maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING);
    maxRescoreWindow = scopedSettings.get(MAX_RESCORE_WINDOW_SETTING);
    TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING);
    maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD);
    maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL);
    // Must be constructed before its consumers are registered below.
    this.mergePolicyConfig = new MergePolicyConfig(logger, this);
    // Wire dynamic settings to their setters so updateIndexMetaData() propagates changes.
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, mergePolicyConfig::setExpungeDeletesAllowed);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, mergePolicyConfig::setFloorSegmentSetting);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, mergePolicyConfig::setMaxMergesAtOnce);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING, mergePolicyConfig::setMaxMergesAtOnceExplicit);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, mergePolicyConfig::setMaxMergedSegment);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, mergePolicyConfig::setSegmentsPerTier);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, mergePolicyConfig::setReclaimDeletesWeight);
    // Thread count and merge count are interdependent, hence the two-setting consumer.
    scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING,
        mergeSchedulerConfig::setMaxThreadAndMergeCount);
    scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.AUTO_THROTTLE_SETTING, mergeSchedulerConfig::setAutoThrottle);
    scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_DURABILITY_SETTING, this::setTranslogDurability);
    scopedSettings.addSettingsUpdateConsumer(INDEX_TTL_DISABLE_PURGE_SETTING, this::setTTLPurgeDisabled);
    scopedSettings.addSettingsUpdateConsumer(MAX_RESULT_WINDOW_SETTING, this::setMaxResultWindow);
    scopedSettings.addSettingsUpdateConsumer(MAX_RESCORE_WINDOW_SETTING, this::setMaxRescoreWindow);
    scopedSettings.addSettingsUpdateConsumer(INDEX_WARMER_ENABLED_SETTING, this::setEnableWarmer);
    scopedSettings.addSettingsUpdateConsumer(INDEX_GC_DELETES_SETTING, this::setGCDeletes);
    scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize);
    scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval);
    scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners);
    scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll);
}
private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) {
this.flushThresholdSize = byteSizeValue;
}
private void setGCDeletes(TimeValue timeValue) {
this.gcDeletesInMillis = timeValue.getMillis();
}
private void setRefreshInterval(TimeValue timeValue) {
this.refreshInterval = timeValue;
}
/**
* Returns the settings for this index. These settings contain the node and index level settings where
* settings that are specified on both index and node level are overwritten by the index settings.
*/
public Settings getSettings() { return settings; }
/**
* Returns the index this settings object belongs to
*/
public Index getIndex() {
return index;
}
/**
* Returns the indexes UUID
*/
public String getUUID() {
return getIndex().getUUID();
}
/**
* Returns <code>true</code> if the index has a custom data path
*/
public boolean hasCustomDataPath() {
return customDataPath() != null;
}
/**
* Returns the customDataPath for this index, if configured. <code>null</code> o.w.
*/
public String customDataPath() {
return settings.get(IndexMetaData.SETTING_DATA_PATH);
}
/**
* Returns <code>true</code> iff the given settings indicate that the index
* associated with these settings allocates it's shards on a shared
* filesystem.
*/
public boolean isOnSharedFilesystem() {
return IndexMetaData.isOnSharedFilesystem(getSettings());
}
/**
 * Returns <code>true</code> iff the given settings indicate that the index associated
 * with these settings uses shadow replicas. Otherwise <code>false</code>. The default
 * setting for this is <code>false</code>.
 */
public boolean isIndexUsingShadowReplicas() {
    // Fix: previously delegated to IndexMetaData.isOnSharedFilesystem, which answers
    // a different question (and is already exposed by isOnSharedFilesystem() above).
    // Use the same check the constructor uses for the isShadowReplicaIndex field.
    return IndexMetaData.isIndexUsingShadowReplicas(getSettings());
}
/**
* Returns the version the index was created on.
* @see Version#indexCreated(Settings)
*/
public Version getIndexVersionCreated() {
return version;
}
/**
* Returns the current node name
*/
public String getNodeName() {
return nodeName;
}
/**
* Returns the current IndexMetaData for this index
*/
public IndexMetaData getIndexMetaData() {
return indexMetaData;
}
/**
* Returns the number of shards this index has.
*/
public int getNumberOfShards() { return numberOfShards; }
/**
* Returns the number of replicas this index has.
*/
public int getNumberOfReplicas() { return settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null); }
/**
* Returns <code>true</code> iff this index uses shadow replicas.
* @see IndexMetaData#isIndexUsingShadowReplicas(Settings)
*/
public boolean isShadowReplicaIndex() { return isShadowReplicaIndex; }
/**
* Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the
* index settings and the node settings where node settings are overwritten by index settings.
*/
public Settings getNodeSettings() {
return nodeSettings;
}
/**
* Returns a {@link ParseFieldMatcher} for this index.
*/
public ParseFieldMatcher getParseFieldMatcher() { return parseFieldMatcher; }
/**
* Updates the settings and index metadata and notifies all registered settings consumers with the new settings iff at least one setting has changed.
*
* @return <code>true</code> iff any setting has been updated otherwise <code>false</code>.
*/
public synchronized boolean updateIndexMetaData(IndexMetaData indexMetaData) {
    final Settings newSettings = indexMetaData.getSettings();
    // The creation version is immutable for the lifetime of an index; a mismatch
    // means the caller handed us metadata for a different (recreated) index.
    if (version.equals(Version.indexCreated(newSettings)) == false) {
        throw new IllegalArgumentException("version mismatch on settings update expected: " + version + " but was: " + Version.indexCreated(newSettings));
    }
    // Same guard for the index UUID.
    final String newUUID = newSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
    if (newUUID.equals(getUUID()) == false) {
        throw new IllegalArgumentException("uuid mismatch on settings update expected: " + getUUID() + " but was: " + newUUID);
    }
    // Always keep the latest metadata, even when no index-scoped setting changed.
    this.indexMetaData = indexMetaData;
    final Settings existingSettings = this.settings;
    // Compare only index-scoped keys; node-level settings are not updatable here.
    if (existingSettings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).getAsMap().equals(newSettings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).getAsMap())) {
        // nothing to update, same settings
        return false;
    }
    // Notify all registered consumers first, then publish the merged settings.
    scopedSettings.applySettings(newSettings);
    this.settings = Settings.builder().put(nodeSettings).put(newSettings).build();
    return true;
}
/**
* Returns the translog durability for this index.
*/
public Translog.Durability getTranslogDurability() {
return durability;
}
private void setTranslogDurability(Translog.Durability durability) {
this.durability = durability;
}
/**
* Returns true if index warmers are enabled, otherwise <code>false</code>
*/
public boolean isWarmerEnabled() {
return warmerEnabled;
}
private void setEnableWarmer(boolean enableWarmer) {
this.warmerEnabled = enableWarmer;
}
/**
* Returns the translog sync interval. This is the interval in which the transaction log is asynchronously fsynced unless
* the transaction log is fsyncing on every operations
*/
public TimeValue getTranslogSyncInterval() {
return syncInterval;
}
/**
* Returns this interval in which the shards of this index are asynchronously refreshed. <tt>-1</tt> means async refresh is disabled.
*/
public TimeValue getRefreshInterval() {
return refreshInterval;
}
/**
* Returns this interval in which the primary shards of this index should check and advance the global checkpoint
*/
public TimeValue getGlobalCheckpointInterval() {
return globalCheckpointInterval;
}
/**
* Returns the transaction log threshold size when to forcefully flush the index and clear the transaction log.
*/
public ByteSizeValue getFlushThresholdSize() { return flushThresholdSize; }
/**
* Returns the {@link MergeSchedulerConfig}
*/
public MergeSchedulerConfig getMergeSchedulerConfig() { return mergeSchedulerConfig; }
/**
* Returns the max result window for search requests, describing the maximum value of from + size on a query.
*/
public int getMaxResultWindow() {
return this.maxResultWindow;
}
private void setMaxResultWindow(int maxResultWindow) {
this.maxResultWindow = maxResultWindow;
}
/**
* Returns the maximum rescore window for search requests.
*/
public int getMaxRescoreWindow() {
return maxRescoreWindow;
}
private void setMaxRescoreWindow(int maxRescoreWindow) {
this.maxRescoreWindow = maxRescoreWindow;
}
/**
* Returns the GC deletes cycle in milliseconds.
*/
public long getGcDeletesInMillis() {
return gcDeletesInMillis;
}
/**
* Returns the merge policy that should be used for this index.
*/
public MergePolicy getMergePolicy() {
return mergePolicyConfig.getMergePolicy();
}
/**
* Returns <code>true</code> if the TTL purge is disabled for this index. Default is <code>false</code>
*/
public boolean isTTLPurgeDisabled() {
return TTLPurgeDisabled;
}
private void setTTLPurgeDisabled(boolean ttlPurgeDisabled) {
this.TTLPurgeDisabled = ttlPurgeDisabled;
}
public <T> T getValue(Setting<T> setting) {
return scopedSettings.get(setting);
}
/**
* The maximum number of refresh listeners allows on this shard.
*/
public int getMaxRefreshListeners() {
return maxRefreshListeners;
}
private void setMaxRefreshListeners(int maxRefreshListeners) {
this.maxRefreshListeners = maxRefreshListeners;
}
/**
* The maximum number of slices allowed in a scroll request.
*/
public int getMaxSlicesPerScroll() {
return maxSlicesPerScroll;
}
private void setMaxSlicesPerScroll(int value) {
this.maxSlicesPerScroll = value;
}
public IndexScopedSettings getScopedSettings() { return scopedSettings;}
}
| apache-2.0 |
lyogavin/spark | core/src/main/java/spark/network/netty/FileServerHandler.java | 2852 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.network.netty;
import java.io.File;
import java.io.FileInputStream;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundMessageHandlerAdapter;
import io.netty.channel.DefaultFileRegion;
/**
 * Netty handler that serves block files by id. For each request it writes a
 * {@link FileHeader} carrying the payload length followed by the raw file bytes
 * (zero-copy via {@link DefaultFileRegion}). A header with length 0 tells the
 * client the block could not be served.
 */
class FileServerHandler extends ChannelInboundMessageHandlerAdapter<String> {

    PathResolver pResolver;

    public FileServerHandler(PathResolver pResolver) {
        this.pResolver = pResolver;
    }

    @Override
    public void messageReceived(ChannelHandlerContext ctx, String blockId) {
        String path = pResolver.getAbsolutePath(blockId);
        // if getFilePath returns null, close the channel
        if (path == null) {
            //ctx.close();
            return;
        }
        File file = new File(path);
        if (file.exists()) {
            if (!file.isFile()) {
                // Not a regular file: reply with an empty header so the client fails fast.
                ctx.write(new FileHeader(0, blockId).buffer());
                ctx.flush();
                return;
            }
            long length = file.length();
            if (length > Integer.MAX_VALUE || length <= 0) {
                // Empty, vanished, or too-large file: FileHeader carries an int length.
                ctx.write(new FileHeader(0, blockId).buffer());
                ctx.flush();
                return;
            }
            // Fix: plain cast instead of the deprecated Long boxing constructor;
            // safe because the range was checked above.
            int len = (int) length;
            ctx.write(new FileHeader(len, blockId).buffer());
            try {
                // Fix: reuse the length sampled above instead of calling file.length()
                // again, so the region size always matches the advertised header even
                // if the file changes between the two reads.
                ctx.sendFile(new DefaultFileRegion(
                        new FileInputStream(file).getChannel(), 0, length));
            } catch (Exception e) {
                // Best effort: log and fall through, matching the original behavior.
                e.printStackTrace();
            }
        } else {
            // File not found: signal with an empty header.
            ctx.write(new FileHeader(0, blockId).buffer());
        }
        ctx.flush();
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        cause.printStackTrace();
        ctx.close();
    }
}
| apache-2.0 |
ericmckean/syzygy | syzygy/kasko/waitable_timer_impl_unittest.cc | 1284 | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "syzygy/kasko/waitable_timer_impl.h"
#include <windows.h>
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "gtest/gtest.h"
namespace kasko {
// Verifies that a 100 ms one-shot timer becomes signaled roughly on schedule.
TEST(WaitableTimerImplTest, BasicTest) {
  base::Time start = base::Time::Now();
  scoped_ptr<WaitableTimer> instance =
      WaitableTimerImpl::Create(base::TimeDelta::FromMilliseconds(100));
  ASSERT_TRUE(instance);
  instance->Start();
  // Wait up to 5000 ms.
  ASSERT_EQ(WAIT_OBJECT_0, ::WaitForSingleObject(instance->GetHANDLE(), 5000));
  // Loose 50-500 ms bounds around the 100 ms target tolerate scheduler jitter.
  EXPECT_LT(50, (base::Time::Now() - start).InMilliseconds());
  EXPECT_GT(500, (base::Time::Now() - start).InMilliseconds());
}
| apache-2.0 |
jiajiechen/mxnet | src/imperative/cached_op.cc | 19653 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <unordered_set>
#include <iostream>
#include "./imperative_utils.h"
namespace mxnet {
DMLC_REGISTER_PARAMETER(CachedOpParam);
// Builds the three graphs a cached op needs: the forward graph (deduplicated
// outputs + entry ref counts), the backward graph (gradients of all read-only
// inputs), and the full forward+backward graph, then records which inputs,
// outputs, and output-gradients the backward pass actually depends on.
Imperative::CachedOp::CachedOp(
    const nnvm::Symbol& sym,
    const std::vector<std::pair<std::string, std::string> >& kwargs) {
  using namespace nnvm;
  using namespace imperative;
  static const std::vector<const Op*> zero_ops{Op::Get("zeros_like"), Op::Get("_zeros")};
  static const auto _copy = Op::Get("_copy");
  param_.Init(kwargs);
  // construct forward graph
  {
    // A graph output may not appear twice; duplicates are replaced by _copy
    // nodes so each output entry is unique.
    NodeEntryMap<int> dedup_out;
    for (const auto& i : sym.outputs) {
      if (dedup_out.count(i)) {
        NodePtr copy_node = Node::Create();
        copy_node->attrs.op = _copy;
        copy_node->attrs.name =
            i.node->attrs.name + "_copy" + std::to_string(dedup_out[i]++);
        copy_node->inputs.emplace_back(i);
        if (_copy->attr_parser != nullptr) {
          _copy->attr_parser(&(copy_node->attrs));
        }
        fwd_graph_.outputs.push_back(NodeEntry{copy_node, 0, 0});
      } else {
        dedup_out.insert({i, 0});
        fwd_graph_.outputs.push_back(i);
      }
    }
    const auto& idx = fwd_graph_.indexed_graph();
    CHECK_GE(idx.input_nodes().size(), 1) << "CachedOp requires at least 1 input";
    // Count how often each entry is consumed (inputs, outputs, per-node uses);
    // used later by the memory planner for forward-only execution.
    std::vector<uint32_t> ref_count(idx.num_node_entries(), 0);
    for (const auto& i : idx.input_nodes()) ++ref_count[idx.entry_id(i, 0)];
    for (const auto& i : idx.outputs()) ++ref_count[idx.entry_id(i)];
    for (size_t i = 0; i < idx.num_nodes(); ++i) {
      for (const auto& j : idx[i].inputs) ++ref_count[idx.entry_id(j)];
    }
    fwd_graph_.attrs["forward_ref_count"] =
        std::make_shared<dmlc::any>(std::move(ref_count));
    // Small graphs are inlined instead of being executed as a cached op.
    inlining_ = (idx.num_nodes() - idx.input_nodes().size()) <= param_.inline_limit;
  }
  // construct backward graph
  {
    // One placeholder head-gradient entry per forward output.
    ograd_entries_.reserve(fwd_graph_.outputs.size());
    for (size_t i = 0; i < fwd_graph_.outputs.size(); ++i) {
      ograd_entries_.emplace_back(NodeEntry{Node::Create(), 0, 0});
    }
    // Gradients are taken w.r.t. the read-only args only (aux states excluded).
    std::vector<NodeEntry> xs;
    std::vector<NodePtr> args = sym.ListInputs(Symbol::kReadOnlyArgs);
    xs.reserve(args.size());
    for (const auto& i : args) xs.emplace_back(NodeEntry{i, 0, 0});
    CHECK_GT(xs.size(), 0)
        << "There are no inputs in computation graph that require gradients.";
    grad_graph_ = pass::Gradient(
        fwd_graph_, fwd_graph_.outputs, xs, ograd_entries_,
        exec::AggregateGradient, nullptr, nullptr,
        zero_ops, "_copy");
  }
  // construct full graph
  {
    size_t num_forward_nodes = fwd_graph_.indexed_graph().num_nodes();
    size_t num_forward_entries = fwd_graph_.indexed_graph().num_node_entries();
    // Full graph = forward outputs followed by all gradient outputs.
    full_graph_.outputs = fwd_graph_.outputs;
    curr_grad_req_ = std::vector<bool>(grad_graph_.outputs.size(), true);
    for (const auto& i : grad_graph_.outputs) full_graph_.outputs.emplace_back(i);
    const auto& idx = full_graph_.indexed_graph();
    // Additional references contributed by backward-only nodes.
    std::vector<uint32_t> ref_count(idx.num_node_entries(), 0);
    for (size_t i = num_forward_nodes; i < idx.num_nodes(); ++i) {
      for (const auto& j : idx[i].inputs) {
        ++ref_count[idx.entry_id(j)];
      }
    }
    // full_ref_count = forward counts + backward counts, for recording mode.
    auto full_ref_count = fwd_graph_.GetAttr<std::vector<uint32_t> >("forward_ref_count");
    for (size_t i = 0; i < num_forward_entries; ++i) full_ref_count[i] += ref_count[i];
    fwd_graph_.attrs["full_ref_count"] =
        std::make_shared<dmlc::any>(std::move(full_ref_count));
    size_t num_forward_inputs = num_inputs();
    size_t num_forward_outputs = num_outputs();
    // Record which head gradients the backward pass actually consumes.
    for (uint32_t i = 0; i < ograd_entries_.size(); ++i) {
      if (!idx.exist(ograd_entries_[i].node.get())) continue;
      auto eid = idx.entry_id(ograd_entries_[i]);
      if (ref_count[eid] > 0) {
        bwd_ograd_dep_.push_back(i);
      }
    }
    // Mark forward inputs that must be retained for backward.
    save_inputs_.resize(num_forward_inputs, false);
    for (uint32_t i = 0; i < num_forward_inputs; ++i) {
      auto eid = idx.entry_id(idx.input_nodes()[i], 0);
      if (ref_count[eid] > 0) {
        save_inputs_[i] = true;
        bwd_in_dep_.push_back(i);
      }
    }
    // Mark forward outputs that must be retained for backward.
    save_outputs_.resize(idx.outputs().size(), false);
    for (uint32_t i = 0; i < num_forward_outputs; ++i) {
      auto eid = idx.entry_id(idx.outputs()[i]);
      if (ref_count[eid] > 0) {
        save_outputs_[i] = true;
        bwd_out_dep_.push_back(i);
      }
    }
  }
}
// Gradient registration for the CachedOp node: creates a single
// _backward_CachedOp node whose inputs are, in order, the required head
// gradients, forward inputs, and forward outputs (exactly the dependency
// lists computed in the constructor). Aux states receive _NoGradient.
std::vector<nnvm::NodeEntry> Imperative::CachedOp::Gradient(
    const nnvm::NodePtr& node,
    const std::vector<nnvm::NodeEntry>& ograds) {
  using namespace nnvm;
  static const auto _backward_CachedOp = Op::Get("_backward_CachedOp");
  static const auto _NoGrad = Op::Get("_NoGradient");
  auto p = Node::Create();
  p->attrs.op = _backward_CachedOp;
  p->attrs.name = node->attrs.name + "_backward";
  p->attrs.parsed = node->attrs.parsed;
  p->control_deps.push_back(node);
  // Input order must match what the backward implementation expects:
  // ograds first, then saved forward inputs, then saved forward outputs.
  p->inputs.reserve(bwd_ograd_dep_.size() + bwd_in_dep_.size() + bwd_out_dep_.size());
  for (auto i : bwd_ograd_dep_) p->inputs.push_back(ograds[i]);
  for (auto i : bwd_in_dep_) p->inputs.push_back(node->inputs[i]);
  for (auto i : bwd_out_dep_) p->inputs.emplace_back(NodeEntry{node, i, 0});
  std::vector<NodeEntry> ret;
  ret.reserve(num_inputs());
  const auto& auxs = mutable_input_nodes();
  if (auxs.size()) {
    // Mutable (aux) inputs get a NoGradient placeholder; the backward node's
    // k-th output corresponds to the k-th non-aux input.
    auto nop = Node::Create();
    nop->attrs.op = _NoGrad;
    nop->attrs.name = "NoGradient";
    uint32_t k = 0;
    for (const auto& i : fwd_graph_.indexed_graph().input_nodes()) {
      if (auxs.count(i)) {
        ret.emplace_back(NodeEntry{nop, 0, 0});
      } else {
        ret.emplace_back(NodeEntry{p, k++, 0});
      }
    }
  } else {
    for (uint32_t i = 0; i < num_inputs(); ++i) ret.emplace_back(NodeEntry{p, i, 0});
  }
  return ret;
}
// Returns the forward graph specialized for the given inputs: runs
// shape/dtype/storage-type inference and (re)computes the memory plan.
// The plan is cached on the graph ("forward_mem_plan" or "full_mem_plan"
// depending on whether gradients are being recorded) and invalidated when
// any inferred attribute no longer matches. Holds mutex_ for the duration.
nnvm::Graph Imperative::CachedOp::GetForwardGraph(
    const bool recording, const std::vector<NDArray*>& inputs) {
  using namespace nnvm;
  using namespace imperative;
  std::lock_guard<std::mutex> lock(mutex_);
  CHECK_EQ(inputs.size(), num_inputs());
  nnvm::Graph& g = fwd_graph_;
  // Gather per-input shape/dtype/storage-type for inference.
  ShapeVector shape_inputs;
  DTypeVector dtype_inputs;
  StorageTypeVector storage_type_inputs;
  shape_inputs.reserve(inputs.size());
  dtype_inputs.reserve(inputs.size());
  storage_type_inputs.reserve(inputs.size());
  for (uint32_t i = 0; i < inputs.size(); ++i) {
    shape_inputs.emplace_back(inputs[i]->shape());
    dtype_inputs.emplace_back(inputs[i]->dtype());
    storage_type_inputs.emplace_back(inputs[i]->storage_type());
  }
  // match stays true only if all three inferences agree with the cached attrs.
  bool match = true;
  match &= CheckAndInferShape(&g, std::move(shape_inputs), true);
  match &= CheckAndInferType(&g, std::move(dtype_inputs), true);
  // Device mask is taken from the first input's context for every node.
  exec::DevMaskVector dev_mask(g.indexed_graph().num_nodes(), inputs[0]->ctx().dev_mask());
  match &= CheckAndInferStorageType(&g, std::move(dev_mask),
                                    std::move(storage_type_inputs), true);
  if (!match) {
    // Inputs changed: both cached memory plans are stale.
    g.attrs.erase("forward_mem_plan");
    g.attrs.erase("full_mem_plan");
  } else if (g.attrs.count(recording ? "full_mem_plan" : "forward_mem_plan")) {
    // Cache hit for the mode we need.
    return g;
  }
  // Rebuild the memory plan. Graph inputs use external storage; non-default
  // (sparse) entries are allocated dynamically and excluded from planning.
  const auto& idx = g.indexed_graph();
  StorageVector storage(idx.num_node_entries(), exec::kBadStorageID);
  for (const auto i : idx.input_nodes()) storage[idx.entry_id(i, 0)] = exec::kExternalStorageID;
  const auto& stypes = g.GetAttr<StorageTypeVector>("storage_type");
  CHECK_EQ(stypes.size(), storage.size());
  for (size_t i = 0; i < stypes.size(); i++) {
    if (stypes[i] != kDefaultStorage)
      storage[i] = exec::kDynamicStorageID;
  }
  // Ref counts differ between forward-only and recording mode, so each mode
  // gets its own plan.
  auto mem_plan = PlanMemory(
      &g, std::move(storage), g.GetAttr<std::vector<uint32_t> >(
          recording ? "full_ref_count" : "forward_ref_count"));
  g.attrs[recording ? "full_mem_plan" : "forward_mem_plan"] =
      std::make_shared<dmlc::any>(std::move(mem_plan));
  return g;
}
// Returns the full (forward + backward) graph prepared for a backward pass:
// rebuilds the output list when the set of requested gradients changes,
// refreshes the backward-input -> entry-id mapping, computes backward ref
// counts, runs inference restricted to the backward node/entry range, and
// caches a "backward_mem_plan" for the backward region only.
nnvm::Graph Imperative::CachedOp::GetBackwardGraph(
    const OpStatePtr& op_state,
    const std::vector<OpReqType>& reqs,
    const std::vector<NDArray*>& inputs) {
  using namespace nnvm;
  using namespace imperative;
  std::lock_guard<std::mutex> lock(mutex_);
  nnvm::Graph& g = full_graph_;
  auto& state = op_state.get_state<CachedOpState>();

  // Detect whether the kNullOp pattern of `reqs` differs from the cached one.
  bool req_match = true;
  for (size_t i = 0; i < reqs.size(); ++i) {
    if (curr_grad_req_[i] != (reqs[i] != kNullOp)) {
      curr_grad_req_[i] = reqs[i] != kNullOp;
      req_match = false;
    }
  }
  if (!req_match) {
    // Rebuild: outputs are all forward outputs followed by only the gradient
    // outputs that are actually requested.
    g = nnvm::Graph();
    g.outputs = fwd_graph_.outputs;
    for (size_t i = 0; i < grad_graph_.outputs.size(); ++i) {
      if (curr_grad_req_[i]) g.outputs.emplace_back(grad_graph_.outputs[i]);
    }
    bwd_input_eid_.clear();
  }

  const auto& idx = g.indexed_graph();

  if (bwd_input_eid_.size() != inputs.size()) {
    // Map each backward input (same order as Gradient() built them:
    // ograds, then forward inputs, then forward outputs) to its entry id.
    bwd_input_eid_.clear();
    for (const auto& i : bwd_ograd_dep_) {
      auto eid = idx.entry_id(ograd_entries_[i]);
      bwd_input_eid_.push_back(eid);
    }
    for (const auto& i : bwd_in_dep_) {
      auto eid = idx.entry_id(idx.input_nodes()[i], 0);
      bwd_input_eid_.push_back(eid);
    }
    for (const auto& i : bwd_out_dep_) {
      auto eid = idx.entry_id(idx.outputs()[i]);
      bwd_input_eid_.push_back(eid);
    }
    CHECK_EQ(inputs.size(), bwd_input_eid_.size());
  }

  size_t num_forward_nodes = fwd_graph_.indexed_graph().num_nodes();
  size_t num_forward_entries = fwd_graph_.indexed_graph().num_node_entries();

  if (!g.attrs.count("backward_ref_count")) {
    // Reference counts for backward-only nodes, plus the backward inputs and
    // graph outputs (computed once per rebuilt graph).
    std::vector<uint32_t> ref_count(idx.num_node_entries(), 0);
    for (size_t i = num_forward_nodes; i < idx.num_nodes(); ++i) {
      for (const auto& j : idx[i].inputs) ++ref_count[idx.entry_id(j)];
    }
    for (size_t i = 0; i < inputs.size(); ++i) ++ref_count[bwd_input_eid_[i]];
    for (const auto& i : idx.outputs()) ++ref_count[idx.entry_id(i)];
    g.attrs["backward_ref_count"] = std::make_shared<dmlc::any>(std::move(ref_count));
  }

  // Seed known metadata: forward entries come from the saved forward buffers,
  // backward inputs from the concrete arrays passed by the caller.
  ShapeVector shapes(idx.num_node_entries(), TShape());
  DTypeVector dtypes(idx.num_node_entries(), -1);
  StorageTypeVector stypes(idx.num_node_entries(), -1);
  for (size_t i = 0; i < num_forward_entries; ++i) {
    shapes[i] = state.buff[i].shape();
    dtypes[i] = state.buff[i].dtype();
    stypes[i] = state.buff[i].storage_type();
  }
  for (size_t i = 0; i < inputs.size(); ++i) {
    shapes[bwd_input_eid_[i]] = inputs[i]->shape();
    dtypes[bwd_input_eid_[i]] = inputs[i]->dtype();
    stypes[bwd_input_eid_[i]] = inputs[i]->storage_type();
  }

  // Only infer over the backward portion of the graph.
  std::pair<uint32_t, uint32_t> node_range, entry_range;
  node_range = {num_forward_nodes, idx.num_nodes()};
  entry_range = {num_forward_entries, idx.num_node_entries()};

  bool match = true;
  match &= CheckAndInferShape(&g, std::move(shapes), false,
                              node_range, entry_range);
  match &= CheckAndInferType(&g, std::move(dtypes), false,
                             node_range, entry_range);
  exec::DevMaskVector dev_mask(idx.num_nodes(), inputs[0]->ctx().dev_mask());
  match &= CheckAndInferStorageType(&g, std::move(dev_mask), std::move(stypes),
                                    false, node_range, entry_range);

  if (!match) {
    g.attrs.erase("backward_mem_plan");
  } else if (g.attrs.count("backward_mem_plan")) {
    // Nothing changed and a plan exists: reuse it.
    return g;
  }

  // Forward entries, graph inputs and outputs are externally owned;
  // non-default storage types are allocated dynamically.
  StorageVector storage(idx.num_node_entries(), exec::kBadStorageID);
  for (size_t i = 0; i < num_forward_entries; ++i) storage[i] = exec::kExternalStorageID;
  for (const auto i : idx.input_nodes()) storage[idx.entry_id(i, 0)] = exec::kExternalStorageID;
  for (const auto i : idx.outputs()) storage[idx.entry_id(i)] = exec::kExternalStorageID;
  for (size_t i = 0; i < stypes.size(); i++) {
    if (stypes[i] != kDefaultStorage)
      storage[i] = exec::kDynamicStorageID;
  }

  // Plan memory for the backward node/entry range only and cache the plan.
  auto mem_plan = PlanMemory(
      &g, std::move(storage), g.GetAttr<std::vector<uint32_t> >("backward_ref_count"),
      {num_forward_nodes, idx.num_nodes()}, {num_forward_entries, idx.num_node_entries()});
  g.attrs["backward_mem_plan"] = std::make_shared<dmlc::any>(std::move(mem_plan));

  return g;
}
// Executes the cached forward graph on `inputs`, writing results into
// `outputs`. Creates a fresh CachedOpState holding per-entry buffer NDArrays,
// aliases caller-owned inputs/outputs into the entry array, allocates the
// remaining entries from the cached memory plan, then runs the graph. When
// autograd recording is on (and the op is not inlined), records a single
// _CachedOp node instead of the individual ops.
void Imperative::CachedOp::Forward(
    const std::shared_ptr<CachedOp>& op_ptr,
    const std::vector<NDArray*>& inputs,
    const std::vector<NDArray*>& outputs) {
  using namespace nnvm;
  using namespace imperative;
  static const auto cached_op = nnvm::Op::Get("_CachedOp");

  // Initialize
  bool recording = Imperative::Get()->is_recording();
  nnvm::Graph g = GetForwardGraph(recording, inputs);
  const auto& idx = g.indexed_graph();
  size_t num_inputs = idx.input_nodes().size();

  CHECK_EQ(num_inputs, inputs.size())
      << "CachedOp requires " << num_inputs << " but got " << inputs.size();

  // All inputs must share one context; report the offending pair by name.
  Context default_ctx = inputs[0]->ctx();
  for (size_t i = 0; i < inputs.size(); ++i) {
    CHECK_EQ(inputs[i]->ctx(), default_ctx)
        << "CachedOp requires all inputs to live on the same context. But "
        << idx[idx.input_nodes()[0]].source->attrs.name << " is on " << default_ctx
        << " while " << idx[idx.input_nodes()[i]].source->attrs.name << " is on "
        << inputs[i]->ctx();
  }

  auto op_state_ptr = OpStatePtr::Create<CachedOpState>();
  auto& cached_op_state = op_state_ptr.get_state<CachedOpState>();
  auto& buff = cached_op_state.buff;
  auto& states = cached_op_state.states;

  // Allocate entries
  states.resize(idx.num_nodes());
  buff.resize(idx.num_node_entries());
  // NOTE(review): reserve() after resize() to the same size is a no-op;
  // presumably left over from an earlier revision.
  states.reserve(idx.num_nodes());
  // `arrays` aliases the state buffers, with caller-owned arrays swapped in
  // for the graph's input and output entries.
  std::vector<NDArray*> arrays;
  arrays.reserve(buff.size());
  for (size_t i = 0; i < buff.size(); ++i) arrays.push_back(&buff[i]);
  for (size_t i = 0; i < num_inputs; ++i) {
    arrays[idx.entry_id(idx.input_nodes()[i], 0)] = inputs[i];
  }
  for (size_t i = 0; i < idx.outputs().size(); ++i) {
    auto eid = idx.entry_id(idx.outputs()[i]);
    // Preserve an already-materialized output entry by detaching it into the
    // caller's output before redirecting the entry to the caller's array.
    if (!arrays[eid]->is_none()) *outputs[i] = arrays[eid]->Detach();
    arrays[eid] = outputs[i];
  }

  // Allocate NDArrays
  std::vector<uint32_t> ref_count = g.GetAttr<std::vector<uint32_t> >(
      recording ? "full_ref_count" : "forward_ref_count");

  // Entries nobody reads get kNullOp so no memory is allocated for them.
  std::vector<OpReqType> array_reqs(arrays.size(), kWriteTo);
  for (size_t i = 0; i < idx.num_node_entries(); ++i) {
    if (ref_count[i] == 0) array_reqs[i] = kNullOp;
  }

  const auto& mem_plan = g.GetAttr<MemoryPlanVector >(
      recording ? "full_mem_plan" : "forward_mem_plan");
  AllocateMemory(g, idx, default_ctx, 0, idx.num_node_entries(),
                 mem_plan, arrays, &array_reqs);

  const auto& dispatch_modes = g.GetAttr<DispatchModeVector>("dispatch_mode");

  // Suppress recording of the individual ops; the whole call is recorded as
  // one _CachedOp node below (unless inlined).
  if (recording && !inlining_) Imperative::Get()->set_is_recording(false);
  int prev_bulk_size = Engine::Get()->set_bulk_size(param_.forward_bulk_size);

  Imperative::Get()->RunGraph(
      false, idx, arrays, 0, idx.num_nodes(), std::move(array_reqs),
      std::move(ref_count), &states, dispatch_modes);

  Engine::Get()->set_bulk_size(prev_bulk_size);
  Imperative::Get()->set_is_recording(recording);

  // Copy metadata of swapped-in (caller-owned) entries back into the state
  // buffers so the backward pass can seed inference from them.
  for (size_t i = 0; i < idx.num_node_entries(); ++i) {
    if (arrays[i] == &buff[i]) continue;
    buff[i].shape_ = arrays[i]->shape_;
    buff[i].dtype_ = arrays[i]->dtype_;
    buff[i].storage_type_ = arrays[i]->storage_type_;
  }

  if (recording && !inlining_) {
    nnvm::NodeAttrs attrs;
    attrs.op = cached_op;
    attrs.name = "_cachedop";
    attrs.parsed = op_ptr;
    Imperative::Get()->RecordOp(
        std::move(attrs), inputs, outputs, op_state_ptr,
        &save_inputs(), &save_outputs());
  }
}
// Executes the backward portion of the cached graph. `inputs` are the
// backward node's inputs (ograds + saved forward deps, in the order fixed by
// Gradient()); `reqs`/`outputs` are the gradient write requests and arrays.
// Higher-order gradients are not supported, so recording must be off.
void Imperative::CachedOp::Backward(
    const bool retain_graph,
    const OpStatePtr& state,
    const std::vector<NDArray*>& inputs,
    const std::vector<OpReqType>& reqs,
    const std::vector<NDArray*>& outputs) {
  using namespace nnvm;
  using namespace imperative;
  CHECK(!Imperative::Get()->is_recording())
      << "CachedOp does not support higher order gradients. "
      << "If you want to do backward with create_graph=True please "
      << "do not use hybridize.";

  // Initialize
  nnvm::Graph g = GetBackwardGraph(state, reqs, inputs);
  const auto& idx = g.indexed_graph();

  auto& cached_op_state = state.get_state<CachedOpState>();
  auto& buff = cached_op_state.buff;
  auto& states = cached_op_state.states;

  size_t num_forward_outputs = fwd_graph_.outputs.size();
  size_t num_forward_nodes = fwd_graph_.indexed_graph().num_nodes();
  size_t num_forward_entries = fwd_graph_.indexed_graph().num_node_entries();
  // Grow the state buffer to cover the backward entries as well.
  buff.resize(idx.num_node_entries());
  std::vector<NDArray*> arrays;
  arrays.reserve(buff.size());
  for (size_t i = 0; i < buff.size(); ++i) arrays.push_back(&buff[i]);
  // Splice the caller-supplied backward inputs into their entry slots.
  for (size_t i = 0; i < inputs.size(); ++i) {
    arrays[bwd_input_eid_[i]] = inputs[i];
  }
  // Gradient outputs follow the forward outputs in idx.outputs(); only the
  // requested (non-kNullOp) gradients are present in the graph, hence `j`.
  for (size_t i = 0, j = num_forward_outputs; i < reqs.size(); ++i) {
    if (reqs[i] == kNullOp) continue;
    arrays[idx.entry_id(idx.outputs()[j++])] = outputs[i];
  }

  // Allocate NDArrays
  auto ref_count = g.GetAttr<std::vector<uint32_t> >("backward_ref_count");
  if (retain_graph) {
    // Keep all forward entries alive so backward can run again.
    for (size_t i = 0; i < num_forward_entries; ++i) ++ref_count[i];
  }

  std::vector<OpReqType> array_reqs(arrays.size(), kWriteTo);
  for (size_t i = num_forward_entries; i < idx.num_node_entries(); ++i) {
    if (ref_count[i] == 0) array_reqs[i] = kNullOp;
  }

  Context default_ctx = outputs[0]->ctx();
  const auto& mem_plan = g.GetAttr<MemoryPlanVector >("backward_mem_plan");
  // Only the backward entry range needs fresh allocation.
  AllocateMemory(g, idx, default_ctx, num_forward_entries, idx.num_node_entries(),
                 mem_plan, arrays, &array_reqs);

  const auto& dispatch_modes = g.GetAttr<DispatchModeVector>("dispatch_mode");

  int prev_bulk_size = Engine::Get()->set_bulk_size(param_.backward_bulk_size);

  // Run only the backward nodes [num_forward_nodes, num_nodes).
  Imperative::Get()->RunGraph(
      retain_graph, idx, arrays, num_forward_nodes, idx.num_nodes(),
      std::move(array_reqs), std::move(ref_count), &states, dispatch_modes);

  Engine::Get()->set_bulk_size(prev_bulk_size);

  if (retain_graph) {
    // Drop backward scratch entries but keep the forward ones.
    buff.resize(num_forward_entries);
  } else {
    buff.clear();
    states.clear();
  }
}
// Registers the _CachedOp operator. Input/output arity and the gradient
// construction are delegated to the CachedOp instance stored in attrs.parsed.
NNVM_REGISTER_OP(_CachedOp)
.set_num_inputs([](const NodeAttrs& attrs) {
    const CachedOpPtr& op = nnvm::get<CachedOpPtr>(attrs.parsed);
    return op->num_inputs();
  })
.set_num_outputs([](const NodeAttrs& attrs) {
    const CachedOpPtr& op = nnvm::get<CachedOpPtr>(attrs.parsed);
    return op->num_outputs();
  })
.set_attr<nnvm::FGradient>("FGradient",
  [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
    const CachedOpPtr& op = nnvm::get<CachedOpPtr>(n->attrs.parsed);
    return op->Gradient(n, ograds);
  });
// Registers the backward counterpart. Outputs are one gradient per forward
// input, excluding mutable (auxiliary) inputs, which receive no gradient.
NNVM_REGISTER_OP(_backward_CachedOp)
.set_num_inputs([](const NodeAttrs& attrs){
    const CachedOpPtr& op = nnvm::get<CachedOpPtr>(attrs.parsed);
    return op->num_backward_inputs();
  })
.set_num_outputs([](const NodeAttrs& attrs){
    const CachedOpPtr& op = nnvm::get<CachedOpPtr>(attrs.parsed);
    return op->num_inputs() - op->mutable_input_nodes().size();
  })
.set_attr<bool>("TIsLayerOpBackward", true)
.set_attr<bool>("TIsBackward", true);
} // namespace mxnet
| apache-2.0 |
spring-projects/spring-boot | spring-boot-tests/spring-boot-integration-tests/spring-boot-loader-tests/spring-boot-loader-tests-app/src/main/java/org/springframework/boot/loaderapp/LoaderTestApplication.java | 2228 | /*
* Copyright 2012-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.loaderapp;
import java.io.File;
import java.net.JarURLConnection;
import java.net.URL;
import java.util.Arrays;
import jakarta.servlet.ServletContext;
import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.util.FileCopyUtils;
@SpringBootApplication
public class LoaderTestApplication {

	/**
	 * Runner that probes how a nested-jar resource behaves at runtime: prints
	 * the backing jar file, notes whether it was unpacked to the temp
	 * directory, and compares the bytes read through the servlet resource URL
	 * with the bytes read through a freshly constructed URL.
	 */
	@Bean
	public CommandLineRunner commandLineRunner(ServletContext servletContext) {
		return (args) -> {
			File tmpDir = new File(System.getProperty("java.io.tmpdir"));
			URL resourceUrl = servletContext.getResource("webjars/jquery/3.5.0/jquery.js");
			JarURLConnection jarConnection = (JarURLConnection) resourceUrl.openConnection();
			String backingJar = jarConnection.getJarFile().getName();
			System.out.println(">>>>> jar file " + backingJar);
			boolean unpackedToTemp = backingJar.contains(tmpDir.getAbsolutePath());
			if (unpackedToTemp) {
				System.out.println(">>>>> jar written to temp");
			}
			byte[] viaResource = FileCopyUtils.copyToByteArray(resourceUrl.openStream());
			URL directUrl = new URL(resourceUrl.toExternalForm());
			byte[] viaDirect = FileCopyUtils.copyToByteArray(directUrl.openStream());
			String message;
			if (Arrays.equals(viaResource, viaDirect)) {
				message = viaDirect.length + " BYTES";
			}
			else {
				message = "NO MATCH";
			}
			System.out.println(">>>>> " + message + " from " + resourceUrl);
		};
	}

	public static void main(String[] args) {
		SpringApplication.run(LoaderTestApplication.class, args).stop();
	}

}
| apache-2.0 |
paulstapleton/flowable-engine | modules/flowable-engine-common/src/main/java/org/flowable/common/engine/impl/javax/el/ValueExpression.java | 8770 | /*
* Copyright 2006-2009 Odysseus Software GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.common.engine.impl.javax.el;
/**
* An Expression that can get or set a value.
* <p>
* In previous incarnations of this API, expressions could only be read. ValueExpression objects can
* now be used both to retrieve a value and to set a value. Expressions that can have a value set on
* them are referred to as l-value expressions. Those that cannot are referred to as r-value
* expressions. Not all r-value expressions can be used as l-value expressions (e.g. "${1+1}" or
* "${firstName} ${lastName}"). See the EL Specification for details. Expressions that cannot be
* used as l-values must always return true from isReadOnly().
* </p>
* <p>
* The {@link ExpressionFactory#createValueExpression(ELContext, String, Class)} method can be used
* to parse an expression string and return a concrete instance of ValueExpression that encapsulates
* the parsed expression. The {@link FunctionMapper} is used at parse time, not evaluation time, so
* one is not needed to evaluate an expression using this class. However, the {@link ELContext} is
* needed at evaluation time.
* </p>
* <p>
* The {@link #getValue(ELContext)}, {@link #setValue(ELContext, Object)},
* {@link #isReadOnly(ELContext)}, {@link #getType(ELContext)} and
* {@link #getValueReference(ELContext)} methods will evaluate the expression each time they are
* called. The {@link ELResolver} in the ELContext is used to resolve the top-level variables and to
* determine the behavior of the . and [] operators. For any of the five methods, the
* {@link ELResolver#getValue(ELContext, Object, Object)} method is used to resolve all properties
* up to but excluding the last one. This provides the base object. For all methods other than the
* {@link #getValueReference(ELContext)} method, at the last resolution, the ValueExpression will
* call the corresponding {@link ELResolver#getValue(ELContext, Object, Object)},
* {@link ELResolver#setValue(ELContext, Object, Object, Object)},
* {@link ELResolver#isReadOnly(ELContext, Object, Object)} or
* {@link ELResolver#getType(ELContext, Object, Object)} method, depending on which was called on
* the ValueExpression. For the {@link #getValueReference(ELContext)} method, the (base, property)
* is not resolved by the ELResolver, but an instance of {@link ValueReference} is created to
* encapsulate this (base, property), and returned.
* </p>
* <p>
* See the notes about comparison, serialization and immutability in the {@link Expression}
* javadocs.
* </p>
*
* @see ELResolver
* @see Expression
* @see ExpressionFactory
*/
public abstract class ValueExpression extends Expression {

	private static final long serialVersionUID = 1L;

	/**
	 * Returns the type that evaluation results are coerced to, i.e. the
	 * {@code expectedType} supplied to
	 * {@link ExpressionFactory#createValueExpression(ELContext, String, Class)}
	 * when this expression was created.
	 *
	 * @return the expected result type of this expression.
	 */
	public abstract Class<?> getExpectedType();

	/**
	 * Evaluates this expression against {@code context} and returns the most
	 * general type accepted by a subsequent
	 * {@link #setValue(ELContext, Object)} call. Note that this is not
	 * necessarily {@code getValue().getClass()} — e.g. for an array element it
	 * is the array's component type.
	 *
	 * @param context the evaluation context.
	 * @return the most general acceptable type; otherwise undefined.
	 * @throws NullPointerException if {@code context} is {@code null}.
	 * @throws PropertyNotFoundException if a variable or property in the
	 *         expression does not exist or is not readable.
	 * @throws ELException if resolution fails; the underlying exception is
	 *         available as the cause, if any.
	 */
	public abstract Class<?> getType(ELContext context);

	/**
	 * Evaluates this expression against {@code context} and returns the
	 * result, coerced to {@link #getExpectedType()}.
	 *
	 * @param context the evaluation context.
	 * @return the evaluation result.
	 * @throws NullPointerException if {@code context} is {@code null}.
	 * @throws PropertyNotFoundException if a variable or property in the
	 *         expression does not exist or is not readable.
	 * @throws ELException if resolution fails; the underlying exception is
	 *         available as the cause, if any.
	 */
	public abstract Object getValue(ELContext context);

	/**
	 * Reports whether {@link #setValue(ELContext, Object)} is guaranteed to
	 * fail for this expression under {@code context} (i.e. the expression is
	 * an r-value only).
	 *
	 * @param context the evaluation context.
	 * @return {@code true} if the expression is read-only, {@code false}
	 *         otherwise.
	 * @throws NullPointerException if {@code context} is {@code null}.
	 * @throws PropertyNotFoundException if a variable or property in the
	 *         expression does not exist or is not readable.
	 * @throws ELException if resolution fails; the underlying exception is
	 *         available as the cause, if any.
	 */
	public abstract boolean isReadOnly(ELContext context);

	/**
	 * Evaluates this expression against {@code context} as an l-value and
	 * assigns {@code value} to it.
	 *
	 * @param context the evaluation context.
	 * @param value the value to assign.
	 * @throws NullPointerException if {@code context} is {@code null}.
	 * @throws PropertyNotFoundException if a variable or property in the
	 *         expression does not exist or is not readable.
	 * @throws PropertyNotWritableException if the final variable or property
	 *         is not writable.
	 * @throws ELException if the assignment fails; the underlying exception is
	 *         available as the cause, if any.
	 */
	public abstract void setValue(ELContext context, Object value);

	/**
	 * Returns a {@link ValueReference} describing the (base, property) pair
	 * this expression resolves to, or {@code null} when the expression is not
	 * such a reference. The default implementation always returns
	 * {@code null}; subclasses supporting EL 2.2 semantics override it.
	 *
	 * @param context the evaluation context.
	 * @return the value reference, or {@code null}.
	 * @throws PropertyNotFoundException if a variable or property in the
	 *         expression does not exist or is not readable.
	 * @throws ELException if resolution fails; the underlying exception is
	 *         available as the cause, if any.
	 * @since 2.2
	 */
	public ValueReference getValueReference(ELContext context) {
		return null;
	}
}
| apache-2.0 |
shootstar/novatest | nova/api/openstack/compute/contrib/evacuate.py | 3519 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova import utils
# Module-level logger for this extension.
LOG = logging.getLogger(__name__)
# Authorization helper bound to the 'compute:evacuate' extension policy.
authorize = extensions.extension_authorizer('compute', 'evacuate')
class Controller(wsgi.Controller):
    """Controller for the server 'evacuate' admin action."""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    @wsgi.action('evacuate')
    def _evacuate(self, req, id, body):
        """
        Permit admins to evacuate a server from a failed host
        to a new one.
        """
        context = req.environ["nova.context"]
        authorize(context)

        # Validate and unpack the request body. Missing keys / wrong types
        # surface as TypeError/KeyError and are mapped to a 400 below.
        try:
            if len(body) != 1:
                # Use explanation= for consistency with the other 400s raised
                # in this method (and so the message reaches the client).
                raise exc.HTTPBadRequest(
                    explanation=_("Malformed request body"))
            evacuate_body = body["evacuate"]
            host = evacuate_body["host"]
            on_shared_storage = strutils.bool_from_string(
                evacuate_body["onSharedStorage"])

            password = None
            if 'adminPass' in evacuate_body:
                # check that if requested to evacuate server on shared storage
                # password not specified
                if on_shared_storage:
                    msg = _("admin password can't be changed on existing disk")
                    raise exc.HTTPBadRequest(explanation=msg)

                password = evacuate_body['adminPass']
            elif not on_shared_storage:
                password = utils.generate_password()
        except (TypeError, KeyError):
            msg = _("host and onSharedStorage must be specified.")
            raise exc.HTTPBadRequest(explanation=msg)

        # Look up the instance outside the evacuate try-block: previously a
        # failed lookup fell into the generic handler below, which then hit a
        # NameError because `instance` was never bound. Letting lookup errors
        # propagate also lets the wsgi layer map InstanceNotFound to a 404.
        instance = self.compute_api.get(context, id)
        try:
            self.compute_api.evacuate(context, instance, host,
                                      on_shared_storage, password)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'evacuate')
        except Exception as e:
            msg = _("Error in evacuate, %s") % e
            LOG.exception(msg, instance=instance)
            raise exc.HTTPBadRequest(explanation=msg)

        if password:
            return {'adminPass': password}
class Evacuate(extensions.ExtensionDescriptor):
    """Enables server evacuation."""

    name = "Evacuate"
    alias = "os-evacuate"
    namespace = "http://docs.openstack.org/compute/ext/evacuate/api/v2"
    updated = "2013-01-06T00:00:00+00:00"

    def get_controller_extensions(self):
        # Attach the evacuate action controller to the 'servers' resource.
        server_extension = extensions.ControllerExtension(
            self, 'servers', Controller())
        return [server_extension]
| apache-2.0 |
projectcypress/health-data-standards | test/unit/hqmf/1.0/nqf_0002_test.rb | 7276 | require_relative '../../../test_helper'
module HQMF1
class NQF0002Test < Minitest::Test
def setup
path = File.expand_path("../../../../fixtures/1.0/0002/0002.xml", __FILE__)
@hqmf_contents = File.open(path).read
end
def test_to_json_0002
hqmf = HQMF1::Document.new(@hqmf_contents)
json = hqmf.to_json
all_criteria = json[:data_criteria]
refute_nil all_criteria
all_criteria.length.must_equal 11
all_criteria.length.must_equal hqmf.all_data_criteria.length
["PATIENT_CHARACTERISTIC_BIRTH_DATE","ENCOUNTER_ENCOUNTER_AMBULATORY_INCLUDING_PEDIATRICS","LABORATORY_TEST_PERFORMED_GROUP_A_STREPTOCOCCUS_TEST",
"DIAGNOSIS_ACTIVE_PHARYNGITIS","MEDICATION_ACTIVE_PHARYNGITIS_ANTIBIOTICS","MEDICATION_DISPENSED_PHARYNGITIS_ANTIBIOTICS",
"MEDICATION_ORDER_PHARYNGITIS_ANTIBIOTICS"].each do |data_criteria_key|
refute_nil all_criteria[data_criteria_key]
end
check_data_criteria(all_criteria, "PATIENT_CHARACTERISTIC_BIRTH_DATE", {:id=>"E929E9CB-E788-47C3-A467-4AB732D0606C",:title=>"Patient Characteristic: birth date",:code_list_id=>"2.16.840.1.113883.3.560.100.4"})
check_data_criteria(all_criteria, "ENCOUNTER_ENCOUNTER_AMBULATORY_INCLUDING_PEDIATRICS", {:id=>"CA71325F-084A-4F5A-9214-229D925F836D",:title=>"Encounter: Encounter ambulatory including pediatrics",:code_list_id=>"2.16.840.1.113883.3.464.10001.231"})
check_data_criteria(all_criteria, "LABORATORY_TEST_PERFORMED_GROUP_A_STREPTOCOCCUS_TEST", {:id=>"AF9EE784-50CF-413F-9082-41A3330511A1",:title=>"Laboratory Test, Performed: Group A Streptococcus Test",:code_list_id=>"2.16.840.1.113883.3.464.10001.250",:status=>"performed"})
check_data_criteria(all_criteria, "DIAGNOSIS_ACTIVE_PHARYNGITIS", {:id=>"F33D9CE9-A084-4BA1-AB0E-1CBEF3934C88",:title=>"Diagnosis, Active: pharyngitis",:code_list_id=>"2.16.840.1.113883.3.464.10001.369",:status=>"active"})
check_data_criteria(all_criteria, "MEDICATION_ACTIVE_PHARYNGITIS_ANTIBIOTICS", {:id=>"6A3F288D-B565-4BE9-B6DD-DCDEAB2E6DD4",:title=>"Medication, Active: pharyngitis antibiotics",:code_list_id=>"2.16.840.1.113883.3.464.10001.373",:status=>"active"})
check_data_criteria(all_criteria, "MEDICATION_DISPENSED_PHARYNGITIS_ANTIBIOTICS", {:id=>"C5239EC0-B769-476F-B79E-6927ED720A03",:title=>"Medication, Dispensed: pharyngitis antibiotics",:code_list_id=>"2.16.840.1.113883.3.464.10001.373"})
check_data_criteria(all_criteria, "MEDICATION_ORDER_PHARYNGITIS_ANTIBIOTICS", {:id=>"B2EC949E-E9FD-4184-941E-F1F73470CB21",:title=>"Medication, Order: pharyngitis antibiotics",:code_list_id=>"2.16.840.1.113883.3.464.10001.373"})
logic = json[:logic]
refute_nil logic
["NUMER", "DENOM", "IPP"].each do |logic_key|
refute_nil logic[logic_key]
end
population_criteria = logic["NUMER"][:preconditions]
population_criteria.size.must_equal 2
refute_nil population_criteria[0][:comparison]
assert_nil(population_criteria[0][:preconditions])
assert_nil(population_criteria[0][:restrictions])
population_criteria[0][:comparison][:restrictions].size.must_equal 1
refute_nil population_criteria[1][:comparison]
assert_nil(population_criteria[1][:preconditions])
assert_nil(population_criteria[1][:restrictions])
population_criteria[1][:comparison][:restrictions].size.must_equal 1
population_criteria[0][:comparison][:restrictions][0][:type].must_equal "EAS"
assert population_criteria[0][:comparison][:restrictions][0][:negation]
population_criteria = logic["IPP"][:preconditions]
population_criteria.size.must_equal 2
refute_nil population_criteria[0][:comparison]
population_criteria[0][:conjunction].must_equal "AND"
assert_nil(population_criteria[0][:preconditions])
assert_nil(population_criteria[0][:restrictions])
population_criteria[0][:comparison][:data_criteria_id].must_equal "E929E9CB-E788-47C3-A467-4AB732D0606C"
population_criteria[0][:comparison][:title].must_equal "Patient Characteristic: birth date"
population_criteria[0][:comparison][:restrictions].size.must_equal 1
population_criteria[0][:comparison][:restrictions][0][:type].must_equal "SBS"
assert !population_criteria[0][:comparison][:restrictions][0][:negation]
population_criteria[0][:comparison][:restrictions][0][:target_id].must_equal "D578142D-F78F-4BF4-8194-82015DE21A7F"
population_criteria[0][:comparison][:restrictions][0][:range][:low][:value].must_equal "2"
population_criteria[0][:comparison][:restrictions][0][:range][:low][:unit].must_equal "a"
population_criteria[0][:comparison][:restrictions][0][:range][:low][:inclusive?].must_equal true
refute_nil population_criteria[1][:comparison]
population_criteria[1][:conjunction].must_equal "AND"
assert_nil(population_criteria[1][:preconditions])
assert_nil(population_criteria[1][:restrictions])
population_criteria[1][:comparison][:data_criteria_id].must_equal "E929E9CB-E788-47C3-A467-4AB732D0606C"
population_criteria[1][:comparison][:title].must_equal "Patient Characteristic: birth date"
population_criteria[1][:comparison][:restrictions].size.must_equal 1
population_criteria[1][:comparison][:restrictions][0][:type].must_equal "SBS"
population_criteria[1][:comparison][:restrictions][0][:target_id].must_equal "D578142D-F78F-4BF4-8194-82015DE21A7F"
population_criteria[1][:comparison][:restrictions][0][:range][:high][:value].must_equal "17"
population_criteria[1][:comparison][:restrictions][0][:range][:high][:unit].must_equal "a"
population_criteria[1][:comparison][:restrictions][0][:range][:high][:inclusive?].must_equal true
population_criteria = logic["DENOM"][:preconditions]
population_criteria.size.must_equal 4
population_criteria[0][:conjunction].must_equal "AND"
population_criteria[0][:comparison][:data_criteria_id].must_equal "CA71325F-084A-4F5A-9214-229D925F836D"
population_criteria[0][:comparison][:title].must_equal "Encounter: Encounter ambulatory including pediatrics"
population_criteria[0][:comparison][:restrictions].size.must_equal 1
population_criteria[0][:comparison][:restrictions][0][:type].must_equal "DURING"
population_criteria[0][:comparison][:restrictions][0][:target_id].must_equal "D578142D-F78F-4BF4-8194-82015DE21A7F"
assert !population_criteria[2][:negation]
population_criteria[2][:preconditions][0][:conjunction].must_equal "OR"
population_criteria[2][:preconditions][0][:comparison][:restrictions].size.must_equal 1
population_criteria[2][:preconditions][0][:comparison][:restrictions][0][:type].must_equal "DURING"
population_criteria[2][:preconditions][0][:comparison][:restrictions][0][:target_id].must_equal "D578142D-F78F-4BF4-8194-82015DE21A7F"
assert population_criteria[3][:negation]
end
private
# Asserts that the data criteria stored under +key+ matches the expected
# +values+ for the attributes we care about (id, title, code_list_id).
def check_data_criteria(all_criteria, key, values)
  criteria = all_criteria[key]
  [:id, :title, :code_list_id].each do |attribute|
    criteria[attribute].must_equal values[attribute]
  end
end
end
end
| apache-2.0 |
joewitt/incubator-nifi | nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/main/java/org/apache/nifi/provenance/WriteAheadProvenanceRepository.java | 14515 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.provenance;
import org.apache.nifi.authorization.Authorizer;
import org.apache.nifi.authorization.RequestAction;
import org.apache.nifi.authorization.resource.Authorizable;
import org.apache.nifi.authorization.user.NiFiUser;
import org.apache.nifi.events.EventReporter;
import org.apache.nifi.provenance.authorization.EventAuthorizer;
import org.apache.nifi.provenance.authorization.UserEventAuthorizer;
import org.apache.nifi.provenance.index.EventIndex;
import org.apache.nifi.provenance.index.lucene.LuceneEventIndex;
import org.apache.nifi.provenance.lineage.ComputeLineageSubmission;
import org.apache.nifi.provenance.lucene.IndexManager;
import org.apache.nifi.provenance.lucene.SimpleIndexManager;
import org.apache.nifi.provenance.search.Query;
import org.apache.nifi.provenance.search.QuerySubmission;
import org.apache.nifi.provenance.search.SearchableField;
import org.apache.nifi.provenance.serialization.RecordReaders;
import org.apache.nifi.provenance.serialization.StorageSummary;
import org.apache.nifi.provenance.store.EventFileManager;
import org.apache.nifi.provenance.store.EventStore;
import org.apache.nifi.provenance.store.PartitionedWriteAheadEventStore;
import org.apache.nifi.provenance.store.RecordReaderFactory;
import org.apache.nifi.provenance.store.RecordWriterFactory;
import org.apache.nifi.provenance.store.StorageResult;
import org.apache.nifi.provenance.toc.StandardTocWriter;
import org.apache.nifi.provenance.toc.TocUtil;
import org.apache.nifi.provenance.toc.TocWriter;
import org.apache.nifi.provenance.util.CloseableUtil;
import org.apache.nifi.reporting.Severity;
import org.apache.nifi.util.NiFiProperties;
import org.apache.nifi.util.file.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* <p>
* A Provenance Repository that is made up of two distinct concepts: An {@link EventStore Event Store} that is responsible
* for storing and accessing the events (this repository makes use of an Event Store that uses a backing Write-Ahead Log, hence the name
* WriteAheadProvenanceRepository) and an {@link EventIndex Event Index} that is responsible for indexing and searching those
* events.
* </p>
*
* <p>
* When a Provenance Event is added to the repository, it is first stored in the Event Store. The Event Store reports the location (namely, the
* Event Identifier) that it used to store the event. The stored event is then given to the Event Index along with its storage location. The index
* is then responsible for indexing the event in real-time. Once this has completed, the method returns.
* </p>
*
* <p>
* The Event Index that is used by this implementation currently is the {@link LuceneEventIndex}, which is powered by Apache Lucene. This index provides
* very high throughput. However, this high throughput is gained by avoiding continual 'commits' of the Index Writer. As a result, on restart, this Repository
* may take a minute or two to re-index some of the Provenance Events, as some of the Events may have been added to the index without committing the Index Writer.
* Given the substantial performance improvement gained by committing the Index Writer only periodically, this trade-off is generally well accepted.
* </p>
*
* <p>
* This Repositories supports the notion of 'partitions'. The repository can be configured to store data to one or more partitions. Each partition is typically
* stored on a separate physical partition on disk. As a result, this allows striping of data across multiple partitions in order to achieve linear scalability
* across disks for far greater performance.
* </p>
*/
public class WriteAheadProvenanceRepository implements ProvenanceRepository {
    private static final Logger logger = LoggerFactory.getLogger(WriteAheadProvenanceRepository.class);

    // Block size handed to the schema record writer; also recorded via the TOC writer.
    static final int BLOCK_SIZE = 1024 * 32;
    // Category used when reporting bulletins/events through the EventReporter.
    public static final String EVENT_CATEGORY = "Provenance Repository";

    private final RepositoryConfiguration config;

    // effectively final
    private EventStore eventStore;
    private EventIndex eventIndex;
    private EventReporter eventReporter;
    private Authorizer authorizer;
    private ProvenanceAuthorizableFactory resourceFactory;

    /**
     * This constructor exists solely for the use of the Java Service Loader mechanism and should not be used.
     */
    public WriteAheadProvenanceRepository() {
        config = null;
    }

    public WriteAheadProvenanceRepository(final NiFiProperties nifiProperties) {
        this(RepositoryConfiguration.create(nifiProperties));
    }

    public WriteAheadProvenanceRepository(final RepositoryConfiguration config) {
        this.config = config;
    }

    /**
     * Builds the record writer/reader factories used by the backing Event Store and
     * delegates the rest of the setup to {@link #init}.
     */
    @Override
    public synchronized void initialize(final EventReporter eventReporter, final Authorizer authorizer, final ProvenanceAuthorizableFactory resourceFactory,
        final IdentifierLookup idLookup) throws IOException {
        final RecordWriterFactory recordWriterFactory = (file, idGenerator, compressed, createToc) -> {
            final TocWriter tocWriter = createToc ? new StandardTocWriter(TocUtil.getTocFile(file), false, false) : null;
            return new EventIdFirstSchemaRecordWriter(file, idGenerator, tocWriter, compressed, BLOCK_SIZE, idLookup);
        };

        final EventFileManager fileManager = new EventFileManager();
        final RecordReaderFactory recordReaderFactory = (file, logs, maxChars) -> {
            // NOTE(review): the read lock is held only while the reader is constructed,
            // presumably to keep the file stable during open — confirm against EventFileManager.
            fileManager.obtainReadLock(file);
            try {
                return RecordReaders.newRecordReader(file, logs, maxChars);
            } finally {
                fileManager.releaseReadLock(file);
            }
        };

        init(recordWriterFactory, recordReaderFactory, eventReporter, authorizer, resourceFactory);
    }

    /**
     * Wires up the Event Store and Lucene Event Index, initializes both, and re-indexes
     * the latest events (those that may have been written without an index commit).
     */
    synchronized void init(RecordWriterFactory recordWriterFactory, RecordReaderFactory recordReaderFactory,
        final EventReporter eventReporter, final Authorizer authorizer,
        final ProvenanceAuthorizableFactory resourceFactory) throws IOException {
        final EventFileManager fileManager = new EventFileManager();
        eventStore = new PartitionedWriteAheadEventStore(config, recordWriterFactory, recordReaderFactory, eventReporter, fileManager);

        final IndexManager indexManager = new SimpleIndexManager(config);
        eventIndex = new LuceneEventIndex(config, indexManager, eventReporter);

        this.eventReporter = eventReporter;
        this.authorizer = authorizer;
        this.resourceFactory = resourceFactory;

        // Store must be initialized before the index, since the index reads from the store.
        eventStore.initialize();
        eventIndex.initialize(eventStore);

        // Best-effort: a failure here only means some recent events may be missing from queries.
        try {
            eventStore.reindexLatestEvents(eventIndex);
        } catch (final Exception e) {
            logger.error("Failed to re-index some of the Provenance Events. It is possible that some of the latest "
                + "events will not be available from the Provenance Repository when a query is issued.", e);
        }
    }

    @Override
    public ProvenanceEventBuilder eventBuilder() {
        return new StandardProvenanceEventRecord.Builder();
    }

    @Override
    public void registerEvent(final ProvenanceEventRecord event) {
        registerEvents(Collections.singleton(event));
    }

    /**
     * Stores the given events in the Event Store and then indexes them at the storage
     * locations that the store reports. Storage failures are reported, not rethrown.
     */
    @Override
    public void registerEvents(final Iterable<ProvenanceEventRecord> events) {
        final StorageResult storageResult;
        try {
            storageResult = eventStore.addEvents(events);
        } catch (final IOException e) {
            logger.error("Failed to write events to the Event Store", e);
            eventReporter.reportEvent(Severity.ERROR, EVENT_CATEGORY, "Failed to write Provenance Events to the repository. See logs for more details.");
            return;
        }

        final Map<ProvenanceEventRecord, StorageSummary> locationMap = storageResult.getStorageLocations();
        if (!locationMap.isEmpty()) {
            eventIndex.addEvents(locationMap);
        }
    }

    @Override
    public List<ProvenanceEventRecord> getEvents(final long firstRecordId, final int maxRecords) throws IOException {
        return eventStore.getEvents(firstRecordId, maxRecords);
    }

    /** Returns the event with the given id, or {@code null} if none exists. */
    @Override
    public ProvenanceEventRecord getEvent(final long id) throws IOException {
        return eventStore.getEvent(id).orElse(null);
    }

    @Override
    public Long getMaxEventId() {
        return eventStore.getMaxEventId();
    }

    @Override
    public void close() {
        CloseableUtil.closeQuietly(eventStore, eventIndex);
    }

    /**
     * Returns the event with the given id after verifying that the user is authorized
     * to read it; returns {@code null} if the event does not exist.
     */
    @Override
    public ProvenanceEventRecord getEvent(final long id, final NiFiUser user) throws IOException {
        final ProvenanceEventRecord event = getEvent(id);
        if (event == null) {
            return null;
        }

        authorize(event, user);
        return event;
    }

    // Throws if the user may not READ the provenance data for the event's component.
    // A null authorizer or user means authorization is not being enforced.
    private void authorize(final ProvenanceEventRecord event, final NiFiUser user) {
        if (authorizer == null || user == null) {
            return;
        }

        final Authorizable eventAuthorizable = resourceFactory.createProvenanceDataAuthorizable(event.getComponentId());
        eventAuthorizable.authorize(authorizer, RequestAction.READ, user);
    }

    /** Like {@link #getEvents(long, int)} but filters out events the user may not read. */
    @Override
    public List<ProvenanceEventRecord> getEvents(final long firstRecordId, final int maxRecords, final NiFiUser user) throws IOException {
        final List<ProvenanceEventRecord> events = getEvents(firstRecordId, maxRecords);
        return createEventAuthorizer(user).filterUnauthorizedEvents(events);
    }

    private EventAuthorizer createEventAuthorizer(final NiFiUser user) {
        return new UserEventAuthorizer(authorizer, resourceFactory, user);
    }

    @Override
    public ProvenanceEventRepository getProvenanceEventRepository() {
        return this;
    }

    // Query and lineage submissions are delegated to the Event Index, paired with an
    // authorizer so results are filtered per-user.
    @Override
    public QuerySubmission submitQuery(final Query query, final NiFiUser user) {
        return eventIndex.submitQuery(query, createEventAuthorizer(user), user == null ? null : user.getIdentity());
    }

    @Override
    public QuerySubmission retrieveQuerySubmission(final String queryIdentifier, final NiFiUser user) {
        return eventIndex.retrieveQuerySubmission(queryIdentifier, user);
    }

    @Override
    public ComputeLineageSubmission submitLineageComputation(final String flowFileUuid, final NiFiUser user) {
        return eventIndex.submitLineageComputation(flowFileUuid, user, createEventAuthorizer(user));
    }

    @Override
    public ComputeLineageSubmission submitLineageComputation(final long eventId, final NiFiUser user) {
        return eventIndex.submitLineageComputation(eventId, user, createEventAuthorizer(user));
    }

    @Override
    public ComputeLineageSubmission retrieveLineageSubmission(final String lineageIdentifier, final NiFiUser user) {
        return eventIndex.retrieveLineageSubmission(lineageIdentifier, user);
    }

    @Override
    public ComputeLineageSubmission submitExpandParents(final long eventId, final NiFiUser user) {
        return eventIndex.submitExpandParents(eventId, user, createEventAuthorizer(user));
    }

    @Override
    public ComputeLineageSubmission submitExpandChildren(final long eventId, final NiFiUser user) {
        return eventIndex.submitExpandChildren(eventId, user, createEventAuthorizer(user));
    }

    @Override
    public List<SearchableField> getSearchableFields() {
        return Collections.unmodifiableList(config.getSearchableFields());
    }

    @Override
    public List<SearchableField> getSearchableAttributes() {
        return Collections.unmodifiableList(config.getSearchableAttributes());
    }

    RepositoryConfiguration getConfig() {
        return this.config;
    }

    @Override
    public Set<String> getContainerNames() {
        return new HashSet<>(config.getStorageDirectories().keySet());
    }

    /**
     * Returns the total capacity of the partition backing the named container.
     *
     * @throws IOException if the OS reports a zero-byte partition
     * @throws IllegalArgumentException if no container with the given name is configured
     */
    @Override
    public long getContainerCapacity(final String containerName) throws IOException {
        Map<String, File> map = config.getStorageDirectories();
        File container = map.get(containerName);
        if(container != null) {
            long capacity = FileUtils.getContainerCapacity(container.toPath());
            if(capacity==0) {
                throw new IOException("System returned total space of the partition for " + containerName + " is zero byte. "
                    + "Nifi can not create a zero sized provenance repository.");
            }
            return capacity;
        } else {
            throw new IllegalArgumentException("There is no defined container with name " + containerName);
        }
    }

    /** Returns the file-store name for the container, or {@code null} if it cannot be determined. */
    @Override
    public String getContainerFileStoreName(final String containerName) {
        final Map<String, File> map = config.getStorageDirectories();
        final File container = map.get(containerName);
        if (container == null) {
            return null;
        }

        try {
            return Files.getFileStore(container.toPath()).name();
        } catch (IOException e) {
            return null;
        }
    }

    /**
     * Returns the usable (free) space of the partition backing the named container.
     *
     * @throws IllegalArgumentException if no container with the given name is configured
     */
    @Override
    public long getContainerUsableSpace(String containerName) throws IOException {
        Map<String, File> map = config.getStorageDirectories();
        File container = map.get(containerName);
        if(container != null) {
            return FileUtils.getContainerUsableSpace(container.toPath());
        } else {
            throw new IllegalArgumentException("There is no defined container with name " + containerName);
        }
    }
}
| apache-2.0 |
manipopopo/tensorflow | tensorflow/python/saved_model/builder_impl.py | 24919 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel builder implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.any_pb2 import Any
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.tf_export import tf_export
@tf_export("saved_model.builder.SavedModelBuilder")
class SavedModelBuilder(object):
  """Builds the `SavedModel` protocol buffer and saves variables and assets.

  The `SavedModelBuilder` class provides functionality to build a `SavedModel`
  protocol buffer. Specifically, this allows multiple meta graphs to be saved as
  part of a single language-neutral `SavedModel`, while sharing variables and
  assets.

  To build a SavedModel, the first meta graph must be saved with variables.
  Subsequent meta graphs will simply be saved with their graph definitions. If
  assets need to be saved and written or copied to disk, they can be provided
  when the meta graph def is added. If multiple meta graph defs are associated
  an asset of the same name, only the first version is retained.

  Each meta graph added to the SavedModel must be annotated with tags. The tags
  provide a means to identify the specific meta graph to load and restore, along
  with the shared set of variables and assets.

  Typical usage for the `SavedModelBuilder`:

  ```python
  ...
  builder = tf.saved_model.builder.SavedModelBuilder(export_dir)

  with tf.Session(graph=tf.Graph()) as sess:
    ...
    builder.add_meta_graph_and_variables(sess,
                                    ["foo-tag"],
                                    signature_def_map=foo_signatures,
                                    assets_collection=foo_assets)
  ...

  with tf.Session(graph=tf.Graph()) as sess:
    ...
    builder.add_meta_graph(["bar-tag", "baz-tag"])
  ...

  builder.save()
  ```
  """

  def __init__(self, export_dir):
    # In-memory SavedModel proto; meta graphs accumulate here until save().
    self._saved_model = saved_model_pb2.SavedModel()
    self._saved_model.saved_model_schema_version = (
        constants.SAVED_MODEL_SCHEMA_VERSION)

    self._export_dir = export_dir
    # Refuse to overwrite an existing export: the directory must not exist yet.
    if file_io.file_exists(export_dir):
      raise AssertionError(
          "Export directory already exists. Please specify a different export "
          "directory: %s" % export_dir)

    file_io.recursive_create_dir(self._export_dir)

    # Boolean to track whether variables and assets corresponding to the
    # SavedModel have been saved. Specifically, the first meta graph to be added
    # MUST use the add_meta_graph_and_variables() API. Subsequent add operations
    # on the SavedModel MUST use the add_meta_graph() API which does not save
    # weights.
    self._has_saved_variables = False

  def _save_and_write_assets(self, assets_collection_to_add=None):
    """Saves asset to the meta graph and writes asset files to disk.

    Args:
      assets_collection_to_add: The collection where the asset paths are setup.
    """
    asset_filename_map = _maybe_save_assets(assets_collection_to_add)

    # Return if there are no assets to write.
    if not asset_filename_map:
      tf_logging.info("No assets to write.")
      return

    assets_destination_dir = saved_model_utils.get_or_create_assets_dir(
        self._export_dir)

    # Copy each asset from source path to destination path.
    for asset_basename, asset_source_filepath in asset_filename_map.items():
      asset_destination_filepath = os.path.join(
          compat.as_bytes(assets_destination_dir),
          compat.as_bytes(asset_basename))

      # Only copy the asset file to the destination if it does not already
      # exist. This is to ensure that an asset with the same name defined as
      # part of multiple graphs is only copied the first time.
      if not file_io.file_exists(asset_destination_filepath):
        file_io.copy(asset_source_filepath, asset_destination_filepath)

    tf_logging.info("Assets written to: %s",
                    compat.as_text(assets_destination_dir))

  def _maybe_add_main_op(self, main_op):
    """Adds main op to the SavedModel.

    Args:
      main_op: Main op to run as part of graph initialization. If None, no
        main op will be added to the graph.

    Raises:
      TypeError: if main op is provided but is not of type `Operation`.
      ValueError: if the Graph already contains an init op.
    """
    if main_op is None:
      return

    if not isinstance(main_op, ops.Operation):
      raise TypeError("main_op needs to be an Operation: %r" % main_op)

    # Validate that no other init ops have been added to this graph already.
    # We check main_op and legacy_init_op for thoroughness and explicitness.
    for init_op_key in (constants.MAIN_OP_KEY, constants.LEGACY_INIT_OP_KEY):
      if ops.get_collection(init_op_key):
        raise ValueError(
            "Graph already contains one or more main ops under the "
            "collection {}.".format(init_op_key))

    ops.add_to_collection(constants.MAIN_OP_KEY, main_op)

  def _add_train_op(self, train_op):
    """Add train op to the SavedModel.

    Note that this functionality is in development, and liable to be
    moved elsewhere.

    Args:
      train_op: Op or group of ops that are used for training. These are
        stored as a collection with key TRAIN_OP_KEY, but not executed.

    Raises:
      TypeError if Train op is not of type `Operation`.
    """
    if train_op is not None:
      if (not isinstance(train_op, ops.Tensor) and
          not isinstance(train_op, ops.Operation)):
        raise TypeError("train_op needs to be a Tensor or Op: %r" % train_op)
      ops.add_to_collection(constants.TRAIN_OP_KEY, train_op)

  def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map):
    """Tags the meta graph def and adds it to the SavedModel.

    Tags the meta graph def with the supplied tags, adds signature defs to it if
    provided and appends the meta graph def to the SavedModel proto.

    Args:
      meta_graph_def: The meta graph def to add to the SavedModel.
      tags: The set of tags to annotate the meta graph def with.
      signature_def_map: The map of signature defs to be added to the meta graph
        def.
    """
    for tag in tags:
      meta_graph_def.meta_info_def.tags.append(tag)

    if signature_def_map is not None:
      for key in signature_def_map:
        meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key])

    proto_meta_graph_def = self._saved_model.meta_graphs.add()
    proto_meta_graph_def.CopyFrom(meta_graph_def)

  def _validate_tensor_info(self, tensor_info):
    """Validates the `TensorInfo` proto.

    Checks if the `encoding` (`name` or `coo_sparse`) and `dtype` fields exist
    and are non-empty.

    Args:
      tensor_info: `TensorInfo` protocol buffer to validate.

    Raises:
      AssertionError: If the `name` or `dtype` fields of the supplied
          `TensorInfo` proto are not populated.
    """
    if tensor_info is None:
      raise AssertionError(
          "All TensorInfo protos used in the SignatureDefs must have the name "
          "and dtype fields set.")
    if tensor_info.WhichOneof("encoding") is None:
      # TODO(soergel) validate each of the fields of coo_sparse
      raise AssertionError(
          "All TensorInfo protos used in the SignatureDefs must have one of "
          "the 'encoding' fields (e.g., name or coo_sparse) set: %s"
          % tensor_info)
    # NOTE(review): `is` relies on CPython small-int interning (DT_INVALID == 0);
    # `==` would be the more robust comparison here.
    if tensor_info.dtype is types_pb2.DT_INVALID:
      raise AssertionError(
          "All TensorInfo protos used in the SignatureDefs must have the dtype "
          "field set: %s" % tensor_info)

  def _validate_signature_def_map(self, signature_def_map):
    """Validates the `SignatureDef` entries in the signature def map.

    Validation of entries in the signature def map includes ensuring that the
    `name` and `dtype` fields of the TensorInfo protos of the `inputs` and
    `outputs` of each `SignatureDef` are populated.

    Args:
      signature_def_map: The map of signature defs to be validated.
    """
    if signature_def_map is not None:
      for signature_def_key in signature_def_map:
        signature_def = signature_def_map[signature_def_key]
        inputs = signature_def.inputs
        outputs = signature_def.outputs
        for inputs_key in inputs:
          self._validate_tensor_info(inputs[inputs_key])
        for outputs_key in outputs:
          self._validate_tensor_info(outputs[outputs_key])

  def _add_collections(
      self, assets_collection, main_op, train_op):
    """Add asset and op collections to be saved."""
    # Save asset files and write them to disk, if any.
    self._save_and_write_assets(assets_collection)

    self._maybe_add_main_op(main_op)

    self._add_train_op(train_op)

  def _maybe_create_saver(self, saver=None):
    """Creates a sharded saver if one does not already exist."""
    if not saver:
      # Initialize a saver to generate a sharded output for all saveables in the
      # current scope.
      saver = tf_saver.Saver(
          variables._all_saveable_objects(),  # pylint: disable=protected-access
          sharded=True,
          write_version=saver_pb2.SaverDef.V2,
          allow_empty=True)
    return saver

  @deprecated_args(None,
                   "Pass your op to the equivalent parameter main_op instead.",
                   "legacy_init_op")
  def add_meta_graph(self,
                     tags,
                     signature_def_map=None,
                     assets_collection=None,
                     legacy_init_op=None,
                     clear_devices=False,
                     main_op=None,
                     strip_default_attrs=False,
                     saver=None):
    # pylint: disable=line-too-long
    """Adds the current meta graph to the SavedModel.

    Creates a Saver in the current scope and uses the Saver to export the meta
    graph def. Invoking this API requires the `add_meta_graph_and_variables()`
    API to have been invoked before.

    Args:
      tags: The set of tags to annotate the meta graph def with.
      signature_def_map: The map of signature defs to be added to the meta graph
          def.
      assets_collection: Assets collection to be saved with SavedModel. Note
          that this collection should be a subset of the assets saved as part of
          the first meta graph in the SavedModel.
      legacy_init_op: Legacy support for op or group of ops to execute after the
          restore op upon a load. Deprecated; please use main_op instead.
      clear_devices: Set to true if the device info on the default graph should
          be cleared.
      main_op: Op or group of ops to execute when the graph is loaded. Note
          that when the main_op is specified it is run after the restore op at
          load-time.
      strip_default_attrs: Boolean. If `True`, default-valued attributes will be
        removed from the NodeDefs. For a detailed guide, see
        [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
      saver: An instance of tf.train.Saver that will be used to export the
        metagraph. If None, a sharded Saver that restores all variables will
        be used.

    Raises:
      AssertionError: If the variables for the SavedModel have not been saved
          yet, or if the graph already contains one or more legacy init ops.
    """
    # pylint: enable=line-too-long
    if not self._has_saved_variables:
      raise AssertionError(
          "Graph state including variables and assets has not been saved yet. "
          "Please invoke `add_meta_graph_and_variables()` first.")

    # Validate the signature def map to ensure all included TensorInfos are
    # properly populated.
    self._validate_signature_def_map(signature_def_map)

    # legacy_init_op is deprecated, and going away in TF 2.0.
    # Re-mapping to main_op, as treatment is identical regardless.
    main_op = main_op or legacy_init_op

    # Add assets and ops
    self._add_collections(assets_collection, main_op, None)

    saver = self._maybe_create_saver(saver)

    # The graph almost certainly previously contained at least one Saver, and
    # possibly several (e.g. one for loading a pretrained embedding, and another
    # for the model weights).  Removing the preexisting ones was the
    # motivation for the clear_extraneous_savers option, but it turns out that
    # there are edge cases where that option breaks the graph.  Until that is
    # resolved, we just leave the option set to False for now.
    # TODO(soergel): Reinstate clear_extraneous_savers=True when possible.
    meta_graph_def = saver.export_meta_graph(
        clear_devices=clear_devices, strip_default_attrs=strip_default_attrs)

    # Tag the meta graph def and add it to the SavedModel.
    self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)

  @deprecated_args(None,
                   "Pass your op to the equivalent parameter main_op instead.",
                   "legacy_init_op")
  def add_meta_graph_and_variables(self,
                                   sess,
                                   tags,
                                   signature_def_map=None,
                                   assets_collection=None,
                                   legacy_init_op=None,
                                   clear_devices=False,
                                   main_op=None,
                                   strip_default_attrs=False,
                                   saver=None):
    # pylint: disable=line-too-long
    """Adds the current meta graph to the SavedModel and saves variables.

    Creates a Saver to save the variables from the provided session. Exports the
    corresponding meta graph def. This function assumes that the variables to be
    saved have been initialized. For a given `SavedModelBuilder`, this API must
    be called exactly once and for the first meta graph to save. For subsequent
    meta graph defs to be added, the `add_meta_graph()` API must be used.

    Args:
      sess: The TensorFlow session from which to save the meta graph and
        variables.
      tags: The set of tags with which to save the meta graph.
      signature_def_map: The map of signature def map to add to the meta graph
        def.
      assets_collection: Assets collection to be saved with SavedModel.
      legacy_init_op: Legacy support for op or group of ops to execute after the
          restore op upon a load. Deprecated; please use main_op instead.
      clear_devices: Set to true if the device info on the default graph should
          be cleared.
      main_op: Op or group of ops to execute when the graph is loaded. Note
          that when the main_op is specified it is run after the restore op at
          load-time.
      strip_default_attrs: Boolean. If `True`, default-valued attributes will be
        removed from the NodeDefs. For a detailed guide, see
        [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
      saver: An instance of tf.train.Saver that will be used to export the
        metagraph and save variables. If None, a sharded Saver that restores
        all variables will be used.

    """
    # pylint: enable=line-too-long
    if self._has_saved_variables:
      raise AssertionError("Graph state including variables and assets has "
                           "already been saved. Please invoke "
                           "`add_meta_graph()` instead.")

    # Validate the signature def map to ensure all included TensorInfos are
    # properly populated.
    self._validate_signature_def_map(signature_def_map)

    # legacy_init_op is deprecated, and going away in TF 2.0.
    # Re-mapping to main_op, as treatment is identical regardless.
    main_op = main_op or legacy_init_op

    # Add assets and ops
    self._add_collections(assets_collection, main_op, None)

    saved_model_utils.get_or_create_variables_dir(self._export_dir)
    variables_path = saved_model_utils.get_variables_path(self._export_dir)

    saver = self._maybe_create_saver(saver)

    # Save the variables. Also, disable writing the checkpoint state proto. The
    # file is not used during SavedModel loading. In addition, since a
    # SavedModel can be copied or moved, this avoids the checkpoint state to
    # become outdated.
    saver.save(sess, variables_path, write_meta_graph=False, write_state=False)

    # Export the meta graph def.

    # The graph almost certainly previously contained at least one Saver, and
    # possibly several (e.g. one for loading a pretrained embedding, and another
    # for the model weights).  Removing the preexisting ones was the
    # motivation for the clear_extraneous_savers option, but it turns out that
    # there are edge cases where that option breaks the graph.  Until that is
    # resolved, we just leave the option set to False for now.
    # TODO(soergel): Reinstate clear_extraneous_savers=True when possible.
    meta_graph_def = saver.export_meta_graph(
        clear_devices=clear_devices, strip_default_attrs=strip_default_attrs)

    # Tag the meta graph def and add it to the SavedModel.
    self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)

    # Mark this instance of SavedModel as having saved variables, such that
    # subsequent attempts to save variables will fail.
    self._has_saved_variables = True

  def save(self, as_text=False):
    """Writes a `SavedModel` protocol buffer to disk.

    The function writes the SavedModel protocol buffer to the export directory
    in serialized format.

    Args:
      as_text: Writes the SavedModel protocol buffer in text format to disk.

    Returns:
      The path to which the SavedModel protocol buffer was written.
    """
    if not file_io.file_exists(self._export_dir):
      file_io.recursive_create_dir(self._export_dir)

    if as_text:
      path = os.path.join(
          compat.as_bytes(self._export_dir),
          compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
      file_io.write_string_to_file(path, str(self._saved_model))
    else:
      path = os.path.join(
          compat.as_bytes(self._export_dir),
          compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
      file_io.write_string_to_file(path, self._saved_model.SerializeToString())
    tf_logging.info("SavedModel written to: %s", compat.as_text(path))

    return path
def _maybe_save_assets(assets_collection_to_add=None):
  """Saves assets to the meta graph.

  Args:
    assets_collection_to_add: The collection where the asset paths are setup.

  Returns:
    A dict of asset basenames for saving to the original full path to the asset.

  Raises:
    ValueError: Indicating an invalid filepath tensor.
  """
  # Maps the basename chosen for the SavedModel to the asset's source path.
  asset_filename_map = {}

  if assets_collection_to_add is None:
    tf_logging.info("No assets to save.")
    return asset_filename_map

  # For each asset tensor, resolve its source path, pick a (possibly deduped)
  # basename, and record an `AssetFile` proto under `constants.ASSETS_KEY`.
  for asset_tensor in assets_collection_to_add:
    source_filepath = _asset_path_from_tensor(asset_tensor)
    if not source_filepath:
      raise ValueError("Invalid asset filepath tensor %s" % asset_tensor)

    chosen_filename = _get_asset_filename_to_add(
        source_filepath, asset_filename_map)

    # Register the asset in the graph even when it duplicates an already-added
    # file, so that the tensor reference still exists.
    _add_asset_to_collection(chosen_filename, asset_tensor)

    # When duplicates occur, the last source path wins; the file contents are
    # identical either way, so this does not change what gets copied.
    asset_filename_map[chosen_filename] = source_filepath

  tf_logging.info("Assets added to graph.")
  return asset_filename_map
def _get_asset_filename_to_add(asset_filepath, asset_filename_map):
  """Get a unique basename to add to the SavedModel if this file is unseen.

  Assets come from users as full paths, and we save them out to the
  SavedModel as basenames. In some cases, the basenames collide. Here,
  we dedupe asset basenames by first checking if the file is the same,
  and, if different, generate and return an index-suffixed basename
  that can be used to add the asset to the SavedModel.

  Args:
    asset_filepath: the full path to the asset that is being saved
    asset_filename_map: a dict of filenames used for saving the asset in
      the SavedModel to full paths from which the filenames were derived.

  Returns:
    Uniquified filename string if the file is not a duplicate, or the original
    filename if the file has already been seen and saved.
  """
  basename = os.path.basename(asset_filepath)
  existing_path = asset_filename_map.get(basename)
  # Unseen basename: safe to use as-is.
  if existing_path is None:
    return basename
  # Same file listed twice in the collection: nothing to dedupe.
  if existing_path == asset_filepath:
    return basename
  # Colliding basename but identical contents: reuse the existing name.
  if file_io.filecmp(asset_filepath, existing_path):
    return basename
  # Colliding basename with different contents: generate a suffixed name.
  return _get_unique_asset_filename(basename, asset_filename_map)
def _get_unique_asset_filename(asset_filename, asset_filename_map):
  """Returns a variant of `asset_filename` not already used as a map key.

  Appends an increasing numeric suffix ("name_1", "name_2", ...) until the
  candidate no longer collides with a key in `asset_filename_map`.
  """
  candidate = asset_filename
  suffix = 1
  while candidate in asset_filename_map:
    candidate = compat.as_bytes("_").join(
        [compat.as_bytes(asset_filename), compat.as_bytes(str(suffix))])
    suffix += 1
  return candidate
def _asset_path_from_tensor(path_tensor):
  """Returns the filepath value stored in constant `path_tensor`.

  Args:
    path_tensor: Tensor of a file-path.

  Returns:
    The string value i.e. path of the tensor, if valid.

  Raises:
    TypeError: if tensor does not match expected op type, dtype or value.
  """
  # Validate shape/type progressively: must be a constant string Tensor
  # holding exactly one value.
  if not isinstance(path_tensor, ops.Tensor):
    raise TypeError("Asset path tensor must be a Tensor.")
  if path_tensor.op.type != "Const":
    raise TypeError("Asset path tensor must be of type constant.")
  if path_tensor.dtype != dtypes.string:
    raise TypeError("Asset path tensor must be of dtype string.")
  path_values = path_tensor.op.get_attr("value").string_val
  if len(path_values) != 1:
    raise TypeError("Asset path tensor must be a scalar.")
  return path_values[0]
def _add_asset_to_collection(asset_filename, asset_tensor):
  """Builds an asset proto and adds it to the asset collection of the graph.

  Args:
    asset_filename: The filename of the asset to be added.
    asset_tensor: The asset tensor used to populate the tensor info of the
      asset proto.
  """
  file_def = meta_graph_pb2.AssetFileDef()
  file_def.filename = asset_filename
  file_def.tensor_info.name = asset_tensor.name

  # Collections hold `Any` protos, so pack the AssetFileDef before adding.
  packed = Any()
  packed.Pack(file_def)
  ops.add_to_collection(constants.ASSETS_KEY, packed)
| apache-2.0 |
minwoox/armeria | core/src/test/java/com/linecorp/armeria/common/DefaultRequestIdTest.java | 2148 | /*
* Copyright 2019 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.linecorp.armeria.common;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.jupiter.api.Test;
class DefaultRequestIdTest {

    @Test
    void basic() {
        // A random ID uses the default implementation: 16-char hex text,
        // 8-char short form, and toString() mirroring text().
        final RequestId generated = RequestId.random();
        assertThat(generated).isInstanceOf(DefaultRequestId.class);
        assertThat(generated.text()).hasSize(16);
        assertThat(generated.shortText()).hasSize(8);
        assertThat(generated.toString()).isEqualTo(generated.text());
    }

    @Test
    void textWithoutLeadingZero() {
        final RequestId noLeadingZero = RequestId.of(0x123456789ABCDEF0L);
        assertThat(noLeadingZero.text()).isEqualTo("123456789abcdef0");
        assertThat(noLeadingZero.shortText()).isEqualTo("12345678");
    }

    @Test
    void textWithLeadingZero() {
        // The leading zero must be kept in both the long and short forms.
        final RequestId leadingZero = RequestId.of(0x0FEDCBA987654321L);
        assertThat(leadingZero.text()).isEqualTo("0fedcba987654321");
        assertThat(leadingZero.shortText()).isEqualTo("0fedcba9");
    }

    @Test
    void cache() {
        // Repeated calls must return the same cached String instances.
        final RequestId cached = RequestId.random();
        final String fullForm = cached.text();
        assertThat(cached.text()).isSameAs(fullForm);
        final String shortForm = cached.shortText();
        assertThat(cached.shortText()).isSameAs(shortForm);
    }

    @Test
    void equality() {
        assertThat(new DefaultRequestId(1)).isEqualTo(new DefaultRequestId(1));
        assertThat(new DefaultRequestId(2)).isNotEqualTo(new DefaultRequestId(3));
    }

    @Test
    void hash() {
        // The hash code is derived directly from the underlying long value.
        assertThat(new DefaultRequestId(1).hashCode()).isEqualTo(1);
    }
}
| apache-2.0 |
ibuildthecloud/go-machine-service | vendor/github.com/rancher/go-rancher/v3/generated_public_endpoint.go | 3078 | package client
const (
PUBLIC_ENDPOINT_TYPE = "publicEndpoint"
)
type PublicEndpoint struct {
Resource
AgentIpAddress string `json:"agentIpAddress,omitempty" yaml:"agent_ip_address,omitempty"`
BindAll bool `json:"bindAll,omitempty" yaml:"bind_all,omitempty"`
BindIpAddress string `json:"bindIpAddress,omitempty" yaml:"bind_ip_address,omitempty"`
Fqdn string `json:"fqdn,omitempty" yaml:"fqdn,omitempty"`
HostId string `json:"hostId,omitempty" yaml:"host_id,omitempty"`
InstanceId string `json:"instanceId,omitempty" yaml:"instance_id,omitempty"`
IpAddress string `json:"ipAddress,omitempty" yaml:"ip_address,omitempty"`
PrivatePort int64 `json:"privatePort,omitempty" yaml:"private_port,omitempty"`
Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"`
PublicPort int64 `json:"publicPort,omitempty" yaml:"public_port,omitempty"`
ServiceId string `json:"serviceId,omitempty" yaml:"service_id,omitempty"`
}
type PublicEndpointCollection struct {
Collection
Data []PublicEndpoint `json:"data,omitempty"`
client *PublicEndpointClient
}
type PublicEndpointClient struct {
rancherClient *RancherClient
}
type PublicEndpointOperations interface {
List(opts *ListOpts) (*PublicEndpointCollection, error)
Create(opts *PublicEndpoint) (*PublicEndpoint, error)
Update(existing *PublicEndpoint, updates interface{}) (*PublicEndpoint, error)
ById(id string) (*PublicEndpoint, error)
Delete(container *PublicEndpoint) error
}
// newPublicEndpointClient wires a PublicEndpointClient to the given
// RancherClient.
func newPublicEndpointClient(rancherClient *RancherClient) *PublicEndpointClient {
	return &PublicEndpointClient{
		rancherClient: rancherClient,
	}
}
// Create delegates to the generic doCreate helper and decodes the server's
// response into a fresh PublicEndpoint.
func (c *PublicEndpointClient) Create(container *PublicEndpoint) (*PublicEndpoint, error) {
	resp := &PublicEndpoint{}
	err := c.rancherClient.doCreate(PUBLIC_ENDPOINT_TYPE, container, resp)
	return resp, err
}
// Update applies `updates` to the resource identified by `existing` via the
// generic doUpdate helper and returns the decoded result.
func (c *PublicEndpointClient) Update(existing *PublicEndpoint, updates interface{}) (*PublicEndpoint, error) {
	resp := &PublicEndpoint{}
	err := c.rancherClient.doUpdate(PUBLIC_ENDPOINT_TYPE, &existing.Resource, updates, resp)
	return resp, err
}
// List fetches the first page of publicEndpoints matching `opts`; the
// returned collection keeps a client reference so Next() can paginate.
func (c *PublicEndpointClient) List(opts *ListOpts) (*PublicEndpointCollection, error) {
	resp := &PublicEndpointCollection{}
	err := c.rancherClient.doList(PUBLIC_ENDPOINT_TYPE, opts, resp)
	resp.client = c
	return resp, err
}
// Next fetches the following page of results, or returns (nil, nil) when
// there is no further page (or the receiver is nil / unpaginated).
func (cc *PublicEndpointCollection) Next() (*PublicEndpointCollection, error) {
	// Guard clause: nothing to fetch without a populated next-page link.
	if cc == nil || cc.Pagination == nil || cc.Pagination.Next == "" {
		return nil, nil
	}
	next := &PublicEndpointCollection{}
	err := cc.client.rancherClient.doNext(cc.Pagination.Next, next)
	next.client = cc.client
	return next, err
}
// ById looks up a single publicEndpoint by its ID. A 404 from the API is
// translated into (nil, nil) rather than an error.
func (c *PublicEndpointClient) ById(id string) (*PublicEndpoint, error) {
	result := &PublicEndpoint{}
	err := c.rancherClient.doById(PUBLIC_ENDPOINT_TYPE, id, result)
	if apiError, ok := err.(*ApiError); ok && apiError.StatusCode == 404 {
		return nil, nil
	}
	return result, err
}
// Delete removes the given publicEndpoint resource via the generic
// doResourceDelete helper.
func (c *PublicEndpointClient) Delete(container *PublicEndpoint) error {
	return c.rancherClient.doResourceDelete(PUBLIC_ENDPOINT_TYPE, &container.Resource)
}
| apache-2.0 |
winger007/zstack | header/src/main/java/org/zstack/header/storage/snapshot/VolumeSnapshotAO.java | 5199 | package org.zstack.header.storage.snapshot;
import org.zstack.header.storage.primary.PrimaryStorageEO;
import org.zstack.header.vo.ForeignKey;
import org.zstack.header.vo.ForeignKey.ReferenceOption;
import org.zstack.header.vo.Index;
import org.zstack.header.vo.ShadowEntity;
import org.zstack.header.volume.VolumeEO;
import javax.persistence.*;
import java.sql.Timestamp;
/**
 * JPA mapped superclass holding the persistent fields shared by volume
 * snapshot entities. Column/foreign-key behavior is declared entirely via
 * annotations; subclasses map this onto concrete tables.
 */
@MappedSuperclass
public class VolumeSnapshotAO implements ShadowEntity {
    @Id
    @Column
    private String uuid;

    // Indexed for fast lookup by snapshot name.
    @Column
    @Index
    private String name;

    @Column
    private String description;

    @Column
    private String type;

    // Owning volume; nulled out (not cascaded) when the volume is deleted.
    @Column
    @ForeignKey(parentEntityClass = VolumeEO.class, onDeleteAction = ReferenceOption.SET_NULL)
    private String volumeUuid;

    @Column
    private String format;

    // Deleting the snapshot tree cascades to its snapshots.
    @Column
    @ForeignKey(parentEntityClass = VolumeSnapshotTreeEO.class, onDeleteAction = ReferenceOption.CASCADE)
    private String treeUuid;

    // Parent snapshot in the tree; null for a root snapshot or after the
    // parent is deleted (SET_NULL).
    @Column
    @ForeignKey(parentEntityClass = VolumeSnapshotEO.class, onDeleteAction = ReferenceOption.SET_NULL)
    private String parentUuid;

    @Column
    @ForeignKey(parentEntityClass = PrimaryStorageEO.class, onDeleteAction = ReferenceOption.SET_NULL)
    private String primaryStorageUuid;

    @Column
    private String primaryStorageInstallPath;

    // Depth of this snapshot within its snapshot tree.
    @Column
    private int distance;

    @Column
    private long size;

    @Column
    private boolean latest;

    @Column
    private boolean fullSnapshot;

    @Column
    private String volumeType;

    @Column
    @Enumerated(EnumType.STRING)
    private VolumeSnapshotState state;

    @Column
    @Enumerated(EnumType.STRING)
    private VolumeSnapshotStatus status;

    @Column
    private Timestamp createDate;

    @Column
    private Timestamp lastOpDate;

    // Pre-update copy of this entity (ShadowEntity contract); never persisted.
    @Transient
    private VolumeSnapshotAO shadow;

    public VolumeSnapshotAO getShadow() {
        return shadow;
    }

    // Clears lastOpDate before each update — presumably so the database-side
    // default repopulates it with the current time; TODO confirm against the
    // persistence configuration.
    @PreUpdate
    private void preUpdate() {
        lastOpDate = null;
    }

    public String getUuid() {
        return uuid;
    }

    public void setUuid(String uuid) {
        this.uuid = uuid;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getFormat() {
        return format;
    }

    public void setFormat(String format) {
        this.format = format;
    }

    public String getParentUuid() {
        return parentUuid;
    }

    public void setParentUuid(String parentUuid) {
        this.parentUuid = parentUuid;
    }

    public VolumeSnapshotState getState() {
        return state;
    }

    public void setState(VolumeSnapshotState state) {
        this.state = state;
    }

    public Timestamp getCreateDate() {
        return createDate;
    }

    public void setCreateDate(Timestamp createDate) {
        this.createDate = createDate;
    }

    public Timestamp getLastOpDate() {
        return lastOpDate;
    }

    public void setLastOpDate(Timestamp lastOpDate) {
        this.lastOpDate = lastOpDate;
    }

    public String getVolumeUuid() {
        return volumeUuid;
    }

    public void setVolumeUuid(String volumeUuid) {
        this.volumeUuid = volumeUuid;
    }

    public VolumeSnapshotStatus getStatus() {
        return status;
    }

    public void setStatus(VolumeSnapshotStatus status) {
        this.status = status;
    }

    public boolean isLatest() {
        return latest;
    }

    public void setLatest(boolean latest) {
        this.latest = latest;
    }

    public boolean isFullSnapshot() {
        return fullSnapshot;
    }

    public void setFullSnapshot(boolean fullSnapshot) {
        this.fullSnapshot = fullSnapshot;
    }

    public String getPrimaryStorageUuid() {
        return primaryStorageUuid;
    }

    public void setPrimaryStorageUuid(String primaryStorageUuid) {
        this.primaryStorageUuid = primaryStorageUuid;
    }

    public String getPrimaryStorageInstallPath() {
        return primaryStorageInstallPath;
    }

    public void setPrimaryStorageInstallPath(String primaryStorageInstallPath) {
        this.primaryStorageInstallPath = primaryStorageInstallPath;
    }

    public void setVolumeType(String volumeType) {
        this.volumeType = volumeType;
    }

    public String getVolumeType() {
        return volumeType;
    }

    public long getSize() {
        return size;
    }

    public void setSize(long size) {
        this.size = size;
    }

    public int getDistance() {
        return distance;
    }

    public void setDistance(int distance) {
        this.distance = distance;
    }

    public String getTreeUuid() {
        return treeUuid;
    }

    public void setTreeUuid(String treeUuid) {
        this.treeUuid = treeUuid;
    }

    @Override
    public void setShadow(Object o) {
        shadow = (VolumeSnapshotAO) o;
    }
}
| apache-2.0 |
paulrossman/knife-google | spec/chef/knife/google_disk_list_spec.rb | 1365 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'spec_helper'
describe Chef::Knife::GoogleDiskList do
  let(:knife_plugin) do
    Chef::Knife::GoogleDiskList.new(["-Z" + stored_zone.name])
  end

  it "should enlist all the GCE disks when run invoked" do
    # Stub the zone lookup so the plugin resolves the stored test zone.
    zone_collection = double(Google::Compute::ListableResourceCollection)
    expect(zone_collection).to receive(:get).with(stored_zone.name).
      and_return(stored_zone)

    # Stub disk listing for that zone to return the single stored disk.
    disk_collection = double(Google::Compute::ListableResourceCollection)
    expect(disk_collection).to receive(:list).with(:zone => stored_zone.name).
      and_return([stored_disk])

    # Inject a fake compute client exposing the stubbed collections.
    compute_client = double(Google::Compute::Client,
                            :disks => disk_collection,
                            :zones => zone_collection)
    allow(Google::Compute::Client).to receive(:from_json).and_return(compute_client)

    expect(knife_plugin.ui).to receive(:info)
    knife_plugin.run
  end
end
| apache-2.0 |
jtimberman/erlang | test/kitchen/cookbooks/erlang_test/recipes/source.rb | 665 | #
# Cookbook Name:: erlang_test
# Recipe:: source
#
# Copyright 2012, Opscode, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Delegate to the erlang cookbook's source recipe; this wrapper recipe exists
# so the integration suite can converge a from-source Erlang build —
# presumably driven by test-kitchen, judging from the cookbook name.
include_recipe "erlang::source"
| apache-2.0 |
Bizyroth/hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java | 19439 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.metrics;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertTrue;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Test for metrics published by the Namenode
*/
public class TestNameNodeMetrics {
private static final Configuration CONF = new HdfsConfiguration();
private static final int DFS_REPLICATION_INTERVAL = 1;
private static final Path TEST_ROOT_DIR_PATH =
new Path("/testNameNodeMetrics");
private static final String NN_METRICS = "NameNodeActivity";
private static final String NS_METRICS = "FSNamesystem";
// Number of datanodes in the cluster
private static final int DATANODE_COUNT = 3;
private static final int WAIT_GAUGE_VALUE_RETRIES = 20;
// Rollover interval of percentile metrics (in seconds)
private static final int PERCENTILES_INTERVAL = 1;
static {
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
DFS_REPLICATION_INTERVAL);
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
DFS_REPLICATION_INTERVAL);
CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
"" + PERCENTILES_INTERVAL);
// Enable stale DataNodes checking
CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
((Log4JLogger)LogFactory.getLog(MetricsAsserts.class))
.getLogger().setLevel(Level.DEBUG);
}
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private final Random rand = new Random();
private FSNamesystem namesystem;
private BlockManager bm;
private static Path getTestPath(String fileName) {
return new Path(TEST_ROOT_DIR_PATH, fileName);
}
@Before
public void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(DATANODE_COUNT).build();
cluster.waitActive();
namesystem = cluster.getNamesystem();
bm = namesystem.getBlockManager();
fs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
MetricsSource source = DefaultMetricsSystem.instance().getSource("UgiMetrics");
if (source != null) {
// Run only once since the UGI metrics is cleaned up during teardown
MetricsRecordBuilder rb = getMetrics(source);
assertQuantileGauges("GetGroups1s", rb);
}
cluster.shutdown();
}
/** create a file with a length of <code>fileLen</code> */
private void createFile(Path file, long fileLen, short replicas) throws IOException {
DFSTestUtil.createFile(fs, file, fileLen, replicas, rand.nextLong());
}
private void updateMetrics() throws Exception {
// Wait for metrics update (corresponds to dfs.namenode.replication.interval
// for some block related metrics to get updated)
Thread.sleep(1000);
}
private void readFile(FileSystem fileSys,Path name) throws IOException {
//Just read file so that getNumBlockLocations are incremented
DataInputStream stm = fileSys.open(name);
byte [] buffer = new byte[4];
stm.read(buffer,0,4);
stm.close();
}
/**
* Test that capacity metrics are exported and pass
* basic sanity tests.
*/
@Test (timeout = 1800)
public void testCapacityMetrics() throws Exception {
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
long capacityTotal = MetricsAsserts.getLongGauge("CapacityTotal", rb);
assert(capacityTotal != 0);
long capacityUsed = MetricsAsserts.getLongGauge("CapacityUsed", rb);
long capacityRemaining =
MetricsAsserts.getLongGauge("CapacityRemaining", rb);
long capacityUsedNonDFS =
MetricsAsserts.getLongGauge("CapacityUsedNonDFS", rb);
assert(capacityUsed + capacityRemaining + capacityUsedNonDFS ==
capacityTotal);
}
/** Test metrics indicating the number of stale DataNodes */
@Test
public void testStaleNodes() throws Exception {
// Set two datanodes as stale
for (int i = 0; i < 2; i++) {
DataNode dn = cluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
long staleInterval = CONF.getLong(
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
DatanodeDescriptor dnDes = cluster.getNameNode().getNamesystem()
.getBlockManager().getDatanodeManager()
.getDatanode(dn.getDatanodeId());
DFSTestUtil.resetLastUpdatesWithOffset(dnDes, -(staleInterval + 1));
}
// Let HeartbeatManager to check heartbeat
BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem()
.getBlockManager());
assertGauge("StaleDataNodes", 2, getMetrics(NS_METRICS));
// Reset stale datanodes
for (int i = 0; i < 2; i++) {
DataNode dn = cluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
DatanodeDescriptor dnDes = cluster.getNameNode().getNamesystem()
.getBlockManager().getDatanodeManager()
.getDatanode(dn.getDatanodeId());
DFSTestUtil.resetLastUpdatesWithOffset(dnDes, 0);
}
// Let HeartbeatManager to refresh
BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem()
.getBlockManager());
assertGauge("StaleDataNodes", 0, getMetrics(NS_METRICS));
}
/** Test metrics associated with addition of a file */
@Test
public void testFileAdd() throws Exception {
// Add files with 100 blocks
final Path file = getTestPath("testFileAdd");
createFile(file, 3200, (short)3);
final long blockCount = 32;
int blockCapacity = namesystem.getBlockCapacity();
updateMetrics();
assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
// File create operations is 1
// Number of files created is depth of <code>file</code> path
assertCounter("CreateFileOps", 1L, rb);
assertCounter("FilesCreated", (long)file.depth(), rb);
updateMetrics();
long filesTotal = file.depth() + 1; // Add 1 for root
rb = getMetrics(NS_METRICS);
assertGauge("FilesTotal", filesTotal, rb);
assertGauge("BlocksTotal", blockCount, rb);
fs.delete(file, true);
filesTotal--; // reduce the filecount for deleted file
rb = waitForDnMetricValue(NS_METRICS, "FilesTotal", filesTotal);
assertGauge("BlocksTotal", 0L, rb);
assertGauge("PendingDeletionBlocks", 0L, rb);
rb = getMetrics(NN_METRICS);
// Delete file operations and number of files deleted must be 1
assertCounter("DeleteFileOps", 1L, rb);
assertCounter("FilesDeleted", 1L, rb);
}
/** Corrupt a block and ensure metrics reflects it */
@Test
public void testCorruptBlock() throws Exception {
// Create a file with single block with two replicas
final Path file = getTestPath("testCorruptBlock");
createFile(file, 100, (short)2);
// Corrupt first replica of the block
LocatedBlock block = NameNodeAdapter.getBlockLocations(
cluster.getNameNode(), file.toString(), 0, 1).get(0);
cluster.getNamesystem().writeLock();
try {
bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
"STORAGE_ID", "TEST");
} finally {
cluster.getNamesystem().writeUnlock();
}
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("CorruptBlocks", 1L, rb);
assertGauge("PendingReplicationBlocks", 1L, rb);
assertGauge("ScheduledReplicationBlocks", 1L, rb);
fs.delete(file, true);
rb = waitForDnMetricValue(NS_METRICS, "CorruptBlocks", 0L);
assertGauge("PendingReplicationBlocks", 0L, rb);
assertGauge("ScheduledReplicationBlocks", 0L, rb);
}
/** Create excess blocks by reducing the replication factor for
* for a file and ensure metrics reflects it
*/
@Test
public void testExcessBlocks() throws Exception {
Path file = getTestPath("testExcessBlocks");
createFile(file, 100, (short)2);
NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("ExcessBlocks", 1L, rb);
// verify ExcessBlocks metric is decremented and
// excessReplicateMap is cleared after deleting a file
fs.delete(file, true);
rb = getMetrics(NS_METRICS);
assertGauge("ExcessBlocks", 0L, rb);
assertTrue(bm.excessReplicateMap.isEmpty());
}
/** Test to ensure metrics reflects missing blocks */
@Test
public void testMissingBlock() throws Exception {
// Create a file with single block with two replicas
Path file = getTestPath("testMissingBlocks");
createFile(file, 100, (short)1);
// Corrupt the only replica of the block to result in a missing block
LocatedBlock block = NameNodeAdapter.getBlockLocations(
cluster.getNameNode(), file.toString(), 0, 1).get(0);
cluster.getNamesystem().writeLock();
try {
bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
"STORAGE_ID", "TEST");
} finally {
cluster.getNamesystem().writeUnlock();
}
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("UnderReplicatedBlocks", 1L, rb);
assertGauge("MissingBlocks", 1L, rb);
assertGauge("MissingReplOneBlocks", 1L, rb);
fs.delete(file, true);
waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
}
private void waitForDeletion() throws InterruptedException {
// Wait for more than DATANODE_COUNT replication intervals to ensure all
// the blocks pending deletion are sent for deletion to the datanodes.
Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
}
/**
* Wait for the named gauge value from the metrics source to reach the
* desired value.
*
* There's an initial delay then a spin cycle of sleep and poll. Because
* all the tests use a shared FS instance, these tests are not independent;
* that's why the initial sleep is in there.
*
* @param source metrics source
* @param name gauge name
* @param expected expected value
* @return the last metrics record polled
* @throws Exception if something went wrong.
*/
private MetricsRecordBuilder waitForDnMetricValue(String source,
String name,
long expected)
throws Exception {
MetricsRecordBuilder rb;
long gauge;
//initial wait.
waitForDeletion();
//lots of retries are allowed for slow systems; fast ones will still
//exit early
int retries = (DATANODE_COUNT + 1) * WAIT_GAUGE_VALUE_RETRIES;
rb = getMetrics(source);
gauge = MetricsAsserts.getLongGauge(name, rb);
while (gauge != expected && (--retries > 0)) {
Thread.sleep(DFS_REPLICATION_INTERVAL * 500);
rb = getMetrics(source);
gauge = MetricsAsserts.getLongGauge(name, rb);
}
//at this point the assertion is valid or the retry count ran out
assertGauge(name, expected, rb);
return rb;
}
@Test
public void testRenameMetrics() throws Exception {
Path src = getTestPath("src");
createFile(src, 100, (short)1);
Path target = getTestPath("target");
createFile(target, 100, (short)1);
fs.rename(src, target, Rename.OVERWRITE);
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
assertCounter("FilesRenamed", 1L, rb);
assertCounter("FilesDeleted", 1L, rb);
}
/**
* Test numGetBlockLocations metric
*
* Test initiates and performs file operations (create,read,close,open file )
* which results in metrics changes. These metrics changes are updated and
* tested for correctness.
*
* create file operation does not increment numGetBlockLocation
* one read file operation increments numGetBlockLocation by 1
*
* @throws IOException in case of an error
*/
@Test
public void testGetBlockLocationMetric() throws Exception {
Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "file1.dat");
// When cluster starts first time there are no file (read,create,open)
// operations so metric GetBlockLocations should be 0.
assertCounter("GetBlockLocations", 0L, getMetrics(NN_METRICS));
//Perform create file operation
createFile(file1_Path,100,(short)2);
updateMetrics();
//Create file does not change numGetBlockLocations metric
//expect numGetBlockLocations = 0 for previous and current interval
assertCounter("GetBlockLocations", 0L, getMetrics(NN_METRICS));
// Open and read file operation increments GetBlockLocations
// Perform read file operation on earlier created file
readFile(fs, file1_Path);
updateMetrics();
// Verify read file operation has incremented numGetBlockLocations by 1
assertCounter("GetBlockLocations", 1L, getMetrics(NN_METRICS));
// opening and reading file twice will increment numGetBlockLocations by 2
readFile(fs, file1_Path);
readFile(fs, file1_Path);
updateMetrics();
assertCounter("GetBlockLocations", 3L, getMetrics(NN_METRICS));
}
/**
* Test NN checkpoint and transaction-related metrics.
*/
@Test
public void testTransactionAndCheckpointMetrics() throws Exception {
long lastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
getMetrics(NS_METRICS));
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId", 1L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
updateMetrics();
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId", 2L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint", 2L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll", 2L, getMetrics(NS_METRICS));
cluster.getNameNodeRpc().rollEditLog();
updateMetrics();
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
cluster.getNameNodeRpc().saveNamespace();
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
updateMetrics();
long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
getMetrics(NS_METRICS));
assertTrue(lastCkptTime < newLastCkptTime);
assertGauge("LastWrittenTransactionId", 6L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
}
/**
 * Tests that the sync and block report metrics get updated on cluster
 * startup.
 */
@Test
public void testSyncAndBlockReportMetric() throws Exception {
  MetricsRecordBuilder metrics = getMetrics(NN_METRICS);
  // Opening the edit journal when the cluster starts accounts for one sync.
  assertCounter("SyncsNumOps", 1L, metrics);
  // Each storage directory of each datanode reports in on startup.
  assertCounter("BlockReportNumOps",
      (long) DATANODE_COUNT * cluster.getStoragesPerDatanode(), metrics);
  // Wait one percentile interval plus slop so the quantiles roll over.
  Thread.sleep((PERCENTILES_INTERVAL + 1) * 1000);
  // After the rollover the percentile gauges must have been published.
  assertQuantileGauges("Syncs1s", metrics);
  assertQuantileGauges("BlockReport1s", metrics);
}
/**
 * Test NN ReadOps Count and WriteOps Count
 */
@Test
public void testReadWriteOps() throws Exception {
  final long txnsBefore =
      MetricsAsserts.getLongCounter("TransactionsNumOps",
          getMetrics(NN_METRICS));
  final Path readDataPath = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");
  // Write path: creating a file must record edit-log transactions.
  createFile(readDataPath, 1024 * 1024, (short) 2);
  // Read path: read back the file that was just created.
  readFile(fs, readDataPath);
  final long txnsAfter =
      MetricsAsserts.getLongCounter("TransactionsNumOps",
          getMetrics(NN_METRICS));
  assertTrue(txnsAfter > txnsBefore);
}
}
| apache-2.0 |
zdw/xos | xos/tests/permissiontest.py | 7533 | import unittest
from core.models import *
class TestPermission(unittest.TestCase):
    """Exercise save_by_user() permission checks on the core models.

    setUp() builds one instance of each model plus one user per role;
    each test_* method then asserts which roles may save a given object
    and which are rejected with PermissionDenied.
    """

    def _make_user(self, email, is_admin=False):
        """Create, save and track a User attached to the test site."""
        user = User(email=email, first_name='Test', last_name='Test',
                    is_admin=is_admin)
        # BUG FIX: the original assigned `self.user_X = self.site` for the
        # privileged users, replacing the User object with the Site; the
        # intent was to set the user's home site.
        user.site = self.site
        user.save()
        self.test_objects.append(user)
        return user

    def _assert_can_save(self, obj, users):
        """Assert every user in `users` may save `obj` (returns None)."""
        for user in users:
            self.assertEqual(obj.save_by_user(user), None)

    def _assert_cannot_save(self, obj, users):
        """Assert every user in `users` is denied saving `obj`.

        BUG FIX: the original called `obj.save_by_user(user,)` inline, so
        the exception was raised before assertRaises could observe it.
        Pass the callable and its argument separately instead.
        """
        for user in users:
            self.assertRaises(PermissionDenied, obj.save_by_user, user)

    def setUp(self):
        self.test_objects = []
        # deployment
        self.deployment = Deployment(name='TestDeployment')
        self.deployment.save()
        self.test_objects.append(self.deployment)
        # site
        self.site = Site(name='TestSite')
        self.site.save()
        self.test_objects.append(self.site)
        # site deployment
        self.site_deployment = SiteDeployment(site=self.site,
                                              deployment=self.deployment)
        self.site_deployment.save()
        self.test_objects.append(self.site_deployment)
        # node
        self.node = Node(name='TestNode',
                         site_deployment=self.site_deployment)
        self.node.save()
        self.test_objects.append(self.node)
        # slice
        self.slice = Slice(name='TestSlice', site=self.site)
        self.slice.save()
        # BUG FIX: was `self.test_objects.appen(slice.slice)` -- a typo'd
        # method name plus a reference to the `slice` builtin. Track the
        # slice we actually created so tearDown() deletes it.
        self.test_objects.append(self.slice)
        # users without extra privileges
        self.user_admin = self._make_user('user_admin@test.com',
                                          is_admin=True)
        self.user_read_only = self._make_user('user_read_only@test.com')
        self.user_default = self._make_user('user_default@test.com')
        # deployment admin
        self.user_deployment_admin = self._make_user(
            'user_deployment_admin@test.com')
        deployment_privilege = DeploymentPrivilege(
            user=self.user_deployment_admin,
            deployment=self.deployment,
            role='admin')
        deployment_privilege.save()
        self.test_objects.append(deployment_privilege)
        # site admin
        self.user_site_admin = self._make_user('user_site_admin@test.com')
        site_admin_privilege = SitePrivilege(
            user=self.user_site_admin,
            site=self.site,
            role='admin')
        site_admin_privilege.save()
        self.test_objects.append(site_admin_privilege)
        # site pi
        self.user_site_pi = self._make_user('user_site_pi@test.com')
        site_pi_privilege = SitePrivilege(
            user=self.user_site_pi,
            site=self.site,
            role='pi')
        site_pi_privilege.save()
        self.test_objects.append(site_pi_privilege)
        # site tech
        self.user_site_tech = self._make_user('user_site_tech@test.com')
        site_tech_privilege = SitePrivilege(
            user=self.user_site_tech,
            site=self.site,
            role='tech')
        site_tech_privilege.save()
        self.test_objects.append(site_tech_privilege)
        # slice admin
        self.user_slice_admin = self._make_user('user_slice_admin@test.com')
        slice_admin_privilege = SlicePrivilege(
            user=self.user_slice_admin,
            slice=self.slice,
            role='admin')
        slice_admin_privilege.save()
        self.test_objects.append(slice_admin_privilege)
        # slice access
        self.user_slice_access = self._make_user('user_slice_access@test.com')
        slice_access_privilege = SlicePrivilege(
            user=self.user_slice_access,
            slice=self.slice,
            role='access')
        slice_access_privilege.save()
        self.test_objects.append(slice_access_privilege)

    def test_deployment(self):
        self._assert_can_save(
            self.deployment,
            [self.user_admin, self.user_deployment_admin])
        self._assert_cannot_save(
            self.deployment,
            [self.user_read_only, self.user_default, self.user_site_admin,
             self.user_site_pi, self.user_site_tech, self.user_slice_admin,
             self.user_slice_access])

    def test_site(self):
        self._assert_can_save(
            self.site,
            [self.user_admin, self.user_site_admin, self.user_site_pi])
        self._assert_cannot_save(
            self.site,
            [self.user_read_only, self.user_default,
             self.user_deployment_admin, self.user_site_tech,
             self.user_slice_admin, self.user_slice_access])

    def test_node(self):
        self._assert_can_save(
            self.node,
            [self.user_admin, self.user_site_admin, self.user_site_tech])
        self._assert_cannot_save(
            self.node,
            [self.user_read_only, self.user_default,
             self.user_deployment_admin, self.user_site_pi,
             self.user_slice_admin, self.user_slice_access])

    def test_slice(self):
        self._assert_can_save(
            self.slice,
            [self.user_admin, self.user_site_admin, self.user_site_pi,
             self.user_slice_admin])
        self._assert_cannot_save(
            self.slice,
            [self.user_read_only, self.user_default,
             self.user_deployment_admin, self.user_site_tech,
             self.user_slice_access])

    def test_user(self):
        self._assert_can_save(
            self.user_default,
            [self.user_admin, self.user_site_admin,
             self.user_deployment_admin, self.user_site_pi,
             self.user_default])
        # NOTE(review): user_deployment_admin appears in BOTH the allowed
        # list above and the denied list below, exactly as in the original
        # test; one of the two is presumably wrong -- confirm the intended
        # policy before changing either list.
        self._assert_cannot_save(
            self.user_default,
            [self.user_read_only, self.user_deployment_admin,
             self.user_site_tech, self.user_slice_admin,
             self.user_slice_access])

    def tearDown(self):
        # Delete everything created in setUp so tests do not leak state.
        for obj in self.test_objects:
            obj.delete()
# Allow running this module directly (python permissiontest.py).
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
rcordovano/autopsy | Core/src/org/sleuthkit/autopsy/timeline/ui/detailview/datamodel/SingleDetailsViewEvent.java | 10305 | /*
* Autopsy Forensic Browser
*
* Copyright 2018-2019 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.timeline.ui.detailview.datamodel;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSortedSet;
import java.util.Collections;
import java.util.Comparator;
import java.util.Optional;
import java.util.Set;
import java.util.SortedSet;
import org.joda.time.Interval;
import org.sleuthkit.datamodel.TimelineEvent;
import org.sleuthkit.datamodel.TimelineEventType;
import org.sleuthkit.datamodel.TimelineLevelOfDetail;
/**
 * A single event.
 */
public class SingleDetailsViewEvent implements DetailViewEvent {

    private final long eventID;

    /**
     * The TSK object ID of the file (could be data source) this event is
     * derived from.
     */
    private final long fileObjId;

    /**
     * The TSK artifact ID of the file this event is derived from. Null, if this
     * event is not derived from an artifact.
     */
    private final Long artifactID;

    /**
     * The TSK datasource ID of the datasource this event belongs to.
     */
    private final long dataSourceObjId;

    /**
     * The time of this event in second from the Unix epoch.
     */
    private final long time;

    /**
     * The type of this event.
     */
    private final TimelineEventType type;

    /**
     * The three descriptions (full, med, short) stored in a map, keyed by
     * DescriptionLOD (Level of Detail)
     */
    private final ImmutableMap<TimelineLevelOfDetail, String> descriptions;

    /**
     * True if the file this event is derived from hits any of the configured
     * hash sets.
     */
    private final boolean hashHit;

    /**
     * True if the file or artifact this event is derived from is tagged.
     */
    private final boolean tagged;

    /**
     * Single events may or may not have their parent set, since the parent is a
     * transient property of the current (details) view settings.
     */
    private MultiEvent<?> parent = null;

    /**
     *
     * @param eventID
     * @param dataSourceObjId
     * @param fileObjId        Object Id of file (could be a data source) that
     *                         event is associated with
     * @param artifactID
     * @param time
     * @param type
     * @param fullDescription
     * @param medDescription
     * @param shortDescription
     * @param hashHit
     * @param tagged
     */
    public SingleDetailsViewEvent(long eventID, long dataSourceObjId, long fileObjId, Long artifactID, long time, TimelineEventType type, String fullDescription, String medDescription, String shortDescription, boolean hashHit, boolean tagged) {
        this.eventID = eventID;
        this.dataSourceObjId = dataSourceObjId;
        this.fileObjId = fileObjId;
        // An artifact ID of 0 is the "no artifact" sentinel; store null instead.
        this.artifactID = Long.valueOf(0).equals(artifactID) ? null : artifactID;
        this.time = time;
        this.type = type;
        descriptions = ImmutableMap.<TimelineLevelOfDetail, String>of(TimelineLevelOfDetail.HIGH, fullDescription,
                TimelineLevelOfDetail.MEDIUM, medDescription,
                TimelineLevelOfDetail.LOW, shortDescription);
        this.hashHit = hashHit;
        this.tagged = tagged;
    }

    public SingleDetailsViewEvent(TimelineEvent singleEvent) {
        this(singleEvent.getEventID(),
                singleEvent.getDataSourceObjID(),
                singleEvent.getContentObjID(),
                singleEvent.getArtifactID().orElse(null),
                singleEvent.getTime(),
                singleEvent.getEventType(),
                singleEvent.getDescription(TimelineLevelOfDetail.HIGH),
                singleEvent.getDescription(TimelineLevelOfDetail.MEDIUM),
                singleEvent.getDescription(TimelineLevelOfDetail.LOW),
                singleEvent.eventSourceHasHashHits(),
                singleEvent.eventSourceIsTagged());
    }

    /**
     * Get a new SingleDetailsViewEvent that is the same as this event, but with
     * the given parent.
     *
     * @param newParent the parent of the new event object.
     *
     * @return a new SingleDetailsViewEvent that is the same as this event, but
     *         with the given parent.
     */
    public SingleDetailsViewEvent withParent(MultiEvent<?> newParent) {
        SingleDetailsViewEvent singleEvent = new SingleDetailsViewEvent(eventID, dataSourceObjId, fileObjId, artifactID, time, type, descriptions.get(TimelineLevelOfDetail.HIGH), descriptions.get(TimelineLevelOfDetail.MEDIUM), descriptions.get(TimelineLevelOfDetail.LOW), hashHit, tagged);
        singleEvent.parent = newParent;
        return singleEvent;
    }

    /**
     * Is the file or artifact this event is derived from tagged?
     *
     * @return true if he file or artifact this event is derived from is tagged.
     */
    public boolean isTagged() {
        return tagged;
    }

    /**
     * Is the file this event is derived from in any of the configured hash
     * sets.
     *
     *
     * @return True if the file this event is derived from is in any of the
     *         configured hash sets.
     */
    public boolean isHashHit() {
        return hashHit;
    }

    /**
     * Get the artifact id of the artifact this event is derived from.
     *
     * @return An Optional containing the artifact ID. Will be empty if this
     *         event is not derived from an artifact
     */
    public Optional<Long> getArtifactID() {
        return Optional.ofNullable(artifactID);
    }

    /**
     * Get the event id of this event.
     *
     * @return The event id of this event.
     */
    public long getEventID() {
        return eventID;
    }

    /**
     * Get the obj id of the file (which could be a data source) this event is
     * derived from.
     *
     * @return the object id.
     */
    public long getFileID() {
        return fileObjId;
    }

    /**
     * Get the time of this event (in seconds from the Unix epoch).
     *
     * @return the time of this event in seconds from Unix epoch
     */
    public long getTime() {
        return time;
    }

    @Override
    public TimelineEventType getEventType() {
        return type;
    }

    /**
     * Get the full description of this event.
     *
     * @return the full description
     */
    public String getFullDescription() {
        return getDescription(TimelineLevelOfDetail.HIGH);
    }

    /**
     * Get the medium description of this event.
     *
     * @return the medium description
     */
    public String getMedDescription() {
        return getDescription(TimelineLevelOfDetail.MEDIUM);
    }

    /**
     * Get the short description of this event.
     *
     * @return the short description
     */
    public String getShortDescription() {
        return getDescription(TimelineLevelOfDetail.LOW);
    }

    /**
     * Get the description of this event at the give level of detail(LoD).
     *
     * @param lod The level of detail to get.
     *
     * @return The description of this event at the given level of detail.
     */
    public String getDescription(TimelineLevelOfDetail lod) {
        return descriptions.get(lod);
    }

    /**
     * Get the datasource id of the datasource this event belongs to.
     *
     * @return the datasource id.
     */
    public long getDataSourceObjID() {
        return dataSourceObjId;
    }

    @Override
    public Set<Long> getEventIDs() {
        return Collections.singleton(eventID);
    }

    @Override
    public Set<Long> getEventIDsWithHashHits() {
        return isHashHit() ? Collections.singleton(eventID) : Collections.emptySet();
    }

    @Override
    public Set<Long> getEventIDsWithTags() {
        return isTagged() ? Collections.singleton(eventID) : Collections.emptySet();
    }

    @Override
    public long getEndMillis() {
        return time * 1000;
    }

    @Override
    public long getStartMillis() {
        return time * 1000;
    }

    @Override
    public int hashCode() {
        int hash = 7;
        hash = 13 * hash + (int) (this.eventID ^ (this.eventID >>> 32));
        return hash;
    }

    @Override
    public boolean equals(Object obj) {
        // BUG FIX: added the reflexive fast path required by the
        // Object.equals() contract; previously a full class/field
        // comparison ran even for the same instance.
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        final SingleDetailsViewEvent other = (SingleDetailsViewEvent) obj;
        return this.eventID == other.eventID;
    }

    @Override
    public SortedSet<EventCluster> getClusters() {
        EventCluster eventCluster = new EventCluster(new Interval(time * 1000, time * 1000), type, getEventIDs(), getEventIDsWithHashHits(), getEventIDsWithTags(), getFullDescription(), TimelineLevelOfDetail.HIGH);
        return ImmutableSortedSet.orderedBy(Comparator.comparing(EventCluster::getStartMillis)).add(eventCluster).build();
    }

    @Override
    public String getDescription() {
        return getFullDescription();
    }

    @Override
    public TimelineLevelOfDetail getDescriptionLevel() {
        return TimelineLevelOfDetail.HIGH;
    }

    /**
     * get the EventStripe (if any) that contains this event, skipping over any
     * intervening event cluster
     *
     * @return an Optional containing the parent stripe of this cluster: empty
     *         if the cluster has no parent set or the parent has no parent
     *         stripe.
     */
    @Override
    public Optional<EventStripe> getParentStripe() {
        if (parent == null) {
            return Optional.empty();
        } else if (parent instanceof EventStripe) {
            return Optional.of((EventStripe) parent);
        } else {
            return parent.getParentStripe();
        }
    }
}
| apache-2.0 |
ashward/buddycloud-server-java | src/main/java/org/buddycloud/channelserver/channel/node/configuration/field/NodeDescription.java | 648 | package org.buddycloud.channelserver.channel.node.configuration.field;
public class NodeDescription extends Field {

    public static final String FIELD_NAME = "pubsub#description";
    public static final String DEFAULT_VALUE = "Channel description";
    public static final int MAX_DESCRIPTION_LENGTH = 1024;

    public NodeDescription() {
        name = FIELD_NAME;
    }

    /**
     * Returns the description, truncated to MAX_DESCRIPTION_LENGTH
     * characters. The truncated form is stored back so later calls return
     * the same (already shortened) value.
     */
    public String getValue() {
        final boolean tooLong = this.value.length() > MAX_DESCRIPTION_LENGTH;
        if (tooLong) {
            this.value = this.value.substring(0, MAX_DESCRIPTION_LENGTH);
        }
        return this.value;
    }

    /** Any value is acceptable; over-long values are simply truncated. */
    public boolean isValid() {
        return true;
    }
}
| apache-2.0 |
TieWei/nova | nova/service.py | 14994 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import os
import random
import sys
from oslo.config import cfg
from nova import conductor
from nova import context
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common import service
from nova import servicegroup
from nova import utils
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
# Tunables shared by all nova service binaries. The '*_manager' options
# name the manager class each binary loads; the '*_listen', '*_listen_port'
# and '*_workers' options configure the WSGI API endpoints.
service_opts = [
    cfg.IntOpt('report_interval',
               default=10,
               help='seconds between nodes reporting state to datastore'),
    cfg.BoolOpt('periodic_enable',
                default=True,
                help='enable periodic tasks'),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=60,
               help='range of seconds to randomly delay when starting the'
                    ' periodic task scheduler to reduce stampeding.'
                    ' (Disable by setting to 0)'),
    cfg.ListOpt('enabled_apis',
                default=['ec2', 'osapi_compute', 'metadata'],
                help='a list of APIs to enable by default'),
    cfg.ListOpt('enabled_ssl_apis',
                default=[],
                help='a list of APIs with enabled SSL'),
    cfg.StrOpt('ec2_listen',
               default="0.0.0.0",
               help='IP address for EC2 API to listen'),
    cfg.IntOpt('ec2_listen_port',
               default=8773,
               help='port for ec2 api to listen'),
    cfg.IntOpt('ec2_workers',
               help='Number of workers for EC2 API service'),
    cfg.StrOpt('osapi_compute_listen',
               default="0.0.0.0",
               help='IP address for OpenStack API to listen'),
    cfg.IntOpt('osapi_compute_listen_port',
               default=8774,
               help='list port for osapi compute'),
    cfg.IntOpt('osapi_compute_workers',
               help='Number of workers for OpenStack API service'),
    cfg.StrOpt('metadata_manager',
               default='nova.api.manager.MetadataManager',
               help='OpenStack metadata service manager'),
    cfg.StrOpt('metadata_listen',
               default="0.0.0.0",
               help='IP address for metadata api to listen'),
    cfg.IntOpt('metadata_listen_port',
               default=8775,
               help='port for metadata api to listen'),
    cfg.IntOpt('metadata_workers',
               help='Number of workers for metadata service'),
    cfg.StrOpt('compute_manager',
               default='nova.compute.manager.ComputeManager',
               help='full class name for the Manager for compute'),
    cfg.StrOpt('console_manager',
               default='nova.console.manager.ConsoleProxyManager',
               help='full class name for the Manager for console proxy'),
    cfg.StrOpt('cert_manager',
               default='nova.cert.manager.CertManager',
               help='full class name for the Manager for cert'),
    cfg.StrOpt('network_manager',
               default='nova.network.manager.VlanManager',
               help='full class name for the Manager for network'),
    cfg.StrOpt('scheduler_manager',
               default='nova.scheduler.manager.SchedulerManager',
               help='full class name for the Manager for scheduler'),
    cfg.IntOpt('service_down_time',
               default=60,
               help='maximum time since last check-in for up service'),
]

CONF = cfg.CONF
CONF.register_opts(service_opts)
# 'host' is declared in nova.netconf; import it so CONF.host resolves here.
CONF.import_opt('host', 'nova.netconf')
class Service(service.Service):
    """Service object for binaries running on hosts.

    A service takes a manager and enables rpc by listening to queues based
    on topic. It also periodically runs tasks on the manager and reports
    it state to the database services table.
    """

    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_enable=None, periodic_fuzzy_delay=None,
                 periodic_interval_max=None, db_allowed=True,
                 *args, **kwargs):
        super(Service, self).__init__()
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        # NOTE(russellb) We want to make sure to create the servicegroup API
        # instance early, before creating other things such as the manager,
        # that will also create a servicegroup API instance. Internally, the
        # servicegroup only allocates a single instance of the driver API and
        # we want to make sure that our value of db_allowed is there when it
        # gets created. For that to happen, this has to be the first instance
        # of the servicegroup API.
        self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=self.host, *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_enable = periodic_enable
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.periodic_interval_max = periodic_interval_max
        self.saved_args, self.saved_kwargs = args, kwargs
        self.backdoor_port = None
        self.conductor_api = conductor.API(use_local=db_allowed)
        # Block until the conductor is reachable so later DB calls made
        # through it do not fail during startup.
        self.conductor_api.wait_until_ready(context.get_admin_context())

    def start(self):
        """Start the service: register it in the services table, wire up
        the RPC consumers, join the servicegroup and schedule periodic
        tasks.
        """
        verstr = version.version_string_with_package()
        LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
                  {'topic': self.topic, 'version': verstr})
        self.basic_config_check()
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            self.service_ref = self.conductor_api.service_get_by_args(ctxt,
                self.host, self.binary)
            self.service_id = self.service_ref['id']
        except exception.NotFound:
            # First start on this host: create the services table entry.
            self.service_ref = self._create_service_ref(ctxt)

        self.manager.pre_start_hook()

        if self.backdoor_port is not None:
            self.manager.backdoor_port = self.backdoor_port

        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)

        rpc_dispatcher = self.manager.create_rpc_dispatcher(self.backdoor_port)

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)

        # Host-specific topic so messages can be directed at this node only.
        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

        self.manager.post_start_hook()

        LOG.debug(_("Join ServiceGroup membership for this service %s")
                  % self.topic)
        # Add service to the ServiceGroup membership group.
        self.servicegroup_api.join(self.host, self.topic, self)

        if self.periodic_enable:
            if self.periodic_fuzzy_delay:
                # Random stagger so many services do not fire in lockstep.
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            self.tg.add_dynamic_timer(self.periodic_tasks,
                                     initial_delay=initial_delay,
                                     periodic_interval_max=
                                     self.periodic_interval_max)

    def _create_service_ref(self, context):
        """Create (and remember the id of) this service's DB record."""
        svc_values = {
            'host': self.host,
            'binary': self.binary,
            'topic': self.topic,
            'report_count': 0
        }
        service = self.conductor_api.service_create(context, svc_values)
        self.service_id = service['id']
        return service

    def __getattr__(self, key):
        # Delegate unknown attribute access to the manager so callers can
        # treat the service as a thin proxy around it.
        manager = self.__dict__.get('manager', None)
        return getattr(manager, key)

    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               report_interval=None, periodic_enable=None,
               periodic_fuzzy_delay=None, periodic_interval_max=None,
               db_allowed=True):
        """Instantiates class and passes back application object.

        :param host: defaults to CONF.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'nova-' part
        :param manager: defaults to CONF.<topic>_manager
        :param report_interval: defaults to CONF.report_interval
        :param periodic_enable: defaults to CONF.periodic_enable
        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
        :param periodic_interval_max: if set, the max time to wait between runs
        """
        if not host:
            host = CONF.host
        if not binary:
            binary = os.path.basename(sys.argv[0])
        if not topic:
            topic = binary.rpartition('nova-')[2]
        if not manager:
            # e.g. the 'nova-compute' binary reads CONF.compute_manager.
            manager_cls = ('%s_manager' %
                           binary.rpartition('nova-')[2])
            manager = CONF.get(manager_cls, None)
        if report_interval is None:
            report_interval = CONF.report_interval
        if periodic_enable is None:
            periodic_enable = CONF.periodic_enable
        if periodic_fuzzy_delay is None:
            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay

        service_obj = cls(host, binary, topic, manager,
                          report_interval=report_interval,
                          periodic_enable=periodic_enable,
                          periodic_fuzzy_delay=periodic_fuzzy_delay,
                          periodic_interval_max=periodic_interval_max,
                          db_allowed=db_allowed)

        return service_obj

    def kill(self):
        """Destroy the service object in the datastore."""
        self.stop()
        try:
            self.conductor_api.service_destroy(context.get_admin_context(),
                                               self.service_id)
        except exception.NotFound:
            LOG.warn(_('Service killed that has no database entry'))

    def stop(self):
        # Best effort: a failure to close the RPC connection must not keep
        # the service from shutting down.
        try:
            self.conn.close()
        except Exception:
            pass
        super(Service, self).stop()

    def periodic_tasks(self, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        ctxt = context.get_admin_context()
        return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)

    def basic_config_check(self):
        """Perform basic config checks before starting processing."""
        # Make sure the tempdir exists and is writable
        try:
            with utils.tempdir():
                pass
        except Exception as e:
            LOG.error(_('Temporary directory is invalid: %s'), e)
            sys.exit(1)
class WSGIService(object):
    """Provides ability to launch API from a 'paste' configuration."""

    def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
        """Initialize, but do not start the WSGI server.

        :param name: The name of the WSGI server given to the loader.
        :param loader: Loads the WSGI application using the given name.
        :returns: None
        """
        self.name = name
        self.manager = self._get_manager()
        self.loader = loader or wsgi.Loader()
        self.app = self.loader.load_app(name)
        # Listen address, port and worker count all come from the
        # per-service '<name>_*' configuration options.
        self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
        self.port = getattr(CONF, '%s_listen_port' % name, 0)
        self.workers = getattr(CONF, '%s_workers' % name, None)
        self.use_ssl = use_ssl
        self.server = wsgi.Server(name,
                                  self.app,
                                  host=self.host,
                                  port=self.port,
                                  use_ssl=self.use_ssl,
                                  max_url_len=max_url_len)
        # Pull back the actual port, since a configured port of 0 means
        # the server picked a random free one.
        self.port = self.server.port
        self.backdoor_port = None

    def _get_manager(self):
        """Initialize a Manager object appropriate for this service.

        Use the service name to look up a Manager subclass from the
        configuration and initialize an instance. If no class name
        is configured, just return None.

        :returns: a Manager instance, or None.
        """
        opt_name = '%s_manager' % self.name
        if opt_name not in CONF:
            return None

        class_path = CONF.get(opt_name, None)
        if not class_path:
            return None

        return importutils.import_class(class_path)()

    def start(self):
        """Start serving this service using loaded configuration.

        Also, retrieve updated port number in case '0' was passed in, which
        indicates a random port should be used.

        :returns: None
        """
        if self.manager:
            self.manager.init_host()
            self.manager.pre_start_hook()
            if self.backdoor_port is not None:
                self.manager.backdoor_port = self.backdoor_port

        self.server.start()

        if self.manager:
            self.manager.post_start_hook()

    def stop(self):
        """Stop serving this API.

        :returns: None
        """
        self.server.stop()

    def wait(self):
        """Wait for the service to stop serving this API.

        :returns: None
        """
        self.server.wait()
def process_launcher():
    """Return a new ProcessLauncher for running multi-worker services."""
    return service.ProcessLauncher()
# NOTE(vish): the global launcher is to maintain the existing
#             functionality of calling service.serve +
#             service.wait
_launcher = None


def serve(server, workers=None):
    """Launch ``server`` on the module-global launcher (one-shot only)."""
    global _launcher
    if _launcher:
        raise RuntimeError(_('serve() can only be called once'))

    _launcher = service.launch(server, workers=workers)
def wait():
    """Block until the launcher created by serve() has stopped."""
    _launcher.wait()
| apache-2.0 |
tlong2/amphtml | build-system/tasks/e2e/amp-driver.js | 5155 | /**
* Copyright 2019 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @enum {string} Identifiers for the environments a test page can run in. */
const AmpdocEnvironment = {
  SINGLE: 'single',
  VIEWER_DEMO: 'viewer-demo',
  SHADOW_DEMO: 'shadow-demo',

  // AMPHTML ads environments
  A4A_FIE: 'a4a-fie',
  A4A_INABOX: 'a4a-inabox',
  A4A_INABOX_FRIENDLY: 'a4a-inabox-friendly',
  A4A_INABOX_SAFEFRAME: 'a4a-inabox-safeframe',
};

/** @const {string} Origin of the local dev server used by the ad environments' URL rewrites. */
const HOST = 'http://localhost:8000';
/**
 * Per-environment behaviors: `url()` rewrites a document URL into one that
 * loads it inside that environment, and `ready()` resolves once the document
 * is usable, switching the controller into the right frame/shadow root.
 */
const EnvironmentBehaviorMap = {
  [AmpdocEnvironment.SINGLE]: {
    // A standalone AMP page needs no extra setup.
    ready(unusedController) {
      return Promise.resolve();
    },
    url(url) {
      return url;
    },
  },

  [AmpdocEnvironment.VIEWER_DEMO]: {
    // Wait until the viewer marks its AMP iframe as loaded, then run the
    // test inside that iframe.
    ready(controller) {
      return controller
        .findElement('#AMP_DOC_dynamic[data-loaded]')
        .then(frame => controller.switchToFrame(frame));
    },
    url(url) {
      const defaultCaps = [
        'a2a',
        'focus-rect',
        'foo',
        'keyboard',
        'swipe',
        'iframeScroll',
      ];
      // Correctly append extra params in original url
      url = url.replace('#', '&');
      // TODO(estherkim): somehow allow non-8000 port and domain
      return (
        `http://localhost:8000/examples/viewer.html#href=${url}` +
        `&caps=${defaultCaps.join(',')}`
      );
    },
  },

  [AmpdocEnvironment.SHADOW_DEMO]: {
    async ready(controller) {
      // TODO(cvializ): this is a HACK
      // There should be a better way to detect that the shadowdoc is ready.
      const shadowHost = await controller.findElement(
        '.amp-doc-host[style="visibility: visible;"]'
      );
      const doc = await controller.getDocumentElement();
      const rect = await controller.getElementRect(shadowHost);
      // Scroll the shadow host into view before entering its shadow root.
      await controller./*OK*/ scrollTo(doc, {left: rect.left, top: rect.top});
      await controller.switchToShadow(shadowHost);
    },
    url(url) {
      // TODO(estherkim): somehow allow non-8000 port and domain
      return `http://localhost:8000/pwa#href=${url}`;
    },
  },

  [AmpdocEnvironment.A4A_FIE]: {
    // Friendly-iframe ad: switch into the ad's iframe before testing.
    async ready(controller) {
      return controller
        .findElement('amp-ad > iframe')
        .then(frame => controller.switchToFrame(frame));
    },
    url(url) {
      return url.replace(HOST, HOST + '/a4a');
    },
  },

  [AmpdocEnvironment.A4A_INABOX]: {
    async ready(controller) {
      return controller
        .findElement('#inabox-frame')
        .then(frame => controller.switchToFrame(frame));
    },
    url(url) {
      return url.replace(HOST, HOST + '/inabox');
    },
  },

  [AmpdocEnvironment.A4A_INABOX_FRIENDLY]: {
    async ready(controller) {
      return controller
        .findElement('#inabox-frame')
        .then(frame => controller.switchToFrame(frame));
    },
    url(url) {
      return url.replace(HOST, HOST + '/inabox-friendly');
    },
  },

  [AmpdocEnvironment.A4A_INABOX_SAFEFRAME]: {
    async ready(controller) {
      return controller
        .findElement('#inabox-frame')
        .then(frame => controller.switchToFrame(frame));
    },
    url(url) {
      return url.replace(HOST, HOST + '/inabox-safeframe');
    },
  },
};
/**
 * Provides AMP-related utilities for E2E Functional Tests.
 */
class AmpDriver {
  /**
   * @param {!../functional-test-controller.FunctionalTestController} controller
   */
  constructor(controller) {
    /** @private @const */
    this.controller_ = controller;
  }

  /**
   * Toggles an experiment in an AMP document. Uses the current domain.
   * @param {string} name
   * @param {boolean} toggle
   * @return {!Promise}
   */
  async toggleExperiment(name, toggle) {
    // The callback below is serialized and executed in the browser page,
    // so it may only reference its own arguments and page globals.
    await this.controller_.evaluate(
      (name, toggle) => {
        (window.AMP = window.AMP || []).push(AMP => {
          AMP.toggleExperiment(name, toggle);
        });
      },
      name,
      toggle
    );
  }

  /**
   * Navigate the browser to a URL that will display the given url in the
   * given environment.
   * @param {!AmpdocEnvironment} environment
   * @param {string} url
   */
  async navigateToEnvironment(environment, url) {
    const ampEnv = EnvironmentBehaviorMap[environment];
    await this.controller_.navigateTo(ampEnv.url(url));

    try {
      await ampEnv.ready(this.controller_);
    } catch (e) {
      // Take a snapshot of current DOM for debugging.
      const documentElement = await this.controller_.getDocumentElement();
      const html = await this.controller_.getElementProperty(
        documentElement,
        'innerHTML'
      );
      throw new Error(e.message + '\n' + html);
    }
  }
}
// Public surface of this module: the E2E driver plus the environment enum.
module.exports = {
  AmpDriver,
  AmpdocEnvironment,
};
| apache-2.0 |
WillJiang/WillJiang | src/xwork-core/src/main/java/com/opensymphony/xwork2/conversion/impl/DefaultTypeConverterHolder.java | 3103 | package com.opensymphony.xwork2.conversion.impl;
import com.opensymphony.xwork2.conversion.TypeConverter;
import com.opensymphony.xwork2.conversion.TypeConverterHolder;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
/**
* Default implementation of {@link TypeConverterHolder}
*/
public class DefaultTypeConverterHolder implements TypeConverterHolder {

    /**
     * Record class and its type converter mapping.
     * <pre>
     * - String - classname as String
     * - TypeConverter - instance of TypeConverter
     * </pre>
     */
    private HashMap<String, TypeConverter> defaultMappings = new HashMap<String, TypeConverter>(); // non-action (eg. returned value)

    /**
     * Target class conversion Mappings.
     * <pre>
     * Map&lt;Class, Map&lt;String, Object&gt;&gt;
     *  - Class -&gt; convert to class
     *  - Map&lt;String, Object&gt;
     *    - String -&gt; property name
     *      eg. Element_property, property etc.
     *    - Object -&gt; String to represent properties
     *      eg. value part of
     *      KeyProperty_property=id
     *      -&gt; TypeConverter to represent an Ognl TypeConverter
     *      eg. value part of
     *      property=foo.bar.MyConverter
     *      -&gt; Class to represent a class
     *      eg. value part of
     *      Element_property=foo.bar.MyObject
     * </pre>
     */
    private HashMap<Class, Map<String, Object>> mappings = new HashMap<Class, Map<String, Object>>(); // action

    /**
     * Unavailable target class conversion mappings, serves as a simple cache.
     */
    private HashSet<Class> noMapping = new HashSet<Class>(); // action

    /**
     * Record classes that doesn't have conversion mapping defined.
     * <pre>
     * - String -&gt; classname as String
     * </pre>
     */
    protected HashSet<String> unknownMappings = new HashSet<String>(); // non-action (eg. returned value)

    /**
     * Registers the default type converter for the given class name and clears
     * any stale "unknown mapping" marker previously recorded for it.
     */
    public void addDefaultMapping(String className, TypeConverter typeConverter) {
        defaultMappings.put(className, typeConverter);
        // Set#remove is a no-op when the element is absent, so no contains()
        // pre-check is needed.
        unknownMappings.remove(className);
    }

    /** Returns whether a default converter is registered for the class name. */
    public boolean containsDefaultMapping(String className) {
        return defaultMappings.containsKey(className);
    }

    /** Returns the default converter for the class name, or null if none. */
    public TypeConverter getDefaultMapping(String className) {
        return defaultMappings.get(className);
    }

    /** Returns the per-property conversion mapping for the class, or null. */
    public Map<String, Object> getMapping(Class clazz) {
        return mappings.get(clazz);
    }

    /** Stores the per-property conversion mapping for the class. */
    public void addMapping(Class clazz, Map<String, Object> mapping) {
        mappings.put(clazz, mapping);
    }

    /** Returns whether the class is cached as having no conversion mapping. */
    public boolean containsNoMapping(Class clazz) {
        return noMapping.contains(clazz);
    }

    /** Caches the class as having no conversion mapping. */
    public void addNoMapping(Class clazz) {
        noMapping.add(clazz);
    }

    /** Returns whether the class name is cached as having no default mapping. */
    public boolean containsUnknownMapping(String className) {
        return unknownMappings.contains(className);
    }

    /** Caches the class name as having no default mapping. */
    public void addUnknownMapping(String className) {
        unknownMappings.add(className);
    }
}
| apache-2.0 |
Azure/azure-mobile-apps-net-server | test/Microsoft.Azure.Mobile.Server.Test/Controllers/MobileAppControllerCacheTests.cs | 2307 | // ----------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// ----------------------------------------------------------------------------
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Threading.Tasks;
using System.Web.Http;
using Microsoft.Azure.Mobile.Server.Config;
using Microsoft.Owin.Testing;
using Owin;
using Xunit;
namespace Microsoft.Azure.Mobile.Server.Controllers
{
/// <summary>
/// Verifies that mobile-app API controllers add the expected no-cache
/// response headers, both for attribute-routed and route-table-routed
/// controllers, using an in-memory OWIN test server.
/// </summary>
public class MobileAppControllerCacheTests
{
    // Client bound to the in-memory test server created in the constructor.
    private HttpClient client;

    public MobileAppControllerCacheTests()
    {
        TestServer server = this.CreateTestServer();
        this.client = server.HttpClient;
    }

    [Fact]
    public async Task ApiController_AttributeRoute_AddsCacheHeaders()
    {
        HttpResponseMessage response = await this.client.GetAsync("api/attribute/test");
        VerifyResponse(response);
    }

    [Fact]
    public async Task ApiController_RouteTable_AddsCacheHeaders()
    {
        HttpResponseMessage response = await this.client.GetAsync("api/testapi");
        VerifyResponse(response);

        // make sure the formatters are correctly returning json by default
        Assert.Equal("application/json", response.Content.Headers.ContentType.MediaType);
        Assert.Equal("\"hello world\"", await response.Content.ReadAsStringAsync());
    }

    // Asserts the no-cache contract: 200 OK, Cache-Control: no-cache, and a
    // single valueless "Pragma: no-cache" header.
    private static void VerifyResponse(HttpResponseMessage response)
    {
        Assert.Equal(HttpStatusCode.OK, response.StatusCode);
        Assert.True(response.Headers.CacheControl.NoCache);
        var pragma = response.Headers.Pragma.Single();
        Assert.Equal("no-cache", pragma.Name);
        Assert.Null(pragma.Value);
    }

    // Builds an in-memory OWIN host with attribute routes mapped and the
    // mobile-app API controller configuration applied.
    private TestServer CreateTestServer()
    {
        var config = new HttpConfiguration();
        config.MapHttpAttributeRoutes();
        new MobileAppConfiguration()
            .MapApiControllers()
            .ApplyTo(config);
        return TestServer.Create(appBuilder =>
        {
            appBuilder.UseWebApi(config);
        });
    }
}
} | apache-2.0 |
GBGamer/rust | src/test/run-pass/traits/traits-impl-object-overlap-issue-23853.rs | 927 | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// Test that we are able to compile the case where both a blanket impl
// and the object type itself supply the required trait obligation.
// In this case, the blanket impl for `Foo` applies to any type,
// including `Bar`, but the object type `Bar` also implicitly supplies
// this context.
// `Foo` is object-safe; `dummy` has a default body so impls need not define it.
trait Foo { fn dummy(&self) { } }
// `Bar` requires `Foo` as a supertrait, so the object type `Bar` implicitly
// carries a `Foo` obligation of its own.
trait Bar: Foo { }
// Blanket impl: every type (sized or not) implements `Foo`, including the
// object type `Bar` — this is the overlap with the supertrait-supplied `Foo`.
impl<T:?Sized> Foo for T { }
// Generic fn whose bound can be satisfied either by the blanket impl or by
// the object type's built-in supertrait obligation.
fn want_foo<B:?Sized+Foo>() { }
fn main() {
    // Must compile: both sources of the `Foo` obligation for `Bar` coexist.
    want_foo::<Bar>();
}
| apache-2.0 |
mosoft521/lemon | src/main/java/com/mossle/user/avatar/AvatarCache.java | 2121 | package com.mossle.user.avatar;
import java.util.HashSet;
import java.util.Set;
import javax.activation.DataSource;
import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import javax.cache.Cache;
import javax.cache.CacheManager;
import com.mossle.core.store.ByteArrayDataSource;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Caches user avatar images at multiple widths. The data cache maps
 * "userId:width" keys to raw bytes; the alias cache maps a userId to the set
 * of data-cache keys created for that user so they can all be evicted at once.
 */
public class AvatarCache {
    private static Logger logger = LoggerFactory.getLogger(AvatarCache.class);

    // Supplies the two JCache instances; injected via setCacheManager.
    private CacheManager cacheManager;

    // "userId:width" -> avatar image bytes.
    private Cache<String, byte[]> dataCache;

    // userId -> set of data-cache keys belonging to that user.
    private Cache<String, Set<String>> aliasCache;

    @PostConstruct
    public void init() {
        this.dataCache = cacheManager.getCache("avatar");
        this.aliasCache = cacheManager.getCache("avatar-alias");
    }

    /**
     * Returns the cached avatar for the given user and width, or null when
     * not cached.
     */
    public DataSource getDataSource(String userId, int width) {
        String key = userId + ":" + width;
        byte[] bytes = this.dataCache.get(key);

        if (bytes == null) {
            return null;
        }

        return new ByteArrayDataSource(bytes);
    }

    /**
     * Stores the avatar bytes for the given user and width, and records the
     * data-cache key in the user's alias set. Exceptions are logged and
     * swallowed (cache update is best-effort).
     */
    public void updateDataSource(String userId, int width, DataSource dataSource) {
        try {
            String key = userId + ":" + width;
            byte[] bytes = IOUtils.toByteArray(dataSource.getInputStream());

            Set<String> aliasValue = this.aliasCache.get(userId);

            if (aliasValue == null) {
                aliasValue = new HashSet<String>();
            }

            aliasValue.add(key);
            // JCache is store-by-value by default: mutating the set returned
            // by get() (or mutating after put()) does not update the cached
            // copy. Add the key first, then put, so the stored set always
            // contains it.
            this.aliasCache.put(userId, aliasValue);

            this.dataCache.put(key, bytes);
        } catch (Exception ex) {
            logger.error(ex.getMessage(), ex);
        }
    }

    /**
     * Evicts every cached avatar size for the given user, along with the
     * alias entry itself.
     */
    public void removeDataSource(String userId) {
        Set<String> aliasValue = this.aliasCache.get(userId);

        if (aliasValue == null) {
            return;
        }

        for (String alias : aliasValue) {
            this.dataCache.remove(alias);
        }

        // Drop the alias entry too; otherwise a stale key set survives the
        // removal and keeps growing across update/remove cycles.
        this.aliasCache.remove(userId);
    }

    @Resource
    public void setCacheManager(CacheManager cacheManager) {
        this.cacheManager = cacheManager;
    }
}
| apache-2.0 |
christophd/camel | components/camel-sql/src/main/java/org/apache/camel/component/sql/SqlComponent.java | 5512 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.sql;
import java.util.Map;
import javax.sql.DataSource;
import org.apache.camel.CamelContext;
import org.apache.camel.Endpoint;
import org.apache.camel.spi.Metadata;
import org.apache.camel.spi.annotations.Component;
import org.apache.camel.support.DefaultComponent;
import org.apache.camel.support.PropertyBindingSupport;
import org.apache.camel.util.PropertiesHelper;
import org.springframework.jdbc.core.JdbcTemplate;
/**
* The <a href="http://camel.apache.org/sql-component.html">SQL Component</a> is for working with databases using JDBC
* queries.
*/
@Component("sql")
public class SqlComponent extends DefaultComponent {

    // Component-level default DataSource; an endpoint-configured DataSource,
    // when present, takes precedence (see createEndpoint).
    @Metadata(autowired = true)
    private DataSource dataSource;

    // When true, occurrences of the placeholder token in the query are
    // rewritten to JDBC '?' markers.
    @Metadata(label = "advanced", defaultValue = "true")
    private boolean usePlaceholder = true;

    public SqlComponent() {
    }

    public SqlComponent(Class<? extends Endpoint> endpointClass) {
    }

    public SqlComponent(CamelContext context) {
        super(context);
    }

    public SqlComponent(CamelContext context, Class<? extends Endpoint> endpointClass) {
        super(context);
    }

    /**
     * Creates the SQL endpoint: rewrites placeholder tokens to '?' in the
     * query and the onConsume* options, resolves the DataSource (endpoint
     * config wins over the component default), and builds the JdbcTemplate
     * configured with any "template."-prefixed URI options.
     */
    @Override
    protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
        // Token that stands in for JDBC '?' markers in the URI query ('#' by default).
        String parameterPlaceholderSubstitute = getAndRemoveParameter(parameters, "placeholder", String.class, "#");

        String query = remaining;
        if (usePlaceholder) {
            // NOTE(review): String#replaceAll treats the placeholder as a
            // regular expression. The default "#" is safe, but a user-supplied
            // placeholder containing regex metacharacters would be interpreted
            // as a pattern — confirm whether that is intended.
            query = query.replaceAll(parameterPlaceholderSubstitute, "?");
        }

        // "consumer."-prefixed option names are the legacy spelling; fall back
        // to the plain name when the prefixed one is absent.
        String onConsume = getAndRemoveParameter(parameters, "consumer.onConsume", String.class);
        if (onConsume == null) {
            onConsume = getAndRemoveParameter(parameters, "onConsume", String.class);
        }
        if (onConsume != null && usePlaceholder) {
            onConsume = onConsume.replaceAll(parameterPlaceholderSubstitute, "?");
        }
        String onConsumeFailed = getAndRemoveParameter(parameters, "consumer.onConsumeFailed", String.class);
        if (onConsumeFailed == null) {
            onConsumeFailed = getAndRemoveParameter(parameters, "onConsumeFailed", String.class);
        }
        if (onConsumeFailed != null && usePlaceholder) {
            onConsumeFailed = onConsumeFailed.replaceAll(parameterPlaceholderSubstitute, "?");
        }
        String onConsumeBatchComplete = getAndRemoveParameter(parameters, "consumer.onConsumeBatchComplete", String.class);
        if (onConsumeBatchComplete == null) {
            onConsumeBatchComplete = getAndRemoveParameter(parameters, "onConsumeBatchComplete", String.class);
        }
        if (onConsumeBatchComplete != null && usePlaceholder) {
            onConsumeBatchComplete = onConsumeBatchComplete.replaceAll(parameterPlaceholderSubstitute, "?");
        }

        // create endpoint
        SqlEndpoint endpoint = new SqlEndpoint(uri, this);
        endpoint.setQuery(query);
        endpoint.setPlaceholder(parameterPlaceholderSubstitute);
        endpoint.setUsePlaceholder(isUsePlaceholder());
        endpoint.setOnConsume(onConsume);
        endpoint.setOnConsumeFailed(onConsumeFailed);
        endpoint.setOnConsumeBatchComplete(onConsumeBatchComplete);
        setProperties(endpoint, parameters);

        // endpoint configured data source takes precedence
        DataSource ds = dataSource;
        if (endpoint.getDataSource() != null) {
            ds = endpoint.getDataSource();
        }
        if (ds == null) {
            throw new IllegalArgumentException("DataSource must be configured");
        }

        // create template
        JdbcTemplate jdbcTemplate = new JdbcTemplate(ds);
        // Remaining "template."-prefixed URI options are bound onto the
        // JdbcTemplate itself (e.g. template.maxRows=...).
        Map<String, Object> templateOptions = PropertiesHelper.extractProperties(parameters, "template.");
        PropertyBindingSupport.bindProperties(getCamelContext(), jdbcTemplate, templateOptions);

        // set template on endpoint
        endpoint.setJdbcTemplate(jdbcTemplate);
        endpoint.setDataSource(ds);
        endpoint.setTemplateOptions(templateOptions);
        return endpoint;
    }

    /**
     * Sets the DataSource to use to communicate with the database.
     */
    public void setDataSource(DataSource dataSource) {
        this.dataSource = dataSource;
    }

    public DataSource getDataSource() {
        return dataSource;
    }

    /**
     * Sets whether to use placeholder and replace all placeholder characters with ? sign in the SQL queries.
     * <p/>
     * This option is default <tt>true</tt>
     */
    public void setUsePlaceholder(boolean usePlaceholder) {
        this.usePlaceholder = usePlaceholder;
    }

    public boolean isUsePlaceholder() {
        return usePlaceholder;
    }
}
| apache-2.0 |
mbebenita/shumway.ts | tests/baselines/reference/icomparable.js | 30 | var sc;
var x = sort(sc);
| apache-2.0 |
xoofx/roslyn | src/Compilers/CSharp/Test/Symbol/Symbols/Retargeting/NoPia.cs | 118253 | // Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Immutable;
using System.Linq;
using Microsoft.CodeAnalysis.CSharp.Symbols;
using Microsoft.CodeAnalysis.CSharp.Test.Utilities;
using Microsoft.CodeAnalysis.Test.Utilities;
using Xunit;
using Roslyn.Test.Utilities;
namespace Microsoft.CodeAnalysis.CSharp.UnitTests.Symbols.Retargeting
{
public class NoPia : CSharpTestBase
{
/// <summary>
/// Translation of Roslyn\Main\Open\Compilers\Test\Resources\Core\SymbolsTests\NoPia\Pia1.vb
/// Disassembly of Roslyn\Main\Open\Compilers\Test\Resources\Core\SymbolsTests\NoPia\Pia1.dll
/// </summary>
private static readonly string s_sourcePia1 =
@"
using System;
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
[assembly: Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"")]
[assembly: ImportedFromTypeLib(""Pia1.dll"")]
[Guid(""27e3e649-994b-4f58-b3c6-f8089a5f2c01""), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
[ComImport]
public interface I1
{
void Sub1(int x);
}
public struct S1
{
public int F1;
}
namespace NS1
{
[Guid(""27e3e649-994b-4f58-b3c6-f8089a5f2c02""), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
[ComImport]
public interface I2
{
void Sub1(int x);
}
public struct S2
{
public int F1;
}
}
";
/// <summary>
/// Disassembly of Roslyn\Main\Open\Compilers\Test\Resources\Core\SymbolsTests\NoPia\LocalTypes1.dll
/// </summary>
private static readonly string s_sourceLocalTypes1_IL =
@"
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using NS1;
[CompilerGenerated, Guid(""27e3e649-994b-4f58-b3c6-f8089a5f2c01""), InterfaceType(ComInterfaceType.InterfaceIsIUnknown), TypeIdentifier]
[ComImport]
public interface I1
{
}
public class LocalTypes1
{
public void Test1(I1 x, I2 y)
{
}
}
namespace NS1
{
[CompilerGenerated, Guid(""27e3e649-994b-4f58-b3c6-f8089a5f2c02""), InterfaceType(ComInterfaceType.InterfaceIsIUnknown), TypeIdentifier]
[ComImport]
public interface I2
{
}
}
";
/// <summary>
/// Translation of Roslyn\Main\Open\Compilers\Test\Resources\Core\SymbolsTests\NoPia\LocalTypes1.vb
/// </summary>
private static readonly string s_sourceLocalTypes1 =
@"
using NS1;
public class LocalTypes1
{
public void Test1(I1 x, I2 y)
{
}
}
";
/// <summary>
/// Disassembly of Roslyn\Main\Open\Compilers\Test\Resources\Core\SymbolsTests\NoPia\LocalTypes2.dll
/// </summary>
private static readonly string s_sourceLocalTypes2_IL =
@"
using NS1;
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
public class LocalTypes2
{
public void Test2(S1 x, S2 y)
{
}
}
[CompilerGenerated, TypeIdentifier(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"", ""S1"")]
public struct S1
{
public int F1;
}
namespace NS1
{
[CompilerGenerated, TypeIdentifier(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"", ""NS1.S2"")]
public struct S2
{
public int F1;
}
}
";
/// <summary>
/// Translation of Roslyn\Main\Open\Compilers\Test\Resources\Core\SymbolsTests\NoPia\LocalTypes2.vb
/// </summary>
private static readonly string s_sourceLocalTypes2 =
@"
using NS1;
public class LocalTypes2
{
public void Test2(S1 x, S2 y)
{
}
}
";
/// <summary>
/// Disassembly of Roslyn\Main\Open\Compilers\Test\Resources\Core\SymbolsTests\NoPia\LocalTypes3.dll
/// </summary>
private static readonly string s_sourceLocalTypes3_IL =
@"
using System;
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Collections.Generic;
public class C31<T>
{
public interface I31<S>
{
}
}
public class C32<T>
{
}
public class C33
{
}
[CompilerGenerated, Guid(""27e3e649-994b-4f58-b3c6-f8089a5f2c01""), InterfaceType(ComInterfaceType.InterfaceIsIUnknown), TypeIdentifier]
[ComImport]
public interface I1
{
}
public interface I32<S>
{
}
public class LocalTypes3
{
public C31<C33>.I31<C33> Test1()
{
return null;
}
public C31<C33>.I31<I1> Test2()
{
return null;
}
public C31<I1>.I31<C33> Test3()
{
return null;
}
public C31<C33>.I31<I32<I1>> Test4()
{
return null;
}
public C31<I32<I1>>.I31<C33> Test5()
{
return null;
}
public List<I1> Test6()
{
return null;
}
}
";
/// <summary>
/// Translation of Roslyn\Main\Open\Compilers\Test\Resources\Core\SymbolsTests\NoPia\LocalTypes3.vb
/// </summary>
private static readonly string s_sourceLocalTypes3 =
@"
using System;
using System.Collections.Generic;
public class C31<T>
{
public interface I31<S>
{
}
}
public class C32<T>
{
}
public class C33
{
}
public interface I32<S>
{
}
public class LocalTypes3
{
public C31<C33>.I31<C33> Test1()
{
return null;
}
public C31<C33>.I31<I1> Test2()
{
return null;
}
public C31<I1>.I31<C33> Test3()
{
return null;
}
public C31<C33>.I31<I32<I1>> Test4()
{
return null;
}
public C31<I32<I1>>.I31<C33> Test5()
{
return null;
}
public List<I1> Test6()
{
return null;
}
}
";
[ClrOnlyFact]
public void HideLocalTypeDefinitions()
{
    // Compile the two assemblies whose embedded (TypeIdentifier-marked)
    // local types must be hidden from ordinary symbol lookup.
    var LocalTypes1 = CreateCompilationWithMscorlib(s_sourceLocalTypes1_IL, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes1");
    CompileAndVerify(LocalTypes1);
    var LocalTypes2 = CreateCompilationWithMscorlib(s_sourceLocalTypes2_IL, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes2");
    CompileAndVerify(LocalTypes2);

    var assemblies = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
                                                                 null,
                                                                 new MetadataReference[] { MscorlibRef });

    var localTypes1 = assemblies[0].Modules[0];
    var localTypes2 = assemblies[1].Modules[0];

    Assert.Same(assemblies[2], LocalTypes1.Assembly.CorLibrary);
    Assert.Same(assemblies[2], LocalTypes2.Assembly.CorLibrary);

    // LocalTypes1: only the LocalTypes1 class and the NS1 namespace are
    // visible; the embedded I1/S1 local types are not surfaced as members.
    Assert.Equal(2, localTypes1.GlobalNamespace.GetMembers().Length);
    Assert.Equal(2, localTypes1.GlobalNamespace.GetMembersUnordered().Length);
    Assert.Equal(0, localTypes1.GlobalNamespace.GetMembers("I1").Length);
    Assert.Equal(0, localTypes1.GlobalNamespace.GetMembers("S1").Length);
    Assert.Equal(1, localTypes1.GlobalNamespace.GetTypeMembers().Length);
    Assert.Equal(0, localTypes1.GlobalNamespace.GetTypeMembers("I1").Length);
    Assert.Equal(0, localTypes1.GlobalNamespace.GetTypeMembers("S1").Length);
    Assert.Equal(0, localTypes1.GlobalNamespace.GetTypeMembers("I1", 0).Length);
    Assert.Equal(0, localTypes1.GlobalNamespace.GetTypeMembers("S1", 0).Length);
    Assert.Equal(0, localTypes1.GlobalNamespace.GetMembers("NS1").OfType<NamespaceSymbol>().Single().
                      GetTypeMembers().Length);

    // LocalTypes2: same expectations for the embedded struct local types.
    Assert.Equal(2, localTypes2.GlobalNamespace.GetMembers().Length);
    Assert.Equal(2, localTypes2.GlobalNamespace.GetMembersUnordered().Length);
    Assert.Equal(0, localTypes2.GlobalNamespace.GetMembers("I1").Length);
    Assert.Equal(0, localTypes2.GlobalNamespace.GetMembers("S1").Length);
    Assert.Equal(1, localTypes2.GlobalNamespace.GetTypeMembers().Length);
    Assert.Equal(0, localTypes2.GlobalNamespace.GetTypeMembers("I1").Length);
    Assert.Equal(0, localTypes2.GlobalNamespace.GetTypeMembers("S1").Length);
    Assert.Equal(0, localTypes2.GlobalNamespace.GetTypeMembers("I1", 0).Length);
    Assert.Equal(0, localTypes2.GlobalNamespace.GetTypeMembers("S1", 0).Length);
    Assert.Equal(0, localTypes2.GlobalNamespace.GetMembers("NS1").OfType<NamespaceSymbol>().Single().
                      GetTypeMembers().Length);

    // Metadata-name lookup must also miss the embedded local types.
    var fullName_I1 = MetadataTypeName.FromFullName("I1");
    var fullName_I2 = MetadataTypeName.FromFullName("NS1.I2");
    var fullName_S1 = MetadataTypeName.FromFullName("S1");
    var fullName_S2 = MetadataTypeName.FromFullName("NS1.S2");

    Assert.IsType<MissingMetadataTypeSymbol.TopLevel>(localTypes1.LookupTopLevelMetadataType(ref fullName_I1));
    Assert.IsType<MissingMetadataTypeSymbol.TopLevel>(localTypes1.LookupTopLevelMetadataType(ref fullName_I2));
    Assert.IsType<MissingMetadataTypeSymbol.TopLevel>(localTypes1.LookupTopLevelMetadataType(ref fullName_S1));
    Assert.IsType<MissingMetadataTypeSymbol.TopLevel>(localTypes1.LookupTopLevelMetadataType(ref fullName_S2));

    Assert.Null(assemblies[0].GetTypeByMetadataName(fullName_I1.FullName));
    Assert.Null(assemblies[0].GetTypeByMetadataName(fullName_I2.FullName));
    Assert.Null(assemblies[0].GetTypeByMetadataName(fullName_S1.FullName));
    Assert.Null(assemblies[0].GetTypeByMetadataName(fullName_S2.FullName));

    Assert.IsType<MissingMetadataTypeSymbol.TopLevel>(localTypes2.LookupTopLevelMetadataType(ref fullName_I1));
    Assert.IsType<MissingMetadataTypeSymbol.TopLevel>(localTypes2.LookupTopLevelMetadataType(ref fullName_I2));
    Assert.IsType<MissingMetadataTypeSymbol.TopLevel>(localTypes2.LookupTopLevelMetadataType(ref fullName_S1));
    Assert.IsType<MissingMetadataTypeSymbol.TopLevel>(localTypes2.LookupTopLevelMetadataType(ref fullName_S2));

    Assert.Null(assemblies[1].GetTypeByMetadataName(fullName_I1.FullName));
    Assert.Null(assemblies[1].GetTypeByMetadataName(fullName_I2.FullName));
    Assert.Null(assemblies[1].GetTypeByMetadataName(fullName_S1.FullName));
    Assert.Null(assemblies[1].GetTypeByMetadataName(fullName_S2.FullName));
}
[ClrOnlyFact]
public void LocalTypeSubstitution1_1()
{
    // Compile the assemblies that embed NoPia local types (IL-shaped sources).
    var LocalTypes1 = CreateCompilationWithMscorlib(s_sourceLocalTypes1_IL, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes1");
    CompileAndVerify(LocalTypes1);
    var LocalTypes2 = CreateCompilationWithMscorlib(s_sourceLocalTypes2_IL, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes2");
    CompileAndVerify(LocalTypes2);

    // Resolve against Pia1 + mscorlib + MDTestLib1: every embedded local type
    // should unify with its canonical definition from Pia1.
    var assemblies1 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
                        null,
                        new MetadataReference[] {
                            TestReferences.SymbolsTests.NoPia.Pia1,
                            MscorlibRef,
                            TestReferences.SymbolsTests.MDTestLib1
                        }, null);

    var localTypes1_1 = assemblies1[0];
    var localTypes2_1 = assemblies1[1];
    var pia1_1 = assemblies1[2];

    var varI1 = pia1_1.GlobalNamespace.GetTypeMembers("I1").Single();
    var varS1 = pia1_1.GlobalNamespace.GetTypeMembers("S1").Single();
    var varNS1 = pia1_1.GlobalNamespace.GetMembers("NS1").OfType<NamespaceSymbol>().Single();
    var varI2 = varNS1.GetTypeMembers("I2").Single();
    var varS2 = varNS1.GetTypeMembers("S2").Single();

    NamedTypeSymbol classLocalTypes1;
    NamedTypeSymbol classLocalTypes2;

    classLocalTypes1 = localTypes1_1.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
    classLocalTypes2 = localTypes2_1.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();

    MethodSymbol test1;
    MethodSymbol test2;

    test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
    test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();

    ImmutableArray<ParameterSymbol> param;

    // Parameter types must be the very same symbols as the Pia1 canonical types.
    param = test1.Parameters;

    Assert.Same(varI1, param[0].Type);
    Assert.Same(varI2, param[1].Type);

    param = test2.Parameters;

    Assert.Same(varS1, param[0].Type);
    Assert.Same(varS2, param[1].Type);

    // Same references minus MDTestLib1: a different assembly-symbol set for
    // the local-type assemblies, but Pia1's symbols (and unification) are reused.
    var assemblies2 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
                        null,
                        new MetadataReference[] {
                            TestReferences.SymbolsTests.NoPia.Pia1,
                            MscorlibRef
                        },
                        null);

    var localTypes1_2 = assemblies2[0];
    var localTypes2_2 = assemblies2[1];

    Assert.NotSame(localTypes1_1, localTypes1_2);
    Assert.NotSame(localTypes2_1, localTypes2_2);
    Assert.Same(pia1_1, assemblies2[2]);

    classLocalTypes1 = localTypes1_2.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
    classLocalTypes2 = localTypes2_2.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();

    test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
    test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();

    param = test1.Parameters;

    Assert.Same(varI1, param[0].Type);
    Assert.Same(varI2, param[1].Type);

    param = test2.Parameters;

    Assert.Same(varS1, param[0].Type);
    Assert.Same(varS2, param[1].Type);

    // Pia1 only (no mscorlib): interfaces still unify, but struct local types
    // cannot be classified without their base class definition.
    var assemblies3 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
                        null,
                        new MetadataReference[] { TestReferences.SymbolsTests.NoPia.Pia1 },
                        null);

    var localTypes1_3 = assemblies3[0];
    var localTypes2_3 = assemblies3[1];
    var pia1_3 = assemblies3[2];

    Assert.NotSame(localTypes1_1, localTypes1_3);
    Assert.NotSame(localTypes2_1, localTypes2_3);
    Assert.NotSame(localTypes1_2, localTypes1_3);
    Assert.NotSame(localTypes2_2, localTypes2_3);
    Assert.NotSame(pia1_1, pia1_3);

    classLocalTypes1 = localTypes1_3.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
    classLocalTypes2 = localTypes2_3.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();

    test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
    test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();

    param = test1.Parameters;

    Assert.Same(pia1_3.GlobalNamespace.GetTypeMembers("I1").Single(), param[0].Type);
    Assert.Same(pia1_3.GlobalNamespace.GetMembers("NS1").OfType<NamespaceSymbol>().Single().GetTypeMembers("I2").Single(), param[1].Type);

    // This tests that we cannot find canonical type for an embedded structure if we don't know
    // whether it is a structure because we can't find definition of the base class. Mscorlib is
    // not referenced.
    param = test2.Parameters;

    NoPiaMissingCanonicalTypeSymbol missing;

    Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
    missing = (NoPiaMissingCanonicalTypeSymbol)param[0].Type;
    Assert.Same(localTypes2_3, missing.EmbeddingAssembly);
    Assert.Null(missing.Guid);
    Assert.Equal(varS1.ToTestDisplayString(), missing.FullTypeName);
    Assert.Equal("f9c2d51d-4f44-45f0-9eda-c9d599b58257", missing.Scope);
    Assert.Equal(varS1.ToTestDisplayString(), missing.Identifier);

    Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
    Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);

    // Re-resolving with the original reference set must return the identical
    // (cached) assembly symbols.
    var assemblies4 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
                        null,
                        new MetadataReference[] {
                            TestReferences.SymbolsTests.NoPia.Pia1,
                            MscorlibRef,
                            TestReferences.SymbolsTests.MDTestLib1
                        }, null);

    for (int i = 0; i < assemblies1.Length; i++)
    {
        Assert.Same(assemblies1[i], assemblies4[i]);
    }

    // Pia2 does not carry the expected GUIDs: every local type resolves to a
    // NoPiaMissingCanonicalTypeSymbol.
    var assemblies5 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
                        null,
                        new MetadataReference[] {
                            TestReferences.SymbolsTests.NoPia.Pia2,
                            MscorlibRef
                        }, null);

    var localTypes1_5 = assemblies5[0];
    var localTypes2_5 = assemblies5[1];

    classLocalTypes1 = localTypes1_5.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
    classLocalTypes2 = localTypes2_5.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();

    test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
    test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();

    param = test1.Parameters;

    Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
    missing = (NoPiaMissingCanonicalTypeSymbol)param[0].Type;
    Assert.Same(localTypes1_5, missing.EmbeddingAssembly);
    Assert.Equal("27e3e649-994b-4f58-b3c6-f8089a5f2c01", missing.Guid);
    Assert.Equal(varI1.ToTestDisplayString(), missing.FullTypeName);
    Assert.Null(missing.Scope);
    Assert.Null(missing.Identifier);

    Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
    Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);

    param = test2.Parameters;

    Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
    Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);

    Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
    Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);

    // Pia3: likewise, no canonical types can be found.
    var assemblies6 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
                        null,
                        new MetadataReference[] {
                            TestReferences.SymbolsTests.NoPia.Pia3,
                            MscorlibRef
                        }, null);

    var localTypes1_6 = assemblies6[0];
    var localTypes2_6 = assemblies6[1];

    classLocalTypes1 = localTypes1_6.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
    classLocalTypes2 = localTypes2_6.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();

    test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
    test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();

    param = test1.Parameters;

    Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
    Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);

    Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
    Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);

    param = test2.Parameters;

    Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
    Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);

    Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
    Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);

    // Pia4: the interfaces unify with real types, but the structures are missing.
    var assemblies7 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
                        null,
                        new MetadataReference[] {
                            TestReferences.SymbolsTests.NoPia.Pia4,
                            MscorlibRef
                        }, null);

    var localTypes1_7 = assemblies7[0];
    var localTypes2_7 = assemblies7[1];

    classLocalTypes1 = localTypes1_7.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
    classLocalTypes2 = localTypes2_7.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();

    test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
    test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();

    param = test1.Parameters;

    Assert.Equal(TypeKind.Interface, param[0].Type.TypeKind);
    Assert.Equal(TypeKind.Interface, param[1].Type.TypeKind);
    Assert.NotEqual(SymbolKind.ErrorType, param[0].Type.Kind);
    Assert.NotEqual(SymbolKind.ErrorType, param[1].Type.Kind);

    param = test2.Parameters;

    Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
    Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);

    Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
    Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);

    // Pia4 + Pia1 together: two candidate canonical I1 types, so resolution
    // produces a NoPiaAmbiguousCanonicalTypeSymbol recording both candidates.
    var assemblies8 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
                        null,
                        new MetadataReference[] {
                            TestReferences.SymbolsTests.NoPia.Pia4,
                            TestReferences.SymbolsTests.NoPia.Pia1,
                            MscorlibRef
                        }, null);

    var localTypes1_8 = assemblies8[0];
    var localTypes2_8 = assemblies8[1];
    var pia4_8 = assemblies8[2];
    var pia1_8 = assemblies8[3];

    classLocalTypes1 = localTypes1_8.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
    classLocalTypes2 = localTypes2_8.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();

    test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
    test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();

    param = test1.Parameters;

    NoPiaAmbiguousCanonicalTypeSymbol ambiguous;

    Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
    ambiguous = (NoPiaAmbiguousCanonicalTypeSymbol)param[0].Type;
    Assert.Same(localTypes1_8, ambiguous.EmbeddingAssembly);
    Assert.Same(pia4_8.GlobalNamespace.GetTypeMembers("I1").Single(), ambiguous.FirstCandidate);
    Assert.Same(pia1_8.GlobalNamespace.GetTypeMembers("I1").Single(), ambiguous.SecondCandidate);

    Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
    Assert.IsType<NoPiaAmbiguousCanonicalTypeSymbol>(param[1].Type);

    // Different reference sets must not share assembly symbols.
    var assemblies9 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
                        null,
                        new MetadataReference[] {
                            TestReferences.SymbolsTests.NoPia.Pia4,
                            MscorlibRef
                        }, null);

    var library1_9 = assemblies9[0];
    var localTypes1_9 = assemblies9[1];

    var assemblies10 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
                        null,
                        new MetadataReference[] {
                            TestReferences.SymbolsTests.NoPia.Pia4,
                            MscorlibRef,
                            TestReferences.SymbolsTests.MDTestLib1
                        }, null);

    var library1_10 = assemblies10[0];
    var localTypes1_10 = assemblies10[1];

    Assert.NotSame(library1_9, library1_10);
    Assert.NotSame(localTypes1_9, localTypes1_10);

    // Keep the earlier symbol sets alive so the caching assertions above are
    // not invalidated by garbage collection of weakly-held assembly symbols.
    GC.KeepAlive(localTypes1_1);
    GC.KeepAlive(localTypes2_1);
    GC.KeepAlive(pia1_1);
    GC.KeepAlive(localTypes1_9);
    GC.KeepAlive(library1_9);
}
[ClrOnlyFact]
public void LocalTypeSubstitution1_2()
{
// Verifies NoPia local type substitution when LocalTypes1/LocalTypes2 are source
// compilations that embed interop types from the Pia1 metadata (file) reference.
// Each "assembliesN" group below re-imports the pair against a different
// reference set to exercise the possible canonical-type resolution outcomes:
// successful unification, missing canonical type, and ambiguity.
var LocalTypes1 = CreateCompilationWithMscorlib(s_sourceLocalTypes1, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes1",
references: new[] { TestReferences.SymbolsTests.NoPia.Pia1.WithEmbedInteropTypes(true) });
CompileAndVerify(LocalTypes1);
var LocalTypes2 = CreateCompilationWithMscorlib(s_sourceLocalTypes2, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes2",
references: new[] { TestReferences.SymbolsTests.NoPia.Pia1.WithEmbedInteropTypes(true) });
CompileAndVerify(LocalTypes2);
// Pia1 + mscorlib + MDTestLib1: the embedded local types unify with the
// canonical types declared in Pia1 (asserted via Assert.Same below).
var assemblies1 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia1,
MscorlibRef,
TestReferences.SymbolsTests.MDTestLib1
});
var localTypes1_1 = assemblies1[0];
var localTypes2_1 = assemblies1[1];
var pia1_1 = assemblies1[2];
// Canonical types from Pia1: interfaces I1/NS1.I2 and structures S1/NS1.S2.
var varI1 = pia1_1.GlobalNamespace.GetTypeMembers("I1").Single();
var varS1 = pia1_1.GlobalNamespace.GetTypeMembers("S1").Single();
var varNS1 = pia1_1.GlobalNamespace.GetMembers("NS1").OfType<NamespaceSymbol>().Single();
var varI2 = varNS1.GetTypeMembers("I2").Single();
var varS2 = varNS1.GetTypeMembers("S2").Single();
NamedTypeSymbol classLocalTypes1;
NamedTypeSymbol classLocalTypes2;
classLocalTypes1 = localTypes1_1.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_1.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
MethodSymbol test1;
MethodSymbol test2;
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
ImmutableArray<ParameterSymbol> param;
param = test1.Parameters;
Assert.Same(varI1, param[0].Type);
Assert.Same(varI2, param[1].Type);
param = test2.Parameters;
Assert.Same(varS1, param[0].Type);
Assert.Same(varS2, param[1].Type);
// Same set minus MDTestLib1: the LocalTypes symbols are re-created, the Pia1
// assembly symbol is reused, and substitution still succeeds.
var assemblies2 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia1,
MscorlibRef
},
null);
var localTypes1_2 = assemblies2[0];
var localTypes2_2 = assemblies2[1];
Assert.NotSame(localTypes1_1, localTypes1_2);
Assert.NotSame(localTypes2_1, localTypes2_2);
Assert.Same(pia1_1, assemblies2[2]);
classLocalTypes1 = localTypes1_2.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_2.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
Assert.Same(varI1, param[0].Type);
Assert.Same(varI2, param[1].Type);
param = test2.Parameters;
Assert.Same(varS1, param[0].Type);
Assert.Same(varS2, param[1].Type);
// Pia1 only (no mscorlib): interfaces still unify with the (new) Pia1 symbol.
var assemblies3 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] { TestReferences.SymbolsTests.NoPia.Pia1 },
null);
var localTypes1_3 = assemblies3[0];
var localTypes2_3 = assemblies3[1];
var pia1_3 = assemblies3[2];
Assert.NotSame(localTypes1_1, localTypes1_3);
Assert.NotSame(localTypes2_1, localTypes2_3);
Assert.NotSame(localTypes1_2, localTypes1_3);
Assert.NotSame(localTypes2_2, localTypes2_3);
Assert.NotSame(pia1_1, pia1_3);
classLocalTypes1 = localTypes1_3.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_3.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
Assert.Same(pia1_3.GlobalNamespace.GetTypeMembers("I1").Single(), param[0].Type);
Assert.Same(pia1_3.GlobalNamespace.GetMembers("NS1").OfType<NamespaceSymbol>().Single().GetTypeMembers("I2").Single(), param[1].Type);
// This tests that we cannot find canonical type for an embedded structure if we don't know
// whether it is a structure because we can't find definition of the base class. Mscorlib is
// not referenced.
param = test2.Parameters;
NoPiaMissingCanonicalTypeSymbol missing;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
missing = (NoPiaMissingCanonicalTypeSymbol)param[0].Type;
Assert.Same(localTypes2_3, missing.EmbeddingAssembly);
// The local type carries a TypeIdentifier scope/identifier pair, not a GUID.
Assert.Null(missing.Guid);
Assert.Equal(varS1.ToTestDisplayString(), missing.FullTypeName);
Assert.Equal("f9c2d51d-4f44-45f0-9eda-c9d599b58257", missing.Scope)
Assert.Equal(varS1.ToTestDisplayString(), missing.Identifier);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
// Identical reference set to assemblies1: every assembly symbol is reused.
var assemblies4 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia1,
MscorlibRef,
TestReferences.SymbolsTests.MDTestLib1
}, null);
for (int i = 0; i < assemblies1.Length; i++)
{
Assert.Same(assemblies1[i], assemblies4[i]);
}
// Pia2 instead of Pia1: the canonical interface is missing and is identified
// by its ComImport GUID rather than by a scope/identifier pair.
var assemblies5 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia2,
MscorlibRef
}, null);
var localTypes1_5 = assemblies5[0];
var localTypes2_5 = assemblies5[1];
classLocalTypes1 = localTypes1_5.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_5.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
missing = (NoPiaMissingCanonicalTypeSymbol)param[0].Type;
Assert.Same(localTypes1_5, missing.EmbeddingAssembly);
Assert.Equal("27e3e649-994b-4f58-b3c6-f8089a5f2c01", missing.Guid);
Assert.Equal(varI1.ToTestDisplayString(), missing.FullTypeName);
Assert.Null(missing.Scope);
Assert.Null(missing.Identifier);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
param = test2.Parameters;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
// Pia3: no canonical types can be found at all.
var assemblies6 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia3,
MscorlibRef
}, null);
var localTypes1_6 = assemblies6[0];
var localTypes2_6 = assemblies6[1];
classLocalTypes1 = localTypes1_6.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_6.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
param = test2.Parameters;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
// Pia4: the interfaces resolve to Pia4's types, but the structures remain missing.
var assemblies7 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia4,
MscorlibRef
}, null);
var localTypes1_7 = assemblies7[0];
var localTypes2_7 = assemblies7[1];
classLocalTypes1 = localTypes1_7.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_7.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
Assert.Equal(TypeKind.Interface, param[0].Type.TypeKind);
Assert.Equal(TypeKind.Interface, param[1].Type.TypeKind);
Assert.NotEqual(SymbolKind.ErrorType, param[0].Type.Kind);
Assert.NotEqual(SymbolKind.ErrorType, param[1].Type.Kind);
param = test2.Parameters;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
// Pia4 + Pia1 together: the canonical I1 is ambiguous between the two PIAs.
var assemblies8 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia4,
TestReferences.SymbolsTests.NoPia.Pia1,
MscorlibRef
}, null);
var localTypes1_8 = assemblies8[0];
var localTypes2_8 = assemblies8[1];
var pia4_8 = assemblies8[2];
var pia1_8 = assemblies8[3];
classLocalTypes1 = localTypes1_8.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_8.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
NoPiaAmbiguousCanonicalTypeSymbol ambiguous;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
ambiguous = (NoPiaAmbiguousCanonicalTypeSymbol)param[0].Type;
Assert.Same(localTypes1_8, ambiguous.EmbeddingAssembly);
// Candidates are reported in reference order: Pia4 first, then Pia1.
Assert.Same(pia4_8.GlobalNamespace.GetTypeMembers("I1").Single(), ambiguous.FirstCandidate);
Assert.Same(pia1_8.GlobalNamespace.GetTypeMembers("I1").Single(), ambiguous.SecondCandidate);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaAmbiguousCanonicalTypeSymbol>(param[1].Type);
// Differing reference sets (with/without MDTestLib1) must yield distinct assembly symbols.
var assemblies9 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia4,
MscorlibRef
}, null);
var library1_9 = assemblies9[0];
var localTypes1_9 = assemblies9[1];
var assemblies10 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia4,
MscorlibRef,
TestReferences.SymbolsTests.MDTestLib1
}, null);
var library1_10 = assemblies10[0];
var localTypes1_10 = assemblies10[1];
Assert.NotSame(library1_9, library1_10);
Assert.NotSame(localTypes1_9, localTypes1_10);
// Keep the scenario-1/9 roots alive so cached assembly symbols are not collected mid-test.
GC.KeepAlive(localTypes1_1);
GC.KeepAlive(localTypes2_1);
GC.KeepAlive(pia1_1);
GC.KeepAlive(localTypes1_9);
GC.KeepAlive(library1_9);
}
[ClrOnlyFact]
public void LocalTypeSubstitution1_3()
{
// Same scenarios as LocalTypeSubstitution1_2, but Pia1 is itself built from
// source and referenced as a CSharpCompilationReference (embedInteropTypes: true)
// rather than as a metadata file. Each "assembliesN" group below re-imports the
// LocalTypes pair against a different reference set.
var Pia1 = CreateCompilationWithMscorlib(s_sourcePia1, options: TestOptions.ReleaseDll, assemblyName: "Pia1");
CompileAndVerify(Pia1);
var LocalTypes1 = CreateCompilationWithMscorlib(s_sourceLocalTypes1, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes1",
references: new MetadataReference[] { new CSharpCompilationReference(Pia1, embedInteropTypes: true) });
CompileAndVerify(LocalTypes1);
var LocalTypes2 = CreateCompilationWithMscorlib(s_sourceLocalTypes2, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes2",
references: new MetadataReference[] { new CSharpCompilationReference(Pia1, embedInteropTypes: true) });
CompileAndVerify(LocalTypes2);
// Pia1 + mscorlib + MDTestLib1: the embedded local types unify with the
// canonical types declared in Pia1 (asserted via Assert.Same below).
var assemblies1 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia1,
MscorlibRef,
TestReferences.SymbolsTests.MDTestLib1
}, null);
var localTypes1_1 = assemblies1[0];
var localTypes2_1 = assemblies1[1];
var pia1_1 = assemblies1[2];
// Canonical types from Pia1: interfaces I1/NS1.I2 and structures S1/NS1.S2.
var varI1 = pia1_1.GlobalNamespace.GetTypeMembers("I1").Single();
var varS1 = pia1_1.GlobalNamespace.GetTypeMembers("S1").Single();
var varNS1 = pia1_1.GlobalNamespace.GetMembers("NS1").OfType<NamespaceSymbol>().Single();
var varI2 = varNS1.GetTypeMembers("I2").Single();
var varS2 = varNS1.GetTypeMembers("S2").Single();
NamedTypeSymbol classLocalTypes1;
NamedTypeSymbol classLocalTypes2;
classLocalTypes1 = localTypes1_1.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_1.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
MethodSymbol test1;
MethodSymbol test2;
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
ImmutableArray<ParameterSymbol> param;
param = test1.Parameters;
Assert.Same(varI1, param[0].Type);
Assert.Same(varI2, param[1].Type);
param = test2.Parameters;
Assert.Same(varS1, param[0].Type);
Assert.Same(varS2, param[1].Type);
// Same set minus MDTestLib1: the LocalTypes symbols are re-created, the Pia1
// assembly symbol is reused, and substitution still succeeds.
var assemblies2 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia1,
MscorlibRef
},
null);
var localTypes1_2 = assemblies2[0];
var localTypes2_2 = assemblies2[1];
Assert.NotSame(localTypes1_1, localTypes1_2);
Assert.NotSame(localTypes2_1, localTypes2_2);
Assert.Same(pia1_1, assemblies2[2]);
classLocalTypes1 = localTypes1_2.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_2.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
Assert.Same(varI1, param[0].Type);
Assert.Same(varI2, param[1].Type);
param = test2.Parameters;
Assert.Same(varS1, param[0].Type);
Assert.Same(varS2, param[1].Type);
// Pia1 only (no mscorlib): interfaces still unify with the (new) Pia1 symbol.
var assemblies3 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] { TestReferences.SymbolsTests.NoPia.Pia1 },
null);
var localTypes1_3 = assemblies3[0];
var localTypes2_3 = assemblies3[1];
var pia1_3 = assemblies3[2];
Assert.NotSame(localTypes1_1, localTypes1_3);
Assert.NotSame(localTypes2_1, localTypes2_3);
Assert.NotSame(localTypes1_2, localTypes1_3);
Assert.NotSame(localTypes2_2, localTypes2_3);
Assert.NotSame(pia1_1, pia1_3);
classLocalTypes1 = localTypes1_3.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_3.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
Assert.Same(pia1_3.GlobalNamespace.GetTypeMembers("I1").Single(), param[0].Type);
Assert.Same(pia1_3.GlobalNamespace.GetMembers("NS1").OfType<NamespaceSymbol>().Single().GetTypeMembers("I2").Single(), param[1].Type);
// This tests that we cannot find canonical type for an embedded structure if we don't know
// whether it is a structure because we can't find definition of the base class. Mscorlib is
// not referenced.
param = test2.Parameters;
NoPiaMissingCanonicalTypeSymbol missing;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
missing = (NoPiaMissingCanonicalTypeSymbol)param[0].Type;
Assert.Same(localTypes2_3, missing.EmbeddingAssembly);
// The local type carries a TypeIdentifier scope/identifier pair, not a GUID.
Assert.Null(missing.Guid);
Assert.Equal(varS1.ToTestDisplayString(), missing.FullTypeName);
Assert.Equal("f9c2d51d-4f44-45f0-9eda-c9d599b58257", missing.Scope);
Assert.Equal(varS1.ToTestDisplayString(), missing.Identifier);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
// Identical reference set to assemblies1: every assembly symbol is reused.
var assemblies4 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia1,
MscorlibRef,
TestReferences.SymbolsTests.MDTestLib1
}, null);
for (int i = 0; i < assemblies1.Length; i++)
{
Assert.Same(assemblies1[i], assemblies4[i]);
}
// Pia2 instead of Pia1: the canonical interface is missing and is identified
// by its ComImport GUID rather than by a scope/identifier pair.
var assemblies5 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia2,
MscorlibRef
}, null);
var localTypes1_5 = assemblies5[0];
var localTypes2_5 = assemblies5[1];
classLocalTypes1 = localTypes1_5.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_5.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
missing = (NoPiaMissingCanonicalTypeSymbol)param[0].Type;
Assert.Same(localTypes1_5, missing.EmbeddingAssembly);
Assert.Equal("27e3e649-994b-4f58-b3c6-f8089a5f2c01", missing.Guid);
Assert.Equal(varI1.ToTestDisplayString(), missing.FullTypeName);
Assert.Null(missing.Scope);
Assert.Null(missing.Identifier);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
param = test2.Parameters;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
// Pia3: no canonical types can be found at all.
var assemblies6 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia3,
MscorlibRef
}, null);
var localTypes1_6 = assemblies6[0];
var localTypes2_6 = assemblies6[1];
classLocalTypes1 = localTypes1_6.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_6.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
param = test2.Parameters;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
// Pia4: the interfaces resolve to Pia4's types, but the structures remain missing.
var assemblies7 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia4,
MscorlibRef
}, null);
var localTypes1_7 = assemblies7[0];
var localTypes2_7 = assemblies7[1];
classLocalTypes1 = localTypes1_7.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_7.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
Assert.Equal(TypeKind.Interface, param[0].Type.TypeKind);
Assert.Equal(TypeKind.Interface, param[1].Type.TypeKind);
Assert.NotEqual(SymbolKind.ErrorType, param[0].Type.Kind);
Assert.NotEqual(SymbolKind.ErrorType, param[1].Type.Kind);
param = test2.Parameters;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[0].Type);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaMissingCanonicalTypeSymbol>(param[1].Type);
// Pia4 + Pia1 together: the canonical I1 is ambiguous between the two PIAs.
var assemblies8 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia4,
TestReferences.SymbolsTests.NoPia.Pia1,
MscorlibRef
}, null);
var localTypes1_8 = assemblies8[0];
var localTypes2_8 = assemblies8[1];
var pia4_8 = assemblies8[2];
var pia1_8 = assemblies8[3];
classLocalTypes1 = localTypes1_8.GlobalNamespace.GetTypeMembers("LocalTypes1").Single();
classLocalTypes2 = localTypes2_8.GlobalNamespace.GetTypeMembers("LocalTypes2").Single();
test1 = classLocalTypes1.GetMembers("Test1").OfType<MethodSymbol>().Single();
test2 = classLocalTypes2.GetMembers("Test2").OfType<MethodSymbol>().Single();
param = test1.Parameters;
NoPiaAmbiguousCanonicalTypeSymbol ambiguous;
Assert.Equal(SymbolKind.ErrorType, param[0].Type.Kind);
ambiguous = (NoPiaAmbiguousCanonicalTypeSymbol)param[0].Type;
Assert.Same(localTypes1_8, ambiguous.EmbeddingAssembly);
// Candidates are reported in reference order: Pia4 first, then Pia1.
Assert.Same(pia4_8.GlobalNamespace.GetTypeMembers("I1").Single(), ambiguous.FirstCandidate);
Assert.Same(pia1_8.GlobalNamespace.GetTypeMembers("I1").Single(), ambiguous.SecondCandidate);
Assert.Equal(SymbolKind.ErrorType, param[1].Type.Kind);
Assert.IsType<NoPiaAmbiguousCanonicalTypeSymbol>(param[1].Type);
// Differing reference sets (with/without MDTestLib1) must yield distinct assembly symbols.
var assemblies9 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia4,
MscorlibRef
}, null);
var library1_9 = assemblies9[0];
var localTypes1_9 = assemblies9[1];
var assemblies10 = MetadataTestHelpers.GetSymbolsForReferences(new CSharpCompilation[] { LocalTypes1, LocalTypes2 },
null,
new MetadataReference[] {
TestReferences.SymbolsTests.NoPia.Pia4,
MscorlibRef,
TestReferences.SymbolsTests.MDTestLib1
}, null);
var library1_10 = assemblies10[0];
var localTypes1_10 = assemblies10[1];
Assert.NotSame(library1_9, library1_10);
Assert.NotSame(localTypes1_9, localTypes1_10);
// Keep the scenario-1/9 roots alive so cached assembly symbols are not collected mid-test.
GC.KeepAlive(localTypes1_1);
GC.KeepAlive(localTypes2_1);
GC.KeepAlive(pia1_1);
GC.KeepAlive(localTypes1_9);
GC.KeepAlive(library1_9);
}
[ClrOnlyFact]
public void CyclicReference_1()
{
    // Two compilations built over the same (cyclic) reference set must each get
    // their own assembly symbol for a reference containing embedded local types.
    var localTypesCompilation = CreateCompilationWithMscorlib(s_sourceLocalTypes1_IL, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes1");
    CompileAndVerify(localTypesCompilation);
    var localTypesReference = new CSharpCompilationReference(localTypesCompilation);
    var sharedReferences = new MetadataReference[]
    {
        TestReferences.SymbolsTests.MDTestLib1,
        TestReferences.SymbolsTests.Cyclic.Cyclic2.dll,
        TestReferences.SymbolsTests.NoPia.Pia1,
        localTypesReference
    };
    var firstCompilation = CSharpCompilation.Create("Cyclic1", references: sharedReferences);
    Assert.NotNull(firstCompilation.Assembly); // force creation of SourceAssemblySymbol
    var secondCompilation = CSharpCompilation.Create("Cyclic1", references: sharedReferences);
    Assert.NotNull(secondCompilation.Assembly); // force creation of SourceAssemblySymbol
    // The assembly symbol for the local-types reference must not be shared.
    Assert.NotSame(firstCompilation.GetReferencedAssemblySymbol(localTypesReference),
        secondCompilation.GetReferencedAssemblySymbol(localTypesReference));
    GC.KeepAlive(firstCompilation);
    GC.KeepAlive(secondCompilation);
}
[ClrOnlyFact]
public void CyclicReference_2()
{
    // As CyclicReference_1, but the local-types assembly is compiled from C#
    // source against the Pia1 metadata reference with interop types embedded.
    var localTypesCompilation = CreateCompilationWithMscorlib(s_sourceLocalTypes1, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes1",
        references: new[] { TestReferences.SymbolsTests.NoPia.Pia1.WithEmbedInteropTypes(true) });
    CompileAndVerify(localTypesCompilation);
    var localTypesReference = new CSharpCompilationReference(localTypesCompilation);
    var sharedReferences = new MetadataReference[]
    {
        TestReferences.SymbolsTests.MDTestLib1,
        TestReferences.SymbolsTests.Cyclic.Cyclic2.dll,
        TestReferences.SymbolsTests.NoPia.Pia1,
        localTypesReference
    };
    var firstCompilation = CSharpCompilation.Create("Cyclic1", references: sharedReferences);
    Assert.NotNull(firstCompilation.Assembly); // force creation of SourceAssemblySymbol
    var secondCompilation = CSharpCompilation.Create("Cyclic1", references: sharedReferences);
    Assert.NotNull(secondCompilation.Assembly); // force creation of SourceAssemblySymbol
    // The assembly symbol for the local-types reference must not be shared.
    Assert.NotSame(firstCompilation.GetReferencedAssemblySymbol(localTypesReference),
        secondCompilation.GetReferencedAssemblySymbol(localTypesReference));
    GC.KeepAlive(firstCompilation);
    GC.KeepAlive(secondCompilation);
}
[ClrOnlyFact]
public void CyclicReference_3()
{
    // As CyclicReference_1, but both Pia1 and the local-types assembly are source
    // compilations, with the PIA referenced as a compilation reference.
    var piaCompilation = CreateCompilationWithMscorlib(s_sourcePia1, options: TestOptions.ReleaseDll, assemblyName: "Pia1");
    CompileAndVerify(piaCompilation);
    var piaReference = new CSharpCompilationReference(piaCompilation);
    var localTypesCompilation = CreateCompilationWithMscorlib(s_sourceLocalTypes1, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes1",
        references: new MetadataReference[] { new CSharpCompilationReference(piaCompilation, embedInteropTypes: true) });
    CompileAndVerify(localTypesCompilation);
    var localTypesReference = new CSharpCompilationReference(localTypesCompilation);
    var sharedReferences = new MetadataReference[]
    {
        TestReferences.SymbolsTests.MDTestLib1,
        TestReferences.SymbolsTests.Cyclic.Cyclic2.dll,
        piaReference,
        localTypesReference
    };
    var firstCompilation = CSharpCompilation.Create("Cyclic1", references: sharedReferences);
    Assert.NotNull(firstCompilation.Assembly); // force creation of SourceAssemblySymbol
    var secondCompilation = CSharpCompilation.Create("Cyclic1", references: sharedReferences);
    Assert.NotNull(secondCompilation.Assembly); // force creation of SourceAssemblySymbol
    // The assembly symbol for the local-types reference must not be shared.
    Assert.NotSame(firstCompilation.GetReferencedAssemblySymbol(localTypesReference),
        secondCompilation.GetReferencedAssemblySymbol(localTypesReference));
    GC.KeepAlive(firstCompilation);
    GC.KeepAlive(secondCompilation);
}
[ClrOnlyFact]
public void GenericsClosedOverLocalTypes1_1()
{
// Verifies handling of generic instantiations closed over NoPia local types:
// such instantiations are illegal and are represented by
// NoPiaIllegalGenericInstantiationSymbol (an error type), while methods whose
// signatures do not close a generic over a local type import cleanly.
var LocalTypes3 = CreateCompilationWithMscorlib(s_sourceLocalTypes3_IL, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes3");
CompileAndVerify(LocalTypes3);
// First import: Pia1 only (no mscorlib).
var assemblies = MetadataTestHelpers.GetSymbolsForReferences(
new CSharpCompilation[] { LocalTypes3 },
null,
new MetadataReference[]
{
TestReferences.SymbolsTests.NoPia.Pia1
}, null);
var asmLocalTypes3 = assemblies[0];
var localTypes3 = asmLocalTypes3.GlobalNamespace.GetTypeMembers("LocalTypes3").Single();
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test1").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test2").OfType<MethodSymbol>().Single().ReturnType.Kind);
// Test3 returns a generic instantiation whose type argument is a local type.
Assert.Equal(SymbolKind.ErrorType, localTypes3.GetMembers("Test3").OfType<MethodSymbol>().Single().ReturnType.Kind);
NoPiaIllegalGenericInstantiationSymbol illegal = (NoPiaIllegalGenericInstantiationSymbol)localTypes3.GetMembers("Test3").OfType<MethodSymbol>().Single().ReturnType;
Assert.Equal("C31<I1>.I31<C33>", illegal.UnderlyingSymbol.ToTestDisplayString());
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test4").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test5").OfType<MethodSymbol>().Single().ReturnType);
// Second import: Pia1 + mscorlib; the illegal instantiations are reported the
// same way (Test6 is additionally checked in this configuration).
assemblies = MetadataTestHelpers.GetSymbolsForReferences(
new CSharpCompilation[] { LocalTypes3 },
null,
new MetadataReference[]
{
TestReferences.SymbolsTests.NoPia.Pia1,
MscorlibRef
}, null);
localTypes3 = assemblies[0].GlobalNamespace.GetTypeMembers("LocalTypes3").Single();
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test1").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test2").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test3").OfType<MethodSymbol>().Single().ReturnType);
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test4").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test5").OfType<MethodSymbol>().Single().ReturnType);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test6").OfType<MethodSymbol>().Single().ReturnType);
}
[ClrOnlyFact]
public void ValueTupleWithMissingCanonicalType()
{
// A ValueTuple instantiated with a NoPia local type (S1, marked with
// TypeIdentifier) whose canonical type cannot be found must surface as an
// error type — both when the compilation is imported directly and when it is
// imported through a metadata reference.
string source = @"
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace System
{
public struct ValueTuple<T1, T2>
{
public ValueTuple(T1 item1, T2 item2) { }
}
}
[CompilerGenerated, TypeIdentifier(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"", ""S1"")]
public struct S1 { }
public class C
{
public ValueTuple<S1, S1> Test1()
{
throw new Exception();
}
}
";
var comp = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll, assemblyName: "comp");
comp.VerifyDiagnostics();
CompileAndVerify(comp);
// Import the compilation itself: Test1's return type is an error type because
// S1's canonical type is missing.
var assemblies1 = MetadataTestHelpers.GetSymbolsForReferences(
new CSharpCompilation[] { comp },
null,
new MetadataReference[] { },
null);
Assert.Equal(SymbolKind.ErrorType, assemblies1[0].GlobalNamespace.GetMember<MethodSymbol>("C.Test1").ReturnType.Kind);
// Import via a metadata reference: same outcome.
var assemblies2 = MetadataTestHelpers.GetSymbolsForReferences(
new CSharpCompilation[] { },
null,
new MetadataReference[] { comp.ToMetadataReference() },
null);
Assert.Equal(SymbolKind.ErrorType, assemblies2[0].GlobalNamespace.GetMember<MethodSymbol>("C.Test1").ReturnType.Kind);
}
[ClrOnlyFact]
public void EmbeddedValueTuple()
{
// Here System.ValueTuple itself is the embedded local type (marked with
// TypeIdentifier). With no canonical definition available, any use of it —
// such as Test1's return type — surfaces as an error type, both when the
// compilation is imported directly and via a metadata reference.
string source = @"
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace System
{
[CompilerGenerated, TypeIdentifier(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"", ""ValueTuple"")]
public struct ValueTuple<T1, T2>
{
public ValueTuple(T1 item1, T2 item2) { }
}
}
public class C
{
public ValueTuple<int, int> Test1()
{
throw new Exception();
}
}
";
var comp = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll, assemblyName: "comp");
comp.VerifyDiagnostics();
// Import the compilation itself.
var assemblies1 = MetadataTestHelpers.GetSymbolsForReferences(
new CSharpCompilation[] { comp },
null,
new MetadataReference[] { },
null);
Assert.Equal(SymbolKind.ErrorType, assemblies1[0].GlobalNamespace.GetMember<MethodSymbol>("C.Test1").ReturnType.Kind);
// Import via a metadata reference: same outcome.
var assemblies2 = MetadataTestHelpers.GetSymbolsForReferences(
new CSharpCompilation[] { },
null,
new MetadataReference[] { comp.ToMetadataReference() },
null);
Assert.Equal(SymbolKind.ErrorType, assemblies2[0].GlobalNamespace.GetMember<MethodSymbol>("C.Test1").ReturnType.Kind);
}
[ClrOnlyFact]
public void CannotEmbedValueTuple()
{
// Generic types cannot be embedded from a PIA (ERR_GenericsUsedInNoPIAType).
// Verifies the diagnostic is reported for every way ValueTuple can be used:
// explicit System.ValueTuple, tuple type syntax, tuple literals, and
// deconstruction assignment — and that the diagnostics are identical whether
// the PIA is referenced as a compilation or as an emitted image.
string piaSource = @"
using System;
using System.Runtime.InteropServices;
[assembly: Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"")]
[assembly: ImportedFromTypeLib(""Pia1.dll"")]
namespace System
{
public struct ValueTuple<T1, T2>
{
public ValueTuple(T1 item1, T2 item2) { }
}
}
";
var pia = CreateCompilationWithMscorlib(piaSource, options: TestOptions.ReleaseDll, assemblyName: "pia");
pia.VerifyDiagnostics();
string source = @"
public class C
{
public System.ValueTuple<string, string> TestValueTuple()
{
throw new System.Exception();
}
public (int, int) TestTuple()
{
throw new System.Exception();
}
public object TestTupleLiteral()
{
return (1, 2);
}
public void TestDeconstruction()
{
int x, y;
(x, y) = new C();
}
public void Deconstruct(out int a, out int b) { a = b = 1; }
}";
var expectedDiagnostics = new[]
{
// (8,12): error CS1768: Type 'ValueTuple<T1, T2>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
// public (int, int) TestTuple()
Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "(int, int)").WithArguments("System.ValueTuple<T1, T2>").WithLocation(8, 12),
// (4,19): error CS1768: Type 'ValueTuple<T1, T2>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
// public System.ValueTuple<string, string> TestValueTuple()
Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "ValueTuple<string, string>").WithArguments("System.ValueTuple<T1, T2>").WithLocation(4, 19),
// (14,16): error CS1768: Type 'ValueTuple<T1, T2>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
// return (1, 2);
Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "(1, 2)").WithArguments("System.ValueTuple<T1, T2>").WithLocation(14, 16),
// (19,9): error CS1768: Type 'ValueTuple<T1, T2>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
// (x, y) = new C();
Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "(x, y) = new C()").WithArguments("System.ValueTuple<T1, T2>").WithLocation(19, 9)
};
// PIA referenced as a compilation reference.
var comp1 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll,
references: new MetadataReference[] { pia.ToMetadataReference(embedInteropTypes: true) });
comp1.VerifyDiagnostics(expectedDiagnostics);
// PIA referenced as an emitted metadata image: same diagnostics.
var comp2 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll,
references: new MetadataReference[] { pia.EmitToImageReference(embedInteropTypes: true) });
comp2.VerifyDiagnostics(expectedDiagnostics);
}
// A PIA interface member returns a tuple / a generic struct, and the consumer
// only inherits the interface, so the generic types are referred to implicitly.
// Embedding should still be rejected; the test is skipped because the expected
// errors are not produced yet — tracked by roslyn issue #13200 (the expected
// diagnostics array is intentionally left empty until the issue is fixed).
[ClrOnlyFact(Skip = "https://github.com/dotnet/roslyn/issues/13200")]
public void CannotEmbedValueTupleImplicitlyReferred()
{
string piaSource = @"
using System;
using System.Runtime.InteropServices;
[assembly: Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"")]
[assembly: ImportedFromTypeLib(""Pia1.dll"")]
public struct S<T> { }
namespace System
{
public struct ValueTuple<T1, T2>
{
public ValueTuple(T1 item1, T2 item2) { }
}
}
[ComImport()]
[Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58280"")]
public interface ITest1
{
(int, int) M();
S<int> M2();
}";
var pia = CreateCompilationWithMscorlib(piaSource, options: TestOptions.ReleaseDll, assemblyName: "pia");
pia.VerifyEmitDiagnostics();
// Deriving from ITest1 pulls in its members' tuple / generic struct types.
string source = @"
public interface ITest2 : ITest1 { }
";
// We should expect errors as generic types cannot be embedded
// Issue https://github.com/dotnet/roslyn/issues/13200 tracks this
var expectedDiagnostics = new DiagnosticDescription[]
{
};
var comp1 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll,
    references: new MetadataReference[] { pia.ToMetadataReference(embedInteropTypes: true) });
comp1.VerifyEmitDiagnostics(expectedDiagnostics);
var comp2 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll,
    references: new MetadataReference[] { pia.EmitToImageReference(embedInteropTypes: true) });
comp2.VerifyEmitDiagnostics(expectedDiagnostics);
}
// Variant of the previous test where the implicit reference to the PIA's
// generic types comes through an intermediate library's metadata (D.M returns
// a tuple, D.M2 returns S<int>). Skipped pending roslyn issue #13200; the
// expected-diagnostics array is intentionally empty until that is fixed.
[ClrOnlyFact(Skip = "https://github.com/dotnet/roslyn/issues/13200")]
public void CannotEmbedValueTupleImplicitlyReferredFromMetadata()
{
string piaSource = @"
using System;
using System.Runtime.InteropServices;
[assembly: Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"")]
[assembly: ImportedFromTypeLib(""Pia1.dll"")]
public struct S<T> { }
namespace System
{
public struct ValueTuple<T1, T2>
{
public ValueTuple(T1 item1, T2 item2) { }
}
}";
// Intermediate (non-PIA) library whose API surfaces the PIA's generic types.
var libSource = @"
public class D
{
    public static (int, int) M() { throw new System.Exception(); }
    public static S<int> M2() { throw new System.Exception(); }
}";
var pia = CreateCompilationWithMscorlib(piaSource, options: TestOptions.ReleaseDll, assemblyName: "pia");
pia.VerifyDiagnostics();
var lib = CreateCompilationWithMscorlib(libSource, options: TestOptions.ReleaseDll, references: new[] { pia.ToMetadataReference() });
lib.VerifyEmitDiagnostics();
// Consumer touches D's members both discarding and binding the results.
string source = @"
public class C
{
    public void TestTupleFromMetadata()
    {
        D.M();
        D.M2();
    }
    public void TestTupleAssignmentFromMetadata()
    {
        var t = D.M();
        t.ToString();
        var t2 = D.M2();
        t2.ToString();
    }
}";
// We should expect errors, as generic types cannot be embedded
// Issue https://github.com/dotnet/roslyn/issues/13200 tracks this
var expectedDiagnostics = new DiagnosticDescription[]
{
};
var comp1 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll,
    references: new MetadataReference[] { pia.ToMetadataReference(embedInteropTypes: true), lib.ToMetadataReference() });
comp1.VerifyEmitDiagnostics(expectedDiagnostics);
var comp2 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll,
    references: new MetadataReference[] { pia.EmitToImageReference(embedInteropTypes: true), lib.EmitToImageReference() });
comp2.VerifyEmitDiagnostics(expectedDiagnostics);
}
// Here ValueTuple<T1, T2> is declared locally in the consumer (so the tuple
// type itself is fine), but the tuple's *element* types come from the PIA
// (Generic<T1>). Each generic type argument must be reported with
// ERR_GenericsUsedInNoPIAType, whether the PIA is referenced as a compilation
// or as an emitted image.
[ClrOnlyFact]
public void CheckForUnembeddableTypesInTuples()
{
string piaSource = @"
using System;
using System.Runtime.InteropServices;
[assembly: Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"")]
[assembly: ImportedFromTypeLib(""Pia1.dll"")]
public struct Generic<T1> { }
";
var pia = CreateCompilationWithMscorlib(piaSource, options: TestOptions.ReleaseDll, assemblyName: "pia");
pia.VerifyDiagnostics();
// Consumer declares ValueTuple itself and uses the PIA's Generic<> as
// element types, via both explicit ValueTuple syntax and tuple syntax.
string source = @"
public class C
{
    public System.ValueTuple<Generic<string>, Generic<string>> Test1()
    {
        throw new System.Exception();
    }
    public (Generic<int>, Generic<int>) Test2()
    {
        throw new System.Exception();
    }
}
namespace System
{
    public struct ValueTuple<T1, T2>
    {
        public ValueTuple(T1 item1, T2 item2) { }
    }
}";
// PIA as a compilation reference.
var comp1 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll,
    references: new MetadataReference[] { new CSharpCompilationReference(pia).WithEmbedInteropTypes(true) });
comp1.VerifyDiagnostics(
    // (8,13): error CS1768: Type 'Generic<T1>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
    //     public (Generic<int>, Generic<int>) Test2()
    Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "Generic<int>").WithArguments("Generic<T1>").WithLocation(8, 13),
    // (8,27): error CS1768: Type 'Generic<T1>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
    //     public (Generic<int>, Generic<int>) Test2()
    Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "Generic<int>").WithArguments("Generic<T1>").WithLocation(8, 27),
    // (4,30): error CS1768: Type 'Generic<T1>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
    //     public System.ValueTuple<Generic<string>, Generic<string>> Test1()
    Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "Generic<string>").WithArguments("Generic<T1>").WithLocation(4, 30),
    // (4,47): error CS1768: Type 'Generic<T1>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
    //     public System.ValueTuple<Generic<string>, Generic<string>> Test1()
    Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "Generic<string>").WithArguments("Generic<T1>").WithLocation(4, 47)
    );
// PIA as an emitted image reference — same diagnostics expected.
var comp2 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll,
    references: new MetadataReference[] { MetadataReference.CreateFromImage(pia.EmitToArray()).WithEmbedInteropTypes(true) });
comp2.VerifyDiagnostics(
    // (8,13): error CS1768: Type 'Generic<T1>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
    //     public (Generic<int>, Generic<int>) Test2()
    Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "Generic<int>").WithArguments("Generic<T1>").WithLocation(8, 13),
    // (8,27): error CS1768: Type 'Generic<T1>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
    //     public (Generic<int>, Generic<int>) Test2()
    Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "Generic<int>").WithArguments("Generic<T1>").WithLocation(8, 27),
    // (4,30): error CS1768: Type 'Generic<T1>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
    //     public System.ValueTuple<Generic<string>, Generic<string>> Test1()
    Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "Generic<string>").WithArguments("Generic<T1>").WithLocation(4, 30),
    // (4,47): error CS1768: Type 'Generic<T1>' cannot be embedded because it has a generic argument. Consider setting the 'Embed Interop Types' property to false.
    //     public System.ValueTuple<Generic<string>, Generic<string>> Test1()
    Diagnostic(ErrorCode.ERR_GenericsUsedInNoPIAType, "Generic<string>").WithArguments("Generic<T1>").WithLocation(4, 47)
    );
}
// Consumes LocalTypes3 (built against the file-scope Pia1 test reference with
// interop types embedded) and checks which return types survive symbol
// retargeting: generic instantiations closed over embedded local types must
// come back as NoPiaIllegalGenericInstantiationSymbol (an error type), while
// legal instantiations must not. Verified both without and with MscorlibRef
// in the second reference set.
[ClrOnlyFact]
public void GenericsClosedOverLocalTypes1_2()
{
var LocalTypes3 = CreateCompilationWithMscorlib(s_sourceLocalTypes3, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes3",
    references: new[] { TestReferences.SymbolsTests.NoPia.Pia1.WithEmbedInteropTypes(true) });
CompileAndVerify(LocalTypes3);
// First resolution: LocalTypes3 against the canonical Pia1 only.
var assemblies = MetadataTestHelpers.GetSymbolsForReferences(
    new CSharpCompilation[] { LocalTypes3 },
    null,
    new MetadataReference[]
    {
        TestReferences.SymbolsTests.NoPia.Pia1
    }, null);
var asmLocalTypes3 = assemblies[0];
var localTypes3 = asmLocalTypes3.GlobalNamespace.GetTypeMembers("LocalTypes3").Single();
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test1").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test2").OfType<MethodSymbol>().Single().ReturnType.Kind);
// Test3's return type is an illegal instantiation; verify the exact
// underlying symbol it wraps.
Assert.Equal(SymbolKind.ErrorType, localTypes3.GetMembers("Test3").OfType<MethodSymbol>().Single().ReturnType.Kind);
NoPiaIllegalGenericInstantiationSymbol illegal = (NoPiaIllegalGenericInstantiationSymbol)localTypes3.GetMembers("Test3").OfType<MethodSymbol>().Single().ReturnType;
Assert.Equal("C31<I1>.I31<C33>", illegal.UnderlyingSymbol.ToTestDisplayString());
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test4").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test5").OfType<MethodSymbol>().Single().ReturnType);
// Second resolution: same, with mscorlib added; Test6 is also checked here.
assemblies = MetadataTestHelpers.GetSymbolsForReferences(
    new CSharpCompilation[] { LocalTypes3 },
    null,
    new MetadataReference[]
    {
        TestReferences.SymbolsTests.NoPia.Pia1,
        MscorlibRef
    }, null);
localTypes3 = assemblies[0].GlobalNamespace.GetTypeMembers("LocalTypes3").Single();
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test1").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test2").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test3").OfType<MethodSymbol>().Single().ReturnType);
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test4").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test5").OfType<MethodSymbol>().Single().ReturnType);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test6").OfType<MethodSymbol>().Single().ReturnType);
}
// Same scenario as GenericsClosedOverLocalTypes1_2, but the PIA is built
// in-test from s_sourcePia1 and referenced as a CSharpCompilationReference
// (embedInteropTypes: true) instead of the prebuilt Pia1 image. The symbol
// expectations are identical.
[ClrOnlyFact]
public void GenericsClosedOverLocalTypes1_3()
{
var Pia1 = CreateCompilationWithMscorlib(s_sourcePia1, options: TestOptions.ReleaseDll, assemblyName: "Pia1");
CompileAndVerify(Pia1);
var LocalTypes3 = CreateCompilationWithMscorlib(s_sourceLocalTypes3, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes3",
    references: new MetadataReference[] { new CSharpCompilationReference(Pia1, embedInteropTypes: true) });
CompileAndVerify(LocalTypes3);
// First resolution: against the canonical Pia1 reference only.
var assemblies = MetadataTestHelpers.GetSymbolsForReferences(
    new CSharpCompilation[] { LocalTypes3 },
    null,
    new MetadataReference[]
    {
        TestReferences.SymbolsTests.NoPia.Pia1
    }, null);
var asmLocalTypes3 = assemblies[0];
var localTypes3 = asmLocalTypes3.GlobalNamespace.GetTypeMembers("LocalTypes3").Single();
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test1").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test2").OfType<MethodSymbol>().Single().ReturnType.Kind);
// Test3 yields an illegal No-PIA generic instantiation; check its wrapped symbol.
Assert.Equal(SymbolKind.ErrorType, localTypes3.GetMembers("Test3").OfType<MethodSymbol>().Single().ReturnType.Kind);
NoPiaIllegalGenericInstantiationSymbol illegal = (NoPiaIllegalGenericInstantiationSymbol)localTypes3.GetMembers("Test3").OfType<MethodSymbol>().Single().ReturnType;
Assert.Equal("C31<I1>.I31<C33>", illegal.UnderlyingSymbol.ToTestDisplayString());
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test4").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test5").OfType<MethodSymbol>().Single().ReturnType);
// Second resolution: mscorlib added; Test6 is additionally verified.
assemblies = MetadataTestHelpers.GetSymbolsForReferences(
    new CSharpCompilation[] { LocalTypes3 },
    null,
    new MetadataReference[]
    {
        TestReferences.SymbolsTests.NoPia.Pia1,
        MscorlibRef
    }, null);
localTypes3 = assemblies[0].GlobalNamespace.GetTypeMembers("LocalTypes3").Single();
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test1").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test2").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test3").OfType<MethodSymbol>().Single().ReturnType);
Assert.NotEqual(SymbolKind.ErrorType, localTypes3.GetMembers("Test4").OfType<MethodSymbol>().Single().ReturnType.Kind);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test5").OfType<MethodSymbol>().Single().ReturnType);
Assert.IsType<NoPiaIllegalGenericInstantiationSymbol>(localTypes3.GetMembers("Test6").OfType<MethodSymbol>().Single().ReturnType);
}
// No-PIA local types: both the outer struct S1 and its nested S1.S2 carry
// TypeIdentifier attributes in LocalTypes2. When unified against the real
// Pia, the outer S1 should resolve to the Pia's S1, but a *nested* local
// type is not supported — S1.S2 must come back as
// UnsupportedMetadataTypeSymbol, both as a parameter type and inside the
// ComEventInterface attribute arguments. All four combinations of
// compilation-reference vs. image-reference are exercised.
[ClrOnlyFact]
public void NestedType1()
{
string source =
@"
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
public class LocalTypes2
{
    public void Test2(S1 x, S1.S2 y)
    {
    }
}
[CompilerGenerated, TypeIdentifier(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"", ""S1"")]
public struct S1
{
    public int F1;
    [CompilerGenerated, TypeIdentifier(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"", ""S1.S2"")]
    public struct S2
    {
        public int F1;
    }
}
[ComEventInterface(typeof(S1), typeof(S1.S2))]
interface AttrTest1
{
}
";
var localTypes2 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes2");
CompileAndVerify(localTypes2);
var localTypes2Image = MetadataReference.CreateFromImage(localTypes2.EmitToArray());
// The canonical PIA declaring the real S1 / S1.S2.
string piaSource =
@"
using System;
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
[assembly: Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"")]
[assembly: ImportedFromTypeLib(""Pia1.dll"")]
public struct S1
{
    public int F1;
    public struct S2
    {
        public int F1;
    }
}
";
var pia = CreateCompilationWithMscorlib(piaSource, options: TestOptions.ReleaseDll, assemblyName: "Pia");
CompileAndVerify(pia);
var piaImage = MetadataReference.CreateFromImage(pia.EmitToArray());
// Combination 1: compilation + compilation.
var compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {new CSharpCompilationReference(localTypes2),
    new CSharpCompilationReference(pia)});
NamedTypeSymbol lt = compilation.GetTypeByMetadataName("LocalTypes2");
var test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
NamedTypeSymbol attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
var args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
// Combination 2: image + compilation.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {localTypes2Image,
    new CSharpCompilationReference(pia)});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
// Combination 3: compilation + image.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {new CSharpCompilationReference(localTypes2),
    piaImage});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
// Combination 4: image + image.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {localTypes2Image,
    piaImage});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
}
// Variant of NestedType1 where only the *outer* struct S1 carries a
// TypeIdentifier attribute; the nested S2 has none of its own. Expectations
// are the same: S1 unifies with the Pia's S1, while the nested S1.S2 resolves
// to UnsupportedMetadataTypeSymbol — nested local types are unsupported.
// All four compilation/image reference combinations are checked.
[ClrOnlyFact]
public void NestedType2()
{
string source =
@"
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
public class LocalTypes2
{
    public void Test2(S1 x, S1.S2 y)
    {
    }
}
[CompilerGenerated, TypeIdentifier(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"", ""S1"")]
public struct S1
{
    public int F1;
    public struct S2
    {
        public int F1;
    }
}
[ComEventInterface(typeof(S1), typeof(S1.S2))]
interface AttrTest1
{
}
";
var localTypes2 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes2");
CompileAndVerify(localTypes2);
var localTypes2Image = MetadataReference.CreateFromImage(localTypes2.EmitToArray());
string piaSource =
@"
using System;
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
[assembly: Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"")]
[assembly: ImportedFromTypeLib(""Pia1.dll"")]
public struct S1
{
    public int F1;
    public struct S2
    {
        public int F1;
    }
}
";
var pia = CreateCompilationWithMscorlib(piaSource, options: TestOptions.ReleaseDll, assemblyName: "Pia");
CompileAndVerify(pia);
var piaImage = MetadataReference.CreateFromImage(pia.EmitToArray());
// Combination 1: compilation + compilation.
var compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {new CSharpCompilationReference(localTypes2),
    new CSharpCompilationReference(pia)});
NamedTypeSymbol lt = compilation.GetTypeByMetadataName("LocalTypes2");
var test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
NamedTypeSymbol attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
var args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
// Combination 2: image + compilation.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {localTypes2Image,
    new CSharpCompilationReference(pia)});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
// Combination 3: compilation + image.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {new CSharpCompilationReference(localTypes2),
    piaImage});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
// Combination 4: image + image.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {localTypes2Image,
    piaImage});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
}
// Variant where only the *nested* struct S1.S2 carries a TypeIdentifier
// attribute and the outer S1 does not. Since the outer type is not a local
// type, no unification with the Pia occurs at all: both parameter types and
// both attribute arguments must still resolve to the LocalTypes2 assembly.
// All four compilation/image reference combinations are checked.
[ClrOnlyFact]
public void NestedType3()
{
string source =
@"
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
public class LocalTypes2
{
    public void Test2(S1 x, S1.S2 y)
    {
    }
}
public struct S1
{
    public int F1;
    [CompilerGenerated, TypeIdentifier(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"", ""S1.S2"")]
    public struct S2
    {
        public int F1;
    }
}
[ComEventInterface(typeof(S1), typeof(S1.S2))]
interface AttrTest1
{
}
";
var localTypes2 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes2");
//CompileAndVerify(localTypes2);
var localTypes2Image = MetadataReference.CreateFromImage(localTypes2.EmitToArray());
string piaSource =
@"
using System;
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
[assembly: Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"")]
[assembly: ImportedFromTypeLib(""Pia1.dll"")]
public struct S1
{
    public int F1;
    public struct S2
    {
        public int F1;
    }
}
";
var pia = CreateCompilationWithMscorlib(piaSource, options: TestOptions.ReleaseDll, assemblyName: "Pia");
CompileAndVerify(pia);
var piaImage = MetadataReference.CreateFromImage(pia.EmitToArray());
// Combination 1: compilation + compilation — everything stays in LocalTypes2.
var compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {new CSharpCompilationReference(localTypes2),
    new CSharpCompilationReference(pia)});
NamedTypeSymbol lt = compilation.GetTypeByMetadataName("LocalTypes2");
var test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("LocalTypes2", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.Equal("LocalTypes2", test2.Parameters[1].Type.ContainingAssembly.Name);
NamedTypeSymbol attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
var args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("LocalTypes2", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.Equal("LocalTypes2", ((TypeSymbol)args[1].Value).ContainingAssembly.Name);
// Combination 2: image + compilation.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {localTypes2Image,
    new CSharpCompilationReference(pia)});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("LocalTypes2", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.Equal("LocalTypes2", test2.Parameters[1].Type.ContainingAssembly.Name);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("LocalTypes2", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.Equal("LocalTypes2", ((TypeSymbol)args[1].Value).ContainingAssembly.Name);
// Combination 3: compilation + image.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {new CSharpCompilationReference(localTypes2),
    piaImage});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("LocalTypes2", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.Equal("LocalTypes2", test2.Parameters[1].Type.ContainingAssembly.Name);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("LocalTypes2", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.Equal("LocalTypes2", ((TypeSymbol)args[1].Value).ContainingAssembly.Name);
// Combination 4: image + image.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {localTypes2Image,
    piaImage});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("LocalTypes2", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.Equal("LocalTypes2", test2.Parameters[1].Type.ContainingAssembly.Name);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("LocalTypes2", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.Equal("LocalTypes2", ((TypeSymbol)args[1].Value).ContainingAssembly.Name);
}
// Here the consumer (LocalTypes2) is compiled directly against the Pia with
// embedInteropTypes: true, so the compiler itself generates the local types.
// When re-resolved against the real Pia, the outer S1 should unify with the
// Pia's S1, while the nested S1.S2 again resolves to
// UnsupportedMetadataTypeSymbol. Only two reference combinations are needed
// here (Pia as compilation and as image), since LocalTypes2 stays a
// compilation reference.
[ClrOnlyFact]
public void NestedType4()
{
string piaSource =
@"
using System;
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
[assembly: Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"")]
[assembly: ImportedFromTypeLib(""Pia1.dll"")]
public struct S1
{
    public int F1;
    public struct S2
    {
        public int F1;
    }
}
";
var pia = CreateCompilationWithMscorlib(piaSource, options: TestOptions.ReleaseDll, assemblyName: "Pia");
CompileAndVerify(pia);
// Note: no TypeIdentifier attributes in source — they are synthesized by
// the compiler because the Pia reference embeds interop types.
string source =
@"
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
public class LocalTypes2
{
    public void Test2(S1 x, S1.S2 y)
    {
    }
}
[ComEventInterface(typeof(S1), typeof(S1.S2))]
interface AttrTest1
{
}
";
var localTypes2 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes2",
    references: new MetadataReference[] { new CSharpCompilationReference(pia, embedInteropTypes: true) });
var piaImage = MetadataReference.CreateFromImage(pia.EmitToArray());
// Pia as a compilation reference.
var compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {new CSharpCompilationReference(localTypes2),
    new CSharpCompilationReference(pia)});
NamedTypeSymbol lt = compilation.GetTypeByMetadataName("LocalTypes2");
var test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
NamedTypeSymbol attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
var args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
// Pia as an emitted image reference.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {new CSharpCompilationReference(localTypes2),
    piaImage});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
}
// No-PIA local types where one of the locals is *generic* (S2<T>, identified
// via the ""S2`1"" metadata name in its TypeIdentifier). The non-generic S1
// unifies with the Pia's S1, but a generic local type is unsupported —
// S2<int> must resolve to UnsupportedMetadataTypeSymbol both as a parameter
// type and as the open-generic typeof(S2<>) attribute argument. All four
// compilation/image reference combinations are exercised.
[ClrOnlyFact]
public void GenericType1()
{
string source =
@"
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
public class LocalTypes2
{
    public void Test2(S1 x, S2<int> y)
    {
    }
}
[CompilerGenerated, TypeIdentifier(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"", ""S1"")]
public struct S1
{
    public int F1;
}
[CompilerGenerated, TypeIdentifier(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"", ""S2`1"")]
public struct S2<T>
{
    public int F1;
}
[ComEventInterface(typeof(S1), typeof(S2<>))]
interface AttrTest1
{
}
";
var localTypes2 = CreateCompilationWithMscorlib(source, options: TestOptions.ReleaseDll, assemblyName: "LocalTypes2");
//CompileAndVerify(localTypes2);
var localTypes2Image = MetadataReference.CreateFromImage(localTypes2.EmitToArray());
string piaSource =
@"
using System;
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
[assembly: Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"")]
[assembly: ImportedFromTypeLib(""Pia1.dll"")]
public struct S1
{
    public int F1;
}
public struct S2<T>
{
    public int F1;
}
";
var pia = CreateCompilationWithMscorlib(piaSource, options: TestOptions.ReleaseDll, assemblyName: "Pia");
CompileAndVerify(pia);
var piaImage = MetadataReference.CreateFromImage(pia.EmitToArray());
// Combination 1: compilation + compilation.
var compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {new CSharpCompilationReference(localTypes2),
    new CSharpCompilationReference(pia)});
NamedTypeSymbol lt = compilation.GetTypeByMetadataName("LocalTypes2");
var test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
NamedTypeSymbol attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
var args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
// Combination 2: image + compilation.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {localTypes2Image,
    new CSharpCompilationReference(pia)});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
// Combination 3: compilation + image.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {new CSharpCompilationReference(localTypes2),
    piaImage});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
// Combination 4: image + image.
compilation = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new MetadataReference[] {localTypes2Image,
    piaImage});
lt = compilation.GetTypeByMetadataName("LocalTypes2");
test2 = lt.GetMember<MethodSymbol>("Test2");
Assert.Equal("Pia", test2.Parameters[0].Type.ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(test2.Parameters[1].Type);
attrTest1 = compilation.GetTypeByMetadataName("AttrTest1");
args = attrTest1.GetAttributes()[0].CommonConstructorArguments;
Assert.Equal("Pia", ((TypeSymbol)args[0].Value).ContainingAssembly.Name);
Assert.IsType<UnsupportedMetadataTypeSymbol>(args[1].Value);
}
// Regression test for DevDiv bug 685240: a netmodule (Module1) built against
// a PIA with embedded interop types is folded into a multi-module assembly;
// a consumer referencing both that assembly and the original PIA must still
// compile and verify — i.e. the module-local embedded I1 unifies correctly.
[ClrOnlyFact]
[WorkItem(685240, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/685240")]
public void Bug685240()
{
string piaSource = @"
using System;
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
[assembly: Guid(""f9c2d51d-4f44-45f0-9eda-c9d599b58257"")]
[assembly: ImportedFromTypeLib(""Pia1.dll"")]
[Guid(""27e3e649-994b-4f58-b3c6-f8089a5f2c01""), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
[ComImport]
public interface I1
{
    void Sub1(int x);
}
";
var pia1 = CreateCompilationWithMscorlib(piaSource, options: TestOptions.ReleaseDll, assemblyName: "Pia1");
CompileAndVerify(pia1);
// A netmodule whose public API returns the embedded interop interface.
string moduleSource = @"
public class Test
{
    public static I1 M1()
    {
        return null;
    }
}
";
var module1 = CreateCompilationWithMscorlib(moduleSource, options: TestOptions.ReleaseModule, assemblyName: "Module1",
    references: new[] { new CSharpCompilationReference(pia1, embedInteropTypes: true) });
// Fold the module into a (otherwise empty) multi-module assembly.
var multiModule = CreateCompilationWithMscorlib("", options: TestOptions.ReleaseDll,
    references: new[] { module1.EmitToImageReference() });
CompileAndVerify(multiModule);
// Consumer of the multi-module assembly plus the real PIA.
string consumerSource = @"
public class Consumer
{
    public static void M2()
    {
        var x = Test.M1();
    }
}
";
var consumer = CreateCompilationWithMscorlib(consumerSource, options: TestOptions.ReleaseDll,
    references: new[] { new CSharpCompilationReference(multiModule),
    new CSharpCompilationReference(pia1)});
CompileAndVerify(consumer);
}
}
}
| apache-2.0 |
drtittle/HelloBlueMixSrc | node_modules/watson-developer-cloud/services/alchemy_language/v1.js | 4526 | /**
* Copyright 2015 IBM Corp. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
var extend = require('extend');
var requestFactory = require('../../lib/requestwrapper');
var endpoints = require('../../lib/alchemy_endpoints.json');
var helper = require('../../lib/helper');
/**
 * Wraps a callback so that AlchemyAPI payload-level errors are surfaced
 * as callback errors. AlchemyAPI reports failures inside the JSON body
 * (status !== 'OK') rather than via HTTP status codes.
 */
function errorFormatter(cb) {
  return function(err, result, response) {
    // Transport-level failure: forward unchanged.
    if (err) {
      cb(err, result);
      return;
    }
    // Successful payload: pass through.
    if (result.status === 'OK') {
      cb(err, result);
      return;
    }
    // Payload-level failure: synthesize an error object.
    cb({
      error: result.statusInfo || response['headers']['x-alchemyapi-error-msg'],
      code: 400
    }, null);
  };
}
/**
 * Builds a service method for the given AlchemyAPI endpoint name.
 * The returned function validates that one of the accepted input
 * formats (text/html/url, per alchemy_endpoints.json) was supplied,
 * then issues a POST via the shared request wrapper.
 */
function createRequest(method) {
  return function(_params, callback ) {
    var params = _params || {};
    var formats = Object.keys(endpoints[method]);
    var format = helper.getFormat(params, formats);

    if (format === null) {
      var message = 'Missing required parameters: ' +
        formats.join(', ') +
        ' needs to be specified';
      callback(new Error(message));
      return;
    }

    var requestParameters = {
      options: {
        url: endpoints[method][format],
        method: 'POST',
        json: true,
        form: extend({}, params, {outputMode: 'json'}) // change default output to json
      },
      defaultOptions: this._options
    };
    return requestFactory(requestParameters, errorFormatter(callback));
  };
}
/**
 * AlchemyLanguage service client.
 *
 * @param {Object} options - user configuration; overrides the default
 *   service URL when provided.
 */
function AlchemyLanguage(options) {
  // Start from the service default endpoint, then layer user options on top.
  var serviceDefaults = {
    url: 'https://access.alchemyapi.com/calls'
  };
  this._options = extend(serviceDefaults, options);
}
/**
 * Extracts a grouped, ranked list of named entities (people, companies,
 * organizations, etc.) from text, a URL or HTML.
 */
AlchemyLanguage.prototype.entities = createRequest('entities');
/**
 * Extracts the keywords from text, a URL or HTML.
 */
AlchemyLanguage.prototype.keywords = createRequest('keywords');
/**
 * Tags the concepts from text, a URL or HTML.
 */
AlchemyLanguage.prototype.concepts = createRequest('concepts');
/**
 * Calculates the sentiment for text, a URL or HTML. When `params.target`
 * is present, sentiment is computed for that specific target phrase.
 */
AlchemyLanguage.prototype.sentiment = function(params, callback) {
  var endpoint = (params && params.target) ? 'sentiment_targeted' : 'sentiment';
  return createRequest(endpoint).call(this, params, callback);
};
/**
 * Extracts the cleaned text (removes ads, navigation, etc.) for a URL or
 * HTML. When `params.raw` is truthy the raw-text endpoint is used instead.
 */
AlchemyLanguage.prototype.text = function(params, callback) {
  var endpoint = (params && params.raw) ? 'text_raw' : 'text';
  return createRequest(endpoint).call(this, params, callback);
};
/**
 * Extracts the authors from a URL or HTML.
 */
AlchemyLanguage.prototype.authors = createRequest('authors');
/**
 * Detects the language for text, a URL or HTML.
 */
AlchemyLanguage.prototype.language = createRequest('language');
/**
 * Extracts the title for a URL or HTML.
 */
AlchemyLanguage.prototype.title = createRequest('title');
/**
 * Extracts the relations for text, a URL or HTML.
 */
AlchemyLanguage.prototype.relations = createRequest('relations');
/**
 * Categorizes the text for text, a URL or HTML.
 */
AlchemyLanguage.prototype.category = createRequest('category');
/**
 * Extracts the publication date from a URL or HTML.
 */
AlchemyLanguage.prototype.publicationDate = createRequest('publication_date');
/**
 * Detects the RSS/ATOM feeds for a URL or HTML.
 */
AlchemyLanguage.prototype.feeds = createRequest('feeds');
/**
 * Parses the microformats for a URL or HTML.
 */
AlchemyLanguage.prototype.microformats = createRequest('microformats');
/**
 * Categorizes through the taxonomy call for text, HTML, or a URL.
 */
AlchemyLanguage.prototype.taxonomy = createRequest('taxonomy');
/**
 * Runs a combined analysis (multiple extractions in one request) for
 * text, HTML, or a URL.
 */
AlchemyLanguage.prototype.combined = createRequest('combined');
module.exports = AlchemyLanguage; | apache-2.0 |
agolPL/keycloak | services/src/main/java/org/keycloak/services/managers/RealmManager.java | 25932 | /*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.services.managers;
import org.keycloak.Config;
import org.keycloak.common.enums.SslRequired;
import org.keycloak.models.AccountRoles;
import org.keycloak.models.AdminRoles;
import org.keycloak.models.BrowserSecurityHeaders;
import org.keycloak.models.ClientModel;
import org.keycloak.models.Constants;
import org.keycloak.models.ImpersonationConstants;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.OTPPolicy;
import org.keycloak.models.PasswordPolicy;
import org.keycloak.models.ProtocolMapperModel;
import org.keycloak.models.RealmModel;
import org.keycloak.models.RealmProvider;
import org.keycloak.models.RoleModel;
import org.keycloak.models.UserModel;
import org.keycloak.models.UserSessionProvider;
import org.keycloak.models.session.UserSessionPersisterProvider;
import org.keycloak.models.utils.DefaultAuthenticationFlows;
import org.keycloak.models.utils.DefaultRequiredActions;
import org.keycloak.models.utils.KeycloakModelUtils;
import org.keycloak.models.utils.RepresentationToModel;
import org.keycloak.protocol.ProtocolMapperUtils;
import org.keycloak.protocol.oidc.OIDCLoginProtocol;
import org.keycloak.protocol.oidc.OIDCLoginProtocolFactory;
import org.keycloak.representations.idm.ApplicationRepresentation;
import org.keycloak.representations.idm.ClientRepresentation;
import org.keycloak.representations.idm.OAuthClientRepresentation;
import org.keycloak.representations.idm.RealmEventsConfigRepresentation;
import org.keycloak.representations.idm.RealmRepresentation;
import org.keycloak.representations.idm.RoleRepresentation;
import org.keycloak.sessions.AuthenticationSessionProvider;
import org.keycloak.storage.UserStorageProviderModel;
import org.keycloak.services.clientregistration.policy.DefaultClientRegistrationPolicies;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
/**
 * Per request object. Encapsulates realm lifecycle operations: creation
 * (with all built-in clients, roles and flows), import from a
 * {@code RealmRepresentation}, removal, and event-config updates.
 *
 * @author <a href="mailto:bill@burkecentral.com">Bill Burke</a>
 * @version $Revision: 1 $
 */
public class RealmManager {
    // Keycloak session backing this per-request manager.
    protected KeycloakSession session;
    // Realm storage provider resolved from the session.
    protected RealmProvider model;
    // Web context path used when building base/redirect URLs for built-in clients.
    protected String contextPath = "";

    public String getContextPath() {
        return contextPath;
    }

    public void setContextPath(String contextPath) {
        this.contextPath = contextPath;
    }

    public RealmManager(KeycloakSession session) {
        this.session = session;
        this.model = session.realms();
    }

    public KeycloakSession getSession() {
        return session;
    }

    public RealmModel getKeycloakAdminstrationRealm() {
        return getRealm(Config.getAdminRealm());
    }

    public RealmModel getRealm(String id) {
        return model.getRealm(id);
    }

    public RealmModel getRealmByName(String name) {
        return model.getRealmByName(name);
    }

    public RealmModel createRealm(String name) {
        return createRealm(name, name);
    }

    /**
     * Creates a realm with the given id and name and provisions all the
     * built-in infrastructure: default settings, admin/account/broker
     * clients, authentication flows, required actions and registration
     * policies. Fires a post-create event at the end.
     */
    public RealmModel createRealm(String id, String name) {
        if (id == null) id = KeycloakModelUtils.generateId();
        RealmModel realm = model.createRealm(id, name);
        realm.setName(name);

        // setup defaults
        setupRealmDefaults(realm);

        setupMasterAdminManagement(realm);
        setupRealmAdminManagement(realm);
        setupAccountManagement(realm);
        setupBrokerService(realm);
        setupAdminConsole(realm);
        setupAdminConsoleLocaleMapper(realm);
        setupAdminCli(realm);
        setupImpersonationService(realm);
        setupAuthenticationFlows(realm);
        setupRequiredActions(realm);
        setupOfflineTokens(realm);
        setupAuthorizationServices(realm);
        setupClientRegistrations(realm);

        fireRealmPostCreate(realm);

        return realm;
    }

    // Only seeds the default flows when the realm has none yet (idempotent).
    protected void setupAuthenticationFlows(RealmModel realm) {
        if (realm.getAuthenticationFlows().size() == 0) DefaultAuthenticationFlows.addFlows(realm);
    }

    // Only seeds the default required actions when the realm has none yet.
    protected void setupRequiredActions(RealmModel realm) {
        if (realm.getRequiredActionProviders().size() == 0) DefaultRequiredActions.addActions(realm);
    }

    protected void setupOfflineTokens(RealmModel realm) {
        KeycloakModelUtils.setupOfflineTokens(realm);
    }

    /**
     * Creates (or updates) the public "security-admin-console" client for
     * this realm and wires its base URL and redirect URI.
     */
    protected void setupAdminConsole(RealmModel realm) {
        ClientModel adminConsole = realm.getClientByClientId(Constants.ADMIN_CONSOLE_CLIENT_ID);
        if (adminConsole == null) adminConsole = KeycloakModelUtils.createClient(realm, Constants.ADMIN_CONSOLE_CLIENT_ID);
        adminConsole.setName("${client_" + Constants.ADMIN_CONSOLE_CLIENT_ID + "}");

        String baseUrl = contextPath + "/admin/" + realm.getName() + "/console";
        adminConsole.setBaseUrl(baseUrl + "/index.html");
        adminConsole.setEnabled(true);
        adminConsole.setPublicClient(true);
        adminConsole.addRedirectUri(baseUrl + "/*");
        adminConsole.setFullScopeAllowed(false);
        adminConsole.setProtocol(OIDCLoginProtocol.LOGIN_PROTOCOL);

        // NOTE(review): adminRole is resolved below but never used or granted
        // within this method — possibly a leftover from an earlier scope
        // assignment; verify whether a role grant was intended here.
        RoleModel adminRole;
        if (realm.getName().equals(Config.getAdminRealm())) {
            adminRole = realm.getRole(AdminRoles.ADMIN);
        } else {
            String realmAdminApplicationClientId = getRealmAdminClientId(realm);
            ClientModel realmAdminApp = realm.getClientByClientId(realmAdminApplicationClientId);
            adminRole = realmAdminApp.getRole(AdminRoles.REALM_ADMIN);
        }
    }

    /**
     * Ensures the admin console client carries the OIDC "locale" protocol
     * mapper so the console can follow the user's locale.
     */
    protected void setupAdminConsoleLocaleMapper(RealmModel realm) {
        ClientModel adminConsole = realm.getClientByClientId(Constants.ADMIN_CONSOLE_CLIENT_ID);
        ProtocolMapperModel localeMapper = adminConsole.getProtocolMapperByName(OIDCLoginProtocol.LOGIN_PROTOCOL, OIDCLoginProtocolFactory.LOCALE);

        if (localeMapper == null) {
            localeMapper = ProtocolMapperUtils.findLocaleMapper(session);
            if (localeMapper != null) {
                adminConsole.addProtocolMapper(localeMapper);
            }
        }
    }

    /**
     * Creates the "admin-cli" public client (direct-grant only, no
     * standard flow) if it does not exist yet.
     */
    public void setupAdminCli(RealmModel realm) {
        ClientModel adminCli = realm.getClientByClientId(Constants.ADMIN_CLI_CLIENT_ID);
        if (adminCli == null) {
            adminCli = KeycloakModelUtils.createClient(realm, Constants.ADMIN_CLI_CLIENT_ID);
            adminCli.setName("${client_" + Constants.ADMIN_CLI_CLIENT_ID + "}");
            adminCli.setEnabled(true);
            adminCli.setPublicClient(true);
            adminCli.setFullScopeAllowed(false);
            adminCli.setStandardFlowEnabled(false);
            adminCli.setDirectAccessGrantsEnabled(true);
            adminCli.setProtocol(OIDCLoginProtocol.LOGIN_PROTOCOL);

            // NOTE(review): adminRole is resolved below but never used or
            // granted — same dead pattern as in setupAdminConsole; confirm
            // whether a role assignment was intended.
            RoleModel adminRole;
            if (realm.getName().equals(Config.getAdminRealm())) {
                adminRole = realm.getRole(AdminRoles.ADMIN);
            } else {
                String realmAdminApplicationClientId = getRealmAdminClientId(realm);
                ClientModel realmAdminApp = realm.getClientByClientId(realmAdminApplicationClientId);
                adminRole = realmAdminApp.getRole(AdminRoles.REALM_ADMIN);
            }
        }
    }

    /**
     * Makes the "view" admin roles composites of the corresponding
     * fine-grained "query" roles, so viewers can also query.
     */
    public void addQueryCompositeRoles(ClientModel realmAccess) {
        RoleModel queryClients = realmAccess.getRole(AdminRoles.QUERY_CLIENTS);
        RoleModel queryUsers = realmAccess.getRole(AdminRoles.QUERY_USERS);
        RoleModel queryGroups = realmAccess.getRole(AdminRoles.QUERY_GROUPS);

        RoleModel viewClients = realmAccess.getRole(AdminRoles.VIEW_CLIENTS);
        viewClients.addCompositeRole(queryClients);
        RoleModel viewUsers = realmAccess.getRole(AdminRoles.VIEW_USERS);
        viewUsers.addCompositeRole(queryUsers);
        viewUsers.addCompositeRole(queryGroups);
    }

    public String getRealmAdminClientId(RealmModel realm) {
        return Constants.REALM_MANAGEMENT_CLIENT_ID;
    }

    public String getRealmAdminClientId(RealmRepresentation realm) {
        return Constants.REALM_MANAGEMENT_CLIENT_ID;
    }

    /**
     * Applies factory defaults to a freshly created realm: security
     * headers, brute-force settings, SSL requirement, OTP policy and the
     * default event listener.
     */
    protected void setupRealmDefaults(RealmModel realm) {
        realm.setBrowserSecurityHeaders(BrowserSecurityHeaders.defaultHeaders);

        // brute force
        realm.setBruteForceProtected(false); // default settings off for now todo set it on
        realm.setPermanentLockout(false);
        realm.setMaxFailureWaitSeconds(900);
        realm.setMinimumQuickLoginWaitSeconds(60);
        realm.setWaitIncrementSeconds(60);
        realm.setQuickLoginCheckMilliSeconds(1000);
        realm.setMaxDeltaTimeSeconds(60 * 60 * 12); // 12 hours
        realm.setFailureFactor(30);
        realm.setSslRequired(SslRequired.EXTERNAL);
        realm.setOTPPolicy(OTPPolicy.DEFAULT_POLICY);
        realm.setLoginWithEmailAllowed(true);

        realm.setEventsListeners(Collections.singleton("jboss-logging"));
    }

    /**
     * Removes a realm and cleans up everything attached to it: the master
     * admin client, user/auth sessions, persisted sessions and periodic
     * sync tasks of configured user-storage providers.
     *
     * @return true if the realm existed and was removed
     */
    public boolean removeRealm(RealmModel realm) {
        // Capture before removal — the realm is gone afterwards.
        ClientModel masterAdminClient = realm.getMasterAdminClient();
        boolean removed = model.removeRealm(realm.getId());
        if (removed) {
            if (masterAdminClient != null) {
                new ClientManager(this).removeClient(getKeycloakAdminstrationRealm(), masterAdminClient);
            }

            UserSessionProvider sessions = session.sessions();
            if (sessions != null) {
                sessions.onRealmRemoved(realm);
            }

            UserSessionPersisterProvider sessionsPersister = session.getProvider(UserSessionPersisterProvider.class);
            if (sessionsPersister != null) {
                sessionsPersister.onRealmRemoved(realm);
            }

            AuthenticationSessionProvider authSessions = session.authenticationSessions();
            if (authSessions != null) {
                authSessions.onRealmRemoved(realm);
            }

            // Refresh periodic sync tasks for configured storageProviders
            List<UserStorageProviderModel> storageProviders = realm.getUserStorageProviders();
            UserStorageSyncManager storageSync = new UserStorageSyncManager();
            for (UserStorageProviderModel provider : storageProviders) {
                storageSync.notifyToRefreshPeriodicSync(session, realm, provider, true);
            }

        }
        return removed;
    }

    /**
     * Applies an events-config representation to the realm. Null fields
     * in the representation leave the corresponding setting untouched
     * (except expiration, which defaults to 0).
     */
    public void updateRealmEventsConfig(RealmEventsConfigRepresentation rep, RealmModel realm) {
        realm.setEventsEnabled(rep.isEventsEnabled());
        realm.setEventsExpiration(rep.getEventsExpiration() != null ? rep.getEventsExpiration() : 0);
        if (rep.getEventsListeners() != null) {
            realm.setEventsListeners(new HashSet<>(rep.getEventsListeners()));
        }
        if (rep.getEnabledEventTypes() != null) {
            realm.setEnabledEventTypes(new HashSet<>(rep.getEnabledEventTypes()));
        }
        if (rep.isAdminEventsEnabled() != null) {
            realm.setAdminEventsEnabled(rep.isAdminEventsEnabled());
        }
        if (rep.isAdminEventsDetailsEnabled() != null) {
            realm.setAdminEventsDetailsEnabled(rep.isAdminEventsDetailsEnabled());
        }
    }

    /**
     * Links the realm to its per-realm admin client in the master realm,
     * creating that client (and its roles) on first call.
     */
    public void setupMasterAdminManagement(RealmModel realm) {
        // Need to refresh masterApp for current realm
        String adminRealmId = Config.getAdminRealm();
        RealmModel adminRealm = model.getRealm(adminRealmId);
        ClientModel masterApp = adminRealm.getClientByClientId(KeycloakModelUtils.getMasterRealmAdminApplicationClientId(realm.getName()));
        if (masterApp != null) {
            realm.setMasterAdminClient(masterApp);
        } else {
            createMasterAdminManagement(realm);
        }
    }

    /**
     * Creates the "<realm>-realm" bearer-only client inside the master
     * realm, adds all realm-admin roles to it, and makes each a composite
     * of the master "admin" role.
     */
    private void createMasterAdminManagement(RealmModel realm) {
        RealmModel adminRealm;
        RoleModel adminRole;

        if (realm.getName().equals(Config.getAdminRealm())) {
            // Bootstrapping the master realm itself: create the global
            // admin + create-realm roles.
            adminRealm = realm;

            adminRole = realm.addRole(AdminRoles.ADMIN);

            RoleModel createRealmRole = realm.addRole(AdminRoles.CREATE_REALM);
            adminRole.addCompositeRole(createRealmRole);
            createRealmRole.setDescription("${role_" + AdminRoles.CREATE_REALM + "}");
            createRealmRole.setScopeParamRequired(false);
        } else {
            adminRealm = model.getRealm(Config.getAdminRealm());
            adminRole = adminRealm.getRole(AdminRoles.ADMIN);
        }
        adminRole.setDescription("${role_"+AdminRoles.ADMIN+"}");
        adminRole.setScopeParamRequired(false);

        ClientModel realmAdminApp = KeycloakModelUtils.createClient(adminRealm, KeycloakModelUtils.getMasterRealmAdminApplicationClientId(realm.getName()));
        // No localized name for now
        realmAdminApp.setName(realm.getName() + " Realm");
        realmAdminApp.setBearerOnly(true);
        realm.setMasterAdminClient(realmAdminApp);

        for (String r : AdminRoles.ALL_REALM_ROLES) {
            RoleModel role = realmAdminApp.addRole(r);
            role.setDescription("${role_"+r+"}");
            role.setScopeParamRequired(false);
            adminRole.addCompositeRole(role);
        }
        addQueryCompositeRoles(realmAdminApp);
    }

    // Backfills any admin roles missing on the master admin client — needed
    // after imports from older versions whose JSON lacked newer roles.
    private void checkMasterAdminManagementRoles(RealmModel realm) {
        RealmModel adminRealm = model.getRealmByName(Config.getAdminRealm());
        RoleModel adminRole = adminRealm.getRole(AdminRoles.ADMIN);

        ClientModel masterAdminClient = realm.getMasterAdminClient();
        for (String r : AdminRoles.ALL_REALM_ROLES) {
            RoleModel found = masterAdminClient.getRole(r);
            if (found == null) {
                addAndSetAdminRole(r, masterAdminClient, adminRole);
            }
        }
        addQueryCompositeRoles(masterAdminClient);
    }

    /**
     * Creates the "realm-management" bearer-only client inside the realm
     * itself with the full set of admin roles composed under REALM_ADMIN.
     * No-op for the master realm (managed via the master admin client).
     */
    private void setupRealmAdminManagement(RealmModel realm) {
        if (realm.getName().equals(Config.getAdminRealm())) { return; } // don't need to do this for master realm

        String realmAdminClientId = getRealmAdminClientId(realm);
        ClientModel realmAdminClient = realm.getClientByClientId(realmAdminClientId);
        if (realmAdminClient == null) {
            realmAdminClient = KeycloakModelUtils.createClient(realm, realmAdminClientId);
            realmAdminClient.setName("${client_" + realmAdminClientId + "}");
        }
        RoleModel adminRole = realmAdminClient.addRole(AdminRoles.REALM_ADMIN);
        adminRole.setDescription("${role_" + AdminRoles.REALM_ADMIN + "}");
        adminRole.setScopeParamRequired(false);
        realmAdminClient.setBearerOnly(true);
        realmAdminClient.setFullScopeAllowed(false);
        realmAdminClient.setProtocol(OIDCLoginProtocol.LOGIN_PROTOCOL);

        for (String r : AdminRoles.ALL_REALM_ROLES) {
            addAndSetAdminRole(r, realmAdminClient, adminRole);
        }
        addQueryCompositeRoles(realmAdminClient);
    }

    // Adds a described admin role on the client and composes it under parentRole.
    private void addAndSetAdminRole(String roleName, ClientModel parentClient, RoleModel parentRole) {
        RoleModel role = parentClient.addRole(roleName);
        role.setDescription("${role_" + roleName + "}");
        role.setScopeParamRequired(false);
        parentRole.addCompositeRole(role);
    }

    // Backfills missing admin roles on the realm's "realm-management" client
    // (import-from-older-version compatibility); no-op for the master realm.
    private void checkRealmAdminManagementRoles(RealmModel realm) {
        if (realm.getName().equals(Config.getAdminRealm())) { return; } // don't need to do this for master realm

        String realmAdminClientId = getRealmAdminClientId(realm);
        ClientModel realmAdminClient = realm.getClientByClientId(realmAdminClientId);
        RoleModel adminRole = realmAdminClient.getRole(AdminRoles.REALM_ADMIN);

        for (String r : AdminRoles.ALL_REALM_ROLES) {
            RoleModel found = realmAdminClient.getRole(r);
            if (found == null) {
                addAndSetAdminRole(r, realmAdminClient, adminRole);
            }
        }
        addQueryCompositeRoles(realmAdminClient);
    }

    /**
     * Creates the "account" client with its default roles and redirect
     * URI. Only runs when the client does not exist yet.
     */
    private void setupAccountManagement(RealmModel realm) {
        ClientModel client = realm.getClientByClientId(Constants.ACCOUNT_MANAGEMENT_CLIENT_ID);
        if (client == null) {
            client = KeycloakModelUtils.createClient(realm, Constants.ACCOUNT_MANAGEMENT_CLIENT_ID);
            client.setName("${client_" + Constants.ACCOUNT_MANAGEMENT_CLIENT_ID + "}");
            client.setEnabled(true);
            client.setFullScopeAllowed(false);
            String base = contextPath + "/realms/" + realm.getName() + "/account";
            String redirectUri = base + "/*";
            client.addRedirectUri(redirectUri);
            client.setBaseUrl(base);
            client.setProtocol(OIDCLoginProtocol.LOGIN_PROTOCOL);

            for (String role : AccountRoles.ALL) {
                client.addDefaultRole(role);
                RoleModel roleModel = client.getRole(role);
                roleModel.setDescription("${role_" + role + "}");
                roleModel.setScopeParamRequired(false);
            }

            RoleModel manageAccountLinks = client.addRole(AccountRoles.MANAGE_ACCOUNT_LINKS);
            manageAccountLinks.setDescription("${role_" + AccountRoles.MANAGE_ACCOUNT_LINKS + "}");
            manageAccountLinks.setScopeParamRequired(false);
            RoleModel manageAccount = client.getRole(AccountRoles.MANAGE_ACCOUNT);
            manageAccount.addCompositeRole(manageAccountLinks);
        }
    }

    public void setupImpersonationService(RealmModel realm) {
        ImpersonationConstants.setupImpersonationService(session, realm);
    }

    /**
     * Creates the "broker" client and its roles (used for identity
     * brokering). Only runs when the client does not exist yet.
     */
    public void setupBrokerService(RealmModel realm) {
        ClientModel client = realm.getClientByClientId(Constants.BROKER_SERVICE_CLIENT_ID);
        if (client == null) {
            client = KeycloakModelUtils.createClient(realm, Constants.BROKER_SERVICE_CLIENT_ID);
            client.setEnabled(true);
            client.setName("${client_" + Constants.BROKER_SERVICE_CLIENT_ID + "}");
            client.setFullScopeAllowed(false);
            client.setProtocol(OIDCLoginProtocol.LOGIN_PROTOCOL);

            for (String role : Constants.BROKER_SERVICE_ROLES) {
                RoleModel roleModel = client.addRole(role);
                // Role descriptions use kebab-cased keys, e.g. READ_TOKEN -> role_read-token.
                roleModel.setDescription("${role_"+ role.toLowerCase().replaceAll("_", "-") +"}");
                roleModel.setScopeParamRequired(false);
            }
        }
    }

    public RealmModel importRealm(RealmRepresentation rep) {
        return importRealm(rep, false);
    }

    /**
     * if "skipUserDependent" is true, then import of any models, which needs users already imported in DB, will be skipped. For example authorization
     */
    public RealmModel importRealm(RealmRepresentation rep, boolean skipUserDependent) {
        String id = rep.getId();
        if (id == null) {
            id = KeycloakModelUtils.generateId();
        }
        RealmModel realm = model.createRealm(id, rep.getRealm());
        realm.setName(rep.getRealm());

        // setup defaults

        setupRealmDefaults(realm);

        // If the representation already ships the master admin client,
        // its creation must wait until after the representation import.
        boolean postponeMasterClientSetup = postponeMasterClientSetup(rep);
        if (!postponeMasterClientSetup) {
            setupMasterAdminManagement(realm);
        }

        if (!hasRealmAdminManagementClient(rep)) setupRealmAdminManagement(realm);
        if (!hasAccountManagementClient(rep)) setupAccountManagement(realm);

        boolean postponeImpersonationSetup = false;
        if (hasRealmAdminManagementClient(rep)) {
            postponeImpersonationSetup = true;
        } else {
            setupImpersonationService(realm);
        }


        if (!hasBrokerClient(rep)) setupBrokerService(realm);
        if (!hasAdminConsoleClient(rep)) setupAdminConsole(realm);

        boolean postponeAdminCliSetup = false;
        if (!hasAdminCliClient(rep)) {
            if (hasRealmAdminManagementClient(rep)) {
                postponeAdminCliSetup = true;
            } else {
                setupAdminCli(realm);
            }
        }

        if (!hasRealmRole(rep, Constants.OFFLINE_ACCESS_ROLE)) setupOfflineTokens(realm);

        RepresentationToModel.importRealm(session, rep, realm, skipUserDependent);

        setupAdminConsoleLocaleMapper(realm);

        if (postponeMasterClientSetup) {
            setupMasterAdminManagement(realm);
        }

        // Assert all admin roles are available once import took place. This is needed due to import from previous version where JSON file may not contain all admin roles
        checkMasterAdminManagementRoles(realm);
        checkRealmAdminManagementRoles(realm);

        // Could happen when migrating from older version and I have exported JSON file, which contains "realm-management" client but not "impersonation" client
        // I need to postpone impersonation because it needs "realm-management" client and its roles set
        if (postponeImpersonationSetup) {
            setupImpersonationService(realm);
            // NOTE(review): realmAdminClientId is computed here but never
            // used — appears to be dead code; confirm and consider removing.
            String realmAdminClientId = getRealmAdminClientId(realm);
        }

        if (postponeAdminCliSetup) {
            setupAdminCli(realm);
        }

        setupAuthenticationFlows(realm);
        setupRequiredActions(realm);

        // Refresh periodic sync tasks for configured storageProviders
        List<UserStorageProviderModel> storageProviders = realm.getUserStorageProviders();
        UserStorageSyncManager storageSync = new UserStorageSyncManager();
        for (UserStorageProviderModel provider : storageProviders) {
            storageSync.notifyToRefreshPeriodicSync(session, realm, provider, false);
        }

        setupAuthorizationServices(realm);
        setupClientRegistrations(realm);

        fireRealmPostCreate(realm);

        return realm;
    }

    // Master realm import ships its own master admin client definitions,
    // so creating them eagerly would conflict with the import.
    private boolean postponeMasterClientSetup(RealmRepresentation rep) {
        if (!Config.getAdminRealm().equals(rep.getRealm())) {
            return false;
        }

        return hasRealmAdminManagementClient(rep);
    }

    private boolean hasRealmAdminManagementClient(RealmRepresentation rep) {
        String realmAdminClientId = Config.getAdminRealm().equals(rep.getRealm()) ? KeycloakModelUtils.getMasterRealmAdminApplicationClientId(rep.getRealm()) : getRealmAdminClientId(rep);
        return hasClient(rep, realmAdminClientId);
    }

    private boolean hasAccountManagementClient(RealmRepresentation rep) {
        return hasClient(rep, Constants.ACCOUNT_MANAGEMENT_CLIENT_ID);
    }

    private boolean hasBrokerClient(RealmRepresentation rep) {
        return hasClient(rep, Constants.BROKER_SERVICE_CLIENT_ID);
    }

    private boolean hasAdminConsoleClient(RealmRepresentation rep) {
        return hasClient(rep, Constants.ADMIN_CONSOLE_CLIENT_ID);
    }

    private boolean hasAdminCliClient(RealmRepresentation rep) {
        return hasClient(rep, Constants.ADMIN_CLI_CLIENT_ID);
    }

    // True when the representation declares the client under any of the
    // supported (current or legacy) representation shapes.
    private boolean hasClient(RealmRepresentation rep, String clientId) {
        if (rep.getClients() != null) {
            for (ClientRepresentation clientRep : rep.getClients()) {
                if (clientRep.getClientId() != null && clientRep.getClientId().equals(clientId)) {
                    return true;
                }
            }
        }

        // TODO: Just for compatibility with old versions. Should be removed later...
        if (rep.getApplications() != null) {
            for (ApplicationRepresentation clientRep : rep.getApplications()) {
                if (clientRep.getName().equals(clientId)) {
                    return true;
                }
            }
        }
        if (rep.getOauthClients() != null) {
            for (OAuthClientRepresentation clientRep : rep.getOauthClients()) {
                if (clientRep.getName().equals(clientId)) {
                    return true;
                }
            }
        }

        return false;
    }

    private boolean hasRealmRole(RealmRepresentation rep, String roleName) {
        if (rep.getRoles() == null || rep.getRoles().getRealm() == null) {
            return false;
        }

        for (RoleRepresentation role : rep.getRoles().getRealm()) {
            if (roleName.equals(role.getName())) {
                return true;
            }
        }

        return false;
    }

    /**
     * Query users based on a search string:
     * <p/>
     * "Bill Burke" first and last name
     * "bburke@redhat.com" email
     * "Burke" lastname or username
     *
     * @param searchString free-text query; null yields an empty result
     * @param realmModel realm to search within
     * @return matching users (never null)
     */
    public List<UserModel> searchUsers(String searchString, RealmModel realmModel) {
        if (searchString == null) {
            return Collections.emptyList();
        }
        return session.users().searchForUser(searchString.trim(), realmModel);
    }

    private void setupAuthorizationServices(RealmModel realm) {
        KeycloakModelUtils.setupAuthorizationServices(realm);
    }

    private void setupClientRegistrations(RealmModel realm) {
        DefaultClientRegistrationPolicies.addDefaultPolicies(realm);
    }

    // Notifies interested providers that the realm has been fully created.
    private void fireRealmPostCreate(RealmModel realm) {
        session.getKeycloakSessionFactory().publish(new RealmModel.RealmPostCreateEvent() {
            @Override
            public RealmModel getCreatedRealm() {
                return realm;
            }
            @Override
            public KeycloakSession getKeycloakSession() {
                return session;
            }
        });
    }

}
| apache-2.0 |
ebyhr/presto | plugin/trino-redis/src/main/java/io/trino/plugin/redis/RedisJedisManager.java | 2908 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.redis;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import io.airlift.log.Logger;
import io.trino.spi.HostAddress;
import io.trino.spi.NodeManager;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import java.util.Map;
import static java.lang.Math.toIntExact;
import static java.util.Objects.requireNonNull;
/**
 * Manages connections to the Redis nodes. A pool of Jedis connections is
 * created lazily per host and destroyed when the manager is shut down.
 */
public class RedisJedisManager
{
    private static final Logger log = Logger.get(RedisJedisManager.class);

    private final RedisConnectorConfig redisConnectorConfig;
    private final JedisPoolConfig jedisPoolConfig;
    private final LoadingCache<HostAddress, JedisPool> jedisPoolCache;

    @Inject
    RedisJedisManager(
            RedisConnectorConfig redisConnectorConfig,
            NodeManager nodeManager)
    {
        this.redisConnectorConfig = requireNonNull(redisConnectorConfig, "redisConnectorConfig is null");
        this.jedisPoolConfig = new JedisPoolConfig();
        // Pools are created on first request for a given host.
        this.jedisPoolCache = CacheBuilder.newBuilder().build(CacheLoader.from(this::createJedisPool));
    }

    @PreDestroy
    public void tearDown()
    {
        // Destroy every pool; a failure on one must not prevent the others
        // from being cleaned up.
        jedisPoolCache.asMap().forEach((host, pool) -> {
            try {
                pool.destroy();
            }
            catch (Exception e) {
                log.warn(e, "While destroying JedisPool %s:", host);
            }
        });
    }

    public RedisConnectorConfig getRedisConnectorConfig()
    {
        return redisConnectorConfig;
    }

    public JedisPool getJedisPool(HostAddress host)
    {
        requireNonNull(host, "host is null");
        return jedisPoolCache.getUnchecked(host);
    }

    private JedisPool createJedisPool(HostAddress host)
    {
        log.info("Creating new JedisPool for %s", host);
        int connectTimeoutMillis = toIntExact(redisConnectorConfig.getRedisConnectTimeout().toMillis());
        return new JedisPool(
                jedisPoolConfig,
                host.getHostText(),
                host.getPort(),
                connectTimeoutMillis,
                redisConnectorConfig.getRedisPassword(),
                redisConnectorConfig.getRedisDataBaseIndex());
    }
}
| apache-2.0 |
naveedaz/azure-powershell | src/ResourceManager/Compute/Stack/Commands.Compute/Generated/Models/Page.cs | 1738 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator 0.14.0.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
namespace Microsoft.Azure.Management.Compute.Models
{
using System.Collections.Generic;
using System.Linq;
using Newtonsoft.Json;
using Microsoft.Rest.Azure;
/// <summary>
/// Defines a page in Azure responses: the items of the page plus the
/// continuation link to the next page, as deserialized from JSON.
/// </summary>
/// <typeparam name="T">Type of the page content items</typeparam>
[JsonObject]
public class Page<T> : IPage<T>
{
    /// <summary>
    /// Gets the link to the next page.
    /// </summary>
    [JsonProperty("nextLink")]
    public string NextPageLink { get; private set; }

    /// <summary>
    /// Items of the current page, bound from the "value" JSON array.
    /// </summary>
    [JsonProperty("value")]
    private IList<T> Items{ get; set; }

    /// <summary>
    /// Returns an enumerator that iterates through the collection.
    /// </summary>
    /// <returns>A an enumerator that can be used to iterate through the collection.</returns>
    public IEnumerator<T> GetEnumerator()
    {
        // A page deserialized without a "value" array enumerates as empty.
        if (Items == null)
        {
            return Enumerable.Empty<T>().GetEnumerator();
        }
        return Items.GetEnumerator();
    }

    /// <summary>
    /// Returns an enumerator that iterates through the collection.
    /// </summary>
    /// <returns>A an enumerator that can be used to iterate through the collection.</returns>
    System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator()
    {
        return GetEnumerator();
    }
}
}
| apache-2.0 |
lxc/lxd-pkg-ubuntu | dist/src/gopkg.in/lxc/go-lxc.v2/error.go | 5776 | // Copyright © 2013, 2014, The Go-LXC Authors. All rights reserved.
// Use of this source code is governed by a LGPLv2.1
// license that can be found in the LICENSE file.
// +build linux,cgo
package lxc
// Sentinel error values returned by the go-lxc bindings. Each wraps a
// fixed message via NewError; callers compare against these variables
// to distinguish failure modes.
var (
	ErrAddDeviceNodeFailed = NewError("adding device to container failed")
	ErrAllocationFailed = NewError("allocating memory failed")
	ErrAlreadyDefined = NewError("container already defined")
	ErrAlreadyFrozen = NewError("container is already frozen")
	ErrAlreadyRunning = NewError("container is already running")
	ErrAttachFailed = NewError("attaching to the container failed")
	ErrAttachInterfaceFailed = NewError("attaching specified netdev to the container failed")
	ErrBlkioUsage = NewError("BlkioUsage for the container failed")
	ErrCheckpointFailed = NewError("checkpoint failed")
	ErrClearingConfigItemFailed = NewError("clearing config item for the container failed")
	ErrClearingCgroupItemFailed = NewError("clearing cgroup item for the container failed")
	ErrCloneFailed = NewError("cloning the container failed")
	ErrCloseAllFdsFailed = NewError("setting close_all_fds flag for container failed")
	ErrCreateFailed = NewError("creating the container failed")
	ErrCreateSnapshotFailed = NewError("snapshotting the container failed")
	ErrDaemonizeFailed = NewError("setting daemonize flag for container failed")
	ErrDestroyAllSnapshotsFailed = NewError("destroying all snapshots failed")
	ErrDestroyFailed = NewError("destroying the container failed")
	ErrDestroySnapshotFailed = NewError("destroying the snapshot failed")
	ErrDestroyWithAllSnapshotsFailed = NewError("destroying the container with all snapshots failed")
	ErrDetachInterfaceFailed = NewError("detaching specified netdev to the container failed")
	ErrExecuteFailed = NewError("executing the command in a temporary container failed")
	ErrFreezeFailed = NewError("freezing the container failed")
	ErrInsufficientNumberOfArguments = NewError("insufficient number of arguments were supplied")
	ErrInterfaces = NewError("getting interface names for the container failed")
	ErrIPAddresses = NewError("getting IP addresses of the container failed")
	ErrIPAddress = NewError("getting IP address on the interface of the container failed")
	ErrIPv4Addresses = NewError("getting IPv4 addresses of the container failed")
	ErrIPv6Addresses = NewError("getting IPv6 addresses of the container failed")
	ErrKMemLimit = NewError("your kernel does not support cgroup kernel memory controller")
	ErrLoadConfigFailed = NewError("loading config file for the container failed")
	ErrMemLimit = NewError("your kernel does not support cgroup memory controller")
	ErrMemorySwapLimit = NewError("your kernel does not support cgroup swap controller")
	ErrMethodNotAllowed = NewError("the requested method is not currently supported with unprivileged containers")
	ErrNewFailed = NewError("allocating the container failed")
	ErrNoSnapshot = NewError("container has no snapshot")
	ErrNotDefined = NewError("container is not defined")
	ErrNotFrozen = NewError("container is not frozen")
	ErrNotRunning = NewError("container is not running")
	ErrNotSupported = NewError("method is not supported by this LXC version")
	ErrRebootFailed = NewError("rebooting the container failed")
	ErrRemoveDeviceNodeFailed = NewError("removing device from container failed")
	ErrRenameFailed = NewError("renaming the container failed")
	ErrRestoreFailed = NewError("restore failed")
	ErrRestoreSnapshotFailed = NewError("restoring the container failed")
	ErrSaveConfigFailed = NewError("saving config file for the container failed")
	ErrSettingCgroupItemFailed = NewError("setting cgroup item for the container failed")
	ErrSettingConfigItemFailed = NewError("setting config item for the container failed")
	ErrSettingConfigPathFailed = NewError("setting config file for the container failed")
	ErrSettingKMemoryLimitFailed = NewError("setting kernel memory limit for the container failed")
	ErrSettingMemoryLimitFailed = NewError("setting memory limit for the container failed")
	ErrSettingMemorySwapLimitFailed = NewError("setting memory+swap limit for the container failed")
	ErrSettingSoftMemoryLimitFailed = NewError("setting soft memory limit for the container failed")
	ErrShutdownFailed = NewError("shutting down the container failed")
	// NOTE(review): message is identical to ErrMemLimit and does not mention
	// "soft" — looks like a copy-paste; confirm before relying on the text.
	ErrSoftMemLimit = NewError("your kernel does not support cgroup memory controller")
	ErrStartFailed = NewError("starting the container failed")
	ErrStopFailed = NewError("stopping the container failed")
	ErrTemplateNotAllowed = NewError("unprivileged users only allowed to use \"download\" template")
	ErrUnfreezeFailed = NewError("unfreezing the container failed")
	ErrUnknownBackendStore = NewError("unknown backend type")
)
// Error is a minimal error implementation carrying only a message string.
type Error struct {
	Message string
}

// NewError wraps msg in an *Error value satisfying the error interface.
func NewError(msg string) error {
	e := Error{Message: msg}
	return &e
}

// Error returns the stored message, implementing the error interface.
func (e *Error) Error() string {
	return e.Message
}
| apache-2.0 |
mbebenita/shumway.ts | tests/Fidelity/test262/suite/ch15/15.4/15.4.4/15.4.4.21/15.4.4.21-7-7.js | 1266 | /// Copyright (c) 2012 Ecma International. All rights reserved.
/// Ecma International makes this code available under the terms and conditions set
/// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
/// "Use Terms"). Any redistribution of this code must retain the above
/// copyright and this notice and otherwise comply with the Use Terms.
/**
* @path ch15/15.4/15.4.4/15.4.4.21/15.4.4.21-7-7.js
* @description Array.prototype.reduce returns initialValue if 'length' is 0 and initialValue is present (subclassed Array, length overridden with obj w/o valueOf (toString))
*/
function testcase() {
  // `foo` is a hoisted function declaration, so assigning its prototype
  // before the declaration line is valid; instances inherit [1, 2, 3].
  foo.prototype = new Array(1, 2, 3);
  function foo() {}
  var f = new foo();

  // `length` is overridden with an object that has no valueOf but a
  // toString returning '0', so ToUint32(f.length) yields 0.
  var o = { toString: function () { return '0';}};
  f.length = o;

  // objects inherit the default valueOf method of the Object object;
  // that simply returns the itself. Since the default valueOf() method
  // does not return a primitive value, ES next tries to convert the object
  // to a number by calling its toString() method and converting the
  // resulting string to a number.
  function cb(){}
  try {
    // With length 0 and an initialValue present, reduce must return the
    // initialValue without ever invoking the callback.
    if(f.reduce(cb,1) === 1)
      return true;
  }
  catch (e) { }
}
runTestCase(testcase);
| apache-2.0 |
umangmehta12/elasticsearch-server | elasticsearch-transport-thrift/src/main/java/org/elasticsearch/thrift/ThriftRestRequest.java | 3720 | /*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.thrift;
import org.elasticsearch.common.bytes.ByteBufferBytesReference;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.rest.support.AbstractRestRequest;
import org.elasticsearch.rest.support.RestUtils;
import java.util.HashMap;
import java.util.Map;
/**
 * Adapter exposing a Thrift {@link org.elasticsearch.thrift.RestRequest}
 * through the elasticsearch REST request interface, so requests arriving
 * over the Thrift transport can be dispatched by the regular REST layer.
 */
public class ThriftRestRequest extends AbstractRestRequest implements org.elasticsearch.rest.RestRequest {

    private final org.elasticsearch.thrift.RestRequest request;

    private final String rawPath;

    private final Map<String, String> params;

    public ThriftRestRequest(org.elasticsearch.thrift.RestRequest request) {
        this.request = request;
        // Fall back to a fresh mutable map when the Thrift request carries no
        // parameters, since query-string decoding below may add entries.
        this.params = request.getParameters() == null ? new HashMap<String, String>() : request.getParameters();

        int queryStart = request.getUri().indexOf('?');
        if (queryStart < 0) {
            // No query string: the whole URI is the path.
            this.rawPath = request.getUri();
        } else {
            this.rawPath = request.getUri().substring(0, queryStart);
            RestUtils.decodeQueryString(request.getUri(), queryStart + 1, params);
        }
    }

    @Override
    public Method method() {
        // Translate the Thrift HTTP-verb enum onto the REST layer's enum.
        switch (request.getMethod()) {
            case GET:
                return Method.GET;
            case POST:
                return Method.POST;
            case PUT:
                return Method.PUT;
            case DELETE:
                return Method.DELETE;
            case HEAD:
                return Method.HEAD;
            case OPTIONS:
                return Method.OPTIONS;
        }
        // Unknown or unsupported verb.
        return null;
    }

    @Override
    public String uri() {
        return request.getUri();
    }

    @Override
    public String rawPath() {
        return rawPath;
    }

    @Override
    public boolean hasContent() {
        if (!request.isSetBody()) {
            return false;
        }
        return request.bufferForBody().remaining() > 0;
    }

    @Override
    public boolean contentUnsafe() {
        return false;
    }

    @Override
    public BytesReference content() {
        return request.isSetBody() ? new ByteBufferBytesReference(request.bufferForBody()) : BytesArray.EMPTY;
    }

    @Override
    public String header(String name) {
        Map<String, String> headers = request.getHeaders();
        return headers == null ? null : headers.get(name);
    }

    @Override
    public boolean hasParam(String key) {
        return params.containsKey(key);
    }

    @Override
    public String param(String key) {
        return params.get(key);
    }

    @Override
    public Map<String, String> params() {
        return params;
    }

    @Override
    public String param(String key, String defaultValue) {
        String value = params.get(key);
        return value == null ? defaultValue : value;
    }
}
| apache-2.0 |
hsanjuan/one | src/sunstone/public/app/tabs/vms-tab/dialogs/spice.js | 2627 | /* -------------------------------------------------------------------------- */
/* Copyright 2002-2016, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
define(function(require) {
  // Dependencies
  var BaseDialog = require('utils/dialogs/dialog');
  var TemplateHTML = require('hbs!./spice/html');
  var Sunstone = require('sunstone');
  var Spice = require('utils/spice');

  // Constants
  var DIALOG_ID = require('./spice/dialogId');
  var TAB_ID = require('../tabId');

  /**
   * Dialog embedding a SPICE console for a VM.
   */
  function Dialog() {
    this.dialogId = DIALOG_ID;
    BaseDialog.call(this);
  }

  Dialog.DIALOG_ID = DIALOG_ID;
  Dialog.prototype = Object.create(BaseDialog.prototype);
  Dialog.prototype.constructor = Dialog;

  // Render the dialog skeleton from its Handlebars template.
  Dialog.prototype.html = function() {
    return TemplateHTML({
      'dialogId': this.dialogId
    });
  };

  // Wire up the "open in a new window" control to close this dialog.
  Dialog.prototype.setup = function(context) {
    var that = this;
    $("#open_in_a_new_window_spice", context).on("click", function() {
      var dialog = Sunstone.getDialog(DIALOG_ID);
      dialog.hide();
    });
    return false;
  };

  // Start the SPICE session for the stored VM element when shown.
  Dialog.prototype.onShow = function(context) {
    Spice.spiceCallback(this.element);
    return false;
  };

  // Tear down the SPICE session when the dialog is dismissed.
  Dialog.prototype.onClose = function(context) {
    Spice.disconnect();
    Spice.unlock();
    return false;
  };

  // Store the VM element the console will attach to.
  Dialog.prototype.setElement = function(element) {
    this.element = element;
  };

  return Dialog;
});
| apache-2.0 |
fceller/arangodb | 3rdParty/V8/v7.1.302.28/test/mjsunit/compiler/inline-accessors2.js | 11858 | // Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --inline-accessors
// Shared mutable test state:
//   accessorCallCount          - incremented by each accessor under test
//   setterArgument/setterValue - observe the value flowing through setters
//   obj                        - the receiver the accessor is installed on
//   forceDeopt                 - property load used to force deoptimization
var accessorCallCount, setterArgument, setterValue, obj, forceDeopt;

// -----------------------------------------------------------------------------
// Helpers for testing inlining of getters.

// Runs `context(obj)` interpreted twice, then optimized, then after a
// forced deopt, asserting the getter runs exactly once per call.
function TestInlinedGetter(context, obj, expected) {
  forceDeopt = { deopt: 0 };
  accessorCallCount = 0;

  assertEquals(expected, context(obj));
  assertEquals(1, accessorCallCount);

  assertEquals(expected, context(obj));
  assertEquals(2, accessorCallCount);

  %OptimizeFunctionOnNextCall(context);
  assertEquals(expected, context(obj));
  assertEquals(3, accessorCallCount);

  // Replacing forceDeopt with an object lacking `deopt` invalidates the
  // optimized code, exercising the deopt path on the final call.
  forceDeopt = { /* empty*/ };
  assertEquals(expected, context(obj));
  assertEquals(4, accessorCallCount);
}

// Getter result used as a plain value.
function value_context_for_getter(obj) {
  return obj.getterProperty;
}

// Getter result used as a branch condition.
function test_context_for_getter(obj) {
  if (obj.getterProperty) {
    return 111;
  } else {
    return 222;
  }
}

// Getter invoked only for side effects; its result is discarded.
function effect_context_for_getter(obj) {
  obj.getterProperty;
  return 5678;
}

// Runs TestInlinedGetter, checks whether an exception was expected, and
// clears optimization/feedback state so the next round starts fresh.
function TryGetter(context, getter, obj, expected, expectException) {
  try {
    TestInlinedGetter(context, obj, expected);
    assertFalse(expectException);
  } catch (exception) {
    assertTrue(expectException);
    // The stack depth pins down exactly where the throw happened.
    assertEquals(7, exception.stack.split('\n').length);
  }
  %DeoptimizeFunction(context);
  %ClearFunctionFeedback(context);
  %ClearFunctionFeedback(getter);
}

// Exercises the getter in value, test, and effect contexts.
function TestGetterInAllContexts(getter, obj, expected, expectException) {
  TryGetter(value_context_for_getter, getter, obj, expected, expectException);
  TryGetter(test_context_for_getter, getter, obj, expected ? 111 : 222,
            expectException);
  TryGetter(effect_context_for_getter, getter, obj, 5678, expectException);
}
// -----------------------------------------------------------------------------
// Test getter returning something 'true'ish in all contexts.
// Each case is run twice: once with the accessor on the object's own map,
// and once with the object on the prototype chain (Object.create).

function getter1() {
  assertSame(obj, this);
  accessorCallCount++;
  forceDeopt.deopt;  // load that deopts once forceDeopt loses `deopt`
  return 1234;
}

function ConstrG1() { }
obj = Object.defineProperty(new ConstrG1(), "getterProperty", { get: getter1 });
TestGetterInAllContexts(getter1, obj, 1234, false);
obj = Object.create(obj);
TestGetterInAllContexts(getter1, obj, 1234, false);

// -----------------------------------------------------------------------------
// Test getter returning false in all contexts.

function getter2() {
  assertSame(obj, this);
  accessorCallCount++;
  forceDeopt.deopt;
  return false;
}

function ConstrG2() { }
obj = Object.defineProperty(new ConstrG2(), "getterProperty", { get: getter2 });
TestGetterInAllContexts(getter2, obj, false, false);
obj = Object.create(obj);
TestGetterInAllContexts(getter2, obj, false, false);

// -----------------------------------------------------------------------------
// Test getter without a return in all contexts.

function getter3() {
  assertSame(obj, this);
  accessorCallCount++;
  forceDeopt.deopt;
}

function ConstrG3() { }
obj = Object.defineProperty(new ConstrG3(), "getterProperty", { get: getter3 });
TestGetterInAllContexts(getter3, obj, undefined, false);
obj = Object.create(obj);
TestGetterInAllContexts(getter3, obj, undefined, false);

// -----------------------------------------------------------------------------
// Test getter with too many arguments without a return in all contexts.

function getter4(a) {
  assertSame(obj, this);
  assertEquals(undefined, a);
  accessorCallCount++;
  forceDeopt.deopt;
}

function ConstrG4() { }
obj = Object.defineProperty(new ConstrG4(), "getterProperty", { get: getter4 });
TestGetterInAllContexts(getter4, obj, undefined, false);
obj = Object.create(obj);
TestGetterInAllContexts(getter4, obj, undefined, false);

// -----------------------------------------------------------------------------
// Test getter with too many arguments with a return in all contexts.

function getter5(a) {
  assertSame(obj, this);
  assertEquals(undefined, a);
  accessorCallCount++;
  forceDeopt.deopt;
  return 9876;
}

function ConstrG5() { }
obj = Object.defineProperty(new ConstrG5(), "getterProperty", { get: getter5 });
TestGetterInAllContexts(getter5, obj, 9876, false);
obj = Object.create(obj);
TestGetterInAllContexts(getter5, obj, 9876, false);

// -----------------------------------------------------------------------------
// Test getter which throws from optimized code.

function getter6() {
  assertSame(obj, this);
  accessorCallCount++;
  forceDeopt.deopt;
  // `123 in null` throws a TypeError on the 4th (post-deopt) call.
  if (accessorCallCount == 4) { 123 in null; }
  return 13579;
}

function ConstrG6() { }
obj = Object.defineProperty(new ConstrG6(), "getterProperty", { get: getter6 });
TestGetterInAllContexts(getter6, obj, 13579, true);
obj = Object.create(obj);
TestGetterInAllContexts(getter6, obj, 13579, true);
// -----------------------------------------------------------------------------
// Helpers for testing inlining of setters.

// Mirror of TestInlinedGetter for assignments: runs `context(obj, value)`
// interpreted, optimized, and after a forced deopt, asserting the setter
// observed `value` exactly once per call.
function TestInlinedSetter(context, obj, value, expected) {
  forceDeopt = { deopt: 0 };
  accessorCallCount = 0;
  setterArgument = value;

  assertEquals(expected, context(obj, value));
  assertEquals(value, setterValue);
  assertEquals(1, accessorCallCount);

  assertEquals(expected, context(obj, value));
  assertEquals(value, setterValue);
  assertEquals(2, accessorCallCount);

  %OptimizeFunctionOnNextCall(context);
  assertEquals(expected, context(obj, value));
  assertEquals(value, setterValue);
  assertEquals(3, accessorCallCount);

  // Dropping `deopt` from forceDeopt invalidates the optimized code.
  forceDeopt = { /* empty*/ };
  assertEquals(expected, context(obj, value));
  assertEquals(value, setterValue);
  assertEquals(4, accessorCallCount);
}

// Assignment used as a value: must evaluate to the RHS, regardless of
// what the setter itself returns.
function value_context_for_setter(obj, value) {
  return obj.setterProperty = value;
}

// Assignment used as a branch condition.
function test_context_for_setter(obj, value) {
  if (obj.setterProperty = value) {
    return 333;
  } else {
    return 444;
  }
}

// Assignment used only for its side effect.
function effect_context_for_setter(obj, value) {
  obj.setterProperty = value;
  return 666;
}

// Runs TestInlinedSetter, checks exception expectation, then clears
// optimization/feedback state for the next round.
function TrySetter(context, setter, obj, expectException, value, expected) {
  try {
    TestInlinedSetter(context, obj, value, expected);
    assertFalse(expectException);
  } catch (exception) {
    assertTrue(expectException);
    // The stack depth pins down exactly where the throw happened.
    assertEquals(7, exception.stack.split('\n').length);
  }
  %DeoptimizeFunction(context);
  %ClearFunctionFeedback(context);
  %ClearFunctionFeedback(setter);
}

// Exercises the setter in value, test (both truthy and falsy RHS), and
// effect contexts.
function TestSetterInAllContexts(setter, obj, expectException) {
  TrySetter(value_context_for_setter, setter, obj, expectException, 111, 111);
  TrySetter(test_context_for_setter, setter, obj, expectException, true, 333);
  TrySetter(test_context_for_setter, setter, obj, expectException, false, 444);
  TrySetter(effect_context_for_setter, setter, obj, expectException, 555, 666);
}
// -----------------------------------------------------------------------------
// Test setter without a return in all contexts.
// Each case is run twice: with the accessor on the object's own map, and
// with the object on the prototype chain (Object.create).

function setter1(value) {
  assertSame(obj, this);
  accessorCallCount++;
  forceDeopt.deopt;  // load that deopts once forceDeopt loses `deopt`
  setterValue = value;
}

function ConstrS1() { }
obj = Object.defineProperty(new ConstrS1(), "setterProperty", { set: setter1 });
TestSetterInAllContexts(setter1, obj, false);
obj = Object.create(obj);
TestSetterInAllContexts(setter1, obj, false);

// -----------------------------------------------------------------------------
// Test setter returning something different than the RHS in all contexts.
// The assignment expression must still evaluate to the RHS.

function setter2(value) {
  assertSame(obj, this);
  accessorCallCount++;
  forceDeopt.deopt;
  setterValue = value;
  return 1000000;
}

function ConstrS2() { }
obj = Object.defineProperty(new ConstrS2(), "setterProperty", { set: setter2 });
TestSetterInAllContexts(setter2, obj, false);
obj = Object.create(obj);
TestSetterInAllContexts(setter2, obj, false);

// -----------------------------------------------------------------------------
// Test setter with too few arguments without a return in all contexts.

function setter3() {
  assertSame(obj, this);
  accessorCallCount++;
  forceDeopt.deopt;
  setterValue = setterArgument;
}

function ConstrS3() { }
obj = Object.defineProperty(new ConstrS3(), "setterProperty", { set: setter3 });
TestSetterInAllContexts(setter3, obj, false);
obj = Object.create(obj);
TestSetterInAllContexts(setter3, obj, false);

// -----------------------------------------------------------------------------
// Test setter with too few arguments with a return in all contexts.

function setter4() {
  assertSame(obj, this);
  accessorCallCount++;
  forceDeopt.deopt;
  setterValue = setterArgument;
  return 2000000;
}

function ConstrS4() { }
obj = Object.defineProperty(new ConstrS4(), "setterProperty", { set: setter4 });
TestSetterInAllContexts(setter4, obj, false);
obj = Object.create(obj);
TestSetterInAllContexts(setter4, obj, false);

// -----------------------------------------------------------------------------
// Test setter with too many arguments without a return in all contexts.

function setter5(value, foo) {
  assertSame(obj, this);
  assertEquals(undefined, foo);
  accessorCallCount++;
  forceDeopt.deopt;
  setterValue = value;
}

function ConstrS5() { }
obj = Object.defineProperty(new ConstrS5(), "setterProperty", { set: setter5 });
TestSetterInAllContexts(setter5, obj, false);
obj = Object.create(obj);
TestSetterInAllContexts(setter5, obj, false);

// -----------------------------------------------------------------------------
// Test setter with too many arguments with a return in all contexts.

function setter6(value, foo) {
  assertSame(obj, this);
  assertEquals(undefined, foo);
  accessorCallCount++;
  forceDeopt.deopt;
  setterValue = value;
  return 3000000;
}

function ConstrS6() { }
obj = Object.defineProperty(new ConstrS6(), "setterProperty", { set: setter6 });
TestSetterInAllContexts(setter6, obj, false);
obj = Object.create(obj);
TestSetterInAllContexts(setter6, obj, false);

// -----------------------------------------------------------------------------
// Test setter which throws from optimized code.

function setter7(value) {
  accessorCallCount++;
  forceDeopt.deopt;
  // `123 in null` throws a TypeError on the 4th (post-deopt) call.
  if (accessorCallCount == 4) { 123 in null; }
  setterValue = value;
}

function ConstrS7() { }
obj = Object.defineProperty(new ConstrS7(), "setterProperty", { set: setter7 });
TestSetterInAllContexts(setter7, obj, true);
obj = Object.create(obj);
TestSetterInAllContexts(setter7, obj, true);
| apache-2.0 |
brenton/openshift-ansible | roles/lib_openshift/library/oc_scale.py | 61968 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/scale -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_scale
short_description: Manage openshift services through the scale parameters
description:
- Manage openshift services through scaling them.
options:
state:
description:
- State represents whether to scale or list the current replicas
required: true
default: present
choices: ["present", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: default
aliases: []
kind:
description:
- The kind of object to scale.
required: false
default: None
choices:
- rc
- dc
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: scale down a rc to 0
oc_scale:
name: my-replication-controller
kind: rc
namespace: openshift-infra
replicas: 0
- name: scale up a deploymentconfig to 2
oc_scale:
name: php
kind: dc
namespace: my-php-app
replicas: 2
'''
# -*- -*- -*- End included fragment: doc/scale -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
class YeditException(Exception):
    """Raised by Yedit when parsing, lookup, or mutation of YAML data fails."""
# pylint: disable=too-many-public-methods
class Yedit(object):
    ''' Class to modify yaml files '''
    # Regex accepting a whole dotted key path, e.g. "a.b[0].c"; the %s slot
    # receives the non-active separator characters.
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    # Regex capturing one path component: a [index] or a plain key name.
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    # All separators a key may legally contain; the active one is excluded
    # when the patterns above are instantiated.
    com_sep = set(['.', '#', '|', ':'])
    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        '''Constructor.

        filename: path of the file to edit (None for purely in-memory use)
        content: preloaded content (dict or str) used instead of the file
        content_type: 'yaml' or 'json'
        separator: character separating key-path components
        backup: when True, write() first copies the file to <filename>.orig
        '''
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        # Parse content/filename immediately so yaml_dict is usable.
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            # Guarantee yaml_dict is always at least an empty dict.
            self.__yaml_dict = {}
    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator
@separator.setter
def separator(self):
''' getter method for yaml_dict '''
return self._separator
    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key '''
        # An empty key means "clear the whole structure" in place.
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        # Reject malformed keys up front.
        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # Walk down to the parent container of the entry being removed.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                # Path does not exist; nothing to remove.
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        '''Add or overwrite `item` at key path `key` inside `data`,
        creating intermediate dicts as needed.

        Returns the updated parent container (or `item` itself when key
        is ''); raises YeditException when the path cannot be created.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # Walk (and, for dict components, create) intermediate containers
        # down to the parent of the entry being added.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))

        return data
    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            return c
        '''
        if key == '':
            # Empty key refers to the whole structure.
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # Walk each component; bail out with None as soon as a component
        # is missing or of the wrong container type.
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
    def write(self):
        '''Serialize yaml_dict back to self.filename.

        Returns (True, yaml_dict); raises YeditException when no filename
        is set.
        '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        # Preserve the pre-edit file when backups were requested.
        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')

        # Try to set format attributes if supported (ruamel.yaml only;
        # plain PyYAML objects lack `.fa` and raise AttributeError).
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass

        # Try to use RoundTripDumper if supported (ruamel.yaml keeps
        # comments/ordering); fall back to PyYAML's safe_dump otherwise.
        try:
            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except AttributeError:
            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))

        return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
    ''' return yaml file

        Parses the file contents (or preset self.content) into
        self.yaml_dict. content_type is 'yaml' (default) or 'json'.
        Returns the parsed structure, or None when there is nothing to
        load. Raises YeditException when the text cannot be parsed.
    '''
    contents = self.read()

    if not contents and not self.content:
        return None

    # Preset content takes precedence over what was read from disk.
    if self.content:
        if isinstance(self.content, dict):
            self.yaml_dict = self.content
            return self.yaml_dict
        elif isinstance(self.content, str):
            contents = self.content

    # check if it is yaml
    try:
        if content_type == 'yaml' and contents:
            # Try to set format attributes if supported
            try:
                self.yaml_dict.fa.set_block_style()
            except AttributeError:
                pass
            # Try to use RoundTripLoader if supported.
            # BUGFIX: safe_load() does not accept a Loader argument; the
            # formatting-preserving path must call yaml.load() with
            # ruamel's RoundTripLoader (mirroring the RoundTripDumper
            # usage in write()). PyYAML lacks RoundTripLoader, raising
            # AttributeError and taking the safe_load fallback.
            try:
                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
            except AttributeError:
                self.yaml_dict = yaml.safe_load(contents)

            # Try to set format attributes if supported
            try:
                self.yaml_dict.fa.set_block_style()
            except AttributeError:
                pass

        elif content_type == 'json' and contents:
            self.yaml_dict = json.loads(contents)
    except yaml.YAMLError as err:
        # Error loading yaml or json
        raise YeditException('Problem with loading yaml file. %s' % err)

    return self.yaml_dict
def get(self, key):
    ''' Look up *key* in the loaded document; return None when absent. '''
    try:
        return Yedit.get_entry(self.yaml_dict, key, self.separator)
    except KeyError:
        return None
def pop(self, path, key_or_item):
    ''' Remove a key/value pair from a dict, or an item from a list,
        located at *path*. Returns (changed, resulting document).
    '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    if entry is None:
        return (False, self.yaml_dict)

    if isinstance(entry, dict):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        if key_or_item not in entry:
            return (False, self.yaml_dict)
        entry.pop(key_or_item)
        return (True, self.yaml_dict)

    if isinstance(entry, list):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        try:
            position = entry.index(key_or_item)
        except ValueError:
            # Item not present in the list: nothing to remove.
            return (False, self.yaml_dict)
        entry.pop(position)
        return (True, self.yaml_dict)

    # Scalars cannot be popped from.
    return (False, self.yaml_dict)
def delete(self, path):
    ''' Remove *path* from the document. Returns (changed, document). '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    # Nothing to delete when the path does not resolve.
    if entry is None:
        return (False, self.yaml_dict)

    if Yedit.remove_entry(self.yaml_dict, path, self.separator):
        return (True, self.yaml_dict)
    return (False, self.yaml_dict)
def exists(self, path, value):
    ''' check if value exists at path

        list at path  -> membership test for *value*
        dict at path  -> if *value* is a dict, every key/value pair in it
                         must match; otherwise a key-membership test
        scalar        -> equality test
    '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    if isinstance(entry, list):
        if value in entry:
            return True
        return False

    elif isinstance(entry, dict):
        if isinstance(value, dict):
            # NOTE(review): entry[key] raises KeyError when *key* is
            # absent from entry instead of returning False — confirm
            # callers only pass keys known to exist.
            rval = False
            for key, val in value.items():
                if entry[key] != val:
                    rval = False
                    break
                else:
                    rval = True
            return rval

        return value in entry

    return entry == value
def append(self, path, value):
    ''' Append *value* to the list at *path*, creating the list when the
        path does not yet resolve. Returns (changed, document); no change
        when the path holds a non-list.
    '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    if entry is None:
        # Create an empty list at the path, then re-resolve it.
        self.put(path, [])
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    if not isinstance(entry, list):
        return (False, self.yaml_dict)

    # AUDIT:maybe-no-member makes sense due to loading data from
    # a serialized format.
    # pylint: disable=maybe-no-member
    entry.append(value)
    return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
    ''' put path, value into a dict

        dict at path: merge *value* (must itself be a dict) into it.
        list at path: replace the element located either by *curr_value*
        (found with .index()) or by *index*; when neither locates an
        element, append *value* unless it is already present.
        Returns (changed, document).
    '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    if isinstance(entry, dict):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        if not isinstance(value, dict):
            raise YeditException('Cannot replace key, value entry in ' +
                                 'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501

        entry.update(value)
        return (True, self.yaml_dict)

    elif isinstance(entry, list):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        ind = None
        # curr_value takes precedence over index for locating the element.
        if curr_value:
            try:
                ind = entry.index(curr_value)
            except ValueError:
                return (False, self.yaml_dict)

        elif index is not None:
            ind = index

        if ind is not None and entry[ind] != value:
            entry[ind] = value
            return (True, self.yaml_dict)

        # see if it exists in the list
        try:
            ind = entry.index(value)
        except ValueError:
            # doesn't exist, append it
            entry.append(value)
            return (True, self.yaml_dict)

        # already exists, return
        if ind is not None:
            return (False, self.yaml_dict)
    return (False, self.yaml_dict)
def put(self, path, value):
    ''' put path, value into a dict

        Stores *value* at *path*, creating intermediate containers as
        needed. Returns (changed, document).
    '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    # No-op when the value is already in place.
    if entry == value:
        return (False, self.yaml_dict)

    # deepcopy didn't work
    # Try to use ruamel.yaml and fallback to pyyaml
    # (round-tripping through dump/load preserves formatting metadata
    # that copy.deepcopy loses on ruamel round-trip objects).
    try:
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                  default_flow_style=False),
                             yaml.RoundTripLoader)
    except AttributeError:
        tmp_copy = copy.deepcopy(self.yaml_dict)

    # set the format attributes if available
    try:
        tmp_copy.fa.set_block_style()
    except AttributeError:
        pass

    # Mutate the copy first; only commit it when the add succeeds.
    result = Yedit.add_entry(tmp_copy, path, value, self.separator)
    if not result:
        return (False, self.yaml_dict)

    self.yaml_dict = tmp_copy

    return (True, self.yaml_dict)
def create(self, path, value):
    ''' Create the yaml document with *value* stored at *path*.

        No-op when the backing file already exists.
        Returns (changed, document).
    '''
    if self.file_exists():
        return (False, self.yaml_dict)

    # deepcopy loses ruamel formatting metadata, so round-trip through
    # the dumper/loader when available and fall back to copy.deepcopy.
    try:
        working = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                 default_flow_style=False),
                            yaml.RoundTripLoader)
    except AttributeError:
        working = copy.deepcopy(self.yaml_dict)

    # set the format attributes if available
    try:
        working.fa.set_block_style()
    except AttributeError:
        pass

    # Commit the mutated copy only when the add succeeds.
    if Yedit.add_entry(working, path, value, self.separator):
        self.yaml_dict = working
        return (True, self.yaml_dict)

    return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
    '''return the current value

       Deserializes *invalue* according to *val_type* ('yaml' or 'json');
       any other val_type returns the value unchanged. None passes
       through as None.
    '''
    if invalue is None:
        return None

    curr_value = invalue
    if val_type == 'yaml':
        # SECURITY/BUGFIX: bare yaml.load() without a Loader is unsafe on
        # untrusted input (can construct arbitrary Python objects) and is
        # a TypeError under PyYAML >= 6; use safe_load, matching the
        # fallback convention used elsewhere in this class.
        curr_value = yaml.safe_load(invalue)
    elif val_type == 'json':
        curr_value = json.loads(invalue)

    return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
    '''determine value type passed

       - 'bool' in vtype: validate the string is a recognized boolean
         token (raises YeditException otherwise).
       - 'str' in vtype: stringify incoming booleans.
       - otherwise: yaml-parse strings into native types.
    '''
    true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                  'on', 'On', 'ON', ]
    false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                   'off', 'Off', 'OFF']

    # It came in as a string but you didn't specify value_type as string
    # we will convert to bool if it matches any of the above cases
    if isinstance(inc_value, str) and 'bool' in vtype:
        if inc_value not in true_bools and inc_value not in false_bools:
            raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
                                 % (inc_value, vtype))
    elif isinstance(inc_value, bool) and 'str' in vtype:
        inc_value = str(inc_value)

    # If vtype is not str then go ahead and attempt to yaml load it.
    if isinstance(inc_value, str) and 'str' not in vtype:
        try:
            # SECURITY/BUGFIX: bare yaml.load() without a Loader is unsafe
            # on untrusted input and a TypeError under PyYAML >= 6.
            inc_value = yaml.safe_load(inc_value)
        except Exception:
            raise YeditException('Could not determine type of incoming ' +
                                 'value. value=[%s] vtype=[%s]'
                                 % (type(inc_value), vtype))

    return inc_value
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(module):
    '''perform the idempotent crud operations

       module.params drives the behavior:
         state=list    -> return the value at `key` (or the whole doc)
         state=absent  -> pop `value` from / delete `key`
         state=present -> update/append/put `value` at `key`
       Returns an ansible result dict ({'changed': ..., 'result': ...}
       on success, {'failed': True, 'msg': ...} on error).
    '''
    yamlfile = Yedit(filename=module.params['src'],
                     backup=module.params['backup'],
                     separator=module.params['separator'])

    if module.params['src']:
        rval = yamlfile.load()

        if yamlfile.yaml_dict is None and \
           module.params['state'] != 'present':
            # BUGFIX: the message previously carried an unfilled '%s'
            # placeholder (and "it is has"); interpolate the filename.
            return {'failed': True,
                    'msg': ('Error opening file [%s]. Verify that the '
                            'file exists, that it has correct permissions,'
                            ' and is valid yaml.') % module.params['src']}

    if module.params['state'] == 'list':
        if module.params['content']:
            content = Yedit.parse_value(module.params['content'],
                                        module.params['content_type'])
            yamlfile.yaml_dict = content

        if module.params['key']:
            rval = yamlfile.get(module.params['key']) or {}

        return {'changed': False, 'result': rval, 'state': "list"}

    elif module.params['state'] == 'absent':
        if module.params['content']:
            content = Yedit.parse_value(module.params['content'],
                                        module.params['content_type'])
            yamlfile.yaml_dict = content

        # 'update' pops a single value from a list/dict; otherwise the
        # whole key is removed.
        if module.params['update']:
            rval = yamlfile.pop(module.params['key'],
                                module.params['value'])
        else:
            rval = yamlfile.delete(module.params['key'])

        if rval[0] and module.params['src']:
            yamlfile.write()

        return {'changed': rval[0], 'result': rval[1], 'state': "absent"}

    elif module.params['state'] == 'present':
        # check if content is different than what is in the file
        if module.params['content']:
            content = Yedit.parse_value(module.params['content'],
                                        module.params['content_type'])

            # We had no edits to make and the contents are the same
            if yamlfile.yaml_dict == content and \
               module.params['value'] is None:
                return {'changed': False,
                        'result': yamlfile.yaml_dict,
                        'state': "present"}

            yamlfile.yaml_dict = content

        # we were passed a value; parse it
        if module.params['value']:
            value = Yedit.parse_value(module.params['value'],
                                      module.params['value_type'])
            key = module.params['key']
            if module.params['update']:
                # pylint: disable=line-too-long
                curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501
                                                  module.params['curr_value_format'])  # noqa: E501

                rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501

            elif module.params['append']:
                rval = yamlfile.append(key, value)
            else:
                rval = yamlfile.put(key, value)

            if rval[0] and module.params['src']:
                yamlfile.write()

            return {'changed': rval[0],
                    'result': rval[1], 'state': "present"}

        # no edits to make
        if module.params['src']:
            # pylint: disable=redefined-variable-type
            rval = yamlfile.write()
            return {'changed': rval[0],
                    'result': rval[1],
                    'state': "present"}

    # BUGFIX: corrected 'Unkown' typo in the error message.
    return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Exception raised for errors encountered while driving the oc CLI.'''
# Directories searched in addition to $PATH (see issue below).
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    ''' Find and return oc binary file '''
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can be in /usr/local/bin in some cases, but that may not
    # be in $PATH due to ansible/sudo
    search_dirs = os.environ.get("PATH", os.defpath).split(os.pathsep)
    search_dirs += ADDITIONAL_PATH_LOOKUPS

    binary = 'oc'

    # Use shutil.which if it is available, otherwise fallback to a naive
    # path search (shutil.which is missing on Python 2).
    try:
        located = shutil.which(binary, path=os.pathsep.join(search_dirs))
        if located is not None:
            binary = located
    except AttributeError:
        for candidate in search_dirs:
            full_path = os.path.join(candidate, binary)
            if os.path.exists(full_path):
                binary = full_path
                break

    # Falls back to the bare name 'oc' when nothing was found.
    return binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools

        Thin wrapper over the `oc` / `oc adm` binaries: builds argv lists,
        runs them via subprocess, and normalizes the results into dicts.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI

            namespace: default namespace passed as -n to oc
            kubeconfig: path to the kubeconfig; a private temp copy is used
            verbose: echo commands and output to stdout
            all_namespaces: pass --all-namespaces instead of -n
        '''
        self.namespace = namespace
        self.verbose = verbose
        # Work on a private copy of the kubeconfig so this run cannot be
        # affected by (or affect) concurrent users of the original file.
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content

            Fetches the live object, applies the key/value pairs from
            *content* via Yedit, and `oc replace`s it only when something
            actually changed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))

        # Each change is a (changed, document) tuple; replace only when
        # at least one put modified the object.
        if any([change[0] for change in changes]):
            yed.write()

            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, rname, selector=None):
        '''call oc delete on a resource'''
        cmd = ['delete', resource, rname]
        if selector:
            cmd.append('--selector=%s' % selector)

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # Template supplied on stdin rather than by name.
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        # Write the processed template to a temp file and create it.
        fname = Utils.create_tmpfile(template_name + '-')
        # NOTE(review): results['results'] is passed positionally as
        # Yedit's second parameter — verify that parameter is `content`.
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name

           selector takes precedence over rname when both are given.
        '''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        elif rname:
            cmd.append(rname)

        cmd.extend(['-o', 'json'])

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are returned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        cmd.append('--schedulable=%s' % schedulable)

        # pylint: disable=line-too-long
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)

        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier.

            Returns (returncode, stdout, stderr) with streams decoded to str.
        '''
        curr_env = os.environ.copy()
        # Point oc at our private kubeconfig copy.
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)

        stdout, stderr = proc.communicate(input_data)

        return proc.returncode, stdout.decode(), stderr.decode()

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

           Builds the final argv (binary, optional 'adm', namespace flags),
           runs it, and returns a dict with returncode/results/cmd plus
           stderr/stdout on error.
        '''
        cmds = [self.oc_binary]

        if oadm:
            cmds.append('adm')

        cmds.extend(cmd)

        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # NOTE(review): 'emtpy' is a long-standing typo preserved here on
        # purpose — fixing it to 'empty' would change which namespace
        # strings suppress the -n flag. Confirm before correcting.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501
            cmds.extend(['-n', self.namespace])

        rval = {}
        results = ''
        err = None

        if self.verbose:
            print(' '.join(cmds))

        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)

        rval = {"returncode": returncode,
                "results": results,
                "cmd": ' '.join(cmds)}

        if returncode == 0:
            if output:
                if output_type == 'json':
                    # NOTE(review): `err` is rebound by the except clause,
                    # and the message tested is the Python 2 ValueError
                    # text ("No JSON object could be decoded"); Python 3's
                    # json raises with "Expecting value" so this branch
                    # likely never matches there — confirm intent.
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        if "No JSON object could be decoded" in err.args:
                            err = err.args
                elif output_type == 'raw':
                    rval['results'] = stdout

            if self.verbose:
                print("STDOUT: {0}".format(stdout))
                print("STDERR: {0}".format(stderr))

            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds})

        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {}})

        return rval
class Utils(object):
    ''' utilities for openshiftcli modules

        Stateless helpers: temp-file management, yaml/json (de)serialization,
        result searching, version parsing, and definition comparison.
    '''

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(contents)

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents

            Serializes *data* as yaml (default), json, or raw text and
            registers the temp file for cleanup at interpreter exit.
        '''
        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            # Prefer ruamel's RoundTripDumper (preserves formatting);
            # fall back to PyYAML's safe_dump.
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))

        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])

        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        # delete=False so the empty file persists after the handle closes;
        # callers write to it by name and clean up via Utils.cleanup.
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        # Accept a single item or a list of {'path': ..., 'data': ...}.
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name

            Returns the first result dict whose metadata.name matches,
            or None.
        '''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file

            Reads and parses *sfile* as yaml (default) or json.
        '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output

            Maps each known component ('oc', 'openshift', 'kubernetes')
            to the last whitespace-separated token of its output line.
        '''
        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings

            For each 'vX.Y.Z...' adds '<tech>_numeric' (X.Y.Z without the
            leading 'v' or '+suffix') and '<tech>_short' entries.
        '''
        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
                # "v3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = version[1:4]

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        # NOTE(review): the yum module is Python 2 / RHEL specific; this
        # import fails on systems without yum bindings.
        import yum

        yum_base = yum.YumBase()
        if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
            return True

        return False

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

            Recursively compares result_def against user_def, skipping
            'metadata'/'status' (plus any *skip_keys*). Returns True when
            every non-skipped key matches. *debug* prints the first
            mismatch found.
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            # NOTE(review): this print is not guarded by
                            # `debug` like the others — confirm whether
                            # intentional.
                            print('list compare returned false')
                            return False

                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic Config

       Holds a resource name/namespace/kubeconfig plus an options mapping
       of the form {name: {'value': ..., 'include': bool}} and renders it
       as oc command-line parameters.
    '''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self):
        '''return all options as a string'''
        return self.stringify()

    def stringify(self):
        ''' return the options hash as cli params in a string '''
        params = []
        for option_name, option in self.config_options.items():
            # Emit included options whose value is truthy, plus numeric
            # zeros (isinstance int keeps e.g. replicas=0 in the output).
            emit = option['include'] and (
                option['value'] or isinstance(option['value'], int))
            if emit:
                params.append('--%s=%s' % (option_name.replace('_', '-'),
                                           option['value']))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
name: default_dc
namespace: default
spec:
replicas: 0
selector:
default_dc: default_dc
strategy:
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 0
maxUnavailable: 25%
timeoutSeconds: 600
updatePercent: -25
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
spec:
containers:
- env:
- name: default
value: default
image: default
imagePullPolicy: IfNotPresent
name: default_dc
ports:
- containerPort: 8000
hostPort: 8000
protocol: TCP
name: default_port
resources: {}
terminationMessagePath: /dev/termination-log
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
type: compute
restartPolicy: Always
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
triggers:
- type: ConfigChange
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
def __init__(self, content=None):
''' Constructor for deploymentconfig '''
if not content:
content = DeploymentConfig.default_deployment_config
super(DeploymentConfig, self).__init__(content=content)
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
env = self.get_env_vars()
if env:
env.append({'name': key, 'value': value})
rval = True
else:
result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
rval = result[0]
return rval
def exists_env_value(self, key, value):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key and result['value'] == value:
return True
return False
def exists_env_key(self, key):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key:
return True
return False
def get_env_var(self, key):
'''return a environment variables '''
results = self.get(DeploymentConfig.env_path) or []
if not results:
return None
for env_var in results:
if env_var['name'] == key:
return env_var
return None
def get_env_vars(self):
'''return a environment variables '''
return self.get(DeploymentConfig.env_path) or []
def delete_env_var(self, keys):
'''delete a list of keys '''
if not isinstance(keys, list):
keys = [keys]
env_vars_array = self.get_env_vars()
modified = False
idx = None
for key in keys:
for env_idx, env_var in enumerate(env_vars_array):
if env_var['name'] == key:
idx = env_idx
break
if idx:
modified = True
del env_vars_array[idx]
if modified:
return True
return False
def update_env_var(self, key, value):
'''place an env in the env var list'''
env_vars_array = self.get_env_vars()
idx = None
for env_idx, env_var in enumerate(env_vars_array):
if env_var['name'] == key:
idx = env_idx
break
if idx:
env_vars_array[idx]['value'] = value
else:
self.add_env_value(key, value)
return True
def exists_volume_mount(self, volume_mount):
''' return whether a volume mount exists '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts:
return False
volume_mount_found = False
for exist_volume_mount in exist_volume_mounts:
if exist_volume_mount['name'] == volume_mount['name']:
volume_mount_found = True
break
return volume_mount_found
def exists_volume(self, volume):
''' return whether a volume exists '''
exist_volumes = self.get_volumes()
volume_found = False
for exist_volume in exist_volumes:
if exist_volume['name'] == volume['name']:
volume_found = True
break
return volume_found
def find_volume_by_name(self, volume, mounts=False):
''' return the index of a volume '''
volumes = []
if mounts:
volumes = self.get_volume_mounts()
else:
volumes = self.get_volumes()
for exist_volume in volumes:
if exist_volume['name'] == volume['name']:
return exist_volume
return None
def get_replicas(self):
''' return replicas setting '''
return self.get(DeploymentConfig.replicas_path)
def get_volume_mounts(self):
'''return volume mount information '''
return self.get_volumes(mounts=True)
def get_volumes(self, mounts=False):
'''return volume mount information '''
if mounts:
return self.get(DeploymentConfig.volume_mounts_path) or []
return self.get(DeploymentConfig.volumes_path) or []
def delete_volume_by_name(self, volume):
'''delete a volume '''
modified = False
exist_volume_mounts = self.get_volume_mounts()
exist_volumes = self.get_volumes()
del_idx = None
for idx, exist_volume in enumerate(exist_volumes):
if 'name' in exist_volume and exist_volume['name'] == volume['name']:
del_idx = idx
break
if del_idx != None:
del exist_volumes[del_idx]
modified = True
del_idx = None
for idx, exist_volume_mount in enumerate(exist_volume_mounts):
if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
del_idx = idx
break
if del_idx != None:
del exist_volume_mounts[idx]
modified = True
return modified
def add_volume_mount(self, volume_mount):
''' add a volume or volume mount to the proper location '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts and volume_mount:
self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
else:
exist_volume_mounts.append(volume_mount)
def add_volume(self, volume):
''' add a volume or volume mount to the proper location '''
exist_volumes = self.get_volumes()
if not volume:
return
if not exist_volumes:
self.put(DeploymentConfig.volumes_path, [volume])
else:
exist_volumes.append(volume)
def update_replicas(self, replicas):
''' update replicas value '''
self.put(DeploymentConfig.replicas_path, replicas)
def update_volume(self, volume):
'''place an env in the env var list'''
exist_volumes = self.get_volumes()
if not volume:
return False
# update the volume
update_idx = None
for idx, exist_vol in enumerate(exist_volumes):
if exist_vol['name'] == volume['name']:
update_idx = idx
break
if update_idx != None:
exist_volumes[update_idx] = volume
else:
self.add_volume(volume)
return True
def update_volume_mount(self, volume_mount):
'''place an env in the env var list'''
modified = False
exist_volume_mounts = self.get_volume_mounts()
if not volume_mount:
return False
# update the volume mount
for exist_vol_mount in exist_volume_mounts:
if exist_vol_mount['name'] == volume_mount['name']:
if 'mountPath' in exist_vol_mount and \
str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
exist_vol_mount['mountPath'] = volume_mount['mountPath']
modified = True
break
if not modified:
self.add_volume_mount(volume_mount)
modified = True
return modified
    def needs_update_volume(self, volume, volume_mount):
        '''Return True when the existing volume/volumeMount pair differs from
        the desired `volume`/`volume_mount` state.
        '''
        # NOTE(review): find_volume_by_name() presumably returns None when no
        # match exists; the subscripting below would then raise -- confirm
        # callers only invoke this after the volume has been added.
        exist_volume = self.find_volume_by_name(volume)
        exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
        # Collect per-field equality checks; any False means update needed.
        results = []
        results.append(exist_volume['name'] == volume['name'])
        if 'secret' in volume:
            results.append('secret' in exist_volume)
            results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
            results.append(exist_volume_mount['name'] == volume_mount['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
        elif 'emptyDir' in volume:
            results.append(exist_volume_mount['name'] == volume['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
        elif 'persistentVolumeClaim' in volume:
            pvc = 'persistentVolumeClaim'
            results.append(pvc in exist_volume)
            if results[-1]:
                # Only compare claim details when the existing volume is
                # actually PVC-backed.
                results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
                if 'claimSize' in volume[pvc]:
                    results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
        elif 'hostpath' in volume:
            # NOTE(review): the desired-state key checked here is lowercase
            # 'hostpath' while the existing volume uses 'hostPath', and the
            # host path is compared against the mount's mountPath -- verify
            # both are intentional.
            results.append('hostPath' in exist_volume)
            results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
        return not all(results)
def needs_update_replicas(self, replicas):
''' verify whether a replica update is needed '''
current_reps = self.get(DeploymentConfig.replicas_path)
return not current_reps == replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/replicationcontroller.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class ReplicationController(DeploymentConfig):
    ''' Class to model a replicationcontroller openshift object.
        Currently we are modeled after a deployment config since they
        are very similar. In the future, when the need arises we
        will add functionality to this class.
    '''
    # jsonpath-style locations of the fields this wrapper manipulates; an rc
    # nests its pod template exactly the way a dc does, so the paths match.
    replicas_path = "spec.replicas"
    env_path = "spec.template.spec.containers[0].env"
    volumes_path = "spec.template.spec.volumes"
    container_path = "spec.template.spec.containers"
    volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"

    def __init__(self, content):
        ''' Constructor for ReplicationController '''
        # All behavior is inherited from DeploymentConfig.
        super(ReplicationController, self).__init__(content=content)
# -*- -*- -*- End included fragment: lib/replicationcontroller.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_scale.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCScale(OpenShiftCLI):
    '''Wrap the `oc` command line tools to read and set replica counts on a
    deploymentconfig (dc) or replicationcontroller (rc).
    '''

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 resource_name,
                 namespace,
                 replicas,
                 kind,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        ''' Constructor for OCScale '''
        super(OCScale, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
        self.kind = kind
        self.replicas = replicas
        self.name = resource_name
        self._resource = None

    @property
    def resource(self):
        '''Lazily-fetched model object for the named resource.'''
        if not self._resource:
            # get() populates self._resource as a side effect.
            self.get()
        return self._resource

    @resource.setter
    def resource(self, data):
        ''' setter function for resource var '''
        self._resource = data

    def get(self):
        '''Return the replica count for the named resource.

        On a returncode-0 result the 'results' entry is replaced with a
        single-element list holding the current replica count.
        '''
        vol = self._get(self.kind, self.name)
        if vol['returncode'] == 0:
            # Wrap the raw yaml in the matching model class; both classes
            # expose the same get_replicas() accessor, so the previously
            # duplicated dc/rc branches collapse into one dispatch.
            wrappers = {'dc': DeploymentConfig, 'rc': ReplicationController}
            wrapper = wrappers.get(self.kind)
            if wrapper is not None:
                self.resource = wrapper(content=vol['results'][0])
                vol['results'] = [self.resource.get_replicas()]
        return vol

    def put(self):
        '''Write self.replicas into the resource and replace it on the server.'''
        self.resource.update_replicas(self.replicas)
        return self._replace_content(self.kind, self.name, self.resource.yaml_dict)

    def needs_update(self):
        '''Return True when the live replica count differs from the desired one.'''
        return self.resource.needs_update_replicas(self.replicas)

    # pylint: disable=too-many-return-statements
    @staticmethod
    def run_ansible(params, check_mode):
        '''Idempotent ansible logic: list the replica count, or reconcile it
        to params['replicas'] when state is present.
        '''
        oc_scale = OCScale(params['name'],
                           params['namespace'],
                           params['replicas'],
                           params['kind'],
                           params['kubeconfig'],
                           verbose=params['debug'])

        state = params['state']

        api_rval = oc_scale.get()
        if api_rval['returncode'] != 0:
            return {'failed': True, 'msg': api_rval}

        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'result': api_rval['results'], 'state': 'list'}  # noqa: E501

        elif state == 'present':
            ########
            # Update
            ########
            if oc_scale.needs_update():
                if check_mode:
                    return {'changed': True, 'result': 'CHECK_MODE: Would have updated.'}  # noqa: E501
                api_rval = oc_scale.put()
                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # re-fetch so the reported result reflects the live object
                api_rval = oc_scale.get()
                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'result': api_rval['results'], 'state': 'present'}  # noqa: E501

            # Already at the desired replica count.
            return {'changed': False, 'result': api_rval['results'], 'state': 'present'}  # noqa: E501

        return {'failed': True, 'msg': 'Unknown state passed. [{}]'.format(state)}
# -*- -*- -*- End included fragment: class/oc_scale.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_scale.py -*- -*- -*-
def main():
    '''
    ansible oc module for scaling
    '''
    # Build the argument spec up front so the AnsibleModule call stays short.
    spec = dict(
        kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
        state=dict(default='present', type='str', choices=['present', 'list']),
        debug=dict(default=False, type='bool'),
        kind=dict(default='dc', choices=['dc', 'rc'], type='str'),
        namespace=dict(default='default', type='str'),
        replicas=dict(default=None, type='int'),
        name=dict(default=None, type='str'),
    )
    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    results = OCScale.run_ansible(module.params, module.check_mode)
    if 'failed' in results:
        module.fail_json(**results)
    module.exit_json(**results)


if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_scale.py -*- -*- -*-
| apache-2.0 |
young-zhang/Lean | Algorithm.CSharp/MarginCallEventsAlgorithm.cs | 4596 | /*
* QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
* Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using QuantConnect.Data.Market;
using QuantConnect.Orders;
namespace QuantConnect.Algorithm.CSharp
{
/// <summary>
/// This algorithm showcases two margin related event handlers.
/// OnMarginCallWarning: Fired when a portfolio's remaining margin dips below 5% of the total portfolio value
/// OnMarginCall: Fired immediately before margin call orders are execued, this gives the algorithm a change to regain margin on its own through liquidation
/// </summary>
/// <meta name="tag" content="securities and portfolio" />
/// <meta name="tag" content="margin models" />
    public class MarginCallEventsAlgorithm : QCAlgorithm
    {
        /// <summary>
        /// Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.
        /// </summary>
        public override void Initialize()
        {
            SetStartDate(2013, 10, 01); //Set Start Date
            SetEndDate(2013, 12, 11); //Set End Date
            SetCash(100000); //Set Strategy Cash
            // Find more symbols here: http://quantconnect.com/data
            AddSecurity(SecurityType.Equity, "SPY", Resolution.Second);

            // cranking up the leverage increases the odds of a margin call when the security falls in value
            Securities["SPY"].SetLeverage(100);
        }

        /// <summary>
        /// OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
        /// </summary>
        /// <param name="data">TradeBars IDictionary object with your stock data</param>
        public void OnData(TradeBars data)
        {
            if (!Portfolio.Invested)
            {
                // NOTE(review): Liquidate() here runs while nothing is
                // invested; presumably kept as a defensive reset before
                // re-entering -- confirm it is needed.
                Liquidate();
                // Target 100x of portfolio value in SPY (paired with the
                // 100x leverage set in Initialize()).
                SetHoldings("SPY", 100);
            }
        }

        /// <summary>
        /// Margin call event handler. This method is called right before the margin call orders are placed in the market.
        /// </summary>
        /// <param name="requests">The orders to be executed to bring this algorithm within margin limits</param>
        public override void OnMarginCall(List<SubmitOrderRequest> requests)
        {
            // this code gets called BEFORE the orders are placed, so we can try to liquidate some of our positions
            // before we get the margin call orders executed. We could also modify these orders by changing their
            // quantities
            // Iterate over a copy (ToList) so we can mutate `requests` safely.
            foreach (var order in requests.ToList())
            {
                // liquidate an extra 10% each time we get a margin call to give us more padding
                // NOTE(review): Math.Sign(q) * q equals Abs(q), so newQuantity is
                // always positive regardless of the original order's direction;
                // confirm this matches the "extra 10%" intent (order.Quantity *
                // 1.1m would preserve the sign).
                var newQuantity = (int)(Math.Sign(order.Quantity) * order.Quantity * 1.1m);
                requests.Remove(order);
                requests.Add(new SubmitOrderRequest(order.OrderType, order.SecurityType, order.Symbol, newQuantity, order.StopPrice, order.LimitPrice, Time, "OnMarginCall"));
            }
        }

        /// <summary>
        /// Margin call warning event handler. This method is called when Portoflio.MarginRemaining is under 5% of your Portfolio.TotalPortfolioValue
        /// </summary>
        public override void OnMarginCallWarning()
        {
            // this code gets called when the margin remaining drops below 5% of our total portfolio value, it gives the algorithm
            // a chance to prevent a margin call from occurring

            // prevent margin calls by responding to the warning and increasing margin remaining
            // Sell 0.5% of the current holdings (negated so the order opposes
            // the open position).
            var spyHoldings = Securities["SPY"].Holdings.Quantity;
            var shares = (int)(-spyHoldings * .005m);
            Error(string.Format("{0} - OnMarginCallWarning(): Liquidating {1} shares of SPY to avoid margin call.", Time, shares));
            MarketOrder("SPY", shares);
        }
    }
} | apache-2.0 |
zrccxyb62/hadoop | hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java | 9197 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.maven.plugin.protoc;
import org.apache.hadoop.maven.plugin.util.Exec;
import org.apache.hadoop.maven.plugin.util.FileSetUtils;
import org.apache.maven.model.FileSet;
import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.apache.maven.project.MavenProject;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.type.TypeReference;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.zip.CRC32;
/**
 * Maven goal that runs the {@code protoc} compiler over a FileSet of .proto
 * sources during generate-sources, emitting Java classes into
 * {@code generated-sources/java}. Checksums of inputs are persisted between
 * builds so unchanged files are not recompiled.
 */
@Mojo(name="protoc", defaultPhase = LifecyclePhase.GENERATE_SOURCES)
public class ProtocMojo extends AbstractMojo {

  @Parameter(defaultValue="${project}", readonly=true)
  private MavenProject project;

  // Optional -I include directories passed through to protoc.
  @Parameter
  private File[] imports;

  @Parameter(defaultValue="${project.build.directory}/generated-sources/java")
  private File output;

  @Parameter(required=true)
  private FileSet source;

  // Executable name/path; defaults to whatever "protoc" resolves to on PATH.
  @Parameter(defaultValue="protoc")
  private String protocCommand;

  // The exact version the resolved protoc binary must report.
  @Parameter(required=true)
  private String protocVersion;

  @Parameter(defaultValue =
      "${project.build.directory}/hadoop-maven-plugins-protoc-checksums.json")
  private String checksumPath;

  /**
   * Compares include and source file checksums against previously computed
   * checksums stored in a json file in the build directory.
   */
  public class ChecksumComparator {

    // Checksums loaded from the previous build's json file (may be empty).
    private final Map<String, Long> storedChecksums;
    // Checksums computed during this run; written back via writeChecksums().
    private final Map<String, Long> computedChecksums;

    private final File checksumFile;

    /** Loads stored checksums from {@code checksumPath} if present. */
    ChecksumComparator(String checksumPath) throws IOException {
      checksumFile = new File(checksumPath);
      // Read in the checksums
      if (checksumFile.exists()) {
        ObjectMapper mapper = new ObjectMapper();
        storedChecksums = mapper
            .readValue(checksumFile, new TypeReference<Map<String, Long>>() {
            });
      } else {
        storedChecksums = new HashMap<>(0);
      }
      computedChecksums = new HashMap<>();
    }

    /**
     * Returns true when the file (or, recursively, any file under the
     * directory) differs from the stored checksum.
     */
    public boolean hasChanged(File file) throws IOException {
      if (!file.exists()) {
        throw new FileNotFoundException(
            "Specified protoc include or source does not exist: " + file);
      }
      if (file.isDirectory()) {
        return hasDirectoryChanged(file);
      } else if (file.isFile()) {
        return hasFileChanged(file);
      } else {
        throw new IOException("Not a file or directory: " + file);
      }
    }

    private boolean hasDirectoryChanged(File directory) throws IOException {
      // NOTE(review): listFiles() returns null on I/O error, which would
      // NPE in the loop below -- confirm whether that case matters here.
      File[] listing = directory.listFiles();
      boolean changed = false;
      // Do not exit early, since we need to compute and save checksums
      // for each file within the directory.
      for (File f : listing) {
        if (f.isDirectory()) {
          if (hasDirectoryChanged(f)) {
            changed = true;
          }
        } else if (f.isFile()) {
          if (hasFileChanged(f)) {
            changed = true;
          }
        } else {
          getLog().debug("Skipping entry that is not a file or directory: "
              + f);
        }
      }
      return changed;
    }

    private boolean hasFileChanged(File file) throws IOException {
      long computedCsum = computeChecksum(file);

      // Return if the generated csum matches the stored csum
      Long storedCsum = storedChecksums.get(file.getCanonicalPath());
      if (storedCsum == null || storedCsum.longValue() != computedCsum) {
        // It has changed.
        return true;
      }
      return false;
    }

    /** CRC32 of the file contents, memoized per canonical path. */
    private long computeChecksum(File file) throws IOException {
      // If we've already computed the csum, reuse the computed value
      final String canonicalPath = file.getCanonicalPath();
      if (computedChecksums.containsKey(canonicalPath)) {
        return computedChecksums.get(canonicalPath);
      }
      // Compute the csum for the file
      CRC32 crc = new CRC32();
      byte[] buffer = new byte[1024*64];
      try (BufferedInputStream in =
          new BufferedInputStream(new FileInputStream(file))) {
        while (true) {
          int read = in.read(buffer);
          if (read <= 0) {
            break;
          }
          crc.update(buffer, 0, read);
        }
      }
      // Save it in the generated map and return
      // (crc.getValue() below is the same value as computedCsum).
      final long computedCsum = crc.getValue();
      computedChecksums.put(canonicalPath, computedCsum);
      return crc.getValue();
    }

    /** Persists this run's checksums as json for the next build. */
    public void writeChecksums() throws IOException {
      ObjectMapper mapper = new ObjectMapper();
      try (BufferedOutputStream out = new BufferedOutputStream(
          new FileOutputStream(checksumFile))) {
        mapper.writeValue(out, computedChecksums);
        getLog().info("Wrote protoc checksums to file " + checksumFile);
      }
    }
  }

  /**
   * Verifies the protoc binary and version, then compiles only the .proto
   * files whose checksum (or whose imports' checksums) changed.
   */
  public void execute() throws MojoExecutionException {
    try {
      List<String> command = new ArrayList<String>();
      command.add(protocCommand);
      command.add("--version");
      Exec exec = new Exec(this);
      List<String> out = new ArrayList<String>();
      // Shell convention: exit code 127 means the command was not found.
      if (exec.run(command, out) == 127) {
        getLog().error("protoc, not found at: " + protocCommand);
        throw new MojoExecutionException("protoc failure");
      } else {
        if (out.isEmpty()) {
          getLog().error("stdout: " + out);
          throw new MojoExecutionException(
              "'protoc --version' did not return a version");
        } else {
          if (!out.get(0).endsWith(protocVersion)) {
            throw new MojoExecutionException(
                "protoc version is '" + out.get(0) + "', expected version is '"
                    + protocVersion + "'");
          }
        }
      }
      // mkdirs() returns false both on failure and when the directory
      // already exists, hence the extra exists() check.
      if (!output.mkdirs()) {
        if (!output.exists()) {
          throw new MojoExecutionException(
              "Could not create directory: " + output);
        }
      }

      // Whether the import or source protoc files have changed.
      ChecksumComparator comparator = new ChecksumComparator(checksumPath);
      boolean importsChanged = false;

      command = new ArrayList<String>();
      command.add(protocCommand);
      command.add("--java_out=" + output.getCanonicalPath());
      if (imports != null) {
        for (File i : imports) {
          if (comparator.hasChanged(i)) {
            importsChanged = true;
          }
          command.add("-I" + i.getCanonicalPath());
        }
      }

      // Filter to generate classes for just the changed source files.
      List<File> changedSources = new ArrayList<>();
      boolean sourcesChanged = false;
      for (File f : FileSetUtils.convertFileSetToFiles(source)) {
        // Need to recompile if the source has changed, or if any import has
        // changed.
        if (comparator.hasChanged(f) || importsChanged) {
          sourcesChanged = true;
          changedSources.add(f);
          command.add(f.getCanonicalPath());
        }
      }

      if (!sourcesChanged && !importsChanged) {
        getLog().info("No changes detected in protoc files, skipping "
            + "generation.");
      } else {
        if (getLog().isDebugEnabled()) {
          StringBuilder b = new StringBuilder();
          b.append("Generating classes for the following protoc files: [");
          String prefix = "";
          for (File f : changedSources) {
            b.append(prefix);
            b.append(f.toString());
            prefix = ", ";
          }
          b.append("]");
          getLog().debug(b.toString());
        }

        exec = new Exec(this);
        out = new ArrayList<String>();
        List<String> err = new ArrayList<>();
        if (exec.run(command, out, err) != 0) {
          getLog().error("protoc compiler error");
          for (String s : out) {
            getLog().error(s);
          }
          for (String s : err) {
            getLog().error(s);
          }
          throw new MojoExecutionException("protoc failure");
        }
        // Write the new checksum file on success.
        comparator.writeChecksums();
      }
    } catch (Throwable ex) {
      throw new MojoExecutionException(ex.toString(), ex);
    }
    // Always register the output dir so generated sources get compiled.
    project.addCompileSourceRoot(output.getAbsolutePath());
  }
}
| apache-2.0 |
adelina-t/nova | nova/tests/functional/v3/test_deferred_delete.py | 1588 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.v3 import test_servers
class DeferredDeleteSampleJsonTests(test_servers.ServersSampleBase):
    # API extension exercised by these sample tests.
    extension_name = "os-deferred-delete"

    def setUp(self):
        super(DeferredDeleteSampleJsonTests, self).setUp()
        # A non-zero reclaim interval makes the DELETE below a soft delete,
        # so the restore/force-delete actions have something to act on.
        self.flags(reclaim_instance_interval=1)

    def test_restore(self):
        """Soft-delete a server, then issue the restore action sample."""
        uuid = self._post_server()
        self._do_delete('servers/%s' % uuid)

        response = self._do_post('servers/%s/action' % uuid,
                                 'restore-post-req', {})
        # The action is asynchronous: 202 Accepted with an empty body.
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, '')

    def test_force_delete(self):
        """Soft-delete a server, then issue the force-delete action sample."""
        uuid = self._post_server()
        self._do_delete('servers/%s' % uuid)

        response = self._do_post('servers/%s/action' % uuid,
                                 'force-delete-post-req', {})
        # Same async contract as restore: 202 with no content.
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, '')
| apache-2.0 |
Microsoft/TypeScript | tests/baselines/reference/commentOnArrayElement15.js | 167 | //// [commentOnArrayElement15.ts]
const array = [/* comment */ 1 /* comment */];
//// [commentOnArrayElement15.js]
var array = [/* comment */ 1 /* comment */];
| apache-2.0 |
jridgewell/amphtml | test/unit/core/dom/layout/test-viewport-observer.js | 5557 | import {
createViewportObserver,
observeIntersections,
} from '#core/dom/layout/viewport-observer';
// Unit tests for createViewportObserver/observeIntersections. The browser
// IntersectionObserver is replaced by a sandbox stub so intersection
// callbacks can be fired synchronously via toggleViewport().
describes.sandboxed('DOM - layout - Viewport Observer', {}, (env) => {
  describe('createViewportObserver', () => {
    let win;
    let ctorSpy;
    const noop = () => {};

    beforeEach(() => {
      // Minimal fake window whose IntersectionObserver records its ctor args.
      ctorSpy = env.sandbox.stub();
      win = {
        parent: null,
        document: {},
        IntersectionObserver: ctorSpy,
      };
    });

    it('Uses implicit root.', () => {
      createViewportObserver(noop, win);
      expect(ctorSpy).calledWith(noop, {threshold: undefined, root: undefined});
    });

    it('Pass along threshold argument', () => {
      createViewportObserver(noop, win, {threshold: 0.5});
      expect(ctorSpy).calledWith(noop, {threshold: 0.5, root: undefined});
    });

    it('Sets document root appropriately', () => {
      // Implicit root when not iframed.
      createViewportObserver(noop, win, {needsRootBounds: true});
      expect(ctorSpy).calledWith(noop, {threshold: undefined, root: undefined});

      // Document root when iframed.
      win.parent = {};
      createViewportObserver(noop, win, {needsRootBounds: true});
      expect(ctorSpy).calledWith(noop, {
        threshold: undefined,
        root: win.document,
      });
    });
  });

  describe('Shared viewport observer', () => {
    let inOb;
    let win;
    let doc;
    let el1;
    let el2;
    let tracked;

    beforeEach(() => {
      // Stubbed IntersectionObserver that just tracks observed elements.
      inOb = env.sandbox.stub();
      tracked = new Set();
      inOb.callsFake(() => ({
        observe: (el) => tracked.add(el),
        unobserve: (el) => tracked.delete(el),
      }));
      win = {IntersectionObserver: inOb};
      doc = {defaultView: win};
      el1 = {ownerDocument: doc};
      el2 = {ownerDocument: doc};
    });

    /**
     * Simulate an IntersectionObserver callback for an element.
     * @param {!Element} el
     * @param {boolean} inViewport
     */
    function toggleViewport(el, inViewport) {
      const win = el.ownerDocument.defaultView;
      // Grabs the IO Callback shared by all the viewport observers.
      const ioCallback = win.IntersectionObserver.getCall(0).args[0];
      ioCallback([{target: el, isIntersecting: inViewport}]);
    }

    it('observed element should have its callback fired each time it enters/exits the viewport.', () => {
      const viewportEvents = [];
      observeIntersections(el1, (entry) => viewportEvents.push(entry));
      toggleViewport(el1, true);
      toggleViewport(el1, false);

      expect(viewportEvents[0].target).to.eql(el1);
      expect(viewportEvents[0].isIntersecting).to.be.true;
      expect(viewportEvents[1].target).to.eql(el1);
      expect(viewportEvents[1].isIntersecting).to.be.false;
    });

    it('can independently observe multiple elements', () => {
      const el1Events = [];
      const el2Events = [];
      observeIntersections(el1, (entry) =>
        el1Events.push(entry.isIntersecting)
      );
      observeIntersections(el2, (entry) =>
        el2Events.push(entry.isIntersecting)
      );
      toggleViewport(el1, false);
      toggleViewport(el2, true);
      toggleViewport(el1, true);

      expect(el1Events).to.eql([false, true]);
      expect(el2Events).to.eql([true]);
    });

    it('once unobserved, the callback is no longer fired', () => {
      const el1Events = [];
      const unobserveIntersections = observeIntersections(el1, (entry) =>
        el1Events.push(entry.isIntersecting)
      );
      toggleViewport(el1, false);
      unobserveIntersections();
      toggleViewport(el1, true);
      toggleViewport(el1, false);

      expect(el1Events).to.eql([false]);
    });

    it('A quick observe and unobserve pair should not cause an error or fire the callback', () => {
      const spy = env.sandbox.spy();
      const unobserveIntersections = observeIntersections(el1, spy);
      unobserveIntersections();
      toggleViewport(el1, true);

      expect(spy).not.called;
    });

    it('can have multiple obsevers for the same element', () => {
      let elInObEntries = [];
      observeIntersections(el1, (entry) => elInObEntries.push(entry));
      observeIntersections(el1, (entry) => elInObEntries.push(entry));
      toggleViewport(el1, true);

      // Both callbacks receive the same entry.
      expect(elInObEntries).to.have.lengthOf(2);
      expect(elInObEntries[0].target).to.eql(el1);
      expect(elInObEntries[0].isIntersecting).to.be.true;
      expect(elInObEntries[1].target).to.eql(el1);
      expect(elInObEntries[1].isIntersecting).to.be.true;

      elInObEntries = [];
      toggleViewport(el1, false);
      expect(elInObEntries).to.have.lengthOf(2);
      expect(elInObEntries[0].isIntersecting).to.be.false;
      expect(elInObEntries[1].isIntersecting).to.be.false;
    });

    it('can observe and unobserve an element with multiple callbacks', () => {
      const cb1 = env.sandbox.spy();
      const cb2 = env.sandbox.spy();
      const unobserveIntersectionsCb1 = observeIntersections(el1, cb1);
      const unobserveIntersectionsCb2 = observeIntersections(el1, cb2);
      toggleViewport(el1, true);
      expect(cb1).to.be.called;
      expect(cb2).to.be.called;

      cb1.resetHistory();
      cb2.resetHistory();

      // Unobserving one callback must not affect the other.
      unobserveIntersectionsCb2();
      toggleViewport(el1, true);
      expect(cb1).to.be.called;
      expect(cb2).not.to.be.called;

      cb1.resetHistory();
      cb2.resetHistory();

      unobserveIntersectionsCb1();
      toggleViewport(el1, true);
      expect(cb1).not.to.be.called;
      expect(cb2).not.to.be.called;
    });
  });
});
| apache-2.0 |
cjellick/rancher | vendor/github.com/rancher/types/client/management/v3/zz_generated_multi_cluster_app_revision.go | 4970 | package client
import (
"github.com/rancher/norman/types"
)
// Resource type name and JSON/YAML field keys for the
// multiClusterAppRevision norman resource (generated code).
const (
	MultiClusterAppRevisionType                   = "multiClusterAppRevision"
	MultiClusterAppRevisionFieldAnnotations       = "annotations"
	MultiClusterAppRevisionFieldAnswers           = "answers"
	MultiClusterAppRevisionFieldCreated           = "created"
	MultiClusterAppRevisionFieldCreatorID         = "creatorId"
	MultiClusterAppRevisionFieldLabels            = "labels"
	MultiClusterAppRevisionFieldName              = "name"
	MultiClusterAppRevisionFieldOwnerReferences   = "ownerReferences"
	MultiClusterAppRevisionFieldRemoved           = "removed"
	MultiClusterAppRevisionFieldTemplateVersionID = "templateVersionId"
	MultiClusterAppRevisionFieldUUID              = "uuid"
)
// MultiClusterAppRevision is the API model for a multi-cluster app revision.
type MultiClusterAppRevision struct {
	types.Resource
	Annotations       map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
	Answers           []Answer          `json:"answers,omitempty" yaml:"answers,omitempty"`
	Created           string            `json:"created,omitempty" yaml:"created,omitempty"`
	CreatorID         string            `json:"creatorId,omitempty" yaml:"creatorId,omitempty"`
	Labels            map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
	Name              string            `json:"name,omitempty" yaml:"name,omitempty"`
	OwnerReferences   []OwnerReference  `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"`
	Removed           string            `json:"removed,omitempty" yaml:"removed,omitempty"`
	TemplateVersionID string            `json:"templateVersionId,omitempty" yaml:"templateVersionId,omitempty"`
	UUID              string            `json:"uuid,omitempty" yaml:"uuid,omitempty"`
}

// MultiClusterAppRevisionCollection is one page of revisions plus the client
// used to fetch subsequent pages.
type MultiClusterAppRevisionCollection struct {
	types.Collection
	Data   []MultiClusterAppRevision `json:"data,omitempty"`
	client *MultiClusterAppRevisionClient
}

// MultiClusterAppRevisionClient performs API operations against the resource.
type MultiClusterAppRevisionClient struct {
	apiClient *Client
}

// MultiClusterAppRevisionOperations is the CRUD surface exposed by the client.
type MultiClusterAppRevisionOperations interface {
	List(opts *types.ListOpts) (*MultiClusterAppRevisionCollection, error)
	ListAll(opts *types.ListOpts) (*MultiClusterAppRevisionCollection, error)
	Create(opts *MultiClusterAppRevision) (*MultiClusterAppRevision, error)
	Update(existing *MultiClusterAppRevision, updates interface{}) (*MultiClusterAppRevision, error)
	Replace(existing *MultiClusterAppRevision) (*MultiClusterAppRevision, error)
	ByID(id string) (*MultiClusterAppRevision, error)
	Delete(container *MultiClusterAppRevision) error
}
// newMultiClusterAppRevisionClient wires a client to the shared apiClient.
func newMultiClusterAppRevisionClient(apiClient *Client) *MultiClusterAppRevisionClient {
	return &MultiClusterAppRevisionClient{
		apiClient: apiClient,
	}
}

// Create posts a new revision and returns the server's representation.
func (c *MultiClusterAppRevisionClient) Create(container *MultiClusterAppRevision) (*MultiClusterAppRevision, error) {
	resp := &MultiClusterAppRevision{}
	err := c.apiClient.Ops.DoCreate(MultiClusterAppRevisionType, container, resp)
	return resp, err
}

// Update applies partial updates to an existing revision.
func (c *MultiClusterAppRevisionClient) Update(existing *MultiClusterAppRevision, updates interface{}) (*MultiClusterAppRevision, error) {
	resp := &MultiClusterAppRevision{}
	err := c.apiClient.Ops.DoUpdate(MultiClusterAppRevisionType, &existing.Resource, updates, resp)
	return resp, err
}

// Replace overwrites the revision wholesale with obj.
func (c *MultiClusterAppRevisionClient) Replace(obj *MultiClusterAppRevision) (*MultiClusterAppRevision, error) {
	resp := &MultiClusterAppRevision{}
	err := c.apiClient.Ops.DoReplace(MultiClusterAppRevisionType, &obj.Resource, obj, resp)
	return resp, err
}

// List returns a single page of revisions matching opts.
func (c *MultiClusterAppRevisionClient) List(opts *types.ListOpts) (*MultiClusterAppRevisionCollection, error) {
	resp := &MultiClusterAppRevisionCollection{}
	err := c.apiClient.Ops.DoList(MultiClusterAppRevisionType, opts, resp)
	resp.client = c
	return resp, err
}

// ListAll follows pagination links and accumulates every page into one
// collection.
func (c *MultiClusterAppRevisionClient) ListAll(opts *types.ListOpts) (*MultiClusterAppRevisionCollection, error) {
	resp := &MultiClusterAppRevisionCollection{}
	resp, err := c.List(opts)
	if err != nil {
		return resp, err
	}
	data := resp.Data
	for next, err := resp.Next(); next != nil && err == nil; next, err = next.Next() {
		data = append(data, next.Data...)
		resp = next
		resp.Data = data
	}
	if err != nil {
		return resp, err
	}
	return resp, err
}

// Next fetches the following page, or returns (nil, nil) when exhausted.
func (cc *MultiClusterAppRevisionCollection) Next() (*MultiClusterAppRevisionCollection, error) {
	if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
		resp := &MultiClusterAppRevisionCollection{}
		err := cc.client.apiClient.Ops.DoNext(cc.Pagination.Next, resp)
		resp.client = cc.client
		return resp, err
	}
	return nil, nil
}

// ByID fetches a single revision by its id.
func (c *MultiClusterAppRevisionClient) ByID(id string) (*MultiClusterAppRevision, error) {
	resp := &MultiClusterAppRevision{}
	err := c.apiClient.Ops.DoByID(MultiClusterAppRevisionType, id, resp)
	return resp, err
}

// Delete removes the revision from the server.
func (c *MultiClusterAppRevisionClient) Delete(container *MultiClusterAppRevision) error {
	return c.apiClient.Ops.DoResourceDelete(MultiClusterAppRevisionType, &container.Resource)
}
| apache-2.0 |
DariusX/camel | core/camel-core/src/test/java/org/apache/camel/component/file/FilerConsumerDoneFileNameSimplePrefixTest.java | 2721 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.file;
import java.io.File;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.Exchange;
import org.apache.camel.builder.RouteBuilder;
import org.junit.Before;
import org.junit.Test;
/**
* Unit test for writing done files
*/
public class FilerConsumerDoneFileNameSimplePrefixTest extends ContextTestSupport {

    @Override
    @Before
    public void setUp() throws Exception {
        // Start from an empty directory so stale files cannot satisfy the route.
        deleteDirectory("target/data/done");
        super.setUp();
    }

    @Test
    public void testDoneFile() throws Exception {
        // Phase 1: the payload alone must NOT be consumed without a done file.
        getMockEndpoint("mock:result").expectedMessageCount(0);

        template.sendBodyAndHeader("file:target/data/done", "Hello World", Exchange.FILE_NAME, "hello.txt");

        // wait a bit and it should not pickup the written file as there are no
        // done file
        Thread.sleep(250);

        assertMockEndpointsSatisfied();

        resetMocks();
        oneExchangeDone.reset();

        // Phase 2: once the done file appears, the payload is consumed.
        getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");

        // write the done file
        template.sendBodyAndHeader("file:target/data/done", "", Exchange.FILE_NAME, "done-hello.txt");

        assertMockEndpointsSatisfied();
        oneExchangeDone.matchesMockWaitTime();

        // done file should be deleted now
        File file = new File("target/data/done/done-hello.txt");
        assertFalse("Done file should be deleted: " + file, file.exists());
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                // using $simple{ to avoid clash with spring property
                // placeholder
                from("file:target/data/done?doneFileName=done-$simple{file:name}&initialDelay=0&delay=10").to("mock:result");
            }
        };
    }
}
| apache-2.0 |
popojargo/couchdb-fauxton | app/addons/components/base.js | 634 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
import "./assets/less/components.less";
// Minimal addon entry point: pulling in the stylesheet above is the real
// work; initialize() is a deliberate no-op hook for the addon loader.
const base = {
  initialize() {}
};

export default base;
| apache-2.0 |
mminella/spring-cloud-data | spring-cloud-dataflow-server-core/src/main/java/org/springframework/cloud/dataflow/server/repository/support/OraclePagingQueryProvider.java | 1855 | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.dataflow.server.repository.support;
import org.springframework.data.domain.Pageable;
/**
* Oracle implementation of a {@link PagingQueryProvider} using database specific
* features.
*
* @author Glenn Renfro
*/
/**
 * Oracle implementation of a {@link PagingQueryProvider} using database specific
 * features.
 *
 * @author Glenn Renfro
 */
public class OraclePagingQueryProvider extends AbstractSqlPagingQueryProvider {

	@Override
	public String getPageQuery(Pageable pageable) {
		// ROWNUM numbering starts at 1, so shift the zero-based page offset.
		long firstRow = pageable.getOffset() + 1;
		String rowNumClause = "TMP_ROW_NUM >= " + firstRow + " AND TMP_ROW_NUM < "
				+ (firstRow + pageable.getPageSize());
		return generateRowNumSqlQueryWithNesting(getSelectClause(), false, rowNumClause);
	}

	/**
	 * Builds the classic Oracle pagination shape: an inner ordered select,
	 * wrapped to assign ROWNUM, wrapped again to filter on the row window.
	 */
	private String generateRowNumSqlQueryWithNesting(String selectClause, boolean remainingPageQuery,
			String rowNumClause) {
		StringBuilder sql = new StringBuilder()
				.append("SELECT ").append(selectClause)
				.append(" FROM (SELECT ").append(selectClause).append(", ROWNUM as TMP_ROW_NUM")
				.append(" FROM (SELECT ").append(selectClause).append(" FROM ").append(getFromClause());
		SqlPagingQueryUtils.buildWhereClause(this, remainingPageQuery, sql);
		sql.append(" ORDER BY ").append(SqlPagingQueryUtils.buildSortClause(this));
		sql.append(")) WHERE ").append(rowNumClause);
		return sql.toString();
	}
}
| apache-2.0 |
TristanTong/bbsWirelessTag | yafsrc/ServiceStack.OrmLite/src/ServiceStack.Text/Common/DeserializeBuiltin.cs | 7144 | //
// https://github.com/ServiceStack/ServiceStack.Text
// ServiceStack.Text: .NET C# POCO JSON, JSV and CSV Text Serializers.
//
// Authors:
// Demis Bellot (demis.bellot@gmail.com)
//
// Copyright 2012 Service Stack LLC. All Rights Reserved.
//
// Licensed under the same terms of ServiceStack.
//
using System;
using System.Globalization;
namespace ServiceStack.Text.Common
{
public static class DeserializeBuiltin<T>
{
private static readonly ParseStringDelegate CachedParseFn;
static DeserializeBuiltin()
{
CachedParseFn = GetParseFn();
}
public static ParseStringDelegate Parse
{
get { return CachedParseFn; }
}
private static ParseStringDelegate GetParseFn()
{
var nullableType = Nullable.GetUnderlyingType(typeof(T));
if (nullableType == null)
{
var typeCode = typeof(T).GetTypeCode();
switch (typeCode)
{
case TypeCode.Boolean:
//Lots of kids like to use '1', HTML checkboxes use 'on' as a soft convention
return value =>
value.Length == 1 ?
value == "1"
: value.Length == 2 ?
value == "on" :
bool.Parse(value);
case TypeCode.Byte:
return value => byte.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.SByte:
return value => sbyte.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Int16:
return value => short.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.UInt16:
return value => ushort.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Int32:
return value => int.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.UInt32:
return value => uint.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Int64:
return value => long.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.UInt64:
return value => ulong.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Single:
return value => float.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Double:
return value => double.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Decimal:
return value => decimal.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.DateTime:
return value => DateTimeSerializer.ParseShortestXsdDateTime(value);
case TypeCode.Char:
char cValue;
return value => char.TryParse(value, out cValue) ? cValue : '\0';
}
if (typeof(T) == typeof(Guid))
return value => new Guid(value);
if (typeof(T) == typeof(DateTimeOffset))
return value => DateTimeSerializer.ParseDateTimeOffset(value);
if (typeof(T) == typeof(TimeSpan))
return value => DateTimeSerializer.ParseTimeSpan(value);
#if !(__IOS__ || SL5 || XBOX || ANDROID || PCL)
if (typeof(T) == typeof(System.Data.Linq.Binary))
return value => new System.Data.Linq.Binary(Convert.FromBase64String(value));
#endif
}
else
{
var typeCode = nullableType.GetTypeCode();
switch (typeCode)
{
case TypeCode.Boolean:
return value => string.IsNullOrEmpty(value) ?
(bool?)null
: value.Length == 1 ?
value == "1"
: value.Length == 2 ?
value == "on" :
bool.Parse(value);
case TypeCode.Byte:
return value => string.IsNullOrEmpty(value) ? (byte?)null : byte.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.SByte:
return value => string.IsNullOrEmpty(value) ? (sbyte?)null : sbyte.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Int16:
return value => string.IsNullOrEmpty(value) ? (short?)null : short.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.UInt16:
return value => string.IsNullOrEmpty(value) ? (ushort?)null : ushort.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Int32:
return value => string.IsNullOrEmpty(value) ? (int?)null : int.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.UInt32:
return value => string.IsNullOrEmpty(value) ? (uint?)null : uint.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Int64:
return value => string.IsNullOrEmpty(value) ? (long?)null : long.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.UInt64:
return value => string.IsNullOrEmpty(value) ? (ulong?)null : ulong.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Single:
return value => string.IsNullOrEmpty(value) ? (float?)null : float.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Double:
return value => string.IsNullOrEmpty(value) ? (double?)null : double.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.Decimal:
return value => string.IsNullOrEmpty(value) ? (decimal?)null : decimal.Parse(value, CultureInfo.InvariantCulture);
case TypeCode.DateTime:
return value => DateTimeSerializer.ParseShortestNullableXsdDateTime(value);
case TypeCode.Char:
char cValue;
return value => string.IsNullOrEmpty(value) ? (char?)null : char.TryParse(value, out cValue) ? cValue : '\0';
}
if (typeof(T) == typeof(TimeSpan?))
return value => DateTimeSerializer.ParseNullableTimeSpan(value);
if (typeof(T) == typeof(Guid?))
return value => string.IsNullOrEmpty(value) ? (Guid?)null : new Guid(value);
if (typeof(T) == typeof(DateTimeOffset?))
return value => DateTimeSerializer.ParseNullableDateTimeOffset(value);
}
return null;
}
}
} | apache-2.0 |
shaoxuan-wang/flink | flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/RegionPartitionReleaseStrategyTest.java | 7785 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.flink.runtime.executiongraph;
import org.apache.flink.runtime.executiongraph.failover.flip1.partitionrelease.PipelinedRegion;
import org.apache.flink.runtime.executiongraph.failover.flip1.partitionrelease.RegionPartitionReleaseStrategy;
import org.apache.flink.runtime.io.network.partition.ResultPartitionType;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;
import org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex;
import org.apache.flink.runtime.scheduler.strategy.TestingSchedulingResultPartition;
import org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology;
import org.apache.flink.util.TestLogger;
import org.junit.Before;
import org.junit.Test;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
/**
* Tests for {@link RegionPartitionReleaseStrategy}.
*/
/**
 * Tests for {@link RegionPartitionReleaseStrategy}.
 *
 * Each test builds a small scheduling topology, groups its vertices into
 * pipelined regions, and then drives the strategy via vertexFinished /
 * vertexUnfinished, asserting which intermediate result partitions it
 * reports as releasable.
 */
public class RegionPartitionReleaseStrategyTest extends TestLogger {

	// Fresh topology per test; vertices and partitions are added by each case.
	private TestingSchedulingTopology testingSchedulingTopology;

	@Before
	public void setUp() throws Exception {
		testingSchedulingTopology = new TestingSchedulingTopology();
	}

	@Test
	public void releasePartitionsIfDownstreamRegionIsFinished() {
		// One producer region feeding one consumer region through a single partition.
		final List<TestingSchedulingExecutionVertex> producers = testingSchedulingTopology.addExecutionVertices().finish();
		final List<TestingSchedulingExecutionVertex> consumers = testingSchedulingTopology.addExecutionVertices().finish();
		final List<TestingSchedulingResultPartition> resultPartitions = testingSchedulingTopology.connectPointwise(producers, consumers).finish();
		final ExecutionVertexID onlyProducerVertexId = producers.get(0).getId();
		final ExecutionVertexID onlyConsumerVertexId = consumers.get(0).getId();
		final IntermediateResultPartitionID onlyResultPartitionId = resultPartitions.get(0).getId();

		final Set<PipelinedRegion> pipelinedRegions = pipelinedRegionsSet(
			PipelinedRegion.from(onlyProducerVertexId),
			PipelinedRegion.from(onlyConsumerVertexId));
		final RegionPartitionReleaseStrategy regionPartitionReleaseStrategy = new RegionPartitionReleaseStrategy(testingSchedulingTopology, pipelinedRegions);

		// Finishing the whole consumer region makes the upstream partition releasable.
		final List<IntermediateResultPartitionID> partitionsToRelease = regionPartitionReleaseStrategy.vertexFinished(onlyConsumerVertexId);
		assertThat(partitionsToRelease, contains(onlyResultPartitionId));
	}

	@Test
	public void releasePartitionsIfDownstreamRegionWithMultipleOperatorsIsFinished() {
		// source -> intermediate (blocking) -> sink (pipelined); intermediate and
		// sink form one region because their edge is pipelined.
		final List<TestingSchedulingExecutionVertex> sourceVertices = testingSchedulingTopology.addExecutionVertices().finish();
		final List<TestingSchedulingExecutionVertex> intermediateVertices = testingSchedulingTopology.addExecutionVertices().finish();
		final List<TestingSchedulingExecutionVertex> sinkVertices = testingSchedulingTopology.addExecutionVertices().finish();
		final List<TestingSchedulingResultPartition> sourceResultPartitions = testingSchedulingTopology.connectAllToAll(sourceVertices, intermediateVertices).finish();
		testingSchedulingTopology.connectAllToAll(intermediateVertices, sinkVertices).withResultPartitionType(ResultPartitionType.PIPELINED).finish();
		final ExecutionVertexID onlySourceVertexId = sourceVertices.get(0).getId();
		final ExecutionVertexID onlyIntermediateVertexId = intermediateVertices.get(0).getId();
		final ExecutionVertexID onlySinkVertexId = sinkVertices.get(0).getId();
		final IntermediateResultPartitionID onlySourceResultPartitionId = sourceResultPartitions.get(0).getId();

		final Set<PipelinedRegion> pipelinedRegions = pipelinedRegionsSet(
			PipelinedRegion.from(onlySourceVertexId),
			PipelinedRegion.from(onlyIntermediateVertexId, onlySinkVertexId));
		final RegionPartitionReleaseStrategy regionPartitionReleaseStrategy = new RegionPartitionReleaseStrategy(testingSchedulingTopology, pipelinedRegions);

		// Only after every vertex of the downstream region has finished may the
		// source partition be released.
		regionPartitionReleaseStrategy.vertexFinished(onlyIntermediateVertexId);
		final List<IntermediateResultPartitionID> partitionsToRelease = regionPartitionReleaseStrategy.vertexFinished(onlySinkVertexId);
		assertThat(partitionsToRelease, contains(onlySourceResultPartitionId));
	}

	@Test
	public void notReleasePartitionsIfDownstreamRegionIsNotFinished() {
		// Two-vertex consumer region: finishing only one of them must not
		// trigger any release.
		final List<TestingSchedulingExecutionVertex> producers = testingSchedulingTopology.addExecutionVertices().finish();
		final List<TestingSchedulingExecutionVertex> consumers = testingSchedulingTopology.addExecutionVertices().withParallelism(2).finish();
		testingSchedulingTopology.connectAllToAll(producers, consumers).finish();
		final ExecutionVertexID onlyProducerVertexId = producers.get(0).getId();
		final ExecutionVertexID consumerVertex1 = consumers.get(0).getId();
		final ExecutionVertexID consumerVertex2 = consumers.get(1).getId();

		final Set<PipelinedRegion> pipelinedRegions = pipelinedRegionsSet(
			PipelinedRegion.from(onlyProducerVertexId),
			PipelinedRegion.from(consumerVertex1, consumerVertex2));
		final RegionPartitionReleaseStrategy regionPartitionReleaseStrategy = new RegionPartitionReleaseStrategy(testingSchedulingTopology, pipelinedRegions);

		final List<IntermediateResultPartitionID> partitionsToRelease = regionPartitionReleaseStrategy.vertexFinished(consumerVertex1);
		assertThat(partitionsToRelease, is(empty()));
	}

	@Test
	public void toggleVertexFinishedUnfinished() {
		// A vertex that finishes and is later reset to unfinished must count as
		// unfinished again, keeping the region incomplete.
		final List<TestingSchedulingExecutionVertex> producers = testingSchedulingTopology.addExecutionVertices().finish();
		final List<TestingSchedulingExecutionVertex> consumers = testingSchedulingTopology.addExecutionVertices().withParallelism(2).finish();
		testingSchedulingTopology.connectAllToAll(producers, consumers).finish();
		final ExecutionVertexID onlyProducerVertexId = producers.get(0).getId();
		final ExecutionVertexID consumerVertex1 = consumers.get(0).getId();
		final ExecutionVertexID consumerVertex2 = consumers.get(1).getId();

		final Set<PipelinedRegion> pipelinedRegions = pipelinedRegionsSet(
			PipelinedRegion.from(onlyProducerVertexId),
			PipelinedRegion.from(consumerVertex1, consumerVertex2));
		final RegionPartitionReleaseStrategy regionPartitionReleaseStrategy = new RegionPartitionReleaseStrategy(testingSchedulingTopology, pipelinedRegions);

		regionPartitionReleaseStrategy.vertexFinished(consumerVertex1);
		regionPartitionReleaseStrategy.vertexFinished(consumerVertex2);
		regionPartitionReleaseStrategy.vertexUnfinished(consumerVertex2);

		// consumerVertex2 is unfinished again, so re-finishing consumerVertex1
		// alone must not release anything.
		final List<IntermediateResultPartitionID> partitionsToRelease = regionPartitionReleaseStrategy.vertexFinished(consumerVertex1);
		assertThat(partitionsToRelease, is(empty()));
	}

	// Convenience: build a mutable set from the given regions.
	private static Set<PipelinedRegion> pipelinedRegionsSet(final PipelinedRegion... pipelinedRegions) {
		return new HashSet<>(Arrays.asList(pipelinedRegions));
	}
}
| apache-2.0 |
jfernandosf/phoenix | phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java | 5971 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.hbase.index.covered.update;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
/**
*
*/
/**
 * An immutable reference to a column, i.e. a (family, qualifier) byte pair,
 * backed by {@link ImmutableBytesPtr}s. The hash code is precomputed at
 * construction time, and plain byte-array copies of the family/qualifier are
 * materialized lazily on first access.
 */
public class ColumnReference implements Comparable<ColumnReference> {

    // Sentinel qualifier meaning "all qualifiers of the family". Note that
    // allColumns() tests against this array by reference identity (==).
    public static final byte[] ALL_QUALIFIERS = new byte[0];

    // Combine both pointer hashes once so hashCode() is a field read.
    private static int calcHashCode(ImmutableBytesWritable familyPtr, ImmutableBytesWritable qualifierPtr) {
        final int prime = 31;
        int result = 1;
        result = prime * result + familyPtr.hashCode();
        result = prime * result + qualifierPtr.hashCode();
        return result;
    }

    private final int hashCode;

    // Lazily materialized byte[] copies; volatile so the double-checked
    // locking in getFamily()/getQualifier() publishes them safely.
    protected volatile byte[] family;
    protected volatile byte[] qualifier;

    private final ImmutableBytesPtr familyPtr;
    private final ImmutableBytesPtr qualifierPtr;

    public ColumnReference(byte[] family, byte[] qualifier) {
        this.familyPtr = new ImmutableBytesPtr(family);
        this.qualifierPtr = new ImmutableBytesPtr(qualifier);
        this.hashCode = calcHashCode(this.familyPtr, this.qualifierPtr);
    }

    public ColumnReference(byte[] family, int familyOffset, int familyLength, byte[] qualifier,
            int qualifierOffset, int qualifierLength) {
        this.familyPtr = new ImmutableBytesPtr(family, familyOffset, familyLength);
        this.qualifierPtr = new ImmutableBytesPtr(qualifier, qualifierOffset, qualifierLength);
        this.hashCode = calcHashCode(this.familyPtr, this.qualifierPtr);
    }

    /**
     * @return the family bytes, copying out of the pointer on first call
     * (double-checked locking on the final pointer field).
     */
    public byte[] getFamily() {
        if (this.family == null) {
            synchronized (this.familyPtr) {
                if (this.family == null) {
                    this.family = this.familyPtr.copyBytesIfNecessary();
                }
            }
        }
        return this.family;
    }

    /**
     * @return the qualifier bytes, copying out of the pointer on first call
     * (double-checked locking on the final pointer field).
     */
    public byte[] getQualifier() {
        if (this.qualifier == null) {
            synchronized (this.qualifierPtr) {
                if (this.qualifier == null) {
                    this.qualifier = this.qualifierPtr.copyBytesIfNecessary();
                }
            }
        }
        return this.qualifier;
    }

    public ImmutableBytesPtr getFamilyWritable() {
        return this.familyPtr;
    }

    public ImmutableBytesPtr getQualifierWritable() {
        return this.qualifierPtr;
    }

    // True when the cell's family matches and (family matched) its qualifier
    // matches; family is checked first as the cheaper discriminator.
    public boolean matches(Cell kv) {
        if (matchesFamily(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength())) {
            return matchesQualifier(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
        }
        return false;
    }

    /**
     * @param qual to check against
     * @return <tt>true</tt> if this column covers the given qualifier.
     */
    public boolean matchesQualifier(byte[] qual) {
        return matchesQualifier(qual, 0, qual.length);
    }

    // An ALL_QUALIFIERS reference covers every qualifier unconditionally.
    public boolean matchesQualifier(byte[] bytes, int offset, int length) {
        return allColumns() ? true : match(bytes, offset, length, qualifierPtr.get(),
            qualifierPtr.getOffset(), qualifierPtr.getLength());
    }

    /**
     * @param family to check against
     * @return <tt>true</tt> if this column covers the given family.
     */
    public boolean matchesFamily(byte[] family) {
        return matchesFamily(family, 0, family.length);
    }

    public boolean matchesFamily(byte[] bytes, int offset, int length) {
        return match(bytes, offset, length, familyPtr.get(), familyPtr.getOffset(), familyPtr.getLength());
    }

    /**
     * @return <tt>true</tt> if this should include all column qualifiers, <tt>false</tt> otherwise
     */
    public boolean allColumns() {
        // Reference comparison against the sentinel; assumes getQualifier()
        // returns the original ALL_QUALIFIERS array when constructed with it
        // (copyBytesIfNecessary only copies when offset/length require it).
        return getQualifier() == ALL_QUALIFIERS;
    }

    /**
     * Check to see if the passed bytes match the stored bytes
     * @param first
     * @param storedKey the stored byte[], should never be <tt>null</tt>
     * @return <tt>true</tt> if they are byte-equal
     */
    private boolean match(byte[] first, int offset1, int length1, byte[] storedKey, int offset2,
            int length2) {
        return first == null ? false : Bytes.equals(first, offset1, length1, storedKey, offset2,
            length2);
    }

    // Build the smallest KeyValue that sorts at/after this column in the row;
    // a null qualifier (ALL_QUALIFIERS) positions at the start of the family.
    public KeyValue getFirstKeyValueForRow(byte[] row) {
        return KeyValue.createFirstOnRow(row, getFamily(), getQualifier() == ALL_QUALIFIERS ? null
                : getQualifier());
    }

    // Order by family first, then qualifier, consistent with equals().
    @Override
    public int compareTo(ColumnReference o) {
        int c = familyPtr.compareTo(o.familyPtr);
        if (c == 0) {
            // matching families, compare qualifiers
            c = qualifierPtr.compareTo(o.qualifierPtr);
        }
        return c;
    }

    @Override
    public boolean equals(Object o) {
        if (o instanceof ColumnReference) {
            ColumnReference other = (ColumnReference) o;
            // Cheap hash comparison first to short-circuit most mismatches.
            if (hashCode == other.hashCode && familyPtr.equals(other.familyPtr)) {
                return qualifierPtr.equals(other.qualifierPtr);
            }
        }
        return false;
    }

    @Override
    public int hashCode() {
        return hashCode;
    }

    @Override
    public String toString() {
        return "ColumnReference - " + Bytes.toString(getFamily()) + ":" + Bytes.toString(getQualifier());
    }
}
swizzley/devechelon-demo | modules/portage/lib/puppet/property/portage_version.rb | 433 | File.expand_path('../..', File.dirname(__FILE__)).tap { |dir| $:.unshift(dir) unless $:.include?(dir) }
require 'puppet/util/portage'
require 'puppet/property'
# Puppet property type that only accepts Portage-style version strings.
class Puppet::Property::PortageVersion < Puppet::Property
  desc "A properly formatted version string"

  # Delegate the actual format check to the shared Portage utility.
  validate do |value|
    raise ArgumentError, "#{name} must be a properly formatted version" unless Puppet::Util::Portage.valid_version?(value)
  end
end
| apache-2.0 |
jacques-n/incubator-calcite | core/src/main/java/org/eigenbase/relopt/hep/HepMatchOrder.java | 1400 | /*
// Licensed to Julian Hyde under one or more contributor license
// agreements. See the NOTICE file distributed with this work for
// additional information regarding copyright ownership.
//
// Julian Hyde licenses this file to you under the Apache License,
// Version 2.0 (the "License"); you may not use this file except in
// compliance with the License. You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/
package org.eigenbase.relopt.hep;
/**
* HepMatchOrder specifies the order of graph traversal when looking for rule
* matches.
*/
public enum HepMatchOrder {
  /**
   * Match in arbitrary order. This is the default because it is the most
   * efficient, and most rules don't care about order.
   */
  ARBITRARY,

  /**
   * Match from leaves up. A match attempt at a descendant precedes all match
   * attempts at its ancestors.
   */
  BOTTOM_UP,

  /**
   * Match from root down. A match attempt at an ancestor always precedes all
   * match attempts at its descendants.
   */
  TOP_DOWN
}
// End HepMatchOrder.java
| apache-2.0 |
jinhyukchang/gobblin | gobblin-metrics-libs/gobblin-metrics-base/src/main/java/org/apache/gobblin/metrics/event/JobEvent.java | 2008 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.event;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
/**
* Job-related event types and their metadata, stored in {@link GobblinTrackingEvent#metadata}
*
* @author Lorand Bendig
*
*/
public class JobEvent {

  // --- Event type names, used as GobblinTrackingEvent names ---

  /** Emitted when the overall job state changes. */
  public static final String JOB_STATE = "JobStateEvent";
  /** Emitted when the job lock is already held by another run. */
  public static final String LOCK_IN_USE = "LockInUse";
  /** Emitted when no work units could be created for the job. */
  public static final String WORK_UNITS_MISSING = "WorkUnitsMissing";
  /** Emitted when the created set of work units is empty. */
  public static final String WORK_UNITS_EMPTY = "WorkUnitsEmpty";
  /** Emitted when the job's tasks have been submitted for execution. */
  public static final String TASKS_SUBMITTED = "TasksSubmitted";

  // --- Metadata keys, stored in GobblinTrackingEvent#metadata ---

  public static final String METADATA_JOB_ID = "jobId";
  public static final String METADATA_JOB_NAME = "jobName";
  public static final String METADATA_JOB_START_TIME = "jobBeginTime";
  public static final String METADATA_JOB_END_TIME = "jobEndTime";
  public static final String METADATA_JOB_STATE = "jobState";
  public static final String METADATA_JOB_LAUNCHED_TASKS = "jobLaunchedTasks";
  public static final String METADATA_JOB_COMPLETED_TASKS = "jobCompletedTasks";
  public static final String METADATA_JOB_LAUNCHER_TYPE = "jobLauncherType";
  public static final String METADATA_JOB_TRACKING_URL = "jobTrackingURL";
}
| apache-2.0 |
tekul/spring-security | web/src/main/java/org/springframework/security/web/authentication/rememberme/JdbcTokenRepositoryImpl.java | 5423 | /*
* Copyright 2002-2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.web.authentication.rememberme;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.dao.IncorrectResultSizeDataAccessException;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.support.JdbcDaoSupport;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.*;
/**
* JDBC based persistent login token repository implementation.
*
* @author Luke Taylor
* @since 2.0
*/
public class JdbcTokenRepositoryImpl extends JdbcDaoSupport implements PersistentTokenRepository {
    //~ Static fields/initializers =====================================================================================

    /** Default SQL for creating the database table to store the tokens */
    public static final String CREATE_TABLE_SQL =
            "create table persistent_logins (username varchar(64) not null, series varchar(64) primary key, " +
                    "token varchar(64) not null, last_used timestamp not null)";
    /** The default SQL used by the <tt>getTokenBySeries</tt> query */
    public static final String DEF_TOKEN_BY_SERIES_SQL =
            "select username,series,token,last_used from persistent_logins where series = ?";
    /** The default SQL used by <tt>createNewToken</tt> */
    public static final String DEF_INSERT_TOKEN_SQL =
            "insert into persistent_logins (username, series, token, last_used) values(?,?,?,?)";
    /** The default SQL used by <tt>updateToken</tt> */
    public static final String DEF_UPDATE_TOKEN_SQL =
            "update persistent_logins set token = ?, last_used = ? where series = ?";
    /** The default SQL used by <tt>removeUserTokens</tt> */
    public static final String DEF_REMOVE_USER_TOKENS_SQL =
            "delete from persistent_logins where username = ?";

    //~ Instance fields ================================================================================================

    private String tokensBySeriesSql = DEF_TOKEN_BY_SERIES_SQL;
    private String insertTokenSql = DEF_INSERT_TOKEN_SQL;
    private String updateTokenSql = DEF_UPDATE_TOKEN_SQL;
    private String removeUserTokensSql = DEF_REMOVE_USER_TOKENS_SQL;
    private boolean createTableOnStartup;

    protected void initDao() {
        if (createTableOnStartup) {
            getJdbcTemplate().execute(CREATE_TABLE_SQL);
        }
    }

    public void createNewToken(PersistentRememberMeToken token) {
        getJdbcTemplate().update(insertTokenSql, token.getUsername(), token.getSeries(),
                token.getTokenValue(), token.getDate());
    }

    public void updateToken(String series, String tokenValue, Date lastUsed) {
        // Fix: persist the caller-supplied lastUsed timestamp. The previous
        // implementation silently substituted new Date(), ignoring the
        // parameter and violating the PersistentTokenRepository contract.
        getJdbcTemplate().update(updateTokenSql, tokenValue, lastUsed, series);
    }

    /**
     * Loads the token data for the supplied series identifier.
     *
     * If an error occurs, it will be reported and null will be returned (since the result should just be a failed
     * persistent login).
     *
     * @param seriesId
     * @return the token matching the series, or null if no match found or an exception occurred.
     */
    public PersistentRememberMeToken getTokenForSeries(String seriesId) {
        try {
            return getJdbcTemplate().queryForObject(tokensBySeriesSql, new RowMapper<PersistentRememberMeToken>() {
                public PersistentRememberMeToken mapRow(ResultSet rs, int rowNum) throws SQLException {
                    return new PersistentRememberMeToken(rs.getString(1), rs.getString(2), rs.getString(3), rs.getTimestamp(4));
                }
            }, seriesId);
        } catch (EmptyResultDataAccessException zeroResults) {
            // Not an error: an unknown series simply means the login fails.
            if (logger.isDebugEnabled()) {
                logger.debug("Querying token for series '" + seriesId + "' returned no results.", zeroResults);
            }
        } catch (IncorrectResultSizeDataAccessException moreThanOne) {
            // series is the primary key, so this indicates a corrupted table.
            logger.error("Querying token for series '" + seriesId + "' returned more than one value. Series" +
                    " should be unique");
        } catch (DataAccessException e) {
            logger.error("Failed to load token for series " + seriesId, e);
        }

        return null;
    }

    public void removeUserTokens(String username) {
        getJdbcTemplate().update(removeUserTokensSql, username);
    }

    /**
     * Intended for convenience in debugging. Will create the persistent_tokens database table when the class
     * is initialized during the initDao method.
     *
     * @param createTableOnStartup set to true to execute the
     */
    public void setCreateTableOnStartup(boolean createTableOnStartup) {
        this.createTableOnStartup = createTableOnStartup;
    }
}
| apache-2.0 |
joerg84/arangodb | 3rdParty/V8/v5.7.0.0/src/ic/arm64/ic-arm64.cc | 11363 | // Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_ARM64
#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done.
// The scratch registers need to be different from elements, name and result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
  DCHECK(!AreAliased(result, scratch1, scratch2));

  Label done;

  // Probe the dictionary. On success scratch2 holds the address of the
  // entry's start inside the elements array; on failure control jumps to miss.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal property.
  __ Bind(&done);

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  // Details word lives two pointers past the entry start (key, value, details).
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  // Any non-zero property type bits mean this is not a normal data property.
  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ B(ne, miss);

  // Get the value at the masked, scaled index and return. The value slot is
  // one pointer past the entry start.
  __ Ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}
// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
// name: Property name. It is not clobbered if a jump to the miss label is
// done
// value: The value to store (never clobbered).
//
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// Emits code that stores |value| into the slow-mode property |name| of the
// object whose NameDictionary backing store is in |elements|, jumping to
// |miss| if the property is absent, is not a normal data property, or is
// read-only. |value| is never clobbered; |elements| and |name| are preserved
// on the miss path.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
Register elements, Register name,
Register value, Register scratch1,
Register scratch2) {
DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));
Label done;
// Probe the dictionary.
// On success falls through to |done| with scratch2 pointing at the matching
// entry; on failure jumps to |miss|.
NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
name, scratch1, scratch2);
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
__ Bind(&done);
// Entry layout is (key, value, details); compute offsets from the entry
// base held in scratch2.
static const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
// Reject in one test: any non-normal property type OR the READ_ONLY
// attribute bit.
static const int kTypeAndReadOnlyMask =
PropertyDetails::TypeField::kMask |
PropertyDetails::AttributesField::encode(READ_ONLY);
__ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
__ Tst(scratch1, kTypeAndReadOnlyMask);
__ B(ne, miss);
// Store the value at the masked, scaled index and return.
static const int kValueOffset = kElementsStartOffset + kPointerSize;
// Materialize the untagged slot address in scratch2 so the same address
// can feed both the store and the write barrier below.
__ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
__ Str(value, MemOperand(scratch2));
// Update the write barrier. Make sure not to clobber the value.
__ Mov(scratch1, value);
__ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
// Fast path for loading a named property from an object with slow (dictionary)
// properties: probe the property dictionary inline and fall back to the
// runtime on failure (without recording an IC miss).
void LoadIC::GenerateNormal(MacroAssembler* masm) {
// x0 doubles as the dictionary scratch and the result register, so on the
// success path the loaded value is already in the return register.
Register dictionary = x0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
Label slow;
__ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
JSObject::kPropertiesOffset));
GenerateDictionaryLoad(masm, &slow, dictionary,
LoadDescriptor::NameRegister(), x0, x3, x4);
__ Ret();
// Dictionary load failed, go slow (but don't miss).
__ Bind(&slow);
GenerateRuntimeGetProperty(masm);
}
// IC miss handler for LoadIC: bump the miss counter and tail-call the
// kLoadIC_Miss runtime entry with (receiver, name, slot, vector) on the stack.
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
ASM_LOCATION("LoadIC::GenerateMiss");
// x4/x5 are counter scratches; they must not overlap the feedback slot or
// vector registers that are pushed below.
DCHECK(!AreAliased(x4, x5, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->ic_load_miss(), 1, x4, x5);
// Perform tail call to the entry.
__ Push(LoadWithVectorDescriptor::ReceiverRegister(),
LoadWithVectorDescriptor::NameRegister(),
LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister());
__ TailCallRuntime(Runtime::kLoadIC_Miss);
}
// Slow path that performs the property load in the runtime without recording
// an IC miss: push (receiver, name) and tail-call kGetProperty.
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kGetProperty);
}
// IC miss handler for KeyedLoadIC: bump the keyed-load miss counter and
// tail-call kKeyedLoadIC_Miss with (receiver, name, slot, vector) pushed.
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
// x10/x11 are counter scratches; keep them distinct from the slot and
// vector registers pushed below.
DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()))
__ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, x10, x11);
__ Push(LoadWithVectorDescriptor::ReceiverRegister(),
LoadWithVectorDescriptor::NameRegister(),
LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister());
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
// Slow path for keyed loads: push (receiver, key) and tail-call
// kKeyedGetProperty without recording an IC miss.
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
// Pushes the five store-IC arguments in the order the store miss/slow
// runtime entries expect: value, slot, vector, receiver, name. Shared by
// StoreIC and KeyedStoreIC miss/slow stubs below.
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
StoreWithVectorDescriptor::VectorRegister(),
StoreWithVectorDescriptor::ReceiverRegister(),
StoreWithVectorDescriptor::NameRegister());
}
// IC miss handler for KeyedStoreIC: push the store arguments and tail-call
// the kKeyedStoreIC_Miss runtime entry (which may update the IC state).
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
ASM_LOCATION("KeyedStoreIC::GenerateMiss");
StoreIC_PushArgs(masm);
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
// Slow-path handler for KeyedStoreIC: completes the store in the runtime
// without touching the IC state (unlike GenerateMiss above).
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
ASM_LOCATION("KeyedStoreIC::GenerateSlow");
StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
__ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
// IC miss handler for StoreIC: push the store arguments and tail-call the
// kStoreIC_Miss runtime entry.
void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Tail call to the entry.
__ TailCallRuntime(Runtime::kStoreIC_Miss);
}
// Fast path for storing a named property on an object with slow (dictionary)
// properties: probe and update the property dictionary inline, falling back
// to GenerateMiss on failure.
void StoreIC::GenerateNormal(MacroAssembler* masm) {
Label miss;
Register value = StoreDescriptor::ValueRegister();
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register dictionary = x5;
// x5-x7 are scratches; they must not alias the descriptor registers, and
// the slot/vector registers must stay live for the miss path, where
// StoreIC_PushArgs pushes them.
DCHECK(!AreAliased(value, receiver, name,
StoreWithVectorDescriptor::SlotRegister(),
StoreWithVectorDescriptor::VectorRegister(), x5, x6, x7));
__ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &miss, dictionary, name, value, x6, x7);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->ic_store_normal_hit(), 1, x6, x7);
__ Ret();
// Cache miss: Jump to runtime.
__ Bind(&miss);
__ IncrementCounter(counters->ic_store_normal_miss(), 1, x6, x7);
GenerateMiss(masm);
}
// Map a JS comparison token to the ARM64 condition code that implements it.
// Strict and sloppy equality share the same condition; any other token is a
// caller bug.
Condition CompareIC::ComputeCondition(Token::Value op) {
  if (op == Token::EQ_STRICT || op == Token::EQ) return eq;
  if (op == Token::LT) return lt;
  if (op == Token::GT) return gt;
  if (op == Token::LTE) return le;
  if (op == Token::GTE) return ge;
  UNREACHABLE();
  return al;
}
// Returns true when the call site at |address| is followed by inline patch
// data, i.e. the compare stub was emitted with a patchable smi fast-path.
bool CompareIC::HasInlinedSmiCode(Address address) {
  // The patch marker sits at the instruction following the call.
  Address info_address = Assembler::return_address_from_call_start(address);
  return InstructionSequence::At(info_address)->IsInlineData();
}
// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
// Enables or disables the inline smi fast-path at a compare IC call site by
// rewriting the single tbz/tbnz instruction recorded in the patch info.
void PatchInlinedSmiCode(Isolate* isolate, Address address,
InlinedSmiCheck check) {
// The patch information is encoded in the instruction stream using
// instructions which have no side effects, so we can safely execute them.
// The patch information is encoded directly after the call to the helper
// function which is requesting this patch operation.
Address info_address = Assembler::return_address_from_call_start(address);
InlineSmiCheckInfo info(info_address);
// Check and decode the patch information instruction.
// No recorded smi check at this site: nothing to patch.
if (!info.HasSmiCheck()) {
return;
}
if (FLAG_trace_ic) {
PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
static_cast<void*>(address), static_cast<void*>(info_address),
static_cast<void*>(info.SmiCheck()));
}
// Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
// and JumpPatchSite::EmitJumpIfSmi().
// Changing
// tb(n)z xzr, #0, <target>
// to
// tb(!n)z test_reg, #0, <target>
Instruction* to_patch = info.SmiCheck();
// Patch exactly one instruction in place.
PatchingAssembler patcher(isolate, to_patch, 1);
DCHECK(to_patch->IsTestBranch());
// The emitted check always tests bit 0 (the smi tag bit), so both halves
// of the tested-bit field must be zero.
DCHECK(to_patch->ImmTestBranchBit5() == 0);
DCHECK(to_patch->ImmTestBranchBit40() == 0);
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagMask == 1);
// Preserve the original branch offset; only the tested register changes.
int branch_imm = to_patch->ImmTestBranch();
Register smi_reg;
if (check == ENABLE_INLINED_SMI_CHECK) {
// Currently disabled: the instruction tests xzr (fixed outcome). Swap in
// the real register recorded in the patch info.
DCHECK(to_patch->Rt() == xzr.code());
smi_reg = info.SmiRegister();
} else {
// Currently enabled: revert to testing xzr so the branch outcome is
// fixed again.
DCHECK(check == DISABLE_INLINED_SMI_CHECK);
DCHECK(to_patch->Rt() != xzr.code());
smi_reg = xzr;
}
// Re-emit with the opposite polarity so enable/disable round-trips between
// tbz and tbnz forms.
if (to_patch->Mask(TestBranchMask) == TBZ) {
// This is JumpIfNotSmi(smi_reg, branch_imm).
patcher.tbnz(smi_reg, 0, branch_imm);
} else {
DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
// This is JumpIfSmi(smi_reg, branch_imm).
patcher.tbz(smi_reg, 0, branch_imm);
}
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
| apache-2.0 |