text
stringlengths 2
99.9k
| meta
dict |
|---|---|
<?xml version="1.0" encoding="UTF-8"?>
<results totalResults="74">
<books>
<book isbn="978-3-16-148410-0">
<title>Harry Potter and the Half-Blood Prince</title>
<price>29.95</price>
<authors>
<author>J.K. Rowling</author>
</authors>
</book>
<book isbn="478-2-23-765712-2">
<title>Twilight</title>
<price>19.95</price>
<authors>
<author>Stephenie Meyer</author>
<author>The Hand of God</author>
</authors>
</book>
</books>
</results>
|
{
"pile_set_name": "Github"
}
|
'use strict';
const Utils = require('../../utils');
const AbstractQuery = require('../abstract/query');
const sequelizeErrors = require('../../errors');
const _ = require('lodash');
const { logger } = require('../../utils/logger');
const debug = logger.debugContext('sql:mysql');
class Query extends AbstractQuery {
  constructor(connection, sequelize, options) {
    // Warnings are only fetched (via SHOW WARNINGS) when explicitly enabled.
    super(connection, sequelize, Object.assign({ showWarnings: false }, options));
  }

  /**
   * Replace named bind parameters in `sql` with the positional `?`
   * placeholders the mysql driver expects, collecting the values in order.
   *
   * @param {string} sql - SQL text containing named bind parameters.
   * @param {Object|Array} values - Bind values keyed by parameter name.
   * @param {string} dialect - Dialect name, forwarded to AbstractQuery.
   * @returns {Array} `[sql, bindParam]`; `bindParam` is undefined when no
   *   parameter was replaced.
   * @private
   */
  static formatBindParameters(sql, values, dialect) {
    const bindParam = [];
    const replacementFunc = (match, key, values) => {
      if (values[key] !== undefined) {
        bindParam.push(values[key]);
        return '?';
      }
      // Returning undefined leaves the original token untouched.
      return undefined;
    };
    sql = AbstractQuery.formatBindParameters(sql, values, dialect, replacementFunc)[0];
    return [sql, bindParam.length > 0 ? bindParam : undefined];
  }

  /**
   * Execute `sql` on this query's connection, optionally log warnings,
   * and normalize the raw driver results.
   *
   * @param {string} sql - SQL to execute.
   * @param {Array} [parameters] - Positional bind parameters; when given,
   *   `connection.execute` (prepared statement) is used instead of `query`.
   * @returns {Promise} Resolves with the formatted results.
   * @private
   */
  run(sql, parameters) {
    this.sql = sql;
    const { connection, options } = this;
    // Warnings may be requested globally (sequelize options) or per query.
    const showWarnings = this.sequelize.options.showWarnings || options.showWarnings;
    const complete = this._logQuery(sql, debug);
    return new Utils.Promise((resolve, reject) => {
      const handler = (err, results) => {
        complete();
        if (err) {
          // MySQL automatically rolls-back transactions in the event of a deadlock
          if (options.transaction && err.errno === 1213) {
            options.transaction.finished = 'rollback';
          }
          err.sql = sql;
          err.parameters = parameters;
          reject(this.formatError(err));
        } else {
          resolve(results);
        }
      };
      if (parameters) {
        debug('parameters(%j)', parameters);
        connection.execute(sql, parameters, handler).setMaxListeners(100);
      } else {
        connection.query({ sql }, handler).setMaxListeners(100);
      }
    })
      // Log warnings if we've got them.
      .then(results => {
        if (showWarnings && results && results.warningStatus > 0) {
          return this.logWarnings(results);
        }
        return results;
      })
      // Return formatted results...
      .then(results => this.formatResults(results));
  }

  /**
   * High level function that handles the results of a query execution.
   *
   * Example:
   *   query.formatResults([
   *     {
   *       id: 1,              // this is from the main table
   *       attr2: 'snafu',     // this is from the main table
   *       Tasks.id: 1,        // this is from the associated table
   *       Tasks.title: 'task' // this is from the associated table
   *     }
   *   ])
   *
   * @param {Array} data - The result of the query execution.
   * @private
   */
  formatResults(data) {
    let result = this.instance;
    if (this.isInsertQuery(data)) {
      this.handleInsertQuery(data);
      if (!this.instance) {
        // handle bulkCreate AI primary key
        if (
          data.constructor.name === 'ResultSetHeader'
          && this.model
          && this.model.autoIncrementAttribute
          && this.model.autoIncrementAttribute === this.model.primaryKeyAttribute
          && this.model.rawAttributes[this.model.primaryKeyAttribute]
        ) {
          // MySQL reports only the first generated id; the rest are assumed
          // sequential over the affected rows.
          const startId = data[this.getInsertIdField()];
          result = [];
          for (let i = startId; i < startId + data.affectedRows; i++) {
            result.push({ [this.model.rawAttributes[this.model.primaryKeyAttribute].field]: i });
          }
        } else {
          result = data[this.getInsertIdField()];
        }
      }
    }
    if (this.isSelectQuery()) {
      return this.handleSelectQuery(data);
    }
    if (this.isShowTablesQuery()) {
      return this.handleShowTablesQuery(data);
    }
    if (this.isDescribeQuery()) {
      // Map DESCRIBE rows to sequelize's attribute-description shape.
      result = {};
      for (const _result of data) {
        const enumRegex = /^enum/i;
        result[_result.Field] = {
          type: enumRegex.test(_result.Type) ? _result.Type.replace(enumRegex, 'ENUM') : _result.Type.toUpperCase(),
          allowNull: _result.Null === 'YES',
          defaultValue: _result.Default,
          primaryKey: _result.Key === 'PRI',
          autoIncrement: Object.prototype.hasOwnProperty.call(_result, 'Extra') && _result.Extra.toLowerCase() === 'auto_increment',
          comment: _result.Comment ? _result.Comment : null
        };
      }
      return result;
    }
    if (this.isShowIndexesQuery()) {
      return this.handleShowIndexesQuery(data);
    }
    if (this.isCallQuery()) {
      return data[0];
    }
    if (this.isBulkUpdateQuery() || this.isBulkDeleteQuery() || this.isUpsertQuery()) {
      return data.affectedRows;
    }
    if (this.isVersionQuery()) {
      return data[0].version;
    }
    if (this.isForeignKeysQuery()) {
      return data;
    }
    if (this.isInsertQuery() || this.isUpdateQuery()) {
      return [result, data.affectedRows];
    }
    if (this.isShowConstraintsQuery()) {
      return data;
    }
    if (this.isRawQuery()) {
      // MySQL returns row data and metadata (affected rows etc) in a single object - let's standardize it, sorta
      return [data, data];
    }
    return result;
  }

  /**
   * Run SHOW WARNINGS on the same connection and forward each warning to
   * the sequelize logger.
   *
   * @param {Object} results - The original query results, returned unchanged.
   * @returns {Promise} Resolves with `results` once warnings are logged.
   * @private
   */
  logWarnings(results) {
    return this.run('SHOW WARNINGS').then(warningResults => {
      const warningMessage = `MySQL Warnings (${this.connection.uuid || 'default'}): `;
      const messages = [];
      for (const _warningRow of warningResults) {
        // Skip non-iterable entries (e.g. trailing metadata objects).
        if (_warningRow === undefined || typeof _warningRow[Symbol.iterator] !== 'function') continue;
        for (const _warningResult of _warningRow) {
          if (Object.prototype.hasOwnProperty.call(_warningResult, 'Message')) {
            messages.push(_warningResult.Message);
          } else {
            // Plain row objects have no .keys() method (the original
            // `_warningResult.keys()` threw a TypeError here); enumerate
            // own enumerable keys instead.
            for (const _objectKey of Object.keys(_warningResult)) {
              messages.push([_objectKey, _warningResult[_objectKey]].join(': '));
            }
          }
        }
      }
      this.sequelize.log(warningMessage + messages.join('; '), this.options);
      return results;
    });
  }

  /**
   * Translate a mysql driver error into the matching sequelize error type.
   *
   * @param {Error} err - The raw driver error.
   * @returns {Error} A sequelize error wrapping `err`.
   * @private
   */
  formatError(err) {
    const errCode = err.errno || err.code;
    switch (errCode) {
      case 1062: { // ER_DUP_ENTRY: unique constraint violation
        const match = err.message.match(/Duplicate entry '([\s\S]*)' for key '?((.|\s)*?)'?$/);
        let fields = {};
        let message = 'Validation error';
        const values = match ? match[1].split('-') : undefined;
        const fieldKey = match ? match[2] : undefined;
        const fieldVal = match ? match[1] : undefined;
        const uniqueKey = this.model && this.model.uniqueKeys[fieldKey];
        if (uniqueKey) {
          if (uniqueKey.msg) message = uniqueKey.msg;
          fields = _.zipObject(uniqueKey.fields, values);
        } else {
          fields[fieldKey] = fieldVal;
        }
        const errors = [];
        _.forOwn(fields, (value, field) => {
          errors.push(new sequelizeErrors.ValidationErrorItem(
            this.getUniqueConstraintErrorMessage(field),
            'unique violation', // sequelizeErrors.ValidationErrorItem.Origins.DB,
            field,
            value,
            this.instance,
            'not_unique'
          ));
        });
        return new sequelizeErrors.UniqueConstraintError({ message, errors, parent: err, fields });
      }
      case 1451: // ER_ROW_IS_REFERENCED_2: delete blocked by child rows
      case 1452: { // ER_NO_REFERENCED_ROW_2: write references a missing parent
        // e.g. CONSTRAINT `example_constraint_name` FOREIGN KEY (`example_id`) REFERENCES `examples` (`id`)
        const match = err.message.match(/CONSTRAINT ([`"])(.*)\1 FOREIGN KEY \(\1(.*)\1\) REFERENCES \1(.*)\1 \(\1(.*)\1\)/);
        const quoteChar = match ? match[1] : '`';
        const fields = match ? match[3].split(new RegExp(`${quoteChar}, *${quoteChar}`)) : undefined;
        return new sequelizeErrors.ForeignKeyConstraintError({
          reltype: String(errCode) === '1451' ? 'parent' : 'child',
          table: match ? match[4] : undefined,
          fields,
          value: fields && fields.length && this.instance && this.instance[fields[0]] || undefined,
          index: match ? match[2] : undefined,
          parent: err
        });
      }
      default:
        return new sequelizeErrors.DatabaseError(err);
    }
  }

  /**
   * Normalize SHOW INDEX rows: one entry per index, with its columns
   * collected (in Seq_in_index order) into a `fields` array.
   *
   * @param {Array} data - Raw SHOW INDEX result rows.
   * @returns {Array} Index descriptions.
   * @private
   */
  handleShowIndexesQuery(data) {
    // Group by index name, and collect all fields
    data = data.reduce((acc, item) => {
      if (!(item.Key_name in acc)) {
        acc[item.Key_name] = item;
        item.fields = [];
      }
      acc[item.Key_name].fields[item.Seq_in_index - 1] = {
        attribute: item.Column_name,
        length: item.Sub_part || undefined,
        order: item.Collation === 'A' ? 'ASC' : undefined
      };
      // NOTE(review): the actual property is `Column_name` (capitalized),
      // so this delete is a no-op; kept as-is because the mapping below
      // only reads a fixed set of properties either way.
      delete item.column_name;
      return acc;
    }, {});
    return _.map(data, item => ({
      primary: item.Key_name === 'PRIMARY',
      fields: item.fields,
      name: item.Key_name,
      tableName: item.Table,
      unique: item.Non_unique !== 1,
      type: item.Index_type
    }));
  }
}
// Export Query as the module itself, plus as a named property and as
// `default`, so both CommonJS requires and transpiled ES-module imports work.
module.exports = Query;
module.exports.Query = Query;
module.exports.default = Query;
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/gus.h>
#define __GUS_TABLES_ALLOC__
#include "gus_tables.h"
EXPORT_SYMBOL(snd_gf1_atten_table); /* for snd-gus-synth module */
/*
 * Convert a linear volume (clamped to 0..65535) into the GF1 raw volume
 * format: a floating-point-like value with the exponent in bits 8..15
 * and an 8-bit mantissa in bits 0..7.
 */
unsigned short snd_gf1_lvol_to_gvol_raw(unsigned int vol)
{
	unsigned short e, m, tmp;

	/* clamp to the 16-bit input range */
	if (vol > 65535)
		vol = 65535;
	tmp = vol;
	e = 7;
	if (tmp < 128) {
		/* small values: shrink the exponent until 1 << e fits under tmp's range */
		while (e > 0 && tmp < (1 << e))
			e--;
	} else {
		/* large values: grow the exponent until the value fits in 8 bits */
		while (tmp > 255) {
			tmp >>= 1;
			e++;
		}
	}
	/* mantissa: remainder after removing the leading 1 << e, scaled to 8 bits */
	m = vol - (1 << e);
	if (m > 0) {
		if (e > 8)
			m >>= e - 8;
		else if (e < 8)
			m <<= 8 - e;
		m &= 255;
	}
	/* pack exponent (high byte) and mantissa (low byte) */
	return (e << 8) | m;
}
#if 0
/*
 * Inverse of snd_gf1_lvol_to_gvol_raw(): expand the GF1 exponent/mantissa
 * representation back into a linear volume. (Currently compiled out.)
 */
unsigned int snd_gf1_gvol_to_lvol_raw(unsigned short gf1_vol)
{
	unsigned int rvol;
	unsigned short e, m;

	if (!gf1_vol)
		return 0;
	e = gf1_vol >> 8;		/* exponent from the high byte */
	m = (unsigned char) gf1_vol;	/* mantissa from the low byte */
	rvol = 1 << e;			/* implicit leading bit */
	if (e > 8)
		return rvol | (m << (e - 8));
	return rvol | (m >> (8 - e));
}
/*
 * Compute the GF1 volume-ramp rate register value for ramping between raw
 * volumes `start` and `end` over `us` microseconds. The result packs a
 * 2-bit range selector in bits 6..7 and a 6-bit increment in bits 0..5.
 * (Currently compiled out.)
 */
unsigned int snd_gf1_calc_ramp_rate(struct snd_gus_card * gus,
				    unsigned short start,
				    unsigned short end,
				    unsigned int us)
{
	/* base step rates; indexed by active_voices - 14 in non-enhanced mode.
	 * NOTE(review): assumes 14..32 active voices -- confirm with callers. */
	static unsigned char vol_rates[19] =
	{
		23, 24, 26, 28, 29, 31, 32, 34,
		36, 37, 39, 40, 42, 44, 45, 47,
		49, 50, 52
	};
	unsigned short range, increment, value, i;

	/* reduce to the 12-bit volume range */
	start >>= 4;
	end >>= 4;
	/* microseconds available per volume step */
	if (start < end)
		us /= end - start;
	else
		us /= start - end;
	range = 4;	/* sentinel: no range selected yet */
	value = gus->gf1.enh_mode ?
	    vol_rates[0] :
	    vol_rates[gus->gf1.active_voices - 14];
	for (i = 0; i < 3; i++) {
		if (us < value) {
			range = i;
			break;
		} else
			value <<= 3;	/* each range is 8x slower than the previous */
	}
	if (range == 4) {
		/* slower than the slowest range: use range 3 at minimum speed */
		range = 3;
		increment = 1;
	} else
		increment = (value + (value >> 1)) / us;
	return (range << 6) | (increment & 0x3f);
}
#endif /* 0 */
/*
 * Translate a fixed-point frequency (freq16) into the GF1 frequency
 * register value, scaled by the card's playback frequency with
 * round-to-nearest division.
 */
unsigned short snd_gf1_translate_freq(struct snd_gus_card * gus, unsigned int freq16)
{
	freq16 >>= 3;
	if (freq16 < 50)
		freq16 = 50;	/* enforce the minimum supported value */
	if (freq16 & 0xf8000000) {
		/* ~0xf8000000 == 0x07ffffff: clamp to the largest value that
		 * passes the overflow test above. Note the printk reports the
		 * already-clamped value, not the offending input. */
		freq16 = ~0xf8000000;
		snd_printk(KERN_ERR "snd_gf1_translate_freq: overflow - freq = 0x%x\n", freq16);
	}
	/* (freq16 << 9) / playback_freq, rounded to nearest */
	return ((freq16 << 9) + (gus->gf1.playback_freq >> 1)) / gus->gf1.playback_freq;
}
#if 0
/*
 * Compute the GF1 vibrato depth register value for a vibrato of `cents`
 * (signed) around the given frequency-control register value.
 * (Currently compiled out.)
 */
short snd_gf1_compute_vibrato(short cents, unsigned short fc_register)
{
	static short vibrato_table[] =
	{
		0, 0, 32, 592, 61, 1175, 93, 1808,
		124, 2433, 152, 3007, 182, 3632, 213, 4290,
		241, 4834, 255, 5200
	};

	long depth;
	short *vi1, *vi2, pcents, v1;

	pcents = cents < 0 ? -cents : cents;
	/* walk the table in pairs to find the bracketing entries for pcents */
	for (vi1 = vibrato_table, vi2 = vi1 + 2; pcents > *vi2; vi1 = vi2, vi2 += 2);
	v1 = *(vi1 + 1);
	/* The FC table above is a list of pairs. The first number in the pair */
	/* is the cents index from 0-255 cents, and the second number in the */
	/* pair is the FC adjustment needed to change the pitch by the indexed */
	/* number of cents. The table was created for an FC of 32768. */
	/* The following expression does a linear interpolation against the */
	/* approximated log curve in the table above, and then scales the number */
	/* by the FC before the LFO. This calculation also adjusts the output */
	/* value to produce the appropriate depth for the hardware. The depth */
	/* is 2 * desired FC + 1. */
	depth = (((int) (*(vi2 + 1) - *vi1) * (pcents - *vi1) / (*vi2 - *vi1)) + v1) * fc_register >> 14;
	if (depth)
		depth++;
	if (depth > 255)
		depth = 255;	/* clamp to the 8-bit hardware field */
	return cents < 0 ? -(short) depth : (short) depth;
}
/*
 * Convert a 14-bit MIDI pitch-bend value (center 8192) and a bend
 * sensitivity into a frequency multiplier in 10-bit fixed point
 * (1024 == no bend). (Currently compiled out.)
 */
unsigned short snd_gf1_compute_pitchbend(unsigned short pitchbend, unsigned short sens)
{
	/* 1024 * 2^(i/12) for i = 0..11: one octave of equal-tempered factors */
	static long log_table[] = {1024, 1085, 1149, 1218, 1290, 1367, 1448, 1534, 1625, 1722, 1825, 1933};
	int wheel, sensitivity;
	unsigned int mantissa, f1, f2;
	unsigned short semitones, f1_index, f2_index, f1_power, f2_power;
	char bend_down = 0;
	int bend;

	if (!sens)
		return 1024;	/* zero sensitivity -> unity multiplier */
	wheel = (int) pitchbend - 8192;	/* signed offset from center */
	sensitivity = ((int) sens * wheel) / 128;
	if (sensitivity < 0) {
		/* compute the bend-up factor and invert at the end */
		bend_down = 1;
		sensitivity = -sensitivity;
	}
	semitones = (unsigned int) (sensitivity >> 13);
	mantissa = sensitivity % 8192;	/* fractional semitone part */
	f1_index = semitones % 12;
	f2_index = (semitones + 1) % 12;
	f1_power = semitones / 12;	/* whole octaves double the factor */
	f2_power = (semitones + 1) / 12;
	f1 = log_table[f1_index] << f1_power;
	f2 = log_table[f2_index] << f2_power;
	/* linear interpolation between adjacent semitone factors */
	bend = (int) ((((f2 - f1) * mantissa) >> 13) + f1);
	if (bend_down)
		bend = 1048576L / bend;	/* 1024^2 / bend: reciprocal in fixed point */
	return bend;
}
/*
 * Compute the GF1 frequency-control (FC) register value for playing a
 * sample recorded at `rate` at target frequency `freq`, with the card
 * mixing at `mix_rate`. Overflows are clamped and reported via printk.
 * (Currently compiled out.)
 */
unsigned short snd_gf1_compute_freq(unsigned int freq,
				    unsigned int rate,
				    unsigned short mix_rate)
{
	unsigned int fc;
	int scale = 0;

	/* pre-scale very large frequencies so the shifts below stay in range */
	while (freq >= 4194304L) {
		scale++;
		freq >>= 1;
	}
	fc = (freq << 10) / rate;
	if (fc > 97391L) {
		fc = 97391;
		snd_printk(KERN_ERR "patch: (1) fc frequency overflow - %u\n", fc);
	}
	/* rescale from the 44.1 kHz reference to the actual mixing rate */
	fc = (fc * 44100UL) / mix_rate;
	/* undo the pre-scaling */
	while (scale--)
		fc <<= 1;
	if (fc > 65535L) {
		fc = 65535;	/* clamp to the 16-bit register width */
		snd_printk(KERN_ERR "patch: (2) fc frequency overflow - %u\n", fc);
	}
	return (unsigned short) fc;
}
#endif /* 0 */
|
{
"pile_set_name": "Github"
}
|
var path = require('path')
var fs = require('graceful-fs')
var mkdir = require('../mkdirs')
/**
 * Write `data` to `file`, creating any missing parent directories first.
 *
 * @param {string} file - Destination path.
 * @param {string|Buffer} data - File contents.
 * @param {string|Object|Function} [encoding] - Encoding or writeFile options;
 *   may be omitted, in which case it is the callback and 'utf8' is used.
 * @param {Function} callback - Node-style callback `(err)`.
 */
function outputFile (file, data, encoding, callback) {
  // Support the (file, data, callback) arity by shifting the optional arg.
  if (typeof encoding === 'function') {
    callback = encoding
    encoding = 'utf8'
  }
  var dir = path.dirname(file)
  // Always ensure the directory first: mkdirs is a no-op when it already
  // exists, which avoids both the deprecated fs.exists() API and the
  // check-then-write race of the old exists()/writeFile() sequence.
  mkdir.mkdirs(dir, function (err) {
    if (err) return callback(err)
    fs.writeFile(file, data, encoding, callback)
  })
}
/**
 * Synchronous variant of outputFile: write `data` to `file`, creating any
 * missing parent directories first.
 *
 * @param {string} file - Destination path.
 * @param {string|Buffer} data - File contents.
 * @param {string|Object} [encoding] - Encoding or writeFileSync options;
 *   `undefined` is accepted by writeFileSync, so explicit forwarding
 *   replaces the old `arguments`-based apply().
 */
function outputFileSync (file, data, encoding) {
  var dir = path.dirname(file)
  if (!fs.existsSync(dir)) {
    mkdir.mkdirsSync(dir)
  }
  fs.writeFileSync(file, data, encoding)
}
// Public API: async and sync variants of "write file, creating parent dirs".
module.exports = {
  outputFile: outputFile,
  outputFileSync: outputFileSync
}
|
{
"pile_set_name": "Github"
}
|
chrI 230218
chrII 813184
chrIII 316620
chrIV 1531933
chrIX 439888
chrV 576874
chrVI 270161
chrVII 1090940
chrVIII 562643
chrX 745751
chrXI 666816
chrXII 1078177
chrXIII 924431
chrXIV 784333
chrXV 1091291
chrXVI 948066
chrM 85780
|
{
"pile_set_name": "Github"
}
|
UnifiedAgentLoggingSource
=========================
.. currentmodule:: oci.logging.models
.. autoclass:: UnifiedAgentLoggingSource
:show-inheritance:
:special-members: __init__
:members:
:undoc-members:
:inherited-members:
|
{
"pile_set_name": "Github"
}
|
#!/usr/bin/env php
<?php
// Render a UTF-8 text file to a PDF using TCPDF.
//   argv[1] = input text file
//   argv[2] = page width in points
//   argv[3] = output PDF path (relative paths resolve next to this script)
//   argv[4] = optional TTF font path (see the addTTFfont() note below)
require_once('tcpdf/tcpdf.php');
// Portrait orientation, unit "pt", page size {width, 1000pt}, unicode enabled.
$pdf = new TCPDF('P', 'pt', array($argv[2], 1000), true, 'UTF-8', false);
$pdf->SetMargins(0, 0, 0, 0);
$pdf->SetPrintHeader(false);
$pdf->SetPrintFooter(false);
$pdf->SetAutoPageBreak(TRUE);
if ($argc == 5) {
    //Activate the following line, then run as root once to generate the needed files
    //$font_name = TCPDF_FONTS::addTTFfont($argv[4], '', '', 32);
    // NOTE(review): dejavusans is used as a stand-in until the custom font
    // files above have been generated -- argv[4] is otherwise unused.
    $font_name = 'dejavusans';
} else {
    $font_name = 'times';
}
$pdf->setFontSubsetting(true);
$pdf->SetFont($font_name, '', 10, '', true);
$pdf->AddPage();
$pdf->setCellHeightRatio(1.12);
// Using the complete string leads to very long runtimes, splitting the string into
// individual lines and only breaking them is much faster with TCPDF
$utf8text = file_get_contents($argv[1], false);
$pieces = explode("\n", $utf8text);
foreach ($pieces as $text) {
    $pdf->Write(2, $text, '', 0, '', false, 0, false, false, 0);
    $pdf->Ln();
}
// Resolve relative output paths against the script's own directory.
if (substr($argv[3], 0, 1) !== '/') {
    $file = __DIR__ . '/' . $argv[3];
} else {
    $file = $argv[3];
}
// 'F' = save to a local file.
$pdf->Output($file, 'F');
|
{
"pile_set_name": "Github"
}
|
package com.baiyi.opscloud.service.server;
import com.baiyi.opscloud.domain.DataTable;
import com.baiyi.opscloud.domain.generator.opscloud.OcServerGroupType;
import com.baiyi.opscloud.domain.param.server.ServerGroupTypeParam;
/**
* @Author baiyi
* @Date 2020/2/21 1:12 下午
* @Version 1.0
*/
/**
 * Service contract for CRUD and query operations on server group types
 * ({@link OcServerGroupType} records).
 */
public interface OcServerGroupTypeService {

    /** Look up a server group type by its primary-key id. */
    OcServerGroupType queryOcServerGroupTypeById(Integer id);

    /** Look up a server group type by its numeric group-type code. */
    OcServerGroupType queryOcServerGroupTypeByGrpType(Integer grpType);

    /** Look up a server group type by name (presumably unique -- confirm schema). */
    OcServerGroupType queryOcServerGroupTypeByName(String name);

    /** Page through server group types matching the given query parameters. */
    DataTable<OcServerGroupType> queryOcServerGroupTypeByParam(ServerGroupTypeParam.PageQuery pageQuery);

    /** Insert a new server group type. */
    void addOcServerGroupType(OcServerGroupType ocServerGroupType);

    /** Update an existing server group type. */
    void updateOcServerGroupType(OcServerGroupType ocServerGroupType);

    /** Delete the server group type with the given id. */
    void deleteOcServerGroupTypeById(int id);
}
|
{
"pile_set_name": "Github"
}
|
'use strict';

// Re-export only the `timeout` helper from the async module.
const { timeout } = require('./async');

module.exports = timeout;
|
{
"pile_set_name": "Github"
}
|
{
"@type" : "gx:LocalDateTime",
"@value" : "2016-01-01T12:30"
}
|
{
"pile_set_name": "Github"
}
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="de">
<head>
<!-- Generated by javadoc -->
<title>Tuple.Tuple2 (documentation 1.3.3 API)</title>
<link rel="stylesheet" type="text/css" href="../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Tuple.Tuple2 (documentation 1.3.3 API)";
}
}
catch(err) {
}
//-->
var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
var tableTab = "tableTab";
var activeTableTab = "activeTableTab";
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../index-all.html">Index</a></li>
<li><a href="../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="class in net.jqwik.api"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../net/jqwik/api/Tuple.Tuple3.html" title="class in net.jqwik.api"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../index.html?net/jqwik/api/Tuple.Tuple2.html" target="_top">Frames</a></li>
<li><a href="Tuple.Tuple2.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">net.jqwik.api</div>
<h2 title="Class Tuple.Tuple2" class="title">Class Tuple.Tuple2<T1,T2></h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li>java.lang.Object</li>
<li>
<ul class="inheritance">
<li><a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="class in net.jqwik.api">net.jqwik.api.Tuple.Tuple1</a><T1></li>
<li>
<ul class="inheritance">
<li>net.jqwik.api.Tuple.Tuple2<T1,T2></li>
</ul>
</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<dl>
<dt>All Implemented Interfaces:</dt>
<dd>java.io.Serializable, java.lang.Cloneable, <a href="../../../net/jqwik/api/Tuple.html" title="interface in net.jqwik.api">Tuple</a></dd>
</dl>
<dl>
<dt>Direct Known Subclasses:</dt>
<dd><a href="../../../net/jqwik/api/Tuple.Tuple3.html" title="class in net.jqwik.api">Tuple.Tuple3</a></dd>
</dl>
<dl>
<dt>Enclosing interface:</dt>
<dd><a href="../../../net/jqwik/api/Tuple.html" title="interface in net.jqwik.api">Tuple</a></dd>
</dl>
<hr>
<br>
<pre>public static class <span class="typeNameLabel">Tuple.Tuple2<T1,T2></span>
extends <a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="class in net.jqwik.api">Tuple.Tuple1</a><T1></pre>
<dl>
<dt><span class="seeLabel">See Also:</span></dt>
<dd><a href="../../../serialized-form.html#net.jqwik.api.Tuple.Tuple2">Serialized Form</a></dd>
</dl>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ======== NESTED CLASS SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="nested.class.summary">
<!-- -->
</a>
<h3>Nested Class Summary</h3>
<ul class="blockList">
<li class="blockList"><a name="nested.classes.inherited.from.class.net.jqwik.api.Tuple">
<!-- -->
</a>
<h3>Nested classes/interfaces inherited from interface net.jqwik.api.<a href="../../../net/jqwik/api/Tuple.html" title="interface in net.jqwik.api">Tuple</a></h3>
<code><a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="class in net.jqwik.api">Tuple.Tuple1</a><<a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="type parameter in Tuple.Tuple1">T1</a>>, <a href="../../../net/jqwik/api/Tuple.Tuple2.html" title="class in net.jqwik.api">Tuple.Tuple2</a><<a href="../../../net/jqwik/api/Tuple.Tuple2.html" title="type parameter in Tuple.Tuple2">T1</a>,<a href="../../../net/jqwik/api/Tuple.Tuple2.html" title="type parameter in Tuple.Tuple2">T2</a>>, <a href="../../../net/jqwik/api/Tuple.Tuple3.html" title="class in net.jqwik.api">Tuple.Tuple3</a><<a href="../../../net/jqwik/api/Tuple.Tuple3.html" title="type parameter in Tuple.Tuple3">T1</a>,<a href="../../../net/jqwik/api/Tuple.Tuple3.html" title="type parameter in Tuple.Tuple3">T2</a>,<a href="../../../net/jqwik/api/Tuple.Tuple3.html" title="type parameter in Tuple.Tuple3">T3</a>>, <a href="../../../net/jqwik/api/Tuple.Tuple4.html" title="class in net.jqwik.api">Tuple.Tuple4</a><<a href="../../../net/jqwik/api/Tuple.Tuple4.html" title="type parameter in Tuple.Tuple4">T1</a>,<a href="../../../net/jqwik/api/Tuple.Tuple4.html" title="type parameter in Tuple.Tuple4">T2</a>,<a href="../../../net/jqwik/api/Tuple.Tuple4.html" title="type parameter in Tuple.Tuple4">T3</a>,<a href="../../../net/jqwik/api/Tuple.Tuple4.html" title="type parameter in Tuple.Tuple4">T4</a>>, <a href="../../../net/jqwik/api/Tuple.Tuple5.html" title="class in net.jqwik.api">Tuple.Tuple5</a><<a href="../../../net/jqwik/api/Tuple.Tuple5.html" title="type parameter in Tuple.Tuple5">T1</a>,<a href="../../../net/jqwik/api/Tuple.Tuple5.html" title="type parameter in Tuple.Tuple5">T2</a>,<a href="../../../net/jqwik/api/Tuple.Tuple5.html" title="type parameter in Tuple.Tuple5">T3</a>,<a href="../../../net/jqwik/api/Tuple.Tuple5.html" title="type parameter in Tuple.Tuple5">T4</a>,<a href="../../../net/jqwik/api/Tuple.Tuple5.html" title="type parameter in Tuple.Tuple5">T5</a>>, <a 
href="../../../net/jqwik/api/Tuple.Tuple6.html" title="class in net.jqwik.api">Tuple.Tuple6</a><<a href="../../../net/jqwik/api/Tuple.Tuple6.html" title="type parameter in Tuple.Tuple6">T1</a>,<a href="../../../net/jqwik/api/Tuple.Tuple6.html" title="type parameter in Tuple.Tuple6">T2</a>,<a href="../../../net/jqwik/api/Tuple.Tuple6.html" title="type parameter in Tuple.Tuple6">T3</a>,<a href="../../../net/jqwik/api/Tuple.Tuple6.html" title="type parameter in Tuple.Tuple6">T4</a>,<a href="../../../net/jqwik/api/Tuple.Tuple6.html" title="type parameter in Tuple.Tuple6">T5</a>,<a href="../../../net/jqwik/api/Tuple.Tuple6.html" title="type parameter in Tuple.Tuple6">T6</a>>, <a href="../../../net/jqwik/api/Tuple.Tuple7.html" title="class in net.jqwik.api">Tuple.Tuple7</a><<a href="../../../net/jqwik/api/Tuple.Tuple7.html" title="type parameter in Tuple.Tuple7">T1</a>,<a href="../../../net/jqwik/api/Tuple.Tuple7.html" title="type parameter in Tuple.Tuple7">T2</a>,<a href="../../../net/jqwik/api/Tuple.Tuple7.html" title="type parameter in Tuple.Tuple7">T3</a>,<a href="../../../net/jqwik/api/Tuple.Tuple7.html" title="type parameter in Tuple.Tuple7">T4</a>,<a href="../../../net/jqwik/api/Tuple.Tuple7.html" title="type parameter in Tuple.Tuple7">T5</a>,<a href="../../../net/jqwik/api/Tuple.Tuple7.html" title="type parameter in Tuple.Tuple7">T6</a>,<a href="../../../net/jqwik/api/Tuple.Tuple7.html" title="type parameter in Tuple.Tuple7">T7</a>>, <a href="../../../net/jqwik/api/Tuple.Tuple8.html" title="class in net.jqwik.api">Tuple.Tuple8</a><<a href="../../../net/jqwik/api/Tuple.Tuple8.html" title="type parameter in Tuple.Tuple8">T1</a>,<a href="../../../net/jqwik/api/Tuple.Tuple8.html" title="type parameter in Tuple.Tuple8">T2</a>,<a href="../../../net/jqwik/api/Tuple.Tuple8.html" title="type parameter in Tuple.Tuple8">T3</a>,<a href="../../../net/jqwik/api/Tuple.Tuple8.html" title="type parameter in Tuple.Tuple8">T4</a>,<a href="../../../net/jqwik/api/Tuple.Tuple8.html" 
title="type parameter in Tuple.Tuple8">T5</a>,<a href="../../../net/jqwik/api/Tuple.Tuple8.html" title="type parameter in Tuple.Tuple8">T6</a>,<a href="../../../net/jqwik/api/Tuple.Tuple8.html" title="type parameter in Tuple.Tuple8">T7</a>,<a href="../../../net/jqwik/api/Tuple.Tuple8.html" title="type parameter in Tuple.Tuple8">T8</a>></code></li>
</ul>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method.summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd"> </span></span><span id="t2" class="tableTab"><span><a href="javascript:show(2);">Instance Methods</a></span><span class="tabEnd"> </span></span><span id="t4" class="tableTab"><span><a href="javascript:show(8);">Concrete Methods</a></span><span class="tabEnd"> </span></span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr id="i0" class="altColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../net/jqwik/api/Tuple.Tuple2.html#equals-java.lang.Object-">equals</a></span>(java.lang.Object o)</code> </td>
</tr>
<tr id="i1" class="rowColor">
<td class="colFirst"><code><a href="../../../net/jqwik/api/Tuple.Tuple2.html" title="type parameter in Tuple.Tuple2">T2</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../net/jqwik/api/Tuple.Tuple2.html#get2--">get2</a></span>()</code> </td>
</tr>
<tr id="i2" class="altColor">
<td class="colFirst"><code>int</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../net/jqwik/api/Tuple.Tuple2.html#hashCode--">hashCode</a></span>()</code> </td>
</tr>
<tr id="i3" class="rowColor">
<td class="colFirst"><code>java.util.List<java.lang.Object></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../net/jqwik/api/Tuple.Tuple2.html#items--">items</a></span>()</code> </td>
</tr>
<tr id="i4" class="altColor">
<td class="colFirst"><code>int</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../net/jqwik/api/Tuple.Tuple2.html#size--">size</a></span>()</code> </td>
</tr>
<tr id="i5" class="rowColor">
<td class="colFirst"><code>java.lang.String</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../net/jqwik/api/Tuple.Tuple2.html#toString--">toString</a></span>()</code> </td>
</tr>
</table>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.net.jqwik.api.Tuple.Tuple1">
<!-- -->
</a>
<h3>Methods inherited from class net.jqwik.api.<a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="class in net.jqwik.api">Tuple.Tuple1</a></h3>
<code><a href="../../../net/jqwik/api/Tuple.Tuple1.html#get1--">get1</a></code></li>
</ul>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.Object</h3>
<code>clone, finalize, getClass, notify, notifyAll, wait, wait, wait</code></li>
</ul>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.net.jqwik.api.Tuple">
<!-- -->
</a>
<h3>Methods inherited from interface net.jqwik.api.<a href="../../../net/jqwik/api/Tuple.html" title="interface in net.jqwik.api">Tuple</a></h3>
<code><a href="../../../net/jqwik/api/Tuple.html#itemsToString--">itemsToString</a>, <a href="../../../net/jqwik/api/Tuple.html#of-T1-">of</a>, <a href="../../../net/jqwik/api/Tuple.html#of-T1-T2-">of</a>, <a href="../../../net/jqwik/api/Tuple.html#of-T1-T2-T3-">of</a>, <a href="../../../net/jqwik/api/Tuple.html#of-T1-T2-T3-T4-">of</a>, <a href="../../../net/jqwik/api/Tuple.html#of-T1-T2-T3-T4-T5-">of</a>, <a href="../../../net/jqwik/api/Tuple.html#of-T1-T2-T3-T4-T5-T6-">of</a>, <a href="../../../net/jqwik/api/Tuple.html#of-T1-T2-T3-T4-T5-T6-T7-">of</a>, <a href="../../../net/jqwik/api/Tuple.html#of-T1-T2-T3-T4-T5-T6-T7-T8-">of</a></code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method.detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="size--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>size</h4>
<pre>public int size()</pre>
<dl>
<dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
<dd><code><a href="../../../net/jqwik/api/Tuple.html#size--">size</a></code> in interface <code><a href="../../../net/jqwik/api/Tuple.html" title="interface in net.jqwik.api">Tuple</a></code></dd>
<dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
<dd><code><a href="../../../net/jqwik/api/Tuple.Tuple1.html#size--">size</a></code> in class <code><a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="class in net.jqwik.api">Tuple.Tuple1</a><<a href="../../../net/jqwik/api/Tuple.Tuple2.html" title="type parameter in Tuple.Tuple2">T1</a>></code></dd>
</dl>
</li>
</ul>
<a name="get2--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>get2</h4>
<pre>public <a href="../../../net/jqwik/api/Tuple.Tuple2.html" title="type parameter in Tuple.Tuple2">T2</a> get2()</pre>
</li>
</ul>
<a name="items--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>items</h4>
<pre>public java.util.List<java.lang.Object> items()</pre>
<dl>
<dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
<dd><code><a href="../../../net/jqwik/api/Tuple.html#items--">items</a></code> in interface <code><a href="../../../net/jqwik/api/Tuple.html" title="interface in net.jqwik.api">Tuple</a></code></dd>
<dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
<dd><code><a href="../../../net/jqwik/api/Tuple.Tuple1.html#items--">items</a></code> in class <code><a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="class in net.jqwik.api">Tuple.Tuple1</a><<a href="../../../net/jqwik/api/Tuple.Tuple2.html" title="type parameter in Tuple.Tuple2">T1</a>></code></dd>
</dl>
</li>
</ul>
<a name="equals-java.lang.Object-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>equals</h4>
<pre>public boolean equals(java.lang.Object o)</pre>
<dl>
<dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
<dd><code><a href="../../../net/jqwik/api/Tuple.Tuple1.html#equals-java.lang.Object-">equals</a></code> in class <code><a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="class in net.jqwik.api">Tuple.Tuple1</a><<a href="../../../net/jqwik/api/Tuple.Tuple2.html" title="type parameter in Tuple.Tuple2">T1</a>></code></dd>
</dl>
</li>
</ul>
<a name="hashCode--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>hashCode</h4>
<pre>public int hashCode()</pre>
<dl>
<dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
<dd><code><a href="../../../net/jqwik/api/Tuple.Tuple1.html#hashCode--">hashCode</a></code> in class <code><a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="class in net.jqwik.api">Tuple.Tuple1</a><<a href="../../../net/jqwik/api/Tuple.Tuple2.html" title="type parameter in Tuple.Tuple2">T1</a>></code></dd>
</dl>
</li>
</ul>
<a name="toString--">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>toString</h4>
<pre>public java.lang.String toString()</pre>
<dl>
<dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
<dd><code><a href="../../../net/jqwik/api/Tuple.Tuple1.html#toString--">toString</a></code> in class <code><a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="class in net.jqwik.api">Tuple.Tuple1</a><<a href="../../../net/jqwik/api/Tuple.Tuple2.html" title="type parameter in Tuple.Tuple2">T1</a>></code></dd>
</dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../index-all.html">Index</a></li>
<li><a href="../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../net/jqwik/api/Tuple.Tuple1.html" title="class in net.jqwik.api"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../net/jqwik/api/Tuple.Tuple3.html" title="class in net.jqwik.api"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../index.html?net/jqwik/api/Tuple.Tuple2.html" target="_top">Frames</a></li>
<li><a href="Tuple.Tuple2.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_07ep31f" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Zeebe Modeler" exporterVersion="0.8.0">
<bpmn:collaboration id="message-single-instance">
<bpmn:participant id="Participant_0401xdf" processRef="Process_0uz6tug" />
<bpmn:participant id="Participant_10wuzi9" processRef="Process_0dccold" />
<bpmn:messageFlow id="MessageFlow_1xi2qr2" sourceRef="Task_1lmi6p2" targetRef="StartEvent_1" />
</bpmn:collaboration>
<bpmn:process id="Process_0uz6tug" isExecutable="true">
<bpmn:serviceTask id="Task_0t7m3x6" name="A">
<bpmn:outgoing>SequenceFlow_0fswpsq</bpmn:outgoing>
</bpmn:serviceTask>
<bpmn:sequenceFlow id="SequenceFlow_0fswpsq" sourceRef="Task_0t7m3x6" targetRef="Task_1lmi6p2" />
<bpmn:serviceTask id="Task_1lmi6p2" name="B">
<bpmn:incoming>SequenceFlow_0fswpsq</bpmn:incoming>
</bpmn:serviceTask>
</bpmn:process>
<bpmn:process id="Process_0dccold" isExecutable="false">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>SequenceFlow_15cqsk5</bpmn:outgoing>
<bpmn:messageEventDefinition />
</bpmn:startEvent>
<bpmn:serviceTask id="Task_0ik0cp5" name="C">
<bpmn:incoming>SequenceFlow_15cqsk5</bpmn:incoming>
</bpmn:serviceTask>
<bpmn:sequenceFlow id="SequenceFlow_15cqsk5" sourceRef="StartEvent_1" targetRef="Task_0ik0cp5" />
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="message-single-instance">
<bpmndi:BPMNShape id="Participant_0401xdf_di" bpmnElement="Participant_0401xdf" isHorizontal="true">
<dc:Bounds x="150" y="120" width="570" height="130" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="StartEvent_0e5ac35_di" bpmnElement="StartEvent_1">
<dc:Bounds x="422" y="352" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Participant_15zyr41_di" bpmnElement="Participant_10wuzi9" isHorizontal="true">
<dc:Bounds x="150" y="300" width="570" height="130" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="ServiceTask_0i4c7nx_di" bpmnElement="Task_1lmi6p2">
<dc:Bounds x="390" y="140" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="MessageFlow_1xi2qr2_di" bpmnElement="MessageFlow_1xi2qr2">
<di:waypoint x="440" y="220" />
<di:waypoint x="440" y="352" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_15cqsk5_di" bpmnElement="SequenceFlow_15cqsk5">
<di:waypoint x="458" y="370" />
<di:waypoint x="540" y="370" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="ServiceTask_0wxs96j_di" bpmnElement="Task_0ik0cp5">
<dc:Bounds x="540" y="330" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="ServiceTask_0eeiqr3_di" bpmnElement="Task_0t7m3x6">
<dc:Bounds x="210" y="140" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_0fswpsq_di" bpmnElement="SequenceFlow_0fswpsq">
<di:waypoint x="310" y="180" />
<di:waypoint x="390" y="180" />
</bpmndi:BPMNEdge>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>
|
{
"pile_set_name": "Github"
}
|
{ "hello_worlds" => {"type" => "array", "items" => {"type" => "JSONModel(:hello_world) object"}} }
|
{
"pile_set_name": "Github"
}
|
/*
* scsi_error.c Copyright (C) 1997 Eric Youngdale
*
* SCSI error/timeout handling
* Initial versions: Eric Youngdale. Based upon conversations with
* Leonard Zubkoff and David Miller at Linux Expo,
* ideas originating from all over the place.
*
* Restructured scsi_unjam_host and associated functions.
* September 04, 2002 Mike Anderson (andmike@us.ibm.com)
*
* Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
* minor cleanups.
* September 30, 2002 Mike Anderson (andmike@us.ibm.com)
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
#include "scsi_transport_api.h"
#include <trace/events/scsi.h>
/* Completion callback for commands issued by the error handler itself. */
static void scsi_eh_done(struct scsi_cmnd *scmd);

/*
 * These should *probably* be handled by the host itself.
 * Since it is allowed to sleep, it probably should.
 */
/* Settle times are in seconds (passed to ssleep() after a reset). */
#define BUS_RESET_SETTLE_TIME (10)
#define HOST_RESET_SETTLE_TIME (10)

static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static int scsi_try_to_abort_cmd(struct scsi_host_template *,
				 struct scsi_cmnd *);
/* called with shost->host_lock held */
/*
 * Wake the error-handler thread, but only once every outstanding command
 * on the host has failed (host_busy == host_failed); until then normal
 * completion processing may still make progress.
 */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
	if (atomic_read(&shost->host_busy) == shost->host_failed) {
		trace_scsi_eh_wakeup(shost);
		wake_up_process(shost->ehandler);
		SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
			"Waking error handler thread\n"));
	}
}
/**
 * scsi_schedule_eh - schedule EH for SCSI host
 * @shost: SCSI host to invoke error handling on.
 *
 * Schedule SCSI EH without scmd.
 */
void scsi_schedule_eh(struct Scsi_Host *shost)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);

	/*
	 * Only schedule if the host can be moved into a recovery state;
	 * host_eh_scheduled is bumped so the EH thread knows work is
	 * pending (its consumer is not visible in this file chunk).
	 */
	if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
	    scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
		shost->host_eh_scheduled++;
		scsi_eh_wakeup(shost);
	}

	spin_unlock_irqrestore(shost->host_lock, flags);
}
EXPORT_SYMBOL_GPL(scsi_schedule_eh);
/*
 * Returns 1 when the host's configured EH deadline has expired, 0 when it
 * has not or when no deadline is in effect (eh_deadline == -1 or no
 * last_reset timestamp recorded).
 */
static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
{
	if (!shost->last_reset || shost->eh_deadline == -1)
		return 0;
	/*
	 * 32bit accesses are guaranteed to be atomic
	 * (on all supported architectures), so instead
	 * of using a spinlock we can as well double check
	 * if eh_deadline has been set to 'off' during the
	 * time_before call.
	 */
	if (time_before(jiffies, shost->last_reset + shost->eh_deadline) &&
	    shost->eh_deadline > -1)
		return 0;
	return 1;
}
/**
 * scmd_eh_abort_handler - Handle command aborts
 * @work: command to be aborted.
 *
 * Runs from shost->tmf_work_q (queued by scsi_abort_command()).  Tries an
 * asynchronous LLD abort first; if the abort succeeds the command is
 * retried or finished here.  Otherwise the command is escalated to the
 * full EH thread via scsi_eh_scmd_add().
 */
void
scmd_eh_abort_handler(struct work_struct *work)
{
	struct scsi_cmnd *scmd =
		container_of(work, struct scsi_cmnd, abort_work.work);
	struct scsi_device *sdev = scmd->device;
	int rtn;

	if (scsi_host_eh_past_deadline(sdev->host)) {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "eh timeout, not aborting\n"));
	} else {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "aborting command\n"));
		rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
		if (rtn == SUCCESS) {
			set_host_byte(scmd, DID_TIME_OUT);
			if (scsi_host_eh_past_deadline(sdev->host)) {
				SCSI_LOG_ERROR_RECOVERY(3,
					scmd_printk(KERN_INFO, scmd,
						    "eh timeout, not retrying "
						    "aborted command\n"));
			} else if (!scsi_noretry_cmd(scmd) &&
			    (++scmd->retries <= scmd->allowed)) {
				SCSI_LOG_ERROR_RECOVERY(3,
					scmd_printk(KERN_WARNING, scmd,
						    "retry aborted command\n"));
				scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
				return;
			} else {
				SCSI_LOG_ERROR_RECOVERY(3,
					scmd_printk(KERN_WARNING, scmd,
						    "finish aborted command\n"));
				scsi_finish_command(scmd);
				return;
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "cmd abort %s\n",
					    (rtn == FAST_IO_FAIL) ?
					    "not send" : "failed"));
		}
	}
	/* Abort did not resolve the command: hand it to the EH thread. */
	if (!scsi_eh_scmd_add(scmd, 0)) {
		/* No EH thread or bad host state: terminate the command. */
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_WARNING, scmd,
				    "terminate aborted command\n"));
		set_host_byte(scmd, DID_TIME_OUT);
		scsi_finish_command(scmd);
	}
}
/**
 * scsi_abort_command - schedule a command abort
 * @scmd: scmd to abort.
 *
 * We only need to abort commands after a command timeout
 *
 * Return value:
 *	SUCCESS when the abort work was queued, FAILED when the caller
 *	must escalate to the regular error handler instead.
 */
static int
scsi_abort_command(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
		/*
		 * Retry after abort failed, escalate to next level.
		 */
		scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "previous abort failed\n"));
		BUG_ON(delayed_work_pending(&scmd->abort_work));
		return FAILED;
	}
	/*
	 * Do not try a command abort if
	 * SCSI EH has already started.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_in_recovery(shost)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "not aborting, host in recovery\n"));
		return FAILED;
	}

	/* Arm the EH deadline on the first failure, if one is configured. */
	if (shost->eh_deadline != -1 && !shost->last_reset)
		shost->last_reset = jiffies;
	spin_unlock_irqrestore(shost->host_lock, flags);

	scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
	SCSI_LOG_ERROR_RECOVERY(3,
		scmd_printk(KERN_INFO, scmd, "abort scheduled\n"));
	/* Run the abort work after a short (HZ/100 jiffies) delay. */
	queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
	return SUCCESS;
}
/**
 * scsi_eh_scmd_add - add scsi cmd to error handling.
 * @scmd: scmd to run eh on.
 * @eh_flag: optional SCSI_EH flag.
 *
 * Return value:
 *	0 on failure.
 */
int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
{
	struct Scsi_Host *shost = scmd->device->host;
	unsigned long flags;
	int ret = 0;

	/* Without an EH thread there is nobody to hand the command to. */
	if (!shost->ehandler)
		return 0;

	spin_lock_irqsave(shost->host_lock, flags);
	/* Fail unless the host can be moved into a recovery state. */
	if (scsi_host_set_state(shost, SHOST_RECOVERY))
		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
			goto out_unlock;

	if (shost->eh_deadline != -1 && !shost->last_reset)
		shost->last_reset = jiffies;

	ret = 1;
	/* An async abort was already scheduled: drop the CANCEL flag. */
	if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
		eh_flag &= ~SCSI_EH_CANCEL_CMD;
	scmd->eh_eflags |= eh_flag;
	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
	shost->host_failed++;
	scsi_eh_wakeup(shost);
 out_unlock:
	spin_unlock_irqrestore(shost->host_lock, flags);
	return ret;
}
/**
 * scsi_times_out - Timeout function for normal scsi commands.
 * @req: request that is timing out.
 *
 * Notes:
 *     We do not need to lock this. There is the potential for a race
 *     only in that the normal completion handling might run, but if the
 *     normal completion function determines that the timer has already
 *     fired, then it mustn't do anything.
 */
enum blk_eh_timer_return scsi_times_out(struct request *req)
{
	struct scsi_cmnd *scmd = req->special;
	enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
	struct Scsi_Host *host = scmd->device->host;

	trace_scsi_dispatch_cmd_timeout(scmd);
	scsi_log_completion(scmd, TIMEOUT_ERROR);

	/* Arm the EH deadline on the first timeout, if one is configured. */
	if (host->eh_deadline != -1 && !host->last_reset)
		host->last_reset = jiffies;

	/* The transport's timeout hook takes precedence over the host's. */
	if (host->transportt->eh_timed_out)
		rtn = host->transportt->eh_timed_out(scmd);
	else if (host->hostt->eh_timed_out)
		rtn = host->hostt->eh_timed_out(scmd);

	if (rtn == BLK_EH_NOT_HANDLED) {
		/* Prefer an async abort; otherwise queue for the EH thread. */
		if (!host->hostt->no_async_abort &&
		    scsi_abort_command(scmd) == SUCCESS)
			return BLK_EH_NOT_HANDLED;
		set_host_byte(scmd, DID_TIME_OUT);
		if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))
			rtn = BLK_EH_HANDLED;
	}

	return rtn;
}
/**
 * scsi_block_when_processing_errors - Prevent cmds from being queued.
 * @sdev: Device on which we are performing recovery.
 *
 * Description:
 *	We block until the host is out of error recovery, and then check to
 *	see whether the host or the device is offline.
 *
 * Return value:
 *	0 when dev was taken offline by error recovery. 1 OK to proceed.
 */
int scsi_block_when_processing_errors(struct scsi_device *sdev)
{
	int online;

	/* No timeout on wait_event: may block until recovery completes. */
	wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));

	online = scsi_device_online(sdev);

	SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
		"%s: rtn: %d\n", __func__, online));

	return online;
}
EXPORT_SYMBOL(scsi_block_when_processing_errors);
#ifdef CONFIG_SCSI_LOGGING
/**
 * scsi_eh_prt_fail_stats - Log info on failures.
 * @shost: scsi host being recovered.
 * @work_q: Queue of scsi cmds to process.
 */
static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
					  struct list_head *work_q)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	int total_failures = 0;
	int cmd_failed = 0;
	int cmd_cancel = 0;
	int devices_failed = 0;

	/* For each device, count its failed/cancelled commands on work_q. */
	shost_for_each_device(sdev, shost) {
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (scmd->device == sdev) {
				++total_failures;
				if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
					++cmd_cancel;
				else
					++cmd_failed;
			}
		}

		if (cmd_cancel || cmd_failed) {
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					    "%s: cmds failed: %d, cancel: %d\n",
					    __func__, cmd_failed,
					    cmd_cancel));
			/* Reset per-device counters for the next device. */
			cmd_cancel = 0;
			cmd_failed = 0;
			++devices_failed;
		}
	}

	SCSI_LOG_ERROR_RECOVERY(2, shost_printk(KERN_INFO, shost,
				   "Total of %d commands on %d"
				   " devices require eh work\n",
				   total_failures, devices_failed));
}
#endif
/**
 * scsi_report_lun_change - Set flag on all *other* devices on the same target
 *		to indicate that a UNIT ATTENTION is expected.
 * @sdev: Device reporting the UNIT ATTENTION
 */
static void scsi_report_lun_change(struct scsi_device *sdev)
{
	sdev->sdev_target->expecting_lun_change = 1;
}

/**
 * scsi_report_sense - Examine scsi sense information and log messages for
 *		certain conditions, also issue uevents for some of them.
 * @sdev: Device reporting the sense code
 * @sshdr: sshdr to be examined
 */
static void scsi_report_sense(struct scsi_device *sdev,
			      struct scsi_sense_hdr *sshdr)
{
	enum scsi_device_event evt_type = SDEV_EVT_MAXBITS;	/* i.e. none */

	if (sshdr->sense_key == UNIT_ATTENTION) {
		if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) {
			/* 3F/03: inquiry data has changed */
			evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Inquiry data has changed");
		} else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) {
			/* 3F/0E: REPORTED LUNS DATA HAS CHANGED */
			evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
			scsi_report_lun_change(sdev);
			sdev_printk(KERN_WARNING, sdev,
				    "Warning! Received an indication that the "
				    "LUN assignments on this target have "
				    "changed. The Linux SCSI layer does not "
				    "automatically remap LUN assignments.\n");
		} else if (sshdr->asc == 0x3f)
			sdev_printk(KERN_WARNING, sdev,
				    "Warning! Received an indication that the "
				    "operating parameters on this target have "
				    "changed. The Linux SCSI layer does not "
				    "automatically adjust these parameters.\n");

		if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) {
			evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Warning! Received an indication that the "
				    "LUN reached a thin provisioning soft "
				    "threshold.\n");
		}

		if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
			evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Mode parameters changed");
		} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
			evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Capacity data has changed");
		} else if (sshdr->asc == 0x2a)
			sdev_printk(KERN_WARNING, sdev,
				    "Parameters changed");
	}

	/* Queue a uevent for any condition recognized above. */
	if (evt_type != SDEV_EVT_MAXBITS) {
		set_bit(evt_type, sdev->pending_events);
		schedule_work(&sdev->event_work);
	}
}
/**
 * scsi_check_sense - Examine scsi cmd sense
 * @scmd: Cmd to have sense checked.
 *
 * Return value:
 *	SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
 *
 * Notes:
 *	When a deferred error is detected the current command has
 *	not been executed and needs retrying.
 */
static int scsi_check_sense(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct scsi_sense_hdr sshdr;

	if (! scsi_command_normalize_sense(scmd, &sshdr))
		return FAILED;	/* no valid sense data */

	scsi_report_sense(sdev, &sshdr);

	if (scsi_sense_is_deferred(&sshdr))
		return NEEDS_RETRY;

	/* Give any attached device handler first crack at the sense data. */
	if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
	    sdev->scsi_dh_data->scsi_dh->check_sense) {
		int rc;

		rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
		if (rc != SCSI_RETURN_NOT_HANDLED)
			return rc;
		/* handler does not care. Drop down to default handling */
	}

	if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
		/*
		 * nasty: for mid-layer issued TURs, we need to return the
		 * actual sense data without any recovery attempt. For eh
		 * issued ones, we need to try to recover and interpret
		 */
		return SUCCESS;

	/*
	 * Previous logic looked for FILEMARK, EOM or ILI which are
	 * mainly associated with tapes and returned SUCCESS.
	 */
	if (sshdr.response_code == 0x70) {
		/* fixed format */
		if (scmd->sense_buffer[2] & 0xe0)
			return SUCCESS;
	} else {
		/*
		 * descriptor format: look for "stream commands sense data
		 * descriptor" (see SSC-3). Assume single sense data
		 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
		 */
		if ((sshdr.additional_length > 3) &&
		    (scmd->sense_buffer[8] == 0x4) &&
		    (scmd->sense_buffer[11] & 0xe0))
			return SUCCESS;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
		return SUCCESS;
	case RECOVERED_ERROR:
		return /* soft_error */ SUCCESS;

	case ABORTED_COMMAND:
		if (sshdr.asc == 0x10) /* DIF */
			return SUCCESS;
		return NEEDS_RETRY;
	case NOT_READY:
	case UNIT_ATTENTION:
		/*
		 * if we are expecting a cc/ua because of a bus reset that we
		 * performed, treat this just as a retry. otherwise this is
		 * information that we should pass up to the upper-level driver
		 * so that we can deal with it there.
		 */
		if (scmd->device->expecting_cc_ua) {
			/*
			 * Because some device does not queue unit
			 * attentions correctly, we carefully check
			 * additional sense code and qualifier so as
			 * not to squash media change unit attention.
			 */
			if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
				scmd->device->expecting_cc_ua = 0;
				return NEEDS_RETRY;
			}
		}
		/*
		 * we might also expect a cc/ua if another LUN on the target
		 * reported a UA with an ASC/ASCQ of 3F 0E -
		 * REPORTED LUNS DATA HAS CHANGED.
		 */
		if (scmd->device->sdev_target->expecting_lun_change &&
		    sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
			return NEEDS_RETRY;
		/*
		 * if the device is in the process of becoming ready, we
		 * should retry.
		 */
		if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
			return NEEDS_RETRY;
		/*
		 * if the device is not started, we need to wake
		 * the error handler to start the motor
		 */
		if (scmd->device->allow_restart &&
		    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
			return FAILED;
		/*
		 * Pass the UA upwards for a determination in the completion
		 * functions.
		 */
		return SUCCESS;

		/* these are not supported */
	case DATA_PROTECT:
		if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
			/* Thin provisioning hard threshold reached */
			set_host_byte(scmd, DID_ALLOC_FAILURE);
			return SUCCESS;
		}
		/* fall through - treated like the other unsupported keys */
	case COPY_ABORTED:
	case VOLUME_OVERFLOW:
	case MISCOMPARE:
	case BLANK_CHECK:
		set_host_byte(scmd, DID_TARGET_FAILURE);
		return SUCCESS;

	case MEDIUM_ERROR:
		if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
		    sshdr.asc == 0x13 || /* AMNF DATA FIELD */
		    sshdr.asc == 0x14) { /* RECORD NOT FOUND */
			set_host_byte(scmd, DID_MEDIUM_ERROR);
			return SUCCESS;
		}
		return NEEDS_RETRY;

	case HARDWARE_ERROR:
		if (scmd->device->retry_hwerror)
			return ADD_TO_MLQUEUE;
		else
			set_host_byte(scmd, DID_TARGET_FAILURE);
		/* fall through - both keys end up returning SUCCESS below */

	case ILLEGAL_REQUEST:
		if (sshdr.asc == 0x20 || /* Invalid command operation code */
		    sshdr.asc == 0x21 || /* Logical block address out of range */
		    sshdr.asc == 0x24 || /* Invalid field in cdb */
		    sshdr.asc == 0x26) { /* Parameter value invalid */
			set_host_byte(scmd, DID_TARGET_FAILURE);
		}
		return SUCCESS;

	default:
		return SUCCESS;
	}
}
/*
 * Step the queue depth back up on every device of a target, once a full
 * ramp-up period has passed since both the last ramp-up and the last
 * queue-full event.
 */
static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
{
	struct scsi_host_template *sht = sdev->host->hostt;
	struct scsi_device *tmp_sdev;

	if (!sht->track_queue_depth ||
	    sdev->queue_depth >= sdev->max_queue_depth)
		return;

	/* Too soon since the last ramp-up? */
	if (time_before(jiffies,
	    sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
		return;

	/* Too soon since the last queue-full event? */
	if (time_before(jiffies,
	    sdev->last_queue_full_time + sdev->queue_ramp_up_period))
		return;

	/*
	 * Walk all devices of a target and do
	 * ramp up on them.
	 */
	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->channel != sdev->channel ||
		    tmp_sdev->id != sdev->id ||
		    tmp_sdev->queue_depth == sdev->max_queue_depth)
			continue;

		scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1);
		sdev->last_queue_ramp_up = jiffies;
	}
}

/* Track a QUEUE FULL status by lowering depth on every device of the target. */
static void scsi_handle_queue_full(struct scsi_device *sdev)
{
	struct scsi_host_template *sht = sdev->host->hostt;
	struct scsi_device *tmp_sdev;

	if (!sht->track_queue_depth)
		return;

	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->channel != sdev->channel ||
		    tmp_sdev->id != sdev->id)
			continue;
		/*
		 * We do not know the number of commands that were at
		 * the device when we got the queue full so we start
		 * from the highest possible value and work our way down.
		 */
		scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
	}
}
/**
 * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD.
 * @scmd: SCSI cmd to examine.
 *
 * Notes:
 *	This is *only* called when we are examining the status of commands
 *	queued during error recovery. the main difference here is that we
 *	don't allow for the possibility of retries here, and we are a lot
 *	more restrictive about what we consider acceptable.
 */
static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
{
	/*
	 * first check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	if (host_byte(scmd->result) == DID_RESET) {
		/*
		 * rats. we are already in the error handler, so we now
		 * get to try and figure out what to do next. if the sense
		 * is valid, we have a pretty good idea of what to do.
		 * if not, we mark it as FAILED.
		 */
		return scsi_check_sense(scmd);
	}
	if (host_byte(scmd->result) != DID_OK)
		return FAILED;

	/*
	 * next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/*
	 * now, check the status byte to see if this indicates
	 * anything special.
	 */
	switch (status_byte(scmd->result)) {
	case GOOD:
		scsi_handle_queue_ramp_up(scmd->device);
		/* fall through */
	case COMMAND_TERMINATED:
		return SUCCESS;
	case CHECK_CONDITION:
		return scsi_check_sense(scmd);
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
		/*
		 * who knows? FIXME(eric)
		 */
		return SUCCESS;
	case RESERVATION_CONFLICT:
		if (scmd->cmnd[0] == TEST_UNIT_READY)
			/* it is a success, we probed the device and
			 * found it */
			return SUCCESS;
		/* otherwise, we failed to send the command */
		return FAILED;
	case QUEUE_FULL:
		scsi_handle_queue_full(scmd->device);
		/* fall through */
	case BUSY:
		return NEEDS_RETRY;
	default:
		return FAILED;
	}
	return FAILED;
}
/**
* scsi_eh_done - Completion function for error handling.
* @scmd: Cmd that is done.
*/
static void scsi_eh_done(struct scsi_cmnd *scmd)
{
struct completion *eh_action;
SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
"%s result: %x\n", __func__, scmd->result));
eh_action = scmd->device->host->eh_action;
if (eh_action)
complete(eh_action);
}
/**
 * scsi_try_host_reset - ask host adapter to reset itself
 * @scmd: SCSI cmd to send host reset.
 */
static int scsi_try_host_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn;
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;

	SCSI_LOG_ERROR_RECOVERY(3,
		shost_printk(KERN_INFO, host, "Snd Host RST\n"));

	if (!hostt->eh_host_reset_handler)
		return FAILED;

	rtn = hostt->eh_host_reset_handler(scmd);

	if (rtn == SUCCESS) {
		/* Give devices time to settle after the reset. */
		if (!hostt->skip_settle_delay)
			ssleep(HOST_RESET_SETTLE_TIME);
		spin_lock_irqsave(host->host_lock, flags);
		scsi_report_bus_reset(host, scmd_channel(scmd));
		spin_unlock_irqrestore(host->host_lock, flags);
	}

	return rtn;
}

/**
 * scsi_try_bus_reset - ask host to perform a bus reset
 * @scmd: SCSI cmd to send bus reset.
 */
static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn;
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
		"%s: Snd Bus RST\n", __func__));

	if (!hostt->eh_bus_reset_handler)
		return FAILED;

	rtn = hostt->eh_bus_reset_handler(scmd);

	if (rtn == SUCCESS) {
		/* Give devices time to settle after the reset. */
		if (!hostt->skip_settle_delay)
			ssleep(BUS_RESET_SETTLE_TIME);
		spin_lock_irqsave(host->host_lock, flags);
		scsi_report_bus_reset(host, scmd_channel(scmd));
		spin_unlock_irqrestore(host->host_lock, flags);
	}

	return rtn;
}
/* Mark @sdev as reset; its next check-condition/UA is then expected. */
static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
{
	sdev->was_reset = 1;
	sdev->expecting_cc_ua = 1;
}

/**
 * scsi_try_target_reset - Ask host to perform a target reset
 * @scmd: SCSI cmd used to send a target reset
 *
 * Notes:
 *    There is no timeout for this operation. if this operation is
 *    unreliable for a given host, then the host itself needs to put a
 *    timer on it, and set the host back to a consistent state prior to
 *    returning.
 */
static int scsi_try_target_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn;
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;

	if (!hostt->eh_target_reset_handler)
		return FAILED;

	rtn = hostt->eh_target_reset_handler(scmd);
	if (rtn == SUCCESS) {
		spin_lock_irqsave(host->host_lock, flags);
		/* Flag every device on the target as reset. */
		__starget_for_each_device(scsi_target(scmd->device), NULL,
					  __scsi_report_device_reset);
		spin_unlock_irqrestore(host->host_lock, flags);
	}

	return rtn;
}
/**
 * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
 * @scmd: SCSI cmd used to send BDR
 *
 * Notes:
 *    There is no timeout for this operation. if this operation is
 *    unreliable for a given host, then the host itself needs to put a
 *    timer on it, and set the host back to a consistent state prior to
 *    returning.
 */
static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
{
	int rtn;
	struct scsi_host_template *hostt = scmd->device->host->hostt;

	if (!hostt->eh_device_reset_handler)
		return FAILED;

	rtn = hostt->eh_device_reset_handler(scmd);
	if (rtn == SUCCESS)
		/* Mark just this one device as reset. */
		__scsi_report_device_reset(scmd->device, NULL);
	return rtn;
}
/**
 * scsi_try_to_abort_cmd - Ask host to abort a SCSI command
 * @hostt: SCSI driver host template
 * @scmd: SCSI cmd used to send a target reset
 *
 * Return value:
 *	SUCCESS, FAILED, or FAST_IO_FAIL
 *
 * Notes:
 *	SUCCESS does not necessarily indicate that the command
 *	has been aborted; it only indicates that the LLDDs
 *	has cleared all references to that command.
 *	LLDDs should return FAILED only if an abort was required
 *	but could not be executed. LLDDs should return FAST_IO_FAIL
 *	if the device is temporarily unavailable (eg due to a
 *	link down on FibreChannel)
 */
static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt,
				 struct scsi_cmnd *scmd)
{
	/* A host without an abort handler cannot abort anything. */
	return hostt->eh_abort_handler ?
		hostt->eh_abort_handler(scmd) : FAILED;
}
/*
 * Escalate recovery for one command: try each strategy in turn, from
 * the least to the most disruptive, stopping at the first SUCCESS.
 */
static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
{
	if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) == SUCCESS)
		return;
	if (scsi_try_bus_device_reset(scmd) == SUCCESS)
		return;
	if (scsi_try_target_reset(scmd) == SUCCESS)
		return;
	if (scsi_try_bus_reset(scmd) == SUCCESS)
		return;
	scsi_try_host_reset(scmd);
}
/**
 * scsi_eh_prep_cmnd - Save a scsi command info as part of error recovery
 * @scmd:       SCSI command structure to hijack
 * @ses:        structure to save restore information
 * @cmnd:       CDB to send. Can be NULL if no new cmnd is needed
 * @cmnd_size:  size in bytes of @cmnd (must be <= BLK_MAX_CDB)
 * @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored)
 *
 * This function is used to save a scsi command information before re-execution
 * as part of the error recovery process.  If @sense_bytes is 0 the command
 * sent must be one that does not transfer any data.  If @sense_bytes != 0
 * @cmnd is ignored and this functions sets up a REQUEST_SENSE command
 * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer.
 */
void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
			unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
{
	struct scsi_device *sdev = scmd->device;

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	ses->cmd_len = scmd->cmd_len;
	ses->cmnd = scmd->cmnd;
	ses->data_direction = scmd->sc_data_direction;
	ses->sdb = scmd->sdb;
	ses->next_rq = scmd->request->next_rq;
	ses->result = scmd->result;
	ses->underflow = scmd->underflow;
	ses->prot_op = scmd->prot_op;

	/* Now repoint the command at the EH-private CDB buffer. */
	scmd->prot_op = SCSI_PROT_NORMAL;
	scmd->eh_eflags = 0;
	scmd->cmnd = ses->eh_cmnd;
	memset(scmd->cmnd, 0, BLK_MAX_CDB);
	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
	scmd->request->next_rq = NULL;
	scmd->result = 0;

	if (sense_bytes) {
		/* Build a REQUEST SENSE reading into scmd->sense_buffer. */
		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
					 sense_bytes);
		sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
			    scmd->sdb.length);
		scmd->sdb.table.sgl = &ses->sense_sgl;
		scmd->sc_data_direction = DMA_FROM_DEVICE;
		scmd->sdb.table.nents = 1;
		scmd->cmnd[0] = REQUEST_SENSE;
		scmd->cmnd[4] = scmd->sdb.length;
		scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	} else {
		scmd->sc_data_direction = DMA_NONE;
		if (cmnd) {
			BUG_ON(cmnd_size > BLK_MAX_CDB);
			memcpy(scmd->cmnd, cmnd, cmnd_size);
			scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
		}
	}

	scmd->underflow = 0;

	/* Old (<= SCSI-2) devices carry the LUN in CDB byte 1, bits 5-7. */
	if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
		scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
			(sdev->lun << 5 & 0xe0);

	/*
	 * Zero the sense buffer.  The scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_eh_prep_cmnd);
/**
 * scsi_eh_restore_cmnd  - Restore a scsi command info as part of error recovery
 * @scmd:       SCSI command structure to restore
 * @ses:        saved information from a coresponding call to scsi_eh_prep_cmnd
 *
 * Undo any damage done by above scsi_eh_prep_cmnd().
 */
void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
{
	/*
	 * Restore original data
	 */
	scmd->cmd_len = ses->cmd_len;
	scmd->cmnd = ses->cmnd;
	scmd->sc_data_direction = ses->data_direction;
	scmd->sdb = ses->sdb;
	scmd->request->next_rq = ses->next_rq;
	scmd->result = ses->result;
	scmd->underflow = ses->underflow;
	scmd->prot_op = ses->prot_op;
}
EXPORT_SYMBOL(scsi_eh_restore_cmnd);
/**
 * scsi_send_eh_cmnd  - submit a scsi command as part of error recovery
 * @scmd:       SCSI command structure to hijack
 * @cmnd:       CDB to send
 * @cmnd_size:  size in bytes of @cmnd
 * @timeout:    timeout for this request
 * @sense_bytes: size of sense data to copy or 0
 *
 * This function is used to send a scsi command down to a target device
 * as part of the error recovery process. See also scsi_eh_prep_cmnd() above.
 *
 * Return value:
 *    SUCCESS or FAILED or NEEDS_RETRY
 */
static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
			     int cmnd_size, int timeout, unsigned sense_bytes)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long timeleft = timeout;
	struct scsi_eh_save ses;
	const unsigned long stall_for = msecs_to_jiffies(100);
	int rtn;

retry:
	/* Hijack the command, point completion at our on-stack waiter. */
	scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
	shost->eh_action = &done;

	scsi_log_send(scmd);
	scmd->scsi_done = scsi_eh_done;
	rtn = shost->hostt->queuecommand(shost, scmd);
	if (rtn) {
		/*
		 * queuecommand refused the command: stall briefly and
		 * retry until the timeout budget is exhausted.
		 */
		if (timeleft > stall_for) {
			scsi_eh_restore_cmnd(scmd, &ses);
			timeleft -= stall_for;
			msleep(jiffies_to_msecs(stall_for));
			goto retry;
		}
		/* signal not to enter either branch of the if () below */
		timeleft = 0;
		rtn = FAILED;
	} else {
		timeleft = wait_for_completion_timeout(&done, timeout);
		rtn = SUCCESS;
	}

	shost->eh_action = NULL;

	scsi_log_completion(scmd, rtn);

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"%s timeleft: %ld\n",
			__func__, timeleft));

	/*
	 * If there is time left scsi_eh_done got called, and we will examine
	 * the actual status codes to see whether the command actually did
	 * complete normally, else if we have a zero return and no time left,
	 * the command must still be pending, so abort it and return FAILED.
	 * If we never actually managed to issue the command, because
	 * ->queuecommand() kept returning non zero, use the rtn = FAILED
	 * value above (so don't execute either branch of the if)
	 */
	if (timeleft) {
		rtn = scsi_eh_completed_normally(scmd);
		SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"%s: scsi_eh_completed_normally %x\n", __func__, rtn));

		switch (rtn) {
		case SUCCESS:
		case NEEDS_RETRY:
		case FAILED:
			break;
		case ADD_TO_MLQUEUE:
			rtn = NEEDS_RETRY;
			break;
		default:
			rtn = FAILED;
			break;
		}
	} else if (rtn != FAILED) {
		/* Timed out while pending: abort and report failure. */
		scsi_abort_eh_cmnd(scmd);
		rtn = FAILED;
	}

	scsi_eh_restore_cmnd(scmd, &ses);

	return rtn;
}
/**
 * scsi_request_sense - Request sense data from a particular target.
 * @scmd: SCSI cmd for request sense.
 *
 * Notes:
 *    Some hosts automatically obtain this information, others require
 *    that we obtain it on our own. This function will *not* return until
 *    the command either times out, or it completes.
 */
static int scsi_request_sense(struct scsi_cmnd *scmd)
{
	/* ~0 sense_bytes is clamped to SCSI_SENSE_BUFFERSIZE by
	 * scsi_eh_prep_cmnd(); the CDB argument is ignored in that mode. */
	return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
}
/*
 * scsi_eh_action - let the upper-level driver adjust a disposition
 * @scmd: command being recovered.
 * @rtn: disposition proposed by the midlayer.
 *
 * For non-BLOCK_PC (filesystem) requests, give the ULD (sd/sr/st) a
 * chance to override @rtn through its eh_action hook.  Returns the
 * possibly-updated disposition.
 */
static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
{
	struct scsi_driver *sdrv;

	if (scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
		return rtn;

	sdrv = scsi_cmd_to_driver(scmd);
	if (sdrv->eh_action)
		rtn = sdrv->eh_action(scmd, rtn);
	return rtn;
}
/**
* scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
* @scmd: Original SCSI cmd that eh has finished.
* @done_q: Queue for processed commands.
*
* Notes:
* We don't want to use the normal command completion while we are are
* still handling errors - it may cause other commands to be queued,
* and that would disturb what we are doing. Thus we really want to
* keep a list of pending commands for final completion, and once we
* are ready to leave error handling we handle completion for real.
*/
void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
{
scmd->device->host->host_failed--;
scmd->eh_eflags = 0;
list_move_tail(&scmd->eh_entry, done_q);
}
EXPORT_SYMBOL(scsi_eh_finish_cmd);
/**
 * scsi_eh_get_sense - Get device sense data.
 * @work_q: Queue of commands to process.
 * @done_q: Queue of processed commands.
 *
 * Description:
 *    See if we need to request sense information.  if so, then get it
 *    now, so we have a better idea of what to do.
 *
 * Notes:
 *    This has the unfortunate side effect that if a shost adapter does
 *    not automatically request sense information, we end up shutting
 *    it down before we request it.
 *
 *    All drivers should request sense information internally these days,
 *    so for now all I have to say is tough noogies if you end up in here.
 *
 *    XXX: Long term this code should go away, but that needs an audit of
 *         all LLDDs first.
 *
 * Return:
 *    nonzero when @work_q has been fully drained, 0 otherwise.
 */
int scsi_eh_get_sense(struct list_head *work_q,
		      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	struct Scsi_Host *shost;
	int rtn;
	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		/*
		 * skip commands flagged for cancellation (they are handled
		 * by the abort path) and ones whose sense is already known
		 */
		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
		    SCSI_SENSE_VALID(scmd))
			continue;
		shost = scmd->device->host;
		if (scsi_host_eh_past_deadline(shost)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					"%s: skip request sense, past eh deadline\n",
					current->comm));
			/* deadline hit: stop requesting sense altogether */
			break;
		}
		if (status_byte(scmd->result) != CHECK_CONDITION)
			/*
			 * don't request sense if there's no check condition
			 * status because the error we're processing isn't one
			 * that has a sense code (and some devices get
			 * confused by sense requests out of the blue)
			 */
			continue;
		SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
			"%s: requesting sense\n",
			current->comm));
		rtn = scsi_request_sense(scmd);
		if (rtn != SUCCESS)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"sense requested, result %x\n", scmd->result));
		SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd));
		rtn = scsi_decide_disposition(scmd);
		/*
		 * if the result was normal, then just pass it along to the
		 * upper level.
		 */
		if (rtn == SUCCESS)
			/* we don't want this command reissued, just
			 * finished with the sense data, so set
			 * retries to the max allowed to ensure it
			 * won't get reissued */
			scmd->retries = scmd->allowed;
		else if (rtn != NEEDS_RETRY)
			continue;
		scsi_eh_finish_cmd(scmd, done_q);
	}
	return list_empty(work_q);
}
EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
/**
 * scsi_eh_tur - Send TUR to device.
 * @scmd: &scsi_cmnd to send TUR
 *
 * Issues TEST UNIT READY at most twice; a second attempt is made only
 * when the first comes back NEEDS_RETRY.
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 */
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
	int attempts_left = 2;
	int rtn;

	do {
		rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
					scmd->device->eh_timeout, 0);
		SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"%s return: %x\n", __func__, rtn));
	} while (rtn == NEEDS_RETRY && --attempts_left > 0);

	/* exhausted NEEDS_RETRY is treated the same as SUCCESS */
	return (rtn == SUCCESS || rtn == NEEDS_RETRY) ? 0 : 1;
}
/**
 * scsi_eh_test_devices - check if devices are responding from error recovery.
 * @cmd_list: scsi commands in error recovery.
 * @work_q: queue for commands which still need more error recovery
 * @done_q: queue for commands which are finished
 * @try_stu: boolean on if a STU command should be tried in addition to TUR.
 *
 * Decription:
 *    Tests if devices are in a working state.  Commands to devices now in
 *    a working state are sent to the done_q while commands to devices which
 *    are still failing to respond are returned to the work_q for more
 *    processing.
 *
 * Return:
 *    nonzero when @work_q has been fully drained, 0 otherwise.
 **/
static int scsi_eh_test_devices(struct list_head *cmd_list,
				struct list_head *work_q,
				struct list_head *done_q, int try_stu)
{
	struct scsi_cmnd *scmd, *next;
	struct scsi_device *sdev;
	int finish_cmds;
	while (!list_empty(cmd_list)) {
		/* test once per device, using its first queued command */
		scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
		sdev = scmd->device;
		if (!try_stu) {
			if (scsi_host_eh_past_deadline(sdev->host)) {
				/* Push items back onto work_q */
				list_splice_init(cmd_list, work_q);
				SCSI_LOG_ERROR_RECOVERY(3,
					sdev_printk(KERN_INFO, sdev,
						"%s: skip test device, past eh deadline",
						current->comm));
				break;
			}
		}
		/*
		 * NOTE(review): when try_stu is set and STU succeeds but the
		 * first TUR fails, the trailing !scsi_eh_tur() issues a
		 * second TUR - reads as a deliberate retry, but confirm
		 * against upstream history.
		 */
		finish_cmds = !scsi_device_online(scmd->device) ||
			(try_stu && !scsi_eh_try_stu(scmd) &&
			 !scsi_eh_tur(scmd)) ||
			!scsi_eh_tur(scmd);
		/* dispatch every command belonging to the device just tested */
		list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
			if (scmd->device == sdev) {
				if (finish_cmds &&
				    (try_stu ||
				     scsi_eh_action(scmd, SUCCESS) == SUCCESS))
					scsi_eh_finish_cmd(scmd, done_q);
				else
					list_move_tail(&scmd->eh_entry, work_q);
			}
	}
	return list_empty(work_q);
}
/**
 * scsi_eh_abort_cmds - abort pending commands.
 * @work_q: &list_head for pending commands.
 * @done_q: &list_head for processed commands.
 *
 * Decription:
 *    Try and see whether or not it makes sense to try and abort the
 *    running command.  This only works out to be the case if we have one
 *    command that has timed out.  If the command simply failed, it makes
 *    no sense to try and abort the command, since as far as the shost
 *    adapter is concerned, it isn't running.
 *
 * Return:
 *    nonzero when @work_q has been fully drained, 0 otherwise.
 */
static int scsi_eh_abort_cmds(struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	LIST_HEAD(check_list);
	int rtn;
	struct Scsi_Host *shost;
	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		/* only commands flagged for cancellation are abortable */
		if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
			continue;
		shost = scmd->device->host;
		if (scsi_host_eh_past_deadline(shost)) {
			/* deadline hit: return aborted cmds to work_q and bail */
			list_splice_init(&check_list, work_q);
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					"%s: skip aborting cmd, past eh deadline\n",
					current->comm));
			return list_empty(work_q);
		}
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				"%s: aborting cmd\n", current->comm));
		rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
		if (rtn == FAILED) {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					"%s: aborting cmd failed\n",
					current->comm));
			/* abort failed: give up, leave remaining cmds queued */
			list_splice_init(&check_list, work_q);
			return list_empty(work_q);
		}
		scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
		if (rtn == FAST_IO_FAIL)
			/* fast-fail: complete immediately, skip device test */
			scsi_eh_finish_cmd(scmd, done_q);
		else
			/* aborted: verify the device responds afterwards */
			list_move_tail(&scmd->eh_entry, &check_list);
	}
	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
 * scsi_eh_try_stu - Send START_UNIT to device.
 * @scmd: &scsi_cmnd to send START_UNIT
 *
 * Only devices that advertise allow_restart are started.  The command
 * is issued at most twice; the second attempt happens only after a
 * NEEDS_RETRY disposition.
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 */
static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
{
	static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
	int attempt = 0;
	int rtn = NEEDS_RETRY;

	if (!scmd->device->allow_restart)
		return 1;

	while (rtn == NEEDS_RETRY && attempt++ < 2)
		rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
					scmd->device->request_queue->rq_timeout, 0);

	return rtn == SUCCESS ? 0 : 1;
}
/**
 * scsi_eh_stu - send START_UNIT if needed
 * @shost: &scsi host being recovered.
 * @work_q: &list_head for pending commands.
 * @done_q: &list_head for processed commands.
 *
 * Notes:
 *    If commands are failing due to not ready, initializing command required,
 *    try revalidating the device, which will end up sending a start unit.
 *
 * Return:
 *    nonzero when @work_q has been fully drained, 0 otherwise.
 */
static int scsi_eh_stu(struct Scsi_Host *shost,
		       struct list_head *work_q,
		       struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *stu_scmd, *next;
	struct scsi_device *sdev;
	shost_for_each_device(sdev, shost) {
		if (scsi_host_eh_past_deadline(shost)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					"%s: skip START_UNIT, past eh deadline\n",
					current->comm));
			break;
		}
		/*
		 * pick one command on this device whose valid sense data
		 * still reports failure; it carries the START_UNIT
		 */
		stu_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
			    scsi_check_sense(scmd) == FAILED ) {
				stu_scmd = scmd;
				break;
			}
		if (!stu_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3,
			sdev_printk(KERN_INFO, sdev,
				"%s: Sending START_UNIT\n",
				current->comm));
		if (!scsi_eh_try_stu(stu_scmd)) {
			/*
			 * START_UNIT worked: if the device went offline or
			 * now answers TUR, finish every command queued to it
			 * that the upper-level driver agrees is done
			 */
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(stu_scmd)) {
				list_for_each_entry_safe(scmd, next,
							  work_q, eh_entry) {
					if (scmd->device == sdev &&
					    scsi_eh_action(scmd, SUCCESS) == SUCCESS)
						scsi_eh_finish_cmd(scmd, done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					"%s: START_UNIT failed\n",
					current->comm));
		}
	}
	return list_empty(work_q);
}
/**
 * scsi_eh_bus_device_reset - send bdr if needed
 * @shost: scsi host being recovered.
 * @work_q: &list_head for pending commands.
 * @done_q: &list_head for processed commands.
 *
 * Notes:
 *    Try a bus device reset.  Still, look to see whether we have multiple
 *    devices that are jammed or not - if we have multiple devices, it
 *    makes no sense to try bus_device_reset - we really would need to try
 *    a bus_reset instead.
 *
 * Return:
 *    nonzero when @work_q has been fully drained, 0 otherwise.
 */
static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
				    struct list_head *work_q,
				    struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *bdr_scmd, *next;
	struct scsi_device *sdev;
	int rtn;
	shost_for_each_device(sdev, shost) {
		if (scsi_host_eh_past_deadline(shost)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					"%s: skip BDR, past eh deadline\n",
					current->comm));
			break;
		}
		/* any queued command for this device can carry the reset */
		bdr_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev) {
				bdr_scmd = scmd;
				break;
			}
		if (!bdr_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3,
			sdev_printk(KERN_INFO, sdev,
				"%s: Sending BDR\n", current->comm));
		rtn = scsi_try_bus_device_reset(bdr_scmd);
		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
			/*
			 * reset worked (or fast-failed): if the device is
			 * offline, fast-failing, or answers TUR, complete
			 * all of its commands the ULD does not veto
			 */
			if (!scsi_device_online(sdev) ||
			    rtn == FAST_IO_FAIL ||
			    !scsi_eh_tur(bdr_scmd)) {
				list_for_each_entry_safe(scmd, next,
							 work_q, eh_entry) {
					if (scmd->device == sdev &&
					    scsi_eh_action(scmd, rtn) != FAILED)
						scsi_eh_finish_cmd(scmd,
								   done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					"%s: BDR failed\n", current->comm));
		}
	}
	return list_empty(work_q);
}
/**
 * scsi_eh_target_reset - send target reset if needed
 * @shost: scsi host being recovered.
 * @work_q: &list_head for pending commands.
 * @done_q: &list_head for processed commands.
 *
 * Notes:
 *    Try a target reset.
 *
 * Return:
 *    nonzero when @work_q has been fully drained, 0 otherwise.
 */
static int scsi_eh_target_reset(struct Scsi_Host *shost,
				struct list_head *work_q,
				struct list_head *done_q)
{
	LIST_HEAD(tmp_list);
	LIST_HEAD(check_list);
	/* take everything off work_q; commands are sorted back below */
	list_splice_init(work_q, &tmp_list);
	while (!list_empty(&tmp_list)) {
		struct scsi_cmnd *next, *scmd;
		int rtn;
		unsigned int id;
		if (scsi_host_eh_past_deadline(shost)) {
			/* push back on work queue for further processing */
			list_splice_init(&check_list, work_q);
			list_splice_init(&tmp_list, work_q);
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					"%s: Skip target reset, past eh deadline\n",
					current->comm));
			return list_empty(work_q);
		}
		/* reset the target of the first remaining command... */
		scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
		id = scmd_id(scmd);
		SCSI_LOG_ERROR_RECOVERY(3,
			shost_printk(KERN_INFO, shost,
				"%s: Sending target reset to target %d\n",
				current->comm, id));
		rtn = scsi_try_target_reset(scmd);
		if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					"%s: Target reset failed"
					" target: %d\n",
					current->comm, id));
		/* ...then dispatch every command aimed at that target */
		list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
			if (scmd_id(scmd) != id)
				continue;
			if (rtn == SUCCESS)
				list_move_tail(&scmd->eh_entry, &check_list);
			else if (rtn == FAST_IO_FAIL)
				scsi_eh_finish_cmd(scmd, done_q);
			else
				/* push back on work queue for further processing */
				list_move(&scmd->eh_entry, work_q);
		}
	}
	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
 * scsi_eh_bus_reset - send a bus reset
 * @shost: &scsi host being recovered.
 * @work_q: &list_head for pending commands.
 * @done_q: &list_head for processed commands.
 *
 * Return:
 *    nonzero when @work_q has been fully drained, 0 otherwise.
 */
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
			     struct list_head *work_q,
			     struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *chan_scmd, *next;
	LIST_HEAD(check_list);
	unsigned int channel;
	int rtn;
	/*
	 * we really want to loop over the various channels, and do this on
	 * a channel by channel basis.  we should also check to see if any
	 * of the failed commands are on soft_reset devices, and if so, skip
	 * the reset.
	 */
	for (channel = 0; channel <= shost->max_channel; channel++) {
		if (scsi_host_eh_past_deadline(shost)) {
			/* deadline hit: re-queue tested cmds and bail out */
			list_splice_init(&check_list, work_q);
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					"%s: skip BRST, past eh deadline\n",
					current->comm));
			return list_empty(work_q);
		}
		/* find any command on this channel to carry the reset */
		chan_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (channel == scmd_channel(scmd)) {
				chan_scmd = scmd;
				break;
				/*
				 * FIXME add back in some support for
				 * soft_reset devices.
				 */
			}
		}
		if (!chan_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3,
			shost_printk(KERN_INFO, shost,
				"%s: Sending BRST chan: %d\n",
				current->comm, channel));
		rtn = scsi_try_bus_reset(chan_scmd);
		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
			/* fast-fail completes now; otherwise re-test device */
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				if (channel == scmd_channel(scmd)) {
					if (rtn == FAST_IO_FAIL)
						scsi_eh_finish_cmd(scmd,
								   done_q);
					else
						list_move_tail(&scmd->eh_entry,
							       &check_list);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					"%s: BRST failed chan: %d\n",
					current->comm, channel));
		}
	}
	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
 * scsi_eh_host_reset - send a host reset
 * @shost: host to be reset.
 * @work_q: &list_head for pending commands.
 * @done_q: &list_head for processed commands.
 *
 * Return:
 *    nonzero when @work_q has been fully drained, 0 otherwise.
 */
static int scsi_eh_host_reset(struct Scsi_Host *shost,
			      struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	LIST_HEAD(check_list);
	int rtn;
	if (!list_empty(work_q)) {
		/* any command can carry the host reset; use the first */
		scmd = list_entry(work_q->next,
				  struct scsi_cmnd, eh_entry);
		SCSI_LOG_ERROR_RECOVERY(3,
			shost_printk(KERN_INFO, shost,
				"%s: Sending HRST\n",
				current->comm));
		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS) {
			/* reset worked: re-test every pending command */
			list_splice_init(work_q, &check_list);
		} else if (rtn == FAST_IO_FAIL) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				scsi_eh_finish_cmd(scmd, done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					"%s: HRST failed\n",
					current->comm));
		}
	}
	/* try_stu=1: also attempt START UNIT while re-testing devices */
	return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
}
/**
 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
 * @work_q: &list_head for pending commands.
 * @done_q: &list_head for processed commands.
 *
 * Every command still on @work_q belongs to a device that defeated all
 * recovery attempts; mark each such device offline and finish the
 * command.
 */
static void scsi_eh_offline_sdevs(struct list_head *work_q,
				  struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
			    "not ready after error recovery\n");
		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
		/*
		 * FIXME: commands still flagged SCSI_EH_CANCEL_CMD are
		 * lost here; they receive no special handling.
		 */
		scsi_eh_finish_cmd(scmd, done_q);
	}
}
/**
 * scsi_noretry_cmd - determine if command should be failed fast
 * @scmd: SCSI cmd to examine.
 *
 * Maps the host byte (and, for DID_OK-ish outcomes, the status byte)
 * onto the request's fail-fast flags.  Callers treat the return value
 * as a boolean: nonzero means do not retry.
 */
int scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
	unsigned int hostbyte = host_byte(scmd->result);

	if (hostbyte == DID_BUS_BUSY)
		return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
	if (hostbyte == DID_PARITY)
		return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
	if (hostbyte == DID_ERROR) {
		/* reservation conflicts are never failed fast */
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			return 0;
		return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
	}
	if (hostbyte == DID_SOFT_ERROR)
		return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);

	/*
	 * DID_TIME_OUT goes straight to the type check below; every other
	 * host byte (DID_OK included) only does so on CHECK CONDITION.
	 */
	if (hostbyte != DID_TIME_OUT &&
	    status_byte(scmd->result) != CHECK_CONDITION)
		return 0;

	/*
	 * assume caller has checked sense and determined
	 * the check condition was retryable.
	 */
	if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
	    scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
		return 1;
	return 0;
}
/**
 * scsi_decide_disposition - Disposition a cmd on return from LLD.
 * @scmd: SCSI cmd to examine.
 *
 * Notes:
 *    This is *only* called when we are examining the status after sending
 *    out the actual data command.  any commands that are queued for error
 *    recovery (e.g. test_unit_ready) do *not* come through here.
 *
 *    When this routine returns failed, it means the error handler thread
 *    is woken.  In cases where the error code indicates an error that
 *    doesn't require the error handler read (i.e. we don't need to
 *    abort/reset), this function should return SUCCESS.
 *
 * Return:
 *    SUCCESS, FAILED, NEEDS_RETRY or ADD_TO_MLQUEUE.
 */
int scsi_decide_disposition(struct scsi_cmnd *scmd)
{
	int rtn;
	/*
	 * if the device is offline, then we clearly just pass the result back
	 * up to the top level.
	 */
	if (!scsi_device_online(scmd->device)) {
		SCSI_LOG_ERROR_RECOVERY(5, scmd_printk(KERN_INFO, scmd,
			"%s: device offline - report as SUCCESS\n", __func__));
		return SUCCESS;
	}
	/*
	 * first check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	switch (host_byte(scmd->result)) {
	case DID_PASSTHROUGH:
		/*
		 * no matter what, pass this through to the upper layer.
		 * nuke this special code so that it looks like we are saying
		 * did_ok.
		 */
		scmd->result &= 0xff00ffff;
		return SUCCESS;
	case DID_OK:
		/*
		 * looks good.  drop through, and check the next byte.
		 */
		break;
	case DID_ABORT:
		if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
			set_host_byte(scmd, DID_TIME_OUT);
			return SUCCESS;
		}
		/* fall through: an unscheduled abort is reported as-is */
	case DID_NO_CONNECT:
	case DID_BAD_TARGET:
		/*
		 * note - this means that we just report the status back
		 * to the top level driver, not that we actually think
		 * that it indicates SUCCESS.
		 */
		return SUCCESS;
	/*
	 * when the low level driver returns did_soft_error,
	 * it is responsible for keeping an internal retry counter
	 * in order to avoid endless loops (db)
	 *
	 * actually this is a bug in this function here.  we should
	 * be mindful of the maximum number of retries specified
	 * and not get stuck in a loop.
	 */
	case DID_SOFT_ERROR:
		goto maybe_retry;
	case DID_IMM_RETRY:
		return NEEDS_RETRY;
	case DID_REQUEUE:
		return ADD_TO_MLQUEUE;
	case DID_TRANSPORT_DISRUPTED:
		/*
		 * LLD/transport was disrupted during processing of the IO.
		 * The transport class is now blocked/blocking,
		 * and the transport will decide what to do with the IO
		 * based on its timers and recovery capablilities if
		 * there are enough retries.
		 */
		goto maybe_retry;
	case DID_TRANSPORT_FAILFAST:
		/*
		 * The transport decided to failfast the IO (most likely
		 * the fast io fail tmo fired), so send IO directly upwards.
		 */
		return SUCCESS;
	case DID_ERROR:
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			/*
			 * execute reservation conflict processing code
			 * lower down
			 */
			break;
		/* fallthrough */
	case DID_BUS_BUSY:
	case DID_PARITY:
		goto maybe_retry;
	case DID_TIME_OUT:
		/*
		 * when we scan the bus, we get timeout messages for
		 * these commands if there is no device available.
		 * other hosts report did_no_connect for the same thing.
		 */
		if ((scmd->cmnd[0] == TEST_UNIT_READY ||
		     scmd->cmnd[0] == INQUIRY)) {
			return SUCCESS;
		} else {
			return FAILED;
		}
	case DID_RESET:
		return SUCCESS;
	default:
		return FAILED;
	}
	/*
	 * next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;
	/*
	 * check the status byte to see if this indicates anything special.
	 */
	switch (status_byte(scmd->result)) {
	case QUEUE_FULL:
		scsi_handle_queue_full(scmd->device);
		/*
		 * the case of trying to send too many commands to a
		 * tagged queueing device.
		 */
		/* fall through: requeue just like BUSY */
	case BUSY:
		/*
		 * device can't talk to us at the moment.  Should only
		 * occur (SAM-3) when the task queue is empty, so will cause
		 * the empty queue handling to trigger a stall in the
		 * device.
		 */
		return ADD_TO_MLQUEUE;
	case GOOD:
		if (scmd->cmnd[0] == REPORT_LUNS)
			scmd->device->sdev_target->expecting_lun_change = 0;
		scsi_handle_queue_ramp_up(scmd->device);
		/* fall through: GOOD completes just like COMMAND_TERMINATED */
	case COMMAND_TERMINATED:
		return SUCCESS;
	case TASK_ABORTED:
		goto maybe_retry;
	case CHECK_CONDITION:
		rtn = scsi_check_sense(scmd);
		if (rtn == NEEDS_RETRY)
			goto maybe_retry;
		/* if rtn == FAILED, we have no sense information;
		 * returning FAILED will wake the error handler thread
		 * to collect the sense and redo the decide
		 * disposition */
		return rtn;
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
	case ACA_ACTIVE:
		/*
		 * who knows?  FIXME(eric)
		 */
		return SUCCESS;
	case RESERVATION_CONFLICT:
		sdev_printk(KERN_INFO, scmd->device,
			    "reservation conflict\n");
		set_host_byte(scmd, DID_NEXUS_FAILURE);
		return SUCCESS; /* causes immediate i/o error */
	default:
		return FAILED;
	}
	return FAILED;
maybe_retry:
	/* we requeue for retry because the error was retryable, and
	 * the request was not marked fast fail.  Note that above,
	 * even if the request is marked fast fail, we still requeue
	 * for queue congestion conditions (QUEUE_FULL or BUSY) */
	if ((++scmd->retries) <= scmd->allowed
	    && !scsi_noretry_cmd(scmd)) {
		return NEEDS_RETRY;
	} else {
		/*
		 * no more retries - report this one back to upper level.
		 */
		return SUCCESS;
	}
}
/*
 * Completion callback for the asynchronous door-lock request queued by
 * scsi_eh_lock_door(): nothing to report, just drop the request.
 */
static void eh_lock_door_done(struct request *req, int uptodate)
{
	struct request_queue *q = req->q;

	__blk_put_request(q, req);
}
/**
 * scsi_eh_lock_door - Prevent medium removal for the specified device
 * @sdev: SCSI device to prevent medium removal
 *
 * Locking:
 * 	We must be called from process context.
 *
 * Notes:
 * 	We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
 * 	head of the devices request queue, and continue.
 */
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
	struct request *req;
	int i;

	/*
	 * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
	 * request becomes available
	 */
	req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
	if (IS_ERR(req))
		return;

	blk_rq_set_block_pc(req);

	/* build the 6-byte PREVENT ALLOW MEDIUM REMOVAL CDB */
	req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
	for (i = 1; i < 6; i++)
		req->cmd[i] = 0;
	req->cmd[4] = SCSI_REMOVAL_PREVENT;
	req->cmd_len = COMMAND_SIZE(req->cmd[0]);

	req->cmd_flags |= REQ_QUIET;
	req->timeout = 10 * HZ;
	req->retries = 5;

	blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
}
/**
 * scsi_restart_operations - restart io operations to the specified host.
 * @shost: Host we are restarting.
 *
 * Notes:
 *    When we entered the error handler, we blocked all further i/o to
 *    this device.  we need to 'reverse' this process.
 */
static void scsi_restart_operations(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;
	/*
	 * If the door was locked, we need to insert a door lock request
	 * onto the head of the SCSI request queue for the device.  There
	 * is no point trying to lock the door of an off-line device.
	 */
	shost_for_each_device(sdev, shost) {
		if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
			scsi_eh_lock_door(sdev);
			sdev->was_reset = 0;
		}
	}
	/*
	 * next free up anything directly waiting upon the host.  this
	 * will be requests for character device operations, and also for
	 * ioctls to queued block devices.
	 */
	SCSI_LOG_ERROR_RECOVERY(3,
		shost_printk(KERN_INFO, shost, "waking up host to restart\n"));
	spin_lock_irqsave(shost->host_lock, flags);
	/*
	 * NOTE(review): a nonzero return from scsi_host_set_state() appears
	 * to mean the transition was rejected, hence the cascade RUNNING ->
	 * CANCEL -> DEL with BUG_ON as the last resort - confirm against
	 * scsi_host_set_state().
	 */
	if (scsi_host_set_state(shost, SHOST_RUNNING))
		if (scsi_host_set_state(shost, SHOST_CANCEL))
			BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
	spin_unlock_irqrestore(shost->host_lock, flags);
	wake_up(&shost->host_wait);
	/*
	 * finally we need to re-initiate requests that may be pending.  we will
	 * have had everything blocked while error handling is taking place, and
	 * now that error recovery is done, we will need to ensure that these
	 * requests are started.
	 */
	scsi_run_host_queues(shost);
	/*
	 * if eh is active and host_eh_scheduled is pending we need to re-run
	 * recovery.  we do this check after scsi_run_host_queues() to allow
	 * everything pent up since the last eh run a chance to make forward
	 * progress before we sync again.  Either we'll immediately re-run
	 * recovery or scsi_device_unbusy() will wake us again when these
	 * pending commands complete.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->host_eh_scheduled)
		if (scsi_host_set_state(shost, SHOST_RECOVERY))
			WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
	spin_unlock_irqrestore(shost->host_lock, flags);
}
/**
 * scsi_eh_ready_devs - check device ready state and recover if not.
 * @shost: host to be recovered.
 * @work_q: &list_head for pending commands.
 * @done_q: &list_head for processed commands.
 *
 * Escalation ladder: START UNIT, then bus-device reset, target reset,
 * bus reset, host reset; each stage returning nonzero means @work_q
 * drained and recovery stops.  Devices that survive every stage are
 * taken offline.
 */
void scsi_eh_ready_devs(struct Scsi_Host *shost,
			struct list_head *work_q,
			struct list_head *done_q)
{
	if (scsi_eh_stu(shost, work_q, done_q))
		return;
	if (scsi_eh_bus_device_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_target_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_bus_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_host_reset(shost, work_q, done_q))
		return;
	scsi_eh_offline_sdevs(work_q, done_q);
}
EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
/**
 * scsi_eh_flush_done_q - finish processed commands or retry them.
 * @done_q: list_head of processed commands.
 *
 * Each command parked on @done_q is either requeued for another attempt
 * (device still online, not fail-fast, retries left) or completed back
 * to the upper layers.
 */
void scsi_eh_flush_done_q(struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
		int retry = 0;

		list_del_init(&scmd->eh_entry);

		/*
		 * the retry counter is only bumped once the command is
		 * known to be eligible for another attempt at all
		 */
		if (scsi_device_online(scmd->device) &&
		    !scsi_noretry_cmd(scmd))
			retry = ++scmd->retries <= scmd->allowed;

		if (retry) {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "%s: flush retry cmd\n",
					    current->comm));
			scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
		} else {
			/*
			 * If just we got sense for the device (called
			 * scsi_eh_get_sense), scmd->result is already
			 * set, do not set DRIVER_TIMEOUT.
			 */
			if (!scmd->result)
				scmd->result |= (DRIVER_TIMEOUT << 24);
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "%s: flush finish cmd\n",
					    current->comm));
			scsi_finish_command(scmd);
		}
	}
}
EXPORT_SYMBOL(scsi_eh_flush_done_q);
/**
 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
 * @shost: Host to unjam.
 *
 * Notes:
 *    When we come in here, we *know* that all commands on the bus have
 *    either completed, failed or timed out.  we also know that no further
 *    commands are being sent to the host, so things are relatively quiet
 *    and we have freedom to fiddle with things as we wish.
 *
 *    This is only the *default* implementation.  it is possible for
 *    individual drivers to supply their own version of this function, and
 *    if the maintainer wishes to do this, it is strongly suggested that
 *    this function be taken as a template and modified.  this function
 *    was designed to correctly handle problems for about 95% of the
 *    different cases out there, and it should always provide at least a
 *    reasonable amount of error recovery.
 *
 *    Any command marked 'failed' or 'timeout' must eventually have
 *    scsi_finish_cmd() called for it.  we do all of the retry stuff
 *    here, so when we restart the host after we return it should have an
 *    empty queue.
 */
static void scsi_unjam_host(struct Scsi_Host *shost)
{
	unsigned long flags;
	LIST_HEAD(eh_work_q);
	LIST_HEAD(eh_done_q);
	/* grab the failed commands under the host lock */
	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);
	SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
	/* escalate: sense -> abort -> device/target/bus/host resets */
	if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
		if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
			scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
	spin_lock_irqsave(shost->host_lock, flags);
	/* recovery is over; clear the eh_deadline reference point */
	if (shost->eh_deadline != -1)
		shost->last_reset = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);
	scsi_eh_flush_done_q(&eh_done_q);
}
/**
 * scsi_error_handler - SCSI error handler thread
 * @data: Host for which we are running.
 *
 * Notes:
 *    This is the main error handling loop.  This is run as a kernel thread
 *    for every SCSI host and handles all error handling activity.
 */
int scsi_error_handler(void *data)
{
	struct Scsi_Host *shost = data;
	/*
	 * We use TASK_INTERRUPTIBLE so that the thread is not
	 * counted against the load average as a running process.
	 * We never actually get interrupted because kthread_run
	 * disables signal delivery for the created thread.
	 */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * sleep unless there is work (a failed or explicitly
		 * scheduled eh) AND every busy command has failed -
		 * recovery only starts once the host has quiesced
		 */
		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
		    shost->host_failed != atomic_read(&shost->host_busy)) {
			SCSI_LOG_ERROR_RECOVERY(1,
				shost_printk(KERN_INFO, shost,
					     "scsi_eh_%d: sleeping\n",
					     shost->host_no));
			schedule();
			continue;
		}
		__set_current_state(TASK_RUNNING);
		SCSI_LOG_ERROR_RECOVERY(1,
			shost_printk(KERN_INFO, shost,
				     "scsi_eh_%d: waking up %d/%d/%d\n",
				     shost->host_no, shost->host_eh_scheduled,
				     shost->host_failed,
				     atomic_read(&shost->host_busy)));
		/*
		 * We have a host that is failing for some reason.  Figure out
		 * what we need to do to get it up and online again (if we can).
		 * If we fail, we end up taking the thing offline.
		 */
		if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
			SCSI_LOG_ERROR_RECOVERY(1,
				shost_printk(KERN_ERR, shost,
					     "scsi_eh_%d: unable to autoresume\n",
					     shost->host_no));
			continue;
		}
		/* transport-specific recovery takes precedence over default */
		if (shost->transportt->eh_strategy_handler)
			shost->transportt->eh_strategy_handler(shost);
		else
			scsi_unjam_host(shost);
		/*
		 * Note - if the above fails completely, the action is to take
		 * individual devices offline and flush the queue of any
		 * outstanding requests that may have been pending.  When we
		 * restart, we restart any I/O to any other devices on the bus
		 * which are still online.
		 */
		scsi_restart_operations(shost);
		if (!shost->eh_noresume)
			scsi_autopm_put_host(shost);
	}
	__set_current_state(TASK_RUNNING);
	SCSI_LOG_ERROR_RECOVERY(1,
		shost_printk(KERN_INFO, shost,
			     "Error handler scsi_eh_%d exiting\n",
			     shost->host_no));
	shost->ehandler = NULL;
	return 0;
}
/**
 * scsi_report_bus_reset - report an observed bus reset to the midlayer
 * @shost: Host in question
 * @channel: channel on which reset was observed
 *
 * Lock status: Host lock must be held.
 *
 * Notes:
 *    Utility function for low-level drivers.  This only needs to be
 *    called if the reset is one which originates from an unknown
 *    location.  Resets originated by the mid-level itself don't need to
 *    call this, but there should be no harm.  The main purpose is to
 *    make sure that a CHECK_CONDITION is properly treated.
 */
void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
{
	struct scsi_device *sdev;

	__shost_for_each_device(sdev, shost) {
		if (sdev_channel(sdev) != channel)
			continue;
		__scsi_report_device_reset(sdev, NULL);
	}
}
EXPORT_SYMBOL(scsi_report_bus_reset);
/**
 * scsi_report_device_reset - report an observed device reset to the midlayer
 * @shost: Host in question
 * @channel: channel on which reset was observed
 * @target: target on which reset was observed
 *
 * Lock status: Host lock must be held.
 *
 * Notes:
 *    Utility function for low-level drivers.  This only needs to be
 *    called if the reset is one which originates from an unknown
 *    location.  Resets originated by the mid-level itself don't need to
 *    call this, but there should be no harm.  The main purpose is to
 *    make sure that a CHECK_CONDITION is properly treated.
 */
void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
{
	struct scsi_device *sdev;

	__shost_for_each_device(sdev, shost) {
		if (sdev_channel(sdev) != channel)
			continue;
		if (sdev_id(sdev) != target)
			continue;
		__scsi_report_device_reset(sdev, NULL);
	}
}
EXPORT_SYMBOL(scsi_report_device_reset);
/*
 * Intentionally empty completion callback: scsi_ioctl_reset() drives a
 * reset through a synthetic on-stack command, so there is nothing to
 * complete when the LLD calls scsi_done.
 */
static void
scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
{
}
/**
 * scsi_ioctl_reset: explicitly reset a host/bus/target/device
 * @dev: scsi_device to operate on
 * @arg: reset type (see sg.h)
 *
 * Return:
 *    0 on successful reset, -EACCES/-EIO/-EFAULT style negative errno
 *    otherwise.
 */
int
scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
{
	struct scsi_cmnd *scmd;
	struct Scsi_Host *shost = dev->host;
	struct request req;	/* on-stack request backing the fake command */
	unsigned long flags;
	int error = 0, rtn, val;
	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	error = get_user(val, arg);
	if (error)
		return error;
	if (scsi_autopm_get_host(shost) < 0)
		return -EIO;
	error = -EIO;
	scmd = scsi_get_command(dev, GFP_KERNEL);
	if (!scmd)
		goto out_put_autopm_host;
	/*
	 * build a zero-length synthetic command purely as a vehicle for
	 * the reset; it never goes through the block layer proper
	 */
	blk_rq_init(NULL, &req);
	scmd->request = &req;
	scmd->cmnd = req.cmd;
	scmd->scsi_done = scsi_reset_provider_done_command;
	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
	scmd->cmd_len = 0;
	scmd->sc_data_direction = DMA_BIDIRECTIONAL;
	spin_lock_irqsave(shost->host_lock, flags);
	shost->tmf_in_progress = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);
	/*
	 * each stage falls through to the next (wider) reset unless it
	 * succeeds or SG_SCSI_RESET_NO_ESCALATE suppresses escalation
	 */
	switch (val & ~SG_SCSI_RESET_NO_ESCALATE) {
	case SG_SCSI_RESET_NOTHING:
		rtn = SUCCESS;
		break;
	case SG_SCSI_RESET_DEVICE:
		rtn = scsi_try_bus_device_reset(scmd);
		if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
			break;
		/* FALLTHROUGH */
	case SG_SCSI_RESET_TARGET:
		rtn = scsi_try_target_reset(scmd);
		if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
			break;
		/* FALLTHROUGH */
	case SG_SCSI_RESET_BUS:
		rtn = scsi_try_bus_reset(scmd);
		if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
			break;
		/* FALLTHROUGH */
	case SG_SCSI_RESET_HOST:
		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	default:
		rtn = FAILED;
		break;
	}
	error = (rtn == SUCCESS) ? 0 : -EIO;
	spin_lock_irqsave(shost->host_lock, flags);
	shost->tmf_in_progress = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);
	/*
	 * be sure to wake up anyone who was sleeping or had their queue
	 * suspended while we performed the TMF.
	 */
	SCSI_LOG_ERROR_RECOVERY(3,
		shost_printk(KERN_INFO, shost,
			     "waking up host to restart after TMF\n"));
	wake_up(&shost->host_wait);
	scsi_run_host_queues(shost);
	scsi_put_command(scmd);
out_put_autopm_host:
	scsi_autopm_put_host(shost);
	return error;
}
EXPORT_SYMBOL(scsi_ioctl_reset);
/**
* scsi_normalize_sense - normalize main elements from either fixed or
* descriptor sense data format into a common format.
*
* @sense_buffer: byte array containing sense data returned by device
* @sb_len: number of valid bytes in sense_buffer
* @sshdr: pointer to instance of structure that common
* elements are written to.
*
* Notes:
* The "main elements" from sense data are: response_code, sense_key,
* asc, ascq and additional_length (only for descriptor format).
*
* Typically this function can be called after a device has
* responded to a SCSI command with the CHECK_CONDITION status.
*
* Return value:
* true if valid sense data information found, else false;
*/
/*
 * Extract the common elements (response code, sense key, asc/ascq and,
 * for descriptor format, the additional length) from either fixed or
 * descriptor format sense data into *sshdr.  Returns true when the
 * buffer carries valid sense information, false otherwise.
 */
bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
			  struct scsi_sense_hdr *sshdr)
{
	int len = sb_len;

	if (!sense_buffer || !len)
		return false;

	memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
	sshdr->response_code = sense_buffer[0] & 0x7f;

	if (!scsi_sense_valid(sshdr))
		return false;

	if (sshdr->response_code < 0x72) {
		/* fixed format sense data */
		if (len > 2)
			sshdr->sense_key = sense_buffer[2] & 0xf;
		if (len > 7) {
			int reported = sense_buffer[7] + 8;

			/* never read past what the device reported */
			if (reported < len)
				len = reported;
			if (len > 12)
				sshdr->asc = sense_buffer[12];
			if (len > 13)
				sshdr->ascq = sense_buffer[13];
		}
	} else {
		/* descriptor format sense data */
		if (len > 1)
			sshdr->sense_key = sense_buffer[1] & 0xf;
		if (len > 2)
			sshdr->asc = sense_buffer[2];
		if (len > 3)
			sshdr->ascq = sense_buffer[3];
		if (len > 7)
			sshdr->additional_length = sense_buffer[7];
	}

	return true;
}
EXPORT_SYMBOL(scsi_normalize_sense);
/*
 * Convenience wrapper: normalize the sense buffer attached to a command,
 * using the full SCSI_SENSE_BUFFERSIZE as the valid length.
 */
bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
				  struct scsi_sense_hdr *sshdr)
{
	return scsi_normalize_sense(cmd->sense_buffer,
			SCSI_SENSE_BUFFERSIZE, sshdr);
}
EXPORT_SYMBOL(scsi_command_normalize_sense);
/**
* scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format.
* @sense_buffer: byte array of descriptor format sense data
* @sb_len: number of valid bytes in sense_buffer
* @desc_type: value of descriptor type to find
* (e.g. 0 -> information)
*
* Notes:
* only valid when sense data is in descriptor format
*
* Return value:
* pointer to start of (first) descriptor if found else NULL
*/
const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
				int desc_type)
{
	int add_sen_len, add_len, desc_len, k;
	const u8 * descp;

	/* Need the 8-byte header and a non-zero additional length. */
	if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
		return NULL;
	/* 0x72/0x73 are the descriptor-format response codes; anything
	 * else cannot contain descriptors. */
	if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
		return NULL;
	/* Clamp the walk to the bytes actually present in the buffer. */
	add_sen_len = (add_sen_len < (sb_len - 8)) ?
			add_sen_len : (sb_len - 8);
	descp = &sense_buffer[8];
	for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
		descp += desc_len;
		/* descp[1] is this descriptor's additional length; -1
		 * flags a truncated trailing descriptor so the type check
		 * below still runs once before we bail out. */
		add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
		desc_len = add_len + 2;
		if (descp[0] == desc_type)
			return descp;
		if (add_len < 0) /* short descriptor ?? */
			break;
	}
	return NULL;
}
EXPORT_SYMBOL(scsi_sense_desc_find);
/**
* scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format)
* @sense_buffer: byte array of sense data
* @sb_len: number of valid bytes in sense_buffer
* @info_out: pointer to 64 integer where 8 or 4 byte information
* field will be placed if found.
*
* Return value:
* 1 if information field found, 0 if not found.
*/
int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
			    u64 * info_out)
{
	int j;
	const u8 * ucp;
	u64 ull;

	if (sb_len < 7)
		return 0;
	switch (sense_buffer[0] & 0x7f) {
	case 0x70:
	case 0x71:
		/* Fixed format: bytes 3..6 hold a 32-bit big-endian
		 * information field, used only when bit 7 of byte 0 is
		 * set (the SPC "valid" bit -- confirm against spec). */
		if (sense_buffer[0] & 0x80) {
			*info_out = (sense_buffer[3] << 24) +
				(sense_buffer[4] << 16) +
				(sense_buffer[5] << 8) + sense_buffer[6];
			return 1;
		} else
			return 0;
	case 0x72:
	case 0x73:
		/* Descriptor format: locate the information descriptor
		 * (type 0); it must have additional length 0xa, and its
		 * 8-byte big-endian payload starts at offset 4. */
		ucp = scsi_sense_desc_find(sense_buffer, sb_len,
					   0 /* info desc */);
		if (ucp && (0xa == ucp[1])) {
			ull = 0;
			for (j = 0; j < 8; ++j) {
				if (j > 0)
					ull <<= 8;
				ull |= ucp[4 + j];
			}
			*info_out = ull;
			return 1;
		} else
			return 0;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(scsi_get_sense_info_fld);
/**
* scsi_build_sense_buffer - build sense data in a buffer
* @desc: Sense format (non zero == descriptor format,
* 0 == fixed format)
* @buf: Where to build sense data
* @key: Sense key
* @asc: Additional sense code
* @ascq: Additional sense code qualifier
*
**/
/*
 * Fill @buf with minimal "current error" sense data carrying @key, @asc
 * and @ascq, in descriptor format when @desc is non-zero and fixed
 * format otherwise.  The caller supplies a zeroed (or reusable) buffer.
 */
void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
{
	if (!desc) {
		/* fixed format, current error */
		buf[0] = 0x70;
		buf[2] = key;
		buf[7] = 0xa;
		buf[12] = asc;
		buf[13] = ascq;
		return;
	}

	/* descriptor format, current error, no descriptors attached */
	buf[0] = 0x72;
	buf[1] = key;
	buf[2] = asc;
	buf[3] = ascq;
	buf[7] = 0;
}
EXPORT_SYMBOL(scsi_build_sense_buffer);
|
{
"pile_set_name": "Github"
}
|
# Fri Nov 16 19:52:40 2018 -- reformated by PCGen PrettyLST v6.08.00
# CVS $Revision: $ $Author: $ -- Thu May 12 14:50:48 2016 -- reformated by PCGen PrettyLST v6.06.00
SOURCELONG:Pathfinder Player Companion: Melee Tactics Toolbox SOURCESHORT:MTT SOURCEWEB:http://paizo.com/products/btpy9c23 SOURCEDATE:2015-03
# ORIGINAL ENTRY: Gwen T. (evilpixie87)
# ORIGINAL ENTRY DATE: 26 April 2016
# ==============================
# New Kits
# ==============================
STARTPACK:Bounty Hunter's Kit (Common) TYPE:Adventuring Kit APPLY:INSTANT EQUIPBUY:105
GEAR:Area Map QTY:1 SIZE:PC
GEAR:Caltrops QTY:1 SIZE:PC
GEAR:Chain (10 Ft) QTY:1 SIZE:PC
GEAR:Lock (Average) QTY:1 SIZE:PC
GEAR:Manacles (Medium) QTY:1 SIZE:PC
GEAR:Net QTY:1 SIZE:PC
GEAR:Sap QTY:1 SIZE:PC
STARTPACK:Bounty Hunter's Kit (Superior) TYPE:Adventuring Kit APPLY:INSTANT EQUIPBUY:380
GEAR:Area Map QTY:1 SIZE:PC
GEAR:Caltrops QTY:1 SIZE:PC
GEAR:Chain (10 Ft) QTY:1 SIZE:PC
GEAR:Footprint Book QTY:1 SIZE:PC
GEAR:Lock (Good) QTY:1 SIZE:PC
GEAR:Manacles (Masterwork/Medium) QTY:1 SIZE:PC
GEAR:Net QTY:1 SIZE:PC
GEAR:Sap QTY:1 SIZE:PC
GEAR:Tanglefoot Bag QTY:1 SIZE:PC
GEAR:Thieves' Tools (Masterwork) QTY:1 SIZE:PC
STARTPACK:Melee Contingency Kit TYPE:Adventuring Kit APPLY:INSTANT EQUIPBUY:150
GEAR:Morningstar (Cold Iron) QTY:1 SIZE:PC
GEAR:Sickle (Silver) QTY:1 SIZE:PC
GEAR:Acid (Flask) QTY:2 SIZE:PC
GEAR:Alchemist's Fire (Flask) QTY:2 SIZE:PC
GEAR:Holy Water (Flask) QTY:2 SIZE:PC
|
{
"pile_set_name": "Github"
}
|
<?php
// Font metric definition for the Times-Bold standard PDF font.
// NOTE(review): this looks like generated data in the FPDF core-font file
// format (defines $type/$name/$up/$ut/$cw) -- confirm before hand-editing.
// Widths below are advance widths in 1/1000ths of the font size, keyed by
// the single-byte character code.
$type = 'Core';       // a built-in PDF "core" font: no font file is embedded
$name = 'Times-Bold'; // PostScript font name written into the PDF
$up = -100;           // underline position (font units)
$ut = 50;             // underline thickness (font units)
$cw = array(          // per-character advance width table
	chr(0)=>250,chr(1)=>250,chr(2)=>250,chr(3)=>250,chr(4)=>250,chr(5)=>250,chr(6)=>250,chr(7)=>250,chr(8)=>250,chr(9)=>250,chr(10)=>250,chr(11)=>250,chr(12)=>250,chr(13)=>250,chr(14)=>250,chr(15)=>250,chr(16)=>250,chr(17)=>250,chr(18)=>250,chr(19)=>250,chr(20)=>250,chr(21)=>250,
	chr(22)=>250,chr(23)=>250,chr(24)=>250,chr(25)=>250,chr(26)=>250,chr(27)=>250,chr(28)=>250,chr(29)=>250,chr(30)=>250,chr(31)=>250,' '=>250,'!'=>333,'"'=>555,'#'=>500,'$'=>500,'%'=>1000,'&'=>833,'\''=>278,'('=>333,')'=>333,'*'=>500,'+'=>570,
	','=>250,'-'=>333,'.'=>250,'/'=>278,'0'=>500,'1'=>500,'2'=>500,'3'=>500,'4'=>500,'5'=>500,'6'=>500,'7'=>500,'8'=>500,'9'=>500,':'=>333,';'=>333,'<'=>570,'='=>570,'>'=>570,'?'=>500,'@'=>930,'A'=>722,
	'B'=>667,'C'=>722,'D'=>722,'E'=>667,'F'=>611,'G'=>778,'H'=>778,'I'=>389,'J'=>500,'K'=>778,'L'=>667,'M'=>944,'N'=>722,'O'=>778,'P'=>611,'Q'=>778,'R'=>722,'S'=>556,'T'=>667,'U'=>722,'V'=>722,'W'=>1000,
	'X'=>722,'Y'=>722,'Z'=>667,'['=>333,'\\'=>278,']'=>333,'^'=>581,'_'=>500,'`'=>333,'a'=>500,'b'=>556,'c'=>444,'d'=>556,'e'=>444,'f'=>333,'g'=>500,'h'=>556,'i'=>278,'j'=>333,'k'=>556,'l'=>278,'m'=>833,
	'n'=>556,'o'=>500,'p'=>556,'q'=>556,'r'=>444,'s'=>389,'t'=>333,'u'=>556,'v'=>500,'w'=>722,'x'=>500,'y'=>500,'z'=>444,'{'=>394,'|'=>220,'}'=>394,'~'=>520,chr(127)=>350,chr(128)=>500,chr(129)=>350,chr(130)=>333,chr(131)=>500,
	chr(132)=>500,chr(133)=>1000,chr(134)=>500,chr(135)=>500,chr(136)=>333,chr(137)=>1000,chr(138)=>556,chr(139)=>333,chr(140)=>1000,chr(141)=>350,chr(142)=>667,chr(143)=>350,chr(144)=>350,chr(145)=>333,chr(146)=>333,chr(147)=>500,chr(148)=>500,chr(149)=>350,chr(150)=>500,chr(151)=>1000,chr(152)=>333,chr(153)=>1000,
	chr(154)=>389,chr(155)=>333,chr(156)=>722,chr(157)=>350,chr(158)=>444,chr(159)=>722,chr(160)=>250,chr(161)=>333,chr(162)=>500,chr(163)=>500,chr(164)=>500,chr(165)=>500,chr(166)=>220,chr(167)=>500,chr(168)=>333,chr(169)=>747,chr(170)=>300,chr(171)=>500,chr(172)=>570,chr(173)=>333,chr(174)=>747,chr(175)=>333,
	chr(176)=>400,chr(177)=>570,chr(178)=>300,chr(179)=>300,chr(180)=>333,chr(181)=>556,chr(182)=>540,chr(183)=>250,chr(184)=>333,chr(185)=>300,chr(186)=>330,chr(187)=>500,chr(188)=>750,chr(189)=>750,chr(190)=>750,chr(191)=>500,chr(192)=>722,chr(193)=>722,chr(194)=>722,chr(195)=>722,chr(196)=>722,chr(197)=>722,
	chr(198)=>1000,chr(199)=>722,chr(200)=>667,chr(201)=>667,chr(202)=>667,chr(203)=>667,chr(204)=>389,chr(205)=>389,chr(206)=>389,chr(207)=>389,chr(208)=>722,chr(209)=>722,chr(210)=>778,chr(211)=>778,chr(212)=>778,chr(213)=>778,chr(214)=>778,chr(215)=>570,chr(216)=>778,chr(217)=>722,chr(218)=>722,chr(219)=>722,
	chr(220)=>722,chr(221)=>722,chr(222)=>611,chr(223)=>556,chr(224)=>500,chr(225)=>500,chr(226)=>500,chr(227)=>500,chr(228)=>500,chr(229)=>500,chr(230)=>722,chr(231)=>444,chr(232)=>444,chr(233)=>444,chr(234)=>444,chr(235)=>444,chr(236)=>278,chr(237)=>278,chr(238)=>278,chr(239)=>278,chr(240)=>500,chr(241)=>556,
	chr(242)=>500,chr(243)=>500,chr(244)=>500,chr(245)=>500,chr(246)=>500,chr(247)=>570,chr(248)=>500,chr(249)=>556,chr(250)=>556,chr(251)=>556,chr(252)=>556,chr(253)=>500,chr(254)=>556,chr(255)=>500);
?>
|
{
"pile_set_name": "Github"
}
|
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of Qt for Python.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
''' unit test for BUG #1060 '''
from PySide2.QtWidgets import QApplication
from PySide2.QtUiTools import QUiLoader
from helper import adjust_filename
class MyQUiLoader(QUiLoader):
    """Pass-through ``QUiLoader`` subclass used by the bug #1060 repro.

    Both overrides simply delegate to the base class; per the module
    docstring, merely subclassing/overriding is what the regression
    scenario requires.
    """

    def __init__(self):
        super(MyQUiLoader, self).__init__()

    def createWidget(self, *args):
        # Delegate unchanged to the base implementation.
        return super(MyQUiLoader, self).createWidget(*args)
if __name__ == "__main__":
    # Manual repro: load the .ui file that sits next to this script using
    # the subclassed loader and show the resulting widget.
    app = QApplication([])
    ui = MyQUiLoader().load(adjust_filename("bug_1060.ui", __file__))
    ui.show()
|
{
"pile_set_name": "Github"
}
|
/**
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
*
* The Apereo Foundation licenses this file to you under the Educational
* Community License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License
* at:
*
* http://opensource.org/licenses/ecl2.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package org.opencastproject.security.api;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.util.List;
import junit.framework.Assert;
/**
* Tests JAXB un/marshalling of acces control lists
*/
public class AccessControlParserTest {

  /** The ACL under test; rebuilt before each test and released afterwards. */
  private AccessControlList acl = null;

  @Before
  public void setUp() throws Exception {
    // Construct an ACL with 100 entries: role i, action j for i, j in 0..9.
    acl = new AccessControlList();
    List<AccessControlEntry> entries = acl.getEntries();
    for (int i = 0; i < 10; i++) {
      for (int j = 0; j < 10; j++) {
        // NOTE(review): by operator precedence this is (i + (j % 2)) == 0,
        // i.e. "allow" only when i == 0 and j is even. Possibly
        // ((i + j) % 2 == 0) was intended, but the assertions below use the
        // identical expression, so the tests are self-consistent either way.
        entries.add(new AccessControlEntry(Integer.toString(i), Integer.toString(j), (i + j % 2 == 0)));
      }
    }
  }

  @After
  public void tearDown() throws Exception {
    acl = null;
  }

  @Test
  public void testXmlParsing() throws Exception {
    // Get the acl as an xml string
    String xml = AccessControlParser.toXml(acl);

    // Now convert back to an acl and confirm that the roles, etc are as expected
    AccessControlList aclAfterMarshaling = AccessControlParser.parseAcl(xml);
    for (AccessControlEntry entry : aclAfterMarshaling.getEntries()) {
      int role = Integer.parseInt(entry.getRole());
      int action = Integer.parseInt(entry.getAction());
      boolean allowed = entry.isAllow();
      // Must mirror the expression used when the ACL was built in setUp().
      Assert.assertEquals(allowed, role + action % 2 == 0);
    }
  }

  @Test
  public void testJsonParsing() throws Exception {
    // Get the acl as a JSON string
    String json = AccessControlParser.toJson(acl);

    // Now convert back to an acl and confirm that the roles, etc are as expected
    AccessControlList aclAfterMarshaling = AccessControlParser.parseAcl(json);
    for (AccessControlEntry entry : aclAfterMarshaling.getEntries()) {
      int role = Integer.parseInt(entry.getRole());
      int action = Integer.parseInt(entry.getAction());
      boolean allowed = entry.isAllow();
      // Must mirror the expression used when the ACL was built in setUp().
      Assert.assertEquals(allowed, role + action % 2 == 0);
    }
  }
}
|
{
"pile_set_name": "Github"
}
|
# graceful-fs
graceful-fs functions as a drop-in replacement for the fs module,
making various improvements.
The improvements are meant to normalize behavior across different
platforms and environments, and to make filesystem access more
resilient to errors.
## Improvements over [fs module](https://nodejs.org/api/fs.html)
* Queues up `open` and `readdir` calls, and retries them once
something closes if there is an EMFILE error from too many file
descriptors.
* fixes `lchmod` for Node versions prior to 0.6.2.
* implements `fs.lutimes` if possible. Otherwise it becomes a noop.
* ignores `EINVAL` and `EPERM` errors in `chown`, `fchown` or
`lchown` if the user isn't root.
* makes `lchmod` and `lchown` become noops, if not available.
* retries reading a file if `read` results in EAGAIN error.
On Windows, it retries renaming a file for up to one second if `EACCES`
or `EPERM` error occurs, likely because antivirus software has locked
the directory.
## USAGE
```javascript
// use just like fs
var fs = require('graceful-fs')
// now go and do stuff with it...
fs.readFileSync('some-file-or-whatever')
```
## Global Patching
If you want to patch the global fs module (or any other fs-like
module) you can do this:
```javascript
// Make sure to read the caveat below.
var realFs = require('fs')
var gracefulFs = require('graceful-fs')
gracefulFs.gracefulify(realFs)
```
This should only ever be done at the top-level application layer, in
order to delay on EMFILE errors from any fs-using dependencies. You
should **not** do this in a library, because it can cause unexpected
delays in other parts of the program.
## Changes
This module is fairly stable at this point, and used by a lot of
things. That being said, because it implements a subtle behavior
change in a core part of the node API, even modest changes can be
extremely breaking, and the versioning is thus biased towards
bumping the major when in doubt.
The main change between major versions has been switching between
providing a fully-patched `fs` module vs monkey-patching the node core
builtin, and the approach by which a non-monkey-patched `fs` was
created.
The goal is to trade `EMFILE` errors for slower fs operations. So, if
you try to open a zillion files, rather than crashing, `open`
operations will be queued up and wait for something else to `close`.
There are advantages to each approach. Monkey-patching the fs means
that no `EMFILE` errors can possibly occur anywhere in your
application, because everything is using the same core `fs` module,
which is patched. However, it can also obviously cause undesirable
side-effects, especially if the module is loaded multiple times.
Implementing a separate-but-identical patched `fs` module is more
surgical (and doesn't run the risk of patching multiple times), but
also imposes the challenge of keeping in sync with the core module.
The current approach loads the `fs` module, and then creates a
lookalike object that has all the same methods, except a few that are
patched. It is safe to use in all versions of Node from 0.8 through
7.0.
### v4
* Do not monkey-patch the fs module. This module may now be used as a
drop-in dep, and users can opt into monkey-patching the fs builtin
if their app requires it.
### v3
* Monkey-patch fs, because the eval approach no longer works on recent
node.
* fixed possible type-error throw if rename fails on windows
* verify that we *never* get EMFILE errors
* Ignore ENOSYS from chmod/chown
* clarify that graceful-fs must be used as a drop-in
### v2.1.0
* Use eval rather than monkey-patching fs.
* readdir: Always sort the results
* win32: requeue a file if error has an OK status
### v2.0
* A return to monkey patching
* wrap process.cwd
### v1.1
* wrap readFile
* Wrap fs.writeFile.
* readdir protection
* Don't clobber the fs builtin
* Handle fs.read EAGAIN errors by trying again
* Expose the curOpen counter
* No-op lchown/lchmod if not implemented
* fs.rename patch only for win32
* Patch fs.rename to handle AV software on Windows
* Close #4 Chown should not fail on einval or eperm if non-root
* Fix isaacs/fstream#1 Only wrap fs one time
* Fix #3 Start at 1024 max files, then back off on EMFILE
* lutimes that doesn't blow up on Linux
* A full on-rewrite using a queue instead of just swallowing the EMFILE error
* Wrap Read/Write streams as well
### 1.0
* Update engines for node 0.6
* Be lstat-graceful on Windows
* first
|
{
"pile_set_name": "Github"
}
|
<?php
// Yii view template: renders a message inside the standard content wrapper.
use yii\web\View;
use yii\helpers\Html;
use yii\helpers\Url;
use source\libs\Resource;
use source\core\widgets\ListView;

/* @var $this yii\web\View */
/* @var $message string the markup/text rendered below.
   NOTE(review): echoed without Html::encode() -- confirm $message is
   trusted or pre-sanitized HTML, otherwise this is an XSS vector. */
?>
<div class="content-wrap">
    <div class="content">
        <?php echo $message;?>
    </div>
</div>
|
{
"pile_set_name": "Github"
}
|
namespace Eto.Drawing
{
	/// <summary>
	/// Defines generic font families that can be used on all systems
	/// </summary>
	/// <remarks>
	/// The font families here may correspond to certain fonts on each system, depending on the platform.
	///
	/// These font families are "guaranteed" to be available, mainly by using pre-installed fonts on each
	/// platform.
	/// </remarks>
	/// <copyright>(c) 2014 by Curtis Wensley</copyright>
	/// <license type="BSD-3">See LICENSE for full terms</license>
	public static class FontFamilies
	{
		// Each family is exposed as a name constant (consumed by platform
		// handlers) followed by a convenience property returning a
		// FontFamily for that name. Members are ordered consistently:
		// constant first, then property.

		/// <summary>
		/// Gets the name of the monospace system font family
		/// </summary>
		/// <remarks>
		/// Not intended to be used directly, use <see cref="FontFamilies.Monospace"/>. Used by platform handlers
		/// to determine which system font family to get
		/// </remarks>
		public const string MonospaceFamilyName = "MONOSPACE";

		/// <summary>
		/// Gets a monospace font family
		/// </summary>
		/// <returns>A font family instance for the monospace font</returns>
		public static FontFamily Monospace
		{
			get { return new FontFamily(MonospaceFamilyName); }
		}

		/// <summary>
		/// Gets the name of the sans-serif system font family
		/// </summary>
		/// <remarks>
		/// Not intended to be used directly, use <see cref="FontFamilies.Sans"/>. Used by platform handlers
		/// to determine which system font family to get
		/// </remarks>
		public const string SansFamilyName = "SANS-SERIF";

		/// <summary>
		/// Gets a sans-serif font family
		/// </summary>
		/// <returns>A font family instance for the sans font</returns>
		public static FontFamily Sans
		{
			get { return new FontFamily(SansFamilyName); }
		}

		/// <summary>
		/// Gets the name of the serif system font family
		/// </summary>
		/// <remarks>
		/// Not intended to be used directly, use <see cref="FontFamilies.Serif"/>. Used by platform handlers
		/// to determine which system font family to get
		/// </remarks>
		public const string SerifFamilyName = "SERIF";

		/// <summary>
		/// Gets a serif font family
		/// </summary>
		/// <returns>A font family instance for the serif font</returns>
		public static FontFamily Serif
		{
			get { return new FontFamily(SerifFamilyName); }
		}

		/// <summary>
		/// Gets the name of the cursive system font family
		/// </summary>
		/// <remarks>
		/// Not intended to be used directly, use <see cref="FontFamilies.Cursive"/>. Used by platform handlers
		/// to determine which system font family to get
		/// </remarks>
		public const string CursiveFamilyName = "CURSIVE";

		/// <summary>
		/// Gets a cursive font family
		/// </summary>
		/// <returns>A font family instance for the cursive font</returns>
		public static FontFamily Cursive
		{
			get { return new FontFamily(CursiveFamilyName); }
		}

		/// <summary>
		/// Gets the name of the fantasy system font family
		/// </summary>
		/// <remarks>
		/// Not intended to be used directly, use <see cref="FontFamilies.Fantasy"/>. Used by platform handlers
		/// to determine which system font family to get
		/// </remarks>
		public const string FantasyFamilyName = "FANTASY";

		/// <summary>
		/// Gets a fantasy font family
		/// </summary>
		/// <returns>A font family instance for the fantasy font</returns>
		public static FontFamily Fantasy
		{
			get { return new FontFamily(FantasyFamilyName); }
		}
	}
}
|
{
"pile_set_name": "Github"
}
|
------------------------------------------------------------------------
-- dqNextToward.decTest -- decQuad next toward rhs [754r nextafter] --
-- Copyright (c) IBM Corporation, 1981, 2008. All rights reserved. --
------------------------------------------------------------------------
-- Please see the document "General Decimal Arithmetic Testcases" --
-- at http://www2.hursley.ibm.com/decimal for the description of --
-- these testcases. --
-- --
-- These testcases are experimental ('beta' versions), and they --
-- may contain errors. They are offered on an as-is basis. In --
-- particular, achieving the same results as the tests here is not --
-- a guarantee that an implementation complies with any Standard --
-- or specification. The tests are not exhaustive. --
-- --
-- Please send comments, suggestions, and corrections to the author: --
-- Mike Cowlishaw, IBM Fellow --
-- IBM UK, PO Box 31, Birmingham Road, Warwick CV34 5JL, UK --
-- mfc@uk.ibm.com --
------------------------------------------------------------------------
version: 2.59
-- All operands and results are decQuads.
extended: 1
clamp: 1
precision: 34
maxExponent: 6144
minExponent: -6143
rounding: half_even
-- Sanity check with a scattering of numerics
dqnextt001 nexttoward 10 10 -> 10
dqnextt002 nexttoward -10 -10 -> -10
dqnextt003 nexttoward 1 10 -> 1.000000000000000000000000000000001
dqnextt004 nexttoward 1 -10 -> 0.9999999999999999999999999999999999
dqnextt005 nexttoward -1 10 -> -0.9999999999999999999999999999999999
dqnextt006 nexttoward -1 -10 -> -1.000000000000000000000000000000001
dqnextt007 nexttoward 0 10 -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt008 nexttoward 0 -10 -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt009 nexttoward 9.999999999999999999999999999999999E+6144 +Infinity -> Infinity Overflow Inexact Rounded
dqnextt010 nexttoward -9.999999999999999999999999999999999E+6144 -Infinity -> -Infinity Overflow Inexact Rounded
dqnextt011 nexttoward 9.999999999999999999999999999999999 10 -> 10.00000000000000000000000000000000
dqnextt012 nexttoward 10 9.999999999999999999999999999999999 -> 9.999999999999999999999999999999999
dqnextt013 nexttoward -9.999999999999999999999999999999999 -10 -> -10.00000000000000000000000000000000
dqnextt014 nexttoward -10 -9.999999999999999999999999999999999 -> -9.999999999999999999999999999999999
dqnextt015 nexttoward 9.999999999999999999999999999999998 10 -> 9.999999999999999999999999999999999
dqnextt016 nexttoward 10 9.999999999999999999999999999999998 -> 9.999999999999999999999999999999999
dqnextt017 nexttoward -9.999999999999999999999999999999998 -10 -> -9.999999999999999999999999999999999
dqnextt018 nexttoward -10 -9.999999999999999999999999999999998 -> -9.999999999999999999999999999999999
------- lhs=rhs
-- finites
dqnextt101 nexttoward 7 7 -> 7
dqnextt102 nexttoward -7 -7 -> -7
dqnextt103 nexttoward 75 75 -> 75
dqnextt104 nexttoward -75 -75 -> -75
dqnextt105 nexttoward 7.50 7.5 -> 7.50
dqnextt106 nexttoward -7.50 -7.50 -> -7.50
dqnextt107 nexttoward 7.500 7.5000 -> 7.500
dqnextt108 nexttoward -7.500 -7.5 -> -7.500
-- zeros
dqnextt111 nexttoward 0 0 -> 0
dqnextt112 nexttoward -0 -0 -> -0
dqnextt113 nexttoward 0E+4 0 -> 0E+4
dqnextt114 nexttoward -0E+4 -0 -> -0E+4
dqnextt115 nexttoward 0.00000000000 0.000000000000 -> 0E-11
dqnextt116 nexttoward -0.00000000000 -0.00 -> -0E-11
dqnextt117 nexttoward 0E-141 0 -> 0E-141
dqnextt118 nexttoward -0E-141 -000 -> -0E-141
-- full coefficients, alternating bits
dqnextt121 nexttoward 268268268 268268268 -> 268268268
dqnextt122 nexttoward -268268268 -268268268 -> -268268268
dqnextt123 nexttoward 134134134 134134134 -> 134134134
dqnextt124 nexttoward -134134134 -134134134 -> -134134134
-- Nmax, Nmin, Ntiny
dqnextt131 nexttoward 9.999999999999999999999999999999999E+6144 9.999999999999999999999999999999999E+6144 -> 9.999999999999999999999999999999999E+6144
dqnextt132 nexttoward 1E-6143 1E-6143 -> 1E-6143
dqnextt133 nexttoward 1.000000000000000000000000000000000E-6143 1.000000000000000000000000000000000E-6143 -> 1.000000000000000000000000000000000E-6143
dqnextt134 nexttoward 1E-6176 1E-6176 -> 1E-6176
dqnextt135 nexttoward -1E-6176 -1E-6176 -> -1E-6176
dqnextt136 nexttoward -1.000000000000000000000000000000000E-6143 -1.000000000000000000000000000000000E-6143 -> -1.000000000000000000000000000000000E-6143
dqnextt137 nexttoward -1E-6143 -1E-6143 -> -1E-6143
dqnextt138 nexttoward -9.999999999999999999999999999999999E+6144 -9.999999999999999999999999999999999E+6144 -> -9.999999999999999999999999999999999E+6144
------- lhs<rhs
dqnextt201 nexttoward 0.9999999999999999999999999999999995 Infinity -> 0.9999999999999999999999999999999996
dqnextt202 nexttoward 0.9999999999999999999999999999999996 Infinity -> 0.9999999999999999999999999999999997
dqnextt203 nexttoward 0.9999999999999999999999999999999997 Infinity -> 0.9999999999999999999999999999999998
dqnextt204 nexttoward 0.9999999999999999999999999999999998 Infinity -> 0.9999999999999999999999999999999999
dqnextt205 nexttoward 0.9999999999999999999999999999999999 Infinity -> 1.000000000000000000000000000000000
dqnextt206 nexttoward 1.000000000000000000000000000000000 Infinity -> 1.000000000000000000000000000000001
dqnextt207 nexttoward 1.0 Infinity -> 1.000000000000000000000000000000001
dqnextt208 nexttoward 1 Infinity -> 1.000000000000000000000000000000001
dqnextt209 nexttoward 1.000000000000000000000000000000001 Infinity -> 1.000000000000000000000000000000002
dqnextt210 nexttoward 1.000000000000000000000000000000002 Infinity -> 1.000000000000000000000000000000003
dqnextt211 nexttoward 1.000000000000000000000000000000003 Infinity -> 1.000000000000000000000000000000004
dqnextt212 nexttoward 1.000000000000000000000000000000004 Infinity -> 1.000000000000000000000000000000005
dqnextt213 nexttoward 1.000000000000000000000000000000005 Infinity -> 1.000000000000000000000000000000006
dqnextt214 nexttoward 1.000000000000000000000000000000006 Infinity -> 1.000000000000000000000000000000007
dqnextt215 nexttoward 1.000000000000000000000000000000007 Infinity -> 1.000000000000000000000000000000008
dqnextt216 nexttoward 1.000000000000000000000000000000008 Infinity -> 1.000000000000000000000000000000009
dqnextt217 nexttoward 1.000000000000000000000000000000009 Infinity -> 1.000000000000000000000000000000010
dqnextt218 nexttoward 1.000000000000000000000000000000010 Infinity -> 1.000000000000000000000000000000011
dqnextt219 nexttoward 1.000000000000000000000000000000011 Infinity -> 1.000000000000000000000000000000012
dqnextt221 nexttoward -0.9999999999999999999999999999999995 Infinity -> -0.9999999999999999999999999999999994
dqnextt222 nexttoward -0.9999999999999999999999999999999996 Infinity -> -0.9999999999999999999999999999999995
dqnextt223 nexttoward -0.9999999999999999999999999999999997 Infinity -> -0.9999999999999999999999999999999996
dqnextt224 nexttoward -0.9999999999999999999999999999999998 Infinity -> -0.9999999999999999999999999999999997
dqnextt225 nexttoward -0.9999999999999999999999999999999999 Infinity -> -0.9999999999999999999999999999999998
dqnextt226 nexttoward -1.000000000000000000000000000000000 Infinity -> -0.9999999999999999999999999999999999
dqnextt227 nexttoward -1.0 Infinity -> -0.9999999999999999999999999999999999
dqnextt228 nexttoward -1 Infinity -> -0.9999999999999999999999999999999999
dqnextt229 nexttoward -1.000000000000000000000000000000001 Infinity -> -1.000000000000000000000000000000000
dqnextt230 nexttoward -1.000000000000000000000000000000002 Infinity -> -1.000000000000000000000000000000001
dqnextt231 nexttoward -1.000000000000000000000000000000003 Infinity -> -1.000000000000000000000000000000002
dqnextt232 nexttoward -1.000000000000000000000000000000004 Infinity -> -1.000000000000000000000000000000003
dqnextt233 nexttoward -1.000000000000000000000000000000005 Infinity -> -1.000000000000000000000000000000004
dqnextt234 nexttoward -1.000000000000000000000000000000006 Infinity -> -1.000000000000000000000000000000005
dqnextt235 nexttoward -1.000000000000000000000000000000007 Infinity -> -1.000000000000000000000000000000006
dqnextt236 nexttoward -1.000000000000000000000000000000008 Infinity -> -1.000000000000000000000000000000007
dqnextt237 nexttoward -1.000000000000000000000000000000009 Infinity -> -1.000000000000000000000000000000008
dqnextt238 nexttoward -1.000000000000000000000000000000010 Infinity -> -1.000000000000000000000000000000009
dqnextt239 nexttoward -1.000000000000000000000000000000011 Infinity -> -1.000000000000000000000000000000010
dqnextt240 nexttoward -1.000000000000000000000000000000012 Infinity -> -1.000000000000000000000000000000011
-- Zeros
dqnextt300 nexttoward 0 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt301 nexttoward 0.00 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt302 nexttoward 0E-300 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt303 nexttoward 0E+300 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt304 nexttoward 0E+30000 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt305 nexttoward -0 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt306 nexttoward -0.00 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt307 nexttoward -0E-300 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt308 nexttoward -0E+300 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt309 nexttoward -0E+30000 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
-- specials
dqnextt350 nexttoward Inf Infinity -> Infinity
dqnextt351 nexttoward -Inf Infinity -> -9.999999999999999999999999999999999E+6144
dqnextt352 nexttoward NaN Infinity -> NaN
dqnextt353 nexttoward sNaN Infinity -> NaN Invalid_operation
dqnextt354 nexttoward NaN77 Infinity -> NaN77
dqnextt355 nexttoward sNaN88 Infinity -> NaN88 Invalid_operation
dqnextt356 nexttoward -NaN Infinity -> -NaN
dqnextt357 nexttoward -sNaN Infinity -> -NaN Invalid_operation
dqnextt358 nexttoward -NaN77 Infinity -> -NaN77
dqnextt359 nexttoward -sNaN88 Infinity -> -NaN88 Invalid_operation
-- Nmax, Nmin, Ntiny, subnormals
dqnextt370 nexttoward -9.999999999999999999999999999999999E+6144 Infinity -> -9.999999999999999999999999999999998E+6144
dqnextt371 nexttoward -9.999999999999999999999999999999998E+6144 Infinity -> -9.999999999999999999999999999999997E+6144
dqnextt372 nexttoward -1E-6143 Infinity -> -9.99999999999999999999999999999999E-6144 Underflow Subnormal Inexact Rounded
dqnextt373 nexttoward -1.000000000000000E-6143 Infinity -> -9.99999999999999999999999999999999E-6144 Underflow Subnormal Inexact Rounded
dqnextt374 nexttoward -9E-6176 Infinity -> -8E-6176 Underflow Subnormal Inexact Rounded
dqnextt375 nexttoward -9.9E-6175 Infinity -> -9.8E-6175 Underflow Subnormal Inexact Rounded
dqnextt376 nexttoward -9.99999999999999999999999999999E-6147 Infinity -> -9.99999999999999999999999999998E-6147 Underflow Subnormal Inexact Rounded
dqnextt377 nexttoward -9.99999999999999999999999999999999E-6144 Infinity -> -9.99999999999999999999999999999998E-6144 Underflow Subnormal Inexact Rounded
dqnextt378 nexttoward -9.99999999999999999999999999999998E-6144 Infinity -> -9.99999999999999999999999999999997E-6144 Underflow Subnormal Inexact Rounded
dqnextt379 nexttoward -9.99999999999999999999999999999997E-6144 Infinity -> -9.99999999999999999999999999999996E-6144 Underflow Subnormal Inexact Rounded
dqnextt380 nexttoward -0E-6176 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt381 nexttoward -1E-6176 Infinity -> -0E-6176 Underflow Subnormal Inexact Rounded Clamped
dqnextt382 nexttoward -2E-6176 Infinity -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt383 nexttoward 0E-6176 Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt384 nexttoward 1E-6176 Infinity -> 2E-6176 Underflow Subnormal Inexact Rounded
dqnextt385 nexttoward 2E-6176 Infinity -> 3E-6176 Underflow Subnormal Inexact Rounded
dqnextt386 nexttoward 10E-6176 Infinity -> 1.1E-6175 Underflow Subnormal Inexact Rounded
dqnextt387 nexttoward 100E-6176 Infinity -> 1.01E-6174 Underflow Subnormal Inexact Rounded
dqnextt388 nexttoward 100000E-6176 Infinity -> 1.00001E-6171 Underflow Subnormal Inexact Rounded
dqnextt389 nexttoward 1.00000000000000000000000000000E-6143 Infinity -> 1.000000000000000000000000000000001E-6143
dqnextt390 nexttoward 1.000000000000000000000000000000000E-6143 Infinity -> 1.000000000000000000000000000000001E-6143
dqnextt391 nexttoward 1E-6143 Infinity -> 1.000000000000000000000000000000001E-6143
dqnextt392 nexttoward 9.999999999999999999999999999999997E+6144 Infinity -> 9.999999999999999999999999999999998E+6144
dqnextt393 nexttoward 9.999999999999999999999999999999998E+6144 Infinity -> 9.999999999999999999999999999999999E+6144
dqnextt394 nexttoward 9.999999999999999999999999999999999E+6144 Infinity -> Infinity Overflow Inexact Rounded
------- lhs>rhs
dqnextt401 nexttoward 0.9999999999999999999999999999999995 -Infinity -> 0.9999999999999999999999999999999994
dqnextt402 nexttoward 0.9999999999999999999999999999999996 -Infinity -> 0.9999999999999999999999999999999995
dqnextt403 nexttoward 0.9999999999999999999999999999999997 -Infinity -> 0.9999999999999999999999999999999996
dqnextt404 nexttoward 0.9999999999999999999999999999999998 -Infinity -> 0.9999999999999999999999999999999997
dqnextt405 nexttoward 0.9999999999999999999999999999999999 -Infinity -> 0.9999999999999999999999999999999998
dqnextt406 nexttoward 1.000000000000000000000000000000000 -Infinity -> 0.9999999999999999999999999999999999
dqnextt407 nexttoward 1.0 -Infinity -> 0.9999999999999999999999999999999999
dqnextt408 nexttoward 1 -Infinity -> 0.9999999999999999999999999999999999
dqnextt409 nexttoward 1.000000000000000000000000000000001 -Infinity -> 1.000000000000000000000000000000000
dqnextt410 nexttoward 1.000000000000000000000000000000002 -Infinity -> 1.000000000000000000000000000000001
dqnextt411 nexttoward 1.000000000000000000000000000000003 -Infinity -> 1.000000000000000000000000000000002
dqnextt412 nexttoward 1.000000000000000000000000000000004 -Infinity -> 1.000000000000000000000000000000003
dqnextt413 nexttoward 1.000000000000000000000000000000005 -Infinity -> 1.000000000000000000000000000000004
dqnextt414 nexttoward 1.000000000000000000000000000000006 -Infinity -> 1.000000000000000000000000000000005
dqnextt415 nexttoward 1.000000000000000000000000000000007 -Infinity -> 1.000000000000000000000000000000006
dqnextt416 nexttoward 1.000000000000000000000000000000008 -Infinity -> 1.000000000000000000000000000000007
dqnextt417 nexttoward 1.000000000000000000000000000000009 -Infinity -> 1.000000000000000000000000000000008
dqnextt418 nexttoward 1.000000000000000000000000000000010 -Infinity -> 1.000000000000000000000000000000009
dqnextt419 nexttoward 1.000000000000000000000000000000011 -Infinity -> 1.000000000000000000000000000000010
dqnextt420 nexttoward 1.000000000000000000000000000000012 -Infinity -> 1.000000000000000000000000000000011
dqnextt421 nexttoward -0.9999999999999999999999999999999995 -Infinity -> -0.9999999999999999999999999999999996
dqnextt422 nexttoward -0.9999999999999999999999999999999996 -Infinity -> -0.9999999999999999999999999999999997
dqnextt423 nexttoward -0.9999999999999999999999999999999997 -Infinity -> -0.9999999999999999999999999999999998
dqnextt424 nexttoward -0.9999999999999999999999999999999998 -Infinity -> -0.9999999999999999999999999999999999
dqnextt425 nexttoward -0.9999999999999999999999999999999999 -Infinity -> -1.000000000000000000000000000000000
dqnextt426 nexttoward -1.000000000000000000000000000000000 -Infinity -> -1.000000000000000000000000000000001
dqnextt427 nexttoward -1.0 -Infinity -> -1.000000000000000000000000000000001
dqnextt428 nexttoward -1 -Infinity -> -1.000000000000000000000000000000001
dqnextt429 nexttoward -1.000000000000000000000000000000001 -Infinity -> -1.000000000000000000000000000000002
dqnextt430 nexttoward -1.000000000000000000000000000000002 -Infinity -> -1.000000000000000000000000000000003
dqnextt431 nexttoward -1.000000000000000000000000000000003 -Infinity -> -1.000000000000000000000000000000004
dqnextt432 nexttoward -1.000000000000000000000000000000004 -Infinity -> -1.000000000000000000000000000000005
dqnextt433 nexttoward -1.000000000000000000000000000000005 -Infinity -> -1.000000000000000000000000000000006
dqnextt434 nexttoward -1.000000000000000000000000000000006 -Infinity -> -1.000000000000000000000000000000007
dqnextt435 nexttoward -1.000000000000000000000000000000007 -Infinity -> -1.000000000000000000000000000000008
dqnextt436 nexttoward -1.000000000000000000000000000000008 -Infinity -> -1.000000000000000000000000000000009
dqnextt437 nexttoward -1.000000000000000000000000000000009 -Infinity -> -1.000000000000000000000000000000010
dqnextt438 nexttoward -1.000000000000000000000000000000010 -Infinity -> -1.000000000000000000000000000000011
dqnextt439 nexttoward -1.000000000000000000000000000000011 -Infinity -> -1.000000000000000000000000000000012
-- Zeros
dqnextt500 nexttoward -0 -Infinity -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt501 nexttoward 0 -Infinity -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt502 nexttoward 0.00 -Infinity -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt503 nexttoward -0.00 -Infinity -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt504 nexttoward 0E-300 -Infinity -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt505 nexttoward 0E+300 -Infinity -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt506 nexttoward 0E+30000 -Infinity -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt507 nexttoward -0E+30000 -Infinity -> -1E-6176 Underflow Subnormal Inexact Rounded
-- specials
dqnextt550 nexttoward Inf -Infinity -> 9.999999999999999999999999999999999E+6144
dqnextt551 nexttoward -Inf -Infinity -> -Infinity
dqnextt552 nexttoward NaN -Infinity -> NaN
dqnextt553 nexttoward sNaN -Infinity -> NaN Invalid_operation
dqnextt554 nexttoward NaN77 -Infinity -> NaN77
dqnextt555 nexttoward sNaN88 -Infinity -> NaN88 Invalid_operation
dqnextt556 nexttoward -NaN -Infinity -> -NaN
dqnextt557 nexttoward -sNaN -Infinity -> -NaN Invalid_operation
dqnextt558 nexttoward -NaN77 -Infinity -> -NaN77
dqnextt559 nexttoward -sNaN88 -Infinity -> -NaN88 Invalid_operation
-- Nmax, Nmin, Ntiny, subnormals
dqnextt670 nexttoward 9.999999999999999999999999999999999E+6144 -Infinity -> 9.999999999999999999999999999999998E+6144
dqnextt671 nexttoward 9.999999999999999999999999999999998E+6144 -Infinity -> 9.999999999999999999999999999999997E+6144
dqnextt672 nexttoward 1E-6143 -Infinity -> 9.99999999999999999999999999999999E-6144 Underflow Subnormal Inexact Rounded
dqnextt673 nexttoward 1.000000000000000000000000000000000E-6143 -Infinity -> 9.99999999999999999999999999999999E-6144 Underflow Subnormal Inexact Rounded
dqnextt674 nexttoward 9E-6176 -Infinity -> 8E-6176 Underflow Subnormal Inexact Rounded
dqnextt675 nexttoward 9.9E-6175 -Infinity -> 9.8E-6175 Underflow Subnormal Inexact Rounded
dqnextt676 nexttoward 9.99999999999999999999999999999E-6147 -Infinity -> 9.99999999999999999999999999998E-6147 Underflow Subnormal Inexact Rounded
dqnextt677 nexttoward 9.99999999999999999999999999999999E-6144 -Infinity -> 9.99999999999999999999999999999998E-6144 Underflow Subnormal Inexact Rounded
dqnextt678 nexttoward 9.99999999999999999999999999999998E-6144 -Infinity -> 9.99999999999999999999999999999997E-6144 Underflow Subnormal Inexact Rounded
dqnextt679 nexttoward 9.99999999999999999999999999999997E-6144 -Infinity -> 9.99999999999999999999999999999996E-6144 Underflow Subnormal Inexact Rounded
dqnextt680 nexttoward 0E-6176 -Infinity -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt681 nexttoward 1E-6176 -Infinity -> 0E-6176 Underflow Subnormal Inexact Rounded Clamped
dqnextt682 nexttoward 2E-6176 -Infinity -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt683 nexttoward -0E-6176 -Infinity -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt684 nexttoward -1E-6176 -Infinity -> -2E-6176 Underflow Subnormal Inexact Rounded
dqnextt685 nexttoward -2E-6176 -Infinity -> -3E-6176 Underflow Subnormal Inexact Rounded
dqnextt686 nexttoward -10E-6176 -Infinity -> -1.1E-6175 Underflow Subnormal Inexact Rounded
dqnextt687 nexttoward -100E-6176 -Infinity -> -1.01E-6174 Underflow Subnormal Inexact Rounded
dqnextt688 nexttoward -100000E-6176 -Infinity -> -1.00001E-6171 Underflow Subnormal Inexact Rounded
dqnextt689 nexttoward -1.00000000000000000000000000000E-6143 -Infinity -> -1.000000000000000000000000000000001E-6143
dqnextt690 nexttoward -1.000000000000000000000000000000000E-6143 -Infinity -> -1.000000000000000000000000000000001E-6143
dqnextt691 nexttoward -1E-6143 -Infinity -> -1.000000000000000000000000000000001E-6143
dqnextt692 nexttoward -9.999999999999999999999999999999998E+6144 -Infinity -> -9.999999999999999999999999999999999E+6144
dqnextt693 nexttoward -9.999999999999999999999999999999999E+6144 -Infinity -> -Infinity Overflow Inexact Rounded
------- Specials
dqnextt780 nexttoward -Inf -Inf -> -Infinity
dqnextt781 nexttoward -Inf -1000 -> -9.999999999999999999999999999999999E+6144
dqnextt782 nexttoward -Inf -1 -> -9.999999999999999999999999999999999E+6144
dqnextt783 nexttoward -Inf -0 -> -9.999999999999999999999999999999999E+6144
dqnextt784 nexttoward -Inf 0 -> -9.999999999999999999999999999999999E+6144
dqnextt785 nexttoward -Inf 1 -> -9.999999999999999999999999999999999E+6144
dqnextt786 nexttoward -Inf 1000 -> -9.999999999999999999999999999999999E+6144
dqnextt787 nexttoward -1000 -Inf -> -1000.000000000000000000000000000001
dqnextt788 nexttoward -Inf -Inf -> -Infinity
dqnextt789 nexttoward -1 -Inf -> -1.000000000000000000000000000000001
dqnextt790 nexttoward -0 -Inf -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt791 nexttoward 0 -Inf -> -1E-6176 Underflow Subnormal Inexact Rounded
dqnextt792 nexttoward 1 -Inf -> 0.9999999999999999999999999999999999
dqnextt793 nexttoward 1000 -Inf -> 999.9999999999999999999999999999999
dqnextt794 nexttoward Inf -Inf -> 9.999999999999999999999999999999999E+6144
dqnextt800 nexttoward Inf -Inf -> 9.999999999999999999999999999999999E+6144
dqnextt801 nexttoward Inf -1000 -> 9.999999999999999999999999999999999E+6144
dqnextt802 nexttoward Inf -1 -> 9.999999999999999999999999999999999E+6144
dqnextt803 nexttoward Inf -0 -> 9.999999999999999999999999999999999E+6144
dqnextt804 nexttoward Inf 0 -> 9.999999999999999999999999999999999E+6144
dqnextt805 nexttoward Inf 1 -> 9.999999999999999999999999999999999E+6144
dqnextt806 nexttoward Inf 1000 -> 9.999999999999999999999999999999999E+6144
dqnextt807 nexttoward Inf Inf -> Infinity
dqnextt808 nexttoward -1000 Inf -> -999.9999999999999999999999999999999
dqnextt809 nexttoward -Inf Inf -> -9.999999999999999999999999999999999E+6144
dqnextt810 nexttoward -1 Inf -> -0.9999999999999999999999999999999999
dqnextt811 nexttoward -0 Inf -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt812 nexttoward 0 Inf -> 1E-6176 Underflow Subnormal Inexact Rounded
dqnextt813 nexttoward 1 Inf -> 1.000000000000000000000000000000001
dqnextt814 nexttoward 1000 Inf -> 1000.000000000000000000000000000001
dqnextt815 nexttoward Inf Inf -> Infinity
dqnextt821 nexttoward NaN -Inf -> NaN
dqnextt822 nexttoward NaN -1000 -> NaN
dqnextt823 nexttoward NaN -1 -> NaN
dqnextt824 nexttoward NaN -0 -> NaN
dqnextt825 nexttoward NaN 0 -> NaN
dqnextt826 nexttoward NaN 1 -> NaN
dqnextt827 nexttoward NaN 1000 -> NaN
dqnextt828 nexttoward NaN Inf -> NaN
dqnextt829 nexttoward NaN NaN -> NaN
dqnextt830 nexttoward -Inf NaN -> NaN
dqnextt831 nexttoward -1000 NaN -> NaN
dqnextt832 nexttoward -1 NaN -> NaN
dqnextt833 nexttoward -0 NaN -> NaN
dqnextt834 nexttoward 0 NaN -> NaN
dqnextt835 nexttoward 1 NaN -> NaN
dqnextt836 nexttoward 1000 NaN -> NaN
dqnextt837 nexttoward Inf NaN -> NaN
dqnextt841 nexttoward sNaN -Inf -> NaN Invalid_operation
dqnextt842 nexttoward sNaN -1000 -> NaN Invalid_operation
dqnextt843 nexttoward sNaN -1 -> NaN Invalid_operation
dqnextt844 nexttoward sNaN -0 -> NaN Invalid_operation
dqnextt845 nexttoward sNaN 0 -> NaN Invalid_operation
dqnextt846 nexttoward sNaN 1 -> NaN Invalid_operation
dqnextt847 nexttoward sNaN 1000 -> NaN Invalid_operation
dqnextt848 nexttoward sNaN NaN -> NaN Invalid_operation
dqnextt849 nexttoward sNaN sNaN -> NaN Invalid_operation
dqnextt850 nexttoward NaN sNaN -> NaN Invalid_operation
dqnextt851 nexttoward -Inf sNaN -> NaN Invalid_operation
dqnextt852 nexttoward -1000 sNaN -> NaN Invalid_operation
dqnextt853 nexttoward -1 sNaN -> NaN Invalid_operation
dqnextt854 nexttoward -0 sNaN -> NaN Invalid_operation
dqnextt855 nexttoward 0 sNaN -> NaN Invalid_operation
dqnextt856 nexttoward 1 sNaN -> NaN Invalid_operation
dqnextt857 nexttoward 1000 sNaN -> NaN Invalid_operation
dqnextt858 nexttoward Inf sNaN -> NaN Invalid_operation
dqnextt859 nexttoward NaN sNaN -> NaN Invalid_operation
-- propagating NaNs
dqnextt861 nexttoward NaN1 -Inf -> NaN1
dqnextt862 nexttoward +NaN2 -1000 -> NaN2
dqnextt863 nexttoward NaN3 1000 -> NaN3
dqnextt864 nexttoward NaN4 Inf -> NaN4
dqnextt865 nexttoward NaN5 +NaN6 -> NaN5
dqnextt866 nexttoward -Inf NaN7 -> NaN7
dqnextt867 nexttoward -1000 NaN8 -> NaN8
dqnextt868 nexttoward 1000 NaN9 -> NaN9
dqnextt869 nexttoward Inf +NaN10 -> NaN10
dqnextt871 nexttoward sNaN11 -Inf -> NaN11 Invalid_operation
dqnextt872 nexttoward sNaN12 -1000 -> NaN12 Invalid_operation
dqnextt873 nexttoward sNaN13 1000 -> NaN13 Invalid_operation
dqnextt874 nexttoward sNaN14 NaN17 -> NaN14 Invalid_operation
dqnextt875 nexttoward sNaN15 sNaN18 -> NaN15 Invalid_operation
dqnextt876 nexttoward NaN16 sNaN19 -> NaN19 Invalid_operation
dqnextt877 nexttoward -Inf +sNaN20 -> NaN20 Invalid_operation
dqnextt878 nexttoward -1000 sNaN21 -> NaN21 Invalid_operation
dqnextt879 nexttoward 1000 sNaN22 -> NaN22 Invalid_operation
dqnextt880 nexttoward Inf sNaN23 -> NaN23 Invalid_operation
dqnextt881 nexttoward +NaN25 +sNaN24 -> NaN24 Invalid_operation
dqnextt882 nexttoward -NaN26 NaN28 -> -NaN26
dqnextt883 nexttoward -sNaN27 sNaN29 -> -NaN27 Invalid_operation
dqnextt884 nexttoward 1000 -NaN30 -> -NaN30
dqnextt885 nexttoward 1000 -sNaN31 -> -NaN31 Invalid_operation
-- Null tests
dqnextt900 nexttoward 1 # -> NaN Invalid_operation
dqnextt901 nexttoward # 1 -> NaN Invalid_operation
|
{
"pile_set_name": "Github"
}
|
// Karma configuration file, see link for more information
// https://karma-runner.github.io/0.13/config/configuration-file.html
module.exports = function (config) {
config.set({
basePath: '',
frameworks: ['jasmine', 'angular-cli'],
plugins: [
require('karma-jasmine'),
require('karma-chrome-launcher'),
require('karma-remap-istanbul'),
require('angular-cli/plugins/karma')
],
files: [
{ pattern: './src/test.ts', watched: false }
],
preprocessors: {
'./src/test.ts': ['angular-cli']
},
mime: {
'text/x-typescript': ['ts','tsx']
},
remapIstanbulReporter: {
reports: {
html: 'coverage',
lcovonly: './coverage/coverage.lcov'
}
},
angularCli: {
config: './angular-cli.json',
environment: 'dev'
},
reporters: config.angularCli && config.angularCli.codeCoverage
? ['progress', 'karma-remap-istanbul']
: ['progress'],
port: 9876,
colors: true,
logLevel: config.LOG_INFO,
autoWatch: true,
browsers: ['Chrome'],
singleRun: false
});
};
|
{
"pile_set_name": "Github"
}
|
module MAC_SHA256
open FStar.Seq
open FStar.Error
open Mem
open TLSConstants
open TLSInfo
open TLSError
module B = FStar.Bytes
// idealizing HMAC
// for concreteness; the rest of the module is parametric in a
// The single concrete MAC algorithm used throughout this module.
let a = HMac Hashing.Spec.SHA2_256
// Indexes are restricted to TLS 1.2 ids whose record algorithm is not AEAD.
type id = i:id { ID12? i /\ ~(AEAD? (aeAlg_of_id i)) }
let alg (i:id) = macAlg_of_id i
// Payloads to be MACed are plain bytes.
type text = B.bytes
// Tags and key representations are fixed-length, sized by the MAC algorithm.
type tag (i:id) = B.lbytes32 (macSize (alg i))
type keyrepr (i:id) = B.lbytes32 (macSize (alg i))
type key (i:id) = keyrepr i
// TBD in Encode?
// Authenticity predicate on payloads; trivially True for now.
type good (i:id) (b:B.bytes) = True
// we keep the tag in case we want to enforce tag authentication
// A log entry pairs a tag with the (good) payload it authenticated.
type entry (i:id) = | Entry: t:tag i -> p:B.bytes { good i p } -> entry i
// readers and writers share the same state: a log of MACed messages
(*
 * AR: similar to MAC changes, region is of type rgn and log is a ref.
 *)
// State carries the MAC key plus a log reference allocated in the
// writer's region (the refinement pins the log's frame to that region).
noeq type state (i:id) (rw:rw) = | State:
#region:rgn -> // the region of the *writer*
key: key i ->
log: ref (seq (entry i)){(HyperStack.frameOf log) = region} ->
state i rw
// Role-indexed abbreviations over the shared state.
private type writer i = s:state i Writer
private type reader i = s:state i Reader
// Key generation: samples a fresh MAC key, allocates a sub-region of
// the given eternal parent plus a shared (initially empty) log there,
// and returns a reader/writer pair over the same key, region, and log.
val gen: w:rid{is_eternal_region w}
-> i:id
-> St (reader i * writer i) //TODO: a more complete spec here
let gen writer_parent i =
let kv = CoreCrypto.random32 (macKeySize a) in // fresh random key bytes
let writer_r = new_region writer_parent in // region owning the shared log
let log = ralloc writer_r Seq.empty in // no messages MACed yet
State #i #Reader #writer_r kv log,
State #i #Writer #writer_r kv log
// MAC a good payload: computes the concrete HMAC tag over p and appends
// the (tag, payload) entry to the writer's log, as captured by the
// ensures clause below.
val mac: i:id -> wr:writer i -> p:B.bytes { good i p } -> ST (tag i)
(requires (fun h0 -> True))
(ensures (fun h0 t h1 ->
modifies (Set.singleton wr.region) h0 h1 /\ // skipping modifies rref, as the region contains only one ref
sel h1 wr.log == snoc (sel h0 wr.log) (Entry t p)))
(*
 * AR: similar to MAC, had to add a recall on wr.log.
 *)
let mac i wr p =
let t : tag i = HMAC.tls_mac a wr.key p in // concrete HMAC-SHA256 tag
recall wr.log;
wr.log := snoc !wr.log (Entry #i t p); // We log every authenticated texts, with their index and resulting tag
t
// Does a log entry record exactly this payload? (the stored tag is ignored)
val matches: i:id -> p:text -> entry i -> Tot bool
let matches i p (Entry _ p') = p = p'
// Verification: returns true only when the concrete HMAC check passes
// AND the payload appears in the log; hence success implies the payload
// is good, per the ensures clause (the idealization step).
val verify: i:id -> rd:reader i -> p:B.bytes -> t:tag i -> ST bool
(requires (fun h0 -> True))
(ensures (fun h0 b h1 -> modifies Set.empty h0 h1 /\ (b ==> good i p)))
let verify i rd p t =
let x = HMAC.tls_macVerify a rd.key p t in // concrete verification result
let l = !rd.log in // snapshot of all previously MACed entries
// We use the log to correct any verification errors
x &&
Some? (seq_find (matches i p) l)
|
{
"pile_set_name": "Github"
}
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef STORAGE_LEVELDB_DB_BUILDER_H_
#define STORAGE_LEVELDB_DB_BUILDER_H_

// std::string appears directly in BuildTable's signature; include it
// explicitly rather than relying on a transitive include via status.h.
#include <string>

#include "leveldb/status.h"

namespace leveldb {

// Forward declarations: only pointers/references to these types appear
// in this header, so their full definitions are not required here.
struct Options;
struct FileMetaData;

class Env;
class Iterator;
class TableCache;
class VersionEdit;

// Build a Table file from the contents of *iter.  The generated file
// will be named according to meta->number.  On success, the rest of
// *meta will be filled with metadata about the generated table.
// If no data is present in *iter, meta->file_size will be set to
// zero, and no Table file will be produced.
extern Status BuildTable(const std::string& dbname,
                         Env* env,
                         const Options& options,
                         TableCache* table_cache,
                         Iterator* iter,
                         FileMetaData* meta);

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_DB_BUILDER_H_
|
{
"pile_set_name": "Github"
}
|
# Generated by superflore -- DO NOT EDIT
#
# Copyright Open Source Robotics Foundation
inherit ros_distro_dashing
inherit ros_superflore_generated
DESCRIPTION = "The auto-magic functions for ease to use of the ament buildsystem in CMake."
AUTHOR = "Dirk Thomas <dthomas@osrfoundation.org>"
HOMEPAGE = "https://wiki.ros.org"
SECTION = "devel"
LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://package.xml;beginline=8;endline=8;md5=12c26a18c7f493fdc7e8a93b16b7c04f"
ROS_CN = "ament_cmake"
ROS_BPN = "ament_cmake_auto"
ROS_BUILD_DEPENDS = ""
ROS_BUILDTOOL_DEPENDS = " \
ament-cmake-native \
"
ROS_EXPORT_DEPENDS = ""
ROS_BUILDTOOL_EXPORT_DEPENDS = " \
ament-cmake-native \
"
ROS_EXEC_DEPENDS = ""
# Currently informational only -- see http://www.ros.org/reps/rep-0149.html#dependency-tags.
ROS_TEST_DEPENDS = ""
DEPENDS = "${ROS_BUILD_DEPENDS} ${ROS_BUILDTOOL_DEPENDS}"
# Bitbake doesn't support the "export" concept, so build them as if we needed them to build this package (even though we actually
# don't) so that they're guaranteed to have been staged should this package appear in another's DEPENDS.
DEPENDS += "${ROS_EXPORT_DEPENDS} ${ROS_BUILDTOOL_EXPORT_DEPENDS}"
RDEPENDS_${PN} += "${ROS_EXEC_DEPENDS}"
# matches with: https://github.com/ros2-gbp/ament_cmake-release/archive/release/dashing/ament_cmake_auto/0.7.5-1.tar.gz
ROS_BRANCH ?= "branch=release/dashing/ament_cmake_auto"
SRC_URI = "git://github.com/ros2-gbp/ament_cmake-release;${ROS_BRANCH};protocol=https"
SRCREV = "187a255e39b7c28ad67009b1fa4e5b0b485ae961"
S = "${WORKDIR}/git"
ROS_BUILD_TYPE = "ament_cmake"
inherit ros_${ROS_BUILD_TYPE}
|
{
"pile_set_name": "Github"
}
|
all: awk.pdf

# Build the PDF with latexmk driving XeLaTeX; -use-make lets latexmk
# invoke make for any missing generated prerequisites.
awk.pdf: *.tex
	latexmk -pdf -pdflatex="xelatex" -use-make awk.tex

# Remove all latexmk-generated files, including the final PDF (-CA).
clean:
	latexmk -CA

# Only 'all' and 'clean' are phony.  awk.pdf is a real output file, so
# it must not be declared .PHONY: doing so forced an unconditional
# rebuild on every invocation, defeating the *.tex prerequisite check.
.PHONY: all clean
|
{
"pile_set_name": "Github"
}
|
@import "scripts/common.js";
@import "scripts/MochaJSDelegate.js";
// Init
// Sketch "init" command entry point: delegates to the shared MD controller.
function commandInit(context) {
  MD.init(context, "init");
}
// Buttons
// "Generate pattern" button handler: delegates to the shared MD controller.
function drawPattern(context) {
  MD.init(context, 'generate-pattern');
}
|
{
"pile_set_name": "Github"
}
|
// Per-module configuration — presumably read inside the "foo" module via
// module.config() (RequireJS config API); verify against foo's source.
require.config({
    config: {
        foo: {
            related: 'bar'
        }
    }
});

// Load "foo" and assert it exposes both its own name and the configured
// "related" value. doh is the Dojo Objective Harness test runner.
require(["foo"], function (foo) {
    doh.register(
        "specialDeps",
        [
            function specialDeps(t) {
                t.is("foo", foo.name);
                t.is("bar", foo.related);
            }
        ]
    );
    doh.run();
});
|
{
"pile_set_name": "Github"
}
|
<?php
/**
 * Profile action that lets a user send registration invites
 * (by e-mail and via shareable links) from their own profile page.
 */
class actionUsersProfileInvites extends cmsAction {

    public $lock_explicit_call = true;

    /**
     * Renders the invites form and, on successful submit, sends the
     * invites and shows a per-address results page.
     *
     * @param array $profile Profile record; must belong to the current user
     * @return string Rendered template HTML
     */
    public function run($profile) {

        // Access check: only the profile owner may manage invites
        if (!$this->is_own_profile) {
            cmsCore::error404();
        }

        // Nothing to do when the user has no invites left
        if (!$profile['invites_count']) {
            $this->redirectToAction($profile['id']);
        }

        // Was the form submitted?
        $is_submitted = $this->request->has('submit');

        $form = new cmsForm();

        $fieldset_id = $form->addFieldset(sprintf(LANG_USERS_INVITES_COUNT, html_spellcount($profile['invites_count'], LANG_USERS_INVITES_SPELLCOUNT)));

        // Multiple invites remaining: textarea accepting a list of e-mails
        if ($profile['invites_count'] > 1) {
            $form->addField($fieldset_id, new fieldText('emails', array(
                'title'         => LANG_USERS_INVITES_EMAILS,
                'hint'          => LANG_USERS_INVITES_EMAILS_HINT,
                'is_strip_tags' => true,
                'rules'         => array(
                    array('required')
                )
            )));
        }

        // Exactly one invite remaining: a single validated e-mail field
        if ($profile['invites_count'] == 1) {
            $form->addField($fieldset_id, new fieldString('emails', array(
                'title' => LANG_USERS_INVITES_EMAIL,
                'rules' => array(
                    array('required'),
                    array('email')
                )
            )));
        }

        // Read-only, click-to-select fields holding shareable invite links
        $fieldset_id = $form->addFieldset(LANG_USERS_INVITES_LINKS);

        $invites = $this->model_auth->getUserInvites($this->cms_user->id);

        foreach ($invites as $invite) {
            $form->addField($fieldset_id, new fieldString('invite:'.$invite['id'], array(
                'default'    => $invite['page_url'],
                'attributes' => array(
                    'readonly' => '',
                    'onclick'  => '$(this).select();'
                )
            )));
        }

        $input = array();

        if ($is_submitted) {

            // Parse the form and extract the submitted fields
            $input = $form->parse($this->request, $is_submitted);

            // Validate the submitted values
            $errors = $form->validate($this, $input);

            if (!$errors) {

                $results = $this->sendInvites($profile, $input['emails']);

                return $this->cms_template->render('profile_invites_results', array(
                    'id'      => $profile['id'],
                    'profile' => $profile,
                    'results' => $results
                ));

            } else {
                cmsUser::addSessionMessage(LANG_FORM_ERRORS, 'error');
            }

        }

        return $this->cms_template->render('profile_invites', array(
            'id'      => $profile['id'],
            'profile' => $profile,
            'invites' => $invites,
            'form'    => $form,
            'input'   => $input,
            'errors'  => isset($errors) ? $errors : false
        ));

    }

    /**
     * Sends invite e-mails to each address in a separated list, consuming
     * one invite code per successful send. Processing stops once the total
     * number of handled addresses (success + failed) reaches the profile's
     * remaining invite count.
     *
     * @param array  $profile     Profile of the inviting user
     * @param string $emails_list Raw list of e-mail addresses
     * @return array ['success' => [email => true], 'failed' => [email => error message]]
     */
    private function sendInvites($profile, $emails_list) {

        $results = array(
            'success' => array(),
            'failed'  => array()
        );

        $emails = string_explode_list($emails_list);

        foreach ($emails as $email) {

            // Skip syntactically invalid addresses
            if ($this->validate_email($email) !== true) {
                $results['failed'][$email] = ERR_VALIDATE_EMAIL;
                continue;
            }

            // Skip addresses that already belong to a registered user
            if ($this->model->getUserByEmail($email)) {
                $results['failed'][$email] = LANG_REG_EMAIL_EXISTS;
                continue;
            }

            // Skip addresses rejected by the auth controller's policy
            if (!$this->controller_auth->isEmailAllowed($email)) {
                $results['failed'][$email] = LANG_AUTH_RESTRICTED_EMAILS;
                continue;
            }

            $invite = $this->model_auth->getNextInvite($this->cms_user->id);

            $to     = array('email' => $email, 'name' => $email);
            $letter = array('name' => 'users_invite');

            $this->controller_messages->sendEmail($to, $letter, array(
                'nickname' => $this->cms_user->nickname,
                'code'     => $invite['code'],
                'page_url' => href_to_abs('auth', 'register') . "?inv={$invite['code']}"
            ));

            $results['success'][$email] = true;

            $this->model_auth->markInviteSended($invite['id'], $this->cms_user->id, $email);

            // NOTE: failed attempts also count against the quota for this run
            if ((count($results['success']) + count($results['failed'])) >= $profile['invites_count']) {
                break;
            }

        }

        return $results;

    }

}
|
{
"pile_set_name": "Github"
}
|
config BE2ISCSI
tristate "Emulex 10Gbps iSCSI - BladeEngine 2"
depends on PCI && SCSI && NET
select SCSI_ISCSI_ATTRS
select ISCSI_BOOT_SYSFS
select IRQ_POLL
help
This driver implements the iSCSI functionality for Emulex
10Gbps Storage adapter - BladeEngine 2.
|
{
"pile_set_name": "Github"
}
|
import Resolver from './helpers/Resolver'
// Locale-specific name fields on Location that should resolve to null
// rather than undefined when absent.
const LOCALIZED_NAME_FIELDS = [
  'nameEN',
  'nameDE',
  'nameFR',
  'nameNL',
  'nameIT',
  'nameES',
  'namePT',
  'namePL',
  'nameRU',
]

export default {
  Location: {
    ...Resolver('Location', { undefinedToNull: LOCALIZED_NAME_FIELDS }),
  },
}
|
{
"pile_set_name": "Github"
}
|
// Copyright (c) 2020 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scorch
import (
"sync"
"sync/atomic"
"testing"
"github.com/blevesearch/bleve/document"
"github.com/blevesearch/bleve/index"
)
// TestObsoleteSegmentMergeIntroduction verifies that when a file-merge's
// resulting segment is fully obsoleted by a batch introduced while the
// merge task is waiting to be introduced, the merged segment is skipped
// instead of being added to the root (observable via the
// TotFileMergeIntroductionsObsoleted stat and a single-segment root).
func TestObsoleteSegmentMergeIntroduction(t *testing.T) {
	testConfig := CreateConfig("TestObsoleteSegmentMergeIntroduction")
	err := InitTest(testConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := DestroyTest(testConfig)
		if err != nil {
			t.Fatal(err)
		}
	}()

	// WaitGroups used as one-shot ordering signals between the event
	// callback (merger goroutine) and this test goroutine:
	//   introComplete      - the obsoleting batch has been introduced
	//   mergeIntroStart    - the merger started introducing its segment
	//   mergeIntroComplete - the merge task introduction finished
	var introComplete, mergeIntroStart, mergeIntroComplete sync.WaitGroup
	introComplete.Add(1)
	mergeIntroStart.Add(1)
	mergeIntroComplete.Add(1)

	// NOTE(review): segIntroCompleted is incremented from the event
	// callback without synchronization — this assumes batch-introduction
	// callbacks never fire concurrently; confirm against the introducer.
	var segIntroCompleted int
	RegistryEventCallbacks["test"] = func(e Event) {
		if e.Kind == EventKindBatchIntroduction {
			segIntroCompleted++
			if segIntroCompleted == 3 {
				// all 3 segments introduced
				introComplete.Done()
			}
		} else if e.Kind == EventKindMergeTaskIntroductionStart {
			// signal the start of merge task introduction so that
			// we can introduce a new batch which obsoletes the
			// merged segment's contents.
			mergeIntroStart.Done()
			// hold the merge task introduction until the merged segment contents
			// are obsoleted with the next batch/segment introduction.
			introComplete.Wait()
		} else if e.Kind == EventKindMergeTaskIntroduction {
			// signal the completion of the merge task introduction.
			mergeIntroComplete.Done()
		}
	}

	// Copy the shared test config so attaching our callback does not
	// mutate it for other tests.
	ourConfig := make(map[string]interface{}, len(testConfig))
	for k, v := range testConfig {
		ourConfig[k] = v
	}
	ourConfig["eventCallbackName"] = "test"

	analysisQueue := index.NewAnalysisQueue(1)
	idx, err := NewScorch(Name, ourConfig, analysisQueue)
	if err != nil {
		t.Fatal(err)
	}
	err = idx.Open()
	if err != nil {
		t.Fatalf("error opening index: %v", err)
	}
	defer func() {
		err := idx.Close()
		if err != nil {
			t.Fatal(err)
		}
	}()

	// first introduce two documents over two batches.
	batch := index.NewBatch()
	doc := document.NewDocument("1")
	doc.AddField(document.NewTextField("name", []uint64{}, []byte("test3")))
	batch.Update(doc)
	err = idx.Batch(batch)
	if err != nil {
		t.Error(err)
	}

	batch.Reset()
	doc = document.NewDocument("2")
	doc.AddField(document.NewTextField("name", []uint64{}, []byte("test2updated")))
	batch.Update(doc)
	err = idx.Batch(batch)
	if err != nil {
		t.Error(err)
	}

	// wait until the merger trying to introduce the new merged segment.
	mergeIntroStart.Wait()

	// execute another batch which obsoletes the contents of the new merged
	// segment awaiting introduction.
	batch.Reset()
	batch.Delete("1")
	batch.Delete("2")
	doc = document.NewDocument("3")
	doc.AddField(document.NewTextField("name", []uint64{}, []byte("test3updated")))
	batch.Update(doc)
	err = idx.Batch(batch)
	if err != nil {
		t.Error(err)
	}

	// wait until the merge task introduction complete.
	mergeIntroComplete.Wait()

	idxr, err := idx.Reader()
	if err != nil {
		t.Error(err)
	}

	// After the obsoleting batch, exactly one live segment should remain.
	numSegments := len(idxr.(*IndexSnapshot).segment)
	if numSegments != 1 {
		t.Errorf("expected one segment at the root, got: %d", numSegments)
	}

	// Exactly one merged-segment introduction should have been skipped.
	skipIntroCount := atomic.LoadUint64(&idxr.(*IndexSnapshot).parent.stats.TotFileMergeIntroductionsObsoleted)
	if skipIntroCount != 1 {
		t.Errorf("expected one obsolete merge segment skipping the introduction, got: %d", skipIntroCount)
	}

	docCount, err := idxr.DocCount()
	if err != nil {
		t.Fatal(err)
	}
	if docCount != 1 {
		t.Errorf("Expected document count to be %d got %d", 1, docCount)
	}

	err = idxr.Close()
	if err != nil {
		t.Fatal(err)
	}
}
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0"?>
<package>
<metadata>
<id>Xamarin.PullToBounce</id>
<title>PullToBounce for Xamarin.iOS</title>
<version>1.0.0.3</version>
<authors>Microsoft</authors>
<owners>Microsoft</owners>
<requireLicenseAcceptance>true</requireLicenseAcceptance>
<description>Pull To Bounce can wrap any UIScrollView and add a cool, bouncy pull-to-refresh animation. All that needs to be done is to add a UIScrollView as a sub view, and then subscribe to an event.</description>
<copyright>© Microsoft Corporation. All rights reserved.</copyright>
<projectUrl>https://go.microsoft.com/fwlink/?linkid=874626</projectUrl>
<licenseUrl>https://go.microsoft.com/fwlink/?linkid=874627</licenseUrl>
</metadata>
<files>
<file src="output/unified/PullToBounce.dll" target="lib/Xamarin.iOS" />
<file src="External-Dependency-Info.txt" target="THIRD-PARTY-NOTICES.txt" />
</files>
</package>
|
{
"pile_set_name": "Github"
}
|
// Copyright (c) Tunnel Vision Laboratories, LLC. All Rights Reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
namespace StyleCop.Analyzers.Lightup
{
using System;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.CSharp;
using Microsoft.CodeAnalysis.CSharp.Syntax;
/// <summary>
/// Lightup wrapper exposing the Roslyn <c>SubpatternSyntax</c> node to code
/// compiled against an older Microsoft.CodeAnalysis, via reflection-built
/// accessor delegates created by <c>LightupHelpers</c>.
/// </summary>
internal struct SubpatternSyntaxWrapper : ISyntaxWrapper<CSharpSyntaxNode>
{
    internal const string WrappedTypeName = "Microsoft.CodeAnalysis.CSharp.Syntax.SubpatternSyntax";

    // Runtime type corresponding to WrappedTypeName, resolved once.
    private static readonly Type WrappedType;

    // Delegates for property get / with-style copy access on the wrapped node.
    private static readonly Func<CSharpSyntaxNode, NameColonSyntax> NameColonAccessor;
    private static readonly Func<CSharpSyntaxNode, CSharpSyntaxNode> PatternAccessor;
    private static readonly Func<CSharpSyntaxNode, NameColonSyntax, CSharpSyntaxNode> WithNameColonAccessor;
    private static readonly Func<CSharpSyntaxNode, CSharpSyntaxNode, CSharpSyntaxNode> WithPatternAccessor;

    // The wrapped syntax node instance.
    private readonly CSharpSyntaxNode node;

    static SubpatternSyntaxWrapper()
    {
        WrappedType = WrapperHelper.GetWrappedType(typeof(SubpatternSyntaxWrapper));
        NameColonAccessor = LightupHelpers.CreateSyntaxPropertyAccessor<CSharpSyntaxNode, NameColonSyntax>(WrappedType, nameof(NameColon));
        PatternAccessor = LightupHelpers.CreateSyntaxPropertyAccessor<CSharpSyntaxNode, CSharpSyntaxNode>(WrappedType, nameof(Pattern));
        WithNameColonAccessor = LightupHelpers.CreateSyntaxWithPropertyAccessor<CSharpSyntaxNode, NameColonSyntax>(WrappedType, nameof(NameColon));
        WithPatternAccessor = LightupHelpers.CreateSyntaxWithPropertyAccessor<CSharpSyntaxNode, CSharpSyntaxNode>(WrappedType, nameof(Pattern));
    }

    private SubpatternSyntaxWrapper(CSharpSyntaxNode node)
    {
        this.node = node;
    }

    /// <summary>Gets the wrapped syntax node.</summary>
    public CSharpSyntaxNode SyntaxNode => this.node;

    /// <summary>Gets the subpattern's NameColon component via the reflection accessor.</summary>
    public NameColonSyntax NameColon
    {
        get
        {
            return NameColonAccessor(this.SyntaxNode);
        }
    }

    /// <summary>Gets the nested pattern, itself returned through a lightup wrapper.</summary>
    public PatternSyntaxWrapper Pattern
    {
        get
        {
            return (PatternSyntaxWrapper)PatternAccessor(this.SyntaxNode);
        }
    }

    /// <summary>
    /// Casts a node to this wrapper; null maps to <c>default</c>, and a node of
    /// any other type throws <see cref="InvalidCastException"/>.
    /// </summary>
    public static explicit operator SubpatternSyntaxWrapper(SyntaxNode node)
    {
        if (node == null)
        {
            return default;
        }

        if (!IsInstance(node))
        {
            throw new InvalidCastException($"Cannot cast '{node.GetType().FullName}' to '{WrappedTypeName}'");
        }

        return new SubpatternSyntaxWrapper((CSharpSyntaxNode)node);
    }

    /// <summary>Implicitly unwraps to the underlying syntax node.</summary>
    public static implicit operator CSharpSyntaxNode(SubpatternSyntaxWrapper wrapper)
    {
        return wrapper.node;
    }

    /// <summary>Returns true when <paramref name="node"/> can be wrapped by this type.</summary>
    public static bool IsInstance(SyntaxNode node)
    {
        return node != null && LightupHelpers.CanWrapNode(node, WrappedType);
    }

    /// <summary>Returns a wrapper around a copy of the node with the given name-colon.</summary>
    public SubpatternSyntaxWrapper WithNameColon(NameColonSyntax nameColon)
    {
        return new SubpatternSyntaxWrapper(WithNameColonAccessor(this.SyntaxNode, nameColon));
    }

    /// <summary>Returns a wrapper around a copy of the node with the given nested pattern.</summary>
    public SubpatternSyntaxWrapper WithPattern(PatternSyntaxWrapper pattern)
    {
        return new SubpatternSyntaxWrapper(WithPatternAccessor(this.SyntaxNode, pattern));
    }
}
}
|
{
"pile_set_name": "Github"
}
|
-- Advance the schema marker: this migration supersedes
-- required_11773_01_mangos_spell_proc_event.
ALTER TABLE db_version CHANGE COLUMN required_11773_01_mangos_spell_proc_event required_11773_02_mangos_spell_chain bit;

-- Re-seed the spell_chain rows for spells 53672 and 54149
-- (rank 1 and rank 2 of the same chain, first_spell = 53672).
DELETE FROM spell_chain WHERE spell_id IN (53672, 54149);
INSERT INTO spell_chain VALUES
(53672,0,53672,1,0),
(54149,53672,53672,2,0);
|
{
"pile_set_name": "Github"
}
|
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = "proto2";
package k8s.io.apimachinery.pkg.apis.meta.v1;
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1";
// APIGroup contains the name, the supported versions, and the preferred version
// of a group.
message APIGroup {
// name is the name of the group.
optional string name = 1;
// versions are the versions supported in this group.
repeated GroupVersionForDiscovery versions = 2;
// preferredVersion is the version preferred by the API server, which
// probably is the storage version.
// +optional
optional GroupVersionForDiscovery preferredVersion = 3;
// a map of client CIDR to server address that is serving this group.
// This is to help clients reach servers in the most network-efficient way possible.
// Clients can use the appropriate server address as per the CIDR that they match.
// In case of multiple matches, clients should use the longest matching CIDR.
// The server returns only those CIDRs that it thinks that the client can match.
// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
// +optional
repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4;
}
// APIGroupList is a list of APIGroup, to allow clients to discover the API at
// /apis.
message APIGroupList {
// groups is a list of APIGroup.
repeated APIGroup groups = 1;
}
// APIResource specifies the name of a resource and whether it is namespaced.
message APIResource {
// name is the plural name of the resource.
optional string name = 1;
// singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely.
// The singularName is more correct for reporting status on a single item and both singular and plural are allowed
// from the kubectl CLI interface.
optional string singularName = 6;
// namespaced indicates if a resource is namespaced or not.
optional bool namespaced = 2;
// group is the preferred group of the resource. Empty implies the group of the containing resource list.
// For subresources, this may have a different value, for example: Scale".
optional string group = 8;
// version is the preferred version of the resource. Empty implies the version of the containing resource list
// For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)".
optional string version = 9;
// kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')
optional string kind = 3;
// verbs is a list of supported kube verbs (this includes get, list, watch, create,
// update, patch, delete, deletecollection, and proxy)
optional Verbs verbs = 4;
// shortNames is a list of suggested short names of the resource.
repeated string shortNames = 5;
// categories is a list of the grouped resources this resource belongs to (e.g. 'all')
repeated string categories = 7;
// The hash value of the storage version, the version this resource is
// converted to when written to the data store. Value must be treated
// as opaque by clients. Only equality comparison on the value is valid.
// This is an alpha feature and may change or be removed in the future.
// The field is populated by the apiserver only if the
// StorageVersionHash feature gate is enabled.
// This field will remain optional even if it graduates.
// +optional
optional string storageVersionHash = 10;
}
// APIResourceList is a list of APIResource, it is used to expose the name of the
// resources supported in a specific group and version, and if the resource
// is namespaced.
message APIResourceList {
// groupVersion is the group and version this APIResourceList is for.
optional string groupVersion = 1;
// resources contains the name of the resources and if they are namespaced.
repeated APIResource resources = 2;
}
// APIVersions lists the versions that are available, to allow clients to
// discover the API at /api, which is the root path of the legacy v1 API.
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message APIVersions {
// versions are the api versions that are available.
repeated string versions = 1;
// a map of client CIDR to server address that is serving this group.
// This is to help clients reach servers in the most network-efficient way possible.
// Clients can use the appropriate server address as per the CIDR that they match.
// In case of multiple matches, clients should use the longest matching CIDR.
// The server returns only those CIDRs that it thinks that the client can match.
// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2;
}
// Condition contains details for one aspect of the current state of this API Resource.
// ---
// This struct is intended for direct use as an array at the field path .status.conditions. For example,
// type FooStatus struct{
// // Represents the observations of a foo's current state.
// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
// // +patchMergeKey=type
// // +patchStrategy=merge
// // +listType=map
// // +listMapKey=type
// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
//
// // other fields
// }
message Condition {
// type of condition in CamelCase or in foo.example.com/CamelCase.
// ---
// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
// useful (see .node.status.conditions), the ability to deconflict is important.
// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
// +kubebuilder:validation:MaxLength=316
optional string type = 1;
// status of the condition, one of True, False, Unknown.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:Enum=True;False;Unknown
optional string status = 2;
// observedGeneration represents the .metadata.generation that the condition was set based upon.
// For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
// with respect to the current state of the instance.
// +optional
// +kubebuilder:validation:Minimum=0
optional int64 observedGeneration = 3;
// lastTransitionTime is the last time the condition transitioned from one status to another.
// This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Format=date-time
optional Time lastTransitionTime = 4;
// reason contains a programmatic identifier indicating the reason for the condition's last transition.
// Producers of specific condition types may define expected values and meanings for this field,
// and whether the values are considered a guaranteed API.
// The value should be a CamelCase string.
// This field may not be empty.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:MaxLength=1024
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`
optional string reason = 5;
// message is a human readable message indicating details about the transition.
// This may be an empty string.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:MaxLength=32768
optional string message = 6;
}
// CreateOptions may be provided when creating an API object.
message CreateOptions {
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
repeated string dryRun = 1;
// fieldManager is a name associated with the actor or entity
// that is making these changes. The value must be less than or
// 128 characters long, and only contain printable characters,
// as defined by https://golang.org/pkg/unicode/#IsPrint.
// +optional
optional string fieldManager = 3;
}
// DeleteOptions may be provided when deleting an API object.
message DeleteOptions {
// The duration in seconds before the object should be deleted. Value must be non-negative integer.
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
// specified type will be used.
// Defaults to a per object value if not specified. zero means delete immediately.
// +optional
optional int64 gracePeriodSeconds = 1;
// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
// returned.
// +k8s:conversion-gen=false
// +optional
optional Preconditions preconditions = 2;
// Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.
// Should the dependent objects be orphaned. If true/false, the "orphan"
// finalizer will be added to/removed from the object's finalizers list.
// Either this field or PropagationPolicy may be set, but not both.
// +optional
optional bool orphanDependents = 3;
// Whether and how garbage collection will be performed.
// Either this field or OrphanDependents may be set, but not both.
// The default policy is decided by the existing finalizer set in the
// metadata.finalizers and the resource-specific default policy.
// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
// allow the garbage collector to delete the dependents in the background;
// 'Foreground' - a cascading policy that deletes all dependents in the
// foreground.
// +optional
optional string propagationPolicy = 4;
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
repeated string dryRun = 5;
}
// Duration is a wrapper around time.Duration which supports correct
// marshaling to YAML and JSON. In particular, it marshals into strings, which
// can be used as map keys in json.
message Duration {
optional int64 duration = 1;
}
// ExportOptions is the query options to the standard REST get call.
// Deprecated. Planned for removal in 1.18.
message ExportOptions {
// Should this value be exported. Export strips fields that a user can not specify.
// Deprecated. Planned for removal in 1.18.
optional bool export = 1;
// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
// Deprecated. Planned for removal in 1.18.
optional bool exact = 2;
}
// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
//
// Each key is either a '.' representing the field itself, and will always map to an empty set,
// or a string representing a sub-field or item. The string will follow one of these four formats:
// 'f:<name>', where <name> is the name of a field in a struct, or key in a map
// 'v:<value>', where <value> is the exact json formatted value of a list item
// 'i:<index>', where <index> is position of a item in a list
// 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values
// If a key maps to an empty Fields value, the field that key represents is part of the set.
//
// The exact format is defined in sigs.k8s.io/structured-merge-diff
// +protobuf.options.(gogoproto.goproto_stringer)=false
message FieldsV1 {
// Raw is the underlying serialization of this object.
optional bytes Raw = 1;
}
// GetOptions is the standard query options to the standard REST get call.
message GetOptions {
// resourceVersion sets a constraint on what resource versions a request may be served from.
// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
// details.
//
// Defaults to unset
// +optional
optional string resourceVersion = 1;
}
// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying
// concepts during lookup stages without having partially valid types
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupKind {
optional string group = 1;
optional string kind = 2;
}
// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying
// concepts during lookup stages without having partially valid types
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupResource {
optional string group = 1;
optional string resource = 2;
}
// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupVersion {
optional string group = 1;
optional string version = 2;
}
// GroupVersion contains the "group/version" and "version" string of a version.
// It is made a struct to keep extensibility.
message GroupVersionForDiscovery {
// groupVersion specifies the API group and version in the form "group/version"
optional string groupVersion = 1;
// version specifies the version in the form of "version". This is to save
// the clients the trouble of splitting the GroupVersion.
optional string version = 2;
}
// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion
// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupVersionKind {
optional string group = 1;
optional string version = 2;
optional string kind = 3;
}
// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion
// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupVersionResource {
optional string group = 1;
optional string version = 2;
optional string resource = 3;
}
// A label selector is a label query over a set of resources. The result of matchLabels and
// matchExpressions are ANDed. An empty label selector matches all objects. A null
// label selector matches no objects.
message LabelSelector {
// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
// map is equivalent to an element of matchExpressions, whose key field is "key", the
// operator is "In", and the values array contains only "value". The requirements are ANDed.
// +optional
map<string, string> matchLabels = 1;
// matchExpressions is a list of label selector requirements. The requirements are ANDed.
// +optional
repeated LabelSelectorRequirement matchExpressions = 2;
}
// A label selector requirement is a selector that contains values, a key, and an operator that
// relates the key and values.
message LabelSelectorRequirement {
// key is the label key that the selector applies to.
// +patchMergeKey=key
// +patchStrategy=merge
optional string key = 1;
// operator represents a key's relationship to a set of values.
// Valid operators are In, NotIn, Exists and DoesNotExist.
optional string operator = 2;
// values is an array of string values. If the operator is In or NotIn,
// the values array must be non-empty. If the operator is Exists or DoesNotExist,
// the values array must be empty. This array is replaced during a strategic
// merge patch.
// +optional
repeated string values = 3;
}
// List holds a list of objects, which may not be known by the server.
message List {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional ListMeta metadata = 1;
// List of objects
repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
}
// ListMeta describes metadata that synthetic resources must have, including lists and
// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
message ListMeta {
// selfLink is a URL representing this object.
// Populated by the system.
// Read-only.
//
// DEPRECATED
// Kubernetes will stop propagating this field in 1.20 release and the field is planned
// to be removed in 1.21 release.
// +optional
optional string selfLink = 1;
// String that identifies the server's internal version of this object that
// can be used by clients to determine when objects have changed.
// Value must be treated as opaque by clients and passed unmodified back to the server.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
// +optional
optional string resourceVersion = 2;
// continue may be set if the user set a limit on the number of items returned, and indicates that
// the server has more data available. The value is opaque and may be used to issue another request
// to the endpoint that served this list to retrieve the next set of available objects. Continuing a
// consistent list may not be possible if the server configuration has changed or more than a few
// minutes have passed. The resourceVersion field returned when using this continue value will be
// identical to the value in the first response, unless you have received this token from an error
// message.
optional string continue = 3;
// remainingItemCount is the number of subsequent items in the list which are not included in this
// list response. If the list request contained label or field selectors, then the number of
// remaining items is unknown and the field will be left unset and omitted during serialization.
// If the list is complete (either because it is not chunking or because this is the last chunk),
// then there are no more remaining items and this field will be left unset and omitted during
// serialization.
// Servers older than v1.15 do not set this field.
// The intended use of the remainingItemCount is *estimating* the size of a collection. Clients
// should not rely on the remainingItemCount to be set or to be exact.
// +optional
optional int64 remainingItemCount = 4;
}
// ListOptions is the query options to a standard REST list call.
// NOTE(review): field numbers are non-sequential (1-5, 7-10); presumably
// earlier numbers were retired/renumbered to keep wire compatibility — confirm upstream.
message ListOptions {
// A selector to restrict the list of returned objects by their labels.
// Defaults to everything.
// +optional
optional string labelSelector = 1;
// A selector to restrict the list of returned objects by their fields.
// Defaults to everything.
// +optional
optional string fieldSelector = 2;
// Watch for changes to the described resources and return them as a stream of
// add, update, and remove notifications. Specify resourceVersion.
// +optional
optional bool watch = 3;
// allowWatchBookmarks requests watch events with type "BOOKMARK".
// Servers that do not implement bookmarks may ignore this flag and
// bookmarks are sent at the server's discretion. Clients should not
// assume bookmarks are returned at any specific interval, nor may they
// assume the server will send any BOOKMARK event during a session.
// If this is not a watch, this field is ignored.
// If the feature gate WatchBookmarks is not enabled in apiserver,
// this field is ignored.
// +optional
optional bool allowWatchBookmarks = 9;
// resourceVersion sets a constraint on what resource versions a request may be served from.
// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
// details.
//
// Defaults to unset
// +optional
optional string resourceVersion = 4;
// resourceVersionMatch determines how resourceVersion is applied to list calls.
// It is highly recommended that resourceVersionMatch be set for list calls where
// resourceVersion is set
// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
// details.
//
// Defaults to unset
// +optional
optional string resourceVersionMatch = 10;
// Timeout for the list/watch call.
// This limits the duration of the call, regardless of any activity or inactivity.
// +optional
optional int64 timeoutSeconds = 5;
// limit is a maximum number of responses to return for a list call. If more items exist, the
// server will set the `continue` field on the list metadata to a value that can be used with the
// same initial query to retrieve the next set of results. Setting a limit may return fewer than
// the requested amount of items (up to zero items) in the event all requested objects are
// filtered out and clients should only use the presence of the continue field to determine whether
// more results are available. Servers may choose not to support the limit argument and will return
// all of the available results. If limit is specified and the continue field is empty, clients may
// assume that no more results are available. This field is not supported if watch is true.
//
// The server guarantees that the objects returned when using continue will be identical to issuing
// a single list call without a limit - that is, no objects created, modified, or deleted after the
// first request is issued will be included in any subsequent continued requests. This is sometimes
// referred to as a consistent snapshot, and ensures that a client that is using limit to receive
// smaller chunks of a very large result can ensure they see all possible objects. If objects are
// updated during a chunked list the version of the object that was present at the time the first list
// result was calculated is returned.
optional int64 limit = 7;
// The continue option should be set when retrieving more results from the server. Since this value is
// server defined, clients may only use the continue value from a previous query result with identical
// query parameters (except for the value of continue) and the server may reject a continue value it
// does not recognize. If the specified continue value is no longer valid whether due to expiration
// (generally five to fifteen minutes) or a configuration change on the server, the server will
// respond with a 410 ResourceExpired error together with a continue token. If the client needs a
// consistent list, it must restart their list without the continue field. Otherwise, the client may
// send another list request with the token received with the 410 error, the server will respond with
// a list starting from the next key, but from the latest snapshot, which is inconsistent from the
// previous list results - objects that are created, modified, or deleted after the first list request
// will be included in the response, as long as their keys are after the "next key".
//
// This field is not supported when watch is true. Clients may start a watch from the last
// resourceVersion value returned by the server and not miss any modifications.
optional string continue = 8;
}
// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
// that the fieldset applies to.
// NOTE(review): field number 5 is skipped — presumably reserved upstream; confirm.
message ManagedFieldsEntry {
// Manager is an identifier of the workflow managing these fields.
optional string manager = 1;
// Operation is the type of operation which led to this ManagedFieldsEntry being created.
// The only valid values for this field are 'Apply' and 'Update'.
optional string operation = 2;
// APIVersion defines the version of this resource that this field set
// applies to. The format is "group/version" just like the top-level
// APIVersion field. It is necessary to track the version of a field
// set because it cannot be automatically converted.
optional string apiVersion = 3;
// Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'
// +optional
optional Time time = 4;
// FieldsType is the discriminator for the different fields format and version.
// There is currently only one possible value: "FieldsV1"
optional string fieldsType = 6;
// FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.
// +optional
optional FieldsV1 fieldsV1 = 7;
}
// MicroTime is version of Time with microsecond level precision.
//
// The wire layout (seconds + nanos) deliberately mirrors the Timestamp
// message below, as required by the +protobuf.as=Timestamp option.
//
// +protobuf.options.marshal=false
// +protobuf.as=Timestamp
// +protobuf.options.(gogoproto.goproto_stringer)=false
message MicroTime {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
optional int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive. This field may be limited in precision depending on context.
optional int32 nanos = 2;
}
// ObjectMeta is metadata that all persisted resources must have, which includes all objects
// users must create.
message ObjectMeta {
// Name must be unique within a namespace. Is required when creating resources, although
// some resources may allow a client to request the generation of an appropriate name
// automatically. Name is primarily intended for creation idempotence and configuration
// definition.
// Cannot be updated.
// More info: http://kubernetes.io/docs/user-guide/identifiers#names
// +optional
optional string name = 1;
// GenerateName is an optional prefix, used by the server, to generate a unique
// name ONLY IF the Name field has not been provided.
// If this field is used, the name returned to the client will be different
// than the name passed. This value will also be combined with a unique suffix.
// The provided value has the same validation rules as the Name field,
// and may be truncated by the length of the suffix required to make the value
// unique on the server.
//
// If this field is specified and the generated name exists, the server will
// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
// should retry (optionally after the time indicated in the Retry-After header).
//
// Applied only if Name is not specified.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
// +optional
optional string generateName = 2;
// Namespace defines the space within which each name must be unique. An empty namespace is
// equivalent to the "default" namespace, but "default" is the canonical representation.
// Not all objects are required to be scoped to a namespace - the value of this field for
// those objects will be empty.
//
// Must be a DNS_LABEL.
// Cannot be updated.
// More info: http://kubernetes.io/docs/user-guide/namespaces
// +optional
optional string namespace = 3;
// SelfLink is a URL representing this object.
// Populated by the system.
// Read-only.
//
// DEPRECATED
// Kubernetes will stop propagating this field in 1.20 release and the field is planned
// to be removed in 1.21 release.
// +optional
optional string selfLink = 4;
// UID is the unique in time and space value for this object. It is typically generated by
// the server on successful creation of a resource and is not allowed to change on PUT
// operations.
//
// Populated by the system.
// Read-only.
// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
// +optional
optional string uid = 5;
// An opaque value that represents the internal version of this object that can
// be used by clients to determine when objects have changed. May be used for optimistic
// concurrency, change detection, and the watch operation on a resource or set of resources.
// Clients must treat these values as opaque and passed unmodified back to the server.
// They may only be valid for a particular resource or set of resources.
//
// Populated by the system.
// Read-only.
// Value must be treated as opaque by clients and passed unmodified back to the server.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
// +optional
optional string resourceVersion = 6;
// A sequence number representing a specific generation of the desired state.
// Populated by the system. Read-only.
// +optional
optional int64 generation = 7;
// CreationTimestamp is a timestamp representing the server time when this object was
// created. It is not guaranteed to be set in happens-before order across separate operations.
// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
//
// Populated by the system.
// Read-only.
// Null for lists.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional Time creationTimestamp = 8;
// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
// field is set by the server when a graceful deletion is requested by the user, and is not
// directly settable by a client. The resource is expected to be deleted (no longer visible
// from resource lists, and not reachable by name) after the time in this field, once the
// finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
// Once the deletionTimestamp is set, this value may not be unset or be set further into the
// future, although it may be shortened or the resource may be deleted prior to this time.
// For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react
// by sending a graceful termination signal to the containers in the pod. After that 30 seconds,
// the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,
// remove the pod from the API. In the presence of network partitions, this object may still
// exist after this timestamp, until an administrator or automated process can determine the
// resource is fully terminated.
// If not set, graceful deletion of the object has not been requested.
//
// Populated by the system when a graceful deletion is requested.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional Time deletionTimestamp = 9;
// Number of seconds allowed for this object to gracefully terminate before
// it will be removed from the system. Only set when deletionTimestamp is also set.
// May only be shortened.
// Read-only.
// +optional
optional int64 deletionGracePeriodSeconds = 10;
// Map of string keys and values that can be used to organize and categorize
// (scope and select) objects. May match selectors of replication controllers
// and services.
// More info: http://kubernetes.io/docs/user-guide/labels
// +optional
map<string, string> labels = 11;
// Annotations is an unstructured key value map stored with a resource that may be
// set by external tools to store and retrieve arbitrary metadata. They are not
// queryable and should be preserved when modifying objects.
// More info: http://kubernetes.io/docs/user-guide/annotations
// +optional
map<string, string> annotations = 12;
// List of objects depended by this object. If ALL objects in the list have
// been deleted, this object will be garbage collected. If this object is managed by a controller,
// then an entry in this list will point to this controller, with the controller field set to true.
// There cannot be more than one managing controller.
// +optional
// +patchMergeKey=uid
// +patchStrategy=merge
repeated OwnerReference ownerReferences = 13;
// Must be empty before the object is deleted from the registry. Each entry
// is an identifier for the responsible component that will remove the entry
// from the list. If the deletionTimestamp of the object is non-nil, entries
// in this list can only be removed.
// Finalizers may be processed and removed in any order. Order is NOT enforced
// because it introduces significant risk of stuck finalizers.
// finalizers is a shared field, any actor with permission can reorder it.
// If the finalizer list is processed in order, then this can lead to a situation
// in which the component responsible for the first finalizer in the list is
// waiting for a signal (field value, external system, or other) produced by a
// component responsible for a finalizer later in the list, resulting in a deadlock.
// Without enforced ordering finalizers are free to order amongst themselves and
// are not vulnerable to ordering changes in the list.
// +optional
// +patchStrategy=merge
repeated string finalizers = 14;
// The name of the cluster which the object belongs to.
// This is used to distinguish resources with same name and namespace in different clusters.
// This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
// +optional
optional string clusterName = 15;
// ManagedFields maps workflow-id and version to the set of fields
// that are managed by that workflow. This is mostly for internal
// housekeeping, and users typically shouldn't need to set or
// understand this field. A workflow can be the user's name, a
// controller's name, or the name of a specific apply path like
// "ci-cd". The set of fields is always in the version that the
// workflow used when modifying the object.
//
// +optional
repeated ManagedFieldsEntry managedFields = 17;
}
// OwnerReference contains enough information to let you identify an owning
// object. An owning object must be in the same namespace as the dependent, or
// be cluster-scoped, so there is no namespace field.
// NOTE(review): field numbers are out of declaration order (5, 1, 3, 4, 6, 7)
// and 2 is skipped — presumably historical renumbering; confirm upstream.
message OwnerReference {
// API version of the referent.
optional string apiVersion = 5;
// Kind of the referent.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
optional string kind = 1;
// Name of the referent.
// More info: http://kubernetes.io/docs/user-guide/identifiers#names
optional string name = 3;
// UID of the referent.
// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
optional string uid = 4;
// If true, this reference points to the managing controller.
// +optional
optional bool controller = 6;
// If true, AND if the owner has the "foregroundDeletion" finalizer, then
// the owner cannot be deleted from the key-value store until this
// reference is removed.
// Defaults to false.
// To set this field, a user needs "delete" permission of the owner,
// otherwise 422 (Unprocessable Entity) will be returned.
// +optional
optional bool blockOwnerDeletion = 7;
}
// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
// to get access to a particular ObjectMeta schema without knowing the details of the version.
// Only metadata is carried; the object's type-specific payload is intentionally omitted.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message PartialObjectMetadata {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional ObjectMeta metadata = 1;
}
// PartialObjectMetadataList contains a list of objects containing only their metadata
// (each item is a PartialObjectMetadata; no type-specific payloads are included).
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message PartialObjectMetadataList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional ListMeta metadata = 1;
// items contains each of the included items.
repeated PartialObjectMetadata items = 2;
}
// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.
// Intentionally empty: the actual patch content travels as the raw request payload,
// not as fields of this message.
message Patch {
}
// PatchOptions may be provided when patching an API object.
// PatchOptions is meant to be a superset of UpdateOptions.
message PatchOptions {
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
repeated string dryRun = 1;
// Force is going to "force" Apply requests. It means user will
// re-acquire conflicting fields owned by other people. Force
// flag must be unset for non-apply patch requests.
// +optional
optional bool force = 2;
// fieldManager is a name associated with the actor or entity
// that is making these changes. The value must be less than
// 128 characters long, and only contain printable characters,
// as defined by https://golang.org/pkg/unicode/#IsPrint. This
// field is required for apply requests
// (application/apply-patch) but optional for non-apply patch
// types (JsonPatch, MergePatch, StrategicMergePatch).
// +optional
optional string fieldManager = 3;
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
// NOTE(review): an unmet precondition presumably causes the server to reject the
// operation — confirm the exact failure status against the API conventions.
message Preconditions {
// Specifies the target UID.
// +optional
optional string uid = 1;
// Specifies the target ResourceVersion.
// +optional
optional string resourceVersion = 2;
}
// RootPaths lists the paths available at root.
// For example: "/healthz", "/apis".
message RootPaths {
// paths are the paths available at root.
// Each entry is an absolute path string such as "/healthz" or "/apis".
repeated string paths = 1;
}
// ServerAddressByClientCIDR helps the client determine the server address that it should use, depending on the clientCIDR that it matches.
message ServerAddressByClientCIDR {
// The CIDR with which clients can match their IP to figure out the server address that they should use.
optional string clientCIDR = 1;
// Address of this server, suitable for a client that matches the above CIDR.
// This can be a hostname, hostname:port, IP or IP:port.
optional string serverAddress = 2;
}
// Status is a return value for calls that don't return other objects,
// covering both error responses and simple success acknowledgements.
message Status {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional ListMeta metadata = 1;
// Status of the operation.
// One of: "Success" or "Failure".
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
optional string status = 2;
// A human-readable description of the status of this operation.
// +optional
optional string message = 3;
// A machine-readable description of why this operation is in the
// "Failure" status. If this value is empty there
// is no information available. A Reason clarifies an HTTP status
// code but does not override it.
// +optional
optional string reason = 4;
// Extended data associated with the reason. Each reason may define its
// own extended details. This field is optional and the data returned
// is not guaranteed to conform to any schema except that defined by
// the reason type.
// +optional
optional StatusDetails details = 5;
// Suggested HTTP return code for this status, 0 if not set.
// +optional
optional int32 code = 6;
}
// StatusCause provides more information about an api.Status failure, including
// cases when multiple errors are encountered.
message StatusCause {
// A machine-readable description of the cause of the error. If this value is
// empty there is no information available.
// +optional
optional string reason = 1;
// A human-readable description of the cause of the error. This field may be
// presented as-is to a reader.
// +optional
optional string message = 2;
// The field of the resource that has caused this error, as named by its JSON
// serialization. May include dot and postfix notation for nested attributes.
// Arrays are zero-indexed. Fields may appear more than once in an array of
// causes due to fields having multiple errors.
// Optional; may be omitted when the error is not tied to a specific field.
//
// Examples:
// "name" - the field "name" on the current resource
// "items[0].name" - the field "name" on the first array entry in "items"
// +optional
optional string field = 3;
}
// StatusDetails is a set of additional properties that MAY be set by the
// server to provide additional information about a response. The Reason
// field of a Status object defines what attributes will be set. Clients
// must ignore fields that do not match the defined type of each attribute,
// and should assume that any attribute may be empty, invalid, or under
// defined.
// NOTE(review): field numbers are out of declaration order (1, 2, 3, 6, 4, 5) —
// presumably a later field was inserted; confirm upstream history.
message StatusDetails {
// The name attribute of the resource associated with the status StatusReason
// (when there is a single name which can be described).
// +optional
optional string name = 1;
// The group attribute of the resource associated with the status StatusReason.
// +optional
optional string group = 2;
// The kind attribute of the resource associated with the status StatusReason.
// On some operations may differ from the requested resource Kind.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional string kind = 3;
// UID of the resource.
// (when there is a single resource which can be described).
// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
// +optional
optional string uid = 6;
// The Causes array includes more details associated with the StatusReason
// failure. Not all StatusReasons may provide detailed causes.
// +optional
repeated StatusCause causes = 4;
// If specified, the time in seconds before the operation should be retried. Some errors may indicate
// the client must take an alternate action - for those errors this field may indicate how long to wait
// before taking the alternate action.
// +optional
optional int32 retryAfterSeconds = 5;
}
// TableOptions are used when a Table is requested by the caller.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message TableOptions {
// includeObject decides whether to include each object along with its columnar information.
// Specifying "None" will return no object, specifying "Object" will return the full object contents, and
// specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind
// in version v1beta1 of the meta.k8s.io API group.
// Valid values: "None", "Object", "Metadata" (default).
optional string includeObject = 1;
}
// Time is a wrapper around time.Time which supports correct
// marshaling to YAML and JSON. Wrappers are provided for many
// of the factory methods that the time package offers.
//
// The wire layout (seconds + nanos) deliberately mirrors the Timestamp
// message below, as required by the +protobuf.as=Timestamp option.
//
// +protobuf.options.marshal=false
// +protobuf.as=Timestamp
// +protobuf.options.(gogoproto.goproto_stringer)=false
message Time {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
optional int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive. This field may be limited in precision depending on context.
optional int32 nanos = 2;
}
// Timestamp is a struct that is equivalent to Time, but intended for
// protobuf marshalling/unmarshalling. It is generated into a serialization
// that matches Time. Do not use in Go structs.
// The field layout is identical to Time and MicroTime above.
message Timestamp {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
optional int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive. This field may be limited in precision depending on context.
optional int32 nanos = 2;
}
// TypeMeta describes an individual object in an API response or request
// with strings representing the type of the object and its API schema version.
// Structures that are versioned or persisted should inline TypeMeta.
// Both fields are +optional because servers may infer them from the request
// endpoint (see the kind comment below).
//
// +k8s:deepcopy-gen=false
message TypeMeta {
// Kind is a string value representing the REST resource this object represents.
// Servers may infer this from the endpoint the client submits requests to.
// Cannot be updated.
// In CamelCase.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional string kind = 1;
// APIVersion defines the versioned schema of this representation of an object.
// Servers should convert recognized schemas to the latest internal value, and
// may reject unrecognized values.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
// +optional
optional string apiVersion = 2;
}
// UpdateOptions may be provided when updating an API object.
// All fields in UpdateOptions should also be present in PatchOptions.
message UpdateOptions {
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
repeated string dryRun = 1;
// fieldManager is a name associated with the actor or entity
// that is making these changes. The value must be less than
// 128 characters long, and only contain printable characters,
// as defined by https://golang.org/pkg/unicode/#IsPrint.
// +optional
optional string fieldManager = 2;
}
// Verbs masks the value so protobuf can generate a nullable wrapper around
// the string slice (presumably the intent of the truncated upstream comment —
// see the +protobuf.nullable option below; confirm against upstream source).
//
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
message Verbs {
// items, if empty, will result in an empty slice
repeated string items = 1;
}
// Event represents a single event to a watched resource.
//
// +protobuf=true
// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message WatchEvent {
// type is the kind of event; the Object comment below documents the
// Added, Modified, Deleted, and Error cases.
optional string type = 1;
// Object is:
// * If Type is Added or Modified: the new state of the object.
// * If Type is Deleted: the state of the object immediately before deletion.
// * If Type is Error: *Status is recommended; other types may make sense
// depending on context.
optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
}
|
{
"pile_set_name": "Github"
}
|
// This file was procedurally generated from the following sources:
// - src/function-forms/rest-params-trailing-comma-early-error.case
// - src/function-forms/syntax/cls-decl-gen-meth-static.template
/*---
description: It's a syntax error if a FunctionRestParameter is followed by a trailing comma (static class expression generator method)
esid: sec-runtime-semantics-bindingclassdeclarationevaluation
features: [generators]
flags: [generated]
negative:
phase: parse
type: SyntaxError
info: |
ClassDeclaration : class BindingIdentifier ClassTail
1. Let className be StringValue of BindingIdentifier.
2. Let value be the result of ClassDefinitionEvaluation of ClassTail with
argument className.
[...]
14.5.14 Runtime Semantics: ClassDefinitionEvaluation
21. For each ClassElement m in order from methods
a. If IsStatic of m is false, then
b. Else,
Let status be the result of performing PropertyDefinitionEvaluation for
m with arguments F and false.
[...]
14.4.13 Runtime Semantics: PropertyDefinitionEvaluation
GeneratorMethod : * PropertyName ( StrictFormalParameters ) { GeneratorBody }
1. Let propKey be the result of evaluating PropertyName.
2. ReturnIfAbrupt(propKey).
3. If the function code for this GeneratorMethod is strict mode code,
let strict be true. Otherwise let strict be false.
4. Let scope be the running execution context's LexicalEnvironment.
5. Let closure be GeneratorFunctionCreate(Method,
StrictFormalParameters, GeneratorBody, scope, strict).
9.2.1 [[Call]] ( thisArgument, argumentsList)
[...]
7. Let result be OrdinaryCallEvaluateBody(F, argumentsList).
[...]
9.2.1.3 OrdinaryCallEvaluateBody ( F, argumentsList )
1. Let status be FunctionDeclarationInstantiation(F, argumentsList).
[...]
9.2.12 FunctionDeclarationInstantiation(func, argumentsList)
[...]
23. Let iteratorRecord be Record {[[iterator]]:
CreateListIterator(argumentsList), [[done]]: false}.
24. If hasDuplicates is true, then
[...]
25. Else,
b. Let formalStatus be IteratorBindingInitialization for formals with
iteratorRecord and env as arguments.
[...]
Trailing comma in the parameters list
14.1 Function Definitions
FormalParameters[Yield, Await] :
[empty]
FunctionRestParameter[?Yield, ?Await]
FormalParameterList[?Yield, ?Await]
FormalParameterList[?Yield, ?Await] ,
FormalParameterList[?Yield, ?Await] , FunctionRestParameter[?Yield, ?Await]
---*/
// Guard against false positives: this file must fail to PARSE (see the
// `negative: phase: parse` metadata above). If an engine wrongly accepts the
// syntax, evaluation reaches this throw, which is not the expected SyntaxError.
throw "Test262: This statement should not be evaluated.";
// The trailing comma after the rest parameter below is the deliberate
// syntax error under test — do not "fix" it.
class C {
static *method(...a,) {
}
}
|
{
"pile_set_name": "Github"
}
|
package hostsystem
import (
"context"
"fmt"
"log"
"time"
"github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/provider"
"github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/viapi"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
)
// SystemOrDefault returns a HostSystem from a specific host name and
// datacenter. If the user is connecting over ESXi, the default host system is
// used.
func SystemOrDefault(client *govmomi.Client, name string, dc *object.Datacenter) (*object.HostSystem, error) {
	ctx, cancel := context.WithTimeout(context.Background(), provider.DefaultAPITimeout)
	defer cancel()

	finder := find.NewFinder(client.Client, false)
	finder.SetDatacenter(dc)

	apiType := client.ServiceContent.About.ApiType
	if apiType == "HostAgent" {
		// Direct ESXi connection: there is only one host system to pick.
		return finder.DefaultHostSystem(ctx)
	}
	if apiType == "VirtualCenter" {
		if name == "" {
			return finder.DefaultHostSystem(ctx)
		}
		return finder.HostSystem(ctx, name)
	}
	return nil, fmt.Errorf("unsupported ApiType: %s", apiType)
}
// FromID locates a HostSystem by its managed object reference ID.
func FromID(client *govmomi.Client, id string) (*object.HostSystem, error) {
	log.Printf("[DEBUG] Locating host system ID %s", id)

	ctx, cancel := context.WithTimeout(context.Background(), provider.DefaultAPITimeout)
	defer cancel()

	finder := find.NewFinder(client.Client, false)
	obj, err := finder.ObjectReference(ctx, types.ManagedObjectReference{
		Type:  "HostSystem",
		Value: id,
	})
	if err != nil {
		return nil, err
	}
	log.Printf("[DEBUG] Host system found: %s", obj.Reference().Value)
	// ObjectReference returns object.Reference; the lookup was typed
	// "HostSystem", so the assertion below is safe.
	return obj.(*object.HostSystem), nil
}
// Properties is a convenience method that wraps fetching the HostSystem MO
// from its higher-level object.
func Properties(host *object.HostSystem) (*mo.HostSystem, error) {
	ctx, cancel := context.WithTimeout(context.Background(), provider.DefaultAPITimeout)
	defer cancel()
	// Passing a nil property list fetches all properties of the host.
	hostProps := mo.HostSystem{}
	err := host.Properties(ctx, host.Reference(), nil, &hostProps)
	if err != nil {
		return nil, err
	}
	return &hostProps, nil
}
// ResourcePool is a convenience method that wraps fetching the host system's
// root resource pool.
func ResourcePool(host *object.HostSystem) (*object.ResourcePool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), provider.DefaultAPITimeout)
	defer cancel()
	return host.ResourcePool(ctx)
}
// hostSystemNameFromID returns the name of a host via its managed object
// reference ID.
func hostSystemNameFromID(client *govmomi.Client, id string) (string, error) {
	host, err := FromID(client, id)
	if err != nil {
		return "", err
	}
	return host.Name(), nil
}
// NameOrID is a convenience method mainly for helping displaying friendly
// errors where space is important - it displays either the host name or the ID
// if there was an error fetching it.
func NameOrID(client *govmomi.Client, id string) string {
	if name, err := hostSystemNameFromID(client, id); err == nil {
		return name
	}
	// Fall back to the raw ID when the name lookup fails.
	return id
}
// HostInMaintenance checks a HostSystem's maintenance mode and returns true if
// the host is in maintenance mode.
func HostInMaintenance(host *object.HostSystem) (bool, error) {
	hostObject, err := Properties(host)
	if err != nil {
		return false, err
	}
	return hostObject.Runtime.InMaintenanceMode, nil
}
// EnterMaintenanceMode puts a host into maintenance mode. If evacuate is set
// to true, all powered off VMs will be removed from the host, or the task will
// block until this is the case, depending on whether or not DRS is on or off
// for the host's cluster. This parameter is ignored on direct ESXi.
func EnterMaintenanceMode(host *object.HostSystem, timeout int, evacuate bool) error {
	if err := viapi.VimValidateVirtualCenter(host.Client()); err != nil {
		// Evacuation is only meaningful when connected to vCenter; silently
		// disable it on a direct ESXi connection.
		evacuate = false
	}

	maintMode, err := HostInMaintenance(host)
	// BUG FIX: the error from HostInMaintenance was previously discarded,
	// which could make a property-fetch failure look like "not in
	// maintenance mode" and trigger a spurious mode-change request.
	if err != nil {
		return err
	}
	if maintMode {
		log.Printf("[DEBUG] Host %q is already in maintenance mode", host.Name())
		return nil
	}

	log.Printf("[DEBUG] Host %q is entering maintenance mode (evacuate: %t)", host.Name(), evacuate)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(timeout))
	defer cancel()
	task, err := host.EnterMaintenanceMode(ctx, int32(timeout), evacuate, nil)
	if err != nil {
		return err
	}
	if err := task.Wait(ctx); err != nil {
		return err
	}

	// Inspect the task result so a non-"success" end state surfaces a
	// descriptive error instead of being silently ignored.
	var to mo.Task
	if err := task.Properties(context.TODO(), task.Reference(), nil, &to); err != nil {
		log.Printf("[DEBUG] Failed while getting task results: %s", err)
		return err
	}
	if to.Info.State != "success" {
		return fmt.Errorf("Error while putting host(%s) in maintenance mode: %s", host.Reference(), to.Info.Error)
	}
	return nil
}
// ExitMaintenanceMode takes a host out of maintenance mode.
func ExitMaintenanceMode(host *object.HostSystem, timeout int) error {
	maintMode, err := HostInMaintenance(host)
	// BUG FIX: the error from HostInMaintenance was previously discarded,
	// which could make a property-fetch failure look like "not in
	// maintenance mode" and skip the exit request entirely.
	if err != nil {
		return err
	}
	if !maintMode {
		log.Printf("[DEBUG] Host %q is already not in maintenance mode", host.Name())
		return nil
	}

	log.Printf("[DEBUG] Host %q is exiting maintenance mode", host.Name())

	// Add 5 minutes to timeout for the context timeout to allow for any issues
	// with the request after.
	// TODO: Fix this so that it ultimately uses the provider context.
	ctxTimeout := timeout + 300
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(ctxTimeout))
	defer cancel()
	task, err := host.ExitMaintenanceMode(ctx, int32(timeout))
	if err != nil {
		return err
	}
	if err := task.Wait(ctx); err != nil {
		return err
	}

	// Inspect the task result so a non-"success" end state surfaces a
	// descriptive error instead of being silently ignored.
	var to mo.Task
	if err := task.Properties(context.TODO(), task.Reference(), nil, &to); err != nil {
		log.Printf("[DEBUG] Failed while getting task results: %s", err)
		return err
	}
	if to.Info.State != "success" {
		return fmt.Errorf("Error while getting host(%s) out of maintenance mode: %s", host.Reference(), to.Info.Error)
	}
	return nil
}
// GetConnectionState returns the host's connection state (see vim.HostSystem.ConnectionState)
func GetConnectionState(host *object.HostSystem) (types.HostSystemConnectionState, error) {
	props, err := Properties(host)
	if err != nil {
		return types.HostSystemConnectionState(""), err
	}
	return props.Runtime.ConnectionState, nil
}
|
{
"pile_set_name": "Github"
}
|
stderr of test 'cross_join-SF.919524` in directory 'sql/test/BugDay_2005-10-06_2.8` itself:
# 14:50:14 >
# 14:50:14 > Mtimeout -timeout 180 Mserver "--config=/home/sjoerd/etc/MonetDB.conf" --debug=10 --set "monet_mod_path=/home/sjoerd/lib/MonetDB:/home/sjoerd/lib/bin" --set "gdk_dbfarm=/home/sjoerd/var/MonetDB/dbfarm" --set "sql_logdir=/home/sjoerd/var/MonetDB/log" --set mapi_port=33764 --set sql_port=45654 --set xquery_port=54182 --set monet_prompt= --trace "--dbname=mTests_src_test_BugDay_2005-10-06_2.8" --dbinit="module(sql_server); sql_server_start();" ; echo ; echo Over..
# 14:50:14 >
# 14:50:14 >
# 14:50:14 > Mtimeout -timeout 60 MapiClient -lsql -u monetdb -P monetdb --host=localhost --port=45654 < cross_join-SF.919524.sql
# 14:50:14 >
# 14:50:14 >
# 14:50:14 > Done.
# 14:50:14 >
|
{
"pile_set_name": "Github"
}
|
{
"formatVersion": 1,
"database": {
"version": 8,
"identityHash": "aef5fdefabc56501799ced8a278ae77f",
"entities": [
{
"tableName": "note",
"createSql": "CREATE TABLE IF NOT EXISTS `${TABLE_NAME}` (`uid` INTEGER PRIMARY KEY AUTOINCREMENT, `title` TEXT, `description` TEXT, `displayTimestamp` TEXT, `timestamp` INTEGER, `color` INTEGER, `state` TEXT, `locked` INTEGER NOT NULL, `tags` TEXT, `updateTimestamp` INTEGER NOT NULL, `pinned` INTEGER NOT NULL, `uuid` TEXT)",
"fields": [
{
"fieldPath": "uid",
"columnName": "uid",
"affinity": "INTEGER",
"notNull": false
},
{
"fieldPath": "title",
"columnName": "title",
"affinity": "TEXT",
"notNull": false
},
{
"fieldPath": "description",
"columnName": "description",
"affinity": "TEXT",
"notNull": false
},
{
"fieldPath": "displayTimestamp",
"columnName": "displayTimestamp",
"affinity": "TEXT",
"notNull": false
},
{
"fieldPath": "timestamp",
"columnName": "timestamp",
"affinity": "INTEGER",
"notNull": false
},
{
"fieldPath": "color",
"columnName": "color",
"affinity": "INTEGER",
"notNull": false
},
{
"fieldPath": "state",
"columnName": "state",
"affinity": "TEXT",
"notNull": false
},
{
"fieldPath": "locked",
"columnName": "locked",
"affinity": "INTEGER",
"notNull": true
},
{
"fieldPath": "tags",
"columnName": "tags",
"affinity": "TEXT",
"notNull": false
},
{
"fieldPath": "updateTimestamp",
"columnName": "updateTimestamp",
"affinity": "INTEGER",
"notNull": true
},
{
"fieldPath": "pinned",
"columnName": "pinned",
"affinity": "INTEGER",
"notNull": true
},
{
"fieldPath": "uuid",
"columnName": "uuid",
"affinity": "TEXT",
"notNull": false
}
],
"primaryKey": {
"columnNames": [
"uid"
],
"autoGenerate": true
},
"indices": [
{
"name": "index_note_uid",
"unique": false,
"columnNames": [
"uid"
],
"createSql": "CREATE INDEX `index_note_uid` ON `${TABLE_NAME}` (`uid`)"
}
],
"foreignKeys": []
},
{
"tableName": "tag",
"createSql": "CREATE TABLE IF NOT EXISTS `${TABLE_NAME}` (`uid` INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, `title` TEXT, `uuid` TEXT)",
"fields": [
{
"fieldPath": "uid",
"columnName": "uid",
"affinity": "INTEGER",
"notNull": true
},
{
"fieldPath": "title",
"columnName": "title",
"affinity": "TEXT",
"notNull": false
},
{
"fieldPath": "uuid",
"columnName": "uuid",
"affinity": "TEXT",
"notNull": false
}
],
"primaryKey": {
"columnNames": [
"uid"
],
"autoGenerate": true
},
"indices": [
{
"name": "index_tag_uid",
"unique": false,
"columnNames": [
"uid"
],
"createSql": "CREATE INDEX `index_tag_uid` ON `${TABLE_NAME}` (`uid`)"
}
],
"foreignKeys": []
}
],
"setupQueries": [
"CREATE TABLE IF NOT EXISTS room_master_table (id INTEGER PRIMARY KEY,identity_hash TEXT)",
"INSERT OR REPLACE INTO room_master_table (id,identity_hash) VALUES(42, \"aef5fdefabc56501799ced8a278ae77f\")"
]
}
}
|
{
"pile_set_name": "Github"
}
|
<web-app xmlns="http://java.sun.com/xml/ns/javaee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="3.0"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee ./xsd/web-app_3_0.xsd">
<listener>
<listener-class>org.springframework.web.context.ContextLoaderListener</listener-class>
</listener>
<context-param>
<param-name>contextClass</param-name>
<param-value>
org.springframework.web.context.support.AnnotationConfigWebApplicationContext
</param-value>
</context-param>
<context-param>
<param-name>contextConfigLocation</param-name>
<param-value>
ca.uhn.fhir.jpa.demo.FhirServerConfigDstu2
</param-value>
</context-param>
<!-- Servlets -->
<servlet>
<servlet-name>spring</servlet-name>
<servlet-class>org.springframework.web.servlet.DispatcherServlet</servlet-class>
<init-param>
<param-name>contextClass</param-name>
<param-value>org.springframework.web.context.support.AnnotationConfigWebApplicationContext</param-value>
</init-param>
<init-param>
<param-name>contextConfigLocation</param-name>
<param-value>ca.uhn.fhir.jpa.demo.FhirTesterConfigDstu2</param-value>
</init-param>
<load-on-startup>2</load-on-startup>
</servlet>
<servlet>
<servlet-name>fhirServlet</servlet-name>
<servlet-class>ca.uhn.fhir.jpa.demo.JpaServerDemoDstu2</servlet-class>
<init-param>
<param-name>ImplementationDescription</param-name>
<param-value>FHIR JPA Server</param-value>
</init-param>
<init-param>
<param-name>FhirVersion</param-name>
<param-value>DSTU2</param-value>
</init-param>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>fhirServlet</servlet-name>
<url-pattern>/baseDstu2/*</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>spring</servlet-name>
<url-pattern>/</url-pattern>
</servlet-mapping>
<!-- This filter provides support for Cross-Origin Resource Sharing (CORS) -->
<filter>
<filter-name>CORS Filter</filter-name>
<filter-class>org.ebaysf.web.cors.CORSFilter</filter-class>
<init-param>
<description>A comma separated list of allowed origins. Note: An '*' cannot be used for an allowed origin when using credentials.</description>
<param-name>cors.allowed.origins</param-name>
<param-value>*</param-value>
</init-param>
<init-param>
<description>A comma separated list of HTTP verbs, using which a CORS request can be made.</description>
<param-name>cors.allowed.methods</param-name>
<param-value>GET,POST,PUT,DELETE,OPTIONS</param-value>
</init-param>
<init-param>
<description>A comma separated list of allowed headers when making a non simple CORS request.</description>
<param-name>cors.allowed.headers</param-name>
<param-value>X-FHIR-Starter,Origin,Accept,X-Requested-With,Content-Type,Access-Control-Request-Method,Access-Control-Request-Headers</param-value>
</init-param>
<init-param>
<description>A comma separated list non-standard response headers that will be exposed to XHR2 object.</description>
<param-name>cors.exposed.headers</param-name>
<param-value>Location,Content-Location</param-value>
</init-param>
<init-param>
<description>A flag that suggests if CORS is supported with cookies</description>
<param-name>cors.support.credentials</param-name>
<param-value>true</param-value>
</init-param>
<init-param>
<description>A flag to control logging</description>
<param-name>cors.logging.enabled</param-name>
<param-value>true</param-value>
</init-param>
<init-param>
<description>Indicates how long (in seconds) the results of a preflight request can be cached in a preflight result cache.</description>
<param-name>cors.preflight.maxage</param-name>
<param-value>300</param-value>
</init-param>
</filter>
<filter-mapping>
<filter-name>CORS Filter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
</web-app>
|
{
"pile_set_name": "Github"
}
|
{
"runOn": [
{
"minServerVersion": "4.0",
"topology": [
"single",
"replicaset"
]
},
{
"minServerVersion": "4.1.7",
"topology": [
"sharded"
]
}
],
"database_name": "retryable-reads-tests",
"collection_name": "coll",
"data": [],
"tests": [
{
"description": "ListCollectionNames succeeds on first attempt",
"operations": [
{
"name": "listCollectionNames",
"object": "database"
}
],
"expectations": [
{
"command_started_event": {
"command": {
"listCollections": 1
}
}
}
]
},
{
"description": "ListCollectionNames succeeds on second attempt",
"failPoint": {
"configureFailPoint": "failCommand",
"mode": {
"times": 1
},
"data": {
"failCommands": [
"listCollections"
],
"closeConnection": true
}
},
"operations": [
{
"name": "listCollectionNames",
"object": "database"
}
],
"expectations": [
{
"command_started_event": {
"command": {
"listCollections": 1
}
}
},
{
"command_started_event": {
"command": {
"listCollections": 1
}
}
}
]
},
{
"description": "ListCollectionNames fails on first attempt",
"clientOptions": {
"retryReads": false
},
"failPoint": {
"configureFailPoint": "failCommand",
"mode": {
"times": 1
},
"data": {
"failCommands": [
"listCollections"
],
"closeConnection": true
}
},
"operations": [
{
"name": "listCollectionNames",
"object": "database",
"error": true
}
],
"expectations": [
{
"command_started_event": {
"command": {
"listCollections": 1
}
}
}
]
},
{
"description": "ListCollectionNames fails on second attempt",
"failPoint": {
"configureFailPoint": "failCommand",
"mode": {
"times": 2
},
"data": {
"failCommands": [
"listCollections"
],
"closeConnection": true
}
},
"operations": [
{
"name": "listCollectionNames",
"object": "database",
"error": true
}
],
"expectations": [
{
"command_started_event": {
"command": {
"listCollections": 1
}
}
},
{
"command_started_event": {
"command": {
"listCollections": 1
}
}
}
]
}
]
}
|
{
"pile_set_name": "Github"
}
|
//
// SnapKit
//
// Copyright (c) 2011-Present SnapKit Team - https://github.com/SnapKit
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#if os(iOS) || os(tvOS)
import UIKit
#else
import AppKit
#endif
/// Terminal stage of the constraint-maker chain. Wraps a finished
/// `ConstraintDescription` and exposes the resulting `Constraint`.
public class ConstraintMakerFinalizable {

    // The accumulated description produced by the earlier maker stages.
    internal let description: ConstraintDescription

    internal init(_ description: ConstraintDescription) {
        self.description = description
    }

    /// Attaches a debug label to the underlying constraint description and
    /// returns `self` so the call can be chained.
    @discardableResult
    public func labeled(_ label: String) -> ConstraintMakerFinalizable {
        self.description.label = label
        return self
    }

    /// The built constraint. Force-unwraps the description's constraint —
    /// presumably populated by the time the maker chain is finalized; a nil
    /// value here would crash (TODO confirm against the maker pipeline).
    public var constraint: Constraint {
        return self.description.constraint!
    }
}
|
{
"pile_set_name": "Github"
}
|
// Copyright 2016 The Elastos.ELA.SideChain.ETH Authors
// This file is part of the Elastos.ELA.SideChain.ETH library.
//
// The Elastos.ELA.SideChain.ETH library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Elastos.ELA.SideChain.ETH library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Elastos.ELA.SideChain.ETH library. If not, see <http://www.gnu.org/licenses/>.
package network
import (
"context"
"errors"
"fmt"
"net"
"sync"
"time"
"github.com/elastos/Elastos.ELA.SideChain.ETH/crypto"
"github.com/elastos/Elastos.ELA.SideChain.ETH/p2p"
"github.com/elastos/Elastos.ELA.SideChain.ETH/p2p/enode"
"github.com/elastos/Elastos.ELA.SideChain.ETH/p2p/protocols"
"github.com/elastos/Elastos.ELA.SideChain.ETH/rpc"
"github.com/elastos/Elastos.ELA.SideChain.ETH/swarm/log"
"github.com/elastos/Elastos.ELA.SideChain.ETH/swarm/state"
)
const (
	// DefaultNetworkID is the fallback swarm network identifier — presumably
	// used when no network ID is configured; confirm at call sites.
	DefaultNetworkID = 3
	// ProtocolMaxMsgSize maximum allowed message size
	ProtocolMaxMsgSize = 10 * 1024 * 1024
	// bzzHandshakeTimeout bounds how long a protocol session waits for the
	// bzz handshake to complete (see RunProtocol and performHandshake).
	bzzHandshakeTimeout = 3000 * time.Millisecond
)
// BzzSpec is the spec of the generic swarm handshake protocol; its single
// message type is HandshakeMsg.
var BzzSpec = &protocols.Spec{
	Name:       "bzz",
	Version:    7,
	MaxMsgSize: 10 * 1024 * 1024,
	Messages: []interface{}{
		HandshakeMsg{},
	},
}
// DiscoverySpec is the spec for the bzz discovery subprotocol ("hive"); it
// carries the peer-exchange messages peersMsg and subPeersMsg.
var DiscoverySpec = &protocols.Spec{
	Name:       "hive",
	Version:    6,
	MaxMsgSize: 10 * 1024 * 1024,
	Messages: []interface{}{
		peersMsg{},
		subPeersMsg{},
	},
}
// BzzConfig captures the config params used by the hive
type BzzConfig struct {
	OverlayAddr  []byte // base address of the overlay network
	UnderlayAddr []byte // node's underlay address
	HiveParams   *HiveParams
	NetworkID    uint64 // swarm network identifier; peers must match (see checkHandshake)
	LightNode    bool   // advertised to peers in the handshake
}
// Bzz is the swarm protocol bundle
type Bzz struct {
	*Hive
	NetworkID    uint64
	LightNode    bool
	localAddr    *BzzAddr // this node's own overlay/underlay address pair
	mtx          sync.Mutex // guards handshakes
	handshakes   map[enode.ID]*HandshakeMsg // in-flight/completed handshakes keyed by peer ID
	streamerSpec *protocols.Spec            // optional extra subprotocol spec (may be nil)
	streamerRun  func(*BzzPeer) error       // optional extra subprotocol run function (may be nil)
}
// NewBzz is the swarm protocol constructor
// arguments
// * bzz config
// * overlay driver
// * peer store
func NewBzz(config *BzzConfig, kad *Kademlia, store state.Store, streamerSpec *protocols.Spec, streamerRun func(*BzzPeer) error) *Bzz {
	bzz := &Bzz{
		Hive:         NewHive(config.HiveParams, kad, store),
		NetworkID:    config.NetworkID,
		LightNode:    config.LightNode,
		localAddr:    &BzzAddr{OAddr: config.OverlayAddr, UAddr: config.UnderlayAddr},
		handshakes:   make(map[enode.ID]*HandshakeMsg),
		streamerSpec: streamerSpec,
		streamerRun:  streamerRun,
	}
	return bzz
}
// UpdateLocalAddr updates the underlay address of the running node.
func (b *Bzz) UpdateLocalAddr(byteaddr []byte) *BzzAddr {
	updated := &BzzAddr{
		OAddr: b.localAddr.OAddr,
		UAddr: byteaddr,
	}
	b.localAddr = b.localAddr.Update(updated)
	return b.localAddr
}
// NodeInfo returns the node's overlay address (the raw OAddr bytes).
func (b *Bzz) NodeInfo() interface{} {
	return b.localAddr.Address()
}
// Protocols return the protocols swarm offers
// Bzz implements the node.Service interface
// * handshake/hive
// * discovery
func (b *Bzz) Protocols() []p2p.Protocol {
	protocol := []p2p.Protocol{
		{
			// Base bzz protocol: negotiates the handshake (see runBzz).
			Name:     BzzSpec.Name,
			Version:  BzzSpec.Version,
			Length:   BzzSpec.Length(),
			Run:      b.runBzz,
			NodeInfo: b.NodeInfo,
		},
		{
			// Discovery ("hive") subprotocol, gated on the bzz handshake
			// via RunProtocol.
			Name:     DiscoverySpec.Name,
			Version:  DiscoverySpec.Version,
			Length:   DiscoverySpec.Length(),
			Run:      b.RunProtocol(DiscoverySpec, b.Hive.Run),
			NodeInfo: b.Hive.NodeInfo,
			PeerInfo: b.Hive.PeerInfo,
		},
	}
	// An optional streamer subprotocol is appended only when both its spec
	// and run function were supplied to NewBzz.
	if b.streamerSpec != nil && b.streamerRun != nil {
		protocol = append(protocol, p2p.Protocol{
			Name:    b.streamerSpec.Name,
			Version: b.streamerSpec.Version,
			Length:  b.streamerSpec.Length(),
			Run:     b.RunProtocol(b.streamerSpec, b.streamerRun),
		})
	}
	return protocol
}
// APIs returns the APIs offered by bzz
// * hive
// Bzz implements the node.Service interface
func (b *Bzz) APIs() []rpc.API {
	hiveAPI := rpc.API{
		Namespace: "hive",
		Version:   "3.0",
		Service:   b.Hive,
	}
	return []rpc.API{hiveAPI}
}
// RunProtocol is a wrapper for swarm subprotocols
// returns a p2p protocol run function that can be assigned to p2p.Protocol#Run field
// arguments:
// * p2p protocol spec
// * run function taking BzzPeer as argument
//   this run function is meant to block for the duration of the protocol session
//   on return the session is terminated and the peer is disconnected
// the protocol waits until the bzz handshake is negotiated
// the overlay address on the BzzPeer is set from the remote handshake
func (b *Bzz) RunProtocol(spec *protocols.Spec, run func(*BzzPeer) error) func(*p2p.Peer, p2p.MsgReadWriter) error {
	return func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
		// wait for the bzz protocol to perform the handshake
		handshake, _ := b.GetHandshake(p.ID())
		defer b.removeHandshake(p.ID())
		select {
		case <-handshake.done:
			// handshake.done is closed by performHandshake once the bzz
			// handshake has settled (successfully or not).
		case <-time.After(bzzHandshakeTimeout):
			return fmt.Errorf("%08x: %s protocol timeout waiting for handshake on %08x", b.BaseAddr()[:4], spec.Name, p.ID().Bytes()[:4])
		}
		// A settled handshake may still have failed; surface the error.
		if handshake.err != nil {
			return fmt.Errorf("%08x: %s protocol closed: %v", b.BaseAddr()[:4], spec.Name, handshake.err)
		}
		// the handshake has succeeded so construct the BzzPeer and run the protocol
		peer := &BzzPeer{
			Peer:       protocols.NewPeer(p, rw, spec),
			BzzAddr:    handshake.peerAddr,
			lastActive: time.Now(),
			LightNode:  handshake.LightNode,
		}
		log.Debug("peer created", "addr", handshake.peerAddr.String())
		return run(peer)
	}
}
// performHandshake implements the negotiation of the bzz handshake
// shared among swarm subprotocols
func (b *Bzz) performHandshake(p *protocols.Peer, handshake *HandshakeMsg) error {
	ctx, cancel := context.WithTimeout(context.Background(), bzzHandshakeTimeout)
	defer func() {
		// Closing handshake.done signals any subprotocol blocked in
		// RunProtocol that the handshake has settled, success or failure.
		close(handshake.done)
		cancel()
	}()
	rsh, err := p.Handshake(ctx, handshake, b.checkHandshake)
	if err != nil {
		// Record the failure so waiters in RunProtocol can report it.
		handshake.err = err
		return err
	}
	// Stash the remote peer's advertised address and light-node flag for use
	// when constructing the BzzPeer.
	handshake.peerAddr = rsh.(*HandshakeMsg).Addr
	handshake.LightNode = rsh.(*HandshakeMsg).LightNode
	return nil
}
// runBzz is the p2p protocol run function for the bzz base protocol
// that negotiates the bzz handshake
func (b *Bzz) runBzz(p *p2p.Peer, rw p2p.MsgReadWriter) error {
	handshake, _ := b.GetHandshake(p.ID())
	// The init channel holds exactly one true value (set in GetHandshake);
	// receiving it claims the right to run the handshake. A second session
	// for the same peer would receive from the closed channel (false) below.
	if !<-handshake.init {
		return fmt.Errorf("%08x: bzz already started on peer %08x", b.localAddr.Over()[:4], p.ID().Bytes()[:4])
	}
	close(handshake.init)
	defer b.removeHandshake(p.ID())
	peer := protocols.NewPeer(p, rw, BzzSpec)
	err := b.performHandshake(peer, handshake)
	if err != nil {
		log.Warn(fmt.Sprintf("%08x: handshake failed with remote peer %08x: %v", b.localAddr.Over()[:4], p.ID().Bytes()[:4], err))
		return err
	}
	// fail if we get another handshake
	msg, err := rw.ReadMsg()
	if err != nil {
		return err
	}
	msg.Discard()
	return errors.New("received multiple handshakes")
}
// BzzPeer is the bzz protocol view of a protocols.Peer (itself an extension of p2p.Peer)
// implements the Peer interface and all interfaces Peer implements: Addr, OverlayPeer
type BzzPeer struct {
	*protocols.Peer            // represents the connection for online peers
	*BzzAddr                   // remote address -> implements Addr interface = protocols.Peer
	lastActive      time.Time  // time is updated whenever mutexes are releasing
	LightNode       bool       // whether the remote peer advertised itself as a light node
}
// NewBzzPeer wraps a protocols.Peer into a BzzPeer, deriving the bzz address
// from the peer's node record.
func NewBzzPeer(p *protocols.Peer) *BzzPeer {
	return &BzzPeer{Peer: p, BzzAddr: NewAddr(p.Node())}
}
// LastActive returns the time the peer was last active
func (p *BzzPeer) LastActive() time.Time {
	return p.lastActive
}
// ID returns the peer's underlay node identifier.
func (p *BzzPeer) ID() enode.ID {
	// This is here to resolve a method tie: both protocols.Peer and BzzAddr are embedded
	// into the struct and provide ID(). The protocols.Peer version is faster, ensure it
	// gets used.
	return p.Peer.ID()
}
/*
Handshake
* Version: 8 byte integer version of the protocol
* NetworkID: 8 byte integer network identifier
* Addr: the address advertised by the node including underlay and overlay connections
*/
type HandshakeMsg struct {
	Version   uint64
	NetworkID uint64
	Addr      *BzzAddr
	LightNode bool

	// peerAddr is the address received in the peer handshake
	peerAddr *BzzAddr

	init chan bool     // carries one token granting the right to run the handshake (see runBzz)
	done chan struct{} // closed once the handshake has settled (see performHandshake)
	err  error         // handshake failure, if any; read by RunProtocol after done is closed
}
// String pretty prints the handshake
func (bh *HandshakeMsg) String() string {
	return fmt.Sprintf("Handshake: Version: %v, NetworkID: %v, Addr: %v, LightNode: %v, peerAddr: %v", bh.Version, bh.NetworkID, bh.Addr, bh.LightNode, bh.peerAddr)
}
// checkHandshake validates the remote handshake message, rejecting peers on a
// different network or speaking a different bzz protocol version.
func (b *Bzz) checkHandshake(hs interface{}) error {
	remote := hs.(*HandshakeMsg)
	if remote.NetworkID != b.NetworkID {
		return fmt.Errorf("network id mismatch %d (!= %d)", remote.NetworkID, b.NetworkID)
	}
	if remote.Version != uint64(BzzSpec.Version) {
		return fmt.Errorf("version mismatch %d (!= %d)", remote.Version, BzzSpec.Version)
	}
	return nil
}
// removeHandshake removes handshake for peer with peerID
// from the bzz handshake store
func (b *Bzz) removeHandshake(peerID enode.ID) {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	delete(b.handshakes, peerID)
}
// GetHandshake returns the bzz handshake record for the remote peer with
// peerID, creating and registering a fresh one if none exists yet. The second
// return value reports whether the handshake already existed.
func (b *Bzz) GetHandshake(peerID enode.ID) (*HandshakeMsg, bool) {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	handshake, found := b.handshakes[peerID]
	if !found {
		handshake = &HandshakeMsg{
			Version:   uint64(BzzSpec.Version),
			NetworkID: b.NetworkID,
			Addr:      b.localAddr,
			LightNode: b.LightNode,
			init:      make(chan bool, 1),
			done:      make(chan struct{}),
		}
		// when a handshake is first created for a remote peer, a single true
		// token is placed on the init channel; runBzz consumes it so only one
		// protocol session can start the handshake for this peer.
		handshake.init <- true
		b.handshakes[peerID] = handshake
	}
	return handshake, found
}
// BzzAddr implements the PeerAddr interface
type BzzAddr struct {
	OAddr []byte // overlay address
	UAddr []byte // underlay address (textual enode record, see ID/NewAddr)
}
// Address implements OverlayPeer interface to be used in Overlay.
// It returns the overlay address bytes.
func (a *BzzAddr) Address() []byte {
	return a.OAddr
}
// Over returns the overlay address (alias of Address).
func (a *BzzAddr) Over() []byte {
	return a.OAddr
}
// Under returns the underlay address.
func (a *BzzAddr) Under() []byte {
	return a.UAddr
}
// ID returns the node identifier in the underlay.
// If the underlay address cannot be parsed as an enode record, the zero ID is
// returned; the parse error is deliberately discarded.
func (a *BzzAddr) ID() enode.ID {
	n, err := enode.ParseV4(string(a.UAddr))
	if err != nil {
		return enode.ID{}
	}
	return n.ID()
}
// Update updates the underlay address of a peer record: the result keeps this
// record's overlay address and takes na's underlay address.
func (a *BzzAddr) Update(na *BzzAddr) *BzzAddr {
	return &BzzAddr{a.OAddr, na.UAddr}
}
// String pretty prints the address as "<hex overlay> <underlay>".
func (a *BzzAddr) String() string {
	return fmt.Sprintf("%x <%s>", a.OAddr, a.UAddr)
}
// RandomAddr is a utility method generating an address from a public key.
// The node record is pinned to 127.0.0.1:20638, so this is only suitable for
// testing/local use.
func RandomAddr() *BzzAddr {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic("unable to generate key")
	}
	node := enode.NewV4(&key.PublicKey, net.IP{127, 0, 0, 1}, 20638, 20638)
	return NewAddr(node)
}
// NewAddr constructs a BzzAddr from a node record: the overlay address is the
// node ID bytes and the underlay address is the textual enode URL.
func NewAddr(node *enode.Node) *BzzAddr {
	return &BzzAddr{OAddr: node.ID().Bytes(), UAddr: []byte(node.String())}
}
|
{
"pile_set_name": "Github"
}
|
from presidio_analyzer import EntityRecognizer
class LocalRecognizer(EntityRecognizer):
    """Base class for recognizers that run locally, in-process.

    Thin wrapper around :class:`EntityRecognizer` that forwards the common
    constructor arguments.
    """
    # pylint: disable=abstract-method, unused-argument
    def __init__(self, supported_entities, supported_language, name=None,
                 version=None, **kwargs):
        # Extra **kwargs are accepted but intentionally not forwarded to the
        # parent (see the unused-argument pragma above).
        super().__init__(supported_entities=supported_entities,
                         supported_language=supported_language, name=name,
                         version=version)
|
{
"pile_set_name": "Github"
}
|
{
"indent": {
"BinaryExpression": 1
}
}
|
{
"pile_set_name": "Github"
}
|
<?php
namespace Lexik\Bundle\MaintenanceBundle\Tests\Maintenance;
use Lexik\Bundle\MaintenanceBundle\Drivers\FileDriver;
use Lexik\Bundle\MaintenanceBundle\Tests\TestHelper;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\ParameterBag\ParameterBag;
use Symfony\Component\Translation\MessageSelector;
/**
* Test driver file
*
* @package LexikMaintenanceBundle
* @author Gilles Gauthier <g.gauthier@lexik.fr>
*/
class FileMaintenanceTest extends TestCase
{
static protected $tmpDir;
protected $container;
public static function setUpBeforeClass()
{
parent::setUpBeforeClass();
self::$tmpDir = sys_get_temp_dir().'/symfony2_finder';
}
public function setUp()
{
$this->container = $this->initContainer();
}
public function tearDown()
{
$this->container = null;
}
public function testDecide()
{
$options = array('file_path' => self::$tmpDir.'/lock.lock');
$fileM = new FileDriver($options);
$fileM->setTranslator($this->getTranslator());
$this->assertTrue($fileM->decide());
$options = array('file_path' => self::$tmpDir.'/clok');
$fileM2 = new FileDriver($options);
$fileM2->setTranslator($this->getTranslator());
$this->assertFalse($fileM2->decide());
}
/**
* @expectedException InvalidArgumentException
*/
public function testExceptionInvalidPath()
{
$fileM = new FileDriver(array());
$fileM->setTranslator($this->getTranslator());
}
public function testLock()
{
$options = array('file_path' => self::$tmpDir.'/lock.lock');
$fileM = new FileDriver($options);
$fileM->setTranslator($this->getTranslator());
$fileM->lock();
$this->assertFileExists($options['file_path']);
}
public function testUnlock()
{
$options = array('file_path' => self::$tmpDir.'/lock.lock');
$fileM = new FileDriver($options);
$fileM->setTranslator($this->getTranslator());
$fileM->lock();
$fileM->unlock();
$this->assertFileNotExists($options['file_path']);
}
public function testIsExists()
{
$options = array('file_path' => self::$tmpDir.'/lock.lock', 'ttl' => 3600);
$fileM = new FileDriver($options);
$fileM->setTranslator($this->getTranslator());
$fileM->lock();
$this->assertTrue($fileM->isEndTime(3600));
}
public function testMessages()
{
$options = array('file_path' => self::$tmpDir.'/lock.lock', 'ttl' => 3600);
$fileM = new FileDriver($options);
$fileM->setTranslator($this->getTranslator());
$fileM->lock();
// lock
$this->assertEquals($fileM->getMessageLock(true), 'lexik_maintenance.success_lock_file');
$this->assertEquals($fileM->getMessageLock(false), 'lexik_maintenance.not_success_lock');
// unlock
$this->assertEquals($fileM->getMessageUnlock(true), 'lexik_maintenance.success_unlock');
$this->assertEquals($fileM->getMessageUnlock(false), 'lexik_maintenance.not_success_unlock');
}
static public function tearDownAfterClass()
{
parent::tearDownAfterClass();
}
protected function initContainer()
{
$container = new ContainerBuilder(new ParameterBag(array(
'kernel.debug' => false,
'kernel.bundles' => array('MaintenanceBundle' => 'Lexik\Bundle\MaintenanceBundle'),
'kernel.cache_dir' => sys_get_temp_dir(),
'kernel.environment' => 'dev',
'kernel.root_dir' => __DIR__.'/../../../../', // src dir
'kernel.default_locale' => 'fr',
)));
return $container;
}
public function getTranslator()
{
    // Translator built around a MessageSelector mock (its real constructor is bypassed).
    /** @var MessageSelector|\PHPUnit_Framework_MockObject_MockObject $selector */
    $selector = $this->getMockBuilder('Symfony\Component\Translation\MessageSelector')
        ->disableOriginalConstructor()
        ->getMock();
    return TestHelper::getTranslator($this->container, $selector);
}
}
|
{
"pile_set_name": "Github"
}
|
# onceonly.m4 serial 7
dnl Copyright (C) 2002-2003, 2005-2006, 2008-2011 Free Software Foundation,
dnl Inc.
dnl This file is free software, distributed under the terms of the GNU
dnl General Public License. As a special exception to the GNU General
dnl Public License, this file may be distributed as part of a program
dnl that contains a configuration script generated by Autoconf, under
dnl the same distribution terms as the rest of that program.
dnl This file defines some "once only" variants of standard autoconf macros.
dnl AC_CHECK_HEADERS_ONCE like AC_CHECK_HEADERS
dnl AC_CHECK_FUNCS_ONCE like AC_CHECK_FUNCS
dnl AC_CHECK_DECLS_ONCE like AC_CHECK_DECLS
dnl AC_REQUIRE([AC_FUNC_STRCOLL]) like AC_FUNC_STRCOLL
dnl The advantage is that the check for each of the headers/functions/decls
dnl will be put only once into the 'configure' file. It keeps the size of
dnl the 'configure' file down, and avoids redundant output when 'configure'
dnl is run.
dnl The drawback is that the checks cannot be conditionalized. If you write
dnl if some_condition; then gl_CHECK_HEADERS(stdlib.h); fi
dnl inside an AC_DEFUNed function, the gl_CHECK_HEADERS macro call expands to
dnl empty, and the check will be inserted before the body of the AC_DEFUNed
dnl function.
dnl The original code implemented AC_CHECK_HEADERS_ONCE and AC_CHECK_FUNCS_ONCE
dnl in terms of AC_DEFUN and AC_REQUIRE. This implementation uses diversions to
dnl named sections DEFAULTS and INIT_PREPARE in order to check all requested
dnl headers at once, thus reducing the size of 'configure'. It is known to work
dnl with autoconf 2.57..2.62 at least. The size reduction is ca. 9%.
dnl Autoconf version 2.59 plus gnulib is required; this file is not needed
dnl with Autoconf 2.60 or greater. But note that autoconf's implementation of
dnl AC_CHECK_DECLS_ONCE expects a comma-separated list of symbols as first
dnl argument!
AC_PREREQ([2.59])
# AC_CHECK_HEADERS_ONCE(HEADER1 HEADER2 ...) is a once-only variant of
# AC_CHECK_HEADERS(HEADER1 HEADER2 ...).
dnl For each header a helper macro gl_CHECK_HEADER_<mangled-name> (dots,
dnl slashes and dashes become underscores) is AC_DEFUNed and immediately
dnl AC_REQUIREd, so repeated requests for the same header expand to nothing
dnl after the first.  The helper only appends the header to the shell
dnl variable gl_header_list (in the INIT_PREPARE diversion); the actual
dnl checks are emitted in one batch by gl_HEADERS_EXPANSION below.
AC_DEFUN([AC_CHECK_HEADERS_ONCE], [
:
m4_foreach_w([gl_HEADER_NAME], [$1], [
AC_DEFUN([gl_CHECK_HEADER_]m4_quote(m4_translit(gl_HEADER_NAME,
[./-], [___])), [
m4_divert_text([INIT_PREPARE],
[gl_header_list="$gl_header_list gl_HEADER_NAME"])
gl_HEADERS_EXPANSION
AH_TEMPLATE(AS_TR_CPP([HAVE_]m4_defn([gl_HEADER_NAME])),
[Define to 1 if you have the <]m4_defn([gl_HEADER_NAME])[> header file.])
])
AC_REQUIRE([gl_CHECK_HEADER_]m4_quote(m4_translit(gl_HEADER_NAME,
[./-], [___])))
])
])
dnl Emits one AC_CHECK_HEADERS over everything collected in $gl_header_list,
dnl then redefines itself to empty so only its first use expands.
m4_define([gl_HEADERS_EXPANSION], [
m4_divert_text([DEFAULTS], [gl_header_list=])
AC_CHECK_HEADERS([$gl_header_list])
m4_define([gl_HEADERS_EXPANSION], [])
])
# AC_CHECK_FUNCS_ONCE(FUNC1 FUNC2 ...) is a once-only variant of
# AC_CHECK_FUNCS(FUNC1 FUNC2 ...).
dnl Same batching scheme as AC_CHECK_HEADERS_ONCE above: each function gets
dnl an AC_DEFUNed + AC_REQUIREd helper that appends the name to the shell
dnl variable gl_func_list; gl_FUNCS_EXPANSION then performs one combined
dnl AC_CHECK_FUNCS over the whole list.
AC_DEFUN([AC_CHECK_FUNCS_ONCE], [
:
m4_foreach_w([gl_FUNC_NAME], [$1], [
AC_DEFUN([gl_CHECK_FUNC_]m4_defn([gl_FUNC_NAME]), [
m4_divert_text([INIT_PREPARE],
[gl_func_list="$gl_func_list gl_FUNC_NAME"])
gl_FUNCS_EXPANSION
AH_TEMPLATE(AS_TR_CPP([HAVE_]m4_defn([gl_FUNC_NAME])),
[Define to 1 if you have the `]m4_defn([gl_FUNC_NAME])[' function.])
])
AC_REQUIRE([gl_CHECK_FUNC_]m4_defn([gl_FUNC_NAME]))
])
])
dnl Emits one AC_CHECK_FUNCS over everything collected in $gl_func_list,
dnl then redefines itself to empty so only its first use expands.
m4_define([gl_FUNCS_EXPANSION], [
m4_divert_text([DEFAULTS], [gl_func_list=])
AC_CHECK_FUNCS([$gl_func_list])
m4_define([gl_FUNCS_EXPANSION], [])
])
# AC_CHECK_DECLS_ONCE(DECL1 DECL2 ...) is a once-only variant of
# AC_CHECK_DECLS(DECL1, DECL2, ...).
dnl Unlike the header/function variants above, declarations are not batched:
dnl each declaration gets its own AC_DEFUNed + AC_REQUIREd helper that runs
dnl AC_CHECK_DECLS for just that symbol, at most once per configure script.
AC_DEFUN([AC_CHECK_DECLS_ONCE], [
:
m4_foreach_w([gl_DECL_NAME], [$1], [
AC_DEFUN([gl_CHECK_DECL_]m4_defn([gl_DECL_NAME]), [
AC_CHECK_DECLS(m4_defn([gl_DECL_NAME]))
])
AC_REQUIRE([gl_CHECK_DECL_]m4_defn([gl_DECL_NAME]))
])
])
|
{
"pile_set_name": "Github"
}
|
// Copyright 2016 Documize Inc. <legal@documize.com>. All rights reserved.
//
// This software (Documize Community Edition) is licensed under
// GNU AGPL v3 http://www.gnu.org/licenses/agpl-3.0.en.html
//
// You can operate outside the AGPL restrictions by purchasing
// Documize Enterprise Edition and obtaining a commercial license
// by contacting <sales@documize.com>.
//
// https://documize.com
import Route from '@ember/routing/route';
// Intentionally empty route: it adds no hooks or properties of its own and
// inherits all behavior from the base Ember Route.
export default Route.extend({
});
|
{
"pile_set_name": "Github"
}
|
# UI strings. Buttons and similar.
[ui_pager_prev]
other = "Previous"
[ui_pager_next]
other = "Next"
[ui_read_more]
other = "Read More"
[ui_search]
other = "Search this site…"
# Used in sentences such as "Posted in News"
[ui_in]
other = "in"
# Footer text
[footer_all_rights_reserved]
other = "All Rights Reserved"
[footer_privacy_policy]
other = "Privacy Policy"
# Post (blog, articles etc.)
[post_byline_by]
other = "By"
[post_created]
other = "Created"
[post_last_mod]
other = "Last modified"
[post_edit_this]
other = "Edit this page"
[post_create_issue]
other = "Create issue"
[post_posts_in]
other = "Posts in"
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>classNames</key>
<dict>
<key>UIImageExtensionTests</key>
<dict>
<key>testPerformanceToGrayscale()</key>
<dict>
<key>com.apple.XCTPerformanceMetric_WallClockTime</key>
<dict>
<key>baselineAverage</key>
<real>0.49576</real>
<key>baselineIntegrationDisplayName</key>
<string>Local Baseline</string>
</dict>
</dict>
</dict>
</dict>
</dict>
</plist>
|
{
"pile_set_name": "Github"
}
|
package nc.recipe.multiblock;
import static nc.config.NCConfig.*;
import java.util.*;
import com.google.common.collect.Lists;
import nc.radiation.RadSources;
import nc.recipe.ProcessorRecipeHandler;
public class FissionIrradiatorRecipes extends ProcessorRecipeHandler {

	/** Registers the "fission_irradiator" recipe group (1 item in, 1 item out, no fluids). */
	public FissionIrradiatorRecipes() {
		super("fission_irradiator", 1, 0, 1, 0);
	}

	@Override
	public void addRecipes() {
		// Thorium -> TBP, TBP -> Pa-233, Bismuth -> Polonium.  Heat-per-flux and
		// efficiency come from the statically imported NCConfig arrays; the
		// radiation extra comes from RadSources.
		addRecipe(Lists.newArrayList("ingotThorium", "dustThorium"), "dustTBP", 160000, fission_irradiator_heat_per_flux[0], fission_irradiator_efficiency[0], RadSources.THORIUM);
		addRecipe(Lists.newArrayList("ingotTBP", "dustTBP"), "dustProtactinium233", 2720000, fission_irradiator_heat_per_flux[1], fission_irradiator_efficiency[1], RadSources.TBP);
		addRecipe(Lists.newArrayList("ingotBismuth", "dustBismuth"), "dustPolonium", 1920000, fission_irradiator_heat_per_flux[2], fission_irradiator_efficiency[2], RadSources.BISMUTH);
	}

	/**
	 * Normalizes the raw extras list to exactly four entries:
	 * index 0 an Integer (default 1), indexes 1-3 Doubles (default 0).
	 * Entries of the wrong type (or missing) are replaced by the default.
	 */
	@Override
	public List fixExtras(List extras) {
		List fixed = new ArrayList(4);
		Object first = extras.size() > 0 ? extras.get(0) : null;
		fixed.add(first instanceof Integer ? (int) first : 1);
		for (int i = 1; i <= 3; ++i) {
			Object extra = extras.size() > i ? extras.get(i) : null;
			fixed.add(extra instanceof Double ? (double) extra : 0D);
		}
		return fixed;
	}
}
|
{
"pile_set_name": "Github"
}
|
// Minimal ambient typings for the 'json-to-ast' package: only the default
// parse() entry point is declared, and the returned AST is left untyped (any).
declare module 'json-to-ast' {
export default function parse(code: string, options?: { source?: string }): any;
}
|
{
"pile_set_name": "Github"
}
|
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=US-ASCII">
<title>ip::basic_resolver::async_resolve (2 of 6 overloads)</title>
<link rel="stylesheet" href="../../../../boostbook.css" type="text/css">
<meta name="generator" content="DocBook XSL Stylesheets V1.75.2">
<link rel="home" href="../../../../index.html" title="Asio">
<link rel="up" href="../async_resolve.html" title="ip::basic_resolver::async_resolve">
<link rel="prev" href="overload1.html" title="ip::basic_resolver::async_resolve (1 of 6 overloads)">
<link rel="next" href="overload3.html" title="ip::basic_resolver::async_resolve (3 of 6 overloads)">
</head>
<body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF">
<table cellpadding="2" width="100%"><tr><td valign="top"><img alt="asio C++ library" width="250" height="60" src="../../../../asio.png"></td></tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="overload1.html"><img src="../../../../prev.png" alt="Prev"></a><a accesskey="u" href="../async_resolve.html"><img src="../../../../up.png" alt="Up"></a><a accesskey="h" href="../../../../index.html"><img src="../../../../home.png" alt="Home"></a><a accesskey="n" href="overload3.html"><img src="../../../../next.png" alt="Next"></a>
</div>
<div class="section">
<div class="titlepage"><div><div><h5 class="title">
<a name="asio.reference.ip__basic_resolver.async_resolve.overload2"></a><a class="link" href="overload2.html" title="ip::basic_resolver::async_resolve (2 of 6 overloads)">ip::basic_resolver::async_resolve
(2 of 6 overloads)</a>
</h5></div></div></div>
<p>
Asynchronously perform forward resolution of a query to a list of entries.
</p>
<pre class="programlisting">template<
typename <a class="link" href="../../ResolveHandler.html" title="Resolve handler requirements">ResolveHandler</a>>
<a class="link" href="../../asynchronous_operations.html#asio.reference.asynchronous_operations.automatic_deduction_of_initiating_function_return_type"><span class="emphasis"><em>DEDUCED</em></span></a> async_resolve(
string_view host,
string_view service,
ResolveHandler && handler);
</pre>
<p>
This function is used to resolve host and service names into a list of
endpoint entries.
</p>
<h6>
<a name="asio.reference.ip__basic_resolver.async_resolve.overload2.h0"></a>
<span><a name="asio.reference.ip__basic_resolver.async_resolve.overload2.parameters"></a></span><a class="link" href="overload2.html#asio.reference.ip__basic_resolver.async_resolve.overload2.parameters">Parameters</a>
</h6>
<div class="variablelist">
<p class="title"><b></b></p>
<dl>
<dt><span class="term">host</span></dt>
<dd><p>
A string identifying a location. May be a descriptive name or a
numeric address string. If an empty string and the passive flag
has been specified, the resolved endpoints are suitable for local
service binding. If an empty string and passive is not specified,
the resolved endpoints will use the loopback address.
</p></dd>
<dt><span class="term">service</span></dt>
<dd><p>
A string identifying the requested service. This may be a descriptive
name or a numeric string corresponding to a port number. May be
an empty string, in which case all resolved endpoints will have
a port number of 0.
</p></dd>
<dt><span class="term">handler</span></dt>
<dd>
<p>
The handler to be called when the resolve operation completes.
Copies will be made of the handler as required. The function signature
of the handler must be:
</p>
<pre class="programlisting">void handler(
const asio::error_code& error, // Result of operation.
resolver::results_type results // Resolved endpoints as a range.
);
</pre>
<p>
Regardless of whether the asynchronous operation completes immediately
or not, the handler will not be invoked from within this function.
On immediate completion, invocation of the handler will be performed
in a manner equivalent to using <a class="link" href="../../post.html" title="post"><code class="computeroutput">post</code></a>.
</p>
</dd>
</dl>
</div>
<p>
A successful resolve operation is guaranteed to pass a non-empty range
to the handler.
</p>
<h6>
<a name="asio.reference.ip__basic_resolver.async_resolve.overload2.h1"></a>
<span><a name="asio.reference.ip__basic_resolver.async_resolve.overload2.remarks"></a></span><a class="link" href="overload2.html#asio.reference.ip__basic_resolver.async_resolve.overload2.remarks">Remarks</a>
</h6>
<p>
On POSIX systems, host names may be locally defined in the file <code class="computeroutput">/etc/hosts</code>.
On Windows, host names may be defined in the file <code class="computeroutput">c:\windows\system32\drivers\etc\hosts</code>.
Remote host name resolution is performed using DNS. Operating systems
may use additional locations when resolving host names (such as NETBIOS
names on Windows).
</p>
<p>
On POSIX systems, service names are typically defined in the file <code class="computeroutput">/etc/services</code>.
On Windows, service names may be found in the file <code class="computeroutput">c:\windows\system32\drivers\etc\services</code>.
Operating systems may use additional locations when resolving service
names.
</p>
</div>
<table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr>
<td align="left"></td>
<td align="right"><div class="copyright-footer">Copyright © 2003-2019 Christopher M. Kohlhoff<p>
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>)
</p>
</div></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="overload1.html"><img src="../../../../prev.png" alt="Prev"></a><a accesskey="u" href="../async_resolve.html"><img src="../../../../up.png" alt="Up"></a><a accesskey="h" href="../../../../index.html"><img src="../../../../home.png" alt="Home"></a><a accesskey="n" href="overload3.html"><img src="../../../../next.png" alt="Next"></a>
</div>
</body>
</html>
|
{
"pile_set_name": "Github"
}
|
{\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf210
{\fonttbl\f0\fnil\fcharset0 PragmataPro;\f1\fnil\fcharset0 PragmataPro-Italic;\f2\fnil\fcharset0 PragmataPro-Bold;
\f3\fnil\fcharset0 PragmataPro-BoldItalic;}
{\colortbl;\red255\green255\blue255;}
\paperw11900\paperh16840\margl1440\margr1440\vieww12040\viewh18700\viewkind0
\deftab720
{\*\background {\shp{\*\shpinst\shpleft0\shptop0\shpright0\shpbottom0\shpfhdr0\shpbxmargin\shpbymargin\shpwr0\shpwrk0\shpfblwtxt1\shpz0\shplid1025{\sp{\sn shapeType}{\sv 1}}{\sp{\sn fFlipH}{\sv 0}}{\sp{\sn fFlipV}{\sv 0}}{\sp{\sn fillColor}{\sv 3944939}}{\sp{\sn fFilled}{\sv 1}}{\sp{\sn lineWidth}{\sv 0}}{\sp{\sn fLine}{\sv 0}}{\sp{\sn bWMode}{\sv 9}}{\sp{\sn fBackground}{\sv 1}}}}}
\pard\pardeftab720
\f0\fs58 \AppleTypeServices\AppleTypeServicesF2293778 \cf1 \
PragmataPro Serif Italic\
\pard\pardeftab720
\AppleTypeServices\AppleTypeServicesF2293776\AppleTypeServicesF2293780 \cf1 PragmataPro Serif Bold\
\pard\pardeftab720
\AppleTypeServices\AppleTypeServicesF2293780 \cf1 PragmataPro Serif BoldItalic\
\
\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\pardirnatural
\fs48 \AppleTypeServices\AppleTypeServicesF2293780 \cf1 s\AppleTypeServices (\AppleTypeServices\AppleTypeServicesF2293778 t\AppleTypeServices )=\AppleTypeServices\AppleTypeServicesF2293780 A\AppleTypeServices sin(2\AppleTypeServices\AppleTypeServicesF2293778 wt\AppleTypeServices )\'b7cos(
\f1\i \AppleTypeServices\AppleTypeServicesF2293776 x
\f0\i0 \AppleTypeServices \'b3-4)\
\AppleTypeServices\AppleTypeServicesF2293780 V-E+F=2 \AppleTypeServices\AppleTypeServicesF2293778 h\AppleTypeServices \'b2(3\AppleTypeServices\AppleTypeServicesF2293778 r\AppleTypeServices -\AppleTypeServices\AppleTypeServicesF2293778 h\AppleTypeServices )
\fs82 \AppleTypeServices\AppleTypeServicesF2293776\AppleTypeServicesF2293780 \
\pard\pardeftab720
\AppleTypeServices\AppleTypeServicesF2293780\AppleTypeServicesF2293776 \cf1 \
PragmataPro Serif Bold\AppleTypeServices\AppleTypeServicesF2293778 \cf0 \
\pard\pardeftab720
\AppleTypeServices\AppleTypeServicesF2293776\AppleTypeServicesF2293780 \cf0 Patience and perseverance have a magical effect before which difficulties disappear and obstacles vanish.
\fs96 \AppleTypeServices\AppleTypeServicesF2293780\AppleTypeServicesF2293776 \
\fs48 \AppleTypeServices\AppleTypeServicesF2293776 \
\fs40 \AppleTypeServices\AppleTypeServicesF2293776 You can find these glyphs in the PragmataPro
\f2\b \AppleTypeServices 0.819
\f0\b0 \AppleTypeServices\AppleTypeServicesF2293776 charset Unicode range Mathematical Alphanumeric Symbols.\
\pard\pardeftab720
\AppleTypeServices\AppleTypeServicesF2293780\AppleTypeServicesF2293776 \cf0 Also available using OpenType panel selecting \AppleTypeServices\AppleTypeServicesF2293770 (SS08)\
\pard\pardeftab720
\AppleTypeServices \cf0 ABCDEFGHIJKLMNopqrstuvwxyz \uc0\u8594 \AppleTypeServices\AppleTypeServicesF2293776 ABCDEFGHIJKLMNopqrstuvwxyz
\fs48 \AppleTypeServices\AppleTypeServicesF2293776 \
\pard\pardeftab720
\fs40 \AppleTypeServices\AppleTypeServicesF2293776 \cf0 \
In monetary economics, a money multiplier is one of various closely related ratios of commercial bank money to central bank money under a fractional-reserve banking system. Most often, it measures the maximum amount of commercial bank money that can be created by a given unit of central bank money. That is, in a fractional-reserve banking system, the total amount of loans that commercial banks are allowed to extend
\f2\b \AppleTypeServices (
\f0\b0 \AppleTypeServices\AppleTypeServicesF2293776 the commercial bank money that they can legally create
\f2\b \AppleTypeServices )
\f0\b0 \AppleTypeServices\AppleTypeServicesF2293776 is a multiple of reserves; this multiple is the reciprocal of the reserve ratio, and it is an economic multiplier.\AppleTypeServices\AppleTypeServicesF2293770 \
\pard\pardeftab720
\fs96 \AppleTypeServices\AppleTypeServicesF2293778 \cf0 \
\pard\pardeftab720
\fs82 \AppleTypeServices\AppleTypeServicesF2293780 \cf1 PragmataPro Serif BoldItalic\AppleTypeServices\AppleTypeServicesF2293778 \cf0 \
\AppleTypeServices\AppleTypeServicesF2293780 Patience and perseverance have a magical effect before which difficulties disappear and obstacles vanish.
\fs96 \AppleTypeServices\AppleTypeServicesF2293780 \
\pard\pardeftab720
\fs48 \AppleTypeServices\AppleTypeServicesF2293780 \cf0 \
\fs40 \AppleTypeServices\AppleTypeServicesF2293780 You can find these glyphs in the PragmataPro
\f3\i\b \AppleTypeServices 0.819
\f0\i0\b0 \AppleTypeServices\AppleTypeServicesF2293780 charset Unicode range Mathematical Alphanumeric Symbols.\
Also available using OpenType panel selecting\AppleTypeServices\AppleTypeServicesF2293776\AppleTypeServicesF2293780 \AppleTypeServices\AppleTypeServicesF2293770 (SS10)\
\pard\pardeftab720
\AppleTypeServices \cf0 ABCDEFGHIJKLMNopqrstuvwxyz \uc0\u8594 \AppleTypeServices\AppleTypeServicesF2293780 ABCDEFGHIJKLMNopqrstuvwxyz
\fs48 \AppleTypeServices\AppleTypeServicesF2293780 \
\pard\pardeftab720
\fs40 \AppleTypeServices\AppleTypeServicesF2293780 \cf0 \
If banks lend out close to the maximum allowed by their reserves, then the inequality becomes an approximate equality
\f3\i\b \AppleTypeServices ,
\f0\i0\b0 \AppleTypeServices\AppleTypeServicesF2293780 and commercial bank money is central bank money times the multiplier. If banks instead lend less than the maximum
\f3\i\b \AppleTypeServices ,
\f0\i0\b0 \AppleTypeServices\AppleTypeServicesF2293780 accumulating excess reserves
\f3\i\b \AppleTypeServices ,
\f0\i0\b0 \AppleTypeServices\AppleTypeServicesF2293780 then commercial bank money will be less than central bank money times the theoretical multiplier.
\fs36 \AppleTypeServices\AppleTypeServicesF2293780 \
\pard\pardeftab720
\fs96 \AppleTypeServices\AppleTypeServicesF2293780 \cf0 \
\pard\pardeftab720
\fs82 \AppleTypeServices\AppleTypeServicesF2293778 \cf1 PragmataPro Serif Italic\AppleTypeServices\AppleTypeServicesF2293770 \cf0 \
\pard\pardeftab720
\AppleTypeServices\AppleTypeServicesF2293778 \cf0 Patience and perseverance have a magical effect before which difficulties disappear and obstacles vanish.
\fs96 \AppleTypeServices\AppleTypeServicesF2293778 \
\pard\pardeftab720
\fs48 \AppleTypeServices\AppleTypeServicesF2293778\AppleTypeServicesF2293780 \cf0 \
\fs40 \AppleTypeServices\AppleTypeServicesF2293778\AppleTypeServicesF2293780 You can find these glyphs in the PragmataPro
\f1\i \AppleTypeServices 0.819
\f0\i0 \AppleTypeServices\AppleTypeServicesF2293778\AppleTypeServicesF2293780 charset Unicode range Mathematical Alphanumeric Symbols.\
Also available using OpenType panel selecting\AppleTypeServices\AppleTypeServicesF2293780\AppleTypeServicesF2293776 \AppleTypeServices\AppleTypeServicesF2293770 (SS09)\
\pard\pardeftab720
\AppleTypeServices \cf0 ABCDEFGHIJKLMNopqrstuvwxyz \uc0\u8594 \AppleTypeServices\AppleTypeServicesF2293778 ABCDEFGHIJKLMNopqrstuvwxyz
\fs48 \AppleTypeServices\AppleTypeServicesF2293778 \
\pard\pardeftab720
\fs40 \AppleTypeServices\AppleTypeServicesF2293778 \cf0 \
In equations, writing M for commercial bank money (loans), R for reserves (central bank money), and RR for the reserve ratio, the reserve ratio requirement is that R/M \uc0\u8805 RR; the fraction of reserves must be at least the reserve ratio. Taking the reciprocal, M/R \u8804 1/RR, which yields M \u8804 R \'d7 (1/RR), meaning that commercial bank money is at most reserves times (1/RR), the latter being the multiplier.
\fs36 \AppleTypeServices\AppleTypeServicesF2293780 \
\
}
|
{
"pile_set_name": "Github"
}
|
/* Copyright (C) 1993, 2000 Aladdin Enterprises. All rights reserved.
This software is provided AS-IS with no warranty, either express or
implied.
This software is distributed under license and may not be copied,
modified or distributed except as expressly authorized under the terms
of the license contained in the file LICENSE in this distribution.
For more information about licensing, please refer to
http://www.ghostscript.com/licensing/. For information on
commercial licensing, go to http://www.artifex.com/licensing/ or
contact Artifex Software, Inc., 101 Lucas Valley Road #110,
San Rafael, CA 94903, U.S.A., +1(415)492-9861.
*/
/*$Id: gxacpath.c,v 1.10 2004/08/04 19:36:12 stefan Exp $ */
/* Accumulator for clipping paths */
#include "gx.h"
#include "gserrors.h"
#include "gsrop.h"
#include "gsstruct.h"
#include "gsutil.h"
#include "gsdcolor.h"
#include "gxdevice.h"
#include "gxfixed.h"
#include "gxistate.h"
#include "gzpath.h"
#include "gxpaint.h"
#include "gzcpath.h"
#include "gzacpath.h"
/* Device procedures */
private dev_proc_open_device(accum_open);
private dev_proc_close_device(accum_close);
private dev_proc_fill_rectangle(accum_fill_rectangle);
/* The device descriptor */
/* Many of these procedures won't be called; they are set to NULL. */
/*
 * Pseudo-device used to accumulate a clipping path: filling a path into
 * this device records the generated rectangles (via accum_fill_rectangle)
 * into a clip list instead of rendering anything.  Only open, close and
 * fill_rectangle have real implementations; the gx_default_* entries fall
 * back to the generic path/image machinery, which ultimately reduces to
 * fill_rectangle calls on this device.  Nominal size is 1x1 (see the
 * std_device_std_body arguments); the real extent is tracked in bbox.
 */
private const gx_device_cpath_accum gs_cpath_accum_device =
{std_device_std_body(gx_device_cpath_accum, 0, "clip list accumulator",
0, 0, 1, 1),
{accum_open,
NULL,
NULL,
NULL,
accum_close,
NULL,
NULL,
accum_fill_rectangle,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
gx_default_fill_path,
gx_default_stroke_path,
NULL,
gx_default_fill_trapezoid,
gx_default_fill_parallelogram,
gx_default_fill_triangle,
gx_default_draw_thin_line,
gx_default_begin_image,
gx_default_image_data,
gx_default_end_image,
NULL,
NULL,
gx_get_largest_clipping_box,
gx_default_begin_typed_image,
NULL,
NULL,
NULL,
NULL,
gx_default_text_begin,
gx_default_finish_copydevice,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL
}
};
/* Start accumulating a clipping path. */
/*
 * Initialize *padev from the static descriptor and open it.  The device
 * structure itself is caller-owned (typically stack-allocated), hence the
 * NULL memory argument to gx_device_init; 'mem' is used only for the
 * clip-rectangle list built up during accumulation.
 */
void
gx_cpath_accum_begin(gx_device_cpath_accum * padev, gs_memory_t * mem)
{
gx_device_init((gx_device *) padev,
(const gx_device *) & gs_cpath_accum_device,
NULL /* allocated on stack */ , true);
padev->list_memory = mem;
(*dev_proc(padev, open_device)) ((gx_device *) padev);
}
/*
 * Restrict accumulation to the given fixed-point box, converted to integer
 * device coordinates: the lower corner via fixed2int_var and the upper
 * corner via the _ceiling variant, so the integer box covers all of *pbox.
 * Rectangles passed to accum_fill_rectangle are clipped against this box.
 */
void
gx_cpath_accum_set_cbox(gx_device_cpath_accum * padev,
const gs_fixed_rect * pbox)
{
padev->clip_box.p.x = fixed2int_var(pbox->p.x);
padev->clip_box.p.y = fixed2int_var(pbox->p.y);
padev->clip_box.q.x = fixed2int_var_ceiling(pbox->q.x);
padev->clip_box.q.y = fixed2int_var_ceiling(pbox->q.y);
}
/* Finish accumulating a clipping path. */
/*
 * Close the accumulator device and transfer the accumulated rectangle
 * list into *pcpath.  A complete gx_clip_path is assembled in a local
 * (apath) so that gx_cpath_assign_free can be used to hand over ownership.
 * If the close procedure fails, its error code is returned and *pcpath is
 * left untouched.
 */
int
gx_cpath_accum_end(const gx_device_cpath_accum * padev, gx_clip_path * pcpath)
{
int code = (*dev_proc(padev, close_device)) ((gx_device *) padev);
/* Make an entire clipping path so we can use cpath_assign. */
gx_clip_path apath;
if (code < 0)
return code;
gx_cpath_init_local(&apath, padev->list_memory);
apath.rect_list->list = padev->list;
/* An empty list gets a degenerate (all-zero) bounding box. */
if (padev->list.count == 0)
apath.path.bbox.p.x = apath.path.bbox.p.y =
apath.path.bbox.q.x = apath.path.bbox.q.y = 0;
else {
apath.path.bbox.p.x = int2fixed(padev->bbox.p.x);
apath.path.bbox.p.y = int2fixed(padev->bbox.p.y);
apath.path.bbox.q.x = int2fixed(padev->bbox.q.x);
apath.path.bbox.q.y = int2fixed(padev->bbox.q.y);
}
/* indicate that the bbox is accurate */
apath.path.bbox_accurate = 1;
/* Note that the result of the intersection might be */
/* a single rectangle. This will cause clip_path_is_rect.. */
/* to return true. This, in turn, requires that */
/* we set apath.inner_box correctly. */
if (clip_list_is_rectangle(&padev->list))
apath.inner_box = apath.path.bbox;
else {
/* The quick check must fail. */
apath.inner_box.p.x = apath.inner_box.p.y = 0;
apath.inner_box.q.x = apath.inner_box.q.y = 0;
}
gx_cpath_set_outer_box(&apath);
apath.path_valid = false;
apath.id = gs_next_ids(padev->list_memory, 1); /* path changed => change id */
gx_cpath_assign_free(pcpath, &apath);
return 0;
}
/* Discard an accumulator in case of error. */
/* Frees the accumulated rectangle list instead of converting it into a
   clip path; used when gx_cpath_accum_end is not going to be called. */
void
gx_cpath_accum_discard(gx_device_cpath_accum * padev)
{
gx_clip_list_free(&padev->list, padev->list_memory);
}
/* Intersect two clipping paths using an accumulator. */
/*
 * Compute pcpath := pcpath intersected with ppath (filled under 'rule') by
 * rendering ppath into an accumulator device with pcpath installed as the
 * clip path of the fill.  The imager state's logical operation is forced to
 * lop_default for the duration of the fill and restored afterwards.  On any
 * failure the partially accumulated list is discarded.
 */
int
gx_cpath_intersect_path_slow(gx_clip_path * pcpath, gx_path * ppath,
int rule, gs_imager_state *pis)
{
gs_logical_operation_t save_lop = gs_current_logical_op_inline(pis);
gx_device_cpath_accum adev;
gx_device_color devc;
gx_fill_params params;
int code;
gx_cpath_accum_begin(&adev, pcpath->path.memory);
set_nonclient_dev_color(&devc, 0); /* arbitrary, but not transparent */
gs_set_logical_op_inline(pis, lop_default);
params.rule = rule;
/* Half-pixel fill adjustment ("fattening") in both directions. */
params.adjust.x = params.adjust.y = fixed_half;
params.flatness = gs_currentflat_inline(pis);
params.fill_zero_width = true;
code = gx_fill_path_only(ppath, (gx_device *)&adev, pis,
&params, &devc, pcpath);
if (code < 0 || (code = gx_cpath_accum_end(&adev, pcpath)) < 0)
gx_cpath_accum_discard(&adev);
gs_set_logical_op_inline(pis, save_lop);
return code;
}
/* ------ Device implementation ------ */
#ifdef DEBUG
/* Validate a clipping path after accumulation. */
/*
 * Debug-only invariant check.  With count <= 1 the list must have no
 * linked nodes (head/tail NULL, 'single' unlinked).  Otherwise every node
 * must be a non-degenerate rectangle, each node must either start a new
 * band at or above the previous node's ymax, or share the previous node's
 * exact band (same ymin/ymax) with a non-overlapping x range, and each
 * node's prev pointer must mirror the next links.  Offending rectangles
 * are dumped with clip_rect_print.
 */
private bool
clip_list_validate(const gx_clip_list * clp)
{
if (clp->count <= 1)
return (clp->head == 0 && clp->tail == 0 &&
clp->single.next == 0 && clp->single.prev == 0);
else {
const gx_clip_rect *prev = clp->head;
const gx_clip_rect *ptr;
bool ok = true;
while ((ptr = prev->next) != 0) {
if (ptr->ymin > ptr->ymax || ptr->xmin > ptr->xmax ||
!(ptr->ymin >= prev->ymax ||
(ptr->ymin == prev->ymin &&
ptr->ymax == prev->ymax &&
ptr->xmin >= prev->xmax)) ||
ptr->prev != prev
) {
clip_rect_print('q', "WRONG:", ptr);
ok = false;
}
prev = ptr;
}
return ok && prev == clp->tail;
}
}
#endif /* DEBUG */
/* Initialize the accumulation device. */
/*
 * Start with an empty list, an inverted bounding box (p = max, q = min) so
 * that the first accumulated rectangle initializes it, and an unbounded
 * clip box (the caller may narrow it via gx_cpath_accum_set_cbox).
 */
private int
accum_open(register gx_device * dev)
{
gx_device_cpath_accum * const adev = (gx_device_cpath_accum *)dev;
gx_clip_list_init(&adev->list);
adev->bbox.p.x = adev->bbox.p.y = max_int;
adev->bbox.q.x = adev->bbox.q.y = min_int;
adev->clip_box.p.x = adev->clip_box.p.y = min_int;
adev->clip_box.q.x = adev->clip_box.q.y = max_int;
return 0;
}
/* Close the accumulation device. */
/*
 * Record the accumulated x-range into the list.  Under DEBUG, optionally
 * dump the whole list (debug flag 'q') and run the list invariant check,
 * failing with gs_error_Fatal if it is violated.
 */
private int
accum_close(gx_device * dev)
{
gx_device_cpath_accum * const adev = (gx_device_cpath_accum *)dev;
adev->list.xmin = adev->bbox.p.x;
adev->list.xmax = adev->bbox.q.x;
#ifdef DEBUG
if (gs_debug_c('q')) {
gx_clip_rect *rp =
(adev->list.count <= 1 ? &adev->list.single : adev->list.head);
dlprintf6("[q]list at 0x%lx, count=%d, head=0x%lx, tail=0x%lx, xrange=(%d,%d):\n",
(ulong) & adev->list, adev->list.count,
(ulong) adev->list.head, (ulong) adev->list.tail,
adev->list.xmin, adev->list.xmax);
while (rp != 0) {
clip_rect_print('q', "   ", rp);
rp = rp->next;
}
}
if (!clip_list_validate(&adev->list)) {
lprintf1("[q]Bad clip list 0x%lx!\n", (ulong) & adev->list);
return_error(gs_error_Fatal);
}
#endif
return 0;
}
/* Accumulate one rectangle. */
/* Allocate a rectangle to be added to the list. */
/* Sentinel values for the list head/tail nodes (degenerate rectangles). */
static const gx_clip_rect clip_head_rect = {
0, 0, min_int, min_int, min_int, min_int
};
static const gx_clip_rect clip_tail_rect = {
0, 0, max_int, max_int, max_int, max_int
};
/*
 * Allocate a new list node.  When the count reaches 2 the list transitions
 * from the inline 'single' rectangle to a real doubly-linked list: head and
 * tail sentinel nodes are allocated, the inline rectangle is copied into a
 * heap node linked between them, and a fresh node is returned to the
 * caller.  Returns 0 on allocation failure (all partial allocations are
 * freed).  Note: the first allocation (labelled "accum_alloc_rect") is
 * reused as the head sentinel; the node labelled "(head)" is the one
 * actually returned — the client strings are swapped but harmless.
 */
private gx_clip_rect *
accum_alloc_rect(gx_device_cpath_accum * adev)
{
gs_memory_t *mem = adev->list_memory;
gx_clip_rect *ar = gs_alloc_struct(mem, gx_clip_rect, &st_clip_rect,
"accum_alloc_rect");
if (ar == 0)
return 0;
if (adev->list.count == 2) {
/* We're switching from a single rectangle to a list. */
/* Allocate the head and tail entries. */
gx_clip_rect *head = ar;
gx_clip_rect *tail =
gs_alloc_struct(mem, gx_clip_rect, &st_clip_rect,
"accum_alloc_rect(tail)");
gx_clip_rect *single =
gs_alloc_struct(mem, gx_clip_rect, &st_clip_rect,
"accum_alloc_rect(single)");
ar = gs_alloc_struct(mem, gx_clip_rect, &st_clip_rect,
"accum_alloc_rect(head)");
if (tail == 0 || single == 0 || ar == 0) {
gs_free_object(mem, ar, "accum_alloc_rect");
gs_free_object(mem, single, "accum_alloc_rect(single)");
gs_free_object(mem, tail, "accum_alloc_rect(tail)");
gs_free_object(mem, head, "accum_alloc_rect(head)");
return 0;
}
*head = clip_head_rect;
head->next = single;
*single = adev->list.single;
single->prev = head;
single->next = tail;
*tail = clip_tail_rect;
tail->prev = single;
adev->list.head = head;
adev->list.tail = tail;
}
return ar;
}
/*
 * NOTE: the macros below expand to multi-statement bodies that are NOT
 * wrapped in do { } while (0); they are only safe in the statement
 * positions where this file uses them.
 */
/* ACCUM_ALLOC: bump the count; the very first rectangle uses the inline
   'single' slot, later ones come from accum_alloc_rect (may fail). */
#define ACCUM_ALLOC(s, ar, px, py, qx, qy)\
if (++(adev->list.count) == 1)\
ar = &adev->list.single;\
else if ((ar = accum_alloc_rect(adev)) == 0)\
return_error(gs_error_VMerror);\
ACCUM_SET(s, ar, px, py, qx, qy)
#define ACCUM_SET(s, ar, px, py, qx, qy)\
(ar)->xmin = px, (ar)->ymin = py, (ar)->xmax = qx, (ar)->ymax = qy;\
clip_rect_print('Q', s, ar)
/* Link or unlink a rectangle in the list. */
#define ACCUM_ADD_LAST(ar)\
ACCUM_ADD_BEFORE(ar, adev->list.tail)
#define ACCUM_ADD_AFTER(ar, rprev)\
ar->prev = (rprev), (ar->next = (rprev)->next)->prev = ar,\
(rprev)->next = ar
#define ACCUM_ADD_BEFORE(ar, rnext)\
(ar->prev = (rnext)->prev)->next = ar, ar->next = (rnext),\
(rnext)->prev = ar
#define ACCUM_REMOVE(ar)\
ar->next->prev = ar->prev, ar->prev->next = ar->next
/* Free a rectangle that was removed from the list. */
/* Skips the free when the count drops to zero: the only rectangle then is
   the inline 'single' slot, which is not heap-allocated. */
#define ACCUM_FREE(s, ar)\
if (--(adev->list.count)) {\
clip_rect_print('Q', s, ar);\
gs_free_object(adev->list_memory, ar, "accum_rect");\
}
/*
* Add a rectangle to the list. It would be wonderful if rectangles
* were always disjoint and always presented in the correct order,
* but they aren't: the fill loop works by trapezoids, not by scan lines,
* and may produce slightly overlapping rectangles because of "fattening".
* All we can count on is that they are approximately disjoint and
* approximately in order.
*
* Because of the way the fill loop handles a path that is just a single
* rectangle, we take special care to merge Y-adjacent rectangles when
* this is possible.
*/
/*
 * Accumulate one device-space rectangle into the band list in adev->list.
 * 'color' is ignored: this device only records coverage.  Returns 0 on
 * success; ACCUM_ALLOC may return gs_error_VMerror (via return_error)
 * on allocation failure.
 * ('private' is presumably the Ghostscript macro for file-static --
 * defined outside this chunk.)
 */
private int
accum_fill_rectangle(gx_device * dev, int x, int y, int w, int h,
                     gx_color_index color)
{
    gx_device_cpath_accum * const adev = (gx_device_cpath_accum *)dev;
    int xe = x + w, ye = y + h;         /* exclusive right/bottom edges */
    gx_clip_rect *nr;
    gx_clip_rect *ar;
    register gx_clip_rect *rptr;
    int ymin, ymax;

    /* Clip the rectangle being added. */
    if (y < adev->clip_box.p.y)
        y = adev->clip_box.p.y;
    if (ye > adev->clip_box.q.y)
        ye = adev->clip_box.q.y;
    if (y >= ye)
        return 0;               /* entirely clipped away in Y */
    if (x < adev->clip_box.p.x)
        x = adev->clip_box.p.x;
    if (xe > adev->clip_box.q.x)
        xe = adev->clip_box.q.x;
    if (x >= xe)
        return 0;               /* entirely clipped away in X */
    /* Update the bounding box. */
    if (x < adev->bbox.p.x)
        adev->bbox.p.x = x;
    if (y < adev->bbox.p.y)
        adev->bbox.p.y = y;
    if (xe > adev->bbox.q.x)
        adev->bbox.q.x = xe;
    if (ye > adev->bbox.q.y)
        adev->bbox.q.y = ye;
top:
    if (adev->list.count == 0) { /* very first rectangle */
        adev->list.count = 1;
        ACCUM_SET("single", &adev->list.single, x, y, xe, ye);
        return 0;
    }
    if (adev->list.count == 1) { /* check for Y merging */
        rptr = &adev->list.single;
        /* Merge when X extents match exactly and Y ranges touch/overlap. */
        if (x == rptr->xmin && xe == rptr->xmax &&
            y <= rptr->ymax && ye >= rptr->ymin
            ) {
            if (y < rptr->ymin)
                rptr->ymin = y;
            if (ye > rptr->ymax)
                rptr->ymax = ye;
            return 0;
        }
    }
    else
        rptr = adev->list.tail->prev;   /* last real rectangle */
    /* Fast path: the new rectangle lies at or above the last band. */
    if (y >= rptr->ymax) {
        /* If it abuts the last band with identical X extent (and the */
        /* band isn't shared with its predecessor), just extend it. */
        if (y == rptr->ymax && x == rptr->xmin && xe == rptr->xmax &&
            (rptr->prev == 0 || y != rptr->prev->ymax)
            ) {
            rptr->ymax = ye;
            return 0;
        }
        ACCUM_ALLOC("app.y", nr, x, y, xe, ye);
        ACCUM_ADD_LAST(nr);
        return 0;
    } else if (y == rptr->ymin && ye == rptr->ymax && x >= rptr->xmin) {
        /* Same band as the last rectangle; widen or append in X. */
        if (x <= rptr->xmax) {
            if (xe > rptr->xmax)
                rptr->xmax = xe;
            return 0;
        }
        ACCUM_ALLOC("app.x", nr, x, y, xe, ye);
        ACCUM_ADD_LAST(nr);
        return 0;
    }
    /* General case: insert into the interior of the list. */
    ACCUM_ALLOC("accum", nr, x, y, xe, ye);
    rptr = adev->list.tail->prev;
    /* Work backwards till we find the insertion point. */
    while (ye <= rptr->ymin)
        rptr = rptr->prev;
    ymin = rptr->ymin;
    ymax = rptr->ymax;
    if (ye > ymax) {
        if (y >= ymax) {        /* Insert between two bands. */
            ACCUM_ADD_AFTER(nr, rptr);
            return 0;
        }
        /* Split off the top part of the new rectangle. */
        ACCUM_ALLOC("a.top", ar, x, ymax, xe, ye);
        ACCUM_ADD_AFTER(ar, rptr);
        ye = nr->ymax = ymax;
        clip_rect_print('Q', " ymax", nr);
    }
    /* Here we know ymin < ye <= ymax; */
    /* rptr points to the last node with this value of ymin/ymax. */
    /* If necessary, split off the part of the existing band */
    /* that is above the new band. */
    if (ye < ymax) {
        gx_clip_rect *rsplit = rptr;

        while (rsplit->ymax == ymax) {
            ACCUM_ALLOC("s.top", ar, rsplit->xmin, ye, rsplit->xmax, ymax);
            ACCUM_ADD_AFTER(ar, rptr);
            rsplit->ymax = ye;
            rsplit = rsplit->prev;
        }
        ymax = ye;
    }
    /* Now ye = ymax.  If necessary, split off the part of the */
    /* existing band that is below the new band. */
    if (y > ymin) {
        gx_clip_rect *rbot = rptr, *rsplit;

        while (rbot->prev->ymin == ymin)
            rbot = rbot->prev;
        for (rsplit = rbot;;) {
            ACCUM_ALLOC("s.bot", ar, rsplit->xmin, ymin, rsplit->xmax, y);
            ACCUM_ADD_BEFORE(ar, rbot);
            rsplit->ymin = y;
            if (rsplit == rptr)
                break;
            rsplit = rsplit->next;
        }
        ymin = y;
    }
    /* Now y <= ymin as well.  (y < ymin is possible.) */
    nr->ymin = ymin;
    /* Search for the X insertion point. */
    for (; rptr->ymin == ymin; rptr = rptr->prev) {
        if (xe < rptr->xmin)
            continue;           /* still too far to right */
        if (x > rptr->xmax)
            break;              /* disjoint */
        /* The new rectangle overlaps an existing one.  Merge them. */
        if (xe > rptr->xmax) {
            rptr->xmax = nr->xmax;      /* might be > xe if */
                                        /* we already did a merge */
            clip_rect_print('Q', "widen", rptr);
        }
        ACCUM_FREE("free", nr);
        if (x >= rptr->xmin)
            goto out;
        /* Might overlap other rectangles to the left. */
        rptr->xmin = x;
        nr = rptr;              /* continue merging with this node */
        ACCUM_REMOVE(rptr);
        clip_rect_print('Q', "merge", nr);
    }
    ACCUM_ADD_AFTER(nr, rptr);
out:
    /* Check whether there are only 0 or 1 rectangles left. */
    if (adev->list.count <= 1) {
        /* We're switching from a list to at most 1 rectangle. */
        /* Free the head and tail entries. */
        gs_memory_t *mem = adev->list_memory;
        gx_clip_rect *single = adev->list.head->next;

        if (single != adev->list.tail) {
            /* Copy the one surviving node inline, then free it. */
            adev->list.single = *single;
            gs_free_object(mem, single, "accum_free_rect(single)");
            adev->list.single.next = adev->list.single.prev = 0;
        }
        gs_free_object(mem, adev->list.tail, "accum_free_rect(tail)");
        gs_free_object(mem, adev->list.head, "accum_free_rect(head)");
        adev->list.head = 0;
        adev->list.tail = 0;
    }
    /* Check whether there is still more of the new band to process. */
    if (y < ymin) {
        /* Continue with the bottom part of the new rectangle. */
        clip_rect_print('Q', " ymin", nr);
        ye = ymin;
        goto top;
    }
    return 0;
}
|
{
"pile_set_name": "Github"
}
|
# Docker Compose file exposing the webflux-netty application.
version: '3.1'
services:
  webflux-netty:
    # Locally built application image -- TODO confirm how the tag is produced.
    image: webflux-netty:0.0.1-SNAPSHOT
    ports:
      # host:container
      - "8080:8080"
|
{
"pile_set_name": "Github"
}
|
# Package metadata plus its configurable properties.
name: CreateTarBall_01
version: 1.0.0
# TODO: replace the placeholder summary/readme before publishing.
summary: Please fill in a short summary
readme: ""
tags:
  - sample_tag
properties:
  # One entry per configurable property: name, type, whether it is
  # required, and its default value.
  - name: sample_name
    type: string
    required: false
    default: foofoo
|
{
"pile_set_name": "Github"
}
|
!-----------------------------------------------------------------------
!module dll_module
!
! Thin Fortran wrapper around the POSIX dynamic-loading API
! (dlopen/dlsym/dlclose), with small descriptor types for the host OS
! and for a loaded library/procedure pair.
!-----------------------------------------------------------------------
module dll_module
    use iso_c_binding
    implicit none
    private ! all by default
    public :: os_type, dll_type, load_dll, free_dll, init_os_type, init_dll

    ! general constants:
    ! the number of bits in an address (32-bit or 64-bit).
    ! NOTE(review): assumes the kind constant c_intptr_t equals the size
    ! of a C intptr_t in bytes (true for gfortran) -- confirm per compiler.
    integer, parameter :: bits_in_addr = c_intptr_t*8

    ! global error-level variables:
    integer, parameter :: errid_none = 0    ! success
    integer, parameter :: errid_info = 1
    integer, parameter :: errid_warn = 2
    integer, parameter :: errid_severe = 3
    integer, parameter :: errid_fatal = 4   ! unrecoverable failure

    ! NOTE(review): this module-level os_id appears unused; init_os_type
    ! takes os_id as a dummy argument instead.
    integer :: os_id

    ! Host operating-system conventions.
    type os_type
        character(10) :: endian
        character(len=:), allocatable :: newline
        character(len=:), allocatable :: os_desc    ! e.g. 'Linux', 'MacOS'
        character(1) :: pathsep
        character(1) :: swchar      ! presumably the command-line switch char
        character(11) :: unfform    ! unformatted-file FORM= keyword value
    end type os_type

    ! One shared library plus one procedure looked up inside it.
    type dll_type
        integer(c_intptr_t) :: fileaddr
        type(c_ptr) :: fileaddrx        ! handle returned by dlopen
        type(c_funptr) :: procaddr      ! address returned by dlsym
        character(1024) :: filename
        character(1024) :: procname
    end type dll_type

    ! interface to linux API
    interface
        function dlopen(filename,mode) bind(c,name="dlopen")
            ! void *dlopen(const char *filename, int mode);
            use iso_c_binding
            implicit none
            type(c_ptr) :: dlopen
            character(c_char), intent(in) :: filename(*)
            integer(c_int), value :: mode
        end function
        function dlsym(handle,name) bind(c,name="dlsym")
            ! void *dlsym(void *handle, const char *name);
            use iso_c_binding
            implicit none
            type(c_funptr) :: dlsym
            type(c_ptr), value :: handle
            character(c_char), intent(in) :: name(*)
        end function
        function dlclose(handle) bind(c,name="dlclose")
            ! int dlclose(void *handle);
            use iso_c_binding
            implicit none
            integer(c_int) :: dlclose
            type(c_ptr), value :: handle
        end function
    end interface
contains
!-----------------------------------------------------------------------
!Subroutine init_dll
!
! Reset a dll_type descriptor to a known-empty state: null C handles
! and blank name fields.
!-----------------------------------------------------------------------
subroutine init_dll(dll)
    implicit none
    type(dll_type), intent(inout) :: dll
    dll % fileaddr = 0
    dll % fileaddrx = c_null_ptr
    dll % procaddr = c_null_funptr
    dll % filename = " "
    dll % procname = " "
end subroutine init_dll
!-----------------------------------------------------------------------
!Subroutine init_os_type
!
! Fill an os_type descriptor for the given platform id
! (1 = Linux, 2 = MacOS).  Any other id leaves os untouched -- see the
! review note at the bottom.
!-----------------------------------------------------------------------
subroutine init_os_type(os_id,os)
    implicit none
    integer, intent(in) :: os_id
    type(os_type), intent(inout) :: os
    select case (os_id)
    case (1) ! Linux
        ! NOTE(review): 'big_endian' is hard-coded even though x86/x86-64
        ! Linux is little-endian -- confirm how the endian field is used.
        os % endian = 'big_endian'
        os % newline = achar(10)    ! LF
        os % os_desc = 'Linux'
        os % pathsep = '/'
        os % swchar = '-'
        os % unfform = 'unformatted'
    case (2) ! MacOS
        os % endian = 'big_endian'
        os % newline = achar(10)    ! LF
        os % os_desc = 'MacOS'
        os % pathsep = '/'
        os % swchar = '-'
        os % unfform = 'unformatted'
    case default
        ! NOTE(review): silently does nothing for unknown ids, leaving the
        ! allocatable members (newline, os_desc) unallocated; load_dll and
        ! free_dll then reference an undefined os_desc.
    end select
end subroutine init_os_type
!-----------------------------------------------------------------------
!Subroutine load_dll
!
! Open the shared library named in dll%filename and resolve the
! procedure named in dll%procname.  On success dll%fileaddrx holds the
! library handle and dll%procaddr the procedure address; errstat is
! errid_none.  On any failure errstat is errid_fatal with a message in
! errmsg.
!-----------------------------------------------------------------------
subroutine load_dll (os, dll, errstat, errmsg )
    ! this subroutine is used to dynamically load a dll.
    type (os_type), intent(in) :: os
    type (dll_type), intent(inout) :: dll
    integer, intent( out) :: errstat
    character(*), intent( out) :: errmsg

    ! dlopen mode flags (mirror the usual <dlfcn.h> values -- confirm
    ! per platform).
    integer(c_int), parameter :: rtld_lazy=1
    integer(c_int), parameter :: rtld_now=2
    integer(c_int), parameter :: rtld_global=256
    integer(c_int), parameter :: rtld_local=0

    errstat = errid_none
    errmsg = ''

    select case (os%os_desc)
    case ("Linux","MacOS")
        ! load the dll and get the file address:
        dll%fileaddrx = dlopen( trim(dll%filename)//c_null_char, rtld_lazy )
        if( .not. c_associated(dll%fileaddrx) ) then
            errstat = errid_fatal
            ! Embed the address width (32/64) in the failure message.
            write(errmsg,'(i2)') bits_in_addr
            errmsg = 'the dynamic library '//trim(dll%filename)//' could not be loaded. check that the file '// &
                     'exists in the specified location and that it is compiled for '//trim(errmsg)//'-bit systems.'
            return
        end if
        ! get the procedure address:
        dll%procaddr = dlsym( dll%fileaddrx, trim(dll%procname)//c_null_char )
        if(.not. c_associated(dll%procaddr)) then
            errstat = errid_fatal
            errmsg = 'the procedure '//trim(dll%procname)//' in file '//trim(dll%filename)//' could not be loaded.'
            return
        end if
    case ("Windows")
        errstat = errid_fatal
        errmsg = ' load_dll not implemented for '//trim(os%os_desc)
    case default
        errstat = errid_fatal
        errmsg = ' load_dll not implemented for '//trim(os%os_desc)
    end select
    return
end subroutine load_dll
!-----------------------------------------------------------------------
!Subroutine free_dll
!
! Close a library previously opened by load_dll (dlclose on
! dll%fileaddrx).  errstat is errid_none on success, errid_fatal with a
! message in errmsg on failure.
!-----------------------------------------------------------------------
subroutine free_dll (os, dll, errstat, errmsg )
    ! this subroutine is used to free a dynamically loaded dll
    type (os_type), intent(in) :: os
    type (dll_type), intent(inout) :: dll
    integer, intent( out) :: errstat
    character(*), intent( out) :: errmsg
    integer(c_int) :: success   ! dlclose result; 0 means success

    errstat = errid_none
    errmsg = ''
    select case (os%os_desc)
    case ("Linux","MacOS")
        ! close the library:
        success = dlclose( dll%fileaddrx )
        if ( success /= 0 ) then
            errstat = errid_fatal
            errmsg = 'the dynamic library could not be freed.'
            return
        else
            ! NOTE(review): redundant -- errstat/errmsg already hold these
            ! values from the initialization above.
            errstat = errid_none
            errmsg = ''
        end if
    case ("Windows")
        errstat = errid_fatal
        errmsg = ' free_dll not implemented for '//trim(os%os_desc)
    case default
        errstat = errid_fatal
        errmsg = ' free_dll not implemented for '//trim(os%os_desc)
    end select
    return
end subroutine free_dll
end module dll_module
!-----------------------------------------------------------------------
!Main program
!
! Demo driver: loads shared_lib_new.so, resolves the C function add_n
! and the Fortran function add_nf, calls each through a procedure
! pointer, and frees the library after each use.
!-----------------------------------------------------------------------
program test_load_dll
    use, intrinsic :: iso_c_binding
    use dll_module
    implicit none

    ! interface to our shared lib
    ! (shared by both add_n and add_nf: real(c_double) x 2 -> real(c_double))
    abstract interface
        function add_n(a,b)
            use, intrinsic :: iso_c_binding
            implicit none
            real(c_double), intent(in) :: a,b
            real(c_double) :: add_n
        end function add_n
    end interface

    type(os_type) :: os
    type(dll_type) :: dll
    integer :: errstat
    character(1024) :: errmsg
    type(c_funptr) :: cfun      ! NOTE(review): declared but never used
    procedure(add_n), pointer :: fproc

    call init_os_type(1,os)     ! 1 = Linux
    call init_dll(dll)

    ! NOTE(review): hard-coded absolute path; must be edited before running.
    dll%filename="/full_path_to/shared_lib/shared_lib_new.so"

    ! name of the procedure in shared_lib
    ! c version of the function
    dll%procname="add_n"
    ! NOTE(review): printed before load_dll, so this shows the value set
    ! by init_dll, not a resolved address.
    write(*,*) "address: ", dll%procaddr
    call load_dll(os, dll, errstat, errmsg )
    write(*,*)"load_dll: errstat=", errstat
    write(*,*) "address: ", dll%procaddr
    call c_f_procpointer(dll%procaddr,fproc)
    write(*,*) "add_n(2,5)=",fproc(2.d0,5.d0)
    call free_dll (os, dll, errstat, errmsg )
    write(*,*)"free_dll: errstat=", errstat

    ! fortran version
    dll%procname="add_nf"
    call load_dll(os, dll, errstat, errmsg )
    write(*,*)"load_dll: errstat=", errstat
    write(*,*) "address: ", dll%procaddr
    call c_f_procpointer(dll%procaddr,fproc)
    write(*,*) "add_nf(2,5)=",fproc(2.d0,5.d0)
    call free_dll (os, dll, errstat, errmsg )
    write(*,*)"free_dll: errstat=", errstat
end program test_load_dll
|
{
"pile_set_name": "Github"
}
|
'use strict';
// Angular locale module (locale id "ckb-iq" -- see the "id" field below).
angular.module("ngLocale", [], ["$provide", function($provide) {
  // CLDR plural category identifiers returned by pluralCat.
  var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
/**
 * Count the digits after the decimal point in the default string form of n.
 * @param {number|string} n - Value to inspect.
 * @returns {number} Number of fraction digits; 0 when there is no '.'.
 */
function getDecimals(n) {
  const str = String(n);
  const dot = str.indexOf('.');
  if (dot === -1) {
    return 0;
  }
  return str.length - dot - 1;
}
/**
 * Compute the CLDR plural operands for n: v is the number of visible
 * fraction digits, f is those digits as an integer.
 * @param {number} n - The number being classified.
 * @param {number=} opt_precision - Explicit fraction-digit count; when
 *     omitted it is derived via getDecimals(n), capped at 3.
 * @returns {{v: number, f: number}}
 */
function getVF(n, opt_precision) {
  const v = (opt_precision === undefined)
      ? Math.min(getDecimals(n), 3)
      : opt_precision;
  const base = Math.pow(10, v);
  const f = ((n * base) | 0) % base;
  return {v: v, f: f};
}
  // Publish the locale's formatting data on Angular's $locale service.
  $provide.value("$locale", {
    // Date/time names and skeleton formats (names are Unicode-escaped).
    "DATETIME_FORMATS": {
      // Day-period (AM/PM equivalent) strings.
      "AMPMS": [
        "\u0628.\u0646",
        "\u062f.\u0646"
      ],
      // Full weekday names.
      "DAY": [
        "\u06cc\u06d5\u06a9\u0634\u06d5\u0645\u0645\u06d5",
        "\u062f\u0648\u0648\u0634\u06d5\u0645\u0645\u06d5",
        "\u0633\u06ce\u0634\u06d5\u0645\u0645\u06d5",
        "\u0686\u0648\u0627\u0631\u0634\u06d5\u0645\u0645\u06d5",
        "\u067e\u06ce\u0646\u062c\u0634\u06d5\u0645\u0645\u06d5",
        "\u06be\u06d5\u06cc\u0646\u06cc",
        "\u0634\u06d5\u0645\u0645\u06d5"
      ],
      // Full month names.
      "MONTH": [
        "\u06a9\u0627\u0646\u0648\u0648\u0646\u06cc \u062f\u0648\u0648\u06d5\u0645",
        "\u0634\u0648\u0628\u0627\u062a",
        "\u0626\u0627\u0632\u0627\u0631",
        "\u0646\u06cc\u0633\u0627\u0646",
        "\u0626\u0627\u06cc\u0627\u0631",
        "\u062d\u0648\u0632\u06d5\u06cc\u0631\u0627\u0646",
        "\u062a\u06d5\u0645\u0648\u0648\u0632",
        "\u0626\u0627\u0628",
        "\u0626\u06d5\u06cc\u0644\u0648\u0648\u0644",
        "\u062a\u0634\u0631\u06cc\u0646\u06cc \u06cc\u06d5\u06a9\u06d5\u0645",
        "\u062a\u0634\u0631\u06cc\u0646\u06cc \u062f\u0648\u0648\u06d5\u0645",
        "\u06a9\u0627\u0646\u0648\u0646\u06cc \u06cc\u06d5\u06a9\u06d5\u0645"
      ],
      // Abbreviated weekday names (same as DAY in this locale).
      "SHORTDAY": [
        "\u06cc\u06d5\u06a9\u0634\u06d5\u0645\u0645\u06d5",
        "\u062f\u0648\u0648\u0634\u06d5\u0645\u0645\u06d5",
        "\u0633\u06ce\u0634\u06d5\u0645\u0645\u06d5",
        "\u0686\u0648\u0627\u0631\u0634\u06d5\u0645\u0645\u06d5",
        "\u067e\u06ce\u0646\u062c\u0634\u06d5\u0645\u0645\u06d5",
        "\u06be\u06d5\u06cc\u0646\u06cc",
        "\u0634\u06d5\u0645\u0645\u06d5"
      ],
      // Abbreviated month names (same as MONTH in this locale).
      "SHORTMONTH": [
        "\u06a9\u0627\u0646\u0648\u0648\u0646\u06cc \u062f\u0648\u0648\u06d5\u0645",
        "\u0634\u0648\u0628\u0627\u062a",
        "\u0626\u0627\u0632\u0627\u0631",
        "\u0646\u06cc\u0633\u0627\u0646",
        "\u0626\u0627\u06cc\u0627\u0631",
        "\u062d\u0648\u0632\u06d5\u06cc\u0631\u0627\u0646",
        "\u062a\u06d5\u0645\u0648\u0648\u0632",
        "\u0626\u0627\u0628",
        "\u0626\u06d5\u06cc\u0644\u0648\u0648\u0644",
        "\u062a\u0634\u0631\u06cc\u0646\u06cc \u06cc\u06d5\u06a9\u06d5\u0645",
        "\u062a\u0634\u0631\u06cc\u0646\u06cc \u062f\u0648\u0648\u06d5\u0645",
        "\u06a9\u0627\u0646\u0648\u0646\u06cc \u06cc\u06d5\u06a9\u06d5\u0645"
      ],
      // Date/time pattern skeletons used by the date filter.
      "fullDate": "y MMMM d, EEEE",
      "longDate": "d\u06cc MMMM\u06cc y",
      "medium": "y MMM d HH:mm:ss",
      "mediumDate": "y MMM d",
      "mediumTime": "HH:mm:ss",
      "short": "y-MM-dd HH:mm",
      "shortDate": "y-MM-dd",
      "shortTime": "HH:mm"
    },
    "NUMBER_FORMATS": {
      "CURRENCY_SYM": "din",
      "DECIMAL_SEP": "\u066b",
      "GROUP_SEP": "\u066c",
      // PATTERNS[0] = decimal format, PATTERNS[1] = currency format.
      "PATTERNS": [
        {
          "gSize": 3,
          "lgSize": 3,
          "maxFrac": 3,
          "minFrac": 0,
          "minInt": 1,
          "negPre": "-",
          "negSuf": "",
          "posPre": "",
          "posSuf": ""
        },
        {
          "gSize": 3,
          "lgSize": 3,
          "maxFrac": 2,
          "minFrac": 2,
          "minInt": 1,
          "negPre": "\u00a4\u00a0-",
          "negSuf": "",
          "posPre": "\u00a4\u00a0",
          "posSuf": ""
        }
      ]
    },
    "id": "ckb-iq",
    // Plural rule: "one" iff the integer part is 1 and there are no
    // visible fraction digits; everything else is "other".
    "pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
  });
}]);
|
{
"pile_set_name": "Github"
}
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'GrantConstraint',
'GetSecretSecretResult',
'GetSecretsSecretResult',
]
@pulumi.output_type
class GrantConstraint(dict):
    """Constraint on a grant: restricts when the granted cryptographic
    operations are allowed based on the request's encryption context.
    The two fields conflict with each other; set at most one.
    """
    def __init__(__self__, *,
                 encryption_context_equals: Optional[Mapping[str, str]] = None,
                 encryption_context_subset: Optional[Mapping[str, str]] = None):
        """
        :param Mapping[str, str] encryption_context_equals: A list of key-value pairs that must match the encryption context in subsequent cryptographic operation requests. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint. Conflicts with `encryption_context_subset`.
        :param Mapping[str, str] encryption_context_subset: A list of key-value pairs that must be included in the encryption context of subsequent cryptographic operation requests. The grant allows the cryptographic operation only when the encryption context in the request includes the key-value pairs specified in this constraint, although it can include additional key-value pairs. Conflicts with `encryption_context_equals`.
        """
        # Only store keys that were actually provided (absent != empty).
        if encryption_context_equals is not None:
            pulumi.set(__self__, "encryption_context_equals", encryption_context_equals)
        if encryption_context_subset is not None:
            pulumi.set(__self__, "encryption_context_subset", encryption_context_subset)

    @property
    @pulumi.getter(name="encryptionContextEquals")
    def encryption_context_equals(self) -> Optional[Mapping[str, str]]:
        """
        A list of key-value pairs that must match the encryption context in subsequent cryptographic operation requests. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint. Conflicts with `encryption_context_subset`.
        """
        return pulumi.get(self, "encryption_context_equals")

    @property
    @pulumi.getter(name="encryptionContextSubset")
    def encryption_context_subset(self) -> Optional[Mapping[str, str]]:
        """
        A list of key-value pairs that must be included in the encryption context of subsequent cryptographic operation requests. The grant allows the cryptographic operation only when the encryption context in the request includes the key-value pairs specified in this constraint, although it can include additional key-value pairs. Conflicts with `encryption_context_equals`.
        """
        return pulumi.get(self, "encryption_context_subset")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GetSecretSecretResult(dict):
    """Output type for a single secret entry.

    Fields mirror GetSecretsSecretResult in this module, where they are
    documented in full.
    """
    def __init__(__self__, *,
                 name: str,
                 payload: str,
                 context: Optional[Mapping[str, str]] = None,
                 grant_tokens: Optional[Sequence[str]] = None):
        """
        :param str name: The name to export this secret under in the attributes.
        :param str payload: Base64 encoded payload, as returned from a KMS encrypt operation.
        :param Mapping[str, str] context: An optional mapping that makes up the Encryption Context for the secret.
        :param Sequence[str] grant_tokens: An optional list of Grant Tokens for the secret.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "payload", payload)
        # Optional keys are only stored when supplied.
        if context is not None:
            pulumi.set(__self__, "context", context)
        if grant_tokens is not None:
            pulumi.set(__self__, "grant_tokens", grant_tokens)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name to export this secret under in the attributes.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def payload(self) -> str:
        """
        Base64 encoded payload, as returned from a KMS encrypt operation.
        """
        return pulumi.get(self, "payload")

    @property
    @pulumi.getter
    def context(self) -> Optional[Mapping[str, str]]:
        """
        An optional mapping that makes up the Encryption Context for the secret.
        """
        return pulumi.get(self, "context")

    @property
    @pulumi.getter(name="grantTokens")
    def grant_tokens(self) -> Optional[Sequence[str]]:
        """
        An optional list of Grant Tokens for the secret.
        """
        return pulumi.get(self, "grant_tokens")
@pulumi.output_type
class GetSecretsSecretResult(dict):
    """Output type for one secret entry in a multi-secret lookup."""
    def __init__(__self__, *,
                 name: str,
                 payload: str,
                 context: Optional[Mapping[str, str]] = None,
                 grant_tokens: Optional[Sequence[str]] = None):
        """
        :param str name: The name to export this secret under in the attributes.
        :param str payload: Base64 encoded payload, as returned from a KMS encrypt operation.
        :param Mapping[str, str] context: An optional mapping that makes up the Encryption Context for the secret.
        :param Sequence[str] grant_tokens: An optional list of Grant Tokens for the secret.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "payload", payload)
        # Optional keys are only stored when supplied.
        if context is not None:
            pulumi.set(__self__, "context", context)
        if grant_tokens is not None:
            pulumi.set(__self__, "grant_tokens", grant_tokens)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name to export this secret under in the attributes.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def payload(self) -> str:
        """
        Base64 encoded payload, as returned from a KMS encrypt operation.
        """
        return pulumi.get(self, "payload")

    @property
    @pulumi.getter
    def context(self) -> Optional[Mapping[str, str]]:
        """
        An optional mapping that makes up the Encryption Context for the secret.
        """
        return pulumi.get(self, "context")

    @property
    @pulumi.getter(name="grantTokens")
    def grant_tokens(self) -> Optional[Sequence[str]]:
        """
        An optional list of Grant Tokens for the secret.
        """
        return pulumi.get(self, "grant_tokens")
|
{
"pile_set_name": "Github"
}
|
<!-- v2html Files index -->
<html><head>
<title>Files index</title>
<link rel="Stylesheet" title="v2html stylesheet" media="Screen" href="v2html.css">
</head>
<script language="JavaScript" type="text/javascript"><!--
// One-time legacy browser sniffing (Netscape vs. IE, version gates).
var agt=navigator.userAgent.toLowerCase();
// True Netscape only: 'mozilla' present and no spoofing/competing tokens.
var is_nav = ((agt.indexOf('mozilla')!=-1) &&
              (agt.indexOf('spoofer')==-1) &&
              (agt.indexOf('compatible') == -1) &&
              (agt.indexOf('opera')==-1) &&
              (agt.indexOf('webtv')==-1));
var is_major = parseInt(navigator.appVersion);
var is_nav4up = (is_nav && (is_major >= 4));
var is_ie = (agt.indexOf("msie") != -1);
var is_ie4up = (is_ie && (is_major >= 4));
var is_nav5up = (is_nav && (is_major >= 5));
// Guess the path-separator style from the current page's location.
var dirSep = (window.location.pathname.indexOf('\\') != -1) ? '\\' : '/' ;
// Repaint the index button images in window 'wndw' according to the
// currently selected option of its first form control.
// wndw.loc is assumed to be an array (one entry per option) of
// per-image boolean flags -- TODO confirm against the page generator.
function setbuttons (wndw) {
  var i;
  var sl=wndw.loc[ wndw.document.forms[0].elements[0].selectedIndex ];
  // b1 = active button image, b2 = inactive/greyed image.
  for (i=0;i<sl.length;i++) {
    if(sl[i]) wndw.document.images[i].src='v2html-b1.gif';
    else wndw.document.images[i].src='v2html-b2.gif';
  }
  // The two images after the flag list are enabled only when the
  // selected option is a real entry (its text is not '-').
  if ( wndw.document.forms[0].elements[0].options[
       wndw.document.forms[0].elements[0].selectedIndex ].text != '-') {
    wndw.document.images[i ].src='v2html-b1.gif';
    wndw.document.images[i+1].src='v2html-b1.gif';
  }
  else {
    wndw.document.images[i ].src='v2html-b2.gif';
    wndw.document.images[i+1].src='v2html-b2.gif';
  }
}
/** Search is not available on this index page; always cancel the action. */
function search() {
  return false;
}
// -->
</script>
<body>
<a name="top_of_page"></a>
<center><table class=NB cols=7 ><tr><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index.html';"><a target="_top" href="index.html">Hierarchy</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" ><font color="#808080">Files</font></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-m.html';"><a target="_top" href="index-m.html">Modules</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-s.html';"><a target="_top" href="index-s.html">Signals</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-t.html';"><a target="_top" href="index-t.html">Tasks</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-fn.html';"><a target="_top" href="index-fn.html">Functions</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='http://www.burbleland.com/v2html/help_7_30.html?htf-ni-s-';"><a target="_top" href="http://www.burbleland.com/v2html/help_7_30.html?htf-ni-s-">Help</a></td></tr></table></center>
<center><table class=NB cols=7 ><tr><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--A';"><a target="_top" href="index-f.html#index--A">A</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--B';"><a target="_top" href="index-f.html#index--B">B</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--C';"><a target="_top" href="index-f.html#index--C">C</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--I';"><a target="_top" href="index-f.html#index--I">I</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--P';"><a target="_top" href="index-f.html#index--P">P</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--T';"><a target="_top" href="index-f.html#index--T">T</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--U';"><a target="_top" href="index-f.html#index--U">U</a></td></tr></table></center>
<center><h3>Files index</h3></center>
<a name="index--A"></a>
<center><table class=NB cols=1 ><tr><td align="center" width="100%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='#top_of_page';"><a target="_top" href="#top_of_page">A</a></td></tr></table></center>
<b><a name="addsub.v"></a><a href="addsub.v.html">addsub.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">addsub.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#addsub1">addsub1</a> , <a href="index-m.html#addsub2">addsub2</a> </td></tr>
</table></div>
<b><a name="addsub_tb.v"></a><a href="addsub_tb.v.html">addsub_tb.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">addsub_tb.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#addsub_tb">addsub_tb</a> </td></tr>
</table></div>
<a name="index--B"></a>
<center><table class=NB cols=1 ><tr><td align="center" width="100%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='#top_of_page';"><a target="_top" href="#top_of_page">B</a></td></tr></table></center>
<b><a name="blockram.v"></a><a href="blockram.v.html">blockram.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">blockram.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#blockram">blockram</a> </td></tr>
</table></div>
<a name="index--C"></a>
<center><table class=NB cols=1 ><tr><td align="center" width="100%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='#top_of_page';"><a target="_top" href="#top_of_page">C</a></td></tr></table></center>
<b><a name="cmprnd3_tb.v"></a><a href="cmprnd3_tb.v.html">cmprnd3_tb.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">cmprnd3_tb.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#cmprnd3_tb">cmprnd3_tb</a> </td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#timescale_inc.v">timescale_inc.v</a> </td></tr>
</table></div>
<b><a name="compare3m_tb.v"></a><a href="compare3m_tb.v.html">compare3m_tb.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">compare3m_tb.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#compare3m_tb">compare3m_tb</a> </td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#timescale_inc.v">timescale_inc.v</a> </td></tr>
</table></div>
<b><a name="compare3_tb.v"></a><a href="compare3_tb.v.html">compare3_tb.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">compare3_tb.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#compare3_tb">compare3_tb</a> </td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#timescale_inc.v">timescale_inc.v</a> </td></tr>
</table></div>
<a name="index--I"></a>
<center><table class=NB cols=1 ><tr><td align="center" width="100%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='#top_of_page';"><a target="_top" href="#top_of_page">I</a></td></tr></table></center>
<b><a name="int_test.v"></a><a href="int_test.v.html">int_test.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">int_test.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#int_test">int_test</a> </td></tr>
</table></div>
<a name="index--P"></a>
<center><table class=NB cols=1 ><tr><td align="center" width="100%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='#top_of_page';"><a target="_top" href="#top_of_page">P</a></td></tr></table></center>
<b><a name="pacoblaze.v"></a><a href="pacoblaze.v.html">pacoblaze.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#pacoblaze3m">pacoblaze3m</a> </td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_alu.v">pacoblaze_alu.v</a> , <a href="index-f.html#pacoblaze_idu.v">pacoblaze_idu.v</a> , <a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#pacoblaze_register.v">pacoblaze_register.v</a> , <a href="index-f.html#pacoblaze_scratch.v">pacoblaze_scratch.v</a> , <a href="index-f.html#pacoblaze_stack.v">pacoblaze_stack.v</a> </td></tr>
<tr><td valign=top width="17%"><i>Included by:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze1.v">pacoblaze1.v</a> , <a href="index-f.html#pacoblaze2.v">pacoblaze2.v</a> , <a href="index-f.html#pacoblaze3.v">pacoblaze3.v</a> , <a href="index-f.html#pacoblaze3m.v">pacoblaze3m.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze1.v"></a><a href="pacoblaze1.v.html">pacoblaze1.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze1.v</td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze.v">pacoblaze.v</a> , <a href="index-f.html#pacoblaze_alu.v">pacoblaze_alu.v</a> , <a href="index-f.html#pacoblaze_idu.v">pacoblaze_idu.v</a> , <a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#pacoblaze_register.v">pacoblaze_register.v</a> , <a href="index-f.html#pacoblaze_scratch.v">pacoblaze_scratch.v</a> , <a href="index-f.html#pacoblaze_stack.v">pacoblaze_stack.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze1_idu_xst.v"></a><a href="pacoblaze1_idu_xst.v.html">pacoblaze1_idu_xst.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze1_idu_xst.v</td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_idu.v">pacoblaze_idu.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze1_tb.v"></a><a href="pacoblaze1_tb.v.html">pacoblaze1_tb.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze1_tb.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#pacoblaze1_tb">pacoblaze1_tb</a> </td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#timescale_inc.v">timescale_inc.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze1_xst.v"></a><a href="pacoblaze1_xst.v.html">pacoblaze1_xst.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze1_xst.v</td></tr>
</table></div>
<b><a name="pacoblaze2.v"></a><a href="pacoblaze2.v.html">pacoblaze2.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze2.v</td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze.v">pacoblaze.v</a> , <a href="index-f.html#pacoblaze_alu.v">pacoblaze_alu.v</a> , <a href="index-f.html#pacoblaze_idu.v">pacoblaze_idu.v</a> , <a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#pacoblaze_register.v">pacoblaze_register.v</a> , <a href="index-f.html#pacoblaze_scratch.v">pacoblaze_scratch.v</a> , <a href="index-f.html#pacoblaze_stack.v">pacoblaze_stack.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze2_idu_xst.v"></a><a href="pacoblaze2_idu_xst.v.html">pacoblaze2_idu_xst.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze2_idu_xst.v</td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_idu.v">pacoblaze_idu.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze2_xst.v"></a><a href="pacoblaze2_xst.v.html">pacoblaze2_xst.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze2_xst.v</td></tr>
</table></div>
<b><a name="pacoblaze3.v"></a><a href="pacoblaze3.v.html">pacoblaze3.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze3.v</td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze.v">pacoblaze.v</a> , <a href="index-f.html#pacoblaze_alu.v">pacoblaze_alu.v</a> , <a href="index-f.html#pacoblaze_idu.v">pacoblaze_idu.v</a> , <a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#pacoblaze_register.v">pacoblaze_register.v</a> , <a href="index-f.html#pacoblaze_scratch.v">pacoblaze_scratch.v</a> , <a href="index-f.html#pacoblaze_stack.v">pacoblaze_stack.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze3b_tb.v"></a><a href="pacoblaze3b_tb.v.html">pacoblaze3b_tb.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze3b_tb.v</td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#timescale_inc.v">timescale_inc.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze3m.v"></a><a href="pacoblaze3m.v.html">pacoblaze3m.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze3m.v</td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze.v">pacoblaze.v</a> , <a href="index-f.html#pacoblaze_alu.v">pacoblaze_alu.v</a> , <a href="index-f.html#pacoblaze_idu.v">pacoblaze_idu.v</a> , <a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#pacoblaze_register.v">pacoblaze_register.v</a> , <a href="index-f.html#pacoblaze_scratch.v">pacoblaze_scratch.v</a> , <a href="index-f.html#pacoblaze_stack.v">pacoblaze_stack.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze3m_tb.v"></a><a href="pacoblaze3m_tb.v.html">pacoblaze3m_tb.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze3m_tb.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#pacoblaze3m_tb">pacoblaze3m_tb</a> </td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#timescale_inc.v">timescale_inc.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze3m_xst.v"></a><a href="pacoblaze3m_xst.v.html">pacoblaze3m_xst.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze3m_xst.v</td></tr>
</table></div>
<b><a name="pacoblaze3_dregister_xst.v"></a><a href="pacoblaze3_dregister_xst.v.html">pacoblaze3_dregister_xst.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze3_dregister_xst.v</td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_dregister.v">pacoblaze_dregister.v</a> , <a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze3_idu_xst.v"></a><a href="pacoblaze3_idu_xst.v.html">pacoblaze3_idu_xst.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze3_idu_xst.v</td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_idu.v">pacoblaze_idu.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze3_tb.v"></a><a href="pacoblaze3_tb.v.html">pacoblaze3_tb.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze3_tb.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#pacoblaze3_tb">pacoblaze3_tb</a> </td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#timescale_inc.v">timescale_inc.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze3_xst.v"></a><a href="pacoblaze3_xst.v.html">pacoblaze3_xst.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze3_xst.v</td></tr>
</table></div>
<b><a name="pacoblaze_alu.v"></a><a href="pacoblaze_alu.v.html">pacoblaze_alu.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze_alu.v</td></tr>
<tr><td valign=top width="17%"><i>Included by:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze.v">pacoblaze.v</a> , <a href="index-f.html#pacoblaze1.v">pacoblaze1.v</a> , <a href="index-f.html#pacoblaze2.v">pacoblaze2.v</a> , <a href="index-f.html#pacoblaze3.v">pacoblaze3.v</a> , <a href="index-f.html#pacoblaze3m.v">pacoblaze3m.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze_dregister.v"></a><a href="pacoblaze_dregister.v.html">pacoblaze_dregister.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze_dregister.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#pacoblaze3m_register">pacoblaze3m_register</a> </td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> </td></tr>
<tr><td valign=top width="17%"><i>Included by:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze3_dregister_xst.v">pacoblaze3_dregister_xst.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze_dregister_tb.v"></a><a href="pacoblaze_dregister_tb.v.html">pacoblaze_dregister_tb.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze_dregister_tb.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#pacoblaze_dregister_tb">pacoblaze_dregister_tb</a> </td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#timescale_inc.v">timescale_inc.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze_idu.v"></a><a href="pacoblaze_idu.v.html">pacoblaze_idu.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze_idu.v</td></tr>
<tr><td valign=top width="17%"><i>Included by:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze.v">pacoblaze.v</a> , <a href="index-f.html#pacoblaze1.v">pacoblaze1.v</a> , <a href="index-f.html#pacoblaze1_idu_xst.v">pacoblaze1_idu_xst.v</a> , <a href="index-f.html#pacoblaze2.v">pacoblaze2.v</a> , <a href="index-f.html#pacoblaze2_idu_xst.v">pacoblaze2_idu_xst.v</a> , <a href="index-f.html#pacoblaze3.v">pacoblaze3.v</a> , <a href="index-f.html#pacoblaze3_idu_xst.v">pacoblaze3_idu_xst.v</a> , <a href="index-f.html#pacoblaze3m.v">pacoblaze3m.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze_idu_tb.v"></a><a href="pacoblaze_idu_tb.v.html">pacoblaze_idu_tb.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze_idu_tb.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#pacoblaze_idu_tb">pacoblaze_idu_tb</a> </td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#timescale_inc.v">timescale_inc.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze_inc.v"></a><a href="pacoblaze_inc.v.html">pacoblaze_inc.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze_inc.v</td></tr>
<tr><td valign=top width="17%"><i>Included by:</i></td><td valign=top width="83%"><a href="index-f.html#cmprnd3_tb.v">cmprnd3_tb.v</a> , <a href="index-f.html#compare3_tb.v">compare3_tb.v</a> , <a href="index-f.html#compare3m_tb.v">compare3m_tb.v</a> , <a href="index-f.html#pacoblaze.v">pacoblaze.v</a> , <a href="index-f.html#pacoblaze1.v">pacoblaze1.v</a> , <a href="index-f.html#pacoblaze1_tb.v">pacoblaze1_tb.v</a> , <a href="index-f.html#pacoblaze2.v">pacoblaze2.v</a> , <a href="index-f.html#pacoblaze3.v">pacoblaze3.v</a> , <a href="index-f.html#pacoblaze3_dregister_xst.v">pacoblaze3_dregister_xst.v</a> , <a href="index-f.html#pacoblaze3_tb.v">pacoblaze3_tb.v</a> , <a href="index-f.html#pacoblaze3b_tb.v">pacoblaze3b_tb.v</a> , <a href="index-f.html#pacoblaze3m.v">pacoblaze3m.v</a> , <a href="index-f.html#pacoblaze3m_tb.v">pacoblaze3m_tb.v</a> , <a href="index-f.html#pacoblaze_dregister.v">pacoblaze_dregister.v</a> , <a href="index-f.html#pacoblaze_dregister_tb.v">pacoblaze_dregister_tb.v</a> , <a href="index-f.html#pacoblaze_idu_tb.v">pacoblaze_idu_tb.v</a> , <a href="index-f.html#uclock_ti.v">uclock_ti.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze_register.v"></a><a href="pacoblaze_register.v.html">pacoblaze_register.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze_register.v</td></tr>
<tr><td valign=top width="17%"><i>Included by:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze.v">pacoblaze.v</a> , <a href="index-f.html#pacoblaze1.v">pacoblaze1.v</a> , <a href="index-f.html#pacoblaze2.v">pacoblaze2.v</a> , <a href="index-f.html#pacoblaze3.v">pacoblaze3.v</a> , <a href="index-f.html#pacoblaze3m.v">pacoblaze3m.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze_scratch.v"></a><a href="pacoblaze_scratch.v.html">pacoblaze_scratch.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze_scratch.v</td></tr>
<tr><td valign=top width="17%"><i>Included by:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze.v">pacoblaze.v</a> , <a href="index-f.html#pacoblaze1.v">pacoblaze1.v</a> , <a href="index-f.html#pacoblaze2.v">pacoblaze2.v</a> , <a href="index-f.html#pacoblaze3.v">pacoblaze3.v</a> , <a href="index-f.html#pacoblaze3m.v">pacoblaze3m.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze_stack.v"></a><a href="pacoblaze_stack.v.html">pacoblaze_stack.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze_stack.v</td></tr>
<tr><td valign=top width="17%"><i>Included by:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze.v">pacoblaze.v</a> , <a href="index-f.html#pacoblaze1.v">pacoblaze1.v</a> , <a href="index-f.html#pacoblaze2.v">pacoblaze2.v</a> , <a href="index-f.html#pacoblaze3.v">pacoblaze3.v</a> , <a href="index-f.html#pacoblaze3m.v">pacoblaze3m.v</a> </td></tr>
</table></div>
<b><a name="pacoblaze_util.v"></a><a href="pacoblaze_util.v.html">pacoblaze_util.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">pacoblaze_util.v</td></tr>
</table></div>
<a name="index--T"></a>
<center><table class=NB cols=1 ><tr><td align="center" width="100%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='#top_of_page';"><a target="_top" href="#top_of_page">T</a></td></tr></table></center>
<b><a name="test.v"></a><a href="test.v.html">test.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">test.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#one">one</a> </td></tr>
</table></div>
<b><a name="timescale_inc.v"></a><a href="timescale_inc.v.html">timescale_inc.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">timescale_inc.v</td></tr>
<tr><td valign=top width="17%"><i>Included by:</i></td><td valign=top width="83%"><a href="index-f.html#cmprnd3_tb.v">cmprnd3_tb.v</a> , <a href="index-f.html#compare3_tb.v">compare3_tb.v</a> , <a href="index-f.html#compare3m_tb.v">compare3m_tb.v</a> , <a href="index-f.html#pacoblaze1_tb.v">pacoblaze1_tb.v</a> , <a href="index-f.html#pacoblaze3_tb.v">pacoblaze3_tb.v</a> , <a href="index-f.html#pacoblaze3b_tb.v">pacoblaze3b_tb.v</a> , <a href="index-f.html#pacoblaze3m_tb.v">pacoblaze3m_tb.v</a> , <a href="index-f.html#pacoblaze_dregister_tb.v">pacoblaze_dregister_tb.v</a> , <a href="index-f.html#pacoblaze_idu_tb.v">pacoblaze_idu_tb.v</a> , <a href="index-f.html#uclock_ti.v">uclock_ti.v</a> </td></tr>
</table></div>
<a name="index--U"></a>
<center><table class=NB cols=1 ><tr><td align="center" width="100%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='#top_of_page';"><a target="_top" href="#top_of_page">U</a></td></tr></table></center>
<b><a name="uclock.v"></a><a href="uclock.v.html">uclock.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">uclock.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#uclock">uclock</a> </td></tr>
</table></div>
<b><a name="uclock_ti.v"></a><a href="uclock_ti.v.html">uclock_ti.v</a></b>
<div align=right><table cols=2 width="97%">
<tr><td valign=top width="17%"><i>Full name:</i></td><td valign=top width="83%">uclock_ti.v</td></tr>
<tr><td valign=top width="17%"><i>Modules:</i></td><td valign=top width="83%"><a href="index-m.html#uclock_ti">uclock_ti</a> </td></tr>
<tr><td valign=top width="17%"><i>Includes:</i></td><td valign=top width="83%"><a href="index-f.html#pacoblaze_inc.v">pacoblaze_inc.v</a> , <a href="index-f.html#timescale_inc.v">timescale_inc.v</a> </td></tr>
</table></div>
<center><table class=NB cols=7 ><tr><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--A';"><a target="_top" href="index-f.html#index--A">A</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--B';"><a target="_top" href="index-f.html#index--B">B</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--C';"><a target="_top" href="index-f.html#index--C">C</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--I';"><a target="_top" href="index-f.html#index--I">I</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--P';"><a target="_top" href="index-f.html#index--P">P</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--T';"><a target="_top" href="index-f.html#index--T">T</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-f.html#index--U';"><a target="_top" href="index-f.html#index--U">U</a></td></tr></table></center>
<center><table class=NB cols=7 ><tr><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index.html';"><a target="_top" href="index.html">Hierarchy</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" ><font color="#808080">Files</font></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-m.html';"><a target="_top" href="index-m.html">Modules</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-s.html';"><a target="_top" href="index-s.html">Signals</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-t.html';"><a target="_top" href="index-t.html">Tasks</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='index-fn.html';"><a target="_top" href="index-fn.html">Functions</a></td><td align="center" width="14%" onmousedown="this.style.border='inset';" onmouseup="this.style.border='outset';" onclick="location='http://www.burbleland.com/v2html/help_7_30.html?htf-ni-s-';"><a target="_top" href="http://www.burbleland.com/v2html/help_7_30.html?htf-ni-s-">Help</a></td></tr></table></center>
<hr>
<table>
<tr><td><i>This page:</i></td>
<td><i>Maintained by:</i></td>
<td><i><a href="mailto:pablo.N@SPAM.bleyer.org">
pablo.N@SPAM.bleyer.org</a></i></tr>
<tr>
<td> </td>
<td><i>Created:</i></td><td><i>Tue May 29 02:37:53 2007</i></td></tr>
</table>
<hr>
<table width="100%"><tr><td><i>Verilog converted to html by <a target="_top" href="http://www.burbleland.com/v2html/v2html.html"> v2html 7.30</a>
(written by <a href="mailto:v2html730@burbleland.com">Costas Calamvokis</a>).</i></td><td align="right"><b><a href="http://www.burbleland.com/v2html/help_7_30.html?htf-ni-s-">Help</a></b></td></tr></table><table height="90%"><tr><td></td></tr></table>
</body>
</html>
|
{
"pile_set_name": "Github"
}
|
using System.Collections.Generic;
using System.Linq;
using EventStore.Projections.Core.Services.Processing;
namespace EventStore.Projections.Core.Tests.Services.partition_state_update_manager {
/// <summary>
/// Test double for <see cref="IEventWriter"/>: instead of writing events
/// anywhere, it records each emitted batch so tests can inspect them.
/// </summary>
class FakeEventWriter : IEventWriter {
	private readonly List<EmittedEvent[]> _writes = new List<EmittedEvent[]>();

	/// <summary>Every batch passed to <see cref="ValidateOrderAndEmitEvents"/>, in call order.</summary>
	public List<EmittedEvent[]> Writes => _writes;

	public void ValidateOrderAndEmitEvents(EmittedEventEnvelope[] events) {
		// Unwrap each envelope and capture the batch as a snapshot array.
		var batch = new EmittedEvent[events.Length];
		for (var i = 0; i < events.Length; i++) {
			batch[i] = events[i].Event;
		}
		_writes.Add(batch);
	}
}
}
|
{
"pile_set_name": "Github"
}
|
using System.ComponentModel;
using System.Runtime.CompilerServices;
using YAPA.Shared.Contracts;
namespace YAPA.WPF.SettingsMananger
{
/// <summary>
/// Tracks application-update state (pending restart, newly available
/// version) and raises <see cref="PropertyChanged"/> so bound UI refreshes.
/// </summary>
public class SettingManager : ISettingManager
{
    private bool _restartNeeded;
    private string _newVersion;

    /// <summary>Whether a restart is required for pending changes to take effect.</summary>
    public bool RestartNeeded
    {
        get { return _restartNeeded; }
        set
        {
            _restartNeeded = value;
            OnPropertyChanged();
        }
    }

    /// <summary>Version string of a newly available update, if any.</summary>
    public string NewVersion
    {
        get { return _newVersion; }
        set
        {
            _newVersion = value;
            OnPropertyChanged();
        }
    }

    public event PropertyChangedEventHandler PropertyChanged;

    /// <summary>
    /// Raises <see cref="PropertyChanged"/>. The property name defaults to
    /// the calling member via <see cref="CallerMemberNameAttribute"/>.
    /// </summary>
    protected virtual void OnPropertyChanged([CallerMemberName] string propertyName = null)
    {
        // Snapshot the delegate before invoking to avoid a race with
        // unsubscription; observable behavior is unchanged.
        var handler = PropertyChanged;
        if (handler != null)
        {
            handler(this, new PropertyChangedEventArgs(propertyName));
        }
    }
}
}
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<!--
~ Copyright (c) 2020 Evolveum and contributors
~
~ This work is dual-licensed under the Apache License 2.0
~ and European Union Public License. See LICENSE file for details.
-->
<s:search xmlns:s="http://midpoint.evolveum.com/xml/ns/public/model/scripting-3"
xmlns:c="http://midpoint.evolveum.com/xml/ns/public/common/common-3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<s:type>c:ShadowType</s:type>
<s:searchFilter>
<and xmlns="http://prism.evolveum.com/xml/ns/public/query-3">
<ref>
<path>c:resourceRef</path>
<value xsi:type="c:ObjectReferenceType" oid="10000000-0000-0000-0000-000000000004"/>
</ref>
<equal>
<path>c:objectClass</path>
<value xmlns:ri="http://midpoint.evolveum.com/xml/ns/public/resource/instance/10000000-0000-0000-0000-000000000004">ri:AccountObjectClass</value>
</equal>
</and>
</s:searchFilter>
<s:options>
<c:option>
<c:options>
<c:noFetch>true</c:noFetch>
</c:options>
</c:option>
</s:options>
</s:search>
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="utf-8"?>
<resources>
<!--design 375px-->
<dimen name="px_0_5">0.5dp</dimen>
<dimen name="px_48">48.8dp</dimen>
<dimen name="px_75">75dp</dimen>
<dimen name="px_100">100dp</dimen>
<dimen name="px_125">125dp</dimen>
<dimen name="px_150">150dp</dimen>
<dimen name="px_200">200dp</dimen>
<dimen name="px_250">250dp</dimen>
<dimen name="px_300">300dp</dimen>
<dimen name="px_375">375dp</dimen>
<!--text size-->
<dimen name="text_px_28">28sp</dimen>
<dimen name="text_px_32">32sp</dimen>
<dimen name="text_px_40">40sp</dimen>
</resources>
|
{
"pile_set_name": "Github"
}
|
#! /usr/bin/env perl
use strict;

my @stringInTar;    # listing of the submitted tar archive (one line per entry)
my @temp;
my $arg;
my $arg2;
my $command;
my $usrID;
my @existPatterns;  # files that MUST be present in the archive
my @removePatterns; # files that must NOT be present in the archive
my $index;
my $success = 1;

# Print contact info.
print "========================================================\n";
print "This self-checking program is designed by Jason Chou.\n";
print "If you have any questions, please contact class TAs or instructor.\n";
print "All right reserved, DVLab, GIEE/EE, NTU.\n";
print "========================================================\n\n";

# Usage checking
if(@ARGV < 1){
	die "Usage:./SelfCheck CompressedFile\n";
}

# File I/O checking
if(! open EXIST, "<MustExist.txt"){
	die "Can't open the exist pattern file!!\n";
}
if(! open REMOVE, "<MustRemove.txt"){
	die "Can't open the remove pattern file!!\n";
}

# Get the student ID from the archive name (expected form: <id>_hw<n>...)
$arg = $ARGV[0];
print "$arg is self checking!\n";
@temp = split /_hw/, $arg;
$usrID = $temp[0];
print "StudentID:$usrID\n";
$_ = $usrID;
# BUG FIX: the original class [a|b|1-9] also accepted a literal '|' as the
# fourth character; the intended set is the letters a/b or a digit 1-9.
# NOTE(review): the pattern is unanchored, so any substring match passes —
# confirm whether /^...$/ anchoring was intended before tightening it.
if(!/[a-z][0-9]{2}[ab1-9][0-9]{5}/){
	die "Error: Wrong student ID format\n";
}

# Generate the patterns: each line of the pattern files is a suffix that is
# appended to the student ID.
$index = 0;
while(defined($_ = <EXIST>)){
	chomp($_);
	$existPatterns[$index] = $usrID . $_;
	$index++;
}
$index = 0;
while(defined($_ = <REMOVE>)){
	chomp($_);
	$removePatterns[$index] = $usrID . $_;
	$index++;
}
close EXIST;
close REMOVE;

# BUG FIX: the command was built as "tar -zvtf<file>" with no space before
# the archive name, which only works with tars that accept attached option
# arguments (e.g. GNU tar).  Add the separating space for portability.
$command = "tar -zvtf " . $arg;
@stringInTar = `$command`;

foreach $arg2 (@existPatterns){
	if(&checkExist($arg2)==0){
		$success = 0;
		print "Error: Missing file $arg2 in your compressed file.\n";
	}
}
foreach $arg2 (@removePatterns){
	if(&checkRemove($arg2)==1){
		$success = 0;
		print "Error: File $arg2 in your compressed file must remove.\n";
	}
}
if($success==1){
	print "Succeeded in self checking\n";
}
else{
	print "Failed in self checking\n";
}
# checkExist(PATTERN)
# Returns 1 if PATTERN names an entry of the archive listing (@stringInTar),
# 0 otherwise.  The file name is the last whitespace-separated field of a
# "tar -tv" line; symbolic links ("name -> target") are also recognised.
# NOTE: PATTERN is interpolated unquoted into the regex, so regex
# metacharacters in the pattern files are interpreted (original behaviour).
sub checkExist{
	my($flag,$arg2,@pathInTar);   # removed unused $index
	$flag = 0;  # pattern is not found
	foreach $arg2 (@stringInTar){
		@pathInTar = split(/[ \t]+/, $arg2);
		if($pathInTar[$#pathInTar] =~ m/^$_[0]$/){
			$flag = 1;  # pattern is found
			last;
		}
		# symbolic link support: listing line ends with "name -> target"
		if($#pathInTar > 1 && $pathInTar[$#pathInTar-1] =~ m/->/ && $pathInTar[$#pathInTar-2] =~ m/^$_[0]$/){
			$flag = 1;
			last;
		}
	}
	$flag;
}
# checkRemove(PATTERN)
# Returns 1 if PATTERN names an entry of the archive listing (@stringInTar),
# 0 otherwise.  Used for files that must NOT be in the submission.
# The matching logic mirrors checkExist, including symbolic-link support.
sub checkRemove{
	my($flag,$arg2,@pathInTar);   # removed unused $index
	$flag = 0;  # pattern is not found
	foreach $arg2 (@stringInTar){
		@pathInTar = split(/[ \t]+/, $arg2);
		# BUG FIX: dropped dead statement "$index = index($arg2, $_[0]);"
		# whose result was never used.
		if($pathInTar[$#pathInTar] =~ m/^$_[0]$/){
			$flag = 1;  # pattern is found
			last;
		}
		# symbolic link support: listing line ends with "name -> target"
		if($#pathInTar > 1 && $pathInTar[$#pathInTar-1] =~ m/->/ && $pathInTar[$#pathInTar-2] =~ m/^$_[0]$/){
			$flag = 1;
			last;
		}
	}
	$flag;
}
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProjectGuid>{32E1EFD1-F87C-4ABD-8C76-BA8E94AFE160}</ProjectGuid>
<OutputType>Library</OutputType>
<AppDesignerFolder>Properties</AppDesignerFolder>
<RootNamespace>_05.Bigger_from_its_neighbours___Test</RootNamespace>
<AssemblyName>_05.Bigger_from_its_neighbours___Test</AssemblyName>
<TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
<FileAlignment>512</FileAlignment>
<ProjectTypeGuids>{3AC096D0-A1C2-E12C-1390-A8335801FDAB};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}</ProjectTypeGuids>
<VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">10.0</VisualStudioVersion>
<VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
<ReferencePath>$(ProgramFiles)\Common Files\microsoft shared\VSTT\$(VisualStudioVersion)\UITestExtensionPackages</ReferencePath>
<IsCodedUITest>False</IsCodedUITest>
<TestProjectType>UnitTest</TestProjectType>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<OutputPath>bin\Debug\</OutputPath>
<DefineConstants>DEBUG;TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
<DebugType>pdbonly</DebugType>
<Optimize>true</Optimize>
<OutputPath>bin\Release\</OutputPath>
<DefineConstants>TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<ItemGroup>
<Reference Include="System" />
</ItemGroup>
<Choose>
<When Condition="('$(VisualStudioVersion)' == '10.0' or '$(VisualStudioVersion)' == '') and '$(TargetFrameworkVersion)' == 'v3.5'">
<ItemGroup>
<Reference Include="Microsoft.VisualStudio.QualityTools.UnitTestFramework, Version=10.1.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL" />
</ItemGroup>
</When>
<Otherwise>
<ItemGroup>
<Reference Include="Microsoft.VisualStudio.QualityTools.UnitTestFramework" />
</ItemGroup>
</Otherwise>
</Choose>
<ItemGroup>
<Compile Include="BiggerFromItsNeighboursTest.cs" />
<Compile Include="Properties\AssemblyInfo.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\05. Bigger from its neighbours\05. Bigger from its neighbours.csproj">
<Project>{e732fe5e-bbde-41c7-ad30-469293a079d3}</Project>
<Name>05. Bigger from its neighbours</Name>
</ProjectReference>
</ItemGroup>
<Choose>
<When Condition="'$(VisualStudioVersion)' == '10.0' And '$(IsCodedUITest)' == 'True'">
<ItemGroup>
<Reference Include="Microsoft.VisualStudio.QualityTools.CodedUITestFramework, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
<Private>False</Private>
</Reference>
<Reference Include="Microsoft.VisualStudio.TestTools.UITest.Common, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
<Private>False</Private>
</Reference>
<Reference Include="Microsoft.VisualStudio.TestTools.UITest.Extension, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
<Private>False</Private>
</Reference>
<Reference Include="Microsoft.VisualStudio.TestTools.UITesting, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
<Private>False</Private>
</Reference>
</ItemGroup>
</When>
</Choose>
<Import Project="$(VSToolsPath)\TeamTest\Microsoft.TestTools.targets" Condition="Exists('$(VSToolsPath)\TeamTest\Microsoft.TestTools.targets')" />
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
<!-- To modify your build process, add your task inside one of the targets below and uncomment it.
Other similar extension points exist, see Microsoft.Common.targets.
<Target Name="BeforeBuild">
</Target>
<Target Name="AfterBuild">
</Target>
-->
</Project>
|
{
"pile_set_name": "Github"
}
|
import { HttpClientTestingModule } from '@angular/common/http/testing';
import { fakeAsync, TestBed, tick } from '@angular/core/testing';
import { ThemeStore } from 'app/service/theme/theme.store';
import { RepoManagerService } from '../../../service/repomanager/project.repomanager.service';
import { SharedModule } from '../../shared.module';
import { SharedService } from '../../shared.service';
import { ParameterValueComponent } from './parameter.value.component';
describe('CDS: Parameter Value Component', () => {

    beforeEach(() => {
        TestBed.configureTestingModule({
            declarations: [
            ],
            providers: [
                SharedService,
                RepoManagerService,
                ThemeStore,
            ],
            imports: [
                SharedModule,
                HttpClientTestingModule
            ]
        });
    });

    // Creates a ParameterValueComponent with the given parameter type, runs
    // change detection and returns the rendered native element.
    // Must be invoked from inside a fakeAsync zone (it calls tick()).
    function createComponentWithType(type: string): HTMLElement {
        const fixture = TestBed.createComponent(ParameterValueComponent);
        expect(fixture.debugElement.componentInstance).toBeTruthy();
        fixture.componentInstance.type = type;
        fixture.detectChanges();
        tick(50);
        return fixture.debugElement.nativeElement;
    }

    it('should create an input text', fakeAsync(() => {
        const compiled = createComponentWithType('string');
        // Typo fix: message previously read "INput".
        expect(compiled.querySelector('input[type=text]')).toBeTruthy('Input type text must be displayed');
    }));

    it('should create an input number', fakeAsync(() => {
        const compiled = createComponentWithType('number');
        expect(compiled.querySelector('input[type=number]')).toBeTruthy('Input type number must be displayed');
    }));

    it('should create a checkbox', fakeAsync(() => {
        const compiled = createComponentWithType('boolean');
        expect(compiled.querySelector('input[type=checkbox]')).toBeTruthy('Input type checkbox must be displayed');
    }));

    /*
    TODO: re-enable once the codemirror textarea renders in the test bed.
    it('should create a textarea', fakeAsync( () => {
        const compiled = createComponentWithType('text');
        expect(compiled.querySelector('codemirror')).toBeTruthy('textarea must be displayed');
    }));
    */

    it('should create a select for pipeline', fakeAsync(() => {
        const compiled = createComponentWithType('pipeline');
        expect(compiled.querySelector('select')).toBeTruthy('select must be displayed');
    }));

    it('should create a select for environments', fakeAsync(() => {
        const compiled = createComponentWithType('env');
        expect(compiled.querySelector('select')).toBeTruthy('select must be displayed');
    }));
});
|
{
"pile_set_name": "Github"
}
|
modbus_set_float_dcba(3)
========================
NAME
----
modbus_set_float_dcba - set a float value in 2 registers using DCBA byte order
SYNOPSIS
--------
*void modbus_set_float_dcba(float 'f', uint16_t *'dest');*
DESCRIPTION
-----------
The *modbus_set_float_dcba()* function shall set a float to 4 bytes in inverted
Modbus format (DCBA order). The _dest_ array must be pointer on two 16 bits
values to be able to store the full result of the conversion.
RETURN VALUE
------------
There are no return values.
SEE ALSO
--------
linkmb:modbus_get_float_dcba[3]
linkmb:modbus_set_float[3]
linkmb:modbus_get_float[3]
AUTHORS
-------
The libmodbus documentation was written by Stéphane Raimbault
<stephane.raimbault@gmail.com>
|
{
"pile_set_name": "Github"
}
|
<!DOCTYPE html>
<html lang="en">
<head>
<title>CGSize Extension Reference</title>
<link rel="stylesheet" type="text/css" href="../css/jazzy.css" />
<link rel="stylesheet" type="text/css" href="../css/highlight.css" />
<meta charset='utf-8'>
<script src="../js/jquery.min.js" defer></script>
<script src="../js/jazzy.js" defer></script>
</head>
<body>
<a name="//apple_ref/swift/Extension/CGSize" class="dashAnchor"></a>
<a title="CGSize Extension Reference"></a>
<header>
<div class="content-wrapper">
<p><a href="../index.html">Katana Docs</a> (100% documented)</p>
<p class="header-right"><a href="https://github.com/BendingSpoons/katana-swift"><img src="../img/gh.png"/>View on GitHub</a></p>
</div>
</header>
<div class="content-wrapper">
<p id="breadcrumbs">
<a href="../index.html">Katana Reference</a>
<img id="carat" src="../img/carat.png" />
CGSize Extension Reference
</p>
</div>
<div class="content-wrapper">
<nav class="sidebar">
<ul class="nav-groups">
<li class="nav-group-name">
<a href="../Classes.html">Classes</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Classes/Node.html">Node</a>
</li>
<li class="nav-group-task">
<a href="../Classes/PlasticNode.html">PlasticNode</a>
</li>
<li class="nav-group-task">
<a href="../Classes/PlasticView.html">PlasticView</a>
</li>
<li class="nav-group-task">
<a href="../Classes/Renderer.html">Renderer</a>
</li>
<li class="nav-group-task">
<a href="../Classes/Store.html">Store</a>
</li>
<li class="nav-group-task">
<a href="../Classes/ViewsContainer.html">ViewsContainer</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Enums.html">Enums</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Enums/AnimationType.html">AnimationType</a>
</li>
<li class="nav-group-task">
<a href="../Enums/AsyncActionState.html">AsyncActionState</a>
</li>
<li class="nav-group-task">
<a href="../Enums.html#/s:O6Katana9EmptyKeys">EmptyKeys</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Extensions.html">Extensions</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Extensions/Array.html">Array</a>
</li>
<li class="nav-group-task">
<a href="../Extensions/CGSize.html">CGSize</a>
</li>
<li class="nav-group-task">
<a href="../Extensions/UIView.html">UIView</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Protocols.html">Protocols</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Protocols/Action.html">Action</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/ActionWithSideEffect.html">ActionWithSideEffect</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/AnyAction.html">AnyAction</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/AnyActionWithSideEffect.html">AnyActionWithSideEffect</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/AnyConnectedNodeDescription.html">AnyConnectedNodeDescription</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/AnyNode.html">AnyNode</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/AnyNodeDescription.html">AnyNodeDescription</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/AnyNodeDescriptionProps.html">AnyNodeDescriptionProps</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/AnyNodeDescriptionWithChildren.html">AnyNodeDescriptionWithChildren</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/AnyPlasticNodeDescription.html">AnyPlasticNodeDescription</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/AnyStore.html">AnyStore</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/AsyncAction.html">AsyncAction</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/Childrenable.html">Childrenable</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/ConnectedNodeDescription.html">ConnectedNodeDescription</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/NodeDescription.html">NodeDescription</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/NodeDescriptionProps.html">NodeDescriptionProps</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/NodeDescriptionState.html">NodeDescriptionState</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/NodeDescriptionWithChildren.html">NodeDescriptionWithChildren</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/PlasticNodeDescription.html">PlasticNodeDescription</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/PlasticReferenceSizeable.html">PlasticReferenceSizeable</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/PlatformNativeView.html">PlatformNativeView</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/SideEffectDependencyContainer.html">SideEffectDependencyContainer</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/State.html">State</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/SyncAction.html">SyncAction</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Structs.html">Structs</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Structs/Anchor.html">Anchor</a>
</li>
<li class="nav-group-task">
<a href="../Structs/Anchor/Kind.html">– Kind</a>
</li>
<li class="nav-group-task">
<a href="../Structs/Animation.html">Animation</a>
</li>
<li class="nav-group-task">
<a href="../Structs/AnimationContainer.html">AnimationContainer</a>
</li>
<li class="nav-group-task">
<a href="../Structs/AnimationOptions.html">AnimationOptions</a>
</li>
<li class="nav-group-task">
<a href="../Structs/AnimationProps.html">AnimationProps</a>
</li>
<li class="nav-group-task">
<a href="../Structs/ChildrenAnimations.html">ChildrenAnimations</a>
</li>
<li class="nav-group-task">
<a href="../Structs/EdgeInsets.html">EdgeInsets</a>
</li>
<li class="nav-group-task">
<a href="../Structs/EmptyProps.html">EmptyProps</a>
</li>
<li class="nav-group-task">
<a href="../Structs/EmptySideEffectDependencyContainer.html">EmptySideEffectDependencyContainer</a>
</li>
<li class="nav-group-task">
<a href="../Structs/EmptyState.html">EmptyState</a>
</li>
<li class="nav-group-task">
<a href="../Structs/Size.html">Size</a>
</li>
<li class="nav-group-task">
<a href="../Structs/Value.html">Value</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Typealiases.html">Typealiases</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Typealiases.html#/s:6Katana25AnimationPropsTransformer">AnimationPropsTransformer</a>
</li>
<li class="nav-group-task">
<a href="../Typealiases.html#/s:6Katana20NodeUpdateCompletion">NodeUpdateCompletion</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Associated Types.html">Associated Types</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Associated Types.html#/s:P6Katana27NodeDescriptionWithChildren9PropsType">PropsType</a>
</li>
</ul>
</li>
</ul>
</nav>
<article class="main-content">
<section>
<section class="section">
<h1>CGSize</h1>
<p>Undocumented</p>
</section>
<section class="section task-group-section">
<div class="task-group">
<ul>
<li class="item">
<div>
<code>
<a name="/s:vPs8Hashable9hashValueSi"></a>
<a name="//apple_ref/swift/Property/hashValue" class="dashAnchor"></a>
<a class="token" href="#/s:vPs8Hashable9hashValueSi">hashValue</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p><code>CGSize</code> extension that makes it hashable</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight"><code><span class="kd">public</span> <span class="k">var</span> <span class="nv">hashValue</span><span class="p">:</span> <span class="kt">Int</span></code></pre>
</div>
</div>
<div class="slightly-smaller">
<a href="https://github.com/BendingSpoons/katana-swift/tree/0.4.0/Katana/Plastic/LayoutsCache.swift#L14-L18">Show on GitHub</a>
</div>
</section>
</div>
</li>
</ul>
</div>
</section>
</section>
<section id="footer">
<p>© 2016 <a class="link" href="http://bendingspoons.com" target="_blank" rel="external">Bending Spoons Team</a>. All rights reserved. (Last updated: 2016-12-24)</p>
<p>Generated by <a class="link" href="https://github.com/realm/jazzy" target="_blank" rel="external">jazzy ♪♫ v0.7.2</a>, a <a class="link" href="http://realm.io" target="_blank" rel="external">Realm</a> project.</p>
</section>
</article>
</div>
</body>
</html>
|
{
"pile_set_name": "Github"
}
|
<?xml version='1.0' encoding='utf-8'?>
<section xmlns="https://code.dccouncil.us/schemas/dc-library" xmlns:codified="https://code.dccouncil.us/schemas/codified" xmlns:codify="https://code.dccouncil.us/schemas/codify" xmlns:xi="http://www.w3.org/2001/XInclude" containing-doc="D.C. Code">
<num>10-503.22</num>
<heading>Suspension of prohibitions against use — Authorization generally.</heading>
<text>In order to admit of the due observance within the United States Capitol Grounds of occasions of national interest becoming the cognizance and entertainment of Congress, the President of the Senate and the Speaker of the House of Representatives, acting concurrently, are hereby authorized to suspend for such proper occasions so much of the prohibitions contained in §§ <cite path="§10-503.12">10-503.12</cite> to <cite path="§10-503.17">10-503.17</cite> as would prevent the use of the roads and walks of the said grounds by processions or assemblages, and the use upon them of suitable decorations, music, addresses, and ceremonies; provided, that responsible officers shall have been appointed, and arrangements determined which are adequate, in the judgment of said President of the Senate and Speaker of the House of Representatives, for the maintenance of suitable order and decorum in the proceedings, and for guarding the Capitol and its grounds from injury.</text>
<annotations>
<annotation type="History" doc="Stat. 79-2-ch707" path="§11">July 31, 1946, 60 Stat. 719, ch. 707, § 11</annotation>
<annotation type="Prior Codifications">1973 Ed., § 9-128.</annotation>
<annotation type="Prior Codifications">1981 Ed., § 9-124.</annotation>
<annotation type="Section References">This section is referenced in <cite path="§10-503.17">§ 10-503.17</cite> and <cite path="§10-503.23">§ 10-503.23</cite>.</annotation>
</annotations>
</section>
|
{
"pile_set_name": "Github"
}
|
StartChar: O.sinf
Encoding: 65755 -1 1347
Width: 508
VWidth: -144
Flags: MW
LayerCount: 2
Fore
Refer: 1245 -1 N 1 0 0 1 -44 -144 2
Validated: 1
Comment: "."
EndChar
|
{
"pile_set_name": "Github"
}
|
"switch_off" : "Schalter aus",
"switch_on" : "Schalter ein",
"switch_on_off" : "Schalter ein / aus",
"switch_on_off_toggle" : "Schalter ein / aus / Toggle",
"panic_off" : "Panik aus",
"panic_on" : "Panik ein",
"panic_change_signal" : "Änderungssignal",
"panic_on_off" : "Panik ein / aus",
|
{
"pile_set_name": "Github"
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace Microsoft.IIS.Administration.WebServer.WorkerProcesses {
using AspNetCore.Mvc;
using System.Linq;
using System.Net;
using Core.Http;
using System.Collections.Generic;
using Web.Administration;
using AppPools;
using Core.Utils;
using Core;
[RequireWebServer]
public class WorkerProcessesController : ApiBaseController {

    /// <summary>
    /// Lists worker processes, optionally restricted to one application pool
    /// (selected via the AppPools.Defines.IDENTIFIER query parameter).
    /// </summary>
    [HttpGet]
    [ResourceInfo(Name = Defines.WorkerProcessesName)]
    public object Get() {
        IEnumerable<WorkerProcess> processes = null;

        // When an app-pool uuid is supplied, list only that pool's processes.
        string poolUuid = Context.Request.Query[AppPools.Defines.IDENTIFIER];
        if (!string.IsNullOrEmpty(poolUuid)) {
            ApplicationPool pool = AppPoolHelper.GetAppPool(AppPoolId.CreateFromUuid(poolUuid).Name);
            if (pool == null) {
                return NotFound();
            }
            processes = WorkerProcessHelper.GetWorkerProcesses(pool);
        }

        // No filter (or the filtered lookup yielded nothing): list everything.
        if (processes == null) {
            processes = WorkerProcessHelper.GetWorkerProcesses();
        }

        // Report the total item count through an HTTP response header.
        this.Context.Response.SetItemsCount(processes.Count());

        Fields fields = Context.Request.GetFields();

        return new {
            worker_processes = processes.Select(wp => WorkerProcessHelper.ToJsonModelRef(wp, fields))
        };
    }

    /// <summary>Returns a single worker process by id, or 404 if not found.</summary>
    [HttpGet]
    [ResourceInfo(Name = Defines.WorkerProcessName)]
    public object Get(string id)
    {
        var wp = WorkerProcessHelper.GetWorkerProcess(new WorkerProcessId(id).Id);

        if (wp == null) {
            return NotFound();
        }

        return WorkerProcessHelper.WpToJsonModel(wp, Context.Request.GetFields());
    }

    /// <summary>
    /// Invokes WorkerProcessHelper.Kill on the worker process if it exists.
    /// Idempotent: answers 204 No Content whether or not the process existed.
    /// </summary>
    [HttpDelete]
    [Audit]
    public void Delete(string id)
    {
        var wp = WorkerProcessHelper.GetWorkerProcess(new WorkerProcessId(id).Id);

        if (wp != null) {
            WorkerProcessHelper.Kill(wp);
        }

        Context.Response.StatusCode = (int)HttpStatusCode.NoContent;
    }
}
}
|
{
"pile_set_name": "Github"
}
|
//
// AppDelegate.h
// ThreadNumberDemo
//
// Created by everettjf on 2018/11/12.
// Copyright © 2018 everettjf. All rights reserved.
//
#import <UIKit/UIKit.h>
// Application delegate for ThreadNumberDemo; conforms to UIApplicationDelegate.
@interface AppDelegate : UIResponder <UIApplicationDelegate>
// The application's main window.  NOTE(review): presumably populated by the
// storyboard/launch sequence — the implementation is not visible here.
@property (strong, nonatomic) UIWindow *window;
@end
|
{
"pile_set_name": "Github"
}
|
/*****************************************************************************
* Project: RooFit *
* Package: RooFitCore *
* File: $Id: RooDataProjBinding.h,v 1.6 2007/05/11 09:11:30 verkerke Exp $
* Authors: *
* WV, Wouter Verkerke, UC Santa Barbara, verkerke@slac.stanford.edu *
* DK, David Kirkby, UC Irvine, dkirkby@uci.edu *
* *
* Copyright (c) 2000-2005, Regents of the University of California *
* and Stanford University. All rights reserved. *
* *
* Redistribution and use in source and binary forms, *
* with or without modification, are permitted according to the terms *
* listed in LICENSE (http://roofit.sourceforge.net/license.txt) *
*****************************************************************************/
#ifndef ROO_DATA_PROJ_BINDING
#define ROO_DATA_PROJ_BINDING
#include "RooRealBinding.h"
class RooAbsReal ;
class RooAbsData ;
class RooSuperCategory ;
class Roo1DTable ;
// Binding of a real-valued function to a dataset for data projection:
// operator() evaluates the bound RooAbsReal in the context of _data.
// NOTE(review): the projection (averaging) semantics live in the .cxx
// implementation, which is not visible in this header.
class RooDataProjBinding : public RooRealBinding {
public:
// Bind 'real' to dataset 'data' over variables 'vars'; 'normSet' optionally
// supplies a normalization set for the function.
RooDataProjBinding(const RooAbsReal &real, const RooAbsData& data, const RooArgSet &vars, const RooArgSet* normSet=0) ;
virtual ~RooDataProjBinding() ;
// Evaluate the projected function at the given coordinate vector.
virtual Double_t operator()(const Double_t xvector[]) const;
protected:
mutable Bool_t _first ; // Bit indicating if operator() has been called yet
const RooAbsReal* _real ; // Real function to be projected
const RooAbsData* _data ; // Dataset used for projection
const RooArgSet* _nset ; // Normalization set for real function
RooSuperCategory* _superCat ; // Supercategory constructed from _data's category variables
Roo1DTable* _catTable ; // Supercategory table generated from _data
ClassDef(RooDataProjBinding,0) // RealFunc/Dataset binding for data projection of a real function
};
#endif
|
{
"pile_set_name": "Github"
}
|
package netlink
import (
"fmt"
"net"
"syscall"
"github.com/vishvananda/netlink/nl"
)
// RuleAdd adds a rule to the system.
// Equivalent to: ip rule add
// Convenience wrapper that delegates to the package-level handle.
func RuleAdd(rule *Rule) error {
	return pkgHandle.RuleAdd(rule)
}
// RuleAdd adds a rule to the system.
// Equivalent to: ip rule add
// NLM_F_CREATE|NLM_F_EXCL asks the kernel to fail (EEXIST) if an identical
// rule already exists rather than silently accepting a duplicate request.
func (h *Handle) RuleAdd(rule *Rule) error {
	req := h.newNetlinkRequest(syscall.RTM_NEWRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
	return ruleHandle(rule, req)
}
// RuleDel deletes a rule from the system.
// Equivalent to: ip rule del
// Convenience wrapper that delegates to the package-level handle.
func RuleDel(rule *Rule) error {
	return pkgHandle.RuleDel(rule)
}
// RuleDel deletes a rule from the system.
// Equivalent to: ip rule del
// NOTE(review): the NLM_F_CREATE|NLM_F_EXCL flags look copied from the add
// path; plain NLM_F_ACK is the conventional choice for RTM_DELRULE —
// confirm they are harmless here before changing.
func (h *Handle) RuleDel(rule *Rule) error {
	req := h.newNetlinkRequest(syscall.RTM_DELRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
	return ruleHandle(rule, req)
}
// ruleHandle serialises rule into req as an rtnetlink message and executes
// the request.  It is shared by RuleAdd and RuleDel; only fields holding a
// meaningful value (non-negative ints, non-empty strings, non-nil IPNets)
// are encoded as attributes.
func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
	msg := nl.NewRtMsg()
	msg.Family = syscall.AF_INET
	if rule.Family != 0 {
		msg.Family = uint8(rule.Family)
	}

	var dstFamily uint8
	var rtAttrs []*nl.RtAttr

	if rule.Dst != nil && rule.Dst.IP != nil {
		dstLen, _ := rule.Dst.Mask.Size()
		msg.Dst_len = uint8(dstLen)
		msg.Family = uint8(nl.GetIPFamily(rule.Dst.IP))
		dstFamily = msg.Family
		var dstData []byte
		if msg.Family == syscall.AF_INET {
			dstData = rule.Dst.IP.To4()
		} else {
			dstData = rule.Dst.IP.To16()
		}
		rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData))
	}

	if rule.Src != nil && rule.Src.IP != nil {
		msg.Family = uint8(nl.GetIPFamily(rule.Src.IP))
		// Src and Dst must agree on the address family.
		if dstFamily != 0 && dstFamily != msg.Family {
			return fmt.Errorf("source and destination ip are not the same IP family")
		}
		srcLen, _ := rule.Src.Mask.Size()
		msg.Src_len = uint8(srcLen)
		var srcData []byte
		if msg.Family == syscall.AF_INET {
			srcData = rule.Src.IP.To4()
		} else {
			srcData = rule.Src.IP.To16()
		}
		rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_SRC, srcData))
	}

	if rule.Table >= 0 {
		msg.Table = uint8(rule.Table)
		// Table ids >= 256 do not fit the legacy u8 header field; they are
		// carried by the FRA_TABLE attribute added below instead.
		if rule.Table >= 256 {
			msg.Table = syscall.RT_TABLE_UNSPEC
		}
	}

	req.AddData(msg)
	for i := range rtAttrs {
		req.AddData(rtAttrs[i])
	}

	native := nl.NativeEndian()

	if rule.Priority >= 0 {
		b := make([]byte, 4)
		native.PutUint32(b, uint32(rule.Priority))
		req.AddData(nl.NewRtAttr(nl.FRA_PRIORITY, b))
	}
	if rule.Mark >= 0 {
		b := make([]byte, 4)
		native.PutUint32(b, uint32(rule.Mark))
		req.AddData(nl.NewRtAttr(nl.FRA_FWMARK, b))
	}
	if rule.Mask >= 0 {
		b := make([]byte, 4)
		native.PutUint32(b, uint32(rule.Mask))
		req.AddData(nl.NewRtAttr(nl.FRA_FWMASK, b))
	}
	if rule.Flow >= 0 {
		b := make([]byte, 4)
		native.PutUint32(b, uint32(rule.Flow))
		req.AddData(nl.NewRtAttr(nl.FRA_FLOW, b))
	}
	if rule.TunID > 0 {
		// BUG FIX: FRA_TUN_ID is a 64-bit attribute — the decoder in
		// RuleList reads it with Uint64 — but it was previously encoded
		// as only 4 bytes.  Encode the full 8 bytes.
		b := make([]byte, 8)
		native.PutUint64(b, uint64(rule.TunID))
		req.AddData(nl.NewRtAttr(nl.FRA_TUN_ID, b))
	}
	if rule.Table >= 256 {
		b := make([]byte, 4)
		native.PutUint32(b, uint32(rule.Table))
		req.AddData(nl.NewRtAttr(nl.FRA_TABLE, b))
	}
	if msg.Table > 0 {
		if rule.SuppressPrefixlen >= 0 {
			b := make([]byte, 4)
			native.PutUint32(b, uint32(rule.SuppressPrefixlen))
			req.AddData(nl.NewRtAttr(nl.FRA_SUPPRESS_PREFIXLEN, b))
		}
		if rule.SuppressIfgroup >= 0 {
			b := make([]byte, 4)
			native.PutUint32(b, uint32(rule.SuppressIfgroup))
			req.AddData(nl.NewRtAttr(nl.FRA_SUPPRESS_IFGROUP, b))
		}
	}
	if rule.IifName != "" {
		req.AddData(nl.NewRtAttr(nl.FRA_IIFNAME, []byte(rule.IifName)))
	}
	if rule.OifName != "" {
		req.AddData(nl.NewRtAttr(nl.FRA_OIFNAME, []byte(rule.OifName)))
	}
	if rule.Goto >= 0 {
		msg.Type = nl.FR_ACT_NOP
		b := make([]byte, 4)
		native.PutUint32(b, uint32(rule.Goto))
		req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b))
	}

	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
	return err
}
// RuleList lists rules in the system.
// Equivalent to: ip rule list
// Convenience wrapper that delegates to the package-level handle.
func RuleList(family int) ([]Rule, error) {
	return pkgHandle.RuleList(family)
}
// RuleList lists rules in the system.
// Equivalent to: ip rule list
// It dumps RTM_GETRULE messages for the given address family and decodes
// each message's route attributes into a Rule.
func (h *Handle) RuleList(family int) ([]Rule, error) {
	req := h.newNetlinkRequest(syscall.RTM_GETRULE, syscall.NLM_F_DUMP|syscall.NLM_F_REQUEST)
	msg := nl.NewIfInfomsg(family)
	req.AddData(msg)

	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWRULE)
	if err != nil {
		return nil, err
	}

	native := nl.NativeEndian()
	var res = make([]Rule, 0)
	for i := range msgs {
		msg := nl.DeserializeRtMsg(msgs[i])
		attrs, err := nl.ParseRouteAttr(msgs[i][msg.Len():])
		if err != nil {
			return nil, err
		}

		rule := NewRule()
		for j := range attrs {
			switch attrs[j].Attr.Type {
			case syscall.RTA_TABLE:
				rule.Table = int(native.Uint32(attrs[j].Value[0:4]))
			case nl.FRA_SRC:
				rule.Src = &net.IPNet{
					IP:   attrs[j].Value,
					Mask: net.CIDRMask(int(msg.Src_len), 8*len(attrs[j].Value)),
				}
			case nl.FRA_DST:
				rule.Dst = &net.IPNet{
					IP:   attrs[j].Value,
					Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attrs[j].Value)),
				}
			case nl.FRA_FWMARK:
				rule.Mark = int(native.Uint32(attrs[j].Value[0:4]))
			case nl.FRA_FWMASK:
				rule.Mask = int(native.Uint32(attrs[j].Value[0:4]))
			case nl.FRA_TUN_ID:
				// BUG FIX: FRA_TUN_ID is an 8-byte attribute; Uint64 over
				// Value[0:4] panicked with an index out of range.
				rule.TunID = uint(native.Uint64(attrs[j].Value[0:8]))
			case nl.FRA_IIFNAME:
				// Strip the trailing NUL byte from the kernel string.
				rule.IifName = string(attrs[j].Value[:len(attrs[j].Value)-1])
			case nl.FRA_OIFNAME:
				rule.OifName = string(attrs[j].Value[:len(attrs[j].Value)-1])
			case nl.FRA_SUPPRESS_PREFIXLEN:
				i := native.Uint32(attrs[j].Value[0:4])
				// 0xffffffff means "not set".
				if i != 0xffffffff {
					rule.SuppressPrefixlen = int(i)
				}
			case nl.FRA_SUPPRESS_IFGROUP:
				i := native.Uint32(attrs[j].Value[0:4])
				if i != 0xffffffff {
					rule.SuppressIfgroup = int(i)
				}
			case nl.FRA_FLOW:
				rule.Flow = int(native.Uint32(attrs[j].Value[0:4]))
			case nl.FRA_GOTO:
				rule.Goto = int(native.Uint32(attrs[j].Value[0:4]))
			case nl.FRA_PRIORITY:
				rule.Priority = int(native.Uint32(attrs[j].Value[0:4]))
			}
		}
		res = append(res, *rule)
	}

	return res, nil
}
|
{
"pile_set_name": "Github"
}
|
/* Parses a user-supplied output-definition string.
   NOTE(review): return-value semantics are not visible here — presumably
   nonzero on success; confirm against the implementation. */
int parsedef(char *);

/* Emits one sample of parseable output for the given timestamp/interval and
   the collected per-process and system-level counters.
   NOTE(review): the meaning of the returned char is not visible here —
   confirm against the implementation and its callers. */
char parseout(time_t, int,
              struct devtstat *, struct sstat *,
              int, unsigned int, char);
|
{
"pile_set_name": "Github"
}
|
/***************************************************************************/
/* */
/* svbdf.h */
/* */
/* The FreeType BDF services (specification). */
/* */
/* Copyright 2003, 2009, 2012 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
/* modified, and distributed under the terms of the FreeType project */
/* license, LICENSE.TXT. By continuing to use, modify, or distribute */
/* this file you indicate that you have read the license and */
/* understand and accept it fully. */
/* */
/***************************************************************************/
#ifndef __SVBDF_H__
#define __SVBDF_H__

#include FT_BDF_H
#include FT_INTERNAL_SERVICE_H


FT_BEGIN_HEADER

  /* Identifier under which the BDF service is registered on a face. */
#define FT_SERVICE_ID_BDF  "bdf"

  /* Retrieves the XLFD charset encoding and registry strings of a face. */
  typedef FT_Error
  (*FT_BDF_GetCharsetIdFunc)( FT_Face       face,
                              const char*  *acharset_encoding,
                              const char*  *acharset_registry );

  /* Looks up a named BDF property on a face. */
  typedef FT_Error
  (*FT_BDF_GetPropertyFunc)( FT_Face           face,
                             const char*       prop_name,
                             BDF_PropertyRec  *aproperty );

  /* Function table exposed by drivers that implement the BDF service. */
  FT_DEFINE_SERVICE( BDF )
  {
    FT_BDF_GetCharsetIdFunc  get_charset_id;
    FT_BDF_GetPropertyFunc   get_property;
  };

#ifndef FT_CONFIG_OPTION_PIC

  /* Non-PIC builds: the service record is a static constant. */
#define FT_DEFINE_SERVICE_BDFRec( class_,            \
                                  get_charset_id_,   \
                                  get_property_ )    \
  static const FT_Service_BDFRec  class_ =           \
  {                                                  \
    get_charset_id_, get_property_                   \
  };

#else /* FT_CONFIG_OPTION_PIC */

  /* PIC builds: the record is filled in at run time by an init function. */
#define FT_DEFINE_SERVICE_BDFRec( class_,                  \
                                  get_charset_id_,         \
                                  get_property_ )          \
  void                                                     \
  FT_Init_Class_ ## class_( FT_Service_BDFRec*  clazz )    \
  {                                                        \
    clazz->get_charset_id = get_charset_id_;               \
    clazz->get_property = get_property_;                   \
  }

#endif /* FT_CONFIG_OPTION_PIC */

  /* */

FT_END_HEADER

#endif /* __SVBDF_H__ */
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (C) 2005-2019 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "TypingCommand.h"
#include "AXObjectCache.h"
#include "BreakBlockquoteCommand.h"
#include "DataTransfer.h"
#include "DeleteSelectionCommand.h"
#include "Document.h"
#include "Editing.h"
#include "Editor.h"
#include "Element.h"
#include "Frame.h"
#include "HTMLElement.h"
#include "HTMLNames.h"
#include "InsertLineBreakCommand.h"
#include "InsertParagraphSeparatorCommand.h"
#include "InsertTextCommand.h"
#include "Logging.h"
#include "MarkupAccumulator.h"
#include "MathMLElement.h"
#include "RenderElement.h"
#include "StaticRange.h"
#include "TextIterator.h"
#include "VisibleUnits.h"
namespace WebCore {
using namespace HTMLNames;
// Functor handed to forEachLineInString(): inserts each line of typed text,
// separating lines with paragraph separators.
class TypingCommandLineOperation
{
public:
    TypingCommandLineOperation(TypingCommand* typingCommand, bool selectInsertedText, const String& text)
        : m_typingCommand(typingCommand)
        , m_selectInsertedText(selectInsertedText)
        , m_text(text)
    { }

    void operator()(size_t lineOffset, size_t lineLength, bool isLastLine) const
    {
        if (!isLastLine) {
            // Interior line: insert its text (if any), then break the paragraph.
            if (lineLength > 0)
                m_typingCommand->insertTextRunWithoutNewlines(m_text.substring(lineOffset, lineLength), false);
            m_typingCommand->insertParagraphSeparator();
            return;
        }
        // Last line: an empty run is inserted only when it is also the first
        // (offset 0), so trailing newlines don't produce an extra empty insert.
        if (!lineOffset || lineLength > 0)
            m_typingCommand->insertTextRunWithoutNewlines(m_text.substring(lineOffset, lineLength), m_selectInsertedText);
    }

private:
    TypingCommand* m_typingCommand;
    bool m_selectInsertedText;
    const String& m_text;
};
// Maps a typing command (plus granularity/composition/autocompletion state)
// to the EditAction recorded for undo labeling and input events.
static inline EditAction editActionForTypingCommand(TypingCommand::ETypingCommand command, TextGranularity granularity, TypingCommand::TextCompositionType compositionType, bool isAutocompletion)
{
    // Composition typing has dedicated pending/final actions.
    if (compositionType == TypingCommand::TextCompositionPending || compositionType == TypingCommand::TextCompositionFinal) {
        bool isPending = compositionType == TypingCommand::TextCompositionPending;
        switch (command) {
        case TypingCommand::InsertText:
            return isPending ? EditAction::TypingInsertPendingComposition : EditAction::TypingInsertFinalComposition;
        case TypingCommand::DeleteSelection:
            return isPending ? EditAction::TypingDeletePendingComposition : EditAction::TypingDeleteFinalComposition;
        default:
            ASSERT_NOT_REACHED();
            break;
        }
    }

    switch (command) {
    case TypingCommand::DeleteSelection:
        return EditAction::TypingDeleteSelection;
    case TypingCommand::DeleteKey:
        if (granularity == WordGranularity)
            return EditAction::TypingDeleteWordBackward;
        return granularity == LineBoundary ? EditAction::TypingDeleteLineBackward : EditAction::TypingDeleteBackward;
    case TypingCommand::ForwardDeleteKey:
        if (granularity == WordGranularity)
            return EditAction::TypingDeleteWordForward;
        return granularity == LineBoundary ? EditAction::TypingDeleteLineForward : EditAction::TypingDeleteForward;
    case TypingCommand::InsertText:
        return isAutocompletion ? EditAction::InsertReplacement : EditAction::TypingInsertText;
    case TypingCommand::InsertLineBreak:
        return EditAction::TypingInsertLineBreak;
    case TypingCommand::InsertParagraphSeparator:
    case TypingCommand::InsertParagraphSeparatorInQuotedContent:
        return EditAction::TypingInsertParagraph;
    default:
        return EditAction::Unspecified;
    }
}
// True for every EditAction a typing deletion can produce (backspace,
// forward delete, or delete-selection, at any granularity).
static inline bool editActionIsDeleteByTyping(EditAction action)
{
    return action == EditAction::TypingDeleteSelection
        || action == EditAction::TypingDeleteBackward
        || action == EditAction::TypingDeleteWordBackward
        || action == EditAction::TypingDeleteLineBackward
        || action == EditAction::TypingDeleteForward
        || action == EditAction::TypingDeleteWordForward
        || action == EditAction::TypingDeleteLineForward;
}
// Constructs a typing command. The edit action (used for undo labels and
// input events) is derived from the command type, granularity, composition
// state, and the IsAutocompletion option.
TypingCommand::TypingCommand(Document& document, ETypingCommand commandType, const String &textToInsert, Options options, TextGranularity granularity, TextCompositionType compositionType)
    : TextInsertionBaseCommand(document, editActionForTypingCommand(commandType, granularity, compositionType, options & IsAutocompletion))
    , m_commandType(commandType)
    , m_textToInsert(textToInsert)
    , m_currentTextToInsert(textToInsert)
    , m_openForMoreTyping(true) // Stays open so subsequent keystrokes coalesce into this command.
    , m_selectInsertedText(options & SelectInsertedText)
    , m_smartDelete(options & SmartDelete)
    , m_granularity(granularity)
    , m_compositionType(compositionType)
    , m_shouldAddToKillRing(options & AddsToKillRing)
    , m_isAutocompletion(options & IsAutocompletion)
    , m_openedByBackwardDelete(false)
    , m_shouldRetainAutocorrectionIndicator(options & RetainAutocorrectionIndicator)
    , m_shouldPreventSpellChecking(options & PreventSpellChecking)
{
    // Seed the per-keystroke action from the command-level action; it is
    // updated again in willAddTypingToOpenCommand for each added keystroke.
    m_currentTypingEditAction = editingAction();
    updatePreservesTypingStyle(m_commandType);
}
// Deletes the current range selection in response to typing. If the last edit
// command is a still-open TypingCommand, the deletion is folded into it so
// undo coalesces; otherwise a fresh command is created and applied.
void TypingCommand::deleteSelection(Document& document, Options options, TextCompositionType compositionType)
{
    Frame* frame = document.frame();
    ASSERT(frame);

    // Nothing to delete unless there is an actual range selection.
    if (!frame->selection().isRange())
        return;

    if (RefPtr<TypingCommand> lastTypingCommand = lastTypingCommandIfStillOpenForTyping(*frame)) {
        lastTypingCommand->setIsAutocompletion(options & IsAutocompletion);
        lastTypingCommand->setCompositionType(compositionType);
        lastTypingCommand->setShouldPreventSpellChecking(options & PreventSpellChecking);
        lastTypingCommand->deleteSelection(options & SmartDelete);
        return;
    }

    TypingCommand::create(document, DeleteSelection, emptyString(), options, compositionType)->apply();
}
// Handles the backspace key. Character-granularity deletes are folded into a
// still-open TypingCommand so consecutive keystrokes coalesce for undo;
// otherwise (or at larger granularities) a fresh command is applied.
void TypingCommand::deleteKeyPressed(Document& document, Options options, TextGranularity granularity)
{
    if (granularity == CharacterGranularity) {
        if (RefPtr<TypingCommand> lastTypingCommand = lastTypingCommandIfStillOpenForTyping(*document.frame())) {
            // The open command's selection may have drifted from the frame's;
            // re-anchor it before deleting.
            updateSelectionIfDifferentFromCurrentSelection(lastTypingCommand.get(), document.frame());
            lastTypingCommand->setIsAutocompletion(options & IsAutocompletion);
            lastTypingCommand->setCompositionType(TextCompositionNone);
            lastTypingCommand->setShouldPreventSpellChecking(options & PreventSpellChecking);
            lastTypingCommand->deleteKeyPressed(granularity, options & AddsToKillRing);
            return;
        }
    }

    TypingCommand::create(document, DeleteKey, emptyString(), options, granularity)->apply();
}
// Handles the forward-delete key. Character-granularity deletes are folded
// into a still-open TypingCommand so consecutive keystrokes coalesce for undo.
void TypingCommand::forwardDeleteKeyPressed(Document& document, Options options, TextGranularity granularity)
{
    // FIXME: Forward delete in TextEdit appears to open and close a new typing command.
    Frame* frame = document.frame();
    // Sibling entry points (deleteSelection, insertText) assert this; *frame
    // below dereferences unconditionally, so catch a null frame in debug builds.
    ASSERT(frame);
    if (granularity == CharacterGranularity) {
        if (RefPtr<TypingCommand> lastTypingCommand = lastTypingCommandIfStillOpenForTyping(*frame)) {
            // Re-anchor the open command at the frame's live selection first.
            updateSelectionIfDifferentFromCurrentSelection(lastTypingCommand.get(), frame);
            lastTypingCommand->setIsAutocompletion(options & IsAutocompletion);
            lastTypingCommand->setCompositionType(TextCompositionNone);
            lastTypingCommand->setShouldPreventSpellChecking(options & PreventSpellChecking);
            lastTypingCommand->forwardDeleteKeyPressed(granularity, options & AddsToKillRing);
            return;
        }
    }

    TypingCommand::create(document, ForwardDeleteKey, emptyString(), options, granularity)->apply();
}
// Re-anchors an open typing command at the frame's live selection when the
// two have drifted apart (e.g. the caret was moved since the last keystroke).
void TypingCommand::updateSelectionIfDifferentFromCurrentSelection(TypingCommand* typingCommand, Frame* frame)
{
    ASSERT(frame);
    VisibleSelection liveSelection = frame->selection().selection();
    if (liveSelection == typingCommand->endingSelection())
        return;
    typingCommand->setStartingSelection(liveSelection);
    typingCommand->setEndingSelection(liveSelection);
}
// Inserts typed text at the frame's current selection. Marker bookkeeping for
// words affected by the edit must happen before the insertion.
void TypingCommand::insertText(Document& document, const String& text, Options options, TextCompositionType composition)
{
    Frame* frame = document.frame();
    ASSERT(frame);

    if (!text.isEmpty())
        frame->editor().updateMarkersForWordsAffectedByEditing(isSpaceOrNewline(text[0]));

    insertText(document, text, frame->selection().selection(), options, composition);
}
// FIXME: We shouldn't need to take selectionForInsertion. It should be identical to FrameSelection's current selection.
// Inserts typed text at an explicit selection, either by folding it into a
// still-open TypingCommand or by creating and applying a new one.
void TypingCommand::insertText(Document& document, const String& text, const VisibleSelection& selectionForInsertion, Options options, TextCompositionType compositionType)
{
    RefPtr<Frame> frame = document.frame();
    ASSERT(frame);

    LOG(Editing, "TypingCommand::insertText (text %s)", text.utf8().data());

    VisibleSelection currentSelection = frame->selection().selection();

    // Event listeners may replace the text to insert; use the returned string.
    String newText = dispatchBeforeTextInsertedEvent(text, selectionForInsertion, compositionType == TextCompositionPending);

    // Set the starting and ending selection appropriately if we are using a selection
    // that is different from the current selection. In the future, we should change EditCommand
    // to deal with custom selections in a general way that can be used by all of the commands.
    if (RefPtr<TypingCommand> lastTypingCommand = lastTypingCommandIfStillOpenForTyping(*frame)) {
        if (lastTypingCommand->endingSelection() != selectionForInsertion) {
            lastTypingCommand->setStartingSelection(selectionForInsertion);
            lastTypingCommand->setEndingSelection(selectionForInsertion);
        }

        lastTypingCommand->setIsAutocompletion(options & IsAutocompletion);
        lastTypingCommand->setCompositionType(compositionType);
        lastTypingCommand->setShouldRetainAutocorrectionIndicator(options & RetainAutocorrectionIndicator);
        lastTypingCommand->setShouldPreventSpellChecking(options & PreventSpellChecking);
        lastTypingCommand->insertTextAndNotifyAccessibility(newText, options & SelectInsertedText);
        return;
    }

    auto cmd = TypingCommand::create(document, InsertText, newText, options, compositionType);
    applyTextInsertionCommand(frame.get(), cmd.get(), selectionForInsertion, currentSelection);
}
// Inserts a line break in response to typing, folding it into an open typing
// command when one exists so undo coalesces.
void TypingCommand::insertLineBreak(Document& document, Options options)
{
    if (RefPtr<TypingCommand> lastTypingCommand = lastTypingCommandIfStillOpenForTyping(*document.frame())) {
        lastTypingCommand->setIsAutocompletion(options & IsAutocompletion);
        lastTypingCommand->setCompositionType(TextCompositionNone);
        lastTypingCommand->setShouldRetainAutocorrectionIndicator(options & RetainAutocorrectionIndicator);
        lastTypingCommand->insertLineBreakAndNotifyAccessibility();
        return;
    }

    TypingCommand::create(document, InsertLineBreak, emptyString(), options)->apply();
}
// Inserts a paragraph separator inside quoted (blockquote) content, folding
// it into an open typing command when one exists.
void TypingCommand::insertParagraphSeparatorInQuotedContent(Document& document)
{
    if (RefPtr<TypingCommand> lastTypingCommand = lastTypingCommandIfStillOpenForTyping(*document.frame())) {
        lastTypingCommand->setIsAutocompletion(false);
        lastTypingCommand->setCompositionType(TextCompositionNone);
        lastTypingCommand->insertParagraphSeparatorInQuotedContentAndNotifyAccessibility();
        return;
    }

    TypingCommand::create(document, InsertParagraphSeparatorInQuotedContent)->apply();
}
// Inserts a paragraph separator (Enter key) in response to typing, folding it
// into an open typing command when one exists.
void TypingCommand::insertParagraphSeparator(Document& document, Options options)
{
    if (RefPtr<TypingCommand> lastTypingCommand = lastTypingCommandIfStillOpenForTyping(*document.frame())) {
        lastTypingCommand->setIsAutocompletion(options & IsAutocompletion);
        lastTypingCommand->setCompositionType(TextCompositionNone);
        lastTypingCommand->setShouldRetainAutocorrectionIndicator(options & RetainAutocorrectionIndicator);
        lastTypingCommand->insertParagraphSeparatorAndNotifyAccessibility();
        return;
    }

    TypingCommand::create(document, InsertParagraphSeparator, emptyString(), options)->apply();
}
// Returns the editor's last edit command if it is a TypingCommand that is
// still accepting keystrokes; null otherwise.
RefPtr<TypingCommand> TypingCommand::lastTypingCommandIfStillOpenForTyping(Frame& frame)
{
    RefPtr<CompositeEditCommand> lastEditCommand = frame.editor().lastEditCommand();
    if (!lastEditCommand || !lastEditCommand->isTypingCommand())
        return nullptr;

    auto* typingCommand = static_cast<TypingCommand*>(lastEditCommand.get());
    return typingCommand->isOpenForMoreTyping() ? typingCommand : nullptr;
}
// willApplyCommand is deferred to willAddTypingToOpenCommand for delete-by-
// typing actions, and for every keystroke after the initial one.
bool TypingCommand::shouldDeferWillApplyCommandUntilAddingTypingCommand() const
{
    if (editActionIsDeleteByTyping(editingAction()))
        return true;
    return !m_isHandlingInitialTypingCommand;
}
// Closes the frame's open typing command, if any, ending undo coalescing of
// subsequent keystrokes.
void TypingCommand::closeTyping(Frame* frame)
{
    if (auto openCommand = lastTypingCommandIfStillOpenForTyping(*frame))
        openCommand->closeTyping();
}
#if PLATFORM(IOS_FAMILY)
// Re-anchors an open typing command (and its last insert subcommand) at the
// new selection so subsequent typing continues at the right place.
void TypingCommand::ensureLastEditCommandHasCurrentSelectionIfOpenForMoreTyping(Frame* frame, const VisibleSelection& newSelection)
{
    if (RefPtr<TypingCommand> lastTypingCommand = lastTypingCommandIfStillOpenForTyping(*frame)) {
        lastTypingCommand->setEndingSelection(newSelection);
        lastTypingCommand->setEndingSelectionOnLastInsertCommand(newSelection);
    }
}
#endif
// Posts the accessibility deletion notification for the given selection and
// records the deleted position range so it can be surfaced on unapply (undo).
void TypingCommand::postTextStateChangeNotificationForDeletion(const VisibleSelection& selection)
{
    if (!AXObjectCache::accessibilityEnabled())
        return;

    postTextStateChangeNotification(AXTextEditTypeDelete, AccessibilityObject::stringForVisiblePositionRange(selection), selection.start());

    VisiblePositionIndexRange range;
    range.startIndex.value = indexForVisiblePosition(selection.start(), range.startIndex.scope);
    range.endIndex.value = indexForVisiblePosition(selection.end(), range.endIndex.scope);
    composition()->setRangeDeletedByUnapply(range);
}
bool TypingCommand::willApplyCommand()
{
    // When deferred, TypingCommand::willAddTypingToOpenCommand performs the
    // willApplyCommand logic instead; report success so applying proceeds.
    if (!shouldDeferWillApplyCommandUntilAddingTypingCommand())
        return CompositeEditCommand::willApplyCommand();
    return true;
}
// Dispatches to the specific typing operation for this command's type.
void TypingCommand::doApply()
{
    if (endingSelection().isNoneOrOrphaned())
        return;

    // Remember when this command was opened by an initial backward delete
    // (no subcommands yet); the flag is consumed elsewhere in the class.
    if (m_commandType == DeleteKey)
        if (m_commands.isEmpty())
            m_openedByBackwardDelete = true;

    switch (m_commandType) {
    case DeleteSelection:
        deleteSelection(m_smartDelete);
        return;
    case DeleteKey:
        deleteKeyPressed(m_granularity, m_shouldAddToKillRing);
        return;
    case ForwardDeleteKey:
        forwardDeleteKeyPressed(m_granularity, m_shouldAddToKillRing);
        return;
    case InsertLineBreak:
        insertLineBreakAndNotifyAccessibility();
        return;
    case InsertParagraphSeparator:
        insertParagraphSeparatorAndNotifyAccessibility();
        return;
    case InsertParagraphSeparatorInQuotedContent:
        insertParagraphSeparatorInQuotedContentAndNotifyAccessibility();
        return;
    case InsertText:
        insertTextAndNotifyAccessibility(m_textToInsert, m_selectInsertedText);
        return;
    }

    ASSERT_NOT_REACHED();
}
// The per-keystroke edit action — not the command's original action —
// determines the `inputType` reported to input events.
String TypingCommand::inputEventTypeName() const
{
    return inputTypeNameForEditingAction(m_currentTypingEditAction);
}
// Pending-composition updates cannot be canceled via beforeinput; every other
// typing action can.
bool TypingCommand::isBeforeInputEventCancelable() const
{
    switch (m_currentTypingEditAction) {
    case EditAction::TypingInsertPendingComposition:
    case EditAction::TypingDeletePendingComposition:
        return false;
    default:
        return true;
    }
}
// `data` for the input event: the text being typed. Replacement insertions
// expose text only in plain text fields; rich content travels via
// inputEventDataTransfer() instead.
String TypingCommand::inputEventData() const
{
    if (m_currentTypingEditAction == EditAction::TypingInsertText
        || m_currentTypingEditAction == EditAction::TypingInsertPendingComposition
        || m_currentTypingEditAction == EditAction::TypingInsertFinalComposition)
        return m_currentTextToInsert;

    if (m_currentTypingEditAction == EditAction::InsertReplacement)
        return isEditingTextAreaOrTextInput() ? m_currentTextToInsert : String();

    return CompositeEditCommand::inputEventData();
}
// Only replacement insertions outside plain text fields expose a dataTransfer
// (plain text fields report the replacement through inputEventData()).
RefPtr<DataTransfer> TypingCommand::inputEventDataTransfer() const
{
    if (m_currentTypingEditAction != EditAction::InsertReplacement)
        return nullptr;
    if (isEditingTextAreaOrTextInput())
        return nullptr;

    StringBuilder htmlText;
    MarkupAccumulator::appendCharactersReplacingEntities(htmlText, m_currentTextToInsert, 0, m_currentTextToInsert.length(), EntityMaskInHTMLPCDATA);
    return DataTransfer::createForInputEvent(m_currentTextToInsert, htmlText.toString());
}
void TypingCommand::didApplyCommand()
{
    // TypingCommands handle applied editing separately (see TypingCommand::typingAddedToOpenCommand),
    // so the base class's notification is intentionally not invoked here.
    m_isHandlingInitialTypingCommand = false;
}
// Runs spellchecking (and, on Mac, text substitution) over the word the caret
// just left, if typing completed a word.
void TypingCommand::markMisspellingsAfterTyping(ETypingCommand commandType)
{
    Frame& frame = this->frame();

#if PLATFORM(MAC)
    // Bail out unless at least one checking/substitution feature is on.
    if (!frame.editor().isContinuousSpellCheckingEnabled()
        && !frame.editor().isAutomaticQuoteSubstitutionEnabled()
        && !frame.editor().isAutomaticLinkDetectionEnabled()
        && !frame.editor().isAutomaticDashSubstitutionEnabled()
        && !frame.editor().isAutomaticTextReplacementEnabled())
        return;
    if (frame.editor().isHandlingAcceptedCandidate())
        return;
#else
    if (!frame.editor().isContinuousSpellCheckingEnabled())
        return;
#endif

    // Take a look at the selection that results after typing and determine whether we need to spellcheck.
    // Since the word containing the current selection is never marked, this does a check to
    // see if typing made a new word that is not in the current selection. Basically, you
    // get this by being at the end of a word and typing a space.
    VisiblePosition start(endingSelection().start(), endingSelection().affinity());
    VisiblePosition previous = start.previous();
    if (previous.isNotNull()) {
#if !PLATFORM(IOS_FAMILY)
        VisiblePosition p1 = startOfWord(previous, LeftWordIfOnBoundary);
        VisiblePosition p2 = startOfWord(start, LeftWordIfOnBoundary);
        if (p1 != p2) {
            // A word boundary was crossed: check the word that was just completed.
            RefPtr<Range> range = makeRange(p1, p2);
            String strippedPreviousWord;
            if (range && (commandType == TypingCommand::InsertText || commandType == TypingCommand::InsertLineBreak || commandType == TypingCommand::InsertParagraphSeparator || commandType == TypingCommand::InsertParagraphSeparatorInQuotedContent))
                strippedPreviousWord = plainText(range.get()).stripWhiteSpace();
            frame.editor().markMisspellingsAfterTypingToWord(p1, endingSelection(), !strippedPreviousWord.isEmpty());
        } else if (commandType == TypingCommand::InsertText)
            frame.editor().startAlternativeTextUITimer();
#else
        UNUSED_PARAM(commandType);

        // If this bug gets fixed, this PLATFORM(IOS_FAMILY) code could be removed:
        // <rdar://problem/7259611> Word boundary code on iPhone gives different results than desktop
        EWordSide startWordSide = LeftWordIfOnBoundary;
        UChar32 c = previous.characterAfter();
        // FIXME: VisiblePosition::characterAfter() and characterBefore() do not emit newlines the same
        // way as TextIterator, so we do an isEndOfParagraph check here.
        if (isSpaceOrNewline(c) || c == noBreakSpace || isEndOfParagraph(previous)) {
            startWordSide = RightWordIfOnBoundary;
        }
        VisiblePosition p1 = startOfWord(previous, startWordSide);
        VisiblePosition p2 = startOfWord(start, startWordSide);
        if (p1 != p2)
            frame.editor().markMisspellingsAfterTypingToWord(p1, endingSelection(), false);
#endif // !PLATFORM(IOS_FAMILY)
    }
}
// Records the per-keystroke edit action/text and, when willApplyCommand was
// deferred, runs the editor's willApplyEditing check (which can veto the edit).
bool TypingCommand::willAddTypingToOpenCommand(ETypingCommand commandType, TextGranularity granularity, const String& text, RefPtr<Range>&& range)
{
    // Update these first: inputEventTypeName()/inputEventData() read them when
    // the beforeinput event is dispatched below.
    m_currentTextToInsert = text;
    m_currentTypingEditAction = editActionForTypingCommand(commandType, granularity, m_compositionType, m_isAutocompletion);

    if (!shouldDeferWillApplyCommandUntilAddingTypingCommand())
        return true;

    if (!range || isEditingTextAreaOrTextInput())
        return frame().editor().willApplyEditing(*this, CompositeEditCommand::targetRangesForBindings());

    return frame().editor().willApplyEditing(*this, { 1, StaticRange::createFromRange(*range) });
}
// Bookkeeping after each keystroke is added to this open command: notify the
// editor and run post-typing spellchecking. Ordering is platform-specific.
void TypingCommand::typingAddedToOpenCommand(ETypingCommand commandTypeForAddedTyping)
{
    Frame& frame = this->frame();

    updatePreservesTypingStyle(commandTypeForAddedTyping);

#if PLATFORM(COCOA)
    frame.editor().appliedEditing(*this);
    // Since the spellchecking code may also perform corrections and other replacements, it should happen after the typing changes.
    if (!m_shouldPreventSpellChecking)
        markMisspellingsAfterTyping(commandTypeForAddedTyping);
#else
    // The old spellchecking code requires that checking be done first, to prevent issues like that in 6864072, where <doesn't> is marked as misspelled.
    markMisspellingsAfterTyping(commandTypeForAddedTyping);
    frame.editor().appliedEditing(*this);
#endif
}
// Splits the text on newlines and inserts each line, separating lines with
// paragraph separators (see TypingCommandLineOperation).
void TypingCommand::insertText(const String &text, bool selectInsertedText)
{
    // FIXME: Need to implement selectInsertedText for cases where more than one insert is involved.
    // This requires support from insertTextRunWithoutNewlines and insertParagraphSeparator for extending
    // an existing selection; at the moment they can either put the caret after what's inserted or
    // select what's inserted, but there's no way to "extend selection" to include both an old selection
    // that ends just before where we want to insert text and the newly inserted text.
    TypingCommandLineOperation operation(this, selectInsertedText, text);
    forEachLineInString(text, operation);
}
// Inserts text and posts the accessibility "typing" notification, recording
// the replaced range so it can be surfaced on unapply (undo).
void TypingCommand::insertTextAndNotifyAccessibility(const String &text, bool selectInsertedText)
{
    LOG(Editing, "TypingCommand %p insertTextAndNotifyAccessibility (text %s, selectInsertedText %d)", this, text.utf8().data(), selectInsertedText);

    // Capture the selection being replaced before the insertion mutates it.
    AccessibilityReplacedText replacedText(frame().selection().selection());
    insertText(text, selectInsertedText);
    replacedText.postTextStateChangeNotification(document().existingAXObjectCache(), AXTextEditTypeTyping, text, frame().selection().selection());
    composition()->setRangeDeletedByUnapply(replacedText.replacedRange());
}
// Inserts a single run of text containing no newlines; newline handling is
// done by insertText() via TypingCommandLineOperation.
void TypingCommand::insertTextRunWithoutNewlines(const String &text, bool selectInsertedText)
{
    // Dispatches beforeinput; bail if the edit was vetoed.
    if (!willAddTypingToOpenCommand(InsertText, CharacterGranularity, text))
        return;

    auto command = InsertTextCommand::create(document(), text, selectInsertedText,
        m_compositionType == TextCompositionNone ? InsertTextCommand::RebalanceLeadingAndTrailingWhitespaces : InsertTextCommand::RebalanceAllWhitespaces, EditAction::TypingInsertText);

    applyCommandToComposite(WTFMove(command), endingSelection());

    // Keep the frame alive across typingAddedToOpenCommand, which notifies the
    // editor (appliedEditing) and may run spellchecking.
    Frame& frame = this->frame();
    Ref<Frame> protector(frame);
    typingAddedToOpenCommand(InsertText);
}
// Applies an InsertLineBreakCommand as part of this typing command.
void TypingCommand::insertLineBreak()
{
    if (!canAppendNewLineFeedToSelection(endingSelection()))
        return;
    // Dispatches beforeinput; bail if the edit was vetoed.
    if (!willAddTypingToOpenCommand(InsertLineBreak, LineGranularity))
        return;

    applyCommandToComposite(InsertLineBreakCommand::create(document()));
    // Keep the frame alive across typingAddedToOpenCommand.
    Frame& frame = this->frame();
    Ref<Frame> protector(frame);
    typingAddedToOpenCommand(InsertLineBreak);
}
// Inserts a line break and posts the accessibility "typing" notification,
// recording the replaced range for unapply (undo).
void TypingCommand::insertLineBreakAndNotifyAccessibility()
{
    AccessibilityReplacedText replacedText(frame().selection().selection());
    insertLineBreak();
    replacedText.postTextStateChangeNotification(document().existingAXObjectCache(), AXTextEditTypeTyping, "\n", frame().selection().selection());
    composition()->setRangeDeletedByUnapply(replacedText.replacedRange());
}
// Applies an InsertParagraphSeparatorCommand as part of this typing command.
void TypingCommand::insertParagraphSeparator()
{
    if (!canAppendNewLineFeedToSelection(endingSelection()))
        return;
    // Dispatches beforeinput; bail if the edit was vetoed.
    if (!willAddTypingToOpenCommand(InsertParagraphSeparator, ParagraphGranularity))
        return;

    applyCommandToComposite(InsertParagraphSeparatorCommand::create(document(), false, false, EditAction::TypingInsertParagraph));
    // Keep the frame alive across typingAddedToOpenCommand.
    Frame& frame = this->frame();
    Ref<Frame> protector(frame);
    typingAddedToOpenCommand(InsertParagraphSeparator);
}
// Inserts a paragraph separator and posts the accessibility "typing"
// notification, recording the replaced range for unapply (undo).
void TypingCommand::insertParagraphSeparatorAndNotifyAccessibility()
{
    AccessibilityReplacedText replacedText(frame().selection().selection());
    insertParagraphSeparator();
    replacedText.postTextStateChangeNotification(document().existingAXObjectCache(), AXTextEditTypeTyping, "\n", frame().selection().selection());
    composition()->setRangeDeletedByUnapply(replacedText.replacedRange());
}
// Inserts a paragraph separator inside quoted content by breaking the
// enclosing blockquote (BreakBlockquoteCommand), except inside tables.
void TypingCommand::insertParagraphSeparatorInQuotedContent()
{
    // Dispatches beforeinput; bail if the edit was vetoed.
    if (!willAddTypingToOpenCommand(InsertParagraphSeparatorInQuotedContent, ParagraphGranularity))
        return;

    // If the selection starts inside a table, just insert the paragraph separator normally.
    // Breaking the blockquote would also break apart the table, which is unnecessary when inserting a newline.
    if (enclosingNodeOfType(endingSelection().start(), &isTableStructureNode)) {
        insertParagraphSeparator();
        return;
    }

    applyCommandToComposite(BreakBlockquoteCommand::create(document()));
    // Keep the frame alive across typingAddedToOpenCommand.
    Frame& frame = this->frame();
    Ref<Frame> protector(frame);
    typingAddedToOpenCommand(InsertParagraphSeparatorInQuotedContent);
}
// Quoted-content variant of insertParagraphSeparatorAndNotifyAccessibility:
// performs the insertion, then posts the accessibility notification and
// records the replaced range for unapply (undo).
void TypingCommand::insertParagraphSeparatorInQuotedContentAndNotifyAccessibility()
{
    AccessibilityReplacedText replacedText(frame().selection().selection());
    insertParagraphSeparatorInQuotedContent();
    replacedText.postTextStateChangeNotification(document().existingAXObjectCache(), AXTextEditTypeTyping, "\n", frame().selection().selection());
    composition()->setRangeDeletedByUnapply(replacedText.replacedRange());
}
// Removes every child of the editable root (adding a block placeholder if
// needed) and collapses the selection to its start. Returns true when
// something was removed; false when the root was already empty or holds only
// a placeholder <br>.
bool TypingCommand::makeEditableRootEmpty()
{
    Element* root = endingSelection().rootEditableElement();
    if (!root || !root->firstChild())
        return false;

    if (root->firstChild() == root->lastChild() && root->firstElementChild() && root->firstElementChild()->hasTagName(brTag)) {
        // If there is a single child and it could be a placeholder, leave it alone.
        if (root->renderer() && root->renderer()->isRenderBlockFlow())
            return false;
    }

    while (Node* child = root->firstChild())
        removeNode(*child);

    addBlockPlaceholderIfNeeded(root);
    setEndingSelection(VisibleSelection(firstPositionInNode(root), DOWNSTREAM, endingSelection().isDirectional()));

    return true;
}
void TypingCommand::deleteKeyPressed(TextGranularity granularity, bool shouldAddToKillRing)
{
Frame& frame = this->frame();
Ref<Frame> protector(frame);
frame.editor().updateMarkersForWordsAffectedByEditing(false);
VisibleSelection selectionToDelete;
VisibleSelection selectionAfterUndo;
bool expandForSpecialElements = !endingSelection().isCaret();
switch (endingSelection().selectionType()) {
case VisibleSelection::RangeSelection:
selectionToDelete = endingSelection();
selectionAfterUndo = selectionToDelete;
break;
case VisibleSelection::CaretSelection: {
// After breaking out of an empty mail blockquote, we still want continue with the deletion
// so actual content will get deleted, and not just the quote style.
if (breakOutOfEmptyMailBlockquotedParagraph())
typingAddedToOpenCommand(DeleteKey);
m_smartDelete = false;
FrameSelection selection;
selection.setSelection(endingSelection());
selection.modify(FrameSelection::AlterationExtend, DirectionBackward, granularity);
if (shouldAddToKillRing && selection.isCaret() && granularity != CharacterGranularity)
selection.modify(FrameSelection::AlterationExtend, DirectionBackward, CharacterGranularity);
const VisiblePosition& visibleStart = endingSelection().visibleStart();
const VisiblePosition& previousPosition = visibleStart.previous(CannotCrossEditingBoundary);
Node* enclosingTableCell = enclosingNodeOfType(visibleStart.deepEquivalent(), &isTableCell);
const Node* enclosingTableCellForPreviousPosition = enclosingNodeOfType(previousPosition.deepEquivalent(), &isTableCell);
if (previousPosition.isNull() || enclosingTableCell != enclosingTableCellForPreviousPosition) {
// When the caret is at the start of the editable area in an empty list item, break out of the list item.
if (auto deleteListSelection = shouldBreakOutOfEmptyListItem()) {
if (willAddTypingToOpenCommand(DeleteKey, granularity, { }, Range::create(document(), deleteListSelection.value().start(), deleteListSelection.value().end()))) {
breakOutOfEmptyListItem();
typingAddedToOpenCommand(DeleteKey);
}
return;
}
}
if (previousPosition.isNull()) {
// When there are no visible positions in the editing root, delete its entire contents.
// FIXME: Dispatch a `beforeinput` event here and bail if preventDefault() was invoked.
if (visibleStart.next(CannotCrossEditingBoundary).isNull() && makeEditableRootEmpty()) {
typingAddedToOpenCommand(DeleteKey);
return;
}
}
// If we have a caret selection at the beginning of a cell, we have nothing to do.
if (enclosingTableCell && visibleStart == firstPositionInNode(enclosingTableCell))
return;
// If the caret is at the start of a paragraph after a table, move content into the last table cell.
if (isStartOfParagraph(visibleStart) && isFirstPositionAfterTable(visibleStart.previous(CannotCrossEditingBoundary))) {
// Unless the caret is just before a table. We don't want to move a table into the last table cell.
if (isLastPositionBeforeTable(visibleStart))
return;
// Extend the selection backward into the last cell, then deletion will handle the move.
selection.modify(FrameSelection::AlterationExtend, DirectionBackward, granularity);
// If the caret is just after a table, select the table and don't delete anything.
} else if (Node* table = isFirstPositionAfterTable(visibleStart)) {
setEndingSelection(VisibleSelection(positionBeforeNode(table), endingSelection().start(), DOWNSTREAM, endingSelection().isDirectional()));
typingAddedToOpenCommand(DeleteKey);
return;
}
selectionToDelete = selection.selection();
if (granularity == CharacterGranularity && selectionToDelete.end().containerNode() == selectionToDelete.start().containerNode()
&& selectionToDelete.end().computeOffsetInContainerNode() - selectionToDelete.start().computeOffsetInContainerNode() > 1) {
// If there are multiple Unicode code points to be deleted, adjust the range to match platform conventions.
selectionToDelete.setWithoutValidation(selectionToDelete.end(), selectionToDelete.end().previous(BackwardDeletion));
}
if (!startingSelection().isRange() || selectionToDelete.base() != startingSelection().start())
selectionAfterUndo = selectionToDelete;
else
// It's a little tricky to compute what the starting selection would have been in the original document.
// We can't let the VisibleSelection class's validation kick in or it'll adjust for us based on
// the current state of the document and we'll get the wrong result.
selectionAfterUndo.setWithoutValidation(startingSelection().end(), selectionToDelete.extent());
break;
}
case VisibleSelection::NoSelection:
ASSERT_NOT_REACHED();
break;
}
ASSERT(!selectionToDelete.isNone());
if (selectionToDelete.isNone()) {
#if PLATFORM(IOS_FAMILY)
// Workaround for this bug:
// <rdar://problem/4653755> UIKit text widgets should use WebKit editing API to manipulate text
setEndingSelection(frame.selection().selection());
closeTyping(&frame);
#endif
return;
}
if (selectionToDelete.isCaret() || !frame.selection().shouldDeleteSelection(selectionToDelete))
return;
if (!willAddTypingToOpenCommand(DeleteKey, granularity, { }, selectionToDelete.firstRange()))
return;
if (shouldAddToKillRing)
frame.editor().addRangeToKillRing(*selectionToDelete.toNormalizedRange().get(), Editor::KillRingInsertionMode::PrependText);
// Post the accessibility notification before actually deleting the content while selectionToDelete is still valid
postTextStateChangeNotificationForDeletion(selectionToDelete);
// Make undo select everything that has been deleted, unless an undo will undo more than just this deletion.
// FIXME: This behaves like TextEdit except for the case where you open with text insertion and then delete
// more text than you insert. In that case all of the text that was around originally should be selected.
if (m_openedByBackwardDelete)
setStartingSelection(selectionAfterUndo);
CompositeEditCommand::deleteSelection(selectionToDelete, m_smartDelete, /* mergeBlocksAfterDelete*/ true, /* replace*/ false, expandForSpecialElements, /*sanitizeMarkup*/ true);
setSmartDelete(false);
typingAddedToOpenCommand(DeleteKey);
}
// Handles a forward-delete (delete-right) keystroke.
// For a caret selection, extends forward by |granularity| to compute the range
// to remove, with special handling for table cells, whole tables, and
// end-of-paragraph merges, then delegates to
// CompositeEditCommand::deleteSelection().
// shouldAddToKillRing: when true, the deleted range is appended to the
// platform kill ring before it is removed.
void TypingCommand::forwardDeleteKeyPressed(TextGranularity granularity, bool shouldAddToKillRing)
{
    Frame& frame = this->frame();
    Ref<Frame> protector(frame);
    frame.editor().updateMarkersForWordsAffectedByEditing(false);
    VisibleSelection selectionToDelete;
    VisibleSelection selectionAfterUndo;
    // Only a caret selection is expanded to include special elements; an
    // existing range selection is deleted exactly as-is.
    bool expandForSpecialElements = !endingSelection().isCaret();
    switch (endingSelection().selectionType()) {
    case VisibleSelection::RangeSelection:
        selectionToDelete = endingSelection();
        selectionAfterUndo = selectionToDelete;
        break;
    case VisibleSelection::CaretSelection: {
        m_smartDelete = false;
        // Handle delete at beginning-of-block case.
        // Do nothing in the case that the caret is at the start of a
        // root editable element or at the start of a document.
        FrameSelection selection;
        selection.setSelection(endingSelection());
        selection.modify(FrameSelection::AlterationExtend, DirectionForward, granularity);
        // For the kill ring, make sure at least one character ends up selected
        // even when the granularity-sized extension left the selection a caret.
        if (shouldAddToKillRing && selection.isCaret() && granularity != CharacterGranularity)
            selection.modify(FrameSelection::AlterationExtend, DirectionForward, CharacterGranularity);
        Position downstreamEnd = endingSelection().end().downstream();
        VisiblePosition visibleEnd = endingSelection().visibleEnd();
        Node* enclosingTableCell = enclosingNodeOfType(visibleEnd.deepEquivalent(), &isTableCell);
        // A caret at the very end of a table cell has nothing to forward-delete
        // inside that cell.
        if (enclosingTableCell && visibleEnd == lastPositionInNode(enclosingTableCell))
            return;
        if (visibleEnd == endOfParagraph(visibleEnd))
            downstreamEnd = visibleEnd.next(CannotCrossEditingBoundary).deepEquivalent().downstream();
        // When deleting tables: Select the table first, then perform the deletion
        if (downstreamEnd.containerNode() && downstreamEnd.containerNode()->renderer() && downstreamEnd.containerNode()->renderer()->isTable()
            && downstreamEnd.computeOffsetInContainerNode() <= caretMinOffset(*downstreamEnd.containerNode())) {
            setEndingSelection(VisibleSelection(endingSelection().end(), positionAfterNode(downstreamEnd.containerNode()), DOWNSTREAM, endingSelection().isDirectional()));
            typingAddedToOpenCommand(ForwardDeleteKey);
            return;
        }
        // deleting to end of paragraph when at end of paragraph needs to merge the next paragraph (if any)
        if (granularity == ParagraphBoundary && selection.selection().isCaret() && isEndOfParagraph(selection.selection().visibleEnd()))
            selection.modify(FrameSelection::AlterationExtend, DirectionForward, CharacterGranularity);
        selectionToDelete = selection.selection();
        if (!startingSelection().isRange() || selectionToDelete.base() != startingSelection().start())
            selectionAfterUndo = selectionToDelete;
        else {
            // It's a little tricky to compute what the starting selection would have been in the original document.
            // We can't let the VisibleSelection class's validation kick in or it'll adjust for us based on
            // the current state of the document and we'll get the wrong result.
            Position extent = startingSelection().end();
            if (extent.containerNode() != selectionToDelete.end().containerNode())
                extent = selectionToDelete.extent();
            else {
                // Shift the extent by the number of characters about to be
                // deleted so undo restores the pre-deletion selection.
                int extraCharacters;
                if (selectionToDelete.start().containerNode() == selectionToDelete.end().containerNode())
                    extraCharacters = selectionToDelete.end().computeOffsetInContainerNode() - selectionToDelete.start().computeOffsetInContainerNode();
                else
                    extraCharacters = selectionToDelete.end().computeOffsetInContainerNode();
                extent = Position(extent.containerNode(), extent.computeOffsetInContainerNode() + extraCharacters, Position::PositionIsOffsetInAnchor);
            }
            selectionAfterUndo.setWithoutValidation(startingSelection().start(), extent);
        }
        break;
    }
    case VisibleSelection::NoSelection:
        ASSERT_NOT_REACHED();
        break;
    }
    ASSERT(!selectionToDelete.isNone());
    if (selectionToDelete.isNone()) {
#if PLATFORM(IOS_FAMILY)
        // Workaround for this bug:
        // <rdar://problem/4653755> UIKit text widgets should use WebKit editing API to manipulate text
        setEndingSelection(frame.selection().selection());
        closeTyping(&frame);
#endif
        return;
    }
    if (selectionToDelete.isCaret() || !frame.selection().shouldDeleteSelection(selectionToDelete))
        return;
    // NOTE(review): willAddTypingToOpenCommand appears to give editing clients
    // a chance to veto the edit (cf. the `beforeinput` FIXME elsewhere in this
    // file) -- confirm against its definition.
    if (!willAddTypingToOpenCommand(ForwardDeleteKey, granularity, { }, selectionToDelete.firstRange()))
        return;
    // Post the accessibility notification before actually deleting the content while selectionToDelete is still valid
    postTextStateChangeNotificationForDeletion(selectionToDelete);
    if (shouldAddToKillRing)
        frame.editor().addRangeToKillRing(*selectionToDelete.toNormalizedRange().get(), Editor::KillRingInsertionMode::AppendText);
    // make undo select what was deleted
    setStartingSelection(selectionAfterUndo);
    CompositeEditCommand::deleteSelection(selectionToDelete, m_smartDelete, /* mergeBlocksAfterDelete*/ true, /* replace*/ false, expandForSpecialElements, /*sanitizeMarkup*/ true);
    setSmartDelete(false);
    typingAddedToOpenCommand(ForwardDeleteKey);
}
// Deletes the current selection as part of this typing command.
// smartDelete is forwarded to CompositeEditCommand::deleteSelection().
void TypingCommand::deleteSelection(bool smartDelete)
{
    if (willAddTypingToOpenCommand(DeleteSelection, CharacterGranularity)) {
        CompositeEditCommand::deleteSelection(smartDelete);
        typingAddedToOpenCommand(DeleteSelection);
    }
}
#if PLATFORM(IOS_FAMILY)
// Helper that widens access to the protected EditCommand::setEndingSelection()
// so TypingCommand can adjust the last insert command's ending selection.
class FriendlyEditCommand : public EditCommand {
public:
    void setEndingSelection(const VisibleSelection& selection)
    {
        EditCommand::setEndingSelection(selection);
    }
};
// iOS-only: if the most recent child command is an insert-text command,
// overwrite its recorded ending selection with |selection|.
void TypingCommand::setEndingSelectionOnLastInsertCommand(const VisibleSelection& selection)
{
    if (!m_commands.isEmpty()) {
        EditCommand* lastCommand = m_commands.last().get();
        // The static_cast only exposes the protected setter;
        // FriendlyEditCommand adds no state of its own.
        if (lastCommand->isInsertTextCommand())
            static_cast<FriendlyEditCommand*>(lastCommand)->setEndingSelection(selection);
    }
}
#endif
// Records whether the command type being added keeps the pending typing style
// alive. Deletions and paragraph/line separators preserve it; plain text
// insertion and quoted-content separators reset it. Unknown values assert and
// fall back to "not preserved".
void TypingCommand::updatePreservesTypingStyle(ETypingCommand commandType)
{
    if (commandType == DeleteSelection || commandType == DeleteKey || commandType == ForwardDeleteKey
        || commandType == InsertParagraphSeparator || commandType == InsertLineBreak) {
        m_preservesTypingStyle = true;
        return;
    }
    if (commandType == InsertParagraphSeparatorInQuotedContent || commandType == InsertText) {
        m_preservesTypingStyle = false;
        return;
    }
    ASSERT_NOT_REACHED();
    m_preservesTypingStyle = false;
}
// RTTI-style query: a TypingCommand always reports itself as one.
bool TypingCommand::isTypingCommand() const
{
    return true;
}
} // namespace WebCore
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.swagger.invocation.springmvc.response;
import java.util.Arrays;
import java.util.List;
import javax.ws.rs.core.Response.Status;
import javax.ws.rs.core.Response.StatusType;
import org.apache.servicecomb.foundation.common.http.HttpStatus;
import org.apache.servicecomb.swagger.invocation.Response;
import org.apache.servicecomb.swagger.invocation.response.producer.ProducerResponseMapper;
import org.hamcrest.Matchers;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.springframework.http.HttpHeaders;
import org.springframework.http.ResponseEntity;
import mockit.Mock;
import mockit.MockUp;
import mockit.Mocked;
/**
 * Unit tests for SpringmvcProducerResponseMapper: verifies how a Spring
 * {@code ResponseEntity} returned by producer code is converted into a
 * ServiceComb {@code Response} (status, body, headers).
 */
public class TestSpringmvcProducerResponseMapper {
  @Mocked
  ProducerResponseMapper realMapper;

  SpringmvcProducerResponseMapper mapper;

  // Payload shared by every case; assertSame below checks identity pass-through.
  String[] arrResult = new String[] {"a", "b"};

  @Before
  public void setup() {
    mapper = new SpringmvcProducerResponseMapper(realMapper);
    // Stub the wrapped mapper: success statuses map the payload to its List
    // form; non-success statuses return null, forcing the mapper under test to
    // build the Response itself.
    new MockUp<ProducerResponseMapper>(realMapper) {
      @Mock
      Response mapResponse(StatusType status, Object response) {
        if (HttpStatus.isSuccess(status.getStatusCode())) {
          return Response.ok(Arrays.asList(arrResult));
        }
        return null;
      }
    };
  }

  @SuppressWarnings("unchecked")
  @Test
  public void mapResponse_withoutHeader_sucess() {
    ResponseEntity<String[]> responseEntity =
        new ResponseEntity<>(arrResult, org.springframework.http.HttpStatus.OK);
    Response response = mapper.mapResponse(null, responseEntity);
    // Success path goes through the stubbed real mapper, so the body is the
    // List form of arrResult.
    Assert.assertThat((List<String>) response.getResult(), Matchers.contains("a", "b"));
    Assert.assertEquals(Status.OK, response.getStatus());
  }

  @Test
  public void mapResponse_withoutHeader_fail() {
    ResponseEntity<String[]> responseEntity =
        new ResponseEntity<>(arrResult, org.springframework.http.HttpStatus.BAD_REQUEST);
    Response response = mapper.mapResponse(null, responseEntity);
    // Failure path: the stub returned null, so the raw array is carried over
    // unchanged together with the failing status code.
    Assert.assertSame(arrResult, response.getResult());
    Assert.assertEquals(Status.BAD_REQUEST.getStatusCode(), response.getStatus().getStatusCode());
  }

  @Test
  public void mapResponse_withHeader() {
    HttpHeaders headers = new HttpHeaders();
    headers.add("h", "v");
    ResponseEntity<String[]> responseEntity =
        new ResponseEntity<>(arrResult, headers, org.springframework.http.HttpStatus.OK);
    Response response = mapper.mapResponse(null, responseEntity);
    // HTTP headers set on the entity must be copied onto the ServiceComb
    // response.
    List<Object> hv = response.getHeaders().getHeader("h");
    Assert.assertThat(hv, Matchers.contains("v"));
  }
}
|
{
"pile_set_name": "Github"
}
|
C:\android\workspace\AppFTPClientDemo1\bin\AppFTPClientDemo1.ap_ \
: C:\android\workspace\AppFTPClientDemo1\res\drawable-hdpi\ic_launcher.png \
C:\android\workspace\AppFTPClientDemo1\res\drawable-ldpi\ic_launcher.png \
C:\android\workspace\AppFTPClientDemo1\res\drawable-mdpi\ic_launcher.png \
C:\android\workspace\AppFTPClientDemo1\res\drawable-xhdpi\ic_launcher.png \
C:\android\workspace\AppFTPClientDemo1\res\drawable-xxhdpi\ic_launcher.png \
C:\android\workspace\AppFTPClientDemo1\res\layout\activity_app.xml \
C:\android\workspace\AppFTPClientDemo1\res\values\colors.xml \
C:\android\workspace\AppFTPClientDemo1\res\values\strings.xml \
C:\android\workspace\AppFTPClientDemo1\res\values\styles.xml \
C:\android\workspace\AppFTPClientDemo1\res\values-v14\styles.xml \
C:\android\workspace\AppFTPClientDemo1\res\values-v21\styles.xml \
C:\android\workspace\AppFTPClientDemo1\bin\res\drawable-hdpi\ic_launcher.png \
C:\android\workspace\AppFTPClientDemo1\bin\res\drawable-ldpi\ic_launcher.png \
C:\android\workspace\AppFTPClientDemo1\bin\res\drawable-mdpi\ic_launcher.png \
C:\android\workspace\AppFTPClientDemo1\bin\res\drawable-xhdpi\ic_launcher.png \
C:\android\workspace\AppFTPClientDemo1\bin\res\drawable-xxhdpi\ic_launcher.png \
C:\android\workspace\AppFTPClientDemo1\bin\AndroidManifest.xml \
|
{
"pile_set_name": "Github"
}
|
总体排版规则
==============
缩进
---------
每次缩进使用两个空格。
不使用TAB键或者混合使用TAB键和空格进行缩进。
.. code-block:: html
<ul>
<li>Fantastic
<li>Great
</ul>
.. code-block:: css
.example {
color: blue;
}
大小写
----------
只使用小写字母。
所有的代码都使用小写字母:适用于HTML元素、属性、属性值(除了text/CDATA)、CSS选择器、属性名以及属性值(字符串除外)。
.. code-block:: html
<!-- 不推荐 -->
<A HREF="/">Home</A>
<!-- 推荐 -->
<img src="google.png" alt="Google">
.. code-block:: css
/* 不推荐 */
color: #E5E5E5;
/* 推荐 */
color: #e5e5e5;
尾部的空格
------------
删除尾部的空格。
尾部的空格是多余的,不删除则形成无意义的文件差异。
.. code-block:: html
<!-- 不推荐 -->
<p>What?_
<!-- 推荐 -->
<p>Yes please.
|
{
"pile_set_name": "Github"
}
|
<%@ page language="java" contentType="text/html; charset=utf-8" pageEncoding="iso-8859-1"%>
<%@ include file="include.jsp" %>
<%--
Creates HTML for displaying the rating stars.
PARAMETERS
path: Album path. May be null if readonly.
readonly: Whether rating can be changed.
rating: The rating, an integer from 0 (no rating), through 10 (lowest rating), to 50 (highest rating).
--%>
<%-- Render five stars. Star i shows "on" when rating >= i*10, "half" when rating is within [i*10 - 7, i*10 - 3], otherwise "off". --%>
<c:forEach var="i" begin="1" end="5">
<%-- URL that sets the album's rating to i (used when not read-only). --%>
<sub:url value="setRating.view" var="ratingUrl">
<sub:param name="path" value="${param.path}"/>
<sub:param name="action" value="rating"/>
<sub:param name="rating" value="${i}"/>
</sub:url>
<c:choose>
<c:when test="${param.rating ge i * 10}">
<spring:theme code="ratingOnImage" var="imageUrl"/>
</c:when>
<c:when test="${param.rating ge i*10 - 7 and param.rating le i*10 - 3}">
<spring:theme code="ratingHalfImage" var="imageUrl"/>
</c:when>
<c:otherwise>
<spring:theme code="ratingOffImage" var="imageUrl"/>
</c:otherwise>
</c:choose>
<%-- Read-only: plain image with the current rating in the tooltip. Editable: the star links to ratingUrl for value i. --%>
<c:choose>
<c:when test="${param.readonly}">
<img src="${imageUrl}" style="margin-right:-3px" alt="" title="<fmt:message key="rating.rating"/> ${param.rating/10}">
</c:when>
<c:otherwise>
<a href="${ratingUrl}"><img src="${imageUrl}" style="margin-right:-3px" alt="" title="<fmt:message key="rating.rating"/> ${i}"></a>
</c:otherwise>
</c:choose>
</c:forEach>
<%-- "Clear rating" control (rating = 0), shown only when editable. --%>
<sub:url value="setRating.view" var="clearRatingUrl">
<sub:param name="path" value="${param.path}"/>
<sub:param name="action" value="rating"/>
<sub:param name="rating" value="0"/>
</sub:url>
<c:if test="${not param.readonly}">
| <a href="${clearRatingUrl}"><img src="<spring:theme code="clearRatingImage"/>" alt="" title="<fmt:message key="rating.clearrating"/>" style="margin-left:-3px; margin-right:5px"></a>
</c:if>
|
{
"pile_set_name": "Github"
}
|
/* Copyright 2009-2018 EPFL, Lausanne */
package stainless
package extraction
package trace
/** Extraction phase implementing the `@traceInduct` annotation.
  *
  * For a function flagged with `traceInduct`, the phase looks for a recursive
  * call to the designated callee whose arguments are distinct parameters of
  * the annotated function, and rewrites the function so its body also mirrors
  * that callee's recursion pattern (an "induction scheme"). Presumably this
  * guides the solver to an inductive proof -- confirm against the Stainless
  * documentation.
  */
trait Trace extends CachingPhase with SimpleFunctions with IdentitySorts { self =>
  val s: Trees
  val t: termination.Trees
  import s._

  // Cache key: the function itself plus every function it transitively
  // depends on, so a change in any dependency invalidates the cached result.
  override protected final val funCache = new ExtractionCache[s.FunDef, FunctionResult]({(fd, symbols) =>
    FunctionKey(fd) + SetKey(
      symbols.dependencies(fd.id)
        .flatMap(id => symbols.lookupFunction(id))
    )
  })

  override protected type TransformerContext = s.Symbols
  override protected def getContext(symbols: s.Symbols) = symbols

  // Identity transformer translating source trees `s` into target trees `t`.
  private[this] object identity extends transformers.TreeTransformer {
    override val s: self.s.type = self.s
    override val t: self.t.type = self.t
  }

  /** Extracts one function: if it carries `@traceInduct`, locate the first
    * matching recursive invocation and rebuild the body via createTactFun;
    * otherwise pass the function through unchanged. The TraceInduct flag is
    * stripped from the result either way.
    */
  override protected def extractFunction(symbols: Symbols, fd: FunDef): t.FunDef = {
    import symbols._
    var funInv: Option[FunctionInvocation] = None
    if(fd.flags.exists(elem => elem.name == "traceInduct")) {
      fd.flags.filter(elem => elem.name == "traceInduct").head match {
        case Annotation("traceInduct", fun) => {
          exprOps.preTraversal {
            case _ if funInv.isDefined => // do nothing
            // Candidate: a recursive call whose callee is named in the
            // annotation (an empty string in the annotation matches any).
            case fi @ FunctionInvocation(tfd, _, args) if symbols.isRecursive(tfd) && (fun.contains(StringLiteral(tfd.name)) || fun.contains(StringLiteral("")))
            => {
              val paramVars = fd.params.map(_.toVariable)
              // Only accept calls whose arguments are distinct parameters of
              // `fd`, so the recursion can be mimicked argument-for-argument.
              val argCheck = args.forall(paramVars.contains) && args.toSet.size == args.size
              if (argCheck)
                funInv = Some(fi)
            }
            case _ =>
          }(fd.fullBody)
        }
      }
    }
    val result: FunDef = (funInv match {
      case None => fd
      case Some(finv) => createTactFun(symbols, fd, finv)
    })
    identity.transform(result.copy(flags = result.flags filterNot (f => f == TraceInduct)))
  }

  /** Builds the "tactic" version of `function`: its new body conjoins an
    * induction scheme -- derived from the callee body of `finv` inlined at the
    * call site -- with the original body, keeping the original pre- and
    * postconditions.
    */
  private def createTactFun(symbols: Symbols, function: FunDef, finv: FunctionInvocation): FunDef = {
    import symbols._
    val callee: FunDef = symbols.functions.filter(elem => elem._2.id == finv.id).head._2

    // Rebuilds `e` as a conjunction that visits every subterm and, at each
    // self-recursive call of the callee, inserts a matching recursive call of
    // `function`, thereby replaying the callee's recursion pattern.
    def inductPattern(e: Expr): Expr = {
      e match {
        case IfExpr(c, th, el) =>
          andJoin(Seq(inductPattern(c), IfExpr(c, inductPattern(th), inductPattern(el))))
        case MatchExpr(scr, cases) =>
          val scrpat = inductPattern(scr)
          val casePats = cases.map {
            case MatchCase(pat, optGuard, rhs) =>
              val guardPat = optGuard.toSeq.map(inductPattern _)
              (guardPat, MatchCase(pat, optGuard, inductPattern(rhs)))
          }
          val pats = scrpat +: casePats.flatMap(_._1) :+ MatchExpr(scr, casePats.map(_._2))
          andJoin(pats)
        case Let(i, v, b) =>
          andJoin(Seq(inductPattern(v), Let(i, v, inductPattern(b))))
        case FunctionInvocation(tfd, _, args) =>
          val argPattern = andJoin(args.map(inductPattern))
          if (tfd == callee.id) { // self recursive call ?
            // create a tactFun invocation to mimic the recursion pattern
            val paramVars = function.params.map(_.toVariable)
            val paramIndex = paramVars.zipWithIndex.toMap
            // Parameters of `function` that appear directly as arguments of
            // the annotated invocation, keyed by their argument position.
            val framePositions = finv.args.zipWithIndex.collect {
              case (v: Variable, i) if paramVars.contains(v) => (v, i)
            }.toMap
            // Remaining parameters are passed through unchanged.
            val footprint = paramVars.filterNot(framePositions.keySet.contains)
            val indexedFootprint = footprint.map { a => paramIndex(a) -> a }.toMap
            // create a tactFun invocation to mimic the recursion pattern
            val indexedArgs = framePositions.map {
              case (f, i) => paramIndex(f) -> args(i)
            }.toMap ++ indexedFootprint
            val recArgs = (0 until indexedArgs.size).map(indexedArgs)
            val recCall = FunctionInvocation(function.id, function.typeArgs, recArgs)
            andJoin(Seq(argPattern, recCall))
          } else {
            argPattern
          }
        case Operator(args, op) =>
          // conjoin all the expressions and return them
          andJoin(args.map(inductPattern))
      }
    }

    // Inline the callee body at the annotated call site: substitute actual
    // arguments for parameters and instantiate type parameters.
    val argsMap = callee.params.map(_.toVariable).zip(finv.args).toMap
    val tparamMap = callee.typeArgs.zip(finv.tfd.tps).toMap
    val inlinedBody = typeOps.instantiateType(exprOps.replaceFromSymbols(argsMap, callee.body.get), tparamMap)
    val inductScheme = inductPattern(inlinedBody)
    // Strip an existing Ensuring wrapper; the postcondition is re-attached
    // below via withPostcondition.
    val prevBody = function.fullBody match {
      case Ensuring(body, pred) => body
      case _ => function.fullBody
    }
    // body, pre and post for the tactFun
    val body = andJoin(Seq(inductScheme, prevBody))
    val precondition = function.precondition
    val postcondition = function.postcondition
    val bodyPre = exprOps.withPrecondition(body, precondition)
    val bodyPost = exprOps.withPostcondition(bodyPre,postcondition)
    function.copy(function.id, function.tparams, function.params, function.returnType, bodyPost, function.flags)
  }
}
/** Factory wiring a Trace phase to concrete source (`ts`) and target (`tt`)
  * tree definitions plus the inox context.
  */
object Trace {
  def apply(ts: Trees, tt: termination.Trees)(implicit ctx: inox.Context): ExtractionPipeline {
    val s: ts.type
    val t: tt.type
  } = new Trace {
    override val s: ts.type = ts
    override val t: tt.type = tt
    override val context = ctx
  }
}
|
{
"pile_set_name": "Github"
}
|
//===- llvm/unittest/Analysis/LoopPassManagerTest.cpp - LPM tests ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/SourceMgr.h"
// Workaround for the gcc 6.1 bug PR80916.
#if defined(__GNUC__) && __GNUC__ > 5
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wunused-function"
#endif
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#if defined(__GNUC__) && __GNUC__ > 5
# pragma GCC diagnostic pop
#endif
using namespace llvm;
namespace {
using testing::DoDefault;
using testing::Return;
using testing::Expectation;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::_;
/// CRTP base that lets a gmock object stand in for an analysis pass.
///
/// The nested Analysis/Result pair forwards run() and invalidate() calls to
/// the derived mock handle, so tests can set expectations on when an analysis
/// is (re)computed or invalidated.
template <typename DerivedT, typename IRUnitT,
          typename AnalysisManagerT = AnalysisManager<IRUnitT>,
          typename... ExtraArgTs>
class MockAnalysisHandleBase {
public:
  class Analysis : public AnalysisInfoMixin<Analysis> {
    friend AnalysisInfoMixin<Analysis>;
    friend MockAnalysisHandleBase;
    static AnalysisKey Key;

    DerivedT *Handle;

    Analysis(DerivedT &Handle) : Handle(&Handle) {
      static_assert(std::is_base_of<MockAnalysisHandleBase, DerivedT>::value,
                    "Must pass the derived type to this template!");
    }

  public:
    class Result {
      friend MockAnalysisHandleBase;

      DerivedT *Handle;

      Result(DerivedT &Handle) : Handle(&Handle) {}

    public:
      // Forward invalidation events to the mock handle.
      bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA,
                      typename AnalysisManagerT::Invalidator &Inv) {
        return Handle->invalidate(IR, PA, Inv);
      }
    };

    Result run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs) {
      return Handle->run(IR, AM, ExtraArgs...);
    }
  };

  Analysis getAnalysis() { return Analysis(static_cast<DerivedT &>(*this)); }
  typename Analysis::Result getResult() {
    return typename Analysis::Result(static_cast<DerivedT &>(*this));
  }

protected:
  // FIXME: MSVC seems unable to handle a lambda argument to Invoke from within
  // the template, so we use a boring static function.
  static bool invalidateCallback(IRUnitT &IR, const PreservedAnalyses &PA,
                                 typename AnalysisManagerT::Invalidator &Inv) {
    auto PAC = PA.template getChecker<Analysis>();
    return !PAC.preserved() &&
           !PAC.template preservedSet<AllAnalysesOn<IRUnitT>>();
  }

  /// Derived classes should call this in their constructor to set up default
  /// mock actions. (We can't do this in our constructor because this has to
  /// run after the DerivedT is constructed.)
  void setDefaults() {
    ON_CALL(static_cast<DerivedT &>(*this),
            run(_, _, testing::Matcher<ExtraArgTs>(_)...))
        .WillByDefault(Return(this->getResult()));
    ON_CALL(static_cast<DerivedT &>(*this), invalidate(_, _, _))
        .WillByDefault(Invoke(&invalidateCallback));
  }
};

// Out-of-line definition of the analysis key each mock Analysis needs so the
// pass manager can identify its type.
template <typename DerivedT, typename IRUnitT, typename AnalysisManagerT,
          typename... ExtraArgTs>
AnalysisKey MockAnalysisHandleBase<DerivedT, IRUnitT, AnalysisManagerT,
                                   ExtraArgTs...>::Analysis::Key;
/// Mock handle for loop analyses.
///
/// This is provided as a template accepting an (optional) integer. Because
/// analyses are identified and queried by type, this allows constructing
/// multiple handles with distinctly typed nested 'Analysis' types that can be
/// registered and queried. If you want to register multiple loop analysis
/// passes, you'll need to instantiate this type with different values for I.
/// For example:
///
/// MockLoopAnalysisHandleTemplate<0> h0;
/// MockLoopAnalysisHandleTemplate<1> h1;
/// typedef decltype(h0)::Analysis Analysis0;
/// typedef decltype(h1)::Analysis Analysis1;
template <size_t I = static_cast<size_t>(-1)>
struct MockLoopAnalysisHandleTemplate
    : MockAnalysisHandleBase<MockLoopAnalysisHandleTemplate<I>, Loop,
                             LoopAnalysisManager,
                             LoopStandardAnalysisResults &> {
  typedef typename MockLoopAnalysisHandleTemplate::Analysis Analysis;

  MOCK_METHOD3_T(run, typename Analysis::Result(Loop &, LoopAnalysisManager &,
                                                LoopStandardAnalysisResults &));

  MOCK_METHOD3_T(invalidate, bool(Loop &, const PreservedAnalyses &,
                                  LoopAnalysisManager::Invalidator &));

  MockLoopAnalysisHandleTemplate() { this->setDefaults(); }
};

// Default instantiation used by most tests; instantiations with other I values
// yield distinct Analysis types that can be registered side by side.
typedef MockLoopAnalysisHandleTemplate<> MockLoopAnalysisHandle;
/// Mock handle for a function analysis (no extra run() arguments).
struct MockFunctionAnalysisHandle
    : MockAnalysisHandleBase<MockFunctionAnalysisHandle, Function> {
  MOCK_METHOD2(run, Analysis::Result(Function &, FunctionAnalysisManager &));

  MOCK_METHOD3(invalidate, bool(Function &, const PreservedAnalyses &,
                                FunctionAnalysisManager::Invalidator &));

  MockFunctionAnalysisHandle() { setDefaults(); }
};
/// CRTP base that lets a gmock object stand in for a transformation pass.
///
/// The nested Pass forwards run() to the derived mock handle so tests can set
/// expectations on pass executions and script the PreservedAnalyses returned.
template <typename DerivedT, typename IRUnitT,
          typename AnalysisManagerT = AnalysisManager<IRUnitT>,
          typename... ExtraArgTs>
class MockPassHandleBase {
public:
  class Pass : public PassInfoMixin<Pass> {
    friend MockPassHandleBase;

    DerivedT *Handle;

    Pass(DerivedT &Handle) : Handle(&Handle) {
      static_assert(std::is_base_of<MockPassHandleBase, DerivedT>::value,
                    "Must pass the derived type to this template!");
    }

  public:
    PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM,
                          ExtraArgTs... ExtraArgs) {
      return Handle->run(IR, AM, ExtraArgs...);
    }
  };

  Pass getPass() { return Pass(static_cast<DerivedT &>(*this)); }

protected:
  /// Derived classes should call this in their constructor to set up default
  /// mock actions. (We can't do this in our constructor because this has to
  /// run after the DerivedT is constructed.)
  void setDefaults() {
    ON_CALL(static_cast<DerivedT &>(*this),
            run(_, _, testing::Matcher<ExtraArgTs>(_)...))
        .WillByDefault(Return(PreservedAnalyses::all()));
  }
};
/// Mock handle for a loop pass (takes the loop standard results and the
/// LPMUpdater as extra run() arguments).
struct MockLoopPassHandle
    : MockPassHandleBase<MockLoopPassHandle, Loop, LoopAnalysisManager,
                         LoopStandardAnalysisResults &, LPMUpdater &> {
  MOCK_METHOD4(run,
               PreservedAnalyses(Loop &, LoopAnalysisManager &,
                                 LoopStandardAnalysisResults &, LPMUpdater &));
  MockLoopPassHandle() { setDefaults(); }
};
/// Mock handle for a function pass.
struct MockFunctionPassHandle
    : MockPassHandleBase<MockFunctionPassHandle, Function> {
  MOCK_METHOD2(run, PreservedAnalyses(Function &, FunctionAnalysisManager &));

  MockFunctionPassHandle() { setDefaults(); }
};
/// Mock handle for a module pass.
struct MockModulePassHandle : MockPassHandleBase<MockModulePassHandle, Module> {
  MOCK_METHOD2(run, PreservedAnalyses(Module &, ModuleAnalysisManager &));

  MockModulePassHandle() { setDefaults(); }
};
/// Define a custom matcher for objects which support a 'getName' method
/// returning a StringRef.
///
/// LLVM often has IR objects or analysis objects which expose a StringRef name
/// and in tests it is convenient to match these by name for readability. This
/// matcher supports any type exposing a getName() method of this form.
///
/// It should be used as:
///
/// HasName("my_function")
///
/// No namespace or other qualification is required.
MATCHER_P(HasName, Name, "") {
  // The matcher's name and argument are printed in the case of failure, but we
  // also want to print out the name of the argument. This uses an implicitly
  // available std::ostream, so we have to construct a std::string.
  *result_listener << "has name '" << arg.getName().str() << "'";
  return Name == arg.getName();
}
// Parses textual IR into a Module owned by the caller. Returns null on a
// parse error (per parseAssemblyString); the diagnostic captured in Err is
// deliberately discarded since tests only feed known-good IR.
std::unique_ptr<Module> parseIR(LLVMContext &C, const char *IR) {
  SMDiagnostic Err;
  return parseAssemblyString(IR, Err, C);
}
/// Common fixture: builds a two-function module (@f with the nested loop tree
/// loop.0 -> {loop.0.0, loop.0.1}, and @g with a single loop.g.0) and wires
/// the loop/function/module analysis managers together with every analysis the
/// loop pass infrastructure requires.
class LoopPassManagerTest : public ::testing::Test {
protected:
  LLVMContext Context;
  std::unique_ptr<Module> M;
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  ModuleAnalysisManager MAM;
  MockLoopAnalysisHandle MLAHandle;
  MockLoopPassHandle MLPHandle;
  MockFunctionPassHandle MFPHandle;
  MockModulePassHandle MMPHandle;

  // Convenience loop-pass body: force the mock loop analysis to be computed
  // (or fetched from cache) and preserve all analyses.
  static PreservedAnalyses
  getLoopAnalysisResult(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &) {
    (void)AM.getResult<MockLoopAnalysisHandle::Analysis>(L, AR);
    return PreservedAnalyses::all();
  };

public:
  LoopPassManagerTest()
      : M(parseIR(Context,
                  "define void @f(i1* %ptr) {\n"
                  "entry:\n"
                  " br label %loop.0\n"
                  "loop.0:\n"
                  " %cond.0 = load volatile i1, i1* %ptr\n"
                  " br i1 %cond.0, label %loop.0.0.ph, label %end\n"
                  "loop.0.0.ph:\n"
                  " br label %loop.0.0\n"
                  "loop.0.0:\n"
                  " %cond.0.0 = load volatile i1, i1* %ptr\n"
                  " br i1 %cond.0.0, label %loop.0.0, label %loop.0.1.ph\n"
                  "loop.0.1.ph:\n"
                  " br label %loop.0.1\n"
                  "loop.0.1:\n"
                  " %cond.0.1 = load volatile i1, i1* %ptr\n"
                  " br i1 %cond.0.1, label %loop.0.1, label %loop.0.latch\n"
                  "loop.0.latch:\n"
                  " br label %loop.0\n"
                  "end:\n"
                  " ret void\n"
                  "}\n"
                  "\n"
                  "define void @g(i1* %ptr) {\n"
                  "entry:\n"
                  " br label %loop.g.0\n"
                  "loop.g.0:\n"
                  " %cond.0 = load volatile i1, i1* %ptr\n"
                  " br i1 %cond.0, label %loop.g.0, label %end\n"
                  "end:\n"
                  " ret void\n"
                  "}\n")),
        LAM(true), FAM(true), MAM(true) {
    // Register our mock analysis.
    LAM.registerPass([&] { return MLAHandle.getAnalysis(); });
    // We need DominatorTreeAnalysis for LoopAnalysis.
    FAM.registerPass([&] { return DominatorTreeAnalysis(); });
    FAM.registerPass([&] { return LoopAnalysis(); });
    // We also allow loop passes to assume a set of other analyses and so need
    // those.
    FAM.registerPass([&] { return AAManager(); });
    FAM.registerPass([&] { return AssumptionAnalysis(); });
    if (EnableMSSALoopDependency)
      FAM.registerPass([&] { return MemorySSAAnalysis(); });
    FAM.registerPass([&] { return ScalarEvolutionAnalysis(); });
    FAM.registerPass([&] { return TargetLibraryAnalysis(); });
    FAM.registerPass([&] { return TargetIRAnalysis(); });
    // Register required pass instrumentation analysis.
    LAM.registerPass([&] { return PassInstrumentationAnalysis(); });
    FAM.registerPass([&] { return PassInstrumentationAnalysis(); });
    MAM.registerPass([&] { return PassInstrumentationAnalysis(); });
    // Cross-register proxies.
    LAM.registerPass([&] { return FunctionAnalysisManagerLoopProxy(FAM); });
    FAM.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); });
    FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); });
    MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); });
  }
};
// End-to-end smoke test: runs mock loop passes over every loop in the module
// and checks analysis caching and invalidation ordering across two pipelines.
TEST_F(LoopPassManagerTest, Basic) {
  ModulePassManager MPM(true);
  ::testing::InSequence MakeExpectationsSequenced;

  // First we just visit all the loops in all the functions and get their
  // analysis results. This will run the analysis a total of four times,
  // once for each loop.
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _))
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
  // Wire the loop pass through pass managers into the module pipeline.
  {
    LoopPassManager LPM(true);
    LPM.addPass(MLPHandle.getPass());
    FunctionPassManager FPM(true);
    FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
    MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
  }

  // Next we run two passes over the loops. The first one invalidates the
  // analyses for one loop, the second ones try to get the analysis results.
  // This should force only one analysis to re-run within the loop PM, but will
  // also invalidate everything after the loop pass manager finishes.
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
      .WillOnce(DoDefault())
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
      .WillOnce(InvokeWithoutArgs([] { return PreservedAnalyses::none(); }))
      .WillOnce(Invoke(getLoopAnalysisResult));
  // Only loop.0.1's analysis was invalidated above, so only it re-runs here.
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
      .WillOnce(DoDefault())
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _))
      .WillOnce(DoDefault())
      .WillOnce(Invoke(getLoopAnalysisResult));
  // Wire two loop pass runs into the module pipeline.
  {
    LoopPassManager LPM(true);
    LPM.addPass(MLPHandle.getPass());
    LPM.addPass(MLPHandle.getPass());
    FunctionPassManager FPM(true);
    FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
    MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
  }

  // And now run the pipeline across the module.
  MPM.run(*M, MAM);
}
// Check that a function pass which fails to preserve loop analyses causes
// them to be invalidated (or cleared) for that function only, and that
// invalidation in one function leaves the other function's cache untouched.
TEST_F(LoopPassManagerTest, FunctionPassInvalidationOfLoopAnalyses) {
  ModulePassManager MPM(true);
  FunctionPassManager FPM(true);
  // We process each function completely in sequence.
  // Two independent gmock sequences let expectations for 'f' and 'g'
  // interleave freely while staying ordered within each function.
  ::testing::Sequence FSequence, GSequence;
  // First, force the analysis result to be computed for each loop.
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _))
      .InSequence(FSequence)
      .WillOnce(DoDefault());
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _))
      .InSequence(FSequence)
      .WillOnce(DoDefault());
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _))
      .InSequence(FSequence)
      .WillOnce(DoDefault());
  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _))
      .InSequence(GSequence)
      .WillOnce(DoDefault());
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  // No need to re-run if we require again from a fresh loop pass manager.
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  // For 'f', preserve most things but not the specific loop analyses.
  EXPECT_CALL(MFPHandle, run(HasName("f"), _))
      .InSequence(FSequence)
      .WillOnce(Return(getLoopPassPreservedAnalyses()));
  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _))
      .InSequence(FSequence)
      .WillOnce(DoDefault());
  // On one loop, skip the invalidation (as though we did an internal update).
  // Returning false from invalidate() means "my result is still valid".
  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _))
      .InSequence(FSequence)
      .WillOnce(Return(false));
  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _))
      .InSequence(FSequence)
      .WillOnce(DoDefault());
  // Now two loops still have to be recomputed.
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _))
      .InSequence(FSequence)
      .WillOnce(DoDefault());
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _))
      .InSequence(FSequence)
      .WillOnce(DoDefault());
  // Preserve things in the second function to ensure invalidation remains
  // isolated to one function.
  EXPECT_CALL(MFPHandle, run(HasName("g"), _))
      .InSequence(GSequence)
      .WillOnce(DoDefault());
  FPM.addPass(MFPHandle.getPass());
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  EXPECT_CALL(MFPHandle, run(HasName("f"), _))
      .InSequence(FSequence)
      .WillOnce(DoDefault());
  // For 'g', fail to preserve anything, causing the loops themselves to be
  // cleared. We don't get an invalidation event here as the loop is gone, but
  // we should still have to recompute the analysis.
  EXPECT_CALL(MFPHandle, run(HasName("g"), _))
      .InSequence(GSequence)
      .WillOnce(Return(PreservedAnalyses::none()));
  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _))
      .InSequence(GSequence)
      .WillOnce(DoDefault());
  FPM.addPass(MFPHandle.getPass());
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
  // Verify with a separate function pass run that we didn't mess up 'f's
  // cache. No analysis runs should be necessary here.
  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
  MPM.run(*M, MAM);
}
// Check that invalidation triggered at the module level propagates all the
// way down to cached loop analysis results, through the function analysis
// manager proxy.
TEST_F(LoopPassManagerTest, ModulePassInvalidationOfLoopAnalyses) {
  ModulePassManager MPM(true);
  ::testing::InSequence MakeExpectationsSequenced;
  // First, force the analysis result to be computed for each loop.
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
  // Walking all the way out and all the way back in doesn't re-run the
  // analysis.
  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
  // But a module pass that doesn't preserve the actual mock loop analysis
  // invalidates all the way down and forces recomputing.
  EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] {
    auto PA = getLoopPassPreservedAnalyses();
    // Preserve the proxy so the function (and loop) analysis caches survive
    // as caches -- only the mock loop analysis itself is invalidated.
    PA.preserve<FunctionAnalysisManagerModuleProxy>();
    return PA;
  }));
  // All the loop analyses from both functions get invalidated before we
  // recompute anything.
  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _));
  // On one loop, again skip the invalidation (as though we did an internal
  // update).
  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _))
      .WillOnce(Return(false));
  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _));
  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.g.0"), _, _));
  // Now all but one of the loops gets re-analyzed.
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
  MPM.addPass(MMPHandle.getPass());
  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
  // Verify that the cached values persist.
  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
  // Now we fail to preserve the loop analysis and observe that the loop
  // analyses are cleared (so no invalidation event) as the loops themselves
  // are no longer valid.
  EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] {
    auto PA = PreservedAnalyses::none();
    PA.preserve<FunctionAnalysisManagerModuleProxy>();
    return PA;
  }));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
  MPM.addPass(MMPHandle.getPass());
  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
  // Verify that the cached values persist.
  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
  // Next, check that even if we preserve everything within the function itself,
  // if the function's module pass proxy isn't preserved and the potential set
  // of functions changes, the clear reaches the loop analyses as well. This
  // will again trigger re-runs but not invalidation events.
  EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] {
    auto PA = PreservedAnalyses::none();
    PA.preserveSet<AllAnalysesOn<Function>>();
    PA.preserveSet<AllAnalysesOn<Loop>>();
    return PA;
  }));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
  MPM.addPass(MMPHandle.getPass());
  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
  MPM.run(*M, MAM);
}
// Test that if any of the bundled analyses provided in the LPM's signature
// become invalid, the analysis proxy itself becomes invalid and we clear all
// loop analysis results.
//
// The "bundled" analyses are the ones packaged into
// LoopStandardAnalysisResults (AA, DominatorTree, LoopInfo, ScalarEvolution,
// ...); each sub-case below drops exactly one of them from the preserved set
// and expects a full re-run of the mock loop analysis for every loop in 'f'.
TEST_F(LoopPassManagerTest, InvalidationOfBundledAnalyses) {
  ModulePassManager MPM(true);
  FunctionPassManager FPM(true);
  ::testing::InSequence MakeExpectationsSequenced;
  // First, force the analysis result to be computed for each loop.
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  // No need to re-run if we require again from a fresh loop pass manager.
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  // Preserving everything but the loop analyses themselves results in
  // invalidation and running.
  EXPECT_CALL(MFPHandle, run(HasName("f"), _))
      .WillOnce(Return(getLoopPassPreservedAnalyses()));
  EXPECT_CALL(MLAHandle, invalidate(_, _, _)).Times(3);
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  FPM.addPass(MFPHandle.getPass());
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  // The rest don't invalidate analyses, they only trigger re-runs because we
  // clear the cache completely.
  EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
    auto PA = PreservedAnalyses::none();
    // Not preserving `AAManager`.
    PA.preserve<DominatorTreeAnalysis>();
    PA.preserve<LoopAnalysis>();
    PA.preserve<LoopAnalysisManagerFunctionProxy>();
    PA.preserve<ScalarEvolutionAnalysis>();
    return PA;
  }));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  FPM.addPass(MFPHandle.getPass());
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
    auto PA = PreservedAnalyses::none();
    PA.preserve<AAManager>();
    // Not preserving `DominatorTreeAnalysis`.
    PA.preserve<LoopAnalysis>();
    PA.preserve<LoopAnalysisManagerFunctionProxy>();
    PA.preserve<ScalarEvolutionAnalysis>();
    return PA;
  }));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  FPM.addPass(MFPHandle.getPass());
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
    auto PA = PreservedAnalyses::none();
    PA.preserve<AAManager>();
    PA.preserve<DominatorTreeAnalysis>();
    // Not preserving the `LoopAnalysis`.
    PA.preserve<LoopAnalysisManagerFunctionProxy>();
    PA.preserve<ScalarEvolutionAnalysis>();
    return PA;
  }));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  FPM.addPass(MFPHandle.getPass());
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
    auto PA = PreservedAnalyses::none();
    PA.preserve<AAManager>();
    PA.preserve<DominatorTreeAnalysis>();
    PA.preserve<LoopAnalysis>();
    // Not preserving the `LoopAnalysisManagerFunctionProxy`.
    PA.preserve<ScalarEvolutionAnalysis>();
    return PA;
  }));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  FPM.addPass(MFPHandle.getPass());
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
    auto PA = PreservedAnalyses::none();
    PA.preserve<AAManager>();
    PA.preserve<DominatorTreeAnalysis>();
    PA.preserve<LoopAnalysis>();
    PA.preserve<LoopAnalysisManagerFunctionProxy>();
    // Not preserving `ScalarEvolutionAnalysis`.
    return PA;
  }));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  FPM.addPass(MFPHandle.getPass());
  FPM.addPass(createFunctionToLoopPassAdaptor(
      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
  // After all the churn on 'f', we'll compute the loop analysis results for
  // 'g' once with a requires pass and then run our mock pass over g a bunch
  // but just get cached results each time.
  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
  EXPECT_CALL(MFPHandle, run(HasName("g"), _)).Times(6);
  MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
  MPM.run(*M, MAM);
}
// Check transitive (dependency-based) invalidation between two loop analyses:
// AnalysisA depends on AnalysisB, so invalidating B must also invalidate A
// even when a pass explicitly preserved A.
TEST_F(LoopPassManagerTest, IndirectInvalidation) {
  // We need two distinct analysis types and handles.
  enum { A, B };
  MockLoopAnalysisHandleTemplate<A> MLAHandleA;
  MockLoopAnalysisHandleTemplate<B> MLAHandleB;
  LAM.registerPass([&] { return MLAHandleA.getAnalysis(); });
  LAM.registerPass([&] { return MLAHandleB.getAnalysis(); });
  typedef decltype(MLAHandleA)::Analysis AnalysisA;
  typedef decltype(MLAHandleB)::Analysis AnalysisB;
  // Set up AnalysisA to depend on our AnalysisB. For testing purposes we just
  // need to get the AnalysisB results in AnalysisA's run method and check if
  // AnalysisB gets invalidated in AnalysisA's invalidate method.
  ON_CALL(MLAHandleA, run(_, _, _))
      .WillByDefault(Invoke([&](Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR) {
        (void)AM.getResult<AnalysisB>(L, AR);
        return MLAHandleA.getResult();
      }));
  // A's invalidate reports invalid if A itself isn't preserved OR if its
  // dependency B has been invalidated -- the standard dependency idiom.
  ON_CALL(MLAHandleA, invalidate(_, _, _))
      .WillByDefault(Invoke([](Loop &L, const PreservedAnalyses &PA,
                               LoopAnalysisManager::Invalidator &Inv) {
        auto PAC = PA.getChecker<AnalysisA>();
        return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Loop>>()) ||
               Inv.invalidate<AnalysisB>(L, PA);
      }));
  ::testing::InSequence MakeExpectationsSequenced;
  // Compute the analyses across all of 'f' first.
  EXPECT_CALL(MLAHandleA, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandleB, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandleA, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandleB, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandleA, run(HasName("loop.0"), _, _));
  EXPECT_CALL(MLAHandleB, run(HasName("loop.0"), _, _));
  // Now we invalidate AnalysisB (but not AnalysisA) for one of the loops and
  // preserve everything for the rest. This in turn triggers that one loop to
  // recompute both AnalysisB *and* AnalysisA if indirect invalidation is
  // working.
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
      .WillOnce(InvokeWithoutArgs([] {
        auto PA = getLoopPassPreservedAnalyses();
        // Specifically preserve AnalysisA so that it would survive if it
        // didn't depend on AnalysisB.
        PA.preserve<AnalysisA>();
        return PA;
      }));
  // It happens that AnalysisB is invalidated first. That shouldn't matter
  // though, and we should still call AnalysisA's invalidation.
  EXPECT_CALL(MLAHandleB, invalidate(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandleA, invalidate(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
      .WillOnce(Invoke([](Loop &L, LoopAnalysisManager &AM,
                          LoopStandardAnalysisResults &AR, LPMUpdater &) {
        (void)AM.getResult<AnalysisA>(L, AR);
        return PreservedAnalyses::all();
      }));
  EXPECT_CALL(MLAHandleA, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandleB, run(HasName("loop.0.0"), _, _));
  // The rest of the loops should run and get cached results.
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
      .Times(2)
      .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
        (void)AM.getResult<AnalysisA>(L, AR);
        return PreservedAnalyses::all();
      }));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
      .Times(2)
      .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
        (void)AM.getResult<AnalysisA>(L, AR);
        return PreservedAnalyses::all();
      }));
  // The run over 'g' should be boring, with us just computing the analyses once
  // up front and then running loop passes and getting cached results.
  EXPECT_CALL(MLAHandleA, run(HasName("loop.g.0"), _, _));
  EXPECT_CALL(MLAHandleB, run(HasName("loop.g.0"), _, _));
  EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _))
      .Times(2)
      .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
        (void)AM.getResult<AnalysisA>(L, AR);
        return PreservedAnalyses::all();
      }));
  // Build the pipeline and run it.
  ModulePassManager MPM(true);
  FunctionPassManager FPM(true);
  FPM.addPass(
      createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass<AnalysisA>()));
  LoopPassManager LPM(true);
  LPM.addPass(MLPHandle.getPass());
  LPM.addPass(MLPHandle.getPass());
  FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
  MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
  MPM.run(*M, MAM);
}
// Check that a loop analysis can register a dependency on an *outer*
// (function-level) analysis via the proxy, so that invalidating the function
// analysis invalidates the loop analysis even when loop analyses were
// explicitly preserved.
TEST_F(LoopPassManagerTest, IndirectOuterPassInvalidation) {
  typedef decltype(MLAHandle)::Analysis LoopAnalysis;
  MockFunctionAnalysisHandle MFAHandle;
  FAM.registerPass([&] { return MFAHandle.getAnalysis(); });
  typedef decltype(MFAHandle)::Analysis FunctionAnalysis;
  // Set up the loop analysis to depend on both the function and module
  // analysis.
  ON_CALL(MLAHandle, run(_, _, _))
      .WillByDefault(Invoke([&](Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR) {
        auto &FAMP = AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR);
        auto &FAM = FAMP.getManager();
        Function &F = *L.getHeader()->getParent();
        // Only register the outer dependency when the function analysis is
        // already cached -- this is what makes the 'f' and 'g' halves of the
        // test behave differently below.
        if (FAM.getCachedResult<FunctionAnalysis>(F))
          FAMP.registerOuterAnalysisInvalidation<FunctionAnalysis,
                                                 LoopAnalysis>();
        return MLAHandle.getResult();
      }));
  ::testing::InSequence MakeExpectationsSequenced;
  // Compute the analyses across all of 'f' first.
  EXPECT_CALL(MFPHandle, run(HasName("f"), _))
      .WillOnce(Invoke([](Function &F, FunctionAnalysisManager &AM) {
        // Force the computing of the function analysis so it is available in
        // this function.
        (void)AM.getResult<FunctionAnalysis>(F);
        return PreservedAnalyses::all();
      }));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  // Now invalidate the function analysis but preserve the loop analyses.
  // This should trigger immediate invalidation of the loop analyses, despite
  // the fact that they were preserved.
  EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
    auto PA = getLoopPassPreservedAnalyses();
    PA.preserveSet<AllAnalysesOn<Loop>>();
    return PA;
  }));
  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _));
  // And re-running a requires pass recomputes them.
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  // When we run over 'g' we don't populate the cache with the function
  // analysis.
  EXPECT_CALL(MFPHandle, run(HasName("g"), _))
      .WillOnce(Return(PreservedAnalyses::all()));
  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
  // Which means that no extra invalidation occurs and cached values are used.
  EXPECT_CALL(MFPHandle, run(HasName("g"), _)).WillOnce(InvokeWithoutArgs([] {
    auto PA = getLoopPassPreservedAnalyses();
    PA.preserveSet<AllAnalysesOn<Loop>>();
    return PA;
  }));
  // Build the pipeline and run it.
  ModulePassManager MPM(true);
  FunctionPassManager FPM(true);
  FPM.addPass(MFPHandle.getPass());
  FPM.addPass(
      createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass<LoopAnalysis>()));
  FPM.addPass(MFPHandle.getPass());
  FPM.addPass(
      createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass<LoopAnalysis>()));
  MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
  MPM.run(*M, MAM);
}
// Check the LPMUpdater::addChildLoops API: a loop pass that inserts new child
// loops mid-pipeline should cause the walk to immediately descend into each
// new child (running the full pipeline on it) and then restart the pipeline
// on the mutated parent. Domtree and loop verification passes at the end
// confirm the IR edits kept everything consistent.
TEST_F(LoopPassManagerTest, LoopChildInsertion) {
  // Super boring module with three loops in a single loop nest.
  M = parseIR(Context, "define void @f(i1* %ptr) {\n"
                       "entry:\n"
                       "  br label %loop.0\n"
                       "loop.0:\n"
                       "  %cond.0 = load volatile i1, i1* %ptr\n"
                       "  br i1 %cond.0, label %loop.0.0.ph, label %end\n"
                       "loop.0.0.ph:\n"
                       "  br label %loop.0.0\n"
                       "loop.0.0:\n"
                       "  %cond.0.0 = load volatile i1, i1* %ptr\n"
                       "  br i1 %cond.0.0, label %loop.0.0, label %loop.0.1.ph\n"
                       "loop.0.1.ph:\n"
                       "  br label %loop.0.1\n"
                       "loop.0.1:\n"
                       "  %cond.0.1 = load volatile i1, i1* %ptr\n"
                       "  br i1 %cond.0.1, label %loop.0.1, label %loop.0.2.ph\n"
                       "loop.0.2.ph:\n"
                       "  br label %loop.0.2\n"
                       "loop.0.2:\n"
                       "  %cond.0.2 = load volatile i1, i1* %ptr\n"
                       "  br i1 %cond.0.2, label %loop.0.2, label %loop.0.latch\n"
                       "loop.0.latch:\n"
                       "  br label %loop.0\n"
                       "end:\n"
                       "  ret void\n"
                       "}\n");
  // Build up variables referring into the IR so we can rewrite it below
  // easily.
  Function &F = *M->begin();
  ASSERT_THAT(F, HasName("f"));
  Argument &Ptr = *F.arg_begin();
  auto BBI = F.begin();
  BasicBlock &EntryBB = *BBI++;
  ASSERT_THAT(EntryBB, HasName("entry"));
  BasicBlock &Loop0BB = *BBI++;
  ASSERT_THAT(Loop0BB, HasName("loop.0"));
  BasicBlock &Loop00PHBB = *BBI++;
  ASSERT_THAT(Loop00PHBB, HasName("loop.0.0.ph"));
  BasicBlock &Loop00BB = *BBI++;
  ASSERT_THAT(Loop00BB, HasName("loop.0.0"));
  BasicBlock &Loop01PHBB = *BBI++;
  ASSERT_THAT(Loop01PHBB, HasName("loop.0.1.ph"));
  BasicBlock &Loop01BB = *BBI++;
  ASSERT_THAT(Loop01BB, HasName("loop.0.1"));
  BasicBlock &Loop02PHBB = *BBI++;
  ASSERT_THAT(Loop02PHBB, HasName("loop.0.2.ph"));
  BasicBlock &Loop02BB = *BBI++;
  ASSERT_THAT(Loop02BB, HasName("loop.0.2"));
  BasicBlock &Loop0LatchBB = *BBI++;
  ASSERT_THAT(Loop0LatchBB, HasName("loop.0.latch"));
  BasicBlock &EndBB = *BBI++;
  ASSERT_THAT(EndBB, HasName("end"));
  ASSERT_THAT(BBI, F.end());
  // Helper: append a volatile load of %ptr and a conditional branch on it to
  // block BB, giving new loops a data-dependent backedge like the others.
  auto CreateCondBr = [&](BasicBlock *TrueBB, BasicBlock *FalseBB,
                          const char *Name, BasicBlock *BB) {
    auto *Cond = new LoadInst(Type::getInt1Ty(Context), &Ptr, Name,
                              /*isVolatile*/ true, BB);
    BranchInst::Create(TrueBB, FalseBB, Cond, BB);
  };
  // Build the pass managers and register our pipeline. We build a single loop
  // pass pipeline consisting of three mock pass runs over each loop. After
  // this we run both domtree and loop verification passes to make sure that
  // the IR remained valid during our mutations.
  ModulePassManager MPM(true);
  FunctionPassManager FPM(true);
  LoopPassManager LPM(true);
  LPM.addPass(MLPHandle.getPass());
  LPM.addPass(MLPHandle.getPass());
  LPM.addPass(MLPHandle.getPass());
  FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
  FPM.addPass(DominatorTreeVerifierPass());
  FPM.addPass(LoopVerifierPass());
  MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
  // All the visit orders are deterministic, so we use simple fully order
  // expectations.
  ::testing::InSequence MakeExpectationsSequenced;
  // We run loop passes three times over each of the loops.
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
      .Times(2)
      .WillRepeatedly(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
  // When running over the middle loop, the second run inserts two new child
  // loops, inserting them and itself into the worklist.
  BasicBlock *NewLoop010BB, *NewLoop01LatchBB;
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
      .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
                           LoopStandardAnalysisResults &AR,
                           LPMUpdater &Updater) {
        // Splice a new loop (loop.0.1.0 with its own preheader and a latch
        // back to loop.0.1) into loop.0.1's body, updating IR, domtree, and
        // LoopInfo by hand, then tell the updater about the new child.
        auto *NewLoop = AR.LI.AllocateLoop();
        L.addChildLoop(NewLoop);
        auto *NewLoop010PHBB =
            BasicBlock::Create(Context, "loop.0.1.0.ph", &F, &Loop02PHBB);
        NewLoop010BB =
            BasicBlock::Create(Context, "loop.0.1.0", &F, &Loop02PHBB);
        NewLoop01LatchBB =
            BasicBlock::Create(Context, "loop.0.1.latch", &F, &Loop02PHBB);
        Loop01BB.getTerminator()->replaceUsesOfWith(&Loop01BB, NewLoop010PHBB);
        BranchInst::Create(NewLoop010BB, NewLoop010PHBB);
        CreateCondBr(NewLoop01LatchBB, NewLoop010BB, "cond.0.1.0",
                     NewLoop010BB);
        BranchInst::Create(&Loop01BB, NewLoop01LatchBB);
        AR.DT.addNewBlock(NewLoop010PHBB, &Loop01BB);
        AR.DT.addNewBlock(NewLoop010BB, NewLoop010PHBB);
        AR.DT.addNewBlock(NewLoop01LatchBB, NewLoop010BB);
        EXPECT_TRUE(AR.DT.verify());
        L.addBasicBlockToLoop(NewLoop010PHBB, AR.LI);
        NewLoop->addBasicBlockToLoop(NewLoop010BB, AR.LI);
        L.addBasicBlockToLoop(NewLoop01LatchBB, AR.LI);
        NewLoop->verifyLoop();
        L.verifyLoop();
        Updater.addChildLoops({NewLoop});
        return PreservedAnalyses::all();
      }));
  // We should immediately drop down to fully visit the new inner loop.
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.0"), _, _, _))
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1.0"), _, _));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.0"), _, _, _))
      .Times(2)
      .WillRepeatedly(Invoke(getLoopAnalysisResult));
  // After visiting the inner loop, we should re-visit the second loop
  // reflecting its new loop nest structure.
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
      .WillOnce(Invoke(getLoopAnalysisResult));
  // In the second run over the middle loop after we've visited the new child,
  // we add another child to check that we can repeatedly add children, and add
  // children to a loop that already has children.
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
      .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
                           LoopStandardAnalysisResults &AR,
                           LPMUpdater &Updater) {
        // Insert a second child (loop.0.1.1) between loop.0.1.0 and the
        // latch created above, again updating DT and LI manually.
        auto *NewLoop = AR.LI.AllocateLoop();
        L.addChildLoop(NewLoop);
        auto *NewLoop011PHBB = BasicBlock::Create(Context, "loop.0.1.1.ph", &F, NewLoop01LatchBB);
        auto *NewLoop011BB = BasicBlock::Create(Context, "loop.0.1.1", &F, NewLoop01LatchBB);
        NewLoop010BB->getTerminator()->replaceUsesOfWith(NewLoop01LatchBB,
                                                         NewLoop011PHBB);
        BranchInst::Create(NewLoop011BB, NewLoop011PHBB);
        CreateCondBr(NewLoop01LatchBB, NewLoop011BB, "cond.0.1.1",
                     NewLoop011BB);
        AR.DT.addNewBlock(NewLoop011PHBB, NewLoop010BB);
        auto *NewDTNode = AR.DT.addNewBlock(NewLoop011BB, NewLoop011PHBB);
        AR.DT.changeImmediateDominator(AR.DT[NewLoop01LatchBB], NewDTNode);
        EXPECT_TRUE(AR.DT.verify());
        L.addBasicBlockToLoop(NewLoop011PHBB, AR.LI);
        NewLoop->addBasicBlockToLoop(NewLoop011BB, AR.LI);
        NewLoop->verifyLoop();
        L.verifyLoop();
        Updater.addChildLoops({NewLoop});
        return PreservedAnalyses::all();
      }));
  // Again, we should immediately drop down to visit the new, unvisited child
  // loop. We don't need to revisit the other child though.
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.1"), _, _, _))
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1.1"), _, _));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.1"), _, _, _))
      .Times(2)
      .WillRepeatedly(Invoke(getLoopAnalysisResult));
  // And now we should pop back up to the second loop and do a full pipeline of
  // three passes on its current form.
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
      .Times(3)
      .WillRepeatedly(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
      .Times(2)
      .WillRepeatedly(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
      .WillOnce(Invoke(getLoopAnalysisResult));
  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
      .Times(2)
      .WillRepeatedly(Invoke(getLoopAnalysisResult));
  // Now that all the expected actions are registered, run the pipeline over
  // our module. All of our expectations are verified when the test finishes.
  MPM.run(*M, MAM);
}
TEST_F(LoopPassManagerTest, LoopPeerInsertion) {
// Super boring module with two loop nests and loop nest with two child
// loops.
M = parseIR(Context, "define void @f(i1* %ptr) {\n"
"entry:\n"
" br label %loop.0\n"
"loop.0:\n"
" %cond.0 = load volatile i1, i1* %ptr\n"
" br i1 %cond.0, label %loop.0.0.ph, label %loop.2.ph\n"
"loop.0.0.ph:\n"
" br label %loop.0.0\n"
"loop.0.0:\n"
" %cond.0.0 = load volatile i1, i1* %ptr\n"
" br i1 %cond.0.0, label %loop.0.0, label %loop.0.2.ph\n"
"loop.0.2.ph:\n"
" br label %loop.0.2\n"
"loop.0.2:\n"
" %cond.0.2 = load volatile i1, i1* %ptr\n"
" br i1 %cond.0.2, label %loop.0.2, label %loop.0.latch\n"
"loop.0.latch:\n"
" br label %loop.0\n"
"loop.2.ph:\n"
" br label %loop.2\n"
"loop.2:\n"
" %cond.2 = load volatile i1, i1* %ptr\n"
" br i1 %cond.2, label %loop.2, label %end\n"
"end:\n"
" ret void\n"
"}\n");
// Build up variables referring into the IR so we can rewrite it below
// easily.
Function &F = *M->begin();
ASSERT_THAT(F, HasName("f"));
Argument &Ptr = *F.arg_begin();
auto BBI = F.begin();
BasicBlock &EntryBB = *BBI++;
ASSERT_THAT(EntryBB, HasName("entry"));
BasicBlock &Loop0BB = *BBI++;
ASSERT_THAT(Loop0BB, HasName("loop.0"));
BasicBlock &Loop00PHBB = *BBI++;
ASSERT_THAT(Loop00PHBB, HasName("loop.0.0.ph"));
BasicBlock &Loop00BB = *BBI++;
ASSERT_THAT(Loop00BB, HasName("loop.0.0"));
BasicBlock &Loop02PHBB = *BBI++;
ASSERT_THAT(Loop02PHBB, HasName("loop.0.2.ph"));
BasicBlock &Loop02BB = *BBI++;
ASSERT_THAT(Loop02BB, HasName("loop.0.2"));
BasicBlock &Loop0LatchBB = *BBI++;
ASSERT_THAT(Loop0LatchBB, HasName("loop.0.latch"));
BasicBlock &Loop2PHBB = *BBI++;
ASSERT_THAT(Loop2PHBB, HasName("loop.2.ph"));
BasicBlock &Loop2BB = *BBI++;
ASSERT_THAT(Loop2BB, HasName("loop.2"));
BasicBlock &EndBB = *BBI++;
ASSERT_THAT(EndBB, HasName("end"));
ASSERT_THAT(BBI, F.end());
auto CreateCondBr = [&](BasicBlock *TrueBB, BasicBlock *FalseBB,
const char *Name, BasicBlock *BB) {
auto *Cond = new LoadInst(Type::getInt1Ty(Context), &Ptr, Name,
/*isVolatile*/ true, BB);
BranchInst::Create(TrueBB, FalseBB, Cond, BB);
};
// Build the pass managers and register our pipeline. We build a single loop
// pass pipeline consisting of three mock pass runs over each loop. After
// this we run both domtree and loop verification passes to make sure that
// the IR remained valid during our mutations.
ModulePassManager MPM(true);
FunctionPassManager FPM(true);
LoopPassManager LPM(true);
LPM.addPass(MLPHandle.getPass());
LPM.addPass(MLPHandle.getPass());
LPM.addPass(MLPHandle.getPass());
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
FPM.addPass(DominatorTreeVerifierPass());
FPM.addPass(LoopVerifierPass());
MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
// All the visit orders are deterministic, so we use simple fully order
// expectations.
::testing::InSequence MakeExpectationsSequenced;
// We run loop passes three times over each of the loops.
EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
// On the second run, we insert a sibling loop.
EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
.WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR,
LPMUpdater &Updater) {
auto *NewLoop = AR.LI.AllocateLoop();
L.getParentLoop()->addChildLoop(NewLoop);
auto *NewLoop01PHBB = BasicBlock::Create(Context, "loop.0.1.ph", &F, &Loop02PHBB);
auto *NewLoop01BB = BasicBlock::Create(Context, "loop.0.1", &F, &Loop02PHBB);
BranchInst::Create(NewLoop01BB, NewLoop01PHBB);
CreateCondBr(&Loop02PHBB, NewLoop01BB, "cond.0.1", NewLoop01BB);
Loop00BB.getTerminator()->replaceUsesOfWith(&Loop02PHBB, NewLoop01PHBB);
AR.DT.addNewBlock(NewLoop01PHBB, &Loop00BB);
auto *NewDTNode = AR.DT.addNewBlock(NewLoop01BB, NewLoop01PHBB);
AR.DT.changeImmediateDominator(AR.DT[&Loop02PHBB], NewDTNode);
EXPECT_TRUE(AR.DT.verify());
L.getParentLoop()->addBasicBlockToLoop(NewLoop01PHBB, AR.LI);
NewLoop->addBasicBlockToLoop(NewLoop01BB, AR.LI);
L.getParentLoop()->verifyLoop();
Updater.addSiblingLoops({NewLoop});
return PreservedAnalyses::all();
}));
// We finish processing this loop as sibling loops don't perturb the
// postorder walk.
EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
// We visit the inserted sibling next.
EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
.Times(2)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
// Next, on the third pass run on the last inner loop we add more new
// siblings, more than one, and one with nested child loops. By doing this at
// the end we make sure that edge case works well.
EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
.WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR,
LPMUpdater &Updater) {
Loop *NewLoops[] = {AR.LI.AllocateLoop(), AR.LI.AllocateLoop(),
AR.LI.AllocateLoop()};
L.getParentLoop()->addChildLoop(NewLoops[0]);
L.getParentLoop()->addChildLoop(NewLoops[1]);
NewLoops[1]->addChildLoop(NewLoops[2]);
auto *NewLoop03PHBB =
BasicBlock::Create(Context, "loop.0.3.ph", &F, &Loop0LatchBB);
auto *NewLoop03BB =
BasicBlock::Create(Context, "loop.0.3", &F, &Loop0LatchBB);
auto *NewLoop04PHBB =
BasicBlock::Create(Context, "loop.0.4.ph", &F, &Loop0LatchBB);
auto *NewLoop04BB =
BasicBlock::Create(Context, "loop.0.4", &F, &Loop0LatchBB);
auto *NewLoop040PHBB =
BasicBlock::Create(Context, "loop.0.4.0.ph", &F, &Loop0LatchBB);
auto *NewLoop040BB =
BasicBlock::Create(Context, "loop.0.4.0", &F, &Loop0LatchBB);
auto *NewLoop04LatchBB =
BasicBlock::Create(Context, "loop.0.4.latch", &F, &Loop0LatchBB);
Loop02BB.getTerminator()->replaceUsesOfWith(&Loop0LatchBB, NewLoop03PHBB);
BranchInst::Create(NewLoop03BB, NewLoop03PHBB);
CreateCondBr(NewLoop04PHBB, NewLoop03BB, "cond.0.3", NewLoop03BB);
BranchInst::Create(NewLoop04BB, NewLoop04PHBB);
CreateCondBr(&Loop0LatchBB, NewLoop040PHBB, "cond.0.4", NewLoop04BB);
BranchInst::Create(NewLoop040BB, NewLoop040PHBB);
CreateCondBr(NewLoop04LatchBB, NewLoop040BB, "cond.0.4.0", NewLoop040BB);
BranchInst::Create(NewLoop04BB, NewLoop04LatchBB);
AR.DT.addNewBlock(NewLoop03PHBB, &Loop02BB);
AR.DT.addNewBlock(NewLoop03BB, NewLoop03PHBB);
AR.DT.addNewBlock(NewLoop04PHBB, NewLoop03BB);
auto *NewDTNode = AR.DT.addNewBlock(NewLoop04BB, NewLoop04PHBB);
AR.DT.changeImmediateDominator(AR.DT[&Loop0LatchBB], NewDTNode);
AR.DT.addNewBlock(NewLoop040PHBB, NewLoop04BB);
AR.DT.addNewBlock(NewLoop040BB, NewLoop040PHBB);
AR.DT.addNewBlock(NewLoop04LatchBB, NewLoop040BB);
EXPECT_TRUE(AR.DT.verify());
L.getParentLoop()->addBasicBlockToLoop(NewLoop03PHBB, AR.LI);
NewLoops[0]->addBasicBlockToLoop(NewLoop03BB, AR.LI);
L.getParentLoop()->addBasicBlockToLoop(NewLoop04PHBB, AR.LI);
NewLoops[1]->addBasicBlockToLoop(NewLoop04BB, AR.LI);
NewLoops[1]->addBasicBlockToLoop(NewLoop040PHBB, AR.LI);
NewLoops[2]->addBasicBlockToLoop(NewLoop040BB, AR.LI);
NewLoops[1]->addBasicBlockToLoop(NewLoop04LatchBB, AR.LI);
L.getParentLoop()->verifyLoop();
Updater.addSiblingLoops({NewLoops[0], NewLoops[1]});
return PreservedAnalyses::all();
}));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0.3"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
.Times(2)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
// Note that we need to visit the inner loop of this added sibling before the
// sibling itself!
EXPECT_CALL(MLPHandle, run(HasName("loop.0.4.0"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0.4.0"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.4.0"), _, _, _))
.Times(2)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.4"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0.4"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.4"), _, _, _))
.Times(2)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
// And only now do we visit the outermost loop of the nest.
EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
// On the second pass, we add sibling loops which become new top-level loops.
EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
.WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR,
LPMUpdater &Updater) {
auto *NewLoop = AR.LI.AllocateLoop();
AR.LI.addTopLevelLoop(NewLoop);
auto *NewLoop1PHBB = BasicBlock::Create(Context, "loop.1.ph", &F, &Loop2BB);
auto *NewLoop1BB = BasicBlock::Create(Context, "loop.1", &F, &Loop2BB);
BranchInst::Create(NewLoop1BB, NewLoop1PHBB);
CreateCondBr(&Loop2PHBB, NewLoop1BB, "cond.1", NewLoop1BB);
Loop0BB.getTerminator()->replaceUsesOfWith(&Loop2PHBB, NewLoop1PHBB);
AR.DT.addNewBlock(NewLoop1PHBB, &Loop0BB);
auto *NewDTNode = AR.DT.addNewBlock(NewLoop1BB, NewLoop1PHBB);
AR.DT.changeImmediateDominator(AR.DT[&Loop2PHBB], NewDTNode);
EXPECT_TRUE(AR.DT.verify());
NewLoop->addBasicBlockToLoop(NewLoop1BB, AR.LI);
NewLoop->verifyLoop();
Updater.addSiblingLoops({NewLoop});
return PreservedAnalyses::all();
}));
EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.1"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.1"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.1"), _, _, _))
.Times(2)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.2"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.2"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.2"), _, _, _))
.Times(2)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
// Now that all the expected actions are registered, run the pipeline over
// our module. All of our expectations are verified when the test finishes.
MPM.run(*M, MAM);
}
TEST_F(LoopPassManagerTest, LoopDeletion) {
// Build a module with a single loop nest that contains one outer loop with
// three subloops, and one of those with its own subloop. We will
// incrementally delete all of these to test different deletion scenarios.
M = parseIR(Context, "define void @f(i1* %ptr) {\n"
"entry:\n"
" br label %loop.0\n"
"loop.0:\n"
" %cond.0 = load volatile i1, i1* %ptr\n"
" br i1 %cond.0, label %loop.0.0.ph, label %end\n"
"loop.0.0.ph:\n"
" br label %loop.0.0\n"
"loop.0.0:\n"
" %cond.0.0 = load volatile i1, i1* %ptr\n"
" br i1 %cond.0.0, label %loop.0.0, label %loop.0.1.ph\n"
"loop.0.1.ph:\n"
" br label %loop.0.1\n"
"loop.0.1:\n"
" %cond.0.1 = load volatile i1, i1* %ptr\n"
" br i1 %cond.0.1, label %loop.0.1, label %loop.0.2.ph\n"
"loop.0.2.ph:\n"
" br label %loop.0.2\n"
"loop.0.2:\n"
" %cond.0.2 = load volatile i1, i1* %ptr\n"
" br i1 %cond.0.2, label %loop.0.2.0.ph, label %loop.0.latch\n"
"loop.0.2.0.ph:\n"
" br label %loop.0.2.0\n"
"loop.0.2.0:\n"
" %cond.0.2.0 = load volatile i1, i1* %ptr\n"
" br i1 %cond.0.2.0, label %loop.0.2.0, label %loop.0.2.latch\n"
"loop.0.2.latch:\n"
" br label %loop.0.2\n"
"loop.0.latch:\n"
" br label %loop.0\n"
"end:\n"
" ret void\n"
"}\n");
// Build up variables referring into the IR so we can rewrite it below
// easily.
// The ASSERT_THATs also pin the exact block layout the rest of the test
// relies on: the iterator walk must visit blocks in the textual order above.
Function &F = *M->begin();
ASSERT_THAT(F, HasName("f"));
Argument &Ptr = *F.arg_begin();
auto BBI = F.begin();
BasicBlock &EntryBB = *BBI++;
ASSERT_THAT(EntryBB, HasName("entry"));
BasicBlock &Loop0BB = *BBI++;
ASSERT_THAT(Loop0BB, HasName("loop.0"));
BasicBlock &Loop00PHBB = *BBI++;
ASSERT_THAT(Loop00PHBB, HasName("loop.0.0.ph"));
BasicBlock &Loop00BB = *BBI++;
ASSERT_THAT(Loop00BB, HasName("loop.0.0"));
BasicBlock &Loop01PHBB = *BBI++;
ASSERT_THAT(Loop01PHBB, HasName("loop.0.1.ph"));
BasicBlock &Loop01BB = *BBI++;
ASSERT_THAT(Loop01BB, HasName("loop.0.1"));
BasicBlock &Loop02PHBB = *BBI++;
ASSERT_THAT(Loop02PHBB, HasName("loop.0.2.ph"));
BasicBlock &Loop02BB = *BBI++;
ASSERT_THAT(Loop02BB, HasName("loop.0.2"));
BasicBlock &Loop020PHBB = *BBI++;
ASSERT_THAT(Loop020PHBB, HasName("loop.0.2.0.ph"));
BasicBlock &Loop020BB = *BBI++;
ASSERT_THAT(Loop020BB, HasName("loop.0.2.0"));
BasicBlock &Loop02LatchBB = *BBI++;
ASSERT_THAT(Loop02LatchBB, HasName("loop.0.2.latch"));
BasicBlock &Loop0LatchBB = *BBI++;
ASSERT_THAT(Loop0LatchBB, HasName("loop.0.latch"));
BasicBlock &EndBB = *BBI++;
ASSERT_THAT(EndBB, HasName("end"));
ASSERT_THAT(BBI, F.end());
// Helper to do the actual deletion of a loop. We directly encode this here
// to isolate ourselves from the rest of LLVM and for simplicity. Here we can
// egregiously cheat based on knowledge of the test case. For example, we
// have no PHI nodes and there is always a single i-dom.
auto EraseLoop = [](Loop &L, BasicBlock &IDomBB,
LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
assert(L.empty() && "Can only delete leaf loops with this routine!");
SmallVector<BasicBlock *, 4> LoopBBs(L.block_begin(), L.block_end());
// Notify the pass manager up front; after this point the loop's IR and
// analysis entries are torn down and L must not be touched again by it.
Updater.markLoopAsDeleted(L, L.getName());
IDomBB.getTerminator()->replaceUsesOfWith(L.getHeader(),
L.getUniqueExitBlock());
for (BasicBlock *LoopBB : LoopBBs) {
// Copy the child list first: changeImmediateDominator mutates it.
SmallVector<DomTreeNode *, 4> ChildNodes(AR.DT[LoopBB]->begin(),
AR.DT[LoopBB]->end());
for (DomTreeNode *ChildNode : ChildNodes)
AR.DT.changeImmediateDominator(ChildNode, AR.DT[&IDomBB]);
AR.DT.eraseNode(LoopBB);
AR.LI.removeBlock(LoopBB);
LoopBB->dropAllReferences();
}
for (BasicBlock *LoopBB : LoopBBs)
LoopBB->eraseFromParent();
AR.LI.erase(&L);
};
// Build up the pass managers.
ModulePassManager MPM(true);
FunctionPassManager FPM(true);
// We run several loop pass pipelines across the loop nest, but they all take
// the same form of three mock pass runs in a loop pipeline followed by
// domtree and loop verification. We use a lambda to stamp this out each
// time.
auto AddLoopPipelineAndVerificationPasses = [&] {
LoopPassManager LPM(true);
LPM.addPass(MLPHandle.getPass());
LPM.addPass(MLPHandle.getPass());
LPM.addPass(MLPHandle.getPass());
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
FPM.addPass(DominatorTreeVerifierPass());
FPM.addPass(LoopVerifierPass());
};
// All the visit orders are deterministic so we use simple fully order
// expectations.
::testing::InSequence MakeExpectationsSequenced;
// We run the loop pipeline with three passes over each of the loops. When
// running over the middle loop, the second pass in the pipeline deletes it.
// This should prevent the third pass from visiting it but otherwise leave
// the process unimpacted.
AddLoopPipelineAndVerificationPasses();
EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
.Times(2)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult))
EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
.WillOnce(
Invoke([&](Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
Loop *ParentL = L.getParentLoop();
// Invalidate ScalarEvolution's cached state for L before its blocks go.
AR.SE.forgetLoop(&L);
EraseLoop(L, Loop01PHBB, AR, Updater);
ParentL->verifyLoop();
return PreservedAnalyses::all();
}));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0.2.0"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _))
.Times(2)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
.Times(2)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
.Times(2)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
// Run the loop pipeline again. This time we delete the last loop, which
// contains a nested loop within it and insert a new loop into the nest. This
// makes sure we can handle nested loop deletion.
AddLoopPipelineAndVerificationPasses();
EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
.Times(3)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _))
.Times(3)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
// Captured here so the third pipeline below can erase the inserted
// loop.0.3 using its preheader as the i-dom.
BasicBlock *NewLoop03PHBB;
EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
.WillOnce(
Invoke([&](Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
// Erase the nested child first, then the parent itself.
AR.SE.forgetLoop(*L.begin());
EraseLoop(**L.begin(), Loop020PHBB, AR, Updater);
auto *ParentL = L.getParentLoop();
AR.SE.forgetLoop(&L);
EraseLoop(L, Loop02PHBB, AR, Updater);
// Now insert a new sibling loop.
auto *NewSibling = AR.LI.AllocateLoop();
ParentL->addChildLoop(NewSibling);
NewLoop03PHBB =
BasicBlock::Create(Context, "loop.0.3.ph", &F, &Loop0LatchBB);
auto *NewLoop03BB =
BasicBlock::Create(Context, "loop.0.3", &F, &Loop0LatchBB);
BranchInst::Create(NewLoop03BB, NewLoop03PHBB);
auto *Cond =
new LoadInst(Type::getInt1Ty(Context), &Ptr, "cond.0.3",
/*isVolatile*/ true, NewLoop03BB);
BranchInst::Create(&Loop0LatchBB, NewLoop03BB, Cond, NewLoop03BB);
Loop02PHBB.getTerminator()->replaceUsesOfWith(&Loop0LatchBB,
NewLoop03PHBB);
AR.DT.addNewBlock(NewLoop03PHBB, &Loop02PHBB);
AR.DT.addNewBlock(NewLoop03BB, NewLoop03PHBB);
AR.DT.changeImmediateDominator(AR.DT[&Loop0LatchBB],
AR.DT[NewLoop03BB]);
EXPECT_TRUE(AR.DT.verify());
ParentL->addBasicBlockToLoop(NewLoop03PHBB, AR.LI);
NewSibling->addBasicBlockToLoop(NewLoop03BB, AR.LI);
NewSibling->verifyLoop();
ParentL->verifyLoop();
Updater.addSiblingLoops({NewSibling});
return PreservedAnalyses::all();
}));
// To respect our inner-to-outer traversal order, we must visit the
// newly-inserted sibling of the loop we just deleted before we visit the
// outer loop. When we do so, this must compute a fresh analysis result, even
// though our new loop has the same pointer value as the loop we deleted.
EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLAHandle, run(HasName("loop.0.3"), _, _));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
.Times(2)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
.Times(3)
.WillRepeatedly(Invoke(getLoopAnalysisResult));
// In the final loop pipeline run we delete every loop, including the last
// loop of the nest. We do this again in the second pass in the pipeline, and
// as a consequence we never make it to three runs on any loop. We also cover
// deleting multiple loops in a single pipeline, deleting the first loop and
// deleting the (last) top level loop.
AddLoopPipelineAndVerificationPasses();
EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
.WillOnce(
Invoke([&](Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
AR.SE.forgetLoop(&L);
EraseLoop(L, Loop00PHBB, AR, Updater);
return PreservedAnalyses::all();
}));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
.WillOnce(
Invoke([&](Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
AR.SE.forgetLoop(&L);
EraseLoop(L, *NewLoop03PHBB, AR, Updater);
return PreservedAnalyses::all();
}));
EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
.WillOnce(Invoke(getLoopAnalysisResult));
EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
.WillOnce(
Invoke([&](Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
AR.SE.forgetLoop(&L);
EraseLoop(L, EntryBB, AR, Updater);
return PreservedAnalyses::all();
}));
// Add the function pass pipeline now that it is fully built up and run it
// over the module's one function.
MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
MPM.run(*M, MAM);
}
}
|
{
"pile_set_name": "Github"
}
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE124_Buffer_Underwrite__malloc_wchar_t_ncpy_08.c
Label Definition File: CWE124_Buffer_Underwrite__malloc.label.xml
Template File: sources-sink-08.tmpl.c
*/
/*
* @description
* CWE: 124 Buffer Underwrite
* BadSource: Set data pointer to before the allocated memory buffer
* GoodSource: Set data pointer to the allocated memory buffer
* Sink: ncpy
* BadSink : Copy string to data using wcsncpy
* Flow Variant: 08 Control flow: if(staticReturnsTrue()) and if(staticReturnsFalse())
*
* */
#include "std_testcase.h"
#include <wchar.h>
/* The two function below always return the same value, so a tool
* should be able to identify that calls to the functions will always
* return a fixed value.
*/
/* Always-true predicate. Kept trivially constant-foldable so analysis
 * tools can prove that branches guarded by it are always taken. */
static int staticReturnsTrue()
{
    const int alwaysTrue = 1;
    return alwaysTrue;
}
/* Always-false predicate; counterpart of staticReturnsTrue(). */
static int staticReturnsFalse()
{
    const int alwaysFalse = 0;
    return alwaysFalse;
}
#ifndef OMITBAD
/* Bad variant: the staticReturnsTrue() branch always runs, so data ends up
 * pointing 8 wchar_t BEFORE the malloc'd buffer and the wcsncpy below writes
 * out of bounds. The flaw is deliberate (CWE-124 test corpus) — do not fix. */
void CWE124_Buffer_Underwrite__malloc_wchar_t_ncpy_08_bad()
{
wchar_t * data;
data = NULL;
if(staticReturnsTrue())
{
{
wchar_t * dataBuffer = (wchar_t *)malloc(100*sizeof(wchar_t));
wmemset(dataBuffer, L'A', 100-1);
dataBuffer[100-1] = L'\0';
/* FLAW: Set data pointer to before the allocated memory buffer */
data = dataBuffer - 8;
}
}
{
wchar_t source[100];
wmemset(source, L'C', 100-1); /* fill with 'C's */
source[100-1] = L'\0'; /* null terminate */
/* POTENTIAL FLAW: Possibly copying data to memory before the destination buffer */
wcsncpy(data, source, 100-1);
/* Ensure the destination buffer is null terminated */
data[100-1] = L'\0';
printWLine(data);
/* INCIDENTAL CWE-401: Memory Leak - data may not point to location
* returned by malloc() so can't safely call free() on it */
}
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B1() - use goodsource and badsink by changing the staticReturnsTrue() to staticReturnsFalse() */
/* The else branch always runs, so data points at the start of the allocation
 * and the sink below stays in bounds. The dead if-branch is intentional: the
 * variant tests tools against this exact control-flow shape. */
static void goodG2B1()
{
wchar_t * data;
data = NULL;
if(staticReturnsFalse())
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
printLine("Benign, fixed string");
}
else
{
{
wchar_t * dataBuffer = (wchar_t *)malloc(100*sizeof(wchar_t));
wmemset(dataBuffer, L'A', 100-1);
dataBuffer[100-1] = L'\0';
/* FIX: Set data pointer to the allocated memory buffer */
data = dataBuffer;
}
}
{
wchar_t source[100];
wmemset(source, L'C', 100-1); /* fill with 'C's */
source[100-1] = L'\0'; /* null terminate */
/* POTENTIAL FLAW: Possibly copying data to memory before the destination buffer */
wcsncpy(data, source, 100-1);
/* Ensure the destination buffer is null terminated */
data[100-1] = L'\0';
printWLine(data);
/* INCIDENTAL CWE-401: Memory Leak - data may not point to location
* returned by malloc() so can't safely call free() on it */
}
}
/* goodG2B2() - use goodsource and badsink by reversing the blocks in the if statement */
/* Same sink as the bad variant, but the source assigns data = dataBuffer
 * (no offset), so the wcsncpy below writes within the allocation. */
static void goodG2B2()
{
wchar_t * data;
data = NULL;
if(staticReturnsTrue())
{
{
wchar_t * dataBuffer = (wchar_t *)malloc(100*sizeof(wchar_t));
wmemset(dataBuffer, L'A', 100-1);
dataBuffer[100-1] = L'\0';
/* FIX: Set data pointer to the allocated memory buffer */
data = dataBuffer;
}
}
{
wchar_t source[100];
wmemset(source, L'C', 100-1); /* fill with 'C's */
source[100-1] = L'\0'; /* null terminate */
/* POTENTIAL FLAW: Possibly copying data to memory before the destination buffer */
wcsncpy(data, source, 100-1);
/* Ensure the destination buffer is null terminated */
data[100-1] = L'\0';
printWLine(data);
/* INCIDENTAL CWE-401: Memory Leak - data may not point to location
* returned by malloc() so can't safely call free() on it */
}
}
/* Entry point for the good variants: runs both benign flow shapes. */
void CWE124_Buffer_Underwrite__malloc_wchar_t_ncpy_08_good()
{
goodG2B1();
goodG2B2();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
#ifdef INCLUDEMAIN
/* Standalone driver: runs the good and/or bad variants depending on the
 * OMITGOOD / OMITBAD build flags. Only compiled when INCLUDEMAIN is set. */
int main(int argc, char * argv[])
{
/* seed randomness */
srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
printLine("Calling good()...");
CWE124_Buffer_Underwrite__malloc_wchar_t_ncpy_08_good();
printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
printLine("Calling bad()...");
CWE124_Buffer_Underwrite__malloc_wchar_t_ncpy_08_bad();
printLine("Finished bad()");
#endif /* OMITBAD */
return 0;
}
#endif
|
{
"pile_set_name": "Github"
}
|
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
# Apply this revision's schema changes.
def upgrade():
${upgrades if upgrades else "pass"}
# Revert the changes made by upgrade().
def downgrade():
${downgrades if downgrades else "pass"}
|
{
"pile_set_name": "Github"
}
|
#include <cstdlib>
#include <exception>
#include <iostream>
#include <ImfRgbaFile.h>
#include <ImfArray.h>
#include <half.h>
/**
 * Print the pixel dimensions of the OpenEXR image named by argv[1].
 *
 * The size is derived from the header's data window, whose bounds are
 * inclusive on both ends (hence the +1 on each axis).
 *
 * Fix: Imf::RgbaInputFile throws (Iex exceptions derive from
 * std::exception) when the file is missing or malformed; previously the
 * uncaught exception would terminate the program. Now we report the error
 * and return EXIT_FAILURE instead.
 *
 * @return EXIT_SUCCESS on success; EXIT_FAILURE when no file argument is
 *         supplied or the file cannot be opened/decoded as OpenEXR.
 */
int main (int argc, char *argv[])
{
    if (argc < 2)
        return EXIT_FAILURE;

    try {
        Imf::RgbaInputFile input_file(argv[1]);
        const Imf::Header& header = input_file.header();

        // dataWindow() bounds are inclusive, so extent = max - min + 1.
        size_t width = header.dataWindow().max.x - header.dataWindow().min.x + 1;
        size_t height = header.dataWindow().max.y - header.dataWindow().min.y + 1;

        std::cout << "OpenEXR images size is " << width << "x" << height << std::endl;
    } catch (const std::exception &e) {
        std::cerr << "Error reading " << argv[1] << ": " << e.what() << std::endl;
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}
|
{
"pile_set_name": "Github"
}
|
<?php
namespace Doctrine\ORM;
use Doctrine\Persistence\ObjectRepository;
/**
 * Generic (template-annotated) declaration of Doctrine's EntityRepository
 * for static analysis; TEntityClass is the entity class the repository
 * manages. Methods are declared without bodies — this is a type stub, not
 * a runtime implementation.
 *
 * @template TEntityClass
 * @implements ObjectRepository<TEntityClass>
 */
class EntityRepository implements ObjectRepository
{
/**
 * Returns the entity for the given identifier, or null.
 *
 * @phpstan-param mixed $id
 * @phpstan-param int|null $lockMode
 * @phpstan-param int|null $lockVersion
 * @phpstan-return TEntityClass|null
 */
public function find($id, $lockMode = null, $lockVersion = null);
/**
 * Returns all entities managed by this repository.
 *
 * @phpstan-return TEntityClass[]
 */
public function findAll();
/**
 * Returns the entities matching $criteria, optionally ordered and paged.
 *
 * @phpstan-param mixed[] $criteria
 * @phpstan-param string[]|null $orderBy
 * @phpstan-param int|null $limit
 * @phpstan-param int|null $offset
 * @phpstan-return TEntityClass[]
 */
public function findBy(array $criteria, ?array $orderBy = null, $limit = null, $offset = null);
/**
 * Returns one entity matching $criteria, or null.
 *
 * @phpstan-param mixed[] $criteria The criteria.
 * @phpstan-param mixed[]|null $orderBy
 * @phpstan-return TEntityClass|null
 */
public function findOneBy(array $criteria, array $orderBy = null);
/**
 * Returns the fully-qualified class name of the managed entity.
 *
 * @phpstan-return class-string<TEntityClass>
 */
public function getClassName();
/**
 * Internal accessor for the managed entity's class name.
 *
 * @phpstan-return class-string<TEntityClass>
 */
protected function getEntityName();
}
|
{
"pile_set_name": "Github"
}
|
<?php
return
array (
0 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 0,
1 => 0,
),
1 =>
array (
0 => 0,
1 => 23,
),
2 =>
array (
0 => 187,
1 => 23,
),
3 =>
array (
0 => 187,
1 => 0,
),
),
'color' => 16777215,
'filled' => true,
),
1 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 21,
1 => 1,
),
1 =>
array (
0 => 21,
1 => 21,
),
2 =>
array (
0 => 22,
1 => 21,
),
3 =>
array (
0 => 22,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
2 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 25,
1 => 1,
),
1 =>
array (
0 => 25,
1 => 21,
),
2 =>
array (
0 => 26,
1 => 21,
),
3 =>
array (
0 => 26,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
3 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 29,
1 => 1,
),
1 =>
array (
0 => 29,
1 => 21,
),
2 =>
array (
0 => 30,
1 => 21,
),
3 =>
array (
0 => 30,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
4 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 33,
1 => 11,
),
1 =>
array (
0 => 33,
1 => 21,
),
2 =>
array (
0 => 34,
1 => 21,
),
3 =>
array (
0 => 34,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
5 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 37,
1 => 11,
),
1 =>
array (
0 => 37,
1 => 21,
),
2 =>
array (
0 => 38,
1 => 21,
),
3 =>
array (
0 => 38,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
6 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 41,
1 => 11,
),
1 =>
array (
0 => 41,
1 => 21,
),
2 =>
array (
0 => 42,
1 => 21,
),
3 =>
array (
0 => 42,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
7 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 45,
1 => 11,
),
1 =>
array (
0 => 45,
1 => 21,
),
2 =>
array (
0 => 46,
1 => 21,
),
3 =>
array (
0 => 46,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
8 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 49,
1 => 11,
),
1 =>
array (
0 => 49,
1 => 21,
),
2 =>
array (
0 => 50,
1 => 21,
),
3 =>
array (
0 => 50,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
9 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 53,
1 => 11,
),
1 =>
array (
0 => 53,
1 => 21,
),
2 =>
array (
0 => 54,
1 => 21,
),
3 =>
array (
0 => 54,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
10 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 57,
1 => 1,
),
1 =>
array (
0 => 57,
1 => 21,
),
2 =>
array (
0 => 58,
1 => 21,
),
3 =>
array (
0 => 58,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
11 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 61,
1 => 1,
),
1 =>
array (
0 => 61,
1 => 21,
),
2 =>
array (
0 => 62,
1 => 21,
),
3 =>
array (
0 => 62,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
12 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 65,
1 => 11,
),
1 =>
array (
0 => 65,
1 => 21,
),
2 =>
array (
0 => 66,
1 => 21,
),
3 =>
array (
0 => 66,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
13 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 69,
1 => 11,
),
1 =>
array (
0 => 69,
1 => 21,
),
2 =>
array (
0 => 70,
1 => 21,
),
3 =>
array (
0 => 70,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
14 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 73,
1 => 1,
),
1 =>
array (
0 => 73,
1 => 21,
),
2 =>
array (
0 => 74,
1 => 21,
),
3 =>
array (
0 => 74,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
15 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 77,
1 => 11,
),
1 =>
array (
0 => 77,
1 => 21,
),
2 =>
array (
0 => 78,
1 => 21,
),
3 =>
array (
0 => 78,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
16 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 81,
1 => 1,
),
1 =>
array (
0 => 81,
1 => 21,
),
2 =>
array (
0 => 82,
1 => 21,
),
3 =>
array (
0 => 82,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
17 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 85,
1 => 11,
),
1 =>
array (
0 => 85,
1 => 21,
),
2 =>
array (
0 => 86,
1 => 21,
),
3 =>
array (
0 => 86,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
18 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 89,
1 => 11,
),
1 =>
array (
0 => 89,
1 => 21,
),
2 =>
array (
0 => 90,
1 => 21,
),
3 =>
array (
0 => 90,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
19 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 93,
1 => 1,
),
1 =>
array (
0 => 93,
1 => 21,
),
2 =>
array (
0 => 94,
1 => 21,
),
3 =>
array (
0 => 94,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
20 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 97,
1 => 1,
),
1 =>
array (
0 => 97,
1 => 21,
),
2 =>
array (
0 => 98,
1 => 21,
),
3 =>
array (
0 => 98,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
21 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 101,
1 => 11,
),
1 =>
array (
0 => 101,
1 => 21,
),
2 =>
array (
0 => 102,
1 => 21,
),
3 =>
array (
0 => 102,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
22 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 105,
1 => 11,
),
1 =>
array (
0 => 105,
1 => 21,
),
2 =>
array (
0 => 106,
1 => 21,
),
3 =>
array (
0 => 106,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
23 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 109,
1 => 1,
),
1 =>
array (
0 => 109,
1 => 21,
),
2 =>
array (
0 => 110,
1 => 21,
),
3 =>
array (
0 => 110,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
24 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 113,
1 => 11,
),
1 =>
array (
0 => 113,
1 => 21,
),
2 =>
array (
0 => 114,
1 => 21,
),
3 =>
array (
0 => 114,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
25 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 117,
1 => 11,
),
1 =>
array (
0 => 117,
1 => 21,
),
2 =>
array (
0 => 118,
1 => 21,
),
3 =>
array (
0 => 118,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
26 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 121,
1 => 1,
),
1 =>
array (
0 => 121,
1 => 21,
),
2 =>
array (
0 => 122,
1 => 21,
),
3 =>
array (
0 => 122,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
27 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 125,
1 => 11,
),
1 =>
array (
0 => 125,
1 => 21,
),
2 =>
array (
0 => 126,
1 => 21,
),
3 =>
array (
0 => 126,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
28 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 129,
1 => 1,
),
1 =>
array (
0 => 129,
1 => 21,
),
2 =>
array (
0 => 130,
1 => 21,
),
3 =>
array (
0 => 130,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
29 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 133,
1 => 11,
),
1 =>
array (
0 => 133,
1 => 21,
),
2 =>
array (
0 => 134,
1 => 21,
),
3 =>
array (
0 => 134,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
30 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 137,
1 => 1,
),
1 =>
array (
0 => 137,
1 => 21,
),
2 =>
array (
0 => 138,
1 => 21,
),
3 =>
array (
0 => 138,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
31 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 141,
1 => 11,
),
1 =>
array (
0 => 141,
1 => 21,
),
2 =>
array (
0 => 142,
1 => 21,
),
3 =>
array (
0 => 142,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
32 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 145,
1 => 11,
),
1 =>
array (
0 => 145,
1 => 21,
),
2 =>
array (
0 => 146,
1 => 21,
),
3 =>
array (
0 => 146,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
33 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 149,
1 => 1,
),
1 =>
array (
0 => 149,
1 => 21,
),
2 =>
array (
0 => 150,
1 => 21,
),
3 =>
array (
0 => 150,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
34 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 153,
1 => 11,
),
1 =>
array (
0 => 153,
1 => 21,
),
2 =>
array (
0 => 154,
1 => 21,
),
3 =>
array (
0 => 154,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
35 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 157,
1 => 1,
),
1 =>
array (
0 => 157,
1 => 21,
),
2 =>
array (
0 => 158,
1 => 21,
),
3 =>
array (
0 => 158,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
36 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 161,
1 => 11,
),
1 =>
array (
0 => 161,
1 => 21,
),
2 =>
array (
0 => 162,
1 => 21,
),
3 =>
array (
0 => 162,
1 => 11,
),
),
'color' => 0,
'filled' => true,
),
37 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 165,
1 => 1,
),
1 =>
array (
0 => 165,
1 => 21,
),
2 =>
array (
0 => 166,
1 => 21,
),
3 =>
array (
0 => 166,
1 => 1,
),
),
'color' => 0,
'filled' => true,
),
38 =>
array (
'type' => 'polygon',
'points' =>
array (
0 =>
array (
0 => 0,
1 => 0,
),
1 =>
array (
0 => 187,
1 => 0,
),
2 =>
array (
0 => 187,
1 => 23,
),
3 =>
array (
0 => 0,
1 => 23,
),
4 =>
array (
0 => 0,
1 => 0,
),
),
'color' => 0,
'filled' => false,
),
);
|
{
"pile_set_name": "Github"
}
|
/******************************************************************************
* $Id$
*
* Project: OpenGIS Simple Features Reference Implementation
* Purpose: C API and constant declarations for OGR Spatial References.
* Author: Frank Warmerdam, warmerdam@pobox.com
*
******************************************************************************
* Copyright (c) 2000, Frank Warmerdam
* Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
****************************************************************************/
#ifndef OGR_SRS_API_H_INCLUDED
#define OGR_SRS_API_H_INCLUDED
#ifndef SWIG
#include "ogr_core.h"
CPL_C_START
/**
* \file ogr_srs_api.h
*
* C spatial reference system services and defines.
*
* See also: ogr_spatialref.h
*/
/* -------------------------------------------------------------------- */
/* Axis orientations (corresponds to CS_AxisOrientationEnum). */
/* -------------------------------------------------------------------- */
/** Axis orientation codes (values correspond to the OGC CS_AxisOrientationEnum,
 *  per the comment block above). */
typedef enum {
    OAO_Other=0,  /**< Other / unspecified orientation */
    OAO_North=1,  /**< Axis values increase to the north */
    OAO_South=2,  /**< Axis values increase to the south */
    OAO_East=3,   /**< Axis values increase to the east */
    OAO_West=4,   /**< Axis values increase to the west */
    OAO_Up=5,     /**< Axis values increase upward */
    OAO_Down=6    /**< Axis values increase downward */
} OGRAxisOrientation;
const char CPL_DLL *OSRAxisEnumToName( OGRAxisOrientation eOrientation );
/* -------------------------------------------------------------------- */
/* Datum types (corresponds to CS_DatumType). */
/* -------------------------------------------------------------------- */
/** Datum type codes (values correspond to CS_DatumType, per the comment block
 *  above). Ranges appear to be: HD_* horizontal datums [1000,1999],
 *  VD_* vertical datums [2000,2999], LD_* local datums [10000,32767]
 *  — naming follows the OGC coordinate-transformation spec; confirm there. */
typedef enum {
    ODT_HD_Min=1000,               /**< Lowest horizontal-datum code */
    ODT_HD_Other=1000,             /**< Unclassified horizontal datum */
    ODT_HD_Classic=1001,           /**< Classic (non-geocentric) horizontal datum */
    ODT_HD_Geocentric=1002,        /**< Geocentric horizontal datum */
    ODT_HD_Max=1999,               /**< Highest horizontal-datum code */
    ODT_VD_Min=2000,               /**< Lowest vertical-datum code */
    ODT_VD_Other=2000,             /**< Unclassified vertical datum */
    ODT_VD_Orthometric=2001,       /**< Orthometric-height vertical datum */
    ODT_VD_Ellipsoidal=2002,       /**< Ellipsoidal-height vertical datum */
    ODT_VD_AltitudeBarometric=2003,/**< Barometric-altitude vertical datum */
    ODT_VD_Normal=2004,            /**< Normal-height vertical datum */
    ODT_VD_GeoidModelDerived=2005, /**< Geoid-model-derived vertical datum */
    ODT_VD_Depth=2006,             /**< Depth (positive down) vertical datum */
    ODT_VD_Max=2999,               /**< Highest vertical-datum code */
    ODT_LD_Min=10000,              /**< Lowest local-datum code */
    ODT_LD_Max=32767               /**< Highest local-datum code */
} OGRDatumType;
#endif // ndef SWIG
/* ==================================================================== */
/* Some standard WKT geographic coordinate systems. */
/* ==================================================================== */
#define SRS_WKT_WGS84 "GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563,AUTHORITY[\"EPSG\",\"7030\"]],AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4326\"]]"
/* ==================================================================== */
/* Some "standard" strings. */
/* ==================================================================== */
#define SRS_PT_ALBERS_CONIC_EQUAL_AREA \
"Albers_Conic_Equal_Area"
#define SRS_PT_AZIMUTHAL_EQUIDISTANT "Azimuthal_Equidistant"
#define SRS_PT_CASSINI_SOLDNER "Cassini_Soldner"
#define SRS_PT_CYLINDRICAL_EQUAL_AREA "Cylindrical_Equal_Area"
#define SRS_PT_BONNE "Bonne"
#define SRS_PT_ECKERT_I "Eckert_I"
#define SRS_PT_ECKERT_II "Eckert_II"
#define SRS_PT_ECKERT_III "Eckert_III"
#define SRS_PT_ECKERT_IV "Eckert_IV"
#define SRS_PT_ECKERT_V "Eckert_V"
#define SRS_PT_ECKERT_VI "Eckert_VI"
#define SRS_PT_EQUIDISTANT_CONIC \
"Equidistant_Conic"
#define SRS_PT_EQUIRECTANGULAR "Equirectangular"
#define SRS_PT_GALL_STEREOGRAPHIC \
"Gall_Stereographic"
#define SRS_PT_GAUSSSCHREIBERTMERCATOR \
"Gauss_Schreiber_Transverse_Mercator"
#define SRS_PT_GEOSTATIONARY_SATELLITE \
"Geostationary_Satellite"
#define SRS_PT_GOODE_HOMOLOSINE "Goode_Homolosine"
#define SRS_PT_IGH "Interrupted_Goode_Homolosine"
#define SRS_PT_GNOMONIC "Gnomonic"
#define SRS_PT_HOTINE_OBLIQUE_MERCATOR_AZIMUTH_CENTER \
"Hotine_Oblique_Mercator_Azimuth_Center"
#define SRS_PT_HOTINE_OBLIQUE_MERCATOR \
"Hotine_Oblique_Mercator"
#define SRS_PT_HOTINE_OBLIQUE_MERCATOR_TWO_POINT_NATURAL_ORIGIN \
"Hotine_Oblique_Mercator_Two_Point_Natural_Origin"
#define SRS_PT_LABORDE_OBLIQUE_MERCATOR \
"Laborde_Oblique_Mercator"
#define SRS_PT_LAMBERT_CONFORMAL_CONIC_1SP \
"Lambert_Conformal_Conic_1SP"
#define SRS_PT_LAMBERT_CONFORMAL_CONIC_2SP \
"Lambert_Conformal_Conic_2SP"
#define SRS_PT_LAMBERT_CONFORMAL_CONIC_2SP_BELGIUM \
"Lambert_Conformal_Conic_2SP_Belgium"
#define SRS_PT_LAMBERT_AZIMUTHAL_EQUAL_AREA \
"Lambert_Azimuthal_Equal_Area"
#define SRS_PT_MERCATOR_1SP "Mercator_1SP"
#define SRS_PT_MERCATOR_2SP "Mercator_2SP"
// Mercator_Auxiliary_Sphere is used by ESRI to mean EPSG:3857 (Web Mercator / Pseudo-Mercator)
#define SRS_PT_MERCATOR_AUXILIARY_SPHERE \
"Mercator_Auxiliary_Sphere"
#define SRS_PT_MILLER_CYLINDRICAL "Miller_Cylindrical"
#define SRS_PT_MOLLWEIDE "Mollweide"
#define SRS_PT_NEW_ZEALAND_MAP_GRID \
"New_Zealand_Map_Grid"
#define SRS_PT_OBLIQUE_STEREOGRAPHIC \
"Oblique_Stereographic"
#define SRS_PT_ORTHOGRAPHIC "Orthographic"
#define SRS_PT_POLAR_STEREOGRAPHIC \
"Polar_Stereographic"
#define SRS_PT_POLYCONIC "Polyconic"
#define SRS_PT_ROBINSON "Robinson"
#define SRS_PT_SINUSOIDAL "Sinusoidal"
#define SRS_PT_STEREOGRAPHIC "Stereographic"
#define SRS_PT_SWISS_OBLIQUE_CYLINDRICAL \
"Swiss_Oblique_Cylindrical"
#define SRS_PT_TRANSVERSE_MERCATOR \
"Transverse_Mercator"
#define SRS_PT_TRANSVERSE_MERCATOR_SOUTH_ORIENTED \
"Transverse_Mercator_South_Orientated"
/* special mapinfo variants on Transverse Mercator */
#define SRS_PT_TRANSVERSE_MERCATOR_MI_21 \
"Transverse_Mercator_MapInfo_21"
#define SRS_PT_TRANSVERSE_MERCATOR_MI_22 \
"Transverse_Mercator_MapInfo_22"
#define SRS_PT_TRANSVERSE_MERCATOR_MI_23 \
"Transverse_Mercator_MapInfo_23"
#define SRS_PT_TRANSVERSE_MERCATOR_MI_24 \
"Transverse_Mercator_MapInfo_24"
#define SRS_PT_TRANSVERSE_MERCATOR_MI_25 \
"Transverse_Mercator_MapInfo_25"
#define SRS_PT_TUNISIA_MINING_GRID \
"Tunisia_Mining_Grid"
#define SRS_PT_TWO_POINT_EQUIDISTANT \
"Two_Point_Equidistant"
#define SRS_PT_VANDERGRINTEN "VanDerGrinten"
#define SRS_PT_KROVAK "Krovak"
#define SRS_PT_IMW_POLYCONIC "International_Map_of_the_World_Polyconic"
#define SRS_PT_WAGNER_I "Wagner_I"
#define SRS_PT_WAGNER_II "Wagner_II"
#define SRS_PT_WAGNER_III "Wagner_III"
#define SRS_PT_WAGNER_IV "Wagner_IV"
#define SRS_PT_WAGNER_V "Wagner_V"
#define SRS_PT_WAGNER_VI "Wagner_VI"
#define SRS_PT_WAGNER_VII "Wagner_VII"
#define SRS_PT_QSC "Quadrilateralized_Spherical_Cube"
#define SRS_PT_AITOFF "Aitoff"
#define SRS_PT_WINKEL_I "Winkel_I"
#define SRS_PT_WINKEL_II "Winkel_II"
#define SRS_PT_WINKEL_TRIPEL "Winkel_Tripel"
#define SRS_PT_CRASTER_PARABOLIC "Craster_Parabolic"
#define SRS_PT_LOXIMUTHAL "Loximuthal"
#define SRS_PT_QUARTIC_AUTHALIC "Quartic_Authalic"
#define SRS_PT_SCH "Spherical_Cross_Track_Height"
#define SRS_PP_CENTRAL_MERIDIAN "central_meridian"
#define SRS_PP_SCALE_FACTOR "scale_factor"
#define SRS_PP_STANDARD_PARALLEL_1 "standard_parallel_1"
#define SRS_PP_STANDARD_PARALLEL_2 "standard_parallel_2"
#define SRS_PP_PSEUDO_STD_PARALLEL_1 "pseudo_standard_parallel_1"
#define SRS_PP_LONGITUDE_OF_CENTER "longitude_of_center"
#define SRS_PP_LATITUDE_OF_CENTER "latitude_of_center"
#define SRS_PP_LONGITUDE_OF_ORIGIN "longitude_of_origin"
#define SRS_PP_LATITUDE_OF_ORIGIN "latitude_of_origin"
#define SRS_PP_FALSE_EASTING "false_easting"
#define SRS_PP_FALSE_NORTHING "false_northing"
#define SRS_PP_AZIMUTH "azimuth"
#define SRS_PP_LONGITUDE_OF_POINT_1 "longitude_of_point_1"
#define SRS_PP_LATITUDE_OF_POINT_1 "latitude_of_point_1"
#define SRS_PP_LONGITUDE_OF_POINT_2 "longitude_of_point_2"
#define SRS_PP_LATITUDE_OF_POINT_2 "latitude_of_point_2"
#define SRS_PP_LONGITUDE_OF_POINT_3 "longitude_of_point_3"
#define SRS_PP_LATITUDE_OF_POINT_3 "latitude_of_point_3"
#define SRS_PP_RECTIFIED_GRID_ANGLE "rectified_grid_angle"
#define SRS_PP_LANDSAT_NUMBER "landsat_number"
#define SRS_PP_PATH_NUMBER "path_number"
#define SRS_PP_PERSPECTIVE_POINT_HEIGHT "perspective_point_height"
#define SRS_PP_SATELLITE_HEIGHT "satellite_height"
#define SRS_PP_FIPSZONE "fipszone"
#define SRS_PP_ZONE "zone"
#define SRS_PP_LATITUDE_OF_1ST_POINT "Latitude_Of_1st_Point"
#define SRS_PP_LONGITUDE_OF_1ST_POINT "Longitude_Of_1st_Point"
#define SRS_PP_LATITUDE_OF_2ND_POINT "Latitude_Of_2nd_Point"
#define SRS_PP_LONGITUDE_OF_2ND_POINT "Longitude_Of_2nd_Point"
#define SRS_PP_PEG_POINT_LATITUDE "peg_point_latitude"
#define SRS_PP_PEG_POINT_LONGITUDE "peg_point_longitude"
#define SRS_PP_PEG_POINT_HEADING "peg_point_heading"
#define SRS_PP_PEG_POINT_HEIGHT "peg_point_height"
#define SRS_UL_METER "Meter"
#define SRS_UL_FOOT "Foot (International)" /* or just "FOOT"? */
#define SRS_UL_FOOT_CONV "0.3048"
#define SRS_UL_US_FOOT "Foot_US" /* or "US survey foot" from EPSG */
#define SRS_UL_US_FOOT_CONV "0.3048006096012192"
#define SRS_UL_NAUTICAL_MILE "Nautical Mile"
#define SRS_UL_NAUTICAL_MILE_CONV "1852.0"
#define SRS_UL_LINK "Link" /* Based on US Foot */
#define SRS_UL_LINK_CONV "0.20116684023368047"
#define SRS_UL_CHAIN "Chain" /* based on US Foot */
#define SRS_UL_CHAIN_CONV "20.116684023368047"
#define SRS_UL_ROD "Rod" /* based on US Foot */
#define SRS_UL_ROD_CONV "5.02921005842012"
#define SRS_UL_LINK_Clarke "Link_Clarke"
#define SRS_UL_LINK_Clarke_CONV "0.2011661949"
#define SRS_UL_KILOMETER "Kilometer"
#define SRS_UL_KILOMETER_CONV "1000."
#define SRS_UL_DECIMETER "Decimeter"
#define SRS_UL_DECIMETER_CONV "0.1"
#define SRS_UL_CENTIMETER "Centimeter"
#define SRS_UL_CENTIMETER_CONV "0.01"
#define SRS_UL_MILLIMETER "Millimeter"
#define SRS_UL_MILLIMETER_CONV "0.001"
#define SRS_UL_INTL_NAUT_MILE "Nautical_Mile_International"
#define SRS_UL_INTL_NAUT_MILE_CONV "1852.0"
#define SRS_UL_INTL_INCH "Inch_International"
#define SRS_UL_INTL_INCH_CONV "0.0254"
#define SRS_UL_INTL_FOOT "Foot_International"
#define SRS_UL_INTL_FOOT_CONV "0.3048"
#define SRS_UL_INTL_YARD "Yard_International"
#define SRS_UL_INTL_YARD_CONV "0.9144"
#define SRS_UL_INTL_STAT_MILE "Statute_Mile_International"
#define SRS_UL_INTL_STAT_MILE_CONV "1609.344"
#define SRS_UL_INTL_FATHOM "Fathom_International"
#define SRS_UL_INTL_FATHOM_CONV "1.8288"
#define SRS_UL_INTL_CHAIN "Chain_International"
#define SRS_UL_INTL_CHAIN_CONV "20.1168"
#define SRS_UL_INTL_LINK "Link_International"
#define SRS_UL_INTL_LINK_CONV "0.201168"
#define SRS_UL_US_INCH "Inch_US_Surveyor"
#define SRS_UL_US_INCH_CONV "0.025400050800101603"
#define SRS_UL_US_YARD "Yard_US_Surveyor"
#define SRS_UL_US_YARD_CONV "0.914401828803658"
#define SRS_UL_US_CHAIN "Chain_US_Surveyor"
#define SRS_UL_US_CHAIN_CONV "20.11684023368047"
#define SRS_UL_US_STAT_MILE "Statute_Mile_US_Surveyor"
#define SRS_UL_US_STAT_MILE_CONV "1609.347218694437"
#define SRS_UL_INDIAN_YARD "Yard_Indian"
#define SRS_UL_INDIAN_YARD_CONV "0.91439523"
#define SRS_UL_INDIAN_FOOT "Foot_Indian"
#define SRS_UL_INDIAN_FOOT_CONV "0.30479841"
#define SRS_UL_INDIAN_CHAIN "Chain_Indian"
#define SRS_UL_INDIAN_CHAIN_CONV "20.11669506"
#define SRS_UA_DEGREE "degree"
#define SRS_UA_DEGREE_CONV "0.0174532925199433"
#define SRS_UA_RADIAN "radian"
#define SRS_PM_GREENWICH "Greenwich"
#define SRS_DN_NAD27 "North_American_Datum_1927"
#define SRS_DN_NAD83 "North_American_Datum_1983"
#define SRS_DN_WGS72 "WGS_1972"
#define SRS_DN_WGS84 "WGS_1984"
#define SRS_WGS84_SEMIMAJOR 6378137.0
#define SRS_WGS84_INVFLATTENING 298.257223563
#ifndef SWIG
/* -------------------------------------------------------------------- */
/* C Wrappers for C++ objects and methods. */
/* -------------------------------------------------------------------- */
#ifndef DEFINED_OGRSpatialReferenceH
#define DEFINED_OGRSpatialReferenceH
#ifdef DEBUG
typedef struct OGRSpatialReferenceHS *OGRSpatialReferenceH;
typedef struct OGRCoordinateTransformationHS *OGRCoordinateTransformationH;
#else
typedef void *OGRSpatialReferenceH;
typedef void *OGRCoordinateTransformationH;
#endif
#endif
OGRSpatialReferenceH CPL_DLL CPL_STDCALL
OSRNewSpatialReference( const char * /* = NULL */);
OGRSpatialReferenceH CPL_DLL CPL_STDCALL OSRCloneGeogCS( OGRSpatialReferenceH );
OGRSpatialReferenceH CPL_DLL CPL_STDCALL OSRClone( OGRSpatialReferenceH );
void CPL_DLL CPL_STDCALL OSRDestroySpatialReference( OGRSpatialReferenceH );
int CPL_DLL OSRReference( OGRSpatialReferenceH );
int CPL_DLL OSRDereference( OGRSpatialReferenceH );
void CPL_DLL OSRRelease( OGRSpatialReferenceH );
OGRErr CPL_DLL OSRValidate( OGRSpatialReferenceH );
OGRErr CPL_DLL OSRFixupOrdering( OGRSpatialReferenceH );
OGRErr CPL_DLL OSRFixup( OGRSpatialReferenceH );
OGRErr CPL_DLL OSRStripCTParms( OGRSpatialReferenceH );
OGRErr CPL_DLL CPL_STDCALL OSRImportFromEPSG( OGRSpatialReferenceH, int );
OGRErr CPL_DLL CPL_STDCALL OSRImportFromEPSGA( OGRSpatialReferenceH, int );
OGRErr CPL_DLL OSRImportFromWkt( OGRSpatialReferenceH, char ** );
OGRErr CPL_DLL OSRImportFromProj4( OGRSpatialReferenceH, const char *);
OGRErr CPL_DLL OSRImportFromESRI( OGRSpatialReferenceH, char **);
OGRErr CPL_DLL OSRImportFromPCI( OGRSpatialReferenceH hSRS, const char *,
const char *, double * );
OGRErr CPL_DLL OSRImportFromUSGS( OGRSpatialReferenceH,
long, long, double *, long);
OGRErr CPL_DLL OSRImportFromXML( OGRSpatialReferenceH, const char * );
OGRErr CPL_DLL OSRImportFromDict( OGRSpatialReferenceH, const char *,
const char * );
OGRErr CPL_DLL OSRImportFromPanorama( OGRSpatialReferenceH, long, long, long,
double * );
OGRErr CPL_DLL OSRImportFromOzi( OGRSpatialReferenceH , const char * const *);
OGRErr CPL_DLL OSRImportFromMICoordSys( OGRSpatialReferenceH, const char *);
OGRErr CPL_DLL OSRImportFromERM( OGRSpatialReferenceH,
const char *, const char *, const char * );
OGRErr CPL_DLL OSRImportFromUrl( OGRSpatialReferenceH, const char * );
OGRErr CPL_DLL CPL_STDCALL OSRExportToWkt( OGRSpatialReferenceH, char ** );
OGRErr CPL_DLL CPL_STDCALL OSRExportToPrettyWkt( OGRSpatialReferenceH, char **, int);
OGRErr CPL_DLL CPL_STDCALL OSRExportToProj4( OGRSpatialReferenceH, char **);
OGRErr CPL_DLL OSRExportToPCI( OGRSpatialReferenceH, char **, char **,
double ** );
OGRErr CPL_DLL OSRExportToUSGS( OGRSpatialReferenceH, long *, long *,
double **, long * );
OGRErr CPL_DLL OSRExportToXML( OGRSpatialReferenceH, char **, const char * );
OGRErr CPL_DLL OSRExportToPanorama( OGRSpatialReferenceH, long *, long *,
long *, long *, double * );
OGRErr CPL_DLL OSRExportToMICoordSys( OGRSpatialReferenceH, char ** );
OGRErr CPL_DLL OSRExportToERM( OGRSpatialReferenceH, char *, char *, char * );
OGRErr CPL_DLL OSRMorphToESRI( OGRSpatialReferenceH );
OGRErr CPL_DLL OSRMorphFromESRI( OGRSpatialReferenceH );
OGRErr CPL_DLL CPL_STDCALL OSRSetAttrValue( OGRSpatialReferenceH hSRS,
const char * pszNodePath,
const char * pszNewNodeValue );
const char CPL_DLL * CPL_STDCALL OSRGetAttrValue( OGRSpatialReferenceH hSRS,
const char * pszName, int iChild /* = 0 */ );
OGRErr CPL_DLL OSRSetAngularUnits( OGRSpatialReferenceH, const char *, double );
double CPL_DLL OSRGetAngularUnits( OGRSpatialReferenceH, char ** );
OGRErr CPL_DLL OSRSetLinearUnits( OGRSpatialReferenceH, const char *, double );
OGRErr CPL_DLL OSRSetTargetLinearUnits( OGRSpatialReferenceH, const char *, const char *, double );
OGRErr CPL_DLL OSRSetLinearUnitsAndUpdateParameters(
OGRSpatialReferenceH, const char *, double );
double CPL_DLL OSRGetLinearUnits( OGRSpatialReferenceH, char ** );
double CPL_DLL OSRGetTargetLinearUnits( OGRSpatialReferenceH, const char *, char ** );
double CPL_DLL OSRGetPrimeMeridian( OGRSpatialReferenceH, char ** );
int CPL_DLL OSRIsGeographic( OGRSpatialReferenceH );
int CPL_DLL OSRIsLocal( OGRSpatialReferenceH );
int CPL_DLL OSRIsProjected( OGRSpatialReferenceH );
int CPL_DLL OSRIsCompound( OGRSpatialReferenceH );
int CPL_DLL OSRIsGeocentric( OGRSpatialReferenceH );
int CPL_DLL OSRIsVertical( OGRSpatialReferenceH );
int CPL_DLL OSRIsSameGeogCS( OGRSpatialReferenceH, OGRSpatialReferenceH );
int CPL_DLL OSRIsSameVertCS( OGRSpatialReferenceH, OGRSpatialReferenceH );
int CPL_DLL OSRIsSame( OGRSpatialReferenceH, OGRSpatialReferenceH );
OGRErr CPL_DLL OSRSetLocalCS( OGRSpatialReferenceH hSRS, const char *pszName );
OGRErr CPL_DLL OSRSetProjCS( OGRSpatialReferenceH hSRS, const char * pszName );
OGRErr CPL_DLL OSRSetGeocCS( OGRSpatialReferenceH hSRS, const char * pszName );
OGRErr CPL_DLL OSRSetWellKnownGeogCS( OGRSpatialReferenceH hSRS,
const char * pszName );
OGRErr CPL_DLL CPL_STDCALL OSRSetFromUserInput( OGRSpatialReferenceH hSRS,
const char * );
OGRErr CPL_DLL OSRCopyGeogCSFrom( OGRSpatialReferenceH hSRS,
const OGRSpatialReferenceH hSrcSRS );
OGRErr CPL_DLL OSRSetTOWGS84( OGRSpatialReferenceH hSRS,
double, double, double,
double, double, double, double );
OGRErr CPL_DLL OSRGetTOWGS84( OGRSpatialReferenceH hSRS, double *, int );
OGRErr CPL_DLL OSRSetCompoundCS( OGRSpatialReferenceH hSRS,
const char *pszName,
OGRSpatialReferenceH hHorizSRS,
OGRSpatialReferenceH hVertSRS );
OGRErr CPL_DLL OSRSetGeogCS( OGRSpatialReferenceH hSRS,
const char * pszGeogName,
const char * pszDatumName,
const char * pszEllipsoidName,
double dfSemiMajor, double dfInvFlattening,
const char * pszPMName /* = NULL */,
double dfPMOffset /* = 0.0 */,
const char * pszUnits /* = NULL */,
double dfConvertToRadians /* = 0.0 */ );
OGRErr CPL_DLL OSRSetVertCS( OGRSpatialReferenceH hSRS,
const char * pszVertCSName,
const char * pszVertDatumName,
int nVertDatumType );
double CPL_DLL OSRGetSemiMajor( OGRSpatialReferenceH, OGRErr * /* = NULL */ );
double CPL_DLL OSRGetSemiMinor( OGRSpatialReferenceH, OGRErr * /* = NULL */ );
double CPL_DLL OSRGetInvFlattening( OGRSpatialReferenceH, OGRErr * /*=NULL*/);
OGRErr CPL_DLL OSRSetAuthority( OGRSpatialReferenceH hSRS,
const char * pszTargetKey,
const char * pszAuthority,
int nCode );
const char CPL_DLL *OSRGetAuthorityCode( OGRSpatialReferenceH hSRS,
const char * pszTargetKey );
const char CPL_DLL *OSRGetAuthorityName( OGRSpatialReferenceH hSRS,
const char * pszTargetKey );
OGRErr CPL_DLL OSRSetProjection( OGRSpatialReferenceH, const char * );
OGRErr CPL_DLL OSRSetProjParm( OGRSpatialReferenceH, const char *, double );
double CPL_DLL OSRGetProjParm( OGRSpatialReferenceH hSRS,
const char * pszParmName,
double dfDefault /* = 0.0 */,
OGRErr * /* = NULL */ );
OGRErr CPL_DLL OSRSetNormProjParm( OGRSpatialReferenceH, const char *, double);
double CPL_DLL OSRGetNormProjParm( OGRSpatialReferenceH hSRS,
const char * pszParmName,
double dfDefault /* = 0.0 */,
OGRErr * /* = NULL */ );
OGRErr CPL_DLL OSRSetUTM( OGRSpatialReferenceH hSRS, int nZone, int bNorth );
int CPL_DLL OSRGetUTMZone( OGRSpatialReferenceH hSRS, int *pbNorth );
OGRErr CPL_DLL OSRSetStatePlane( OGRSpatialReferenceH hSRS,
int nZone, int bNAD83 );
OGRErr CPL_DLL OSRSetStatePlaneWithUnits( OGRSpatialReferenceH hSRS,
int nZone, int bNAD83,
const char *pszOverrideUnitName,
double dfOverrideUnit );
OGRErr CPL_DLL OSRAutoIdentifyEPSG( OGRSpatialReferenceH hSRS );
int CPL_DLL OSREPSGTreatsAsLatLong( OGRSpatialReferenceH hSRS );
int CPL_DLL OSREPSGTreatsAsNorthingEasting( OGRSpatialReferenceH hSRS );
const char CPL_DLL *OSRGetAxis( OGRSpatialReferenceH hSRS,
const char *pszTargetKey, int iAxis,
OGRAxisOrientation *peOrientation );
OGRErr CPL_DLL OSRSetAxes( OGRSpatialReferenceH hSRS,
const char *pszTargetKey,
const char *pszXAxisName,
OGRAxisOrientation eXAxisOrientation,
const char *pszYAxisName,
OGRAxisOrientation eYAxisOrientation );
/** Albers Conic Equal Area */
OGRErr CPL_DLL OSRSetACEA( OGRSpatialReferenceH hSRS, double dfStdP1, double dfStdP2,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Azimuthal Equidistant */
OGRErr CPL_DLL OSRSetAE( OGRSpatialReferenceH hSRS, double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Bonne */
OGRErr CPL_DLL OSRSetBonne(OGRSpatialReferenceH hSRS,
double dfStandardParallel, double dfCentralMeridian,
double dfFalseEasting, double dfFalseNorthing );
/** Cylindrical Equal Area */
OGRErr CPL_DLL OSRSetCEA( OGRSpatialReferenceH hSRS, double dfStdP1, double dfCentralMeridian,
double dfFalseEasting, double dfFalseNorthing );
/** Cassini-Soldner */
OGRErr CPL_DLL OSRSetCS( OGRSpatialReferenceH hSRS, double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Equidistant Conic */
OGRErr CPL_DLL OSRSetEC( OGRSpatialReferenceH hSRS, double dfStdP1, double dfStdP2,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Eckert I-VI */
OGRErr CPL_DLL OSRSetEckert( OGRSpatialReferenceH hSRS, int nVariation,
double dfCentralMeridian,
double dfFalseEasting, double dfFalseNorthing );
/** Eckert IV */
OGRErr CPL_DLL OSRSetEckertIV( OGRSpatialReferenceH hSRS, double dfCentralMeridian,
double dfFalseEasting, double dfFalseNorthing );
/** Eckert VI */
OGRErr CPL_DLL OSRSetEckertVI( OGRSpatialReferenceH hSRS, double dfCentralMeridian,
double dfFalseEasting, double dfFalseNorthing );
/** Equirectangular */
OGRErr CPL_DLL OSRSetEquirectangular(OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Equirectangular generalized form */
OGRErr CPL_DLL OSRSetEquirectangular2( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfPseudoStdParallel1,
double dfFalseEasting,
double dfFalseNorthing );
/** Gall Stereographic */
OGRErr CPL_DLL OSRSetGS( OGRSpatialReferenceH hSRS, double dfCentralMeridian,
double dfFalseEasting, double dfFalseNorthing );
/** Goode Homolosine */
OGRErr CPL_DLL OSRSetGH( OGRSpatialReferenceH hSRS, double dfCentralMeridian,
double dfFalseEasting, double dfFalseNorthing );
/** Interrupted Goode Homolosine */
OGRErr CPL_DLL OSRSetIGH( OGRSpatialReferenceH hSRS );
/** GEOS - Geostationary Satellite View */
OGRErr CPL_DLL OSRSetGEOS( OGRSpatialReferenceH hSRS,
double dfCentralMeridian, double dfSatelliteHeight,
double dfFalseEasting, double dfFalseNorthing );
/** Gauss Schreiber Transverse Mercator */
OGRErr CPL_DLL OSRSetGaussSchreiberTMercator( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfScale,
double dfFalseEasting,
double dfFalseNorthing );
/** Gnomonic */
OGRErr CPL_DLL OSRSetGnomonic(OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Oblique Mercator (aka HOM, variant B) */
OGRErr CPL_DLL OSRSetOM( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfAzimuth, double dfRectToSkew,
double dfScale,
double dfFalseEasting, double dfFalseNorthing );
/** Hotine Oblique Mercator using azimuth angle */
OGRErr CPL_DLL OSRSetHOM( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfAzimuth, double dfRectToSkew,
double dfScale,
double dfFalseEasting, double dfFalseNorthing );
OGRErr CPL_DLL OSRSetHOMAC( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfAzimuth, double dfRectToSkew,
double dfScale,
double dfFalseEasting,
double dfFalseNorthing );
/** Hotine Oblique Mercator using two points on centerline */
OGRErr CPL_DLL OSRSetHOM2PNO( OGRSpatialReferenceH hSRS, double dfCenterLat,
double dfLat1, double dfLong1,
double dfLat2, double dfLong2,
double dfScale,
double dfFalseEasting, double dfFalseNorthing );
/** International Map of the World Polyconic */
OGRErr CPL_DLL OSRSetIWMPolyconic( OGRSpatialReferenceH hSRS,
double dfLat1, double dfLat2,
double dfCenterLong,
double dfFalseEasting,
double dfFalseNorthing );
/** Krovak Oblique Conic Conformal */
OGRErr CPL_DLL OSRSetKrovak( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfAzimuth, double dfPseudoStdParallelLat,
double dfScale,
double dfFalseEasting, double dfFalseNorthing );
/** Lambert Azimuthal Equal-Area */
OGRErr CPL_DLL OSRSetLAEA( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Lambert Conformal Conic */
OGRErr CPL_DLL OSRSetLCC( OGRSpatialReferenceH hSRS,
double dfStdP1, double dfStdP2,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Lambert Conformal Conic 1SP */
OGRErr CPL_DLL OSRSetLCC1SP( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfScale,
double dfFalseEasting, double dfFalseNorthing );
/** Lambert Conformal Conic (Belgium) */
OGRErr CPL_DLL OSRSetLCCB( OGRSpatialReferenceH hSRS,
double dfStdP1, double dfStdP2,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Miller Cylindrical */
OGRErr CPL_DLL OSRSetMC( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Mercator */
OGRErr CPL_DLL OSRSetMercator( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfScale,
double dfFalseEasting, double dfFalseNorthing );
OGRErr CPL_DLL OSRSetMercator2SP( OGRSpatialReferenceH hSRS,
double dfStdP1,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Mollweide */
OGRErr CPL_DLL OSRSetMollweide( OGRSpatialReferenceH hSRS,
double dfCentralMeridian,
double dfFalseEasting,
double dfFalseNorthing );
/** New Zealand Map Grid */
OGRErr CPL_DLL OSRSetNZMG( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Oblique Stereographic */
OGRErr CPL_DLL OSRSetOS( OGRSpatialReferenceH hSRS,
double dfOriginLat, double dfCMeridian,
double dfScale,
double dfFalseEasting,double dfFalseNorthing);
/** Orthographic */
OGRErr CPL_DLL OSRSetOrthographic( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting,
double dfFalseNorthing);
/** Polyconic */
OGRErr CPL_DLL OSRSetPolyconic( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Polar Stereographic */
OGRErr CPL_DLL OSRSetPS( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfScale,
double dfFalseEasting, double dfFalseNorthing);
/** Robinson */
OGRErr CPL_DLL OSRSetRobinson( OGRSpatialReferenceH hSRS,
double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Sinusoidal */
OGRErr CPL_DLL OSRSetSinusoidal( OGRSpatialReferenceH hSRS,
double dfCenterLong,
double dfFalseEasting,
double dfFalseNorthing );
/** Stereographic */
OGRErr CPL_DLL OSRSetStereographic( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfScale,
double dfFalseEasting,
double dfFalseNorthing);
/** Swiss Oblique Cylindrical */
OGRErr CPL_DLL OSRSetSOC( OGRSpatialReferenceH hSRS,
double dfLatitudeOfOrigin, double dfCentralMeridian,
double dfFalseEasting, double dfFalseNorthing );
/** Transverse Mercator
*
* Special processing available for Transverse Mercator with GDAL >= 1.10 and PROJ >= 4.8 :
* see OGRSpatialReference::exportToProj4().
*/
OGRErr CPL_DLL OSRSetTM( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfScale,
double dfFalseEasting, double dfFalseNorthing );
/** Transverse Mercator variant */
OGRErr CPL_DLL OSRSetTMVariant(
OGRSpatialReferenceH hSRS, const char *pszVariantName,
double dfCenterLat, double dfCenterLong,
double dfScale,
double dfFalseEasting, double dfFalseNorthing );
/** Tunisia Mining Grid */
OGRErr CPL_DLL OSRSetTMG( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Transverse Mercator (South Oriented) */
OGRErr CPL_DLL OSRSetTMSO( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong,
double dfScale,
double dfFalseEasting, double dfFalseNorthing );
OGRErr CPL_DLL OSRSetTPED( OGRSpatialReferenceH hSRS,
double dfLat1, double dfLong1,
double dfLat2, double dfLong2,
double dfFalseEasting, double dfFalseNorthing );
/** VanDerGrinten */
OGRErr CPL_DLL OSRSetVDG( OGRSpatialReferenceH hSRS,
double dfCenterLong,
double dfFalseEasting, double dfFalseNorthing );
/** Wagner I -- VII */
OGRErr CPL_DLL OSRSetWagner( OGRSpatialReferenceH hSRS, int nVariation,
double dfCenterLat,
double dfFalseEasting,
double dfFalseNorthing );
/** Quadrilateralized Spherical Cube */
OGRErr CPL_DLL OSRSetQSC( OGRSpatialReferenceH hSRS,
double dfCenterLat, double dfCenterLong );
/** Spherical, Cross-track, Height */
OGRErr CPL_DLL OSRSetSCH( OGRSpatialReferenceH hSRS,
double dfPegLat, double dfPegLong,
double dfPegHeading, double dfPegHgt);
double CPL_DLL OSRCalcInvFlattening( double dfSemiMajor, double dfSemiMinor );
double CPL_DLL OSRCalcSemiMinorFromInvFlattening( double dfSemiMajor, double dfInvFlattening );
void CPL_DLL OSRCleanup( void );
/* -------------------------------------------------------------------- */
/* OGRCoordinateTransform C API. */
/* -------------------------------------------------------------------- */
OGRCoordinateTransformationH CPL_DLL CPL_STDCALL
OCTNewCoordinateTransformation( OGRSpatialReferenceH hSourceSRS,
OGRSpatialReferenceH hTargetSRS );
void CPL_DLL CPL_STDCALL
OCTDestroyCoordinateTransformation( OGRCoordinateTransformationH );
int CPL_DLL CPL_STDCALL
OCTTransform( OGRCoordinateTransformationH hCT,
int nCount, double *x, double *y, double *z );
int CPL_DLL CPL_STDCALL
OCTTransformEx( OGRCoordinateTransformationH hCT,
int nCount, double *x, double *y, double *z,
int *pabSuccess );
/* this is really private to OGR. */
char *OCTProj4Normalize( const char *pszProj4Src );
void OCTCleanupProjMutex( void );
/* -------------------------------------------------------------------- */
/* Projection transform dictionary query. */
/* -------------------------------------------------------------------- */
char CPL_DLL ** OPTGetProjectionMethods( void );
char CPL_DLL ** OPTGetParameterList( const char * pszProjectionMethod,
char ** ppszUserName );
int CPL_DLL OPTGetParameterInfo( const char * pszProjectionMethod,
const char * pszParameterName,
char ** ppszUserName,
char ** ppszType,
double *pdfDefaultValue );
CPL_C_END
#endif /* ndef SWIG */
#endif /* ndef OGR_SRS_API_H_INCLUDED */
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.quickstep.inputconsumers;
import static android.view.MotionEvent.ACTION_CANCEL;
import static android.view.MotionEvent.ACTION_POINTER_DOWN;
import static android.view.MotionEvent.ACTION_UP;
import static com.android.launcher3.Utilities.squaredHypot;
import static com.android.launcher3.Utilities.squaredTouchSlop;
import static com.android.quickstep.MultiStateCallback.DEBUG_STATES;
import static com.android.quickstep.TouchInteractionService.INTENT_EXTRA_LOG_TRACE_ID;
import static com.android.quickstep.TouchInteractionService.startRecentsActivityAsync;
import static com.android.quickstep.WindowTransformSwipeHandler.MIN_PROGRESS_FOR_OVERVIEW;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.graphics.Point;
import android.graphics.PointF;
import android.graphics.Rect;
import android.graphics.RectF;
import android.view.MotionEvent;
import android.view.VelocityTracker;
import android.view.ViewConfiguration;
import com.android.launcher3.R;
import com.android.launcher3.Utilities;
import com.android.launcher3.util.DefaultDisplay;
import com.android.quickstep.LockScreenRecentsActivity;
import com.android.quickstep.MultiStateCallback;
import com.android.quickstep.SwipeSharedState;
import com.android.quickstep.util.ClipAnimationHelper;
import com.android.quickstep.util.RecentsAnimationListenerSet;
import com.android.quickstep.util.SwipeAnimationTargetSet;
import com.android.systemui.shared.system.InputMonitorCompat;
import com.android.systemui.shared.system.RemoteAnimationTargetCompat;
/**
* A dummy input consumer used when the device is still locked, e.g. from secure camera.
*/
public class DeviceLockedInputConsumer implements InputConsumer,
SwipeAnimationTargetSet.SwipeAnimationListener {
    // Scale factor applied during the swipe animation — presumably shrinks the
    // task window toward overview; TODO(review): confirm against usage below.
    private static final float SCALE_DOWN = 0.75f;
    // Human-readable state names for MultiStateCallback debugging; sized 2 to
    // match the two states registered via getFlagForIndex. Null unless
    // DEBUG_STATES is enabled.
    private static final String[] STATE_NAMES = DEBUG_STATES ? new String[2] : null;
private static int getFlagForIndex(int index, String name) {
if (DEBUG_STATES) {
STATE_NAMES[index] = name;
}
return 1 << index;
}
    // Set when the recents animation delivers its target set (onRecentsAnimationStart).
    private static final int STATE_TARGET_RECEIVED =
            getFlagForIndex(0, "STATE_TARGET_RECEIVED");
    // Set when this consumer is invalidated; combined with the flag above to
    // trigger endRemoteAnimation (see constructor).
    private static final int STATE_HANDLER_INVALIDATED =
            getFlagForIndex(1, "STATE_HANDLER_INVALIDATED");
    private final Context mContext;
    // Squared touch slop (from Utilities.squaredTouchSlop) — compared against
    // squared distances to avoid sqrt during move tracking.
    private final float mTouchSlopSquared;
    private final SwipeSharedState mSwipeSharedState;
    private final InputMonitorCompat mInputMonitorCompat;
    // Location of the initial ACTION_DOWN, in event coordinates.
    private final PointF mTouchDown = new PointF();
    private final ClipAnimationHelper mClipAnimationHelper;
    // Trace id propagated for logging (INTENT_EXTRA_LOG_TRACE_ID).
    private int mLogId;
    private final ClipAnimationHelper.TransformParams mTransformParams;
    // Raw display size — DeviceProfile is deliberately avoided (user locked).
    private final Point mDisplaySize;
    private final MultiStateCallback mStateCallback;
    // Region in which a swipe gesture is considered valid; extra pointers
    // outside it cancel the interaction (see ACTION_POINTER_DOWN handling).
    private final RectF mSwipeTouchRegion;
    public final int mRunningTaskId;
    // Obtained in the constructor; null after touch tracking finishes,
    // which is used as the "consumer inactive" sentinel in onMotionEvent.
    private VelocityTracker mVelocityTracker;
    private float mProgress;
    // True once the swipe has moved past the touch slop.
    private boolean mThresholdCrossed = false;
    private SwipeAnimationTargetSet mTargetSet;
public DeviceLockedInputConsumer(Context context, SwipeSharedState swipeSharedState,
InputMonitorCompat inputMonitorCompat, RectF swipeTouchRegion, int runningTaskId,
int logId) {
mContext = context;
mTouchSlopSquared = squaredTouchSlop(context);
mSwipeSharedState = swipeSharedState;
mClipAnimationHelper = new ClipAnimationHelper(context);
mLogId = logId;
mTransformParams = new ClipAnimationHelper.TransformParams();
mInputMonitorCompat = inputMonitorCompat;
mSwipeTouchRegion = swipeTouchRegion;
mRunningTaskId = runningTaskId;
// Do not use DeviceProfile as the user data might be locked
mDisplaySize = DefaultDisplay.INSTANCE.get(context).getInfo().realSize;
// Init states
mStateCallback = new MultiStateCallback(STATE_NAMES);
mStateCallback.addCallback(STATE_TARGET_RECEIVED | STATE_HANDLER_INVALIDATED,
this::endRemoteAnimation);
mVelocityTracker = VelocityTracker.obtain();
}
@Override
public int getType() {
return TYPE_DEVICE_LOCKED;
}
@Override
public void onMotionEvent(MotionEvent ev) {
if (mVelocityTracker == null) {
return;
}
mVelocityTracker.addMovement(ev);
float x = ev.getX();
float y = ev.getY();
switch (ev.getAction()) {
case MotionEvent.ACTION_DOWN:
mTouchDown.set(x, y);
break;
case ACTION_POINTER_DOWN: {
if (!mThresholdCrossed) {
// Cancel interaction in case of multi-touch interaction
int ptrIdx = ev.getActionIndex();
if (!mSwipeTouchRegion.contains(ev.getX(ptrIdx), ev.getY(ptrIdx))) {
int action = ev.getAction();
ev.setAction(ACTION_CANCEL);
finishTouchTracking(ev);
ev.setAction(action);
}
}
break;
}
case MotionEvent.ACTION_MOVE: {
if (!mThresholdCrossed) {
if (squaredHypot(x - mTouchDown.x, y - mTouchDown.y) > mTouchSlopSquared) {
startRecentsTransition();
}
} else {
float dy = Math.max(mTouchDown.y - y, 0);
mProgress = dy / mDisplaySize.y;
mTransformParams.setProgress(mProgress);
if (mTargetSet != null) {
mClipAnimationHelper.applyTransform(mTargetSet, mTransformParams);
}
}
break;
}
case MotionEvent.ACTION_CANCEL:
case MotionEvent.ACTION_UP:
finishTouchTracking(ev);
break;
}
}
/**
* Called when the gesture has ended. Does not correlate to the completion of the interaction as
* the animation can still be running.
*/
private void finishTouchTracking(MotionEvent ev) {
mStateCallback.setState(STATE_HANDLER_INVALIDATED);
if (mThresholdCrossed && ev.getAction() == ACTION_UP) {
mVelocityTracker.computeCurrentVelocity(1000,
ViewConfiguration.get(mContext).getScaledMaximumFlingVelocity());
float velocityY = mVelocityTracker.getYVelocity();
float flingThreshold = mContext.getResources()
.getDimension(R.dimen.quickstep_fling_threshold_velocity);
boolean dismissTask;
if (Math.abs(velocityY) > flingThreshold) {
// Is fling
dismissTask = velocityY < 0;
} else {
dismissTask = mProgress >= (1 - MIN_PROGRESS_FOR_OVERVIEW);
}
if (dismissTask) {
// For now, just start the home intent so user is prompted to unlock the device.
mContext.startActivity(new Intent(Intent.ACTION_MAIN)
.addCategory(Intent.CATEGORY_HOME)
.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK));
}
}
mVelocityTracker.recycle();
mVelocityTracker = null;
}
private void startRecentsTransition() {
mThresholdCrossed = true;
RecentsAnimationListenerSet newListenerSet =
mSwipeSharedState.newRecentsAnimationListenerSet();
newListenerSet.addListener(this);
Intent intent = new Intent(Intent.ACTION_MAIN)
.addCategory(Intent.CATEGORY_DEFAULT)
.setComponent(new ComponentName(mContext, LockScreenRecentsActivity.class))
.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK | Intent.FLAG_ACTIVITY_CLEAR_TASK)
.putExtra(INTENT_EXTRA_LOG_TRACE_ID, mLogId);
mInputMonitorCompat.pilferPointers();
startRecentsActivityAsync(intent, newListenerSet);
}
@Override
public void onRecentsAnimationStart(SwipeAnimationTargetSet targetSet) {
mTargetSet = targetSet;
Rect displaySize = new Rect(0, 0, mDisplaySize.x, mDisplaySize.y);
RemoteAnimationTargetCompat targetCompat = targetSet.findTask(mRunningTaskId);
if (targetCompat != null) {
mClipAnimationHelper.updateSource(displaySize, targetCompat);
}
Utilities.scaleRectAboutCenter(displaySize, SCALE_DOWN);
displaySize.offsetTo(displaySize.left, 0);
mClipAnimationHelper.updateTargetRect(displaySize);
mClipAnimationHelper.applyTransform(mTargetSet, mTransformParams);
mStateCallback.setState(STATE_TARGET_RECEIVED);
}
@Override
public void onRecentsAnimationCanceled() {
mTargetSet = null;
}
private void endRemoteAnimation() {
if (mTargetSet != null) {
mTargetSet.finishController(
false /* toRecents */, null /* callback */, false /* sendUserLeaveHint */);
}
}
@Override
public void onConsumerAboutToBeSwitched() {
mStateCallback.setState(STATE_HANDLER_INVALIDATED);
}
@Override
public boolean allowInterceptByParent() {
return !mThresholdCrossed;
}
}
|
{
"pile_set_name": "Github"
}
|
{
"author": "Microsoft Community",
"name": "Sample Data",
"description": "SampleDataService contiene i dati di esempio usati dalla versione generata di alcune pagine.",
"identity": "wts.Service.SampleDataService"
}
|
{
"pile_set_name": "Github"
}
|
# ci-info
Get details about the current Continuous Integration environment.
Please [open an
issue](https://github.com/watson/ci-info/issues/new?template=ci-server-not-detected.md)
if your CI server isn't properly detected :)
[](https://www.npmjs.com/package/ci-info)
[](https://travis-ci.org/watson/ci-info)
[](https://github.com/feross/standard)
## Installation
```bash
npm install ci-info --save
```
## Usage
```js
var ci = require('ci-info')
if (ci.isCI) {
console.log('The name of the CI server is:', ci.name)
} else {
console.log('This program is not running on a CI server')
}
```
## Supported CI tools
Officially supported CI servers:
| Name | Constant |
|------|----------|
| [AWS CodeBuild](https://aws.amazon.com/codebuild/) | `ci.CODEBUILD` |
| [AppVeyor](http://www.appveyor.com) | `ci.APPVEYOR` |
| [Bamboo](https://www.atlassian.com/software/bamboo) by Atlassian | `ci.BAMBOO` |
| [Bitbucket Pipelines](https://bitbucket.org/product/features/pipelines) | `ci.BITBUCKET` |
| [Bitrise](https://www.bitrise.io/) | `ci.BITRISE` |
| [Buddy](https://buddy.works/) | `ci.BUDDY` |
| [Buildkite](https://buildkite.com) | `ci.BUILDKITE` |
| [CircleCI](http://circleci.com) | `ci.CIRCLE` |
| [Cirrus CI](https://cirrus-ci.org) | `ci.CIRRUS` |
| [Codeship](https://codeship.com) | `ci.CODESHIP` |
| [Drone](https://drone.io) | `ci.DRONE` |
| [dsari](https://github.com/rfinnie/dsari) | `ci.DSARI` |
| [GitLab CI](https://about.gitlab.com/gitlab-ci/) | `ci.GITLAB` |
| [GoCD](https://www.go.cd/) | `ci.GOCD` |
| [Hudson](http://hudson-ci.org) | `ci.HUDSON` |
| [Jenkins CI](https://jenkins-ci.org) | `ci.JENKINS` |
| [Magnum CI](https://magnum-ci.com) | `ci.MAGNUM` |
| [Sail CI](https://sail.ci/) | `ci.SAIL` |
| [Semaphore](https://semaphoreci.com) | `ci.SEMAPHORE` |
| [Shippable](https://www.shippable.com/) | `ci.SHIPPABLE` |
| [Solano CI](https://www.solanolabs.com/) | `ci.SOLANO` |
| [Strider CD](https://strider-cd.github.io/) | `ci.STRIDER` |
| [TaskCluster](http://docs.taskcluster.net) | `ci.TASKCLUSTER` |
| [Team Foundation Server](https://www.visualstudio.com/en-us/products/tfs-overview-vs.aspx) by Microsoft | `ci.TFS` |
| [TeamCity](https://www.jetbrains.com/teamcity/) by JetBrains | `ci.TEAMCITY` |
| [Travis CI](http://travis-ci.org) | `ci.TRAVIS` |
## API
### `ci.name`
A string. Will contain the name of the CI server the code is running on.
If no CI server is detected, it will be `null`.
Don't depend on the value of this string not to change for a specific
vendor. If you find yourself writing `ci.name === 'Travis CI'`, you
most likely want to use `ci.TRAVIS` instead.
### `ci.isCI`
A boolean. Will be `true` if the code is running on a CI server.
Otherwise `false`.
Some CI servers not listed here might still trigger the `ci.isCI`
boolean to be set to `true` if they use certain vendor neutral
environment variables. In those cases `ci.name` will be `null` and no
vendor specific boolean will be set to `true`.
### `ci.isPR`
A boolean if PR detection is supported for the current CI server. Will
be `true` if a PR is being tested. Otherwise `false`. If PR detection is
not supported for the current CI server, the value will be `null`.
### `ci.<VENDOR-CONSTANT>`
A vendor-specific boolean constant is exposed for each supported CI
vendor. A constant will be `true` if the code is determined to run on
the given CI server. Otherwise `false`.
Examples of vendor constants are `ci.TRAVIS` or `ci.APPVEYOR`. For a
complete list, see the support table above.
Deprecated vendor constants that will be removed in the next major
release:
- `ci.TDDIUM` (Solano CI) This has been renamed to `ci.SOLANO`
## License
[MIT](LICENSE)
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2006-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.consol.citrus.ftp.client;
import com.consol.citrus.endpoint.AbstractEndpointBuilder;
import com.consol.citrus.message.ErrorHandlingStrategy;
import com.consol.citrus.message.MessageCorrelator;
/**
 * Fluent builder for {@link FtpClient} endpoints. Every setter delegates to the
 * underlying endpoint configuration and returns this builder for method chaining.
 *
 * @author Christoph Deppisch
 * @since 2.5
 */
public class FtpClientBuilder extends AbstractEndpointBuilder<FtpClient> {
    /** Endpoint target */
    private FtpClient endpoint = new FtpClient();
    @Override
    protected FtpClient getEndpoint() {
        return endpoint;
    }
    /**
     * Sets the host property.
     * @param host host name or IP address of the FTP server
     * @return this builder for chaining
     */
    public FtpClientBuilder host(String host) {
        endpoint.getEndpointConfiguration().setHost(host);
        return this;
    }
    /**
     * Sets the port property.
     * @param port TCP port of the FTP server
     * @return this builder for chaining
     */
    public FtpClientBuilder port(int port) {
        endpoint.getEndpointConfiguration().setPort(port);
        return this;
    }
    /**
     * Sets the auto read files property.
     * @param autoReadFiles whether retrieved files should be read automatically
     * @return this builder for chaining
     */
    public FtpClientBuilder autoReadFiles(boolean autoReadFiles) {
        endpoint.getEndpointConfiguration().setAutoReadFiles(autoReadFiles);
        return this;
    }
    /**
     * Sets the local passive mode property.
     * @param localPassiveMode whether the client should use local passive mode
     * @return this builder for chaining
     */
    public FtpClientBuilder localPassiveMode(boolean localPassiveMode) {
        endpoint.getEndpointConfiguration().setLocalPassiveMode(localPassiveMode);
        return this;
    }
    /**
     * Sets the client username.
     * @param username user name used for FTP authentication
     * @return this builder for chaining
     */
    public FtpClientBuilder username(String username) {
        endpoint.getEndpointConfiguration().setUser(username);
        return this;
    }
    /**
     * Sets the client password.
     * @param password password used for FTP authentication
     * @return this builder for chaining
     */
    public FtpClientBuilder password(String password) {
        endpoint.getEndpointConfiguration().setPassword(password);
        return this;
    }
    /**
     * Sets the message correlator.
     * @param correlator correlator used to match reply messages to requests
     * @return this builder for chaining
     */
    public FtpClientBuilder correlator(MessageCorrelator correlator) {
        endpoint.getEndpointConfiguration().setCorrelator(correlator);
        return this;
    }
    /**
     * Sets the error handling strategy.
     * @param errorStrategy strategy applied when an error reply is received
     * @return this builder for chaining
     */
    public FtpClientBuilder errorHandlingStrategy(ErrorHandlingStrategy errorStrategy) {
        endpoint.getEndpointConfiguration().setErrorHandlingStrategy(errorStrategy);
        return this;
    }
    /**
     * Sets the polling interval.
     * @param pollingInterval polling interval (presumably milliseconds -- TODO confirm against the endpoint configuration)
     * @return this builder for chaining
     */
    public FtpClientBuilder pollingInterval(int pollingInterval) {
        endpoint.getEndpointConfiguration().setPollingInterval(pollingInterval);
        return this;
    }
    /**
     * Sets the default timeout.
     * @param timeout default timeout (presumably milliseconds -- TODO confirm against the endpoint configuration)
     * @return this builder for chaining
     */
    public FtpClientBuilder timeout(long timeout) {
        endpoint.getEndpointConfiguration().setTimeout(timeout);
        return this;
    }
}
|
{
"pile_set_name": "Github"
}
|
// Copyright (c) 2010-2013 AlphaSierraPapa for the SharpDevelop Team
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
using System;
using System.Collections.Generic;
using System.Text;
namespace ICSharpCode.NRefactory.TypeSystem
{
/// <summary>
/// Substitutes class and method type parameters.
/// </summary>
public class TypeParameterSubstitution : TypeVisitor
{
/// <summary>
/// The identity function.
/// </summary>
public static readonly TypeParameterSubstitution Identity = new TypeParameterSubstitution(null, null);
readonly IList<IType> classTypeArguments;
readonly IList<IType> methodTypeArguments;
/// <summary>
/// Creates a new type parameter substitution.
/// </summary>
/// <param name="classTypeArguments">
/// The type arguments to substitute for class type parameters.
/// Pass <c>null</c> to keep class type parameters unmodified.
/// </param>
/// <param name="methodTypeArguments">
/// The type arguments to substitute for method type parameters.
/// Pass <c>null</c> to keep method type parameters unmodified.
/// </param>
public TypeParameterSubstitution(IList<IType> classTypeArguments, IList<IType> methodTypeArguments)
{
this.classTypeArguments = classTypeArguments;
this.methodTypeArguments = methodTypeArguments;
}
/// <summary>
/// Gets the list of class type arguments.
/// Returns <c>null</c> if this substitution keeps class type parameters unmodified.
/// </summary>
public IList<IType> ClassTypeArguments {
get { return classTypeArguments; }
}
/// <summary>
/// Gets the list of method type arguments.
/// Returns <c>null</c> if this substitution keeps method type parameters unmodified.
/// </summary>
public IList<IType> MethodTypeArguments {
get { return methodTypeArguments; }
}
#region Compose
/// <summary>
/// Computes a single TypeParameterSubstitution so that for all types <c>t</c>:
/// <c>t.AcceptVisitor(Compose(g, f)) equals t.AcceptVisitor(f).AcceptVisitor(g)</c>
/// </summary>
/// <remarks>If you consider type parameter substitution to be a function, this is function composition.</remarks>
public static TypeParameterSubstitution Compose(TypeParameterSubstitution g, TypeParameterSubstitution f)
{
if (g == null)
return f;
if (f == null || (f.classTypeArguments == null && f.methodTypeArguments == null))
return g;
// The composition is a copy of 'f', with 'g' applied on the array elements.
// If 'f' has a null list (keeps type parameters unmodified), we have to treat it as
// the identity function, and thus use the list from 'g'.
var classTypeArguments = f.classTypeArguments != null ? GetComposedTypeArguments(f.classTypeArguments, g) : g.classTypeArguments;
var methodTypeArguments = f.methodTypeArguments != null ? GetComposedTypeArguments(f.methodTypeArguments, g) : g.methodTypeArguments;
return new TypeParameterSubstitution(classTypeArguments, methodTypeArguments);
}
static IList<IType> GetComposedTypeArguments(IList<IType> input, TypeParameterSubstitution substitution)
{
IType[] result = new IType[input.Count];
for (int i = 0; i < result.Length; i++) {
result[i] = input[i].AcceptVisitor(substitution);
}
return result;
}
#endregion
#region Equals and GetHashCode implementation
public override bool Equals(object obj)
{
TypeParameterSubstitution other = obj as TypeParameterSubstitution;
if (other == null)
return false;
return TypeListEquals(classTypeArguments, other.classTypeArguments)
&& TypeListEquals(methodTypeArguments, other.methodTypeArguments);
}
public override int GetHashCode()
{
unchecked {
return 1124131 * TypeListHashCode(classTypeArguments) + 1821779 * TypeListHashCode(methodTypeArguments);
}
}
static bool TypeListEquals(IList<IType> a, IList<IType> b)
{
if (a == b)
return true;
if (a == null || b == null)
return false;
if (a.Count != b.Count)
return false;
for (int i = 0; i < a.Count; i++) {
if (!a[i].Equals(b[i]))
return false;
}
return true;
}
static int TypeListHashCode(IList<IType> obj)
{
if (obj == null)
return 0;
unchecked {
int hashCode = 1;
foreach (var element in obj) {
hashCode *= 27;
hashCode += element.GetHashCode();
}
return hashCode;
}
}
#endregion
public override IType VisitTypeParameter(ITypeParameter type)
{
int index = type.Index;
if (classTypeArguments != null && type.OwnerType == SymbolKind.TypeDefinition) {
if (index >= 0 && index < classTypeArguments.Count)
return classTypeArguments[index];
else
return SpecialType.UnknownType;
} else if (methodTypeArguments != null && type.OwnerType == SymbolKind.Method) {
if (index >= 0 && index < methodTypeArguments.Count)
return methodTypeArguments[index];
else
return SpecialType.UnknownType;
} else {
return base.VisitTypeParameter(type);
}
}
public override string ToString()
{
StringBuilder b = new StringBuilder();
b.Append('[');
bool first = true;
if (classTypeArguments != null) {
for (int i = 0; i < classTypeArguments.Count; i++) {
if (first) first = false; else b.Append(", ");
b.Append('`');
b.Append(i);
b.Append(" -> ");
b.Append(classTypeArguments[i].ReflectionName);
}
}
if (methodTypeArguments != null) {
for (int i = 0; i < methodTypeArguments.Count; i++) {
if (first) first = false; else b.Append(", ");
b.Append("``");
b.Append(i);
b.Append(" -> ");
b.Append(methodTypeArguments[i].ReflectionName);
}
}
b.Append(']');
return b.ToString();
}
}
}
|
{
"pile_set_name": "Github"
}
|
namespace ClassLib112
{
    /// <summary>
    /// Sample class exposing the name of its containing class library.
    /// </summary>
    public class Class028
    {
        /// <summary>Gets the containing class library's name.</summary>
        public static string Property
        {
            get { return "ClassLib112"; }
        }
    }
}
|
{
"pile_set_name": "Github"
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.