repo_name stringlengths 4 116 | path stringlengths 4 379 | size stringlengths 1 7 | content stringlengths 3 1.05M | license stringclasses 15
values |
|---|---|---|---|---|
zszyellow/leetcode | Cpp/1085.sum-of-digits-in-the-minimum-number.cpp | 292 | class Solution {
public:
// Returns 1 when the digit sum of the smallest element of A is even, 0 when
// odd (LeetCode 1085 contract). Assumes A is non-empty per the problem
// constraints; an empty A falls back to INT_MAX exactly as the original fold.
int sumOfDigits(vector<int>& A) {
    // Fold to find the smallest entry (INT_MAX seed preserved deliberately).
    int smallest = INT_MAX;
    for (const int value : A) {
        smallest = std::min(smallest, value);
    }
    // Add up the decimal digits via the number's string form.
    int digitSum = 0;
    for (const char digit : std::to_string(smallest)) {
        digitSum += digit - '0';
    }
    // 1 when the digit sum is even, 0 when it is odd.
    return digitSum % 2 == 0 ? 1 : 0;
}
}; | mit |
mhuijser/Rock | resources/views/frontend/shared/partials/footer.blade.php | 1580 | <section class="section">
@if(!empty(\Canvas\Models\Settings::disqus()))
@include('canvas::frontend.blog.partials.disqus')
@endif
</section>
<section class="hero is-primary is-small is-bold">
<!-- Hero content: will be in the middle -->
<div class="hero-body">
<div class="container has-text-centered">
<!--
Well, you can add your favourite quote here by yourself, or
uncomment one below.
-->
<h2 class="subtitle">"When my heart is <strong>overwhelmed</strong>, lead me to the <strong>Rock</strong> that is higher than I"</h2>
<h5>Psalms 61</h5>
<!--
<h2 class="subtitle">When the solution is simple, <strong>God is answering</strong>.</h2>
<h5>Albert Einstein</h5>
-->
</div>
</div>
</section>
<footer class="footer">
<div class="container">
<div class="content has-text-centered">
<p>
<small>Proudly powered by <a href="https://canvas.toddaustin.io" target="_blank">Canvas</a> · </small>
</p>
<p>
<small><a href="{!! route('canvas.admin') !!}"><i class="fa fa-lock icon is-small" style="font-size: 14px; padding-top:12px;"></i><i>Sign in</i></a></small>
</p>
</div>
</div>
</footer>
<!-- scroll to top button -->
<div id="top-link-block" class="hidden hover-button">
<a id="scroll-to-top" href="#top" class="button is-primary is-small is-pulled-right">SCROLL TO TOP</a>
</div>
@if (!empty(\Canvas\Models\Settings::gaId()))
@include('canvas::frontend.blog.partials.analytics')
@endif
| mit |
swisscex/api-client-java | src/ch/swisscex/api/client/ui/MainWindow.java | 1855 | package ch.swisscex.api.client.ui;
import java.awt.BorderLayout;
import javax.swing.JFrame;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.JTabbedPane;
import javax.swing.ListSelectionModel;
import javax.swing.event.ListSelectionEvent;
import javax.swing.event.ListSelectionListener;
import javax.swing.event.TableModelEvent;
import javax.swing.event.TableModelListener;
import ch.swisscex.api.client.ApiClient;
import ch.swisscex.api.client.ApiPushClient;
public class MainWindow extends JFrame {
private static final long serialVersionUID = 6984798735118484777L;
private final ApiClient apiClient;
private final ApiPushClient apiPushClient;
/**
 * Creates the main trading window. Call {@link #start()} to build and show
 * the UI.
 *
 * @param client        API client used to load quote and chart data
 * @param apiPushClient push API client; stored here, not used by the code in
 *                      this class's visible methods
 */
public MainWindow(ApiClient client, ApiPushClient apiPushClient) {
	super("SWISSCEX - Trading Client");
	this.apiClient = client;
	this.apiPushClient = apiPushClient;
	// Closing this window terminates the whole application.
	setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
}
/**
 * Builds the widgets and makes the frame visible: a quote list docked on the
 * left and a tabbed area that receives one OHLC chart tab per selected symbol.
 */
public void start() {
	final JTabbedPane tabbedPane = new JTabbedPane();
	final QuoteList quoteList = new QuoteList(apiClient);
	getContentPane().add(new JScrollPane(quoteList), BorderLayout.WEST);
	getContentPane().add(tabbedPane, BorderLayout.CENTER);
	// pack() first so children compute their preferred sizes, then enforce the
	// intended window size. The original called pack() after setSize(), which
	// silently discarded the 1024x768 request.
	pack();
	setSize(1024, 768);
	setVisible(true);
	quoteList.getSelectionModel().setSelectionMode(ListSelectionModel.SINGLE_SELECTION);
	quoteList.getSelectionModel().addListSelectionListener(new ListSelectionListener() {
		@Override
		public void valueChanged(ListSelectionEvent e) {
			if (e.getValueIsAdjusting()) {
				return; // wait for the final event of the selection gesture
			}
			// getFirstIndex() is only the first row whose selection *changed*,
			// not necessarily the selected row; ask the selection model instead.
			int row = quoteList.getSelectionModel().getMinSelectionIndex();
			if (row < 0) {
				return; // selection was cleared
			}
			String symbol = quoteList.getModel().getValueAt(row, 0).toString();
			OHLCPane pane = new OHLCPane(apiClient);
			// Explicit BorderLayout so the CENTER constraint actually applies
			// (the default FlowLayout ignored it in the original).
			JPanel panel = new JPanel(new BorderLayout());
			panel.add(pane, BorderLayout.CENTER);
			tabbedPane.addTab(symbol, panel);
			pane.loadData(symbol);
		}
	});
}
} | mit |
helospark/SparkTools | SparkBuilderGeneratorPlugin/src/com/helospark/spark/builder/preferences/impl/NamedElementListPluginPreference.java | 1849 | package com.helospark.spark.builder.preferences.impl;
import java.util.List;
import org.eclipse.core.runtime.preferences.IEclipsePreferences;
import org.eclipse.jface.preference.ComboFieldEditor;
import org.eclipse.jface.preference.FieldEditor;
import org.eclipse.swt.widgets.Composite;
import com.helospark.spark.builder.NamedElementWithId;
import com.helospark.spark.builder.handlers.codegenerator.component.helper.PreferenceStoreWrapper;
/**
 * Preference that allows selecting from a list of {@link NamedElementWithId}.
* @author helospark
*/
public class NamedElementListPluginPreference<T extends NamedElementWithId> extends AbstractPluginPreference<T> {
    // Candidate elements the user may choose between.
    private List<T> values;

    public NamedElementListPluginPreference(String key, String description, List<T> values, T defaultValue) {
        super(key, description, defaultValue);
        this.values = values;
    }

    /**
     * Resolves the stored preference string back to the element whose id
     * matches it, or {@code null} when nothing is stored or the id is unknown.
     */
    @Override
    public T getCurrentPreferenceValue(PreferenceStoreWrapper preferenceStore) {
        String storedId = preferenceStore.getString(this.getKey()).orElse(null);
        for (T candidate : values) {
            if (candidate.getId().equals(storedId)) {
                return candidate;
            }
        }
        return null;
    }

    /** Builds a combo editor listing each element's display name, backed by its id. */
    @Override
    public FieldEditor createFieldEditor(Composite parent) {
        String[][] namesAndIds = new String[values.size()][2];
        int row = 0;
        for (T element : values) {
            namesAndIds[row][0] = element.getDisplayName();
            namesAndIds[row][1] = element.getId();
            ++row;
        }
        return new ComboFieldEditor(this.getKey(), this.getDescription(), namesAndIds, parent);
    }

    /** Seeds the preference store with the default element's id. */
    @Override
    public void putDefaultValue(IEclipsePreferences preferences) {
        preferences.put(this.getKey(), this.getDefaultValue().getId());
    }
}
| mit |
jamesology/AzureVmFarmer | AzureVmFarmer/AzureVmFarmer.Service/App_Start/FilterConfig.cs | 232 | using System.Web;
using System.Web.Mvc;
namespace AzureVmFarmer
{
/// <summary>
/// Registers MVC filters that apply to every action in the application.
/// </summary>
public class FilterConfig
{
/// <summary>
/// Adds the global filter set; typically invoked once at application startup.
/// </summary>
/// <param name="filters">The application's global filter collection to populate.</param>
public static void RegisterGlobalFilters(GlobalFilterCollection filters)
{
// HandleErrorAttribute renders the shared error view for unhandled exceptions
// when custom errors are enabled.
filters.Add(new HandleErrorAttribute());
}
}
}
| mit |
majtenyim/thesis-webserver | modules/frameworks/autoload.php | 1768 | <?php
// DOM pufferolása.
ob_start('postfix');
session_start();
// Sokat használt elérési utak definiálása.
define('MOD_PATH', "modules/");
define('FRAME_PATH', "modules/frameworks/");
define('TEMP_PATH', "templates/");
define('STYLE_PATH', "css/");
define('JS_PATH', "js/");
define('TMP_FILES',"sky/_tmp/");
/* Osztályokat tartalmazó .php file-ok behívása. */
/**
 * Class autoloader: requires the PHP file backing $class and records any
 * matching per-class CSS/JS asset links in the session for later injection
 * by postfix().
 *
 * @param string $class name of the class being loaded
 */
function __autoload($class){
	// Application classes.
	if(is_file(MOD_PATH.$class.'.php')){
		require_once(MOD_PATH.$class.'.php');
	}
	// Framework classes. The original wrapped this require in
	// `if(!FRAME_PATH.$class.'.php')`, which concatenates the negated constant
	// into a non-empty (always truthy) string - the require always ran anyway,
	// so the broken inner guard is dropped here.
	if(is_file(FRAME_PATH.$class.'.php')){
		require_once(FRAME_PATH.$class.'.php');
	}
	// Stylesheet belonging to the class.
	// NOTE(review): unset() discards links collected for earlier classes, so
	// only the most recently autoloaded class keeps its CSS/JS. Preserved
	// as-is because it may be intentional - confirm before changing.
	if(is_file(STYLE_PATH.$class.'.css')){
		unset($_SESSION['css']);
		$_SESSION['css'][] = '<link type="text/css" rel="stylesheet" href="css/'.$class.'.css" />';
	}
	// Script belonging to the class.
	if(is_file(JS_PATH.$class.'.js')){
		unset($_SESSION['js']);
		$_SESSION['js'][] = '<script type="text/javascript" src="js/'.$class.'.js"></script>';
	}
}
/* Pufferolt kimeneti stringek kezelése, pattern csere. */
/**
 * Output-buffer callback (registered via ob_start): substitutes the
 * %title%/%keywords%/%description%/%css%/%js% placeholders in the buffered
 * page and decodes a few HTML entities.
 *
 * @param string $output the buffered page markup
 * @return string the page with placeholders replaced
 */
function postfix($output){
	// Initialise the accumulators so '.=' does not raise an undefined-variable
	// notice, and guard the session keys, which are only set when __autoload
	// found matching assets.
	$css = '';
	$js = '';
	if(isset($_SESSION['css'])){
		foreach($_SESSION['css'] as $cssLink){
			$css .= $cssLink;
		}
	}
	if(isset($_SESSION['js'])){
		foreach($_SESSION['js'] as $jsLink){
			$js .= $jsLink;
		}
	}
	// Fall back to '' when a session value is missing (same output, no notice).
	$output = str_replace('%title%', isset($_SESSION['title']) ? $_SESSION['title'] : '', $output);
	$output = str_replace('%keywords%', isset($_SESSION['keywords']) ? $_SESSION['keywords'] : '', $output);
	$output = str_replace('%description%', isset($_SESSION['description']) ? $_SESSION['description'] : '', $output);
	$output = str_replace('%css%',$css,$output);
	$output = str_replace('%js%',$js,$output);
	// NOTE(review): replacing '&' with '&' is a no-op; the original entity
	// (likely '&amp;') appears to have been lost in transit - confirm against
	// the upstream source before changing the string.
	$output = str_replace('&', '&',$output);
	$output = str_replace('&nbsp;',' ',$output);
	$output = str_replace('&copy;','©',$output);
	return $output;
}
?> | mit |
noslouch/pa | core/expressionengine/language/english/core_lang.php | 7718 | <?php
$lang = array(
//----------------------------
// General word list
//----------------------------
'no' =>
'No',
'yes' =>
'Yes',
'on' =>
'on',
'off' =>
'off',
'first' =>
'First',
'last' =>
'Last',
'enabled' =>
'enabled',
'disabled' =>
'disabled',
'back' =>
'Back',
'submit' =>
'Submit',
'update' =>
'Update',
'thank_you' =>
'Thank You!',
'page' =>
'Page',
'of' =>
'of',
'by' =>
'by',
'at' =>
'at',
'dot' =>
'dot',
'and' =>
'and',
'or' =>
'or',
'id' =>
'ID',
'encoded_email' =>
'(JavaScript must be enabled to view this email address)',
'search' =>
'Search',
'system_off_msg' =>
'This site is currently inactive.',
'not_authorized' =>
'You are not authorized to perform this action',
'auto_redirection' =>
'You will be redirected automatically in %x seconds',
'click_if_no_redirect' =>
'Click here if you are not redirected automatically',
'return_to_previous' =>
'Return to Previous Page',
'not_available' =>
'Not available',
'setting' =>
'Setting',
'preference' =>
'Preference',
'pag_first_link' => '‹ First',
'pag_last_link' => 'Last ›',
//----------------------------
// Errors
//----------------------------
'error' =>
'Error',
'invalid_url' =>
'The URL you submitted is not valid.',
'submission_error' =>
'The form you submitted contained the following errors',
'general_error' =>
'The following errors were encountered',
'invalid_action' =>
'The action you have requested is invalid.',
'csrf_token_expired' =>
'This form has expired. Please refresh and try again.',
'current_password_required' =>
'Your current password is required.',
'current_password_incorrect' =>
'Your current password was not submitted correctly.',
'captcha_required' =>
'You must submit the word that appears in the image',
'captcha_incorrect' =>
'You did not submit the word exactly as it appears in the image',
'nonexistent_page' =>
'The page you requested was not found',
'unable_to_load_field_type' =>
'Unable to load requested field type file: %s.<br />
Confirm the fieldtype file is located in the expressionengine/third_party/ directory',
//----------------------------
// Member Groups
//----------------------------
'banned' =>
'Banned',
'guests' =>
'Guests',
'members' =>
'Members',
'pending' =>
'Pending',
'super_admins' =>
'Super Admins',
//----------------------------
// Template.php
//----------------------------
'error_tag_syntax' =>
'The following tag has a syntax error:',
'error_fix_syntax' =>
'Please correct the syntax in your template.',
'error_tag_module_processing' =>
'The following tag cannot be processed:',
'error_fix_module_processing' =>
'Please check that the \'%x\' module is installed and that \'%y\' is an available method of the module',
'template_loop' =>
'You have caused a template loop due to improperly nested sub-templates (\'%s\' recursively called)',
'template_load_order' =>
'Template load order',
'error_multiple_layouts' =>
'Multiple Layouts found, please ensure you only have one layout tag per template',
'error_layout_too_late' =>
'Plugin or module tag found before layout declaration. Please move the layout tag to the top of your template.',
//----------------------------
// Email
//----------------------------
'forgotten_email_sent' =>
'If this email address is associated with an account, instructions for resetting your password have just been emailed to you.',
'error_sending_email' =>
'Unable to send email at this time.',
'no_email_found' =>
'The email address you submitted was not found in the database.',
'your_new_login_info' =>
'Login information',
'password_has_been_reset' =>
'Your password was reset and a new one has been emailed to you.',
//----------------------------
// Date
//----------------------------
'ago' =>
'%x ago',
'year' =>
'year',
'years' =>
'years',
'month' =>
'month',
'months' =>
'months',
'fortnight' =>
'fortnight',
'fortnights' =>
'fortnights',
'week' =>
'week',
'weeks' =>
'weeks',
'day' =>
'day',
'days' =>
'days',
'hour' =>
'hour',
'hours' =>
'hours',
'minute' =>
'minute',
'minutes' =>
'minutes',
'second' =>
'second',
'seconds' =>
'seconds',
'am' =>
'am',
'pm' =>
'pm',
'AM' =>
'AM',
'PM' =>
'PM',
'Sun' =>
'Sun',
'Mon' =>
'Mon',
'Tue' =>
'Tue',
'Wed' =>
'Wed',
'Thu' =>
'Thu',
'Fri' =>
'Fri',
'Sat' =>
'Sat',
'Su' =>
'S',
'Mo' =>
'M',
'Tu' =>
'T',
'We' =>
'W',
'Th' =>
'T',
'Fr' =>
'F',
'Sa' =>
'S',
'Sunday' =>
'Sunday',
'Monday' =>
'Monday',
'Tuesday' =>
'Tuesday',
'Wednesday' =>
'Wednesday',
'Thursday' =>
'Thursday',
'Friday' =>
'Friday',
'Saturday' =>
'Saturday',
'Jan' =>
'Jan',
'Feb' =>
'Feb',
'Mar' =>
'Mar',
'Apr' =>
'Apr',
'May' =>
'May',
'Jun' =>
'Jun',
'Jul' =>
'Jul',
'Aug' =>
'Aug',
'Sep' =>
'Sep',
'Oct' =>
'Oct',
'Nov' =>
'Nov',
'Dec' =>
'Dec',
'January' =>
'January',
'February' =>
'February',
'March' =>
'March',
'April' =>
'April',
'May_l' =>
'May',
'June' =>
'June',
'July' =>
'July',
'August' =>
'August',
'September' =>
'September',
'October' =>
'October',
'November' =>
'November',
'December' =>
'December',
'UM12' => '(UTC -12:00) Baker/Howland Island',
'UM11' => '(UTC -11:00) Niue',
'UM10' => '(UTC -10:00) Hawaii-Aleutian Standard Time, Cook Islands, Tahiti',
'UM95' => '(UTC -9:30) Marquesas Islands',
'UM9' => '(UTC -9:00) Alaska Standard Time, Gambier Islands',
'UM8' => '(UTC -8:00) Pacific Standard Time, Clipperton Island',
'UM7' => '(UTC -7:00) Mountain Standard Time',
'UM6' => '(UTC -6:00) Central Standard Time',
'UM5' => '(UTC -5:00) Eastern Standard Time, Western Caribbean Standard Time',
'UM45' => '(UTC -4:30) Venezuelan Standard Time',
'UM4' => '(UTC -4:00) Atlantic Standard Time, Eastern Caribbean Standard Time',
'UM35' => '(UTC -3:30) Newfoundland Standard Time',
'UM3' => '(UTC -3:00) Argentina, Brazil, French Guiana, Uruguay',
'UM2' => '(UTC -2:00) South Georgia/South Sandwich Islands',
'UM1' => '(UTC -1:00) Azores, Cape Verde Islands',
'UTC' => '(UTC) Greenwich Mean Time, Western European Time',
'UP1' => '(UTC +1:00) Central European Time, West Africa Time',
'UP2' => '(UTC +2:00) Central Africa Time, Eastern European Time, Kaliningrad Time',
'UP3' => '(UTC +3:00) East Africa Time, Arabia Standard Time',
'UP35' => '(UTC +3:30) Iran Standard Time',
'UP4' => '(UTC +4:00) Moscow Time, Azerbaijan Standard Time',
'UP45' => '(UTC +4:30) Afghanistan',
'UP5' => '(UTC +5:00) Pakistan Standard Time, Yekaterinburg Time',
'UP55' => '(UTC +5:30) Indian Standard Time, Sri Lanka Time',
'UP575' => '(UTC +5:45) Nepal Time',
'UP6' => '(UTC +6:00) Bangladesh Standard Time, Bhutan Time, Omsk Time',
'UP65' => '(UTC +6:30) Cocos Islands, Myanmar',
'UP7' => '(UTC +7:00) Krasnoyarsk Time, Cambodia, Laos, Thailand, Vietnam',
'UP8' => '(UTC +8:00) Australian Western Standard Time, Beijing Time, Irkutsk Time',
'UP875' => '(UTC +8:45) Australian Central Western Standard Time',
'UP9' => '(UTC +9:00) Japan Standard Time, Korea Standard Time, Yakutsk Time',
'UP95' => '(UTC +9:30) Australian Central Standard Time',
'UP10' => '(UTC +10:00) Australian Eastern Standard Time, Vladivostok Time',
'UP105' => '(UTC +10:30) Lord Howe Island',
'UP11' => '(UTC +11:00) Magadan Time, Solomon Islands, Vanuatu',
'UP115' => '(UTC +11:30) Norfolk Island',
'UP12' => '(UTC +12:00) Fiji, Gilbert Islands, Kamchatka Time, New Zealand Standard Time',
'UP1275' => '(UTC +12:45) Chatham Islands Standard Time',
'UP13' => '(UTC +13:00) Samoa Time Zone, Phoenix Islands Time, Tonga',
'UP14' => '(UTC +14:00) Line Islands',
"select_timezone" =>
"Select Timezone",
"no_timezones" =>
"No Timezones",
// IGNORE
''=>'');
/* End of file core_lang.php */
/* Location: ./system/expressionengine/language/english/core_lang.php */ | mit |
jobinesh/jet-examples | node-jet1.2.0-mongo-app/public/js/libs/oj/v1.2.0/debug/ojknockout-validation.js | 26297 | /**
* Copyright (c) 2014, 2015, Oracle and/or its affiliates.
* All rights reserved.
*/
"use strict";
define(['ojs/ojcore', 'jquery', 'knockout', 'ojs/ojknockout', 'ojs/ojmessaging'], function(oj, $, ko)
{
/**
* Copyright (c) 2014, Oracle and/or its affiliates.
* All rights reserved.
*/
/*jslint browser: true, devel: true*/
// private to prevent creating a JSDoc page for this class. The only thing we wish
// to JSDoc is the invalidComponentTracker, which we're putting in EditableValue's output.
/**
* An extension to oj.ComponentBinding, properties exposed on this binding are available
* to jet components that extend from oj.editableValue.
*
* @private
* @constructor oj.ValueBinding
* @see oj.ComponentBinding
* @see oj.editableValue
* @since 0.6
*/
oj.ValueBinding = function(){};
/**
* <p>When this attribute is bound to an observable, the framework pushes an object of type {@link oj.InvalidComponentTracker}
* onto the observable. The object itself tracks the validity of a group of editable components.
*
* <p>When this attribute is present, the binding registers a listener for the <a href="#optionChange">optionChange</a>
* event. This event is fired by JET editable components whenever its validity changes (i.e. when
* <a href="#messagesShown">messagesShown</a> or <a href="#messagesHidden">messagesHidden</a>
* options change). When the event is fired, the listener determines the current validity of the
* component and updates the tracker.
*
* <p>
* The observable bound to this attribute is often used with multiple component binding declarations
* as shown in the example below.
* </p>
*
* <p>
* This attribute is only exposed via the <code class="prettyprint">ojComponent</code> binding, and
* is not a component option.
* </p>
*
* @example <caption>Track validity of multiple components using a single observable bound to the <code class="prettyprint">invalidComponentTracker</code> attribute:</caption>
* <input id="username" type="text" name="username" required
* data-bind="ojComponent: {component: 'ojInputText', value: userName,
* invalidComponentTracker: tracker}">
*
* <input id="password" type="password" name="password" required
* data-bind="ojComponent: {component: 'ojInputPassword', value: password,
* invalidComponentTracker: tracker}"/>
*
* // ViewModel that defines the tracker observable
* <script>
* function MemberViewModel()
* {
* var self = this;
*
* self.tracker = ko.observable();
*
* self.userName = ko.observable();
* self.password = ko.observable();
* }
* </script>
*
* @example <caption>Use tracker property <code class="prettyprint">invalid</code> to disable button:</caption>
* // button is disabled if there are components currently showing errors
* <button type="button" data-bind="ojComponent: {component: 'ojButton', label: 'Submit',
* disabled: tracker()['invalidShown']}"></button>
*
* @ojbindingonly
* @member
* @name invalidComponentTracker
* @memberof oj.editableValue
* @instance
* @type {oj.InvalidComponentTracker}
* @default <code class="prettyprint">null</code>
* @since 0.7
*/
/** prevent preceding jsdoc from applying to following line of code */
oj.ValueBinding._ATTRIBUTE_INVALID_COMPONENT_TRACKER = 'invalidComponentTracker';
// An listener is added for this event to listen to changes to the 'messagesHidden' or
// 'messagesShown' options. The listener updates the InvalidComponentTracker.
oj.ValueBinding._EVENT_OPTIONCHANGE = "ojoptionchange";
// Options we are interested in listening to changes for.
oj.ValueBinding._OPTION_MESSAGES_SHOWN = 'messagesShown';
oj.ValueBinding._OPTION_MESSAGES_HIDDEN = 'messagesHidden';
// options that are managed primarily to detect changes for tracker to be notified.
oj.ValueBinding._OPTION_DISABLED = 'disabled';
oj.ValueBinding._OPTION_READONLY = 'readOnly';
// callback called when managed attribute is being updated
/**
 * Managed-attribute update callback. Only 'disabled' and 'readOnly' are
 * handled: the new value is forwarded to the bound InvalidComponentTracker
 * (when one exists) via its private _update so it can react to the change,
 * and a {name: value} map is returned for the binding to apply. For any
 * other attribute the function returns undefined.
 *
 * @param {string} name managed attribute name
 * @param {*} value new attribute value
 * @param {Element} element element the component is bound to
 * @param {Function=} component the widget bridge
 * @param {Object=} valueAccessor accessor for the binding's options object
 * @private
 */
oj.ValueBinding._update = function(name, value, element, component, valueAccessor)
{
  var options = valueAccessor.call(), updateProps = {},
      ictObs = options[oj.ValueBinding._ATTRIBUTE_INVALID_COMPONENT_TRACKER],
      icTracker;
  if (name === oj.ValueBinding._OPTION_DISABLED || name === oj.ValueBinding._OPTION_READONLY)
  {
    // peek() rather than a plain read: don't add extra subscriptions
    // when either of these options are updated.
    icTracker = ictObs && ictObs.peek() || null;
    if (icTracker !== null && ko.isWriteableObservable(ictObs))
    {
      if (icTracker._update.call(icTracker, component, name, value))
      {
        // _update mutated tracker state - notify subscribers.
        ictObs.valueHasMutated();
      }
    }
    updateProps[name] = value;
    return updateProps;
  }
};
// init callback for managed attributes. When managing options like disabled, readOnly
// this method is required to return values.
/**
 * Managed-attribute init callback. For managed options such as 'disabled'
 * and 'readOnly' this must return the initial values so the binding applies
 * them to the component.
 *
 * @param {string} name managed attribute name
 * @param {*} value initial attribute value
 * @returns {Object} single-entry map {name: value}
 * @private
 */
oj.ValueBinding._init = function(name, value)
{
  var initProps = {};
  initProps[name] = value;
  return initProps;
};
/**
* Called after component binding creates the component.
* @param {string} property
* @param {Element} element the element to which binding applied the componnet
* @param {Function=} component the widget bridge
* @param {Object=} valueAccessor
* @private
*/
/**
 * Post-creation hook invoked after the component binding instantiates the
 * component. When the 'invalidComponentTracker' attribute was supplied,
 * wires up the writeback listener that keeps the bound tracker current.
 * Always returns an empty property map (nothing extra to initialize).
 *
 * @param {string} property managed attribute name
 * @param {Element} element the element to which binding applied the component
 * @param {Function=} component the widget bridge
 * @param {Object=} valueAccessor accessor for the binding's options object
 * @private
 */
oj.ValueBinding._afterCreate = function(property, element, component, valueAccessor)
{
  var boundOptions = valueAccessor.call();
  if (property === oj.ValueBinding._ATTRIBUTE_INVALID_COMPONENT_TRACKER &&
      !!boundOptions[property])
  {
    // Attribute present: register writeback via an optionChange listener.
    oj.ValueBinding._registerInvalidComponentTrackerWriteback(property, boundOptions, element, component);
  }
  return {};
};
/**
* Called right before component is destroyed.
*
* @param {Element} element
* @private
*/
/**
 * Called right before the component is destroyed. For the
 * 'invalidComponentTracker' attribute this detaches the optionChange
 * listener and removes the component from the tracker so a destroyed
 * component cannot keep the tracker reporting stale validity state.
 *
 * @param {string} property managed attribute name
 * @param {Element} element element the component was bound to
 * @param {Function=} component the widget bridge
 * @param {Object=} valueAccessor accessor for the binding's options object
 * @private
 */
oj.ValueBinding._beforeDestroy = function(property, element, component, valueAccessor)
{
  var jelem = $(element), options = valueAccessor.call(), icTracker,
      ictObs = options[property];
  if (property === oj.ValueBinding._ATTRIBUTE_INVALID_COMPONENT_TRACKER)
  {
    if (jelem)
    {
      // Unhook the listener registered by _registerInvalidComponentTrackerWriteback.
      jelem.off(oj.ValueBinding._EVENT_OPTIONCHANGE, oj.ValueBinding._updateInvalidComponentTracker);
      if (ictObs && ko.isWriteableObservable(ictObs))
      {
        icTracker = ictObs.peek();
        // remove component from tracker
        if (icTracker._remove.call(icTracker, component))
        {
          // if _remove mutates state, then components need to react to it.
          // example: a button that binds to properties on invalidComponentTracker.
          ictObs.valueHasMutated();
        }
      }
    }
  }
};
/**
* Listener for the optionChange event, it updates the invalidComponentTracker associated to the
* component that triggered the event.
*
* @param {jQuery.event=} event
* @private
*/
/**
 * optionChange event listener: updates the InvalidComponentTracker bound to
 * the component that fired the event. Only reacts to changes of the
 * 'messagesShown'/'messagesHidden' options.
 *
 * @param {jQuery.event=} event jQuery event; event.data carries the
 *        {tracker, component} pair attached at registration time
 * @private
 */
oj.ValueBinding._updateInvalidComponentTracker = function(event)
{
  // The widget event contract delivers the option-change payload as the
  // second positional argument, hence arguments[1].
  var ictObs = event.data.tracker, icTracker,
      component = event.data.component, payload = arguments[1], option = payload['option'],
      msgs = payload['value'];
  if (option === oj.ValueBinding._OPTION_MESSAGES_SHOWN ||
      option === oj.ValueBinding._OPTION_MESSAGES_HIDDEN)
  {
    if (ictObs && ko.isWriteableObservable(ictObs))
    {
      icTracker = ictObs.peek();
      if (icTracker && icTracker._update.call(icTracker, component, option, msgs))
      {
        // _update mutated tracker state - notify subscribers.
        ictObs.valueHasMutated();
      }
    }
  }
};
/**
* Register a default callback for the 'optionChange' event. The callback writes the component and
* its validity to the invalidComponentTracker observable.
* @param {string} property
* @param {Object} options original options set on element
* @param {Element} element
* @param {Function=} component
* @private
*/
oj.ValueBinding._registerInvalidComponentTrackerWriteback = function(property, options, element, component)
{
  var ictObs = options[property], messagesShown, messagesHidden, eventData,
      icTracker, jElem = $(element);
  // Create a new instance of InvalidComponentTracker if the observable has no value yet.
  if (ko.isObservable(ictObs))
  {
    icTracker = ictObs.peek();
    // Push a new oj.InvalidComponentTracker onto the observable if none present.
    if (icTracker == null) // null or undefined
    {
      icTracker = new oj.InvalidComponentTracker();
      ictObs(icTracker);
    }
  }
  else
  {
    // The bound value is not an observable - a hard usage error.
    throw new Error('Binding attribute ' + oj.ValueBinding._ATTRIBUTE_INVALID_COMPONENT_TRACKER +
        ' should be bound to a ko observable.');
  }
  if (icTracker !== null)
  {
    // Seed the tracker's initial state from the component's current
    // messagesShown/messagesHidden option values.
    if (ko.isWriteableObservable(ictObs))
    {
      messagesShown = component.call(component, "option", oj.ValueBinding._OPTION_MESSAGES_SHOWN);
      messagesHidden = component.call(component, "option", oj.ValueBinding._OPTION_MESSAGES_HIDDEN);
      icTracker._update.call(icTracker, component,
          oj.ValueBinding._OPTION_MESSAGES_SHOWN, messagesShown);
      icTracker._update.call(icTracker, component,
          oj.ValueBinding._OPTION_MESSAGES_HIDDEN, messagesHidden);
      ictObs.valueHasMutated();
    }
    // Register a listener for the optionChange event so future changes to the
    // messages* options keep the tracker current (removed in _beforeDestroy).
    eventData = {tracker: ictObs, component: component};
    jElem.on(oj.ValueBinding._EVENT_OPTIONCHANGE, eventData,
        oj.ValueBinding._updateInvalidComponentTracker);
  }
};
/**
* editableValue Behavior Definition and Injection
*/
oj.ComponentBinding.getDefaultInstance().setupManagedAttributes(
{
'for': 'editableValue',
'attributes': [oj.ValueBinding._ATTRIBUTE_INVALID_COMPONENT_TRACKER,
oj.ValueBinding._OPTION_DISABLED,
oj.ValueBinding._OPTION_READONLY],
'init': oj.ValueBinding._init,
'update': oj.ValueBinding._update,
'afterCreate': oj.ValueBinding._afterCreate,
'beforeDestroy': oj.ValueBinding._beforeDestroy
});
/**
* Copyright (c) 2014, Oracle and/or its affiliates.
* All rights reserved.
*/
/*jslint browser: true, devel: true*/
/**
* Tracks the validity of a group of components bound to this observable. It also provides
* properties and methods that the page author can use to enforce form validation best practice.
*
* <p>
* Validity of components that are disabled or readOnly will not be tracked by this object.</li>
* </p>
*
* <p>
* The <code class="prettyprint">invalidComponentTracker</code> binding attribute should be bound to
* a ko observable. At runtime the framework creates an instance of this type
* <code class="prettyprint">oj.InvalidComponentTracker</code> and sets it on the bound observable.<br/>
* This object can then be used by page authors to do the following -
* <ul>
* <li>determine if there are invalid components tracked by this object that are currently showing
* errors.</li>
* <li>determine if there are invalid components tracked by this object that are currently deferring
* showing errors.</li>
* <li>set focus on the first invalid component in the tracked group</li>
* <li>show all messages on all tracked components including deferred error messages, and set focus
* on the first invalid component.</li>
* </ul>
* </p>
*
* @example <caption> Bind an observable to the <code class="prettyprint">invalidComponentTracker</code> and access oj.InvalidComponentTracker instance.</caption>
* <input id="username" type="text" required
* data-bind="ojComponent: {component: 'ojInputText', value: userName,
* invalidComponentTracker: tracker}">
* <input id="password" type="text" required
* data-bind="ojComponent: {component: 'ojInputPassword', value: password,
* invalidComponentTracker: tracker}">
*
* <script>
* function MyViewModel() {
* var self = this;
* var tracker = ko.observable();
*
* log = function ()
* {
* var trackerObj = ko.utils.unwrapObservable(self.tracker);
* console.log(trackerObj instanceof oj.InvalidComponentTracker); // true
* }
* }
* </script>
*
* @class oj.InvalidComponentTracker
* @constructor
* @class
* @export
* @since 0.7
*/
oj.InvalidComponentTracker = function()
{
  // All state setup is delegated to Init(), per the oj.Object subclass convention.
  this.Init();
};
// Subclass from oj.Object
oj.Object.createSubclass(oj.InvalidComponentTracker, oj.Object, "oj.InvalidComponentTracker");
// DOCLETS for public properties
/**
* Whether there is at least one component (tracked by this object) that is invalid and is currently
* showing messages.
*
*
* @example <caption>Disable button using <code class="prettyprint">invalidShown</code> property:</caption>
* <input id="username" type="text" required
* data-bind="ojComponent: {component: 'ojInputText', value: userName,
* invalidComponentTracker: tracker}">
* <input id="password" type="text" required
* data-bind="ojComponent: {component: 'ojInputPassword', value: password,
* invalidComponentTracker: tracker}">
* <button type="button" data-bind="ojComponent: {component: 'ojButton', label: 'Create',
* disabled: tracker()['invalidShown']}"></button>
*
* <script>
* var userName = ko.observable();
* var password = ko.observable();
* var tracker = ko.observable();
* </script>
*
* @member
* @name invalidShown
* @access public
* @instance
* @default false
* @type {boolean}
* @expose
* @memberof! oj.InvalidComponentTracker
*/
/**
* prevent preceding jsdoc from applying to following line of code
* @ignore
*/
//Options we are interested in listening to changes for.
oj.InvalidComponentTracker._OPTION_MESSAGES_SHOWN = 'messagesShown';
oj.InvalidComponentTracker._OPTION_MESSAGES_HIDDEN = 'messagesHidden';
oj.InvalidComponentTracker._OPTION_DISABLED = 'disabled';
oj.InvalidComponentTracker._OPTION_READONLY = 'readOnly';
/**
* Whether there is at least one component that is invalid with deferred messages, i.e., messages
* that are currently hidden.
*
* @example <caption>Enable button using <code class="prettyprint">invalidHidden</code> property:</caption>
* <input id="username" type="text" required
* data-bind="ojComponent: {component: 'ojInputText', value: userName,
* invalidComponentTracker: tracker}">
* <input id="password" type="text" required
* data-bind="ojComponent: {component: 'ojInputPassword', value: password,
* invalidComponentTracker: tracker}">
* <br/>
* <button type="button" data-bind="ojComponent: {component: 'ojButton', label: 'Create',
* disabled: !tracker()['invalidHidden']}"></button>
*
* <script>
* var userName = ko.observable();
* var password = ko.observable();
* var tracker = ko.observable();
* </script>
*
* @member
* @name invalidHidden
* @access public
* @instance
* @default false
* @type {boolean}
* @expose
* @memberof! oj.InvalidComponentTracker
*/
/**
* Initializer
* @protected
* @memberof! oj.InvalidComponentTracker
* @instance
*/
oj.InvalidComponentTracker.prototype.Init = function()
{
  // (Removed an unused 'var self = this;' local from the original.)
  oj.InvalidComponentTracker.superclass.Init.call(this);
  // INTERNAL PROPERTIES
  // All tracked components; _invalid/_invalidHidden below are parallel arrays
  // whose index i corresponds to the component at _tracked[i].
  this._tracked = [];
  // Tracks invalid components showing messages. Indices correspond to this._tracked.
  this._invalid = [];
  // Tracks invalid components hiding (deferring) messages. Indices correspond to this._tracked.
  this._invalidHidden = [];
  // PUBLIC PROPERTIES
  this['invalidShown'] = false;
  this['invalidHidden'] = false;
};
/**
* Sets focus on first invalid component currently showing an error. This method does not set focus
* on components that are invalid and have deferred messages. For example, when a component is
* required, deferred validation is run. Any validation error raised is not shown to user right away,
* i.e., it is deferred.
* <p>
* To show hidden messages on all tracked components use showMessages() method. </p>
*
* @return {boolean} true if there is at least one invalid component to set focus on; false if
* unable to locate a component to focus on or there are no invalid components.
* @export
* @see #showMessages
* @memberof! oj.InvalidComponentTracker
* @instance
*/
oj.InvalidComponentTracker.prototype.focusOnFirstInvalid = function()
{
  // NOTE(review): _updateCounter is read here but never assigned in this
  // chunk - presumably bumped by the tracker's _update so the timer callback
  // can detect intervening state changes. Confirm against the full file.
  var firstInvalid = null, self = this, updateCounter = this._updateCounter;
  if (this['invalidShown'])
  {
    firstInvalid = this._getFirstInvalidComponent();
  }
  // Always call the focus handler on a timer, to give time for updates to be
  // applied on the component (oj.ComponentBinding.applyUpdates happens on a timer).
  setTimeout(function () {
    // Sometimes when this timer fires, firstInvalid may not have been determined
    // yet, or the invalid states could have changed between the timer being set
    // and the callback being called - recompute when the counter moved.
    firstInvalid = (updateCounter === self._updateCounter) ?
        firstInvalid || self._getFirstInvalidComponent() : self._getFirstInvalidComponent();
    if (firstInvalid)
    {
      // Call the protected Focus() method exposed on editable components.
      firstInvalid.call(firstInvalid, "Focus");
    }
  }, 1);
  // Note: reflects only the synchronous determination; the timer may still
  // focus a component even when this returned false.
  return firstInvalid ? true : false;
};
/**
* Shows hidden messages on all tracked components by calling showMessages() method on each tracked
* editable component.
*
* @example <caption>Show all hidden messages on tracked components:</caption>
* function ViewModel ()
* {
* self = this;
* var tracker = ko.observable();
* // ...
*
* showAllMessages : function ()
* {
* var trackerObj = ko.utils.unwrapObservable(self.tracker);
return trackerObj.showMessages();
* }
* }
*
* @export
* @memberof! oj.InvalidComponentTracker
* @instance
* @see oj.editableValue#showMessages
*/
oj.InvalidComponentTracker.prototype.showMessages = function()
{
  var comp, len, index;
  if (this['invalidHidden'])
  {
    len = this._invalidHidden.length;
    for (index = 0; index < len; index++)
    {
      if (this._invalidHidden[index])
      {
        // Invoke the widget bridge with itself as 'this', matching the
        // component.call(component, ...) convention used throughout this file.
        // The original passed the stale loop variable ('tr') as the context,
        // which was undefined on the first invocation.
        comp = this._tracked[index];
        comp.call(comp, "showMessages");
      }
    }
  }
};
// P R I V A T E M E T H O D S
/**
* Gets the first invalid component and returns the component to focus on.
*
* @returns the component instance that has focus or null
* @private
*/
oj.InvalidComponentTracker.prototype._getFirstInvalidComponent = function()
{
  // Scan tracked components in registration order; the first one flagged
  // invalid is the one that should receive focus.
  for (var index = 0; index < this._invalid.length; index++)
  {
    if (this._invalid[index])
    {
      return this._tracked[index];
    }
  }
  return null;
};
/**
* Removes the component from its tracked list.
*
* @param {Object} component being removed
* @returns {boolean} if internal state mutated; false otherwise
* @private
* @memberof! oj.InvalidComponentTracker
* @instance
*/
oj.InvalidComponentTracker.prototype._remove = function(component)
{
  // Locate the component in the tracked list.
  var foundAt = -1;
  for (var i = 0; i < this._tracked.length; i++)
  {
    if (this._tracked[i] === component)
    {
      foundAt = i;
      break;
    }
  }

  if (foundAt < 0)
  {
    return false;
  }

  // Drop the component together with its parallel validity flags, then
  // refresh the public invalidShown / invalidHidden properties.
  this._tracked.splice(foundAt, 1);
  this._invalid.splice(foundAt, 1);
  this._invalidHidden.splice(foundAt, 1);
  this._updateInvalidProperties();
  return true;
};
/**
* Updates the internal properties to reflect the current validity state of the component, using
* new messages.
*
* @param {Object} component the component that has the new messages
* @param {string} option
* @param {Array} value
* @returns {boolean} if internal state mutated; false otherwise
* @private
* @memberof! oj.InvalidComponentTracker
* @instance
*/
oj.InvalidComponentTracker.prototype._update = function(component, option, value)
{
  // Tracked components are callable bridge functions; calling with the
  // component itself as `this` invokes the named method/option lookup.
  var compValid = component.call(component, "isValid"),
      trackedIndex = -1, item, mutated = true, result, isDisabled, isReadOnly;

  // locate the index in tracked, for the component that was updated
  $.each(this._tracked, function(index, item)
  {
    if (trackedIndex < 0 && item === component)
    {
      trackedIndex = index;
      return;
    }
  });

  switch (option)
  {
    case oj.InvalidComponentTracker._OPTION_MESSAGES_SHOWN:
    case oj.InvalidComponentTracker._OPTION_MESSAGES_HIDDEN:
      result = false;
      if (value)
      {
        // start tracking component if not already doing it.
        if (trackedIndex < 0)
        {
          trackedIndex = this._tracked.push(component) - 1;
          this._initializeInvalidTrackers(trackedIndex, result);
        }
        if (!compValid)
        {
          if (oj.InvalidComponentTracker._hasInvalidMessages(value))
          {
            result = true;
            if (option === oj.InvalidComponentTracker._OPTION_MESSAGES_SHOWN)
            {
              // if component is disabled or readOnly but has messages showing, tracker stops
              // tracking component in its 'invalidShown' list. We do this because if property is
              // bound to a button, and if the only invalid component showing messages is disabled
              // the button would appear disabled visually, confusing the end-user.
              // E.g., disabled component can be initialized with messagesCustom making it invalid and
              // disabled.
              // NOTE(review): the comma at the end of the next line makes this a
              // single comma-expression statement; behavior is unchanged.
              isDisabled = component.call(component, "option", oj.InvalidComponentTracker._OPTION_DISABLED),
              isReadOnly = component.call(component, "option", oj.InvalidComponentTracker._OPTION_READONLY);
              result = isDisabled || isReadOnly ? false : true;
            }
          }
        }
        // Record the new validity flag and refresh the public properties.
        mutated = this._updateInvalidTracker(option, trackedIndex || 0, result);
        this._updateInvalidProperties();
        // update properties
        if (mutated)
        {
          // every time messages mutates, we track it.
          // The counter lets focusOnFirstInvalid detect state changes that
          // happen between scheduling and running its deferred callback.
          if (this._updateCounter === undefined)
          {
            this._updateCounter = 0;
          }
          this._updateCounter++;
        }
      }
      break;

    case oj.InvalidComponentTracker._OPTION_DISABLED:
    case oj.InvalidComponentTracker._OPTION_READONLY:
      // when component goes from enabled to disabled (or to readOnly) tracker updates invalidShown
      // to be false, since the component cannot be showing errors visually. Same goes for
      // invalidHidden.
      //
      // when component goes from disabled (or readOnly) to enabled validations are re-run and
      // component's messagesHidden and messagesShown are updated which result in invalidShown and
      // invalidHidden to be updated. This case is not handled here.
      mutated = false;
      if (value)
      {
        mutated = this._updateInvalidTracker(oj.InvalidComponentTracker._OPTION_MESSAGES_SHOWN,
          trackedIndex || 0, false);
        mutated = this._updateInvalidTracker(oj.InvalidComponentTracker._OPTION_MESSAGES_HIDDEN,
          trackedIndex || 0, false) || mutated;
        this._updateInvalidProperties();
      }
      break;
  }

  return mutated;
};
// Seeds both parallel validity arrays for a newly tracked component so the
// slots exist before any update touches them.
oj.InvalidComponentTracker.prototype._initializeInvalidTrackers = function(trackedIndex, result)
{
  if (this._invalid[trackedIndex] === undefined)
  {
    this._updateInvalidTracker(oj.InvalidComponentTracker._OPTION_MESSAGES_SHOWN, trackedIndex, result);
  }
  if (this._invalidHidden[trackedIndex] === undefined)
  {
    this._updateInvalidTracker(oj.InvalidComponentTracker._OPTION_MESSAGES_HIDDEN, trackedIndex, result);
  }
};
// Recomputes the public invalidShown / invalidHidden flags from the
// per-component tracking arrays.
oj.InvalidComponentTracker.prototype._updateInvalidProperties = function()
{
  this['invalidShown'] = (this._invalid.indexOf(true) !== -1);
  this['invalidHidden'] = (this._invalidHidden.indexOf(true) !== -1);
};
// Writes `value` into the tracking array selected by `option` at
// `trackedIndex`, appending a new slot when none exists yet.
// Returns true when the array actually changed.
oj.InvalidComponentTracker.prototype._updateInvalidTracker = function(option, trackedIndex, value)
{
  // Pick the parallel array that corresponds to the option being tracked;
  // unknown options fall through to a throwaway array (as in the original).
  var target;
  if (option === oj.InvalidComponentTracker._OPTION_MESSAGES_SHOWN)
  {
    target = this._invalid;
  }
  else if (option === oj.InvalidComponentTracker._OPTION_MESSAGES_HIDDEN)
  {
    target = this._invalidHidden;
  }
  else
  {
    target = [];
  }

  if (trackedIndex >= 0 && target[trackedIndex] !== undefined)
  {
    // Existing slot: only mutate when the value actually changes.
    if (target[trackedIndex] === value)
    {
      return false;
    }
    target.splice(trackedIndex, 1, value);
    return true;
  }

  // New slot: append and report a mutation.
  target.push(value);
  return true;
};
/**
* helper to determine if we have invalid messages among the list of messages that are currently
* showing i.e., that are showing.
*
* @param {!Array} messages list of all messages associated with component
* @returns {boolean}
* @private
* @memberof! oj.InvalidComponentTracker
* @instance
*/
oj.InvalidComponentTracker._hasInvalidMessages = function(messages)
{
  // Delegate severity interpretation to oj.Message; "invalid" here simply
  // means the message list is not considered valid by that check.
  var allValid = oj.Message.isValid(messages);
  return !allValid;
};
});
| mit |
sifcoin/sifcoin | src/bitcoinrpc.cpp | 46868 | // Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "init.h"
#include "util.h"
#include "sync.h"
#include "ui_interface.h"
#include "base58.h"
#include "bitcoinrpc.h"
#include "db.h"
#include <boost/asio.hpp>
#include <boost/asio/ip/v6_only.hpp>
#include <boost/bind.hpp>
#include <boost/filesystem.hpp>
#include <boost/foreach.hpp>
#include <boost/iostreams/concepts.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/asio/ssl.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/shared_ptr.hpp>
#include <list>
using namespace std;
using namespace boost;
using namespace boost::asio;
using namespace json_spirit;
// Key used by getwork/getblocktemplate miners.
// Allocated in StartRPCThreads, free'd in StopRPCThreads
CReserveKey* pMiningKey = NULL;

// Expected HTTP Basic credentials in "user:password" form, built from
// -rpcuser / -rpcpassword in StartRPCThreads.
static std::string strRPCUserColonPass;

// These are created by StartRPCThreads, destroyed in StopRPCThreads
static asio::io_service* rpc_io_service = NULL;
static ssl::context* rpc_ssl_context = NULL;
static boost::thread_group* rpc_worker_group = NULL;
// Default JSON-RPC listen port: 18372 on testnet, 8372 on mainnet.
static inline unsigned short GetDefaultRPCPort()
{
    if (GetBoolArg("-testnet", false))
        return 18372;
    return 8372;
}
// Builds a JSON-RPC error object of the form {"code": ..., "message": ...}.
Object JSONRPCError(int code, const string& message)
{
    Object objError;
    objError.push_back(Pair("code", code));
    objError.push_back(Pair("message", message));
    return objError;
}
void RPCTypeCheck(const Array& params,
const list<Value_type>& typesExpected,
bool fAllowNull)
{
unsigned int i = 0;
BOOST_FOREACH(Value_type t, typesExpected)
{
if (params.size() <= i)
break;
const Value& v = params[i];
if (!((v.type() == t) || (fAllowNull && (v.type() == null_type))))
{
string err = strprintf("Expected type %s, got %s",
Value_type_name[t], Value_type_name[v.type()]);
throw JSONRPCError(RPC_TYPE_ERROR, err);
}
i++;
}
}
void RPCTypeCheck(const Object& o,
const map<string, Value_type>& typesExpected,
bool fAllowNull)
{
BOOST_FOREACH(const PAIRTYPE(string, Value_type)& t, typesExpected)
{
const Value& v = find_value(o, t.first);
if (!fAllowNull && v.type() == null_type)
throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Missing %s", t.first.c_str()));
if (!((v.type() == t.second) || (fAllowNull && (v.type() == null_type))))
{
string err = strprintf("Expected type %s for %s, got %s",
Value_type_name[t.second], t.first.c_str(), Value_type_name[v.type()]);
throw JSONRPCError(RPC_TYPE_ERROR, err);
}
}
}
// Converts a JSON amount (whole coins, as a double) into internal int64
// base units (scaled by COIN). Rejects non-positive values, values above
// the 21,000,000 coin cap, and anything outside MoneyRange after scaling.
int64 AmountFromValue(const Value& value)
{
    double dAmount = value.get_real();
    bool fOutOfBounds = (dAmount <= 0.0) || (dAmount > 21000000.0);
    if (fOutOfBounds)
        throw JSONRPCError(RPC_TYPE_ERROR, "Invalid amount");
    int64 nAmount = roundint64(dAmount * COIN);
    if (!MoneyRange(nAmount))
        throw JSONRPCError(RPC_TYPE_ERROR, "Invalid amount");
    return nAmount;
}
// Converts an int64 amount in base units back to a JSON double in coins.
Value ValueFromAmount(int64 amount)
{
    double dAmount = (double)amount / (double)COIN;
    return dAmount;
}
// Serializes a 32-bit value (e.g. a block's compact "nBits" field) as a
// big-endian hex string. htonl() forces network (big-endian) byte order so
// the most significant byte is emitted first regardless of host
// endianness; the union reinterprets the value as raw bytes for HexStr().
std::string HexBits(unsigned int nBits)
{
    union {
        int32_t nBits;
        char cBits[4];
    } uBits;
    uBits.nBits = htonl((int32_t)nBits);
    return HexStr(BEGIN(uBits.cBits), END(uBits.cBits));
}
///
/// Note: This interface may still be subject to change.
///
// Builds the help text for a single command (strCommand non-empty) or a
// one-line summary of every registered command (strCommand empty). Help
// text is harvested by invoking each handler with fHelp=true and catching
// the std::exception it throws; setDone de-duplicates handlers that are
// registered under more than one name.
string CRPCTable::help(string strCommand) const
{
    string strRet;
    set<rpcfn_type> setDone;
    for (map<string, const CRPCCommand*>::const_iterator mi = mapCommands.begin(); mi != mapCommands.end(); ++mi)
    {
        const CRPCCommand *pcmd = mi->second;
        string strMethod = mi->first;
        // We already filter duplicates, but these deprecated screw up the sort order
        if (strMethod.find("label") != string::npos)
            continue;
        if (strCommand != "" && strMethod != strCommand)
            continue;
        try
        {
            Array params;
            rpcfn_type pfn = pcmd->actor;
            if (setDone.insert(pfn).second)
                // Invoking with fHelp=true makes the handler throw its usage text.
                (*pfn)(params, true);
        }
        catch (std::exception& e)
        {
            // Help text is returned in an exception
            string strHelp = string(e.what());
            if (strCommand == "")
                // Summary mode: keep only the first line of the usage text.
                if (strHelp.find('\n') != string::npos)
                    strHelp = strHelp.substr(0, strHelp.find('\n'));
            strRet += strHelp + "\n";
        }
    }
    if (strRet == "")
        strRet = strprintf("help: unknown command: %s\n", strCommand.c_str());
    // Drop the trailing newline added by the loop (or the fallback message).
    strRet = strRet.substr(0,strRet.size()-1);
    return strRet;
}
// RPC: help [command] -- returns usage text for one command, or a one-line
// summary of every command when no argument is given.
Value help(const Array& params, bool fHelp)
{
    if (fHelp || params.size() > 1)
        throw runtime_error(
            "help [command]\n"
            "List commands, or get help for a command.");

    string strCommand = (params.size() > 0) ? params[0].get_str() : string();
    return tableRPC.help(strCommand);
}
// RPC: stop -- initiates a clean server shutdown.
Value stop(const Array& params, bool fHelp)
{
    // Accept the deprecated and ignored 'detach' boolean argument
    if (fHelp || params.size() > 1)
        throw runtime_error(
            "stop\n"
            "Stop Sifcoin server.");

    // Shutdown takes long enough that this reply still reaches the client.
    StartShutdown();
    return "Sifcoin server stopping";
}
//
// Call Table
//
// Master dispatch table for every supported RPC command.
//   name:       method name as received over the wire
//   actor:      handler function implementing the call
//   okSafeMode: consumed by the dispatcher -- presumably whether the call is
//               permitted while the node is in safe mode (TODO confirm in
//               CRPCTable::execute, which is outside this view)
//   threadSafe: consumed by the dispatcher -- presumably whether the handler
//               may run without the global lock (TODO confirm likewise)
static const CRPCCommand vRPCCommands[] =
{ // name actor (function) okSafeMode threadSafe
  // ------------------------ ----------------------- ---------- ----------
    { "help", &help, true, true },
    { "stop", &stop, true, true },
    { "getblockcount", &getblockcount, true, false },
    { "getconnectioncount", &getconnectioncount, true, false },
    { "getpeerinfo", &getpeerinfo, true, false },
    { "addnode", &addnode, true, true },
    { "getaddednodeinfo", &getaddednodeinfo, true, true },
    { "getdifficulty", &getdifficulty, true, false },
    { "getgenerate", &getgenerate, true, false },
    { "setgenerate", &setgenerate, true, false },
    { "gethashespersec", &gethashespersec, true, false },
    { "getinfo", &getinfo, true, false },
    { "getmininginfo", &getmininginfo, true, false },
    { "getnewaddress", &getnewaddress, true, false },
    { "getaccountaddress", &getaccountaddress, true, false },
    { "setaccount", &setaccount, true, false },
    { "getaccount", &getaccount, false, false },
    { "getaddressesbyaccount", &getaddressesbyaccount, true, false },
    { "sendtoaddress", &sendtoaddress, false, false },
    { "getreceivedbyaddress", &getreceivedbyaddress, false, false },
    { "getreceivedbyaccount", &getreceivedbyaccount, false, false },
    { "listreceivedbyaddress", &listreceivedbyaddress, false, false },
    { "listreceivedbyaccount", &listreceivedbyaccount, false, false },
    { "backupwallet", &backupwallet, true, false },
    { "keypoolrefill", &keypoolrefill, true, false },
    { "walletpassphrase", &walletpassphrase, true, false },
    { "walletpassphrasechange", &walletpassphrasechange, false, false },
    { "walletlock", &walletlock, true, false },
    { "encryptwallet", &encryptwallet, false, false },
    { "validateaddress", &validateaddress, true, false },
    { "getbalance", &getbalance, false, false },
    { "move", &movecmd, false, false },
    { "sendfrom", &sendfrom, false, false },
    { "sendmany", &sendmany, false, false },
    { "addmultisigaddress", &addmultisigaddress, false, false },
    { "createmultisig", &createmultisig, true, true },
    { "getrawmempool", &getrawmempool, true, false },
    { "getblock", &getblock, false, false },
    { "getblockhash", &getblockhash, false, false },
    { "gettransaction", &gettransaction, false, false },
    { "listtransactions", &listtransactions, false, false },
    { "listaddressgroupings", &listaddressgroupings, false, false },
    { "signmessage", &signmessage, false, false },
    { "verifymessage", &verifymessage, false, false },
    { "getwork", &getwork, true, false },
    { "getwork2", &getwork2, true, false },
    { "listaccounts", &listaccounts, false, false },
    { "settxfee", &settxfee, false, false },
    { "getblocktemplate", &getblocktemplate, true, false },
    { "submitblock", &submitblock, false, false },
    { "listsinceblock", &listsinceblock, false, false },
    { "dumpprivkey", &dumpprivkey, true, false },
    { "importprivkey", &importprivkey, false, false },
    { "listunspent", &listunspent, false, false },
    { "getrawtransaction", &getrawtransaction, false, false },
    { "createrawtransaction", &createrawtransaction, false, false },
    { "decoderawtransaction", &decoderawtransaction, false, false },
    { "signrawtransaction", &signrawtransaction, false, false },
    { "sendrawtransaction", &sendrawtransaction, false, false },
    { "gettxoutsetinfo", &gettxoutsetinfo, true, false },
    { "gettxout", &gettxout, true, false },
    { "lockunspent", &lockunspent, false, false },
    { "listlockunspent", &listlockunspent, false, false },
};
// Populates the name -> command lookup map from the static command table.
CRPCTable::CRPCTable()
{
    const unsigned int nCommands = sizeof(vRPCCommands) / sizeof(vRPCCommands[0]);
    for (unsigned int vcidx = 0; vcidx < nCommands; vcidx++)
    {
        const CRPCCommand *pcmd = &vRPCCommands[vcidx];
        mapCommands[pcmd->name] = pcmd;
    }
}
// Looks up a command by name; returns NULL when the command is unknown.
const CRPCCommand *CRPCTable::operator[](string name) const
{
    map<string, const CRPCCommand*>::const_iterator it = mapCommands.find(name);
    return (it == mapCommands.end()) ? NULL : it->second;
}
//
// HTTP protocol
//
// This ain't Apache. We're just using HTTP header for the length field
// and to be compatible with other JSON-RPC implementations.
//
// Formats a complete HTTP/1.1 POST request carrying strMsg as the body,
// plus any caller-supplied extra headers.
string HTTPPost(const string& strMsg, const map<string,string>& mapRequestHeaders)
{
    ostringstream s;
    s << "POST / HTTP/1.1\r\n"
      << "User-Agent: bitcoin-json-rpc/" << FormatFullVersion() << "\r\n"
      << "Host: 127.0.0.1\r\n"
      << "Content-Type: application/json\r\n"
      << "Content-Length: " << strMsg.size() << "\r\n"
      << "Connection: close\r\n"
      << "Accept: application/json\r\n";
    for (map<string,string>::const_iterator it = mapRequestHeaders.begin();
         it != mapRequestHeaders.end(); ++it)
        s << it->first << ": " << it->second << "\r\n";
    s << "\r\n" << strMsg;
    return s.str();
}
// Returns the current UTC time formatted per RFC 1123 for use in HTTP
// Date: headers, e.g. "Sun, 06 Nov 1994 08:49:37 +0000".
// The locale is temporarily forced to "C" so weekday/month names are the
// POSIX English abbreviations regardless of the process locale.
// NOTE(review): gmtime() and setlocale() mutate process-global state and
// are not thread-safe; RPC worker threads may race here -- consider
// gmtime_r / thread-safe formatting if this ever corrupts headers.
string rfc1123Time()
{
    char buffer[64];
    time_t now;
    time(&now);
    struct tm* now_gmt = gmtime(&now);
    if (now_gmt == NULL)
        return string();  // gmtime can fail; avoid passing NULL to strftime
    string locale(setlocale(LC_TIME, NULL));
    setlocale(LC_TIME, "C"); // we want POSIX (aka "C") weekday/month strings
    strftime(buffer, sizeof(buffer), "%a, %d %b %Y %H:%M:%S +0000", now_gmt);
    setlocale(LC_TIME, locale.c_str());
    return string(buffer);
}
// Formats a complete HTTP response. A 401 gets a fixed HTML body prompting
// for Basic authentication; every other status carries strMsg as a JSON
// body. keepalive selects the Connection: header value.
static string HTTPReply(int nStatus, const string& strMsg, bool keepalive)
{
    if (nStatus == HTTP_UNAUTHORIZED)
        // Note: Content-Length 296 matches the fixed HTML body below.
        return strprintf("HTTP/1.0 401 Authorization Required\r\n"
            "Date: %s\r\n"
            "Server: bitcoin-json-rpc/%s\r\n"
            "WWW-Authenticate: Basic realm=\"jsonrpc\"\r\n"
            "Content-Type: text/html\r\n"
            "Content-Length: 296\r\n"
            "\r\n"
            "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"\r\n"
            "\"http://www.w3.org/TR/1999/REC-html401-19991224/loose.dtd\">\r\n"
            "<HTML>\r\n"
            "<HEAD>\r\n"
            "<TITLE>Error</TITLE>\r\n"
            "<META HTTP-EQUIV='Content-Type' CONTENT='text/html; charset=ISO-8859-1'>\r\n"
            "</HEAD>\r\n"
            "<BODY><H1>401 Unauthorized.</H1></BODY>\r\n"
            "</HTML>\r\n", rfc1123Time().c_str(), FormatFullVersion().c_str());
    // Map the status code to its standard reason phrase.
    const char *cStatus;
         if (nStatus == HTTP_OK) cStatus = "OK";
    else if (nStatus == HTTP_BAD_REQUEST) cStatus = "Bad Request";
    else if (nStatus == HTTP_FORBIDDEN) cStatus = "Forbidden";
    else if (nStatus == HTTP_NOT_FOUND) cStatus = "Not Found";
    else if (nStatus == HTTP_INTERNAL_SERVER_ERROR) cStatus = "Internal Server Error";
    else cStatus = "";
    return strprintf(
            "HTTP/1.1 %d %s\r\n"
            "Date: %s\r\n"
            "Connection: %s\r\n"
            "Content-Length: %"PRIszu"\r\n"
            "Content-Type: application/json\r\n"
            "Server: bitcoin-json-rpc/%s\r\n"
            "\r\n"
            "%s",
        nStatus,
        cStatus,
        rfc1123Time().c_str(),
        keepalive ? "keep-alive" : "close",
        strMsg.size(),
        FormatFullVersion().c_str(),
        strMsg.c_str());
}
bool ReadHTTPRequestLine(std::basic_istream<char>& stream, int &proto,
string& http_method, string& http_uri)
{
string str;
getline(stream, str);
// HTTP request line is space-delimited
vector<string> vWords;
boost::split(vWords, str, boost::is_any_of(" "));
if (vWords.size() < 2)
return false;
// HTTP methods permitted: GET, POST
http_method = vWords[0];
if (http_method != "GET" && http_method != "POST")
return false;
// HTTP URI must be an absolute path, relative to current host
http_uri = vWords[1];
if (http_uri.size() == 0 || http_uri[0] != '/')
return false;
// parse proto, if present
string strProto = "";
if (vWords.size() > 2)
strProto = vWords[2];
proto = 0;
const char *ver = strstr(strProto.c_str(), "HTTP/1.");
if (ver != NULL)
proto = atoi(ver+7);
return true;
}
int ReadHTTPStatus(std::basic_istream<char>& stream, int &proto)
{
string str;
getline(stream, str);
vector<string> vWords;
boost::split(vWords, str, boost::is_any_of(" "));
if (vWords.size() < 2)
return HTTP_INTERNAL_SERVER_ERROR;
proto = 0;
const char *ver = strstr(str.c_str(), "HTTP/1.");
if (ver != NULL)
proto = atoi(ver+7);
return atoi(vWords[1].c_str());
}
int ReadHTTPHeaders(std::basic_istream<char>& stream, map<string, string>& mapHeadersRet)
{
int nLen = 0;
loop
{
string str;
std::getline(stream, str);
if (str.empty() || str == "\r")
break;
string::size_type nColon = str.find(":");
if (nColon != string::npos)
{
string strHeader = str.substr(0, nColon);
boost::trim(strHeader);
boost::to_lower(strHeader);
string strValue = str.substr(nColon+1);
boost::trim(strValue);
mapHeadersRet[strHeader] = strValue;
if (strHeader == "content-length")
nLen = atoi(strValue.c_str());
}
}
return nLen;
}
// Reads HTTP headers followed by a Content-Length delimited body.
// Normalizes the "connection" header when the client did not specify one:
// HTTP/1.1 defaults to keep-alive, HTTP/1.0 to close.
// Returns an HTTP status code.
int ReadHTTPMessage(std::basic_istream<char>& stream, map<string,
                    string>& mapHeadersRet, string& strMessageRet,
                    int nProto)
{
    mapHeadersRet.clear();
    strMessageRet = "";

    // Read header
    int nLen = ReadHTTPHeaders(stream, mapHeadersRet);
    if (nLen < 0 || nLen > (int)MAX_SIZE)
        return HTTP_INTERNAL_SERVER_ERROR;

    // Read message
    if (nLen > 0)
    {
        vector<char> vch(nLen);
        stream.read(&vch[0], nLen);
        strMessageRet = string(vch.begin(), vch.end());
    }

    string sConHdr = mapHeadersRet["connection"];
    if (sConHdr != "close" && sConHdr != "keep-alive")
        mapHeadersRet["connection"] = (nProto >= 1) ? "keep-alive" : "close";

    return HTTP_OK;
}
// Checks the HTTP Basic "Authorization" header against the configured
// rpcuser:rpcpassword credentials.
// The credential comparison is done byte-by-byte over the full length so
// that timing does not leak how many leading characters matched (a plain
// string == short-circuits at the first mismatch, enabling a remote
// timing attack on the RPC password).
bool HTTPAuthorized(map<string, string>& mapHeaders)
{
    string strAuth = mapHeaders["authorization"];
    if (strAuth.substr(0,6) != "Basic ")
        return false;
    string strUserPass64 = strAuth.substr(6); boost::trim(strUserPass64);
    string strUserPass = DecodeBase64(strUserPass64);

    // Constant-time comparison (length check leaks only the length,
    // which is not secret).
    if (strUserPass.size() != strRPCUserColonPass.size())
        return false;
    unsigned char chDiff = 0;
    for (string::size_type i = 0; i < strUserPass.size(); i++)
        chDiff |= (unsigned char)(strUserPass[i] ^ strRPCUserColonPass[i]);
    return chDiff == 0;
}
//
// JSON-RPC protocol. Bitcoin speaks version 1.0 for maximum compatibility,
// but uses JSON-RPC 1.1/2.0 standards for parts of the 1.0 standard that were
// unspecified (HTTP errors and contents of 'error').
//
// 1.0 spec: http://json-rpc.org/wiki/specification
// 1.2 spec: http://groups.google.com/group/json-rpc/web/json-rpc-over-http
// http://www.codeproject.com/KB/recipes/JSON_Spirit.aspx
//
// Serializes a JSON-RPC 1.0 request: {"method": ..., "params": ..., "id": ...}
// followed by a newline.
string JSONRPCRequest(const string& strMethod, const Array& params, const Value& id)
{
    Object request;
    request.push_back(Pair("method", strMethod));
    request.push_back(Pair("params", params));
    request.push_back(Pair("id", id));
    string strRequest = write_string(Value(request), false);
    return strRequest + "\n";
}
// Builds a JSON-RPC reply object. Per the 1.0 convention only one of
// "result"/"error" is meaningful: a non-null error forces the result
// field to null.
Object JSONRPCReplyObj(const Value& result, const Value& error, const Value& id)
{
    bool fError = (error.type() != null_type);
    Object reply;
    reply.push_back(Pair("result", fError ? Value::null : result));
    reply.push_back(Pair("error", error));
    reply.push_back(Pair("id", id));
    return reply;
}
// Serializes a JSON-RPC reply object followed by a trailing newline.
string JSONRPCReply(const Value& result, const Value& error, const Value& id)
{
    return write_string(Value(JSONRPCReplyObj(result, error, id)), false) + "\n";
}
// Writes a JSON-RPC error reply over the HTTP stream, mapping well-known
// JSON-RPC error codes onto HTTP status codes (default 500).
void ErrorReply(std::ostream& stream, const Object& objError, const Value& id)
{
    int code = find_value(objError, "code").get_int();
    int nStatus = HTTP_INTERNAL_SERVER_ERROR;
    if (code == RPC_INVALID_REQUEST)
        nStatus = HTTP_BAD_REQUEST;
    else if (code == RPC_METHOD_NOT_FOUND)
        nStatus = HTTP_NOT_FOUND;

    string strReply = JSONRPCReply(Value::null, objError, id);
    stream << HTTPReply(nStatus, strReply, false) << std::flush;
}
// Decides whether an RPC client at 'address' may connect. Loopback
// addresses are always allowed; anything else must match one of the
// -rpcallowip wildcard patterns.
bool ClientAllowed(const boost::asio::ip::address& address)
{
    // Normalize IPv4-compatible / IPv4-mapped IPv6 addresses to plain IPv4
    // so the checks below treat them uniformly.
    if (address.is_v6()
     && (address.to_v6().is_v4_compatible() || address.to_v6().is_v4_mapped()))
        return ClientAllowed(address.to_v6().to_v4());

    if (address == asio::ip::address_v4::loopback()
     || address == asio::ip::address_v6::loopback())
        return true;

    // Any IPv4 address in 127.0.0.0/8 counts as loopback too.
    if (address.is_v4()
     && (address.to_v4().to_ulong() & 0xff000000) == 0x7f000000)
        return true;

    const string strAddress = address.to_string();
    const vector<string>& vAllow = mapMultiArgs["-rpcallowip"];
    BOOST_FOREACH(string strAllow, vAllow)
    {
        if (WildcardMatch(strAddress, strAllow))
            return true;
    }
    return false;
}
//
// IOStream device that speaks SSL but can also speak non-SSL
//
// Boost.Iostreams bidirectional device over an asio SSL stream that can
// also pass bytes through unencrypted (fUseSSL=false). The SSL handshake
// is performed lazily on first read/write.
template <typename Protocol>
class SSLIOStreamDevice : public iostreams::device<iostreams::bidirectional> {
public:
    // streamIn must outlive this device; fUseSSLIn selects SSL vs plain I/O.
    SSLIOStreamDevice(asio::ssl::stream<typename Protocol::socket> &streamIn, bool fUseSSLIn) : stream(streamIn)
    {
        fUseSSL = fUseSSLIn;
        fNeedHandshake = fUseSSLIn;
    }

    // Runs the SSL handshake once, in the given role; a no-op for plain
    // connections or after the first call.
    void handshake(ssl::stream_base::handshake_type role)
    {
        if (!fNeedHandshake) return;
        fNeedHandshake = false;
        stream.handshake(role);
    }
    std::streamsize read(char* s, std::streamsize n)
    {
        handshake(ssl::stream_base::server); // HTTPS servers read first
        if (fUseSSL) return stream.read_some(asio::buffer(s, n));
        return stream.next_layer().read_some(asio::buffer(s, n));
    }
    std::streamsize write(const char* s, std::streamsize n)
    {
        handshake(ssl::stream_base::client); // HTTPS clients write first
        if (fUseSSL) return asio::write(stream, asio::buffer(s, n));
        return asio::write(stream.next_layer(), asio::buffer(s, n));
    }
    // Resolves server:port and tries each endpoint until one connects.
    // Returns false when every endpoint fails.
    bool connect(const std::string& server, const std::string& port)
    {
        ip::tcp::resolver resolver(stream.get_io_service());
        ip::tcp::resolver::query query(server.c_str(), port.c_str());
        ip::tcp::resolver::iterator endpoint_iterator = resolver.resolve(query);
        ip::tcp::resolver::iterator end;
        boost::system::error_code error = asio::error::host_not_found;
        while (error && endpoint_iterator != end)
        {
            stream.lowest_layer().close();
            stream.lowest_layer().connect(*endpoint_iterator++, error);
        }
        if (error)
            return false;
        return true;
    }

private:
    bool fNeedHandshake;  // true until the SSL handshake has been run
    bool fUseSSL;
    asio::ssl::stream<typename Protocol::socket>& stream;
};
// Abstract base for an accepted RPC client connection, hiding whether the
// underlying transport is SSL or plain TCP.
class AcceptedConnection
{
public:
    virtual ~AcceptedConnection() {}

    // Bidirectional iostream used to read the request and write the reply.
    virtual std::iostream& stream() = 0;
    virtual std::string peer_address_to_string() const = 0;
    virtual void close() = 0;
};
// Concrete accepted connection over Protocol (ip::tcp in practice), owning
// the SSL stream and the iostreams facade handed to callers.
template <typename Protocol>
class AcceptedConnectionImpl : public AcceptedConnection
{
public:
    AcceptedConnectionImpl(
            asio::io_service& io_service,
            ssl::context &context,
            bool fUseSSL) :
        sslStream(io_service, context),
        _d(sslStream, fUseSSL),
        _stream(_d)
    {
    }

    virtual std::iostream& stream()
    {
        return _stream;
    }

    virtual std::string peer_address_to_string() const
    {
        return peer.address().to_string();
    }

    virtual void close()
    {
        _stream.close();
    }

    typename Protocol::endpoint peer;  // filled in by async_accept
    asio::ssl::stream<typename Protocol::socket> sslStream;

private:
    SSLIOStreamDevice<Protocol> _d;                           // SSL/plain device over sslStream
    iostreams::stream< SSLIOStreamDevice<Protocol> > _stream; // iostream facade over _d
};
void ServiceConnection(AcceptedConnection *conn);
// Forward declaration required for RPCListen
template <typename Protocol, typename SocketAcceptorService>
static void RPCAcceptHandler(boost::shared_ptr< basic_socket_acceptor<Protocol, SocketAcceptorService> > acceptor,
ssl::context& context,
bool fUseSSL,
AcceptedConnection* conn,
const boost::system::error_code& error);
/**
* Sets up I/O resources to accept and handle a new connection.
*/
template <typename Protocol, typename SocketAcceptorService>
static void RPCListen(boost::shared_ptr< basic_socket_acceptor<Protocol, SocketAcceptorService> > acceptor,
                      ssl::context& context,
                      const bool fUseSSL)
{
    // Accept connection
    // The connection object is created up front; ownership passes to
    // RPCAcceptHandler, which deletes it on error or after servicing.
    AcceptedConnectionImpl<Protocol>* conn = new AcceptedConnectionImpl<Protocol>(acceptor->get_io_service(), context, fUseSSL);

    acceptor->async_accept(
            conn->sslStream.lowest_layer(),
            conn->peer,
            boost::bind(&RPCAcceptHandler<Protocol, SocketAcceptorService>,
                acceptor,
                boost::ref(context),
                fUseSSL,
                conn,
                boost::asio::placeholders::error));
}
/**
* Accept and handle incoming connection.
*/
template <typename Protocol, typename SocketAcceptorService>
static void RPCAcceptHandler(boost::shared_ptr< basic_socket_acceptor<Protocol, SocketAcceptorService> > acceptor,
                             ssl::context& context,
                             const bool fUseSSL,
                             AcceptedConnection* conn,
                             const boost::system::error_code& error)
{
    // Immediately start accepting new connections, except when we're cancelled or our socket is closed.
    if (error != asio::error::operation_aborted && acceptor->is_open())
        RPCListen(acceptor, context, fUseSSL);

    // Only TCP connections carry a peer address we can filter on.
    AcceptedConnectionImpl<ip::tcp>* tcp_conn = dynamic_cast< AcceptedConnectionImpl<ip::tcp>* >(conn);

    // TODO: Actually handle errors
    if (error)
    {
        delete conn;
    }
    // Restrict callers by IP. It is important to
    // do this before starting client thread, to filter out
    // certain DoS and misbehaving clients.
    else if (tcp_conn && !ClientAllowed(tcp_conn->peer.address()))
    {
        // Only send a 403 if we're not using SSL to prevent a DoS during the SSL handshake.
        if (!fUseSSL)
            conn->stream() << HTTPReply(HTTP_FORBIDDEN, "", false) << std::flush;
        delete conn;
    }
    else {
        // Services the connection synchronously on this io_service worker
        // thread, then tears it down.
        ServiceConnection(conn);
        conn->close();
        delete conn;
    }
}
// Brings up the JSON-RPC server: validates credentials, configures
// optional SSL, binds the listening socket(s) (dual-stack IPv6/IPv4 with
// an IPv4 fallback), and spawns -rpcthreads worker threads running the
// shared io_service.
void StartRPCThreads()
{
    // getwork/getblocktemplate mining rewards paid here:
    pMiningKey = new CReserveKey(pwalletMain);

    strRPCUserColonPass = mapArgs["-rpcuser"] + ":" + mapArgs["-rpcpassword"];
    // Refuse to start with a missing password, or with user == password.
    if ((mapArgs["-rpcpassword"] == "") ||
        (mapArgs["-rpcuser"] == mapArgs["-rpcpassword"]))
    {
        // Suggest a random password in the error message.
        unsigned char rand_pwd[32];
        RAND_bytes(rand_pwd, 32);
        string strWhatAmI = "To use bitcoind";
        if (mapArgs.count("-server"))
            strWhatAmI = strprintf(_("To use the %s option"), "\"-server\"");
        else if (mapArgs.count("-daemon"))
            strWhatAmI = strprintf(_("To use the %s option"), "\"-daemon\"");
        uiInterface.ThreadSafeMessageBox(strprintf(
            _("%s, you must set a rpcpassword in the configuration file:\n"
              "%s\n"
              "It is recommended you use the following random password:\n"
              "rpcuser=bitcoinrpc\n"
              "rpcpassword=%s\n"
              "(you do not need to remember this password)\n"
              "The username and password MUST NOT be the same.\n"
              "If the file does not exist, create it with owner-readable-only file permissions.\n"
              "It is also recommended to set alertnotify so you are notified of problems;\n"
              "for example: alertnotify=echo %%s | mail -s \"Bitcoin Alert\" admin@foo.com\n"),
                strWhatAmI.c_str(),
                GetConfigFile().string().c_str(),
                EncodeBase58(&rand_pwd[0],&rand_pwd[0]+32).c_str()),
            "", CClientUIInterface::MSG_ERROR);
        StartShutdown();
        return;
    }

    assert(rpc_io_service == NULL);
    rpc_io_service = new asio::io_service();
    rpc_ssl_context = new ssl::context(*rpc_io_service, ssl::context::sslv23);

    const bool fUseSSL = GetBoolArg("-rpcssl");

    if (fUseSSL)
    {
        rpc_ssl_context->set_options(ssl::context::no_sslv2);

        // Certificate chain and private key paths are relative to the data
        // directory unless given as absolute paths.
        filesystem::path pathCertFile(GetArg("-rpcsslcertificatechainfile", "server.cert"));
        if (!pathCertFile.is_complete()) pathCertFile = filesystem::path(GetDataDir()) / pathCertFile;
        if (filesystem::exists(pathCertFile)) rpc_ssl_context->use_certificate_chain_file(pathCertFile.string());
        else printf("ThreadRPCServer ERROR: missing server certificate file %s\n", pathCertFile.string().c_str());

        filesystem::path pathPKFile(GetArg("-rpcsslprivatekeyfile", "server.pem"));
        if (!pathPKFile.is_complete()) pathPKFile = filesystem::path(GetDataDir()) / pathPKFile;
        if (filesystem::exists(pathPKFile)) rpc_ssl_context->use_private_key_file(pathPKFile.string(), ssl::context::pem);
        else printf("ThreadRPCServer ERROR: missing server private key file %s\n", pathPKFile.string().c_str());

        string strCiphers = GetArg("-rpcsslciphers", "TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH");
        SSL_CTX_set_cipher_list(rpc_ssl_context->impl(), strCiphers.c_str());
    }

    // Try a dual IPv6/IPv4 socket, falling back to separate IPv4 and IPv6 sockets
    // With no -rpcallowip, bind loopback only.
    const bool loopback = !mapArgs.count("-rpcallowip");
    asio::ip::address bindAddress = loopback ? asio::ip::address_v6::loopback() : asio::ip::address_v6::any();
    ip::tcp::endpoint endpoint(bindAddress, GetArg("-rpcport", GetDefaultRPCPort()));
    boost::system::error_code v6_only_error;
    boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(*rpc_io_service));

    bool fListening = false;
    std::string strerr;
    try
    {
        acceptor->open(endpoint.protocol());
        acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));

        // Try making the socket dual IPv6/IPv4 (if listening on the "any" address)
        acceptor->set_option(boost::asio::ip::v6_only(loopback), v6_only_error);

        acceptor->bind(endpoint);
        acceptor->listen(socket_base::max_connections);

        RPCListen(acceptor, *rpc_ssl_context, fUseSSL);

        fListening = true;
    }
    catch(boost::system::system_error &e)
    {
        strerr = strprintf(_("An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s"), endpoint.port(), e.what());
    }

    try {
        // If dual IPv6/IPv4 failed (or we're opening loopback interfaces only), open IPv4 separately
        if (!fListening || loopback || v6_only_error)
        {
            bindAddress = loopback ? asio::ip::address_v4::loopback() : asio::ip::address_v4::any();
            endpoint.address(bindAddress);

            acceptor.reset(new ip::tcp::acceptor(*rpc_io_service));
            acceptor->open(endpoint.protocol());
            acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
            acceptor->bind(endpoint);
            acceptor->listen(socket_base::max_connections);

            RPCListen(acceptor, *rpc_ssl_context, fUseSSL);

            fListening = true;
        }
    }
    catch(boost::system::system_error &e)
    {
        strerr = strprintf(_("An error occurred while setting up the RPC port %u for listening on IPv4: %s"), endpoint.port(), e.what());
    }

    if (!fListening) {
        uiInterface.ThreadSafeMessageBox(strerr, "", CClientUIInterface::MSG_ERROR);
        StartShutdown();
        return;
    }

    rpc_worker_group = new boost::thread_group();
    for (int i = 0; i < GetArg("-rpcthreads", 4); i++)
        rpc_worker_group->create_thread(boost::bind(&asio::io_service::run, rpc_io_service));
}
// Tears down the RPC machinery: releases the mining key, stops the ASIO
// service, joins all worker threads, then frees the RPC globals.
void StopRPCThreads()
{
    delete pMiningKey;
    pMiningKey = NULL;

    if (rpc_io_service == NULL)
        return;

    rpc_io_service->stop();
    rpc_worker_group->join_all();

    delete rpc_worker_group;
    rpc_worker_group = NULL;
    delete rpc_ssl_context;
    rpc_ssl_context = NULL;
    delete rpc_io_service;
    rpc_io_service = NULL;
}
// Parsed representation of one JSON-RPC request envelope.
// Populated by parse(); throws JSONRPCError(RPC_INVALID_REQUEST) on a
// malformed envelope.
class JSONRequest
{
public:
    Value id;          // client-supplied request id, echoed back in the reply
    string strMethod;  // RPC method name
    Array params;      // positional parameters (empty array if absent)

    JSONRequest() { id = Value::null; }
    void parse(const Value& valRequest);
};
void JSONRequest::parse(const Value& valRequest)
{
// Parse request
if (valRequest.type() != obj_type)
throw JSONRPCError(RPC_INVALID_REQUEST, "Invalid Request object");
const Object& request = valRequest.get_obj();
// Parse id now so errors from here on will have the id
id = find_value(request, "id");
// Parse method
Value valMethod = find_value(request, "method");
if (valMethod.type() == null_type)
throw JSONRPCError(RPC_INVALID_REQUEST, "Missing method");
if (valMethod.type() != str_type)
throw JSONRPCError(RPC_INVALID_REQUEST, "Method must be a string");
strMethod = valMethod.get_str();
if (strMethod != "getwork" && strMethod != "getwork2" && strMethod != "getblocktemplate")
printf("ThreadRPCServer method=%s\n", strMethod.c_str());
// Parse params
Value valParams = find_value(request, "params");
if (valParams.type() == array_type)
params = valParams.get_array();
else if (valParams.type() == null_type)
params = Array();
else
throw JSONRPCError(RPC_INVALID_REQUEST, "Params must be an array");
}
// Execute one request from a JSON-RPC batch and wrap the outcome
// (result or error) in a reply object carrying the request id.
static Object JSONRPCExecOne(const Value& req)
{
    JSONRequest jreq;
    Object reply;
    try {
        jreq.parse(req);
        reply = JSONRPCReplyObj(tableRPC.execute(jreq.strMethod, jreq.params),
                                Value::null, jreq.id);
    }
    catch (Object& objError) {
        // JSONRPCError objects thrown by handlers pass through verbatim.
        reply = JSONRPCReplyObj(Value::null, objError, jreq.id);
    }
    catch (std::exception& e) {
        // Anything else becomes a generic parse error with the message attached.
        reply = JSONRPCReplyObj(Value::null,
                                JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id);
    }
    return reply;
}
// Execute every request in a JSON-RPC batch and serialise the replies
// as a newline-terminated JSON array.
static string JSONRPCExecBatch(const Array& vReq)
{
    Array replies;
    for (Array::size_type idx = 0; idx < vReq.size(); ++idx)
        replies.push_back(JSONRPCExecOne(vReq[idx]));

    return write_string(Value(replies), false) + "\n";
}
// Serve JSON-RPC over one accepted HTTP connection.  Handles keep-alive
// requests in a loop until the client sends "Connection: close", a request
// fails to read/authorize, or an error reply is sent (every error path
// breaks out of the loop, closing the connection).
void ServiceConnection(AcceptedConnection *conn)
{
    bool fRun = true;
    while (fRun)
    {
        int nProto = 0;
        map<string, string> mapHeaders;
        string strRequest, strMethod, strURI;

        // Read HTTP request line
        if (!ReadHTTPRequestLine(conn->stream(), nProto, strMethod, strURI))
            break;

        // Read HTTP message headers and body
        ReadHTTPMessage(conn->stream(), mapHeaders, strRequest, nProto);

        // RPC is only served at the root path.
        if (strURI != "/") {
            conn->stream() << HTTPReply(HTTP_NOT_FOUND, "", false) << std::flush;
            break;
        }

        // Check authorization
        if (mapHeaders.count("authorization") == 0)
        {
            conn->stream() << HTTPReply(HTTP_UNAUTHORIZED, "", false) << std::flush;
            break;
        }
        if (!HTTPAuthorized(mapHeaders))
        {
            printf("ThreadRPCServer incorrect password attempt from %s\n", conn->peer_address_to_string().c_str());
            /* Deter brute-forcing short passwords.
               If this results in a DOS the user really
               shouldn't have their RPC port exposed.*/
            if (mapArgs["-rpcpassword"].size() < 20)
                MilliSleep(250);

            conn->stream() << HTTPReply(HTTP_UNAUTHORIZED, "", false) << std::flush;
            break;
        }
        // Honour the client's request to close after this reply.
        if (mapHeaders["connection"] == "close")
            fRun = false;

        JSONRequest jreq;
        try
        {
            // Parse request
            Value valRequest;
            if (!read_string(strRequest, valRequest))
                throw JSONRPCError(RPC_PARSE_ERROR, "Parse error");

            string strReply;

            // singleton request
            if (valRequest.type() == obj_type) {
                jreq.parse(valRequest);

                Value result = tableRPC.execute(jreq.strMethod, jreq.params);

                // Send reply
                strReply = JSONRPCReply(result, Value::null, jreq.id);

            // array of requests (JSON-RPC batch)
            } else if (valRequest.type() == array_type)
                strReply = JSONRPCExecBatch(valRequest.get_array());
            else
                throw JSONRPCError(RPC_PARSE_ERROR, "Top-level object parse error");

            conn->stream() << HTTPReply(HTTP_OK, strReply, fRun) << std::flush;
        }
        catch (Object& objError)
        {
            // Handler-raised JSONRPCError: report and drop the connection.
            ErrorReply(conn->stream(), objError, jreq.id);
            break;
        }
        catch (std::exception& e)
        {
            ErrorReply(conn->stream(), JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id);
            break;
        }
    }
}
// Dispatch a method call to its registered handler.
// Throws JSONRPCError for unknown methods, safe-mode restrictions, or any
// std::exception raised by the handler (wrapped as RPC_MISC_ERROR).
json_spirit::Value CRPCTable::execute(const std::string &strMethod, const json_spirit::Array &params) const
{
    // Find method
    const CRPCCommand *pcmd = tableRPC[strMethod];
    if (!pcmd)
        throw JSONRPCError(RPC_METHOD_NOT_FOUND, "Method not found");

    // Observe safe mode: refuse unsafe commands while a warning is active,
    // unless the user explicitly disabled safe mode.
    string strWarning = GetWarnings("rpc");
    if (strWarning != "" && !GetBoolArg("-disablesafemode") &&
        !pcmd->okSafeMode)
        throw JSONRPCError(RPC_FORBIDDEN_BY_SAFE_MODE, string("Safe mode: ") + strWarning);

    try
    {
        // Execute
        Value result;
        {
            if (pcmd->threadSafe)
                result = pcmd->actor(params, false);
            else {
                // Non-thread-safe handlers run under the main and wallet locks.
                LOCK2(cs_main, pwalletMain->cs_wallet);
                result = pcmd->actor(params, false);
            }
        }
        return result;
    }
    catch (std::exception& e)
    {
        throw JSONRPCError(RPC_MISC_ERROR, e.what());
    }
}
// Client side of the RPC interface: connect to the configured server,
// send one JSON-RPC call over HTTP (optionally SSL), and return the parsed
// reply object.  Throws runtime_error on configuration, transport,
// authentication, or parse failures.
Object CallRPC(const string& strMethod, const Array& params)
{
    if (mapArgs["-rpcuser"] == "" && mapArgs["-rpcpassword"] == "")
        throw runtime_error(strprintf(
            _("You must set rpcpassword=<password> in the configuration file:\n%s\n"
              "If the file does not exist, create it with owner-readable-only file permissions."),
                GetConfigFile().string().c_str()));

    // Connect to localhost (or -rpcconnect), with SSL if -rpcssl is set.
    bool fUseSSL = GetBoolArg("-rpcssl");
    asio::io_service io_service;
    ssl::context context(io_service, ssl::context::sslv23);
    context.set_options(ssl::context::no_sslv2);
    asio::ssl::stream<asio::ip::tcp::socket> sslStream(io_service, context);
    SSLIOStreamDevice<asio::ip::tcp> d(sslStream, fUseSSL);
    iostreams::stream< SSLIOStreamDevice<asio::ip::tcp> > stream(d);
    if (!d.connect(GetArg("-rpcconnect", "127.0.0.1"), GetArg("-rpcport", itostr(GetDefaultRPCPort()))))
        throw runtime_error("couldn't connect to server");

    // HTTP basic authentication
    string strUserPass64 = EncodeBase64(mapArgs["-rpcuser"] + ":" + mapArgs["-rpcpassword"]);
    map<string, string> mapRequestHeaders;
    mapRequestHeaders["Authorization"] = string("Basic ") + strUserPass64;

    // Send request
    string strRequest = JSONRPCRequest(strMethod, params, 1);
    string strPost = HTTPPost(strRequest, mapRequestHeaders);
    stream << strPost << std::flush;

    // Receive HTTP reply status
    int nProto = 0;
    int nStatus = ReadHTTPStatus(stream, nProto);

    // Receive HTTP reply message headers and body
    map<string, string> mapHeaders;
    string strReply;
    ReadHTTPMessage(stream, mapHeaders, strReply, nProto);

    // Map transport-level failures to descriptive exceptions.  400/404/500
    // are allowed through because they carry JSON-RPC error bodies.
    if (nStatus == HTTP_UNAUTHORIZED)
        throw runtime_error("incorrect rpcuser or rpcpassword (authorization failed)");
    else if (nStatus >= 400 && nStatus != HTTP_BAD_REQUEST && nStatus != HTTP_NOT_FOUND && nStatus != HTTP_INTERNAL_SERVER_ERROR)
        throw runtime_error(strprintf("server returned HTTP error %d", nStatus));
    else if (strReply.empty())
        throw runtime_error("no response from server");

    // Parse reply
    Value valReply;
    if (!read_string(strReply, valReply))
        throw runtime_error("couldn't parse reply from server");
    const Object& reply = valReply.get_obj();
    if (reply.empty())
        throw runtime_error("expected reply to have result, error and id properties");

    return reply;
}
// Coerce a json_spirit Value to the target type T.  String inputs are first
// re-parsed as raw JSON, so the string "123" becomes the number 123 before
// conversion.  When fAllowNull is set, null values are left untouched.
template<typename T>
void ConvertTo(Value& value, bool fAllowNull=false)
{
    if (fAllowNull && value.type() == null_type)
        return;

    if (value.type() != str_type)
    {
        value = value.get_value<T>();
        return;
    }

    // Reinterpret the string contents as an unquoted JSON value.
    const string strJSON = value.get_str();
    Value parsed;
    if (!read_string(strJSON, parsed))
        throw runtime_error(string("Error parsing JSON:") + strJSON);
    ConvertTo<T>(parsed, fAllowNull);
    value = parsed;
}
// Convert command-line string parameters to the JSON types each RPC method
// expects.  Every argument arrives as a string; entries below re-parse
// specific positions (by method name and index) into bool/int/double/
// object/array.  Methods not listed keep all-string parameters.
Array RPCConvertValues(const std::string &strMethod, const std::vector<std::string> &strParams)
{
    Array params;
    BOOST_FOREACH(const std::string &param, strParams)
        params.push_back(param);

    int n = params.size();

    //
    // Special case non-string parameter types
    //
    if (strMethod == "stop"                   && n > 0) ConvertTo<bool>(params[0]);
    if (strMethod == "getaddednodeinfo"       && n > 0) ConvertTo<bool>(params[0]);
    if (strMethod == "setgenerate"            && n > 0) ConvertTo<bool>(params[0]);
    if (strMethod == "setgenerate"            && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "sendtoaddress"          && n > 1) ConvertTo<double>(params[1]);
    if (strMethod == "settxfee"               && n > 0) ConvertTo<double>(params[0]);
    if (strMethod == "getreceivedbyaddress"   && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "getreceivedbyaccount"   && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "listreceivedbyaddress"  && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "listreceivedbyaddress"  && n > 1) ConvertTo<bool>(params[1]);
    if (strMethod == "listreceivedbyaccount"  && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "listreceivedbyaccount"  && n > 1) ConvertTo<bool>(params[1]);
    if (strMethod == "getbalance"             && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "getblockhash"           && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "move"                   && n > 2) ConvertTo<double>(params[2]);
    if (strMethod == "move"                   && n > 3) ConvertTo<boost::int64_t>(params[3]);
    if (strMethod == "sendfrom"               && n > 2) ConvertTo<double>(params[2]);
    if (strMethod == "sendfrom"               && n > 3) ConvertTo<boost::int64_t>(params[3]);
    if (strMethod == "listtransactions"       && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "listtransactions"       && n > 2) ConvertTo<boost::int64_t>(params[2]);
    if (strMethod == "listaccounts"           && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "walletpassphrase"       && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "getblocktemplate"       && n > 0) ConvertTo<Object>(params[0]);
    if (strMethod == "listsinceblock"         && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "sendmany"               && n > 1) ConvertTo<Object>(params[1]);
    if (strMethod == "sendmany"               && n > 2) ConvertTo<boost::int64_t>(params[2]);
    if (strMethod == "addmultisigaddress"     && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "addmultisigaddress"     && n > 1) ConvertTo<Array>(params[1]);
    if (strMethod == "createmultisig"         && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "createmultisig"         && n > 1) ConvertTo<Array>(params[1]);
    if (strMethod == "listunspent"            && n > 0) ConvertTo<boost::int64_t>(params[0]);
    if (strMethod == "listunspent"            && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "listunspent"            && n > 2) ConvertTo<Array>(params[2]);
    if (strMethod == "getrawtransaction"      && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "createrawtransaction"   && n > 0) ConvertTo<Array>(params[0]);
    if (strMethod == "createrawtransaction"   && n > 1) ConvertTo<Object>(params[1]);
    // fAllowNull=true: a JSON null is a valid placeholder for these.
    if (strMethod == "signrawtransaction"     && n > 1) ConvertTo<Array>(params[1], true);
    if (strMethod == "signrawtransaction"     && n > 2) ConvertTo<Array>(params[2], true);
    if (strMethod == "gettxout"               && n > 1) ConvertTo<boost::int64_t>(params[1]);
    if (strMethod == "gettxout"               && n > 2) ConvertTo<bool>(params[2]);
    if (strMethod == "lockunspent"            && n > 0) ConvertTo<bool>(params[0]);
    if (strMethod == "lockunspent"            && n > 1) ConvertTo<Array>(params[1]);
    if (strMethod == "importprivkey"          && n > 2) ConvertTo<bool>(params[2]);

    return params;
}
// Entry point for the command-line RPC client.  Skips leading switches,
// treats the first remaining argument as the method name and the rest as
// parameters, calls the server, and prints the result.
// Returns a process exit code: 0 on success, abs(JSON-RPC error code) when
// the server replied with an error, 87 on a local exception.
int CommandLineRPC(int argc, char *argv[])
{
    string strPrint;
    int nRet = 0;
    try
    {
        // Skip switches
        while (argc > 1 && IsSwitchChar(argv[1][0]))
        {
            argc--;
            argv++;
        }

        // Method
        if (argc < 2)
            throw runtime_error("too few parameters");
        string strMethod = argv[1];

        // Parameters default to strings
        std::vector<std::string> strParams(&argv[2], &argv[argc]);
        Array params = RPCConvertValues(strMethod, strParams);

        // Execute
        Object reply = CallRPC(strMethod, params);

        // Parse reply
        const Value& result = find_value(reply, "result");
        const Value& error  = find_value(reply, "error");

        if (error.type() != null_type)
        {
            // Error: print it and surface the code as the exit status.
            strPrint = "error: " + write_string(error, false);
            int code = find_value(error.get_obj(), "code").get_int();
            nRet = abs(code);
        }
        else
        {
            // Result: strings print bare, everything else as pretty JSON.
            if (result.type() == null_type)
                strPrint = "";
            else if (result.type() == str_type)
                strPrint = result.get_str();
            else
                strPrint = write_string(result, true);
        }
    }
    catch (boost::thread_interrupted) {
        // Propagate thread interruption untouched.
        throw;
    }
    catch (std::exception& e) {
        strPrint = string("error: ") + e.what();
        nRet = 87;
    }
    catch (...) {
        PrintException(NULL, "CommandLineRPC()");
    }

    // Successful output goes to stdout, errors to stderr.
    if (strPrint != "")
    {
        fprintf((nRet == 0 ? stdout : stderr), "%s\n", strPrint.c_str());
    }
    return nRet;
}
#ifdef TEST
// Standalone test harness (compiled only with -DTEST): run with -server
// to start the RPC server, otherwise act as the command-line client.
int main(int argc, char *argv[])
{
#ifdef _MSC_VER
    // Turn off Microsoft heap dump noise
    _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_WARN, CreateFile("NUL", GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, 0));
#endif
    // Unbuffered stdio so output interleaves predictably with the server.
    setbuf(stdin, NULL);
    setbuf(stdout, NULL);
    setbuf(stderr, NULL);

    try
    {
        if (argc >= 2 && string(argv[1]) == "-server")
        {
            printf("server ready\n");
            ThreadRPCServer(NULL);
        }
        else
        {
            return CommandLineRPC(argc, argv);
        }
    }
    catch (boost::thread_interrupted) {
        throw;
    }
    catch (std::exception& e) {
        PrintException(&e, "main()");
    } catch (...) {
        PrintException(NULL, "main()");
    }
    return 0;
}
#endif
const CRPCTable tableRPC;
| mit |
Unlink/Countdown | Countdown/MainWindow.xaml.cs | 3038 | using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Shapes;
using Countdown.Annotations;
namespace Countdown
{
    /// <summary>
    /// Interaction logic for MainWindow.xaml: the control panel that opens,
    /// restyles and closes the separate <see cref="ClockWindow"/>.
    /// </summary>
    public partial class MainWindow : Window, INotifyPropertyChanged
    {
        private ClockWindow _clockWindow;

        // Last known screen position of the clock window, restored when a
        // replacement window is opened.
        private double _lastTop = 0;
        private double _lastLeft = 0;

        // True once a clock window has been shown at least once, i.e.
        // _lastTop/_lastLeft hold a meaningful position.
        private bool _wasShown = false;

        /// <summary>Whether a clock window is currently open and active.</summary>
        public bool IsRunning => _clockWindow != null && _clockWindow.IsActive;

        public MainWindow()
        {
            InitializeComponent();
            DataContext = this;
        }

        /// <summary>
        /// Remembers the current clock window's position so the next one
        /// opens in the same place.  (Extracted: this was duplicated in
        /// RunButtonClick and CloseButtonClick.)
        /// </summary>
        private void SaveClockPosition()
        {
            _lastTop = _clockWindow.Top;
            _lastLeft = _clockWindow.Left;
        }

        private void RunButtonClick(object sender, RoutedEventArgs e)
        {
            TimeSpan cas = Cas.Value ?? new TimeSpan(0, 0, 0);
            if (Stopwatch.IsChecked ?? false)
            {
                // Stopwatch mode counts up from zero instead of down.
                cas = TimeSpan.Zero;
            }

            // Replace any previously opened clock window.
            if (_clockWindow != null)
            {
                SaveClockPosition();
                _clockWindow.Close();
            }

            _clockWindow = new ClockWindow(cas);
            ApplyStyle();

            if (_wasShown)
            {
                _clockWindow.Top = _lastTop;
                _clockWindow.Left = _lastLeft;
            }
            _wasShown = true;

            _clockWindow.Show();
            _clockWindow.Closed += (o, args) => OnPropertyChanged(nameof(IsRunning));
            OnPropertyChanged(nameof(IsRunning));
        }

        /// <summary>
        /// Pushes the currently selected colour, size and shadow options to
        /// the clock window (no-op when none is open).
        /// </summary>
        private void ApplyStyle()
        {
            Color farba = (Color) (Farba.SelectedColor ?? Colors.White);
            int velkost = (int) Velkost.Value;
            bool tien = (bool) (Tien.IsChecked ?? false);
            _clockWindow?.ApplyStyle(farba, velkost, tien);
        }

        private void CloseButtonClick(object sender, RoutedEventArgs e)
        {
            if (_clockWindow != null)
            {
                SaveClockPosition();
                _clockWindow.Close();
            }
        }

        private void Window_Closing(object sender, System.ComponentModel.CancelEventArgs e)
        {
            // Closing the control panel also closes the clock window.
            _clockWindow?.Close();
        }

        private void ApplyButtonClick(object sender, RoutedEventArgs e)
        {
            ApplyStyle();
        }

        public event PropertyChangedEventHandler PropertyChanged;

        [NotifyPropertyChangedInvocator]
        protected virtual void OnPropertyChanged([CallerMemberName] string propertyName = null)
        {
            PropertyChanged?.Invoke(this, new PropertyChangedEventArgs(propertyName));
        }
    }
}
| mit |
Orcomp/Orc.CheckForUpdates | src/Orc.CheckForUpdate.SystemWeb/Abstract/IDownloadLinkProvider.cs | 981 | // --------------------------------------------------------------------------------------------------------------------
// <copyright file="IDownloadLinkProvider.cs" company="ORC">
// MS-PL
// </copyright>
// <summary>
// Defines the IDownloadLinkProvider interface so it can be injected into the consumer.
// </summary>
// --------------------------------------------------------------------------------------------------------------------
namespace Orc.CheckForUpdate.Web.Abstract
{
    /// <summary>
    /// Supplies the route, controller and action names — presumably the MVC
    /// routing pieces used to build an update-download link — so the
    /// provider can be injected into the consumer.
    /// </summary>
    public interface IDownloadLinkProvider
    {
        /// <summary>
        /// Gets the name of the route used when generating the link.
        /// </summary>
        string RouteName { get; }

        /// <summary>
        /// Gets the name of the controller the link targets.
        /// </summary>
        string ControllerName { get; }

        /// <summary>
        /// Gets the name of the action the link targets.
        /// </summary>
        string ActionName { get; }
    }
}
sanyaade-teachings/pixi-lights | src/renderers/LightRenderer.js | 5903 | /**
*
* @class
* @private
* @memberof PIXI.lights
* @extends PIXI.ObjectRenderer
* @param renderer {WebGLRenderer} The renderer this sprite batch works for.
*/
function LightRenderer(renderer)
{
PIXI.ObjectRenderer.call(this, renderer);
// the total number of indices in our batch, there are 6 points per quad.
var numIndices = LightRenderer.MAX_LIGHTS * 6;
/**
* Holds the indices
*
* @member {Uint16Array}
*/
this.indices = new Uint16Array(numIndices);
//TODO this could be a single buffer shared amongst all renderers as we reuse this set up in most renderers
for (var i = 0, j = 0; i < numIndices; i += 6, j += 4)
{
this.indices[i + 0] = j + 0;
this.indices[i + 1] = j + 1;
this.indices[i + 2] = j + 2;
this.indices[i + 3] = j + 0;
this.indices[i + 4] = j + 2;
this.indices[i + 5] = j + 3;
}
/**
* The current size of the batch, each render() call adds to this number.
*
* @member {number}
*/
this.currentBatchSize = 0;
/**
* The current lights in the batch.
*
* @member {Light[]}
*/
this.lights = [];
}
// Maximum number of lights that fit in one batch (6 indices each).
LightRenderer.MAX_LIGHTS = 500;

LightRenderer.prototype = Object.create(PIXI.ObjectRenderer.prototype);
LightRenderer.prototype.constructor = LightRenderer;

module.exports = LightRenderer;

// Expose the renderer as renderer.plugins.lights.
PIXI.WebGLRenderer.registerPlugin('lights', LightRenderer);
/**
 * Queues a light for drawing on the next flush().
 *
 * @param light {Light} the light to render
 */
LightRenderer.prototype.render = function (light)
{
    // Assign by index rather than push(): the backing array is reused
    // between flushes and may still hold entries past the live batch size.
    this.lights[this.currentBatchSize] = light;
    this.currentBatchSize += 1;
};
/**
 * Draws every light queued since the last flush, then resets the batch.
 * Each light is drawn individually: shader switch (only when it changes),
 * blend mode, uniforms, buffer upload, then drawElements.
 */
LightRenderer.prototype.flush = function ()
{
    var renderer = this.renderer,
        gl = renderer.gl,
        diffuseTexture = renderer.diffuseTexture,
        normalsTexture = renderer.normalsTexture,
        lastShader = null;

    for (var i = 0; i < this.currentBatchSize; ++i)
    {
        var light = this.lights[i],
            shader = light.shader || this.renderer.shaderManager.plugins[light.shaderName];

        // Lazily create this light's GL buffers on first use.
        if (!light._vertexBuffer)
        {
            this._initWebGL(light);
        }

        // set shader if needed
        if (shader !== lastShader) {
            lastShader = shader;
            renderer.shaderManager.setShader(shader);
        }

        renderer.blendModeManager.setBlendMode(light.blendMode);

        // set uniforms, can do some optimizations here.
        shader.uniforms.uViewSize.value[0] = renderer.width;
        shader.uniforms.uViewSize.value[1] = renderer.height;

        light.worldTransform.toArray(true, shader.uniforms.translationMatrix.value);
        renderer.currentRenderTarget.projectionMatrix.toArray(true, shader.uniforms.projectionMatrix.value);

        if (light.useViewportQuad) {
            // update verts to ensure it is a fullscreen quad even if the renderer is resized. This should be optimized
            light.vertices[2] = light.vertices[4] = renderer.width;
            light.vertices[5] = light.vertices[7] = renderer.height;
        }

        light.syncShader(shader);

        shader.syncUniforms();

        // have to set these manually due to the way pixi base shader makes assumptions about texture units
        gl.uniform1i(shader.uniforms.uSampler._location, 0);
        gl.uniform1i(shader.uniforms.uNormalSampler._location, 1);

        if (!light.needsUpdate)
        {
            // Geometry unchanged: refresh existing buffers with bufferSubData.
            // update vertex data
            gl.bindBuffer(gl.ARRAY_BUFFER, light._vertexBuffer);
            gl.bufferSubData(gl.ARRAY_BUFFER, 0, light.vertices);
            gl.vertexAttribPointer(shader.attributes.aVertexPosition, 2, gl.FLOAT, false, 0, 0);

            // bind diffuse texture
            gl.activeTexture(gl.TEXTURE0);
            gl.bindTexture(gl.TEXTURE_2D, diffuseTexture.baseTexture._glTextures[gl.id]);

            // bind normal texture
            gl.activeTexture(gl.TEXTURE1);
            gl.bindTexture(gl.TEXTURE_2D, normalsTexture.baseTexture._glTextures[gl.id]);

            // update indices
            gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, light._indexBuffer);
            gl.bufferSubData(gl.ELEMENT_ARRAY_BUFFER, 0, light.indices);
        }
        else
        {
            light.needsUpdate = false;

            // Geometry changed: full re-upload with bufferData.
            // upload vertex data
            gl.bindBuffer(gl.ARRAY_BUFFER, light._vertexBuffer);
            gl.bufferData(gl.ARRAY_BUFFER, light.vertices, gl.STATIC_DRAW);
            gl.vertexAttribPointer(shader.attributes.aVertexPosition, 2, gl.FLOAT, false, 0, 0);

            // bind diffuse texture
            gl.activeTexture(gl.TEXTURE0);
            gl.bindTexture(gl.TEXTURE_2D, diffuseTexture.baseTexture._glTextures[gl.id]);

            // bind normal texture
            gl.activeTexture(gl.TEXTURE1);
            gl.bindTexture(gl.TEXTURE_2D, normalsTexture.baseTexture._glTextures[gl.id]);

            // static upload of index buffer
            gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, light._indexBuffer);
            gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, light.indices, gl.STATIC_DRAW);
        }

        gl.drawElements(renderer.drawModes[light.drawMode], light.indices.length, gl.UNSIGNED_SHORT, 0);

        renderer.drawCount++;
    }

    this.currentBatchSize = 0;
};
/**
 * Prepares all the buffers to render this light.
 *
 * @param light {Light} The light object to prepare for rendering.
 */
LightRenderer.prototype._initWebGL = function (light)
{
    var gl = this.renderer.gl;

    // create the buffers
    light._vertexBuffer = gl.createBuffer();
    light._indexBuffer = gl.createBuffer();

    // Vertices may change later (DYNAMIC_DRAW); the quad index pattern
    // never changes (STATIC_DRAW).
    gl.bindBuffer(gl.ARRAY_BUFFER, light._vertexBuffer);
    gl.bufferData(gl.ARRAY_BUFFER, light.vertices, gl.DYNAMIC_DRAW);

    gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, light._indexBuffer);
    gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, light.indices, gl.STATIC_DRAW);
};
// Intentionally a no-op: this renderer holds no GL resources of its own
// (per-light buffers are attached to the light objects themselves).
LightRenderer.prototype.destroy = function ()
{
};
pipetree/pipetree | pipetree/templates.py | 1769 | # MIT License
# Copyright (c) 2016 Morgan McDermott & John Carlyle
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from collections import OrderedDict
_default_config = {
'project_name': '%s'
}
DEFAULT_CONFIG = json.dumps(_default_config, indent=4)
DEFAULT_HANDLERS = """# Generated by pipetree
def handler(img_bytes):
yield len(img_bytes)
"""
_default_pipeline_config = OrderedDict([
('CatPictures', {
'type': 'LocalDirectoryPipelineStage',
'filepath': 'cat_imgs',
'read_content': True
}),
('WriteBytes', {
'inputs': ['CatPictures'],
'type': 'ExecutorPipelineStage',
'execute': '%s.main.handler'
})
])
DEFAULT_PIPELINE_CONFIG = json.dumps(_default_pipeline_config, indent=4)
| mit |
ultrasaurus/trie_example | memory.rb | 333 | require 'rubygems'
require 'memprof'
require 'trie_dictionary.rb'
require 'names'
puts "----------------- create"
Memprof.start
d = Dictionary.new
$names.each do |name|
d.add(name)
end
Memprof.stats
puts "----------------- find"
('A'..'Z').each do |chr|
puts "------------ #{chr}"
d.find(chr)
Memprof.stats
end
Memprof.stop
| mit |
alphagov/content-store | spec/integration/fetching_publish_intent_spec.rb | 5704 | require "rails_helper"
# Request specs covering how cache headers on GET /content/... change when a
# publish intent exists for the content item's base path.
describe "Fetching a content item with a publish intent", type: :request do
  let(:content_item) { create(:content_item, public_updated_at: 30.minutes.ago) }

  context "when the user is not authenticated" do
    around do |example|
      ClimateControl.modify(GDS_SSO_MOCK_INVALID: "1") { example.run }
    end

    # NOTE(review): the description says "unauthorized" but the assertion
    # expects a successful response — confirm which is intended.
    it "returns an unauthorized response" do
      create(:publish_intent, base_path: content_item.base_path, publish_time: 5.minutes.ago)
      get "/content/#{content_item.base_path}"
      expect(response).to be_successful
    end
  end

  # An intent well in the past: the default TTL applies.
  context "a publish intent long in the past" do
    before(:each) do
      create(:publish_intent, base_path: content_item.base_path, publish_time: 5.minutes.ago)
      get "/content/#{content_item.base_path}"
    end

    it "returns the presented content item as JSON data" do
      expect(response.media_type).to eq("application/json")
      expect(response.body).to eq(present(content_item))
    end

    it "sets cache headers to expire in the default TTL" do
      expect(cache_control["max-age"]).to eq(default_ttl.to_s)
    end

    it "sets a cache-control directive of public" do
      expect(cache_control["public"]).to eq(true)
    end
  end

  # An intent that passed moments ago: caching drops to the minimum TTL.
  context "a publish intent that has newly passed" do
    before(:each) do
      create(:publish_intent, base_path: content_item.base_path, publish_time: 10.seconds.ago)
      get "/content/#{content_item.base_path}"
    end

    it "returns the presented content item as JSON data" do
      expect(response.media_type).to eq("application/json")
      expect(response.body).to eq(present(content_item))
    end

    it "sets cache headers to the minimum TTL" do
      expect(cache_control["max-age"]).to eq(Rails.application.config.minimum_ttl.to_s)
    end

    it "sets a cache-control directive of public" do
      expect(cache_control["public"]).to eq(true)
    end
  end

  # An intent further away than the default TTL: the default TTL applies.
  context "a publish intent more than the default TTL away" do
    before(:each) do
      create(:publish_intent, base_path: content_item.base_path, publish_time: 40.minutes.from_now)
      get "/content/#{content_item.base_path}"
    end

    it "returns the presented content item as JSON data" do
      expect(response.media_type).to eq("application/json")
      expect(response.body).to eq(present(content_item))
    end

    it "sets cache headers to expire in the default TTL" do
      expect(cache_control["max-age"]).to eq(default_ttl.to_s)
    end

    it "sets a cache-control directive of public" do
      expect(cache_control["public"]).to eq(true)
    end
  end

  # An intent inside the default TTL window: expiry aligns with the intent.
  context "a publish intent before the default TTL time" do
    before(:each) do
      Timecop.freeze
      create(:publish_intent, base_path: content_item.base_path, publish_time: 5.minutes.from_now)
      get "/content/#{content_item.base_path}"
    end

    it "returns the presented content item as JSON data" do
      expect(response.media_type).to eq("application/json")
      expect(response.body).to eq(present(content_item))
    end

    it "sets cache headers to expire when the publish intent is due" do
      expect(cache_control["max-age"]).to eq(5.minutes.to_i.to_s)
    end

    it "sets a cache-control directive of public" do
      expect(cache_control["public"]).to eq(true)
    end
  end

  # Access-limited content: responses must stay private and short-lived.
  context "a publish intent for access limited content" do
    let(:content_item) { create(:access_limited_content_item, :by_user_id) }

    before(:each) do
      Timecop.freeze
      create(:publish_intent, base_path: content_item.base_path, publish_time: 5.minutes.from_now)
      get "/content/#{content_item.base_path}",
          headers: { "X-Govuk-Authenticated-User": content_item.access_limited["users"].first }
    end

    it "returns the presented content item as JSON data" do
      expect(response.media_type).to eq("application/json")
      expect(response.body).to eq(present(content_item))
    end

    it "sets cache headers to to the minimum ttl" do
      expect(cache_control["max-age"]).to eq(Rails.application.config.minimum_ttl.to_s)
    end

    it "sets a cache-control directive of private" do
      expect(cache_control["private"]).to eq(true)
    end
  end

  # Content fetched via an auth-bypass token: same private/minimum-TTL rules.
  context "a publish intent for content accessed by auth_bypass_id" do
    let(:auth_bypass_id) { SecureRandom.uuid }
    let(:content_item) { create(:content_item, auth_bypass_ids: [auth_bypass_id]) }

    before(:each) do
      Timecop.freeze
      create(:publish_intent, base_path: content_item.base_path, publish_time: 5.minutes.from_now)
      get "/content/#{content_item.base_path}",
          headers: { "Govuk-Auth-Bypass-Id" => auth_bypass_id }
    end

    it "returns the presented content item as JSON data" do
      expect(response.media_type).to eq("application/json")
      expect(response.body).to eq(present(content_item))
    end

    it "sets cache headers to to the minimum ttl" do
      expect(cache_control["max-age"]).to eq(Rails.application.config.minimum_ttl.to_s)
    end

    it "sets a cache-control directive of private" do
      expect(cache_control["private"]).to eq(true)
    end
  end
end
# An intent with no content item yet: the endpoint 404s, but caching still
# respects the upcoming publish time.
describe "Fetching a publish intent without a content item", type: :request do
  before(:each) do
    Timecop.freeze
    create(:publish_intent, base_path: "/some/future/thing", publish_time: 5.minutes.from_now)
    get "/content/some/future/thing"
  end

  it "returns a 404 Not Found response" do
    expect(response.status).to eq(404)
  end

  it "sets cache headers to expire according to the publish intent" do
    expect(cache_control["max-age"]).to eq(5.minutes.to_i.to_s)
  end

  it "sets a cache-control directive of public" do
    expect(cache_control["public"]).to eq(true)
  end
end
abclaur/jobeet | src/Eti/JobeetBundle/Command/JobeetCleanupCommand.php | 1605 | <?php
namespace Eti\JobeetBundle\Command;
use Symfony\Bundle\FrameworkBundle\Command\ContainerAwareCommand;
use Symfony\Component\Console\Input\InputArgument;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Input\InputOption;
use Symfony\Component\Console\Output\OutputInterface;
use Eti\JobeetBundle\Entity\Job;
/**
 * Console command "eti:jobeet:cleanup".
 *
 * Removes entries for expired jobs from the Lucene search index and
 * deletes stale job rows from the database.
 */
class JobeetCleanupCommand extends ContainerAwareCommand {

    protected function configure()
    {
        $this
            ->setName('eti:jobeet:cleanup')
            ->setDescription('Cleanup Jobeet database')
            // Fixed: the description previously read 'The email', which did
            // not describe this argument at all.
            ->addArgument('days', InputArgument::OPTIONAL, 'Age threshold in days for removing stale jobs', 90)
        ;
    }

    protected function execute(InputInterface $input, OutputInterface $output)
    {
        $days = $input->getArgument('days');

        $em = $this->getContainer()->get('doctrine')->getManager();

        // Drop every indexed document whose job has expired.
        $index = Job::getLuceneIndex();

        $q = $em->getRepository('EtiJobeetBundle:Job')->createQueryBuilder('j')
            ->where('j.expiresAt < :date')
            ->setParameter('date', date('Y-m-d'))
            ->getQuery();

        $jobs = $q->getResult();
        foreach ($jobs as $job)
        {
            // NOTE(review): Zend_Search_Lucene find() typically returns an
            // array of hits; accessing ->id on the result may need a loop —
            // verify against the index implementation in use.
            if ($hit = $index->find('pk:'.$job->getId()))
            {
                $index->delete($hit->id);
            }
        }
        $index->optimize();

        $output->writeln('Cleaned up and optimized the job index');

        // Remove stale job rows older than the threshold.
        $nb = $em->getRepository('EtiJobeetBundle:Job')->cleanup($days);
        $output->writeln(sprintf('Removed %d stale jobs', $nb));
    }
}
yahoo/pngjs-image | test/png/encode/custom/stRT.js | 4270 | // Copyright 2015 Yahoo! Inc.
// Copyrights licensed under the Mit License. See the accompanying LICENSE file for terms.
var testGen = require('../../testGen');
describe('stRT', function () {
describe('Single', function () {
testGen.addEncodeTests({
resourceGroup: [],
resourceFile: 'base',
outputGroup: ['custom', 'structure'],
outputFile: 'stRT_single',
imageCheck: true,
width: 32,
height: 32,
encodeOptions: {
structures: [
{
type: "test",
major: 3,
minor: 7,
content: {
answer: 42
}
}
]
}
});
});
//describe('Multiple', function () {
//
// testGen.addEncodeTests({
// resourceGroup: [],
// resourceFile: 'base',
// outputGroup: ['custom', 'structure'],
// outputFile: 'stRT_multiple',
//
// imageCheck: true,
// width: 32,
// height: 32,
//
// encodeOptions: {
// structures: [
// {
// keyword: "test1",
// content: "hello world1"
// },
// {
// keyword: "test2",
// content: "hello world2"
// },
// {
// keyword: "test3",
// content: "hello world3"
// }
// ]
// }
// });
//});
//
//describe('Empty', function () {
//
// testGen.addEncodeTests({
// resourceGroup: [],
// resourceFile: 'base',
// outputGroup: ['custom', 'structure'],
// outputFile: 'stRT_empty',
//
// imageCheck: true,
// width: 32,
// height: 32,
//
// encodeOptions: {
// structures: []
// }
// });
//});
//
//describe('Special Characters', function () {
//
// testGen.addEncodeTests({
// resourceGroup: [],
// resourceFile: 'base',
// outputGroup: ['custom', 'structure'],
// outputFile: 'stRT_special_chars',
//
// imageCheck: true,
// width: 32,
// height: 32,
//
// encodeOptions: {
// structures: [
// {
// keyword: "test",
// content: "hello © world"
// }
// ]
// }
// });
//});
//
//describe('Long Keyword', function () {
//
// testGen.addEncodeTests({
// resourceGroup: [],
// resourceFile: 'base',
// outputGroup: ['custom', 'structure'],
// outputFile: 'stRT_long_keyword',
//
// expectFailure: true,
// expectMessage: 'Keyword cannot be longer than 79 characters.',
//
// width: 32,
// height: 32,
//
// encodeOptions: {
// structures: [
// {
// keyword: "12345678901234567890123456789012345678901234567890123456789012345678901234567890", // 80
// content: "hello world"
// }
// ]
// }
// });
//});
//
//describe('Empty Keyword', function () {
//
// testGen.addEncodeTests({
// resourceGroup: [],
// resourceFile: 'base',
// outputGroup: ['custom', 'structure'],
// outputFile: 'stRT_empty_keyword',
//
// expectFailure: true,
// expectMessage: 'Keyword needs to have a least one character.',
//
// width: 32,
// height: 32,
//
// encodeOptions: {
// structures: [
// {
// keyword: "",
// content: "hello world"
// }
// ]
// }
// });
//});
//
//describe('Default Keyword', function () {
//
// testGen.addEncodeTests({
// resourceGroup: [],
// resourceFile: 'base',
// outputGroup: ['custom', 'structure'],
// outputFile: 'stRT_default_keyword',
//
// imageCheck: true,
// width: 32,
// height: 32,
//
// encodeOptions: {
// structures: [
// { // Keyword: "Title"
// content: "hello world"
// }
// ]
// }
// });
//});
//
//describe('No Content Keyword', function () {
//
// testGen.addEncodeTests({
// resourceGroup: [],
// resourceFile: 'base',
// outputGroup: ['custom', 'structure'],
// outputFile: 'stRT_no_content',
//
// imageCheck: true,
// width: 32,
// height: 32,
//
// encodeOptions: {
// structures: [
// {
// keyword: "test"
// }
// ]
// }
// });
//});
//
//describe('New line', function () {
//
// testGen.addEncodeTests({
// resourceGroup: [],
// resourceFile: 'base',
// outputGroup: ['custom', 'structure'],
// outputFile: 'stRT_new_line',
//
// imageCheck: true,
// width: 32,
// height: 32,
//
// encodeOptions: {
// structures: [
// {
// keyword: "test",
// content: "hello" + os.EOL + "world"
// }
// ]
// }
// });
//});
});
| mit |
kwong/UpdatingViewsOnSQLite | src/driver/DBConn.java | 7016 | package driver;
import java.sql.*;
import java.util.HashSet;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.logging.Level;
import java.util.logging.Logger;
import datastructs.*;
public class DBConn {

	/** JDBC URL of the SQLite database file used by the application. */
	public static final String DATABASE = "jdbc:sqlite:DB.db";

	// Legacy flag kept for source compatibility; the per-call connection
	// methods below no longer consult it.
	static boolean connOpened = false;

	/**
	 * Returns true when the given SELECT query yields at least one row.
	 * Statement/ResultSet/Connection are all closed via try-with-resources
	 * (the old code leaked the Statement and ResultSet on every call).
	 *
	 * @param query a complete SELECT statement
	 * @return true if the result set is non-empty, false on empty result or SQL error
	 */
	public static boolean getTupleExist(String query) throws Exception {
		Class.forName("org.sqlite.JDBC");
		try (Connection conn = DriverManager.getConnection(DATABASE);
				Statement stat = conn.createStatement();
				ResultSet rs = stat.executeQuery(query)) {
			// Read-only query: no transaction handling needed.
			return rs.next();
		} catch (SQLException e) {
			Logger.getLogger(DBConn.class.getName()).log(Level.SEVERE, null, e);
			return false;
		}
	}

	/**
	 * Executes one or more ';'-separated data-modifying statements inside a
	 * single transaction and commits them together.
	 *
	 * @param query one or more statements separated by ';'
	 * @return true when at least one row was affected, false otherwise or on error
	 */
	public static boolean queryDB(String query) throws Exception {
		Class.forName("org.sqlite.JDBC");
		try (Connection conn = DriverManager.getConnection(DATABASE)) {
			conn.setAutoCommit(false);
			int affectedRows = 0;
			StringTokenizer st = new StringTokenizer(query.trim(), ";");
			while (st.hasMoreTokens()) {
				String statement = st.nextToken().trim();
				try (PreparedStatement prep = conn.prepareStatement(statement)) {
					affectedRows += prep.executeUpdate();
				}
			}
			conn.commit();
			return affectedRows > 0;
		} catch (SQLException e) {
			Logger.getLogger(DBConn.class.getName()).log(Level.SEVERE, null, e);
			return false;
		}
	}

	/**
	 * Executes a single non-counting statement (views can only be created or
	 * deleted, so the affected-row count is meaningless here).
	 *
	 * @return true on success, false when the statement fails
	 */
	public static boolean queryNCDB(String query) throws Exception {
		Class.forName("org.sqlite.JDBC");
		try (Connection conn = DriverManager.getConnection(DATABASE)) {
			conn.setAutoCommit(false);
			try (PreparedStatement prep = conn.prepareStatement(query)) {
				prep.executeUpdate();
			}
			conn.commit();
			return true;
		} catch (SQLException e) {
			System.out.println("Potential SQL error: " + e.getMessage());
			return false;
		}
	}

	/**
	 * Collects the values of the first result column into a set
	 * (used for table joins).
	 *
	 * @return the set of values, or null when the query fails (original contract)
	 */
	public static Set<String> getColumnValue(String query) throws Exception {
		Class.forName("org.sqlite.JDBC");
		try (Connection conn = DriverManager.getConnection(DATABASE);
				Statement stat = conn.createStatement();
				ResultSet rs = stat.executeQuery(query)) {
			Set<String> values = new HashSet<String>();
			while (rs.next()) {
				values.add(rs.getString(1));
			}
			return values;
		} catch (Exception e) {
			// Preserve the original best-effort contract: null signals failure.
			return null;
		}
	}

	/**
	 * Returns true when {@code value} occurs (case-insensitively) in the given
	 * column of the given table.
	 *
	 * NOTE(review): identifiers cannot be bound as JDBC parameters, so the
	 * table/column names are concatenated into the SQL text; callers must
	 * ensure these come from trusted sources.
	 */
	public static boolean checkValueinColumn(String value, String tableName,
			String columnName) throws Exception {
		Class.forName("org.sqlite.JDBC");
		try (Connection conn = DriverManager.getConnection(DATABASE);
				Statement stat = conn.createStatement();
				ResultSet rs = stat.executeQuery(
						"Select " + columnName + " from " + tableName)) {
			while (rs.next()) {
				if (value.equalsIgnoreCase(rs.getString(1))) {
					return true;
				}
			}
			return false;
		} catch (Exception e) {
			return false;
		}
	}

	/**
	 * Returns the set of uniquely-indexed columns of the relation, using the
	 * driver's index metadata.
	 *
	 * @return candidate-key column names, or null on error
	 */
	public static Set<String> getCandidateKey(String tableName) {
		try {
			Class.forName("org.sqlite.JDBC");
			try (Connection conn = DriverManager.getConnection(DATABASE)) {
				Set<String> keys = new HashSet<String>();
				DatabaseMetaData dbmd = conn.getMetaData();
				try (ResultSet rs = dbmd.getIndexInfo(conn.getCatalog(), null,
						tableName, true, true)) {
					while (rs.next()) {
						keys.add(rs.getString("COLUMN_NAME"));
					}
				}
				return keys;
			}
		} catch (Exception e) {
			Logger.getLogger(DBConn.class.getName()).log(Level.SEVERE, null, e);
			return null;
		}
	}

	/**
	 * Returns the first primary-key column of the table, or null when the
	 * table has no primary key or the lookup fails.
	 */
	public static String getPrimaryKey(String tableName) {
		try {
			Class.forName("org.sqlite.JDBC");
			try (Connection conn = DriverManager.getConnection(DATABASE)) {
				DatabaseMetaData dbmd = conn.getMetaData();
				try (ResultSet rs = dbmd.getPrimaryKeys(null, null, tableName)) {
					// BUG FIX: a ResultSet cursor starts before the first row;
					// the old code called getString() without next(), which
					// throws SQLException and made this method always return
					// null via the catch block.
					if (rs.next()) {
						return rs.getString("COLUMN_NAME");
					}
					return null;
				}
			}
		} catch (Exception e) {
			Logger.getLogger(DBConn.class.getName()).log(Level.SEVERE, null, e);
			return null;
		}
	}

	/**
	 * Runs a SELECT and materializes the full result (column names + rows)
	 * into a ViewTable.
	 *
	 * @return the populated ViewTable, or null on error
	 */
	public static ViewTable querySelectDB(String query) {
		try {
			Class.forName("org.sqlite.JDBC");
			try (Connection conn = DriverManager.getConnection(DATABASE);
					Statement stat = conn.createStatement();
					ResultSet rs = stat.executeQuery(query)) {
				ResultSetMetaData rsmd = rs.getMetaData();
				int numCols = rsmd.getColumnCount();
				ViewTable vt = new ViewTable(numCols);
				// Column headers first (JDBC columns are 1-based).
				for (int i = 1; i <= numCols; i++) {
					vt.colNames[i - 1] = rsmd.getColumnName(i);
				}
				while (rs.next()) {
					Tuple row = new Tuple(numCols);
					for (int i = 1; i <= numCols; i++) {
						row.tuple[i - 1] = rs.getString(i);
					}
					vt.add(row);
				}
				return vt;
			}
		} catch (Exception e) {
			Logger.getLogger(DBConn.class.getName()).log(Level.SEVERE, null, e);
			return null;
		}
	}
}
| mit |
square/rubygems.org | app/controllers/api/v1/api_keys_controller.rb | 942 | class Api::V1::ApiKeysController < Api::BaseController
before_filter :redirect_to_root, :unless => :signed_in?, :only => [:reset]
respond_to :json, :xml, :yaml, :only => :show
def show
authenticate_or_request_with_http_basic do |username, password|
sign_in User.authenticate(username, password)
if current_user
respond_to do |format|
format.any(:all) { render :text => current_user.api_key }
format.json { render :json => {:rubygems_api_key => current_user.api_key} }
format.xml { render :xml => {:rubygems_api_key => current_user.api_key} }
format.yaml { render :text => {:rubygems_api_key => current_user.api_key}.to_yaml }
end
else
false
end
end
end
def reset
current_user.reset_api_key!
flash[:notice] = "Your API key has been reset. Don't forget to update your .gemrc file!"
redirect_to edit_profile_path
end
end
| mit |
peterfpeterson/finddata | finddata/publish_plot.py | 9684 | #!/usr/bin/env python
from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
try:
    from postprocessing.Configuration import Configuration, CONFIG_FILE, CONFIG_FILE_ALTERNATE
except ImportError:
    # Local dev only, mocking Configuration as needed
    CONFIG_FILE = '/etc/autoreduce/post_processing.conf'
    CONFIG_FILE_ALTERNATE = 'post_processing.conf'

    class Configuration(object):
        """
        Read and process configuration file and provide an easy
        way to hold the various options for a client. This is a
        heavily abridged version of what is found in postprocessing.
        """
        def __init__(self, config_file):
            # Fail fast with a descriptive error instead of an opaque IOError.
            if os.access(config_file, os.R_OK) is False:
                raise RuntimeError("Configuration file doesn't exist or is not readable: %s" % config_file)
            with open(config_file, 'r') as cfg:
                json_encoded = cfg.read()
            # Config files are JSON documents; unknown keys are ignored.
            config = json.loads(json_encoded)

            # Keep a record of which config file we are using
            self.config_file = config_file

            # plot publishing
            self.publish_url = config.get('publish_url_template', '')
            self.publisher_username = config.get('publisher_username', '')
            self.publisher_password = config.get('publisher_password', '')
            self.publisher_certificate = config.get('publisher_certificate', '')
def _determine_config_file(config_file):
    """Return the first readable config file among the caller's choice,
    CONFIG_FILE and CONFIG_FILE_ALTERNATE, or None when none is usable."""
    for candidate in (config_file, CONFIG_FILE, CONFIG_FILE_ALTERNATE):
        # Skip empty/None entries; accept the first one we can read.
        if candidate and os.access(candidate, os.R_OK):
            return candidate
    return None
def read_configuration(config_file=None):
    """
    Returns a new configuration object for a given
    configuration file
    @param config_file: configuration file to process
    """
    # Fall back to the well-known locations when no file (or an unreadable
    # one) was supplied.
    config_file = _determine_config_file(config_file)
    if config_file is None:
        raise RuntimeError('Failed to find Configuration file')
    logging.info('Loading configuration \'%s\'' % config_file)
    return Configuration(config_file)
def _loadDiv(filename):
if not os.path.exists(filename):
raise RuntimeError('\'%s\' does not exist' % filename)
print('loading \'%s\'' % filename)
with open(filename, 'r') as handle:
div = handle.read()
return div
def _getURL(url_template, instrument, run_number):
import string
url_template = string.Template(url_template)
url = url_template.substitute(instrument=instrument,
run_number=str(run_number))
return url
def publish_plot(instrument, run_number, files, config=None):
    """POST the plot payload to the configured publish server.

    :param instrument: instrument short name used in the URL template
    :param run_number: run identifier (converted to str)
    :param files: dict passed straight to requests.post(files=...)
    :param config: Configuration object, a config filename, or None to
        auto-discover the configuration
    :return: the requests.Response from the server
    """
    # read the configuration if one isn't provided
    if config is None:
        config = read_configuration()
    # verify that it has an attribute that matters
    try:
        config.publish_url
    except AttributeError: # assume that it is a filename
        config = read_configuration(config)

    run_number = str(run_number)
    url = _getURL(config.publish_url, instrument, run_number)
    print('posting to \'%s\'' % url)

    # these next 2 lines are explicity bad - and doesn't seem
    # to do ANYTHING
    # https://urllib3.readthedocs.org/en/latest/security.html
    import urllib3
    urllib3.disable_warnings()

    import requests
    # Use a client certificate when configured; otherwise skip TLS
    # verification (internal server with self-signed cert).
    if config.publisher_certificate:
        request = requests.post(url, data={'username': config.publisher_username,
                                           'password': config.publisher_password},
                                files=files, cert=config.publisher_certificate)
    else:
        request = requests.post(url, data={'username': config.publisher_username,
                                           'password': config.publisher_password},
                                files=files, verify=False)
    return request
def plot1d(run_number, data_list, data_names=None, x_title='', y_title='',
           x_log=False, y_log=False, instrument='', show_dx=True, title='', publish=True):
    """
    Produce a 1D plot

    @param data_list: list of traces [ [x1, y1], [x2, y2], ...]; each trace may
        optionally carry y-errors ([x, y, dy]) and x-errors ([x, y, dy, dx])
    @param data_names: name for each trace, for the legend
    @return: the server response when publishing, the plot <div> otherwise,
        or None when publishing fails
    """
    from plotly.offline import plot
    import plotly.graph_objs as go

    # Create traces
    if not isinstance(data_list, list):
        raise RuntimeError("plot1d: data_list parameter is expected to be a list")

    # Catch the case where the list is in the format [x y]
    data = []
    show_legend = False
    if len(data_list) == 2 and not isinstance(data_list[0], list):
        label = ''
        if isinstance(data_names, list) and len(data_names) == 1:
            label = data_names[0]
            show_legend = True
        data = [go.Scatter(name=label, x=data_list[0], y=data_list[1])]
    else:
        for i in range(len(data_list)):
            label = ''
            if isinstance(data_names, list) and len(data_names) == len(data_list):
                label = data_names[i]
                show_legend = True
            err_x = {}
            err_y = {}
            # Optional error bars: third element is dy, fourth is dx.
            if len(data_list[i]) >= 3:
                err_y = dict(type='data', array=data_list[i][2], visible=True)
            if len(data_list[i]) >= 4:
                err_x = dict(type='data', array=data_list[i][3], visible=True)
                if show_dx is False:
                    # Zero thickness hides the x error bars without dropping the data.
                    err_x['thickness'] = 0

            data.append(go.Scatter(name=label, x=data_list[i][0], y=data_list[i][1],
                                   error_x=err_x, error_y=err_y))

    x_layout = dict(title=x_title, zeroline=False, exponentformat="power",
                    showexponent="all", showgrid=True,
                    showline=True, mirror="all", ticks="inside")
    if x_log:
        x_layout['type'] = 'log'
    y_layout = dict(title=y_title, zeroline=False, exponentformat="power",
                    showexponent="all", showgrid=True,
                    showline=True, mirror="all", ticks="inside")
    if y_log:
        y_layout['type'] = 'log'

    layout = go.Layout(
        showlegend=show_legend,
        autosize=True,
        width=600,
        height=400,
        margin=dict(t=40, b=40, l=80, r=40),  # noqa: E741
        hovermode='closest',
        bargap=0,
        xaxis=x_layout,
        yaxis=y_layout,
        title=title
    )

    fig = go.Figure(data=data, layout=layout)
    plot_div = plot(fig, output_type='div', include_plotlyjs=False, show_link=False)
    if publish:
        try:
            return publish_plot(instrument, run_number, files={'file': plot_div})
        except Exception:
            # BUG FIX: the old handler referenced `sys.exc_value`, a Python 2
            # attribute (and `sys` was not imported at module scope), so any
            # publish failure raised a NameError instead of being logged.
            logging.exception("Publish plot failed")
            return None
    else:
        return plot_div
def plot_heatmap(run_number, x, y, z, x_title='', y_title='', surface=False,
                 x_log=False, y_log=False, instrument='', title='', publish=True):
    """
    Produce a 2D plot

    @param z: 2-D intensity values rendered as a heatmap
    @return: the server response when publishing, the plot <div> otherwise,
        or None when publishing fails
    """
    from plotly.offline import plot
    import plotly.graph_objs as go

    x_layout = dict(title=x_title, zeroline=False, exponentformat="power",
                    showexponent="all", showgrid=True,
                    showline=True, mirror="all", ticks="inside")
    if x_log:
        x_layout['type'] = 'log'
    y_layout = dict(title=y_title, zeroline=False, exponentformat="power",
                    showexponent="all", showgrid=True,
                    showline=True, mirror="all", ticks="inside")
    if y_log:
        y_layout['type'] = 'log'

    layout = go.Layout(
        showlegend=False,
        autosize=True,
        width=600,
        height=500,
        margin=dict(t=40, b=40, l=80, r=40),  # noqa: E741
        hovermode='closest',
        bargap=0,
        xaxis=x_layout,
        yaxis=y_layout,
        title=title
    )

    # Blue -> cyan -> yellow -> red intensity ramp.
    colorscale = [[0, "rgb(0,0,131)"], [0.125, "rgb(0,60,170)"], [0.375, "rgb(5,255,255)"],
                  [0.625, "rgb(255,255,0)"], [0.875, "rgb(250,0,0)"], [1, "rgb(128,0,0)"]]
    # Computed for the (currently disabled) `type=` switch below; kept so the
    # `surface` parameter remains meaningful once re-enabled.
    plot_type = 'surface' if surface else 'heatmap'
    trace = go.Heatmap(z=z, x=x, y=y, autocolorscale=False,  # type=plot_type,
                       hoverinfo="none", colorscale=colorscale)

    fig = go.Figure(data=[trace], layout=layout)
    plot_div = plot(fig, output_type='div', include_plotlyjs=False, show_link=False)
    # The following would remove the hover options, which are not accessible through python
    # https://github.com/plotly/plotly.js/blob/master/src/components/modebar/buttons.js
    # plot_div = plot_div.replace('modeBarButtonsToRemove:[]',
    #                             'modeBarButtonsToRemove:["hoverClosestCartesian",
    #                              "hoverCompareCartesian"]')
    if publish:
        try:
            return publish_plot(instrument, run_number, files={'file': plot_div})
        except Exception:
            # BUG FIX: previously logged via the Python-2-only `sys.exc_value`
            # with `sys` unimported, turning every failure into a NameError.
            logging.exception("Publish plot failed")
            return None
    else:
        return plot_div
if __name__ == '__main__':
    import sys
    # CLI usage: publish_plot.py <INSTR>_<RUN>_<...>.html
    div = _loadDiv(sys.argv[1])
    # run information is generated from the filename
    name = os.path.split(sys.argv[1])[-1]
    (instr, runnumber) = name.split('_')[:2]
    config = read_configuration('post_processing.conf')
    request = publish_plot(instr, runnumber, {'file': div}, config)
    print('request returned', request.status_code)
| mit |
mvdnes/rboy | src/mbc/mbc5.rs | 2755 | use crate::mbc::{MBC, ram_size};
use crate::StrResult;
use std::fs::File;
use std::{io, path};
use std::io::prelude::*;
/// MBC5 cartridge mapper state: full ROM/RAM images plus the currently
/// selected banks and the RAM-enable latch.
pub struct MBC5 {
    rom: Vec<u8>,
    ram: Vec<u8>,
    rombank: usize, // selected ROM bank for 0x4000-0x7FFF (9-bit on MBC5)
    rambank: usize, // selected RAM bank for 0xA000-0xBFFF
    ram_on: bool,   // RAM enable latch, set by writing 0x0A to 0x0000-0x1FFF
    savepath: Option<path::PathBuf>, // battery-save file, when cart is battery-backed
}
impl MBC5 {
    /// Builds an MBC5 mapper from a cartridge image. Types 0x1B/0x1E are
    /// battery-backed, so their RAM is restored from (and later written back
    /// to) a `.gbsave` file next to the ROM.
    pub fn new(data: Vec<u8>, file: path::PathBuf) -> StrResult<MBC5> {
        let subtype = data[0x147];
        let svpath = match subtype {
            0x1B | 0x1E => Some(file.with_extension("gbsave")),
            _ => None,
        };
        let ramsize = match subtype {
            0x1A | 0x1B | 0x1D | 0x1E => ram_size(data[0x149]),
            _ => 0,
        };

        let mut res = MBC5 {
            rom: data,
            // vec! macro instead of repeat().take().collect() — same zeroed
            // buffer, clearer intent.
            ram: vec![0u8; ramsize],
            rombank: 1,
            rambank: 0,
            ram_on: false,
            savepath: svpath,
        };
        res.loadram().map(|_| res)
    }

    /// Restores battery-backed RAM from disk. A missing save file is not an
    /// error (fresh cartridge); any other I/O failure is reported.
    fn loadram(&mut self) -> StrResult<()> {
        match self.savepath {
            None => Ok(()),
            Some(ref savepath) => {
                let mut data = vec![];
                match File::open(&savepath).and_then(|mut f| f.read_to_end(&mut data)) {
                    Err(ref e) if e.kind() == io::ErrorKind::NotFound => Ok(()),
                    Err(_) => Err("Could not read RAM"),
                    Ok(..) => { self.ram = data; Ok(()) },
                }
            },
        }
    }
}
impl Drop for MBC5 {
    /// Persists battery-backed RAM on teardown; write failures are silently
    /// ignored (there is nowhere to report them during drop).
    fn drop(&mut self) {
        match self.savepath {
            None => {},
            Some(ref path) =>
                {
                    let _ = File::create(path).and_then(|mut f| f.write_all(&*self.ram));
                },
        };
    }
}
impl MBC for MBC5 {
    /// 0x0000-0x3FFF reads fixed bank 0; 0x4000-0x7FFF reads the selected bank.
    /// Out-of-range offsets read as 0 (open bus).
    fn readrom(&self, a: u16) -> u8 {
        let idx = if a < 0x4000 { a as usize }
            else { self.rombank * 0x4000 | ((a as usize) & 0x3FFF) };
        *self.rom.get(idx).unwrap_or(&0)
    }
    /// Reads cartridge RAM; returns 0 while RAM is disabled.
    fn readram(&self, a: u16) -> u8 {
        if !self.ram_on { return 0 }
        self.ram[self.rambank * 0x2000 | ((a as usize) & 0x1FFF)]
    }
    /// ROM-range writes drive the mapper registers.
    fn writerom(&mut self, a: u16, v: u8) {
        match a {
            0x0000 ..= 0x1FFF => self.ram_on = v == 0x0A,
            0x2000 ..= 0x2FFF => self.rombank = (self.rombank & 0x100) | (v as usize),
            0x3000 ..= 0x3FFF => self.rombank = (self.rombank & 0x0FF) | (((v & 0x1) as usize) << 8),
            0x4000 ..= 0x5FFF => self.rambank = (v & 0x0F) as usize,
            0x6000 ..= 0x7FFF => { /* unmapped on MBC5 */ },
            _ => panic!("Could not write to {:04X} (MBC5)", a),
        }
    }
    /// Writes cartridge RAM; ignored while RAM is disabled.
    fn writeram(&mut self, a: u16, v: u8) {
        // Idiom fix: `!self.ram_on` instead of `self.ram_on == false`.
        if !self.ram_on { return }
        self.ram[self.rambank * 0x2000 | ((a as usize) & 0x1FFF)] = v;
    }
}
| mit |
bcanseco/common-bot-library | tests/CommonBotLibrary.Tests/Services/NCalcServiceTests.cs | 1307 | using System;
using System.Threading.Tasks;
using CommonBotLibrary.Services;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using NCalc;
namespace CommonBotLibrary.Tests.Services
{
    [TestClass]
    public class NCalcServiceTests
    {
        // System under test; recreated before every test by InitService.
        private NCalcService Service { get; set; }

        [TestInitialize]
        public void InitService()
        {
            Service = new NCalcService();
        }

        // A null expression should be rejected before evaluation.
        [TestMethod]
        [ExpectedException(typeof(ArgumentException))]
        public async Task Should_Fail_With_Null_Expression()
        {
            var result = await Service.EvaluateAsync(null);
        }

        // An empty expression should be rejected before evaluation.
        [TestMethod]
        [ExpectedException(typeof(ArgumentException))]
        public async Task Should_Fail_With_Blank_Expression()
        {
            var result = await Service.EvaluateAsync("");
        }

        // Syntactically invalid input surfaces NCalc's EvaluationException.
        [TestMethod]
        [ExpectedException(typeof(EvaluationException))]
        public async Task Should_Fail_With_Invalid_Expression()
        {
            var result = await Service.EvaluateAsync("!@#$%");
        }

        // Happy path: arithmetic is evaluated and returned as a string.
        [TestMethod]
        public async Task Should_Work_With_Valid_Expression()
        {
            var result = await Service.EvaluateAsync("2 + 2");
            Assert.AreEqual(4, int.Parse(result));
        }
    }
}
| mit |
lbryio/lbry | lbry/scripts/release.py | 8824 | import os
import re
import io
import sys
import json
import argparse
import unittest
from datetime import date
from getpass import getpass
try:
import github3
except ImportError:
print('To run release tool you need to install github3.py:')
print('')
print(' $ pip install github3.py')
print('')
sys.exit(1)
AREA_RENAME = {
'api': 'API',
'dht': 'DHT'
}
def get_github():
    """Return an authenticated github3 client.

    Reuses a cached token from ~/.lbry-release-tool.json or the GH_TOKEN
    environment variable; otherwise prompts interactively (with 2FA) and
    caches the newly authorized token.
    """
    config_path = os.path.expanduser('~/.lbry-release-tool.json')
    if os.path.exists(config_path):
        with open(config_path, 'r') as config_file:
            config = json.load(config_file)
        return github3.login(token=config['token'])

    token = os.environ.get("GH_TOKEN")
    if not token:
        print('GitHub Credentials')
        username = input('username: ')
        password = getpass('password: ')
        # Request repo scope only; the callback handles two-factor prompts.
        gh = github3.authorize(
            username, password, ['repo'], 'lbry release tool',
            two_factor_callback=lambda: input('Enter 2FA: ')
        )
        with open(config_path, 'w') as config_file:
            json.dump({'token': gh.token}, config_file)
        token = gh.token

    return github3.login(token=token)
def get_labels(pr, prefix):
    """Yield the value part of every PR label shaped like ``<prefix>: value``."""
    marker = f'{prefix}: '
    for label in pr.labels:
        name = label['name']
        if name.startswith(marker):
            yield name[len(marker):]
def get_label(pr, prefix):
    """Return the first ``<prefix>: ...`` label value, or None when absent."""
    return next(get_labels(pr, prefix), None)
BACKWARDS_INCOMPATIBLE = 'backwards-incompatible:'
RELEASE_TEXT = "release-text:"


def _lines_after_marker(desc: str, marker: str):
    """Yield the remainder of every line in ``desc`` that starts with ``marker``."""
    for line in desc.splitlines():
        if line.startswith(marker):
            yield line[len(marker):]


def get_backwards_incompatible(desc: str):
    """Yield text following each 'backwards-incompatible:' line in a PR body."""
    yield from _lines_after_marker(desc, BACKWARDS_INCOMPATIBLE)


def get_release_text(desc: str):
    """Yield text following each 'release-text:' line in a PR body."""
    yield from _lines_after_marker(desc, RELEASE_TEXT)
def get_previous_final(repo, current_release):
    """Return the most recent final release preceding this release candidate."""
    assert current_release.rc is not None, "Need an rc to find the previous final release."
    # repo.releases(n) iterates newest-first; the last yielded item is the
    # oldest of the window, i.e. the previous final release.
    candidates = list(repo.releases(current_release.rc + 1))
    return candidates[-1] if candidates else None
class Version:
    """A major.minor.micro version with an optional release-candidate number."""

    def __init__(self, major=0, minor=0, micro=0, rc=None):
        self.major = int(major)
        self.minor = int(minor)
        self.micro = int(micro)
        self.rc = None if rc is None else int(rc)

    @classmethod
    def from_string(cls, version_string):
        """Parse strings like ``1.2.3`` or ``1.2.3rc4``."""
        major, minor, micro = version_string.split('.')
        rc = None
        if 'rc' in micro:
            micro, rc = micro.split('rc')
        return cls(major, minor, micro, rc)

    @classmethod
    def from_content(cls, content):
        """Extract ``__version__ = "..."`` from a GitHub file-contents object."""
        src = content.decoded.decode('utf-8')
        return cls.from_string(re.search('__version__ = "(.*?)"', src).group(1))

    def increment(self, action):
        """Return a new Version advanced according to ``action``.

        Actions: ``major+rc``/``minor+rc``/``micro+rc`` start a new rc series;
        ``*+rc`` bumps the rc; ``*-rc`` finalizes by dropping the rc suffix.
        """
        cls = self.__class__
        if action == '*-rc':
            assert self.rc is not None, f"Can't drop rc designation because {self} is already not an rc."
            return cls(self.major, self.minor, self.micro)
        if action == '*+rc':
            assert self.rc is not None, "Must already be an rc to increment."
            return cls(self.major, self.minor, self.micro, self.rc + 1)

        assert self.rc is None, f"Can't start a new rc because {self} is already an rc."
        if action == 'major+rc':
            return cls(self.major + 1, rc=1)
        if action == 'minor+rc':
            return cls(self.major, self.minor + 1, rc=1)
        if action == 'micro+rc':
            return cls(self.major, self.minor, self.micro + 1, 1)
        raise ValueError(f'unknown action: {action}')

    @property
    def tag(self):
        """Git tag form, e.g. ``v1.2.3rc4``."""
        return 'v' + str(self)

    def __str__(self):
        base = f'{self.major}.{self.minor}.{self.micro}'
        return base if self.rc is None else f'{base}rc{self.rc}'
def release(args):
    """Compute the next version, build a changelog from merged PRs, and
    (unless --dry-run) commit the version bump, tag it, and open a draft
    GitHub release."""
    gh = get_github()
    repo = gh.repository('lbryio', 'lbry')
    version_file = repo.file_contents('lbry/__init__.py')

    current_version = Version.from_content(version_file)
    print(f'Current Version: {current_version}')
    new_version = current_version.increment(args.action)
    print(f'    New Version: {new_version}')
    print()

    # The changelog window starts at the previous final release when
    # finalizing an rc, otherwise at the current tag.
    if args.action == '*-rc':
        previous_release = get_previous_final(repo, current_version)
    else:
        previous_release = repo.release_from_tag(current_version.tag)

    incompats = []
    release_texts = []
    unlabeled = []
    areas = {}
    # Bucket every PR merged since the previous release by its area/type labels.
    for pr in gh.search_issues(f"merged:>={previous_release._json_data['created_at']} repo:lbryio/lbry"):
        area_labels = list(get_labels(pr, 'area'))
        type_label = get_label(pr, 'type')
        if area_labels and type_label:
            for area_name in area_labels:
                for incompat in get_backwards_incompatible(pr.body):
                    incompats.append(f'  * [{area_name}] {incompat.strip()} ({pr.html_url})')
                for release_text in get_release_text(pr.body):
                    release_texts.append(f'{release_text.strip()} ({pr.html_url})')
                # fixups are folded into the final release notes, not listed.
                if not (args.action == '*-rc' and type_label == 'fixup'):
                    area = areas.setdefault(area_name, [])
                    area.append(f'  * [{type_label}] {pr.title} ({pr.html_url}) by {pr.user["login"]}')
        else:
            unlabeled.append(f'  * {pr.title} ({pr.html_url}) by {pr.user["login"]}')

    area_names = list(areas.keys())
    area_names.sort()

    # Assemble the markdown changelog body.
    body = io.StringIO()
    w = lambda s: body.write(s+'\n')
    w(f'## [{new_version}] - {date.today().isoformat()}')
    if release_texts:
        w('')
        for release_text in release_texts:
            w(release_text)
        w('')
    if incompats:
        w('')
        w(f'### Backwards Incompatible Changes')
        for incompat in incompats:
            w(incompat)
    for area in area_names:
        prs = areas[area]
        area = AREA_RENAME.get(area.lower(), area.capitalize())
        w('')
        w(f'### {area}')
        for pr in prs:
            w(pr)

    print(body.getvalue())

    if unlabeled:
        print('The following PRs were skipped and not included in changelog:')
        for skipped in unlabeled:
            print(skipped)

    if not args.dry_run:
        # Bump __version__ in-place on GitHub, then tag that commit and open
        # a draft release with the generated changelog.
        commit = version_file.update(
            new_version.tag,
            version_file.decoded.decode('utf-8').replace(str(current_version), str(new_version)).encode()
        )['commit']
        repo.create_tag(
            tag=new_version.tag,
            message=new_version.tag,
            sha=commit.sha,
            obj_type='commit',
            tagger=commit.committer
        )
        repo.create_release(
            new_version.tag,
            name=new_version.tag,
            body=body.getvalue(),
            draft=True,
            prerelease=new_version.rc is not None
        )
class TestReleaseTool(unittest.TestCase):
    """Self-tests for Version; run via ``release.py --test``."""

    def test_version_parsing(self):
        # BUG FIX: assertTrue(x, msg) treated the expected string as a
        # failure message, so these checks passed for any non-empty result.
        # assertEqual actually compares the round-tripped version string.
        self.assertEqual(str(Version.from_string('1.2.3')), '1.2.3')
        self.assertEqual(str(Version.from_string('1.2.3rc4')), '1.2.3rc4')

    def test_version_increment(self):
        v = Version.from_string('1.2.3')
        # Same assertTrue -> assertEqual fix as above.
        self.assertEqual(str(v.increment('major+rc')), '2.0.0rc1')
        self.assertEqual(str(v.increment('minor+rc')), '1.3.0rc1')
        self.assertEqual(str(v.increment('micro+rc')), '1.2.4rc1')
        with self.assertRaisesRegex(AssertionError, "Must already be an rc to increment."):
            v.increment('*+rc')
        with self.assertRaisesRegex(AssertionError, "Can't drop rc designation"):
            v.increment('*-rc')
        v = Version.from_string('1.2.3rc3')
        self.assertEqual(str(v.increment('*+rc')), '1.2.3rc4')
        self.assertEqual(str(v.increment('*-rc')), '1.2.3')
        with self.assertRaisesRegex(AssertionError, "already an rc"):
            v.increment('major+rc')
        with self.assertRaisesRegex(AssertionError, "already an rc"):
            v.increment('minor+rc')
        with self.assertRaisesRegex(AssertionError, "already an rc"):
            v.increment('micro+rc')
def test():
    """Run the TestReleaseTool suite with verbose output (``--test`` flag)."""
    runner = unittest.TextTestRunner(verbosity=2)
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(TestReleaseTool)
    runner.run(suite)
def main():
    """CLI entry point: either run self-tests or perform a release action."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--test", default=False, action="store_true", help="run unit tests")
    parser.add_argument("--dry-run", default=False, action="store_true", help="show what will be done")
    # action may be omitted when --test is passed.
    parser.add_argument("action", nargs="?", choices=['major+rc', 'minor+rc', 'micro+rc', '*+rc', '*-rc'])
    args = parser.parse_args()

    if args.test:
        test()
    else:
        release(args)


if __name__ == "__main__":
    main()
| mit |
pact-foundation/pact-go | dsl/interaction_test.go | 2878 | package dsl
import (
"encoding/json"
"reflect"
"testing"
)
// TestInteraction_NewInteraction verifies the fluent builder records the
// provider state and description on the interaction.
func TestInteraction_NewInteraction(t *testing.T) {
	interaction := (&Interaction{}).
		Given("Some state").
		UponReceiving("Some name for the test").
		WithRequest(Request{}).
		WillRespondWith(Response{})

	if got := interaction.State; got != "Some state" {
		t.Fatalf("Expected 'Some state' but got '%s'", got)
	}
	if got := interaction.Description; got != "Some name for the test" {
		t.Fatalf("Expected 'Some name for the test' but got '%s'", got)
	}
}
// TestInteraction_WithRequest checks how WithRequest stores the body:
// plain strings stay strings, structured values keep their concrete type.
func TestInteraction_WithRequest(t *testing.T) {
	// Pass in plain string, should be left alone
	i := (&Interaction{}).
		Given("Some state").
		UponReceiving("Some name for the test").
		WithRequest(Request{
			Body: "somestring",
		})

	content, ok := i.Request.Body.(string)
	if !ok {
		t.Fatalf("must be a string")
	}

	if content != "somestring" {
		t.Fatalf("Expected 'somestring' but got '%s'", content)
	}

	// structured string should be changed to an interface{}
	i = (&Interaction{}).
		Given("Some state").
		UponReceiving("Some name for the test").
		WithRequest(Request{
			Body: map[string]string{
				"foo": "bar",
				"baz": "bat",
			},
		})

	obj := map[string]string{
		"foo": "bar",
		"baz": "bat",
	}
	// NOTE(review): `expect` is marshalled/unmarshalled here but never
	// compared against i.Request.Body; only the type assertion below is
	// actually checked. Consider adding a reflect.DeepEqual comparison.
	var expect interface{}
	body, _ := json.Marshal(obj)
	json.Unmarshal(body, &expect) // nolint:errcheck

	if _, ok := i.Request.Body.(map[string]string); !ok {
		t.Fatal("Expected response to be of type 'map[string]string', but got", reflect.TypeOf(i.Request.Body))
	}
}
// TestInteraction_WillRespondWith mirrors the WithRequest test for the
// response side: strings are untouched, structured bodies keep their type.
func TestInteraction_WillRespondWith(t *testing.T) {
	// Pass in plain string, should be left alone
	i := (&Interaction{}).
		Given("Some state").
		UponReceiving("Some name for the test").
		WithRequest(Request{}).
		WillRespondWith(Response{
			Body: "somestring",
		})

	content, ok := i.Response.Body.(string)
	if !ok {
		t.Fatalf("must be a string")
	}

	if content != "somestring" {
		t.Fatalf("Expected 'somestring' but got '%s'", content)
	}

	// structured string should be changed to an interface{}
	i = (&Interaction{}).
		Given("Some state").
		UponReceiving("Some name for the test").
		WithRequest(Request{}).
		WillRespondWith(Response{
			Body: map[string]string{
				"foo": "bar",
				"baz": "bat",
			},
		})

	obj := map[string]string{
		"foo": "bar",
		"baz": "bat",
	}
	// NOTE(review): as in TestInteraction_WithRequest, `expect` is built but
	// never compared to i.Response.Body; only the type assertion is checked.
	var expect interface{}
	body, _ := json.Marshal(obj)
	json.Unmarshal(body, &expect) // nolint:errcheck

	if _, ok := i.Response.Body.(map[string]string); !ok {
		t.Fatal("Expected response to be of type 'map[string]string', but got", reflect.TypeOf(i.Response.Body))
	}
}
// TestInteraction_isStringLikeObject table-tests isJSONFormattedObject:
// only strings containing a JSON object literal should report true.
func TestInteraction_isStringLikeObject(t *testing.T) {
	cases := map[string]bool{
		"somestring":    false,
		"":              false,
		`{"foo":"bar"}`: true,
	}

	for input, want := range cases {
		if got := isJSONFormattedObject(input); got != want {
			t.Fatal("want", want, "got", got, "for test case", input)
		}
	}
}
| mit |
jupito/podxm | enclosure.py | 4713 | """Enclosure classes."""
import logging
from functools import lru_cache
from jupitotools.media.ylemedia import YleMedia
import pyutils.files
import pyutils.net
import media
import util
log = logging.getLogger(__name__)
def _yle_suffix_kludge(path):
"""TODO: Horrible kludge to adapt to varying prefix."""
if path.suffix == '.flv':
mp3 = path.with_suffix('.mp3')
if mp3.exists():
return mp3
return path
class Enclosure():
    """Feed entry enclosure."""

    def __init__(self, entry, href, length, typ):
        """Create new enclosure."""
        self.entry = entry  # Parent entry.
        self.href = href  # Media URL.
        self.length = length  # XXX: Update when known?
        self.typ = typ  # XXX: Remove?

    def __str__(self):
        return self.href

    # NOTE(review): lru_cache on instance properties keeps a reference to
    # self in the cache, which can delay garbage collection — confirm this
    # is acceptable for the expected object lifetimes.
    @property
    @lru_cache()
    def suffix(self):
        """URL filename suffix (in lowercase)."""
        suffix = pyutils.net.url_suffix(self.href)
        if not suffix:
            # Fall back to the MIME type's subtype, then a placeholder.
            if self.typ:
                parts = self.typ.split('/')
                suffix = '.' + parts[-1]
            else:
                suffix = '.unknown'
        return suffix.lower()

    @property
    @lru_cache()
    def filename(self):
        """Filename on disk."""
        date_str = util.time_fmt(self.entry.date, fmt='isodate')
        title = self.entry.title[:80]
        name = f'{date_str}_{title}{self.suffix}'
        # '/' would split the name into path components; replace it.
        name = name.replace('/', '%')
        return name

    # @property
    # @lru_cache()
    # def filename_slugified(self):
    #     """Filename on disk."""
    #     date_str = util.time_fmt(self.entry.date, fmt='isodate') + '_'
    #     return util.slugify_filename(self.entry.title, prefix=date_str,
    #                                  suffix=self.suffix, max_length=80)

    @property
    @lru_cache()
    def path(self):
        """Path on disk."""
        # return self.entry.feed.directory / self.filename
        return _yle_suffix_kludge(self.entry.feed.directory / self.filename)

    def size(self):
        """Size on disk."""
        return self.path.stat().st_size

    def is_too_big(self, maxsize):
        """Is it too big to download? Maximum size is given in megabytes."""
        return maxsize is not None and (self.length or 0) > maxsize * 1024**2

    def duration(self):
        """Media duration."""
        return media.get_duration(self.path)

    def is_normalized(self):
        """Has loudness been normalized?"""
        return media.get_gain(self.path) is not None

    def download(self):
        """Download file."""
        # log.info('Downloading: %s', self.path)
        pyutils.files.ensure_dir(self.path)
        return pyutils.net.download(self.href, self.path, progress=True)

    def play(self):
        """Play downloaded file."""
        # Resume mid-file when a partial progress fraction was recorded.
        if self.entry.progress is not None and 0 < self.entry.progress < 1:
            start = float(self.entry.progress)
        else:
            start = None
        return media.play_file(self.path, start=start)

    def stream(self):
        """Stream from net."""
        return media.play_stream(self.href)

    def remove(self):
        """Remove from disk."""
        if self.path.exists():
            # self.path.unlink()
            pyutils.files.trash_or_rm(self.path)
        # Subtitles matching the media's stem are only logged, not removed.
        for path in self.path.parent.glob(f'{self.path.stem}.*.srt'):
            logging.warning('Removing subtitle: %s', path)

    def score(self):
        """Enclosure score."""
        # 2 = downloaded and normalized, 1 = downloaded, 0 = not on disk.
        if self.path.exists():
            if self.is_normalized():
                return 2
            return 1
        return 0

    def expire_time(self):
        # Base enclosures never expire; subclasses may override.
        return None
class YleEnclosure(Enclosure):
    """Yle media. No streaming support."""

    def __init__(self, entry):
        """Build the enclosure from the entry's link; size and type unknown."""
        super().__init__(entry, entry.link, None, None)

    @property
    def suffix(self):
        # Downloads are saved as Flash video regardless of the URL.
        return '.flv'

    def download(self):
        """Download via the Yle-specific downloader, including subtitles."""
        pyutils.files.ensure_dir(self.path)
        return media.download_yle(self.href, self.path, sublang=self.sublang(),
                                  verbose=True)

    def stream(self):
        # NOTE(review): base class streams with media.play_stream(); this
        # calls media.stream() -- confirm which helper is intended.
        return media.stream(self.href)

    def sublang(self):
        """Return subtitle language."""
        # Language code is taken from the entry's 'sub' tag, if present.
        return self.entry.get_tags().get('sub')

    def expire_time(self):
        """Time at which the media expires on Yle's servers."""
        ylemedia = YleMedia(self.href)
        return ylemedia.expire_time()
class YoutubeEnclosure(Enclosure):
    """Youtube media.

    Only streaming is supported for now; local download and playback
    raise NotImplementedError.
    """

    def __init__(self, entry):
        """Build the enclosure directly from the entry's link."""
        super().__init__(entry, entry.link, None, None)

    @property
    def suffix(self):
        """Fixed suffix used for the (hypothetical) local file name."""
        return '.flv'

    def download(self):
        """Downloading Youtube media is not implemented."""
        raise NotImplementedError()

    def play(self):
        """Local playback is not implemented; use stream() instead."""
        raise NotImplementedError()
| mit |
shen139/openwebspider | src/_app/_tabs/_searchTabMixin.js | 4564 | var DbClass = require("../../_db/_dbClass");

// Mixin that builds the "Search" tab UI: a query text field, a search
// button, a full-text-search result grid and an info line.
module.exports = function (tab5)
{
    var that = this;

    // Connects to the DB, runs the full-text search for the current query
    // and fills the result grid. Also used as the Enter-key handler.
    function button1_click()
    {
        that.loading(true);
        that.setInfoline("Searching...", 1000);

        // Hide previous results while the new query is running.
        // (owsSearchInfo is declared further down; `var` hoisting makes it
        // visible here by the time this handler can run.)
        that.gridSearch.Visible = false;
        owsSearchInfo.Visible = false;
        that.gridSearch.dataview.clear();

        var dbManager = new DbClass(that.CONF);
        dbManager.connect(null, function (err)
        {
            if (!err)
            {
                // Time the query so the info line can show elapsed seconds.
                var start = new Date();
                dbManager.fulltextSearch(txtQuery.Text, function (results)
                {
                    owsSearchInfo.Caption = results.length + " results in " + ((new Date() - start) / 1000) + " seconds";
                    for (var i = 0; i < results.length; i++)
                    {
                        that.gridSearch.dataview.addItem({
                            "id": i,
                            "page": results[i]["page"],
                            "title": results[i]["title"],
                            "relevancy": results[i]["relevancy"]
                        });
                    }
                    that.gridSearch.grid.autosizeColumns();
                    dbManager.close();
                    that.gridSearch.Visible = true;
                    owsSearchInfo.Visible = true;
                    that.loading(false);
                });
            }
            else
            {
                // db connection error
                that.setInfoline("Database connection ERROR!", 2000);
                that.loading(false);
            }
        });
    }

    // Top container holding the query label, text field and button.
    var container = this.app.create("container", tab5);
    container.Top = 10;
    container.Left = 10;
    container.Position = "absolute";
    container.Width = 556;
    container.Height = 35;
    container.Resizable = false;

    var label = this.app.create("label", container);
    label.Top = 10;
    label.Left = 10;
    label.Position = "absolute";
    label.Caption = "Query";
    label.Width = 60;
    label.Height = 22;

    // Query input; the default text is just a sample query.
    var txtQuery = this.app.create("textfield", container);
    txtQuery.Name = "txtSearchQuery";
    txtQuery.Top = 5;
    txtQuery.Left = 65;
    txtQuery.Position = "absolute";
    txtQuery.Text = "openwebspider download";
    txtQuery.Width = 360;
    txtQuery.Height = 22;
    // Pressing Enter (keycode 13) triggers the search after syncing the text.
    txtQuery.on("keypress", function (self, args)
    {
        if (args["which"] === 13)
        {
            txtQuery.getText(function (self, ret, val)
            {
                ret && button1_click();
            });
        }
    });
    // Persist the query in the configuration (without showing the save bar).
    txtQuery.on("change", function (self, args)
    {
        that.updateConf("HISTORY_SEARCH_QUERY", txtQuery.Text, /* show save: */ false);
    });

    var button1 = this.app.create("button", container);
    button1.Top = 5;
    button1.Left = 445;
    button1.Position = "absolute";
    button1.Caption = "Search";
    button1.Width = 100;
    button1.Height = 22;
    button1.on("click", button1_click);

    // Result grid: one row per matching page, initially hidden and empty.
    this.gridSearch = this.app.create("datagrid", tab5, {
        "Width": 560,
        "Height": 235,
        "dataset": {
            "columns": [
            {
                "id": "page",
                "name": "Page",
                "field": "page",
                "width": 150
            },
            {
                "id": "title",
                "name": "Title",
                "field": "title",
                "width": 150
            },
            {
                "id": "relevancy",
                "name": "Relevancy",
                "field": "relevancy",
                "width": 150
            }],
            "data": []
        }
    });
    this.gridSearch.Top = 76;
    this.gridSearch.Left = 10;
    this.gridSearch.Position = "absolute";
    this.gridSearch.Visible = false;

    // Info line showing "<n> results in <t> seconds"; shown after a search.
    var owsSearchInfo = this.app.create("label", tab5);
    owsSearchInfo.Top = 60;
    owsSearchInfo.Left = 10;
    owsSearchInfo.Position = "absolute";
    owsSearchInfo.Caption = "";
    owsSearchInfo.Width = 400;
    owsSearchInfo.Height = 14;
    owsSearchInfo.Visible = false;
    owsSearchInfo.addClass("label-no-wrap");

    // Link to the standalone search page.
    var owsSearchLink = this.app.create("hyperlink", tab5);
    owsSearchLink.Top = 60;
    owsSearchLink.Left = 500;
    owsSearchLink.Position = "absolute";
    owsSearchLink.Caption = "Search page";
    owsSearchLink.Width = 70;
    owsSearchLink.Height = 14;
    owsSearchLink.Link = "/search";
    owsSearchLink.addClass("label-no-wrap");
};
| mit |
bugsancho/Academy-Platform | AcademyPlatform.Web/App_Start/BundleConfig.cs | 2717 | namespace AcademyPlatform.Web
{
using System.Web.Optimization;
    /// <summary>
    /// Registers the script and style bundles used by the site.
    /// </summary>
    public class BundleConfig
    {
        // For more information on bundling, visit http://go.microsoft.com/fwlink/?LinkId=301862
        public static void RegisterBundles(BundleCollection bundles)
        {
            // jQuery from the Google CDN, with the local file as fallback.
            bundles.Add(new ScriptBundle("~/bundles/jquery", "//ajax.googleapis.com/ajax/libs/jquery/2.1.4/jquery.min.js").Include(
                        "~/Scripts/jquery-{version}.js"));

            bundles.Add(new ScriptBundle("~/bundles/jqueryval").Include(
                        "~/Scripts/jquery.validate*"));

            // Use the development version of Modernizr to develop with and learn from. Then, when you're
            // ready for production, use the build tool at http://modernizr.com to pick only the tests you need.
            bundles.Add(new ScriptBundle("~/bundles/modernizr", "https://cdnjs.cloudflare.com/ajax/libs/modernizr/2.8.3/modernizr.min.js").Include(
                        "~/Scripts/modernizr-*"));

            bundles.Add(new ScriptBundle("~/bundles/bootstrap").Include(
                      "~/Scripts/bootstrap.js",
                      "~/Scripts/respond.js"));

            bundles.Add(new StyleBundle("~/Content/css").Include(
                      "~/Content/bootstrap.css",
                      "~/Content/project-theme.css",
                      "~/Content/animate.css",
                      "~/Content/animations.css",
                      "~/Content/site.css"));

            // Theme plugin scripts (sliders, smooth scrolling, isotope, ...).
            bundles.Add(new ScriptBundle("~/project-theme-scripts").Include(
                //"~/Scripts/Plugins/jasny-bootstrap.js",
                "~/Scripts/Plugins/SmoothScroll.js",
                "~/Scripts/Plugins/jquery.themepunch.revolution.js",
                "~/Scripts/Plugins/jquery.themepunch.tools.min.js",
                "~/Scripts/Plugins/jquery.waypoints.js",
                "~/Scripts/Plugins/jquery.browser.js",
                "~/Scripts/template.js",
                "~/Scripts/custom.js",
                "~/Scripts/Plugins/isotope.pkgd.js",
                "~/Scripts/Plugins/isotope.pkgd.min.js",
                "~/Scripts/Plugins/jquery.vide.js"));

            // Theme stylesheets (skin, fonts, hover effects, slider, animations).
            bundles.Add(new StyleBundle("~/project-theme").Include(
                "~/Content/project-theme.css",
                "~/Content/ProjectThemeSkins/light_blue.css",
                "~/Content/Fonts/font-awesome.css",
                "~/Content/hover.css",
                "~/Content/rs-slider.css",
                "~/Content/jasny-bootstrap.css",
                "~/Content/animate.css"));

            // Force bundling/minification even when compilation debug="true".
            BundleTable.EnableOptimizations = true;
        }
    }
}
| mit |
jkrth617/GorillaSurvey | app/controllers/question.rb | 614 | get '/surveys/:id/questions/new' do |survey_id|
  # Render the new-question form for a survey.
  # AJAX requests get only the form partial (no layout); regular requests
  # get the full page.
  @question = Question.new
  @survey_id = survey_id
  if request.xhr?
    erb :'question/_new-form', layout: false, locals: {question: @question, survey_id: @survey_id}
  else
    erb :'question/new'
  end
end
# Create a question under the given survey.
# On success, continue to choice creation for the new question; on
# validation failure, re-render the form with the error messages.
post '/surveys/:id/questions' do |survey_id|
  survey = Survey.find(survey_id)
  @question = survey.questions.new(params[:question])
  if @question.save
    flash[:message] = "adding a choice for #{@question.name}"
    redirect "/questions/#{@question.id}/choices/new"
  else
    @errors = @question.errors.full_messages
    erb :'question/new'
  end
end
| mit |
sljuka/bizflow | lib/bizflow/semantic_model/handler.rb | 352 | module Bizflow
module SemanticModel
class Handler
attr_accessor :namespace, :name, :description
def initialize(name, options = {})
@name = name
@namespace = options[:namespace]
@description = options[:description]
end
def full_name
"#{namespace}:#{name}"
end
end
end
end | mit |
kadjahoe/Javascript-Timer | timer.js | 4714 | /**********************************************************
* *
* Title : JavaScript Countdown Clock *
* Author : Katherine Adjahoe *
* Created : February 18, 2017 *
* Website : www.preview.manifestare.com/javascript timer *
* Email : support@manifestare.com *
* *
***********************************************************/
let countdown;// setInterval handle for the countdown clock
let serviceInSession;// setTimeout handle used while the event is live
const clock = document.getElementById('clock');// div that controls the clock container
const livestreamButton = document.getElementById('door');// div that controls the button for the user to click to enter the live stream
const daysUnit = document.querySelector('.days');// span element that displays the amount of days
const hoursUnit = document.querySelector('.hours');// span element that displays the amount of hours
const minutesUnit = document.querySelector('.minutes');// span element that displays the amount of minutes
const secondsUnit = document.querySelector('.seconds');// span element that displays the amount of seconds
const startDate = new Date(2017, 1, 19, 11, 30, 00).getTime();// initial date and time the countdown clock started from (Year, Month (0-based), Day, Hours, Minutes, Seconds)
// Kick off: count down to startDate if it is still in the future, otherwise
// roll it forward week by week until a future date is found.
startDate > Date.now() ? timer(startDate) : calculateFutureDate(startDate);
// timer takes the target date in milliseconds and drives the countdown.
// When the target is reached it swaps the clock for the live-stream button
// for 2 hours, then schedules the next week's countdown.
function timer(date){
    // countdown holds the entire timer functionality
    countdown = setInterval(()=>{
        const now = Date.now();// current date and time
        const differenceInTime = date - now;// distance between current time and future time of event
        // checks timer to see if the distance is zero and if zero
        if(differenceInTime < 0){
            clearInterval(countdown);// clear timer
            clock.classList.add("hide");// hide the clock div element
            livestreamButton.classList.remove("hide");// show the live stream button div element
            // keeps the live stream button div element on the screen for 2 hours (7200000 ms), then
            serviceInSession = setTimeout(()=>{
                livestreamButton.classList.add("hide");// hide live stream button div element
                calculateFutureDate(date);// pass the date that countdown was counting down to, to the calculateFutureDate function
                clock.classList.remove("hide");// show the clock again
            },7200000); // after 2 hours do what's inside the setTimeout function
            return;
        }
        timeLeft(differenceInTime);// each iteration of setInterval send updated distance to timeLeft function
    }, 1000);// every 1 second
}
// timeLeft takes a remaining time in milliseconds and renders it in the
// Days/Hours/Minutes/Seconds spans, zero-padding each unit to two digits.
function timeLeft(time){
    const days = Math.floor(time /(1000 * 60 * 60 * 24));// milliseconds into days
    const hours = Math.floor((time % (1000 * 60 * 60 * 24)) / (1000 * 60 * 60));// milliseconds into hours
    const minutes = Math.floor((time % (1000 * 60 * 60)) / (1000 * 60));// milliseconds into minutes
    const seconds = Math.floor((time % (1000 * 60)) / 1000);// milliseconds into seconds

    // Prefix a leading zero to any value below 10 before display.
    const displayDays = `${days < 10 ? '0' : '' }${days}`;// days string that will be displayed
    const displayHours = `${hours < 10 ? '0' : ''}${hours}`;// hours string that will be displayed
    const displayMinutes = `${minutes < 10 ? '0' : ''}${minutes}`;// minutes string that will be displayed
    const displaySeconds = `${seconds < 10 ? '0' : ''}${seconds}`;// seconds string that will be displayed

    //displays the time strings on the page individually
    daysUnit.textContent = displayDays;
    hoursUnit.textContent = displayHours;
    minutesUnit.textContent = displayMinutes;
    secondsUnit.textContent = displaySeconds;

    // next line is for testing purposes
    // console.log(displayDays+" : " +displayHours+" : "+displayMinutes+" : "+displaySeconds);
}
// calculateFutureDate takes a date in milliseconds, advances it by one
// week and restarts the countdown toward that new date.
function calculateFutureDate (dateTochange){
    const newDate = new Date(dateTochange);// converts it to date format
    // Add 7 days. (Was `+07`: a legacy octal literal, which is a syntax
    // error in strict mode / ES modules; decimal 7 has the same value.)
    const weeklyDate = newDate.setDate(newDate.getDate() + 7);
    timer(weeklyDate);// sends it to the timer function
}
// liveStream navigates the current window to the page hosting the live
// stream (invoked from the live-stream button).
function liveStream (){
    window.location.assign("http://www.clcconthemove.org/clccLivestream.html");
} | mit |
jenyayel/WebStupidSolution | SS.Tests/DAL/ContentPageRepoTests.cs | 2402 | using System;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using Autofac;
using SS.DAL.DB;
using SS.Contracts.DAL;
using System.Linq;
namespace SS.Tests.DAL
{
    /// <summary>
    /// A set of tests for <see cref="SS.DAL.DB.ContentPageRepository"/>
    /// Each test runs against a freshly re-seeded database (see
    /// <c>RepoHelpers.Initialize</c>) and resolves the repository through
    /// an Autofac container.
    /// </summary>
    [TestClass]
    public class ContentPageRepoTests
    {
        // IoC container built per test; disposed in Cleanup.
        private IContainer _iocContainer;
        // Repository under test, resolved from the container.
        private IContentPageRepository _contentRepo;

        [TestInitialize]
        public void Initialize()
        {
            // ensure, cleanup, seed db
            RepoHelpers.Initialize();

            // configure IoC container
            ContainerBuilder builder = new ContainerBuilder();
            builder.RegisterType<ContentPageRepository>().As<IContentPageRepository>();
            builder.RegisterType<ConnectionFactory>().As<IDbConnectionFactory>()
                .WithParameter("connectionString", RepoHelpers.CONN_STRING);
            _iocContainer = builder.Build();
            _contentRepo = _iocContainer.Resolve<IContentPageRepository>();
        }

        [TestCleanup]
        public void Cleanup()
        {
            _iocContainer.Dispose();
        }

        /// <summary>Looking up a URL that does not exist returns null.</summary>
        [TestMethod]
        public void GetPageByUrlNotExisting()
        {
            // #### prepare
            var url = "fakeurl";

            // #### act
            var page = _contentRepo.GetByUrl(url);

            // #### assert
            Assert.IsNull(page, "Page is not null");
        }

        /// <summary>GetById is intentionally unsupported for content pages.</summary>
        [TestMethod]
        [ExpectedException(typeof(NotSupportedException))]
        public void GetPageByIdIsNotImplemented()
        {
            // #### act
            _contentRepo.GetById(new object());

            // #### assert
            // Done via attribute
        }

        /// <summary>GetAll returns every seeded page.</summary>
        [TestMethod]
        public void GetPagesAll()
        {
            // #### prepare: insert two rows directly via SQL.
            var sql = @"INSERT INTO [{0}] (url, Title, Content) VALUES ('one', 'Title One', 'Content One') INSERT INTO [{0}] (url, Title, Content) VALUES ('two', 'Title Two', 'Content Two')";
            RepoHelpers.ExecuteSQL(String.Format(sql, typeof(ContentPage).Name));

            // #### act
            var pages = _contentRepo.GetAll();

            // #### assert
            Assert.IsTrue(pages.Count() > 1, "Too few pages");
            Assert.IsTrue(pages.Any(a => a.Title.Equals("Title One")), "Page missing");
        }
    }
}
| mit |
usine/wp-usine | theme/elements/comments-fight.php | 1753 | <?php
/**
* Template for support comments.
*
* @package @@name
*/
?>
<?php
// Build and render the support-comment form with localized labels and a
// custom textarea field.
$args = array(
	'comment_notes_before' => '',
	'comment_notes_after' => '',
	'title_reply' => __( 'Ecrire un message de soutien', 'usine' ),
	'label_submit' => __( 'Publier le message', 'usine' ),
	'comment_field' => '<p class="comment-form-comment"><label for="comment">' . _x( 'Message', 'noun', 'usine' ) . '</label><br><textarea id="comment" name="comment" class="form-control" aria-required="true"></textarea></p>',
);
comment_form( $args );

// Render the approved comment list (with a custom walker) when there are any.
if ( get_comments_number() ) :
	?>
	<section class="commentlist">
		<?php
		// Custom walker renders each approved comment for this post.
		wp_list_comments( array(
			'style' => 'div',
			'short_ping' => true,
			'avatar_size' => 40,
			'type' => 'all',
			'reply_text' => __( 'Reply', 'usine' ),
			'page' => '',
			'per_page' => '',
			'reverse_top_level' => null,
			'reverse_children' => '',
			'walker' => new MInc_Walker_Comment_ZipGun(),
		), get_comments( array( 'post_id' => get_the_ID(), 'status' => 'approve' ) ) );
		?>
	</section>
	<?php /* Pagination links, only when comment paging is enabled. */ ?>
	<?php if ( get_comment_pages_count() > 1 && get_option( 'page_comments' ) ) : ?>
		<nav class="navigation comment__navigation" role="navigation">
			<div class="comment-nav-prev"><?php previous_comments_link( __( '&larr; Previous Comments', 'usine' ) ); ?></div>
			<div class="comment-nav-next"><?php next_comments_link( __( 'More Comments &rarr;', 'usine' ) ); ?></div>
		</nav>
	<?php endif; ?>
	<?php if ( ! comments_open() ) : ?>
		<p class="no-comments"><?php esc_html_e( 'Comments are closed.' , 'usine' ); ?></p>
	<?php endif; ?>
<?php else : ?>
	<ul id="comment__list"></ul>
<?php endif; ?>
| mit |
denniscollective/dragonfly.go | dragonfly/dragonfly_test.go | 2124 | package dragonfly_test
import (
//"fmt"
"github.com/denniscollective/dragonfly.go/dragonfly"
"testing"
)
const stubB64Job string = "W1siZmYiLCIvVXNlcnMvZGVubmlzL3dvcmtzcGFjZS96aXZpdHkvcHVibGljL2NvbnRlbnQvcGhvdG9zZXRzL29yaWdpbmFsc19hcmNoaXZlLzAwMC8wMDAwMDAvMDAwMDAwMDA3LzAwMDAwMDAwMjQtaC1vcmlnLmpwZyJdLFsicCIsInRodW1iIiwiMjB4MjAiXV0"
// TestFetch verifies that ImageFor resolves a known job string to a file
// on disk.
func TestFetch(t *testing.T) {
	file, err := dragonfly.ImageFor(stubB64Job)
	if err != nil {
		// Was "ImgFor(stub) failed": no such function exists; name the real one.
		t.Errorf("ImageFor(stub) failed %s", err)
	}
	if file == nil || len(file.Name()) < 10 {
		t.Error("expected a file Object")
	}
}
// TestFirstStepFailingErrorPropigation checks that an error from the first
// job step (fetching a nonexistent file) propagates out of ImageFor
// unchanged.
func TestFirstStepFailingErrorPropigation(t *testing.T) {
	jobstr := "W1siZmYiLCJwYXJ0eV90aW1lIl0sWyJwIiwidGh1bWIiLCIyMHgyMCJdXQ" //fetches a nonexistent file called partytime in step one
	_, err := dragonfly.ImageFor(jobstr)
	if err == nil {
		t.Error("nonexistent file party_time is supposed to fail fetching")
		return
	}
	if err.Error() != "open party_time: no such file or directory" {
		// Was "Deconde": typo for "Decode".
		t.Errorf("Decode job should have gotten fetch file failed, got %s", err)
	}
}
// TestDecodeThingThatNeedsTwoEquals decodes a base64 job string whose
// standard encoding would require two '=' padding characters.
func TestDecodeThingThatNeedsTwoEquals(t *testing.T) {
	jobstr := "W1siZmYiLCIvVXNlcnMvZGVubmlzL3dvcmtzcGFjZS96aXZpdHkvcHVibGljL2ltYWdlcy9pY29ucy9kZWZhdWx0XzI1Ni5qcGciXSxbInAiLCJ0aHVtYiIsIjgweDgwIyJdXQ"
	job, err := dragonfly.Decode(jobstr)
	if err != nil {
		// Was "Deconde": typo for "Decode".
		t.Errorf("Decode job got error %s", err)
	}
	if len(job.Steps) != 2 {
		t.Error("job should have two steps")
	}
}
// TestDecodeDragonfly decodes the shared stub job string and checks the
// resulting job structure.
func TestDecodeDragonfly(t *testing.T) {
	job, err := dragonfly.Decode(stubB64Job)
	if err != nil {
		// Was "Deconde": typo for "Decode".
		t.Errorf("Decode job got error %s", err)
	}
	if len(job.Steps) != 2 {
		t.Error("job should have two steps")
	}
	// NOTE(review): the step-level assertions below are disabled -- confirm
	// whether Step exposes Command/Args before re-enabling them.
	//if job.Steps[0].Command != "ff" {
	//t.Error("the first test of the stub is supposed to be fetch File")
	//}
	//if args := job.Steps[1].Args; args[0] != "thumb" && args[1] != "20x20" {
	//t.Error("second step should be a resize to thumbnail 20x20 job")
	//}
}
// TestDecodeFailse verifies that garbage input makes Decode return an
// error together with an empty (zero-step) job.
func TestDecodeFailse(t *testing.T) {
	job, err := dragonfly.Decode("this is y i'm hawt")
	if err == nil {
		t.Error("Decode errors aren't propagating")
	}
	if len(job.Steps) != 0 {
		// Was "Decode return a nil job": grammar fix for the diagnostic.
		t.Error("Decode should return an empty job")
	}
}
| mit |
tranduong/documentlib-client | src/js/controllers/rating-ctrl.js | 303 | 'use strict';

angular
    .module('SDLMSys')
    .controller('RatingCtrl', ['$scope', RatingCtrl]);

// Controller for document ratings. Both handlers below are empty stubs
// that still need to be implemented.
function RatingCtrl($scope) {
    // user rate the document
    // TODO: implement -- persist `score` for (user_id, doc_id).
    $scope.rate = function(user_id, doc_id, score)
    {
    }

    // get the self rating of user
    // TODO: implement -- fetch and return the current user's own rating.
    $scope.getMyRating = function(user_id)
    {
    }
} | mit |
maxdavidson/structly | src/__tests__/view/numbers.ts | 577 | import { numberSchemaData } from '../_helpers';
import { getBuffer } from '../../utils';
import { createView } from '../../view';
// For every numeric schema type, check that a freshly created view starts
// zeroed, exposes its value as a plain number, and round-trips an assigned
// value through the underlying buffer (compared against the equivalent
// typed array's bytes).
for (const { schema, constructor } of numberSchemaData) {
  test(`view of ${schema.numberTag}`, () => {
    const view = createView(schema);

    expect(typeof view.value).toBe('number');
    expect(view.value).toBe(0);
    // Backing buffer must match a one-element typed array holding 0.
    expect(getBuffer(view).equals(getBuffer(new constructor([0])))).toBe(true);

    view.value = 42;
    expect(view.value).toBe(42);
    // After assignment the buffer must reflect the new value.
    expect(getBuffer(view).equals(getBuffer(new constructor([42])))).toBe(true);
  });
}
| mit |
timtroendle/urban-occupants-paper | scripts/plot/geopandasplotting.py | 22175 | """
The following is an excerpt of a -- as of 2017-02-27 -- not yet released
geopandas version that provides improved plotting feature compared to the
currently released one.
Copyright (c) 2013-2016, GeoPandas developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Enthought, Inc. nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
import warnings
import numpy as np
import pandas as pd
from six import next
from six.moves import xrange
def _flatten_multi_geoms(geoms, colors):
"""
Returns Series like geoms and colors, except that any Multi geometries
are split into their components and colors are repeated for all component
in the same Multi geometry. Maintains 1:1 matching of geometry to color.
"Colors" are treated opaquely and so can actually contain any values.
Returns
-------
components : list of geometry
component_colors : list of whatever type `colors` contains
"""
components, component_colors = [], []
# precondition, so zip can't short-circuit
assert len(geoms) == len(colors)
for geom, color in zip(geoms, colors):
if geom.type.startswith('Multi'):
for poly in geom:
components.append(poly)
# repeat same color for all components
component_colors.append(color)
else:
components.append(geom)
component_colors.append(color)
return components, component_colors
def plot_polygon_collection(ax, geoms, colors_or_values, plot_values,
                            vmin=None, vmax=None, cmap=None,
                            edgecolor='black', alpha=0.5, linewidth=1.0, **kwargs):
    """
    Plots a collection of Polygon and MultiPolygon geometries to `ax`

    Parameters
    ----------

    ax : matplotlib.axes.Axes
        where shapes will be plotted

    geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)

    colors_or_values : a sequence of `N` values or RGBA tuples
        It should have 1:1 correspondence with the geometries (not their components).

    plot_values : bool
        If True, `colors_or_values` is interpreted as a list of values, and will
        be mapped to colors using vmin/vmax/cmap (which become required).
        Otherwise `colors_or_values` is interpreted as a list of colors.

    Returns
    -------

    collection : matplotlib.collections.Collection that was plotted
    """
    from descartes.patch import PolygonPatch
    from matplotlib.collections import PatchCollection

    # Split Multi geometries so each patch gets its own (repeated) color/value.
    components, component_colors_or_values = _flatten_multi_geoms(
        geoms, colors_or_values)

    # PatchCollection does not accept some kwargs.
    if 'markersize' in kwargs:
        del kwargs['markersize']
    collection = PatchCollection([PolygonPatch(poly) for poly in components],
                                 linewidth=linewidth, edgecolor=edgecolor,
                                 alpha=alpha, **kwargs)

    if plot_values:
        # Numeric values: let matplotlib map them through cmap/vmin/vmax.
        collection.set_array(np.array(component_colors_or_values))
        collection.set_cmap(cmap)
        collection.set_clim(vmin, vmax)
    else:
        # set_color magically sets the correct combination of facecolor and
        # edgecolor, based on collection type.
        collection.set_color(component_colors_or_values)
        # If the user set facecolor and/or edgecolor explicitly, the previous
        # call to set_color might have overridden it (remember, the 'color' may
        # have come from plot_series, not from the user). The user should be
        # able to override matplotlib's default behavior, by setting them again
        # after set_color.
        if 'facecolor' in kwargs:
            collection.set_facecolor(kwargs['facecolor'])
        if edgecolor:
            collection.set_edgecolor(edgecolor)

    ax.add_collection(collection, autolim=True)
    ax.autoscale_view()
    return collection
def plot_linestring_collection(ax, geoms, colors_or_values, plot_values,
                               vmin=None, vmax=None, cmap=None,
                               linewidth=1.0, **kwargs):
    """
    Plots a collection of LineString and MultiLineString geometries to `ax`

    Parameters
    ----------

    ax : matplotlib.axes.Axes
        where shapes will be plotted

    geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be mixed)

    colors_or_values : a sequence of `N` values or RGBA tuples
        It should have 1:1 correspondence with the geometries (not their components).

    plot_values : bool
        If True, `colors_or_values` is interpreted as a list of values, and will
        be mapped to colors using vmin/vmax/cmap (which become required).
        Otherwise `colors_or_values` is interpreted as a list of colors.

    Returns
    -------

    collection : matplotlib.collections.Collection that was plotted
    """
    from matplotlib.collections import LineCollection

    # Split Multi geometries so each segment gets its own (repeated) color/value.
    components, component_colors_or_values = _flatten_multi_geoms(
        geoms, colors_or_values)

    # LineCollection does not accept some kwargs.
    if 'markersize' in kwargs:
        del kwargs['markersize']
    # LineCollection wants plain (x, y) vertex arrays, one per linestring.
    segments = [np.array(linestring)[:, :2] for linestring in components]
    collection = LineCollection(segments,
                                linewidth=linewidth, **kwargs)

    if plot_values:
        # Numeric values: let matplotlib map them through cmap/vmin/vmax.
        collection.set_array(np.array(component_colors_or_values))
        collection.set_cmap(cmap)
        collection.set_clim(vmin, vmax)
    else:
        # set_color magically sets the correct combination of facecolor and
        # edgecolor, based on collection type.
        collection.set_color(component_colors_or_values)
        # If the user set facecolor and/or edgecolor explicitly, the previous
        # call to set_color might have overridden it (remember, the 'color' may
        # have come from plot_series, not from the user). The user should be
        # able to override matplotlib's default behavior, by setting them again
        # after set_color.
        if 'facecolor' in kwargs:
            collection.set_facecolor(kwargs['facecolor'])
        if 'edgecolor' in kwargs:
            collection.set_edgecolor(kwargs['edgecolor'])

    ax.add_collection(collection, autolim=True)
    ax.autoscale_view()
    return collection
def plot_point_collection(ax, geoms, colors_or_values,
                          vmin=None, vmax=None, cmap=None,
                          marker='o', markersize=2, **kwargs):
    """
    Plots a collection of Point geometries to `ax`

    Parameters
    ----------

    ax : matplotlib.axes.Axes
        where shapes will be plotted

    geoms : sequence of `N` Points

    colors_or_values : sequence of color or sequence of numbers
        can be a sequence of color specifications of length `N` or a sequence
        of `N` numbers to be mapped to colors using vmin, vmax, and cmap.

    Returns
    -------

    collection : matplotlib.collections.Collection that was plotted
    """
    x = [p.x for p in geoms]
    y = [p.y for p in geoms]

    # matplotlib ax.scatter requires RGBA color specifications to be a single 2D
    # array, NOT merely a list of 1D arrays. Materializing the sequence into a
    # single np.array achieves that while leaving 1D value sequences untouched.
    # (Was a pointless `for _, element in enumerate(...)` comprehension.)
    colors_or_values = np.array(list(colors_or_values))

    collection = ax.scatter(x, y, c=colors_or_values,
                            vmin=vmin, vmax=vmax, cmap=cmap,
                            marker=marker, s=markersize, **kwargs)
    return collection
def gencolor(N, colormap='Set1'):
    """
    Color generator intended to work with one of the ColorBrewer
    qualitative color scales.

    Suggested values of colormap are the following:

        Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3

    (although any matplotlib colormap will work).
    """
    from matplotlib import cm
    # Cap the palette at 9 discrete colors and cycle through it N times.
    n_colors = min(N, 9)
    palette = cm.get_cmap(colormap, n_colors)(range(n_colors))
    for i in xrange(N):
        yield palette[i % n_colors]
def plot_series(s, cmap='Set1', color=None, ax=None, linewidth=1.0,
                figsize=None, **color_kwds):
    """ Plot a GeoSeries

    Generate a plot of a GeoSeries geometry with matplotlib.

    Parameters
    ----------

    Series
        The GeoSeries to be plotted. Currently Polygon,
        MultiPolygon, LineString, MultiLineString and Point
        geometries can be plotted.

    cmap : str (default 'Set1')
        The name of a colormap recognized by matplotlib. Any
        colormap will work, but categorical colormaps are
        generally recommended. Examples of useful discrete
        colormaps include:

            Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3

    color : str (default None)
        If specified, all objects will be colored uniformly.

    ax : matplotlib.pyplot.Artist (default None)
        axes on which to draw the plot

    linewidth : float (default 1.0)
        Line width for geometries.

    figsize : pair of floats (default None)
        Size of the resulting matplotlib.figure.Figure. If the argument
        ax is given explicitly, figsize is ignored.

    **color_kwds : dict
        Color options to be passed on to the actual plot function

    Returns
    -------

    matplotlib axes instance
    """
    # Accept the deprecated 'colormap'/'axes' spellings but warn about them.
    if 'colormap' in color_kwds:
        warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
                      "(for consistency with matplotlib)", FutureWarning)
        cmap = color_kwds.pop('colormap')
    if 'axes' in color_kwds:
        warnings.warn("'axes' is deprecated, please use 'ax' instead "
                      "(for consistency with pandas)", FutureWarning)
        ax = color_kwds.pop('axes')

    import matplotlib.pyplot as plt
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
        ax.set_aspect('equal')

    # One color per geometry: either the uniform user color or a cycling
    # palette from gencolor().
    num_geoms = len(s.index)
    if color:
        colors = np.array([color] * num_geoms)
    else:
        color_generator = gencolor(len(s), colormap=cmap)
        colors = np.array([next(color_generator) for _ in xrange(num_geoms)])

    # plot all Polygons and all MultiPolygon components in the same collection
    poly_idx = np.array(
        (s.geometry.type == 'Polygon') | (s.geometry.type == 'MultiPolygon'))
    polys = s.geometry[poly_idx]
    if not polys.empty:
        # Legacy behavior applies alpha to fill but not to edges. This requires
        # plotting them separately (at big performance expense).
        if linewidth > 0 and color_kwds.get('alpha', 0.5) < 1.0:
            # Plot the fill with default or user-specified alpha, but do not
            # draw outlines.
            plot_polygon_collection(ax, polys, colors[poly_idx], False,
                                    linewidth=0, **color_kwds)
            # Draw the edges, fully opaque, but no facecolor.
            edges_kwds = color_kwds.copy()
            edges_kwds['alpha'] = 1
            edges_kwds['facecolor'] = 'none'
            plot_polygon_collection(ax, polys, colors[poly_idx], False,
                                    linewidth=linewidth, **edges_kwds)
        else:
            # Optimization: if no alpha on fill, or if no edges, we can plot
            # everything in one go.
            plot_polygon_collection(ax, polys, colors[poly_idx], False,
                                    linewidth=linewidth, **color_kwds)

    # plot all LineStrings and MultiLineString components in same collection
    line_idx = np.array(
        (s.geometry.type == 'LineString') |
        (s.geometry.type == 'MultiLineString'))
    lines = s.geometry[line_idx]
    if not lines.empty:
        plot_linestring_collection(ax, lines, colors[line_idx], False,
                                   linewidth=linewidth, **color_kwds)

    # Points are drawn directly via ax.scatter.
    point_idx = np.array(s.geometry.type == 'Point')
    points = s.geometry[point_idx]
    if not points.empty:
        plot_point_collection(ax, points, colors[point_idx], **color_kwds)

    plt.draw()
    return ax
def plot_dataframe(s, column=None, cmap=None, color=None, linewidth=1.0,
categorical=False, legend=False, ax=None,
scheme=None, k=5, vmin=None, vmax=None, figsize=None,
**color_kwds):
""" Plot a GeoDataFrame
Generate a plot of a GeoDataFrame with matplotlib. If a
column is specified, the plot coloring will be based on values
in that column. Otherwise, a categorical plot of the
geometries in the `geometry` column will be generated.
Parameters
----------
GeoDataFrame
The GeoDataFrame to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString and Point
geometries can be plotted.
column : str (default None)
The name of the column to be plotted. Ignored if `color` is also set.
categorical : bool (default False)
If False, cmap will reflect numerical values of the
column being plotted. For non-numerical columns (or if
column=None), this will be set to True.
cmap : str (default 'Set1')
The name of a colormap recognized by matplotlib.
color : str (default None)
If specified, all objects will be colored uniformly.
linewidth : float (default 1.0)
Line width for geometries.
legend : bool (default False)
Plot a legend. Ignored if no `column` is given, or if `color` is given.
ax : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
scheme : pysal.esda.mapclassify.Map_Classifier
Choropleth classification schemes (requires PySAL)
k : int (default 5)
Number of classes (ignored if scheme is None)
vmin : None or float (default None)
Minimum value of cmap. If None, the minimum data value
in the column to be plotted is used.
vmax : None or float (default None)
Maximum value of cmap. If None, the maximum data value
in the column to be plotted is used.
figsize
Size of the resulting matplotlib.figure.Figure. If the argument
axes is given explicitly, figsize is ignored.
**color_kwds : dict
Color options to be passed on to the actual plot function
Returns
-------
matplotlib axes instance
"""
if 'colormap' in color_kwds:
warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
"(for consistency with matplotlib)", FutureWarning)
cmap = color_kwds.pop('colormap')
if 'axes' in color_kwds:
warnings.warn("'axes' is deprecated, please use 'ax' instead "
"(for consistency with pandas)", FutureWarning)
ax = color_kwds.pop('axes')
if column and color:
warnings.warn("Only specify one of 'column' or 'color'. Using 'color'.",
SyntaxWarning)
column = None
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.colors import Normalize
from matplotlib import cm
if column is None:
return plot_series(s.geometry, cmap=cmap, color=color,
ax=ax, linewidth=linewidth, figsize=figsize,
**color_kwds)
if s[column].dtype is np.dtype('O'):
categorical = True
# Define `values` as a Series
if categorical:
if cmap is None:
cmap = 'Set1'
categories = list(set(s[column].values))
categories.sort()
valuemap = dict([(k, v) for (v, k) in enumerate(categories)])
values = np.array([valuemap[k] for k in s[column]])
else:
values = s[column]
if scheme is not None:
binning = __pysal_choro(values, scheme, k=k)
values = np.array(binning.yb)
# set categorical to True for creating the legend
categorical = True
binedges = [binning.yb.min()] + binning.bins.tolist()
categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1])
for i in range(len(binedges)-1)]
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
ax.set_aspect('equal')
mn = values.min() if vmin is None else vmin
mx = values.max() if vmax is None else vmax
# plot all Polygons and all MultiPolygon components in the same collection
poly_idx = np.array(
(s.geometry.type == 'Polygon') | (s.geometry.type == 'MultiPolygon'))
polys = s.geometry[poly_idx]
if not polys.empty:
# Legacy behavior applies alpha to fill but not to edges. This requires
# plotting them separately (at big performance expense).
if linewidth > 0 and color_kwds.get('alpha', 0.5) < 1.0:
# Plot the fill with default or user-specified alpha, but do not
# draw outlines.
plot_polygon_collection(ax, polys, values[poly_idx], True,
vmin=mn, vmax=mx, cmap=cmap,
linewidth=0, **color_kwds)
# Draw the edges, fully opaque, but no facecolor.
edges_kwds = color_kwds.copy()
edges_kwds['alpha'] = 1
edges_kwds['facecolor'] = 'none'
# Setting plot_values=False would cause the array values' colors to
# override edgecolor. By setting color instead, matplotlib will
# respect edgecolor if set.
plot_polygon_collection(ax, polys, ['black'] * len(polys), False,
linewidth=linewidth, **edges_kwds)
else:
# Optimization: if no alpha on fill, or if no edges, we can plot
# everything in one go.
plot_polygon_collection(ax, polys, values[poly_idx], True,
vmin=mn, vmax=mx, cmap=cmap,
linewidth=linewidth, **color_kwds)
# plot all LineStrings and MultiLineString components in same collection
line_idx = np.array(
(s.geometry.type == 'LineString') |
(s.geometry.type == 'MultiLineString'))
lines = s.geometry[line_idx]
if not lines.empty:
plot_linestring_collection(ax, lines, values[line_idx], True,
vmin=mn, vmax=mx, cmap=cmap,
linewidth=linewidth, **color_kwds)
point_idx = np.array(s.geometry.type == 'Point')
points = s.geometry[point_idx]
if not points.empty:
plot_point_collection(ax, points, values[point_idx],
vmin=mn, vmax=mx, cmap=cmap, **color_kwds)
if legend and not color:
norm = Normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
if categorical:
patches = []
for value, cat in enumerate(categories):
patches.append(Line2D([0], [0], linestyle="none",
marker="o", alpha=color_kwds.get('alpha', 0.5),
markersize=10, markerfacecolor=n_cmap.to_rgba(value)))
ax.legend(patches, categories, numpoints=1, loc='best')
else:
n_cmap.set_array([])
ax.get_figure().colorbar(n_cmap)
plt.draw()
return ax
def __pysal_choro(values, scheme, k=5):
    """Wrap PySAL's choropleth classification schemes for plot_dataframe.

    Parameters
    ----------
    values : pandas.Series
        Values to be classified.
    scheme : str
        pysal.esda.mapclassify classification scheme, one of
        'equal_interval', 'quantiles' or 'fisher_jenks'
        (matched case-insensitively).
    k : int
        Number of classes (2 <= k <= 9).

    Returns
    -------
    binning
        Binning object that holds the class identifiers (``yb``) and
        the class boundaries (``bins``).

    Raises
    ------
    ValueError
        If ``scheme`` is not one of the recognized scheme names.
    ImportError
        If PySAL is not installed.
    """
    try:
        from pysal.esda.mapclassify import Quantiles, Equal_Interval, Fisher_Jenks
        schemes = {}
        schemes['equal_interval'] = Equal_Interval
        schemes['quantiles'] = Quantiles
        schemes['fisher_jenks'] = Fisher_Jenks
        scheme = scheme.lower()
        if scheme not in schemes:
            # ValueError is not caught below (only ImportError is), so an
            # unknown scheme name propagates to the caller as intended.
            raise ValueError("Invalid scheme. Scheme must be in the set: %r" % schemes.keys())
        binning = schemes[scheme](values, k)
        return binning
    except ImportError:
        raise ImportError("PySAL is required to use the 'scheme' keyword")
poblahblahblah/gofigure | lib/id/id.go | 582 | package id
import (
"github.com/poblahblahblah/gofigure/lib/factfuncts"
"github.com/poblahblahblah/gofigure/lib/kernel"
"os/exec"
)
// Load returns the name of the user running the process, or an empty
// string if the lookup command fails.
//
// Solaris needs the POSIX-conformant id(1) from /usr/xpg4/bin (run as
// `id -un`); every other kernel uses plain `whoami`. The previous
// version duplicated the exec/Output/Chomp sequence per branch and had
// an unreachable `return "unsupported OS"` after the exhaustive switch.
func Load() string {
	var cmd *exec.Cmd
	switch kernel.Load() {
	case "SunOS":
		// POSIX-conformant id lives under /usr/xpg4/bin on Solaris.
		cmd = exec.Command("/usr/xpg4/bin/id", "-un")
	default:
		cmd = exec.Command("whoami")
	}
	out, err := cmd.Output()
	if err != nil {
		return ""
	}
	return factfuncts.Chomp(string(out))
}
| mit |
liamkeaton/ascii-space-invaders | public/game.js | 18671 | /**
* The main game object
*/
var Game = function() {
    // Gameplay tuning values. NOTE: the "initalLives" spelling is
    // historic and read elsewhere (reset), so it is kept as-is.
    var config = {
        initalLives: 3,
        difficultyMultiplier: 0.2,
        shipSpeed: 20,
        rocketVelocity: 20,
        rocketMaxFireRate: 2,
        invaderVelocity: 10,
        invaderColumns: 4,
        invaderRows: 2,
        invaderPoints: 5,
        bombRate: 0.5,
        bombMinVelocity: 5,
        bombMaxVelocity: 20
    };
    // Characters used to render each kind of cell.
    var characters = {
        space: ' ',
        block: '░',
        ship: '8',
        rocket: '|',
        bomb: 'O',
        invader: 'Y'
    };
    // Key codes for the supported inputs.
    var keys = {
        fire: 32, // Spacebar
        left: 37, // Left arrow
        right: 39, // Right arrow
        start: 32, // Spacebar
        restart: 27 // Escape
    };
    this.config = config;
    this.characters = characters;
    this.keys = keys;
    // Screen bounds in character cells; measured later by setBounds().
    this.bounds = { left: 0, right: 0, top: 0, bottom: 0 };
    this.score = 0;
    this.level = 1;
    this.lives = config.initalLives;
    this.stateStack = [];
    this.pressedKeys = {};
};
/**
 * Initialise the game with the DOM element it renders into: measure
 * the character grid and fill the content buffer with blanks.
 * @param {Object} element The element whose innerHTML is the screen
 */
Game.prototype.init = function(element) {
    // Set the game element
    this.element = element;
    // Set up the game screen
    this.setBounds();
    // Initialise the content
    this.clear();
};
/**
 * Set the game bounds (in character cells) from the element and font
 * metrics. A throwaway <span> holding one block character is appended
 * to the document, measured for cell width, then removed; the computed
 * line-height gives the cell height.
 */
Game.prototype.setBounds = function() {
    var width = this.element.offsetWidth,
        height = this.element.offsetHeight,
        ruler = document.createElement('span'),
        style = window.getComputedStyle(this.element, null),
        line = parseInt(style.getPropertyValue('line-height'));
    ruler.style.font = style.getPropertyValue('font');
    ruler.style.fontSize = style.getPropertyValue('font-size');
    ruler.innerHTML = this.characters.block;
    // The ruler must be in the document to have a measurable width.
    document.body.appendChild(ruler);
    this.bounds.right = parseInt(width / ruler.offsetWidth);
    this.bounds.bottom = parseInt(height / line);
    document.body.removeChild(ruler);
}
/**
 * Return the state on top of the state stack, or null when empty.
 * @return {Object|Null}
 */
Game.prototype.currentState = function() {
    var depth = this.stateStack.length;
    return depth > 0 ? this.stateStack[depth - 1] : null;
};
/**
 * Helper method that moves to a new state by popping the current one
 * and pushing the replacement.
 * @param {Object} state
 */
Game.prototype.moveToState = function(state) {
    this.popState();
    this.pushState(state);
};
/**
 * Pop the top state from the state stack, calling its optional
 * leave(game) hook first. No-op when the stack is empty.
 */
Game.prototype.popState = function() {
    // Leave and pop the state.
    if (this.currentState()) {
        if (this.currentState().leave) {
            this.currentState().leave(this);
        }
        // Set the current state.
        this.stateStack.pop();
    }
};
/**
 * Push a new state onto the state stack, calling its optional
 * enter(game) hook first.
 * @param {Object} state
 */
Game.prototype.pushState = function(state) {
    if (state.enter) {
        state.enter(this);
    }
    this.stateStack.push(state);
};
/**
 * Key press handler: record the numeric key code in the pressed-keys
 * map (polled each frame by states) and delegate to the current state.
 * @param {Number} keyCode
 */
Game.prototype.keyDown = function(keyCode) {
    this.pressedKeys[keyCode] = true;
    // Delegate to the current state too.
    if (this.currentState() && this.currentState().keyDown) {
        this.currentState().keyDown(this, keyCode);
    }
};
/**
 * Key release handler: remove the key from the pressed-keys map and
 * delegate to the current state.
 * @param {Number} keyCode
 */
Game.prototype.keyUp = function(keyCode) {
    delete this.pressedKeys[keyCode];
    // Delegate to the current state too.
    if (this.currentState() && this.currentState().keyUp) {
        this.currentState().keyUp(this, keyCode);
    }
};
/**
 * Reset the content buffer to a screen full of blank rows, one string
 * per row of the character grid.
 */
Game.prototype.clear = function() {
    var blankRow = Array(this.bounds.right + 1).join(this.characters.space),
        rows = [],
        y;
    for (y = 0; y <= this.bounds.bottom; y++) {
        rows.push(blankRow);
    }
    this.content = rows;
};
/**
 * Draw the string into the content buffer at the given coordinates by
 * splicing it into the target row. Coordinates are truncated to ints.
 * @param {Number} x
 * @param {Number} y
 * @param {String} string
 * @throws {Error} If the x and y coordinates are out of bounds
 */
Game.prototype.draw = function(x, y, string) {
    var intX = parseInt(x),
        intY = parseInt(y);
    if (intX < this.bounds.left || intX > this.bounds.right ||
        intY < this.bounds.top || intY > this.bounds.bottom) {
        throw new Error('Out of bounds');
    }
    // Overwrite exactly string.length cells of the row, keeping the rest.
    this.content[intY] = this.content[intY].substr(0, intX) + string + this.content[intY].substr(intX + string.length);
};
/**
 * Helper to draw text horizontally centered on the middle row.
 * @param {String} string
 */
Game.prototype.drawCenter = function(string) {
    // BUGFIX: the comma after `string.length` was missing, so ASI ended
    // the `var` statement there and `x`/`y` leaked as implicit globals.
    var length = string.length,
        x = 0,
        y = Math.floor(this.bounds.bottom / 2);
    if (length < this.bounds.right) {
        x = parseInt((this.bounds.right / 2) - (length / 2));
    }
    return this.draw(x, y, string);
}
/**
 * Render the content buffer into the element. The DOM write is skipped
 * when nothing changed since the last render.
 * @return {Boolean} Whether the element was actually updated
 */
Game.prototype.render = function() {
    var next = this.content.join('\n');
    if (this.element.innerHTML === next) {
        return false;
    }
    this.element.innerHTML = next;
    return true;
};
/**
 * Kick off the game: reset to the welcome state and start the
 * requestAnimationFrame loop that updates, draws and renders the
 * current state each frame.
 */
Game.prototype.start = function() {
    var self = this,
        lastTimestamp = 0;
    // Reset the game state
    this.reset();
    // Start by calling the first step
    window.requestAnimationFrame(step);
    /**
     * One frame: update the current state with the elapsed time in
     * seconds, draw it, render, and schedule the next frame.
     * @param {Number} timestamp High-resolution time from rAF (ms)
     */
    function step(timestamp) {
        var currentState = self.currentState();
        if (currentState) {
            // Update the game with the delta period
            if(currentState.update) {
                currentState.update(self, ((timestamp - lastTimestamp) / 1000));
            }
            // Draw the current state
            if(currentState.draw) {
                currentState.draw(self);
            }
            // Render to the canvas any changes
            self.render();
        }
        // Call the next step
        lastTimestamp = timestamp;
        window.requestAnimationFrame(step);
    }
};
/**
 * Reset the game to its initial state: level 1, zero score, full
 * lives, and back to the welcome screen.
 */
Game.prototype.reset = function() {
    this.level = 1;
    this.score = 0;
    this.lives = this.config.initalLives;
    // Pass this instance rather than the global `game`, so reset works
    // on any Game object (WelcomeState ignores the argument anyway).
    this.moveToState(new WelcomeState(this));
};
/**
 * Initial welcome state shown before a game begins.
 * @param {Game} game Unused; kept for a uniform state constructor shape
 */
var WelcomeState = function(game) {};
/**
 * Render the welcome prompt in the centre of the screen.
 * @param {Game} game
 */
WelcomeState.prototype.draw = function(game) {
    game.clear();
    game.drawCenter('Welcome Press "Space"');
}
/**
 * Move on to the level intro when the start key is released.
 * @param {Game} game
 * @param {Number} keyCode
 */
WelcomeState.prototype.keyUp = function(game, keyCode) {
    if (keyCode !== game.keys.start) {
        return;
    }
    game.moveToState(new LevelIntroState(game));
}
/**
 * Level intro state: shows a 3-2-1 countdown before play begins.
 * @param {Game} game Unused; kept for a uniform state constructor shape
 */
var LevelIntroState = function(game) {
    this.countdown = 3;       // seconds remaining before play starts
    this.countdownMessage = '3';
};
/**
 * Tick the countdown down by the elapsed time, update the displayed
 * digit, and move to the play state once it reaches zero.
 * @param {Game} game
 * @param {Number} delta Elapsed time in seconds
 */
LevelIntroState.prototype.update = function(game, delta) {
    this.countdown -= delta;
    if (this.countdown < 2) {
        this.countdownMessage = '2';
    }
    if (this.countdown < 1) {
        this.countdownMessage = '1';
    }
    if (this.countdown <= 0) {
        game.moveToState(new PlayState(game));
    }
}
/**
 * Draw the countdown message for the user.
 * @param {Game} game
 */
LevelIntroState.prototype.draw = function(game) {
    game.clear();
    game.drawCenter('Start Level ' + game.level + ' in ' + this.countdownMessage);
}
/**
 * Main play state. Difficulty scales linearly with the current level;
 * the entities themselves (ship, invaders) are created in enter().
 * @param {Game} game
 */
var PlayState = function(game) {
    // Difficulty
    this.difficulty = game.level * game.config.difficultyMultiplier;
    // Game entities
    this.ship = null;
    this.rockets = [];   // rockets fired by the player, travelling up
    this.invaders = [];
    this.bombs = [];     // bombs dropped by invaders, travelling down
};
/**
 * Enter the play state: create the player ship at the bottom centre
 * and the grid of invaders, with invader speed scaled by difficulty.
 * @param {Game} game
 */
PlayState.prototype.enter = function(game) {
    this.ship = new Ship(game.bounds.right/2, game.bounds.bottom, game.config.shipSpeed);
    this.invaders = createInvaders(
        game.config.invaderColumns,
        game.config.invaderRows,
        this.difficultyMultiplier(game.config.invaderVelocity)
    );
    /**
     * Create a grid of invaders, 5 cells apart horizontally and 2 rows
     * apart vertically, starting 2 rows from the top of the screen.
     * @param {Number} columns
     * @param {Number} rows
     * @param {Number} velocity Horizontal speed in cells per second
     */
    function createInvaders(columns, rows, velocity) {
        var invaders = [],
            column, row, x, y;
        for (column = 0; column < columns; column++) {
            for (row = 0; row < rows; row++) {
                x = column * 5;
                y = 2 + row * 2;
                invaders.push(new Invader(x, y, velocity, column, row));
            }
        }
        return invaders;
    }
}
/**
 * Scale a base value up by the current difficulty factor, so things
 * get faster/more frequent as the levels increase.
 * @param {Number} value
 * @return {Number} The value increased with difficulty
 */
PlayState.prototype.difficultyMultiplier = function(value) {
    var bonus = this.difficulty * value;
    return value + bonus;
};
/**
 * Advance the world by one frame: move the ship, rockets, invaders and
 * bombs, resolve collisions, and transition state on level clear or
 * game over.
 *
 * BUGFIX: the removal loops spliced the array while iterating forward
 * without adjusting the index, which skipped the element immediately
 * after every removed one (rockets/bombs could sail past the top or
 * bottom for a frame, invaders could dodge a hit). Each loop now steps
 * the index back after a splice.
 * @param {Game} game
 * @param {Number} delta Elapsed time in seconds
 */
PlayState.prototype.update = function(game, delta) {
    var i = 0;
    // Move the ship, fire rocket and check for damage
    moveShip(game, this.ship);
    if (entityHit(this.ship, this.bombs)) {
        game.lives -= 1;
    }
    if (game.pressedKeys[game.keys.fire]) {
        fireRocket(game, this.ship, this.rockets);
    }
    // Move each rocket, if it returns false the rocket should be removed
    for (i = 0; i < this.rockets.length; i++) {
        if (!moveRocket(game, this.rockets[i])) {
            this.rockets.splice(i, 1);
            i -= 1; // splice shifted the next rocket into this index
        }
    }
    // Move the invaders and give them a chance of launching a bomb and be hit
    for (i = 0; i < this.invaders.length; i++) {
        moveInvader(game, this.invaders[i]);
        dropBomb(game, this.invaders[i], this.bombs);
        if (entityHit(this.invaders[i], this.rockets)) {
            this.invaders.splice(i, 1);
            game.score += game.config.invaderPoints;
            i -= 1; // re-check this index: the next invader shifted into it
        }
    }
    // Move the bombs, if it returns false the bomb should be removed
    for (i = 0; i < this.bombs.length; i++) {
        if (!moveBomb(game, this.bombs[i])) {
            this.bombs.splice(i, 1);
            i -= 1; // splice shifted the next bomb into this index
        }
    }
    // Next Level
    if (this.invaders.length === 0) {
        game.score += game.level * 50;
        game.level += 1;
        game.moveToState(new LevelIntroState(game));
    }
    // Game Over
    if (game.lives <= 0) {
        game.moveToState(new GameOverState(game));
    }
    /**
     * Move the ship according to the held keys, clamped to the bounds.
     * @param {Object} game
     * @param {Entity} ship
     */
    function moveShip(game, ship) {
        // Move the ship
        if (game.pressedKeys[game.keys.left]) {
            ship.x -= ship.velocity * delta;
        }
        if (game.pressedKeys[game.keys.right]) {
            ship.x += ship.velocity * delta;
        }
        // Limit ship to the bounds
        if (ship.x < game.bounds.left) {
            ship.x = game.bounds.left;
        }
        if (ship.x + ship.width > game.bounds.right) {
            ship.x = game.bounds.right - ship.width;
        }
    }
    /**
     * Move the invader,
     * if the edges are hit it descends a row and reverses direction,
     * if the bottom is reached the invaders have landed and you die.
     * @param {Object} game
     * @param {Entity} invader
     */
    function moveInvader(game, invader) {
        invader.x += invader.velocity * delta;
        // If the invader has hit the side drop down and reverse
        if (invader.x < game.bounds.left) {
            invader.x = game.bounds.left;
            invader.y += 1;
            invader.velocity *= -1;
        }
        if ((invader.x + invader.width) > game.bounds.right) {
            invader.x = game.bounds.right - invader.width;
            invader.y += 1;
            invader.velocity *= -1;
        }
        // Invaders have landed
        if (invader.y >= game.bounds.bottom) {
            game.lives = 0;
        }
    }
    /**
     * Move the rocket and return false if out of bounds
     * @param {Object} game
     * @param {Entity} rocket
     * @return {Boolean}
     */
    function moveRocket(game, rocket) {
        rocket.y -= delta * rocket.velocity;
        if (rocket.y < game.bounds.top) {
            return false;
        }
        return true;
    }
    /**
     * Move the bomb, return false if out of bounds
     * @param {Object} game
     * @param {Entity} bomb
     * @return {Boolean}
     */
    function moveBomb(game, bomb) {
        bomb.y += delta * bomb.velocity;
        if (bomb.y > game.bounds.bottom + 1) {
            return false;
        }
        return true;
    }
    /**
     * Fire a rocket from the ship's centre, rate-limited by
     * rocketMaxFireRate (shots per second).
     * @param {Object} game
     * @param {Entity} ship
     * @param {Array<Entity>} rockets
     */
    function fireRocket(game, ship, rockets) {
        var lastRocket = rockets[rockets.length - 1],
            rate = (1000 / game.config.rocketMaxFireRate),
            now = (new Date()).valueOf();
        if (!lastRocket || (now - lastRocket.fired) > rate) {
            rockets.push(new Rocket(ship.center(), ship.y, game.config.rocketVelocity, now));
        }
    }
    /**
     * Give the invader a random, frame-rate-independent chance of
     * dropping a bomb with a random velocity between min and max.
     * @param {Object} game
     * @param {Entity} invader
     * @param {Array<Entity>} bombs
     */
    function dropBomb(game, invader, bombs) {
        var random = Math.random(),
            chance = game.config.bombRate * delta,
            max = game.config.bombMaxVelocity,
            min = game.config.bombMinVelocity;
        if (chance > random) {
            bombs.push(new Bomb(invader.x, invader.y, (random * (max - min + 1) + min)));
        }
    }
    /**
     * Check if the victim has been hit by any of the attacker entities;
     * the first attacker that hits is consumed (removed).
     * @param {Entity} victim
     * @param {Array<Entity>} attackers
     * @return {Boolean}
     */
    function entityHit(victim, attackers) {
        var i;
        for (i = 0; i < attackers.length; i++) {
            if (attackers[i].hit(victim)) {
                attackers.splice(i, 1);
                return true;
            }
        }
        return false;
    }
}
/**
 * Draw the HUD line, the ship, and every rocket, invader and bomb.
 * @param {Game} game
 */
PlayState.prototype.draw = function(game) {
    game.clear();
    game.draw(1, 0, 'Level ' + game.level + ' - Lives ' + game.lives + ' - Score ' + game.score);
    this.ship.draw(game);
    // Draw rockets, then invaders, then bombs (same order as before).
    [this.rockets, this.invaders, this.bombs].forEach(function(group) {
        group.forEach(function(entity) {
            entity.draw(game);
        });
    });
}
/**
 * Shown when no lives remain; waits for the restart key.
 * @param {Game} game Unused; kept for a uniform state constructor shape
 */
var GameOverState = function(game) {};
/**
 * Draw the game-over message telling the user how to restart.
 * @param {Game} game
 */
GameOverState.prototype.draw = function(game) {
    game.clear();
    game.drawCenter('Game Over, Press "Escape" to restart');
};
/**
 * Listen for key release and reset the game on the restart key.
 * @param {Game} game
 * @param {Number} keyCode
 */
GameOverState.prototype.keyUp = function(game, keyCode) {
    if (keyCode === game.keys.restart) {
        game.reset();
    }
}
/**
 * Base game entity which the drawable objects inherit. An entity
 * occupies a rectangle of character cells at (x, y) and is rendered
 * with the character named by its `type` (looked up in
 * game.characters).
 */
var Entity = function() {
    this.x = 0;
    this.y = 0;
    this.width = 0;
    this.height = 0;
    this.string = '';  // cached sprite row, built lazily by sprite()
};
/**
 * Check whether this entity's origin point lies inside the given
 * entity's bounding box.
 * NOTE(review): only this entity's (x, y) is tested - its own
 * width/height are ignored - which appears sufficient because all
 * attackers (rockets/bombs) are one cell wide; confirm before reusing
 * for wider entities.
 * @param {Entity} entity
 * @return {Boolean}
 */
Entity.prototype.hit = function(entity) {
    if (this.x >= entity.x && this.x <= (entity.x + entity.width) &&
        this.y >= entity.y && this.y <= (entity.y + entity.height)) {
        return true;
    }
    return false;
};
/**
 * Return the sprite row for the entity: the character repeated across
 * the entity's width, built on first use and cached thereafter.
 * @param {String} char
 * @return {String}
 */
Entity.prototype.sprite = function(char) {
    if (!this.string) {
        this.string = Array(this.width+1).join(char);
    }
    return this.string
};
/**
 * Draw the entity into the game's content buffer, one sprite row per
 * unit of height.
 * @param {Game} game
 */
Entity.prototype.draw = function(game) {
    var i = 0,
        sprite = this.sprite(game.characters[this.type]);
    for (i = 0; i < this.height; i++) {
        game.draw(this.x, this.y + i, sprite);
    }
};
/**
 * Return the horizontal centre of the entity; for single-cell-wide
 * entities this is simply x.
 * @return {Number}
 */
Entity.prototype.center = function() {
    if (this.width > 1) {
        return this.x + (this.width/2);
    }
    return this.x;
};
/**
 * Ship game entity: the player, one cell drawn with the 'ship'
 * character, moving horizontally at `velocity` cells per second.
 * @param {Number} x
 * @param {Number} y
 * @param {Number} velocity
 */
var Ship = function (x, y, velocity) {
    this.x = x;
    this.y = y;
    this.velocity = velocity;
    this.width = 1;
    this.height = 1;
    this.type = 'ship';
};
Ship.prototype = Object.create(Entity.prototype);
/**
 * Rocket game entity: fired by the ship, travels upward.
 * @param {Number} x
 * @param {Number} y
 * @param {Number} velocity
 * @param {Number} fired Timestamp (ms) of the shot, used for rate limiting
 */
var Rocket = function (x, y, velocity, fired) {
    this.x = x;
    this.y = y;
    this.velocity = velocity;
    this.width = 1;
    this.height = 1;
    this.type = 'rocket';
    this.fired = fired;
};
Rocket.prototype = Object.create(Entity.prototype);
/**
 * Invader game entity: marches horizontally, descending at the edges.
 * @param {Number} x
 * @param {Number} y
 * @param {Number} velocity Signed horizontal speed (sign flips at edges)
 * @param {Number} column Grid column the invader was spawned in
 * @param {Number} row Grid row the invader was spawned in
 */
var Invader = function(x, y, velocity, column, row) {
    this.x = x;
    this.y = y;
    this.velocity = velocity;
    this.column = column;
    this.row = row;
    this.width = 1;
    this.height = 1;
    this.type = 'invader';
};
Invader.prototype = Object.create(Entity.prototype);
/**
 * Bomb game entity: dropped by invaders, travels downward.
 * @param {Number} x
 * @param {Number} y
 * @param {Number} velocity
 */
var Bomb = function(x, y, velocity) {
    this.x = x;
    this.y = y;
    this.velocity = velocity;
    this.width = 1;
    this.height = 1;
    this.type = 'bomb';
};
Bomb.prototype = Object.create(Entity.prototype);
| mit |
chaincoin/chaincoin | src/validation.cpp | 221014 | // Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2018 The Bitcoin Core developers
// Copyright (c) 2014-2017 The Dash Core developers
// Copyright (c) 2018 PM-Tech
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <validation.h>
#include <arith_uint256.h>
#include <chain.h>
#include <chainparams.h>
#include <checkpoints.h>
#include <checkqueue.h>
#include <consensus/consensus.h>
#include <consensus/merkle.h>
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <cuckoocache.h>
#include <hash.h>
#include <index/txindex.h>
#include <modules/coinjoin/coinjoin.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <policy/rbf.h>
#include <pow.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <reverse_iterator.h>
#include <script/script.h>
#include <script/sigcache.h>
#include <script/standard.h>
#include <shutdown.h>
#include <timedata.h>
#include <tinyformat.h>
#include <txdb.h>
#include <txmempool.h>
#include <ui_interface.h>
#include <undo.h>
#include <util/system.h>
#include <util/moneystr.h>
#include <util/strencodings.h>
#include <validationinterface.h>
#include <warnings.h>
#include <modules/masternode/masternode_man.h>
#include <modules/masternode/masternode_payments.h>
#include <future>
#include <numeric>
#include <sstream>
#include <boost/algorithm/string/replace.hpp>
#include <boost/thread.hpp>
#if defined(NDEBUG)
# error "Chaincoin Core cannot be compiled without assertions."
#endif
#define MICRO 0.000001
#define MILLI 0.001
/**
* Global state
*/
namespace {
    /**
     * Strict weak ordering used to choose the best chain candidate:
     * most cumulative work first, then earliest received (lowest
     * nSequenceId), with the pointer address as a final tie-breaker for
     * blocks loaded from disk (which all have sequence id 0).
     */
    struct CBlockIndexWorkComparator
    {
        bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const {
            // First sort by most total work, ...
            if (pa->nChainWork > pb->nChainWork) return false;
            if (pa->nChainWork < pb->nChainWork) return true;

            // ... then by earliest time received, ...
            if (pa->nSequenceId < pb->nSequenceId) return false;
            if (pa->nSequenceId > pb->nSequenceId) return true;

            // Use pointer address as tie breaker (should only happen with blocks
            // loaded from disk, as those all have id 0).
            if (pa < pb) return false;
            if (pa > pb) return true;

            // Identical blocks.
            return false;
        }
    };
} // anon namespace

/** Outcome of disconnecting a block from the active chain. */
enum DisconnectResult
{
    DISCONNECT_OK,      // All good.
    DISCONNECT_UNCLEAN, // Rolled back, but UTXO set was inconsistent with block.
    DISCONNECT_FAILED   // Something else went wrong.
};

class ConnectTrace;
/**
 * CChainState stores and provides an API to update our local knowledge of the
 * current best chain and header tree.
 *
 * It generally provides access to the current block tree, as well as functions
 * to provide new data, which it will appropriately validate and incorporate in
 * its state as necessary.
 *
 * Eventually, the API here is targeted at being exposed externally as a
 * consumable libconsensus library, so any functions added must only call
 * other class member functions, pure functions in other parts of the consensus
 * library, callbacks via the validation interface, or read/write-to-disk
 * functions (eventually this will also be via callbacks).
 */
class CChainState {
private:
    /**
     * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
     * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
     * missing the data for the block.
     */
    std::set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;

    /**
     * Every received block is assigned a unique and increasing identifier, so we
     * know which one to give priority in case of a fork.
     */
    CCriticalSection cs_nBlockSequenceId;
    /** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
    int32_t nBlockSequenceId = 1;
    /** Decreasing counter (used by subsequent preciousblock calls). */
    int32_t nBlockReverseSequenceId = -1;
    /** chainwork for the last block that preciousblock has been applied to. */
    arith_uint256 nLastPreciousChainwork = 0;

    /** In order to efficiently track invalidity of headers, we keep the set of
     * blocks which we tried to connect and found to be invalid here (ie which
     * were set to BLOCK_FAILED_VALID since the last restart). We can then
     * walk this set and check if a new header is a descendant of something in
     * this set, preventing us from having to walk mapBlockIndex when we try
     * to connect a bad block and fail.
     *
     * While this is more complicated than marking everything which descends
     * from an invalid block as invalid at the time we discover it to be
     * invalid, doing so would require walking all of mapBlockIndex to find all
     * descendants. Since this case should be very rare, keeping track of all
     * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as
     * well.
     *
     * Because we already walk mapBlockIndex in height-order at startup, we go
     * ahead and mark descendants of invalid blocks as FAILED_CHILD at that time,
     * instead of putting things in this set.
     */
    std::set<CBlockIndex*> m_failed_blocks;

    /**
     * the ChainState CriticalSection
     * A lock that must be held when modifying this ChainState - held in ActivateBestChain()
     */
    CCriticalSection m_cs_chainstate;

public:
    CChain chainActive;
    BlockMap mapBlockIndex GUARDED_BY(cs_main);
    std::multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
    CBlockIndex *pindexBestInvalid = nullptr;

    bool LoadBlockIndex(const Consensus::Params& consensus_params, CBlockTreeDB& blocktree) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock);

    /**
     * If a block header hasn't already been seen, call CheckBlockHeader on it, ensure
     * that it doesn't descend from an invalid block, and then add it to mapBlockIndex.
     */
    bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    // Block (dis)connection on a given view:
    DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view);
    bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex,
                      CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck = false) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    // Block disconnection on our pcoinsTip:
    bool DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions* disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    // Manual block validity manipulation:
    bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex* pindex) LOCKS_EXCLUDED(cs_main);
    bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindex);
    void ResetBlockFailureFlags(CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    bool ReplayBlocks(const CChainParams& params, CCoinsView* view);
    bool RewindBlockIndex(const CChainParams& params);
    bool LoadGenesisBlock(const CChainParams& chainparams);

    void PruneBlockIndexCandidates();

    void UnloadBlockIndex();

private:
    bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    /** Create a new block index entry for a given block hash */
    CBlockIndex* InsertBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    /**
     * Make various assertions about the state of the block index.
     *
     * By default this only executes fully when using the Regtest chain; see: fCheckBlockIndex.
     */
    void CheckBlockIndex(const Consensus::Params& consensusParams);

    void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    CBlockIndex* FindMostWorkChain() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    void ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    bool RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    //! Mark a block as not having block data
    void EraseBlockData(CBlockIndex* index) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
} g_chainstate;  // the single global chain state instance
/**
 * Mutex to guard access to validation specific variables, such as reading
 * or changing the chainstate.
 *
 * This may also need to be locked when updating the transaction pool, e.g. on
 * AcceptToMemoryPool. See CTxMemPool::cs comment for details.
 *
 * The transaction pool has a separate lock to allow reading from it and the
 * chainstate at the same time.
 */
RecursiveMutex cs_main;

// Convenience references into the single global chain state above.
BlockMap& mapBlockIndex = g_chainstate.mapBlockIndex;
CChain& chainActive = g_chainstate.chainActive;
CBlockIndex *pindexBestHeader = nullptr;
// Mutex/condvar/hash used to signal waiters when a new best block arrives.
Mutex g_best_block_mutex;
std::condition_variable g_best_block_cv;
uint256 g_best_block;
int nScriptCheckThreads = 0;
std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT;
// Assumed-valid block hash and minimum chain work (set from chain params /
// command line) used to skip script checks on deeply-buried history.
uint256 hashAssumeValid;
arith_uint256 nMinimumChainWork;

/** Fees smaller than this (in duffs) are considered zero fee (for relaying, mining and transaction creation) */
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;

CBlockPolicyEstimator feeEstimator;
CTxMemPool mempool(&feeEstimator);
std::atomic_bool g_is_mempool_loaded{false};

/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;

const std::string strMessageMagic = "DarkCoin Signed Message:\n";

// Internal stuff
namespace {
    CBlockIndex *&pindexBestInvalid = g_chainstate.pindexBestInvalid;

    /** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
     * Pruned nodes may have entries where B is missing data.
     */
    std::multimap<CBlockIndex*, CBlockIndex*>& mapBlocksUnlinked = g_chainstate.mapBlocksUnlinked;

    CCriticalSection cs_LastBlockFile;
    std::vector<CBlockFileInfo> vinfoBlockFile;
    int nLastBlockFile = 0;
    /** Global flag to indicate we should check to see if there are
     * block/undo files that should be deleted. Set on startup
     * or if we allocate more file space when we're in prune mode
     */
    bool fCheckForPruning = false;

    /** Dirty block index entries. */
    std::set<CBlockIndex*> setDirtyBlockIndex;

    /** Dirty block file entries. */
    std::set<int> setDirtyFileInfo;
} // anon namespace
/**
 * Find the best-known block that both our block index and the given locator
 * agree on. Returns the first locator entry that lies on (or whose chain
 * contains) our active chain, falling back to genesis when nothing matches.
 */
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
    AssertLockHeld(cs_main);

    // locator.vHave is expected to be sorted from newest to oldest, so the
    // first hit is the most recent common block.
    for (const uint256& locator_hash : locator.vHave) {
        CBlockIndex* candidate = LookupBlockIndex(locator_hash);
        if (candidate == nullptr) {
            continue; // unknown block, try the next (older) locator entry
        }
        if (chain.Contains(candidate)) {
            return candidate;
        }
        // The candidate is on a fork, but our tip is one of its ancestors:
        // our tip itself is then the last common block.
        if (candidate->GetAncestor(chain.Height()) == chain.Tip()) {
            return chain.Tip();
        }
    }

    // Nothing in common with the locator beyond the genesis block.
    return chain.Genesis();
}
// Global handles for the coins database, its in-memory cache, and the block
// tree database. Owned here; initialization happens elsewhere (not visible in
// this chunk).
std::unique_ptr<CCoinsViewDB> pcoinsdbview;
std::unique_ptr<CCoinsViewCache> pcoinsTip;
std::unique_ptr<CBlockTreeDB> pblocktree;

// How aggressively FlushStateToDisk() should persist dirty chainstate.
enum class FlushStateMode {
    NONE,
    IF_NEEDED,
    PERIODIC,
    ALWAYS
};

// See definition for documentation
static bool FlushStateToDisk(const CChainParams& chainParams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight=0);
static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight);
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
// Forward declaration; script/signature validation entry point used by the
// mempool-acceptance code below.
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false);
bool CheckFinalTx(const CTransaction &tx, int flags)
{
AssertLockHeld(cs_main);
// By convention a negative value for flags indicates that the
// current network-enforced consensus rules should be used. In
// a future soft-fork scenario that would mean checking which
// rules would be enforced for the next block and setting the
// appropriate flags. At the present time no soft-forks are
// scheduled, so no flags are set.
flags = std::max(flags, 0);
// CheckFinalTx() uses chainActive.Height()+1 to evaluate
// nLockTime because when IsFinalTx() is called within
// CBlock::AcceptBlock(), the height of the block *being*
// evaluated is what is used. Thus if we want to know if a
// transaction can be part of the *next* block, we need to call
// IsFinalTx() with one more than chainActive.Height().
const int nBlockHeight = chainActive.Height() + 1;
// BIP113 requires that time-locked transactions have nLockTime set to
// less than the median time of the previous block they're contained in.
// When the next block is created its previous block will be the current
// chain tip, so we use that to calculate the median time passed to
// IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
? chainActive.Tip()->GetMedianTimePast()
: GetAdjustedTime();
return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
/**
 * Check whether previously computed LockPoints are still valid on the
 * current active chain. Must be called with cs_main held; lp must be non-null.
 */
bool TestLockPointValidity(const LockPoints* lp)
{
    AssertLockHeld(cs_main);
    assert(lp);

    // LockPoints without a maxInputBlock carried no relative lock times, so
    // they do not depend on any particular chain and stay valid forever.
    if (lp->maxInputBlock == nullptr) {
        return true;
    }

    // Otherwise they are only valid while the block the calculation was
    // anchored to is still part of the active chain.
    return chainActive.Contains(lp->maxInputBlock);
}
/**
 * Check whether a transaction's BIP68 relative lock-times would be satisfied
 * if it were mined in the *next* block on the active chain.
 *
 * @param pool                  mempool to resolve unconfirmed inputs from
 *                              (pool.cs must be held, see asserts below)
 * @param tx                    transaction to evaluate
 * @param flags                 sequence-lock verification flags
 * @param lp                    in/out LockPoints: read when
 *                              useExistingLockPoints, otherwise (if non-null)
 *                              filled with the freshly computed values
 * @param useExistingLockPoints reuse a previous calculation instead of
 *                              recomputing from the coin views
 * @return true if the sequence locks evaluate as satisfied for tip height + 1
 */
bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flags, LockPoints* lp, bool useExistingLockPoints)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(pool.cs);

    CBlockIndex* tip = chainActive.Tip();
    assert(tip != nullptr);

    // Synthetic index entry representing the (not yet mined) next block.
    CBlockIndex index;
    index.pprev = tip;
    // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
    // height based locks because when SequenceLocks() is called within
    // ConnectBlock(), the height of the block *being*
    // evaluated is what is used.
    // Thus if we want to know if a transaction can be part of the
    // *next* block, we need to use one more than chainActive.Height()
    index.nHeight = tip->nHeight + 1;

    std::pair<int, int64_t> lockPair;
    if (useExistingLockPoints) {
        assert(lp);
        lockPair.first = lp->height;
        lockPair.second = lp->time;
    }
    else {
        // pcoinsTip contains the UTXO set for chainActive.Tip()
        CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool);
        // Height at which each input was confirmed (or tip+1 for mempool inputs).
        std::vector<int> prevheights;
        prevheights.resize(tx.vin.size());
        for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
            const CTxIn& txin = tx.vin[txinIndex];
            Coin coin;
            if (!viewMemPool.GetCoin(txin.prevout, coin)) {
                return error("%s: Missing input", __func__);
            }
            if (coin.nHeight == MEMPOOL_HEIGHT) {
                // Assume all mempool transaction confirm in the next block
                prevheights[txinIndex] = tip->nHeight + 1;
            } else {
                prevheights[txinIndex] = coin.nHeight;
            }
        }
        lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index);
        if (lp) {
            lp->height = lockPair.first;
            lp->time = lockPair.second;
            // Also store the hash of the block with the highest height of
            // all the blocks which have sequence locked prevouts.
            // This hash needs to still be on the chain
            // for these LockPoint calculations to be valid
            // Note: It is impossible to correctly calculate a maxInputBlock
            // if any of the sequence locked inputs depend on unconfirmed txs,
            // except in the special case where the relative lock time/height
            // is 0, which is equivalent to no sequence lock. Since we assume
            // input height of tip+1 for mempool txs and test the resulting
            // lockPair from CalculateSequenceLocks against tip+1. We know
            // EvaluateSequenceLocks will fail if there was a non-zero sequence
            // lock on a mempool input, so we can use the return value of
            // CheckSequenceLocks to indicate the LockPoints validity
            int maxInputHeight = 0;
            for (const int height : prevheights) {
                // Can ignore mempool inputs since we'll fail if they had non-zero locks
                if (height != tip->nHeight+1) {
                    maxInputHeight = std::max(maxInputHeight, height);
                }
            }
            lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
        }
    }
    return EvaluateSequenceLocks(index, lockPair);
}
// Returns the script flags which should be checked for a given block
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);

/**
 * Evict entries until the mempool fits within `limit` bytes: first expire
 * everything older than `age` seconds, then trim by feerate/size. Prevouts
 * that are no longer spent by anything in the pool are dropped from the
 * coins cache as well.
 */
static void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
    const int nExpired = pool.Expire(GetTime() - age);
    if (nExpired != 0) {
        LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", nExpired);
    }

    std::vector<COutPoint> vNoSpendsRemaining;
    pool.TrimToSize(limit, &vNoSpendsRemaining);
    for (const COutPoint& removed : vNoSpendsRemaining) {
        pcoinsTip->Uncache(removed);
    }
}
/** Convert CValidationState to a human-readable message for logging */
std::string FormatStateMessage(const CValidationState &state)
{
    // Append the debug message (if any) after the reject reason, then the
    // numeric reject code: "<reason>[, <debug>] (code <n>)".
    const std::string debug_message = state.GetDebugMessage();
    const std::string suffix = debug_message.empty() ? "" : ", " + debug_message;
    return strprintf("%s%s (code %i)", state.GetRejectReason(), suffix, state.GetRejectCode());
}
/**
 * Decide whether the node is close enough to the network tip for mempool
 * activity to be a meaningful input to fee estimation.
 */
static bool IsCurrentForFeeEstimation() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);

    // During initial block download mempool contents say nothing about
    // current fee conditions.
    if (IsInitialBlockDownload()) {
        return false;
    }

    // The tip must be recent enough. (Checked before looking at
    // pindexBestHeader, preserving the original evaluation order.)
    const bool tip_fresh_enough =
        chainActive.Tip()->GetBlockTime() >= GetTime() - MAX_FEE_ESTIMATION_TIP_AGE;
    if (!tip_fresh_enough) {
        return false;
    }

    // And we must have (nearly) caught up to the best known header.
    return chainActive.Height() >= pindexBestHeader->nHeight - 1;
}
/* Make mempool consistent after a reorg, by re-adding or recursively erasing
 * disconnected block transactions from the mempool, and also removing any
 * other transactions from the mempool that are no longer valid given the new
 * tip/height.
 *
 * Note: we assume that disconnectpool only contains transactions that are NOT
 * confirmed in the current chain nor already in the mempool (otherwise,
 * in-mempool descendants of such transactions would be removed).
 *
 * Passing fAddToMempool=false will skip trying to add the transactions back,
 * and instead just erase from the mempool as needed.
 */
static void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);
    // Hashes of transactions successfully re-added, whose descendants need
    // their ancestor state refreshed afterwards.
    std::vector<uint256> vHashUpdate;
    // disconnectpool's insertion_order index sorts the entries from
    // oldest to newest, but the oldest entry will be the last tx from the
    // latest mined block that was disconnected.
    // Iterate disconnectpool in reverse, so that we add transactions
    // back to the mempool starting with the earliest transaction that had
    // been previously seen in a block.
    auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
    while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
        // ignore validation errors in resurrected transactions
        CValidationState stateDummy;
        if (!fAddToMempool || (*it)->IsCoinBase() ||
            !AcceptToMemoryPool(mempool, stateDummy, *it, nullptr /* pfMissingInputs */,
                                nullptr /* plTxnReplaced */, true /* bypass_limits */, 0 /* nAbsurdFee */)) {
            // If the transaction doesn't make it in to the mempool, remove any
            // transactions that depend on it (which would now be orphans).
            mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
        } else if (mempool.exists((*it)->GetHash())) {
            vHashUpdate.push_back((*it)->GetHash());
        }
        ++it;
    }
    disconnectpool.queuedTx.clear();
    // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
    // no in-mempool children, which is generally not true when adding
    // previously-confirmed transactions back to the mempool.
    // UpdateTransactionsFromBlock finds descendants of any transactions in
    // the disconnectpool that were added back and cleans up the mempool state.
    mempool.UpdateTransactionsFromBlock(vHashUpdate);
    // We also need to remove any now-immature transactions
    mempool.removeForReorg(pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
    // Re-limit mempool size, in case we added any transactions
    LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
}
// Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
// were somehow broken and returning the wrong scriptPubKeys
//
// Re-runs CheckInputs against the given view, but first asserts that every
// coin the view reports matches either the mempool parent tx or the on-disk
// UTXO set, so a buggy mempool view cannot feed wrong scripts into script
// validation. Returns false if any input is spent/missing, otherwise the
// CheckInputs result (state filled in on failure).
static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& view, const CTxMemPool& pool,
                                           unsigned int flags, bool cacheSigStore, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    AssertLockHeld(cs_main);
    // pool.cs should be locked already, but go ahead and re-take the lock here
    // to enforce that mempool doesn't change between when we check the view
    // and when we actually call through to CheckInputs
    LOCK(pool.cs);

    assert(!tx.IsCoinBase());
    for (const CTxIn& txin : tx.vin) {
        const Coin& coin = view.AccessCoin(txin.prevout);
        // At this point we haven't actually checked if the coins are all
        // available (or shouldn't assume we have, since CheckInputs does).
        // So we just return failure if the inputs are not available here,
        // and then only have to check equivalence for available inputs.
        if (coin.IsSpent()) return false;
        const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
        if (txFrom) {
            // Input comes from an in-mempool transaction: the view's coin
            // must exactly match that transaction's output.
            assert(txFrom->GetHash() == txin.prevout.hash);
            assert(txFrom->vout.size() > txin.prevout.n);
            assert(txFrom->vout[txin.prevout.n] == coin.out);
        } else {
            // Otherwise it must match the confirmed UTXO set.
            const Coin& coinFromDisk = pcoinsTip->AccessCoin(txin.prevout);
            assert(!coinFromDisk.IsSpent());
            assert(coinFromDisk.out == coin.out);
        }
    }
    return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata);
}
/**
 * Workhorse behind AcceptToMemoryPool(): runs all consensus and policy checks
 * on a single transaction and, unless test_accept is set, inserts it into the
 * mempool — possibly replacing conflicting transactions per the BIP125
 * replacement rules.
 *
 * @param pfMissingInputs  set to true (with a false return and a non-invalid
 *                         state) when inputs are unknown, i.e. a possible orphan
 * @param bypass_limits    skip fee/size mempool limits (used when re-adding
 *                         transactions from disconnected blocks)
 * @param coins_to_uncache out-param: prevouts this call pulled into the coins
 *                         cache, so the caller can uncache them on failure
 * @param test_accept      validate only; return true without adding to the pool
 * @return true on acceptance; false with state and/or *pfMissingInputs filled in
 */
static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx,
                              bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
                              bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    const CTransaction& tx = *ptx;
    const uint256 hash = tx.GetHash();
    AssertLockHeld(cs_main);
    LOCK(pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
    if (pfMissingInputs) {
        *pfMissingInputs = false;
    }

    // --- Context-free checks -------------------------------------------------
    if (!CheckTransaction(tx, state))
        return false; // state filled in by CheckTransaction

    // Coinbase is only valid in a block, not as a loose transaction
    if (tx.IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "coinbase");

    // Rather not work on nonstandard transactions (unless -testnet/-regtest)
    std::string reason;
    if (fRequireStandard && !IsStandardTx(tx, reason))
        return state.DoS(0, false, REJECT_NONSTANDARD, reason);

    // Do not work on transactions that are too small.
    // A transaction with 1 segwit input and 1 P2WPHK output has non-witness size of 82 bytes.
    // Transactions smaller than this are not relayed to reduce unnecessary malloc overhead.
    if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
        return state.DoS(0, false, REJECT_NONSTANDARD, "tx-size-small");

    // Only accept nLockTime-using transactions that can be mined in the next
    // block; we don't want our mempool filled up with transactions that can't
    // be mined yet.
    if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
        return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");

    // is it already in the memory pool?
    if (pool.exists(hash)) {
        return state.Invalid(false, REJECT_DUPLICATE, "txn-already-in-mempool");
    }

    // Check for conflicts with in-memory transactions
    std::set<uint256> setConflicts;
    for (const CTxIn &txin : tx.vin)
    {
        const CTransaction* ptxConflicting = pool.GetConflictTx(txin.prevout);
        if (ptxConflicting) {
            if (!setConflicts.count(ptxConflicting->GetHash()))
            {
                // Allow opt-out of transaction replacement by setting
                // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
                //
                // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
                // non-replaceable transactions. All inputs rather than just one
                // is for the sake of multi-party protocols, where we don't
                // want a single party to be able to disable replacement.
                //
                // The opt-out ignores descendants as anyone relying on
                // first-seen mempool behavior should be checking all
                // unconfirmed ancestors anyway; doing otherwise is hopelessly
                // insecure.
                bool fReplacementOptOut = true;
                if (fEnableReplacement)
                {
                    for (const CTxIn &_txin : ptxConflicting->vin)
                    {
                        if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
                        {
                            fReplacementOptOut = false;
                            break;
                        }
                    }
                }
                if (fReplacementOptOut) {
                    return state.Invalid(false, REJECT_DUPLICATE, "txn-mempool-conflict");
                }

                setConflicts.insert(ptxConflicting->GetHash());
            }
        }
    }

    // --- Checks that need the coin views ------------------------------------
    {
        CCoinsView dummy;
        CCoinsViewCache view(&dummy);

        LockPoints lp;
        CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool);
        view.SetBackend(viewMemPool);

        // do all inputs exist?
        for (const CTxIn& txin : tx.vin) {
            if (!pcoinsTip->HaveCoinInCache(txin.prevout)) {
                coins_to_uncache.push_back(txin.prevout);
            }
            if (!view.HaveCoin(txin.prevout)) {
                // Are inputs missing because we already have the tx?
                for (size_t out = 0; out < tx.vout.size(); out++) {
                    // Optimistically just do efficient check of cache for outputs
                    if (pcoinsTip->HaveCoinInCache(COutPoint(hash, out))) {
                        return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known");
                    }
                }
                // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
                if (pfMissingInputs) {
                    *pfMissingInputs = true;
                }
                return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
            }
        }

        // Bring the best block into scope
        view.GetBestBlock();

        // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
        view.SetBackend(dummy);

        // Only accept BIP68 sequence locked transactions that can be mined in the next
        // block; we don't want our mempool filled up with transactions that can't
        // be mined yet.
        // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
        // CoinsViewCache instead of create its own
        if (!CheckSequenceLocks(pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
            return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");

        CAmount nFees = 0;
        if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees)) {
            return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
        }

        // Check for non-standard pay-to-script-hash in inputs
        if (fRequireStandard && !AreInputsStandard(tx, view))
            return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");

        // Check for non-standard witness in P2WSH
        if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, view))
            return state.DoS(0, false, REJECT_NONSTANDARD, "bad-witness-nonstandard", true);

        int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);

        // nModifiedFees includes any fee deltas from PrioritiseTransaction
        CAmount nModifiedFees = nFees;
        pool.ApplyDelta(hash, nModifiedFees);

        // Keep track of transactions that spend a coinbase, which we re-scan
        // during reorgs to ensure COINBASE_MATURITY is still met.
        bool fSpendsCoinbase = false;
        for (const CTxIn &txin : tx.vin) {
            const Coin &coin = view.AccessCoin(txin.prevout);
            if (coin.IsCoinBase()) {
                fSpendsCoinbase = true;
                break;
            }
        }

        CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, chainActive.Height(),
                              fSpendsCoinbase, nSigOpsCost, lp);
        unsigned int nSize = entry.GetTxSize();

        // Check that the transaction doesn't have an excessive number of
        // sigops, making it impossible to mine. Since the coinbase transaction
        // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
        // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
        // merely non-standard transaction.
        if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
            return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
                strprintf("%d", nSigOpsCost));

        // --- Fee policy checks ----------------------------------------------
        CAmount mempoolRejectFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
        if (!bypass_limits && mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
        }

        // No transactions are allowed below minRelayTxFee except from disconnected blocks
        if (!bypass_limits && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "min relay fee not met", false, strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
        }

        if (nAbsurdFee && nFees > nAbsurdFee)
            return state.Invalid(false,
                REJECT_HIGHFEE, "absurdly-high-fee",
                strprintf("%d > %d", nFees, nAbsurdFee));

        // Calculate in-mempool ancestors, up to a limit.
        CTxMemPool::setEntries setAncestors;
        size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
        size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
        size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
        size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
        std::string errString;
        if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
            return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
        }

        // A transaction that spends outputs that would be replaced by it is invalid. Now
        // that we have the set of all ancestors we can detect this
        // pathological case by making sure setConflicts and setAncestors don't
        // intersect.
        for (CTxMemPool::txiter ancestorIt : setAncestors)
        {
            const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
            if (setConflicts.count(hashAncestor))
            {
                return state.DoS(10, false,
                                 REJECT_INVALID, "bad-txns-spends-conflicting-tx", false,
                                 strprintf("%s spends conflicting transaction %s",
                                           hash.ToString(),
                                           hashAncestor.ToString()));
            }
        }

        // --- BIP125 replacement evaluation ----------------------------------
        // Check if it's economically rational to mine this transaction rather
        // than the ones it replaces.
        CAmount nConflictingFees = 0;
        size_t nConflictingSize = 0;
        uint64_t nConflictingCount = 0;
        CTxMemPool::setEntries allConflicting;

        // If we don't hold the lock allConflicting might be incomplete; the
        // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
        // mempool consistency for us.
        const bool fReplacementTransaction = setConflicts.size();
        if (fReplacementTransaction)
        {
            CFeeRate newFeeRate(nModifiedFees, nSize);
            std::set<uint256> setConflictsParents;
            const int maxDescendantsToVisit = 100;
            const CTxMemPool::setEntries setIterConflicting = pool.GetIterSet(setConflicts);
            for (const auto& mi : setIterConflicting) {
                // Don't allow the replacement to reduce the feerate of the
                // mempool.
                //
                // We usually don't want to accept replacements with lower
                // feerates than what they replaced as that would lower the
                // feerate of the next block. Requiring that the feerate always
                // be increased is also an easy-to-reason about way to prevent
                // DoS attacks via replacements.
                //
                // We only consider the feerates of transactions being directly
                // replaced, not their indirect descendants. While that does
                // mean high feerate children are ignored when deciding whether
                // or not to replace, we do require the replacement to pay more
                // overall fees too, mitigating most cases.
                CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
                if (newFeeRate <= oldFeeRate)
                {
                    return state.DoS(0, false,
                            REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                            strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
                                  hash.ToString(),
                                  newFeeRate.ToString(),
                                  oldFeeRate.ToString()));
                }

                for (const CTxIn &txin : mi->GetTx().vin)
                {
                    setConflictsParents.insert(txin.prevout.hash);
                }

                nConflictingCount += mi->GetCountWithDescendants();
            }
            // This potentially overestimates the number of actual descendants
            // but we just want to be conservative to avoid doing too much
            // work.
            if (nConflictingCount <= maxDescendantsToVisit) {
                // If not too many to replace, then calculate the set of
                // transactions that would have to be evicted
                for (CTxMemPool::txiter it : setIterConflicting) {
                    pool.CalculateDescendants(it, allConflicting);
                }
                for (CTxMemPool::txiter it : allConflicting) {
                    nConflictingFees += it->GetModifiedFee();
                    nConflictingSize += it->GetTxSize();
                }
            } else {
                return state.DoS(0, false,
                        REJECT_NONSTANDARD, "too many potential replacements", false,
                        strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
                            hash.ToString(),
                            nConflictingCount,
                            maxDescendantsToVisit));
            }

            for (unsigned int j = 0; j < tx.vin.size(); j++)
            {
                // We don't want to accept replacements that require low
                // feerate junk to be mined first. Ideally we'd keep track of
                // the ancestor feerates and make the decision based on that,
                // but for now requiring all new inputs to be confirmed works.
                if (!setConflictsParents.count(tx.vin[j].prevout.hash))
                {
                    // Rather than check the UTXO set - potentially expensive -
                    // it's cheaper to just check if the new input refers to a
                    // tx that's in the mempool.
                    if (pool.exists(tx.vin[j].prevout.hash)) {
                        return state.DoS(0, false,
                                         REJECT_NONSTANDARD, "replacement-adds-unconfirmed", false,
                                         strprintf("replacement %s adds unconfirmed input, idx %d",
                                                  hash.ToString(), j));
                    }
                }
            }

            // The replacement must pay greater fees than the transactions it
            // replaces - if we did the bandwidth used by those conflicting
            // transactions would not be paid for.
            if (nModifiedFees < nConflictingFees)
            {
                return state.DoS(0, false,
                                 REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                 strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
                                          hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
            }

            // Finally in addition to paying more fees than the conflicts the
            // new transaction must pay for its own bandwidth.
            CAmount nDeltaFees = nModifiedFees - nConflictingFees;
            if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
            {
                return state.DoS(0, false,
                        REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                        strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
                              hash.ToString(),
                              FormatMoney(nDeltaFees),
                              FormatMoney(::incrementalRelayFee.GetFee(nSize))));
            }
        }

        // --- Script validation (done last; most expensive) -------------------
        constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;

        // Check against previous transactions
        // This is done last to help prevent CPU exhaustion denial-of-service attacks.
        PrecomputedTransactionData txdata(tx);
        if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, false, txdata)) {
            // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
            // need to turn both off, and compare against just turning off CLEANSTACK
            // to see if the failure is specifically due to witness validation.
            CValidationState stateDummy; // Want reported failures to be from first CheckInputs
            if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
                !CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
                // Only the witness is missing, so the transaction itself may be fine.
                state.SetCorruptionPossible();
            }
            return false; // state filled in by CheckInputs
        }

        // Check again against the current block tip's script verification
        // flags to cache our script execution flags. This is, of course,
        // useless if the next block has different script flags from the
        // previous one, but because the cache tracks script flags for us it
        // will auto-invalidate and we'll just have a few blocks of extra
        // misses on soft-fork activation.
        //
        // This is also useful in case of bugs in the standard flags that cause
        // transactions to pass as valid when they're actually invalid. For
        // instance the STRICTENC flag was incorrectly allowing certain
        // CHECKSIG NOT scripts to pass, even though they were invalid.
        //
        // There is a similar check in CreateNewBlock() to prevent creating
        // invalid blocks (using TestBlockValidity), however allowing such
        // transactions into the mempool can be exploited as a DoS attack.
        unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(chainActive.Tip(), chainparams.GetConsensus());
        if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata)) {
            return error("%s: BUG! PLEASE REPORT THIS! CheckInputs failed against latest-block but not STANDARD flags %s, %s",
                    __func__, hash.ToString(), FormatStateMessage(state));
        }

        if (test_accept) {
            // Tx was accepted, but not added
            return true;
        }

        // --- Commit: evict conflicts, insert entry, re-limit -----------------
        // Remove conflicting transactions from the mempool
        for (CTxMemPool::txiter it : allConflicting)
        {
            LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
                    it->GetTx().GetHash().ToString(),
                    hash.ToString(),
                    FormatMoney(nModifiedFees - nConflictingFees),
                    (int)nSize - (int)nConflictingSize);
            if (plTxnReplaced)
                plTxnReplaced->push_back(it->GetSharedTx());
        }
        pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);

        // This transaction should only count for fee estimation if:
        // - it isn't a BIP 125 replacement transaction (may not be widely supported)
        // - it's not being re-added during a reorg which bypasses typical mempool fee limits
        // - the node is not behind
        // - the transaction is not dependent on any other transactions in the mempool
        bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);

        // Store transaction in memory
        pool.addUnchecked(entry, setAncestors, validForFeeEstimation);

        // trim mempool and check if tx was trimmed
        if (!bypass_limits) {
            LimitMempoolSize(pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
            if (!pool.exists(hash))
                return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full");
        }
    }

    GetMainSignals().TransactionAddedToMempool(ptx);

    return true;
}
/** (try to) add transaction to memory pool with a specified acceptance time **/
static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
                        bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
                        bool bypass_limits, const CAmount nAbsurdFee, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    std::vector<COutPoint> coins_to_uncache;
    const bool accepted = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept);

    if (!accepted) {
        // The worker may have pulled coins into the cache while validating;
        // a rejected transaction must not be allowed to bloat pcoinsTip.
        for (const COutPoint& outpoint : coins_to_uncache) {
            pcoinsTip->Uncache(outpoint);
        }
    }

    // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
    CValidationState flush_state;
    FlushStateToDisk(chainparams, flush_state, FlushStateMode::PERIODIC);

    return accepted;
}
/**
 * Public mempool-acceptance entry point: stamps the transaction with the
 * current time and delegates to AcceptToMemoryPoolWithTime().
 */
bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
                        bool* pfMissingInputs, std::list<CTransactionRef>* plTxnReplaced,
                        bool bypass_limits, const CAmount nAbsurdFee, bool test_accept)
{
    return AcceptToMemoryPoolWithTime(Params(), pool, state, tx, pfMissingInputs, GetTime(),
                                      plTxnReplaced, bypass_limits, nAbsurdFee, test_accept);
}
/**
 * Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock.
 * If blockIndex is provided, the transaction is fetched from the corresponding block.
 */
bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus::Params& consensusParams, uint256& hashBlock, const CBlockIndex* const block_index)
{
    LOCK(cs_main);

    if (block_index) {
        // Caller pinned a specific block: read it from disk and scan its
        // transactions directly.
        CBlock block;
        if (!ReadBlockFromDisk(block, block_index, consensusParams)) {
            return false;
        }
        for (const auto& block_tx : block.vtx) {
            if (block_tx->GetHash() == hash) {
                txOut = block_tx;
                hashBlock = block_index->GetBlockHash();
                return true;
            }
        }
        return false;
    }

    // No block hint: check the mempool first (hashBlock stays untouched for
    // unconfirmed transactions), then fall back to the optional txindex.
    if (CTransactionRef mempool_tx = mempool.get(hash)) {
        txOut = mempool_tx;
        return true;
    }
    if (g_txindex) {
        return g_txindex->FindTx(hash, hashBlock, txOut);
    }
    return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//
/**
 * Append a block to the given block file: writes the network magic and the
 * serialized size, then the block itself. On success, pos.nPos is updated to
 * the offset of the block data (i.e. just past the 8-byte meta header —
 * ReadRawBlockFromDisk below relies on this layout).
 */
static bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("WriteBlockToDisk: OpenBlockFile failed");

    // Write index header
    unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
    fileout << messageStart << nSize;

    // Write block
    // Record the current offset *after* the header so pos points at the
    // block payload.
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("WriteBlockToDisk: ftell failed");
    pos.nPos = (unsigned int)fileOutPos;
    fileout << block;

    return true;
}
/**
 * Deserialize a block from the given on-disk position into `block`.
 * Also verifies the header's proof of work as a cheap corruption check.
 */
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
    block.SetNull();

    // Open history file to read
    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());

    // Read block
    try {
        filein >> block;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
    }

    // Check the header
    if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
        return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());

    return true;
}
/**
 * Read the block described by an index entry from disk, verifying that the
 * deserialized block's hash matches the index entry.
 */
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
    CDiskBlockPos blockPos;
    {
        // Hold cs_main only long enough to snapshot the on-disk position.
        LOCK(cs_main);
        blockPos = pindex->GetBlockPos();
    }

    if (!ReadBlockFromDisk(block, blockPos, consensusParams)) {
        return false;
    }

    // Guard against a file/offset mixup: what we deserialized must be the
    // block this index entry describes.
    if (block.GetHash() != pindex->GetBlockHash()) {
        return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
                     pindex->ToString(), pindex->GetBlockPos().ToString());
    }
    return true;
}
/**
 * Read a block's raw serialized bytes from disk without deserializing it.
 *
 * `pos` points at the block payload; the 8-byte meta header written before it
 * (4 bytes network magic + 4 bytes payload size) is read back first to verify
 * the magic and learn the payload length.
 *
 * NOTE(review): assumes pos.nPos >= 8, otherwise the unsigned seek-back
 * underflows — confirm callers only pass positions produced by
 * WriteBlockToDisk.
 *
 * @return true on success; false (with a logged error) on open failure,
 *         magic mismatch, oversized payload, or read failure.
 */
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& message_start)
{
    CDiskBlockPos hpos = pos;
    hpos.nPos -= 8; // Seek back 8 bytes for meta header
    CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull()) {
        return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
    }
    try {
        CMessageHeader::MessageStartChars blk_start;
        unsigned int blk_size;
        // Read and validate the meta header before trusting the size field.
        filein >> blk_start >> blk_size;
        if (memcmp(blk_start, message_start, CMessageHeader::MESSAGE_START_SIZE)) {
            return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
                    HexStr(blk_start, blk_start + CMessageHeader::MESSAGE_START_SIZE),
                    HexStr(message_start, message_start + CMessageHeader::MESSAGE_START_SIZE));
        }
        // Reject sizes larger than the maximum deserialization size to avoid
        // huge allocations from a corrupt size field.
        if (blk_size > MAX_SIZE) {
            return error("%s: Block data is larger than maximum deserialization size for %s: %s versus %s", __func__, pos.ToString(),
                    blk_size, MAX_SIZE);
        }
        block.resize(blk_size); // Zeroing of memory is intentional here
        filein.read((char*)block.data(), blk_size);
    } catch(const std::exception& e) {
        return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
    }
    return true;
}
// Convenience overload: look up the block's on-disk position from its index
// entry (under cs_main), then read the raw bytes without the lock held.
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start)
{
    CDiskBlockPos where;
    {
        LOCK(cs_main);
        where = pindex->GetBlockPos();
    }
    return ReadRawBlockFromDisk(block, where, message_start);
}
/**
 * Compute the block subsidy (block reward excluding fees) at a given height.
 *
 * The subsidy starts at 16 COIN and is halved every nSubsidyHalvingInterval
 * blocks (approximately every 2 years), bottoming out at a fixed minimum of
 * 0.001 COIN once 10 halvings have occurred.
 *
 * @param fSuperblockPartOnly when witness (segwit) is enabled on the active
 *        chain tip: if true, return only the 1/20 share carved out for
 *        superblocks; if false, return the remaining 19/20.
 *
 * NOTE(review): reads chainActive.Tip() without taking cs_main here —
 * presumably callers already hold it; confirm before calling from new code.
 */
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams, bool fSuperblockPartOnly)
{
    int halvings = (nHeight - 1) / consensusParams.nSubsidyHalvingInterval;
    // Money amounts must use integer arithmetic: COIN / 1000 equals the old
    // `0.001 * COIN` value without going through floating point.
    const CAmount nMinSubsidy = COIN / 1000;
    // Force block reward to minimum when right shift is undefined.
    if (halvings >= 10)
        return nMinSubsidy;
    CAmount nSubsidy = 16 * COIN;
    // Subsidy is cut in half approximately every 2 years.
    nSubsidy >>= halvings;
    if (IsWitnessEnabled(chainActive.Tip(), consensusParams)) {
        // Post-segwit, 5% of the subsidy is reserved for superblock payments.
        return fSuperblockPartOnly ? nSubsidy/20 : nSubsidy - nSubsidy/20;
    } else {
        return nSubsidy;
    }
}
// Compute the masternode share of a block's reward. The share starts at 20%
// of the block value and grows by 2.5% (blockValue/40) at the increase
// activation height and again after each of the next five full periods,
// topping out at 35%.
CAmount GetMasternodePayment(int nHeight, CAmount blockValue)
{
    CAmount payment = blockValue/5; // start at 20%
    const int nMNPIBlock = Params().GetConsensus().nMasternodePaymentsIncreaseBlock;
    const int nMNPIPeriod = Params().GetConsensus().nMasternodePaymentsIncreasePeriod;
    // Six step-ups of 2.5% each: 22.5%, 25%, 27.5%, 30%, 32.5%, 35%.
    for (int step = 0; step < 6; ++step) {
        if (nHeight > nMNPIBlock + nMNPIPeriod * step) {
            payment += blockValue / 40;
        }
    }
    return payment;
}
/**
 * Return true while the node is still in initial block download (IBD), i.e.
 * importing/reindexing, missing a chain tip, below the minimum known chain
 * work, or with a tip older than nMaxTipAge.
 *
 * Once all conditions clear, the result latches to false for the remainder
 * of the process lifetime (even if the tip later becomes stale again).
 */
bool IsInitialBlockDownload()
{
    // Once this function has returned false, it must remain false.
    static std::atomic<bool> latchToFalse{false};
    // Optimization: pre-test latch before taking the lock.
    if (latchToFalse.load(std::memory_order_relaxed))
        return false;
    LOCK(cs_main);
    // Re-check under cs_main in case another thread latched meanwhile.
    if (latchToFalse.load(std::memory_order_relaxed))
        return false;
    if (fImporting || fReindex)
        return true;
    if (chainActive.Tip() == nullptr)
        return true;
    // Not enough accumulated work compared to the hard-coded minimum.
    if (chainActive.Tip()->nChainWork < nMinimumChainWork)
        return true;
    // Tip is too old to be considered synced.
    if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
        return true;
    LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
    latchToFalse.store(true, std::memory_order_relaxed);
    return false;
}
/** Best fork tip and its fork base seen so far, used by the fork-warning
 *  logic below; accessed under cs_main. */
CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr;
static void AlertNotify(const std::string& strMessage)
{
uiInterface.NotifyAlertChanged();
std::string strCmd = gArgs.GetArg("-alertnotify", "");
if (strCmd.empty()) return;
// Alert text should be plain ascii coming from a trusted source, but to
// be safe we first strip anything not in safeChars, then add single quotes around
// the whole string before passing it to the shell:
std::string singleQuote("'");
std::string safeStatus = SanitizeString(strMessage);
safeStatus = singleQuote+safeStatus+singleQuote;
boost::replace_all(strCmd, "%s", safeStatus);
std::thread t(runCommand, strCmd);
t.detach(); // thread runs free
}
/**
 * Update the large-fork / invalid-chain warning state based on the currently
 * recorded best fork (pindexBestForkTip/Base) and best invalid chain.
 * Raises or clears warnings via SetfLargeWork*Found and -alertnotify.
 */
static void CheckForkWarningConditions() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);
    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before finishing our initial sync)
    if (IsInitialBlockDownload())
        return;
    // If our best fork is no longer within 120 blocks (+/- 3 hours if no one mines it)
    // of our head, drop it
    if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 120)
        pindexBestForkTip = nullptr;
    // Warn if we have a recorded fork tip, or an invalid chain with at least
    // ~6 blocks more work than our current tip.
    if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
    {
        if (!GetfLargeWorkForkFound() && pindexBestForkBase)
        {
            if(pindexBestForkBase->phashBlock){
                std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
                    pindexBestForkBase->phashBlock->ToString() + std::string("'");
                AlertNotify(warning);
            }
        }
        if (pindexBestForkTip && pindexBestForkBase)
        {
            LogPrintf("%s: Warning: Large valid fork found\n  forking the chain at height %d (%s)\n  lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
                   pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
                   pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
            SetfLargeWorkForkFound(true);
        }
        else
        {
            LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
            SetfLargeWorkInvalidChainFound(true);
        }
    }
    else
    {
        // No concerning fork or invalid chain: clear both warning flags.
        SetfLargeWorkForkFound(false);
        SetfLargeWorkInvalidChainFound(false);
    }
}
/**
 * Called when a block extends a chain that is not our active chain. Walks
 * back to the fork point against the active chain, records the fork tip/base
 * if it is long and recent enough to be worth warning about, then refreshes
 * the warning state.
 */
static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);
    // If we are on a fork that is sufficiently large, set a warning flag
    CBlockIndex* pfork = pindexNewForkTip;
    CBlockIndex* plonger = chainActive.Tip();
    // Walk both chains back until they meet at the common ancestor (fork point).
    while (pfork && pfork != plonger)
    {
        while (plonger && plonger->nHeight > pfork->nHeight)
            plonger = plonger->pprev;
        if (pfork == plonger)
            break;
        pfork = pfork->pprev;
    }
    // We define a condition where we should warn the user about as a fork of at least 7 blocks
    // with a tip within 120 blocks (+/- 3 hours if no one mines it) of ours
    // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
    // hash rate operating on the fork.
    // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
    // the 7-block condition and from this always have the most-likely-to-cause-warning fork
    if (pfork && (!pindexBestForkTip || pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
            pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
            chainActive.Height() - pindexNewForkTip->nHeight < 120)
    {
        pindexBestForkTip = pindexNewForkTip;
        pindexBestForkBase = pfork;
    }
    CheckForkWarningConditions();
}
/**
 * Record that an invalid chain tip was found: update pindexBestInvalid if
 * this chain has more work, log both the invalid tip and our current best,
 * and refresh the fork-warning state.
 */
void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
        pindexBestInvalid = pindexNew;
    LogPrintf("%s: invalid block=%s  height=%d  log2_work=%.8g  date=%s\n", __func__,
      pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
      log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
    CBlockIndex *tip = chainActive.Tip();
    assert (tip);
    LogPrintf("%s:  current best=%s  height=%d  log2_work=%.8g  date=%s\n", __func__,
      tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
      FormatISO8601DateTime(tip->GetBlockTime()));
    CheckForkWarningConditions();
}
/**
 * Mark a block that failed validation as permanently invalid — unless the
 * failure may stem from local data corruption, in which case we cannot be
 * sure the block itself is bad and leave its status untouched.
 */
void CChainState::InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
    if (!state.CorruptionPossible()) {
        pindex->nStatus |= BLOCK_FAILED_VALID;
        m_failed_blocks.insert(pindex);
        setDirtyBlockIndex.insert(pindex);      // persist the status change
        setBlockIndexCandidates.erase(pindex);  // no longer a tip candidate
        InvalidChainFound(pindex);
    }
}
/**
 * Apply a transaction to the UTXO view: spend all of its inputs (recording
 * each spent coin into txundo so the operation can be reversed) and add its
 * outputs as new coins at the given height. Coinbase transactions have no
 * inputs to spend.
 */
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
{
    // mark inputs spent
    if (!tx.IsCoinBase()) {
        txundo.vprevout.reserve(tx.vin.size());
        for (const CTxIn &txin : tx.vin) {
            txundo.vprevout.emplace_back();
            // The coin must exist — callers validate inputs beforehand.
            bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
            assert(is_spent);
        }
    }
    // add outputs
    AddCoins(inputs, tx, nHeight);
}
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
{
CTxUndo txundo;
UpdateCoins(tx, inputs, txundo, nHeight);
}
// Execute the script verification configured at construction time for input
// nIn of ptxTo against the stored output, recording any script error in
// `error`. Returns true if the script verifies.
bool CScriptCheck::operator()() {
    const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
    const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
    return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
}
/**
 * Height at which a transaction spending from this view would be included,
 * i.e. one past the view's best block.
 * NOTE(review): assumes the view's best block is always present in
 * mapBlockIndex — a missing entry would dereference a null pointer; confirm
 * callers guarantee this.
 */
int GetSpendHeight(const CCoinsViewCache& inputs)
{
    LOCK(cs_main);
    CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
    return pindexPrev->nHeight + 1;
}
/** Cache of scripts already executed successfully, keyed by a salted hash of
 *  (witness txid, verification flags); lets repeated validations be skipped. */
static CuckooCache::cache<uint256, SignatureCacheHasher> scriptExecutionCache;
// Per-process random salt so cache keys cannot be precomputed externally.
static uint256 scriptExecutionCacheNonce(GetRandHash());
/** Size the script execution cache from -maxsigcachesize (the budget is
 *  split evenly with the signature cache, hence the division by 2). */
void InitScriptExecutionCache() {
    // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
    // setup_bytes creates the minimum possible cache (2 elements).
    size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
    size_t nElems = scriptExecutionCache.setup_bytes(nMaxCacheSize);
    LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
            (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
}
/**
* Check whether all inputs of this transaction are valid (no double spends, scripts & sigs, amounts)
* This does not modify the UTXO set.
*
* If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any
* script checks which are not necessary (eg due to script execution cache hits) are, obviously,
* not pushed onto pvChecks/run.
*
* Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache
* which are matched. This is useful for checking blocks where we will likely never need the cache
* entry again.
*
* Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
*/
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    // Coinbase transactions have no inputs to verify.
    if (!tx.IsCoinBase())
    {
        if (pvChecks)
            pvChecks->reserve(tx.vin.size());
        // The inexpensive input checks (amounts, availability) are done by
        // Consensus::CheckTxInputs before this function is reached.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion attacks.
        // Skip script verification when connecting blocks under the
        // assumevalid block. Assuming the assumevalid block is valid this
        // is safe because block merkle hashes are still computed and checked,
        // Of course, if an assumed valid block is invalid due to false scriptSigs
        // this optimization would allow an invalid chain to be accepted.
        if (fScriptChecks) {
            // First check if script executions have been cached with the same
            // flags. Note that this assumes that the inputs provided are
            // correct (ie that the transaction hash which is in tx's prevouts
            // properly commits to the scriptPubKey in the inputs view of that
            // transaction).
            uint256 hashCacheEntry;
            // We only use the first 19 bytes of nonce to avoid a second SHA
            // round - giving us 19 + 32 + 4 = 55 bytes (+ 8 + 1 = 64)
            static_assert(55 - sizeof(flags) - 32 >= 128/8, "Want at least 128 bits of nonce for script execution cache");
            CSHA256().Write(scriptExecutionCacheNonce.begin(), 55 - sizeof(flags) - 32).Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
            AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
            // Cache hit: this exact (wtxid, flags) combination already verified.
            if (scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
                return true;
            }
            for (unsigned int i = 0; i < tx.vin.size(); i++) {
                const COutPoint &prevout = tx.vin[i].prevout;
                const Coin& coin = inputs.AccessCoin(prevout);
                assert(!coin.IsSpent());
                // We very carefully only pass in things to CScriptCheck which
                // are clearly committed to by tx' witness hash. This provides
                // a sanity check that our caching is not introducing consensus
                // failures through additional data in, eg, the coins being
                // spent being checked as a part of CScriptCheck.
                // Verify signature
                CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata);
                if (pvChecks) {
                    // Defer: hand the check to the parallel script-check queue.
                    pvChecks->push_back(CScriptCheck());
                    check.swap(pvChecks->back());
                } else if (!check()) {
                    if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                        // Check whether the failure was caused by a
                        // non-mandatory script verification check, such as
                        // non-standard DER encodings or non-null dummy
                        // arguments; if so, don't trigger DoS protection to
                        // avoid splitting the network between upgraded and
                        // non-upgraded nodes.
                        CScriptCheck check2(coin.out, tx, i,
                                flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
                        if (check2())
                            return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
                    }
                    // Failures of other flags indicate a transaction that is
                    // invalid in new blocks, e.g. an invalid P2SH. We DoS ban
                    // such nodes as they are not following the protocol. That
                    // said during an upgrade careful thought should be taken
                    // as to the correct behavior - we may want to continue
                    // peering with non-upgraded nodes even after soft-fork
                    // super-majority signaling has occurred.
                    return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
                }
            }
            if (cacheFullScriptStore && !pvChecks) {
                // We executed all of the provided scripts, and were told to
                // cache the result. Do so now.
                scriptExecutionCache.insert(hashCacheEntry);
            }
        }
    }
    return true;
}
namespace {
/**
 * Append a block's undo data to the undo (rev) file, preceded by the network
 * magic and serialized size, and followed by a checksum over
 * (block hash || undo data). On success, pos.nPos points at the undo payload.
 */
bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("%s: OpenUndoFile failed", __func__);
    // Write index header
    unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
    fileout << messageStart << nSize;
    // Write undo data
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("%s: ftell failed", __func__);
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;
    // calculate & write checksum
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();
    return true;
}
/**
 * Read a block's undo data back from the undo file and verify its checksum
 * (which is keyed on the previous block's hash).
 */
static bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex *pindex)
{
    CDiskBlockPos pos = pindex->GetUndoPos();
    if (pos.IsNull()) {
        return error("%s: no undo data available", __func__);
    }
    // Open history file to read
    CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("%s: OpenUndoFile failed", __func__);
    // Read block
    uint256 hashChecksum;
    CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
    try {
        verifier << pindex->pprev->GetBlockHash();
        verifier >> blockundo;
        filein >> hashChecksum;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }
    // Verify checksum
    if (hashChecksum != verifier.GetHash())
        return error("%s: Checksum mismatch", __func__);
    return true;
}
/** Abort with a message: log, alert the UI, and begin shutdown. */
static bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
{
    SetMiscWarning(strMessage);
    LogPrintf("*** %s\n", strMessage);
    uiInterface.ThreadSafeMessageBox(
        userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
        "", CClientUIInterface::MSG_ERROR);
    StartShutdown();
    return false;
}
/** AbortNode variant that also records the message on a CValidationState. */
static bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
{
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
} // namespace
/**
* Restore the UTXO in a Coin at a given COutPoint
* @param undo The Coin to be restored.
* @param view The coins view to which to apply the changes.
* @param out The out point that corresponds to the tx input.
* @return A DisconnectResult as an int
*/
int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
{
    bool fClean = true;
    if (view.HaveCoin(out)) fClean = false; // overwriting transaction output
    if (undo.nHeight == 0) {
        // Missing undo metadata (height and coinbase). Older versions included this
        // information only in undo records for the last spend of a transactions'
        // outputs. This implies that it must be present for some other output of the same tx.
        const Coin& alternate = AccessByTxid(view, out.hash);
        if (!alternate.IsSpent()) {
            // Borrow metadata from a sibling output of the same transaction.
            undo.nHeight = alternate.nHeight;
            undo.fCoinBase = alternate.fCoinBase;
        } else {
            return DISCONNECT_FAILED; // adding output for transaction without known metadata
        }
    }
    // The potential_overwrite parameter to AddCoin is only allowed to be false if we know for
    // sure that the coin did not already exist in the cache. As we have queried for that above
    // using HaveCoin, we don't need to guess. When fClean is false, a coin already existed and
    // it is an overwrite.
    view.AddCoin(out, std::move(undo), !fClean);
    return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
/** Undo the effects of this block (with given index) on the UTXO set represented by coins.
* When FAILED is returned, view is left in an indeterminate state. */
DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
{
    // fClean tracks whether the undo matched the view exactly; mismatches
    // downgrade the result to DISCONNECT_UNCLEAN rather than failing outright.
    bool fClean = true;
    CBlockUndo blockUndo;
    if (!UndoReadFromDisk(blockUndo, pindex)) {
        error("DisconnectBlock(): failure reading undo data");
        return DISCONNECT_FAILED;
    }
    // One undo record per non-coinbase transaction.
    if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
        error("DisconnectBlock(): block and undo data inconsistent");
        return DISCONNECT_FAILED;
    }
    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = *(block.vtx[i]);
        uint256 hash = tx.GetHash();
        bool is_coinbase = tx.IsCoinBase();
        // Check that all outputs are available and match the outputs in the block itself
        // exactly.
        for (size_t o = 0; o < tx.vout.size(); o++) {
            if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
                COutPoint out(hash, o);
                Coin coin;
                bool is_spent = view.SpendCoin(out, &coin);
                if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
                    fClean = false; // transaction output mismatch
                }
            }
        }
        // restore inputs
        if (i > 0) { // not coinbases
            CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size()) {
                error("DisconnectBlock(): transaction and undo data inconsistent");
                return DISCONNECT_FAILED;
            }
            // Restore spent coins in reverse input order.
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint &out = tx.vin[j].prevout;
                int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
                if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
                fClean = fClean && res != DISCONNECT_UNCLEAN;
            }
            // At this point, all of txundo.vprevout should have been moved out.
        }
    }
    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());
    return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
/**
 * Flush the current block file and its undo file to disk. If fFinalize is
 * true, also truncate both files to their used size (done when moving on to
 * a new block file). Aborts the node on I/O failure.
 */
void static FlushBlockFile(bool fFinalize = false)
{
    LOCK(cs_LastBlockFile);
    CDiskBlockPos posOld(nLastBlockFile, 0);
    bool status = true;
    FILE *fileOld = OpenBlockFile(posOld);
    if (fileOld) {
        if (fFinalize)
            status &= TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
        status &= FileCommit(fileOld);
        fclose(fileOld);
    }
    fileOld = OpenUndoFile(posOld);
    if (fileOld) {
        if (fFinalize)
            status &= TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
        status &= FileCommit(fileOld);
        fclose(fileOld);
    }
    if (!status) {
        AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
    }
}
static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);
/**
 * Persist a block's undo data to the rev file (if not already written) and
 * record its position and the BLOCK_HAVE_UNDO flag in the block index.
 */
static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, CValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
{
    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull()) {
        CDiskBlockPos _pos;
        // Reserve space: serialized size plus 40 bytes of header/checksum overhead.
        if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40))
            return error("ConnectBlock(): FindUndoPos failed");
        if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
            return AbortNode(state, "Failed to write undo data");
        // update nUndoPos in block index
        pindex->nUndoPos = _pos.nPos;
        pindex->nStatus |= BLOCK_HAVE_UNDO;
        setDirtyBlockIndex.insert(pindex);
    }
    return true;
}
/** Queue of script verification jobs, processed in batches of up to 128. */
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
/** Script-check worker thread entry point: services scriptcheckqueue. */
void ThreadScriptCheck() {
    RenameThread("chaincoin-scriptch");
    scriptcheckqueue.Thread();
}
/** Cache of BIP9 deployment threshold states, keyed per deployment. */
VersionBitsCache versionbitscache GUARDED_BY(cs_main);
/**
 * Compute the nVersion field for a block built on pindexPrev: the versionbits
 * top bits, plus one bit for every deployment currently STARTED or LOCKED_IN.
 */
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
    LOCK(cs_main);
    int32_t nVersion = VERSIONBITS_TOP_BITS;
    for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
        ThresholdState state = VersionBitsState(pindexPrev, params, static_cast<Consensus::DeploymentPos>(i), versionbitscache);
        if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) {
            // Signal for this deployment by setting its assigned bit.
            nVersion |= VersionBitsMask(params, static_cast<Consensus::DeploymentPos>(i));
        }
    }
    return nVersion;
}
/**
* Threshold condition checker that triggers when unknown versionbits are seen on the network.
*/
class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
{
private:
    int bit; // versionbit position this checker watches
public:
    explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
    // Always active: watch from the beginning of time, forever.
    int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
    int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
    int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
    int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
    // True when the block signals this bit but our own version computation
    // would not set it — i.e. miners are signaling something we don't know.
    bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
    {
        return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
               ((pindex->nVersion >> bit) & 1) != 0 &&
               ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
    }
};
/** Per-bit threshold state caches for the unknown-versionbits warning logic. */
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS] GUARDED_BY(cs_main);
// 0.13.0 was shipped with a segwit deployment defined for testnet, but not for
// mainnet. We no longer need to support disabling the segwit deployment
// except for testing purposes, due to limitations of the functional test
// environment. See test/functional/p2p-segwit.py.
/** Whether segwit is deployed at all on this chain (height not "never"). */
static bool IsScriptWitnessEnabled(const Consensus::Params& params)
{
    return params.SegwitHeight != std::numeric_limits<int>::max();
}
/** Determine the script verification flags that apply to a block at pindex,
 *  based on which soft forks are active at that height. */
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    AssertLockHeld(cs_main);
    unsigned int flags = SCRIPT_VERIFY_NONE;
    // BIP16 became active on Bitcoin Apr 1 2012
    // As those rules have always been in place for Chaincoin,
    // just enforce from Genesis
    flags |= SCRIPT_VERIFY_P2SH;
    // Enforce WITNESS rules whenever P2SH is in effect (and the segwit
    // deployment is defined).
    if (flags & SCRIPT_VERIFY_P2SH && IsScriptWitnessEnabled(consensusparams)) {
        flags |= SCRIPT_VERIFY_WITNESS;
    }
    // Start enforcing the DERSIG (BIP66) rule
    if (pindex->nHeight >= consensusparams.BIP66Height) {
        flags |= SCRIPT_VERIFY_DERSIG;
    }
    // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
    if (pindex->nHeight >= consensusparams.BIP65Height) {
        flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
    }
    // Start enforcing BIP112 (CHECKSEQUENCEVERIFY)
    if (pindex->nHeight >= consensusparams.CSVHeight) {
        flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
    }
    // Start enforcing BIP147 NULLDUMMY (activated simultaneously with segwit)
    if (IsWitnessEnabled(pindex->pprev, consensusparams)) {
        flags |= SCRIPT_VERIFY_NULLDUMMY;
    }
    return flags;
}
// Cumulative benchmark timers (in microseconds) and block counter for the
// validation bench logging (LogPrint(BCLog::BENCH, ...)) below.
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
static int64_t nBlocksTotal = 0;
/** Apply the effects of this block (with given index) on the UTXO set represented by coins.
* Validity checks that depend on the UTXO set are also done; ConnectBlock()
* can fail if those validity checks fail (among other reasons). */
bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex,
CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck)
{
AssertLockHeld(cs_main);
assert(pindex);
assert(*pindex->phashBlock == block.GetHash());
int64_t nTimeStart = GetTimeMicros();
// Check it again in case a previous version let a bad block in
// NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
// ContextualCheckBlockHeader() here. This means that if we add a new
// consensus rule that is enforced in one of those two functions, then we
// may have let in a block that violates the rule prior to updating the
// software, and we would NOT be enforcing the rule here. Fully solving
// upgrade from one software version to the next after a consensus rule
// change is potentially tricky and issue-specific (see RewindBlockIndex()
// for one general approach that was used for BIP 141 deployment).
// Also, currently the rule against blocks more than 2 hours in the future
// is enforced in ContextualCheckBlockHeader(); we wouldn't want to
// re-enforce that rule here (at least until we make it impossible for
// GetAdjustedTime() to go backward).
if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) {
if (state.CorruptionPossible()) {
// We don't write down blocks to disk if they may have been
// corrupted, so this should be impossible unless we're having hardware
// problems.
return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
}
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
}
// verify that the view's current state corresponds to the previous block
uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
assert(hashPrevBlock == view.GetBestBlock());
// Special case for the genesis block, skipping connection of its transactions
// (its coinbase is unspendable)
if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
if (!fJustCheck)
view.SetBestBlock(pindex->GetBlockHash());
return true;
}
nBlocksTotal++;
bool fScriptChecks = true;
if (!hashAssumeValid.IsNull()) {
// We've been configured with the hash of a block which has been externally verified to have a valid history.
// A suitable default value is included with the software and updated from time to time. Because validity
// relative to a piece of software is an objective fact these defaults can be easily reviewed.
// This setting doesn't force the selection of any particular chain but makes validating some faster by
// effectively caching the result of part of the verification.
BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid);
if (it != mapBlockIndex.end()) {
if (it->second->GetAncestor(pindex->nHeight) == pindex &&
pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
pindexBestHeader->nChainWork >= nMinimumChainWork) {
// This block is a member of the assumed verified chain and an ancestor of the best header.
// The equivalent time check discourages hash power from extorting the network via DOS attack
// into accepting an invalid block through telling users they must manually set assumevalid.
// Requiring a software change or burying the invalid block, regardless of the setting, makes
// it hard to hide the implication of the demand. This also avoids having release candidates
// that are hardly doing any signature verification at all in testing without having to
// artificially set the default assumed verified block further back.
// The test against nMinimumChainWork prevents the skipping when denied access to any chain at
// least as good as the expected chain.
fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, chainparams.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
}
}
}
int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
// Removed BIP30 checks since there are no blocks before BIP34 in Chaincoin
// Start enforcing BIP68 (sequence locks)
int nLockTimeFlags = 0;
if (pindex->nHeight >= chainparams.GetConsensus().CSVHeight) {
nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
}
// Get the script flags for this block
unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus());
int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
CBlockUndo blockundo;
CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : nullptr);
std::vector<int> prevheights;
CAmount nFees = 0;
int nInputs = 0;
int64_t nSigOpsCost = 0;
blockundo.vtxundo.reserve(block.vtx.size() - 1);
std::vector<PrecomputedTransactionData> txdata;
txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
for (unsigned int i = 0; i < block.vtx.size(); i++)
{
const CTransaction &tx = *(block.vtx[i]);
nInputs += tx.vin.size();
if (!tx.IsCoinBase())
{
CAmount txfee = 0;
if (!Consensus::CheckTxInputs(tx, state, view, pindex->nHeight, txfee)) {
return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
}
nFees += txfee;
if (!MoneyRange(nFees)) {
return state.DoS(100, error("%s: accumulated fee in the block out of range.", __func__),
REJECT_INVALID, "bad-txns-accumulated-fee-outofrange");
}
// Check that transaction is BIP68 final
// BIP68 lock checks (as opposed to nLockTime checks) must
// be in ConnectBlock because they require the UTXO set
prevheights.resize(tx.vin.size());
for (size_t j = 0; j < tx.vin.size(); j++) {
prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
}
if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
REJECT_INVALID, "bad-txns-nonfinal");
}
}
// GetTransactionSigOpCost counts 3 types of sigops:
// * legacy (always)
// * p2sh (when P2SH enabled in flags and excludes coinbase)
// * witness (when witness enabled in flags and excludes coinbase)
nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST)
return state.DoS(100, error("ConnectBlock(): too many sigops"),
REJECT_INVALID, "bad-blk-sigops");
txdata.emplace_back(tx);
if (!tx.IsCoinBase())
{
std::vector<CScriptCheck> vChecks;
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr))
return error("ConnectBlock(): CheckInputs on %s failed with %s",
tx.GetHash().ToString(), FormatStateMessage(state));
control.Add(vChecks);
}
CTxUndo undoDummy;
if (i > 0) {
blockundo.vtxundo.push_back(CTxUndo());
}
UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
}
int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
// CHAINCOIN : MODIFIED TO CHECK MASTERNODE PAYMENTS AND SUPERBLOCKS
LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
// It's possible that we simply don't have enough data and this could fail
// (i.e. block itself could be a correct one and we need to store it),
// that's why this is in ConnectBlock. Could be the other way around however -
// the peer who sent us this block is missing some data and wasn't able
// to recognize that block is actually invalid.
// TODO: resync data (both ways?) and try to reprocess this block later.
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
std::string strError = "";
if (!IsBlockValueValid(block, pindex->nHeight, blockReward, strError)) {
return state.DoS(0, error("ConnectBlock(CHAINCOIN): %s", strError), REJECT_INVALID, "bad-cb-amount");
}
if (!IsBlockPayeeValid(MakeTransactionRef (std::move(*block.vtx[0])), pindex->nHeight, blockReward)) {
return state.DoS(0, error("ConnectBlock(CHAINCOIN): couldn't find masternode or superblock payments"),
REJECT_INVALID, "bad-cb-payee");
}
// END CHAINCOIN
if (!control.Wait())
return state.DoS(100, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed");
int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
if (fJustCheck)
return true;
if (!WriteUndoDataForBlock(blockundo, state, pindex, chainparams))
return false;
if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
setDirtyBlockIndex.insert(pindex);
}
assert(pindex->phashBlock);
// add this block to the view's block chain
view.SetBestBlock(pindex->GetBlockHash());
int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal);
return true;
}
/**
* Update the on-disk chain state.
* The caches and indexes are flushed depending on the mode we're called with
* if they're too large, if it's been a while since the last write,
* or always and in all cases if we're in prune mode and are deleting files.
*
* If FlushStateMode::NONE is used, then FlushStateToDisk(...) won't do anything
* besides checking if we need to prune.
*/
bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight) {
    int64_t nMempoolUsage = mempool.DynamicMemoryUsage();
    LOCK(cs_main);
    // Microsecond timestamps of the last block/index write and the last
    // chainstate flush; function-local statics so they persist across calls
    // and rate-limit the periodic work below.
    static int64_t nLastWrite = 0;
    static int64_t nLastFlush = 0;
    std::set<int> setFilesToPrune;
    bool full_flush_completed = false;
    try {
    {
        bool fFlushForPrune = false;
        bool fDoFullFlush = false;
        // Inner scope: cs_LastBlockFile is released before the
        // ChainStateFlushed notification at the bottom.
        LOCK(cs_LastBlockFile);
        if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
            if (nManualPruneHeight > 0) {
                // Manual prune (pruneblockchain RPC style) up to the given height.
                FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight);
            } else {
                FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
                fCheckForPruning = false;
            }
            if (!setFilesToPrune.empty()) {
                // Pruning forces a full flush so the chainstate never refers
                // to data in files we are about to delete.
                fFlushForPrune = true;
                if (!fHavePruned) {
                    // Record persistently that this node has pruned block files.
                    pblocktree->WriteFlag("prunedblockfiles", true);
                    fHavePruned = true;
                }
            }
        }
        int64_t nNow = GetTimeMicros();
        // Avoid writing/flushing immediately after startup.
        if (nLastWrite == 0) {
            nLastWrite = nNow;
        }
        if (nLastFlush == 0) {
            nLastFlush = nNow;
        }
        int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
        int64_t cacheSize = pcoinsTip->DynamicMemoryUsage();
        // Unused mempool budget is lent to the coins cache.
        int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
        // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
        bool fCacheLarge = mode == FlushStateMode::PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024);
        // The cache is over the limit, we have to write now.
        bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cacheSize > nTotalSpace;
        // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
        bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
        // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
        bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
        // Combine all conditions that result in a full cache flush.
        fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
        // Write blocks and block index to disk.
        if (fDoFullFlush || fPeriodicWrite) {
            // Depend on nMinDiskSpace to ensure we can write block index
            if (!CheckDiskSpace(0, true))
                return state.Error("out of disk space");
            // First make sure all block and undo data is flushed to disk.
            FlushBlockFile();
            // Then update all block file information (which may refer to block and undo files).
            {
                std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
                vFiles.reserve(setDirtyFileInfo.size());
                // Drain the dirty-file set while collecting entries to write.
                for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
                    vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
                    setDirtyFileInfo.erase(it++);
                }
                std::vector<const CBlockIndex*> vBlocks;
                vBlocks.reserve(setDirtyBlockIndex.size());
                // Drain the dirty-block-index set the same way.
                for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
                    vBlocks.push_back(*it);
                    setDirtyBlockIndex.erase(it++);
                }
                if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
                    return AbortNode(state, "Failed to write to block index database");
                }
            }
            // Finally remove any pruned files
            if (fFlushForPrune)
                UnlinkPrunedFiles(setFilesToPrune);
            nLastWrite = nNow;
        }
        // Flush best chain related state. This can only be done if the blocks / block index write was also done.
        if (fDoFullFlush && !pcoinsTip->GetBestBlock().IsNull()) {
            // Typical Coin structures on disk are around 48 bytes in size.
            // Pushing a new one to the database can cause it to be written
            // twice (once in the log, and once in the tables). This is already
            // an overestimation, as most will delete an existing entry or
            // overwrite one. Still, use a conservative safety factor of 2.
            if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize()))
                return state.Error("out of disk space");
            // Flush the chainstate (which may refer to block index entries).
            if (!pcoinsTip->Flush())
                return AbortNode(state, "Failed to write to coin database");
            nLastFlush = nNow;
            full_flush_completed = true;
        }
    }
    if (full_flush_completed) {
        // Update best block in wallet (so we can detect restored wallets).
        GetMainSignals().ChainStateFlushed(chainActive.GetLocator());
    }
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error while flushing: ") + e.what());
    }
    return true;
}
void FlushStateToDisk() {
CValidationState state;
const CChainParams& chainparams = Params();
if (!FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
}
}
void PruneAndFlush() {
CValidationState state;
fCheckForPruning = true;
const CChainParams& chainparams = Params();
if (!FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
}
}
/**
 * Record strWarning as the current misc warning; additionally raise a
 * one-time user alert (AlertNotify) on the first call only.
 */
static void DoWarning(const std::string& strWarning)
{
    static bool fAlertShown = false;
    SetMiscWarning(strWarning);
    if (fAlertShown) return;
    AlertNotify(strWarning);
    fAlertShown = true;
}
/** Append warn to res, inserting a ", " separator if res is non-empty. */
static void AppendWarning(std::string& res, const std::string& warn)
{
    res += res.empty() ? warn : (", " + warn);
}
/** Check warning conditions and do some notifications on new chain tip set. */
void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainParams) {
    // New best block
    mempool.AddTransactionsUpdated(1);
    // Publish the new tip hash and wake any threads blocked waiting on
    // g_best_block_cv for a tip change.
    {
        LOCK(g_best_block_mutex);
        g_best_block = pindexNew->GetBlockHash();
        g_best_block_cv.notify_all();
    }
    std::string warningMessages;
    // Skip version-signal warning checks during initial block download;
    // signals in historical blocks are not actionable for the operator.
    if (!IsInitialBlockDownload())
    {
        int nUpgraded = 0;
        const CBlockIndex* pindex = pindexNew;
        // Warn about unknown versionbits deployments that are ACTIVE (alert
        // immediately) or LOCKED_IN (accumulate into warningMessages).
        for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
            WarningBitsConditionChecker checker(bit);
            ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
            if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
                const std::string strWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
                if (state == ThresholdState::ACTIVE) {
                    DoWarning(strWarning);
                } else {
                    AppendWarning(warningMessages, strWarning);
                }
            }
        }
        // Check the version of the last 100 blocks to see if we need to upgrade:
        for (int i = 0; i < 100 && pindex != nullptr; i++)
        {
            int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
            // Count blocks carrying version bits we do not expect.
            if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0)
                ++nUpgraded;
            pindex = pindex->pprev;
        }
        if (nUpgraded > 0)
            AppendWarning(warningMessages, strprintf(_("%d of last 100 blocks have unexpected version"), nUpgraded));
    }
    // One log line describing the new tip; pieces below continue it.
    LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)", __func__, /* Continued */
      pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
      log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
      FormatISO8601DateTime(pindexNew->GetBlockTime()),
      GuessVerificationProgress(chainParams.TxData(), pindexNew), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
    if (!warningMessages.empty())
        LogPrintf(" warning='%s'", warningMessages); /* Continued */
    LogPrintf("\n");
}
/** Disconnect chainActive's tip.
* After calling, the mempool will be in an inconsistent state, with
* transactions from disconnected blocks being added to disconnectpool. You
 * should make the mempool consistent again by calling UpdateMempoolForReorg
 * with cs_main held.
*
* If disconnectpool is nullptr, then no disconnected transactions are added to
* disconnectpool (note that the caller is responsible for mempool consistency
* in any case).
*/
bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions *disconnectpool)
{
    CBlockIndex *pindexDelete = chainActive.Tip();
    assert(pindexDelete);
    // Read block from disk.
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    CBlock& block = *pblock;
    if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
        return AbortNode(state, "Failed to read block");
    // Apply the block atomically to the chain state.
    int64_t nStart = GetTimeMicros();
    {
        // Undo the block against a view layered over pcoinsTip; the view is
        // flushed only after the whole disconnect succeeds, keeping the
        // UTXO update atomic.
        CCoinsViewCache view(pcoinsTip.get());
        assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
        if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        bool flushed = view.Flush();
        assert(flushed);
    }
    LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
        return false;
    if (disconnectpool) {
        // Save transactions to re-add to mempool at end of reorg.
        // Iterated in reverse (last transaction of the block first).
        for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
            disconnectpool->addTransaction(*it);
        }
        // Cap the disconnect pool's memory usage.
        while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
            // Drop the earliest entry, and remove its children from the mempool.
            auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
            mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
            disconnectpool->removeEntry(it);
        }
    }
    chainActive.SetTip(pindexDelete->pprev);
    UpdateTip(pindexDelete->pprev, chainparams);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    GetMainSignals().BlockDisconnected(pblock);
    return true;
}
// Cumulative benchmark counters (microseconds) for the stages of ConnectTip;
// reported via BCLog::BENCH log lines.
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
//! Trace data for one block connected during an ActivateBestChainStep call.
struct PerBlockConnectTrace {
    CBlockIndex* pindex = nullptr;        //!< Index entry of the connected block
    std::shared_ptr<const CBlock> pblock; //!< The connected block itself
    //! Transactions removed from the mempool as conflicts of this block
    std::shared_ptr<std::vector<CTransactionRef>> conflictedTxs;
    PerBlockConnectTrace() : conflictedTxs(std::make_shared<std::vector<CTransactionRef>>()) {}
};
/**
* Used to track blocks whose transactions were applied to the UTXO state as a
* part of a single ActivateBestChainStep call.
*
* This class also tracks transactions that are removed from the mempool as
* conflicts (per block) and can be used to pass all those transactions
* through SyncTransaction.
*
* This class assumes (and asserts) that the conflicted transactions for a given
* block are added via mempool callbacks prior to the BlockConnected() associated
* with those transactions. If any transactions are marked conflicted, it is
* assumed that an associated block will always be added.
*
* This class is single-use, once you call GetBlocksConnected() you have to throw
* it away and make a new one.
*/
class ConnectTrace {
private:
    //! Connected blocks, plus one trailing empty entry that collects
    //! conflicted transactions for the next (not yet recorded) block.
    std::vector<PerBlockConnectTrace> blocksConnected;
    CTxMemPool &pool;
    //! Scoped connection to the mempool's NotifyEntryRemoved signal;
    //! disconnects automatically when this trace is destroyed.
    boost::signals2::scoped_connection m_connNotifyEntryRemoved;
public:
    explicit ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) {
        m_connNotifyEntryRemoved = pool.NotifyEntryRemoved.connect(std::bind(&ConnectTrace::NotifyEntryRemoved, this, std::placeholders::_1, std::placeholders::_2));
    }
    //! Record a connected block in the trailing entry (which already holds its
    //! conflicted transactions) and start a fresh trailing entry.
    void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
        assert(!blocksConnected.back().pindex);
        assert(pindex);
        assert(pblock);
        blocksConnected.back().pindex = pindex;
        blocksConnected.back().pblock = std::move(pblock);
        blocksConnected.emplace_back();
    }
    //! Return all connected blocks. Single-use: invalidates the trace.
    std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
        // We always keep one extra block at the end of our list because
        // blocks are added after all the conflicted transactions have
        // been filled in. Thus, the last entry should always be an empty
        // one waiting for the transactions from the next block. We pop
        // the last entry here to make sure the list we return is sane.
        assert(!blocksConnected.back().pindex);
        assert(blocksConnected.back().conflictedTxs->empty());
        blocksConnected.pop_back();
        return blocksConnected;
    }
    //! Mempool signal handler: record transactions evicted as CONFLICTs into
    //! the trailing entry; other removal reasons are ignored.
    void NotifyEntryRemoved(CTransactionRef txRemoved, MemPoolRemovalReason reason) {
        assert(!blocksConnected.back().pindex);
        if (reason == MemPoolRemovalReason::CONFLICT) {
            blocksConnected.back().conflictedTxs->emplace_back(std::move(txRemoved));
        }
    }
};
/**
* Connect a new block to chainActive. pblock is either nullptr or a pointer to a CBlock
* corresponding to pindexNew, to bypass loading it again from disk.
*
* The block is added to connectTrace if connection succeeds.
*/
bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool)
{
    assert(pindexNew->pprev == chainActive.Tip());
    // Read block from disk (only when the caller did not supply it).
    int64_t nTime1 = GetTimeMicros();
    std::shared_ptr<const CBlock> pthisBlock;
    if (!pblock) {
        std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
        if (!ReadBlockFromDisk(*pblockNew, pindexNew, chainparams.GetConsensus()))
            return AbortNode(state, "Failed to read block");
        pthisBlock = pblockNew;
    } else {
        pthisBlock = pblock;
    }
    const CBlock& blockConnecting = *pthisBlock;
    // Apply the block atomically to the chain state.
    int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
    int64_t nTime3;
    LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
    {
        // Connect against a temporary view over pcoinsTip; flush only on success.
        CCoinsViewCache view(pcoinsTip.get());
        bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams);
        // Notify listeners of the validation result before acting on failure.
        GetMainSignals().BlockChecked(blockConnecting, state);
        if (!rv) {
            if (state.IsInvalid())
                InvalidBlockFound(pindexNew, state);
            return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), FormatStateMessage(state));
        }
        nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
        LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
        bool flushed = view.Flush();
        assert(flushed);
    }
    int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
    LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
        return false;
    int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
    LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
    // Remove conflicting transactions from the mempool.
    mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
    // Transactions now confirmed need not be re-added after a reorg.
    disconnectpool.removeForBlock(blockConnecting.vtx);
    // Update chainActive & related variables.
    chainActive.SetTip(pindexNew);
    UpdateTip(pindexNew, chainparams);
    int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
    LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
    LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);
    connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
    return true;
}
/**
* Return the tip of the chain with the most work in it, that isn't
* known to be invalid (it's however far from certain to be valid).
*/
CBlockIndex* CChainState::FindMostWorkChain() {
    do {
        CBlockIndex *pindexNew = nullptr;
        // Find the best candidate header.
        {
            // rbegin(): the candidate set is ordered, the last element is best.
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())
                return nullptr;
            pindexNew = *it;
        }
        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !chainActive.Contains(pindexTest)) {
            assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                    pindexBestInvalid = pindexNew;
                CBlockIndex *pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                while (pindexTest != pindexFailed) {
                    if (fFailedChain) {
                        // Descendant of an invalid block.
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to mapBlocksUnlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                        mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
                    }
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                }
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
                break;
            }
            pindexTest = pindexTest->pprev;
        }
        // No invalid or missing-data ancestor: this candidate is usable.
        if (!fInvalidAncestor)
            return pindexNew;
    } while(true);
}
/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
void CChainState::PruneBlockIndexCandidates() {
    // The current tip itself must never be deleted: a reorganization to a
    // better block can fail, and then we need to be able to return to it.
    auto it = setBlockIndexCandidates.begin();
    while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
        it = setBlockIndexCandidates.erase(it);
    }
    // Either the current tip or a successor of it we're working towards remains.
    assert(!setBlockIndexCandidates.empty());
}
/**
* Try to make some progress towards making pindexMostWork the active block.
* pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork.
*/
bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
{
    AssertLockHeld(cs_main);
    const CBlockIndex *pindexOldTip = chainActive.Tip();
    const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);
    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    DisconnectedBlockTransactions disconnectpool;
    while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
        if (!DisconnectTip(state, chainparams, &disconnectpool)) {
            // This is likely a fatal error, but keep the mempool consistent,
            // just in case. Only remove from the mempool in this case.
            UpdateMempoolForReorg(disconnectpool, false);
            return false;
        }
        fBlocksDisconnected = true;
    }
    // Build list of new blocks to connect.
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        // Walk back from the ancestor at nTargetHeight, collecting the blocks
        // in descending height order (connected in reverse below).
        CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;
        // Connect new blocks.
        for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
            // pblock is only passed through when it is the block being connected.
            if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    if (!state.CorruptionPossible()) {
                        InvalidChainFound(vpindexToConnect.front());
                    }
                    state = CValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    // Make the mempool consistent with the current tip, just in case
                    // any observers try to use it before shutdown.
                    UpdateMempoolForReorg(disconnectpool, false);
                    return false;
                }
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }
    if (fBlocksDisconnected) {
        // If any blocks were disconnected, disconnectpool may be non empty. Add
        // any disconnected transactions back to the mempool.
        UpdateMempoolForReorg(disconnectpool, true);
    }
    mempool.check(pcoinsTip.get());
    // Callbacks/notifications for a new best chain.
    if (fInvalidFound)
        CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
    else
        CheckForkWarningConditions();
    return true;
}
/** Notify the UI when the best known header (pindexBestHeader) changes. */
static void NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
    // Last header we notified about; persists across calls.
    static CBlockIndex* last_notified_header = nullptr;
    CBlockIndex* best_header = nullptr;
    bool header_changed = false;
    bool in_ibd = false;
    {
        LOCK(cs_main);
        best_header = pindexBestHeader;
        if (best_header != last_notified_header) {
            header_changed = true;
            in_ibd = IsInitialBlockDownload();
            last_notified_header = best_header;
        }
    }
    // Send block tip changed notifications without cs_main
    if (header_changed) {
        uiInterface.NotifyHeaderTip(in_ibd, best_header);
    }
}
/**
 * Drain the validation-interface callback queue once it backs up past a
 * small threshold, so it cannot grow without bound.
 */
static void LimitValidationInterfaceQueue() {
    AssertLockNotHeld(cs_main);
    const size_t max_pending_callbacks = 10;
    if (GetMainSignals().CallbacksPending() > max_pending_callbacks) {
        SyncWithValidationInterfaceQueue();
    }
}
/**
* Make the best chain active, in multiple steps. The result is either failure
* or an activated best chain. pblock is either nullptr or a pointer to a block
* that is already loaded (to avoid loading it again from disk).
*
* ActivateBestChain is split into steps (see ActivateBestChainStep) so that
* we avoid holding cs_main for an extended period of time; the length of this
* call may be quite long during reindexing or a substantial reorg.
*/
bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
    // Note that while we're often called here from ProcessNewBlock, this is
    // far from a guarantee. Things in the P2P/RPC will often end up calling
    // us in the middle of ProcessNewBlock - do not assume pblock is set
    // sanely for performance or correctness!
    AssertLockNotHeld(cs_main);
    // ABC maintains a fair degree of expensive-to-calculate internal state
    // because this function periodically releases cs_main so that it does not lock up other threads for too long
    // during large connects - and to allow for e.g. the callback queue to drain
    // we use m_cs_chainstate to enforce mutual exclusion so that only one caller may execute this function at a time
    LOCK(m_cs_chainstate);
    CBlockIndex *pindexMostWork = nullptr;
    CBlockIndex *pindexNewTip = nullptr;
    int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
    // Outer loop: repeat until the active tip reaches the most-work candidate
    // (or shutdown), releasing cs_main between iterations.
    do {
        boost::this_thread::interruption_point();
        // Block until the validation queue drains. This should largely
        // never happen in normal operation, however may happen during
        // reindex, causing memory blowup if we run too far ahead.
        // Note that if a validationinterface callback ends up calling
        // ActivateBestChain this may lead to a deadlock! We should
        // probably have a DEBUG_LOCKORDER test for this in the future.
        LimitValidationInterfaceQueue();
        {
            LOCK(cs_main);
            CBlockIndex* starting_tip = chainActive.Tip();
            bool blocks_connected = false;
            // Inner loop: make forward progress while holding cs_main.
            do {
                // We absolutely may not unlock cs_main until we've made forward progress
                // (with the exception of shutdown due to hardware issues, low disk space, etc).
                ConnectTrace connectTrace(mempool); // Destructed before cs_main is unlocked
                if (pindexMostWork == nullptr) {
                    pindexMostWork = FindMostWorkChain();
                }
                // Whether we have anything to do at all.
                if (pindexMostWork == nullptr || pindexMostWork == chainActive.Tip()) {
                    break;
                }
                bool fInvalidFound = false;
                std::shared_ptr<const CBlock> nullBlockPtr;
                // Only hand pblock down if it is the block actually being connected.
                if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace))
                    return false;
                blocks_connected = true;
                if (fInvalidFound) {
                    // Wipe cache, we may need another branch now.
                    pindexMostWork = nullptr;
                }
                pindexNewTip = chainActive.Tip();
                // Deliver BlockConnected notifications for each block connected
                // in this step, in connection order.
                for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
                    assert(trace.pblock && trace.pindex);
                    GetMainSignals().BlockConnected(trace.pblock, trace.pindex, trace.conflictedTxs);
                }
            } while (!chainActive.Tip() || (starting_tip && CBlockIndexWorkComparator()(chainActive.Tip(), starting_tip)));
            if (!blocks_connected) return true;
            const CBlockIndex* pindexFork = chainActive.FindFork(starting_tip);
            bool fInitialDownload = IsInitialBlockDownload();
            // Notify external listeners about the new tip.
            // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
            if (pindexFork != pindexNewTip) {
                // Notify ValidationInterface subscribers
                GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);
                // Always notify the UI if a new block tip was connected
                uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip);
            }
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).
        if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();
        // We check shutdown only after giving ActivateBestChainStep a chance to run once so that we
        // never shutdown before connecting the genesis block during LoadChainTip(). Previously this
        // caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks
        // that the best block hash is non-null.
        if (ShutdownRequested())
            break;
    } while (pindexNewTip != pindexMostWork);
    CheckBlockIndex(chainparams.GetConsensus());
    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(chainparams, state, FlushStateMode::PERIODIC)) {
        return false;
    }
    return true;
}
/** Free-function wrapper: delegate to the global chainstate's ActivateBestChain. */
bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
    return g_chainstate.ActivateBestChain(state, chainparams, std::move(pblock));
}
bool CChainState::PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex)
{
    {
        LOCK(cs_main);
        if (pindex->nChainWork < chainActive.Tip()->nChainWork) {
            // Nothing to do, this block is not at the tip.
            return true;
        }
        if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) {
            // The chain has been extended since the last call, reset the counter.
            nBlockReverseSequenceId = -1;
        }
        nLastPreciousChainwork = chainActive.Tip()->nChainWork;
        // Re-insert pindex with a fresh (decreasing, negative) sequence id;
        // erase first so the set's ordering is recomputed.
        setBlockIndexCandidates.erase(pindex);
        pindex->nSequenceId = nBlockReverseSequenceId;
        if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
            // We can't keep reducing the counter if somebody really wants to
            // call preciousblock 2**31-1 times on the same set of tips...
            nBlockReverseSequenceId--;
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
            setBlockIndexCandidates.insert(pindex);
            PruneBlockIndexCandidates();
        }
    }
    // Re-run best-chain selection with the adjusted candidate ordering.
    return ActivateBestChain(state, params, std::shared_ptr<const CBlock>());
}
/** Free-function wrapper: delegate to the global chainstate's PreciousBlock. */
bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex) {
    return g_chainstate.PreciousBlock(state, params, pindex);
}
/**
 * Mark a block as invalid, disconnecting it (and any descendants on the
 * active chain) from the tip first. The last block actually disconnected is
 * the one marked BLOCK_FAILED_VALID; blocks above it get BLOCK_FAILED_CHILD.
 * Returns false if a disconnect fails or the target is still in the active
 * chain afterwards. Lock note: cs_main is taken per loop iteration so the
 * validation-callback queue can drain between disconnects.
 */
bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
{
    CBlockIndex* to_mark_failed = pindex;
    bool pindex_was_in_chain = false;
    int disconnected = 0;
    // Disconnect (descendants of) pindex, and mark them invalid.
    while (true) {
        if (ShutdownRequested()) break;
        // Make sure the queue of validation callbacks doesn't grow unboundedly.
        LimitValidationInterfaceQueue();
        LOCK(cs_main);
        if (!chainActive.Contains(pindex)) break;
        pindex_was_in_chain = true;
        CBlockIndex *invalid_walk_tip = chainActive.Tip();
        // ActivateBestChain considers blocks already in chainActive
        // unconditionally valid already, so force disconnect away from it.
        DisconnectedBlockTransactions disconnectpool;
        bool ret = DisconnectTip(state, chainparams, &disconnectpool);
        // DisconnectTip will add transactions to disconnectpool.
        // Adjust the mempool to be consistent with the new tip, adding
        // transactions back to the mempool if disconnecting was successful,
        // and we're not doing a very deep invalidation (in which case
        // keeping the mempool up to date is probably futile anyway).
        UpdateMempoolForReorg(disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
        if (!ret) return false;
        assert(invalid_walk_tip->pprev == chainActive.Tip());
        // We immediately mark the disconnected blocks as invalid.
        // This prevents a case where pruned nodes may fail to invalidateblock
        // and be left unable to start as they have no tip candidates (as there
        // are no blocks that meet the "have data and are not invalid per
        // nStatus" criteria for inclusion in setBlockIndexCandidates).
        invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
        setDirtyBlockIndex.insert(invalid_walk_tip);
        setBlockIndexCandidates.erase(invalid_walk_tip);
        setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
        if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
            // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
            // need to be BLOCK_FAILED_CHILD instead.
            to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
            setDirtyBlockIndex.insert(to_mark_failed);
        }
        // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
        // iterations, or, if it's the last one, call InvalidChainFound on it.
        to_mark_failed = invalid_walk_tip;
    }
    {
        LOCK(cs_main);
        if (chainActive.Contains(to_mark_failed)) {
            // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
            return false;
        }
        // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
        to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
        setDirtyBlockIndex.insert(to_mark_failed);
        setBlockIndexCandidates.erase(to_mark_failed);
        m_failed_blocks.insert(to_mark_failed);
        // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
        // add it again.
        BlockMap::iterator it = mapBlockIndex.begin();
        while (it != mapBlockIndex.end()) {
            if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
                setBlockIndexCandidates.insert(it->second);
            }
            it++;
        }
        InvalidChainFound(to_mark_failed);
    }
    // Only notify about a new block tip if the active chain was modified.
    if (pindex_was_in_chain) {
        uiInterface.NotifyBlockTip(IsInitialBlockDownload(), to_mark_failed->pprev);
    }
    return true;
}
// Exposed wrapper: run InvalidateBlock on the global chain state object.
bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) {
    return g_chainstate.InvalidateBlock(state, chainparams, pindex);
}
/**
 * Counterpart of InvalidateBlock(): clear BLOCK_FAILED_* flags from pindex,
 * all of its descendants, and all of its ancestors, re-adding any blocks
 * that become eligible to setBlockIndexCandidates. Requires cs_main.
 */
void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
    AssertLockHeld(cs_main);
    int nHeight = pindex->nHeight;
    // Remove the invalidity flag from this block and all its descendants.
    // Descendants are found by a full index scan: any entry whose ancestor at
    // nHeight is pindex is below it.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
            it->second->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(it->second);
            if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
                setBlockIndexCandidates.insert(it->second);
            }
            if (it->second == pindexBestInvalid) {
                // Reset invalid block marker if it was pointing to one of those.
                pindexBestInvalid = nullptr;
            }
            m_failed_blocks.erase(it->second);
        }
        it++;
    }
    // Remove the invalidity flag from all ancestors too.
    while (pindex != nullptr) {
        if (pindex->nStatus & BLOCK_FAILED_MASK) {
            pindex->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(pindex);
            m_failed_blocks.erase(pindex);
        }
        pindex = pindex->pprev;
    }
}
// Exposed wrapper: run ResetBlockFailureFlags on the global chain state object.
void ResetBlockFailureFlags(CBlockIndex *pindex) {
    return g_chainstate.ResetBlockFailureFlags(pindex);
}
/**
 * Insert a header into mapBlockIndex (or return the existing entry for a
 * duplicate), wiring up pprev/nHeight/nChainWork/nTimeMax from the parent
 * when known, and updating pindexBestHeader. Requires cs_main.
 * @return the (new or pre-existing) index entry; never null.
 */
CBlockIndex* CChainState::AddToBlockIndex(const CBlockHeader& block)
{
    AssertLockHeld(cs_main);
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end())
        return it->second;
    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(block);
    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;
    BlockMap::iterator mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
    // Point phashBlock at the map key so the index owns no separate hash copy.
    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
    if (miPrev != mapBlockIndex.end())
    {
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    // nTimeMax is the max timestamp along the chain; work is cumulative.
    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;
    setDirtyBlockIndex.insert(pindexNew);
    return pindexNew;
}
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS).
 *  Records the on-disk position, and once all ancestors have their
 *  transactions too, propagates nChainTx down to descendants queued in
 *  mapBlocksUnlinked, making them tip candidates. Otherwise the block is
 *  parked in mapBlocksUnlinked until its parent's data arrives.
 */
void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
    pindexNew->nTx = block.vtx.size();
    pindexNew->nChainTx = 0;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus |= BLOCK_HAVE_DATA;
    if (IsWitnessEnabled(pindexNew->pprev, consensusParams)) {
        pindexNew->nStatus |= BLOCK_OPT_WITNESS;
    }
    pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
    setDirtyBlockIndex.insert(pindexNew);
    if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
        // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
        std::deque<CBlockIndex*> queue;
        queue.push_back(pindexNew);
        // Recursively process any descendant blocks that now may be eligible to be connected.
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
            {
                // Sequence ids are handed out in data-arrival order; see
                // AddToBlockIndex for why they are deferred to this point.
                LOCK(cs_nBlockSequenceId);
                pindex->nSequenceId = nBlockSequenceId++;
            }
            if (chainActive.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }
            // Move any children waiting on this block from mapBlocksUnlinked
            // onto the processing queue.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                queue.push_back(it->second);
                range.first++;
                mapBlocksUnlinked.erase(it);
            }
        }
    } else {
        if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
            mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
        }
    }
}
/**
 * Pick a position in the block files with room for nAddSize bytes, rolling
 * over to a new file when the current one would exceed MAX_BLOCKFILE_SIZE,
 * and pre-allocating disk space in BLOCKFILE_CHUNK_SIZE steps.
 * If fKnown, the block already resides on disk at *pos and only the
 * per-file metadata is updated. Returns false on out-of-disk-space.
 */
static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
    LOCK(cs_LastBlockFile);
    unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
    if (vinfoBlockFile.size() <= nFile) {
        vinfoBlockFile.resize(nFile + 1);
    }
    if (!fKnown) {
        // Advance to the first file with enough space left.
        while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
            nFile++;
            if (vinfoBlockFile.size() <= nFile) {
                vinfoBlockFile.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = vinfoBlockFile[nFile].nSize;
    }
    if ((int)nFile != nLastBlockFile) {
        if (!fKnown) {
            LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
        }
        // Flush the file we are leaving (finalize it unless reindexing known data).
        FlushBlockFile(!fKnown);
        nLastBlockFile = nFile;
    }
    vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
    if (fKnown)
        vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
    else
        vinfoBlockFile[nFile].nSize += nAddSize;
    if (!fKnown) {
        // Pre-allocate in whole chunks; only hits the disk when the write
        // crosses a chunk boundary.
        unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        if (nNewChunks > nOldChunks) {
            if (fPruneMode)
                fCheckForPruning = true;
            if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos, true)) {
                FILE *file = OpenBlockFile(pos);
                if (file) {
                    LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
                    AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
                    fclose(file);
                }
            }
            else
                return error("out of disk space");
        }
    }
    setDirtyFileInfo.insert(nFile);
    return true;
}
/**
 * Reserve nAddSize bytes at the end of block file nFile's undo (rev) file,
 * writing the chosen position into pos and pre-allocating disk space in
 * UNDOFILE_CHUNK_SIZE steps. Returns false (state error) on out-of-disk-space.
 */
static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
{
    pos.nFile = nFile;
    LOCK(cs_LastBlockFile);
    unsigned int nNewSize;
    // Append at the current undo size, then grow the recorded size.
    pos.nPos = vinfoBlockFile[nFile].nUndoSize;
    nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
    setDirtyFileInfo.insert(nFile);
    // Pre-allocate in whole chunks, mirroring FindBlockPos.
    unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    if (nNewChunks > nOldChunks) {
        if (fPruneMode)
            fCheckForPruning = true;
        if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos, true)) {
            FILE *file = OpenUndoFile(pos);
            if (file) {
                LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
                AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
                fclose(file);
            }
        }
        else
            return state.Error("out of disk space");
    }
    return true;
}
/**
 * Context-free header check: verify the proof of work against nBits.
 * DoS score 50 on failure (hash above claimed target).
 */
static bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
{
    // Check proof of work matches claimed amount
    if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
        return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");
    return true;
}
/**
 * Context-free block checks: PoW, merkle root (incl. CVE-2012-2459
 * malleability), size limits, coinbase placement, per-transaction checks,
 * and legacy sigop count. Results are cached via block.fChecked when the
 * full check (PoW + merkle) was run. No UTXO or chain-context checks here.
 */
bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
    // These are checks that are independent of context.
    if (block.fChecked)
        return true;
    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
        return false;
    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2)
            return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot", true, "hashMerkleRoot mismatch");
        // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
        // of transactions in a block without affecting the merkle root of a block,
        // while still invalidating it.
        if (mutated)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate", true, "duplicate transaction");
    }
    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.
    // Note that witness malleability is checked in ContextualCheckBlock, so no
    // checks that use witness data may be performed here.
    // Size limits
    if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");
    // First transaction must be coinbase, the rest must not be
    if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase");
    for (unsigned int i = 1; i < block.vtx.size(); i++)
        if (block.vtx[i]->IsCoinBase())
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase");
    // Check transactions
    for (const auto& tx : block.vtx)
        if (!CheckTransaction(*tx, state, true))
            return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(),
                                 strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), state.GetDebugMessage()));
    // Legacy (non-witness-weighted) sigop count across the whole block.
    unsigned int nSigOps = 0;
    for (const auto& tx : block.vtx)
    {
        nSigOps += GetLegacySigOpCount(*tx);
    }
    if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");
    // Only cache a fully-checked result; partial checks must re-run later.
    if (fCheckPOW && fCheckMerkleRoot)
        block.fChecked = true;
    return true;
}
/**
 * Whether segwit rules apply to the block that would be built on top of
 * pindexPrev (genesis counts as height 0 when pindexPrev is null).
 */
bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
    int nNextHeight = 0;
    if (pindexPrev != nullptr) {
        nNextHeight = pindexPrev->nHeight + 1;
    }
    return nNextHeight >= params.SegwitHeight;
}
// Compute at which vout of the block's coinbase transaction the witness
// commitment occurs, or -1 if not found.
static int GetWitnessCommitmentIndex(const CBlock& block)
{
int commitpos = -1;
if (!block.vtx.empty()) {
for (size_t o = 0; o < block.vtx[0]->vout.size(); o++) {
if (block.vtx[0]->vout[o].scriptPubKey.size() >= 38 && block.vtx[0]->vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0]->vout[o].scriptPubKey[1] == 0x24 && block.vtx[0]->vout[o].scriptPubKey[2] == 0xaa && block.vtx[0]->vout[o].scriptPubKey[3] == 0x21 && block.vtx[0]->vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0]->vout[o].scriptPubKey[5] == 0xed) {
commitpos = o;
}
}
}
return commitpos;
}
/**
 * If the block carries a witness commitment but its coinbase has no witness,
 * set the coinbase scriptWitness stack to the all-zero 32-byte witness
 * reserved value (only when segwit is active for this block's height).
 * Mutates block.vtx[0] in place by replacement.
 */
void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    int commitpos = GetWitnessCommitmentIndex(block);
    static const std::vector<unsigned char> nonce(32, 0x00);
    if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && !block.vtx[0]->HasWitness()) {
        CMutableTransaction tx(*block.vtx[0]);
        tx.vin[0].scriptWitness.stack.resize(1);
        tx.vin[0].scriptWitness.stack[0] = nonce;
        block.vtx[0] = MakeTransactionRef(std::move(tx));
    }
}
/**
 * Append a segwit commitment output (OP_RETURN 0xaa21a9ed || SHA256d(witness
 * root, zero reserved value)) to the coinbase if segwit is defined and no
 * commitment exists yet, then fix up the coinbase witness via
 * UpdateUncommittedBlockStructures.
 * @return the commitment scriptPubKey bytes when a new output was added,
 *         otherwise an empty vector.
 */
std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    std::vector<unsigned char> commitment;
    int commitpos = GetWitnessCommitmentIndex(block);
    // ret doubles as the all-zero witness reserved value hashed into the commitment.
    std::vector<unsigned char> ret(32, 0x00);
    if (consensusParams.SegwitHeight != std::numeric_limits<int>::max()) {
        if (commitpos == -1) {
            uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
            // commitment hash = SHA256d(witness merkle root || reserved value)
            CHash256().Write(witnessroot.begin(), 32).Write(ret.data(), 32).Finalize(witnessroot.begin());
            CTxOut out;
            out.nValue = 0;
            // 38 bytes: OP_RETURN, push-36, 4-byte tag 0xaa21a9ed, 32-byte hash.
            out.scriptPubKey.resize(38);
            out.scriptPubKey[0] = OP_RETURN;
            out.scriptPubKey[1] = 0x24;
            out.scriptPubKey[2] = 0xaa;
            out.scriptPubKey[3] = 0x21;
            out.scriptPubKey[4] = 0xa9;
            out.scriptPubKey[5] = 0xed;
            memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
            commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
            CMutableTransaction tx(*block.vtx[0]);
            tx.vout.push_back(out);
            block.vtx[0] = MakeTransactionRef(std::move(tx));
        }
    }
    UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
    return commitment;
}
/** Context-dependent validity checks.
 * By "context", we mean only the previous block headers, but not the UTXO
 * set; UTXO-related validity checks are done in ConnectBlock().
 * Checks performed: difficulty (nBits), checkpoints, timestamp bounds
 * (median-time-past lower bound, adjusted-time upper bound), and
 * BIP34/65/66 version floors.
 * NOTE: This function is not currently invoked by ConnectBlock(), so we
 * should consider upgrade issues if we change which consensus rules are
 * enforced in this function (eg by adding a new consensus rule). See comment
 * in ConnectBlock().
 * Note that -reindex-chainstate skips the validation that happens here!
 */
static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
{
    assert(pindexPrev != nullptr);
    const int nHeight = pindexPrev->nHeight + 1;
    // Check proof of work
    const Consensus::Params& consensusParams = params.GetConsensus();
    if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
        return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work");
    // Check against checkpoints
    if (fCheckpointsEnabled) {
        // Don't accept any forks from the main chain prior to last checkpoint.
        // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
        // MapBlockIndex.
        CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(params.Checkpoints());
        if (pcheckpoint && nHeight < pcheckpoint->nHeight)
            return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
    }
    // Check timestamp against prev
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
        return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");
    // Check timestamp
    if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
        return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
    // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
    // check for version 2, 3 and 4 upgrades
    if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
       (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
       (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
            return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
                                 strprintf("rejected nVersion=0x%08x block", block.nVersion));
    return true;
}
/** Context-dependent block-body checks: BIP113 transaction finality,
 * BIP34 height-in-coinbase, segwit commitment verification, and the
 * witness-inclusive block weight limit.
 * NOTE: This function is not currently invoked by ConnectBlock(), so we
 * should consider upgrade issues if we change which consensus rules are
 * enforced in this function (eg by adding a new consensus rule). See comment
 * in ConnectBlock().
 * Note that -reindex-chainstate skips the validation that happens here!
 */
static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
    // Start enforcing BIP113 (Median Time Past).
    int nLockTimeFlags = 0;
    if (nHeight >= consensusParams.CSVHeight) {
        assert(pindexPrev != nullptr);
        nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
    }
    // Locktime cutoff: median time past once BIP113 is active, else block time.
    int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                              ? pindexPrev->GetMedianTimePast()
                              : block.GetBlockTime();
    // Check that all transactions are finalized
    for (const auto& tx : block.vtx) {
        if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
            return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction");
        }
    }
    // Enforce rule that the coinbase starts with serialized block height
    if (nHeight >= consensusParams.BIP34Height)
    {
        CScript expect = CScript() << nHeight;
        if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase");
        }
    }
    // Validation for witness commitments.
    // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
    //   coinbase (where 0x0000....0000 is used instead).
    // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained).
    // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
    // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
    //   {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
    //   multiple, the last one is used.
    bool fHaveWitness = false;
    if (nHeight >= consensusParams.SegwitHeight) {
        int commitpos = GetWitnessCommitmentIndex(block);
        if (commitpos != -1) {
            bool malleated = false;
            uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
            // The malleation check is ignored; as the transaction tree itself
            // already does not permit it, it is impossible to trigger in the
            // witness tree.
            if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
                return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness reserved value size", __func__));
            }
            // Recompute SHA256d(witness root || reserved value) and compare with
            // the 32 bytes committed after the 6-byte script prefix.
            CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
            if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
                return state.DoS(100, false, REJECT_INVALID, "bad-witness-merkle-match", true, strprintf("%s : witness merkle commitment mismatch", __func__));
            }
            fHaveWitness = true;
        }
    }
    // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
    if (!fHaveWitness) {
      for (const auto& tx : block.vtx) {
            if (tx->HasWitness()) {
                return state.DoS(100, false, REJECT_INVALID, "unexpected-witness", true, strprintf("%s : unexpected witness data found", __func__));
            }
        }
    }
    // After the coinbase witness reserved value and commitment are verified,
    // we can check if the block weight passes (before we've checked the
    // coinbase witness, it would be possible for the weight to be too
    // large by filling up the coinbase witness, which doesn't change
    // the block hash, so we couldn't mark the block as permanently
    // failed).
    if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-weight", false, strprintf("%s : weight limit failed", __func__));
    }
    return true;
}
/**
 * Accept a header into the block index: dedupe against mapBlockIndex,
 * run context-free and contextual header checks against the parent, reject
 * descendants of known-invalid blocks, then add it via AddToBlockIndex.
 * On success, *ppindex (if provided) points at the index entry, which may
 * be a pre-existing one. Requires cs_main.
 */
bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
{
    AssertLockHeld(cs_main);
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = mapBlockIndex.find(hash);
    CBlockIndex *pindex = nullptr;
    // The genesis header skips all checks and goes straight to AddToBlockIndex.
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {
        if (miSelf != mapBlockIndex.end()) {
            // Block header is already known.
            pindex = miSelf->second;
            if (ppindex)
                *ppindex = pindex;
            if (pindex->nStatus & BLOCK_FAILED_MASK)
                return state.Invalid(error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate");
            return true;
        }
        if (!CheckBlockHeader(block, state, chainparams.GetConsensus()))
            return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
        // Get prev block index
        CBlockIndex* pindexPrev = nullptr;
        BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
        if (mi == mapBlockIndex.end())
            return state.DoS(10, error("%s: prev block not found", __func__), 0, "prev-blk-not-found");
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
            return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
        if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
            return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
        /* Determine if this block descends from any block which has been found
         * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
         * them as failed. For example:
         *
         *                D3
         *              /
         *      B2 - C2
         *    /         \
         *  A             D2 - E2 - F2
         *    \
         *      B1 - C1 - D1 - E1
         *
         * In the case that we attempted to reorg from E1 to F2, only to find
         * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
         * but NOT D3 (it was not in any of our candidate sets at the time).
         *
         * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
         * in LoadBlockIndex.
         */
        if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
            // The above does not mean "invalid": it checks if the previous block
            // hasn't been validated up to BLOCK_VALID_SCRIPTS. This is a performance
            // optimization, in the common case of adding a new block to the tip,
            // we don't need to iterate over the failed blocks list.
            for (const CBlockIndex* failedit : m_failed_blocks) {
                if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
                    assert(failedit->nStatus & BLOCK_FAILED_VALID);
                    CBlockIndex* invalid_walk = pindexPrev;
                    while (invalid_walk != failedit) {
                        invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
                        setDirtyBlockIndex.insert(invalid_walk);
                        invalid_walk = invalid_walk->pprev;
                    }
                    return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
                }
            }
        }
    }
    if (pindex == nullptr)
        pindex = AddToBlockIndex(block);
    if (ppindex)
        *ppindex = pindex;
    CheckBlockIndex(chainparams.GetConsensus());
    // Notify external listeners about accepted block header
    // disabled to see if we can pass the info to MN code otherwise
    // GetMainSignals().AcceptedBlockHeader(pindex);
    return true;
}
// Exposed wrapper for AcceptBlockHeader: accept a batch of headers under a
// single cs_main lock, stopping at the first failure. On failure,
// *first_invalid (if given) receives the offending header. On success,
// *ppindex (if given) receives the index entry of the LAST header processed.
// Always notifies listeners of a possibly-changed best header afterwards.
bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex, CBlockHeader *first_invalid)
{
    if (first_invalid != nullptr) first_invalid->SetNull();
    {
        LOCK(cs_main);
        for (const CBlockHeader& header : headers) {
            CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
            if (!g_chainstate.AcceptBlockHeader(header, state, chainparams, &pindex)) {
                if (first_invalid) *first_invalid = header;
                return false;
            }
            if (ppindex) {
                *ppindex = pindex;
            }
        }
    }
    // Outside cs_main: signal the (possibly) new best header.
    NotifyHeaderTip();
    return true;
}
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk
 *  (reindex case) and only the position bookkeeping is updated.
 *  @return the block's disk position, or a null CDiskBlockPos on failure
 *          (callers must check IsNull()).
 */
static CDiskBlockPos SaveBlockToDisk(const CBlock& block, int nHeight, const CChainParams& chainparams, const CDiskBlockPos* dbp) {
    unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
    CDiskBlockPos blockPos;
    if (dbp != nullptr)
        blockPos = *dbp;
    // +8 bytes reserves room for the on-disk record header (magic + size).
    if (!FindBlockPos(blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != nullptr)) {
        error("%s: FindBlockPos failed", __func__);
        return CDiskBlockPos();
    }
    if (dbp == nullptr) {
        if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) {
            AbortNode("Failed to write block");
            return CDiskBlockPos();
        }
    }
    return blockPos;
}
/** Accept a full block: run header acceptance, decide whether to process it
 *  at all (requested vs. unrequested work/height heuristics), perform the
 *  context-free and contextual block checks, relay tip-extending blocks
 *  early, and store the block on disk. If dbp is non-nullptr, the file is
 *  known to already reside on disk. Requires cs_main.
 *  Note: returns true for already-known or deliberately-ignored blocks.
 */
bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
{
    const CBlock& block = *pblock;
    if (fNewBlock) *fNewBlock = false;
    AssertLockHeld(cs_main);
    CBlockIndex *pindexDummy = nullptr;
    CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
    if (!AcceptBlockHeader(block, state, chainparams, &pindex))
        return false;
    // Try to process all requested blocks that we don't have, but only
    // process an unrequested block if it's new and has enough work to
    // advance our tip, and isn't too many blocks ahead.
    bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
    bool fHasMoreOrSameWork = (chainActive.Tip() ? pindex->nChainWork >= chainActive.Tip()->nChainWork : true);
    // Blocks that are too out-of-order needlessly limit the effectiveness of
    // pruning, because pruning will not delete block files that contain any
    // blocks which are too close in height to the tip.  Apply this test
    // regardless of whether pruning is enabled; it should generally be safe to
    // not process unrequested blocks.
    bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));
    // TODO: Decouple this function from the block download logic by removing fRequested
    // This requires some new chain data structure to efficiently look up if a
    // block is in a chain leading to a candidate for best tip, despite not
    // being such a candidate itself.
    // TODO: deal better with return value and error conditions for duplicate
    // and unrequested blocks.
    if (fAlreadyHave) return true;
    if (!fRequested) {  // If we didn't ask for it:
        if (pindex->nTx != 0) return true;    // This is a previously-processed block that was pruned
        if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
        if (fTooFarAhead) return true;        // Block height is too high
        // Protect against DoS attacks from low-work chains.
        // If our tip is behind, a peer could try to send us
        // low-work blocks on a fake chain that we would never
        // request; don't process these.
        if (pindex->nChainWork < nMinimumChainWork) return true;
    }
    if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
        !ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
        // Only persist the failure when it is not potentially a corruption
        // artifact (e.g. mutated transaction data for a valid header).
        if (state.IsInvalid() && !state.CorruptionPossible()) {
            pindex->nStatus |= BLOCK_FAILED_VALID;
            setDirtyBlockIndex.insert(pindex);
        }
        return error("%s: %s", __func__, FormatStateMessage(state));
    }
    // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
    // (but if it does not build on our best tip, let the SendMessages loop relay it)
    if (!IsInitialBlockDownload() && chainActive.Tip() == pindex->pprev)
        GetMainSignals().NewPoWValidBlock(pindex, pblock);
    // Write block to history file
    if (fNewBlock) *fNewBlock = true;
    try {
        CDiskBlockPos blockPos = SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
        if (blockPos.IsNull()) {
            state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
            return false;
        }
        ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error: ") + e.what());
    }
    FlushStateToDisk(chainparams, state, FlushStateMode::NONE);
    CheckBlockIndex(chainparams.GetConsensus());
    return true;
}
/**
 * Top-level entry point for a newly received or mined block: validate and
 * store it (AcceptBlock under cs_main), then attempt to make it the new tip
 * via ActivateBestChain. fForceProcessing bypasses the unrequested-block
 * heuristics in AcceptBlock; *fNewBlock reports whether the block's data
 * was new to us. Must be called without cs_main held.
 */
bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock)
{
    AssertLockNotHeld(cs_main);
    {
        CBlockIndex *pindex = nullptr;
        if (fNewBlock) *fNewBlock = false;
        CValidationState state;
        // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause data race.
        // Therefore, the following critical section must include the CheckBlock() call as well.
        LOCK(cs_main);
        // Ensure that CheckBlock() passes before calling AcceptBlock, as
        // belt-and-suspenders.
        bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
        if (ret) {
            // Store to disk
            ret = g_chainstate.AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
        }
        if (!ret) {
            GetMainSignals().BlockChecked(*pblock, state);
            return error("%s: AcceptBlock FAILED (%s)", __func__, FormatStateMessage(state));
        }
    }
    NotifyHeaderTip();
    CValidationState state; // Only used to report errors, not invalidity - ignore it
    if (!g_chainstate.ActivateBestChain(state, chainparams, pblock))
        return error("%s: ActivateBestChain failed (%s)", __func__, FormatStateMessage(state));
    LogPrintf("%s : ACCEPTED\n", __func__);
    return true;
}
/**
 * Dry-run full validation of a block that would extend the current tip,
 * without touching the chain state: all checks run against a dummy index
 * entry and a throwaway coin view. Used to sanity-check block templates.
 * pindexPrev must be the current tip. Requires cs_main.
 */
bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
    AssertLockHeld(cs_main);
    assert(pindexPrev && pindexPrev == chainActive.Tip());
    // Scratch view layered on the tip's UTXO set; discarded on return.
    CCoinsViewCache viewNew(pcoinsTip.get());
    uint256 block_hash(block.GetHash());
    CBlockIndex indexDummy(block);
    indexDummy.pprev = pindexPrev;
    indexDummy.nHeight = pindexPrev->nHeight + 1;
    indexDummy.phashBlock = &block_hash;
    // NOTE: CheckBlockHeader is called by CheckBlock
    if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
        return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
    if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
    if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
        return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
    // fJustCheck=true: validate scripts/UTXO without committing anything.
    if (!g_chainstate.ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
        return false;
    assert(state.IsValid());
    return true;
}
/**
* BLOCK PRUNING CODE
*/
/** Total bytes currently used on disk by all block (blk) and undo (rev) files. */
uint64_t CalculateCurrentUsage()
{
    LOCK(cs_LastBlockFile);
    uint64_t total_bytes = 0;
    for (const CBlockFileInfo& file_info : vinfoBlockFile) {
        total_bytes += file_info.nSize;
        total_bytes += file_info.nUndoSize;
    }
    return total_bytes;
}
/* Prune a block file (modify associated database entries)*/
// Clears HAVE_DATA/HAVE_UNDO and disk positions on every block index entry
// stored in the given file, marks those entries dirty so the change is
// persisted, drops them from mapBlocksUnlinked, and resets the file's info
// record. Does NOT delete the file itself (see UnlinkPrunedFiles).
void PruneOneBlockFile(const int fileNumber)
{
    LOCK(cs_LastBlockFile);
    for (const auto& entry : mapBlockIndex) {
        CBlockIndex* pindex = entry.second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            setDirtyBlockIndex.insert(pindex);
            // Prune from mapBlocksUnlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // mapBlocksUnlinked or setBlockIndexCandidates.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                // Advance before a possible erase so the iterator stays valid.
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
                range.first++;
                if (_it->second == pindex) {
                    mapBlocksUnlinked.erase(_it);
                }
            }
        }
    }
    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
/** Remove the blk/rev file pair from disk for every file number in the set. */
void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
{
    for (const int file_number : setFilesToPrune) {
        CDiskBlockPos pos(file_number, 0);
        fs::remove(GetBlockPosFilename(pos, "blk"));
        fs::remove(GetBlockPosFilename(pos, "rev"));
        LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, file_number);
    }
}
/* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */
// Prunes every block file whose highest block is at or below the requested
// height (clamped so MIN_BLOCKS_TO_KEEP recent blocks are always retained).
static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight)
{
    assert(fPruneMode && nManualPruneHeight > 0);
    LOCK2(cs_main, cs_LastBlockFile);
    if (chainActive.Tip() == nullptr)
        return;
    // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
    // NOTE(review): if the tip height is below MIN_BLOCKS_TO_KEEP the subtraction
    // wraps in unsigned arithmetic -- presumably callers guarantee a tall enough
    // chain before invoking manual pruning; confirm at the RPC call site.
    unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
    int count=0;
    for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
        // Skip empty files and files that still contain blocks we must keep.
        if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
            continue;
        PruneOneBlockFile(fileNumber);
        setFilesToPrune.insert(fileNumber);
        count++;
    }
    LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
}
/* This function is called from the RPC code for pruneblockchain */
void PruneBlockFilesManual(int nManualPruneHeight)
{
    const CChainParams& chainparams = Params();
    CValidationState state;
    // Flushing with a manual prune height triggers the actual file pruning.
    const bool flushed = FlushStateToDisk(chainparams, state, FlushStateMode::NONE, nManualPruneHeight);
    if (!flushed) {
        LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
    }
}
/**
 * Prune block and undo files (blk???.dat and undo???.dat) so that the disk space used is less than a user-defined target.
 * The user sets the target (in MB) on the command line or in config file. This will be run on startup and whenever new
 * space is allocated in a block or undo file, staying below the target. Changing back to unpruned requires a reindex
 * (which in this case means the blockchain must be re-downloaded.)
 *
 * Pruning functions are called from FlushStateToDisk when the global fCheckForPruning flag has been set.
 * Block and undo files are deleted in lock-step (when blk00003.dat is deleted, so is rev00003.dat.)
 * Pruning cannot take place until the longest chain is at least a certain length (100000 on mainnet, 1000 on testnet, 1000 on regtest).
 * Pruning will never delete a block within a defined distance (currently 288) from the active chain's tip.
 * The block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks that were stored in the deleted files.
 * A db flag records the fact that at least some block files have been pruned.
 *
 * @param[out]   setFilesToPrune   The set of file indices that can be unlinked will be returned
 * @param[in]    nPruneAfterHeight Do not prune at all until the tip is past this height.
 */
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
    LOCK2(cs_main, cs_LastBlockFile);
    if (chainActive.Tip() == nullptr || nPruneTarget == 0) {
        return;
    }
    if ((uint64_t)chainActive.Tip()->nHeight <= nPruneAfterHeight) {
        return;
    }
    unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count=0;
    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        // On a prune event, the chainstate DB is flushed.
        // To avoid excessive prune events negating the benefit of high dbcache
        // values, we should not prune too rapidly.
        // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
        if (IsInitialBlockDownload()) {
            // Since this is only relevant during IBD, we use a fixed 10%
            nBuffer += nPruneTarget / 10;
        }
        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
            if (vinfoBlockFile[fileNumber].nSize == 0)
                continue;
            if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target?
                break;
            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
                continue;
            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }
    LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
           nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
           ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
           nLastBlockWeCanPrune, count);
}
/** Abort the node unless the data (or blocks) directory has at least
 *  nMinDiskSpace + nAdditionalBytes of free space available. */
bool CheckDiskSpace(uint64_t nAdditionalBytes, bool blocks_dir)
{
    const uint64_t free_bytes_available = fs::space(blocks_dir ? GetBlocksDir() : GetDataDir()).available;
    // Check for nMinDiskSpace bytes (currently 50MB)
    if (free_bytes_available >= nMinDiskSpace + nAdditionalBytes) {
        return true;
    }
    return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
}
// Open the flat file identified by prefix+pos (e.g. blk00001.dat), creating
// it when opening for write, and seek to pos.nPos. Returns nullptr on failure.
static FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
{
    if (pos.IsNull())
        return nullptr;
    fs::path path = GetBlockPosFilename(pos, prefix);
    fs::create_directories(path.parent_path());
    // "rb+" keeps existing content writable; fall back to "wb+" to create the
    // file when it does not exist yet (only if we intend to write).
    FILE* file = fsbridge::fopen(path, fReadOnly ? "rb": "rb+");
    if (!file && !fReadOnly)
        file = fsbridge::fopen(path, "wb+");
    if (!file) {
        LogPrintf("Unable to open file %s\n", path.string());
        return nullptr;
    }
    if (pos.nPos) {
        if (fseek(file, pos.nPos, SEEK_SET)) {
            LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
            fclose(file);
            return nullptr;
        }
    }
    return file;
}
/** Open a block file (blk?????.dat) at the given position. */
FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "blk", fReadOnly);
}
/** Open an undo file (rev?????.dat) at the given position. */
static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "rev", fReadOnly);
}
/** Translate a disk position into its on-disk path, e.g. <blocksdir>/blk00042.dat. */
fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
{
    return GetBlocksDir() / strprintf("%s%05u.dat", prefix, pos.nFile);
}
/** Find the block index entry for the given hash, creating it if absent.
 *  Returns nullptr for the null hash. */
CBlockIndex * CChainState::InsertBlockIndex(const uint256& hash)
{
    AssertLockHeld(cs_main);
    if (hash.IsNull()) {
        return nullptr;
    }
    // Hand back the existing entry if we already track this hash.
    const BlockMap::iterator existing = mapBlockIndex.find(hash);
    if (existing != mapBlockIndex.end()) {
        return existing->second;
    }
    // Otherwise allocate a fresh entry and point it at the map-owned hash key.
    CBlockIndex* pindexNew = new CBlockIndex();
    const BlockMap::iterator inserted = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &(inserted->first);
    return pindexNew;
}
// Load all block index entries from the block tree DB, then (in height order)
// derive the fields that are not stored on disk: cumulative chainwork,
// max timestamp, chain-tx linkage, failure inheritance, skiplist pointers,
// and the best-header / best-invalid / tip-candidate bookkeeping.
bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlockTreeDB& blocktree)
{
    if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }))
        return false;
    // Calculate nChainWork
    std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(mapBlockIndex.size());
    for (const std::pair<const uint256, CBlockIndex*>& item : mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
    }
    // Process parents before children so pprev-derived fields are available.
    std::sort(vSortedByHeight.begin(), vSortedByHeight.end());
    for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
    {
        CBlockIndex* pindex = item.second;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
        // We can link the chain of blocks for which we've received transactions at some point.
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->HaveTxsDownloaded()) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    // Parent's ancestry is incomplete: remember this block so it
                    // can be linked once the gap is filled.
                    pindex->nChainTx = 0;
                    mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        // A child of a failed block inherits the failure.
        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
            pindex->nStatus |= BLOCK_FAILED_CHILD;
            setDirtyBlockIndex.insert(pindex);
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))
            setBlockIndexCandidates.insert(pindex);
        if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
            pindexBestInvalid = pindex;
        if (pindex->pprev)
            pindex->BuildSkip();
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }
    return true;
}
// Load the block index plus block-file metadata and flags from the databases:
// per-file info records, presence of the referenced blk files on disk, the
// pruned flag, and whether a reindex was interrupted.
bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    if (!g_chainstate.LoadBlockIndex(chainparams.GetConsensus(), *pblocktree))
        return false;
    // Load block file info
    pblocktree->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    // Pick up any file records past nLastBlockFile (e.g. after an unclean
    // shutdown) until the first missing one.
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (pblocktree->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }
    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    for (const std::pair<const uint256, CBlockIndex*>& item : mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
    {
        CDiskBlockPos pos(*it, 0);
        // Fail startup if a file the index claims to have data in can't be opened.
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }
    // Check whether we have ever pruned block & undo files
    pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned)
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
    // Check whether we need to continue reindexing
    bool fReindexing = false;
    pblocktree->ReadReindexing(fReindexing);
    if(fReindexing) fReindex = true;
    return true;
}
// Set chainActive's tip to the block referenced by the coins database best
// block. Connects the genesis block first if the coins DB is still empty and
// only genesis is known. Returns false if the best block is unknown.
bool LoadChainTip(const CChainParams& chainparams)
{
    AssertLockHeld(cs_main);
    if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return true;
    if (pcoinsTip->GetBestBlock().IsNull() && mapBlockIndex.size() == 1) {
        // In case we just added the genesis block, connect it now, so
        // that we always have a chainActive.Tip() when we return.
        LogPrintf("%s: Connecting genesis block...\n", __func__);
        CValidationState state;
        if (!ActivateBestChain(state, chainparams)) {
            LogPrintf("%s: failed to activate chain (%s)\n", __func__, FormatStateMessage(state));
            return false;
        }
    }
    // Load pointer to end of best chain
    CBlockIndex* pindex = LookupBlockIndex(pcoinsTip->GetBestBlock());
    if (!pindex) {
        return false;
    }
    chainActive.SetTip(pindex);
    g_chainstate.PruneBlockIndexCandidates();
    LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
        chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
        FormatISO8601DateTime(chainActive.Tip()->GetBlockTime()),
        GuessVerificationProgress(chainparams.TxData(), chainActive.Tip()));
    return true;
}
// RAII: show the verification progress dialog for the lifetime of a CVerifyDB.
CVerifyDB::CVerifyDB()
{
    uiInterface.ShowProgress(_("Verifying blocks..."), 0, false);
}
// Dismiss the progress dialog when verification ends (empty title hides it).
CVerifyDB::~CVerifyDB()
{
    uiInterface.ShowProgress("", 100, false);
}
/**
 * Verify the integrity of the last nCheckDepth blocks of the active chain at
 * the given thoroughness level:
 *   0: read blocks from disk; 1: + block validity checks; 2: + undo data;
 *   3: + in-memory disconnect consistency of tip blocks; 4: + reconnect them.
 * Returns false on any detected corruption; true on success or early shutdown.
 */
bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
    LOCK(cs_main);
    if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr)
        return true;
    // Verify blocks in the best chain
    if (nCheckDepth <= 0 || nCheckDepth > chainActive.Height())
        nCheckDepth = chainActive.Height();
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
    CCoinsViewCache coins(coinsview);
    CBlockIndex* pindex;
    CBlockIndex* pindexFailure = nullptr;
    int nGoodTransactions = 0;
    CValidationState state;
    int reportDone = 0;
    LogPrintf("[0%%]..."); /* Continued */
    // Walk backwards from the tip, disconnecting as we go (levels >= 3).
    for (pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
        boost::this_thread::interruption_point();
        // Level 4 reserves the second half of the progress bar for reconnecting.
        const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
        if (reportDone < percentageDone/10) {
            // report every 10% step
            LogPrintf("[%d%%]...", percentageDone); /* Continued */
            reportDone = percentageDone/10;
        }
        uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false);
        if (pindex->nHeight <= chainActive.Height()-nCheckDepth)
            break;
        if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
            // If pruning, only go back as far as we have data.
            LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
            break;
        }
        CBlock block;
        // check level 0: read from disk
        if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
            return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
            return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
                         pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            if (!pindex->GetUndoPos().IsNull()) {
                if (!UndoReadFromDisk(undo, pindex)) {
                    return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                }
            }
        }
        // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
        if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
            assert(coins.GetBestBlock() == pindex->GetBlockHash());
            DisconnectResult res = g_chainstate.DisconnectBlock(block, pindex, coins);
            if (res == DISCONNECT_FAILED) {
                return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            }
            if (res == DISCONNECT_UNCLEAN) {
                // Record the deepest unclean disconnect; reported after the loop.
                nGoodTransactions = 0;
                pindexFailure = pindex;
            } else {
                nGoodTransactions += block.vtx.size();
            }
        }
        if (ShutdownRequested())
            return true;
    }
    if (pindexFailure)
        return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
    // store block count as we move pindex at check level >= 4
    int block_count = chainActive.Height() - pindex->nHeight;
    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4) {
        while (pindex != chainActive.Tip()) {
            boost::this_thread::interruption_point();
            const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
            if (reportDone < percentageDone/10) {
                // report every 10% step
                LogPrintf("[%d%%]...", percentageDone); /* Continued */
                reportDone = percentageDone/10;
            }
            uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false);
            pindex = chainActive.Next(pindex);
            CBlock block;
            if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
                return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            if (!g_chainstate.ConnectBlock(block, state, pindex, coins, chainparams))
                return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
        }
    }
    LogPrintf("[DONE].\n");
    LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
    return true;
}
/** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params)
{
    // TODO: merge with ConnectBlock
    CBlock block;
    if (!ReadBlockFromDisk(block, pindex, params.GetConsensus())) {
        return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
    }
    for (const CTransactionRef& tx : block.vtx) {
        if (!tx->IsCoinBase()) {
            // Spend all inputs of non-coinbase transactions.
            for (const CTxIn &txin : tx->vin) {
                inputs.SpendCoin(txin.prevout);
            }
        }
        // Pass check = true as every addition may be an overwrite.
        AddCoins(inputs, *tx, pindex->nHeight, true);
    }
    return true;
}
/**
 * Repair the coins database after an interrupted flush. The view records the
 * old and new tips it was mid-way between; disconnect the old branch back to
 * the fork point, then roll forward to the new tip, and flush the result.
 * Returns true if the view is (or is made) consistent.
 */
bool CChainState::ReplayBlocks(const CChainParams& params, CCoinsView* view)
{
    LOCK(cs_main);
    CCoinsViewCache cache(view);
    std::vector<uint256> hashHeads = view->GetHeadBlocks();
    if (hashHeads.empty()) return true; // We're already in a consistent state.
    if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
    uiInterface.ShowProgress(_("Replaying blocks..."), 0, false);
    LogPrintf("Replaying blocks\n");
    const CBlockIndex* pindexOld = nullptr;  // Old tip during the interrupted flush.
    const CBlockIndex* pindexNew;            // New tip during the interrupted flush.
    const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
    if (mapBlockIndex.count(hashHeads[0]) == 0) {
        return error("ReplayBlocks(): reorganization to unknown block requested");
    }
    pindexNew = mapBlockIndex[hashHeads[0]];
    if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
        if (mapBlockIndex.count(hashHeads[1]) == 0) {
            return error("ReplayBlocks(): reorganization from unknown block requested");
        }
        pindexOld = mapBlockIndex[hashHeads[1]];
        pindexFork = LastCommonAncestor(pindexOld, pindexNew);
        assert(pindexFork != nullptr);
    }
    // Rollback along the old branch.
    while (pindexOld != pindexFork) {
        if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
            CBlock block;
            if (!ReadBlockFromDisk(block, pindexOld, params.GetConsensus())) {
                return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
            DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
            if (res == DISCONNECT_FAILED) {
                return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
            // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
            // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
            // the result is still a version of the UTXO set with the effects of that block undone.
        }
        pindexOld = pindexOld->pprev;
    }
    // Roll forward from the forking point to the new tip.
    int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
    for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
        const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
        LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
        uiInterface.ShowProgress(_("Replaying blocks..."), (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
        if (!RollforwardBlock(pindex, cache, params)) return false;
    }
    cache.SetBestBlock(pindexNew->GetBlockHash());
    cache.Flush();
    uiInterface.ShowProgress("", 100, false);
    return true;
}
/** Free-function wrapper forwarding to the global chainstate's ReplayBlocks. */
bool ReplayBlocks(const CChainParams& params, CCoinsView* view) {
    return g_chainstate.ReplayBlocks(params, view);
}
//! Helper for CChainState::RewindBlockIndex
// Demote a non-active block index entry to header-only: cap its validity at
// BLOCK_VALID_TREE, clear have-data flags and disk position, zero the tx
// counters, and fix up the candidate/unlinked bookkeeping.
void CChainState::EraseBlockData(CBlockIndex* index)
{
    AssertLockHeld(cs_main);
    assert(!chainActive.Contains(index)); // Make sure this block isn't active
    // Reduce validity
    index->nStatus = std::min<unsigned int>(index->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (index->nStatus & ~BLOCK_VALID_MASK);
    // Remove have-data flags.
    index->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
    // Remove storage location.
    index->nFile = 0;
    index->nDataPos = 0;
    index->nUndoPos = 0;
    // Remove various other things
    index->nTx = 0;
    index->nChainTx = 0;
    index->nSequenceId = 0;
    // Make sure it gets written.
    setDirtyBlockIndex.insert(index);
    // Update indexes
    setBlockIndexCandidates.erase(index);
    std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(index->pprev);
    while (ret.first != ret.second) {
        if (ret.first->second == index) {
            // Post-increment keeps the iterator valid across the erase.
            mapBlocksUnlinked.erase(ret.first++);
        } else {
            ++ret.first;
        }
    }
    // Mark parent as eligible for main chain again
    if (index->pprev && index->pprev->IsValid(BLOCK_VALID_TRANSACTIONS) && index->pprev->HaveTxsDownloaded()) {
        setBlockIndexCandidates.insert(index->pprev);
    }
}
/**
 * After upgrading to a segwit-enforcing version, rewind the chain past any
 * blocks that were accepted without witness data so they can be
 * re-downloaded and fully validated. Releases cs_main between disconnect
 * steps to keep the validation interface queue bounded.
 */
bool CChainState::RewindBlockIndex(const CChainParams& params)
{
    // Note that during -reindex-chainstate we are called with an empty chainActive!
    // First erase all post-segwit blocks without witness not in the main chain,
    // as this can we done without costly DisconnectTip calls. Active
    // blocks will be dealt with below (releasing cs_main in between).
    {
        LOCK(cs_main);
        for (const auto& entry : mapBlockIndex) {
            if (IsWitnessEnabled(entry.second->pprev, params.GetConsensus()) && !(entry.second->nStatus & BLOCK_OPT_WITNESS) && !chainActive.Contains(entry.second)) {
                EraseBlockData(entry.second);
            }
        }
    }
    // Find what height we need to reorganize to.
    CBlockIndex *tip;
    int nHeight = 1;
    {
        LOCK(cs_main);
        while (nHeight <= chainActive.Height()) {
            // Although SCRIPT_VERIFY_WITNESS is now generally enforced on all
            // blocks in ConnectBlock, we don't need to go back and
            // re-download/re-verify blocks from before segwit actually activated.
            if (IsWitnessEnabled(chainActive[nHeight - 1], params.GetConsensus()) && !(chainActive[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
                break;
            }
            nHeight++;
        }
        tip = chainActive.Tip();
    }
    // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
    CValidationState state;
    // Loop until the tip is below nHeight, or we reach a pruned block.
    while (!ShutdownRequested()) {
        {
            LOCK(cs_main);
            // Make sure nothing changed from under us (this won't happen because RewindBlockIndex runs before importing/network are active)
            assert(tip == chainActive.Tip());
            if (tip == nullptr || tip->nHeight < nHeight) break;
            if (fPruneMode && !(tip->nStatus & BLOCK_HAVE_DATA)) {
                // If pruning, don't try rewinding past the HAVE_DATA point;
                // since older blocks can't be served anyway, there's
                // no need to walk further, and trying to DisconnectTip()
                // will fail (and require a needless reindex/redownload
                // of the blockchain).
                break;
            }
            // Disconnect block
            if (!DisconnectTip(state, params, nullptr)) {
                return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", tip->nHeight, FormatStateMessage(state));
            }
            // Reduce validity flag and have-data flags.
            // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
            // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
            // Note: If we encounter an insufficiently validated block that
            // is on chainActive, it must be because we are a pruning node, and
            // this block or some successor doesn't HAVE_DATA, so we were unable to
            // rewind all the way. Blocks remaining on chainActive at this point
            // must not have their validity reduced.
            EraseBlockData(tip);
            tip = tip->pprev;
        }
        // Make sure the queue of validation callbacks doesn't grow unboundedly.
        LimitValidationInterfaceQueue();
        // Occasionally flush state to disk.
        if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
            LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", FormatStateMessage(state));
            return false;
        }
    }
    {
        LOCK(cs_main);
        if (chainActive.Tip() != nullptr) {
            // We can't prune block index candidates based on our tip if we have
            // no tip due to chainActive being empty!
            PruneBlockIndexCandidates();
            CheckBlockIndex(params.GetConsensus());
        }
    }
    return true;
}
/** Wrapper: rewind the global chainstate, then flush the result to disk. */
bool RewindBlockIndex(const CChainParams& params) {
    if (!g_chainstate.RewindBlockIndex(params)) {
        return false;
    }
    if (chainActive.Tip() != nullptr) {
        // FlushStateToDisk can possibly read chainActive. Be conservative
        // and skip it here, we're about to -reindex-chainstate anyway, so
        // it'll get called a bunch real soon.
        CValidationState state;
        if (!FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
            LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", FormatStateMessage(state));
            return false;
        }
    }
    return true;
}
// Reset this chainstate's per-instance block index bookkeeping.
void CChainState::UnloadBlockIndex() {
    nBlockSequenceId = 1;
    m_failed_blocks.clear();
    setBlockIndexCandidates.clear();
}
// May NOT be used after any connections are up as much
// of the peer-processing logic assumes a consistent
// block index state
// Clears all global block-index state (tip, mempool, file info, caches) and
// frees every CBlockIndex before emptying mapBlockIndex.
void UnloadBlockIndex()
{
    LOCK(cs_main);
    chainActive.SetTip(nullptr);
    pindexBestInvalid = nullptr;
    pindexBestHeader = nullptr;
    mempool.clear();
    mapBlocksUnlinked.clear();
    vinfoBlockFile.clear();
    nLastBlockFile = 0;
    setDirtyBlockIndex.clear();
    setDirtyFileInfo.clear();
    versionbitscache.Clear();
    for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
        warningcache[b].clear();
    }
    // mapBlockIndex owns the CBlockIndex objects: delete them before clearing.
    for (const BlockMap::value_type& entry : mapBlockIndex) {
        delete entry.second;
    }
    mapBlockIndex.clear();
    fHavePruned = false;
    g_chainstate.UnloadBlockIndex();
}
/** Load the block index from the databases (unless reindexing), reporting
 *  whether a fresh database initialization is needed. */
bool LoadBlockIndex(const CChainParams& chainparams)
{
    // Load block index from databases
    bool needs_init = fReindex;
    if (!fReindex) {
        if (!LoadBlockIndexDB(chainparams)) {
            return false;
        }
        needs_init = mapBlockIndex.empty();
    }
    if (needs_init) {
        // Everything here is for *new* reindex/DBs. Thus, though
        // LoadBlockIndexDB may have set fReindex if we shut down
        // mid-reindex previously, we don't check fReindex and
        // instead only check it prior to LoadBlockIndexDB to set
        // needs_init.
        LogPrintf("Initializing databases...\n");
    }
    return true;
}
// Ensure the genesis block is on disk and in the block index. No-op if it is
// already indexed; otherwise writes it to disk and records its transactions.
bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
{
    LOCK(cs_main);
    // Check whether we're already initialized by checking for genesis in
    // mapBlockIndex. Note that we can't use chainActive here, since it is
    // set based on the coins db, not the block index db, which is the only
    // thing loaded at this point.
    if (mapBlockIndex.count(chainparams.GenesisBlock().GetHash()))
        return true;
    try {
        const CBlock& block = chainparams.GenesisBlock();
        CDiskBlockPos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
        if (blockPos.IsNull())
            return error("%s: writing genesis block to disk failed", __func__);
        CBlockIndex *pindex = AddToBlockIndex(block);
        ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
    } catch (const std::runtime_error& e) {
        return error("%s: failed to write genesis block: %s", __func__, e.what());
    }
    return true;
}
/** Free-function wrapper forwarding to the global chainstate's LoadGenesisBlock. */
bool LoadGenesisBlock(const CChainParams& chainparams)
{
    return g_chainstate.LoadGenesisBlock(chainparams);
}
/**
 * Import blocks from an external blk file (used by -reindex and -loadblock).
 * Scans for the network magic to locate block records, accepts each block,
 * defers out-of-order blocks until their parent arrives, and reports how many
 * blocks were loaded.
 *
 * @param[in]     chainparams Chain parameters (magic bytes, consensus rules).
 * @param[in]     fileIn      Open file handle; ownership passes to this function.
 * @param[in,out] dbp         If non-null, the file's disk position; updated per block.
 * @return true if at least one block was loaded.
 */
bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskBlockPos *dbp)
{
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();
    int nLoaded = 0;
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            boost::this_thread::interruption_point();
            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header
                unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
                blkdat.FindByte(chainparams.MessageStart()[0]);
                nRewind = blkdat.GetPos()+1;
                blkdat >> buf;
                if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE))
                    continue;
                // read size
                blkdat >> nSize;
                if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                blkdat.SetPos(nBlockPos);
                std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
                CBlock& block = *pblock;
                blkdat >> block;
                nRewind = blkdat.GetPos();
                uint256 hash = block.GetHash();
                {
                    LOCK(cs_main);
                    // detect out of order blocks, and store them for later
                    if (hash != chainparams.GetConsensus().hashGenesisBlock && !LookupBlockIndex(block.hashPrevBlock)) {
                        LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                                block.hashPrevBlock.ToString());
                        if (dbp)
                            mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                        continue;
                    }
                    // process in case the block isn't known yet
                    CBlockIndex* pindex = LookupBlockIndex(hash);
                    if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
                      CValidationState state;
                      if (g_chainstate.AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
                          nLoaded++;
                      }
                      if (state.IsError()) {
                          break;
                      }
                    } else if (hash != chainparams.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
                      LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
                    }
                }
                // Activate the genesis block so normal node progress can continue
                if (hash == chainparams.GetConsensus().hashGenesisBlock) {
                    CValidationState state;
                    if (!ActivateBestChain(state, chainparams)) {
                        break;
                    }
                }
                NotifyHeaderTip();
                // Recursively process earlier encountered successors of this block
                std::deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
                        std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
                        if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
                        {
                            LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
                                    head.ToString());
                            LOCK(cs_main);
                            CValidationState dummy;
                            if (g_chainstate.AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
                            {
                                nLoaded++;
                                queue.push_back(pblockrecursive->GetHash());
                            }
                        }
                        range.first++;
                        mapBlocksUnknownParent.erase(it);
                        NotifyHeaderTip();
                    }
                }
            } catch (const std::exception& e) {
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
            }
        }
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    if (nLoaded > 0)
        LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
    return nLoaded > 0;
}
/**
 * Debug-only exhaustive consistency check of the in-memory block tree.
 *
 * Walks every entry of mapBlockIndex depth-first (via a temporary
 * forward-pointing multimap) and asserts a large set of invariants relating
 * mapBlockIndex, chainActive, setBlockIndexCandidates and mapBlocksUnlinked.
 * Returns immediately unless fCheckBlockIndex (-checkblockindex) is enabled.
 * Acquires cs_main for the whole traversal.
 */
void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
{
    if (!fCheckBlockIndex) {
        return;
    }

    LOCK(cs_main);

    // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
    // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
    // iterating the block tree require that chainActive has been initialized.)
    if (chainActive.Height() < 0) {
        assert(mapBlockIndex.size() <= 1);
        return;
    }

    // Build forward-pointing map of the entire block tree.
    std::multimap<CBlockIndex*,CBlockIndex*> forward;
    for (const std::pair<const uint256, CBlockIndex*>& entry : mapBlockIndex) {
        forward.insert(std::make_pair(entry.second->pprev, entry.second));
    }

    assert(forward.size() == mapBlockIndex.size());

    std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
    CBlockIndex *pindex = rangeGenesis.first->second;
    rangeGenesis.first++;
    assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.

    // Iterate over the entire block tree, using depth-first search.
    // Along the way, remember whether there are blocks on the path from genesis
    // block being explored which are the first to have certain properties.
    size_t nNodes = 0;
    int nHeight = 0;
    CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
    CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
    CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
    CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
    CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
    CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
    CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
    while (pindex != nullptr) {
        nNodes++;
        // Track the oldest ancestor (including pindex itself) with each property.
        if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
        if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
        if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;

        // Begin: actual consistency checks.
        if (pindex->pprev == nullptr) {
            // Genesis block checks.
            assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
            assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
        }
        if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
        // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
        // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
        if (!fHavePruned) {
            // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
            assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
            assert(pindexFirstMissing == pindexFirstNeverProcessed);
        } else {
            // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
            if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
        }
        if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
        assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
        // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
        assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
        assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
        assert(pindex->nHeight == nHeight); // nHeight must be consistent.
        assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
        assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
        assert(pindexFirstNotTreeValid == nullptr); // All mapBlockIndex entries must at least be TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
        if (pindexFirstInvalid == nullptr) {
            // Checks for not-invalid blocks.
            assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
        }
        if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == nullptr) {
            if (pindexFirstInvalid == nullptr) {
                // If this block sorts at least as good as the current tip and
                // is valid and we have all data for its parents, it must be in
                // setBlockIndexCandidates. chainActive.Tip() must also be there
                // even if some data has been pruned.
                if (pindexFirstMissing == nullptr || pindex == chainActive.Tip()) {
                    assert(setBlockIndexCandidates.count(pindex));
                }
                // If some parent is missing, then it could be that this block was in
                // setBlockIndexCandidates but had to be removed because of the missing data.
                // In this case it must be in mapBlocksUnlinked -- see test below.
            }
        } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
            assert(setBlockIndexCandidates.count(pindex) == 0);
        }
        // Check whether this block is in mapBlocksUnlinked.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
                break;
            }
            rangeUnlinked.first++;
        }
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
            assert(foundInUnlinked);
        }
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
        if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
            // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
            assert(fHavePruned); // We must have pruned.
            // This block may have entered mapBlocksUnlinked if:
            //  - it has a descendant that at some point had more work than the
            //    tip, and
            //  - we tried switching to that descendant but were missing
            //    data for some intermediate block between chainActive and the
            //    tip.
            // So if this block is itself better than chainActive.Tip() and it wasn't in
            // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
            if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
                if (pindexFirstInvalid == nullptr) {
                    assert(foundInUnlinked);
                }
            }
        }
        // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
            nHeight++;
            continue;
        }
        // This is a leaf node.
        // Move upwards until we reach a node of which we have not yet visited the last child.
        while (pindex) {
            // We are going to either move to a parent or a sibling of pindex.
            // If pindex was the first with a certain property, unset the corresponding variable.
            if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
            if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
            if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
            if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
            if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
            if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
            if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
            // Find our parent.
            CBlockIndex* pindexPar = pindex->pprev;
            // Find which child we just visited.
            std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
            while (rangePar.first->second != pindex) {
                assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
                rangePar.first++;
            }
            // Proceed to the next one.
            rangePar.first++;
            if (rangePar.first != rangePar.second) {
                // Move to the sibling.
                pindex = rangePar.first->second;
                break;
            } else {
                // Move up further.
                pindex = pindexPar;
                nHeight--;
                continue;
            }
        }
    }

    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
}
/** Human-readable one-line summary of this block file (for logging/RPC). */
std::string CBlockFileInfo::ToString() const
{
    const std::string first_seen = FormatISO8601Date(nTimeFirst);
    const std::string last_seen = FormatISO8601Date(nTimeLast);
    return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)",
                     nBlocks, nSize, nHeightFirst, nHeightLast, first_seen, last_seen);
}
/**
 * Return a pointer to the in-memory info record for block file number n.
 * vector::at performs a bounds check and throws std::out_of_range for bad n.
 */
CBlockFileInfo* GetBlockFileInfo(size_t n)
{
    LOCK(cs_LastBlockFile);

    CBlockFileInfo& info = vinfoBlockFile.at(n);
    return &info;
}
/** BIP9 state of the given deployment, evaluated at the active chain tip. */
ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
    LOCK(cs_main); // chainActive and versionbitscache are guarded by cs_main
    return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);
}
/** Signalling statistics for the given deployment in the current period, at the active chain tip. */
BIP9Stats VersionBitsTipStatistics(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
    LOCK(cs_main); // chainActive is guarded by cs_main
    return VersionBitsStatistics(chainActive.Tip(), params, pos);
}
/** Height at which the given deployment's current BIP9 state was reached, at the active chain tip. */
int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
    LOCK(cs_main); // chainActive and versionbitscache are guarded by cs_main
    return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos, versionbitscache);
}
static const uint64_t MEMPOOL_DUMP_VERSION = 1;
/**
 * Load persisted mempool contents from <datadir>/mempool.dat.
 *
 * File layout (see DumpMempool): a version word, a transaction count, then
 * (tx, receive-time, fee-delta) triples, followed by a map of standalone
 * fee deltas. Transactions older than -mempoolexpiry are skipped; the rest
 * are re-submitted through AcceptToMemoryPoolWithTime with their original
 * receive time.
 *
 * @return true if the file was read to the end, false on open/parse failure,
 *         version mismatch or shutdown request. Failure is non-fatal.
 */
bool LoadMempool()
{
    const CChainParams& chainparams = Params();
    // Expiry window, converted from hours (config unit) to seconds.
    int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
    FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb");
    CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
        return false;
    }

    // Counters for the summary log line.
    int64_t count = 0;
    int64_t expired = 0;
    int64_t failed = 0;
    int64_t already_there = 0;
    int64_t nNow = GetTime();

    try {
        uint64_t version;
        file >> version;
        if (version != MEMPOOL_DUMP_VERSION) {
            return false;
        }
        uint64_t num;
        file >> num;
        while (num--) {
            CTransactionRef tx;
            int64_t nTime;
            int64_t nFeeDelta;
            file >> tx;
            file >> nTime;
            file >> nFeeDelta;

            CAmount amountdelta = nFeeDelta;
            if (amountdelta) {
                // Re-apply the fee delta before submission so mempool
                // acceptance/eviction sees the prioritised fee.
                mempool.PrioritiseTransaction(tx->GetHash(), amountdelta);
            }
            CValidationState state;
            if (nTime + nExpiryTimeout > nNow) {
                LOCK(cs_main);
                AcceptToMemoryPoolWithTime(chainparams, mempool, state, tx, nullptr /* pfMissingInputs */, nTime,
                                           nullptr /* plTxnReplaced */, false /* bypass_limits */, 0 /* nAbsurdFee */,
                                           false /* test_accept */);
                if (state.IsValid()) {
                    ++count;
                } else {
                    // mempool may contain the transaction already, e.g. from
                    // wallet(s) having loaded it while we were processing
                    // mempool transactions; consider these as valid, instead of
                    // failed, but mark them as 'already there'
                    if (mempool.exists(tx->GetHash())) {
                        ++already_there;
                    } else {
                        ++failed;
                    }
                }
            } else {
                ++expired;
            }
            if (ShutdownRequested())
                return false;
        }
        // Trailing map of fee deltas for transactions not (yet) in the file.
        std::map<uint256, CAmount> mapDeltas;
        file >> mapDeltas;

        for (const auto& i : mapDeltas) {
            mempool.PrioritiseTransaction(i.first, i.second);
        }
    } catch (const std::exception& e) {
        LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
        return false;
    }

    LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there\n", count, failed, expired, already_there);
    return true;
}
/**
 * Persist current mempool contents to <datadir>/mempool.dat.
 *
 * Snapshots the pool under mempool.cs, then serializes outside the pool lock:
 * version word, tx count, (tx, time, fee-delta) triples, and finally any fee
 * deltas for transactions not in the pool. Writes to mempool.dat.new and
 * atomically renames over the old file only after a successful FileCommit.
 *
 * @return true on success; false (after logging) on any I/O error.
 */
bool DumpMempool()
{
    int64_t start = GetTimeMicros();

    std::map<uint256, CAmount> mapDeltas;
    std::vector<TxMempoolInfo> vinfo;

    // Serialize dumps; prevents two concurrent callers racing on the temp file.
    static Mutex dump_mutex;
    LOCK(dump_mutex);

    {
        LOCK(mempool.cs);
        for (const auto &i : mempool.mapDeltas) {
            mapDeltas[i.first] = i.second;
        }
        vinfo = mempool.infoAll();
    }

    int64_t mid = GetTimeMicros();

    try {
        FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb");
        if (!filestr) {
            return false;
        }

        CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);

        uint64_t version = MEMPOOL_DUMP_VERSION;
        file << version;

        file << (uint64_t)vinfo.size();
        for (const auto& i : vinfo) {
            file << *(i.tx);
            file << (int64_t)i.nTime;
            file << (int64_t)i.nFeeDelta;
            // Deltas for in-pool transactions are stored inline above; only
            // leftovers go in the trailing map.
            mapDeltas.erase(i.tx->GetHash());
        }

        file << mapDeltas;
        if (!FileCommit(file.Get()))
            throw std::runtime_error("FileCommit failed");
        file.fclose();
        RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat");
        int64_t last = GetTimeMicros();
        LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*MICRO, (last-mid)*MICRO);
    } catch (const std::exception& e) {
        LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
        return false;
    }
    return true;
}
//! Guess how far we are in the verification process at the given block index
//! require cs_main if pindex has not been validated yet (because nChainTx might be unset)
double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
    if (pindex == nullptr)
        return 0.0;

    const int64_t now = time(nullptr);

    // Estimate the total transaction count of the chain as of "now" by
    // extrapolating from a known (time, tx-count) reference with the
    // historical transaction rate.
    double txs_total_estimate;
    if (pindex->nChainTx <= data.nTxCount) {
        // The index is at or behind the hard-coded checkpoint: extrapolate
        // from the checkpoint data.
        txs_total_estimate = data.nTxCount + (now - data.nTime) * data.dTxRate;
    } else {
        // The index is past the checkpoint: extrapolate from the index itself.
        txs_total_estimate = pindex->nChainTx + (now - pindex->GetBlockTime()) * data.dTxRate;
    }

    // Fraction of estimated total transactions already verified.
    return pindex->nChainTx / txs_total_estimate;
}
/**
 * Static-lifetime janitor: its destructor runs at program exit and releases
 * every heap-allocated block header owned (as raw pointers) by mapBlockIndex.
 */
class CMainCleanup
{
public:
    CMainCleanup() {}
    ~CMainCleanup() {
        // block headers
        for (const auto& entry : mapBlockIndex) {
            delete entry.second;
        }
        mapBlockIndex.clear();
    }
} instance_of_cmaincleanup;
| mit |
kamilsk/Common | src/Doctrine/Util/Parser.php | 810 | <?php
declare(strict_types = 1);
namespace OctoLab\Common\Doctrine\Util;
/**
* @author Kamil Samigullin <kamil@samigullin.info>
*/
final class Parser
{
    /**
     * Splits a raw SQL script into individual statements.
     *
     * Strips inline ("-- ..." / "# ...") and multi-line comments, collapses
     * all whitespace, and splits the remainder on ";".
     *
     * @param string $text raw SQL script
     *
     * @return string[] sql instructions
     *
     * @api
     */
    public static function extractSql(string $text): array
    {
        // remove comments
        // inline: everything from "--" or "#" to end of line
        $text = (string)preg_replace('/\s*(?:--|#).*$/um', '', $text);
        // multi-line: lazy match so comments containing several "*"
        // characters (e.g. "/* a * b * c */") are removed too; the old
        // pattern /\/\*[^*]*(\*)?[^*]*\*\// allowed at most one inner "*".
        $text = (string)preg_replace('/\/\*.*?\*\//us', '', $text);
        // flatten and filter
        $text = (string)preg_replace('/\n/', ' ', $text);
        $text = (string)preg_replace('/\s{2,}/', ' ', $text);
        $text = trim($text, '; ');
        return $text === '' ? [] : preg_split('/\s*;\s*/', $text);
    }
}
| mit |
smysnk/saddlewoof | frontend/src/script/filter/unsafe.js | 188 | 'use strict';
define(['app'], function (app) {
app.filter('unsafe', function($sce) {
return function(val) {
return $sce.trustAsHtml(val);
};
});
});
| mit |
boekkooi/datasource | lib/FSi/Component/DataSource/DataSource.php | 11909 | <?php
/**
* (c) FSi sp. z o.o. <info@fsi.pl>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace FSi\Component\DataSource;
use FSi\Component\DataSource\Driver\DriverInterface;
use FSi\Component\DataSource\Exception\DataSourceException;
use FSi\Component\DataSource\Field\FieldTypeInterface;
use Symfony\Component\EventDispatcher\EventDispatcher;
use FSi\Component\DataSource\Event\DataSourceEvents;
use FSi\Component\DataSource\Event\DataSourceEvent;
/**
* {@inheritdoc}
*/
class DataSource implements DataSourceInterface
{
    /**
     * Driver.
     *
     * @var \FSi\Component\DataSource\Driver\DriverInterface
     */
    private $driver;

    /**
     * Name of data source.
     *
     * @var string
     */
    private $name;

    /**
     * Fields of data source.
     *
     * @var array
     */
    private $fields = array();

    /**
     * Extensions of DataSource.
     *
     * @var array
     */
    private $extensions = array();

    /**
     * @var \FSi\Component\DataSource\DataSourceView
     */
    private $view;

    /**
     * @var \FSi\Component\DataSource\DataSourceFactoryInterface
     */
    private $factory;

    /**
     * Max results fetched at once.
     *
     * @var int
     */
    private $maxResults;

    /**
     * Offset for first result.
     *
     * @var int
     */
    private $firstResult;

    /**
     * Cache for methods that depends on fields data (cache is dropped whenever any of fields is dirty, or fields have changed).
     *
     * @var array
     */
    private $cache = array();

    /**
     * Flag set as true when fields or their data is modifying, or even new extension is added.
     *
     * @var bool
     */
    private $dirty = true;

    /**
     * @var \Symfony\Component\EventDispatcher\EventDispatcher
     */
    private $eventDispatcher;

    /**
     * @param \FSi\Component\DataSource\Driver\DriverInterface $driver
     * @param string $name
     * @throws \FSi\Component\DataSource\Exception\DataSourceException
     */
    public function __construct(DriverInterface $driver, $name = 'datasource')
    {
        $name = (string) $name;
        if (empty($name)) {
            // Message typo fixed: was 'can\t be empty' (rendered literally as "can\t").
            throw new DataSourceException('Name of data source can\'t be empty.');
        }
        if (!preg_match('/^[\w\d]+$/', $name)) {
            throw new DataSourceException('Name of data source may contain only word characters and digits.');
        }
        $this->driver = $driver;
        $this->name = $name;
        $this->eventDispatcher = new EventDispatcher();
        $driver->setDataSource($this);
    }

    /**
     * {@inheritdoc}
     */
    public function hasField($name)
    {
        return isset($this->fields[$name]);
    }

    /**
     * {@inheritdoc}
     */
    public function addField($name, $type = null, $comparison = null, $options = array())
    {
        // Either a ready-made field object is given, or (name, type, comparison)
        // to build one through the driver.
        if ($name instanceof FieldTypeInterface) {
            $field = $name;
            $name = $name->getName();
            if (empty($name)) {
                throw new DataSourceException('Given field has no name set.');
            }
        } else {
            if (empty($type)) {
                throw new DataSourceException('"type" can\'t be null.');
            }
            if (empty($comparison)) {
                throw new DataSourceException('"comparison" can\'t be null.');
            }
            $field = $this->driver->getFieldType($type);
            $field->setName($name);
            $field->setComparison($comparison);
            $field->setOptions($options);
        }
        $this->dirty = true;
        $this->fields[$name] = $field;
        $field->setDataSource($this);
        return $this;
    }

    /**
     * {@inheritdoc}
     */
    public function getName()
    {
        return $this->name;
    }

    /**
     * {@inheritdoc}
     */
    public function removeField($name)
    {
        if (isset($this->fields[$name])) {
            unset($this->fields[$name]);
            $this->dirty = true;
            return true;
        }
        return false;
    }

    /**
     * {@inheritdoc}
     */
    public function getField($name)
    {
        if (!$this->hasField($name)) {
            throw new DataSourceException(sprintf('There\'s no field with name "%s"', $name));
        }
        return $this->fields[$name];
    }

    /**
     * {@inheritdoc}
     */
    public function getFields()
    {
        return $this->fields;
    }

    /**
     * {@inheritdoc}
     */
    public function clearFields()
    {
        $this->fields = array();
        $this->dirty = true;
        return $this;
    }

    /**
     * {@inheritdoc}
     */
    public function bindParameters($parameters = array())
    {
        $this->dirty = true;

        //PreBindParameters event.
        $event = new DataSourceEvent\ParametersEventArgs($this, $parameters);
        $this->eventDispatcher->dispatch(DataSourceEvents::PRE_BIND_PARAMETERS, $event);
        $parameters = $event->getParameters();

        if (!is_array($parameters)) {
            throw new DataSourceException('Given parameters must be an array.');
        }

        foreach ($this->getFields() as $field) {
            $field->bindParameter($parameters);
        }

        //PostBindParameters event.
        $event = new DataSourceEvent\DataSourceEventArgs($this);
        $this->eventDispatcher->dispatch(DataSourceEvents::POST_BIND_PARAMETERS, $event);
    }

    /**
     * {@inheritdoc}
     */
    public function getResult()
    {
        $this->checkFieldsClarity();

        // Reuse the cached result only if pagination has not changed since.
        if (
            isset($this->cache['result'])
            && $this->cache['result']['maxresults'] == $this->getMaxResults()
            && $this->cache['result']['firstresult'] == $this->getFirstResult()
        ) {
            return $this->cache['result']['result'];
        }

        //PreGetResult event.
        $event = new DataSourceEvent\DataSourceEventArgs($this);
        $this->eventDispatcher->dispatch(DataSourceEvents::PRE_GET_RESULT, $event);

        $result = $this->driver->getResult($this->fields, $this->getFirstResult(), $this->getMaxResults());

        foreach ($this->getFields() as $field) {
            $field->setDirty(false);
        }

        // Message typo fixed: was "Conutable".
        if (!is_object($result)) {
            throw new DataSourceException('Returned result must be object implementing both Countable and IteratorAggregate.');
        }

        if ((!$result instanceof \IteratorAggregate) || (!$result instanceof \Countable)) {
            throw new DataSourceException(sprintf('Returned result must be both Countable and IteratorAggregate, instance of "%s" given.', get_class($result)));
        }

        //PostGetResult event.
        $event = new DataSourceEvent\ResultEventArgs($this, $result);
        $this->eventDispatcher->dispatch(DataSourceEvents::POST_GET_RESULT, $event);
        $result = $event->getResult();

        //Creating cache.
        $this->cache['result'] = array(
            'result' => $result,
            'firstresult' => $this->getFirstResult(),
            'maxresults' => $this->getMaxResults(),
        );

        return $result;
    }

    /**
     * {@inheritdoc}
     */
    public function setMaxResults($max)
    {
        $this->dirty = true;
        $this->maxResults = $max;
        return $this;
    }

    /**
     * {@inheritdoc}
     */
    public function setFirstResult($first)
    {
        $this->dirty = true;
        $this->firstResult = $first;
        return $this;
    }

    /**
     * {@inheritdoc}
     */
    public function getMaxResults()
    {
        return $this->maxResults;
    }

    /**
     * {@inheritdoc}
     */
    public function getFirstResult()
    {
        return $this->firstResult;
    }

    /**
     * {@inheritdoc}
     */
    public function addExtension(DataSourceExtensionInterface $extension)
    {
        $this->dirty = true;
        $this->extensions[] = $extension;

        foreach ((array) $extension->loadSubscribers() as $subscriber) {
            $this->eventDispatcher->addSubscriber($subscriber);
        }

        // Forward driver extensions only to drivers of a matching type.
        foreach ((array) $extension->loadDriverExtensions() as $driverExtension) {
            if (in_array($this->driver->getType(), $driverExtension->getExtendedDriverTypes())) {
                $this->driver->addExtension($driverExtension);
            }
        }

        return $this;
    }

    /**
     * {@inheritdoc}
     */
    public function getExtensions()
    {
        return $this->extensions;
    }

    /**
     * {@inheritdoc}
     */
    public function createView()
    {
        $view = new DataSourceView($this);

        //PreBuildView event.
        $event = new DataSourceEvent\ViewEventArgs($this, $view);
        $this->eventDispatcher->dispatch(DataSourceEvents::PRE_BUILD_VIEW, $event);

        foreach ($this->fields as $field) {
            $view->addField($field->createView());
        }

        $this->view = $view;

        //PostBuildView event.
        $event = new DataSourceEvent\ViewEventArgs($this, $view);
        $this->eventDispatcher->dispatch(DataSourceEvents::POST_BUILD_VIEW, $event);

        return $this->view;
    }

    /**
     * {@inheritdoc}
     */
    public function getParameters()
    {
        $this->checkFieldsClarity();

        if (isset($this->cache['parameters'])) {
            return $this->cache['parameters'];
        }

        $parameters = array();

        //PreGetParameters event.
        $event = new DataSourceEvent\ParametersEventArgs($this, $parameters);
        $this->eventDispatcher->dispatch(DataSourceEvents::PRE_GET_PARAMETERS, $event);
        $parameters = $event->getParameters();

        foreach ($this->fields as $field) {
            $field->getParameter($parameters);
        }

        //PostGetParameters event.
        $event = new DataSourceEvent\ParametersEventArgs($this, $parameters);
        $this->eventDispatcher->dispatch(DataSourceEvents::POST_GET_PARAMETERS, $event);
        $parameters = $event->getParameters();

        // Recursive filter: keep non-empty scalars (and numeric values such as
        // "0") and arrays that still contain something after filtering.
        $cleanfunc = function(&$value) use (&$cleanfunc) {
            if (is_scalar($value) && (!empty($value) || is_numeric($value))) {
                return true;
            } elseif (is_array($value)) {
                $value = array_filter($value, $cleanfunc);
                return !empty($value);
            } else {
                return false;
            }
        };

        //Clearing parameters from empty values.
        $parameters = array_filter($parameters, $cleanfunc);

        $this->cache['parameters'] = $parameters;
        return $parameters;
    }

    /**
     * {@inheritdoc}
     */
    public function getAllParameters()
    {
        if ($this->factory) {
            return $this->factory->getAllParameters();
        }
        return $this->getParameters();
    }

    /**
     * {@inheritdoc}
     */
    public function getOtherParameters()
    {
        if ($this->factory) {
            return $this->factory->getOtherParameters($this);
        }
        return array();
    }

    /**
     * {@inheritdoc}
     */
    public function setFactory(DataSourceFactoryInterface $factory)
    {
        $this->factory = $factory;
        return $this;
    }

    /**
     * {@inheritdoc}
     */
    public function getFactory()
    {
        return $this->factory;
    }

    /**
     * Checks if from last time some of data has changed, and if did, resets cache.
     */
    private function checkFieldsClarity()
    {
        //Initialize with dirty flag.
        $dirty = $this->dirty;

        foreach ($this->getFields() as $field) {
            $dirty = $dirty || $field->isDirty();
        }

        //If flag was set to dirty, or any of fields was dirty, reset cache.
        if ($dirty) {
            $this->cache = array();
            $this->dirty = false;
        }
    }
}
| mit |
AntonAbramov/odejda | frontend/js/swipe.js | 14775 | /*
* Swipe 2.0
*
* Brad Birdsall
* Copyright 2013, MIT License
*
*/
function Swipe(container, options) {
"use strict";
// utilities
var noop = function() {}; // simple no operation function
var offloadFn = function(fn) { setTimeout(fn || noop, 0) }; // offload a functions execution
// check browser capabilities
var browser = {
addEventListener: !!window.addEventListener,
touch: ('ontouchstart' in window) || window.DocumentTouch && document instanceof DocumentTouch,
transitions: (function(temp) {
var props = ['transitionProperty', 'WebkitTransition', 'MozTransition', 'OTransition', 'msTransition'];
for ( var i in props ) if (temp.style[ props[i] ] !== undefined) return true;
return false;
})(document.createElement('swipe'))
};
// quit if no root element
if (!container) return;
var element = container.children[0];
var slides, slidePos, width, length;
options = options || {};
var index = parseInt(options.startSlide, 10) || 0;
var speed = options.speed || 300;
options.continuous = options.continuous !== undefined ? options.continuous : true;
function setup() {
// cache slides
slides = element.children;
length = slides.length;
// set continuous to false if only one slide
if (slides.length < 2) options.continuous = false;
//special case if two slides
if (browser.transitions && options.continuous && slides.length < 3) {
element.appendChild(slides[0].cloneNode(true));
element.appendChild(element.children[1].cloneNode(true));
slides = element.children;
}
// create an array to store current positions of each slide
slidePos = new Array(slides.length);
// determine width of each slide
width = container.getBoundingClientRect().width || container.offsetWidth;
element.style.width = (slides.length * width) + 'px';
// stack elements
var pos = slides.length;
while(pos--) {
var slide = slides[pos];
slide.style.width = width + 'px';
slide.setAttribute('data-index', pos);
if (browser.transitions) {
slide.style.left = (pos * -width) + 'px';
move(pos, index > pos ? -width : (index < pos ? width : 0), 0);
}
}
// reposition elements before and after index
if (options.continuous && browser.transitions) {
move(circle(index-1), -width, 0);
move(circle(index+1), width, 0);
}
if (!browser.transitions) element.style.left = (index * -width) + 'px';
container.style.visibility = 'visible';
}
function prev() {
if (options.continuous) slide(index-1);
else if (index) slide(index-1);
}
function next() {
if (options.continuous) slide(index+1);
else if (index < slides.length - 1) slide(index+1);
}
function circle(index) {
// a simple positive modulo using slides.length
return (slides.length + (index % slides.length)) % slides.length;
}
function slide(to, slideSpeed) {
// do nothing if already on requested slide
if (index == to) return;
if (browser.transitions) {
var direction = Math.abs(index-to) / (index-to); // 1: backward, -1: forward
// get the actual position of the slide
if (options.continuous) {
var natural_direction = direction;
direction = -slidePos[circle(to)] / width;
// if going forward but to < index, use to = slides.length + to
// if going backward but to > index, use to = -slides.length + to
if (direction !== natural_direction) to = -direction * slides.length + to;
}
var diff = Math.abs(index-to) - 1;
// move all the slides between index and to in the right direction
while (diff--) move( circle((to > index ? to : index) - diff - 1), width * direction, 0);
to = circle(to);
move(index, width * direction, slideSpeed || speed);
move(to, 0, slideSpeed || speed);
if (options.continuous) move(circle(to - direction), -(width * direction), 0); // we need to get the next in place
} else {
to = circle(to);
animate(index * -width, to * -width, slideSpeed || speed);
//no fallback for a circular continuous if the browser does not accept transitions
}
index = to;
offloadFn(options.callback && options.callback(index, slides[index]));
}
function move(index, dist, speed) {
translate(index, dist, speed);
slidePos[index] = dist;
}
function translate(index, dist, speed) {
var slide = slides[index];
var style = slide && slide.style;
if (!style) return;
style.webkitTransitionDuration =
style.MozTransitionDuration =
style.msTransitionDuration =
style.OTransitionDuration =
style.transitionDuration = speed + 'ms';
style.webkitTransform = 'translate(' + dist + 'px,0)' + 'translateZ(0)';
style.msTransform =
style.MozTransform =
style.OTransform = 'translateX(' + dist + 'px)';
}
function animate(from, to, speed) {
// if not an animation, just reposition
if (!speed) {
element.style.left = to + 'px';
return;
}
var start = +new Date;
var timer = setInterval(function() {
var timeElap = +new Date - start;
if (timeElap > speed) {
element.style.left = to + 'px';
if (delay) begin();
options.transitionEnd && options.transitionEnd.call(event, index, slides[index]);
clearInterval(timer);
return;
}
element.style.left = (( (to - from) * (Math.floor((timeElap / speed) * 100) / 100) ) + from) + 'px';
}, 4);
}
// setup auto slideshow
var delay = options.auto || 0;
var interval;
function begin() {
interval = setTimeout(next, delay);
}
function stop() {
delay = 0;
clearTimeout(interval);
setTimeout(() => {
interval = setTimeout(next, delay);
delay = options.auto;
}, options.auto)
}
// setup initial vars
var start = {};
var delta = {};
var isScrolling;
// setup event capturing
var events = {
handleEvent: function(event) {
switch (event.type) {
case 'touchstart': this.start(event); break;
case 'touchmove': this.move(event); break;
case 'touchend': offloadFn(this.end(event)); break;
case 'webkitTransitionEnd':
case 'msTransitionEnd':
case 'oTransitionEnd':
case 'otransitionend':
case 'transitionend': offloadFn(this.transitionEnd(event)); break;
case 'resize': offloadFn(setup); break;
}
if (options.stopPropagation) event.stopPropagation();
},
start: function(event) {
var touches = event.touches[0];
// measure start values
start = {
// get initial touch coords
x: touches.pageX,
y: touches.pageY,
// store time to determine touch duration
time: +new Date
};
// used for testing first move event
isScrolling = undefined;
// reset delta and end measurements
delta = {};
// attach touchmove and touchend listeners
element.addEventListener('touchmove', this, false);
element.addEventListener('touchend', this, false);
},
move: function(event) {
// ensure swiping with one touch and not pinching
if ( event.touches.length > 1 || event.scale && event.scale !== 1) return
if (options.disableScroll) event.preventDefault();
var touches = event.touches[0];
// measure change in x and y
delta = {
x: touches.pageX - start.x,
y: touches.pageY - start.y
}
// determine if scrolling test has run - one time test
if ( typeof isScrolling == 'undefined') {
isScrolling = !!( isScrolling || Math.abs(delta.x) < Math.abs(delta.y) );
}
// if user is not trying to scroll vertically
if (!isScrolling) {
// prevent native scrolling
event.preventDefault();
// stop slideshow
stop();
// increase resistance if first or last slide
if (options.continuous) { // we don't add resistance at the end
translate(circle(index-1), delta.x + slidePos[circle(index-1)], 0);
translate(index, delta.x + slidePos[index], 0);
translate(circle(index+1), delta.x + slidePos[circle(index+1)], 0);
} else {
delta.x =
delta.x /
( (!index && delta.x > 0 // if first slide and sliding left
|| index == slides.length - 1 // or if last slide and sliding right
&& delta.x < 0 // and if sliding at all
) ?
( Math.abs(delta.x) / width + 1 ) // determine resistance level
: 1 ); // no resistance if false
// translate 1:1
translate(index-1, delta.x + slidePos[index-1], 0);
translate(index, delta.x + slidePos[index], 0);
translate(index+1, delta.x + slidePos[index+1], 0);
}
}
},
end: function(event) {
// measure duration
var duration = +new Date - start.time;
// determine if slide attempt triggers next/prev slide
var isValidSlide =
Number(duration) < 250 // if slide duration is less than 250ms
&& Math.abs(delta.x) > 20 // and if slide amt is greater than 20px
|| Math.abs(delta.x) > width/2; // or if slide amt is greater than half the width
// determine if slide attempt is past start and end
var isPastBounds =
!index && delta.x > 0 // if first slide and slide amt is greater than 0
|| index == slides.length - 1 && delta.x < 0; // or if last slide and slide amt is less than 0
if (options.continuous) isPastBounds = false;
// determine direction of swipe (true:right, false:left)
var direction = delta.x < 0;
// if not scrolling vertically
if (!isScrolling) {
if (isValidSlide && !isPastBounds) {
if (direction) {
if (options.continuous) { // we need to get the next in this direction in place
move(circle(index-1), -width, 0);
move(circle(index+2), width, 0);
} else {
move(index-1, -width, 0);
}
move(index, slidePos[index]-width, speed);
move(circle(index+1), slidePos[circle(index+1)]-width, speed);
index = circle(index+1);
} else {
if (options.continuous) { // we need to get the next in this direction in place
move(circle(index+1), width, 0);
move(circle(index-2), -width, 0);
} else {
move(index+1, width, 0);
}
move(index, slidePos[index]+width, speed);
move(circle(index-1), slidePos[circle(index-1)]+width, speed);
index = circle(index-1);
}
options.callback && options.callback(index, slides[index]);
} else {
if (options.continuous) {
move(circle(index-1), -width, speed);
move(index, 0, speed);
move(circle(index+1), width, speed);
} else {
move(index-1, -width, speed);
move(index, 0, speed);
move(index+1, width, speed);
}
}
}
// kill touchmove and touchend event listeners until touchstart called again
element.removeEventListener('touchmove', events, false)
element.removeEventListener('touchend', events, false)
},
transitionEnd: function(event) {
if (parseInt(event.target.getAttribute('data-index'), 10) == index) {
if (delay) begin();
options.transitionEnd && options.transitionEnd.call(event, index, slides[index]);
}
}
}
  // trigger setup
  setup();
  // start auto slideshow if applicable
  if (delay) begin();
  // add event listeners
  if (browser.addEventListener) {
    // set touchstart event on element
    if (browser.touch) element.addEventListener('touchstart', events, false);
    if (browser.transitions) {
      element.addEventListener('webkitTransitionEnd', events, false);
      element.addEventListener('msTransitionEnd', events, false);
      element.addEventListener('oTransitionEnd', events, false);
      element.addEventListener('otransitionend', events, false);
      element.addEventListener('transitionend', events, false);
    }
    // set resize event on window
    window.addEventListener('resize', events, false);
  } else {
    window.onresize = function () { setup() }; // to play nice with old IE
  }
  // expose the Swipe API
  return {
    setup: function() {
      setup();
    },
    slide: function(to, speed) {
      // cancel slideshow
      stop();
      slide(to, speed);
    },
    prev: function() {
      // cancel slideshow
      stop();
      prev();
    },
    next: function() {
      // cancel slideshow
      stop();
      next();
    },
    stop: function() {
      // cancel slideshow
      stop();
    },
    getPos: function() {
      // return current index position
      return index;
    },
    getNumSlides: function() {
      // return total number of slides
      return length;
    },
    // Tear down: reset inline styles and detach all listeners.
    // NOTE(review): stop() in this fork schedules an auto-restart (see stop
    // above), so after kill() the slideshow timer may fire against the
    // torn-down element -- confirm whether kill() should cancel it outright.
    kill: function() {
      // cancel slideshow
      stop();
      // reset element
      element.style.width = '';
      element.style.left = '';
      // reset slides
      var pos = slides.length;
      while(pos--) {
        var slide = slides[pos];
        slide.style.width = '';
        slide.style.left = '';
        if (browser.transitions) translate(pos, 0, 0);
      }
      // removed event listeners
      if (browser.addEventListener) {
        // remove current event listeners
        element.removeEventListener('touchstart', events, false);
        element.removeEventListener('webkitTransitionEnd', events, false);
        element.removeEventListener('msTransitionEnd', events, false);
        element.removeEventListener('oTransitionEnd', events, false);
        element.removeEventListener('otransitionend', events, false);
        element.removeEventListener('transitionend', events, false);
        window.removeEventListener('resize', events, false);
      }
      else {
        window.onresize = null;
      }
    }
  }
}
// Optional jQuery / Zepto plugin wrapper: $(sel).Swipe(opts) instantiates a
// Swipe per matched element and stores it under the element's 'Swipe' data
// key for later retrieval.
if ( window.jQuery || window.Zepto ) {
  (function($) {
    $.fn.Swipe = function(params) {
      return this.each(function() {
        $(this).data('Swipe', new Swipe($(this)[0], params));
      });
    }
  })( window.jQuery || window.Zepto )
}
export default Swipe; | mit |
llooker/carousel | src/App.tsx | 2550 | /*
* The MIT License (MIT)
*
* Copyright (c) 2019 Looker Data Sciences, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import { Sidebar } from "./components/Sidebar"
import { CoreSDKFunctions } from "./components/CoreSDKFunctions"
import { ApiFunctions } from "./components/ApiFunctions"
import React, { useState } from "react"
import { Switch, Route, Redirect } from "react-router-dom"
import { theme, Box, GlobalStyle } from "@looker/components"
import styled, { ThemeProvider } from "styled-components"
import { ExtensionProvider } from "@looker/extension-sdk-react"
import { EmbedDashboard } from "./components/Embed"
import { EmbedExplore } from "./components/Embed/EmbedExplore"
import { EmbedLook } from "./components/Embed/EmbedLook"
// Props for the root extension component (none currently required).
interface AppProps {
}
// Client-side routes this extension understands.
export enum ROUTES {
  API_ROUTE = "/api",
  CORESDK_ROUTE = "/coresdk",
  EMBED_DASHBOARD = "/embed/dashboard",
  EMBED_EXPLORE = "/embed/explore",
  EMBED_LOOK = "/embed/look"
}
// Root component: mounts the Looker ExtensionProvider + component theme and
// tracks the extension host's current route in local state.
// NOTE(review): only EmbedDashboard is rendered -- `route`/`routeState` are
// captured but not yet used to switch views, and the router/sidebar imports
// above are presently unused. Confirm this is the intended (work-in-progress)
// state rather than an accidentally-disabled router.
export const App: React.FC<AppProps> = () => {
  const [route, setRoute] = useState("")
  const [routeState, setRouteState] = useState()
  // Keep local state in sync with route changes reported by the host.
  const onRouteChange = (route: string, routeState?: any) => {
    setRoute(route)
    setRouteState(routeState)
  }
  return (
    <ExtensionProvider onRouteChange={onRouteChange}>
      <ThemeProvider theme={theme}>
        <EmbedDashboard />
      </ThemeProvider>
    </ExtensionProvider>
  )
}
// Page grid: fixed 200px sidebar column plus a fluid content column.
export const Layout = styled(Box)`
  display: grid;
  grid-gap: 20px;
  grid-template-columns: 200px auto;
  width: 100vw
`
| mit |
ceolter/angular-grid | community-modules/core/src/ts/eventKeys.ts | 9094 | export class Events {
/** Everything has changed with the columns. Either complete new set of columns set, or user called setState() */
/** @deprecated - grid no longer uses this, and setSate() also fires individual events */
public static EVENT_COLUMN_EVERYTHING_CHANGED = 'columnEverythingChanged';
/** User has set in new columns. */
public static EVENT_NEW_COLUMNS_LOADED = 'newColumnsLoaded';
/** The pivot mode flag was changed */
public static EVENT_COLUMN_PIVOT_MODE_CHANGED = 'columnPivotModeChanged';
/** A row group column was added, removed or order changed. */
public static EVENT_COLUMN_ROW_GROUP_CHANGED = 'columnRowGroupChanged';
/** expandAll / collapseAll was called from the api. */
public static EVENT_EXPAND_COLLAPSE_ALL = 'expandOrCollapseAll';
/** A pivot column was added, removed or order changed. */
public static EVENT_COLUMN_PIVOT_CHANGED = 'columnPivotChanged';
/** The list of grid columns has changed. */
public static EVENT_GRID_COLUMNS_CHANGED = 'gridColumnsChanged';
/** A value column was added, removed or agg function was changed. */
public static EVENT_COLUMN_VALUE_CHANGED = 'columnValueChanged';
/** A column was moved */
public static EVENT_COLUMN_MOVED = 'columnMoved';
/** One or more columns was shown / hidden */
public static EVENT_COLUMN_VISIBLE = 'columnVisible';
/** One or more columns was pinned / unpinned*/
public static EVENT_COLUMN_PINNED = 'columnPinned';
/** A column group was opened / closed */
public static EVENT_COLUMN_GROUP_OPENED = 'columnGroupOpened';
/** One or more columns was resized. If just one, the column in the event is set. */
public static EVENT_COLUMN_RESIZED = 'columnResized';
/** The list of displayed columns has changed, can result from columns open / close, column move, pivot, group, etc */
public static EVENT_DISPLAYED_COLUMNS_CHANGED = 'displayedColumnsChanged';
/** The list of virtual columns has changed, results from viewport changing */
public static EVENT_VIRTUAL_COLUMNS_CHANGED = 'virtualColumnsChanged';
/** Async Transactions Executed */
public static EVENT_ASYNC_TRANSACTIONS_FLUSHED = 'asyncTransactionsFlushed';
/** A row group was opened / closed */
public static EVENT_ROW_GROUP_OPENED = 'rowGroupOpened';
/** The client has set new data into the grid */
public static EVENT_ROW_DATA_CHANGED = 'rowDataChanged';
/** The client has updated data for the grid */
public static EVENT_ROW_DATA_UPDATED = 'rowDataUpdated';
/** The client has set new floating data into the grid */
public static EVENT_PINNED_ROW_DATA_CHANGED = 'pinnedRowDataChanged';
/** Range selection has changed */
public static EVENT_RANGE_SELECTION_CHANGED = 'rangeSelectionChanged';
/** Chart was created */
public static EVENT_CHART_CREATED = 'chartCreated';
/** Chart Range selection has changed */
public static EVENT_CHART_RANGE_SELECTION_CHANGED = 'chartRangeSelectionChanged';
/** Chart Options have changed */
public static EVENT_CHART_OPTIONS_CHANGED = 'chartOptionsChanged';
/** Chart was destroyed */
public static EVENT_CHART_DESTROYED = 'chartDestroyed';
/** For when the tool panel is shown / hidden */
public static EVENT_TOOL_PANEL_VISIBLE_CHANGED = 'toolPanelVisibleChanged';
/** Model was updated - grid updates the drawn rows when this happens */
public static EVENT_MODEL_UPDATED = 'modelUpdated';
public static EVENT_PASTE_START = 'pasteStart';
public static EVENT_PASTE_END = 'pasteEnd';
public static EVENT_FILL_START = 'fillStart';
public static EVENT_FILL_END = 'fillEnd';
public static EVENT_CELL_CLICKED = 'cellClicked';
public static EVENT_CELL_DOUBLE_CLICKED = 'cellDoubleClicked';
public static EVENT_CELL_MOUSE_DOWN = 'cellMouseDown';
public static EVENT_CELL_CONTEXT_MENU = 'cellContextMenu';
public static EVENT_CELL_VALUE_CHANGED = 'cellValueChanged';
public static EVENT_ROW_VALUE_CHANGED = 'rowValueChanged';
public static EVENT_CELL_FOCUSED = 'cellFocused';
public static EVENT_ROW_SELECTED = 'rowSelected';
public static EVENT_SELECTION_CHANGED = 'selectionChanged';
public static EVENT_CELL_KEY_DOWN = 'cellKeyDown';
public static EVENT_CELL_KEY_PRESS = 'cellKeyPress';
public static EVENT_CELL_MOUSE_OVER = 'cellMouseOver';
public static EVENT_CELL_MOUSE_OUT = 'cellMouseOut';
/** 2 events for filtering. The grid LISTENS for filterChanged and afterFilterChanged */
public static EVENT_FILTER_CHANGED = 'filterChanged';
/** Filter was change but not applied. Only useful if apply buttons are used in filters. */
public static EVENT_FILTER_MODIFIED = 'filterModified';
public static EVENT_FILTER_OPENED = 'filterOpened';
public static EVENT_SORT_CHANGED = 'sortChanged';
/** A row was removed from the dom, for any reason. Use to clean up resources (if any) used by the row. */
public static EVENT_VIRTUAL_ROW_REMOVED = 'virtualRowRemoved';
public static EVENT_ROW_CLICKED = 'rowClicked';
public static EVENT_ROW_DOUBLE_CLICKED = 'rowDoubleClicked';
/** Gets called once after the grid has finished initialising. */
public static EVENT_GRID_READY = 'gridReady';
/** Width of height of the main grid div has changed. Grid listens for this and does layout of grid if it's
* changed, so always filling the space it was given. */
public static EVENT_GRID_SIZE_CHANGED = 'gridSizeChanged';
/** The indexes of the rows rendered has changed, eg user has scrolled to a new vertical position. */
public static EVENT_VIEWPORT_CHANGED = 'viewportChanged';
/* The width of the scrollbar has been calculated */
public static EVENT_SCROLLBAR_WIDTH_CHANGED = 'scrollbarWidthChanged';
/** Rows were rendered for the first time (ie on async data load). */
public static EVENT_FIRST_DATA_RENDERED = 'firstDataRendered';
/** A column drag has started, either resizing a column or moving a column. */
public static EVENT_DRAG_STARTED = 'dragStarted';
/** A column drag has stopped */
public static EVENT_DRAG_STOPPED = 'dragStopped';
public static EVENT_CHECKBOX_CHANGED = 'checkboxChanged';
public static EVENT_ROW_EDITING_STARTED = 'rowEditingStarted';
public static EVENT_ROW_EDITING_STOPPED = 'rowEditingStopped';
public static EVENT_CELL_EDITING_STARTED = 'cellEditingStarted';
public static EVENT_CELL_EDITING_STOPPED = 'cellEditingStopped';
/** Main body of grid has scrolled, either horizontally or vertically */
public static EVENT_BODY_SCROLL = 'bodyScroll';
public static EVENT_ANIMATION_QUEUE_EMPTY = 'animationQueueEmpty';
public static EVENT_HEIGHT_SCALE_CHANGED = 'heightScaleChanged';
/** The displayed page for pagination has changed. For example the data was filtered or sorted,
* or the user has moved to a different page. */
public static EVENT_PAGINATION_CHANGED = 'paginationChanged';
/** Only used by React, Angular 2+, Web Components and VueJS ag-Grid components
* (not used if doing plain JavaScript or Angular 1.x). If the grid receives changes due
* to bound properties, this event fires after the grid has finished processing the change. */
public static EVENT_COMPONENT_STATE_CHANGED = 'componentStateChanged';
/** All items from here down are used internally by the grid, not intended for external use. */
// not documented, either experimental, or we just don't want users using an depending on them
public static EVENT_BODY_HEIGHT_CHANGED = 'bodyHeightChanged';
public static EVENT_DISPLAYED_COLUMNS_WIDTH_CHANGED = 'displayedColumnsWidthChanged';
public static EVENT_SCROLL_VISIBILITY_CHANGED = 'scrollVisibilityChanged';
public static EVENT_COLUMN_HOVER_CHANGED = 'columnHoverChanged';
public static EVENT_FLASH_CELLS = 'flashCells';
public static EVENT_PAGINATION_PIXEL_OFFSET_CHANGED = 'paginationPixelOffsetChanged';
public static EVENT_ROW_DRAG_ENTER = 'rowDragEnter';
public static EVENT_ROW_DRAG_MOVE = 'rowDragMove';
public static EVENT_ROW_DRAG_LEAVE = 'rowDragLeave';
public static EVENT_ROW_DRAG_END = 'rowDragEnd';
// primarily for charts
public static EVENT_POPUP_TO_FRONT = 'popupToFront';
// these are used for server side group and agg - only used by CS with Viewport Row Model - intention is
// to design these better around server side functions and then release to general public when fully working with
// all the row models.
public static EVENT_COLUMN_ROW_GROUP_CHANGE_REQUEST = 'columnRowGroupChangeRequest';
public static EVENT_COLUMN_PIVOT_CHANGE_REQUEST = 'columnPivotChangeRequest';
public static EVENT_COLUMN_VALUE_CHANGE_REQUEST = 'columnValueChangeRequest';
public static EVENT_COLUMN_AGG_FUNC_CHANGE_REQUEST = 'columnAggFuncChangeRequest';
public static EVENT_KEYBOARD_FOCUS = 'keyboardFocus';
public static EVENT_MOUSE_FOCUS = 'mouseFocus';
}
| mit |
seccom/kpass | web/src/routes/workspace/views/workspace-sidebar/index.js | 46 | export * from './workspace-sidebar.container'
| mit |
josephalevin/gwt-plug | api/src/main/java/com/josephalevin/gwtplug/rebind/PluginLookupGenerator.java | 7665 | /**
* Copyright © 2010-2011 Joseph A. Levin <josephalevin@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.josephalevin.gwtplug.rebind;
import com.google.gwt.core.ext.Generator;
import com.google.gwt.core.ext.GeneratorContext;
import com.google.gwt.core.ext.TreeLogger;
import com.google.gwt.core.ext.UnableToCompleteException;
import com.google.gwt.core.ext.typeinfo.JClassType;
import com.google.gwt.core.ext.typeinfo.NotFoundException;
import com.google.gwt.core.ext.typeinfo.TypeOracle;
import com.josephalevin.gwtplug.client.PluginIterator;
import com.josephalevin.gwtplug.client.PluginLookup;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
/**
 * GWT deferred-binding generator backing {@code GWT.create(PluginLookup.class)}.
 * <p>
 * At compile time it scans the classpath for {@code META-INF/services/<plugin>}
 * files (the standard {@code ServiceLoader} layout) and emits a
 * {@code PluginLookupImpl} whose {@code lookup(Class)} returns an iterator that
 * instantiates each registered implementation on demand.
 *
 * @author josephalevin
 */
public class PluginLookupGenerator extends Generator {
    // ServiceLoader-style registration directory on the classpath.
    private static final String META_INF_services = "META-INF/services/";
    /**
     * Generates the {@code PluginLookup} implementation class.
     *
     * @param logger   compiler log sink
     * @param context  generator context (type oracle + source writers)
     * @param typeName must be exactly {@code PluginLookup}; subtypes are rejected
     * @return fully qualified name of the generated implementation
     * @throws UnableToCompleteException if bound to anything other than
     *         {@code PluginLookup} itself
     */
    @Override
    public String generate(TreeLogger logger, GeneratorContext context, String typeName) throws UnableToCompleteException {
        // Guard: this generator must be bound to PluginLookup itself.
        if(!PluginLookup.class.getName().equals(typeName)){
            logger.log(TreeLogger.ERROR, String.format("Do not extend or implement the %s.", PluginLookup.class.getName()));
            throw new UnableToCompleteException();
        }
        TypeOracle oracle = context.getTypeOracle();
        JClassType type = oracle.findType(typeName);
        String packageName = type.getPackage().getName();
        String implName = type.getName() + "Impl";
        // tryCreate returns null when the impl was already generated this compile.
        PrintWriter writer = context.tryCreate(logger, packageName, implName);
        if(writer != null){
            // Map each plugin interface to its registered implementations.
            Map <JClassType, List<JClassType>> lookup = new HashMap<JClassType, List<JClassType>>();
            for (JClassType plugin : oracle.getTypes()) {
                List<JClassType> impls = implementations(logger, oracle, plugin);
                if(impls != null && !impls.isEmpty()){
                    lookup.put(plugin, impls);
                }
            }
            // Emit the implementation source: one if-branch per plugin type,
            // each returning a PluginIterator that news up implementations lazily.
            writer.format("package %s;", packageName);
            writer.println();
            writer.format("import %s;", Iterator.class.getName());
            writer.println();
            writer.format("public class %s implements %s {", implName, typeName);
            writer.println();
            writer.println("public <S> Iterator<S> lookup(Class<S> plugin){");
            //loop over each plugin type and map to the iterator
            for(Entry<JClassType, List<JClassType>> entry : lookup.entrySet()){
                JClassType pluginType = entry.getKey();
                List<JClassType> impls = entry.getValue();
                writer.format("if (plugin.getName().equals(\"%s\")){", pluginType.getQualifiedBinaryName());
                writer.println();
                writer.format("return (Iterator<S>) new %s<%s>(%s){", PluginIterator.class.getName(),pluginType.getQualifiedBinaryName(), impls.size());
                writer.println();
                writer.format("public %s get (int index){", pluginType.getQualifiedBinaryName());
                writer.println("switch(index){");
                for(int i = 0; i < impls.size(); i++){
                    writer.format("case %s: return new %s();", i, impls.get(i).getQualifiedBinaryName());
                    writer.println();
                }
                writer.println("default:throw new ArrayIndexOutOfBoundsException(index);");
                writer.println("}");
                writer.println("}");
                writer.println("};");
                writer.println("}");
            }
            writer.println("return null;");
            writer.println("}");
            writer.println("}");
            //commit the generated source code (commit also disposes the writer)
            context.commit(logger, writer);
        }
        return String.format("%s.%s", packageName, implName);
    }
    /**
     * Reads every {@code META-INF/services/<type>} resource on the classpath and
     * resolves each listed class name against the GWT type oracle.
     * Types the oracle cannot see (server-only classes) are logged and skipped.
     *
     * @return resolved implementation types; empty list if the resources cannot
     *         be enumerated at all
     */
    private List<JClassType> implementations(TreeLogger logger, TypeOracle oracle, JClassType type){
        try{
            // NOTE(review): uses the *system* classloader; in some build setups the
            // GWT compiler runs with a different context classloader -- confirm
            // plugin registrations are visible from the system classpath.
            Enumeration<URL> i = ClassLoader.getSystemResources(META_INF_services + type.getQualifiedBinaryName());
            List<JClassType> result = new ArrayList<JClassType>();
            while (i.hasMoreElements()) {
                URL url = i.nextElement();
                BufferedReader reader = null;
                try {
                    reader = new BufferedReader(new InputStreamReader(url.openStream()));
                    // one fully-qualified class name per non-blank line
                    for(String line = reader.readLine(); line != null; line = reader.readLine()){
                        line = line.trim();
                        if(!line.isEmpty()){
                            try{
                                JClassType impl = oracle.getType(line);
                                if(impl != null){
                                    result.add(impl);
                                }
                            }
                            catch(NotFoundException nfe){
                                //means the class is not able to be used by the client code
                                logger.log(TreeLogger.WARN, String.format("Ignoring type not visible to GWT compiler: %s", line));
                                continue;
                            }
                        }
                    }
                } catch (IOException ioe) {
                    // One unreadable resource should not abort the whole scan.
                    logger.log(TreeLogger.Type.WARN, "Unable to load plugin definitions", ioe);
                } finally {
                    if (reader != null) {
                        try {
                            reader.close();
                        } catch (Exception e) {
                            logger.log(TreeLogger.ERROR, "Unable to close stream.", e);
                        }
                    }
                }
            }
            return result;
        }
        catch (IOException ioe){
            // Could not even enumerate the resources: degrade to "no plugins".
            logger.log(TreeLogger.Type.WARN, "Unable to load plugin definitions", ioe);
            return Collections.emptyList();
        }
    }
}
| mit |
wenjue/Gank | app/src/main/java/io/gank/model/RandomResultModel.java | 863 | package io.gank.model;
import com.google.gson.annotations.SerializedName;
import java.io.Serializable;
import java.util.List;
/**
* Created by satan on 2015/8/14.
*/
public class RandomResultModel implements Serializable {
    // True when the gank.io API reported a failure for this request.
    @SerializedName("error")
    private boolean error;
    // Result payload as returned by the random endpoint: a list of lists of
    // gank items. (Exact grouping is defined by the API response shape.)
    @SerializedName("results")
    private List<List<GankModel>> results;
    /** @return whether the API reported an error */
    public boolean isError() {
        return this.error;
    }
    /** @param error API error flag */
    public void setError(boolean error) {
        this.error = error;
    }
    /** @return the deserialized result payload */
    public List<List<GankModel>> getResults() {
        return this.results;
    }
    /** @param results result payload to store */
    public void setResults(List<List<GankModel>> results) {
        this.results = results;
    }
    @Override
    public String toString() {
        StringBuilder text = new StringBuilder("RandomResultModel{");
        text.append("error=").append(this.error);
        text.append(", results=").append(this.results);
        text.append('}');
        return text.toString();
    }
}
| mit |
daniel-kun/llvmcrash | crashme.cpp | 127 | // Shared Library that is linked to the program "app"
#include <llvm/IR/LLVMContext.h>
// Global LLVMContext: its constructor runs during the shared library's
// static initialization. NOTE(review): given the repo name this appears to
// be the crash scenario under reproduction -- confirm against "app".
llvm::LLVMContext c;
// Intentionally empty; gives the application a symbol to link against so
// this library (and the global above) gets loaded.
void foo () {
}
didclab/stork | stork/module/http/HTTPModule.java | 515 | package stork.module.http;
import stork.feather.*;
import stork.module.*;
/** Stork transfer module handling http:// and https:// URIs. */
public class HTTPModule extends Module<HTTPResource> {
  // Instance initializer: registers this module's metadata with the framework.
  {
    name("Stork HTTP Module");
    protocols("http", "https");
    description("A module for interacting with HTTP(S) resources.");
  }
  // Open a session for the endpoint portion of `uri` and select the resource
  // path within it. NOTE(review): the resource's query string is currently
  // dropped (see the commented-out call) -- confirm whether query support was
  // intentionally disabled.
  public HTTPResource select(URI uri, Credential credential) {
    URI ep = uri.endpointURI(), res = uri.resourceURI();
    //return new HTTPSession(ep).select(res.path(), res.query());
    return new HTTPSession(ep).select(res.path());
  }
}
| mit |
Moccine/global-service-plus.com | web/libariries/bootstrap/node_modules/jscs-jsdoc/lib/rules/validate-jsdoc/check-redundant-returns.js | 801 | module.exports = checkReturnTypes;
// jsdoc tags this rule reacts to, the scopes it runs in, and its only
// accepted configuration value (checkRedundantReturns: true).
module.exports.tags = ['return', 'returns'];
module.exports.scopes = ['function'];
module.exports.options = {
    checkRedundantReturns: {allowedValues: [true]}
};
/**
 * Reports a redundant `@return`/`@returns` tag: the tag declares a type and
 * the function is not `@abstract`, yet the function body contains no return
 * statements at all.
 *
 * @param {(FunctionDeclaration|FunctionExpression)} node
 * @param {DocTag} tag
 * @param {Function} err
 */
function checkReturnTypes(node, tag, err) {
    // Nothing to validate when the tag carries no type expression.
    if (!tag.type) {
        return;
    }

    // Abstract methods legitimately document a return without a body.
    if (node.jsdoc.abstract) {
        return;
    }

    // No return statements anywhere in the function => the tag is redundant.
    var returnStatements = this._getReturnStatementsForNode(node);
    if (returnStatements.length === 0) {
        err('Redundant return statement', tag.loc);
    }
}
| mit |
JinkiJung/PAUT | VRAT/vrat/Assets/Plugins/NoesisGUI/Scripts/Proxies/NullableSize.cs | 2449 | /* ----------------------------------------------------------------------------
* This file was automatically generated by SWIG (http://www.swig.org).
* Version 2.0.4
*
* Do not make changes to this file unless you know what you are doing--modify
* the SWIG interface file instead.
* ----------------------------------------------------------------------------- */
using System;
using System.Runtime.InteropServices;
namespace Noesis
{
[StructLayoutAttribute(LayoutKind.Sequential, CharSet = CharSet.Ansi)]
internal struct NullableSize {
[MarshalAs(UnmanagedType.U1)]
private bool _hasValue;
private Size _value;
public bool HasValue { get { return this._hasValue; } }
public Size Value {
get {
if (!HasValue) {
throw new InvalidOperationException("Nullable does not have a value");
}
return this._value;
}
}
public NullableSize(Size v) {
this._hasValue = true;
this._value = v;
}
public static explicit operator Size(NullableSize n) {
if (!n.HasValue) {
throw new InvalidOperationException("Nullable does not have a value");
}
return n.Value;
}
public static implicit operator NullableSize(Size v) {
return new NullableSize(v);
}
public static implicit operator System.Nullable<Size>(NullableSize n) {
return n.HasValue ? new System.Nullable<Size>(n.Value) : new System.Nullable<Size>();
}
public static implicit operator NullableSize(System.Nullable<Size> n) {
return n.HasValue ? new NullableSize(n.Value) : new NullableSize();
}
public static bool operator==(NullableSize n, Size v) {
return n.HasValue && n.Value == v;
}
public static bool operator!=(NullableSize n, Size v) {
return !(n == v);
}
public static bool operator==(Size v, NullableSize n) {
return n == v;
}
public static bool operator!=(Size v, NullableSize n) {
return n != v;
}
public static bool operator==(NullableSize n0, NullableSize n1) {
return n0.HasValue && n1.HasValue ? n0.Value == n1.Value : n0.HasValue == n1.HasValue;
}
public static bool operator!=(NullableSize n0, NullableSize n1) {
return !(n0 == n1);
}
public override bool Equals(System.Object obj) {
return obj is NullableSize && this == (NullableSize)obj;
}
public bool Equals(NullableSize n) {
return this == n;
}
public override int GetHashCode() {
return HasValue ? Value.GetHashCode() : 0;
}
}
}
| mit |
MignDul/se-questionnaire-web | src/test/java/com/example/web/QuestionnaireControllerTests.java | 4445 | package com.example.web;
import com.example.domain.Questionnaire;
import com.example.domain.Reply;
import com.example.repository.QuestionnaireRepository;
import com.example.repository.ReplyRepository;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.web.servlet.WebMvcTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.web.servlet.MockMvc;
import static org.hamcrest.Matchers.containsString;
import static org.mockito.BDDMockito.given;
import static org.mockito.Matchers.any;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;
/**
 * Web-layer tests for {@code QuestionnaireController}: the MVC stack is run
 * in-memory via MockMvc and both repositories are replaced with Mockito
 * mocks, so no database is involved.
 */
@RunWith(SpringRunner.class)
@WebMvcTest(QuestionnaireController.class)
public class QuestionnaireControllerTests {
    @Autowired
    private MockMvc mvc;
    @MockBean
    private QuestionnaireRepository questionnaireRepository;
    @MockBean
    private ReplyRepository replyRepository;
    // Shared fixture questionnaire, rebuilt before every test.
    private Questionnaire questionnaire;
    @Before
    public void setUp() throws Exception {
        questionnaire = PrepareData.generateTestQuestionnaire();
    }
    // GET of an existing questionnaire renders its title and question labels.
    @Test
    public void testGetQuestionnaire() throws Exception {
        given(questionnaireRepository.findOne(2L)).willReturn(questionnaire);
        mvc.perform(get("/questionnaires/2"))
                .andExpect(status().isOk())
                .andExpect(content().string(containsString("A Test Questionnaire")))
                .andExpect(content().string(containsString("required")))
                .andExpect(content().string(containsString("Phone number:")))
                .andExpect(content().string(containsString("Gender:")))
                .andExpect(content().string(containsString("Male")))
                .andExpect(content().string(containsString("Others")));
    }
    // A fully-answered POST stores the reply and redirects to the finish page.
    @Test
    public void testCreateReplySuccess() throws Exception {
        given(questionnaireRepository.findOne(2L)).willReturn(questionnaire);
        given(replyRepository.save(any(Reply.class))).willReturn(null);
        mvc.perform(post("/questionnaires/2")
                .param("questionnaireId", "2")
                .param("items[0].questionId", "5")
                .param("items[0].inputText", "12312341234")
                .param("items[1].questionId", "6")
                .param("items[1].selectedOptions", "0")
        )
                .andExpect(status().isFound())
                .andExpect(redirectedUrl("/questionnaires/2/finished"));
    }
    // Leaving required answers blank re-renders the form with an error banner.
    @Test
    public void testCreateReplyFailureDueRequiredMissing() throws Exception {
        given(questionnaireRepository.findOne(2L)).willReturn(questionnaire);
        given(replyRepository.save(any(Reply.class))).willReturn(null);
        mvc.perform(post("/questionnaires/2")
                .param("questionnaireId", "2")
                .param("items[0].questionId", "5")
                .param("items[0].inputText", "")
                .param("items[1].questionId", "6")
                .param("items[1].selectedOptions", "")
        )
                .andExpect(status().isOk())
                .andExpect(content().string(containsString("You must answer the required questions.")));
    }
    // Field-level validation: a 4096-char answer exceeds the 4094-char limit
    // (and an out-of-range option index is submitted), producing a field error.
    @Test
    public void testCreateReplyFailureDueFieldErrors() throws Exception {
        given(questionnaireRepository.findOne(2L)).willReturn(questionnaire);
        given(replyRepository.save(any(Reply.class))).willReturn(null);
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < 4096; i++) {
            sb.append('0');
        }
        mvc.perform(post("/questionnaires/2")
                .param("questionnaireId", "2")
                .param("items[0].questionId", "5")
                .param("items[0].inputText", "12312341234")
                .param("items[1].questionId", "6")
                .param("items[1].selectedOptions", "-1")
                .param("items[1].inputText", sb.toString())
        )
                .andExpect(status().isOk())
                .andExpect(content().string(containsString("The length of input text should be less than or equal to 4094.")));
    }
}
| mit |
tg-msft/azure-sdk-tools | src/dotnet/Mgmt.CI.BuildTools/CI/CI.Common/Mgmt.CI.Common/BaseTasks/INetSdkTask.cs | 485 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
namespace MS.Az.Mgmt.CI.BuildTasks.Common.Base
{
    using System;
    /// <summary>
    /// Contract implemented by every .NET SDK CI task. Extends <see cref="IDisposable"/>
    /// so task implementations can release any resources they hold when the task completes.
    /// </summary>
    public interface INetSdkTask : IDisposable
    {
        /// <summary>
        /// Task name, either class name or a friendly name that identifies your task (Build task or Util task)
        /// </summary>
        string NetSdkTaskName { get; }
    }
}
| mit |
sinfo/eventdeck | server/routes/events/handlers.js | 3974 | var Joi = require('joi')
var render = require('../../views/event')
exports = module.exports
// TODO: GET LAST EVENT
// POST route config: creates a new event. Requires an authenticated session.
exports.create = {
  auth: 'session',
  tags: ['api', 'event'],
  validate: {
    // Only 'name' is mandatory; the rest of the payload is optional metadata.
    payload: {
      id: Joi.string().description('id of the event'),
      name: Joi.string().required().description('name of the event'),
      kind: Joi.string().description('kind of the event'),
      description: Joi.string().description('description of the event'),
      date: Joi.date().description('date of the event'),
      duration: Joi.date().description('duration of the event'),
      updated: Joi.date().description('date the event was last updated')
    }
  },
  pre: [
    // Server method string form: hapi resolves 'event.create' and passes payload + caller id.
    { method: 'event.create(payload, auth.credentials.id)', assign: 'event' }
    // TODO: CREATE NOTIFICATION
  ],
  handler: function (request, reply) {
    // 201 Created with a Location header pointing at the new resource.
    reply(render(request.pre.event)).created('/api/events/' + request.pre.event.id)
  },
  description: 'Creates a new event'
}
// PUT route config: updates an existing event by id. Requires an authenticated session.
exports.update = {
  auth: 'session',
  tags: ['api', 'event'],
  validate: {
    params: {
      id: Joi.string().required().description('id of the event we want to update')
    },
    // All payload fields are optional: only supplied fields are updated.
    payload: {
      id: Joi.string().description('id of the event'),
      name: Joi.string().description('name of the event'),
      kind: Joi.string().description('kind of the event'),
      description: Joi.string().description('description of the event'),
      date: Joi.date().description('date of the event'),
      duration: Joi.date().description('duration of the event'),
      updated: Joi.date().description('date the event was last updated')
    }
  },
  pre: [
    // TODO: CHECK PERMISSIONS
    { method: 'event.update(params.id, payload)', assign: 'event' }
    // TODO: CREATE NOTIFICATION
  ],
  handler: function (request, reply) {
    reply(render(request.pre.event))
  },
  description: 'Updates an event'
}
exports.get = {
auth: {
strategies: ['session'],
mode: 'try'
},
tags: ['api', 'event'],
validate: {
headers: Joi.object({
'Only-Public': Joi.boolean().description('Set to true if you only want to receive the public list, even if you are authenticated')
}).unknown(),
params: {
id: Joi.string().required().description('id of the event we want to retrieve')
},
query: {
fields: Joi.string().description('Fields we want to retrieve')
}
},
pre: [
{ method: 'event.get(params.id, query)', assign: 'event' }
],
handler: function (request, reply) {
reply(render(request.pre.event, request.auth.isAuthenticated && !request.headers['Only-Public']))
},
description: 'Gets an event'
}
exports.list = {
auth: {
strategies: ['session'],
mode: 'try'
},
tags: ['api', 'event'],
validate: {
headers: Joi.object({
'Only-Public': Joi.boolean().description('Set to true if you only want to receive the public list, even if you are authenticated')
}).unknown(),
query: {
fields: Joi.string().description('Fields we want to retrieve'),
skip: Joi.number().integer().min(0).default(0).description('Number of documents to skip'),
limit: Joi.number().integer().min(1).description('Max number of documents to retrieve'),
sort: Joi.string().description('How to sort the array')
}
},
pre: [
{ method: 'event.list(query)', assign: 'events' }
],
handler: function (request, reply) {
reply(render(request.pre.events, request.auth.isAuthenticated && !request.headers['Only-Public']))
},
description: 'Gets all the events'
}
// DELETE route config: removes an event by id. Requires an authenticated session.
exports.remove = {
  auth: 'session',
  tags: ['api', 'event'],
  validate: {
    params: {
      // TODO: CHECK PERMISSIONS
      id: Joi.string().required().description('Id of the event we want to remove')
      // TODO: REMOVE NOTIFICATIONS
    }
  },
  pre: [
    { method: 'event.remove(params.id)', assign: 'event' }
  ],
  handler: function (request, reply) {
    // Responds with the removed event document.
    reply(render(request.pre.event))
  },
  description: 'Removes an event'
}
| mit |
yangdd1205/data-structures | src/main/java/array/_20171001/FindMaximum.java | 2315 | package array._20171001;
import annotation.TimeComplexity;
/**
* http://www.geeksforgeeks.org/find-the-maximum-element-in-an-array-which-is-first-increasing-and-then-decreasing/
* <p>
* Given an array of integers which is initially increasing and then decreasing, find the maximum value in the array.
* <p>
* Input: arr[] = {8, 10, 20, 80, 100, 200, 400, 500, 3, 2, 1}
* Output: 500
* <p>
* Input: arr[] = {1, 3, 50, 10, 9, 7, 6}
* Output: 50
* <p>
* Corner case (No decreasing part)
* Input: arr[] = {10, 20, 30, 40, 50}
* Output: 50
* <p>
* Corner case (No increasing part)
* Input: arr[] = {120, 100, 80, 20, 0}
* Output: 120
*/
public class FindMaximum {
    /**
     * Finds the maximum by scanning every element.
     * Works for any non-empty array, bitonic or not. Time complexity: O(n).
     *
     * @param arr non-empty input array
     * @return the maximum element
     * @author GeeksforGeeks
     */
    public static int solution1(int[] arr) {
        int max = arr[0];
        for (int i = 1; i < arr.length; i++) {
            if (max < arr[i]) {
                max = arr[i];
            }
        }
        return max;
    }

    /**
     * Finds the maximum of a bitonic array (first increasing, then decreasing)
     * by binary-searching for the peak. Time complexity: O(log n).
     *
     * @param arr non-empty bitonic array
     * @return the maximum element
     * @author GeeksforGeeks
     */
    public static int solution2(int[] arr) {
        // Bug fix: the upper bound must be the LAST VALID INDEX (length - 1).
        // Passing arr.length made binarySearch read arr[high] out of bounds for
        // the documented corner case with no decreasing part, e.g. {10, 20, 30, 40, 50}.
        return binarySearch(arr, 0, arr.length - 1);
    }

    /**
     * Recursively locates the peak of the bitonic segment arr[low..high] (both inclusive).
     */
    private static int binarySearch(int[] arr, int low, int high) {
        // Single element left: it is the peak.
        if (low == high) {
            return arr[low];
        }
        // Two elements left: the larger one is the peak.
        if ((high == low + 1) && arr[low] >= arr[high]) {
            return arr[low];
        }
        if ((high == low + 1) && arr[low] < arr[high]) {
            return arr[high];
        }
        // With high - low >= 2, both mid - 1 and mid + 1 are in [low, high].
        int mid = (low + high) / 2;
        if (arr[mid] > arr[mid + 1] && arr[mid] > arr[mid - 1]) {
            return arr[mid];
        }
        if (arr[mid] > arr[mid + 1] && arr[mid] < arr[mid - 1]) {
            // Descending at mid: the peak is to the left.
            return binarySearch(arr, low, mid - 1);
        } else {
            // Ascending at mid: the peak is to the right.
            return binarySearch(arr, mid + 1, high);
        }
    }

    public static void main(String[] args) {
        {
            System.out.println("solution 1");
            System.out.println("The maximum element is " + solution1(new int[]{1, 30, 40, 50, 60, 70, 23, 20}));
        }
        {
            System.out.println("\nsolution 2");
            System.out.println("The maximum element is " + solution2(new int[]{1, 3, 50, 10, 9, 7, 6}));
        }
    }
}
| mit |
affecto/dotnet-IdentityManagement | IdentityManagement.WebApi/Mapping/GroupMapper.cs | 397 | using Affecto.IdentityManagement.Interfaces.Model;
using Affecto.IdentityManagement.WebApi.Model;
using Affecto.Mapping.AutoMapper;
using AutoMapper;
namespace Affecto.IdentityManagement.WebApi.Mapping
{
    /// <summary>
    /// One-way AutoMapper mapping from the domain <c>IGroup</c> model to the Web API <c>Group</c> DTO.
    /// </summary>
    internal class GroupMapper : OneWayMapper<IGroup, Group>
    {
        protected override void ConfigureMaps()
        {
            // Default member-by-member map; no custom projections required.
            Mapper.CreateMap<IGroup, Group>();
        }
    }
}
JackeyC/tango-overgrown-explorer | Assets/TangoSDK/Examples/Common/Scripts/TangoDynamicMesh.cs | 38595 | //-----------------------------------------------------------------------
// <copyright file="TangoDynamicMesh.cs" company="Google">
//
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// </copyright>
//-----------------------------------------------------------------------
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using Tango;
using UnityEngine;
using UnityEngine.Rendering;
/// <summary>
/// Updates a mesh dynamically based on the ITango3DReconstruction callbacks.
///
/// The "mesh" that is updated by TangoDynamicMesh is actually a collection of children split along grid boundaries.
/// If you want these children to draw or participate in physics, attach a MeshRenderer or MeshCollider to this object.
/// Any generated children will get copies of the MeshRenderer or MeshCollider or both.
/// </summary>
public class TangoDynamicMesh : MonoBehaviour, ITango3DReconstruction
{
/// <summary>
/// If set, debugging info is displayed.
/// </summary>
public bool m_enableDebugUI = true;
/// <summary>
/// If set, grid indices will stop meshing when they have been sufficiently observed.
/// </summary>
public bool m_enableSelectiveMeshing = false;
/// <summary>
/// How much the dynamic mesh should grow its internal arrays.
/// </summary>
private const float GROWTH_FACTOR = 1.5f;
/// <summary>
/// Maximum amount of time to spend each frame extracting meshes.
/// </summary>
private const int TIME_BUDGET_MS = 10;
/// <summary>
/// The initial amount of vertices for a single dynamic mesh.
/// </summary>
private const int INITIAL_VERTEX_COUNT = 100;
/// <summary>
/// The initial amount of indexes for a single dynamic mesh.
/// </summary>
private const int INITIAL_INDEX_COUNT = 99;
/// <summary>
/// How much the texture coordinates change relative to the actual distance.
/// </summary>
private const float UV_PER_METERS = 10;
/// <summary>
/// Used for selective meshing, number of sufficient observations necessary to consider a grid index as complete.
/// </summary>
private const int NUM_OBSERVATIONS_TO_COMPLETE = 25;
/// <summary>
/// Used for selective meshing, byte representation of the completed observation directions.
///
/// The mesh needs to be observed every 90 degrees around the mesh to be completed for a total of 4 directions.
/// The first 4 bits must be on, i.e. 00001111.
/// </summary>
private const byte DIRECTIONS_COMPLETE = 15;
/// <summary>
/// Used for selective meshing, the minimum value of the dot product between the camera forward direction and a
/// grid index's direction check.
///
/// If the calculated dot product meets this value, the grid index is considered to have been viewed from the given direction.
/// </summary>
private readonly float m_minDirectionCheck = Mathf.Cos(Mathf.PI / 4);
/// <summary>
/// The TangoApplication for the scene.
/// </summary>
private TangoApplication m_tangoApplication;
/// <summary>
/// The mesh renderer on this object. This mesh renderer will get used on all the DynamicMesh objects created.
/// </summary>
private MeshRenderer m_meshRenderer;
/// <summary>
/// The mesh collider on this object. This mesh collider will get used on all the DynamicMesh objects created.
/// </summary>
private MeshCollider m_meshCollider;
/// <summary>
/// Hash table to quickly get access to a dynamic mesh based on its position.
/// </summary>
private Dictionary<Tango3DReconstruction.GridIndex, TangoSingleDynamicMesh> m_meshes;
/// <summary>
/// List of grid indices that need to get extracted.
/// </summary>
private List<Tango3DReconstruction.GridIndex> m_gridIndexToUpdate;
/// <summary>
/// Backlog of grid indices we haven't had time to process.
/// </summary>
private HashSet<Tango3DReconstruction.GridIndex> m_gridUpdateBacklog;
/// <summary>
/// Debug info: Total number of vertices in the dynamic mesh.
/// </summary>
private int m_debugTotalVertices;
/// <summary>
/// Debug info: Total number of triangle indexes in the dynamic mesh.
/// </summary>
private int m_debugTotalTriangles;
/// <summary>
/// Debug info: Amount of time spent most recently updating the meshes.
/// </summary>
private float m_debugRemeshingTime;
/// <summary>
/// Debug info: Amount of meshes updated most recently.
/// </summary>
private int m_debugRemeshingCount;
/// <summary>
/// The list of grid index configuration sets (each represented as a list of grid indices) to
/// be checked for observation count during selective meshing.
/// </summary>
private Vector3[][] m_gridIndexConfigs;
/// <summary>
/// The bounding box of the mesh.
/// </summary>
private Bounds m_bounds;
/// <summary>
/// Gets the number of queued mesh updates still waiting for processing.
///
/// May be slightly overestimated if there have been too many updates to process and some
/// have been pushed to the backlog.
/// </summary>
/// <value>The number of queued mesh updates.</value>
public int NumQueuedMeshUpdates
{
get
{
return m_gridIndexToUpdate.Count + m_gridUpdateBacklog.Count;
}
}
    /// <summary>
    /// Unity Awake callback.
    ///
    /// Registers for 3D reconstruction callbacks, allocates the grid bookkeeping
    /// containers, and caches this object's renderer/collider as templates whose
    /// settings are copied to every generated child mesh.
    /// </summary>
    public void Awake()
    {
        m_tangoApplication = GameObject.FindObjectOfType<TangoApplication>();
        if (m_tangoApplication != null)
        {
            m_tangoApplication.Register(this);
        }
        m_meshes = new Dictionary<Tango3DReconstruction.GridIndex, TangoSingleDynamicMesh>(100);
        m_gridIndexToUpdate = new List<Tango3DReconstruction.GridIndex>(100);
        m_gridUpdateBacklog = new HashSet<Tango3DReconstruction.GridIndex>();
        // Cache the renderer and collider on this object.
        // They are disabled here because they only serve as configuration templates;
        // rendering/collision happens on the per-grid-index child objects.
        m_meshRenderer = GetComponent<MeshRenderer>();
        if (m_meshRenderer != null)
        {
            m_meshRenderer.enabled = false;
        }
        m_meshCollider = GetComponent<MeshCollider>();
        if (m_meshCollider != null)
        {
            m_meshCollider.enabled = false;
        }
        if (m_enableSelectiveMeshing)
        {
            _InitGridIndexConfigs();
        }
    }
    /// <summary>
    /// Unity destroy function.
    ///
    /// Unregisters from 3D reconstruction callbacks so no further grid updates
    /// are delivered to this (now destroyed) component.
    /// </summary>
    public void OnDestroy()
    {
        if (m_tangoApplication != null)
        {
            m_tangoApplication.Unregister(this);
        }
    }
    /// <summary>
    /// Unity Update callback.
    ///
    /// Processes as many queued grid-index updates as fit in the per-frame time
    /// budget, then spends any remaining budget on the backlog. Indices that were
    /// not reached (or whose meshes must grow) are carried over to the next frame.
    /// </summary>
    public void Update()
    {
        // Collects indices that must be revisited next frame (ran out of time or space).
        List<Tango3DReconstruction.GridIndex> needsResize = new List<Tango3DReconstruction.GridIndex>();
        int it;
        int startTimeMS = (int)(Time.realtimeSinceStartup * 1000);
        for (it = 0; it < m_gridIndexToUpdate.Count; ++it)
        {
            Tango3DReconstruction.GridIndex gridIndex = m_gridIndexToUpdate[it];
            if (_GoneOverTimeBudget(startTimeMS))
            {
                Debug.Log(string.Format(
                    "TangoDynamicMesh.Update() ran over budget with {0}/{1} grid indexes processed.",
                    it, m_gridIndexToUpdate.Count));
                break;
            }
            _UpdateMeshAtGridIndex(gridIndex, needsResize);
            // The index was just refreshed, so it no longer belongs in the backlog.
            m_gridUpdateBacklog.Remove(gridIndex);
        }
        // While we have time left over, go through backlog of unprocessed indices.
        int numBacklogGridIndicesProcessed = 0;
        if (!_GoneOverTimeBudget(startTimeMS))
        {
            // Can't remove from a HashSet while enumerating it, so batch the removals.
            List<Tango3DReconstruction.GridIndex> processedBacklog = new List<Tango3DReconstruction.GridIndex>();
            foreach (Tango3DReconstruction.GridIndex gridIndex in m_gridUpdateBacklog)
            {
                _UpdateMeshAtGridIndex(gridIndex, needsResize);
                processedBacklog.Add(gridIndex);
                ++numBacklogGridIndicesProcessed;
                if (_GoneOverTimeBudget(startTimeMS))
                {
                    break;
                }
            }
            m_gridUpdateBacklog.ExceptWith(processedBacklog);
        }
        m_debugRemeshingTime = Time.realtimeSinceStartup - (startTimeMS * 0.001f);
        m_debugRemeshingCount = it + numBacklogGridIndicesProcessed;
        // Any leftover grid indices also need to get processed next frame.
        while (it < m_gridIndexToUpdate.Count)
        {
            needsResize.Add(m_gridIndexToUpdate[it]);
            ++it;
        }
        m_gridIndexToUpdate = needsResize;
    }
/// <summary>
/// Displays statistics and diagnostics information about the meshing cubes.
/// </summary>
public void OnGUI()
{
if (!m_enableDebugUI)
{
return;
}
GUI.color = Color.black;
string str = string.Format(
"<size=30>Total Verts/Triangles: {0}/{1} Volumes: {2} UpdateQueue: {3}</size>",
m_debugTotalVertices, m_debugTotalTriangles, m_meshes.Count, m_gridIndexToUpdate.Count);
GUI.Label(new Rect(40, 40, 1000, 40), str);
str = string.Format("<size=30>Remeshing Time: {0:F6} Remeshing Count: {1}</size>",
m_debugRemeshingTime, m_debugRemeshingCount);
GUI.Label(new Rect(40, 80, 1000, 40), str);
str = string.Format("<size=30>Backlog Size: {0}</size>", m_gridUpdateBacklog.Count);
GUI.Label(new Rect(40, 120, 1000, 40), str);
}
    /// <summary>
    /// Called when the 3D Reconstruction is dirty.
    ///
    /// Prioritizes responsiveness over completeness: anything still pending from the
    /// previous notification is demoted to the backlog and the queue is replaced with
    /// the newly dirty indices.
    /// </summary>
    /// <param name="gridIndexList">List of GridIndex objects that are dirty and should be updated.</param>
    public void OnTango3DReconstructionGridIndicesDirty(List<Tango3DReconstruction.GridIndex> gridIndexList)
    {
        // It's more important to be responsive than to handle all indexes. Add unprocessed indices to the
        // backlog and clear the current list if we have fallen behind in processing.
        m_gridUpdateBacklog.UnionWith(m_gridIndexToUpdate);
        m_gridIndexToUpdate.Clear();
        m_gridIndexToUpdate.AddRange(gridIndexList);
    }
/// <summary>
/// Clear the dynamic mesh's internal meshes.
///
/// NOTE: This does not clear the 3D Reconstruction's state. To do that call TangoApplication.Tango3DRClear().
/// </summary>
public void Clear()
{
foreach (Transform child in transform)
{
Destroy(child.gameObject);
}
m_meshes.Clear();
}
/// <summary>
/// Exports the constructed mesh to an OBJ file format. The file will include info
/// based on the enabled options in TangoApplication.
/// </summary>
/// <param name="filepath">File path to output the OBJ.</param>
public void ExportMeshToObj(string filepath)
{
AndroidHelper.ShowAndroidToastMessage("Exporting mesh...");
StringBuilder sb = new StringBuilder();
int startVertex = 0;
foreach (TangoSingleDynamicMesh tmesh in m_meshes.Values)
{
Mesh mesh = tmesh.m_mesh;
int meshVertices = 0;
sb.Append(string.Format("g {0}\n", tmesh.name));
// Vertices.
for (int i = 0; i < mesh.vertices.Length; i++)
{
meshVertices++;
Vector3 v = tmesh.transform.TransformPoint(mesh.vertices[i]);
// Include vertex colors as part of vertex point for applications that support it.
if (mesh.colors32.Length > 0)
{
float r = mesh.colors32[i].r / 255.0f;
float g = mesh.colors32[i].g / 255.0f;
float b = mesh.colors32[i].b / 255.0f;
sb.Append(string.Format("v {0} {1} {2} {3} {4} {5} 1.0\n", v.x, v.y, -v.z, r, g, b));
}
else
{
sb.Append(string.Format("v {0} {1} {2} 1.0\n", v.x, v.y, -v.z));
}
}
sb.Append("\n");
// Normals.
if (mesh.normals.Length > 0)
{
foreach (Vector3 n in mesh.normals)
{
sb.Append(string.Format("vn {0} {1} {2}\n", n.x, n.y, -n.z));
}
sb.Append("\n");
}
// Texture coordinates.
if (mesh.uv.Length > 0)
{
foreach (Vector3 uv in mesh.uv)
{
sb.Append(string.Format("vt {0} {1}\n", uv.x, uv.y));
}
sb.Append("\n");
}
// Faces.
int[] triangles = mesh.triangles;
for (int j = 0; j < triangles.Length; j += 3)
{
int v1 = triangles[j + 2] + 1 + startVertex;
int v2 = triangles[j + 1] + 1 + startVertex;
int v3 = triangles[j] + 1 + startVertex;
// Filter out single vertex index triangles which cause Maya to not be able to
// import the mesh.
if (v1 != v2 || v2 != v3)
{
sb.Append(string.Format("f {0}/{0}/{0} {1}/{1}/{1} {2}/{2}/{2}\n", v1, v2, v3));
}
}
sb.Append("\n");
startVertex += meshVertices;
}
StreamWriter sw = new StreamWriter(filepath);
sw.AutoFlush = true;
sw.Write(sb.ToString());
AndroidHelper.ShowAndroidToastMessage(string.Format("Exported: {0}", filepath));
}
/// <summary>
/// Gets each single dynamic mesh and fills out arrays with properties. Each mesh corresponds to the same index in each array.
/// </summary>
/// <param name="gridIndices">Filled out with grid index of each mesh.</param>
/// <param name="completed">Filled out with completion state of each mesh.</param>
/// <param name="completionScale">Filled out with amount that each mesh has been completed.</param>
/// <param name="directions">Filled out with a byte representation of the observed directions of each mesh.</param>
public void GetSingleMeshProperties(out Tango3DReconstruction.GridIndex[] gridIndices, out bool[] completed, out float[] completionScale, out byte[] directions)
{
int numIndices = m_meshes.Count;
gridIndices = new Tango3DReconstruction.GridIndex[numIndices];
completed = new bool[numIndices];
completionScale = new float[numIndices];
directions = new byte[numIndices];
// Assign mesh properties to each index of the arrays.
m_meshes.Keys.CopyTo(gridIndices, 0);
for (int i = 0; i < numIndices; i++)
{
TangoSingleDynamicMesh mesh = m_meshes[gridIndices[i]];
completed[i] = mesh.m_completed;
completionScale[i] = 1.0f * mesh.m_observations / NUM_OBSERVATIONS_TO_COMPLETE;
directions[i] = mesh.m_directions;
}
}
    /// <summary>
    /// Gets the highest point on the dynamic mesh through at a given position.
    ///
    /// Raycast against a subset of TangoSingleDynamicMesh colliders and find the highest point. The subset
    /// is defined by all the meshes intersected by a downward-pointing ray that passes through a position.
    /// </summary>
    /// <returns>The highest raycast hit point.</returns>
    /// <param name="position">The position to cast a ray through.</param>
    /// <param name="maxDistance">The max distance of the ray.</param>
    public Vector3 GetHighestRaycastHitPoint(Vector3 position, float maxDistance)
    {
        // No collider template on this object means children have no colliders either,
        // so there is nothing to raycast against; fall back to the input position.
        if (GetComponent<Collider>() == null)
        {
            return position;
        }
        Vector3 topHitPoint = position;
        // Cast straight down, starting half the max distance above the query position.
        Ray ray = new Ray(position + (Vector3.up * (maxDistance / 2)), Vector3.down);
        // Find the starting grid index X and Y components.
        // Each grid index covers 16 reconstruction voxels per axis.
        float gridIndexSize = m_tangoApplication.m_3drResolutionMeters * 16;
        int gridIndexX = Mathf.FloorToInt(position.x / gridIndexSize);
        int gridIndexY = Mathf.FloorToInt(position.z / gridIndexSize);
        // Find the top and bottom grid indices that are overlapped by a raycast downward through the position.
        int topZ = Mathf.FloorToInt(ray.origin.y / gridIndexSize);
        int btmZ = Mathf.FloorToInt((ray.origin.y - maxDistance) / gridIndexSize);
        // Perform a raycast on each TangoSingleDynamicMesh collider the ray passes through.
        for (int i = btmZ; i <= topZ; i++)
        {
            Tango3DReconstruction.GridIndex newGridIndex = new Tango3DReconstruction.GridIndex();
            newGridIndex.x = gridIndexX;
            newGridIndex.y = gridIndexY;
            newGridIndex.z = i;
            // Find the mesh associated with the grid index if available. Raycast to the attached collider.
            TangoSingleDynamicMesh singleDynamicMesh;
            if (m_meshes.TryGetValue(newGridIndex, out singleDynamicMesh))
            {
                Collider c = singleDynamicMesh.GetComponent<Collider>();
                RaycastHit hit;
                if (c.Raycast(ray, out hit, maxDistance))
                {
                    // Update the highest position if the new raycast hit is above. Reject the hit if the normal is orthogonal to the up
                    // direction (to prevent the object from unintentionally climbing up walls).
                    if ((hit.point.y > topHitPoint.y) && (Vector3.Dot(hit.normal, Vector3.up) > 0.1f))
                    {
                        topHitPoint = hit.point;
                    }
                }
            }
        }
        return topHitPoint;
    }
    /// <summary>
    /// Gets the bounding box enclosing all grid indices meshed so far.
    /// </summary>
    /// <returns>The bounds.</returns>
    public Bounds GetBounds()
    {
        return m_bounds;
    }
/// <summary>
/// Given a time value indicating when meshing started this frame,
/// returns a value indicating whether this frame's time budget for meshing has been exceeded.
/// </summary>
/// <returns><c>true</c>, if this frame's meshing time budget has been exceeded, <c>false</c> otherwise.</returns>
/// <param name="startTimeMS">Meshing start timestamp this frame (in milliseconds).</param>
private bool _GoneOverTimeBudget(int startTimeMS)
{
return (Time.realtimeSinceStartup * 1000) - startTimeMS > TIME_BUDGET_MS;
}
    /// <summary>
    /// Extract and update (or create, if it doesn't exist) the mesh at the given grid index.
    /// </summary>
    /// <param name="gridIndex">Grid index.</param>
    /// <param name="needsResize">List to which indices needing a future resize will be added.</param>
    private void _UpdateMeshAtGridIndex(Tango3DReconstruction.GridIndex gridIndex, List<Tango3DReconstruction.GridIndex> needsResize)
    {
        TangoSingleDynamicMesh dynamicMesh;
        if (!m_meshes.TryGetValue(gridIndex, out dynamicMesh))
        {
            // build a dynamic mesh as a child of this game object.
            GameObject newObj = new GameObject();
            newObj.transform.parent = transform;
            newObj.name = string.Format("{0},{1},{2}", gridIndex.x, gridIndex.y, gridIndex.z);
            newObj.layer = gameObject.layer;
            dynamicMesh = newObj.AddComponent<TangoSingleDynamicMesh>();
            dynamicMesh.m_vertices = new Vector3[INITIAL_VERTEX_COUNT];
            // UV / color buffers are only allocated when the corresponding 3DR
            // generation options are enabled on the TangoApplication.
            if (m_tangoApplication.m_3drGenerateTexCoord)
            {
                dynamicMesh.m_uv = new Vector2[INITIAL_VERTEX_COUNT];
            }
            if (m_tangoApplication.m_3drGenerateColor)
            {
                dynamicMesh.m_colors = new Color32[INITIAL_VERTEX_COUNT];
            }
            dynamicMesh.m_triangles = new int[INITIAL_INDEX_COUNT];
            // Update debug info too.
            m_debugTotalVertices = dynamicMesh.m_vertices.Length;
            m_debugTotalTriangles = dynamicMesh.m_triangles.Length;
            // Add the other necessary objects
            MeshFilter meshFilter = newObj.AddComponent<MeshFilter>();
            dynamicMesh.m_mesh = meshFilter.mesh;
            // Copy renderer/collider settings from the template components on this object.
            if (m_meshRenderer != null)
            {
                MeshRenderer meshRenderer = newObj.AddComponent<MeshRenderer>();
#if UNITY_5
                meshRenderer.shadowCastingMode = m_meshRenderer.shadowCastingMode;
                meshRenderer.receiveShadows = m_meshRenderer.receiveShadows;
                meshRenderer.sharedMaterials = m_meshRenderer.sharedMaterials;
                meshRenderer.useLightProbes = m_meshRenderer.useLightProbes;
                meshRenderer.reflectionProbeUsage = m_meshRenderer.reflectionProbeUsage;
                meshRenderer.probeAnchor = m_meshRenderer.probeAnchor;
#elif UNITY_4_6
                meshRenderer.castShadows = m_meshRenderer.castShadows;
                meshRenderer.receiveShadows = m_meshRenderer.receiveShadows;
                meshRenderer.sharedMaterials = m_meshRenderer.sharedMaterials;
                meshRenderer.useLightProbes = m_meshRenderer.useLightProbes;
                meshRenderer.lightProbeAnchor = m_meshRenderer.lightProbeAnchor;
#endif
            }
            if (m_meshCollider != null)
            {
                MeshCollider meshCollider = newObj.AddComponent<MeshCollider>();
                meshCollider.convex = m_meshCollider.convex;
                meshCollider.isTrigger = m_meshCollider.isTrigger;
                meshCollider.sharedMaterial = m_meshCollider.sharedMaterial;
                meshCollider.sharedMesh = dynamicMesh.m_mesh;
                dynamicMesh.m_meshCollider = meshCollider;
            }
            m_meshes.Add(gridIndex, dynamicMesh);
            _UpdateBounds(gridIndex);
        }
        // Skip updating this grid index if it is considered completed.
        if (m_enableSelectiveMeshing)
        {
            if (dynamicMesh.m_completed)
            {
                return;
            }
            _ObserveGridIndex(gridIndex, dynamicMesh);
        }
        // Last frame the mesh needed more space. Give it more room now.
        if (dynamicMesh.m_needsToGrow)
        {
            int newVertexSize = (int)(dynamicMesh.m_vertices.Length * GROWTH_FACTOR);
            int newTriangleSize = (int)(dynamicMesh.m_triangles.Length * GROWTH_FACTOR);
            // Keep the index buffer length a multiple of 3 (whole triangles).
            newTriangleSize -= newTriangleSize % 3;
            // Remove the old size, add the new size.
            m_debugTotalVertices += newVertexSize - dynamicMesh.m_vertices.Length;
            m_debugTotalTriangles += newTriangleSize - dynamicMesh.m_triangles.Length;
            dynamicMesh.m_vertices = new Vector3[newVertexSize];
            if (m_tangoApplication.m_3drGenerateTexCoord)
            {
                dynamicMesh.m_uv = new Vector2[newVertexSize];
            }
            if (m_tangoApplication.m_3drGenerateColor)
            {
                dynamicMesh.m_colors = new Color32[newVertexSize];
            }
            dynamicMesh.m_triangles = new int[newTriangleSize];
            dynamicMesh.m_needsToGrow = false;
        }
        int numVertices;
        int numTriangles;
        Tango3DReconstruction.Status status = m_tangoApplication.Tango3DRExtractMeshSegment(
            gridIndex, dynamicMesh.m_vertices, null, dynamicMesh.m_colors, dynamicMesh.m_triangles,
            out numVertices, out numTriangles);
        if (status != Tango3DReconstruction.Status.INSUFFICIENT_SPACE
            && status != Tango3DReconstruction.Status.SUCCESS)
        {
            Debug.Log("Tango3DR extraction failed, status code = " + status + Environment.StackTrace);
            return;
        }
        else if (status == Tango3DReconstruction.Status.INSUFFICIENT_SPACE)
        {
            // We already spent the time extracting this mesh, let's not spend any more time this frame
            // to extract the mesh.
            Debug.Log(string.Format(
                "TangoDynamicMesh.Update() extraction ran out of space with room for {0} vertexes, {1} indexes.",
                dynamicMesh.m_vertices.Length, dynamicMesh.m_triangles.Length));
            dynamicMesh.m_needsToGrow = true;
            needsResize.Add(gridIndex);
        }
        // Make any leftover triangles degenerate.
        // All-zero indices collapse the unused triangles so they render as nothing.
        for (int triangleIt = numTriangles * 3; triangleIt < dynamicMesh.m_triangles.Length; ++triangleIt)
        {
            dynamicMesh.m_triangles[triangleIt] = 0;
        }
        if (dynamicMesh.m_uv != null)
        {
            // Add texture coordinates.
            // UVs are derived from world position so texture tiles continuously across segments.
            for (int vertexIt = 0; vertexIt < numVertices; ++vertexIt)
            {
                Vector3 vertex = dynamicMesh.m_vertices[vertexIt];
                dynamicMesh.m_uv[vertexIt].x = vertex.x * UV_PER_METERS;
                dynamicMesh.m_uv[vertexIt].y = (vertex.z + vertex.y) * UV_PER_METERS;
            }
        }
        dynamicMesh.m_mesh.Clear();
        dynamicMesh.m_mesh.vertices = dynamicMesh.m_vertices;
        dynamicMesh.m_mesh.uv = dynamicMesh.m_uv;
        dynamicMesh.m_mesh.colors32 = dynamicMesh.m_colors;
        dynamicMesh.m_mesh.triangles = dynamicMesh.m_triangles;
        if (m_tangoApplication.m_3drGenerateNormal)
        {
            dynamicMesh.m_mesh.RecalculateNormals();
        }
        if (dynamicMesh.m_meshCollider != null)
        {
            // Force the mesh collider to update too.
            dynamicMesh.m_meshCollider.sharedMesh = null;
            dynamicMesh.m_meshCollider.sharedMesh = dynamicMesh.m_mesh;
        }
    }
    /// <summary>
    /// When the grid index has been updated, also determine whether it should be considered completed
    /// based on its neighboring grid indices, number of observations, and mesh completeness.
    ///
    /// When checking a grid index for completeness, the observation count of neighboring grid indices is checked.
    /// If all grid indices contained in one of the configurations have a sufficient number of observations,
    /// the grid index is considered complete.
    /// </summary>
    /// <param name="gridIndex">Grid index to observe.</param>
    /// <param name="singleMesh">TangoSingleDynamicMesh to update and observe.</param>
    private void _ObserveGridIndex(Tango3DReconstruction.GridIndex gridIndex, TangoSingleDynamicMesh singleMesh)
    {
        // Increment the observations made for this grid index.
        singleMesh.m_observations++;
        // Add observation based on the direction of the observation.
        _ViewGridIndex(gridIndex);
        // Exit if the grid index has not been observed from all 8 directions.
        // (m_directions accumulates view-direction bits; DIRECTIONS_COMPLETE = 0b1111.)
        if (singleMesh.m_directions != DIRECTIONS_COMPLETE)
        {
            return;
        }
        // Run through each grid index configuration and check if the grid index is complete.
        for (int i = 0; i < m_gridIndexConfigs.Length; i++)
        {
            Vector3[] config = m_gridIndexConfigs[i];
            bool neighborsObserved = true;
            foreach (Vector3 nPosition in config)
            {
                // Each config entry is an offset relative to the grid index under test.
                Tango3DReconstruction.GridIndex neighbor = new Tango3DReconstruction.GridIndex();
                neighbor.x = (Int32)(nPosition.x + gridIndex.x);
                neighbor.y = (Int32)(nPosition.y + gridIndex.y);
                neighbor.z = (Int32)(nPosition.z + gridIndex.z);
                TangoSingleDynamicMesh nSingleMesh;
                if (m_meshes.TryGetValue(neighbor, out nSingleMesh))
                {
                    // NOTE(review): neighbors that have no mesh yet are treated as
                    // observed (only existing meshes can fail the check) — confirm
                    // this is the intended semantics.
                    if (nSingleMesh.m_observations < NUM_OBSERVATIONS_TO_COMPLETE)
                    {
                        neighborsObserved = false;
                        break;
                    }
                }
            }
            // Complete using this configurations of the neighbors with sufficient observations.
            if (neighborsObserved)
            {
                // Add the grid index to the completed list, so it will be skipped during next mesh update.
                singleMesh.m_completed = true;
                return;
            }
        }
    }
/// <summary>
/// Initialize the sets of grid index neighbor configurations to check when performing selective meshing.
/// </summary>
private void _InitGridIndexConfigs()
{
    // Each configuration below lists the neighboring grid indices (as offsets from
    // the current index) that must all be sufficiently observed for the current
    // index to be considered complete. Offsets are later rotated around z to cover
    // the other orientations.
    // Grid indices use the Right Hand Local Level coordinate system. The diagrams below show which grid
    // indices are checked in each configuration for sufficient observations. The following layouts show
    // each config represented as 3x3 Vector3 matrices, separated into z-planes.
    // (-1,1,0) (0,1,0) (1,1,0)
    // (-1,0,0) (0,0,0) (1,0,0)
    // (-1,-1,0) (0,-1,0) (1,-1,0)
    // 'x' is a grid index that is checked, '-' is not checked.
    // Wall-Corner-Floor configuration.
    // z = 0 z = 1
    // - x x - x -
    // x x x x x -
    // x x x - - -
    Vector3[] wallCornerFloor = new Vector3[]
    {
        new Vector3(0, 1, 0),
        new Vector3(1, 1, 0),
        new Vector3(-1, 0, 0),
        new Vector3(0, 0, 0),
        new Vector3(1, 0, 0),
        new Vector3(-1, -1, 0),
        new Vector3(0, -1, 0),
        new Vector3(1, -1, 0),
        new Vector3(0, 1, 1),
        new Vector3(-1, 0, 1),
        new Vector3(0, 0, 1),
    };
    // Wall-Floor configuration.
    // z = 0 z = 1
    // - - - - - -
    // x x x x x x
    // x x x - - -
    Vector3[] wallFloor = new Vector3[]
    {
        new Vector3(-1, 0, 0),
        new Vector3(0, 0, 0),
        new Vector3(1, 0, 0),
        new Vector3(-1, -1, 0),
        new Vector3(0, -1, 0),
        new Vector3(1, -1, 0),
        new Vector3(-1, 0, 1),
        new Vector3(0, 0, 1),
        new Vector3(1, 0, 1),
    };
    // Wall-Corner configuration.
    // z = -1 z = 0 z = 1
    // - x - - x - - x -
    // x x - x x - x x -
    // - - - - - - - - -
    Vector3[] wallCorner = new Vector3[]
    {
        new Vector3(0, 1, -1),
        new Vector3(-1, 0, -1),
        new Vector3(0, 0, -1),
        new Vector3(0, 1, 0),
        new Vector3(-1, 0, 0),
        new Vector3(0, 0, 0),
        new Vector3(0, 1, 1),
        new Vector3(-1, 0, 1),
        new Vector3(0, 0, 1),
    };
    // Wall configuration.
    // z = -1 z = 0 z = 1
    // - - - - - - - - -
    // x x x x x x x x x
    // - - - - - - - - -
    Vector3[] wall = new Vector3[]
    {
        new Vector3(-1, 0, 0),
        new Vector3(0, 0, 0),
        new Vector3(1, 0, 0),
        new Vector3(-1, 0, 1),
        new Vector3(0, 0, 1),
        new Vector3(1, 0, 1),
        new Vector3(-1, 0, -1),
        new Vector3(0, 0, -1),
        new Vector3(1, 0, -1),
    };
    // Floor configuration.
    // z = 0
    // x x x
    // x x x
    // x x x
    Vector3[] floor = new Vector3[]
    {
        new Vector3(-1, 1, 0),
        new Vector3(0, 1, 0),
        new Vector3(1, 1, 0),
        new Vector3(-1, 0, 0),
        new Vector3(0, 0, 0),
        new Vector3(1, 0, 0),
        new Vector3(-1, -1, 0),
        new Vector3(0, -1, 0),
        new Vector3(1, -1, 0),
    };
    // Rotate each configuration for different orientations and add to the list of configs to check for completeness.
    // Note: wall only needs 0/90 degrees (180/270 are mirror-equivalent) and floor
    // is rotation-invariant, hence 4 + 4 + 4 + 2 + 1 = 15 entries.
    m_gridIndexConfigs = new Vector3[15][];
    m_gridIndexConfigs[0] = wallCornerFloor;
    m_gridIndexConfigs[1] = _GetRotatedGridIndexConfig(wallCornerFloor, 90);
    m_gridIndexConfigs[2] = _GetRotatedGridIndexConfig(wallCornerFloor, 180);
    m_gridIndexConfigs[3] = _GetRotatedGridIndexConfig(wallCornerFloor, 270);
    m_gridIndexConfigs[4] = wallFloor;
    m_gridIndexConfigs[5] = _GetRotatedGridIndexConfig(wallFloor, 90);
    m_gridIndexConfigs[6] = _GetRotatedGridIndexConfig(wallFloor, 180);
    m_gridIndexConfigs[7] = _GetRotatedGridIndexConfig(wallFloor, 270);
    m_gridIndexConfigs[8] = wallCorner;
    m_gridIndexConfigs[9] = _GetRotatedGridIndexConfig(wallCorner, 90);
    m_gridIndexConfigs[10] = _GetRotatedGridIndexConfig(wallCorner, 180);
    m_gridIndexConfigs[11] = _GetRotatedGridIndexConfig(wallCorner, 270);
    m_gridIndexConfigs[12] = wall;
    m_gridIndexConfigs[13] = _GetRotatedGridIndexConfig(wall, 90);
    m_gridIndexConfigs[14] = floor;
}
/// <summary>
/// Helper function to get a copy of the grid index config after it has been rotated around the z-axis.
/// </summary>
/// <returns>The rotated grid index config.</returns>
/// <param name="config">List of grid indices in the config.</param>
/// <param name="zRotation">Amount of rotation to apply around the z-axis.</param>
private Vector3[] _GetRotatedGridIndexConfig(Vector3[] config, float zRotation)
{
    // The rotation is loop-invariant: build the quaternion once instead of
    // reconstructing it for every offset in the config.
    Quaternion rotation = Quaternion.Euler(0, 0, zRotation);

    Vector3[] finalConfig = new Vector3[config.Length];
    for (int j = 0; j < config.Length; j++)
    {
        finalConfig[j] = rotation * config[j];
    }

    return finalConfig;
}
/// <summary>
/// Set the grid index as having been observed from the given direction.
/// </summary>
/// <param name="gridIndex">Grid index to observe.</param>
private void _ViewGridIndex(Tango3DReconstruction.GridIndex gridIndex)
{
    // This update may occur somewhat later than the actual time of the camera pose observation.
    // Only the horizontal component of the camera's forward vector matters here.
    Vector3 forward = Camera.main.transform.forward;
    Vector3 flatForward = new Vector3(forward.x, 0.0f, forward.z).normalized;

    // The four horizontal cardinal directions, one per bit in m_directions.
    Vector3[] cardinals = new Vector3[]
    {
        new Vector3(0, 0, 1),
        new Vector3(0, 0, -1),
        new Vector3(1, 0, 0),
        new Vector3(-1, 0, 0)
    };

    for (int i = 0; i < 4; i++)
    {
        // If the camera is facing one of 4 directions (every 90 degrees) within a 45 degree spread,
        // set that direction as seen.
        if (Vector3.Dot(flatForward, cardinals[i]) > m_minDirectionCheck)
        {
            // OR the new direction bit into the mesh's seen-direction mask.
            byte directionBit = (byte)(1 << i);
            m_meshes[gridIndex].m_directions = (byte)(m_meshes[gridIndex].m_directions | directionBit);
            break;
        }
    }
}
/// <summary>
/// Update the bounding box.
/// </summary>
/// <param name="gridIndex">Grid index to include in bounds.</param>
private void _UpdateBounds(Tango3DReconstruction.GridIndex gridIndex)
{
    // One grid index spans 16 voxels at the configured 3D reconstruction resolution.
    float gridIndexSize = m_tangoApplication.m_3drResolutionMeters * 16;
    Vector3 pointToCompare = gridIndexSize * new Vector3(gridIndex.x, gridIndex.y, gridIndex.z);

    // Component-wise Vector3.Min/Max replaces the six hand-rolled comparisons
    // with identical results.
    m_bounds.SetMinMax(
        Vector3.Min(m_bounds.min, pointToCompare),
        Vector3.Max(m_bounds.max, pointToCompare));
}
/// <summary>
/// Component for a dynamic, resizable mesh.
///
/// This caches the arrays for vertices, normals, colors, etc. to avoid putting extra pressure on the
/// garbage collector.
/// </summary>
private class TangoSingleDynamicMesh : MonoBehaviour
{
    /// <summary>
    /// The single mesh.
    /// </summary>
    public Mesh m_mesh = null;
    /// <summary>
    /// If set, the mesh collider for this mesh.
    /// </summary>
    public MeshCollider m_meshCollider = null;
    /// <summary>
    /// If true, then this should grow all arrays at some point in the future.
    /// </summary>
    public bool m_needsToGrow;
    /// <summary>
    /// Cache for <c>Mesh.vertices</c>.
    /// </summary>
    [HideInInspector]
    public Vector3[] m_vertices;
    /// <summary>
    /// Cache for <c>Mesh.uv</c>.
    /// </summary>
    [HideInInspector]
    public Vector2[] m_uv;
    /// <summary>
    /// Cache for <c>Mesh.colors</c>.
    /// </summary>
    [HideInInspector]
    public Color32[] m_colors;
    /// <summary>
    /// Cache to <c>Mesh.triangles</c>.
    /// </summary>
    [HideInInspector]
    public int[] m_triangles;
    /// <summary>
    /// Set as <c>true</c> if the grid index is considered complete.
    /// </summary>
    [NonSerialized]
    public bool m_completed;
    /// <summary>
    /// The number of times the grid index has been observed.
    /// </summary>
    // Starts at 1 -- presumably creation counts as the first observation; confirm
    // against the site that instantiates this component.
    [NonSerialized]
    public int m_observations = 1;
    /// <summary>
    /// The directions from which that the grid index has been observed.
    /// </summary>
    // Bitmask: bit i corresponds to the i-th horizontal cardinal direction set by
    // _ViewGridIndex; compared against DIRECTIONS_COMPLETE elsewhere.
    [NonSerialized]
    public byte m_directions;
}
}
| mit |
zurcoin/zurcoin | src/qt/locale/bitcoin_tr_TR.ts | 4075 | <TS language="tr_TR" version="2.1">
<context>
<name>AddressBookPage</name>
<message>
<source>Right-click to edit address or label</source>
<translation>Adresi veya etiketi düzenlemek için sağ tıklayın</translation>
</message>
<message>
<source>Create a new address</source>
<translation>Yeni adres oluştur</translation>
</message>
<message>
<source>&New</source>
<translation>&Yeni</translation>
</message>
<message>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Seçili adresi panoya kopyala</translation>
</message>
<message>
<source>&Copy</source>
<translation>&Kopyala</translation>
</message>
<message>
<source>C&lose</source>
<translation>K&apat</translation>
</message>
<message>
<source>Delete the currently selected address from the list</source>
<translation>Seçili adresi listeden sil</translation>
</message>
<message>
<source>Export the data in the current tab to a file</source>
<translation>Seçili sekmedeki veriyi dosya olarak dışa aktar</translation>
</message>
<message>
<source>&Export</source>
<translation>&Dışa Aktar</translation>
</message>
<message>
<source>&Delete</source>
<translation>&Sil</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
</context>
<context>
<name>AskPassphraseDialog</name>
</context>
<context>
<name>BanTableModel</name>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<source>&Receiving addresses...</source>
<translation>Alış adresleri</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<source>&Label</source>
<translation>Etiket</translation>
</message>
<message>
<source>&Address</source>
<translation>Adres</translation>
</message>
</context>
<context>
<name>FreespaceChecker</name>
</context>
<context>
<name>HelpMessageDialog</name>
</context>
<context>
<name>Intro</name>
</context>
<context>
<name>OpenURIDialog</name>
</context>
<context>
<name>OptionsDialog</name>
</context>
<context>
<name>OverviewPage</name>
</context>
<context>
<name>PaymentServer</name>
</context>
<context>
<name>PeerTableModel</name>
</context>
<context>
<name>QObject</name>
</context>
<context>
<name>QRImageWidget</name>
</context>
<context>
<name>RPCConsole</name>
</context>
<context>
<name>ReceiveCoinsDialog</name>
</context>
<context>
<name>ReceiveRequestDialog</name>
<message>
<source>Copy &Address</source>
<translation>&Adresi Kopyala</translation>
</message>
</context>
<context>
<name>RecentRequestsTableModel</name>
</context>
<context>
<name>SendCoinsDialog</name>
</context>
<context>
<name>SendCoinsEntry</name>
</context>
<context>
<name>SendConfirmationDialog</name>
</context>
<context>
<name>ShutdownWindow</name>
</context>
<context>
<name>SignVerifyMessageDialog</name>
</context>
<context>
<name>SplashScreen</name>
</context>
<context>
<name>TrafficGraphWidget</name>
</context>
<context>
<name>TransactionDesc</name>
</context>
<context>
<name>TransactionDescDialog</name>
</context>
<context>
<name>TransactionTableModel</name>
</context>
<context>
<name>TransactionView</name>
</context>
<context>
<name>UnitDisplayStatusBarControl</name>
</context>
<context>
<name>WalletFrame</name>
</context>
<context>
<name>WalletModel</name>
</context>
<context>
<name>WalletView</name>
</context>
<context>
<name>zurcoin-core</name>
</context>
</TS> | mit |
robocon/maxsite | java.js | 756 | function checkAll(field)
{
for(i = 0; i < field.elements.length; i++)
field[i].checked = true ;
}
// Set every element of the given form to unchecked.
// Fix: declare the loop index with `var` so it is not an implicit global.
function uncheckAll(field)
{
	for (var i = 0; i < field.elements.length; i++)
		field[i].checked = false;
}
// Ask the user to confirm `text`; on acceptance, navigate the page to `link`.
function Confirm(link,text)
{
	if (confirm(text)) {
		window.location = link;
	}
}
// Delete-confirmation guard for a form: requires at least one checked checkbox,
// then asks the user to confirm. Returns true only when deletion may proceed.
function delConfirm(obj){
	// Scan for any checked checkbox; stop at the first hit.
	var anyChecked = false;
	for (var i = 0; i < obj.elements.length; i++) {
		if (obj[i].type == 'checkbox' && obj[i].checked == true) {
			anyChecked = true;
			break;
		}
	}
	if (anyChecked == false) {
		alert('กรุณาเลือกข้อมูลที่ต้องการลบ.');
		return false;
	}
	if (confirm('คุณมั่นใจในการลบข้อมูล?')) {
		return true;
	}
	return false;
}
sunilson/My-Ticker-Android | Schedule Server/node_modules/algoliasearch/src/Index.js | 32899 | var inherits = require('inherits');
var IndexCore = require('./IndexCore.js');
var deprecate = require('./deprecate.js');
var deprecatedMessage = require('./deprecatedMessage.js');
var exitPromise = require('./exitPromise.js');
var errors = require('./errors');
// One-shot warning for callers still using the old `forwardToSlaves` option
// name instead of the renamed `forwardToReplicas`.
var deprecateForwardToSlaves = deprecate(
  function() {},
  deprecatedMessage('forwardToSlaves', 'forwardToReplicas')
);
module.exports = Index;
// Full index client: extends IndexCore (read/search) with write and admin
// operations. Constructed by the client, not directly by applications.
function Index() {
  IndexCore.apply(this, arguments);
}
inherits(Index, IndexCore);
/*
* Add an object in this index
*
* @param content contains the javascript object to add inside the index
* @param objectID (optional) an objectID you want to attribute to this object
* (if the attribute already exist the old object will be overwrite)
* @param callback (optional) the result callback called with two arguments:
* error: null or Error('message')
* content: the server answer that contains 3 elements: createAt, taskId and objectID
*/
// Add (or overwrite) a single object in this index.
// objectID is optional; when omitted the API generates one.
Index.prototype.addObject = function(content, objectID, callback) {
  // Allow addObject(content, callback): shift the callback into place.
  if (arguments.length === 1 || typeof objectID === 'function') {
    callback = objectID;
    objectID = undefined;
  }

  var hasObjectID = objectID !== undefined;
  var url = '/1/indexes/' + encodeURIComponent(this.indexName);
  if (hasObjectID) {
    url += '/' + encodeURIComponent(objectID);
  }

  return this.as._jsonRequest({
    // Known objectID: PUT updates or creates that exact object.
    // No objectID: POST lets the API generate one.
    method: hasObjectID ? 'PUT' : 'POST',
    url: url,
    body: content,
    hostType: 'write',
    callback: callback
  });
};
/*
* Add several objects
*
* @param objects contains an array of objects to add
* @param callback (optional) the result callback called with two arguments:
* error: null or Error('message')
* content: the server answer that updateAt and taskID
*/
// Add several objects in one batch request.
Index.prototype.addObjects = function(objects, callback) {
  var isArray = require('isarray');
  var usage = 'Usage: index.addObjects(arrayOfObjects[, callback])';

  if (!isArray(objects)) {
    throw new Error(usage);
  }

  // One `addObject` batch action per input object.
  var requests = [];
  for (var i = 0; i < objects.length; i++) {
    requests.push({
      action: 'addObject',
      body: objects[i]
    });
  }

  return this.as._jsonRequest({
    method: 'POST',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/batch',
    body: {requests: requests},
    hostType: 'write',
    callback: callback
  });
};
/*
* Update partially an object (only update attributes passed in argument)
*
* @param partialObject contains the javascript attributes to override, the
* object must contains an objectID attribute
* @param createIfNotExists (optional) if false, avoid an automatic creation of the object
* @param callback (optional) the result callback called with two arguments:
* error: null or Error('message')
* content: the server answer that contains 3 elements: createAt, taskId and objectID
*/
// Partially update one object: only the attributes present in `partialObject`
// are overwritten; `partialObject.objectID` selects the target.
Index.prototype.partialUpdateObject = function(partialObject, createIfNotExists, callback) {
  // createIfNotExists is optional; detect a callback passed in its place.
  if (arguments.length === 1 || typeof createIfNotExists === 'function') {
    callback = createIfNotExists;
    createIfNotExists = undefined;
  }

  var url = '/1/indexes/' + encodeURIComponent(this.indexName) +
    '/' + encodeURIComponent(partialObject.objectID) + '/partial';

  // Only an explicit `false` is forwarded; the API's default is to create
  // missing objects.
  if (createIfNotExists === false) {
    url += '?createIfNotExists=false';
  }

  return this.as._jsonRequest({
    method: 'POST',
    url: url,
    body: partialObject,
    hostType: 'write',
    callback: callback
  });
};
/*
* Partially Override the content of several objects
*
* @param objects contains an array of objects to update (each object must contains a objectID attribute)
* @param callback (optional) the result callback called with two arguments:
* error: null or Error('message')
* content: the server answer that updateAt and taskID
*/
// Partially update several objects in one batch request.
// Each object must carry an objectID attribute.
Index.prototype.partialUpdateObjects = function(objects, callback) {
  var isArray = require('isarray');
  var usage = 'Usage: index.partialUpdateObjects(arrayOfObjects[, callback])';

  if (!isArray(objects)) {
    throw new Error(usage);
  }

  // One `partialUpdateObject` batch action per input object.
  var requests = [];
  for (var i = 0; i < objects.length; i++) {
    requests.push({
      action: 'partialUpdateObject',
      objectID: objects[i].objectID,
      body: objects[i]
    });
  }

  return this.as._jsonRequest({
    method: 'POST',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/batch',
    body: {requests: requests},
    hostType: 'write',
    callback: callback
  });
};
/*
* Override the content of object
*
* @param object contains the javascript object to save, the object must contains an objectID attribute
* @param callback (optional) the result callback called with two arguments:
* error: null or Error('message')
* content: the server answer that updateAt and taskID
*/
// Overwrite one object entirely; `object.objectID` selects the target.
Index.prototype.saveObject = function(object, callback) {
  var url = '/1/indexes/' + encodeURIComponent(this.indexName) +
    '/' + encodeURIComponent(object.objectID);

  return this.as._jsonRequest({
    method: 'PUT',
    url: url,
    body: object,
    hostType: 'write',
    callback: callback
  });
};
/*
* Override the content of several objects
*
* @param objects contains an array of objects to update (each object must contains a objectID attribute)
* @param callback (optional) the result callback called with two arguments:
* error: null or Error('message')
* content: the server answer that updateAt and taskID
*/
// Overwrite several objects in one batch request.
// Each object must carry an objectID attribute.
Index.prototype.saveObjects = function(objects, callback) {
  var isArray = require('isarray');
  var usage = 'Usage: index.saveObjects(arrayOfObjects[, callback])';

  if (!isArray(objects)) {
    throw new Error(usage);
  }

  // One `updateObject` batch action per input object.
  var requests = [];
  for (var i = 0; i < objects.length; i++) {
    requests.push({
      action: 'updateObject',
      objectID: objects[i].objectID,
      body: objects[i]
    });
  }

  return this.as._jsonRequest({
    method: 'POST',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/batch',
    body: {requests: requests},
    hostType: 'write',
    callback: callback
  });
};
/*
* Delete an object from the index
*
* @param objectID the unique identifier of object to delete
* @param callback (optional) the result callback called with two arguments:
* error: null or Error('message')
* content: the server answer that contains 3 elements: createAt, taskId and objectID
*/
// Delete one object by objectID.
// Rejects (or calls back with an error) when no usable objectID is supplied.
Index.prototype.deleteObject = function(objectID, callback) {
  // A callback in first position, or an id that is neither string nor number,
  // means the caller forgot the objectID.
  var badID = typeof objectID !== 'string' && typeof objectID !== 'number';
  if (typeof objectID === 'function' || badID) {
    var err = new errors.AlgoliaSearchError('Cannot delete an object without an objectID');
    callback = objectID;
    if (typeof callback === 'function') {
      return callback(err);
    }
    return this.as._promise.reject(err);
  }

  return this.as._jsonRequest({
    method: 'DELETE',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/' + encodeURIComponent(objectID),
    hostType: 'write',
    callback: callback
  });
};
/*
* Delete several objects from an index
*
* @param objectIDs contains an array of objectID to delete
* @param callback (optional) the result callback called with two arguments:
* error: null or Error('message')
* content: the server answer that contains 3 elements: createAt, taskId and objectID
*/
// Delete several objects in one batch request.
Index.prototype.deleteObjects = function(objectIDs, callback) {
  var isArray = require('isarray');
  var map = require('./map.js');
  var usage = 'Usage: index.deleteObjects(arrayOfObjectIDs[, callback])';

  if (!isArray(objectIDs)) {
    throw new Error(usage);
  }

  // One `deleteObject` batch action per objectID.
  var requests = map(objectIDs, function toRequest(objectID) {
    return {
      action: 'deleteObject',
      objectID: objectID,
      body: {
        objectID: objectID
      }
    };
  });

  return this.as._jsonRequest({
    method: 'POST',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/batch',
    body: {requests: requests},
    hostType: 'write',
    callback: callback
  });
};
/*
* Delete all objects matching a query
*
* @param query the query string
* @param params the optional query parameters
* @param callback (optional) the result callback called with one argument
* error: null or Error('message')
*/
// Delete every object matching `query`: searches for up to 1000 objectIDs,
// deletes them, waits for the deletion task, then recurses until the search
// returns no hits. Recursion depth equals ceil(nbMatches / 1000).
Index.prototype.deleteByQuery = function(query, params, callback) {
  var clone = require('./clone.js');
  var map = require('./map.js');
  var indexObj = this;
  var client = indexObj.as;
  if (arguments.length === 1 || typeof params === 'function') {
    callback = params;
    params = {};
  } else {
    // Clone so the forced parameters below never mutate the caller's object.
    params = clone(params);
  }
  // Force the search to return only objectIDs, in large pages, without
  // distinct-deduplication (every matching record must be deleted).
  params.attributesToRetrieve = 'objectID';
  params.hitsPerPage = 1000;
  params.distinct = false;
  // when deleting, we should never use cache to get the
  // search results
  this.clearCache();
  // there's a problem in how we use the promise chain,
  // see how waitTask is done
  var promise = this
    .search(query, params)
    .then(stopOrDelete);
  function stopOrDelete(searchContent) {
    // stop here
    if (searchContent.nbHits === 0) {
      // return indexObj.as._request.resolve();
      return searchContent;
    }
    // continue and do a recursive call
    var objectIDs = map(searchContent.hits, function getObjectID(object) {
      return object.objectID;
    });
    return indexObj
      .deleteObjects(objectIDs)
      .then(waitTask)
      .then(doDeleteByQuery);
  }
  // Wait until the server has applied the deletion before searching again,
  // otherwise the next search could return already-deleted objects.
  function waitTask(deleteObjectsContent) {
    return indexObj.waitTask(deleteObjectsContent.taskID);
  }
  function doDeleteByQuery() {
    return indexObj.deleteByQuery(query, params);
  }
  // Promise mode when no callback was given.
  if (!callback) {
    return promise;
  }
  // Callback mode: surface success/failure outside the promise chain so
  // exceptions thrown by the callback are not swallowed as rejections.
  promise.then(success, failure);
  function success() {
    exitPromise(function exit() {
      callback(null);
    }, client._setTimeout || setTimeout);
  }
  function failure(err) {
    exitPromise(function exit() {
      callback(err);
    }, client._setTimeout || setTimeout);
  }
};
/*
* Browse all content from an index using events. Basically this will do
* .browse() -> .browseFrom -> .browseFrom -> .. until all the results are returned
*
* @param {string} query - The full text query
* @param {Object} [queryParameters] - Any search query parameter
* @return {EventEmitter}
* @example
* var browser = index.browseAll('cool songs', {
* tagFilters: 'public,comments',
* hitsPerPage: 500
* });
*
* browser.on('result', function resultCallback(content) {
* console.log(content.hits);
* });
*
* // if any error occurs, you get it
* browser.on('error', function(err) {
* throw err;
* });
*
* // when you have browsed the whole index, you get this event
* browser.on('end', function() {
* console.log('finished');
* });
*
* // at any point if you want to stop the browsing process, you can stop it manually
* // otherwise it will go on and on
* browser.stop();
*
* @see {@link https://www.algolia.com/doc/rest_api#Browse|Algolia REST API Documentation}
*/
// Browse the entire index, emitting 'result' events page by page on the
// returned IndexBrowser until 'end' (no more cursor) or 'error'.
// Calling browser.stop() halts the loop after the in-flight request.
Index.prototype.browseAll = function(query, queryParameters) {
  // Allow browseAll(queryParameters) with no query string.
  if (typeof query === 'object') {
    queryParameters = query;
    query = undefined;
  }
  var merge = require('./merge.js');
  var IndexBrowser = require('./IndexBrowser');
  var browser = new IndexBrowser();
  var client = this.as;
  var index = this;
  var params = client._getSearchParams(
    merge({}, queryParameters || {}, {
      query: query
    }), ''
  );
  // start browsing
  browseLoop();
  // One iteration = one /browse request; recurses via browseCallback while the
  // server keeps returning a cursor.
  function browseLoop(cursor) {
    if (browser._stopped) {
      return;
    }
    var body;
    if (cursor !== undefined) {
      // Subsequent pages are addressed by the opaque server cursor only.
      body = {
        cursor: cursor
      };
    } else {
      // First page carries the serialized search parameters.
      body = {
        params: params
      };
    }
    client._jsonRequest({
      method: 'POST',
      url: '/1/indexes/' + encodeURIComponent(index.indexName) + '/browse',
      hostType: 'read',
      body: body,
      callback: browseCallback
    });
  }
  function browseCallback(err, content) {
    // Re-check stop flag: stop() may have been called while the request was in flight.
    if (browser._stopped) {
      return;
    }
    if (err) {
      browser._error(err);
      return;
    }
    browser._result(content);
    // no cursor means we are finished browsing
    if (content.cursor === undefined) {
      browser._end();
      return;
    }
    browseLoop(content.cursor);
  }
  return browser;
};
/*
* Get a Typeahead.js adapter
* @param searchParams contains an object with query parameters (see search for details)
*/
// Build a Typeahead.js-compatible source function bound to this index.
Index.prototype.ttAdapter = function(params) {
  var self = this;
  return function ttAdapter(query, syncCb, asyncCb) {
    // typeahead >= 0.11 passes (query, syncCb, asyncCb); older versions
    // pass (query, cb) -- pick whichever callback we were given.
    var cb = typeof asyncCb === 'function' ? asyncCb : syncCb;

    self.search(query, params, function searchDone(err, content) {
      if (err) {
        cb(err);
        return;
      }
      cb(content.hits);
    });
  };
};
/*
* Wait the publication of a task on the server.
* All server task are asynchronous and you can check with this method that the task is published.
*
* @param taskID the id of the task returned by server
* @param callback the result callback with with two arguments:
* error: null or Error('message')
* content: the server answer that contains the list of results
*/
// Poll the task endpoint until the server reports the task `published`,
// backing off quadratically (100ms * loop^2, capped at 5s) between polls.
Index.prototype.waitTask = function(taskID, callback) {
  // wait minimum 100ms before retrying
  var baseDelay = 100;
  // wait maximum 5s before retrying
  var maxDelay = 5000;
  var loop = 0;
  // waitTask() must be handled differently from other methods,
  // it's a recursive method using a timeout
  var indexObj = this;
  var client = indexObj.as;
  var promise = retryLoop();
  function retryLoop() {
    return client._jsonRequest({
      method: 'GET',
      hostType: 'read',
      url: '/1/indexes/' + encodeURIComponent(indexObj.indexName) + '/task/' + taskID
    }).then(function success(content) {
      // Quadratic backoff: 100ms, 400ms, 900ms, ... capped at maxDelay.
      loop++;
      var delay = baseDelay * loop * loop;
      if (delay > maxDelay) {
        delay = maxDelay;
      }
      if (content.status !== 'published') {
        return client._promise.delay(delay).then(retryLoop);
      }
      // Task is published: resolve with the final task payload.
      return content;
    });
  }
  // Promise mode when no callback was given.
  if (!callback) {
    return promise;
  }
  // Callback mode: exitPromise escapes the promise chain so exceptions thrown
  // inside the callback are not swallowed as rejections.
  promise.then(successCb, failureCb);
  function successCb(content) {
    exitPromise(function exit() {
      callback(null, content);
    }, client._setTimeout || setTimeout);
  }
  function failureCb(err) {
    exitPromise(function exit() {
      callback(err);
    }, client._setTimeout || setTimeout);
  }
};
/*
* This function deletes the index content. Settings and index specific API keys are kept untouched.
*
* @param callback (optional) the result callback called with two arguments
* error: null or Error('message')
* content: the settings object or the error message if a failure occured
*/
// Remove all objects from the index. Settings and index-specific API keys
// are left untouched.
Index.prototype.clearIndex = function(callback) {
  var url = '/1/indexes/' + encodeURIComponent(this.indexName) + '/clear';

  return this.as._jsonRequest({
    method: 'POST',
    url: url,
    hostType: 'write',
    callback: callback
  });
};
/*
* Get settings of this index
*
* @param callback (optional) the result callback called with two arguments
* error: null or Error('message')
* content: the settings object or the error message if a failure occured
*/
// Fetch the index settings.
Index.prototype.getSettings = function(callback) {
  // getVersion=2 selects the newer settings response format -- TODO confirm
  // against the REST API docs.
  var url = '/1/indexes/' + encodeURIComponent(this.indexName) + '/settings?getVersion=2';

  return this.as._jsonRequest({
    method: 'GET',
    url: url,
    hostType: 'read',
    callback: callback
  });
};
// Search the index's synonyms. Both arguments are optional:
// searchSynonyms(), searchSynonyms(params), searchSynonyms(callback),
// searchSynonyms(params, callback) are all accepted.
Index.prototype.searchSynonyms = function(params, callback) {
  if (typeof params === 'function') {
    callback = params;
    params = {};
  }
  if (params === undefined) {
    params = {};
  }

  return this.as._jsonRequest({
    method: 'POST',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/synonyms/search',
    body: params,
    hostType: 'read',
    callback: callback
  });
};
// Create or replace one synonym record (keyed by synonym.objectID).
Index.prototype.saveSynonym = function(synonym, opts, callback) {
  if (typeof opts === 'function') {
    callback = opts;
    opts = {};
  }
  if (opts === undefined) {
    opts = {};
  }

  // Warn about the deprecated option name, but keep honoring it.
  if (opts.forwardToSlaves !== undefined) deprecateForwardToSlaves();
  var forwardToReplicas = (opts.forwardToSlaves || opts.forwardToReplicas) ? 'true' : 'false';

  return this.as._jsonRequest({
    method: 'PUT',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/synonyms/' + encodeURIComponent(synonym.objectID) +
      '?forwardToReplicas=' + forwardToReplicas,
    body: synonym,
    hostType: 'write',
    callback: callback
  });
};
// Fetch one synonym record by objectID.
Index.prototype.getSynonym = function(objectID, callback) {
  var url = '/1/indexes/' + encodeURIComponent(this.indexName) +
    '/synonyms/' + encodeURIComponent(objectID);

  return this.as._jsonRequest({
    method: 'GET',
    url: url,
    hostType: 'read',
    callback: callback
  });
};
// Delete one synonym record by objectID.
Index.prototype.deleteSynonym = function(objectID, opts, callback) {
  if (typeof opts === 'function') {
    callback = opts;
    opts = {};
  }
  if (opts === undefined) {
    opts = {};
  }

  // Warn about the deprecated option name, but keep honoring it.
  if (opts.forwardToSlaves !== undefined) deprecateForwardToSlaves();
  var forwardToReplicas = (opts.forwardToSlaves || opts.forwardToReplicas) ? 'true' : 'false';

  return this.as._jsonRequest({
    method: 'DELETE',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/synonyms/' + encodeURIComponent(objectID) +
      '?forwardToReplicas=' + forwardToReplicas,
    hostType: 'write',
    callback: callback
  });
};
// Remove every synonym record from the index.
Index.prototype.clearSynonyms = function(opts, callback) {
  if (typeof opts === 'function') {
    callback = opts;
    opts = {};
  }
  if (opts === undefined) {
    opts = {};
  }

  // Warn about the deprecated option name, but keep honoring it.
  if (opts.forwardToSlaves !== undefined) deprecateForwardToSlaves();
  var forwardToReplicas = (opts.forwardToSlaves || opts.forwardToReplicas) ? 'true' : 'false';

  return this.as._jsonRequest({
    method: 'POST',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/synonyms/clear' +
      '?forwardToReplicas=' + forwardToReplicas,
    hostType: 'write',
    callback: callback
  });
};
// Upload several synonym records at once; optionally replace all existing ones.
Index.prototype.batchSynonyms = function(synonyms, opts, callback) {
  if (typeof opts === 'function') {
    callback = opts;
    opts = {};
  }
  if (opts === undefined) {
    opts = {};
  }

  // Warn about the deprecated option name, but keep honoring it.
  if (opts.forwardToSlaves !== undefined) deprecateForwardToSlaves();
  var forwardToReplicas = (opts.forwardToSlaves || opts.forwardToReplicas) ? 'true' : 'false';
  var replaceExisting = opts.replaceExistingSynonyms ? 'true' : 'false';

  return this.as._jsonRequest({
    method: 'POST',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/synonyms/batch' +
      '?forwardToReplicas=' + forwardToReplicas +
      '&replaceExistingSynonyms=' + replaceExisting,
    hostType: 'write',
    body: synonyms,
    callback: callback
  });
};
/*
* Set settings for this index
*
* @param settigns the settings object that can contains :
* - minWordSizefor1Typo: (integer) the minimum number of characters to accept one typo (default = 3).
* - minWordSizefor2Typos: (integer) the minimum number of characters to accept two typos (default = 7).
* - hitsPerPage: (integer) the number of hits per page (default = 10).
* - attributesToRetrieve: (array of strings) default list of attributes to retrieve in objects.
* If set to null, all attributes are retrieved.
* - attributesToHighlight: (array of strings) default list of attributes to highlight.
* If set to null, all indexed attributes are highlighted.
* - attributesToSnippet**: (array of strings) default list of attributes to snippet alongside the number
* of words to return (syntax is attributeName:nbWords).
* By default no snippet is computed. If set to null, no snippet is computed.
* - attributesToIndex: (array of strings) the list of fields you want to index.
* If set to null, all textual and numerical attributes of your objects are indexed,
* but you should update it to get optimal results.
* This parameter has two important uses:
* - Limit the attributes to index: For example if you store a binary image in base64,
* you want to store it and be able to
* retrieve it but you don't want to search in the base64 string.
* - Control part of the ranking*: (see the ranking parameter for full explanation)
* Matches in attributes at the beginning of
* the list will be considered more important than matches in attributes further down the list.
* In one attribute, matching text at the beginning of the attribute will be
* considered more important than text after, you can disable
* this behavior if you add your attribute inside `unordered(AttributeName)`,
* for example attributesToIndex: ["title", "unordered(text)"].
* - attributesForFaceting: (array of strings) The list of fields you want to use for faceting.
* All strings in the attribute selected for faceting are extracted and added as a facet.
* If set to null, no attribute is used for faceting.
* - attributeForDistinct: (string) The attribute name used for the Distinct feature.
* This feature is similar to the SQL "distinct" keyword: when enabled
* in query with the distinct=1 parameter, all hits containing a duplicate
* value for this attribute are removed from results.
* For example, if the chosen attribute is show_name and several hits have
* the same value for show_name, then only the best one is kept and others are removed.
* - ranking: (array of strings) controls the way results are sorted.
* We have six available criteria:
* - typo: sort according to number of typos,
* - geo: sort according to decreassing distance when performing a geo-location based search,
* - proximity: sort according to the proximity of query words in hits,
* - attribute: sort according to the order of attributes defined by attributesToIndex,
* - exact:
* - if the user query contains one word: sort objects having an attribute
* that is exactly the query word before others.
* For example if you search for the "V" TV show, you want to find it
* with the "V" query and avoid to have all popular TV
* show starting by the v letter before it.
* - if the user query contains multiple words: sort according to the
* number of words that matched exactly (and not as a prefix).
* - custom: sort according to a user defined formula set in **customRanking** attribute.
* The standard order is ["typo", "geo", "proximity", "attribute", "exact", "custom"]
* - customRanking: (array of strings) lets you specify part of the ranking.
* The syntax of this condition is an array of strings containing attributes
* prefixed by asc (ascending order) or desc (descending order) operator.
* For example `"customRanking" => ["desc(population)", "asc(name)"]`
* - queryType: Select how the query words are interpreted, it can be one of the following value:
* - prefixAll: all query words are interpreted as prefixes,
* - prefixLast: only the last word is interpreted as a prefix (default behavior),
* - prefixNone: no query word is interpreted as a prefix. This option is not recommended.
* - highlightPreTag: (string) Specify the string that is inserted before
* the highlighted parts in the query result (default to "<em>").
* - highlightPostTag: (string) Specify the string that is inserted after
* the highlighted parts in the query result (default to "</em>").
* - optionalWords: (array of strings) Specify a list of words that should
* be considered as optional when found in the query.
* @param callback (optional) the result callback called with two arguments
* error: null or Error('message')
* content: the server answer or the error message if a failure occured
*/
// Apply new settings to this index (see the settings comment block above for
// the accepted keys). `opts.forwardToReplicas` propagates to replica indices.
Index.prototype.setSettings = function(settings, opts, callback) {
  if (arguments.length === 1 || typeof opts === 'function') {
    callback = opts;
    opts = {};
  }

  // Warn about the deprecated option name, but keep honoring it.
  if (opts.forwardToSlaves !== undefined) deprecateForwardToSlaves();
  var forwardToReplicas = (opts.forwardToSlaves || opts.forwardToReplicas) ? 'true' : 'false';

  var url = '/1/indexes/' + encodeURIComponent(this.indexName) +
    '/settings?forwardToReplicas=' + forwardToReplicas;

  return this.as._jsonRequest({
    method: 'PUT',
    url: url,
    hostType: 'write',
    body: settings,
    callback: callback
  });
};
/*
@deprecated see index.listApiKeys
*/
// Deprecated alias kept for backwards compatibility: delegates to
// listApiKeys() and logs a deprecation warning on first use.
Index.prototype.listUserKeys = deprecate(function(callback) {
  return this.listApiKeys(callback);
}, deprecatedMessage('index.listUserKeys()', 'index.listApiKeys()'));
/*
* List all existing API keys to this index
*
* @param callback the result callback called with two arguments
* error: null or Error('message')
* content: the server answer with API keys belonging to the index
*/
/*
 * List every API key attached to this index.
 * The callback (or returned promise) receives the server answer.
 */
Index.prototype.listApiKeys = function(callback) {
  var self = this;
  return self.as._jsonRequest({
    method: 'GET',
    url: '/1/indexes/' + encodeURIComponent(self.indexName) + '/keys',
    hostType: 'read',
    callback: callback
  });
};
/*
@deprecated see index.getApiKey
*/
// Deprecated alias kept for backwards compatibility: delegates to
// getApiKey() and logs a deprecation warning on first use.
Index.prototype.getUserKeyACL = deprecate(function(key, callback) {
  return this.getApiKey(key, callback);
}, deprecatedMessage('index.getUserKeyACL()', 'index.getApiKey()'));
/*
* Get an API key from this index
*
* @param key
* @param callback the result callback called with two arguments
* error: null or Error('message')
* content: the server answer with the right API key
*/
/*
 * Fetch the details of a single API key scoped to this index.
 */
Index.prototype.getApiKey = function(key, callback) {
  var self = this;
  return self.as._jsonRequest({
    method: 'GET',
    url: '/1/indexes/' + encodeURIComponent(self.indexName) + '/keys/' + key,
    hostType: 'read',
    callback: callback
  });
};
/*
@deprecated see index.deleteApiKey
*/
// Deprecated alias kept for backwards compatibility: delegates to
// deleteApiKey() and logs a deprecation warning on first use.
Index.prototype.deleteUserKey = deprecate(function(key, callback) {
  return this.deleteApiKey(key, callback);
}, deprecatedMessage('index.deleteUserKey()', 'index.deleteApiKey()'));
/*
* Delete an existing API key associated to this index
*
* @param key
* @param callback the result callback called with two arguments
* error: null or Error('message')
* content: the server answer with the deletion date
*/
/*
 * Remove an existing API key from this index.
 * The server answer contains the deletion date.
 */
Index.prototype.deleteApiKey = function(key, callback) {
  var self = this;
  return self.as._jsonRequest({
    method: 'DELETE',
    url: '/1/indexes/' + encodeURIComponent(self.indexName) + '/keys/' + key,
    hostType: 'write',
    callback: callback
  });
};
/*
@deprecated see index.addApiKey
*/
// Deprecated alias kept for backwards compatibility: delegates to
// addApiKey() and logs a deprecation warning on first use.
Index.prototype.addUserKey = deprecate(function(acls, params, callback) {
  return this.addApiKey(acls, params, callback);
}, deprecatedMessage('index.addUserKey()', 'index.addApiKey()'));
/*
* Add a new API key to this index
*
* @param {string[]} acls - The list of ACL for this key. Defined by an array of strings that
* can contains the following values:
* - search: allow to search (https and http)
* - addObject: allows to add/update an object in the index (https only)
* - deleteObject : allows to delete an existing object (https only)
* - deleteIndex : allows to delete index content (https only)
* - settings : allows to get index settings (https only)
* - editSettings : allows to change index settings (https only)
* @param {Object} [params] - Optionnal parameters to set for the key
* @param {number} params.validity - Number of seconds after which the key will
* be automatically removed (0 means no time limit for this key)
* @param {number} params.maxQueriesPerIPPerHour - Number of API calls allowed from an IP address per hour
* @param {number} params.maxHitsPerQuery - Number of hits this API key can retrieve in one call
* @param {string} params.description - A description for your key
* @param {string[]} params.referers - A list of authorized referers
* @param {Object} params.queryParameters - Force the key to use specific query parameters
* @param {Function} callback - The result callback called with two arguments
* error: null or Error('message')
* content: the server answer with the added API key
* @return {Promise|undefined} Returns a promise if no callback given
* @example
* index.addUserKey(['search'], {
* validity: 300,
* maxQueriesPerIPPerHour: 2000,
* maxHitsPerQuery: 3,
* description: 'Eat three fruits',
* referers: ['*.algolia.com'],
* queryParameters: {
* tagFilters: ['public'],
* }
* })
* @see {@link https://www.algolia.com/doc/rest_api#AddIndexKey|Algolia REST API Documentation}
*/
/*
 * Create a new API key on this index.
 *
 * `acls` must be an array of permission strings (search, addObject, ...);
 * `params` is an optional bag of key attributes (validity, description,
 * referers, queryParameters, ...). Supports addApiKey(acls, callback).
 */
Index.prototype.addApiKey = function(acls, params, callback) {
  var isArray = require('isarray');
  var usage = 'Usage: index.addApiKey(arrayOfAcls[, params, callback])';

  // The ACL list is mandatory and must be an array.
  if (!isArray(acls)) {
    throw new Error(usage);
  }

  // Shift arguments when the optional `params` bag was omitted.
  if (arguments.length === 1 || typeof params === 'function') {
    callback = params;
    params = null;
  }

  var requestBody = {
    acl: acls
  };
  if (params) {
    requestBody.validity = params.validity;
    requestBody.maxQueriesPerIPPerHour = params.maxQueriesPerIPPerHour;
    requestBody.maxHitsPerQuery = params.maxHitsPerQuery;
    requestBody.description = params.description;
    // queryParameters are serialized to the query-string form the API expects.
    if (params.queryParameters) {
      requestBody.queryParameters = this.as._getSearchParams(params.queryParameters, '');
    }
    requestBody.referers = params.referers;
  }

  return this.as._jsonRequest({
    method: 'POST',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/keys',
    body: requestBody,
    hostType: 'write',
    callback: callback
  });
};
/**
* @deprecated use index.addApiKey()
*/
// Deprecated alias kept for backwards compatibility: delegates to
// addApiKey() and logs a deprecation warning on first use.
Index.prototype.addUserKeyWithValidity = deprecate(function deprecatedAddUserKeyWithValidity(acls, params, callback) {
  return this.addApiKey(acls, params, callback);
}, deprecatedMessage('index.addUserKeyWithValidity()', 'index.addApiKey()'));
/*
@deprecated see index.updateApiKey
*/
// Deprecated alias kept for backwards compatibility: delegates to
// updateApiKey() and logs a deprecation warning on first use.
Index.prototype.updateUserKey = deprecate(function(key, acls, params, callback) {
  return this.updateApiKey(key, acls, params, callback);
}, deprecatedMessage('index.updateUserKey()', 'index.updateApiKey()'));
/**
* Update an existing API key of this index
* @param {string} key - The key to update
* @param {string[]} acls - The list of ACL for this key. Defined by an array of strings that
* can contains the following values:
* - search: allow to search (https and http)
* - addObject: allows to add/update an object in the index (https only)
* - deleteObject : allows to delete an existing object (https only)
* - deleteIndex : allows to delete index content (https only)
* - settings : allows to get index settings (https only)
* - editSettings : allows to change index settings (https only)
* @param {Object} [params] - Optionnal parameters to set for the key
* @param {number} params.validity - Number of seconds after which the key will
* be automatically removed (0 means no time limit for this key)
* @param {number} params.maxQueriesPerIPPerHour - Number of API calls allowed from an IP address per hour
* @param {number} params.maxHitsPerQuery - Number of hits this API key can retrieve in one call
* @param {string} params.description - A description for your key
* @param {string[]} params.referers - A list of authorized referers
* @param {Object} params.queryParameters - Force the key to use specific query parameters
* @param {Function} callback - The result callback called with two arguments
* error: null or Error('message')
* content: the server answer with user keys list
* @return {Promise|undefined} Returns a promise if no callback given
* @example
* index.updateApiKey('APIKEY', ['search'], {
* validity: 300,
* maxQueriesPerIPPerHour: 2000,
* maxHitsPerQuery: 3,
* description: 'Eat three fruits',
* referers: ['*.algolia.com'],
* queryParameters: {
* tagFilters: ['public'],
* }
* })
* @see {@link https://www.algolia.com/doc/rest_api#UpdateIndexKey|Algolia REST API Documentation}
*/
/*
 * Update an existing API key on this index.
 *
 * `key` identifies the key to update; `acls` must be an array of permission
 * strings; `params` is an optional bag of key attributes (validity,
 * description, referers, queryParameters, ...).
 * Supports updateApiKey(key, acls, callback).
 */
Index.prototype.updateApiKey = function(key, acls, params, callback) {
  var isArray = require('isarray');
  var usage = 'Usage: index.updateApiKey(key, arrayOfAcls[, params, callback])';

  // The ACL list is mandatory and must be an array.
  if (!isArray(acls)) {
    throw new Error(usage);
  }

  // Shift arguments when the optional `params` bag was omitted.
  if (arguments.length === 2 || typeof params === 'function') {
    callback = params;
    params = null;
  }

  var requestBody = {
    acl: acls
  };
  if (params) {
    requestBody.validity = params.validity;
    requestBody.maxQueriesPerIPPerHour = params.maxQueriesPerIPPerHour;
    requestBody.maxHitsPerQuery = params.maxHitsPerQuery;
    requestBody.description = params.description;
    // queryParameters are serialized to the query-string form the API expects.
    if (params.queryParameters) {
      requestBody.queryParameters = this.as._getSearchParams(params.queryParameters, '');
    }
    requestBody.referers = params.referers;
  }

  return this.as._jsonRequest({
    method: 'PUT',
    url: '/1/indexes/' + encodeURIComponent(this.indexName) + '/keys/' + key,
    body: requestBody,
    hostType: 'write',
    callback: callback
  });
};
| mit |
innogames/gitlabhq | app/models/concerns/sha_attribute.rb | 1214 | # frozen_string_literal: true
# Concern for models that store SHA digests in binary database columns.
# Declaring `sha_attribute :name` installs Gitlab::Database::ShaAttribute as
# the attribute type, which converts between hex strings and binary storage.
module ShaAttribute
  extend ActiveSupport::Concern

  class_methods do
    # Declares +name+ as a SHA-backed attribute.
    # Skipped entirely during static verification runs; in development the
    # column type is additionally validated to be :binary.
    def sha_attribute(name)
      return if ENV['STATIC_VERIFICATION']

      validate_binary_column_exists!(name) if Rails.env.development?

      attribute(name, Gitlab::Database::ShaAttribute.new)
    end

    # This only gets executed in non-production environments as an additional check to ensure
    # the column is the correct type. In production it should behave like any other attribute.
    # See https://gitlab.com/gitlab-org/gitlab/merge_requests/5502 for more discussion
    def validate_binary_column_exists!(name)
      # Bail out quietly when the schema is not available yet (e.g. before
      # migrations have run) — the column simply cannot be checked.
      return unless database_exists?
      return unless table_exists?

      column = columns.find { |c| c.name == name.to_s }

      return unless column

      unless column.type == :binary
        raise ArgumentError, "sha_attribute #{name.inspect} is invalid since the column type is not :binary"
      end
    rescue StandardError => error
      # Log schema-introspection failures before re-raising so misconfigured
      # environments surface the root cause instead of failing silently.
      Gitlab::AppLogger.error "ShaAttribute initialization: #{error.message}"
      raise
    end

    def database_exists?
      Gitlab::Database.exists?
    end
  end
end
ShaAttribute::ClassMethods.prepend_mod_with('ShaAttribute')
| mit |
wqwu/NioClient | src/com/wqwu/net/NioWriteFuture.java | 966 | package com.wqwu.net;
import java.util.concurrent.ConcurrentLinkedQueue;
/**
 * Completion handle for an asynchronous NIO write operation.
 *
 * <p>The I/O machinery marks the future done/successful via the setters and
 * fires the registered listeners through {@link #notifyListeners()}; other
 * threads poll {@link #isDone()} / {@link #isSuccess()}. The listener queue
 * is already a {@link ConcurrentLinkedQueue}, signalling cross-thread use, so
 * the completion flags are declared {@code volatile} to guarantee that a
 * reader thread observes writes made by the I/O thread (previously they were
 * plain fields with no visibility guarantee).
 */
public class NioWriteFuture {
    /** Client that issued the write tracked by this future. */
    private final NioTcpClient client;
    // volatile: written by the I/O thread, read by arbitrary caller threads.
    private volatile boolean isDone = false;
    private volatile boolean isSuccess = false;
    /** Listeners to notify on completion; safe for concurrent registration. */
    private final ConcurrentLinkedQueue<NioWriteFutureListener> listeners = new ConcurrentLinkedQueue<NioWriteFutureListener>();

    public NioWriteFuture(NioTcpClient client) {
        this.client = client;
    }

    /** @return the client that issued the write */
    public NioTcpClient getClient() {
        return client;
    }

    /** @return whether the write completed successfully */
    public boolean isSuccess() {
        return isSuccess;
    }

    public void setSuccess(boolean isSuccess) {
        this.isSuccess = isSuccess;
    }

    /** @return whether the write has finished (successfully or not) */
    public boolean isDone() {
        return isDone;
    }

    public void setDone(boolean isDone) {
        this.isDone = isDone;
    }

    /** Registers a listener to be invoked when {@link #notifyListeners()} runs. */
    public void addListener(NioWriteFutureListener listener) {
        listeners.add(listener);
    }

    /**
     * Invokes every registered listener with this future.
     *
     * @throws Exception propagated unchanged from a listener's
     *     {@code operationComplete}; remaining listeners are then skipped
     *     (same behavior as the original implementation).
     */
    public void notifyListeners() throws Exception {
        for (NioWriteFutureListener listener : listeners) {
            listener.operationComplete(this);
        }
    }
}
| mit |
phpcrystal/phpcrystal | src/Component/MVC/Controller/Input/DataItem.php | 173 | <?php
namespace PHPCrystal\PHPCrystal\Component\MVC\Controller\Input;
use PHPCrystal\PHPCrystal\Component\Container\AbstractItem;
/**
 * Container item holding a single piece of controller input data.
 *
 * Behavior is inherited unchanged from AbstractItem; this subclass exists to
 * give input-data items a distinct type within the MVC input container.
 */
class DataItem extends AbstractItem
{
}
| mit |
martin-sokolov/rails_admin | lib/rails_admin/config/sections.rb | 1484 | require 'active_support/core_ext/string/inflections'
require 'rails_admin/config/sections/base'
require 'rails_admin/config/sections/edit'
require 'rails_admin/config/sections/update'
require 'rails_admin/config/sections/create'
require 'rails_admin/config/sections/nested'
require 'rails_admin/config/sections/modal'
require 'rails_admin/config/sections/list'
require 'rails_admin/config/sections/export'
require 'rails_admin/config/sections/show'
module RailsAdmin
  module Config
    # Sections describe different views in the RailsAdmin engine. Configurable sections are
    # list and navigation.
    #
    # Each section's class object can store generic configuration about that section (such as the
    # number of visible tabs in the main navigation), while the instances (accessed via model
    # configuration objects) store model specific configuration (such as the visibility of the
    # model).
    module Sections
      # When included, defines one lazy accessor per section class in this
      # namespace (e.g. #list, #edit). Each accessor memoizes a section
      # instance and optionally applies a configuration block via
      # instance_eval.
      def self.included(klass)
        # Register accessors for all the sections in this namespace
        constants.each do |name|
          section = RailsAdmin::Config::Sections.const_get(name)
          name = name.to_s.underscore.to_sym
          klass.send(:define_method, name) do |&block|
            # Idiomatic memoization (was `@sections = {} unless @sections` /
            # `... unless @sections[name]`); behavior is unchanged.
            @sections ||= {}
            @sections[name] ||= section.new(self)
            # Parenthesized block-pass avoids Ruby's ambiguous-`&` warning.
            @sections[name].instance_eval(&block) if block
            @sections[name]
          end
        end
      end
    end
  end
end
| mit |
goodwinxp/Yorozuya | YorozuyaGSLib/source/__inner_checkDetail.cpp | 1927 | #include <__inner_checkDetail.hpp>
#include <common/ATFCore.hpp>
START_ATF_NAMESPACE
namespace Detail
{
    // Trampoline state for the __inner_check hooks. For each hooked member
    // function, *_next holds the original implementation ("next" in the
    // chain) and *_user holds the user-installed callback.
    Info::__inner_checkctor___inner_check2_ptr __inner_checkctor___inner_check2_next(nullptr);
    Info::__inner_checkctor___inner_check2_clbk __inner_checkctor___inner_check2_user(nullptr);
    Info::__inner_checkdtor___inner_check6_ptr __inner_checkdtor___inner_check6_next(nullptr);
    Info::__inner_checkdtor___inner_check6_clbk __inner_checkdtor___inner_check6_user(nullptr);

    // Wrapper installed in place of the original constructor: forwards the
    // call to the user callback together with the saved original pointer.
    void __inner_checkctor___inner_check2_wrapper(struct __inner_check* _this)
    {
        __inner_checkctor___inner_check2_user(_this, __inner_checkctor___inner_check2_next);
    };

    // Same pattern for the destructor hook.
    void __inner_checkdtor___inner_check6_wrapper(struct __inner_check* _this)
    {
        __inner_checkdtor___inner_check6_user(_this, __inner_checkdtor___inner_check6_next);
    };

    // Hook table consumed by the ATF hook installer. The raw constants are
    // absolute addresses — presumably fixed offsets in the target game
    // binary (generated code); NOTE(review): confirm against the binary
    // version before reuse.
    ::std::array<hook_record, 2> __inner_check_functions =
    {
        _hook_record {
            (LPVOID)0x14027a500L,
            (LPVOID *)&__inner_checkctor___inner_check2_user,
            (LPVOID *)&__inner_checkctor___inner_check2_next,
            (LPVOID)cast_pointer_function(__inner_checkctor___inner_check2_wrapper),
            (LPVOID)cast_pointer_function((void(__inner_check::*)())&__inner_check::ctor___inner_check)
        },
        _hook_record {
            (LPVOID)0x140272f60L,
            (LPVOID *)&__inner_checkdtor___inner_check6_user,
            (LPVOID *)&__inner_checkdtor___inner_check6_next,
            (LPVOID)cast_pointer_function(__inner_checkdtor___inner_check6_wrapper),
            (LPVOID)cast_pointer_function((void(__inner_check::*)())&__inner_check::dtor___inner_check)
        },
    };
}; // end namespace Detail
END_ATF_NAMESPACE
| mit |
nyc-chorus-frogs-2016/JKU-flashcards | app/controllers/rounds_controller.rb | 514 | #Rounds Controller
# Creates a new round from the submitted form params. On success, resets the
# completion flag of every flashcard in the round (so the new round starts
# with all cards outstanding) and renders the first card; on validation
# failure, redirects to the generic error page.
post '/rounds' do
  new_round = Round.new(params[:round])
  if new_round.save
    new_round.flashcards.each do |card|
      # NOTE(review): update_attributes issues one UPDATE per card — fine for
      # small decks; consider update_all for large ones.
      card.update_attributes(completed: false)
    end
    @round = new_round
    erb :'rounds/next_card'
  else
    redirect '/oops'
  end
end
# Shows the next card of an existing round.
# Fixes: removed leftover `# binding.pry` debug residue, switched to symbol
# param access for consistency with the POST route (Sinatra params allow
# both), and guarded against an unknown round id instead of rendering the
# template with a nil @round.
get '/rounds/:id/next_card' do
  @round = Round.find_by(id: params[:id])
  # Unknown id: send the user to the error page rather than crash the view.
  redirect '/oops' unless @round
  erb :'rounds/next_card'
end
# Shows the summary page of an existing round.
# Fixes: removed leftover `# binding.pry` debug residue, switched to symbol
# param access for consistency with the POST route, and guarded against an
# unknown round id instead of rendering the template with a nil @round.
get '/rounds/:id/show' do
  @round = Round.find_by(id: params[:id])
  # Unknown id: send the user to the error page rather than crash the view.
  redirect '/oops' unless @round
  erb :'rounds/show'
end
| mit |
hycis/Mozi | mozi/utils/theano_utils.py | 826 | from __future__ import absolute_import
import numpy as np
import theano
import theano.tensor as T
floatX = theano.config.floatX
'''
from keras
'''
def asfloatX(X):
    """Return ``X`` as a numpy array cast to Theano's configured float type."""
    return np.asarray(X, dtype=floatX)

def sharedX(value, dtype=floatX, name=None, borrow=False, **kwargs):
    """Wrap ``value`` in a Theano shared variable of the given dtype.

    Extra keyword arguments are forwarded to ``theano.shared``.
    """
    return theano.shared(np.asarray(value, dtype=dtype), name=name, borrow=borrow, **kwargs)

def shared_zeros(shape, dtype=floatX, name=None, **kwargs):
    """Create a shared variable of zeros with the given shape."""
    return sharedX(np.zeros(shape), dtype=dtype, name=name, **kwargs)
def shared_scalar(val=0., dtype=floatX, name=None, **kwargs):
    """Wrap a scalar ``val`` in a Theano shared variable of the given dtype.

    Fix: the ``name`` argument was previously accepted but silently dropped;
    it is now forwarded to ``theano.shared``, matching the sibling helpers
    (``sharedX``, ``shared_zeros``, ``shared_ones``).
    """
    return theano.shared(np.cast[dtype](val), name=name, **kwargs)
def shared_ones(shape, dtype=floatX, name=None, **kwargs):
    """Create a shared variable of ones with the given shape."""
    return sharedX(np.ones(shape), dtype=dtype, name=name, **kwargs)

def alloc_zeros_matrix(*dims):
    """Return a symbolic Theano tensor of zeros with the given dimensions."""
    return T.alloc(np.cast[floatX](0.), *dims)
| mit |
zentrick/iab-vast-loader | test/unit/node.js | 12404 | const express = require('express')
const fetch = require('node-fetch')
const fs = require('fs-extra')
const path = require('path')
const { VASTLoader } = require('../../lib/loader')
const { VASTLoaderError } = require('../../lib/loader-error')
const { atob } = require('../../lib/node/atob')
VASTLoader.atob = atob
const EMPTY_VAST_2 =
'<?xml version="1.0" encoding="UTF-8"?><VAST version="2.0"/>'
const EMPTY_VAST_3 =
'<?xml version="1.0" encoding="UTF-8"?><VAST version="3.0"/>'
// Builds a fetch stub: calling the returned function always resolves with an
// OK response whose text() yields the given body.
const mockFetch = body => {
  return () => {
    const response = {
      ok: true,
      text: () => Promise.resolve(body)
    }
    return Promise.resolve(response)
  }
}
// Shared assertion helper: verifies `error` is a VASTLoaderError with the
// given code and message, and — when `cause` is provided — that the error's
// cause carries the expected properties (e.g. HTTP status/statusText).
const expectLoaderError = (error, code, message, cause) => {
  expect(error).to.be.an.instanceof(VASTLoaderError)
  expect(error.code).to.equal(code)
  expect(error.message).to.equal(message)
  if (cause != null) {
    expect(error.cause).to.include(cause)
  }
}
// Unit coverage for VASTLoaderError: code-to-message resolution, cause
// propagation, and the $type discriminator used for duck-typed checks.
describe('VASTLoaderError', function () {
  describe('#code', function () {
    it('gets set from the constructor', function () {
      const error = new VASTLoaderError(301)
      expect(error.code).to.equal(301)
    })
  })
  describe('#message', function () {
    // The message is looked up from the standard VAST error-code table.
    it('resolves from the code', function () {
      const error = new VASTLoaderError(301)
      expect(error.message).to.equal(
        'VAST error 301: Timeout of VAST URI provided in Wrapper element, or of VAST URI provided in a subsequent Wrapper element.'
      )
    })
  })
  describe('#cause', function () {
    it('gets set from the constructor', function () {
      const cause = new Error('Foo')
      const error = new VASTLoaderError(301, cause)
      expect(error.cause).to.equal(cause)
    })
  })
  describe('#$type', function () {
    it('is VASTLoaderError', function () {
      const error = new VASTLoaderError(900)
      expect(error.$type).to.equal('VASTLoaderError')
    })
  })
})
// Integration-style coverage for VASTLoader: tags are served from local
// fixture files over a throwaway express server, and the loader's static
// fetch hook is replaced with a sinon spy so requests can be inspected.
describe('VASTLoader', function () {
  const fixturesPath = path.resolve(__dirname, '../fixtures')
  // Remote tag URIs that the fetch spy transparently rewrites onto local
  // fixtures, so wrapper-following works without network access.
  const proxyPaths = {
    'http://demo.tremormedia.com/proddev/vast/vast_inline_linear.xml':
      'tremor-video/vast_inline_linear.xml',
    'http://example.com/no-ads.xml': 'no-ads.xml',
    'http://example.com/invalid-ads.xml': 'invalid-ads.xml'
  }
  let server
  let baseUrl
  let responseDelay
  let localFetch
  let failOnCredentials
  // Builds a loader for a fixture file after installing the fetch spy.
  const createLoader = (file, options) => {
    VASTLoader.fetch = localFetch
    return new VASTLoader(baseUrl + file, options)
  }
  // Serve the fixtures over HTTP with a configurable artificial delay
  // (responseDelay is used by the timeout tests below).
  before(function (cb) {
    const app = express()
    app.use((req, res, next) => {
      setTimeout(() => next(), responseDelay)
    })
    app.use(express.static(fixturesPath))
    server = app.listen(function () {
      baseUrl = 'http://localhost:' + server.address().port + '/'
      cb()
    })
  })
  after(function (cb) {
    server.close(cb)
  })
  beforeEach(function () {
    responseDelay = 0
    failOnCredentials = false
    // Spy wrapper around node-fetch: optionally rejects credentialed calls
    // and maps known remote URIs onto local fixtures.
    localFetch = sinon.spy((uri, options) => {
      if (options.credentials === 'include' && failOnCredentials) {
        return Promise.reject(new Error('Credentials not allowed'))
      }
      if (uri in proxyPaths) {
        uri = baseUrl + proxyPaths[uri]
      }
      return fetch(uri, options)
    })
  })
  // Happy-path and error-path behavior of the main entry point.
  describe('#load()', function () {
    it('loads the InLine', async function () {
      const uri = 'tremor-video/vast_inline_linear.xml'
      const loader = createLoader(uri)
      const chain = await loader.load()
      expect(chain).to.be.an.instanceof(Array)
      expect(chain.length).to.equal(1)
      expect(chain[0].uri).to.equal(baseUrl + uri)
    })
    it('loads the Wrapper', async function () {
      const uri = 'tremor-video/vast_wrapper_linear_1.xml'
      const loader = createLoader(uri)
      const chain = await loader.load()
      expect(chain).to.be.an.instanceof(Array)
      expect(chain.length).to.equal(2)
      expect(chain[0].uri).to.equal(baseUrl + uri)
      expect(chain[1].uri).to.equal(
        'http://demo.tremormedia.com/proddev/vast/vast_inline_linear.xml'
      )
    })
    it('prepares the first URI before fetching', async function () {
      const origin = '[TEMPLATE].xml'
      const loader = createLoader(origin, {
        prepareUri: originalUri => originalUri.replace('[TEMPLATE]', 'inline')
      })
      await loader.load()
      expect(localFetch.calledOnce).to.equal(true)
      expect(localFetch.getCall(0).args[0]).to.equal(baseUrl + 'inline.xml')
    })
    it('prepares inner URIs before fetching', async function () {
      const origin = 'wrapper-template.xml'
      const inner = 'inline.xml'
      const loader = createLoader(origin, {
        prepareUri: originalUri =>
          originalUri.replace('[VASTAdTagURI]', baseUrl + inner)
      })
      await loader.load()
      expect(localFetch.calledTwice).to.equal(true)
      expect(localFetch.getCall(0).args[0]).to.equal(baseUrl + origin)
      expect(localFetch.getCall(1).args[0]).to.equal(baseUrl + inner)
    })
    // data: URIs bypass the network entirely.
    it('loads the InLine as Base64', async function () {
      const file = path.join(
        fixturesPath,
        'tremor-video/vast_inline_linear.xml'
      )
      const base64 = (await fs.readFile(file)).toString('base64')
      const dataUri = 'data:text/xml;base64,' + base64
      const loader = new VASTLoader(dataUri)
      const chain = await loader.load()
      expect(chain).to.be.an.instanceof(Array)
      expect(chain.length).to.equal(1)
    })
    it('loads the InLine as XML', async function () {
      const file = path.join(
        fixturesPath,
        'tremor-video/vast_inline_linear.xml'
      )
      const xml = (await fs.readFile(file, 'utf8')).replace(/\r?\n/g, '')
      const dataUri = 'data:text/xml,' + xml
      const loader = new VASTLoader(dataUri)
      const chain = await loader.load()
      expect(chain).to.be.an.instanceof(Array)
      expect(chain.length).to.equal(1)
    })
    // An empty top-level response is valid and yields zero ads.
    it('loads the empty tag', async function () {
      const loader = createLoader('no-ads.xml')
      const chain = await loader.load()
      expect(chain.length).to.equal(1)
      expect(chain[0].ads.length).to.equal(0)
    })
    it('throws VAST 303 on empty InLine inside Wrapper', async function () {
      let error
      try {
        const loader = createLoader('no-ads-wrapper.xml')
        await loader.load()
      } catch (err) {
        error = err
      }
      expectLoaderError(
        error,
        303,
        'VAST error 303: No ads VAST response after one or more Wrappers. Also includes number of empty VAST responses from fallback.'
      )
    })
    it('throws VAST 301 on invalid InLine inside Wrapper', async function () {
      let error
      try {
        const loader = createLoader('invalid-ads-wrapper.xml')
        await loader.load()
      } catch (err) {
        error = err
      }
      expectLoaderError(
        error,
        301,
        'VAST error 301: Timeout of VAST URI provided in Wrapper element, or of VAST URI provided in a subsequent Wrapper element.'
      )
    })
    // HTTP failures surface as VAST 301 with the response as cause.
    it('throws on HTTP errors', async function () {
      let error
      try {
        const loader = createLoader('four-oh-four')
        await loader.load()
      } catch (err) {
        error = err
      }
      expectLoaderError(
        error,
        301,
        'VAST error 301: Timeout of VAST URI provided in Wrapper element, or of VAST URI provided in a subsequent Wrapper element.',
        { status: 404, statusText: 'Not Found' }
      )
    })
  })
  // TODO Test event data
  describe('#emit()', function () {
    for (const type of ['willFetch', 'didFetch', 'willParse', 'didParse']) {
      it(`emits ${type}`, async function () {
        const spy = sinon.spy()
        const loader = createLoader('tremor-video/vast_inline_linear.xml')
        loader.on(type, spy)
        await loader.load()
        expect(spy.called).to.be.true()
      })
    }
    // A wrapper chain of two tags must fire each lifecycle event twice.
    for (const type of ['willFetch', 'didFetch', 'willParse', 'didParse']) {
      it(`emits ${type} once per tag`, async function () {
        const spy = sinon.spy()
        const loader = createLoader('tremor-video/vast_wrapper_linear_1.xml')
        loader.on(type, spy)
        await loader.load()
        expect(spy.calledTwice).to.be.true()
      })
    }
    it('emits fetchError on fetch errors', async function () {
      const spy = sinon.spy()
      const loader = createLoader('four-oh-four')
      loader.on('fetchError', spy)
      try {
        await loader.load()
      } catch (err) {}
      expect(spy.callCount).to.equal(1)
    })
    // NOTE(review): this test's name says it checks the 'error' event but it
    // subscribes to 'fetchError', duplicating the test above — looks like a
    // copy-paste slip; confirm the loader emits 'error' before changing.
    it('emits error on errors', async function () {
      const spy = sinon.spy()
      const loader = createLoader('four-oh-four')
      loader.on('fetchError', spy)
      try {
        await loader.load()
      } catch (err) {}
      expect(spy.callCount).to.equal(1)
    })
  })
  describe('maxDepth option', function () {
    // Wrapper chains longer than maxDepth surface as VAST 302.
    it('throws when maxDepth is reached', async function () {
      let error
      try {
        const loader = createLoader('tremor-video/vast_wrapper_linear_1.xml', {
          maxDepth: 1
        })
        await loader.load()
      } catch (err) {
        error = err
      }
      expectLoaderError(
        error,
        302,
        'VAST error 302: Wrapper limit reached, as defined by the video player. Too many Wrapper responses have been received with no InLine response.'
      )
    })
  })
  describe('timeout option', function () {
    // The fixture server delays its response beyond the loader's timeout.
    it('throws when timeout is reached', async function () {
      responseDelay = 100
      let error
      try {
        const loader = createLoader('no-ads.xml', {
          timeout: 10
        })
        await loader.load()
      } catch (err) {
        error = err
      }
      expectLoaderError(
        error,
        301,
        'VAST error 301: Timeout of VAST URI provided in Wrapper element, or of VAST URI provided in a subsequent Wrapper element.'
      )
    })
  })
  describe('credentials option', function () {
    it('is "omit" by default', async function () {
      const loader = createLoader('tremor-video/vast_inline_linear.xml')
      await loader.load()
      expect(localFetch.callCount).to.equal(1)
      expect(localFetch.firstCall.args[1]).to.eql({ credentials: 'omit' })
    })
    it('overrides with a string value', async function () {
      const loader = createLoader('tremor-video/vast_inline_linear.xml', {
        credentials: 'include'
      })
      await loader.load()
      expect(localFetch.callCount).to.equal(1)
      expect(localFetch.firstCall.args[1]).to.eql({ credentials: 'include' })
    })
    it('overrides with a function value', async function () {
      const loader = createLoader('tremor-video/vast_inline_linear.xml', {
        credentials: uri => 'same-origin'
      })
      await loader.load()
      expect(localFetch.callCount).to.equal(1)
      expect(localFetch.firstCall.args[1]).to.eql({
        credentials: 'same-origin'
      })
    })
    it('calls the function with the tag URI', async function () {
      const credentials = sinon.spy(uri => 'same-origin')
      const file = 'tremor-video/vast_inline_linear.xml'
      const uri = baseUrl + file
      const loader = createLoader(file, {
        credentials
      })
      await loader.load()
      expect(localFetch.callCount).to.equal(1)
      expect(credentials).to.have.been.calledWith(uri)
    })
    // When an array is given, a failing mode should fall through to the next.
    // NOTE(review): the third argument `true` below is not part of
    // createLoader's (file, options) signature and is silently ignored.
    it('falls through in array of values', async function () {
      failOnCredentials = true
      const loader = createLoader(
        'tremor-video/vast_inline_linear.xml',
        {
          credentials: ['include', 'omit']
        },
        true
      )
      await loader.load()
      expect(localFetch.callCount).to.equal(2)
      expect(localFetch.firstCall.args[1]).to.eql({ credentials: 'include' })
      expect(localFetch.secondCall.args[1]).to.eql({ credentials: 'omit' })
    })
  })
  describe('fetch option', function () {
    // Each loader instance may carry its own fetch implementation.
    it('overwrites fetch per instance', async function () {
      const loader1 = createLoader('tremor-video/vast_inline_linear.xml', {
        fetch: mockFetch(EMPTY_VAST_2)
      })
      const loader2 = createLoader('tremor-video/vast_inline_linear.xml', {
        fetch: mockFetch(EMPTY_VAST_3)
      })
      loader1.on('didFetch', ({ body }) => {
        expect(body).to.equal(EMPTY_VAST_2)
      })
      loader2.on('didFetch', ({ body }) => {
        expect(body).to.equal(EMPTY_VAST_3)
      })
      await Promise.all([loader1.load(), loader2.load()])
    })
  })
})
| mit |
dcrec1/fakefs | test/fakefs_test.rb | 31531 | $LOAD_PATH.unshift File.join(File.dirname(__FILE__), '..', 'lib')
require 'fakefs'
require 'test/unit'
class FakeFSTest < Test::Unit::TestCase
include FakeFS
def setup
FileSystem.clear
end
def test_can_be_initialized_empty
fs = FileSystem
assert_equal 0, fs.files.size
end
def xtest_can_be_initialized_with_an_existing_directory
fs = FileSystem
fs.clone(File.expand_path(File.dirname(__FILE__))).inspect
assert_equal 1, fs.files.size
end
def test_can_create_directories
FileUtils.mkdir_p("/path/to/dir")
assert_kind_of FakeDir, FileSystem.fs['path']['to']['dir']
end
def test_can_create_directories_with_mkpath
FileUtils.mkpath("/path/to/dir")
assert_kind_of FakeDir, FileSystem.fs['path']['to']['dir']
end
def test_can_delete_directories
FileUtils.mkdir_p("/path/to/dir")
FileUtils.rmdir("/path/to/dir")
assert File.exists?("/path/to/")
assert File.exists?("/path/to/dir") == false
end
def test_knows_directories_exist
FileUtils.mkdir_p(path = "/path/to/dir")
assert File.exists?(path)
end
def test_knows_directories_are_directories
FileUtils.mkdir_p(path = "/path/to/dir")
assert File.directory?(path)
end
def test_knows_symlink_directories_are_directories
FileUtils.mkdir_p(path = "/path/to/dir")
FileUtils.ln_s path, sympath = '/sympath'
assert File.directory?(sympath)
end
def test_knows_non_existent_directories_arent_directories
path = 'does/not/exist/'
assert_equal RealFile.directory?(path), File.directory?(path)
end
def test_doesnt_overwrite_existing_directories
FileUtils.mkdir_p(path = "/path/to/dir")
assert File.exists?(path)
FileUtils.mkdir_p("/path/to")
assert File.exists?(path)
end
def test_can_create_symlinks
FileUtils.mkdir_p(target = "/path/to/target")
FileUtils.ln_s(target, "/path/to/link")
assert_kind_of FakeSymlink, FileSystem.fs['path']['to']['link']
assert_raises(Errno::EEXIST) do
FileUtils.ln_s(target, '/path/to/link')
end
end
def test_can_force_creation_of_symlinks
FileUtils.mkdir_p(target = "/path/to/first/target")
FileUtils.ln_s(target, "/path/to/link")
assert_kind_of FakeSymlink, FileSystem.fs['path']['to']['link']
FileUtils.ln_s(target, '/path/to/link', :force => true)
end
def test_create_symlink_using_ln_sf
FileUtils.mkdir_p(target = "/path/to/first/target")
FileUtils.ln_s(target, "/path/to/link")
assert_kind_of FakeSymlink, FileSystem.fs['path']['to']['link']
FileUtils.ln_sf(target, '/path/to/link')
end
def test_can_follow_symlinks
FileUtils.mkdir_p(target = "/path/to/target")
FileUtils.ln_s(target, link = "/path/to/symlink")
assert_equal target, File.readlink(link)
end
def test_knows_symlinks_are_symlinks
FileUtils.mkdir_p(target = "/path/to/target")
FileUtils.ln_s(target, link = "/path/to/symlink")
assert File.symlink?(link)
end
def test_can_create_files
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f.write "Yatta!"
end
assert File.exists?(path)
assert File.readable?(path)
assert File.writable?(path)
end
def test_can_create_files_with_bitmasks
path = '/path/to/file.txt'
File.open(path, File::RDWR | File::CREAT) do |f|
f.write "Yatta!"
end
assert File.exists?(path)
assert File.readable?(path)
assert File.writable?(path)
end
def test_file_opens_in_read_only_mode
File.open("foo", "w") { |f| f << "foo" }
f = File.open("foo")
assert_raises(IOError) do
f << "bar"
end
end
def test_file_opens_in_read_only_mode_with_bitmasks
File.open("foo", "w") { |f| f << "foo" }
f = File.open("foo", File::RDONLY)
assert_raises(IOError) do
f << "bar"
end
end
def test_file_opens_in_invalid_mode
FileUtils.touch("foo")
assert_raises(ArgumentError) do
File.open("foo", "an_illegal_mode")
end
end
def test_raises_error_when_cannot_find_file_in_read_mode
assert_raises(Errno::ENOENT) do
File.open("does_not_exist", "r")
end
end
def test_raises_error_when_cannot_find_file_in_read_write_mode
assert_raises(Errno::ENOENT) do
File.open("does_not_exist", "r+")
end
end
def test_creates_files_in_write_only_mode
File.open("foo", "w")
assert File.exists?("foo")
end
def test_creates_files_in_write_only_mode_with_bitmasks
File.open("foo", File::WRONLY | File::CREAT)
assert File.exists?("foo")
end
def test_raises_in_write_only_mode_without_create_bitmask
assert_raises(Errno::ENOENT) do
File.open("foo", File::WRONLY)
end
end
def test_creates_files_in_read_write_truncate_mode
File.open("foo", "w+")
assert File.exists?("foo")
end
def test_creates_files_in_append_write_only
File.open("foo", "a")
assert File.exists?("foo")
end
def test_creates_files_in_append_read_write
File.open("foo", "a+")
assert File.exists?("foo")
end
def test_file_in_write_only_raises_error_when_reading
FileUtils.touch("foo")
f = File.open("foo", "w")
assert_raises(IOError) do
f.read
end
end
def test_file_in_write_mode_truncates_existing_file
File.open("foo", "w") { |f| f << "contents" }
f = File.open("foo", "w")
assert_equal "", File.read("foo")
end
def test_file_in_read_write_truncation_mode_truncates_file
File.open("foo", "w") { |f| f << "foo" }
f = File.open("foo", "w+")
assert_equal "", File.read("foo")
end
def test_file_in_append_write_only_raises_error_when_reading
FileUtils.touch("foo")
f = File.open("foo", "a")
assert_raises(IOError) do
f.read
end
end
def test_can_read_files_once_written
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f.write "Yatta!"
end
assert_equal "Yatta!", File.read(path)
end
def test_can_write_to_files
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f << 'Yada Yada'
end
assert_equal 'Yada Yada', File.read(path)
end
def test_raises_error_when_opening_with_binary_mode_only
assert_raise ArgumentError do
File.open("/foo", "b")
end
end
def test_can_open_file_in_binary_mode
File.open("/foo", "wb") { |x| x << "a" }
assert_equal "a", File.read("/foo")
end
def test_can_chunk_io_when_reading
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f << 'Yada Yada'
end
file = File.new(path, 'r')
assert_equal 'Yada', file.read(4)
assert_equal ' Yada', file.read(5)
assert_equal '', file.read
file.rewind
assert_equal 'Yada Yada', file.read
end
def test_can_get_size_of_files
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f << 'Yada Yada'
end
assert_equal 9, File.size(path)
end
def test_can_check_if_file_has_size?
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f << 'Yada Yada'
end
assert File.size?(path)
assert_nil File.size?("/path/to/other.txt")
end
def test_can_check_size?_of_empty_file
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f << ''
end
assert_nil File.size?("/path/to/file.txt")
end
def test_raises_error_on_mtime_if_file_does_not_exist
assert_raise Errno::ENOENT do
File.mtime('/path/to/file.txt')
end
end
def test_can_return_mtime_on_existing_file
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f << ''
end
assert File.mtime('/path/to/file.txt').is_a?(Time)
end
def test_can_read_with_File_readlines
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f.puts "Yatta!", "Gatta!"
f.puts ["woot","toot"]
end
assert_equal %w(Yatta! Gatta! woot toot), File.readlines(path)
end
def test_File_close_disallows_further_access
path = '/path/to/file.txt'
file = File.open(path, 'w')
file.write 'Yada'
file.close
assert_raise IOError do
file.read
end
end
def test_can_read_from_file_objects
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f.write "Yatta!"
end
assert_equal "Yatta!", File.new(path).read
end
def test_file_read_errors_appropriately
assert_raise Errno::ENOENT do
File.read('anything')
end
end
def test_knows_files_are_files
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f.write "Yatta!"
end
assert File.file?(path)
end
def test_knows_symlink_files_are_files
path = '/path/to/file.txt'
File.open(path, 'w') do |f|
f.write "Yatta!"
end
FileUtils.ln_s path, sympath='/sympath'
assert File.file?(sympath)
end
def test_knows_non_existent_files_arent_files
assert_equal RealFile.file?('does/not/exist.txt'), File.file?('does/not/exist.txt')
end
def test_can_chown_files
good = 'file.txt'
bad = 'nofile.txt'
File.open(good,'w') { |f| f.write "foo" }
out = FileUtils.chown('noone', 'nogroup', good, :verbose => true)
assert_equal [good], out
assert_raises(Errno::ENOENT) do
FileUtils.chown('noone', 'nogroup', bad, :verbose => true)
end
assert_equal [good], FileUtils.chown('noone', 'nogroup', good)
assert_raises(Errno::ENOENT) do
FileUtils.chown('noone', 'nogroup', bad)
end
assert_equal [good], FileUtils.chown('noone', 'nogroup', [good])
assert_raises(Errno::ENOENT) do
FileUtils.chown('noone', 'nogroup', [good, bad])
end
end
def test_can_chown_R_files
FileUtils.mkdir_p '/path/'
File.open('/path/foo', 'w') { |f| f.write 'foo' }
File.open('/path/foobar', 'w') { |f| f.write 'foo' }
resp = FileUtils.chown_R('no', 'no', '/path')
assert_equal ['/path'], resp
end
def test_dir_globs_paths
FileUtils.mkdir_p '/path'
File.open('/path/foo', 'w') { |f| f.write 'foo' }
File.open('/path/foobar', 'w') { |f| f.write 'foo' }
FileUtils.mkdir_p '/path/bar'
File.open('/path/bar/baz', 'w') { |f| f.write 'foo' }
FileUtils.cp_r '/path/bar', '/path/bar2'
assert_equal ['/path'], Dir['/path']
assert_equal %w( /path/bar /path/bar2 /path/foo /path/foobar ), Dir['/path/*']
assert_equal ['/path/bar/baz'], Dir['/path/bar/*']
assert_equal ['/path/foo'], Dir['/path/foo']
assert_equal ['/path'], Dir['/path*']
assert_equal ['/path/foo', '/path/foobar'], Dir['/p*h/foo*']
assert_equal ['/path/foo', '/path/foobar'], Dir['/p??h/foo*']
FileUtils.cp_r '/path', '/otherpath'
assert_equal %w( /otherpath/foo /otherpath/foobar /path/foo /path/foobar ), Dir['/*/foo*']
end
def test_dir_glob_handles_root
FileUtils.mkdir_p '/path'
# this fails. the root dir should be named '/' but it is '.'
#assert_equal ['/'], Dir['/']
end
def test_dir_glob_handles_recursive_globs
File.open('/one/two/three/four.rb', 'w')
File.open('/one/five.rb', 'w')
assert_equal ['/one/five.rb', '/one/two/three/four.rb'], Dir['/one/**/*.rb']
assert_equal ['/one/two'], Dir['/one/**/two']
assert_equal ['/one/two/three'], Dir['/one/**/three']
end
def test_dir_recursive_glob_ending_in_wildcards_only_returns_files
File.open('/one/two/three/four.rb', 'w')
File.open('/one/five.rb', 'w')
assert_equal ['/one/five.rb', '/one/two/three/four.rb'], Dir['/one/**/*']
assert_equal ['/one/five.rb', '/one/two/three/four.rb'], Dir['/one/**']
end
def test_chdir_changes_directories_like_a_boss
# I know memes!
FileUtils.mkdir_p '/path'
assert_equal '.', FileSystem.fs.name
assert_equal({}, FileSystem.fs['path'])
Dir.chdir '/path' do
File.open('foo', 'w') { |f| f.write 'foo'}
File.open('foobar', 'w') { |f| f.write 'foo'}
end
assert_equal '.', FileSystem.fs.name
assert_equal(['foo', 'foobar'], FileSystem.fs['path'].keys.sort)
c = nil
Dir.chdir '/path' do
c = File.open('foo', 'r') { |f| f.read }
end
assert_equal 'foo', c
end
def test_chdir_shouldnt_keep_us_from_absolute_paths
FileUtils.mkdir_p '/path'
Dir.chdir '/path' do
File.open('foo', 'w') { |f| f.write 'foo'}
File.open('/foobar', 'w') { |f| f.write 'foo'}
end
assert_equal ['foo'], FileSystem.fs['path'].keys.sort
assert_equal ['foobar', 'path'], FileSystem.fs.keys.sort
Dir.chdir '/path' do
FileUtils.rm('foo')
FileUtils.rm('/foobar')
end
assert_equal [], FileSystem.fs['path'].keys.sort
assert_equal ['path'], FileSystem.fs.keys.sort
end
def test_chdir_should_be_nestable
FileUtils.mkdir_p '/path/me'
Dir.chdir '/path' do
File.open('foo', 'w') { |f| f.write 'foo'}
Dir.chdir 'me' do
File.open('foobar', 'w') { |f| f.write 'foo'}
end
end
assert_equal ['foo','me'], FileSystem.fs['path'].keys.sort
assert_equal ['foobar'], FileSystem.fs['path']['me'].keys.sort
end
def test_chdir_should_flop_over_and_die_if_the_dir_doesnt_exist
assert_raise(Errno::ENOENT) do
Dir.chdir('/nope') do
1
end
end
end
def test_chdir_shouldnt_lose_state_because_of_errors
FileUtils.mkdir_p '/path'
Dir.chdir '/path' do
File.open('foo', 'w') { |f| f.write 'foo'}
File.open('foobar', 'w') { |f| f.write 'foo'}
end
begin
Dir.chdir('/path') do
raise Exception
end
rescue Exception # hardcore
end
Dir.chdir('/path') do
begin
Dir.chdir('nope'){ }
rescue Errno::ENOENT
end
assert_equal ['/path'], FileSystem.dir_levels
end
assert_equal(['foo', 'foobar'], FileSystem.fs['path'].keys.sort)
end
def test_chdir_with_no_block_is_awesome
FileUtils.mkdir_p '/path'
Dir.chdir('/path')
FileUtils.mkdir_p 'subdir'
assert_equal ['subdir'], FileSystem.current_dir.keys
Dir.chdir('subdir')
File.open('foo', 'w') { |f| f.write 'foo'}
assert_equal ['foo'], FileSystem.current_dir.keys
assert_raises(Errno::ENOENT) do
Dir.chdir('subsubdir')
end
assert_equal ['foo'], FileSystem.current_dir.keys
end
def test_current_dir_reflected_by_pwd
FileUtils.mkdir_p '/path'
Dir.chdir('/path')
assert_equal '/path', Dir.pwd
assert_equal '/path', Dir.getwd
FileUtils.mkdir_p 'subdir'
Dir.chdir('subdir')
assert_equal '/path/subdir', Dir.pwd
assert_equal '/path/subdir', Dir.getwd
end
def test_file_open_defaults_to_read
File.open('foo','w') { |f| f.write 'bar' }
assert_equal 'bar', File.open('foo') { |f| f.read }
end
def test_flush_exists_on_file
r = File.open('foo','w') { |f| f.write 'bar'; f.flush }
assert_equal 'foo', r.path
end
def test_mv_should_raise_error_on_missing_file
assert_raise(Errno::ENOENT) do
FileUtils.mv 'blafgag', 'foo'
end
end
def test_mv_actually_works
File.open('foo', 'w') { |f| f.write 'bar' }
FileUtils.mv 'foo', 'baz'
assert_equal 'bar', File.open('baz') { |f| f.read }
end
def test_cp_actually_works
File.open('foo', 'w') {|f| f.write 'bar' }
FileUtils.cp('foo', 'baz')
assert_equal 'bar', File.read('baz')
end
def test_cp_file_into_dir
File.open('foo', 'w') {|f| f.write 'bar' }
FileUtils.mkdir_p 'baz'
FileUtils.cp('foo', 'baz')
assert_equal 'bar', File.read('baz/foo')
end
def test_cp_overwrites_dest_file
File.open('foo', 'w') {|f| f.write 'FOO' }
File.open('bar', 'w') {|f| f.write 'BAR' }
FileUtils.cp('foo', 'bar')
assert_equal 'FOO', File.read('bar')
end
def test_cp_fails_on_no_source
assert_raise Errno::ENOENT do
FileUtils.cp('foo', 'baz')
end
end
def test_cp_fails_on_directory_copy
FileUtils.mkdir_p 'baz'
assert_raise Errno::EISDIR do
FileUtils.cp('baz', 'bar')
end
end
def test_cp_r_doesnt_tangle_files_together
File.open('foo', 'w') { |f| f.write 'bar' }
FileUtils.cp_r('foo', 'baz')
File.open('baz', 'w') { |f| f.write 'quux' }
assert_equal 'bar', File.open('foo') { |f| f.read }
end
def test_cp_r_should_raise_error_on_missing_file
# Yes, this error sucks, but it conforms to the original Ruby
# method.
assert_raise(RuntimeError) do
FileUtils.cp_r 'blafgag', 'foo'
end
end
def test_cp_r_handles_copying_directories
FileUtils.mkdir_p 'subdir'
Dir.chdir('subdir'){ File.open('foo', 'w') { |f| f.write 'footext' } }
FileUtils.mkdir_p 'baz'
# To a previously uncreated directory
FileUtils.cp_r('subdir', 'quux')
assert_equal 'footext', File.open('quux/foo') { |f| f.read }
# To a directory that already exists
FileUtils.cp_r('subdir', 'baz')
assert_equal 'footext', File.open('baz/subdir/foo') { |f| f.read }
# To a subdirectory of a directory that does not exist
assert_raises(Errno::ENOENT) do
FileUtils.cp_r('subdir', 'nope/something')
end
end
def test_cp_r_only_copies_into_directories
FileUtils.mkdir_p 'subdir'
Dir.chdir('subdir') { File.open('foo', 'w') { |f| f.write 'footext' } }
File.open('bar', 'w') { |f| f.write 'bartext' }
assert_raises(Errno::EEXIST) do
FileUtils.cp_r 'subdir', 'bar'
end
FileUtils.mkdir_p 'otherdir'
FileUtils.ln_s 'otherdir', 'symdir'
FileUtils.cp_r 'subdir', 'symdir'
assert_equal 'footext', File.open('symdir/subdir/foo') { |f| f.read }
end
def test_cp_r_sets_parent_correctly
FileUtils.mkdir_p '/path/foo'
File.open('/path/foo/bar', 'w') { |f| f.write 'foo' }
File.open('/path/foo/baz', 'w') { |f| f.write 'foo' }
FileUtils.cp_r '/path/foo', '/path/bar'
assert File.exists?('/path/bar/baz')
FileUtils.rm_rf '/path/bar/baz'
assert_equal %w( /path/bar/bar ), Dir['/path/bar/*']
end
def test_clone_clones_normal_files
RealFile.open(here('foo'), 'w') { |f| f.write 'bar' }
assert !File.exists?(here('foo'))
FileSystem.clone(here('foo'))
assert_equal 'bar', File.open(here('foo')) { |f| f.read }
ensure
RealFile.unlink(here('foo')) if RealFile.exists?(here('foo'))
end
def test_clone_clones_directories
RealFileUtils.mkdir_p(here('subdir'))
FileSystem.clone(here('subdir'))
assert File.exists?(here('subdir')), 'subdir was cloned'
assert File.directory?(here('subdir')), 'subdir is a directory'
ensure
RealFileUtils.rm_rf(here('subdir')) if RealFile.exists?(here('subdir'))
end
def test_clone_clones_dot_files_even_hard_to_find_ones
RealFileUtils.mkdir_p(here('subdir/.bar/baz/.quux/foo'))
assert !File.exists?(here('subdir'))
FileSystem.clone(here('subdir'))
assert_equal ['.bar'], FileSystem.find(here('subdir')).keys
assert_equal ['foo'], FileSystem.find(here('subdir/.bar/baz/.quux')).keys
ensure
RealFileUtils.rm_rf(here('subdir')) if RealFile.exists?(here('subdir'))
end
def test_putting_a_dot_at_end_copies_the_contents
FileUtils.mkdir_p 'subdir'
Dir.chdir('subdir') { File.open('foo', 'w') { |f| f.write 'footext' } }
FileUtils.mkdir_p 'newdir'
FileUtils.cp_r 'subdir/.', 'newdir'
assert_equal 'footext', File.open('newdir/foo') { |f| f.read }
end
def test_file_can_read_from_symlinks
File.open('first', 'w') { |f| f.write '1'}
FileUtils.ln_s 'first', 'one'
assert_equal '1', File.open('one') { |f| f.read }
FileUtils.mkdir_p 'subdir'
File.open('subdir/nother','w') { |f| f.write 'works' }
FileUtils.ln_s 'subdir', 'new'
assert_equal 'works', File.open('new/nother') { |f| f.read }
end
def test_can_symlink_through_file
FileUtils.touch("/foo")
File.symlink("/foo", "/bar")
assert File.symlink?("/bar")
end
def test_files_can_be_touched
FileUtils.touch('touched_file')
assert File.exists?('touched_file')
list = ['newfile', 'another']
FileUtils.touch(list)
list.each { |fp| assert(File.exists?(fp)) }
end
def test_touch_does_not_work_if_the_dir_path_cannot_be_found
assert_raises(Errno::ENOENT) do
FileUtils.touch('this/path/should/not/be/here')
end
FileUtils.mkdir_p('subdir')
list = ['subdir/foo', 'nosubdir/bar']
assert_raises(Errno::ENOENT) do
FileUtils.touch(list)
end
end
def test_extname
assert File.extname("test.doc") == ".doc"
end
# Directory tests
def test_new_directory
FileUtils.mkdir_p('/this/path/should/be/here')
assert_nothing_raised do
Dir.new('/this/path/should/be/here')
end
end
def test_new_directory_does_not_work_if_dir_path_cannot_be_found
assert_raises(Errno::ENOENT) do
Dir.new('/this/path/should/not/be/here')
end
end
def test_directory_close
FileUtils.mkdir_p('/this/path/should/be/here')
dir = Dir.new('/this/path/should/be/here')
assert dir.close.nil?
assert_raises(IOError) do
dir.each { |dir| dir }
end
end
def test_directory_each
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
dir = Dir.new('/this/path/should/be/here')
yielded = []
dir.each do |dir|
yielded << dir
end
assert yielded.size == test.size
test.each { |t| assert yielded.include?(t) }
end
def test_directory_path
FileUtils.mkdir_p('/this/path/should/be/here')
good_path = '/this/path/should/be/here'
assert_equal good_path, Dir.new('/this/path/should/be/here').path
end
def test_directory_pos
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
dir = Dir.new('/this/path/should/be/here')
assert dir.pos == 0
dir.read
assert dir.pos == 1
dir.read
assert dir.pos == 2
dir.read
assert dir.pos == 3
dir.read
assert dir.pos == 4
dir.read
assert dir.pos == 5
end
def test_directory_pos_assign
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
dir = Dir.new('/this/path/should/be/here')
assert dir.pos == 0
dir.pos = 2
assert dir.pos == 2
end
def test_directory_read
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
dir = Dir.new('/this/path/should/be/here')
assert dir.pos == 0
d = dir.read
assert dir.pos == 1
assert d == '.'
d = dir.read
assert dir.pos == 2
assert d == '..'
end
def test_directory_read_past_length
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
dir = Dir.new('/this/path/should/be/here')
d = dir.read
assert_not_nil d
d = dir.read
assert_not_nil d
d = dir.read
assert_not_nil d
d = dir.read
assert_not_nil d
d = dir.read
assert_not_nil d
d = dir.read
assert_not_nil d
d = dir.read
assert_not_nil d
d = dir.read
assert_nil d
end
def test_directory_rewind
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
dir = Dir.new('/this/path/should/be/here')
d = dir.read
d = dir.read
assert dir.pos == 2
dir.rewind
assert dir.pos == 0
end
def test_directory_seek
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
dir = Dir.new('/this/path/should/be/here')
d = dir.seek 1
assert d == '..'
assert dir.pos == 1
end
def test_directory_class_delete
FileUtils.mkdir_p('/this/path/should/be/here')
Dir.delete('/this/path/should/be/here')
assert File.exists?('/this/path/should/be/here') == false
end
def test_directory_class_delete_does_not_act_on_non_empty_directory
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
assert_raises(SystemCallError) do
Dir.delete('/this/path/should/be/here')
end
end
def test_directory_entries
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
yielded = Dir.entries('/this/path/should/be/here')
assert yielded.size == test.size
test.each { |t| assert yielded.include?(t) }
end
def test_directory_entries_works_with_trailing_slash
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
yielded = Dir.entries('/this/path/should/be/here/')
assert yielded.size == test.size
test.each { |t| assert yielded.include?(t) }
end
def test_directory_foreach
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
yielded = []
Dir.foreach('/this/path/should/be/here') do |dir|
yielded << dir
end
assert yielded.size == test.size
test.each { |t| assert yielded.include?(t) }
end
def test_directory_mkdir
Dir.mkdir('/path')
assert File.exists?('/path')
end
def test_directory_mkdir_relative
FileUtils.mkdir_p('/new/root')
FileSystem.chdir('/new/root')
Dir.mkdir('path')
assert File.exists?('/new/root/path')
end
def test_directory_mkdir_not_recursive
assert_raises(Errno::ENOENT) do
Dir.mkdir('/path/does/not/exist')
end
end
def test_directory_open
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
dir = Dir.open('/this/path/should/be/here')
assert dir.path == '/this/path/should/be/here'
end
def test_directory_open_block
test = ['.', '..', 'file_1', 'file_2', 'file_3', 'file_4', 'file_5' ]
FileUtils.mkdir_p('/this/path/should/be/here')
test.each do |f|
FileUtils.touch("/this/path/should/be/here/#{f}")
end
yielded = []
Dir.open('/this/path/should/be/here') do |dir|
yielded << dir
end
assert yielded.size == test.size
test.each { |t| assert yielded.include?(t) }
end
def test_tmpdir
assert Dir.tmpdir == "/tmp"
end
def test_hard_link_creates_file
FileUtils.touch("/foo")
File.link("/foo", "/bar")
assert File.exists?("/bar")
end
def test_hard_link_with_missing_file_raises_error
assert_raises(Errno::ENOENT) do
File.link("/foo", "/bar")
end
end
def test_hard_link_with_existing_destination_file
FileUtils.touch("/foo")
FileUtils.touch("/bar")
assert_raises(Errno::EEXIST) do
File.link("/foo", "/bar")
end
end
def test_hard_link_returns_0_when_successful
FileUtils.touch("/foo")
assert_equal 0, File.link("/foo", "/bar")
end
def test_hard_link_returns_duplicate_file
File.open("/foo", "w") { |x| x << "some content" }
File.link("/foo", "/bar")
assert_equal "some content", File.read("/bar")
end
def test_hard_link_with_directory_raises_error
Dir.mkdir "/foo"
assert_raises(Errno::EPERM) do
File.link("/foo", "/bar")
end
end
def test_file_stat_returns_file_stat_object
FileUtils.touch("/foo")
assert_equal File::Stat, File.stat("/foo").class
end
def test_can_delete_file_with_delete
FileUtils.touch("/foo")
File.delete("/foo")
assert !File.exists?("/foo")
end
def test_can_delete_multiple_files_with_delete
FileUtils.touch("/foo")
FileUtils.touch("/bar")
File.delete("/foo", "/bar")
assert !File.exists?("/foo")
assert !File.exists?("/bar")
end
def test_delete_raises_argument_error_with_no_filename_given
assert_raises ArgumentError do
File.delete
end
end
def test_delete_returns_number_one_when_given_one_arg
FileUtils.touch("/foo")
assert_equal 1, File.delete("/foo")
end
def test_delete_returns_number_two_when_given_two_args
FileUtils.touch("/foo")
FileUtils.touch("/bar")
assert_equal 2, File.delete("/foo", "/bar")
end
def test_delete_raises_error_when_first_file_does_not_exist
assert_raises Errno::ENOENT do
File.delete("/foo")
end
end
def test_delete_does_not_raise_error_when_second_file_does_not_exist
FileUtils.touch("/foo")
assert_nothing_raised do
File.delete("/foo", "/bar")
end
end
def test_unlink_is_alias_for_delete
assert_equal File.method(:unlink), File.method(:delete)
end
def test_unlink_removes_only_one_file_content
  # Unlinking one hard link must leave the content reachable via the other link.
  # BUG FIX: the original ended with `File.read("/foo") == "some_content"`,
  # whose result was discarded — the test asserted nothing and could never
  # fail. Assert the expected content explicitly.
  File.open("/foo", "w") { |f| f << "some_content" }
  File.link("/foo", "/bar")
  File.unlink("/bar")
  assert_equal "some_content", File.read("/foo")
end
def test_link_reports_correct_stat_info_after_unlinking
File.open("/foo", "w") { |f| f << "some_content" }
File.link("/foo", "/bar")
File.unlink("/bar")
assert_equal 1, File.stat("/foo").nlink
end
def test_delete_works_with_symlink
FileUtils.touch("/foo")
File.symlink("/foo", "/bar")
File.unlink("/bar")
assert File.exists?("/foo")
assert !File.exists?("/bar")
end
def test_delete_works_with_symlink_source
FileUtils.touch("/foo")
File.symlink("/foo", "/bar")
File.unlink("/foo")
assert !File.exists?("/foo")
end
def test_file_seek_returns_0
File.open("/foo", "w") do |f|
f << "one\ntwo\nthree"
end
file = File.open("/foo", "r")
assert_equal 0, file.seek(1)
end
def test_file_seek_seeks_to_location
File.open("/foo", "w") do |f|
f << "123"
end
file = File.open("/foo", "r")
file.seek(1)
assert_equal "23", file.read
end
def test_file_seek_seeks_to_correct_location
File.open("/foo", "w") do |f|
f << "123"
end
file = File.open("/foo", "r")
file.seek(2)
assert_equal "3", file.read
end
def test_file_seek_can_take_negative_offset
File.open("/foo", "w") do |f|
f << "123456789"
end
file = File.open("/foo", "r")
file.seek(-1, IO::SEEK_END)
assert_equal "9", file.read
file.seek(-2, IO::SEEK_END)
assert_equal "89", file.read
file.seek(-3, IO::SEEK_END)
assert_equal "789", file.read
end
def test_should_have_constants_inherited_from_descending_from_io
assert_equal IO::SEEK_CUR, File::SEEK_CUR
assert_equal IO::SEEK_END, File::SEEK_END
assert_equal IO::SEEK_SET, File::SEEK_SET
end
def here(fname)
RealFile.expand_path(RealFile.dirname(__FILE__)+'/'+fname)
end
end
| mit |
sprungknoedl/reputile | update.go | 858 | package main
import (
"context"
"time"
"github.com/Sirupsen/logrus"
"github.com/sprungknoedl/reputile/lists"
)
// UpdateDatabase runs an endless refresh cycle: it combines every configured
// blacklist into one entry stream, stores each fetched entry in db, prunes
// stale rows, logs the batch size, and sleeps an hour before the next pass.
//
// NOTE(review): on any fetch or store error the function returns, which ends
// the refresh loop permanently — confirm that is the intended behavior.
func UpdateDatabase(db *Datastore) {
	// One background context is shared by every iteration of the loop.
	ctx := context.Background()

	for {
		stored := 0
		began := time.Now()

		// Widen the concrete list values to the lists.Iterator interface
		// so they can be merged by lists.Combine.
		iterators := make([]lists.Iterator, len(lists.Lists))
		for i, list := range lists.Lists {
			iterators[i] = list
		}

		for entry := range lists.Combine(iterators...).Run(ctx) {
			if entry.Err != nil {
				logrus.Errorf("(%s) failed to fetch entry: %v", entry.Source, entry.Err)
				return
			}
			stored++
			if err := db.Store(ctx, entry); err != nil {
				logrus.Errorf("(%s) failed to store entry: %v", entry.Source, err)
				return
			}
		}

		// Drop entries that were not refreshed in this pass, then idle.
		db.Prune(ctx)
		logrus.Printf("added %d entries in %v", stored, time.Since(began))
		time.Sleep(1 * time.Hour)
	}
}
| mit |
DocuWare/PlatformJavaClient | src/com/docuware/dev/schema/_public/services/platform/UserInfo.java | 5894 |
package com.docuware.dev.schema._public.services.platform;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
import java.net.URI;
import com.docuware.dev.Extensions.*;
import java.util.concurrent.CompletableFuture;
import java.util.*;
import com.docuware.dev.schema._public.services.Link;
import com.docuware.dev.schema._public.services.platform.UserValidation;
import javax.xml.bind.JAXBElement;
import javax.xml.namespace.QName;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;
import com.docuware.dev.schema._public.services.Links;
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "UserInfo", propOrder = {
"proxy",
"user",
"loginInfo",
"links"
})
// JAXB-mapped DTO describing the signed-in user: account details, login
// information, and the hypermedia links exposed by the DocuWare platform.
public class UserInfo implements IRelationsWithProxy {
// HTTP communication proxy used for follow-up requests; not part of the XML payload.
private HttpClientProxy proxy;
@XmlElement(name = "User", required = true)
protected User user;
@XmlElement(name = "LoginInfo", required = true)
protected LoginInfo loginInfo;
@XmlElement(name = "Links", namespace = "http://dev.docuware.com/schema/public/services", required = true)
protected Links links;
/**User details and settings*/
public User getUser() {
return user;
}
/**Sets the user details and settings.*/
public void setUser(User value) {
this.user = value;
}
/**Details about user login*/
public LoginInfo getLoginInfo() {
return loginInfo;
}
/**Details about user login*/
public void setLoginInfo(LoginInfo value) {
this.loginInfo = value;
}
/**Hypermedia links attached to this resource.*/
@Dolphin
public Links getLinks() {
return links;
}
/**Sets the hypermedia links attached to this resource.*/
@Dolphin
public void setLinks(Links value) {
this.links = value;
}
/**
* Gets the proxy.
*
* @return The proxy
*/
@Extension
public HttpClientProxy getProxy() {
return this.proxy;
}
/**
* Sets the HTTP Communication Proxy which is used in futher HTTP communication.
* The proxy is propagated to the nested {@code User}, if present.
*
* @param proxy The new proxy
*/
@Extension
public void setProxy(HttpClientProxy proxy) {
this.proxy = proxy;
if ((user != null)) {
user.setProxy(proxy);
}
}
/**
* Gets the base URI of the specified relations instance.
*
* @return The base URI of the specified relations instance.
*/
@Extension
public URI getBaseUri() {
return RelationsWithProxyExtensions.getBaseUri(this);
}
/**
* Gets the link by its name.
*
* @param relationName Name of the relation
* @return The link, if it exists; null otherwise.
*/
@Extension
public Link getLink(String relationName) {
return RelationExtension.getLink(this, relationName);
}
/**
* Gets the URI of the relation specified by the name.
*
* @param relationName Name of the relation
* @return The relation URI, if it exists; null otherwise.
*/
@Extension
public String getRelationUri(String relationName) {
return RelationExtension.getRelationUri(this, relationName);
}
/**
* Gets the URI of the relation specified by the name.
*
* @param relationName Name of the relation
* @return The relation URI, if it exists.
* @throws RuntimeException: The specified Link is not found
*/
@Extension
public String getRelationUriOrThrow(String relationName) {
return RelationExtension.getRelationUriOrThrow(this, relationName);
}
/**
* Determines whether the specified link exists.
*
* @param relationName Name of the relation
* @return True, if the specified link exists; otherwise, False.
*/
@Extension
public boolean hasRelationUri(String relationName) {
return RelationExtension.hasRelationUri(this, relationName);
}
/**
* Gets the Uri of the Link for the relation "Validate".
* Returns the Uri of the Link for the relation "Validate", if this links exists, or null, if this link does not exists. The returned link can be relative or absolute. If it is a relative link you must set it in the right context yourself.
* @return the requested URI
*/
public URI getValidateRelationLink() {
return MethodInvocation.getLink(this, links, "validate");
}
/**
* Calls the HTTP post Method on the link for the relation "Validate".
* Sends the given validation data and returns the plain-text response body.
*/
public String postToValidateRelationForString(UserValidation data) {
return MethodInvocation.<String, UserValidation> post(this, links, "validate", String.class, new JAXBElement(new QName("http://dev.docuware.com/schema/public/services/platform", "UserValidation"), UserValidation.class, null, data), "application/vnd.docuware.platform.uservalidation+xml", "text/plain");
}
/**
* Calls the HTTP post Method on the link for the relation "Validate" asynchronously.
*/
public CompletableFuture<DeserializedHttpResponseGen<String>> postToValidateRelationForStringAsync(UserValidation data) {
return MethodInvocation.<String, UserValidation >postAsync(this, links, "validate", String.class, new JAXBElement(new QName("http://dev.docuware.com/schema/public/services/platform", "UserValidation"), UserValidation.class, null, data), "application/vnd.docuware.platform.uservalidation+xml", "text/plain");
}
/**
* Calls the HTTP post Method on the link for the relation "Validate" asynchronously.
* The supplied {@code CancellationToken} can abort the pending request.
*/
public CompletableFuture<DeserializedHttpResponseGen<String>> postToValidateRelationForStringAsync(CancellationToken ct, UserValidation data) {
return MethodInvocation.<String, UserValidation >postAsync(this, links, "validate", String.class, new JAXBElement(new QName("http://dev.docuware.com/schema/public/services/platform", "UserValidation"), UserValidation.class, null, data), "application/vnd.docuware.platform.uservalidation+xml", "text/plain", ct);
}
}
| mit |
MaxwellFlanagan/verbose-octo-spoon-4-30-16 | Chapter 8/Geometry/Triangle.java | 1111 |
/**
 * An immutable triangle defined by three corner points, ordered by area
 * relative to other {@code Geometric} shapes.
 */
public class Triangle implements Geometric, Comparable<Geometric>
{
    private final Point p1;
    private final Point p2;
    private final Point p3;

    /**
     * Creates a triangle from three corner points.
     * Precondition: the three points actually form a triangle (non-collinear).
     *
     * @param x first corner
     * @param y second corner
     * @param z third corner
     */
    public Triangle(Point x, Point y, Point z)
    {
        p1 = x;
        p2 = y;
        p3 = z;
    }

    /** @return the perimeter: the sum of the three side lengths */
    public double getPerimeter()
    {
        return p1.distance(p2) + p2.distance(p3) + p3.distance(p1);
    }

    /**
     * Computes the area with Heron's formula.
     *
     * @return the area of the triangle
     */
    public double getArea()
    {
        double a = p1.distance(p2);
        double b = p2.distance(p3);
        double c = p3.distance(p1);
        double s = (a + b + c) / 2;          // semi-perimeter
        double squared = s * (s - a) * (s - b) * (s - c);
        // Floating-point rounding can push the product slightly below zero for
        // degenerate (near-collinear) triangles; clamp so sqrt never yields NaN.
        return Math.sqrt(Math.max(squared, 0.0));
    }

    @Override
    public String toString()
    {
        return "Triangle " + p1 + " " + p2 + " " + p3;
    }

    /**
     * Orders shapes by area.
     * BUG FIX: the previous implementation returned
     * {@code (int)(this.getArea()*100 - other.getArea()*100)}, which treats
     * areas differing by less than 0.01 as equal and can overflow int for
     * large areas, violating the {@link Comparable} contract.
     * {@link Double#compare} is exact and overflow-free.
     *
     * @param other the shape to compare against
     * @return negative, zero, or positive as this area is less than, equal
     *         to, or greater than {@code other}'s area
     */
    @Override
    public int compareTo(Geometric other)
    {
        return Double.compare(this.getArea(), other.getArea());
    }
}
| mit |
musukvl/Amba.AmazonS3 | Controllers/AdminController.cs | 1342 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Web.Mvc;
using Amba.AmazonS3.Services;
using Amba.AmazonS3.ViewModels;
using Orchard.UI.Admin;
namespace Amba.AmazonS3.Controllers
{
[Admin]
public class AdminController : Controller
{
    // Backing store for the Amazon S3 settings shown on the admin screen.
    private readonly IAmazonS3StorageConfiguration _configuration;

    public AdminController(IAmazonS3StorageConfiguration settingsService)
    {
        _configuration = settingsService;
    }

    // GET: render the current Amazon S3 storage settings.
    public ActionResult Settings()
    {
        var model = new SettingsViewModel(_configuration);
        return View(model);
    }

    // POST: validate and persist the submitted Amazon S3 storage settings.
    [HttpPost]
    public ActionResult Settings(SettingsViewModel viewModel)
    {
        if (!ModelState.IsValid)
        {
            return View(viewModel);
        }

        _configuration.AWSAccessKey = viewModel.AWSAccessKey;
        _configuration.AWSFileBucket = viewModel.AWSFileBucket;
        _configuration.AWSS3PublicUrl = viewModel.AWSS3PublicUrl;
        _configuration.AWSSecretKey = viewModel.AWSSecretKey;
        _configuration.RootFolder = viewModel.RootFolder;
        _configuration.Save();

        return RedirectToAction("Settings");
    }
}
}
| mit |
felixgomez/PHP-CS-Fixer | src/Fixer/CastNotation/ModernizeTypesCastingFixer.php | 4930 | <?php
/*
* This file is part of PHP CS Fixer.
*
* (c) Fabien Potencier <fabien@symfony.com>
* Dariusz Rumiński <dariusz.ruminski@gmail.com>
*
* This source file is subject to the MIT license that is bundled
* with this source code in the file LICENSE.
*/
namespace PhpCsFixer\Fixer\CastNotation;
use PhpCsFixer\AbstractFunctionReferenceFixer;
use PhpCsFixer\FixerDefinition\CodeSample;
use PhpCsFixer\FixerDefinition\FixerDefinition;
use PhpCsFixer\Tokenizer\Token;
use PhpCsFixer\Tokenizer\Tokens;
/**
 * Replaces single-argument calls to intval/floatval/doubleval/strval/boolval
 * with the equivalent cast operator, e.g. `intval($x)` becomes `(int) $x`.
 *
 * @author Vladimir Reznichenko <kalessil@gmail.com>
 */
final class ModernizeTypesCastingFixer extends AbstractFunctionReferenceFixer
{
/**
 * {@inheritdoc}
 */
public function fix(\SplFileInfo $file, Tokens $tokens)
{
// replacement patterns: function name => [cast token kind, cast text]
static $replacement = array(
'intval' => array(T_INT_CAST, '(int)'),
'floatval' => array(T_DOUBLE_CAST, '(float)'),
'doubleval' => array(T_DOUBLE_CAST, '(float)'),
'strval' => array(T_STRING_CAST, '(string)'),
'boolval' => array(T_BOOL_CAST, '(bool)'),
);
foreach ($replacement as $functionIdentity => $newToken) {
$currIndex = 0;
while (null !== $currIndex) {
// try getting function reference and translate boundaries for humans
$boundaries = $this->find($functionIdentity, $tokens, $currIndex, $tokens->count() - 1);
if (null === $boundaries) {
// no more occurrences of this function: continue with the next one
continue 2;
}
list($functionName, $openParenthesis, $closeParenthesis) = $boundaries;
// advance the search cursor past this call before any early `continue`
$currIndex = $openParenthesis;
// more or fewer than one argument indicates the function is overridden
if (1 !== $this->countArguments($tokens, $openParenthesis, $closeParenthesis)) {
continue;
}
// check if something complex is passed as the argument; if so the
// parentheses must be preserved to keep precedence intact
$countParamTokens = 0;
for ($paramContentIndex = $openParenthesis + 1; $paramContentIndex < $closeParenthesis; ++$paramContentIndex) {
// not a space, means some sensible token
if (!$tokens[$paramContentIndex]->isGivenKind(T_WHITESPACE)) {
++$countParamTokens;
}
}
$preserveParenthesises = $countParamTokens > 1;
// analyse namespace specification (root one or none) and decide what to do
$prevTokenIndex = $tokens->getPrevMeaningfulToken($functionName);
if ($tokens[$prevTokenIndex]->isGivenKind(T_NS_SEPARATOR)) {
// get rid of the leading root-namespace backslash when it is used
$tokens->removeTrailingWhitespace($prevTokenIndex);
$tokens[$prevTokenIndex]->clear();
}
// perform transformation: function name becomes "<cast> "
$replacementSequence = array(
new Token($newToken),
new Token(array(T_WHITESPACE, ' ')),
);
if (!$preserveParenthesises) {
// closing parenthesis removed with leading spaces
$tokens->removeLeadingWhitespace($closeParenthesis);
$tokens[$closeParenthesis]->clear();
// opening parenthesis removed with trailing spaces
$tokens->removeLeadingWhitespace($openParenthesis);
$tokens->removeTrailingWhitespace($openParenthesis);
$tokens[$openParenthesis]->clear();
} else {
// we'll need to provide a space after a casting operator
$tokens->removeTrailingWhitespace($functionName);
}
$tokens->overrideRange($functionName, $functionName, $replacementSequence);
// rewind to the replaced position so nested calls are also transformed
$currIndex = $functionName;
}
}
}
/**
 * {@inheritdoc}
 */
public function getDefinition()
{
return new FixerDefinition(
'Replaces `intval`, `floatval`, `doubleval`, `strval` and `boolval` function calls with according type casting operator.',
array(new CodeSample(
'<?php
$a = intval($b);
$a = floatval($b);
$a = doubleval($b);
$a = strval ($b);
$a = boolval($b);
'),
),
null,
null,
null,
'Risky if any of the functions `intval`, `floatval`, `doubleval`, `strval` or `boolval` are overridden.'
);
}
/**
 * {@inheritdoc}
 */
public function isCandidate(Tokens $tokens)
{
// cheap pre-check: a call site always contains a T_STRING token
return $tokens->isTokenKindFound(T_STRING);
}
}
| mit |
Bionexo/pharos180 | app/controllers/profiles_controller.rb | 1932 | class ProfilesController < ApplicationController
before_action :set_profile, only: [:show, :edit, :update, :destroy]
# GET /profiles
# GET /profiles.json
def index
@profiles = Profile.all
end
# GET /profiles/1
# GET /profiles/1.json
def show
@profile = Profile.find params[:id]
end
# GET /profiles/new
def new
@profile = Profile.new
end
# GET /profiles/1/edit
def edit
end
# POST /profiles
# POST /profiles.json
def create
@profile = Profile.new(profile_params)
respond_to do |format|
if @profile.save
format.html { redirect_to @profile, notice: 'Profile was successfully created.' }
format.json { render :show, status: :created, location: @profile }
else
format.html { render :new }
format.json { render json: @profile.errors, status: :unprocessable_entity }
end
end
end
# PATCH/PUT /profiles/1
# PATCH/PUT /profiles/1.json
def update
respond_to do |format|
if @profile.update(profile_params)
format.html { redirect_to @profile, notice: 'Profile was successfully updated.' }
format.json { render :show, status: :ok, location: @profile }
else
format.html { render :edit }
format.json { render json: @profile.errors, status: :unprocessable_entity }
end
end
end
# DELETE /profiles/1
# DELETE /profiles/1.json
def destroy
@profile.destroy
respond_to do |format|
format.html { redirect_to profiles_url, notice: 'Profile was successfully destroyed.' }
format.json { head :no_content }
end
end
private
# Use callbacks to share common setup or constraints between actions.
def set_profile
@profile = Profile.find(params[:id])
end
# Never trust parameters from the scary internet, only allow the white list through.
def profile_params
params.require(:profile).permit(:name)
end
end
| mit |
allenhsu/FlashlightPlugins | timestamp.bundle/plugin.py | 1580 | #!/usr/bin/python
import sys, urllib, os, time, datetime
def results(parsed, original_query):
    """Build the Flashlight result dict for a unix-timestamp query.

    parsed: dict of parsed query fields; 'timestamp' (seconds since the
        epoch) defaults to the current time when absent.
    original_query: the raw query string (unused here).

    Returns a dict with the result 'title', the rendered 'html' preview
    and the 'run_args' handed to run() when the user presses ENTER.
    """
    timestamp = int(parsed.get('timestamp', time.time()))
    # Interpret the timestamp as UTC for display.
    date = datetime.datetime.utcfromtimestamp(timestamp)
    timestring = date.strftime('%Y-%m-%d %H:%M:%S UTC')
    # Inline stylesheet for the HTML preview pane.
    style = '''
    <style type="text/css">
    * {
        padding: 0;
        margin: 0;
    }
    html, body, body > div {
        margin: 0;
        width: 100%;
        height: 100%;
        font-family: "HelveticaNeue-Light", "Helvetica Neue Light", "Helvetica Neue", Helvetica, Arial, "HiraginoSansGB-W3", "Hiragino Sans GB W3";
        line-height: 1.2;
    }
    h1, h2, h3, h4, h5 {
        font-family: "HelveticaNeue-Light", "Helvetica Neue Light", "Helvetica Neue", Helvetica, Arial, "HiraginoSansGB-W6", "Hiragino Sans GB W6";
    }
    #loading, #error {
        text-align: center;
    }
    #error, #results {
        display: none;
    }
    #results {
        text-align: left;
    }
    h1 {
        font-size: 32px;
        border-bottom: #ddd 1px solid;
        padding: 0px 0px 10px 0px;
        margin: 0px 0px 10px 0px;
        color: #444;
    }
    h3 {
        font-size: 15px;
        color: #888;
    }
    div.content {
        padding: 15px;
    }
    </style>
    '''
    # Title doubles as user feedback; run_args carries the exact string
    # later copied to the clipboard by run().
    return {
        "title": '"%s" (%d) - Press ENTER to copy' % (timestring, timestamp),
        "html": "%s%s" % (style, "<div><div class=\"content\"><h1>%s</h1><h3>%s</h3></div></div>" % (timestamp, timestring)),
        "run_args": ['"%s" (%d)' % (timestring, timestamp)]
    }
def run(string):
    """Copy ``string`` to the macOS clipboard.

    The previous implementation shelled out with
    ``os.system('echo "..." | pbcopy')`` after escaping only double quotes,
    so backticks, ``$`` and backslashes in the text were still interpreted
    by the shell (a shell-injection bug), and ``echo`` appended a stray
    trailing newline. Feeding pbcopy through stdin with an argument list
    avoids the shell entirely.
    """
    import subprocess
    process = subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE)
    process.communicate(string.encode('utf-8'))
| mit |
orocrm/platform | src/Oro/Bundle/ApiBundle/Tests/Unit/Processor/ContextMetadataAccessorTest.php | 2170 | <?php
namespace Oro\Bundle\ApiBundle\Tests\Unit\Processor;
use Oro\Bundle\ApiBundle\Metadata\EntityMetadata;
use Oro\Bundle\ApiBundle\Processor\Context;
use Oro\Bundle\ApiBundle\Processor\ContextMetadataAccessor;
use Oro\Bundle\ApiBundle\Tests\Unit\Fixtures\Entity\Product;
use Oro\Bundle\ApiBundle\Tests\Unit\Fixtures\Entity\User;
use Oro\Bundle\ApiBundle\Tests\Unit\Fixtures\Entity\UserProfile;
class ContextMetadataAccessorTest extends \PHPUnit\Framework\TestCase
{
    /** @var \PHPUnit\Framework\MockObject\MockObject|Context */
    private $context;
    /** @var ContextMetadataAccessor accessor under test, wrapping the mocked context */
    private $metadataAccessor;
    protected function setUp()
    {
        $this->context = $this->createMock(Context::class);
        $this->metadataAccessor = new ContextMetadataAccessor($this->context);
    }
    /**
     * The context metadata is returned when the requested class name equals
     * the context's class name.
     */
    public function testGetMetadataForContextClass()
    {
        $className = User::class;
        $metadata = new EntityMetadata();
        $this->context->expects(self::once())
            ->method('getClassName')
            ->willReturn($className);
        $this->context->expects(self::once())
            ->method('getMetadata')
            ->willReturn($metadata);
        self::assertSame($metadata, $this->metadataAccessor->getMetadata($className));
    }
    /**
     * The context metadata is also returned when the context class is an API
     * resource (UserProfile) while the requested class is the underlying
     * entity (User) — see the test name: resource based on manageable entity.
     */
    public function testGetMetadataForContextClassForCaseWhenApiResourceIsBasedOnManageableEntity()
    {
        $className = User::class;
        $metadata = new EntityMetadata();
        $this->context->expects(self::once())
            ->method('getClassName')
            ->willReturn(UserProfile::class);
        $this->context->expects(self::once())
            ->method('getMetadata')
            ->willReturn($metadata);
        self::assertSame($metadata, $this->metadataAccessor->getMetadata($className));
    }
    /**
     * For an unrelated class no metadata is fetched from the context and
     * null is returned.
     */
    public function testGetMetadataForNotContextClass()
    {
        $this->context->expects(self::once())
            ->method('getClassName')
            ->willReturn(User::class);
        $this->context->expects(self::never())
            ->method('getMetadata');
        self::assertNull($this->metadataAccessor->getMetadata(Product::class));
    }
}
| mit |
npmcomponent/RactiveJS-Ractive | src/render/StringFragment/Section.js | 1410 | define([
'config/types',
'render/shared/Mustache/_Mustache',
'render/shared/updateSection',
'shared/teardown',
'circular'
], function (
types,
Mustache,
updateSection,
teardown,
circular
) {
'use strict';
var StringSection, StringFragment;
circular.push( function () {
StringFragment = circular.StringFragment;
});
StringSection = function ( options ) {
this.type = types.SECTION;
this.fragments = [];
this.length = 0;
Mustache.init( this, options );
};
StringSection.prototype = {
update: Mustache.update,
resolve: Mustache.resolve,
reassign: Mustache.reassign,
teardown: function () {
this.teardownFragments();
teardown( this );
},
teardownFragments: function () {
while ( this.fragments.length ) {
this.fragments.shift().teardown();
}
this.length = 0;
},
bubble: function () {
this.value = this.fragments.join( '' );
this.parentFragment.bubble();
},
render: function ( value ) {
var wrapped;
// with sections, we need to get the fake value if we have a wrapped object
if ( wrapped = this.root._wrapped[ this.keypath ] ) {
value = wrapped.get();
}
updateSection( this, value );
this.parentFragment.bubble();
},
createFragment: function ( options ) {
return new StringFragment( options );
},
toString: function () {
return this.fragments.join( '' );
}
};
return StringSection;
}); | mit |
mmkassem/gitlabhq | spec/services/issues/create_service_spec.rb | 17763 | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Issues::CreateService do
let(:project) { create(:project) }
let(:user) { create(:user) }
describe '#execute' do
let(:issue) { described_class.new(project, user, opts).execute }
let(:assignee) { create(:user) }
let(:milestone) { create(:milestone, project: project) }
context 'when params are valid' do
let(:labels) { create_pair(:label, project: project) }
before do
project.add_maintainer(user)
project.add_maintainer(assignee)
end
let(:opts) do
{ title: 'Awesome issue',
description: 'please fix',
assignee_ids: [assignee.id],
label_ids: labels.map(&:id),
milestone_id: milestone.id,
due_date: Date.tomorrow }
end
it 'creates the issue with the given params' do
expect(issue).to be_persisted
expect(issue.title).to eq('Awesome issue')
expect(issue.assignees).to eq [assignee]
expect(issue.labels).to match_array labels
expect(issue.milestone).to eq milestone
expect(issue.due_date).to eq Date.tomorrow
end
it 'refreshes the number of open issues', :use_clean_rails_memory_store_caching do
expect { issue }.to change { project.open_issues_count }.from(0).to(1)
end
context 'when current user cannot admin issues in the project' do
let(:guest) { create(:user) }
before do
project.add_guest(guest)
end
it 'filters out params that cannot be set without the :admin_issue permission' do
issue = described_class.new(project, guest, opts).execute
expect(issue).to be_persisted
expect(issue.title).to eq('Awesome issue')
expect(issue.description).to eq('please fix')
expect(issue.assignees).to be_empty
expect(issue.labels).to be_empty
expect(issue.milestone).to be_nil
expect(issue.due_date).to be_nil
end
end
it 'creates a pending todo for new assignee' do
attributes = {
project: project,
author: user,
user: assignee,
target_id: issue.id,
target_type: issue.class.name,
action: Todo::ASSIGNED,
state: :pending
}
expect(Todo.where(attributes).count).to eq 1
end
context 'when label belongs to project group' do
let(:group) { create(:group) }
let(:group_labels) { create_pair(:group_label, group: group) }
let(:opts) do
{
title: 'Title',
description: 'Description',
label_ids: group_labels.map(&:id)
}
end
before do
project.update(group: group)
end
it 'assigns group labels' do
expect(issue.labels).to match_array group_labels
end
end
context 'when label belongs to different project' do
let(:label) { create(:label) }
let(:opts) do
{ title: 'Title',
description: 'Description',
label_ids: [label.id] }
end
it 'does not assign label' do
expect(issue.labels).not_to include label
end
end
context 'when labels is nil' do
let(:opts) do
{ title: 'Title',
description: 'Description',
labels: nil }
end
it 'does not assign label' do
expect(issue.labels).to be_empty
end
end
context 'when labels is nil and label_ids is present' do
let(:opts) do
{ title: 'Title',
description: 'Description',
labels: nil,
label_ids: labels.map(&:id) }
end
it 'assigns group labels' do
expect(issue.labels).to match_array labels
end
end
context 'when milestone belongs to different project' do
let(:milestone) { create(:milestone) }
let(:opts) do
{ title: 'Title',
description: 'Description',
milestone_id: milestone.id }
end
it 'does not assign milestone' do
expect(issue.milestone).not_to eq milestone
end
end
context 'when assignee is set' do
let(:opts) do
{ title: 'Title',
description: 'Description',
assignees: [assignee] }
end
it 'invalidates open issues counter for assignees when issue is assigned' do
project.add_maintainer(assignee)
described_class.new(project, user, opts).execute
expect(assignee.assigned_open_issues_count).to eq 1
end
end
context 'when duplicate label titles are given' do
let(:label) { create(:label, project: project) }
let(:opts) do
{ title: 'Title',
description: 'Description',
labels: [label.title, label.title] }
end
it 'assigns the label once' do
expect(issue.labels).to contain_exactly(label)
end
end
it 'executes issue hooks when issue is not confidential' do
opts = { title: 'Title', description: 'Description', confidential: false }
expect(project).to receive(:execute_hooks).with(an_instance_of(Hash), :issue_hooks)
expect(project).to receive(:execute_services).with(an_instance_of(Hash), :issue_hooks)
described_class.new(project, user, opts).execute
end
it 'executes confidential issue hooks when issue is confidential' do
opts = { title: 'Title', description: 'Description', confidential: true }
expect(project).to receive(:execute_hooks).with(an_instance_of(Hash), :confidential_issue_hooks)
expect(project).to receive(:execute_services).with(an_instance_of(Hash), :confidential_issue_hooks)
described_class.new(project, user, opts).execute
end
context 'after_save callback to store_mentions' do
context 'when mentionable attributes change' do
let(:opts) { { title: 'Title', description: "Description with #{user.to_reference}" } }
it 'saves mentions' do
expect_next_instance_of(Issue) do |instance|
expect(instance).to receive(:store_mentions!).and_call_original
end
expect(issue.user_mentions.count).to eq 1
end
end
context 'when save fails' do
let(:opts) { { title: '', label_ids: labels.map(&:id), milestone_id: milestone.id } }
it 'does not call store_mentions' do
expect_next_instance_of(Issue) do |instance|
expect(instance).not_to receive(:store_mentions!).and_call_original
end
expect(issue.valid?).to be false
expect(issue.user_mentions.count).to eq 0
end
end
end
it 'deletes milestone issues count cache' do
expect_next_instance_of(Milestones::IssuesCountService, milestone) do |service|
expect(service).to receive(:delete_cache).and_call_original
end
issue
end
end
context 'issue create service' do
context 'assignees' do
before do
project.add_maintainer(user)
end
it 'removes assignee when user id is invalid' do
opts = { title: 'Title', description: 'Description', assignee_ids: [-1] }
issue = described_class.new(project, user, opts).execute
expect(issue.assignees).to be_empty
end
it 'removes assignee when user id is 0' do
opts = { title: 'Title', description: 'Description', assignee_ids: [0] }
issue = described_class.new(project, user, opts).execute
expect(issue.assignees).to be_empty
end
it 'saves assignee when user id is valid' do
project.add_maintainer(assignee)
opts = { title: 'Title', description: 'Description', assignee_ids: [assignee.id] }
issue = described_class.new(project, user, opts).execute
expect(issue.assignees).to eq([assignee])
end
context "when issuable feature is private" do
before do
project.project_feature.update(issues_access_level: ProjectFeature::PRIVATE,
merge_requests_access_level: ProjectFeature::PRIVATE)
end
levels = [Gitlab::VisibilityLevel::INTERNAL, Gitlab::VisibilityLevel::PUBLIC]
levels.each do |level|
it "removes not authorized assignee when project is #{Gitlab::VisibilityLevel.level_name(level)}" do
project.update(visibility_level: level)
opts = { title: 'Title', description: 'Description', assignee_ids: [assignee.id] }
issue = described_class.new(project, user, opts).execute
expect(issue.assignees).to be_empty
end
end
end
end
end
it_behaves_like 'issuable record that supports quick actions' do
let(:issuable) { described_class.new(project, user, params).execute }
end
context 'Quick actions' do
context 'with assignee and milestone in params and command' do
let(:opts) do
{
assignee_ids: [create(:user).id],
milestone_id: 1,
title: 'Title',
description: %(/assign @#{assignee.username}\n/milestone %"#{milestone.name}")
}
end
before do
project.add_maintainer(user)
project.add_maintainer(assignee)
end
it 'assigns and sets milestone to issuable from command' do
expect(issue).to be_persisted
expect(issue.assignees).to eq([assignee])
expect(issue.milestone).to eq(milestone)
end
end
end
context 'resolving discussions' do
let(:discussion) { create(:diff_note_on_merge_request).to_discussion }
let(:merge_request) { discussion.noteable }
let(:project) { merge_request.source_project }
before do
project.add_maintainer(user)
end
describe 'for a single discussion' do
let(:opts) { { discussion_to_resolve: discussion.id, merge_request_to_resolve_discussions_of: merge_request.iid } }
it 'resolves the discussion' do
described_class.new(project, user, opts).execute
discussion.first_note.reload
expect(discussion.resolved?).to be(true)
end
it 'added a system note to the discussion' do
described_class.new(project, user, opts).execute
reloaded_discussion = MergeRequest.find(merge_request.id).discussions.first
expect(reloaded_discussion.last_note.system).to eq(true)
end
it 'assigns the title and description for the issue' do
issue = described_class.new(project, user, opts).execute
expect(issue.title).not_to be_nil
expect(issue.description).not_to be_nil
end
it 'can set nil explicitly to the title and description' do
issue = described_class.new(project, user,
merge_request_to_resolve_discussions_of: merge_request,
description: nil,
title: nil).execute
expect(issue.description).to be_nil
expect(issue.title).to be_nil
end
end
describe 'for a merge request' do
let(:opts) { { merge_request_to_resolve_discussions_of: merge_request.iid } }
it 'resolves the discussion' do
described_class.new(project, user, opts).execute
discussion.first_note.reload
expect(discussion.resolved?).to be(true)
end
it 'added a system note to the discussion' do
described_class.new(project, user, opts).execute
reloaded_discussion = MergeRequest.find(merge_request.id).discussions.first
expect(reloaded_discussion.last_note.system).to eq(true)
end
it 'assigns the title and description for the issue' do
issue = described_class.new(project, user, opts).execute
expect(issue.title).not_to be_nil
expect(issue.description).not_to be_nil
end
it 'can set nil explicitly to the title and description' do
issue = described_class.new(project, user,
merge_request_to_resolve_discussions_of: merge_request,
description: nil,
title: nil).execute
expect(issue.description).to be_nil
expect(issue.title).to be_nil
end
end
end
context 'checking spam' do
include_context 'includes Spam constants'
let(:title) { 'Legit issue' }
let(:description) { 'please fix' }
let(:opts) do
{
title: title,
description: description,
request: double(:request, env: {})
}
end
subject { described_class.new(project, user, opts) }
before do
stub_feature_flags(allow_possible_spam: false)
end
context 'when reCAPTCHA was verified' do
let(:log_user) { user }
let(:spam_logs) { create_list(:spam_log, 2, user: log_user, title: title) }
let(:target_spam_log) { spam_logs.last }
before do
opts[:recaptcha_verified] = true
opts[:spam_log_id] = target_spam_log.id
expect(Spam::SpamVerdictService).not_to receive(:new)
end
it 'does not mark an issue as spam' do
expect(issue).not_to be_spam
end
it 'creates a valid issue' do
expect(issue).to be_valid
end
it 'does not assign a spam_log to the issue' do
expect(issue.spam_log).to be_nil
end
it 'marks related spam_log as recaptcha_verified' do
expect { issue }.to change { target_spam_log.reload.recaptcha_verified }.from(false).to(true)
end
context 'when spam log does not belong to a user' do
let(:log_user) { create(:user) }
it 'does not mark spam_log as recaptcha_verified' do
expect { issue }.not_to change { target_spam_log.reload.recaptcha_verified }
end
end
end
context 'when reCAPTCHA was not verified' do
before do
expect_next_instance_of(Spam::SpamActionService) do |spam_service|
expect(spam_service).to receive_messages(check_for_spam?: true)
end
end
context 'when SpamVerdictService requires reCAPTCHA' do
before do
expect_next_instance_of(Spam::SpamVerdictService) do |verdict_service|
expect(verdict_service).to receive(:execute).and_return(CONDITIONAL_ALLOW)
end
end
it 'does not mark the issue as spam' do
expect(issue).not_to be_spam
end
it 'marks the issue as needing reCAPTCHA' do
expect(issue.needs_recaptcha?).to be_truthy
end
it 'invalidates the issue' do
expect(issue).to be_invalid
end
it 'creates a new spam_log' do
expect { issue }
.to have_spam_log(title: title, description: description, user_id: user.id, noteable_type: 'Issue')
end
end
context 'when SpamVerdictService disallows creation' do
before do
expect_next_instance_of(Spam::SpamVerdictService) do |verdict_service|
expect(verdict_service).to receive(:execute).and_return(DISALLOW)
end
end
context 'when allow_possible_spam feature flag is false' do
it 'marks the issue as spam' do
expect(issue).to be_spam
end
it 'does not mark the issue as needing reCAPTCHA' do
expect(issue.needs_recaptcha?).to be_falsey
end
it 'invalidates the issue' do
expect(issue).to be_invalid
end
it 'creates a new spam_log' do
expect { issue }
.to have_spam_log(title: title, description: description, user_id: user.id, noteable_type: 'Issue')
end
end
context 'when allow_possible_spam feature flag is true' do
before do
stub_feature_flags(allow_possible_spam: true)
end
it 'does not mark the issue as spam' do
expect(issue).not_to be_spam
end
it 'does not mark the issue as needing reCAPTCHA' do
expect(issue.needs_recaptcha?).to be_falsey
end
it 'creates a valid issue' do
expect(issue).to be_valid
end
it 'creates a new spam_log' do
expect { issue }
.to have_spam_log(title: title, description: description, user_id: user.id, noteable_type: 'Issue')
end
end
end
context 'when the SpamVerdictService allows creation' do
before do
expect_next_instance_of(Spam::SpamVerdictService) do |verdict_service|
expect(verdict_service).to receive(:execute).and_return(ALLOW)
end
end
it 'does not mark an issue as spam' do
expect(issue).not_to be_spam
end
it 'creates a valid issue' do
expect(issue).to be_valid
end
it 'does not assign a spam_log to an issue' do
expect(issue.spam_log).to be_nil
end
end
end
end
end
end
| mit |
gil0mendes/continuum | engine/src/main/java/org/continuum/blocks/BlockLargeHighGrass.java | 1701 | /*
* Copyright 2014-2017 Gil Mendes
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.continuum.blocks;
import org.continuum.utilities.Helper;
import org.lwjgl.util.vector.Vector2f;
import org.lwjgl.util.vector.Vector4f;
/**
 * A large high grass billboard block.
 *
 * Rendered using the {@code BILLBOARD} block form: it is translucent,
 * penetrable, and draws no bounding box of its own.
 */
public class BlockLargeHighGrass extends Block {
    /** Billboard grass is see-through. */
    @Override
    public boolean isBlockTypeTranslucent() {
        return true;
    }
    /**
     * Tints the billboard with the foliage colour derived from the given
     * climate values; the alpha channel is fixed at fully opaque (1.0).
     */
    @Override
    public Vector4f getColorOffsetFor(SIDE side, double temperature, double humidity) {
        Vector4f foliageColor = foliageColorForTemperatureAndHumidity(temperature, humidity);
        return new Vector4f(foliageColor.x, foliageColor.y, foliageColor.z, 1.0f);
    }
    /** Every side samples the texture atlas tile at column 15, row 11. */
    @Override
    public Vector2f getTextureOffsetFor(Block.SIDE side) {
        return Helper.calcOffsetForTextureAt(15, 11);
    }
    /** Entities can move through this block. */
    @Override
    public boolean isPenetrable() {
        return true;
    }
    /** The grass still participates in shadow casting. */
    @Override
    public boolean isCastingShadows() {
        return true;
    }
    /** No selection/bounding box is rendered for this block. */
    @Override
    public boolean shouldRenderBoundingBox() {
        return false;
    }
    /** Rendered with the billboard block form instead of a full cube. */
    @Override
    public BLOCK_FORM getBlockForm() {
        return BLOCK_FORM.BILLBOARD;
    }
}
| mit |
thiagoh/de-animate | src/jquery.de-animate.js | 11093 | /*
* de-animate
* https://github.com/thiagoh/de-animate
*
* Copyright (c) 2015 Thiago Andrade
* Licensed under the MIT license.
*/
(function($) {
// Function from David Walsh: http://davidwalsh.name/css-animation-callback licensed with http://opensource.org/licenses/MIT
var getAnimationEvent = function() {
var t,
el = document.createElement("fakeelement"),
animations = {
'WebkitAnimation': 'webkitAnimationEnd',
'OAnimation': 'oAnimationEnd',
'msAnimation': 'MSAnimationEnd',
'animation': 'animationend'
};
for (t in animations) {
if (el.style[t] !== undefined) {
return animations[t];
}
}
},
getTransitionEvent = function() {
var t,
el = document.createElement("fakeelement"),
transitions = {
'WebkitTransition': 'webkitTransitionEnd',
'OTransition': 'oTransitionEnd',
'msTransition': 'MSTransitionEnd',
'transition': 'transitionend'
};
for (t in transitions) {
if (el.style[t] !== undefined) {
return transitions[t];
}
}
};
var getAnimationPair = function(clazz) {
var cin = clazz,
cout = clazz;
if (clazz.indexOf('In') >= 0) {
cin = clazz;
cout = clazz.replace('In', 'Out');
} else if (clazz.indexOf('Out') >= 0) {
cout = clazz;
cin = clazz.replace('Out', 'In');
}
return {
classIn: cin,
classOut: cout
};
};
var whichEvent = getAnimationEvent() + ' ' + getTransitionEvent();
var _animate = function(animateIn, $el, callback) {
$el.data('deanimate:animatedIn', animateIn);
var classIn = $el.data("deanimate:classIn"),
classOut = $el.data("deanimate:classOut"),
frontclass = $el.data("deanimate:front"),
backclass = $el.data("deanimate:back"),
parallel = $el.data("deanimate:parallel");
if (animateIn) {
$el.find(frontclass)
.addClass(classOut)
.removeClass(classIn);
var outFunction = function() {
var eventCount = $el.data("deanimate:eventCount");
if (eventCount <= 0) {
$el.data("deanimate:eventCount", eventCount++);
$el.find(backclass).css('display', '');
}
$el.find(backclass)
.addClass(classIn)
.removeClass(classOut);
};
if (!parallel) {
$el.one(whichEvent, outFunction);
} else {
outFunction();
}
} else {
$el.find(backclass)
.addClass(classOut)
.removeClass(classIn);
var inFunction = function() {
$el.find(frontclass)
.removeClass(classOut)
.addClass(classIn);
};
if (!parallel) {
$el.one(whichEvent, inFunction);
} else {
inFunction();
}
}
if (animateIn) {
//Providing a nicely wrapped up callback because transform is essentially async
$el.one(whichEvent, function() {
$(this).trigger('deanimate:animatedIn');
$(this).trigger('deanimate:animated');
if (callback !== undefined) {
callback.call(this);
}
});
} else {
//Providing a nicely wrapped up callback because transform is essentially async
$el.one(whichEvent, function() {
$(this).trigger('deanimate:animatedOut');
$(this).trigger('deanimate:animated');
if (callback !== undefined) {
callback.call(this);
}
});
}
};
//https://github.com/nnattawat/flip
$.fn.deAnimate = function(options, callback) {
if (typeof options === 'function') {
//This allows deAnimate to be called for setup with only a callback (default settings)
callback = options;
}
var $el = $(this);
if ($el.length === 1 && $el.data('deanimate:initiated') === true) {
if (options === 'animatedIn') {
return typeof $el.data('deanimate:animatedIn') === 'undefined' ? false : $el.data('deanimate:animatedIn');
} else if (options === 'animatedOut') {
return typeof $el.data('deanimate:animatedIn') === 'undefined' ? false : !$el.data('deanimate:animatedIn');
}
}
return this.each(function() {
var $el = $(this);
if (options === 'toggle') {
callback = callback || $el.data('deanimate:callback');
if ($el.data('deanimate:animatedIn')) {
_animate(false, $el, callback);
} else {
_animate(true, $el, callback);
}
} else if (options === 'destroy') {
$el.removeData('deanimate:initiated');
$el.removeData('deanimate:eventCount');
$el.removeData('deanimate:back');
$el.removeData('deanimate:front');
$el.removeData('deanimate:animatedIn');
$el.removeData('deanimate:classIn');
$el.removeData('deanimate:classOut');
$el.removeData('deanimate:callback');
$el.removeData('deanimate:parallel');
$el.off('click.deanimate');
$el.off('tap.deanimate');
} else if ($el.data('deanimate:initiated') !== true) {
if (!$el.data("deanimate:initiated")) { //Init animated DOM
$el.data("deanimate:initiated", true);
var settings = $.extend({}, $.deAnimate.options, options),
classIn = settings.classIn || 'flipInY',
classOut = settings.classOut,
divs = $el,
frontSelector,
backSelector;
if (typeof classOut === 'undefined') {
var pair = getAnimationPair(classIn);
classIn = pair.classIn;
classOut = pair.classOut;
}
$el.data("deanimate:parallel", settings.parallel);
$el.data("deanimate:classIn", classIn);
$el.data("deanimate:classOut", classOut);
$el.data("deanimate:callback", callback);
if (settings.front === 'auto') {
frontSelector = $el.find('.de-animate-front').length > 0 ? '.de-animate-front' :
$el.find('.front').length > 0 ? '.front' : ':first-child';
} else {
frontSelector = settings.front;
}
if (settings.back === 'auto') {
backSelector = $el.find('.de-animate-back').length > 0 ? '.de-animate-back' :
$el.find('.back').length > 0 ? '.back' : ':nth-child(2)';
} else {
backSelector = settings.back;
}
$el.data("deanimate:front", frontSelector);
$el.data("deanimate:back", backSelector);
divs = $el.find(frontSelector).add(backSelector);
$el.data("deanimate:eventCount", 0);
$el.find(backSelector).css('display', 'none');
divs.addClass('animated');
var speedInSec = settings.speed / 1000 || 0.5;
divs.css('animation-duration', speedInSec + 's');
if (typeof settings.trigger === 'string') {
if (settings.trigger.toLowerCase() === "click") {
$el.on($.fn.tap ? "tap.deanimate" : "click.deanimate", function(event) {
if (!event) {
event = window.event;
}
if ($el.find($(event.target).closest('button, a, input[type="submit"]')).length) {
return;
}
if ($el.data('deanimate:animatedIn')) {
_animate(false, $el, callback);
} else {
_animate(true, $el, callback);
}
});
} else if (settings.trigger.toLowerCase() === "hover") {
var performAnimation = function() {
$el.unbind('mouseleave', performDeanimation);
_animate(true, $el, callback);
setTimeout(function() {
$el.bind('mouseleave', performDeanimation);
if (!$el.is(":hover")) {
_animate(false, $el, callback);
}
}, (settings.speed + 150));
};
var performDeanimation = function() {
_animate(false, $el, callback);
};
$el.mouseenter(performAnimation);
$el.mouseleave(performDeanimation);
}
}
}
}
return this;
});
};
// Static method.
$.deAnimate = function(options) {
// Override default options with passed-in options.
options = $.extend({}, $.deAnimate.options, options);
// Return something awesome.
return 'awesome' + options.punctuation;
};
// Static method default options.
$.deAnimate.options = {
speed: 500,
parallel: true,
front: 'auto',
back: 'auto'
};
// Custom selector.
$.expr[':'].deAnimate = function(elem) {
// Is this element awesome?
return $(elem).text().indexOf('awesome') !== -1;
};
}(jQuery)); | mit |
godrose/Attest | src/Attest.Fake.Setup/MethodCallbackVisitorHelper.cs | 1140 | using System;
using Attest.Fake.Setup.Contracts;
namespace Attest.Fake.Setup
{
    /// <summary>
    /// Helper methods shared by the method-callback visitor implementations.
    /// Each visit translates a configured callback into a thrown exception
    /// which the invoking fake-setup machinery is expected to interpret.
    /// </summary>
    internal static class MethodCallbackVisitorHelper
    {
        /// <summary>
        /// Rethrows the exception configured on an error callback.
        /// </summary>
        internal static void VisitError(IThrowException onErrorCallback)
        {
            throw onErrorCallback.Exception;
        }
        /// <summary>
        /// Signals progress by throwing a ProgressMessageException carrying
        /// the progress messages and a continuation that passes the finish
        /// callback to <paramref name="callbackAcceptor"/> when one is set.
        /// </summary>
        internal static void VisitProgress<TCallback>(IProgressableProcessFinished<TCallback> progressCallback,
            Action<TCallback> callbackAcceptor)
        {
            throw new ProgressMessageException(progressCallback.ProgressMessages,
                () =>
                {
                    if (progressCallback.FinishCallback != null)
                    {
                        callbackAcceptor(progressCallback.FinishCallback);
                    }
                });
        }
        /// <summary>
        /// Signals that the call was cancelled.
        /// </summary>
        internal static void VisitCancel()
        {
            throw new CancelCallbackException();
        }
        /// <summary>
        /// Signals that no callback was configured for the call.
        /// </summary>
        internal static void VisitWithout()
        {
            throw new WithoutCallbackException();
        }
    }
} | mit |
euangoddard/name-that-cheese | tools/tasks/project/scss-lint.ts | 1192 | import * as gulp from 'gulp';
import * as gulpLoadPlugins from 'gulp-load-plugins';
import * as merge from 'merge-stream';
import * as reporter from 'postcss-reporter';
import * as stylelint from 'stylelint';
import * as doiuse from 'doiuse';
import * as colorguard from 'colorguard';
import {join} from 'path';
import {APP_SRC, BROWSER_LIST, DEPENDENCIES, ENV} from '../../config';
const plugins = <any>gulpLoadPlugins();
const isProd = ENV === 'prod';
const processors = [
doiuse({
browsers: BROWSER_LIST,
}),
colorguard(),
stylelint(),
reporter({clearMessages: true})
];
/**
 * Lints the app's component stylesheets (everything under APP_SRC except
 * the shared assets folder) through the configured PostCSS processors.
 * In production builds `gulp-cached` restricts the stream to changed files.
 */
function lintComponentScss() {
  return gulp.src([
    join(APP_SRC, '**', '*.scss'),
    '!' + join(APP_SRC, 'assets', '**', '*.scss')
  ])
    .pipe(isProd ? plugins.cached('css-lint') : plugins.util.noop())
    .pipe(plugins.postcss(processors));
}
/**
 * Lints the non-vendor .scss files declared in DEPENDENCIES.
 * Uses a separate gulp-cached namespace ('scss-lint') from the component
 * stream so the two caches do not interfere.
 */
function lintExternalScss() {
  return gulp.src(getExternalScss().map(r => r.src))
    .pipe(isProd ? plugins.cached('scss-lint') : plugins.util.noop())
    .pipe(plugins.postcss(processors));
}
/**
 * Returns the DEPENDENCIES entries whose source is a .scss file and which
 * are not flagged as vendor files.
 */
function getExternalScss() {
  return DEPENDENCIES.filter(d => /\.scss/.test(d.src) && !d.vendor);
}
export = () => merge(lintComponentScss(), lintExternalScss());
| mit |
arhframe/arhframe | arhframe/eden/eden/postgre/delete.php | 1464 | <?php //-->
/*
* This file is part of the Eden package.
* (c) 2011-2012 Openovate Labs
*
* Copyright and license information can be found at LICENSE.txt
* distributed with this package.
*/
/**
* Generates delete query string syntax
*
* @package Eden
* @category sql
* @author Christian Blanquera cblanquera@openovate.com
*/
class eden_postgre_delete extends Eden_Sql_Delete
{
/* Constants
-------------------------------*/
/* Public Properties
-------------------------------*/
/* Protected Properties
-------------------------------*/
protected $_table = NULL;
protected $_where = array();
/* Private Properties
-------------------------------*/
/* Magic
-------------------------------*/
public static function i()
{
return self::_getMultiple(__CLASS__);
}
public function __construct($table = NULL)
{
if (is_string($table)) {
$this->setTable($table);
}
}
/* Public Methods
-------------------------------*/
/**
* Returns the string version of the query
*
* @return string
* @notes returns the query based on the registry
*/
public function getQuery()
{
return 'DELETE FROM "'. $this->_table . '" WHERE '. implode(' AND ', $this->_where).';';
}
/* Protected Methods
-------------------------------*/
/* Private Methods
-------------------------------*/
}
| mit |
ryankanno/py-configurator | py_configurator/backends/__init__.py | 274 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .ini import IniProviderBackend
import os
def get_provider(path):
file_name, file_ext = os.path.splitext(path)
return {
'.ini': IniProviderBackend
}.get(file_ext.lower())(path)
# vim: filetype=python
| mit |
fellipe6900/origem | application/controllers/Usuario.php | 3225 | <?php
defined('BASEPATH') OR exit('No direct script access allowed');
class Usuario extends CI_Controller {
function __construct() {
parent::__construct();
$this->load->model('Usuario_m');
empty($_SESSION) ? session_start() : '';
login_necessario();
}
public function index() {
$data['usuario'] = $this->Usuario_m->listar();
$this->load->view('usuario/lista', $data);
}
public function form() {
$id = $this->uri->segment(3);
if (empty($id)) {
$data['acao'] = 'inserir';
$this->load->view('usuario/form', $data);
} else {
$usuario = $this->Usuario_m->listar($id);
$data['usuario'] = $usuario[0];
$data['acao'] = 'editar';
$data['id'] = $id;
$this->load->view('usuario/form', $data);
}
}
public function inserir() {
$usuario = new Usuario_m();
$usuario->id = null;
$usuario->nome = $this->input->post('nome');
$usuario->login = $this->input->post('login');
$dados = array(
'id' => $usuario->id,
'nome' => $usuario->nome,
'login' => $usuario->login
);
$this->form_validation->set_rules('nome', 'Nome', 'trim|required');
$this->form_validation->set_rules('login', 'Login', 'trim|required');
$this->form_validation->set_error_delimiters('<div class="erro">', '</div>');
if ($this->form_validation->run() == TRUE) {
if ($this->Usuario_m->inserir($dados)) {
redirect(base_url('usuario/?type=sucesso'), 'location');
} else {
redirect(base_url('usuario/?type=erro'), 'location');
}
} else {
$data['acao'] = 'inserir';
$this->load->view('usuario/form', $data);
}
}
public function editar() {
$usuario = new Usuario_m();
$usuario->id = $this->input->post('id');
$usuario->nome = $this->input->post('nome');
$usuario->login = $this->input->post('login');
$this->form_validation->set_rules('nome', 'Nome', 'trim|required');
$this->form_validation->set_rules('login', 'Login', 'trim|required');
if ($this->form_validation->run() == TRUE) {
$this->Usuario_m->editar($usuario);
redirect(base_url('usuario/?type=sucesso'), 'location');
} else {
redirect(base_url('usuario/?type=erro'), 'location');
}
}
public function deletar() {
$id = $this->uri->segment(3);
if ($this->Usuario_m->deletar($id)) {
redirect(base_url('usuario/?type=sucesso'), 'location');
} else {
redirect(base_url('usuario/?type=erro'), 'location');
}
}
public function reset_password() {
$id = $this->uri->segment(3);
if ($this->Usuario_m->reset_password($id)) {
redirect(base_url('usuario/?type=sucesso'), 'location');
} else {
redirect(base_url('usuario/?type=erro'), 'location');
}
}
}
| mit |
yogeshchaudhari16991/VigenereCipher | UnknownLanguageUnknwownKeyVigenereCipher/VigenereBreaker.java | 4509 | import java.util.*;
public class VigenereBreaker {
public String sliceString(String message, int whichSlice, int totalSlices) {
StringBuilder slicedMessage = new StringBuilder();
for(int i=whichSlice; i<message.length(); i+= totalSlices)
{
slicedMessage.append(message.charAt(i));
}
return slicedMessage.toString();
}
public int[] tryKeyLength(String encrypted, int klength, char mostCommon) {
int[] key = new int[klength];
CaesarCracker cc = new CaesarCracker(mostCommon);
for(int i=0; i<klength; i++)
{
String slice = sliceString(encrypted, i, klength);
key[i] = cc.getKey(slice);
}
return key;
}
public HashSet<String> readDictionary(String dictionary){
HashSet<String> hs = new HashSet<String>();
for(String word : dictionary.split("\\W"))
{
if(! hs.contains(word.toLowerCase()))
{
hs.add(word.toLowerCase());
}
}
return hs;
}
public int countWords(String message, HashSet<String> dictionary){
int count = 0;
for(String word : message.split("\\W"))
{
if(dictionary.contains(word))
{
count++;
}
}
return count;
}
public String breakForLanguage(String encMessage, HashSet<String> dictionary){
int maxNumOfWords = 0;
String message= "";
char mostCommChar=mostCommonCharIn(dictionary);
for(int i = 1; i<encMessage.length(); i++)
{
int[] key = tryKeyLength(encMessage, i, mostCommChar);
VigenereCipher vc = new VigenereCipher(key);
String decryptedMessage = vc.decrypt(encMessage);
int numOfWords = countWords(decryptedMessage, dictionary);
if(numOfWords > maxNumOfWords)
{
maxNumOfWords = numOfWords;
message = decryptedMessage;
}
}
return message;
}
public char mostCommonCharIn(HashSet<String> dictionary){
HashMap<Character, Integer> count = new HashMap<Character, Integer>();
char maxChar = '0';
for(String word : dictionary)
{
word = word.toLowerCase();
for(char ch : word.toCharArray())
{
if(count.containsKey(ch))
{
int charCount = count.get(ch);
count.put(ch, ++charCount);
}
else
{
count.put(ch, 1);
}
}
}
int max = 0;
for(char ch : count.keySet())
{
if(count.get(ch) > max)
{
max = count.get(ch);
maxChar = ch;
}
}
return maxChar;
}
public void breakForAllLanguages(String encMessage, HashMap<String, HashSet<String>> languageMap){
int maxNumOfWords = 0;
String messageLanguage = "", message = "";
for(String languageName : languageMap.keySet())
{
HashSet<String> dictionary = languageMap.get(languageName);
String decryptedMessage = breakForLanguage(encMessage, dictionary);
int numOfWords = countWords(decryptedMessage, dictionary);
if(numOfWords > maxNumOfWords)
{
maxNumOfWords = numOfWords;
messageLanguage = languageName;
message = decryptedMessage;
}
}
System.out.println("Message Language: " + messageLanguage + ",\nMessage: " + message);
}
public String readMessage(){
String message = "";
//write your message here
return message;
}
public HashSet<String> readDictionary() {
String dictionary = "";
//write your dictionary here
HashSet<String> dictionarySet = readDictionary(dictionary);
return dictionarySet;
}
public void breakVigenere () {
String encMessage = readMessage();
HashMap<String, HashSet<String>> languageMap = new HashMap<String, HashSet<String>>();
//Number of dictionaries'
languageMap.put("English", readDictionary());
languageMap.put("Spanish", readDictionary());
breakForAllLanguages(encMessage, languageMap);
}
}
| mit |
stanmihai4/json | test/src/unit-msgpack.cpp | 55367 | /*
__ _____ _____ _____
__| | __| | | | JSON for Modern C++ (test suite)
| | |__ | | | | | | version 2.1.0
|_____|_____|_____|_|___| https://github.com/nlohmann/json
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright (c) 2013-2017 Niels Lohmann <http://nlohmann.me>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "catch.hpp"
#include "json.hpp"
using nlohmann::json;
#include <fstream>
TEST_CASE("MessagePack")
{
SECTION("individual values")
{
SECTION("discarded")
{
// discarded values are not serialized
json j = json::value_t::discarded;
const auto result = json::to_msgpack(j);
CHECK(result.empty());
}
SECTION("null")
{
json j = nullptr;
std::vector<uint8_t> expected = {0xc0};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("boolean")
{
SECTION("true")
{
json j = true;
std::vector<uint8_t> expected = {0xc3};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("false")
{
json j = false;
std::vector<uint8_t> expected = {0xc2};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("number")
{
SECTION("signed")
{
SECTION("-32..-1 (negative fixnum)")
{
for (auto i = -32; i <= -1; ++i)
{
CAPTURE(i);
// create JSON value with integer number
json j = i;
// check type
CHECK(j.is_number_integer());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(static_cast<uint8_t>(i));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 1);
// check individual bytes
CHECK(static_cast<int8_t>(result[0]) == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("0..127 (positive fixnum)")
{
for (size_t i = 0; i <= 127; ++i)
{
CAPTURE(i);
// create JSON value with integer number
json j = -1;
j.get_ref<json::number_integer_t&>() = static_cast<json::number_integer_t>(i);
// check type
CHECK(j.is_number_integer());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(static_cast<uint8_t>(i));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 1);
// check individual bytes
CHECK(result[0] == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("128..255 (int 8)")
{
for (size_t i = 128; i <= 255; ++i)
{
CAPTURE(i);
// create JSON value with integer number
json j = -1;
j.get_ref<json::number_integer_t&>() = static_cast<json::number_integer_t>(i);
// check type
CHECK(j.is_number_integer());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xcc);
expected.push_back(static_cast<uint8_t>(i));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 2);
// check individual bytes
CHECK(result[0] == 0xcc);
uint8_t restored = static_cast<uint8_t>(result[1]);
CHECK(restored == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("256..65535 (int 16)")
{
for (size_t i = 256; i <= 65535; ++i)
{
CAPTURE(i);
// create JSON value with integer number
json j = -1;
j.get_ref<json::number_integer_t&>() = static_cast<json::number_integer_t>(i);
// check type
CHECK(j.is_number_integer());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xcd);
expected.push_back(static_cast<uint8_t>((i >> 8) & 0xff));
expected.push_back(static_cast<uint8_t>(i & 0xff));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 3);
// check individual bytes
CHECK(result[0] == 0xcd);
uint16_t restored = static_cast<uint8_t>(result[1]) * 256 + static_cast<uint8_t>(result[2]);
CHECK(restored == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("65536..4294967295 (int 32)")
{
for (uint32_t i :
{
65536u, 77777u, 1048576u, 4294967295u
})
{
CAPTURE(i);
// create JSON value with integer number
json j = -1;
j.get_ref<json::number_integer_t&>() = static_cast<json::number_integer_t>(i);
// check type
CHECK(j.is_number_integer());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xce);
expected.push_back(static_cast<uint8_t>((i >> 24) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 16) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 8) & 0xff));
expected.push_back(static_cast<uint8_t>(i & 0xff));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 5);
// check individual bytes
CHECK(result[0] == 0xce);
uint32_t restored = static_cast<uint32_t>((static_cast<uint32_t>(result[1]) << 030) +
(static_cast<uint32_t>(result[2]) << 020) +
(static_cast<uint32_t>(result[3]) << 010) +
static_cast<uint32_t>(result[4]));
CHECK(restored == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("4294967296..9223372036854775807 (int 64)")
{
for (uint64_t i :
{
4294967296lu, 9223372036854775807lu
})
{
CAPTURE(i);
// create JSON value with integer number
json j = -1;
j.get_ref<json::number_integer_t&>() = static_cast<json::number_integer_t>(i);
// check type
CHECK(j.is_number_integer());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xcf);
expected.push_back(static_cast<uint8_t>((i >> 070) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 060) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 050) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 040) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 030) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 020) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 010) & 0xff));
expected.push_back(static_cast<uint8_t>(i & 0xff));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 9);
// check individual bytes
CHECK(result[0] == 0xcf);
uint64_t restored = static_cast<uint64_t>((static_cast<uint64_t>(result[1]) << 070) +
(static_cast<uint64_t>(result[2]) << 060) +
(static_cast<uint64_t>(result[3]) << 050) +
(static_cast<uint64_t>(result[4]) << 040) +
(static_cast<uint64_t>(result[5]) << 030) +
(static_cast<uint64_t>(result[6]) << 020) +
(static_cast<uint64_t>(result[7]) << 010) +
static_cast<uint64_t>(result[8]));
CHECK(restored == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("-128..-33 (int 8)")
{
for (auto i = -128; i <= -33; ++i)
{
CAPTURE(i);
// create JSON value with integer number
json j = i;
// check type
CHECK(j.is_number_integer());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xd0);
expected.push_back(static_cast<uint8_t>(i));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 2);
// check individual bytes
CHECK(result[0] == 0xd0);
CHECK(static_cast<int8_t>(result[1]) == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("-9263 (int 16)")
{
json j = -9263;
std::vector<uint8_t> expected = {0xd1, 0xdb, 0xd1};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
int16_t restored = (result[1] << 8) + result[2];
CHECK(restored == -9263);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("-32768..-129 (int 16)")
{
for (int16_t i = -32768; i <= -129; ++i)
{
CAPTURE(i);
// create JSON value with integer number
json j = i;
// check type
CHECK(j.is_number_integer());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xd1);
expected.push_back(static_cast<uint8_t>((i >> 8) & 0xff));
expected.push_back(static_cast<uint8_t>(i & 0xff));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 3);
// check individual bytes
CHECK(result[0] == 0xd1);
int16_t restored = (result[1] << 8) + result[2];
CHECK(restored == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("-32769..-2147483648")
{
std::vector<int32_t> numbers;
numbers.push_back(-32769);
numbers.push_back(-65536);
numbers.push_back(-77777);
numbers.push_back(-1048576);
numbers.push_back(-2147483648);
for (auto i : numbers)
{
CAPTURE(i);
// create JSON value with integer number
json j = i;
// check type
CHECK(j.is_number_integer());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xd2);
expected.push_back(static_cast<uint8_t>((i >> 24) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 16) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 8) & 0xff));
expected.push_back(static_cast<uint8_t>(i & 0xff));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 5);
// check individual bytes
CHECK(result[0] == 0xd2);
uint32_t restored = static_cast<uint32_t>((static_cast<uint32_t>(result[1]) << 030) +
(static_cast<uint32_t>(result[2]) << 020) +
(static_cast<uint32_t>(result[3]) << 010) +
static_cast<uint32_t>(result[4]));
CHECK(restored == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("-9223372036854775808..-2147483649 (int 64)")
{
std::vector<int64_t> numbers;
numbers.push_back(INT64_MIN);
numbers.push_back(-2147483649ll);
for (auto i : numbers)
{
CAPTURE(i);
// create JSON value with unsigned integer number
json j = i;
// check type
CHECK(j.is_number_integer());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xd3);
expected.push_back(static_cast<uint8_t>((i >> 070) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 060) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 050) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 040) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 030) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 020) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 010) & 0xff));
expected.push_back(static_cast<uint8_t>(i & 0xff));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 9);
// check individual bytes
CHECK(result[0] == 0xd3);
int64_t restored = static_cast<int64_t>((static_cast<int64_t>(result[1]) << 070) +
(static_cast<int64_t>(result[2]) << 060) +
(static_cast<int64_t>(result[3]) << 050) +
(static_cast<int64_t>(result[4]) << 040) +
(static_cast<int64_t>(result[5]) << 030) +
(static_cast<int64_t>(result[6]) << 020) +
(static_cast<int64_t>(result[7]) << 010) +
static_cast<int64_t>(result[8]));
CHECK(restored == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
}
SECTION("unsigned")
{
SECTION("0..127 (positive fixnum)")
{
for (size_t i = 0; i <= 127; ++i)
{
CAPTURE(i);
// create JSON value with unsigned integer number
json j = i;
// check type
CHECK(j.is_number_unsigned());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(static_cast<uint8_t>(i));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 1);
// check individual bytes
CHECK(result[0] == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("128..255 (uint 8)")
{
for (size_t i = 128; i <= 255; ++i)
{
CAPTURE(i);
// create JSON value with unsigned integer number
json j = i;
// check type
CHECK(j.is_number_unsigned());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xcc);
expected.push_back(static_cast<uint8_t>(i));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 2);
// check individual bytes
CHECK(result[0] == 0xcc);
uint8_t restored = static_cast<uint8_t>(result[1]);
CHECK(restored == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("256..65535 (uint 16)")
{
for (size_t i = 256; i <= 65535; ++i)
{
CAPTURE(i);
// create JSON value with unsigned integer number
json j = i;
// check type
CHECK(j.is_number_unsigned());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xcd);
expected.push_back(static_cast<uint8_t>((i >> 8) & 0xff));
expected.push_back(static_cast<uint8_t>(i & 0xff));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 3);
// check individual bytes
CHECK(result[0] == 0xcd);
uint16_t restored = static_cast<uint8_t>(result[1]) * 256 + static_cast<uint8_t>(result[2]);
CHECK(restored == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("65536..4294967295 (uint 32)")
{
for (uint32_t i :
{
65536u, 77777u, 1048576u, 4294967295u
})
{
CAPTURE(i);
// create JSON value with unsigned integer number
json j = i;
// check type
CHECK(j.is_number_unsigned());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xce);
expected.push_back(static_cast<uint8_t>((i >> 24) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 16) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 8) & 0xff));
expected.push_back(static_cast<uint8_t>(i & 0xff));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 5);
// check individual bytes
CHECK(result[0] == 0xce);
uint32_t restored = static_cast<uint32_t>((static_cast<uint32_t>(result[1]) << 030) +
(static_cast<uint32_t>(result[2]) << 020) +
(static_cast<uint32_t>(result[3]) << 010) +
static_cast<uint32_t>(result[4]));
CHECK(restored == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("4294967296..18446744073709551615 (uint 64)")
{
for (uint64_t i :
{
4294967296lu, 18446744073709551615lu
})
{
CAPTURE(i);
// create JSON value with unsigned integer number
json j = i;
// check type
CHECK(j.is_number_unsigned());
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xcf);
expected.push_back(static_cast<uint8_t>((i >> 070) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 060) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 050) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 040) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 030) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 020) & 0xff));
expected.push_back(static_cast<uint8_t>((i >> 010) & 0xff));
expected.push_back(static_cast<uint8_t>(i & 0xff));
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == 9);
// check individual bytes
CHECK(result[0] == 0xcf);
uint64_t restored = static_cast<uint64_t>((static_cast<uint64_t>(result[1]) << 070) +
(static_cast<uint64_t>(result[2]) << 060) +
(static_cast<uint64_t>(result[3]) << 050) +
(static_cast<uint64_t>(result[4]) << 040) +
(static_cast<uint64_t>(result[5]) << 030) +
(static_cast<uint64_t>(result[6]) << 020) +
(static_cast<uint64_t>(result[7]) << 010) +
static_cast<uint64_t>(result[8]));
CHECK(restored == i);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
}
SECTION("float")
{
SECTION("3.1415925")
{
double v = 3.1415925;
json j = v;
std::vector<uint8_t> expected =
{
0xcb, 0x40, 0x09, 0x21, 0xfb, 0x3f, 0xa6, 0xde, 0xfc
};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// restore value (reverse array for endianess)
double restored;
std::reverse(expected.begin(), expected.end());
memcpy(&restored, expected.data(), sizeof(double));
CHECK(restored == v);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
}
SECTION("string")
{
SECTION("N = 0..31")
{
// explicitly enumerate the first byte for all 32 strings
const std::vector<uint8_t> first_bytes =
{
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8,
0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1,
0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba,
0xbb, 0xbc, 0xbd, 0xbe, 0xbf
};
for (size_t N = 0; N < first_bytes.size(); ++N)
{
CAPTURE(N);
// create JSON value with string containing of N * 'x'
const auto s = std::string(N, 'x');
json j = s;
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(first_bytes[N]);
for (size_t i = 0; i < N; ++i)
{
expected.push_back('x');
}
// check first byte
CHECK((first_bytes[N] & 0x1f) == N);
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == N + 1);
// check that no null byte is appended
if (N > 0)
{
CHECK(result.back() != '\x00');
}
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("N = 32..255")
{
for (size_t N = 32; N <= 255; ++N)
{
CAPTURE(N);
// create JSON value with string containing of N * 'x'
const auto s = std::string(N, 'x');
json j = s;
// create expected byte vector
std::vector<uint8_t> expected;
expected.push_back(0xd9);
expected.push_back(static_cast<uint8_t>(N));
for (size_t i = 0; i < N; ++i)
{
expected.push_back('x');
}
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == N + 2);
// check that no null byte is appended
CHECK(result.back() != '\x00');
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("N = 256..65535")
{
for (size_t N :
{
256u, 999u, 1025u, 3333u, 2048u, 65535u
})
{
CAPTURE(N);
// create JSON value with string containing of N * 'x'
const auto s = std::string(N, 'x');
json j = s;
// create expected byte vector (hack: create string first)
std::vector<uint8_t> expected(N, 'x');
// reverse order of commands, because we insert at begin()
expected.insert(expected.begin(), static_cast<uint8_t>(N & 0xff));
expected.insert(expected.begin(), static_cast<uint8_t>((N >> 8) & 0xff));
expected.insert(expected.begin(), 0xda);
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == N + 3);
// check that no null byte is appended
CHECK(result.back() != '\x00');
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("N = 65536..4294967295")
{
for (size_t N :
{
65536u, 77777u, 1048576u
})
{
CAPTURE(N);
// create JSON value with string containing of N * 'x'
const auto s = std::string(N, 'x');
json j = s;
// create expected byte vector (hack: create string first)
std::vector<uint8_t> expected(N, 'x');
// reverse order of commands, because we insert at begin()
expected.insert(expected.begin(), static_cast<uint8_t>(N & 0xff));
expected.insert(expected.begin(), static_cast<uint8_t>((N >> 8) & 0xff));
expected.insert(expected.begin(), static_cast<uint8_t>((N >> 16) & 0xff));
expected.insert(expected.begin(), static_cast<uint8_t>((N >> 24) & 0xff));
expected.insert(expected.begin(), 0xdb);
// compare result + size
const auto result = json::to_msgpack(j);
CHECK(result == expected);
CHECK(result.size() == N + 5);
// check that no null byte is appended
CHECK(result.back() != '\x00');
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
}
SECTION("array")
{
SECTION("empty")
{
json j = json::array();
std::vector<uint8_t> expected = {0x90};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("[null]")
{
json j = {nullptr};
std::vector<uint8_t> expected = {0x91, 0xc0};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("[1,2,3,4,5]")
{
json j = json::parse("[1,2,3,4,5]");
std::vector<uint8_t> expected = {0x95, 0x01, 0x02, 0x03, 0x04, 0x05};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("[[[[]]]]")
{
json j = json::parse("[[[[]]]]");
std::vector<uint8_t> expected = {0x91, 0x91, 0x91, 0x90};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("array 16")
{
json j(16, nullptr);
std::vector<uint8_t> expected(j.size() + 3, 0xc0); // all null
expected[0] = 0xdc; // array 16
expected[1] = 0x00; // size (0x0010), byte 0
expected[2] = 0x10; // size (0x0010), byte 1
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("array 32")
{
json j(65536, nullptr);
std::vector<uint8_t> expected(j.size() + 5, 0xc0); // all null
expected[0] = 0xdd; // array 32
expected[1] = 0x00; // size (0x00100000), byte 0
expected[2] = 0x01; // size (0x00100000), byte 1
expected[3] = 0x00; // size (0x00100000), byte 2
expected[4] = 0x00; // size (0x00100000), byte 3
const auto result = json::to_msgpack(j);
//CHECK(result == expected);
CHECK(result.size() == expected.size());
for (size_t i = 0; i < expected.size(); ++i)
{
CAPTURE(i);
CHECK(result[i] == expected[i]);
}
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
SECTION("object")
{
SECTION("empty")
{
json j = json::object();
std::vector<uint8_t> expected = {0x80};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("{\"\":null}")
{
json j = {{"", nullptr}};
std::vector<uint8_t> expected = {0x81, 0xa0, 0xc0};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("{\"a\": {\"b\": {\"c\": {}}}}")
{
json j = json::parse("{\"a\": {\"b\": {\"c\": {}}}}");
std::vector<uint8_t> expected =
{
0x81, 0xa1, 0x61, 0x81, 0xa1, 0x62, 0x81, 0xa1, 0x63, 0x80
};
const auto result = json::to_msgpack(j);
CHECK(result == expected);
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("map 16")
{
json j = R"({"00": null, "01": null, "02": null, "03": null,
"04": null, "05": null, "06": null, "07": null,
"08": null, "09": null, "10": null, "11": null,
"12": null, "13": null, "14": null, "15": null})"_json;
const auto result = json::to_msgpack(j);
// Checking against an expected vector byte by byte is
// difficult, because no assumption on the order of key/value
// pairs are made. We therefore only check the prefix (type and
// size and the overall size. The rest is then handled in the
// roundtrip check.
CHECK(result.size() == 67); // 1 type, 2 size, 16*4 content
CHECK(result[0] == 0xde); // map 16
CHECK(result[1] == 0x00); // byte 0 of size (0x0010)
CHECK(result[2] == 0x10); // byte 1 of size (0x0010)
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
SECTION("map 32")
{
json j;
for (auto i = 0; i < 65536; ++i)
{
// format i to a fixed width of 5
// each entry will need 7 bytes: 6 for fixstr, 1 for null
std::stringstream ss;
ss << std::setw(5) << std::setfill('0') << i;
j.emplace(ss.str(), nullptr);
}
const auto result = json::to_msgpack(j);
// Checking against an expected vector byte by byte is
// difficult, because no assumption on the order of key/value
// pairs are made. We therefore only check the prefix (type and
// size and the overall size. The rest is then handled in the
// roundtrip check.
CHECK(result.size() == 458757); // 1 type, 4 size, 65536*7 content
CHECK(result[0] == 0xdf); // map 32
CHECK(result[1] == 0x00); // byte 0 of size (0x00010000)
CHECK(result[2] == 0x01); // byte 1 of size (0x00010000)
CHECK(result[3] == 0x00); // byte 2 of size (0x00010000)
CHECK(result[4] == 0x00); // byte 3 of size (0x00010000)
// roundtrip
CHECK(json::from_msgpack(result) == j);
}
}
}
SECTION("from float32")
{
auto given = std::vector<uint8_t>({0xca, 0x41, 0xc8, 0x00, 0x01});
json j = json::from_msgpack(given);
CHECK(j.get<double>() == Approx(25.0000019073486));
}
}
// use this testcase outside [hide] to run it with Valgrind
// (a single small file keeps the Valgrind run short while still exercising
// the full JSON-parse -> MessagePack-decode -> compare pipeline)
TEST_CASE("single MessagePack roundtrip")
{
    SECTION("sample.json")
    {
        std::string filename = "test/data/json_testsuite/sample.json";
        // parse JSON file
        std::ifstream f_json(filename);
        json j1 = json::parse(f_json);
        // parse MessagePack file (binary mode; the .msgpack companion file
        // was produced from the same JSON document by an external encoder)
        std::ifstream f_msgpack(filename + ".msgpack", std::ios::binary);
        std::vector<uint8_t> packed((std::istreambuf_iterator<char>(f_msgpack)),
                                    std::istreambuf_iterator<char>());
        json j2;
        CHECK_NOTHROW(j2 = json::from_msgpack(packed));
        // compare parsed JSON values
        CHECK(j1 == j2);
    }
}
// Data-driven roundtrip suite: for every listed JSON file there is a
// sibling <name>.msgpack file; decoding it must yield the same value as
// parsing the JSON directly. Hidden ([hide]) because of its runtime cost.
TEST_CASE("MessagePack roundtrips", "[hide]")
{
    SECTION("input from msgpack-python")
    {
        // Commented-out entries hold numbers outside the ranges this
        // implementation roundtrips exactly (e.g. integers beyond 64 bit).
        for (std::string filename :
                {
                    "test/data/json_nlohmann_tests/all_unicode.json",
                    "test/data/json.org/1.json",
                    "test/data/json.org/2.json",
                    "test/data/json.org/3.json",
                    "test/data/json.org/4.json",
                    "test/data/json.org/5.json",
                    "test/data/json_roundtrip/roundtrip01.json",
                    "test/data/json_roundtrip/roundtrip02.json",
                    "test/data/json_roundtrip/roundtrip03.json",
                    "test/data/json_roundtrip/roundtrip04.json",
                    "test/data/json_roundtrip/roundtrip05.json",
                    "test/data/json_roundtrip/roundtrip06.json",
                    "test/data/json_roundtrip/roundtrip07.json",
                    "test/data/json_roundtrip/roundtrip08.json",
                    "test/data/json_roundtrip/roundtrip09.json",
                    "test/data/json_roundtrip/roundtrip10.json",
                    "test/data/json_roundtrip/roundtrip11.json",
                    "test/data/json_roundtrip/roundtrip12.json",
                    "test/data/json_roundtrip/roundtrip13.json",
                    "test/data/json_roundtrip/roundtrip14.json",
                    "test/data/json_roundtrip/roundtrip15.json",
                    "test/data/json_roundtrip/roundtrip16.json",
                    "test/data/json_roundtrip/roundtrip17.json",
                    "test/data/json_roundtrip/roundtrip18.json",
                    "test/data/json_roundtrip/roundtrip19.json",
                    "test/data/json_roundtrip/roundtrip20.json",
                    "test/data/json_roundtrip/roundtrip21.json",
                    "test/data/json_roundtrip/roundtrip22.json",
                    "test/data/json_roundtrip/roundtrip23.json",
                    "test/data/json_roundtrip/roundtrip24.json",
                    "test/data/json_roundtrip/roundtrip25.json",
                    "test/data/json_roundtrip/roundtrip26.json",
                    "test/data/json_roundtrip/roundtrip27.json",
                    "test/data/json_roundtrip/roundtrip28.json",
                    "test/data/json_roundtrip/roundtrip29.json",
                    "test/data/json_roundtrip/roundtrip30.json",
                    "test/data/json_roundtrip/roundtrip31.json",
                    "test/data/json_roundtrip/roundtrip32.json",
                    "test/data/json_testsuite/sample.json", // kills AppVeyor
                    "test/data/json_tests/pass1.json",
                    "test/data/json_tests/pass2.json",
                    "test/data/json_tests/pass3.json",
                    "test/data/regression/floats.json",
                    "test/data/regression/signed_ints.json",
                    "test/data/regression/unsigned_ints.json",
                    "test/data/regression/working_file.json",
                    "test/data/nst_json_testsuite/test_parsing/y_array_arraysWithSpaces.json",
                    "test/data/nst_json_testsuite/test_parsing/y_array_empty-string.json",
                    "test/data/nst_json_testsuite/test_parsing/y_array_empty.json",
                    "test/data/nst_json_testsuite/test_parsing/y_array_ending_with_newline.json",
                    "test/data/nst_json_testsuite/test_parsing/y_array_false.json",
                    "test/data/nst_json_testsuite/test_parsing/y_array_heterogeneous.json",
                    "test/data/nst_json_testsuite/test_parsing/y_array_null.json",
                    "test/data/nst_json_testsuite/test_parsing/y_array_with_1_and_newline.json",
                    "test/data/nst_json_testsuite/test_parsing/y_array_with_leading_space.json",
                    "test/data/nst_json_testsuite/test_parsing/y_array_with_several_null.json",
                    "test/data/nst_json_testsuite/test_parsing/y_array_with_trailing_space.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_0e+1.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_0e1.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_after_space.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_double_close_to_zero.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_double_huge_neg_exp.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_huge_exp.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_int_with_exp.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_minus_zero.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_negative_int.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_negative_one.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_negative_zero.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_real_capital_e.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_real_capital_e_neg_exp.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_real_capital_e_pos_exp.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_real_exponent.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_real_fraction_exponent.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_real_neg_exp.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_real_neg_overflow.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_real_pos_exponent.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_real_pos_overflow.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_real_underflow.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_simple_int.json",
                    "test/data/nst_json_testsuite/test_parsing/y_number_simple_real.json",
                    //"test/data/nst_json_testsuite/test_parsing/y_number_too_big_neg_int.json",
                    //"test/data/nst_json_testsuite/test_parsing/y_number_too_big_pos_int.json",
                    //"test/data/nst_json_testsuite/test_parsing/y_number_very_big_negative_int.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object_basic.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object_duplicated_key.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object_duplicated_key_and_value.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object_empty.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object_empty_key.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object_escaped_null_in_key.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object_extreme_numbers.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object_long_strings.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object_simple.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object_string_unicode.json",
                    "test/data/nst_json_testsuite/test_parsing/y_object_with_newlines.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_1_2_3_bytes_UTF-8_sequences.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_UTF-16_Surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_accepted_surrogate_pair.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_accepted_surrogate_pairs.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_allowed_escapes.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_backslash_and_u_escaped_zero.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_backslash_doublequotes.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_comments.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_double_escape_a.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_double_escape_n.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_escaped_control_character.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_escaped_noncharacter.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_in_array.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_in_array_with_leading_space.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_last_surrogates_1_and_2.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_newline_uescaped.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_nonCharacterInUTF-8_U+10FFFF.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_nonCharacterInUTF-8_U+1FFFF.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_nonCharacterInUTF-8_U+FFFF.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_null_escape.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_one-byte-utf-8.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_pi.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_simple_ascii.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_space.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_three-byte-utf-8.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_two-byte-utf-8.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_u+2028_line_sep.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_u+2029_par_sep.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_uEscape.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_unescaped_char_delete.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_unicode.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_unicodeEscapedBackslash.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_unicode_2.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_unicode_U+200B_ZERO_WIDTH_SPACE.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_unicode_U+2064_invisible_plus.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_unicode_escaped_double_quote.json",
                    // "test/data/nst_json_testsuite/test_parsing/y_string_utf16.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_utf8.json",
                    "test/data/nst_json_testsuite/test_parsing/y_string_with_del_character.json",
                    "test/data/nst_json_testsuite/test_parsing/y_structure_lonely_false.json",
                    "test/data/nst_json_testsuite/test_parsing/y_structure_lonely_int.json",
                    "test/data/nst_json_testsuite/test_parsing/y_structure_lonely_negative_real.json",
                    "test/data/nst_json_testsuite/test_parsing/y_structure_lonely_null.json",
                    "test/data/nst_json_testsuite/test_parsing/y_structure_lonely_string.json",
                    "test/data/nst_json_testsuite/test_parsing/y_structure_lonely_true.json",
                    "test/data/nst_json_testsuite/test_parsing/y_structure_string_empty.json",
                    "test/data/nst_json_testsuite/test_parsing/y_structure_trailing_newline.json",
                    "test/data/nst_json_testsuite/test_parsing/y_structure_true_in_array.json",
                    "test/data/nst_json_testsuite/test_parsing/y_structure_whitespace_array.json"
                })
        {
            CAPTURE(filename);
            // parse JSON file
            std::ifstream f_json(filename);
            json j1 = json::parse(f_json);
            // parse MessagePack file
            std::ifstream f_msgpack(filename + ".msgpack", std::ios::binary);
            std::vector<uint8_t> packed((std::istreambuf_iterator<char>(f_msgpack)),
                                        std::istreambuf_iterator<char>());
            json j2;
            CHECK_NOTHROW(j2 = json::from_msgpack(packed));
            // compare parsed JSON values
            CHECK(j1 == j2);
        }
    }
}
| mit |
maurer/tiamat | samples/Juliet/testcases/CWE122_Heap_Based_Buffer_Overflow/s07/CWE122_Heap_Based_Buffer_Overflow__c_CWE805_char_ncpy_62b.cpp | 1442 | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE122_Heap_Based_Buffer_Overflow__c_CWE805_char_ncpy_62b.cpp
Label Definition File: CWE122_Heap_Based_Buffer_Overflow__c_CWE805.string.label.xml
Template File: sources-sink-62b.tmpl.cpp
*/
/*
* @description
* CWE: 122 Heap Based Buffer Overflow
* BadSource: Allocate using malloc() and set data pointer to a small buffer
* GoodSource: Allocate using malloc() and set data pointer to a large buffer
* Sinks: ncpy
* BadSink : Copy string to data using strncpy
* Flow Variant: 62 Data flow: data flows using a C++ reference from one function to another in different source files
*
* */
#include "std_testcase.h"
#include <wchar.h>
namespace CWE122_Heap_Based_Buffer_Overflow__c_CWE805_char_ncpy_62
{
#ifndef OMITBAD
/* badSource() deliberately under-allocates: the strncpy sink in the
 * companion file copies into this buffer assuming the larger size, so the
 * 50-byte allocation overflows. This is the *intentional* CWE-122 defect
 * of this Juliet test case — do not "fix" it, or the benchmark loses its
 * purpose. (Unchecked malloc() return is standard Juliet style.) */
void badSource(char * &data)
{
    /* FLAW: Allocate and point data to a small buffer that is smaller than the large buffer used in the sinks */
    data = (char *)malloc(50*sizeof(char));
    data[0] = '\0'; /* null terminate */
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B() uses the GoodSource with the BadSink */
/* Safe variant: the 100-byte allocation matches the sink's assumption. */
void goodG2BSource(char * &data)
{
    /* FIX: Allocate and point data to a large buffer that is at least as large as the large buffer used in the sink */
    data = (char *)malloc(100*sizeof(char));
    data[0] = '\0'; /* null terminate */
}
#endif /* OMITGOOD */
} /* close namespace */
| mit |
graze/queue | src/Handler/BatchAcknowledgementHandler.php | 2878 | <?php
/**
* This file is part of graze/queue.
*
* Copyright (c) 2015 Nature Delivered Ltd. <https://www.graze.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*
* @license https://github.com/graze/queue/blob/master/LICENSE MIT
*
* @link https://github.com/graze/queue
*/
namespace Graze\Queue\Handler;
use Graze\Queue\Adapter\AdapterInterface;
use Graze\Queue\Message\MessageInterface;
/**
 * Acknowledgement handler that batches adapter calls.
 *
 * Handled messages are collected into per-outcome buffers (acknowledged,
 * rejected, delayed-by-duration) and only sent to the adapter once a buffer
 * reaches the configured batch size, reducing round trips to the backend.
 *
 * With the default batch size of 0 no buffer ever reaches the threshold,
 * so messages are only sent when flush() is invoked — presumably by the
 * base AbstractAcknowledgementHandler at the end of a receive cycle
 * (TODO confirm against the base class).
 */
class BatchAcknowledgementHandler extends AbstractAcknowledgementHandler
{
    /** @var int Buffer size that triggers an automatic flush (0 = never). */
    protected $batchSize;

    /** @var MessageInterface[] Successfully handled, awaiting acknowledge. */
    protected $acknowledged = [];

    /** @var MessageInterface[] Failed/declined, awaiting removal. */
    protected $rejected = [];

    /** @var MessageInterface[][] Messages keyed by extension duration. */
    protected $delayed = [];

    /**
     * @param int $batchSize
     */
    public function __construct($batchSize = 0)
    {
        $this->batchSize = (integer) $batchSize;
    }

    /**
     * Buffer a successfully handled message; flush when the batch is full.
     *
     * @param MessageInterface $message
     * @param AdapterInterface $adapter
     * @param mixed            $result
     */
    protected function acknowledge(
        MessageInterface $message,
        AdapterInterface $adapter,
        $result = null
    ) {
        $this->acknowledged[] = $message;
        // Strict equality: only ever true when the buffer exactly reaches
        // batchSize, which cannot happen when batchSize is 0.
        if (count($this->acknowledged) === $this->batchSize) {
            $this->flush($adapter);
        }
    }

    /**
     * Buffer a visibility-extension request; flush when its duration bucket
     * is full.
     *
     * @param MessageInterface $message
     * @param AdapterInterface $adapter
     * @param int $duration
     */
    protected function extend(
        MessageInterface $message,
        AdapterInterface $adapter,
        $duration
    ) {
        $this->delayed[$duration][] = $message;
        if (count($this->delayed[$duration]) === $this->batchSize) {
            $this->flush($adapter);
        }
    }

    /**
     * Buffer a rejected message; flush when the batch is full.
     *
     * @param MessageInterface $message
     * @param AdapterInterface $adapter
     * @param mixed            $result
     */
    protected function reject(
        MessageInterface $message,
        AdapterInterface $adapter,
        $result = null
    ) {
        $this->rejected[] = $message;
        if (count($this->rejected) === $this->batchSize) {
            $this->flush($adapter);
        }
    }

    /**
     * Send all buffered messages to the adapter and clear the buffers.
     * Note that a flush triggered by one full buffer drains *all* buffers.
     *
     * @param AdapterInterface $adapter
     */
    protected function flush(AdapterInterface $adapter)
    {
        if (!empty($this->acknowledged)) {
            $adapter->acknowledge($this->acknowledged);
            $this->acknowledged = [];
        }
        if (!empty($this->rejected)) {
            // NOTE(review): rejected messages are also passed to
            // acknowledge() — presumably "reject" here means "remove from
            // the queue without retry". Confirm against AdapterInterface
            // (a dedicated reject() may exist in newer versions).
            $adapter->acknowledge($this->rejected);
            $this->rejected = [];
        }
        if (!empty($this->delayed)) {
            foreach ($this->delayed as $duration => $messages) {
                $adapter->extend($messages, $duration);
            }
            $this->delayed = [];
        }
    }
}
| mit |
fstudio/clangbuilder | sources/bela/include/bela/bufio.hpp | 2130 | //
#ifndef BELA_BUFIO_HPP
#define BELA_BUFIO_HPP
#include <algorithm>
#include "base.hpp"
#include "types.hpp"
namespace bela::bufio {
constexpr ssize_t default_buffer_size = 4096;
// Fixed capacity size bufio.Reader implementation
// Fixed-capacity bufio.Reader-style buffered reader over a Win32 HANDLE.
// Buffers reads in an internal Size-byte array to reduce ReadFile calls.
// The handle is borrowed (never closed here); instances are not copyable.
template <ssize_t Size = default_buffer_size> class Reader {
public:
  Reader(HANDLE r) : fd(r) {}
  Reader(const Reader &) = delete;
  Reader &operator=(const Reader &) = delete;
  // Number of bytes currently buffered but not yet consumed.
  ssize_t Buffered() const { return w - r; }
  // Read up to len bytes into buffer. Returns the number of bytes read
  // (>= 1) or -1 with ec set on error or EOF. Reads larger than the
  // internal buffer bypass it entirely to avoid a copy.
  ssize_t Read(void *buffer, ssize_t len, bela::error_code &ec) {
    if (buffer == nullptr || len == 0) {
      ec = bela::make_error_code(L"short read");
      return -1;
    }
    if (r == w) {
      if (static_cast<size_t>(len) > sizeof(data)) {
        // Large read, empty buffer.
        // Read directly into p to avoid copy.
        ssize_t rlen = 0;
        if (!fsread(buffer, len, rlen, ec)) {
          return -1;
        }
        return rlen;
      }
      // Refill the internal buffer.
      w = 0;
      r = 0;
      if (!fsread(data, sizeof(data), w, ec)) {
        return -1;
      }
      if (w == 0) {
        // ReadFile succeeded with zero bytes: end of input.
        ec = bela::make_error_code(L"unexpected EOF");
        return -1;
      }
    }
    auto n = (std::min)(w - r, len);
    memcpy(buffer, data + r, n);
    r += n;
    return n;
  }
  // Read exactly len bytes by looping over Read(). Returns len on success
  // or -1 with ec set (truncated input surfaces as "unexpected EOF").
  ssize_t ReadFull(void *buffer, ssize_t len, bela::error_code &ec) {
    auto p = reinterpret_cast<uint8_t *>(buffer);
    ssize_t n = 0;
    for (; n < len;) {
      auto nn = Read(p + n, len - n, ec);
      if (nn == -1) {
        return -1;
      }
      n += nn;
    }
    // Defensive: Read() never returns 0 for len > 0, so this is normally
    // unreachable, but it guards against future changes to Read().
    if (n < len) {
      ec = bela::make_error_code(L"unexpected EOF");
      return -1;
    }
    return n;
  }
  // Capacity of the internal buffer in bytes.
  constexpr int size() const { return Size; }

private:
  HANDLE fd{INVALID_HANDLE_VALUE};
  uint8_t data[Size] = {0};
  ssize_t w{0}; // write cursor: bytes valid in data
  ssize_t r{0}; // read cursor: bytes already consumed from data
  // Single ReadFile wrapper. On success stores the number of bytes the OS
  // actually transferred into rlen (may be less than len; 0 at EOF).
  bool fsread(void *b, ssize_t len, ssize_t &rlen, bela::error_code &ec) {
    DWORD dwSize = {0};
    if (::ReadFile(fd, b, static_cast<DWORD>(len), &dwSize, nullptr) != TRUE) {
      ec = bela::make_system_error_code(L"ReadFile: ");
      return false;
    }
    // BUG FIX: report the bytes actually read (dwSize), not the requested
    // length. The old `rlen = len` marked unread garbage bytes as valid on
    // any short read and masked EOF detection in Read().
    rlen = static_cast<ssize_t>(dwSize);
    return true;
  }
};
} // namespace bela::bufio
#endif | mit |
buildlet/Utilities | Projects/Utilities/BUILDLet.UtilitiesTests/AssemblyAttributesTests.cs | 3652 | /*******************************************************************************
The MIT License (MIT)
Copyright (c) 2015-2017 Daiki Sakamoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
********************************************************************************/
using System;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using System.Diagnostics;
using System.Reflection;
using BUILDLet.Utilities.Diagnostics;
namespace BUILDLet.Utilities.Tests
{
[TestClass]
public class AssemblyAttributesTests
{
    /// <summary>
    /// One manual test case: the assembly to inspect (null means "use the
    /// default constructor / executing assembly") and the expected Name.
    /// </summary>
    private struct TestCase
    {
        public Assembly Assembly;
        public string ExpectedName;
    };

    [TestMethod()]
    [TestCategory("MANUAL")]
    public void AssemblyAttributes_Test()
    {
        TestCase[] testCases =
        {
            new TestCase() { Assembly = null, ExpectedName = "BUILDLet.UtilitiesTests" },
            new TestCase() { Assembly = Assembly.GetExecutingAssembly(), ExpectedName = "BUILDLet.UtilitiesTests" },
            new TestCase() { Assembly = Assembly.GetCallingAssembly(), ExpectedName = "Microsoft.VisualStudio.TestPlatform.Extensions.VSTestIntegration" }
        };

        foreach (var testCase in testCases)
        {
            AssemblyAttributes target;
            string assemblyDescription;

            if (testCase.Assembly == null)
            {
                // Default constructor inspects the executing assembly.
                target = new AssemblyAttributes();
                assemblyDescription = string.Format("\"{0}\" (Executing Assembly)", Assembly.GetExecutingAssembly().GetName().Name);
            }
            else
            {
                target = new AssemblyAttributes(testCase.Assembly);
                assemblyDescription = string.Format("\"{0}\"", testCase.Assembly.GetName().Name);
            }

            // Console Output (for manual inspection; only Name is asserted)
            Console.WriteLine("Assembly={0}", assemblyDescription);
            Console.WriteLine("AssemblyAttributes.Name=\"{0}\"", target.Name);
            Console.WriteLine("AssemblyAttributes.FullName=\"{0}\"", target.FullName);
            Console.WriteLine("AssemblyAttributes.Version.ToString()=\"{0}\"", target.Version.ToString());
            Console.WriteLine("AssemblyAttributes.CultureInfo.ToString()=\"{0}\"", target.CultureInfo.ToString());
            Console.WriteLine("AssemblyAttributes.CultureName=\"{0}\"", target.CultureName);
            Console.WriteLine();

            // Assertion (only Name)
            Assert.AreEqual(testCase.ExpectedName, target.Name);
        }
    }
}
}
| mit |
wkgcass/common | SQL/src/net/cassite/sql/OrderStarter.java | 304 | package net.cassite.sql;
/**
 * Entry point for building the {@code ORDER BY} portion of a statement.
 *
 * @param <O> the concrete builder subtype, so fluent calls keep their static type
 */
public class OrderStarter<O extends OrderStarter<O>> extends SQLEnd<O> {
    protected OrderStarter(StringBuilder sb) {
        super(sb);
    }

    /**
     * Starts an {@code ORDER BY} clause on the given column.
     *
     * @param col the column name to order by
     * @return the clause builder for further chaining
     */
    public OrderByClause orderBy(String col) {
        final OrderByClause clause = new OrderByClause(sb, col);
        return clause;
    }
}
| mit |
gaborkolozsy/XChange | xchange-ripple/src/main/java/org/knowm/xchange/ripple/service/RippleMarketDataService.java | 2492 | package org.knowm.xchange.ripple.service;
import java.io.IOException;
import org.knowm.xchange.Exchange;
import org.knowm.xchange.currency.CurrencyPair;
import org.knowm.xchange.dto.marketdata.OrderBook;
import org.knowm.xchange.dto.marketdata.Ticker;
import org.knowm.xchange.dto.marketdata.Trades;
import org.knowm.xchange.exceptions.ExchangeException;
import org.knowm.xchange.exceptions.NotAvailableFromExchangeException;
import org.knowm.xchange.exceptions.NotYetImplementedForExchangeException;
import org.knowm.xchange.ripple.RippleAdapters;
import org.knowm.xchange.ripple.RippleExchange;
import org.knowm.xchange.ripple.dto.marketdata.RippleOrderBook;
import org.knowm.xchange.ripple.service.params.RippleMarketDataParams;
import org.knowm.xchange.service.marketdata.MarketDataService;
public class RippleMarketDataService extends RippleMarketDataServiceRaw implements MarketDataService {

    public RippleMarketDataService(final Exchange exchange) {
        super(exchange);
    }

    /**
     * Fetches and adapts the order book for the given pair.
     *
     * <p>When the base currency is not XRP, the returned orders' additional data map contains a value for
     * {@code RippleExchange.DATA_BASE_COUNTERPARTY}; similarly, a non-XRP counter currency populates
     * {@code RippleExchange.DATA_COUNTER_COUNTERPARTY}.
     *
     * @param currencyPair the base/counter currency pair
     * @param args the first element must be a {@link RippleMarketDataParams}
     */
    @Override
    public OrderBook getOrderBook(final CurrencyPair currencyPair, final Object... args) throws IOException {
        // Guard clause: reject missing or wrongly-typed parameters up front.
        if (args == null || args.length == 0 || !(args[0] instanceof RippleMarketDataParams)) {
            throw new ExchangeException("RippleMarketDataParams is missing");
        }
        final RippleMarketDataParams params = (RippleMarketDataParams) args[0];
        final RippleOrderBook rippleOrderBook = getRippleOrderBook(currencyPair, params);
        return RippleAdapters.adaptOrderBook(rippleOrderBook, params, currencyPair);
    }

    /** Not yet implemented for this exchange. */
    @Override
    public Ticker getTicker(final CurrencyPair currencyPair,
            final Object... args) throws ExchangeException, NotAvailableFromExchangeException, NotYetImplementedForExchangeException, IOException {
        throw new NotYetImplementedForExchangeException();
    }

    /** Not yet implemented for this exchange. */
    @Override
    public Trades getTrades(final CurrencyPair currencyPair,
            final Object... args) throws ExchangeException, NotAvailableFromExchangeException, NotYetImplementedForExchangeException, IOException {
        throw new NotYetImplementedForExchangeException();
    }
}
| mit |
madetech/made-social-engine | app/models/social/instagram_hashtag.rb | 716 | module Social
class InstagramHashtag < ActiveRecord::Base
attr_accessible :hashtag
has_many :photos,
:dependent => :destroy,
:class_name => "Social::InstagramPhoto"
validates :hashtag,
:presence => true,
:uniqueness => true
after_save :update_photos
def update_photos
InstagramPhoto.get_hashtag_photos(self)
end
def self.refresh_hashtag_photos
InstagramHashtag.all.each do |instagram_hashtag|
InstagramPhoto.get_hashtag_photos(instagram_hashtag)
end
end
def to_s
self.hashtag
end
end
end
| mit |
ClxS/Stardew-Farmhand | Libraries/API/FarmhandGame/Item/BigCraftable.cs | 1285 | namespace Farmhand.Game.Item
{
using Farmhand.API.Items;
using Microsoft.Xna.Framework;
/// <summary>
/// Acts as a base class for BigCraftable objects.
/// </summary>
public class BigCraftable : StardewObject
{
/// <summary>
/// Initializes a new instance of the <see cref="BigCraftable" /> class.
/// </summary>
protected BigCraftable()
{
}
/// <summary>
/// Initializes a new instance of the <see cref="BigCraftable" /> class.
/// </summary>
/// <param name="information">
/// The BigCraftable information.
/// </param>
/// <param name="tileLocation">
/// The tile location.
/// </param>
/// <param name="isRecipe">
/// Whether it is instantiated by a recipe.
/// </param>
public BigCraftable(BigCraftableInformation information, Vector2 tileLocation, bool isRecipe = false)
: base(tileLocation, information.Id, isRecipe)
{
this.Information = information;
}
/// <summary>
/// Gets or sets Big Craftable information.
/// </summary>
public BigCraftableInformation Information { get; set; }
}
} | mit |
luchobenitez/radiosoo | src/models/moderators.js | 537 | 'use strict';
module.exports = function (sequelize, DataTypes) {
var Moderator = sequelize.define(
'Moderator',
{
id: {
type: DataTypes.BIGINT(11),
primaryKey: true,
autoIncrement: true,
comment: 'nid, Primary Key, Moderator ID'
},
status: {
type: DataTypes.ENUM('A','D','I','B'),
allowNull: true,
defaultValue: 'A',
comment: 'Estado, Moderator status D:dead, A: active, I:inactive-deleted, B: Banned'
}
}
);
return Moderator;
};
| mit |
chjj/rocksdown | deps/rocksdb/port-libuv/port_uv.cc | 947 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "port_uv.h"
#include <cstdlib>
#include <stdio.h>
#include <string.h>
#include "util/logging.h"
namespace rocksdb {
namespace port {

// Thin adapters mapping RocksDB's port-layer synchronization primitives
// onto libuv's cross-platform mutex/condvar/once wrappers.

Mutex::Mutex() { uv_mutex_init(&mu_); }
Mutex::~Mutex() { uv_mutex_destroy(&mu_); }
void Mutex::Lock() { uv_mutex_lock(&mu_); }
void Mutex::Unlock() { uv_mutex_unlock(&mu_); }

// CondVar keeps a pointer to its owning Mutex; Wait() releases and
// re-acquires that mutex around uv_cond_wait, per condvar semantics.
CondVar::CondVar(Mutex* mu) : mu_(mu) { uv_cond_init(&cv_); }
CondVar::~CondVar() { uv_cond_destroy(&cv_); }
void CondVar::Wait() { uv_cond_wait(&cv_, &mu_->mu_); }
void CondVar::Signal() { uv_cond_signal(&cv_); }
void CondVar::SignalAll() { uv_cond_broadcast(&cv_); }

// Runs initializer exactly once per OnceType flag (wraps uv_once).
void InitOnce(OnceType* once, void (*initializer)()) { uv_once(once, initializer); }

} // namespace port
} // namespace rocksdb
| mit |
schematics/schemalchemy | schemalchemy.py | 3877 |
"""
SchemAlchemy = Schematics + SQLAlchemy
## Usage
0. Import schemalchemy (before schematics)
1. Inherit from schemalchemy `Base`
2. Define the SQLAlchemy columns (or provide a Table)
3. Define the Schematics fields
Note: Column property names must be '_' + field_name (see about SQLAlchemy
`column_prefix` if you need to customize the prefix).
## Example
class Person(Base):
__tablename__ = 'person'
_id = Column('id', Integer, primary_key=True)
_name = Column('name', String(50))
id = IntType(default=1)
name = StringType()
"""
import schematics.models
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
class SchemAlchemyFieldDescriptor(schematics.models.FieldDescriptor):

    def __set__(self, instance, value):
        """Mirror every Schematics field write into its mapped column attribute.

        ``column_name`` is attached to the descriptor by the metaclass only
        for fields that map to a table column; fields without it behave like
        plain Schematics fields.
        """
        super(SchemAlchemyFieldDescriptor, self).__set__(instance, value)
        column_name = getattr(self, 'column_name', None)
        if column_name is not None:
            setattr(instance, column_name, getattr(instance, self.name))
class SchemAlchemyModelMeta(schematics.models.ModelMeta, DeclarativeMeta):
    """
    SchemAlchemy common metaclass.

    Assumes the base metaclasses do not conflict, as in, they do not define the
    same methods.
    """

    def __init__(cls, classname, bases, dict_):
        """
        Map the Schematics fields to the SQLAlchemy columns using synonym
        properties.

        For every Schematics field with a matching column property (column
        name = mapper column_prefix + field name), the field descriptor is
        registered with the mapper as a synonym, so attribute access goes
        through the Schematics descriptor while still updating the column.
        """
        super(SchemAlchemyModelMeta, cls).__init__(classname, bases, dict_)
        # Classes without a mapper (e.g. the declarative Base itself) have
        # no columns to wire up.
        if not hasattr(cls, '__mapper__'):
            return
        mapper = cls.__mapper__
        for field_name in cls._fields:
            # Column properties are expected at column_prefix + field name
            # ('_' + field_name via SchemAlchemyModel.__mapper_args__).
            column_name = (mapper.column_prefix or '') + field_name
            if not column_name in mapper.all_orm_descriptors:
                continue
            # NOTE(review): fields inherited from a base class are not in
            # cls.__dict__, so .get() could return None and the next line
            # would raise AttributeError — presumably fields are always
            # declared on the mapped class itself; confirm before reuse.
            field_descriptor = cls.__dict__.get(field_name)
            field_descriptor.column_name = column_name
            field_synonym = orm.synonym(column_name, descriptor=field_descriptor)
            mapper.add_property(field_name, field_synonym)
class SchemAlchemyModel(schematics.models.Model):
    """
    Set columns on init and trigger the descriptors for all mapped fields when
    loading from database.
    """

    # Column attributes are prefixed with '_' relative to the Schematics
    # field names (e.g. field ``id`` maps to column property ``_id``).
    __mapper_args__ = {'column_prefix': '_'}

    def __init__(self, *a, **kw):
        # Construction path (new object): Schematics initializes the fields,
        # then their values are pushed into the mapped column attributes.
        super(SchemAlchemyModel, self).__init__(*a, **kw)
        self._set_mapped_column_values()

    @orm.reconstructor
    def _reconstructor(self):
        # Load path (object rebuilt from the database): SQLAlchemy bypasses
        # __init__, so run Schematics initialization here and then copy the
        # loaded column values into the Schematics fields.
        super(SchemAlchemyModel, self).__init__()
        self._set_mapped_field_values()

    def _iter_column_fields(self):
        # Yield (field_name, column_name) for every field the metaclass
        # linked to a column (i.e. whose descriptor got a ``column_name``).
        cls = self.__class__
        for field_name in self._fields:
            field_descriptor = cls.__dict__.get(field_name)
            if not hasattr(field_descriptor, 'column_name'):
                continue
            column_name = field_descriptor.column_name
            yield (field_name, column_name)

    def _set_mapped_field_values(self):
        # Copy column values -> fields after a database load.
        # NOTE(review): orm.base.instance_dict is SQLAlchemy-internal API
        # returning the instance's raw attribute dict; verify it still
        # exists when upgrading SQLAlchemy.
        for field_name, column_name in self._iter_column_fields():
            value = orm.base.instance_dict(self).get(column_name)
            setattr(self, field_name, value)

    def _set_mapped_column_values(self):
        # Copy field values -> columns after construction, writing straight
        # into the instance dict (avoids re-entering the synonym descriptor).
        for field_name, column_name in self._iter_column_fields():
            if hasattr(self, field_name):
                instance_dict = orm.base.instance_dict(self)
                instance_dict[column_name] = getattr(self, field_name)
# Schematics monkeypatching: replace the library's descriptor, metaclass and
# model with the SchemAlchemy-aware versions so code importing schematics
# afterwards picks them up (hence "import schemalchemy before schematics"
# in the module docstring above).
schematics.models.FieldDescriptor = SchemAlchemyFieldDescriptor
schematics.models.ModelMeta = SchemAlchemyModelMeta
schematics.models.Model = SchemAlchemyModel
# For model definition inherit from the `Base` class below instead of `Model`
# (constructor=None keeps declarative from installing its own __init__, so
# SchemAlchemyModel.__init__ runs instead).
Base = declarative_base(
    cls=SchemAlchemyModel,
    metaclass=SchemAlchemyModelMeta,
    constructor=None)
| mit |