"""
Utility methods for Qt inside maya
"""
import contextlib
from PySide2 import QtCore, QtWidgets
import shiboken2
import functools
import maya.cmds as cmds
import maya.OpenMayaUI as omui
def get_maya_window():
    """Get the main Maya window as a QtWidgets.QWidget instance.

    Returns:
        [QtWidgets.QWidget]: instance of the top-level Maya window
    """
    mwindow = omui.MQtUtil.mainWindow()
    if mwindow is not None:
        # long() does not exist in Python 3; int() handles the pointer on both.
        return shiboken2.wrapInstance(int(mwindow), QtWidgets.QWidget)
def ensure_single_widget(name):
    """
    Ensure all existing widgets with the same name are destroyed before the
    new one is created.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if cmds.window(name, exists=True):
                cmds.deleteUI(name)
            wsname = name + "WorkspaceControl"
            if cmds.workspaceControl(wsname, ex=True):
                if cmds.workspaceControlState(wsname, ex=True):
                    cmds.workspaceControlState(wsname, remove=True)
                cmds.deleteUI(wsname)
            return func(*args, **kwargs)
        return wrapper
    return decorator
class MayaWidget(QtWidgets.QWidget):

    def keyPressEvent(self, event):
        """Maya's main window will catch the Shift and Control keys, so accept
        them here to avoid it taking focus."""
        if event.key() in (QtCore.Qt.Key_Shift, QtCore.Qt.Key_Control):
            event.accept()
        else:
            event.ignore()

    @classmethod
    def launch_dialog(cls):
        widget = cls(parent=get_maya_window())
        widget.show()
        return widget
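
# A minimal usage sketch (the window name "myToolWindow" and the ToolWidget
# subclass are hypothetical, not part of this module):
#
#   class ToolWidget(MayaWidget):
#       pass
#
#   @ensure_single_widget("myToolWindow")
#   def show_tool():
#       widget = ToolWidget.launch_dialog()
#       widget.setObjectName("myToolWindow")
#       return widget
#
# Each call to show_tool() first deletes any window or workspace control
# registered under "myToolWindow", then parents a fresh widget to the main
# Maya window.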
|
import DefaultHandler from './default';

export const schema = {
  type: 'array',
  items: {
    type: 'object',
    properties: {
      name: { type: 'string', minLength: 1, pattern: '[^<>]+' }
    },
    required: [ 'name' ]
  }
};
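
// For reference, a hypothetical clients asset that satisfies the schema above
// (the names are invented): an array of objects, each carrying a non-empty
// `name` with at least one character other than '<' or '>'.
//
//   [ { name: 'My SPA' }, { name: 'My M2M Client' } ]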
export default class ClientHandler extends DefaultHandler {
  constructor(config) {
    super({
      ...config,
      type: 'clients',
      id: 'client_id',
      identifiers: [ 'client_id', 'name' ],
      stripUpdateFields: [
        // Fields not allowed during updates
        'callback_url_template', 'signing_keys', 'global', 'tenant', 'jwt_configuration.secret_encoded'
      ]
    });
  }

  objString(item) {
    return super.objString({ name: item.name, client_id: item.client_id });
  }
  async processChanges(assets) {
    const { clients } = assets;

    // Do nothing if not set
    if (!clients) return;

    const excludedClients = (assets.exclude && assets.exclude.clients) || [];

    const {
      del, update, create, conflicts
    } = await this.calcChanges(assets);

    // Always filter out the client we are using to access the Auth0 Management
    // API, as deleting or updating it could break the deployment itself.
    const currentClient = this.config('AUTH0_CLIENT_ID');

    const filterClients = (list) => {
      if (excludedClients.length) {
        return list.filter((item) => item.client_id !== currentClient && !excludedClients.includes(item.name));
      }
      return list.filter((item) => item.client_id !== currentClient);
    };

    const changes = {
      del: filterClients(del),
      update: filterClients(update),
      create: filterClients(create),
      conflicts: filterClients(conflicts)
    };

    await super.processChanges(assets, {
      ...changes
    });
  }
  async getType() {
    if (this.existing) {
      return this.existing;
    }
    this.existing = await this.client.clients.getAll({ paginate: true, is_global: false });
    return this.existing;
  }
}
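
// A minimal sketch of how the filtering in processChanges behaves, assuming an
// assets object shaped like the one it receives (names are invented):
//
//   const assets = {
//     clients: [ ... ],
//     exclude: { clients: [ 'My SPA' ] }   // matched against item.name
//   };
//
// With AUTH0_CLIENT_ID pointing at the deploy client, that client plus any
// client named 'My SPA' is dropped from del/update/create/conflicts before
// being handed to super.processChanges.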
|
import math
import numbers
import torch
from torch import nn
import torch.nn.functional as F
from torch_butterfly import Butterfly
from torch_butterfly.multiply_base4 import butterfly_multiply_base4_torch
from torch_butterfly.multiply_base4 import twiddle_base2_to_base4
from torch_butterfly.complex_utils import real_dtype_to_complex
from torch_butterfly.complex_utils import complex_matmul
class ButterflyBase4(Butterfly):
    """Product of log N butterfly factors, each is a block 2x2 of diagonal matrices.
    Compatible with torch.nn.Linear.

    Parameters:
        in_size: size of input
        out_size: size of output
        bias: If set to False, the layer will not learn an additive bias.
            Default: ``True``
        complex: whether complex or real
        increasing_stride: whether the first butterfly block will multiply with increasing stride
            (e.g. 1, 2, ..., n/2) or decreasing stride (e.g., n/2, n/4, ..., 1).
        init: 'randn', 'ortho', or 'identity'. Whether the weight matrix should be initialized
            from a randn twiddle, or to be randomly orthogonal/unitary, or to be the identity
            matrix.
        nblocks: number of B or B^T blocks. The B and B^T will alternate.
    """
    def __init__(self, *args, **kwargs):
        init = kwargs.get('init', None)
        if (isinstance(init, tuple) and len(init) == 2 and isinstance(init[0], torch.Tensor)
                and isinstance(init[1], torch.Tensor)):
            # init is a pre-computed (twiddle4, twiddle2) pair: clone it and skip
            # the parent class's twiddle initialization.
            twiddle4, twiddle2 = init[0].clone(), init[1].clone()
            kwargs['init'] = 'empty'
            super().__init__(*args, **kwargs)
        else:
            super().__init__(*args, **kwargs)
            with torch.no_grad():
                twiddle4, twiddle2 = twiddle_base2_to_base4(self.twiddle, self.increasing_stride)
        del self.twiddle
        self.twiddle4 = nn.Parameter(twiddle4)
        self.twiddle2 = nn.Parameter(twiddle2)
        self.twiddle4._is_structured = True  # Flag to avoid weight decay
        self.twiddle2._is_structured = True  # Flag to avoid weight decay
    def forward(self, input):
        """
        Parameters:
            input: (batch, *, in_size)
        Return:
            output: (batch, *, out_size)
        """
        output = self.pre_process(input)
        output_size = self.out_size if self.nstacks == 1 else None
        output = butterfly_multiply_base4_torch(self.twiddle4, self.twiddle2, output,
                                                self.increasing_stride, output_size)
        return self.post_process(input, output)
    def __imul__(self, scale):
        """In-place multiply the whole butterfly matrix by some scale factor, by multiplying
        the twiddle.
        Scale must be nonnegative.
        """
        assert isinstance(scale, numbers.Number)
        assert scale >= 0
        scale_per_entry = scale ** (1.0 / self.nblocks / self.log_n)
        self.twiddle4 *= scale_per_entry ** 2
        self.twiddle2 *= scale_per_entry
        return self
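
# A minimal usage sketch (sizes are illustrative, assuming the constructor
# arguments documented in the class docstring above):
#
#   m = ButterflyBase4(in_size=16, out_size=16, bias=False, init='ortho')
#   y = m(torch.randn(3, 16))   # (3, 16): product of log2(16) = 4 factors
#   m *= 16.0                   # rescale the represented matrix by 16
#
# The scaling arithmetic: with nblocks = 1 and log_n = 4, scale_per_entry =
# 16 ** (1 / 4) = 2. Each base-4 twiddle entry is multiplied by 2 ** 2 = 4
# because one base-4 factor fuses two base-2 factors, so the four base-2
# levels contribute 2 ** 4 = 16 overall, matching the requested scale.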
|
/**
* Copyright (c) 2015-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
#import <UIKit/UIKit.h>
@interface ABI14_0_0RCTDatePicker : UIDatePicker
@end
|
Docs = {"data":{"classes":[{"name":"Siesta.Harness","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Recorder.Recorder","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Recorder.ExtJS","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Recorder.TargetExtractor","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Recorder.TargetExtractor.Recognizer.View","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Recorder.TargetExtractor.Recognizer.Grid","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Recorder.TargetExtractor.Recognizer.DatePicker","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Recorder.TargetExtractor.Recognizer.BoundList","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Recorder.TargetExtractor.Recognizer.TimeAxisColumn","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Recorder.TargetExtractor.Recognizer.NumberField","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Recorder.TargetExtractor.ExtJS","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.ExtJS.Grid","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.ExtJS.Element","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.ExtJS.Component","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.ExtJS.Store","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.ExtJS.DataView","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.ExtJS.Observable","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.ExtJS.Ajax","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.ExtJS.FormField","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.ExtJSCore","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.Element","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.Function","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.Simulate.Touch","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.Simulate.Mouse","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.Simulate.Keyboard","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.Simulate.KeyCodes","extends":null,"private":null,"icon":"icon-singleton"},{"name":"Siesta.Test.Simulate.Event","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.SenchaTouch","extends":"Siesta.Test.Browser","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.MouseUp","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.SetValue","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.MonkeyTest","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.DoubleTap","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.MoveCursor","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.SetUrl","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.Swipe","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.Click","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.TouchDrag","extends":"Siesta.Test.Action","private"
:null,"icon":"icon-class"},{"name":"Siesta.Test.Action.Eval","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.LongPress","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.DoubleClick","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.Role.HasTarget","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.Tap","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.MethodCall","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.Pinch","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.Wait","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.MoveCursorTo","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.MouseDown","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.Drag","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.Done","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.RightClick","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action.Type","extends":"Siesta.Test.Action","private":null,"icon":"icon-class"},{"name":"Siesta.Test.jQuery","extends":"Siesta.Test.Browser","private":null,"icon":"icon-class"},{"name":"Siesta.Test.Action","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.More","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.BDD.Spy","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.BDD.Expectation","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.Browser","extends":"Siesta.Test","private":null,"icon":"icon-class"},{"name":"Siesta.Test.ActionTarget","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.Date","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.ExtJS","extends":"Siesta.Test.Browser","private":null,"icon":"icon-class"},{"name":"Siesta.Test.BDD","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Test.TextSelection","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Role.CanStyleOutput","extends":null,"private":true,"icon":"icon-class"},{"name":"Siesta.Harness.Browser.ExtJSCore","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Harness.Browser.SenchaTouch","extends":"Siesta.Harness.Browser","private":null,"icon":"icon-class"},{"name":"Siesta.Harness.Browser.ExtJS","extends":"Siesta.Harness.Browser","private":null,"icon":"icon-class"},{"name":"Siesta.Harness.NodeJS","extends":"Siesta.Harness","private":null,"icon":"icon-class"},{"name":"Siesta.Harness.Browser","extends":"Siesta.Harness","private":null,"icon":"icon-class"},{"name":"Siesta.Test","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Util.Role.CanParseBrowser","extends":null,"private":null,"icon":"icon-class"},{"name":"Siesta.Util.Role.CanCompareObjects","extends":null,"private":null,"icon":"icon-class"}],"guides":[{"title":"Siesta","items":[{"name":"siesta_getting_started","title":"Getting Started with Siesta - stress free JavaScript unit testing","description":"This guide gets you started with Siesta in no 
time.","url":"/workspace/JavaScript/siesta/resources/docs/guides/siesta_getting_started","filename":"/workspace/JavaScript/siesta/resources/docs/guides/siesta_getting_started/README.md"},{"name":"testing_cmd_application","title":"Testing applications generated by Sencha Cmd","description":"Intro to testing Sencha Cmd apps.","url":"/workspace/JavaScript/siesta/resources/docs/guides/testing_cmd_application","filename":"/workspace/JavaScript/siesta/resources/docs/guides/testing_cmd_application/README.md"},{"name":"siesta_automation","title":"Siesta automation & reports.","description":"This guide describes how Siesta tests can be launched from the command line.","url":"/workspace/JavaScript/siesta/resources/docs/guides/siesta_automation","filename":"/workspace/JavaScript/siesta/resources/docs/guides/siesta_automation/README.md"},{"name":"browserstack_integration","title":"Cloud testing. BrowserStack integration","description":"This guide describes how to run your tests in the cloud using BrowserStack infrastructure.","url":"/workspace/JavaScript/siesta/resources/docs/guides/browserstack_integration","filename":"/workspace/JavaScript/siesta/resources/docs/guides/browserstack_integration/README.md"},{"name":"saucelabs_integration","title":"Cloud testing. Sauce Labs integration","description":"This guide describes how to run your tests in the cloud using Sauce Labs infrastructure.","url":"/workspace/JavaScript/siesta/resources/docs/guides/saucelabs_integration","filename":"/workspace/JavaScript/siesta/resources/docs/guides/saucelabs_integration/README.md"},{"name":"testing_mvc_app","title":"Testing an Ext JS MVC application with Siesta","description":"This guide shows you how to test a Sencha MVC application.","url":"/workspace/JavaScript/siesta/resources/docs/guides/testing_mvc_app","filename":"/workspace/JavaScript/siesta/resources/docs/guides/testing_mvc_app/README.md"},{"name":"extending_test_class","title":"Extending a test class with your own assertions and utility methods","description":"This guide describes how to add your own assertions and/or utility methods to a Test Class.","url":"/workspace/JavaScript/siesta/resources/docs/guides/extending_test_class","filename":"/workspace/JavaScript/siesta/resources/docs/guides/extending_test_class/README.md"},{"name":"testing_sencha_touch_app","title":"Testing a Sencha Touch application. 
Implementing automatic login before every test","description":"Learn how to efficiently test your Sencha Touch application with Siesta","url":"/workspace/JavaScript/siesta/resources/docs/guides/testing_sencha_touch_app","filename":"/workspace/JavaScript/siesta/resources/docs/guides/testing_sencha_touch_app/README.md"},{"name":"cross_page_testing","title":"Cross page testing","description":"This guide describes how you can write cross-page tests (tests involving page refresh / redirects)","url":"/workspace/JavaScript/siesta/resources/docs/guides/cross_page_testing","filename":"/workspace/JavaScript/siesta/resources/docs/guides/cross_page_testing/README.md"},{"name":"bdd_conventions","title":"BDD conventions","description":"This is an introductory guide on using BDD conventions for your tests","url":"/workspace/JavaScript/siesta/resources/docs/guides/bdd_conventions","filename":"/workspace/JavaScript/siesta/resources/docs/guides/bdd_conventions/README.md"},{"name":"code_coverage","title":"Code coverage with Siesta","description":"This guide introduces you to the code coverage module in Siesta.","url":"/workspace/JavaScript/siesta/resources/docs/guides/code_coverage","filename":"/workspace/JavaScript/siesta/resources/docs/guides/code_coverage/README.md"},{"name":"event_recorder","title":"Using the event recorder","description":"Learn how to use the event recorder to quickly generate good test stubs.","url":"/workspace/JavaScript/siesta/resources/docs/guides/event_recorder","filename":"/workspace/JavaScript/siesta/resources/docs/guides/event_recorder/README.md"}]}],"videos":[],"examples":[],"search":[{"name":"Harness","fullName":"Siesta.Harness","icon":"icon-class","url":"#!/api/Siesta.Harness","meta":{},"sort":1},{"name":"title","fullName":"Siesta.Harness.title","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-title","meta":{},"sort":3},{"name":"testClass","fullName":"Siesta.Harness.testClass","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-testClass","meta":{},"sort":3},{"name":"transparentEx","fullName":"Siesta.Harness.transparentEx","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-transparentEx","meta":{},"sort":3},{"name":"runCore","fullName":"Siesta.Harness.runCore","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-runCore","meta":{},"sort":3},{"name":"maxThreads","fullName":"Siesta.Harness.maxThreads","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-maxThreads","meta":{},"sort":3},{"name":"autoCheckGlobals","fullName":"Siesta.Harness.autoCheckGlobals","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-autoCheckGlobals","meta":{},"sort":3},{"name":"expectedGlobals","fullName":"Siesta.Harness.expectedGlobals","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-expectedGlobals","meta":{},"sort":3},{"name":"preload","fullName":"Siesta.Harness.preload","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-preload","meta":{},"sort":3},{"name":"alsoPreload","fullName":"Siesta.Harness.alsoPreload","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-alsoPreload","meta":{},"sort":3},{"name":"listeners","fullName":"Siesta.Harness.listeners","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-listeners","meta":{},"sort":3},{"name":"cachePreload","fullName":"Siesta.Harness.cachePreload","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-cachePreload","meta":{},"sort":3},{"name":"keepNLastResults","fullName":"Siesta.Harness.keepNLastResults","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-keepNLastResults","meta":{},"sort":3},{"name":"overrideSetTimeout","fullName":"Siesta.Harness.overrideSetTi
meout","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-overrideSetTimeout","meta":{},"sort":3},{"name":"needDone","fullName":"Siesta.Harness.needDone","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-needDone","meta":{},"sort":3},{"name":"waitForTimeout","fullName":"Siesta.Harness.waitForTimeout","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-waitForTimeout","meta":{},"sort":3},{"name":"defaultTimeout","fullName":"Siesta.Harness.defaultTimeout","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-defaultTimeout","meta":{},"sort":3},{"name":"subTestTimeout","fullName":"Siesta.Harness.subTestTimeout","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-subTestTimeout","meta":{},"sort":3},{"name":"isReadyTimeout","fullName":"Siesta.Harness.isReadyTimeout","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-isReadyTimeout","meta":{},"sort":3},{"name":"pauseBetweenTests","fullName":"Siesta.Harness.pauseBetweenTests","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-pauseBetweenTests","meta":{},"sort":3},{"name":"failOnExclusiveSpecsWhenAutomated","fullName":"Siesta.Harness.failOnExclusiveSpecsWhenAutomated","icon":"icon-cfg","url":"#!/api/Siesta.Harness-cfg-failOnExclusiveSpecsWhenAutomated","meta":{},"sort":3},{"name":"testsuitestart","fullName":"Siesta.Harness.testsuitestart","icon":"icon-event","url":"#!/api/Siesta.Harness-event-testsuitestart","meta":{},"sort":3},{"name":"testsuiteend","fullName":"Siesta.Harness.testsuiteend","icon":"icon-event","url":"#!/api/Siesta.Harness-event-testsuiteend","meta":{},"sort":3},{"name":"configure","fullName":"Siesta.Harness.configure","icon":"icon-method","url":"#!/api/Siesta.Harness-method-configure","meta":{},"sort":3},{"name":"start","fullName":"Siesta.Harness.start","icon":"icon-method","url":"#!/api/Siesta.Harness-method-start","meta":{},"sort":3},{"name":"startFromUrl","fullName":"Siesta.Harness.startFromUrl","icon":"icon-method","url":"#!/api/Siesta.Harness-method-startFromUrl","meta":{},"sort":3},{"name":"Recorder.Recorder","fullName":"Siesta.Recorder.Recorder","icon":"icon-class","url":"#!/api/Siesta.Recorder.Recorder","meta":{},"sort":1},{"name":"uniqueComponentProperty","fullName":"Siesta.Recorder.Recorder.uniqueComponentProperty","icon":"icon-cfg","url":"#!/api/Siesta.Recorder.Recorder-cfg-uniqueComponentProperty","meta":{},"sort":3},{"name":"uniqueDomNodeProperty","fullName":"Siesta.Recorder.Recorder.uniqueDomNodeProperty","icon":"icon-cfg","url":"#!/api/Siesta.Recorder.Recorder-cfg-uniqueDomNodeProperty","meta":{},"sort":3},{"name":"recordOffsets","fullName":"Siesta.Recorder.Recorder.recordOffsets","icon":"icon-cfg","url":"#!/api/Siesta.Recorder.Recorder-cfg-recordOffsets","meta":{},"sort":3},{"name":"Recorder.ExtJS","fullName":"Siesta.Recorder.ExtJS","icon":"icon-class","url":"#!/api/Siesta.Recorder.ExtJS","meta":{},"sort":1},{"name":"Recorder.TargetExtractor","fullName":"Siesta.Recorder.TargetExtractor","icon":"icon-class","url":"#!/api/Siesta.Recorder.TargetExtractor","meta":{},"sort":1},{"name":"Recorder.TargetExtractor.Recognizer.View","fullName":"Siesta.Recorder.TargetExtractor.Recognizer.View","icon":"icon-class","url":"#!/api/Siesta.Recorder.TargetExtractor.Recognizer.View","meta":{},"sort":1},{"name":"Recorder.TargetExtractor.Recognizer.Grid","fullName":"Siesta.Recorder.TargetExtractor.Recognizer.Grid","icon":"icon-class","url":"#!/api/Siesta.Recorder.TargetExtractor.Recognizer.Grid","meta":{},"sort":1},{"name":"Recorder.TargetExtractor.Recognizer.DatePicker","fullName":"Siesta.Recorder.TargetExtractor.Recognizer.DatePicker","ic
on":"icon-class","url":"#!/api/Siesta.Recorder.TargetExtractor.Recognizer.DatePicker","meta":{},"sort":1},{"name":"Recorder.TargetExtractor.Recognizer.BoundList","fullName":"Siesta.Recorder.TargetExtractor.Recognizer.BoundList","icon":"icon-class","url":"#!/api/Siesta.Recorder.TargetExtractor.Recognizer.BoundList","meta":{},"sort":1},{"name":"Recorder.TargetExtractor.Recognizer.TimeAxisColumn","fullName":"Siesta.Recorder.TargetExtractor.Recognizer.TimeAxisColumn","icon":"icon-class","url":"#!/api/Siesta.Recorder.TargetExtractor.Recognizer.TimeAxisColumn","meta":{},"sort":1},{"name":"Recorder.TargetExtractor.Recognizer.NumberField","fullName":"Siesta.Recorder.TargetExtractor.Recognizer.NumberField","icon":"icon-class","url":"#!/api/Siesta.Recorder.TargetExtractor.Recognizer.NumberField","meta":{},"sort":1},{"name":"Recorder.TargetExtractor.ExtJS","fullName":"Siesta.Recorder.TargetExtractor.ExtJS","icon":"icon-class","url":"#!/api/Siesta.Recorder.TargetExtractor.ExtJS","meta":{},"sort":1},{"name":"Test.ExtJS.Grid","fullName":"Siesta.Test.ExtJS.Grid","icon":"icon-class","url":"#!/api/Siesta.Test.ExtJS.Grid","meta":{},"sort":1},{"name":"waitForRowsVisible","fullName":"Siesta.Test.ExtJS.Grid.waitForRowsVisible","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Grid-method-waitForRowsVisible","meta":{},"sort":3},{"name":"getFirstRow","fullName":"Siesta.Test.ExtJS.Grid.getFirstRow","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Grid-method-getFirstRow","meta":{},"sort":3},{"name":"getFirstCell","fullName":"Siesta.Test.ExtJS.Grid.getFirstCell","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Grid-method-getFirstCell","meta":{},"sort":3},{"name":"getRow","fullName":"Siesta.Test.ExtJS.Grid.getRow","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Grid-method-getRow","meta":{},"sort":3},{"name":"getCell","fullName":"Siesta.Test.ExtJS.Grid.getCell","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Grid-method-getCell","meta":{},"sort":3},{"name":"getLastCellInRow","fullName":"Siesta.Test.ExtJS.Grid.getLastCellInRow","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Grid-method-getLastCellInRow","meta":{},"sort":3},{"name":"matchGridCellContent","fullName":"Siesta.Test.ExtJS.Grid.matchGridCellContent","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Grid-method-matchGridCellContent","meta":{},"sort":3},{"name":"clickToEditCell","fullName":"Siesta.Test.ExtJS.Grid.clickToEditCell","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Grid-method-clickToEditCell","meta":{},"sort":3},{"name":"assertCellIsEmpty","fullName":"Siesta.Test.ExtJS.Grid.assertCellIsEmpty","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Grid-method-assertCellIsEmpty","meta":{},"sort":3},{"name":"waitForCellEmpty","fullName":"Siesta.Test.ExtJS.Grid.waitForCellEmpty","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Grid-method-waitForCellEmpty","meta":{},"sort":3},{"name":"Test.ExtJS.Element","fullName":"Siesta.Test.ExtJS.Element","icon":"icon-class","url":"#!/api/Siesta.Test.ExtJS.Element","meta":{},"sort":1},{"name":"hasRegion","fullName":"Siesta.Test.ExtJS.Element.hasRegion","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Element-method-hasRegion","meta":{},"sort":3},{"name":"Test.ExtJS.Component","fullName":"Siesta.Test.ExtJS.Component","icon":"icon-class","url":"#!/api/Siesta.Test.ExtJS.Component","meta":{},"sort":1},{"name":"waitForComponentVisible","fullName":"Siesta.Test.ExtJS.Component.waitForComponentVisible","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForC
omponentVisible","meta":{},"sort":3},{"name":"waitForComponentNotVisible","fullName":"Siesta.Test.ExtJS.Component.waitForComponentNotVisible","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForComponentNotVisible","meta":{},"sort":3},{"name":"waitForComponentQuery","fullName":"Siesta.Test.ExtJS.Component.waitForComponentQuery","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForComponentQuery","meta":{},"sort":3},{"name":"waitForCompositeQuery","fullName":"Siesta.Test.ExtJS.Component.waitForCompositeQuery","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForCompositeQuery","meta":{},"sort":3},{"name":"waitForCompositeQueryNotFound","fullName":"Siesta.Test.ExtJS.Component.waitForCompositeQueryNotFound","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForCompositeQueryNotFound","meta":{},"sort":3},{"name":"waitForCQ","fullName":"Siesta.Test.ExtJS.Component.waitForCQ","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForCQ","meta":{},"sort":3},{"name":"waitForCQNotFound","fullName":"Siesta.Test.ExtJS.Component.waitForCQNotFound","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForCQNotFound","meta":{},"sort":3},{"name":"waitForComponentQueryNotFound","fullName":"Siesta.Test.ExtJS.Component.waitForComponentQueryNotFound","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForComponentQueryNotFound","meta":{},"sort":3},{"name":"waitForCQVisible","fullName":"Siesta.Test.ExtJS.Component.waitForCQVisible","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForCQVisible","meta":{},"sort":3},{"name":"waitForCQNotVisible","fullName":"Siesta.Test.ExtJS.Component.waitForCQNotVisible","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForCQNotVisible","meta":{},"sort":3},{"name":"waitForComponentQueryVisible","fullName":"Siesta.Test.ExtJS.Component.waitForComponentQueryVisible","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForComponentQueryVisible","meta":{},"sort":3},{"name":"waitForComponentQueryNotVisible","fullName":"Siesta.Test.ExtJS.Component.waitForComponentQueryNotVisible","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForComponentQueryNotVisible","meta":{},"sort":3},{"name":"waitForXType","fullName":"Siesta.Test.ExtJS.Component.waitForXType","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForXType","meta":{},"sort":3},{"name":"waitForComponent","fullName":"Siesta.Test.ExtJS.Component.waitForComponent","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-waitForComponent","meta":{},"sort":3},{"name":"hasSize","fullName":"Siesta.Test.ExtJS.Component.hasSize","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-hasSize","meta":{},"sort":3},{"name":"hasPosition","fullName":"Siesta.Test.ExtJS.Component.hasPosition","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-hasPosition","meta":{},"sort":3},{"name":"destroysOk","fullName":"Siesta.Test.ExtJS.Component.destroysOk","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Component-method-destroysOk","meta":{},"sort":3},{"name":"Test.ExtJS.Store","fullName":"Siesta.Test.ExtJS.Store","icon":"icon-class","url":"#!/api/Siesta.Test.ExtJS.Store","meta":{},"sort":1},{"name":"waitForStoresToLoad","fullName":"Siesta.Test.ExtJS.Store.waitForStoresToLoad","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Store-met
hod-waitForStoresToLoad","meta":{},"sort":3},{"name":"loadStoresAndThen","fullName":"Siesta.Test.ExtJS.Store.loadStoresAndThen","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Store-method-loadStoresAndThen","meta":{},"sort":3},{"name":"isStoreEmpty","fullName":"Siesta.Test.ExtJS.Store.isStoreEmpty","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Store-method-isStoreEmpty","meta":{},"sort":3},{"name":"Test.ExtJS.DataView","fullName":"Siesta.Test.ExtJS.DataView","icon":"icon-class","url":"#!/api/Siesta.Test.ExtJS.DataView","meta":{},"sort":1},{"name":"waitForViewRendered","fullName":"Siesta.Test.ExtJS.DataView.waitForViewRendered","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.DataView-method-waitForViewRendered","meta":{},"sort":3},{"name":"getFirstItem","fullName":"Siesta.Test.ExtJS.DataView.getFirstItem","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.DataView-method-getFirstItem","meta":{},"sort":3},{"name":"Test.ExtJS.Observable","fullName":"Siesta.Test.ExtJS.Observable","icon":"icon-class","url":"#!/api/Siesta.Test.ExtJS.Observable","meta":{},"sort":1},{"name":"wontFire","fullName":"Siesta.Test.ExtJS.Observable.wontFire","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Observable-method-wontFire","meta":{},"sort":3},{"name":"firesOnce","fullName":"Siesta.Test.ExtJS.Observable.firesOnce","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Observable-method-firesOnce","meta":{},"sort":3},{"name":"firesAtLeastNTimes","fullName":"Siesta.Test.ExtJS.Observable.firesAtLeastNTimes","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Observable-method-firesAtLeastNTimes","meta":{},"sort":3},{"name":"","fullName":"Siesta.Test.ExtJS.Observable.","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Observable-method-","meta":{},"sort":3},{"name":"hasListener","fullName":"Siesta.Test.ExtJS.Observable.hasListener","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Observable-method-hasListener","meta":{},"sort":3},{"name":"","fullName":"Siesta.Test.ExtJS.Observable.","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Observable-method-","meta":{},"sort":3},{"name":"Test.ExtJS.Ajax","fullName":"Siesta.Test.ExtJS.Ajax","icon":"icon-class","url":"#!/api/Siesta.Test.ExtJS.Ajax","meta":{},"sort":1},{"name":"isAjaxLoading","fullName":"Siesta.Test.ExtJS.Ajax.isAjaxLoading","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Ajax-method-isAjaxLoading","meta":{},"sort":3},{"name":"ajaxRequestAndThen","fullName":"Siesta.Test.ExtJS.Ajax.ajaxRequestAndThen","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Ajax-method-ajaxRequestAndThen","meta":{},"sort":3},{"name":"waitForAjaxRequest","fullName":"Siesta.Test.ExtJS.Ajax.waitForAjaxRequest","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.Ajax-method-waitForAjaxRequest","meta":{},"sort":3},{"name":"Test.ExtJS.FormField","fullName":"Siesta.Test.ExtJS.FormField","icon":"icon-class","url":"#!/api/Siesta.Test.ExtJS.FormField","meta":{},"sort":1},{"name":"fieldHasValue","fullName":"Siesta.Test.ExtJS.FormField.fieldHasValue","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.FormField-method-fieldHasValue","meta":{},"sort":3},{"name":"isFieldEmpty","fullName":"Siesta.Test.ExtJS.FormField.isFieldEmpty","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS.FormField-method-isFieldEmpty","meta":{},"sort":3},{"name":"Test.ExtJSCore","fullName":"Siesta.Test.ExtJSCore","icon":"icon-class","url":"#!/api/Siesta.Test.ExtJSCore","meta":{},"sort":1},{"name":"getExt","fullName":"Siesta.Test.ExtJSCore.getExt","icon":"icon-method","url":"#!/a
pi/Siesta.Test.ExtJSCore-method-getExt","meta":{},"sort":3},{"name":"Ext","fullName":"Siesta.Test.ExtJSCore.Ext","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-Ext","meta":{},"sort":3},{"name":"compToEl","fullName":"Siesta.Test.ExtJSCore.compToEl","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-compToEl","meta":{"private":true},"sort":3},{"name":"knownBugIn","fullName":"Siesta.Test.ExtJSCore.knownBugIn","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-knownBugIn","meta":{},"sort":3},{"name":"requireOk","fullName":"Siesta.Test.ExtJSCore.requireOk","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-requireOk","meta":{},"sort":3},{"name":"","fullName":"Siesta.Test.ExtJSCore.","icon":"icon-property","url":"#!/api/Siesta.Test.ExtJSCore-property-","meta":{},"sort":3},{"name":"clickComponentQuery","fullName":"Siesta.Test.ExtJSCore.clickComponentQuery","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-clickComponentQuery","meta":{},"sort":3},{"name":"clickCQ","fullName":"Siesta.Test.ExtJSCore.clickCQ","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-clickCQ","meta":{},"sort":3},{"name":"compositeQuery","fullName":"Siesta.Test.ExtJSCore.compositeQuery","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-compositeQuery","meta":{},"sort":3},{"name":"cq","fullName":"Siesta.Test.ExtJSCore.cq","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-cq","meta":{},"sort":3},{"name":"cq1","fullName":"Siesta.Test.ExtJSCore.cq1","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-cq1","meta":{},"sort":3},{"name":"waitForTarget","fullName":"Siesta.Test.ExtJSCore.waitForTarget","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-waitForTarget","meta":{},"sort":3},{"name":"messageBoxIsVisible","fullName":"Siesta.Test.ExtJSCore.messageBoxIsVisible","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-messageBoxIsVisible","meta":{},"sort":3},{"name":"messageBoxIsHidden","fullName":"Siesta.Test.ExtJSCore.messageBoxIsHidden","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-messageBoxIsHidden","meta":{},"sort":3},{"name":"cqExists","fullName":"Siesta.Test.ExtJSCore.cqExists","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-cqExists","meta":{},"sort":3},{"name":"cqNotExists","fullName":"Siesta.Test.ExtJSCore.cqNotExists","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-cqNotExists","meta":{},"sort":3},{"name":"componentQueryExists","fullName":"Siesta.Test.ExtJSCore.componentQueryExists","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-componentQueryExists","meta":{},"sort":3},{"name":"setValue","fullName":"Siesta.Test.ExtJSCore.setValue","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-setValue","meta":{},"sort":3},{"name":"waitForAnimations","fullName":"Siesta.Test.ExtJSCore.waitForAnimations","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJSCore-method-waitForAnimations","meta":{},"sort":3},{"name":"Test.Element","fullName":"Siesta.Test.Element","icon":"icon-class","url":"#!/api/Siesta.Test.Element","meta":{},"sort":1},{"name":"findCenter","fullName":"Siesta.Test.Element.findCenter","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-findCenter","meta":{},"sort":3},{"name":"isElementVisible","fullName":"Siesta.Test.Element.isElementVisible","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-isElementVisible","meta":{},"sort":3},{"name":"contentLike","fullName":"Siesta.Test.Ele
ment.contentLike","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-contentLike","meta":{},"sort":3},{"name":"contentNotLike","fullName":"Siesta.Test.Element.contentNotLike","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-contentNotLike","meta":{},"sort":3},{"name":"waitForContentLike","fullName":"Siesta.Test.Element.waitForContentLike","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForContentLike","meta":{},"sort":3},{"name":"waitForContentNotLike","fullName":"Siesta.Test.Element.waitForContentNotLike","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForContentNotLike","meta":{},"sort":3},{"name":"monkeyTest","fullName":"Siesta.Test.Element.monkeyTest","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-monkeyTest","meta":{},"sort":3},{"name":"hasCls","fullName":"Siesta.Test.Element.hasCls","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-hasCls","meta":{},"sort":3},{"name":"hasNotCls","fullName":"Siesta.Test.Element.hasNotCls","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-hasNotCls","meta":{},"sort":3},{"name":"hasStyle","fullName":"Siesta.Test.Element.hasStyle","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-hasStyle","meta":{},"sort":3},{"name":"hasNotStyle","fullName":"Siesta.Test.Element.hasNotStyle","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-hasNotStyle","meta":{},"sort":3},{"name":"waitForSelectorAt","fullName":"Siesta.Test.Element.waitForSelectorAt","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForSelectorAt","meta":{},"sort":3},{"name":"waitForSelectorAtCursor","fullName":"Siesta.Test.Element.waitForSelectorAtCursor","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForSelectorAtCursor","meta":{},"sort":3},{"name":"waitForSelector","fullName":"Siesta.Test.Element.waitForSelector","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForSelector","meta":{},"sort":3},{"name":"waitForSelectors","fullName":"Siesta.Test.Element.waitForSelectors","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForSelectors","meta":{},"sort":3},{"name":"waitForSelectorNotFound","fullName":"Siesta.Test.Element.waitForSelectorNotFound","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForSelectorNotFound","meta":{},"sort":3},{"name":"waitForElementVisible","fullName":"Siesta.Test.Element.waitForElementVisible","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForElementVisible","meta":{},"sort":3},{"name":"waitForElementNotVisible","fullName":"Siesta.Test.Element.waitForElementNotVisible","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForElementNotVisible","meta":{},"sort":3},{"name":"waitForElementTop","fullName":"Siesta.Test.Element.waitForElementTop","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForElementTop","meta":{},"sort":3},{"name":"waitForElementNotTop","fullName":"Siesta.Test.Element.waitForElementNotTop","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForElementNotTop","meta":{},"sort":3},{"name":"elementIsVisible","fullName":"Siesta.Test.Element.elementIsVisible","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-elementIsVisible","meta":{},"sort":3},{"name":"elementIsNotVisible","fullName":"Siesta.Test.Element.elementIsNotVisible","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-elementIsNotVisible","meta":{},"sort":3},{"name":"elementIsTop","fullName":"Siesta.Test.Element.elementIsTop
","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-elementIsTop","meta":{},"sort":3},{"name":"elementIsAt","fullName":"Siesta.Test.Element.elementIsAt","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-elementIsAt","meta":{},"sort":3},{"name":"elementIsTopElement","fullName":"Siesta.Test.Element.elementIsTopElement","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-elementIsTopElement","meta":{},"sort":3},{"name":"elementIsNotTopElement","fullName":"Siesta.Test.Element.elementIsNotTopElement","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-elementIsNotTopElement","meta":{},"sort":3},{"name":"selectorIsAt","fullName":"Siesta.Test.Element.selectorIsAt","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-selectorIsAt","meta":{},"sort":3},{"name":"selectorExists","fullName":"Siesta.Test.Element.selectorExists","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-selectorExists","meta":{},"sort":3},{"name":"selectorNotExists","fullName":"Siesta.Test.Element.selectorNotExists","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-selectorNotExists","meta":{},"sort":3},{"name":"waitForScrollChange","fullName":"Siesta.Test.Element.waitForScrollChange","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForScrollChange","meta":{},"sort":3},{"name":"waitForScrollLeftChange","fullName":"Siesta.Test.Element.waitForScrollLeftChange","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForScrollLeftChange","meta":{},"sort":3},{"name":"waitForScrollTopChange","fullName":"Siesta.Test.Element.waitForScrollTopChange","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForScrollTopChange","meta":{},"sort":3},{"name":"scrollVerticallyTo","fullName":"Siesta.Test.Element.scrollVerticallyTo","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-scrollVerticallyTo","meta":{},"sort":3},{"name":"scrollHorizontallyTo","fullName":"Siesta.Test.Element.scrollHorizontallyTo","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-scrollHorizontallyTo","meta":{},"sort":3},{"name":"chainClick","fullName":"Siesta.Test.Element.chainClick","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-chainClick","meta":{},"sort":3},{"name":"clickSelector","fullName":"Siesta.Test.Element.clickSelector","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-clickSelector","meta":{},"sort":3},{"name":"selectorCountIs","fullName":"Siesta.Test.Element.selectorCountIs","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-selectorCountIs","meta":{},"sort":3},{"name":"isInView","fullName":"Siesta.Test.Element.isInView","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-isInView","meta":{},"sort":3},{"name":"elementIsInView","fullName":"Siesta.Test.Element.elementIsInView","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-elementIsInView","meta":{},"sort":3},{"name":"waitUntilInView","fullName":"Siesta.Test.Element.waitUntilInView","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitUntilInView","meta":{},"sort":3},{"name":"elementIsEmpty","fullName":"Siesta.Test.Element.elementIsEmpty","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-elementIsEmpty","meta":{},"sort":3},{"name":"elementIsNotEmpty","fullName":"Siesta.Test.Element.elementIsNotEmpty","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-elementIsNotEmpty","meta":{},"sort":3},{"name":"waitForElementEmpty","fullName":"Siesta.Test.Element.waitForElementEmpty","icon":"ico
n-method","url":"#!/api/Siesta.Test.Element-method-waitForElementEmpty","meta":{},"sort":3},{"name":"waitForElementNotEmpty","fullName":"Siesta.Test.Element.waitForElementNotEmpty","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-waitForElementNotEmpty","meta":{},"sort":3},{"name":"hasAttributeValue","fullName":"Siesta.Test.Element.hasAttributeValue","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-hasAttributeValue","meta":{},"sort":3},{"name":"hasValue","fullName":"Siesta.Test.Element.hasValue","icon":"icon-method","url":"#!/api/Siesta.Test.Element-method-hasValue","meta":{},"sort":3},{"name":"Test.Function","fullName":"Siesta.Test.Function","icon":"icon-class","url":"#!/api/Siesta.Test.Function","meta":{},"sort":1},{"name":"isCalled","fullName":"Siesta.Test.Function.isCalled","icon":"icon-method","url":"#!/api/Siesta.Test.Function-method-isCalled","meta":{},"sort":3},{"name":"isCalledOnce","fullName":"Siesta.Test.Function.isCalledOnce","icon":"icon-method","url":"#!/api/Siesta.Test.Function-method-isCalledOnce","meta":{},"sort":3},{"name":"isCalledNTimes","fullName":"Siesta.Test.Function.isCalledNTimes","icon":"icon-method","url":"#!/api/Siesta.Test.Function-method-isCalledNTimes","meta":{},"sort":3},{"name":"isntCalled","fullName":"Siesta.Test.Function.isntCalled","icon":"icon-method","url":"#!/api/Siesta.Test.Function-method-isntCalled","meta":{},"sort":3},{"name":"methodIsCalledNTimes","fullName":"Siesta.Test.Function.methodIsCalledNTimes","icon":"icon-method","url":"#!/api/Siesta.Test.Function-method-methodIsCalledNTimes","meta":{},"sort":3},{"name":"methodIsCalled","fullName":"Siesta.Test.Function.methodIsCalled","icon":"icon-method","url":"#!/api/Siesta.Test.Function-method-methodIsCalled","meta":{},"sort":3},{"name":"methodIsntCalled","fullName":"Siesta.Test.Function.methodIsntCalled","icon":"icon-method","url":"#!/api/Siesta.Test.Function-method-methodIsntCalled","meta":{},"sort":3},{"name":"Test.Simulate.Touch","fullName":"Siesta.Test.Simulate.Touch","icon":"icon-class","url":"#!/api/Siesta.Test.Simulate.Touch","meta":{},"sort":1},{"name":"tap","fullName":"Siesta.Test.Simulate.Touch.tap","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Touch-method-tap","meta":{},"sort":3},{"name":"doubleTap","fullName":"Siesta.Test.Simulate.Touch.doubleTap","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Touch-method-doubleTap","meta":{},"sort":3},{"name":"longPress","fullName":"Siesta.Test.Simulate.Touch.longPress","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Touch-method-longPress","meta":{},"sort":3},{"name":"pinch","fullName":"Siesta.Test.Simulate.Touch.pinch","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Touch-method-pinch","meta":{},"sort":3},{"name":"touchDragTo","fullName":"Siesta.Test.Simulate.Touch.touchDragTo","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Touch-method-touchDragTo","meta":{},"sort":3},{"name":"touchDragBy","fullName":"Siesta.Test.Simulate.Touch.touchDragBy","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Touch-method-touchDragBy","meta":{},"sort":3},{"name":"swipe","fullName":"Siesta.Test.Simulate.Touch.swipe","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Touch-method-swipe","meta":{},"sort":3},{"name":"Test.Simulate.Mouse","fullName":"Siesta.Test.Simulate.Mouse","icon":"icon-class","url":"#!/api/Siesta.Test.Simulate.Mouse","meta":{},"sort":1},{"name":"dragDelay","fullName":"Siesta.Test.Simulate.Mouse.dragDelay","icon":"icon-cfg","url":"#!/api/Siesta.Test.Simulate.Mouse-cfg-dragD
elay","meta":{},"sort":3},{"name":"moveCursorBetweenPoints","fullName":"Siesta.Test.Simulate.Mouse.moveCursorBetweenPoints","icon":"icon-cfg","url":"#!/api/Siesta.Test.Simulate.Mouse-cfg-moveCursorBetweenPoints","meta":{},"sort":3},{"name":"moveMouseTo","fullName":"Siesta.Test.Simulate.Mouse.moveMouseTo","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-moveMouseTo","meta":{},"sort":3},{"name":"moveCursorTo","fullName":"Siesta.Test.Simulate.Mouse.moveCursorTo","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-moveCursorTo","meta":{},"sort":3},{"name":"moveMouseBy","fullName":"Siesta.Test.Simulate.Mouse.moveMouseBy","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-moveMouseBy","meta":{},"sort":3},{"name":"moveCursorBy","fullName":"Siesta.Test.Simulate.Mouse.moveCursorBy","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-moveCursorBy","meta":{},"sort":3},{"name":"click","fullName":"Siesta.Test.Simulate.Mouse.click","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-click","meta":{},"sort":3},{"name":"rightClick","fullName":"Siesta.Test.Simulate.Mouse.rightClick","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-rightClick","meta":{},"sort":3},{"name":"doubleClick","fullName":"Siesta.Test.Simulate.Mouse.doubleClick","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-doubleClick","meta":{},"sort":3},{"name":"mouseDown","fullName":"Siesta.Test.Simulate.Mouse.mouseDown","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-mouseDown","meta":{},"sort":3},{"name":"mouseUp","fullName":"Siesta.Test.Simulate.Mouse.mouseUp","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-mouseUp","meta":{},"sort":3},{"name":"mouseOver","fullName":"Siesta.Test.Simulate.Mouse.mouseOver","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-mouseOver","meta":{},"sort":3},{"name":"mouseOut","fullName":"Siesta.Test.Simulate.Mouse.mouseOut","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-mouseOut","meta":{},"sort":3},{"name":"drag","fullName":"Siesta.Test.Simulate.Mouse.drag","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-drag","meta":{"deprecated":{"text":"This method is deprecated in favor of {@link #dragTo} and {@link #dragBy} 
methods"}},"sort":3},{"name":"dragTo","fullName":"Siesta.Test.Simulate.Mouse.dragTo","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-dragTo","meta":{},"sort":3},{"name":"dragBy","fullName":"Siesta.Test.Simulate.Mouse.dragBy","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Mouse-method-dragBy","meta":{},"sort":3},{"name":"Test.Simulate.Keyboard","fullName":"Siesta.Test.Simulate.Keyboard","icon":"icon-class","url":"#!/api/Siesta.Test.Simulate.Keyboard","meta":{},"sort":1},{"name":"type","fullName":"Siesta.Test.Simulate.Keyboard.type","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Keyboard-method-type","meta":{},"sort":3},{"name":"keyPress","fullName":"Siesta.Test.Simulate.Keyboard.keyPress","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Keyboard-method-keyPress","meta":{},"sort":3},{"name":"Test.Simulate.KeyCodes","fullName":"Siesta.Test.Simulate.KeyCodes","icon":"icon-singleton","url":"#!/api/Siesta.Test.Simulate.KeyCodes","meta":{},"sort":1},{"name":"Test.Simulate.Event","fullName":"Siesta.Test.Simulate.Event","icon":"icon-class","url":"#!/api/Siesta.Test.Simulate.Event","meta":{},"sort":1},{"name":"simulateEventsWith","fullName":"Siesta.Test.Simulate.Event.simulateEventsWith","icon":"icon-cfg","url":"#!/api/Siesta.Test.Simulate.Event-cfg-simulateEventsWith","meta":{},"sort":3},{"name":"simulateEvent","fullName":"Siesta.Test.Simulate.Event.simulateEvent","icon":"icon-method","url":"#!/api/Siesta.Test.Simulate.Event-method-simulateEvent","meta":{},"sort":3},{"name":"Test.SenchaTouch","fullName":"Siesta.Test.SenchaTouch","icon":"icon-class","url":"#!/api/Siesta.Test.SenchaTouch","meta":{},"sort":1},{"name":"moveFingerTo","fullName":"Siesta.Test.SenchaTouch.moveFingerTo","icon":"icon-method","url":"#!/api/Siesta.Test.SenchaTouch-method-moveFingerTo","meta":{},"sort":3},{"name":"moveFingerBy","fullName":"Siesta.Test.SenchaTouch.moveFingerBy","icon":"icon-method","url":"#!/api/Siesta.Test.SenchaTouch-method-moveFingerBy","meta":{},"sort":3},{"name":"scrollUntilElementVisible","fullName":"Siesta.Test.SenchaTouch.scrollUntilElementVisible","icon":"icon-method","url":"#!/api/Siesta.Test.SenchaTouch-method-scrollUntilElementVisible","meta":{},"sort":3},{"name":"waitForScrollerPosition","fullName":"Siesta.Test.SenchaTouch.waitForScrollerPosition","icon":"icon-method","url":"#!/api/Siesta.Test.SenchaTouch-method-waitForScrollerPosition","meta":{},"sort":3},{"name":"Test.Action.MouseUp","fullName":"Siesta.Test.Action.MouseUp","icon":"icon-class","url":"#!/api/Siesta.Test.Action.MouseUp","meta":{},"sort":1},{"name":"Test.Action.SetValue","fullName":"Siesta.Test.Action.SetValue","icon":"icon-class","url":"#!/api/Siesta.Test.Action.SetValue","meta":{},"sort":1},{"name":"A","fullName":"Siesta.Test.Action.SetValue.A","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.SetValue-cfg-A","meta":{},"sort":3},{"name":"Test.Action.MonkeyTest","fullName":"Siesta.Test.Action.MonkeyTest","icon":"icon-class","url":"#!/api/Siesta.Test.Action.MonkeyTest","meta":{},"sort":1},{"name":"Test.Action.DoubleTap","fullName":"Siesta.Test.Action.DoubleTap","icon":"icon-class","url":"#!/api/Siesta.Test.Action.DoubleTap","meta":{},"sort":1},{"name":"Test.Action.MoveCursor","fullName":"Siesta.Test.Action.MoveCursor","icon":"icon-class","url":"#!/api/Siesta.Test.Action.MoveCursor","meta":{},"sort":1},{"name":"to","fullName":"Siesta.Test.Action.MoveCursor.to","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.MoveCursor-cfg-to","meta":{},"sort":3},{"name":"by","fullName":"Siesta.Test.Actio
n.MoveCursor.by","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.MoveCursor-cfg-by","meta":{},"sort":3},{"name":"offset","fullName":"Siesta.Test.Action.MoveCursor.offset","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.MoveCursor-cfg-offset","meta":{},"sort":3},{"name":"Test.Action.SetUrl","fullName":"Siesta.Test.Action.SetUrl","icon":"icon-class","url":"#!/api/Siesta.Test.Action.SetUrl","meta":{},"sort":1},{"name":"The","fullName":"Siesta.Test.Action.SetUrl.The","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.SetUrl-cfg-The","meta":{},"sort":3},{"name":"Test.Action.Swipe","fullName":"Siesta.Test.Action.Swipe","icon":"icon-class","url":"#!/api/Siesta.Test.Action.Swipe","meta":{},"sort":1},{"name":"direction","fullName":"Siesta.Test.Action.Swipe.direction","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Swipe-cfg-direction","meta":{},"sort":3},{"name":"Test.Action.Click","fullName":"Siesta.Test.Action.Click","icon":"icon-class","url":"#!/api/Siesta.Test.Action.Click","meta":{},"sort":1},{"name":"Test.Action.TouchDrag","fullName":"Siesta.Test.Action.TouchDrag","icon":"icon-class","url":"#!/api/Siesta.Test.Action.TouchDrag","meta":{},"sort":1},{"name":"target","fullName":"Siesta.Test.Action.TouchDrag.target","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.TouchDrag-cfg-target","meta":{},"sort":3},{"name":"source","fullName":"Siesta.Test.Action.TouchDrag.source","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.TouchDrag-cfg-source","meta":{},"sort":3},{"name":"to","fullName":"Siesta.Test.Action.TouchDrag.to","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.TouchDrag-cfg-to","meta":{},"sort":3},{"name":"fromOffset","fullName":"Siesta.Test.Action.TouchDrag.fromOffset","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.TouchDrag-cfg-fromOffset","meta":{},"sort":3},{"name":"toOffset","fullName":"Siesta.Test.Action.TouchDrag.toOffset","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.TouchDrag-cfg-toOffset","meta":{},"sort":3},{"name":"by","fullName":"Siesta.Test.Action.TouchDrag.by","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.TouchDrag-cfg-by","meta":{},"sort":3},{"name":"dragOnly","fullName":"Siesta.Test.Action.TouchDrag.dragOnly","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.TouchDrag-cfg-dragOnly","meta":{},"sort":3},{"name":"Test.Action.Eval","fullName":"Siesta.Test.Action.Eval","icon":"icon-class","url":"#!/api/Siesta.Test.Action.Eval","meta":{},"sort":1},{"name":"options","fullName":"Siesta.Test.Action.Eval.options","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Eval-cfg-options","meta":{},"sort":3},{"name":"Test.Action.LongPress","fullName":"Siesta.Test.Action.LongPress","icon":"icon-class","url":"#!/api/Siesta.Test.Action.LongPress","meta":{},"sort":1},{"name":"Test.Action.DoubleClick","fullName":"Siesta.Test.Action.DoubleClick","icon":"icon-class","url":"#!/api/Siesta.Test.Action.DoubleClick","meta":{},"sort":1},{"name":"Test.Action.Role.HasTarget","fullName":"Siesta.Test.Action.Role.HasTarget","icon":"icon-class","url":"#!/api/Siesta.Test.Action.Role.HasTarget","meta":{},"sort":1},{"name":"target","fullName":"Siesta.Test.Action.Role.HasTarget.target","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Role.HasTarget-cfg-target","meta":{},"sort":3},{"name":"el","fullName":"Siesta.Test.Action.Role.HasTarget.el","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Role.HasTarget-cfg-el","meta":{},"sort":3},{"name":"passTargetToNext","fullName":"Siesta.Test.Action.Role.HasTarget.passTargetToNext","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Role.HasTarge
t-cfg-passTargetToNext","meta":{},"sort":3},{"name":"options","fullName":"Siesta.Test.Action.Role.HasTarget.options","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Role.HasTarget-cfg-options","meta":{},"sort":3},{"name":"offset","fullName":"Siesta.Test.Action.Role.HasTarget.offset","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Role.HasTarget-cfg-offset","meta":{},"sort":3},{"name":"Test.Action.Tap","fullName":"Siesta.Test.Action.Tap","icon":"icon-class","url":"#!/api/Siesta.Test.Action.Tap","meta":{},"sort":1},{"name":"Test.Action.MethodCall","fullName":"Siesta.Test.Action.MethodCall","icon":"icon-class","url":"#!/api/Siesta.Test.Action.MethodCall","meta":{},"sort":1},{"name":"methodName","fullName":"Siesta.Test.Action.MethodCall.methodName","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.MethodCall-cfg-methodName","meta":{},"sort":3},{"name":"args","fullName":"Siesta.Test.Action.MethodCall.args","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.MethodCall-cfg-args","meta":{},"sort":3},{"name":"callbackIndex","fullName":"Siesta.Test.Action.MethodCall.callbackIndex","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.MethodCall-cfg-callbackIndex","meta":{},"sort":3},{"name":"Test.Action.Pinch","fullName":"Siesta.Test.Action.Pinch","icon":"icon-class","url":"#!/api/Siesta.Test.Action.Pinch","meta":{},"sort":1},{"name":"scale","fullName":"Siesta.Test.Action.Pinch.scale","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Pinch-cfg-scale","meta":{},"sort":3},{"name":"Test.Action.Wait","fullName":"Siesta.Test.Action.Wait","icon":"icon-class","url":"#!/api/Siesta.Test.Action.Wait","meta":{},"sort":1},{"name":"delay","fullName":"Siesta.Test.Action.Wait.delay","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Wait-cfg-delay","meta":{},"sort":3},{"name":"timeout","fullName":"Siesta.Test.Action.Wait.timeout","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Wait-cfg-timeout","meta":{},"sort":3},{"name":"args","fullName":"Siesta.Test.Action.Wait.args","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Wait-cfg-args","meta":{},"sort":3},{"name":"waitFor","fullName":"Siesta.Test.Action.Wait.waitFor","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Wait-cfg-waitFor","meta":{},"sort":3},{"name":"trigger","fullName":"Siesta.Test.Action.Wait.trigger","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Wait-cfg-trigger","meta":{},"sort":3},{"name":"Test.Action.MoveCursorTo","fullName":"Siesta.Test.Action.MoveCursorTo","icon":"icon-class","url":"#!/api/Siesta.Test.Action.MoveCursorTo","meta":{},"sort":1},{"name":"Test.Action.MouseDown","fullName":"Siesta.Test.Action.MouseDown","icon":"icon-class","url":"#!/api/Siesta.Test.Action.MouseDown","meta":{},"sort":1},{"name":"Test.Action.Drag","fullName":"Siesta.Test.Action.Drag","icon":"icon-class","url":"#!/api/Siesta.Test.Action.Drag","meta":{},"sort":1},{"name":"target","fullName":"Siesta.Test.Action.Drag.target","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Drag-cfg-target","meta":{},"sort":3},{"name":"source","fullName":"Siesta.Test.Action.Drag.source","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Drag-cfg-source","meta":{},"sort":3},{"name":"to","fullName":"Siesta.Test.Action.Drag.to","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Drag-cfg-to","meta":{},"sort":3},{"name":"fromOffset","fullName":"Siesta.Test.Action.Drag.fromOffset","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Drag-cfg-fromOffset","meta":{},"sort":3},{"name":"toOffset","fullName":"Siesta.Test.Action.Drag.toOffset","icon":"icon-cfg","url":"#!/api/Siesta.Test.Acti
on.Drag-cfg-toOffset","meta":{},"sort":3},{"name":"by","fullName":"Siesta.Test.Action.Drag.by","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Drag-cfg-by","meta":{},"sort":3},{"name":"dragOnly","fullName":"Siesta.Test.Action.Drag.dragOnly","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Drag-cfg-dragOnly","meta":{},"sort":3},{"name":"Test.Action.Done","fullName":"Siesta.Test.Action.Done","icon":"icon-class","url":"#!/api/Siesta.Test.Action.Done","meta":{},"sort":1},{"name":"delay","fullName":"Siesta.Test.Action.Done.delay","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Done-cfg-delay","meta":{},"sort":3},{"name":"Test.Action.RightClick","fullName":"Siesta.Test.Action.RightClick","icon":"icon-class","url":"#!/api/Siesta.Test.Action.RightClick","meta":{},"sort":1},{"name":"Test.Action.Type","fullName":"Siesta.Test.Action.Type","icon":"icon-class","url":"#!/api/Siesta.Test.Action.Type","meta":{},"sort":1},{"name":"text","fullName":"Siesta.Test.Action.Type.text","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Type-cfg-text","meta":{},"sort":3},{"name":"options","fullName":"Siesta.Test.Action.Type.options","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Type-cfg-options","meta":{},"sort":3},{"name":"clearExisting","fullName":"Siesta.Test.Action.Type.clearExisting","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action.Type-cfg-clearExisting","meta":{},"sort":3},{"name":"Test.jQuery","fullName":"Siesta.Test.jQuery","icon":"icon-class","url":"#!/api/Siesta.Test.jQuery","meta":{},"sort":1},{"name":"get$","fullName":"Siesta.Test.jQuery.get$","icon":"icon-method","url":"#!/api/Siesta.Test.jQuery-method-getS-","meta":{},"sort":3},{"name":"Test.Action","fullName":"Siesta.Test.Action","icon":"icon-class","url":"#!/api/Siesta.Test.Action","meta":{},"sort":1},{"name":"desc","fullName":"Siesta.Test.Action.desc","icon":"icon-cfg","url":"#!/api/Siesta.Test.Action-cfg-desc","meta":{},"sort":3},{"name":"Test.More","fullName":"Siesta.Test.More","icon":"icon-class","url":"#!/api/Siesta.Test.More","meta":{},"sort":1},{"name":"waitForTimeout","fullName":"Siesta.Test.More.waitForTimeout","icon":"icon-cfg","url":"#!/api/Siesta.Test.More-cfg-waitForTimeout","meta":{},"sort":3},{"name":"isGreater","fullName":"Siesta.Test.More.isGreater","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isGreater","meta":{},"sort":3},{"name":"isLess","fullName":"Siesta.Test.More.isLess","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isLess","meta":{},"sort":3},{"name":"isGreaterOrEqual","fullName":"Siesta.Test.More.isGreaterOrEqual","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isGreaterOrEqual","meta":{},"sort":3},{"name":"isLessOrEqual","fullName":"Siesta.Test.More.isLessOrEqual","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isLessOrEqual","meta":{},"sort":3},{"name":"isApprox","fullName":"Siesta.Test.More.isApprox","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isApprox","meta":{},"sort":3},{"name":"like","fullName":"Siesta.Test.More.like","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-like","meta":{},"sort":3},{"name":"unlike","fullName":"Siesta.Test.More.unlike","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-unlike","meta":{},"sort":3},{"name":"throwsOk","fullName":"Siesta.Test.More.throwsOk","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-throwsOk","meta":{},"sort":3},{"name":"livesOk","fullName":"Siesta.Test.More.livesOk","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-livesOk","meta":{},"sort":3},{"name":"isInst
anceOf","fullName":"Siesta.Test.More.isInstanceOf","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isInstanceOf","meta":{},"sort":3},{"name":"isString","fullName":"Siesta.Test.More.isString","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isString","meta":{},"sort":3},{"name":"isObject","fullName":"Siesta.Test.More.isObject","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isObject","meta":{},"sort":3},{"name":"isArray","fullName":"Siesta.Test.More.isArray","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isArray","meta":{},"sort":3},{"name":"isNumber","fullName":"Siesta.Test.More.isNumber","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isNumber","meta":{},"sort":3},{"name":"isBoolean","fullName":"Siesta.Test.More.isBoolean","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isBoolean","meta":{},"sort":3},{"name":"isDate","fullName":"Siesta.Test.More.isDate","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isDate","meta":{},"sort":3},{"name":"isRegExp","fullName":"Siesta.Test.More.isRegExp","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isRegExp","meta":{},"sort":3},{"name":"isFunction","fullName":"Siesta.Test.More.isFunction","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isFunction","meta":{},"sort":3},{"name":"isDeeply","fullName":"Siesta.Test.More.isDeeply","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isDeeply","meta":{},"sort":3},{"name":"isDeeplyStrict","fullName":"Siesta.Test.More.isDeeplyStrict","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-isDeeplyStrict","meta":{},"sort":3},{"name":"expectGlobals","fullName":"Siesta.Test.More.expectGlobals","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-expectGlobals","meta":{},"sort":3},{"name":"verifyGlobals","fullName":"Siesta.Test.More.verifyGlobals","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-verifyGlobals","meta":{},"sort":3},{"name":"waitFor","fullName":"Siesta.Test.More.waitFor","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-waitFor","meta":{},"sort":3},{"name":"waitForMs","fullName":"Siesta.Test.More.waitForMs","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-waitForMs","meta":{},"sort":3},{"name":"waitForFn","fullName":"Siesta.Test.More.waitForFn","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-waitForFn","meta":{},"sort":3},{"name":"chain","fullName":"Siesta.Test.More.chain","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-chain","meta":{},"sort":3},{"name":"chainForArray","fullName":"Siesta.Test.More.chainForArray","icon":"icon-method","url":"#!/api/Siesta.Test.More-method-chainForArray","meta":{},"sort":3},{"name":"Test.BDD.Spy","fullName":"Siesta.Test.BDD.Spy","icon":"icon-class","url":"#!/api/Siesta.Test.BDD.Spy","meta":{},"sort":1},{"name":"calls","fullName":"Siesta.Test.BDD.Spy.calls","icon":"icon-property","url":"#!/api/Siesta.Test.BDD.Spy-property-calls","meta":{},"sort":3},{"name":"and","fullName":"Siesta.Test.BDD.Spy.and","icon":"icon-property","url":"#!/api/Siesta.Test.BDD.Spy-property-and","meta":{},"sort":3},{"name":"callThrough","fullName":"Siesta.Test.BDD.Spy.callThrough","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Spy-method-callThrough","meta":{"chainable":true},"sort":3},{"name":"stub","fullName":"Siesta.Test.BDD.Spy.stub","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Spy-method-stub","meta":{"chainable":true},"sort":3},{"name":"returnValue","fullName":"Siesta.Test.BDD.Spy.returnValue","icon":"icon-method","url":"#!/a
pi/Siesta.Test.BDD.Spy-method-returnValue","meta":{"chainable":true},"sort":3},{"name":"callFake","fullName":"Siesta.Test.BDD.Spy.callFake","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Spy-method-callFake","meta":{"chainable":true},"sort":3},{"name":"throwError","fullName":"Siesta.Test.BDD.Spy.throwError","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Spy-method-throwError","meta":{"chainable":true},"sort":3},{"name":"reset","fullName":"Siesta.Test.BDD.Spy.reset","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Spy-method-reset","meta":{},"sort":3},{"name":"Test.BDD.Expectation","fullName":"Siesta.Test.BDD.Expectation","icon":"icon-class","url":"#!/api/Siesta.Test.BDD.Expectation","meta":{},"sort":1},{"name":"not","fullName":"Siesta.Test.BDD.Expectation.not","icon":"icon-property","url":"#!/api/Siesta.Test.BDD.Expectation-property-not","meta":{},"sort":3},{"name":"toBe","fullName":"Siesta.Test.BDD.Expectation.toBe","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toBe","meta":{},"sort":3},{"name":"toEqual","fullName":"Siesta.Test.BDD.Expectation.toEqual","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toEqual","meta":{},"sort":3},{"name":"toBeNull","fullName":"Siesta.Test.BDD.Expectation.toBeNull","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toBeNull","meta":{},"sort":3},{"name":"toBeNaN","fullName":"Siesta.Test.BDD.Expectation.toBeNaN","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toBeNaN","meta":{},"sort":3},{"name":"toBeDefined","fullName":"Siesta.Test.BDD.Expectation.toBeDefined","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toBeDefined","meta":{},"sort":3},{"name":"toBeUndefined","fullName":"Siesta.Test.BDD.Expectation.toBeUndefined","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toBeUndefined","meta":{},"sort":3},{"name":"toBeTruthy","fullName":"Siesta.Test.BDD.Expectation.toBeTruthy","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toBeTruthy","meta":{},"sort":3},{"name":"toBeFalsy","fullName":"Siesta.Test.BDD.Expectation.toBeFalsy","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toBeFalsy","meta":{},"sort":3},{"name":"toMatch","fullName":"Siesta.Test.BDD.Expectation.toMatch","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toMatch","meta":{},"sort":3},{"name":"toContain","fullName":"Siesta.Test.BDD.Expectation.toContain","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toContain","meta":{},"sort":3},{"name":"toBeLessThan","fullName":"Siesta.Test.BDD.Expectation.toBeLessThan","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toBeLessThan","meta":{},"sort":3},{"name":"toBeGreaterThan","fullName":"Siesta.Test.BDD.Expectation.toBeGreaterThan","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toBeGreaterThan","meta":{},"sort":3},{"name":"toBeCloseTo","fullName":"Siesta.Test.BDD.Expectation.toBeCloseTo","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toBeCloseTo","meta":{},"sort":3},{"name":"toThrow","fullName":"Siesta.Test.BDD.Expectation.toThrow","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toThrow","meta":{},"sort":3},{"name":"toHaveBeenCalled","fullName":"Siesta.Test.BDD.Expectation.toHaveBeenCalled","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toHaveBeenCalled","meta":{},"sort":3},{"name":"toHaveBeenCalledWith","fullName":"Siesta.Test.BDD.Exp
ectation.toHaveBeenCalledWith","icon":"icon-method","url":"#!/api/Siesta.Test.BDD.Expectation-method-toHaveBeenCalledWith","meta":{},"sort":3},{"name":"Test.Browser","fullName":"Siesta.Test.Browser","icon":"icon-class","url":"#!/api/Siesta.Test.Browser","meta":{},"sort":1},{"name":"elementFromPoint","fullName":"Siesta.Test.Browser.elementFromPoint","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-elementFromPoint","meta":{},"sort":3},{"name":"getElementAtCursor","fullName":"Siesta.Test.Browser.getElementAtCursor","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-getElementAtCursor","meta":{},"sort":3},{"name":"waitForEvent","fullName":"Siesta.Test.Browser.waitForEvent","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-waitForEvent","meta":{},"sort":3},{"name":"firesOk","fullName":"Siesta.Test.Browser.firesOk","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-firesOk","meta":{},"sort":3},{"name":"willFireNTimes","fullName":"Siesta.Test.Browser.willFireNTimes","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-willFireNTimes","meta":{},"sort":3},{"name":"wontFire","fullName":"Siesta.Test.Browser.wontFire","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-wontFire","meta":{},"sort":3},{"name":"firesOnce","fullName":"Siesta.Test.Browser.firesOnce","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-firesOnce","meta":{},"sort":3},{"name":"isntFired","fullName":"Siesta.Test.Browser.isntFired","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-isntFired","meta":{},"sort":3},{"name":"firesAtLeastNTimes","fullName":"Siesta.Test.Browser.firesAtLeastNTimes","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-firesAtLeastNTimes","meta":{},"sort":3},{"name":"isFiredWithSignature","fullName":"Siesta.Test.Browser.isFiredWithSignature","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-isFiredWithSignature","meta":{},"sort":3},{"name":"waitForTextPresent","fullName":"Siesta.Test.Browser.waitForTextPresent","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-waitForTextPresent","meta":{},"sort":3},{"name":"waitForTextNotPresent","fullName":"Siesta.Test.Browser.waitForTextNotPresent","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-waitForTextNotPresent","meta":{},"sort":3},{"name":"waitForTarget","fullName":"Siesta.Test.Browser.waitForTarget","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-waitForTarget","meta":{},"sort":3},{"name":"setWindowSize","fullName":"Siesta.Test.Browser.setWindowSize","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-setWindowSize","meta":{},"sort":3},{"name":"setUrl","fullName":"Siesta.Test.Browser.setUrl","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-setUrl","meta":{},"sort":3},{"name":"expectAlertMessage","fullName":"Siesta.Test.Browser.expectAlertMessage","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-expectAlertMessage","meta":{},"sort":3},{"name":"setNextConfirmReturnValue","fullName":"Siesta.Test.Browser.setNextConfirmReturnValue","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-setNextConfirmReturnValue","meta":{},"sort":3},{"name":"setNextPromptReturnValue","fullName":"Siesta.Test.Browser.setNextPromptReturnValue","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-setNextPromptReturnValue","meta":{},"sort":3},{"name":"switchTo","fullName":"Siesta.Test.Browser.switchTo","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-switchTo","meta":{},"sort":3}
,{"name":"switchToMain","fullName":"Siesta.Test.Browser.switchToMain","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-switchToMain","meta":{},"sort":3},{"name":"waitForPageLoad","fullName":"Siesta.Test.Browser.waitForPageLoad","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-waitForPageLoad","meta":{},"sort":3},{"name":"setTimeout","fullName":"Siesta.Test.Browser.setTimeout","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-setTimeout","meta":{},"sort":3},{"name":"clearTimeout","fullName":"Siesta.Test.Browser.clearTimeout","icon":"icon-method","url":"#!/api/Siesta.Test.Browser-method-clearTimeout","meta":{},"sort":3},{"name":"Test.ActionTarget","fullName":"Siesta.Test.ActionTarget","icon":"icon-class","url":"#!/api/Siesta.Test.ActionTarget","meta":{},"sort":1},{"name":"Test.Date","fullName":"Siesta.Test.Date","icon":"icon-class","url":"#!/api/Siesta.Test.Date","meta":{},"sort":1},{"name":"isDateEqual","fullName":"Siesta.Test.Date.isDateEqual","icon":"icon-method","url":"#!/api/Siesta.Test.Date-method-isDateEqual","meta":{},"sort":3},{"name":"Test.ExtJS","fullName":"Siesta.Test.ExtJS","icon":"icon-class","url":"#!/api/Siesta.Test.ExtJS","meta":{},"sort":1},{"name":"assertNoGlobalExtOverrides","fullName":"Siesta.Test.ExtJS.assertNoGlobalExtOverrides","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS-method-assertNoGlobalExtOverrides","meta":{},"sort":3},{"name":"assertMaxNumberOfGlobalExtOverrides","fullName":"Siesta.Test.ExtJS.assertMaxNumberOfGlobalExtOverrides","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS-method-assertMaxNumberOfGlobalExtOverrides","meta":{},"sort":3},{"name":"getTotalLayoutCounter","fullName":"Siesta.Test.ExtJS.getTotalLayoutCounter","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS-method-getTotalLayoutCounter","meta":{},"sort":3},{"name":"assertNoLayoutTriggered","fullName":"Siesta.Test.ExtJS.assertNoLayoutTriggered","icon":"icon-method","url":"#!/api/Siesta.Test.ExtJS-method-assertNoLayoutTriggered","meta":{},"sort":3},{"name":"Test.BDD","fullName":"Siesta.Test.BDD","icon":"icon-class","url":"#!/api/Siesta.Test.BDD","meta":{},"sort":1},{"name":"ddescribe","fullName":"Siesta.Test.BDD.ddescribe","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-ddescribe","meta":{},"sort":3},{"name":"xdescribe","fullName":"Siesta.Test.BDD.xdescribe","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-xdescribe","meta":{},"sort":3},{"name":"describe","fullName":"Siesta.Test.BDD.describe","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-describe","meta":{},"sort":3},{"name":"iit","fullName":"Siesta.Test.BDD.iit","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-iit","meta":{},"sort":3},{"name":"xit","fullName":"Siesta.Test.BDD.xit","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-xit","meta":{},"sort":3},{"name":"it","fullName":"Siesta.Test.BDD.it","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-it","meta":{},"sort":3},{"name":"expect","fullName":"Siesta.Test.BDD.expect","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-expect","meta":{},"sort":3},{"name":"any","fullName":"Siesta.Test.BDD.any","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-any","meta":{},"sort":3},{"name":"anyNumberApprox","fullName":"Siesta.Test.BDD.anyNumberApprox","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-anyNumberApprox","meta":{},"sort":3},{"name":"anyStringLike","fullName":"Siesta.Test.BDD.anyStringLike","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-anyStringLike","meta
":{},"sort":3},{"name":"beforeEach","fullName":"Siesta.Test.BDD.beforeEach","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-beforeEach","meta":{},"sort":3},{"name":"afterEach","fullName":"Siesta.Test.BDD.afterEach","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-afterEach","meta":{},"sort":3},{"name":"spyOn","fullName":"Siesta.Test.BDD.spyOn","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-spyOn","meta":{},"sort":3},{"name":"createSpy","fullName":"Siesta.Test.BDD.createSpy","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-createSpy","meta":{},"sort":3},{"name":"createSpyObj","fullName":"Siesta.Test.BDD.createSpyObj","icon":"icon-method","url":"#!/api/Siesta.Test.BDD-method-createSpyObj","meta":{},"sort":3},{"name":"Test.TextSelection","fullName":"Siesta.Test.TextSelection","icon":"icon-class","url":"#!/api/Siesta.Test.TextSelection","meta":{},"sort":1},{"name":"getSelectedText","fullName":"Siesta.Test.TextSelection.getSelectedText","icon":"icon-method","url":"#!/api/Siesta.Test.TextSelection-method-getSelectedText","meta":{},"sort":3},{"name":"selectText","fullName":"Siesta.Test.TextSelection.selectText","icon":"icon-method","url":"#!/api/Siesta.Test.TextSelection-method-selectText","meta":{},"sort":3},{"name":"Role.CanStyleOutput","fullName":"Siesta.Role.CanStyleOutput","icon":"icon-class","url":"#!/api/Siesta.Role.CanStyleOutput","meta":{"private":true},"sort":1},{"name":"disableColoring","fullName":"Siesta.Role.CanStyleOutput.disableColoring","icon":"icon-cfg","url":"#!/api/Siesta.Role.CanStyleOutput-cfg-disableColoring","meta":{},"sort":3},{"name":"Harness.Browser.ExtJSCore","fullName":"Siesta.Harness.Browser.ExtJSCore","icon":"icon-class","url":"#!/api/Siesta.Harness.Browser.ExtJSCore","meta":{},"sort":1},{"name":"loaderPath","fullName":"Siesta.Harness.Browser.ExtJSCore.loaderPath","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser.ExtJSCore-cfg-loaderPath","meta":{},"sort":3},{"name":"requires","fullName":"Siesta.Harness.Browser.ExtJSCore.requires","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser.ExtJSCore-cfg-requires","meta":{},"sort":3},{"name":"waitForExtComponentQueryReady","fullName":"Siesta.Harness.Browser.ExtJSCore.waitForExtComponentQueryReady","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser.ExtJSCore-cfg-waitForExtComponentQueryReady","meta":{},"sort":3},{"name":"getLoaderPathHook","fullName":"Siesta.Harness.Browser.ExtJSCore.getLoaderPathHook","icon":"icon-method","url":"#!/api/Siesta.Harness.Browser.ExtJSCore-method-getLoaderPathHook","meta":{},"sort":3},{"name":"Harness.Browser.SenchaTouch","fullName":"Siesta.Harness.Browser.SenchaTouch","icon":"icon-class","url":"#!/api/Siesta.Harness.Browser.SenchaTouch","meta":{},"sort":1},{"name":"testClass","fullName":"Siesta.Harness.Browser.SenchaTouch.testClass","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser.SenchaTouch-cfg-testClass","meta":{},"sort":3},{"name":"transparentEx","fullName":"Siesta.Harness.Browser.SenchaTouch.transparentEx","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser.SenchaTouch-cfg-transparentEx","meta":{},"sort":3},{"name":"performSetup","fullName":"Siesta.Harness.Browser.SenchaTouch.performSetup","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser.SenchaTouch-cfg-performSetup","meta":{},"sort":3},{"name":"Harness.Browser.ExtJS","fullName":"Siesta.Harness.Browser.ExtJS","icon":"icon-class","url":"#!/api/Siesta.Harness.Browser.ExtJS","meta":{},"sort":1},{"name":"testClass","fullName":"Siesta.Harness.Browser.ExtJS.testClass","icon":"icon-cf
g","url":"#!/api/Siesta.Harness.Browser.ExtJS-cfg-testClass","meta":{},"sort":3},{"name":"waitForExtReady","fullName":"Siesta.Harness.Browser.ExtJS.waitForExtReady","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser.ExtJS-cfg-waitForExtReady","meta":{},"sort":3},{"name":"waitForAppReady","fullName":"Siesta.Harness.Browser.ExtJS.waitForAppReady","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser.ExtJS-cfg-waitForAppReady","meta":{},"sort":3},{"name":"failOnMultipleComponentMatches","fullName":"Siesta.Harness.Browser.ExtJS.failOnMultipleComponentMatches","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser.ExtJS-cfg-failOnMultipleComponentMatches","meta":{},"sort":3},{"name":"Harness.NodeJS","fullName":"Siesta.Harness.NodeJS","icon":"icon-class","url":"#!/api/Siesta.Harness.NodeJS","meta":{},"sort":1},{"name":"Harness.Browser","fullName":"Siesta.Harness.Browser","icon":"icon-class","url":"#!/api/Siesta.Harness.Browser","meta":{},"sort":1},{"name":"testClass","fullName":"Siesta.Harness.Browser.testClass","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-testClass","meta":{},"sort":3},{"name":"autoRun","fullName":"Siesta.Harness.Browser.autoRun","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-autoRun","meta":{},"sort":3},{"name":"viewDOM","fullName":"Siesta.Harness.Browser.viewDOM","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-viewDOM","meta":{},"sort":3},{"name":"domContainerRegion","fullName":"Siesta.Harness.Browser.domContainerRegion","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-domContainerRegion","meta":{},"sort":3},{"name":"speedRun","fullName":"Siesta.Harness.Browser.speedRun","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-speedRun","meta":{},"sort":3},{"name":"mouseMovePrecision","fullName":"Siesta.Harness.Browser.mouseMovePrecision","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-mouseMovePrecision","meta":{},"sort":3},{"name":"breakOnFail","fullName":"Siesta.Harness.Browser.breakOnFail","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-breakOnFail","meta":{},"sort":3},{"name":"debuggerOnFail","fullName":"Siesta.Harness.Browser.debuggerOnFail","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-debuggerOnFail","meta":{},"sort":3},{"name":"failOnResourceLoadError","fullName":"Siesta.Harness.Browser.failOnResourceLoadError","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-failOnResourceLoadError","meta":{},"sort":3},{"name":"disableCaching","fullName":"Siesta.Harness.Browser.disableCaching","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-disableCaching","meta":{},"sort":3},{"name":"forceDOMVisible","fullName":"Siesta.Harness.Browser.forceDOMVisible","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-forceDOMVisible","meta":{},"sort":3},{"name":"runInPopup","fullName":"Siesta.Harness.Browser.runInPopup","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-runInPopup","meta":{},"sort":3},{"name":"pageUrl","fullName":"Siesta.Harness.Browser.pageUrl","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-pageUrl","meta":{},"sort":3},{"name":"useStrictMode","fullName":"Siesta.Harness.Browser.useStrictMode","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-useStrictMode","meta":{},"sort":3},{"name":"innerHtmlHead","fullName":"Siesta.Harness.Browser.innerHtmlHead","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-innerHtmlHead","meta":{},"sort":3},{"name":"innerHtmlBody","fullName":"Siesta.Harness.Browser.innerHtmlBody","icon":"icon-cfg","url":
"#!/api/Siesta.Harness.Browser-cfg-innerHtmlBody","meta":{},"sort":3},{"name":"sandbox","fullName":"Siesta.Harness.Browser.sandbox","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-sandbox","meta":{},"sort":3},{"name":"sandboxBoundaryByGroup","fullName":"Siesta.Harness.Browser.sandboxBoundaryByGroup","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-sandboxBoundaryByGroup","meta":{},"sort":3},{"name":"sandboxCleanup","fullName":"Siesta.Harness.Browser.sandboxCleanup","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-sandboxCleanup","meta":{},"sort":3},{"name":"runCore","fullName":"Siesta.Harness.Browser.runCore","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-runCore","meta":{},"sort":3},{"name":"simulateEventsWith","fullName":"Siesta.Harness.Browser.simulateEventsWith","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-simulateEventsWith","meta":{},"sort":3},{"name":"autoScrollElementsIntoView","fullName":"Siesta.Harness.Browser.autoScrollElementsIntoView","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-autoScrollElementsIntoView","meta":{},"sort":3},{"name":"enableUnreachableClickWarning","fullName":"Siesta.Harness.Browser.enableUnreachableClickWarning","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-enableUnreachableClickWarning","meta":{},"sort":3},{"name":"maintainViewportSize","fullName":"Siesta.Harness.Browser.maintainViewportSize","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-maintainViewportSize","meta":{},"sort":3},{"name":"viewportWidth","fullName":"Siesta.Harness.Browser.viewportWidth","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-viewportWidth","meta":{},"sort":3},{"name":"viewportHeight","fullName":"Siesta.Harness.Browser.viewportHeight","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-viewportHeight","meta":{},"sort":3},{"name":"recorderConfig","fullName":"Siesta.Harness.Browser.recorderConfig","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-recorderConfig","meta":{},"sort":3},{"name":"jasmine","fullName":"Siesta.Harness.Browser.jasmine","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-jasmine","meta":{},"sort":3},{"name":"needUI","fullName":"Siesta.Harness.Browser.needUI","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-needUI","meta":{},"sort":3},{"name":"rerunHotKey","fullName":"Siesta.Harness.Browser.rerunHotKey","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-rerunHotKey","meta":{},"sort":3},{"name":"restartOnBlur","fullName":"Siesta.Harness.Browser.restartOnBlur","icon":"icon-cfg","url":"#!/api/Siesta.Harness.Browser-cfg-restartOnBlur","meta":{},"sort":3},{"name":"isStandardPackage","fullName":"Siesta.Harness.Browser.isStandardPackage","icon":"icon-method","url":"#!/api/Siesta.Harness.Browser-method-isStandardPackage","meta":{},"sort":3},{"name":"Test","fullName":"Siesta.Test","icon":"icon-class","url":"#!/api/Siesta.Test","meta":{},"sort":1},{"name":"url","fullName":"Siesta.Test.url","icon":"icon-property","url":"#!/api/Siesta.Test-property-url","meta":{},"sort":3},{"name":"isReadyTimeout","fullName":"Siesta.Test.isReadyTimeout","icon":"icon-cfg","url":"#!/api/Siesta.Test-cfg-isReadyTimeout","meta":{},"sort":3},{"name":"isReady","fullName":"Siesta.Test.isReady","icon":"icon-method","url":"#!/api/Siesta.Test-method-isReady","meta":{},"sort":3},{"name":"testupdate","fullName":"Siesta.Test.testupdate","icon":"icon-event","url":"#!/api/Siesta.Test-event-testupdate","meta":{},"sort":3},{"name":"diag","fullName":"Siesta.Test.diag"
,"icon":"icon-method","url":"#!/api/Siesta.Test-method-diag","meta":{},"sort":3},{"name":"pass","fullName":"Siesta.Test.pass","icon":"icon-method","url":"#!/api/Siesta.Test-method-pass","meta":{},"sort":3},{"name":"fail","fullName":"Siesta.Test.fail","icon":"icon-method","url":"#!/api/Siesta.Test-method-fail","meta":{},"sort":3},{"name":"exit","fullName":"Siesta.Test.exit","icon":"icon-method","url":"#!/api/Siesta.Test-method-exit","meta":{},"sort":3},{"name":"ok","fullName":"Siesta.Test.ok","icon":"icon-method","url":"#!/api/Siesta.Test-method-ok","meta":{},"sort":3},{"name":"notOk","fullName":"Siesta.Test.notOk","icon":"icon-method","url":"#!/api/Siesta.Test-method-notOk","meta":{},"sort":3},{"name":"is","fullName":"Siesta.Test.is","icon":"icon-method","url":"#!/api/Siesta.Test-method-is","meta":{},"sort":3},{"name":"isNot","fullName":"Siesta.Test.isNot","icon":"icon-method","url":"#!/api/Siesta.Test-method-isNot","meta":{},"sort":3},{"name":"isStrict","fullName":"Siesta.Test.isStrict","icon":"icon-method","url":"#!/api/Siesta.Test-method-isStrict","meta":{},"sort":3},{"name":"isNotStrict","fullName":"Siesta.Test.isNotStrict","icon":"icon-method","url":"#!/api/Siesta.Test-method-isNotStrict","meta":{},"sort":3},{"name":"wait","fullName":"Siesta.Test.wait","icon":"icon-method","url":"#!/api/Siesta.Test-method-wait","meta":{},"sort":3},{"name":"endWait","fullName":"Siesta.Test.endWait","icon":"icon-method","url":"#!/api/Siesta.Test-method-endWait","meta":{},"sort":3},{"name":"beginAsync","fullName":"Siesta.Test.beginAsync","icon":"icon-method","url":"#!/api/Siesta.Test-method-beginAsync","meta":{},"sort":3},{"name":"endAsync","fullName":"Siesta.Test.endAsync","icon":"icon-method","url":"#!/api/Siesta.Test-method-endAsync","meta":{},"sort":3},{"name":"getSubTest","fullName":"Siesta.Test.getSubTest","icon":"icon-method","url":"#!/api/Siesta.Test-method-getSubTest","meta":{},"sort":3},{"name":"launchSubTest","fullName":"Siesta.Test.launchSubTest","icon":"icon-method","url":"#!/api/Siesta.Test-method-launchSubTest","meta":{},"sort":3},{"name":"todo","fullName":"Siesta.Test.todo","icon":"icon-method","url":"#!/api/Siesta.Test-method-todo","meta":{},"sort":3},{"name":"snooze","fullName":"Siesta.Test.snooze","icon":"icon-method","url":"#!/api/Siesta.Test-method-snooze","meta":{},"sort":3},{"name":"subTest","fullName":"Siesta.Test.subTest","icon":"icon-method","url":"#!/api/Siesta.Test-method-subTest","meta":{},"sort":3},{"name":"testfailedwithexception","fullName":"Siesta.Test.testfailedwithexception","icon":"icon-event","url":"#!/api/Siesta.Test-event-testfailedwithexception","meta":{},"sort":3},{"name":"teststart","fullName":"Siesta.Test.teststart","icon":"icon-event","url":"#!/api/Siesta.Test-event-teststart","meta":{},"sort":3},{"name":"setup","fullName":"Siesta.Test.setup","icon":"icon-method","url":"#!/api/Siesta.Test-method-setup","meta":{},"sort":3},{"name":"tearDown","fullName":"Siesta.Test.tearDown","icon":"icon-method","url":"#!/api/Siesta.Test-method-tearDown","meta":{},"sort":3},{"name":"earlySetup","fullName":"Siesta.Test.earlySetup","icon":"icon-method","url":"#!/api/Siesta.Test-method-earlySetup","meta":{},"sort":3},{"name":"testfinalize","fullName":"Siesta.Test.testfinalize","icon":"icon-event","url":"#!/api/Siesta.Test-event-testfinalize","meta":{},"sort":3},{"name":"done","fullName":"Siesta.Test.done","icon":"icon-method","url":"#!/api/Siesta.Test-method-done","meta":{},"sort":3},{"name":"beforetestfinalize","fullName":"Siesta.Test.beforetestfinalize","icon":"icon-event","url":"
#!/api/Siesta.Test-event-beforetestfinalize","meta":{},"sort":3},{"name":"Util.Role.CanParseBrowser","fullName":"Siesta.Util.Role.CanParseBrowser","icon":"icon-class","url":"#!/api/Siesta.Util.Role.CanParseBrowser","meta":{},"sort":1},{"name":"Util.Role.CanCompareObjects","fullName":"Siesta.Util.Role.CanCompareObjects","icon":"icon-class","url":"#!/api/Siesta.Util.Role.CanCompareObjects","meta":{},"sort":1},{"name":"compareObjects","fullName":"Siesta.Util.Role.CanCompareObjects.compareObjects","icon":"icon-method","url":"#!/api/Siesta.Util.Role.CanCompareObjects-method-compareObjects","meta":{},"sort":3},{"name":"Getting Started with Siesta - stress free JavaScript unit testing","fullName":"guide: Getting Started with Siesta - stress free JavaScript unit testing","icon":"icon-guide","url":"#!/guide/siesta_getting_started","meta":{},"sort":4},{"name":"Testing applications generated by Sencha Cmd","fullName":"guide: Testing applications generated by Sencha Cmd","icon":"icon-guide","url":"#!/guide/testing_cmd_application","meta":{},"sort":4},{"name":"Siesta automation & reports.","fullName":"guide: Siesta automation & reports.","icon":"icon-guide","url":"#!/guide/siesta_automation","meta":{},"sort":4},{"name":"Cloud testing. BrowserStack integration","fullName":"guide: Cloud testing. BrowserStack integration","icon":"icon-guide","url":"#!/guide/browserstack_integration","meta":{},"sort":4},{"name":"Cloud testing. Sauce Labs integration","fullName":"guide: Cloud testing. Sauce Labs integration","icon":"icon-guide","url":"#!/guide/saucelabs_integration","meta":{},"sort":4},{"name":"Testing an Ext JS MVC application with Siesta","fullName":"guide: Testing an Ext JS MVC application with Siesta","icon":"icon-guide","url":"#!/guide/testing_mvc_app","meta":{},"sort":4},{"name":"Extending a test class with your own assertions and utility methods","fullName":"guide: Extending a test class with your own assertions and utility methods","icon":"icon-guide","url":"#!/guide/extending_test_class","meta":{},"sort":4},{"name":"Testing a Sencha Touch application. Implementing automatic login before every test","fullName":"guide: Testing a Sencha Touch application. 
Implementing automatic login before every test","icon":"icon-guide","url":"#!/guide/testing_sencha_touch_app","meta":{},"sort":4},{"name":"Cross page testing","fullName":"guide: Cross page testing","icon":"icon-guide","url":"#!/guide/cross_page_testing","meta":{},"sort":4},{"name":"BDD conventions","fullName":"guide: BDD conventions","icon":"icon-guide","url":"#!/guide/bdd_conventions","meta":{},"sort":4},{"name":"Code coverage with Siesta","fullName":"guide: Code coverage with Siesta","icon":"icon-guide","url":"#!/guide/code_coverage","meta":{},"sort":4},{"name":"Using the event recorder","fullName":"guide: Using the event recorder","icon":"icon-guide","url":"#!/guide/event_recorder","meta":{},"sort":4}],"guideSearch":{},"tests":false,"signatures":[{"long":"abstract","short":"ABS","tagname":"abstract"},{"long":"chainable","short":">","tagname":"chainable"},{"long":"deprecated","short":"DEP","tagname":"deprecated"},{"long":"experimental","short":"EXP","tagname":"experimental"},{"long":"★","short":"★","tagname":"new"},{"long":"preventable","short":"PREV","tagname":"preventable"},{"long":"private","short":"PRI","tagname":"private"},{"long":"protected","short":"PRO","tagname":"protected"},{"long":"readonly","short":"R O","tagname":"readonly"},{"long":"removed","short":"REM","tagname":"removed"},{"long":"required","short":"REQ","tagname":"required"},{"long":"static","short":"STA","tagname":"static"},{"long":"template","short":"TMP","tagname":"template"}],"memberTypes":[{"title":"Config options","toolbar_title":"Configs","position":1,"icon":"/var/lib/gems/1.9.1/gems/jsduck-5.3.4/lib/jsduck/tag/icons/cfg.png","subsections":[{"title":"Required config options","filter":{"required":true}},{"title":"Optional config options","filter":{"required":false},"default":true}],"name":"cfg"},{"title":"Properties","position":2,"icon":"/var/lib/gems/1.9.1/gems/jsduck-5.3.4/lib/jsduck/tag/icons/property.png","subsections":[{"title":"Instance properties","filter":{"static":false},"default":true},{"title":"Static properties","filter":{"static":true}}],"name":"property"},{"title":"Methods","position":3,"icon":"/var/lib/gems/1.9.1/gems/jsduck-5.3.4/lib/jsduck/tag/icons/method.png","subsections":[{"title":"Instance methods","filter":{"static":false},"default":true},{"title":"Static methods","filter":{"static":true}}],"name":"method"},{"title":"Events","position":4,"icon":"/var/lib/gems/1.9.1/gems/jsduck-5.3.4/lib/jsduck/tag/icons/event.png","name":"event"},{"title":"CSS Variables","toolbar_title":"CSS Vars","position":5,"icon":"/var/lib/gems/1.9.1/gems/jsduck-5.3.4/lib/jsduck/tag/icons/css_var.png","name":"css_var"},{"title":"CSS Mixins","position":6,"icon":"/var/lib/gems/1.9.1/gems/jsduck-5.3.4/lib/jsduck/tag/icons/css_mixin.png","name":"css_mixin"}],"localStorageDb":"docs","showPrintButton":false,"touchExamplesUi":false,"source":true,"commentsUrl":null,"commentsDomain":null,"message":""}};
|
import React from 'react'
import Layout from '../../components/layout'
import SEO from '../../components/seo'
const NotFoundPage = () => (
<Layout>
<SEO
title="404: Not Found"
description="You've found a route that doesn't exist. Oh no!"
/>
<h1>NOT FOUND</h1>
<p>You just hit a route that doesn't exist... the sadness.</p>
</Layout>
)
export default NotFoundPage
|
# -*- coding: utf-8 -*-
"""Tests for `codex-africanus` package."""
import numpy as np
import pytest
def test_fit_spi_components_vs_scipy():
"""
Here we just test the per component spi fitter against
a looped version of scipy's curve_fit
:return:
"""
from africanus.model.spi import fit_spi_components
curve_fit = pytest.importorskip("scipy.optimize").curve_fit
np.random.seed(123)
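    # Simulate ncomps components with power-law spectra I(nu) = I0 * (nu / nu0)**alpha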
ncomps = 250
alphas = -0.7 + 0.25 * np.random.randn(ncomps, 1)
i0s = 5.0 + np.random.randn(ncomps, 1)
nfreqs = 100
freqs = np.linspace(0.5, 1.5, nfreqs).reshape(1, nfreqs)
freq0 = 0.7
beams = np.zeros((ncomps, nfreqs))
for i in range(ncomps):
beams[i, :] = np.sinc(freqs - freqs[0])
model = beams * i0s * (freqs / freq0) ** alphas
sigma = np.abs(0.25 + 0.1 * np.random.randn(nfreqs))
data = model + sigma[None, :] * np.random.randn(ncomps, nfreqs)
weights = 1.0/sigma**2
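    # Vectorised fit: all components are solved in a single call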
alpha1, alphavar1, I01, I0var1 = fit_spi_components(
data, weights, freqs.squeeze(), freq0, tol=1e-8)
def spi_func(nu, I0, alpha, beam=1.0):
return beam * I0 * nu ** alpha
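    # Reference: fit each component independently with scipy's curve_fit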
I02 = np.zeros(ncomps)
I0var2 = np.zeros(ncomps)
alpha2 = np.zeros(ncomps)
alphavar2 = np.zeros(ncomps)
for i in range(ncomps):
        def fit_func(nu, I0, alpha):
            return spi_func(nu, I0, alpha, beam=beams[i])
popt, pcov = curve_fit(fit_func, (freqs / freq0).squeeze(),
data[i, :], sigma=np.diag(sigma**2),
p0=np.array([1.0, -0.7]),
absolute_sigma=False)
I02[i] = popt[0]
I0var2[i] = pcov[0, 0]
alpha2[i] = popt[1]
alphavar2[i] = pcov[1, 1]
np.testing.assert_array_almost_equal(alpha1, alpha2, decimal=5)
np.testing.assert_array_almost_equal(alphavar1, alphavar2, decimal=5)
np.testing.assert_array_almost_equal(I01, I02, decimal=5)
np.testing.assert_array_almost_equal(I0var1, I0var2, decimal=5)
def test_dask_fit_spi_components_vs_np():
from africanus.model.spi import fit_spi_components as np_fit_spi
from africanus.model.spi.dask import fit_spi_components
da = pytest.importorskip("dask.array")
np.random.seed(123)
ncomps = 800
alphas = -0.7 + 0.25 * np.random.randn(ncomps, 1)
i0s = 5.0 + np.random.randn(ncomps, 1)
nfreqs = 1000
freqs = np.linspace(0.5, 1.5, nfreqs).reshape(1, nfreqs)
freq0 = 0.7
model = i0s * (freqs / freq0) ** alphas
sigma = np.abs(0.25 + 0.1 * np.random.randn(nfreqs))
data = model + sigma[None, :] * np.random.randn(ncomps, nfreqs)
weights = 1.0/sigma**2
freqs = freqs.squeeze()
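    # Reference result from the pure NumPy implementation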
alpha1, alphavar1, I01, I0var1 = np_fit_spi(data, weights, freqs, freq0)
# now for the dask version
data_dask = da.from_array(data, chunks=(100, nfreqs))
    weights_dask = da.from_array(weights, chunks=(nfreqs,))
    freqs_dask = da.from_array(freqs, chunks=(nfreqs,))
alpha2, alphavar2, I02, I0var2 = fit_spi_components(data_dask,
weights_dask,
freqs_dask,
freq0).compute()
np.testing.assert_array_almost_equal(alpha1, alpha2, decimal=6)
np.testing.assert_array_almost_equal(alphavar1, alphavar2, decimal=6)
np.testing.assert_array_almost_equal(I01, I02, decimal=6)
np.testing.assert_array_almost_equal(I0var1, I0var2, decimal=6)
|
/*
* CanJS - 1.1.3 (2012-12-11)
* http://canjs.us/
* Copyright (c) 2012 Bitovi
* Licensed MIT
*/
define(['can/util/can'], function (can) {
var core_hasOwn = Object.prototype.hasOwnProperty,
isWindow = function (obj) {
return obj != null && obj == obj.window;
},
isPlainObject = function (obj) {
// Must be an Object.
// Because of IE, we also have to check the presence of the constructor property.
// Make sure that DOM nodes and window objects don't pass through, as well
if (!obj || (typeof obj !== "object") || obj.nodeType || isWindow(obj)) {
return false;
}
try {
// Not own constructor property must be Object
if (obj.constructor && !core_hasOwn.call(obj, "constructor") && !core_hasOwn.call(obj.constructor.prototype, "isPrototypeOf")) {
return false;
}
} catch (e) {
// IE8,9 Will throw exceptions on certain host objects #9897
return false;
}
		// Own properties are enumerated first, so to speed up,
		// if the last one is own, then all properties are own.
var key;
for (key in obj) {}
return key === undefined || core_hasOwn.call(obj, key);
		};
can.isPlainObject = isPlainObject;
return can;
});
|
from .getsecret import GetSecret
from .listsecrets import ListSecrets
|
const navigation = require('./navigation');
module.exports = {
navigation,
company: 'Endeavor Business Media, LLC',
logos: {
navbar: {
src: 'https://base.imgix.net/files/base/ebm/frn/image/static/logo/site_logo.png?h=45',
srcset: [
'https://base.imgix.net/files/base/ebm/frn/image/static/logo/site_logo.png?h=90 2x',
],
},
footer: {
src: 'https://base.imgix.net/files/base/ebm/frn/image/static/logo/site_logo.png?h=60',
srcset: [
'https://base.imgix.net/files/base/ebm/frn/image/static/logo/site_logo.png?h=120 2x',
],
},
},
socialMediaLinks: [
{ provider: 'linkedin', href: 'https://www.linkedin.com/company/forester_media_inc-/' },
{ provider: 'twitter', href: 'https://twitter.com/foresternetwork' },
{ provider: 'facebook', href: 'https://www.facebook.com/foresternetwork' },
],
gtm: {
containerId: 'GTM-TNFLPD9',
},
wufoo: {
userName: 'cygnuscorporate',
},
magazines: {
description: '',
},
contactUs: {
branding: {
bgColor: '#164f77',
logo: 'https://base.imgix.net/files/base/ebm/frn/image/static/logo/site_logo.png?h=60',
},
to: 'adsales@endeavorb2b.com',
},
};
|
// pages/invitation/invitation.js
Page({
/**
   * Initial data for the page
*/
data: {
},
/**
   * Lifecycle function -- called when the page loads
*/
onLoad: function (options) {
},
/**
   * Lifecycle function -- called when the page's initial render has finished
*/
onReady: function () {
},
/**
   * Lifecycle function -- called when the page is shown
*/
onShow: function () {
},
/**
   * Lifecycle function -- called when the page is hidden
*/
onHide: function () {
},
/**
   * Lifecycle function -- called when the page is unloaded
*/
onUnload: function () {
},
/**
   * Page event handler -- called when the user pulls down to refresh
*/
onPullDownRefresh: function () {
},
/**
   * Handler for the page's scroll-to-bottom event
*/
onReachBottom: function () {
},
/**
   * Called when the user taps "Share" in the top-right menu
*/
onShareAppMessage: function () {
}
})
|
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # allow importing files from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from ch08.deep_convnet import DeepConvNet
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)
network = DeepConvNet()
network.load_params("deep_convnet_params.pkl")
print("calculating test accuracy ... ")
#sampled = 1000
#x_test = x_test[:sampled]
#t_test = t_test[:sampled]
classified_ids = []
acc = 0.0
batch_size = 100
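# Evaluate the test set in mini-batches to limit memory usage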
for i in range(int(x_test.shape[0] / batch_size)):
tx = x_test[i*batch_size:(i+1)*batch_size]
tt = t_test[i*batch_size:(i+1)*batch_size]
y = network.predict(tx, train_flg=False)
y = np.argmax(y, axis=1)
classified_ids.append(y)
acc += np.sum(y == tt)
acc = acc / x_test.shape[0]
print("test accuracy:" + str(acc))
classified_ids = np.array(classified_ids)
classified_ids = classified_ids.flatten()
max_view = 20
current_view = 1
fig = plt.figure()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.2, wspace=0.2)
mis_pairs = {}
for i, val in enumerate(classified_ids == t_test):
if not val:
ax = fig.add_subplot(4, 5, current_view, xticks=[], yticks=[])
ax.imshow(x_test[i].reshape(28, 28), cmap=plt.cm.gray_r, interpolation='nearest')
mis_pairs[current_view] = (t_test[i], classified_ids[i])
current_view += 1
if current_view > max_view:
break
print("======= misclassified result =======")
print("{view index: (label, inference), ...}")
print(mis_pairs)
plt.show()
|
# model settings
model = dict(
type='FasterRCNN',
pretrained='open-mmlab://msra/hrnetv2_w18',
backbone=dict(
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(18, 36)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(18, 36, 72)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(18, 36, 72, 144)))),
neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
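# ImageNet pixel mean/std (RGB order)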
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_hrnetv2p_w18_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
import { makeSelectReader, makeSelectIdentity } from 'containers/NetworkClient/selectors';
import { takeLatest, call, put, select, all, fork, join } from 'redux-saga/effects';
import { FETCH_STAKE } from './constants';
import { fetchedStake } from './actions';
const stakeTable = {
json: true,
scope: 'boidcomtoken',
code: 'boidcomtoken',
table: 'stakes',
limit: 1
};
const refundTable = {
json: true,
  // scope is set at request time to the user's account name
code: 'boidcomtoken',
table: 'stakes',
limit: 0
};
//
// Get the network Stake
//
function* getStake() {
try {
const data = [];
const networkReader = yield select(makeSelectReader());
const currentIdentity = yield select(makeSelectIdentity());
    const stake = {
      ...stakeTable,
      lower_bound: currentIdentity.name,
    };
    const stakes = yield networkReader.get_table_rows(stake);
    const refund = {
      ...refundTable,
      scope: currentIdentity.name,
    };
const refunds = yield networkReader.get_table_rows(refund);
    stakes.rows.forEach(row => {
      data.push({
        owner: currentIdentity.name,
        ...row,
      });
    });
    refunds.rows.forEach(row => {
      data.push({
        owner: 'Refunding',
        ...row,
      });
    });
yield put(fetchedStake(data));
} catch (err) {
    console.error('An EOSToolkit error occurred - see details below:');
console.error(err);
yield put(fetchedStake([]));
}
}
function* watchFetchStake() {
yield takeLatest(FETCH_STAKE, getStake);
}
//
// Combine sagas into root saga
//
export default function* rootSaga() {
yield all([watchFetchStake()]);
}
|
import {UPDATE_RATING_FILTER} from "../../../actions/courses/sidebar/sidebarRatingFilterActions";
const updateFilterValue = (state, payload) => {
return payload.newValue;
}
export const sidebarRatingFilterReducer = (state = "", action) => {
const {payload, type} = action;
switch (type) {
case UPDATE_RATING_FILTER:
return updateFilterValue(state, payload);
default:
return state;
}
}
|
import datetime
import json
import pathlib
import time
import typing
import webbrowser
import requests
from bs4 import BeautifulSoup as Soup
try:
import colorama as colour
except ImportError:
RED = ""
YELLOW = ""
GREEN = ""
BLUE = ""
RESET = ""
else:
colour.init()
RED = colour.Fore.LIGHTRED_EX
YELLOW = colour.Fore.LIGHTYELLOW_EX
GREEN = colour.Fore.LIGHTGREEN_EX
BLUE = colour.Fore.LIGHTBLUE_EX
RESET = colour.Fore.RESET
from .data import DATA_DIR, DEFAULT_YEAR, URL, WAIT_TIME, get_cookie
def _open_page(page: str) -> None:
"""Open the page if the user hasn't opted out"""
if not (DATA_DIR / ".nobrowser").exists():
webbrowser.open(page)
def _make(folder: pathlib.Path) -> None:
"""Create folder if it doesn't exist."""
if not folder.exists():
folder.mkdir(parents=True)
def _pretty_print(message: str) -> None:
"""Analyse and print message"""
if message.startswith("That's the"):
print(GREEN + message + RESET)
elif message.startswith("You don't"):
print(YELLOW + message + RESET)
elif message.startswith("That's not"):
print(RED + message + RESET)
else:
raise ValueError("Failed to parse response.")
def fetch(
day: int, year: int = DEFAULT_YEAR
) -> str: # Might consider a default TODAY for day
"""Fetch and return the input for `day` of `year`.
All inputs are cached in `aoc_helper.DATA_DIR`."""
day_ = str(day)
year_ = str(year)
_make(DATA_DIR / year_)
input_path = DATA_DIR / year_ / (day_ + ".in")
if input_path.exists():
return input_path.read_text()
else:
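        # Puzzles unlock at 05:00 UTC (midnight in US Eastern Time)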
unlock = datetime.datetime(year, 12, day, 5)
now = datetime.datetime.utcnow()
if now < unlock:
print(YELLOW + "Waiting for puzzle unlock..." + RESET)
time.sleep((unlock - now).total_seconds())
print(GREEN + "Fetching input!" + RESET)
_open_page(URL.format(day=day, year=year))
resp = requests.get(
URL.format(day=day, year=year) + "/input", cookies=get_cookie()
)
if not resp.ok:
raise ValueError("Received bad response")
    # Strip newlines only: AoC input is not expected to contain
    # significant whitespace at the beginning, and this avoids
    # removing anything else that might matter.
data = resp.text.strip("\n")
input_path.write_text(data)
return data
def submit(day: int, part: int, answer: typing.Any, year: int = DEFAULT_YEAR) -> None:
"""Submit a solution.
Submissions are cached; submitting an already submitted solution will return the
previous response.
"""
day_ = str(day)
year_ = str(year)
part_ = str(part)
answer_ = str(answer)
submission_dir = DATA_DIR / year_ / day_
_make(submission_dir)
# Load cached solutions
submissions = submission_dir / "submissions.json"
if submissions.exists():
with submissions.open() as f:
solutions = json.load(f)
else:
solutions = {"1": {}, "2": {}}
# Check if solved
solution_file = submission_dir / f"{part}.solution"
if solution_file.exists():
solution = solution_file.read_text()
print(
f"Day {BLUE}{day}{RESET} part {BLUE}{part}{RESET} "
"has already been solved.\nThe solution was: "
f"{BLUE}{solution}{RESET}" # "\nResponse was:\n"
)
return # _pretty_print(solutions[part_][solution])
# printing the response here is pretty pointless, the user
# already knows it's correct
# salt: Yeah, but AoC will sometimes tell you what you placed (if you're in the top 1000) and you might want to see it!
# Check if answer has already been submitted
if answer_ in solutions[part_]:
print(
f"{YELLOW}Solution: {BLUE}{answer}{YELLOW} to part "
f"{BLUE}{part}{YELLOW} has already been submitted.\n"
f"Response was:{RESET}"
)
return _pretty_print(solutions[part_][answer_])
while True:
print(
f"Submitting {BLUE}{answer}{RESET} as the solution to part "
f"{BLUE}{part}{RESET}..."
)
resp = requests.post(
url=URL.format(day=day, year=year) + "/answer",
cookies=get_cookie(),
data={"level": part_, "answer": answer_},
)
if not resp.ok:
raise ValueError("Received bad response")
msg: str = Soup(resp.text, "html.parser").article.text
if msg.startswith("You gave"):
print(RED + msg + RESET)
wait_match = WAIT_TIME.search(msg)
# WAIT_TIME appears to capture (minutes, seconds); minutes may be absent
pause = 60 * int(wait_match[1] or 0) + int(wait_match[2])
print(f"{YELLOW}Waiting {BLUE}{pause}{YELLOW} seconds to retry...{RESET}")
time.sleep(pause)
else:
break
_pretty_print(msg)
if msg.startswith("That's the"):
solution_file.write_text(answer_)
if part == 1:
if not resp.url.endswith("#part2"):
resp.url += "#part2" # scroll to part 2
_open_page(resp.url) # open part 2 in the user's browser
# Cache submission
solutions[part_][answer_] = msg
with submissions.open("w") as f:
json.dump(solutions, f)
def lazy_submit(
day: int, solution: typing.Callable[[], typing.Any], year: int = DEFAULT_YEAR
) -> None:
"""Run the function only if we haven't seen a solution.
solution is expected to be named 'part_one' or 'part_two'
"""
part = 1 if solution.__name__ == "part_one" else 2
if not (DATA_DIR / str(year) / str(day) / f"{part}.solution").exists():
if (answer := solution()) is not None:
submit(day, part, answer, year)
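# Illustrative use from a day's solution file (assumes the 'part_one'/'part_two'
# naming convention that lazy_submit() checks for):
#
#   raw = fetch(1)
#
#   def part_one():
#       return sum(int(line) for line in raw.splitlines())
#
#   lazy_submit(1, part_one)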
|
/**
* Created by xj on 11/4/16 11:08 AM.
*/
function bindEventToButtonInListView(params) {
bindEventToFilterButton(params);
bindEventToSearchList(params);
bindEventToClearSearch(params);
dialogShowInformation(params.type);
}
function bindEventToShowAnnounceEdit() {
var $this = $(this);
if(!isUndefined($this.attr('data'))) {
var announceId = $this.attr('data');
var url = '/announce/' + announceId + '/edit';
var menuEditLi = $('#announce-edit');
menuEditLi.parent().trigger('click');
ajaxData('get', url, appendViewToContainer);
$('#myAnnounce .remove').trigger('click');
}
}
function bindEventToShowPublicationAuthor() {
var $this = $(this);
if(!isUndefined($this.attr('data'))) {
var personId = $this.attr('data');
var url = '/persons/' + personId;
ajaxData('get', url, handleGetViewShowCallback, [], 'myPerson');
$('#myPublication .remove').trigger('click');
}
}
function bindEventToShowProduct() {
var $this = $(this);
if(!isUndefined($this.attr('data-product')) && !isUndefined($this.attr('data-cart-product'))) {
var productId = $this.attr('data-product');
var cartId = $this.attr('data-cart-product');
var url = '/product/'+productId+'/cartProduct/'+cartId;
ajaxData('get', url, handleGetViewShowCallback, [], 'myProduct');
//$('#myOrder .remove').trigger('click');
}
}
function bindEventToShowIwallAuthor() {
var $this = $(this);
if(!isUndefined($this.attr('data'))) {
var personId = $this.attr('data');
var url = '/persons/' + personId;
ajaxData('get', url, handleGetViewShowCallback, [], 'myPerson');
$('#myIwall .remove').trigger('click');
}
}
function bindEventToShowAuthorPublication() {
var $this = $(this);
if(!isUndefined($this.attr('data'))) {
var publicationId = $this.attr('data');
var url = '/publications/' + publicationId;
ajaxData('get', url, handleGetViewShowCallback, [], 'myPerson');
// $('#myPerson .remove').trigger('click');
}
}
function bindEventToShowAuthorPublications() {
var $this = $(this);
if(!isUndefined($this.attr('data'))) {
var personId = $this.attr('data');
if($('.dialog-lg').length > 0) {
$('.dialog-lg').slideUp("slow", function () {
$('.dialog-lg').remove();
showAuthorPublications(personId);
})
} else {
showAuthorPublications(personId);
}
}
}
function bindEventToShowAuthorIwalls() {
var $this = $(this);
if(!isUndefined($this.attr('data'))) {
var personId = $this.attr('data');
if($('.dialog-lg').length > 0) {
$('.dialog-lg').slideUp("slow", function () {
$('.dialog-lg').remove();
showAuthorIwalls(personId);
})
} else {
showAuthorIwalls(personId);
}
}
}
function showAuthorIwalls(personId) {
$('#person-iwall-list').parent().trigger('click');
var url = '/iwall?type=person&personId=' + personId + '&take=12&skip=0';
ajaxData('get', url, appendViewToContainer);
}
function showAuthorPublications(personId) {
$('#person-publication-list').parent().trigger('click');
var url = '/publications?type=person&personId=' + personId + '&take=12&skip=0';
ajaxData('get', url, appendViewToContainer);
}
function bindEventToFilterButton(params) {
$('#filter-type a').unbind('click').on('click', function () {
if(!isNull($(this).attr('data-type'))) {
var type = $(this).attr('data-type');
//$('#filter-type').attr('data-type',type);// usable when the list button HTML is not refreshed
var url = getShowListUrl(params, type);
if(!isNull(url)) {
if(params.type == 'report' || params.type == 'advice' || params.type == 'folder') {
ajaxData('get', url, function (view) {
$('#container').html(view);
convertUtcTimeToLocalTime('container');
})
} else {
ajaxData('get', url, appendViewToContainer);
}
}
}
})
}
function bindEventToSearchList(params) {
$('#list-search button').unbind('click').on('click', function () {
var url = params.url;
console.log(url)
var searchText = getVal($(this).prev());
var type = params.type;
//if(type == 'publication' || type == 'person') {
if(type == 'publication') {
url += '&title=';
} else if(type == 'person') {
url += '&nick=';
}else if(type == 'iwall'){
url += '&title=';
}else if(type == 'order'){
url += '&title=';
}else if(type == 'product'){
url += '?title=';
}
if(!isNull(searchText)) {
url += searchText + '&take=' + params.take + '&skip=0';
ajaxData('get', url, appendViewToContainer);
}
//}
})
}
function bindEventToClearSearch(params) {
$('#list-search #search-clear').unbind('click').on('click', function () {
var url = params.url + '&take=' + params.take + '&skip=0';
ajaxData('get', url, appendViewToContainer);
})
}
function getShowListUrl(params, dataType) {
if(!isNull(params) && !isUndefined(params.type) && !isUndefined(params.take)) {
var type = params.type;
var take = params.take;
var url = '';
switch(type) {
case 'publication' :
url = 'publications?type=';
if(dataType == 'unforbidden') {
url += 'view&isForbidden=false';
} else if(dataType == 'forbidden') {
url += 'view&isForbidden=true';
} else {
url += dataType;
}
break;
case 'iwall':
url = 'iwall?type='+dataType;
break;
case 'person' :
url = 'persons?isForbidden=';
if(dataType == 'forbidden') {
url += 'true';
} else {
url += 'false';
}
break;
case 'announce' :
url += 'announceList?type=' + dataType;
break;
case 'order' :
url += 'order?type=' + dataType;
break;
case 'statistics' :
url += 'orderStatistics?type=' + dataType;
break;
case 'advice' :
url += 'getAdviceList?' + dataType;
break;
case 'report' :
url += params.url + '&' + dataType;
break;
case 'purchase' :
url += 'purchase?type='+dataType;
break;
case 'reward':
url += 'reward?type='+dataType;
break;
case 'refundRecord':
url += 'refundRecord?type='+dataType;
break;
case 'refundRequest':
url += 'refundRequest?auditing='+dataType;
break;
case 'reject':
url += 'reject?auditing='+dataType;
break;
case 'refundList':
url += 'refundList?type='+dataType;
break;
case 'product':
url += 'new_pro/products?type='+dataType;
break;
default:
if(!isNull(params.url)) {
url = params.url;
if(url.indexOf('?') == -1) {
url += '?';
} else {
url += '&';
}
url += dataType;
break;
} else {
return null;
}
}
url += '&take='+ take +'&skip=0';
return url;
}
}
function dialogShowInformation(type) {
$('.data-list a:not(#pagination a)').on('click', function () {
var $this = $(this);
if(!isUndefined($this.attr('data')) && !isNull(type)) {
var id = $this.attr('data');
var url = null;
var dialogId = null;
if(type == 'publication') {
url = 'publications/';
dialogId = 'myPublication';
} else if(type == 'person') {
url = 'persons/';
dialogId = 'myPerson';
} else if(type == 'announce') {
url = 'announce/';
dialogId = 'myAnnounce';
}else if(type == 'order') {
url = 'order/';
dialogId = 'myOrder';
} else if(type == 'iwall'){
url = 'iwall/';
dialogId = 'myIwall';
}else if(type == 'product'){
url = 'new_pro/products/';
dialogId = 'myProduct';
}
if(!isNull(url)) {
url += id;
ajaxData('get', url, handleGetViewShowCallback, [], dialogId);
}
}
})
}
function handleGetViewShowCallback(result, dialogId) {
if(!isNull(result)) {
createDialogLargeBox(result, 'no-padding margin', dialogId);
convertUtcTimeToLocalTime(dialogId);
dialogReasonBox();
$('#' + dialogId + ' img').unbind('error').on('error', function () {
$(this).attr('src', '/img/default.png');
})
}
}
function dialogReasonBox() {
$('.dialog-lg .dialog-body button').on('click', bindEventToShowReason);
}
function bindEventToShowReason() {
var $this = $(this);
var id = $this.attr('id');
var type = $this.attr('type').trim();
var targetId = $this.attr('data').trim();
var dataType = $this.attr('data-type').trim();
var params = {};
params.data = {};
params.data.type = type;
params.data.targetId = targetId;
params.dataType = dataType;
var url = null;
if(dataType == 'forbidden' || dataType == 'unForbidden') {
url = '/' + dataType;
params.forbiddenContent = $this.siblings('*[name="forbidden-content"]').text().trim();
} else if(dataType == 'official' || dataType == 'unOfficial') {
if(id == 'iwall-official') {
url = '/iwall/' + targetId + '/' + dataType;
} else {
url = '/publications/' + targetId + '/' + dataType;
}
} else if(dataType == 'audit'){
url = '/auditAnnounce/' + targetId;
} else if(dataType == 'delete'){
url = '/' + type + '/' + dataType + '/' + targetId;
} else if(dataType == 'unGag') {
url = '/persons/' + targetId +'/gag';
} else if(dataType == 'gag') {
url = '/personGag/' + targetId;
//params.method = 'put';
params.method = 'post';
}
if(!isNull(url)) {
params.url = url;
bootstrapQ.confirm({
'id': 'myReason',
'msg': $('#form-reason').html()
}, postReason, '', dialogReasonBoxCallback, params);
}
}
function dialogReasonBoxCallback(params) {
if(!isUndefined(params)) {
$('#myReason input[data-type="' + params.dataType + '"]').parent().removeClass('hide');
if(!isNull(params.forbiddenContent)) {
$('#myReason .content').text(params.forbiddenContent);
$('#myReason .content').parent().parent().removeClass('hide').addClass('show');
if(params.dataType == 'forbidden') {
$('#myReason input[name="filters"]').parent().parent().removeClass('hide').addClass('show');
}
}
var radios = $('#myReason input');
var hasRadio = false;
radios.each(function () {
var $this = $(this);
if(!$this.parent().hasClass('hide')) {
hasRadio = true;
}
})
if(hasRadio == false) {
$('#myReason input[type="radio"]').parent().parent().parent().addClass('hide');
}
initPageElement('myReason');
if(params.dataType != 'audit') {
$('#myReason form input[type="radio"][name="reason-id"]').on('click', function () {
var $this = $(this);
// use :checked so radios checked via prop() are detected (attr() misses them)
var isChecked = $this.is(':checked');
var value = $this.val().trim();
if (isChecked && value == 'other') {
$('textarea[name="other"]').parent().removeClass('hide').addClass('show');
} else {
$('textarea[name="other"]').parent().removeClass('show').addClass('hide');
}
})
if($('#myReason input[data-type="' + params.dataType + '"]').length == 0) {
$('#myReason form input[type="radio"][name="reason-id"]').eq(-1).prop('checked', true).trigger('click');
} else {
$('#myReason input[data-type="' + params.dataType + '"]').eq(0).prop('checked', true).trigger('click');
}
}
if(/*params.dataType == 'gag' || */params.dataType == 'unGag') {
$('#myReason .gag').removeClass('hide').addClass('show');
}
}
}
function postReason(params) {
if(!isUndefined(params)) {
var radios = $('#myReason form input[name="reason-id"]');
var hasRadio = false;
if(isUndefined(params.method)) {
params.method = 'post';
}
radios.each(function () {
var $this = $(this);
var label = $this.parent().parent().parent();
if (!label.hasClass('hide')) {
hasRadio = true;
}
})
if(hasRadio && isUndefined($('#myReason form input[name="reason-id"]:checked').val())) {
messageAlert({
'message': 'Please select a reason',
'type': 'error'
})
return false;
} else {
var reasonId = hasRadio == false ? null : $('#myReason form input[name="reason-id"]:checked').val().trim();
if(reasonId == 'other') {
params.data.reasonType = 'text';
params.data.reason = $('#myReason textarea[name="other"]').val().trim();
if(isNull(params.data.reason)) {
messageAlert({
'message': 'Please enter a reason',
'type': 'error'
})
return false;
}
} else if(params.dataType == 'reply') {
params.data.replyType = 'id';
params.data.reply = reasonId;
params.data.memo = $('#myReason textarea[name="memo"]').val().trim();
} else {
params.data.reasonType = 'id';
params.data.reason = reasonId;
}
if(params.data.type == 'announce'){
params.data.memo = $('#myReason textarea[name="memo"]').val().trim();
}
if(!isNull(params.forbiddenContent) && params.dataType == 'forbidden') {
var filter = $('#myReason input[name="filters"]');
var filters = getForbiddenFilters(filter, params.forbiddenContent);
if(filters == false) {
return false;
} else {
params.data.filters = filters;
}
}
if(/*params.dataType == 'gag' || */params.dataType == 'unGag') {
params.data.type = getSelectVal($('#myReason form select[name="type"]'));
var interval = getSelectVal($('#myReason form select[name="interval"]'));
if(interval == '-1') {
params.data.isForever = true;
} else {
params.data.interval = interval;
params.data.isForever = false;
}
}
ajaxData(params.method, params.url, handlePostReason, [], params);
}
return false;
}
}
function getForbiddenFilters(filter, forbiddenContent) {
var filterValue = filter.val().trim();
var data = [];
if(isNull(filterValue)) {
messageAlert({
'message': 'Please enter the sensitive words found in the banned content; separate multiple words with commas',
'type': 'error'
})
filter.focus();
return false;
} else {
var filters = filterValue.split(',');
for(var i in filters) {
// also split on full-width commas (,) so Chinese-style input is handled
filters[i] = filters[i].split(',');
for(var j in filters[i]) {
var val = filters[i][j].trim();
if(!isNull(val) && data.indexOf(val) == -1) {
if(forbiddenContent.indexOf(val) == -1) {
messageAlert({
'message': 'Sensitive word "'+ val +'" is not in the banned content',
'type': 'error'/*,
'sticky': true*/
})
return false;
} else {
data.push(val);
}
}
}
}
if(data.length > 0) {
return data;
} else {
return false;
}
}
}
function handlePostReason(result) {
$('body').append(result);
$('#myReason').modal('hide');
}
|
import logging
import json
from pyspark.sql import SparkSession
from pyspark.sql.types import *
import pyspark.sql.functions as psf
# TODO Create a schema for incoming resources
schema = StructType([
StructField("crime_id", StringType(), True),
StructField("original_crime_type_name", StringType(), True),
StructField("report_date", StringType(), True),
StructField("call_date", StringType(), True),
StructField("offense_date", StringType(), True),
StructField("call_time", StringType(), True),
StructField("call_date_time", StringType(), True),
StructField("disposition", StringType(), True),
StructField("address", StringType(), True),
StructField("city", StringType(), True),
StructField("state", StringType(), True),
StructField("agency_id", StringType(), True),
StructField("address_type", StringType(), True),
StructField("common_location", StringType(), True)
])
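# Illustrative Kafka message value matching the schema above (all field
# values are made up):
# {"crime_id": "183653763", "original_crime_type_name": "Traffic Stop",
#  "report_date": "2018-12-31T00:00:00.000",
#  "call_date_time": "2018-12-31T23:57:00.000", "disposition": "ADM",
#  "address": "Geary Bl/divisadero St", "city": "San Francisco",
#  "state": "CA", "agency_id": "1", "address_type": "Intersection",
#  "common_location": ""}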
def run_spark_job(spark):
# TODO Create Spark Configuration
# Create Spark configurations with max offset of 100 per trigger (set below)
# set up correct bootstrap server and port
df = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "localhost:9092") \
.option("subscribe", "com.crime.police_call") \
.option("startingOffsets", "earliest") \
.option("maxOffsetsPerTrigger", 100) \
.option("maxRatePerPartition", 50) \
.option("stopGracefullyOnShutdown", "true") \
.load()
# Show schema for the incoming resources for checks
print("df schema:")
df.printSchema()
# extract the correct column from the kafka input resources
# Take only value and convert it to String
kafka_df = df.selectExpr("CAST(value AS STRING)")
service_table = kafka_df\
.select(psf.from_json(psf.col('value'), schema).alias("DF"))\
.select("DF.*")
print("service table")
service_table.printSchema()
# TODO select original_crime_type_name and disposition
distinct_table = service_table.select(psf.to_timestamp(psf.col('call_date_time')).alias('call_date_time'), psf.col('original_crime_type_name') , psf.col('disposition'))
print("distinct table")
distinct_table.printSchema()
# count the number of original crime type
# we use watermark to solve dealing with late arriving data and stateful aggregation
agg_df = distinct_table.select(
distinct_table.original_crime_type_name,
distinct_table.call_date_time,
distinct_table.disposition
).withWatermark("call_date_time", "60 minutes").groupBy(psf.window(distinct_table.call_date_time, "10 minutes", "5 minutes"), psf.col('original_crime_type_name')).count()
# TODO Q1. Submit a screen shot of a batch ingestion of the aggregation
# TODO write output stream
query = agg_df.writeStream.format("console").outputMode("complete").start()
# TODO attach a ProgressReporter
# NOTE: calling query.awaitTermination() here would block forever and make
# the join below unreachable; instead we start both queries and wait at the end.
# TODO get the right radio code json path
radio_code_json_filepath = "radio_code.json"
radio_code_df = spark.read.json(radio_code_json_filepath)
# clean up your data so that the column names match on radio_code_df and agg_df
# we will want to join on the disposition code
# TODO rename disposition_code column to disposition
radio_code_df = radio_code_df.withColumnRenamed("disposition_code", "disposition")
# TODO join on disposition column
# agg_df no longer carries `disposition` after the groupBy, so join the
# pre-aggregation stream with the static radio codes on the shared column
join_query = distinct_table.join(radio_code_df, "disposition", "left_outer") \
.writeStream.format("console").outputMode("append").start()
# block until either streaming query terminates
spark.streams.awaitAnyTermination()
if __name__ == "__main__":
logger = logging.getLogger(__name__)
# TODO Create Spark in Standalone mode
spark = SparkSession \
.builder \
.master("local[*]") \
.appName("KafkaSparkStructuredStreaming") \
.config("spark.ui.port", 3000) \
.getOrCreate()
logger.info("Spark started")
run_spark_job(spark)
spark.stop()
|
import React, { Component } from 'react';
import IceContainer from '@icedesign/container';
import { Grid } from '@alifd/next';
const { Row, Col } = Grid;
const mockData = [
{
title: 'Total invitations (count)',
value: '187',
},
{
title: 'In progress (count)',
value: '62',
},
{
title: 'Completed (count)',
value: '23',
},
{
title: 'Average completion time (days)',
value: '39',
},
{
title: 'Participating members (people)',
value: '96',
},
];
export default class Overview extends Component {
render() {
return (
<IceContainer style={styles.container}>
<Row>
<Col l="4">
<div style={styles.item}>
<img
src="https://gw.alipayobjects.com/zos/rmsportal/heTdoQXAHjxNGiLSUkYA.svg"
alt=""
/>
</div>
</Col>
{mockData.map((item, index) => {
return (
<Col l="4" key={index}>
<div style={styles.item}>
<p style={styles.itemTitle}>{item.title}</p>
<p style={styles.itemValue}>{item.value}</p>
</div>
</Col>
);
})}
</Row>
</IceContainer>
);
}
}
const styles = {
item: {
height: '120px',
display: 'flex',
flexDirection: 'column',
alignItems: 'center',
justifyContent: 'center',
},
itemTitle: {
color: '#666',
fontSize: '14px',
},
itemValue: {
color: '#333',
fontSize: '36px',
marginTop: '10px',
},
};
|
var path = require('path'),
gulp = require('gulp'),
gutil = require('gulp-util'),
mirror = require('gulp-mirror'),
uglify = require('gulp-uglify'),
rename = require('gulp-rename'),
source = require('vinyl-source-stream'),
sourcemaps = require('gulp-sourcemaps'),
buffer = require('vinyl-buffer'),
browserify = require('browserify'),
watchify = require('watchify'),
stripDebug = require('gulp-strip-debug'),
handleErrors = require('../util/handleErrors'),
// `paths` (jsEntry, out) is used below but never defined in this file;
// the config module location here is an assumption:
paths = require('../config').paths;
// TODO - Concat license header to dev/prod build files.
function rebundle(devBundle) {
if (devBundle) {
gutil.log('Starting dev rebundle...');
}
var debug, min;
debug = sourcemaps.init({loadMaps: true});
debug.pipe(sourcemaps.write('./', {sourceRoot: './'}))
.pipe(gulp.dest(paths.out));
min = rename({ suffix: '.min' });
min.pipe(sourcemaps.init({loadMaps: true}))
.pipe(uglify())
.pipe(stripDebug())
.pipe(sourcemaps.write('./', {sourceRoot: './', addComment: false}))
.pipe(gulp.dest(paths.out));
var stream = this.bundle()
.on('error', handleErrors.handler)
.pipe(handleErrors())
.pipe(source('index.js'))
.pipe(buffer());
if (devBundle) {
return stream.pipe(debug).once('end', function () {
gutil.log('Dev rebundle complete.');
});
}
else {
return stream.pipe(mirror(debug, min));
}
}
function createBundler(args) {
args = args || {};
args.debug = true;
args.standalone = 'D&C';
var bundle = browserify(paths.jsEntry, args),
argv = require('minimist')(process.argv.slice(2)),
exclude = (argv.exclude || []).concat(argv.e || []);
for (var i = 0; i < exclude.length; ++i) {
bundle.ignore(require.resolve('../../src/' + exclude[i]));
}
return bundle;
}
function watch(onUpdate) {
var bundler = watchify(createBundler(watchify.args));
bundler.on('update', function () {
var bundle = rebundle.call(this, true);
if (onUpdate) {
bundle.on('end', onUpdate);
}
});
return rebundle.call(bundler);
}
module.exports = function bundle() {
return rebundle.call(createBundler());
};
module.exports.watch = watch;
module.exports.rebundle = rebundle;
module.exports.createBundler = createBundler;
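// Illustrative registration elsewhere in the build (gulp 3-style task names
// assumed; they are not defined in this file):
//   gulp.task('scripts', require('./bundle'));
//   gulp.task('scripts:watch', function () { return require('./bundle').watch(); });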
|
//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#pragma once
namespace Js
{
class ModuleNamespaceEnumerator : public JavascriptEnumerator
{
protected:
DEFINE_VTABLE_CTOR(ModuleNamespaceEnumerator, JavascriptEnumerator);
ModuleNamespaceEnumerator(ModuleNamespace* nsObject, EnumeratorFlags flags, ScriptContext* scriptContext);
BOOL Init(ForInCache * forInCache);
public:
static ModuleNamespaceEnumerator* New(ModuleNamespace* nsObject, EnumeratorFlags flags, ScriptContext* scriptContext, ForInCache * forInCache);
virtual void Reset() override;
virtual Var MoveAndGetNext(PropertyId& propertyId, PropertyAttributes* attributes = nullptr) override;
virtual Var GetCurrentValue() { Assert(false); return nullptr; }
private:
ModuleNamespace* nsObject;
JavascriptStaticEnumerator symbolEnumerator;
ModuleNamespace::UnambiguousExportMap* nonLocalMap;
BigPropertyIndex currentLocalMapIndex;
BigPropertyIndex currentNonLocalMapIndex;
bool doneWithLocalExports;
bool doneWithSymbol;
EnumeratorFlags flags;
};
}
|
exports.handler = async (event, context) => ({
statusCode: 200,
body: JSON.stringify({
NETLIFY: process.env.NETLIFY,
BUILD_ID: process.env.BUILD_ID,
CONTEXT: process.env.CONTEXT,
NODE_VERSION: process.env.NODE_VERSION,
URL: process.env.URL,
DEPLOY_URL: process.env.DEPLOY_URL,
}),
});
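// Hypothetical invocation once deployed (the function file name "env" is
// assumed): curl https://<your-site>.netlify.app/.netlify/functions/env
// responds with a JSON body echoing the build environment variables above.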
|
export Publish from './Publish';
|
# Copyright (c) 2020 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLVector
class Acceleration(XMLVector):
_NAME = 'acceleration'
_TYPE = 'sdf'
def __init__(self, default=(0, 0, 0, 0, 0, 0)):
super(Acceleration, self).__init__(size=6)
# copy so instances never share one mutable default list
self.value = list(default)
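# Illustrative use (assumes XMLVector exposes the `value` property set above):
#   accel = Acceleration()
#   accel.value = [0, 0, -9.81, 0, 0, 0]  # 6-vector: linear + angular parts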
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="mac-remote-command",
version="0.1.0",
author="Daniel Flanagan",
description="Client for embedded systems to facilitate remote configuration.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/FlantasticDan/mac-remote-command",
project_urls={
"Bug Tracker": "https://github.com/FlantasticDan/mac-remote-command/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha"
],
packages=setuptools.find_packages(),
python_requires=">=3.9",
install_requires=['httpx', 'getmac'],
extras_require={
"oled": ['oled-status']
}
)
|
import sys
import urllib.request, urllib.error, urllib.parse
from http.cookiejar import CookieJar
import lxml.etree
import lxml.html
import re
from datetime import timedelta, date
from selenium import webdriver
from selenium.common.exceptions import StaleElementReferenceException
from time import sleep
import random
import os
import pickle
import validators
from newspaper import Article, ArticleException
"""
This script was written to search Google news for a list of search terms over a
specified range of dates. The idea here is to replicate the methodology used by
Jennifer Mascia to put together the The Gun Report, which was a New York Times blog
that chronicled daily shootings across the country. The methodology is described
in an On the Media interview: http://www.onthemedia.org/story/end-gun-report/
"""
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days) + 1):
yield end_date - timedelta(n)
if len(sys.argv) < 3:
print("Usage: python compile_google_news_links_selenium-SHOOTINGS.py startYear-startMonth-startDay endYear-endMonth-endDay\n")
exit(0)
sdate = sys.argv[1]
edate = sys.argv[2]
sy, sm, sd = sdate.split('-')
ey, em, ed = edate.split('-')
start_date = date(int(sy), int(sm), int(sd))
end_date = date(int(ey), int(em), int(ed))
search_terms = [ "man shot", "woman shot", "officer-involved shooting"]
blacklist = ['https://www.youtube.com/?gl=US', 'https://www.blogger.com/?tab=nj']
print("Search from %s to %s for terms %s\n"%(start_date, end_date, str(search_terms)))
def main():
first_run = True
driver = webdriver.Chrome('./chromedriver')
crawl_output = {} # url -> dict representing article
if os.path.exists('crawl_output.p'):
crawl_output = pickle.load(open('crawl_output.p', 'rb'))
i = 0
# Iterate through dates
for single_date in daterange(start_date, end_date):
for term in search_terms:
url = 'https://www.google.com/search?'
values = {'q' : term,
'hl' : 'en',
'gl' : 'us',
'authuser' : '0',
'source' : 'lnt',
'tbs' : 'cdr:1,cd_min:' + single_date.strftime("%m/%d/%Y") + ",cd_max:" + single_date.strftime("%m/%d/%Y"),
'tbm' : 'nws',
'start' : '0' }
print("***" + url + urllib.parse.urlencode(values))
driver.get(url + urllib.parse.urlencode(values))
if first_run:
print("Set search preferences by going to Settings > Search Settings, and selecting 'Never show instant results', and then set the Results per page to ***40***")
sleep(60)
first_run = False
# plain iteration: reusing `i` here would clobber the article counter above
for a in driver.find_elements_by_tag_name('a'):
try:
link = a.get_attribute('href')
while link is not None and link.startswith('https://ipv4.google.com/sorry/'): #captchas
print("Blocked on: %s\n"%(str(link)))
sleep(10)
link = a.get_attribute('href')
if link is not None and link not in crawl_output.keys() and validators.url(link) and link not in blacklist:
if "google.com" not in link and "webcache.googleusercontent" not in link: #remove some obvious ads and junk <a> elements
print(single_date)
print(single_date.strftime("%Y-%m-%d"), "\t", term, "\t", end=' ')
print(link)
try:
article = Article(link)
article.download()
article.parse()
pub_date = article.publish_date
text = article.text
title = article.title
if pub_date is None or text is None or title is None:
continue
article_dict = {
# I know 'data' is a typo, but it's too late now...
'publication data': str(pub_date),
'text': str(text),
'title': str(title),
}
crawl_output[link] = article_dict
i += 1
if i % 30 == 0:
# every 30 articles, dump the output
print('dumping crawl_output')
pickle.dump(crawl_output, open('crawl_output.p', 'wb'))
except (ArticleException, ValueError):
# invalid article (either bad url or contains
# illegal characters)
continue
except StaleElementReferenceException:
print("Stale element: %s\n"%str(a))
driver.close()
print('dumping crawl_output')
print('{} articles crawled'.format(len(crawl_output.keys())))
pickle.dump(crawl_output, open('crawl_output.p', 'wb'))
if __name__ == "__main__":
main()
|
"use strict";
import _objectWithoutPropertiesLoose from "@babel/runtime/helpers/esm/objectWithoutPropertiesLoose";
import _inheritsLoose from "@babel/runtime/helpers/esm/inheritsLoose";
import _isEqual from "lodash-es/isEqual";
import { TileLayer } from 'leaflet';
import { withLeaflet } from './context';
import GridLayer from './GridLayer';
import { EVENTS_RE } from './MapEvented';
var WMSTileLayer =
/*#__PURE__*/
function (_GridLayer) {
_inheritsLoose(WMSTileLayer, _GridLayer);
function WMSTileLayer() {
return _GridLayer.apply(this, arguments) || this;
}
var _proto = WMSTileLayer.prototype;
_proto.createLeafletElement = function createLeafletElement(props) {
var url = props.url,
params = _objectWithoutPropertiesLoose(props, ["url"]);
return new TileLayer.WMS(url, this.getOptions(params));
};
_proto.updateLeafletElement = function updateLeafletElement(fromProps, toProps) {
_GridLayer.prototype.updateLeafletElement.call(this, fromProps, toProps);
var prevUrl = fromProps.url,
_po = fromProps.opacity,
_pz = fromProps.zIndex,
prevParams = _objectWithoutPropertiesLoose(fromProps, ["url", "opacity", "zIndex"]);
var url = toProps.url,
_o = toProps.opacity,
_z = toProps.zIndex,
params = _objectWithoutPropertiesLoose(toProps, ["url", "opacity", "zIndex"]);
if (url !== prevUrl) {
this.leafletElement.setUrl(url);
}
if (!_isEqual(params, prevParams)) {
this.leafletElement.setParams(params);
}
};
_proto.getOptions = function getOptions(params) {
var superOptions = _GridLayer.prototype.getOptions.call(this, params);
return Object.keys(superOptions).reduce(function (options, key) {
if (!EVENTS_RE.test(key)) {
options[key] = superOptions[key];
}
return options;
}, {});
};
return WMSTileLayer;
}(GridLayer);
export default withLeaflet(WMSTileLayer);
|
//
// MNTaskRecordModel.h
// ManekiNeko
//
// Created by JackCheng on 16/2/19.
// Copyright © 2016 HardTime. All rights reserved.
//
#import "MNBaseModel.h"
@interface MNTaskRecordModel : MNBaseModel
//uid = 'UID'
@property (nonatomic, strong)NSString *uid;
//token = 'TOKEN'
@property (nonatomic, strong)NSString *token;
//p = 'page number: starts from 0'
@property (nonatomic, strong)NSString *p;
@end
|
/*
* linux/kernel/seccomp.c
*
* Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
*
* Copyright (C) 2012 Google, Inc.
* Will Drewry <wad@chromium.org>
*
* This defines a simple but solid secure-computing facility.
*
* Mode 1 uses a fixed list of allowed system calls.
* Mode 2 allows user-defined system call filters in the form
* of Berkeley Packet Filters/Linux Socket Filters.
*/
#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif
#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
/**
* struct seccomp_filter - container for seccomp BPF programs
*
* @usage: reference count to manage the object lifetime.
* get/put helpers should be used when accessing an instance
* outside of a lifetime-guarded section. In general, this
* is only needed for handling filters shared across tasks.
* @prev: points to a previously installed, or inherited, filter
* @prog: the BPF program to evaluate
*
* seccomp_filter objects are organized in a tree linked via the @prev
* pointer. For any task, it appears to be a singly-linked list starting
* with current->seccomp.filter, the most recently attached or inherited filter.
* However, multiple filters may share a @prev node, by way of fork(), which
* results in a unidirectional tree existing in memory. This is similar to
* how namespaces work.
*
* seccomp_filter objects should never be modified after being attached
* to a task_struct (other than @usage).
*/
struct seccomp_filter {
atomic_t usage;
struct seccomp_filter *prev;
struct bpf_prog *prog;
};
/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
/*
* Endianness is explicitly ignored and left for BPF program authors to manage
* as per the specific architecture.
*/
static void populate_seccomp_data(struct seccomp_data *sd)
{
struct task_struct *task = current;
struct pt_regs *regs = task_pt_regs(task);
unsigned long args[6];
sd->nr = syscall_get_nr(task, regs);
sd->arch = syscall_get_arch();
syscall_get_arguments(task, regs, 0, 6, args);
sd->args[0] = args[0];
sd->args[1] = args[1];
sd->args[2] = args[2];
sd->args[3] = args[3];
sd->args[4] = args[4];
sd->args[5] = args[5];
sd->instruction_pointer = KSTK_EIP(task);
}
/**
* seccomp_check_filter - verify seccomp filter code
* @filter: filter to verify
* @flen: length of filter
*
* Takes a previously checked filter (by bpf_check_classic) and
* redirects all filter code that loads struct sk_buff data
* and related data through seccomp_bpf_load. It also
* enforces length and alignment checking of those loads.
*
* Returns 0 if the rule set is legal or -EINVAL if not.
*/
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
int pc;
for (pc = 0; pc < flen; pc++) {
struct sock_filter *ftest = &filter[pc];
u16 code = ftest->code;
u32 k = ftest->k;
switch (code) {
case BPF_LD | BPF_W | BPF_ABS:
ftest->code = BPF_LDX | BPF_W | BPF_ABS;
/* 32-bit aligned and not out of bounds. */
if (k >= sizeof(struct seccomp_data) || k & 3)
return -EINVAL;
continue;
case BPF_LD | BPF_W | BPF_LEN:
ftest->code = BPF_LD | BPF_IMM;
ftest->k = sizeof(struct seccomp_data);
continue;
case BPF_LDX | BPF_W | BPF_LEN:
ftest->code = BPF_LDX | BPF_IMM;
ftest->k = sizeof(struct seccomp_data);
continue;
/* Explicitly include allowed calls. */
case BPF_RET | BPF_K:
case BPF_RET | BPF_A:
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU | BPF_NEG:
case BPF_LD | BPF_IMM:
case BPF_LDX | BPF_IMM:
case BPF_MISC | BPF_TAX:
case BPF_MISC | BPF_TXA:
case BPF_LD | BPF_MEM:
case BPF_LDX | BPF_MEM:
case BPF_ST:
case BPF_STX:
case BPF_JMP | BPF_JA:
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
continue;
default:
return -EINVAL;
}
}
return 0;
}
/**
* seccomp_run_filters - evaluates all seccomp filters against @sd
* @sd: optional seccomp data to be passed to filters
*
* Returns valid seccomp BPF response codes.
*/
static u32 seccomp_run_filters(const struct seccomp_data *sd)
{
struct seccomp_data sd_local;
u32 ret = SECCOMP_RET_ALLOW;
/* Make sure cross-thread synced filter points somewhere sane. */
struct seccomp_filter *f =
lockless_dereference(current->seccomp.filter);
/* Ensure unexpected behavior doesn't result in failing open. */
if (unlikely(WARN_ON(f == NULL)))
return SECCOMP_RET_KILL;
if (!sd) {
populate_seccomp_data(&sd_local);
sd = &sd_local;
}
/*
* All filters in the list are evaluated and the lowest BPF return
* value always takes priority (ignoring the DATA).
*/
for (; f; f = f->prev) {
u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
ret = cur_ret;
}
return ret;
}
#endif /* CONFIG_SECCOMP_FILTER */
static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
assert_spin_locked(&current->sighand->siglock);
if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
return false;
return true;
}
static inline void seccomp_assign_mode(struct task_struct *task,
unsigned long seccomp_mode)
{
assert_spin_locked(&task->sighand->siglock);
task->seccomp.mode = seccomp_mode;
/*
* Make sure TIF_SECCOMP cannot be set before the mode (and
* filter) is set.
*/
smp_mb__before_atomic();
set_tsk_thread_flag(task, TIF_SECCOMP);
}
#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
struct seccomp_filter *child)
{
/* NULL is the root ancestor. */
if (parent == NULL)
return 1;
for (; child; child = child->prev)
if (child == parent)
return 1;
return 0;
}
/**
* seccomp_can_sync_threads: checks if all threads can be synchronized
*
* Expects sighand and cred_guard_mutex locks to be held.
*
* Returns 0 on success, -ve on error, or the pid of a thread which was
* either not in the correct seccomp mode or it did not have an ancestral
* seccomp filter.
*/
static inline pid_t seccomp_can_sync_threads(void)
{
struct task_struct *thread, *caller;
BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
assert_spin_locked(&current->sighand->siglock);
/* Validate all threads being eligible for synchronization. */
caller = current;
for_each_thread(caller, thread) {
pid_t failed;
/* Skip current, since it is initiating the sync. */
if (thread == caller)
continue;
if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
(thread->seccomp.mode == SECCOMP_MODE_FILTER &&
is_ancestor(thread->seccomp.filter,
caller->seccomp.filter)))
continue;
/* Return the first thread that cannot be synchronized. */
failed = task_pid_vnr(thread);
/* If the pid cannot be resolved, then return -ESRCH */
if (unlikely(WARN_ON(failed == 0)))
failed = -ESRCH;
return failed;
}
return 0;
}
/**
* seccomp_sync_threads: sets all threads to use current's filter
*
* Expects sighand and cred_guard_mutex locks to be held, and for
* seccomp_can_sync_threads() to have returned success already
* without dropping the locks.
*
*/
static inline void seccomp_sync_threads(void)
{
struct task_struct *thread, *caller;
BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
assert_spin_locked(&current->sighand->siglock);
/* Synchronize all threads. */
caller = current;
for_each_thread(caller, thread) {
/* Skip current, since it needs no changes. */
if (thread == caller)
continue;
/* Get a task reference for the new leaf node. */
get_seccomp_filter(caller);
/*
* Drop the task reference to the shared ancestor since
* current's path will hold a reference. (This also
* allows a put before the assignment.)
*/
put_seccomp_filter(thread);
smp_store_release(&thread->seccomp.filter,
caller->seccomp.filter);
/*
* Don't let an unprivileged task work around
* the no_new_privs restriction by creating
* a thread that sets it up, enters seccomp,
* then dies.
*/
if (task_no_new_privs(caller))
task_set_no_new_privs(thread);
/*
* Opt the other thread into seccomp if needed.
* As threads are considered to be trust-realm
* equivalent (see ptrace_may_access), it is safe to
* allow one thread to transition the other.
*/
if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
}
}
/**
* seccomp_prepare_filter: Prepares a seccomp filter for use.
* @fprog: BPF program to install
*
* Returns filter on success or an ERR_PTR on failure.
*/
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
struct seccomp_filter *sfilter;
int ret;
const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);
if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
return ERR_PTR(-EINVAL);
BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
/*
* Installing a seccomp filter requires that the task has
* CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
* This avoids scenarios where unprivileged tasks can affect the
* behavior of privileged children.
*/
if (!task_no_new_privs(current) &&
security_capable_noaudit(current_cred(), current_user_ns(),
CAP_SYS_ADMIN) != 0)
return ERR_PTR(-EACCES);
/* Allocate a new seccomp_filter */
sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
if (!sfilter)
return ERR_PTR(-ENOMEM);
ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
seccomp_check_filter, save_orig);
if (ret < 0) {
kfree(sfilter);
return ERR_PTR(ret);
}
atomic_set(&sfilter->usage, 1);
return sfilter;
}
/**
* seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
* @user_filter: pointer to the user data containing a sock_fprog.
*
* Returns 0 on success and non-zero otherwise.
*/
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
struct sock_fprog fprog;
struct seccomp_filter *filter = ERR_PTR(-EFAULT);
#ifdef CONFIG_COMPAT
if (in_compat_syscall()) {
struct compat_sock_fprog fprog32;
if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
goto out;
fprog.len = fprog32.len;
fprog.filter = compat_ptr(fprog32.filter);
} else /* falls through to the if below. */
#endif
if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
goto out;
filter = seccomp_prepare_filter(&fprog);
out:
return filter;
}
/**
* seccomp_attach_filter: validate and attach filter
* @flags: flags to change filter behavior
* @filter: seccomp filter to add to the current process
*
* Caller must be holding current->sighand->siglock lock.
*
* Returns 0 on success, -ve on error.
*/
static long seccomp_attach_filter(unsigned int flags,
struct seccomp_filter *filter)
{
unsigned long total_insns;
struct seccomp_filter *walker;
assert_spin_locked(&current->sighand->siglock);
/* Validate resulting filter length. */
total_insns = filter->prog->len;
for (walker = current->seccomp.filter; walker; walker = walker->prev)
total_insns += walker->prog->len + 4; /* 4 instr penalty */
if (total_insns > MAX_INSNS_PER_PATH)
return -ENOMEM;
/* If thread sync has been requested, check that it is possible. */
if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
int ret;
ret = seccomp_can_sync_threads();
if (ret)
return ret;
}
/*
* If there is an existing filter, make it the prev and don't drop its
* task reference.
*/
filter->prev = current->seccomp.filter;
current->seccomp.filter = filter;
/* Now that the new filter is in place, synchronize to all threads. */
if (flags & SECCOMP_FILTER_FLAG_TSYNC)
seccomp_sync_threads();
return 0;
}
/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
struct seccomp_filter *orig = tsk->seccomp.filter;
if (!orig)
return;
/* Reference count is bounded by the number of total processes. */
atomic_inc(&orig->usage);
}
static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
if (filter) {
bpf_prog_destroy(filter->prog);
kfree(filter);
}
}
/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
struct seccomp_filter *orig = tsk->seccomp.filter;
/* Clean up single-reference branches iteratively. */
while (orig && atomic_dec_and_test(&orig->usage)) {
struct seccomp_filter *freeme = orig;
orig = orig->prev;
seccomp_filter_free(freeme);
}
}
static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
memset(info, 0, sizeof(*info));
info->si_signo = SIGSYS;
info->si_code = SYS_SECCOMP;
info->si_call_addr = (void __user *)KSTK_EIP(current);
info->si_errno = reason;
info->si_arch = syscall_get_arch();
info->si_syscall = syscall;
}
/**
* seccomp_send_sigsys - signals the task to allow in-process syscall emulation
* @syscall: syscall number to send to userland
* @reason: filter-supplied reason code to send to userland (via si_errno)
*
* Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
*/
static void seccomp_send_sigsys(int syscall, int reason)
{
struct siginfo info;
seccomp_init_siginfo(&info, syscall, reason);
force_sig_info(SIGSYS, &info, current);
}
#endif /* CONFIG_SECCOMP_FILTER */
/*
* Secure computing mode 1 allows only read/write/exit/sigreturn.
* To be fully secure this must be combined with rlimit
* to limit the stack allocations too.
*/
static const int mode1_syscalls[] = {
__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
0, /* null terminated */
};
static void __secure_computing_strict(int this_syscall)
{
const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
syscall_whitelist = get_compat_mode1_syscalls();
#endif
do {
if (*syscall_whitelist == this_syscall)
return;
} while (*++syscall_whitelist);
#ifdef SECCOMP_DEBUG
dump_stack();
#endif
audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL);
do_exit(SIGKILL);
}
#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
int mode = current->seccomp.mode;
if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
return;
if (mode == SECCOMP_MODE_DISABLED)
return;
else if (mode == SECCOMP_MODE_STRICT)
__secure_computing_strict(this_syscall);
else
BUG();
}
#else
#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
const bool recheck_after_trace)
{
u32 filter_ret, action;
int data;
/*
* Make sure that any changes to mode from another thread have
* been seen after TIF_SECCOMP was seen.
*/
rmb();
filter_ret = seccomp_run_filters(sd);
data = filter_ret & SECCOMP_RET_DATA;
action = filter_ret & SECCOMP_RET_ACTION;
switch (action) {
case SECCOMP_RET_ERRNO:
/* Set low-order bits as an errno, capped at MAX_ERRNO. */
if (data > MAX_ERRNO)
data = MAX_ERRNO;
syscall_set_return_value(current, task_pt_regs(current),
-data, 0);
goto skip;
case SECCOMP_RET_TRAP:
/* Show the handler the original registers. */
syscall_rollback(current, task_pt_regs(current));
/* Let the filter pass back 16 bits of data. */
seccomp_send_sigsys(this_syscall, data);
goto skip;
case SECCOMP_RET_TRACE:
/* We've been put in this state by the ptracer already. */
if (recheck_after_trace)
return 0;
/* ENOSYS these calls if there is no tracer attached. */
if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
syscall_set_return_value(current,
task_pt_regs(current),
-ENOSYS, 0);
goto skip;
}
/* Allow the BPF to provide the event message */
ptrace_event(PTRACE_EVENT_SECCOMP, data);
/*
* The delivery of a fatal signal during event
* notification may silently skip tracer notification,
* which could leave us with a potentially unmodified
* syscall that the tracer would have liked to have
* changed. Since the process is about to die, we just
* force the syscall to be skipped and let the signal
* kill the process and correctly handle any tracer exit
* notifications.
*/
if (fatal_signal_pending(current))
goto skip;
/* Check if the tracer forced the syscall to be skipped. */
this_syscall = syscall_get_nr(current, task_pt_regs(current));
if (this_syscall < 0)
goto skip;
/*
* Recheck the syscall, since it may have changed. This
* intentionally uses a NULL struct seccomp_data to force
* a reload of all registers. This does not goto skip since
* a skip would have already been reported.
*/
if (__seccomp_filter(this_syscall, NULL, true))
return -1;
return 0;
case SECCOMP_RET_ALLOW:
return 0;
case SECCOMP_RET_KILL:
default: {
siginfo_t info;
audit_seccomp(this_syscall, SIGSYS, action);
/* Dump core only if this is the last remaining thread. */
if (get_nr_threads(current) == 1) {
/* Show the original registers in the dump. */
syscall_rollback(current, task_pt_regs(current));
/* Trigger a manual coredump since do_exit skips it. */
seccomp_init_siginfo(&info, this_syscall, data);
do_coredump(&info);
}
do_exit(SIGSYS);
}
}
unreachable();
skip:
audit_seccomp(this_syscall, 0, action);
return -1;
}
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
const bool recheck_after_trace)
{
BUG();
}
#endif
int __secure_computing(const struct seccomp_data *sd)
{
int mode = current->seccomp.mode;
int this_syscall;
if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
return 0;
this_syscall = sd ? sd->nr :
syscall_get_nr(current, task_pt_regs(current));
switch (mode) {
case SECCOMP_MODE_STRICT:
__secure_computing_strict(this_syscall); /* may call do_exit */
return 0;
case SECCOMP_MODE_FILTER:
return __seccomp_filter(this_syscall, sd, false);
default:
BUG();
}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
long prctl_get_seccomp(void)
{
return current->seccomp.mode;
}
/**
* seccomp_set_mode_strict: internal function for setting strict seccomp
*
* Once current->seccomp.mode is non-zero, it may not be changed.
*
* Returns 0 on success or -EINVAL on failure.
*/
static long seccomp_set_mode_strict(void)
{
const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
long ret = -EINVAL;
spin_lock_irq(&current->sighand->siglock);
if (!seccomp_may_assign_mode(seccomp_mode))
goto out;
#ifdef TIF_NOTSC
disable_TSC();
#endif
seccomp_assign_mode(current, seccomp_mode);
ret = 0;
out:
spin_unlock_irq(&current->sighand->siglock);
return ret;
}
#ifdef CONFIG_SECCOMP_FILTER
/**
* seccomp_set_mode_filter: internal function for setting seccomp filter
* @flags: flags to change filter behavior
* @filter: struct sock_fprog containing filter
*
* This function may be called repeatedly to install additional filters.
* Every filter successfully installed will be evaluated (in reverse order)
* for each system call the task makes.
*
* Once current->seccomp.mode is non-zero, it may not be changed.
*
* Returns 0 on success or -EINVAL on failure.
*/
static long seccomp_set_mode_filter(unsigned int flags,
const char __user *filter)
{
const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
struct seccomp_filter *prepared = NULL;
long ret = -EINVAL;
/* Validate flags. */
if (flags & ~SECCOMP_FILTER_FLAG_MASK)
return -EINVAL;
/* Prepare the new filter before holding any locks. */
prepared = seccomp_prepare_user_filter(filter);
if (IS_ERR(prepared))
return PTR_ERR(prepared);
/*
* Make sure we cannot change seccomp or nnp state via TSYNC
* while another thread is in the middle of calling exec.
*/
if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
mutex_lock_killable(&current->signal->cred_guard_mutex))
goto out_free;
spin_lock_irq(&current->sighand->siglock);
if (!seccomp_may_assign_mode(seccomp_mode))
goto out;
ret = seccomp_attach_filter(flags, prepared);
if (ret)
goto out;
/* Do not free the successfully attached filter. */
prepared = NULL;
seccomp_assign_mode(current, seccomp_mode);
out:
spin_unlock_irq(&current->sighand->siglock);
if (flags & SECCOMP_FILTER_FLAG_TSYNC)
mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
seccomp_filter_free(prepared);
return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
const char __user *filter)
{
return -EINVAL;
}
#endif
/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
const char __user *uargs)
{
switch (op) {
case SECCOMP_SET_MODE_STRICT:
if (flags != 0 || uargs != NULL)
return -EINVAL;
return seccomp_set_mode_strict();
case SECCOMP_SET_MODE_FILTER:
return seccomp_set_mode_filter(flags, uargs);
default:
return -EINVAL;
}
}
SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
const char __user *, uargs)
{
return do_seccomp(op, flags, uargs);
}
/**
* prctl_set_seccomp: configures current->seccomp.mode
* @seccomp_mode: requested mode to use
* @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
*
* Returns 0 on success or -EINVAL on failure.
*/
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
unsigned int op;
char __user *uargs;
switch (seccomp_mode) {
case SECCOMP_MODE_STRICT:
op = SECCOMP_SET_MODE_STRICT;
/*
* Setting strict mode through prctl always ignored filter,
* so make sure it is always NULL here to pass the internal
* check in do_seccomp().
*/
uargs = NULL;
break;
case SECCOMP_MODE_FILTER:
op = SECCOMP_SET_MODE_FILTER;
uargs = filter;
break;
default:
return -EINVAL;
}
/* prctl interface doesn't have flags, so they are always zero. */
return do_seccomp(op, 0, uargs);
}
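/*
 * Illustrative userspace sketch (not kernel code, untested) of reaching
 * do_seccomp() through the raw seccomp(2) entry point above; the types and
 * macros come from the UAPI headers:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog fprog = { .len = 2, .filter = insns };
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &fprog);
 */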
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
void __user *data)
{
struct seccomp_filter *filter;
struct sock_fprog_kern *fprog;
long ret;
unsigned long count = 0;
if (!capable(CAP_SYS_ADMIN) ||
current->seccomp.mode != SECCOMP_MODE_DISABLED) {
return -EACCES;
}
spin_lock_irq(&task->sighand->siglock);
if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
ret = -EINVAL;
goto out;
}
filter = task->seccomp.filter;
while (filter) {
filter = filter->prev;
count++;
}
if (filter_off >= count) {
ret = -ENOENT;
goto out;
}
count -= filter_off;
filter = task->seccomp.filter;
while (filter && count > 1) {
filter = filter->prev;
count--;
}
if (WARN_ON(count != 1 || !filter)) {
/* The filter tree shouldn't shrink while we're using it. */
ret = -ENOENT;
goto out;
}
fprog = filter->prog->orig_prog;
if (!fprog) {
/* This must be a new non-cBPF filter, since we save
* every cBPF filter's orig_prog above when
* CONFIG_CHECKPOINT_RESTORE is enabled.
*/
ret = -EMEDIUMTYPE;
goto out;
}
ret = fprog->len;
if (!data)
goto out;
get_seccomp_filter(task);
spin_unlock_irq(&task->sighand->siglock);
if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
ret = -EFAULT;
put_seccomp_filter(task);
return ret;
out:
spin_unlock_irq(&task->sighand->siglock);
return ret;
}
#endif
|
from abc import ABCMeta, abstractmethod
import json
from typing import Any, IO, Optional, Sequence, cast
from pytest_wdl.core import DataDirs, DataManager, DataResolver
from pytest_wdl.utils import ensure_path
from py.path import local
import pytest
from _pytest.fixtures import FixtureRequest
try:
from ruamel import yaml
except ImportError:
yaml = None
def pytest_collection(session: pytest.Session):
"""
Prints an empty line to make the report look slightly better.
"""
print()
def pytest_collect_file(path: local, parent) -> Optional[pytest.File]:
if path.basename.startswith("test") and not path.basename.startswith("test_data."):
if path.ext == ".json":
return JsonWdlTestsModule(path, parent)
elif yaml and path.ext == ".yaml":
return YamlWdlTestsModule(path, parent)
# TODO: the Node API will be changing at some point
# https://docs.pytest.org/en/latest/example/nonpython.html#a-basic-example-for-specifying-tests-in-yaml-files
class WdlTestsModule(pytest.Module, metaclass=ABCMeta):
@abstractmethod
def _load(self, fp: IO) -> dict:
pass
def collect(self):
with self.fspath.open() as inp:
d = self._load(inp)
if "tests" not in d:
raise ValueError(f"Tests file {self.fspath} must contain a 'tests' key")
data = d.get("data")
for spec in d["tests"]:
if "name" not in spec:
raise ValueError("Test case missing 'name' key")
yield TestItem(self, data=data, **spec)
class YamlWdlTestsModule(WdlTestsModule):
def _load(self, fp: IO) -> dict:
yaml_loader = yaml.YAML(typ="safe")
yaml_loader.default_flow_style = False
return yaml_loader.load(fp)
class JsonWdlTestsModule(WdlTestsModule):
def _load(self, fp: IO) -> dict:
return json.load(fp)
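# Illustrative tests file that the JSON module above would collect (the keys
# mirror what collect()/TestItem expect; the values are made up):
#
#   {
#     "data": {"expected_vcf": {"url": "http://example.com/expected.vcf"}},
#     "tests": [
#       {
#         "name": "test_my_workflow",
#         "wdl": "my_workflow.wdl",
#         "inputs": {"sample_name": "test"},
#         "expected": {"vcf": "expected_vcf"}
#       }
#     ]
#   }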
class TestItem(pytest.Item):
def __init__(
self,
parent,
data: Optional[dict] = None,
name: Optional[str] = None,
wdl: Optional[str] = None,
inputs: Optional[dict] = None,
expected: Optional[dict] = None,
tags: Optional[Sequence] = None,
**kwargs
):
if not all((name, wdl)):
raise ValueError("Every test must have 'name' and 'wdl' keys")
super().__init__(name, parent)
self._wdl = wdl
self._inputs = inputs
self._expected = expected
self._tags = tags # TODO: add tags as marks
self._workflow_runner_kwargs = kwargs
self._data = data
self._fixture_request = None
def setup(self):
"""
This method is black magic - uses internal pytest APIs to create a
FixtureRequest that can be used to access fixtures in `runtest()`.
Copied from
https://github.com/pytest-dev/pytest/blob/master/src/_pytest/doctest.py.
"""
def func():
pass
self.funcargs = {}
fm = self.session._fixturemanager
self._fixtureinfo = fm.getfixtureinfo(
node=self, func=func, cls=None, funcargs=False
)
self._fixture_request = FixtureRequest(self)
self._fixture_request._fillfixtures()
def runtest(self):
# Get/create DataManager
if self._data:
config = self._fixture_request.getfixturevalue("user_config")
data_resolver = DataResolver(self._data, config)
data_dirs = DataDirs(
ensure_path(self._fixture_request.fspath.dirpath(), canonicalize=True),
function=self.name,
module=None, # TODO: support a top-level key for module name
cls=None, # TODO: support test groupings
)
workflow_data = DataManager(data_resolver, data_dirs)
else:
workflow_data = self._fixture_request.getfixturevalue("workflow_data")
# Build the arguments to workflow_runner
workflow_runner_kwargs = self._workflow_runner_kwargs
# Resolve test data requests in the inputs and outputs
if self._inputs:
workflow_runner_kwargs["inputs"] = _resolve_test_data(
self._inputs, workflow_data
)
if self._expected:
workflow_runner_kwargs["expected"] = _resolve_test_data(
self._expected, workflow_data
)
# Run the test
workflow_runner = self._fixture_request.getfixturevalue("workflow_runner")
return workflow_runner(self._wdl, **workflow_runner_kwargs)
def _resolve_test_data(d: dict, workflow_data: DataManager) -> dict:
def _resolve(val: Any):
if isinstance(val, str):
try:
# See if it's a test data entry
return workflow_data[cast(str, val)]
except FileNotFoundError:
# It's a string literal
return val
elif isinstance(val, dict):
return dict((key, _resolve(value)) for key, value in cast(dict, d).items())
elif isinstance(val, Sequence):
return [_resolve(value) for value in cast(Sequence, val)]
else:
return val
return _resolve(d)
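
# Illustrative sketch only (not part of the plugin): a minimal tests file that
# pytest_collect_file() above would pick up. Keys other than
# name/wdl/inputs/expected/tags/data are forwarded to the workflow_runner
# fixture as keyword arguments.
#
#   # test_echo.json
#   {
#       "tests": [
#           {"name": "echo_works", "wdl": "echo.wdl", "inputs": {"msg": "hi"}}
#       ]
#   }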
|
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
FLAG_XSS = "CTF{3mbr4c3_the_c00k1e_w0r1d_ord3r}" # This is the flag of web-cwo-xss
FLAG_SSRF = "CTF{WhatIsThisCookieFriendSpaceBookPlusAllAccessRedPremiumThingLooksYummy}" # This is the flag of web-cwo-xss-2
TOKEN = "XOKhV00iZQpnvAqsRMWO"
ADMIN_TOKEN = "TUtb9PPA9cYkfcVQWYzxy4XbtyL3VNKz"
|
(window.webpackJsonp=window.webpackJsonp||[]).push([[6],{203:function(t,e,s){"use strict";s.r(e);var a=s(0),r=Object(a.a)({},function(){var t=this,e=t.$createElement,s=t._self._c||e;return s("div",{staticClass:"content"},[t._m(0),t._v(" "),s("Bit"),t._v(" "),t._m(1),t._v(" "),t._m(2),t._v(" "),t._m(3),t._v(" "),t._m(4),t._v(" "),t._m(5),t._v(" "),t._m(6),t._v(" "),s("ul",[s("li",[s("router-link",{attrs:{to:"./../guide/assets.html#相对-urls"}},[t._v("基准 URL")])],1),t._v(" "),s("li",[s("router-link",{attrs:{to:"./../guide/deploy.html#github-页面"}},[t._v("部署教程 > Github 页面")])],1)]),t._v(" "),t._m(7),t._v(" "),t._m(8),t._v(" "),s("p",[t._v("网站的标题。这将是所有页面标题的前缀,并显示在默认主题的导航栏中。")]),t._v(" "),t._m(9),t._v(" "),t._m(10),t._v(" "),t._m(11),t._v(" "),t._m(12),t._v(" "),t._m(13),t._v(" "),t._m(14),t._v(" "),t._m(15),t._m(16),t._v(" "),t._m(17),t._v(" "),s("p",[t._v("指定用于 dev 服务器的主机。")]),t._v(" "),t._m(18),t._v(" "),t._m(19),t._v(" "),s("p",[t._v("指定用于 dev 服务器的端口。")]),t._v(" "),t._m(20),t._v(" "),t._m(21),t._v(" "),t._m(22),t._v(" "),t._m(23),t._v(" "),t._m(24),t._v(" "),s("p",[t._v("提供 Google AnalyticsID 来开启集成功能。")]),t._v(" "),s("div",{staticClass:"tip custom-block"},[s("p",{staticClass:"custom-block-title"},[t._v("提示")]),t._v(" "),s("p",[t._v("请留意 "),s("a",{attrs:{href:"https://ec.europa.eu/commission/priorities/justice-and-fundamental-rights/data-protection/2018-reform-eu-data-protection-rules_en",target:"_blank",rel:"noopener noreferrer"}},[t._v("GDPR (2018年欧盟数据保护规则改革)"),s("OutboundLink")],1),t._v(", 在合适或者需要的情况下,考虑将 Google Analytics 设置为"),s("a",{attrs:{href:"https://support.google.com/analytics/answer/2763052?hl=zh-Hans",target:"_blank",rel:"noopener noreferrer"}},[t._v("匿名化的 IP"),s("OutboundLink")],1),t._v("。")])]),t._v(" "),t._m(25),t._v(" "),t._m(26),t._v(" "),t._m(27),t._v(" "),t._m(28),t._v(" "),t._m(29),t._v(" "),s("div",{staticClass:"tip custom-block"},[s("p",{staticClass:"custom-block-title"},[t._v("PWA 注意事项")]),t._v(" "),s("p",[s("code",[t._v("serviceWorker")]),t._v(" 选项只能处理 service worker。要使你的站点完全符合 PWA,你需要在"),s("code",[t._v(".vuepress/public")]),t._v(" 中提供 Web App 清单和图标。有关更多详细信息,请参阅 "),s("a",{attrs:{href:"https://developer.mozilla.org/en-US/docs/Web/Manifest",target:"_blank",rel:"noopener noreferrer"}},[t._v("MDN 关于 Web 应用程序清单的文档"),s("OutboundLink")],1),t._v("。")]),t._v(" "),s("p",[t._v("此外,只有在你能够使用 SSL 部署你的站点时才能启用此功能,因为 service worker 只能在 HTTPs URLs 下注册。")])]),t._v(" "),t._m(30),t._v(" "),t._m(31),t._v(" "),s("p",[t._v("指定用于 i18n 支持,要获取更多细节,请参考"),s("router-link",{attrs:{to:"./../guide/i18n.html"}},[t._v("国际化指南")]),t._v("。")],1),t._v(" "),t._m(32),t._v(" "),t._m(33),t._v(" "),s("p",[t._v("一个函数,用来控制对于哪些文件,是需要生成 "),s("code",[t._v('<link rel="prefetch">')]),t._v(" 资源提示的。请参考 "),s("a",{attrs:{href:"https://ssr.vuejs.org/zh/api/#shouldpreload",target:"_blank",rel:"noopener noreferrer"}},[t._v("shouldPrefetch"),s("OutboundLink")],1),t._v("。")]),t._v(" "),t._m(34),t._v(" "),t._m(35),t._v(" "),t._m(36),t._v(" "),t._m(37),t._v(" "),t._m(38),t._v(" "),t._m(39),t._v(" "),s("p",[t._v("为使用的主题提供配置选项。这些选项将根据你使用的主题而有所不同。")]),t._v(" "),t._m(40),t._v(" "),s("ul",[s("li",[s("router-link",{attrs:{to:"./../default-theme-config/"}},[t._v("默认主题配置")]),t._v(".")],1)]),t._v(" "),t._m(41),t._v(" "),t._m(42),t._v(" "),t._m(43),t._v(" "),s("p",[t._v("是否在每个代码块的左侧显示行号。")]),t._v(" "),t._m(44),t._v(" "),s("ul",[s("li",[s("router-link",{attrs:{to:"./../guide/markdown.html#行号"}},[t._v("行号")])],1)]),t._v(" "),t._m(45),t._v(" "),s("ul",[t._m(46),t._v(" "),s("li",[t._v("Default: 
"),s("a",{attrs:{href:"https://github.com/vuejs/vuepress/blob/master/lib/markdown/slugify.js",target:"_blank",rel:"noopener noreferrer"}},[t._v("source"),s("OutboundLink")],1)])]),t._v(" "),s("p",[t._v("将标题文本转换为别名(slug)的函数。这会影响标题锚点、目录和侧边栏链接生成的 id 和链接。(译者注:此功能是为了解决非 ASCII 码字符生成链接时的 "),s("a",{attrs:{href:"https://github.com/vuejs/vuepress/issues/45",target:"_blank",rel:"noopener noreferrer"}},[t._v("bug"),s("OutboundLink")],1),t._v(",具体代码查看 /lib/markdown/slugify 的 slugify 函数)")]),t._v(" "),t._m(47),t._v(" "),t._m(48),t._v(" "),t._m(49),t._v(" "),t._m(50),t._v(" "),t._m(51),t._v(" "),s("p",[s("a",{attrs:{href:"https://github.com/valeriangalliat/markdown-it-anchor",target:"_blank",rel:"noopener noreferrer"}},[t._v("markdown-it-anchor"),s("OutboundLink")],1),t._v(" 的选项。(注意:如果你想自定义标题 id 的话尽量使用 "),s("code",[t._v("markdown.slugify")]),t._v("。)")]),t._v(" "),t._m(52),t._v(" "),t._m(53),t._v(" "),s("p",[s("a",{attrs:{href:"https://github.com/Oktavilla/markdown-it-table-of-contents",target:"_blank",rel:"noopener noreferrer"}},[t._v("markdown-it-table-of-contents"),s("OutboundLink")],1),t._v(" 的选项。(注意:如果你想自定义标题 id 的话尽量使用 "),s("code",[t._v("markdown.slugify")]),t._v("。)")]),t._v(" "),t._m(54),t._v(" "),t._m(55),t._v(" "),s("p",[t._v("修改默认配置,或将额外的插件应用于渲染源文件的 "),s("a",{attrs:{href:"https://github.com/markdown-it/markdown-it",target:"_blank",rel:"noopener noreferrer"}},[t._v("markdown-it"),s("OutboundLink")],1),t._v(" 实例的函数。例如:")]),t._v(" "),t._m(56),t._m(57),t._v(" "),t._m(58),t._v(" "),t._m(59),t._v(" "),s("p",[s("a",{attrs:{href:"https://github.com/postcss/postcss-loader",target:"_blank",rel:"noopener noreferrer"}},[t._v("postcss-loader"),s("OutboundLink")],1),t._v(" 的选项。注意:指定这个值将会覆盖 autoprefixer,你需要把 autoprefixer 的选项也包含进去。")]),t._v(" "),t._m(60),t._v(" "),t._m(61),t._v(" "),s("p",[t._v("提供给 "),s("a",{attrs:{href:"https://github.com/shama/stylus-loader",target:"_blank",rel:"noopener noreferrer"}},[t._v("stylus-loader"),s("OutboundLink")],1),t._v(" 的参数。")]),t._v(" "),t._m(62),t._v(" "),t._m(63),t._v(" "),s("p",[t._v("提供给 "),s("a",{attrs:{href:"https://github.com/webpack-contrib/sass-loader",target:"_blank",rel:"noopener noreferrer"}},[t._v("sass-loader"),s("OutboundLink")],1),t._v(" 的参数,用来加载 "),s("code",[t._v("*.scss")]),t._v(" 文件。")]),t._v(" "),t._m(64),t._v(" "),t._m(65),t._v(" "),s("p",[t._v("提供给 "),s("a",{attrs:{href:"https://github.com/webpack-contrib/sass-loader",target:"_blank",rel:"noopener noreferrer"}},[t._v("sass-loader"),s("OutboundLink")],1),t._v(" 的参数,用来加载 "),s("code",[t._v("*.sass")]),t._v(" 文件。")]),t._v(" "),t._m(66),t._v(" "),t._m(67),t._v(" "),s("p",[t._v("提供给 "),s("a",{attrs:{href:"https://github.com/webpack-contrib/less-loader",target:"_blank",rel:"noopener noreferrer"}},[t._v("less-loader"),s("OutboundLink")],1),t._v(" 的参数。")]),t._v(" "),t._m(68),t._v(" "),t._m(69),t._v(" "),s("p",[t._v("修改内部 webpack 配置。如果该值是一个对象,它将被合并到使用 "),s("a",{attrs:{href:"https://github.com/survivejs/webpack-merge",target:"_blank",rel:"noopener noreferrer"}},[t._v("webpack-merge"),s("OutboundLink")],1),t._v(" 的最终配置中;如果该值是一个函数,它将接收 config 作为第一个参数,并将 "),s("code",[t._v("isServer")]),t._v(" 这个标志作为第二个参数。你可以直接改变配置,或者返回一个要合并的对象:")]),t._v(" "),t._m(70),t._m(71),t._v(" "),t._m(72),t._v(" "),s("p",[t._v("使用 "),s("a",{attrs:{href:"https://github.com/mozilla-neutrino/webpack-chain",target:"_blank",rel:"noopener noreferrer"}},[t._v("webpack-chain"),s("OutboundLink")],1),t._v(" 修改内部的 webpack 配置。")]),t._v(" "),t._m(73),t._m(74),t._v(" "),t._m(75),t._v(" "),t._m(76),t._v(" "),t._m(77)],1)},[function(){var 
t=this.$createElement,e=this._self._c||t;return e("h1",{attrs:{id:"配置参考"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#配置参考","aria-hidden":"true"}},[this._v("#")]),this._v(" 配置参考")])},function(){var t=this.$createElement,e=this._self._c||t;return e("h2",{attrs:{id:"基本配置-basic-config"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#基本配置-basic-config","aria-hidden":"true"}},[this._v("#")]),this._v(" 基本配置(basic config)")])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"base"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#base","aria-hidden":"true"}},[this._v("#")]),this._v(" base")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("string")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("/")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[this._v("网站用来部署的基准 URL。如果你打算在子路径下部署你的站点,例如 GitHub 页面,则需要设置此项。如果你打算将你的网站部署到"),e("code",[this._v("https://foo.github.io/bar/")]),this._v(",则应将 "),e("code",[this._v("base")]),this._v(" 设置为 "),e("code",[this._v('"/bar/"')]),this._v("。它应该始终以斜杠开始和结束。")])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[this._v("在其他选项中,"),e("code",[this._v("base")]),this._v(" 会自动添加到以 "),e("code",[this._v("/")]),this._v(" 开头的所有 URL 中,因此你只需指定一次即可。")])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[e("strong",[this._v("另请参考:")])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"title"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#title","aria-hidden":"true"}},[this._v("#")]),this._v(" title")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("string")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("undefined")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"description"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#description","aria-hidden":"true"}},[this._v("#")]),this._v(" description")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("string")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("undefined")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[this._v("网站描述。这将在页面 HTML 中表现为一个 "),e("code",[this._v("<meta>")]),this._v(" 标签。")])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"head"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#head","aria-hidden":"true"}},[this._v("#")]),this._v(" head")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Array")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("[]")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[this._v("被注入页面 HTML "),e("code",[this._v("<head>")]),this._v(" 额外的标签。每个标签可以用 "),e("code",[this._v("[tagName, { attrName: attrValue }, innerHTML?]")]),this._v(" 的形式指定。例如,要添加自定义图标:")])},function(){var t=this,e=t.$createElement,s=t._self._c||e;return s("div",{staticClass:"language-js extra-class"},[s("pre",{pre:!0,attrs:{class:"language-js"}},[s("code",[t._v("module"),s("span",{attrs:{class:"token punctuation"}},[t._v(".")]),t._v("exports "),s("span",{attrs:{class:"token operator"}},[t._v("=")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("{")]),t._v("\n 
head"),s("span",{attrs:{class:"token punctuation"}},[t._v(":")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("[")]),t._v("\n "),s("span",{attrs:{class:"token punctuation"}},[t._v("[")]),s("span",{attrs:{class:"token string"}},[t._v("'link'")]),s("span",{attrs:{class:"token punctuation"}},[t._v(",")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("{")]),t._v(" rel"),s("span",{attrs:{class:"token punctuation"}},[t._v(":")]),t._v(" "),s("span",{attrs:{class:"token string"}},[t._v("'icon'")]),s("span",{attrs:{class:"token punctuation"}},[t._v(",")]),t._v(" href"),s("span",{attrs:{class:"token punctuation"}},[t._v(":")]),t._v(" "),s("span",{attrs:{class:"token string"}},[t._v("'/logo.png'")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("}")]),s("span",{attrs:{class:"token punctuation"}},[t._v("]")]),t._v("\n "),s("span",{attrs:{class:"token punctuation"}},[t._v("]")]),t._v("\n"),s("span",{attrs:{class:"token punctuation"}},[t._v("}")]),t._v("\n")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"host"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#host","aria-hidden":"true"}},[this._v("#")]),this._v(" host")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("string")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("'0.0.0.0'")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"port"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#port","aria-hidden":"true"}},[this._v("#")]),this._v(" port")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("number")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("8080")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"dest"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#dest","aria-hidden":"true"}},[this._v("#")]),this._v(" dest")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("string")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v(".vuepress/dist")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[this._v("指定 "),e("code",[this._v("vuepress build")]),this._v(" 的输出目录。")])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"ga"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#ga","aria-hidden":"true"}},[this._v("#")]),this._v(" ga")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("string")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("undefined")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"serviceworker"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#serviceworker","aria-hidden":"true"}},[this._v("#")]),this._v(" serviceWorker")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("boolean")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("false")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[this._v("如果设置为 "),e("code",[this._v("true")]),this._v(",VuePress 将自动生成并注册一个 service worker ,这个 worker 将内容缓存以供离线使用(仅在生产环境中启用)。")])},function(){var t=this.$createElement,e=this._self._c||t;return 
e("p",[this._v("如果开发了一个自定义主题,"),e("code",[this._v("Layout.vue")]),this._v(" 组件还将触发以下事件:")])},function(){var t=this,e=t.$createElement,s=t._self._c||e;return s("ul",[s("li",[s("code",[t._v("sw-ready")])]),t._v(" "),s("li",[s("code",[t._v("sw-cached")])]),t._v(" "),s("li",[s("code",[t._v("sw-updated")])]),t._v(" "),s("li",[s("code",[t._v("sw-offline")])]),t._v(" "),s("li",[s("code",[t._v("sw-error")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"locales"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#locales","aria-hidden":"true"}},[this._v("#")]),this._v(" locales")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("{ [path: string]: Object }")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("undefined")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"shouldprefetch"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#shouldprefetch","aria-hidden":"true"}},[this._v("#")]),this._v(" shouldPrefetch")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("类型: "),e("code",[this._v("Function")])]),this._v(" "),e("li",[this._v("默认值: "),e("code",[this._v("() => true")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h2",{attrs:{id:"主题化-theming"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#主题化-theming","aria-hidden":"true"}},[this._v("#")]),this._v(" 主题化(theming)")])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"theme"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#theme","aria-hidden":"true"}},[this._v("#")]),this._v(" theme")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("string")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("undefined")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[this._v("指定此选项来使用自定义主题。使用 "),e("code",[this._v('"foo"')]),this._v(" 的值,VuePress 将尝试在 "),e("code",[this._v("node_modules/vuepress-theme-foo/Layout.vue")]),this._v(" 加载主题组件。")])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"themeconfig"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#themeconfig","aria-hidden":"true"}},[this._v("#")]),this._v(" themeConfig")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Object")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("{}")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[e("strong",[this._v("另请参阅:")])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h2",{attrs:{id:"markdown"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#markdown","aria-hidden":"true"}},[this._v("#")]),this._v(" Markdown")])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"markdown-linenumbers"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#markdown-linenumbers","aria-hidden":"true"}},[this._v("#")]),this._v(" markdown.lineNumbers")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("boolean")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("undefined")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[e("strong",[this._v("另请参阅:")])])},function(){var 
t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"markdown-slugify"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#markdown-slugify","aria-hidden":"true"}},[this._v("#")]),this._v(" markdown.slugify")])},function(){var t=this.$createElement,e=this._self._c||t;return e("li",[this._v("Type: "),e("code",[this._v("Function")])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"markdown-externallinks"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#markdown-externallinks","aria-hidden":"true"}},[this._v("#")]),this._v(" markdown.externalLinks")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Object")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("{ target: '_blank', rel: 'noopener noreferrer' }")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[this._v("键和值对将被添加到指向外部链接的 "),e("code",[this._v("<a>")]),this._v(" 标签。默认选项将在新窗口中打开外部链接。")])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"markdown-anchor"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#markdown-anchor","aria-hidden":"true"}},[this._v("#")]),this._v(" markdown.anchor")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Object")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("{ permalink: true, permalinkBefore: true, permalinkSymbol: '#' }")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"markdown-toc"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#markdown-toc","aria-hidden":"true"}},[this._v("#")]),this._v(" markdown.toc")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Object")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("{ includeLevel: [2, 3] }")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"markdown-config"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#markdown-config","aria-hidden":"true"}},[this._v("#")]),this._v(" markdown.config")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Function")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("undefined")])])])},function(){var t=this,e=t.$createElement,s=t._self._c||e;return s("div",{staticClass:"language-js extra-class"},[s("pre",{pre:!0,attrs:{class:"language-js"}},[s("code",[t._v("module"),s("span",{attrs:{class:"token punctuation"}},[t._v(".")]),t._v("exports "),s("span",{attrs:{class:"token operator"}},[t._v("=")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("{")]),t._v("\n markdown"),s("span",{attrs:{class:"token punctuation"}},[t._v(":")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("{")]),t._v("\n config"),s("span",{attrs:{class:"token punctuation"}},[t._v(":")]),t._v(" md "),s("span",{attrs:{class:"token operator"}},[t._v("=>")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("{")]),t._v("\n md"),s("span",{attrs:{class:"token punctuation"}},[t._v(".")]),s("span",{attrs:{class:"token keyword"}},[t._v("set")]),s("span",{attrs:{class:"token punctuation"}},[t._v("(")]),s("span",{attrs:{class:"token punctuation"}},[t._v("{")]),t._v(" breaks"),s("span",{attrs:{class:"token punctuation"}},[t._v(":")]),t._v(" "),s("span",{attrs:{class:"token 
boolean"}},[t._v("true")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("}")]),s("span",{attrs:{class:"token punctuation"}},[t._v(")")]),t._v("\n md"),s("span",{attrs:{class:"token punctuation"}},[t._v(".")]),s("span",{attrs:{class:"token function"}},[t._v("use")]),s("span",{attrs:{class:"token punctuation"}},[t._v("(")]),s("span",{attrs:{class:"token function"}},[t._v("require")]),s("span",{attrs:{class:"token punctuation"}},[t._v("(")]),s("span",{attrs:{class:"token string"}},[t._v("'markdown-it-xxx'")]),s("span",{attrs:{class:"token punctuation"}},[t._v(")")]),s("span",{attrs:{class:"token punctuation"}},[t._v(")")]),t._v("\n "),s("span",{attrs:{class:"token punctuation"}},[t._v("}")]),t._v("\n "),s("span",{attrs:{class:"token punctuation"}},[t._v("}")]),t._v("\n"),s("span",{attrs:{class:"token punctuation"}},[t._v("}")]),t._v("\n")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h2",{attrs:{id:"建立管道-build-pipeline"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#建立管道-build-pipeline","aria-hidden":"true"}},[this._v("#")]),this._v(" 建立管道(build pipeline)")])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"postcss"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#postcss","aria-hidden":"true"}},[this._v("#")]),this._v(" postcss")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Object")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("{ plugins: [require('autoprefixer')] }")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"stylus"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#stylus","aria-hidden":"true"}},[this._v("#")]),this._v(" stylus")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Object")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("{ preferPathResolver: 'webpack' }")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"scss"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#scss","aria-hidden":"true"}},[this._v("#")]),this._v(" scss")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Object")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("{}")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"sass"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#sass","aria-hidden":"true"}},[this._v("#")]),this._v(" sass")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Object")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("{ indentedSyntax: true }")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"less"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#less","aria-hidden":"true"}},[this._v("#")]),this._v(" less")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Object")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("{}")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"configurewebpack"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#configurewebpack","aria-hidden":"true"}},[this._v("#")]),this._v(" configureWebpack")])},function(){var 
t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Object | Function")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("undefined")])])])},function(){var t=this,e=t.$createElement,s=t._self._c||e;return s("div",{staticClass:"language-js extra-class"},[s("pre",{pre:!0,attrs:{class:"language-js"}},[s("code",[t._v("module"),s("span",{attrs:{class:"token punctuation"}},[t._v(".")]),t._v("exports "),s("span",{attrs:{class:"token operator"}},[t._v("=")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("{")]),t._v("\n configureWebpack"),s("span",{attrs:{class:"token punctuation"}},[t._v(":")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("(")]),t._v("config"),s("span",{attrs:{class:"token punctuation"}},[t._v(",")]),t._v(" isServer"),s("span",{attrs:{class:"token punctuation"}},[t._v(")")]),t._v(" "),s("span",{attrs:{class:"token operator"}},[t._v("=>")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("{")]),t._v("\n "),s("span",{attrs:{class:"token keyword"}},[t._v("if")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("(")]),s("span",{attrs:{class:"token operator"}},[t._v("!")]),t._v("isServer"),s("span",{attrs:{class:"token punctuation"}},[t._v(")")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("{")]),t._v("\n "),s("span",{attrs:{class:"token comment"}},[t._v("// mutate the config for client")]),t._v("\n "),s("span",{attrs:{class:"token punctuation"}},[t._v("}")]),t._v("\n "),s("span",{attrs:{class:"token punctuation"}},[t._v("}")]),t._v("\n"),s("span",{attrs:{class:"token punctuation"}},[t._v("}")]),t._v("\n")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h3",{attrs:{id:"chainwebpack"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#chainwebpack","aria-hidden":"true"}},[this._v("#")]),this._v(" chainWebpack")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("Function")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("undefined")])])])},function(){var t=this,e=t.$createElement,s=t._self._c||e;return s("div",{staticClass:"language-js extra-class"},[s("pre",{pre:!0,attrs:{class:"language-js"}},[s("code",[t._v("module"),s("span",{attrs:{class:"token punctuation"}},[t._v(".")]),t._v("exports "),s("span",{attrs:{class:"token operator"}},[t._v("=")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("{")]),t._v("\n chainWebpack"),s("span",{attrs:{class:"token punctuation"}},[t._v(":")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("(")]),t._v("config"),s("span",{attrs:{class:"token punctuation"}},[t._v(",")]),t._v(" isServer"),s("span",{attrs:{class:"token punctuation"}},[t._v(")")]),t._v(" "),s("span",{attrs:{class:"token operator"}},[t._v("=>")]),t._v(" "),s("span",{attrs:{class:"token punctuation"}},[t._v("{")]),t._v("\n "),s("span",{attrs:{class:"token comment"}},[t._v("// config is an instance of ChainableConfig")]),t._v("\n "),s("span",{attrs:{class:"token punctuation"}},[t._v("}")]),t._v("\n"),s("span",{attrs:{class:"token punctuation"}},[t._v("}")]),t._v("\n")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("h2",{attrs:{id:"浏览器兼容性-browser-compatibility"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#浏览器兼容性-browser-compatibility","aria-hidden":"true"}},[this._v("#")]),this._v(" 浏览器兼容性(browser compatibility)")])},function(){var t=this.$createElement,e=this._self._c||t;return 
e("h3",{attrs:{id:"evergreen"}},[e("a",{staticClass:"header-anchor",attrs:{href:"#evergreen","aria-hidden":"true"}},[this._v("#")]),this._v(" evergreen")])},function(){var t=this.$createElement,e=this._self._c||t;return e("ul",[e("li",[this._v("Type: "),e("code",[this._v("boolean")])]),this._v(" "),e("li",[this._v("Default: "),e("code",[this._v("false")])])])},function(){var t=this.$createElement,e=this._self._c||t;return e("p",[this._v("如果你只针对常青树浏览器,请设置为 "),e("code",[this._v("true")]),this._v(" 。这将禁用 IE5 的 ES5 转码和 polyfill,导致更快的构建和更小的文件。")])}],!1,null,null,null);e.default=r.exports}}]);
|
import { html } from 'lit-html';
import { DemoPage } from '@advanced-rest-client/arc-demo-helper';
import '@advanced-rest-client/arc-demo-helper/arc-interactive-demo.js';
import '@anypoint-web-components/anypoint-button/anypoint-button.js';
import '@anypoint-web-components/anypoint-checkbox/anypoint-checkbox.js';
import '@advanced-rest-client/arc-models/websocket-url-history-model.js';
import { ExportHandlerMixin } from '@advanced-rest-client/arc-demo-helper/src/ExportHandlerMixin.js';
import encodingHelper from '@advanced-rest-client/arc-demo-helper/src/EncodingHelpers.js';
import { ArcMock } from '@advanced-rest-client/arc-data-generator';
import { ImportEvents, ArcModelEvents } from '@advanced-rest-client/arc-events';
import { MonacoLoader } from '@advanced-rest-client/monaco-support';
import { BodyProcessor } from '@advanced-rest-client/body-editor';
import { v4 } from '@advanced-rest-client/uuid-generator';
import '../arc-websocket-editor.js';
import env from './env.js';
/** @typedef {import('@advanced-rest-client/arc-types').WebSocket.WebsocketEditorRequest} WebsocketEditorRequest */
/** @typedef {import('@advanced-rest-client/arc-events').ArcExportFilesystemEvent} ArcExportFilesystemEvent */
/** @typedef {import('@advanced-rest-client/arc-events').GoogleDriveSaveEvent} GoogleDriveSaveEvent */
const REQUEST_STORE_KEY = 'demo.arc-websocket-ui.editorRequest';
class ComponentDemo extends ExportHandlerMixin(DemoPage) {
constructor() {
super();
this.initObservableProperties([
'editorRequest', 'withMenu', 'initialized',
]);
this.componentName = 'ARC websocket editor';
this.compatibility = false;
this.withMenu = false;
this.initialized = false;
/**
* @type {WebsocketEditorRequest}
*/
this.editorRequest = {
id: v4(),
request: {
kind: 'ARC#WebsocketRequest',
},
};
this.generator = new ArcMock();
this.generateData = this.generateData.bind(this);
this.deleteData = this.deleteData.bind(this);
this.initEditors();
this.restoreRequest();
this.renderViewControls = true;
if (window.matchMedia('(prefers-color-scheme: dark)').matches) {
this.darkThemeActive = true;
}
encodingHelper();
this._requestChangeHandler = this._requestChangeHandler.bind(this);
}
async initEditors() {
await this.loadMonaco();
this.initialized = true;
}
async generateData() {
await this.generator.store.insertWebsockets();
ImportEvents.dataImported(document.body);
}
async deleteData() {
await this.generator.store.destroyWebsockets();
ArcModelEvents.destroyed(document.body, 'all');
}
async loadMonaco() {
const base = `../node_modules/monaco-editor/`;
MonacoLoader.createEnvironment(base);
await MonacoLoader.loadMonaco(base);
await MonacoLoader.monacoReady();
}
restoreRequest() {
const valueRaw = localStorage.getItem(REQUEST_STORE_KEY);
if (!valueRaw) {
return;
}
let data;
try {
data = JSON.parse(valueRaw);
} catch (e) {
return;
}
if (data.request) {
data.request = BodyProcessor.restoreRequest(data.request);
}
console.log('restored', data);
this.editorRequest = data;
}
_requestChangeHandler() {
const editor = document.querySelector('arc-websocket-editor');
const object = editor.serialize();
console.log('storing request data', object);
this.storeValue(object);
}
/**
* Stores request value data in the local store
* @param {WebsocketEditorRequest} data
*/
async storeValue(data) {
if (!data) {
window.localStorage.removeItem(REQUEST_STORE_KEY);
return;
}
const safeRequest = await BodyProcessor.stringifyRequest(data.request);
localStorage.setItem(REQUEST_STORE_KEY, JSON.stringify({
...data,
request: safeRequest,
}));
}
_demoTemplate() {
if (!this.initialized) {
return html`<progress></progress>`;
}
const {
demoStates,
darkThemeActive,
compatibility,
editorRequest,
} = this;
const { request } = editorRequest;
const { url, payload, ui } = request;
return html`
<section class="documentation-section">
<h3>Interactive demo</h3>
<p>
This demo lets you preview the web socket request editor element with various configuration options.
</p>
<p>
Demo web socket server is running: <b>ws://localhost:${env.port}</b>
</p>
<arc-interactive-demo
.states="${demoStates}"
@state-changed="${this._demoStateHandler}"
?dark="${darkThemeActive}"
>
<div class="demo-app" slot="content">
<arc-websocket-editor
.url="${url}"
.payload="${payload}"
.uiConfig="${ui}"
?compatibility="${compatibility}"
@change="${this._requestChangeHandler}"
></arc-websocket-editor>
</div>
<label slot="options" id="mainOptionsLabel">Options</label>
<anypoint-checkbox
aria-describedby="mainOptionsLabel"
slot="options"
name="withMenu"
@change="${this._toggleMainOption}"
title="Uses request objects instead of request ids"
>
Render menu
</anypoint-checkbox>
</arc-interactive-demo>
</section>
`;
}
_dataControlsTemplate() {
return html`
<section class="documentation-section">
<h3>Data control</h3>
<p>
This section allows you to control demo data
</p>
<anypoint-button @click="${this.generateData}">Generate data</anypoint-button>
<anypoint-button @click="${this.deleteData}">Clear list</anypoint-button>
</section>`;
}
contentTemplate() {
return html`
<websocket-url-history-model></websocket-url-history-model>
${this._demoTemplate()}
${this._dataControlsTemplate()}
${this.exportTemplate()}
`;
}
}
const instance = new ComponentDemo();
instance.render();
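// Illustrative note (not part of the demo): the persistence round-trip pairs
// BodyProcessor.stringifyRequest() in storeValue() with
// BodyProcessor.restoreRequest() in restoreRequest(), so non-JSON-safe payloads
// survive serialization into localStorage, e.g.
//   const safe = await BodyProcessor.stringifyRequest(editorRequest.request);
//   localStorage.setItem(REQUEST_STORE_KEY, JSON.stringify({ ...editorRequest, request: safe }));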
|
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.trimPath = exports.ensurePath = exports.toPath = undefined;
var _slicedToArray2 = require('babel-runtime/helpers/slicedToArray');
var _slicedToArray3 = _interopRequireDefault(_slicedToArray2);
var _typeof2 = require('babel-runtime/helpers/typeof');
var _typeof3 = _interopRequireDefault(_typeof2);
var _url = require('url');
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var toPath = exports.toPath = function toPath(locale, defaultLocale) {
if (!locale || !defaultLocale) {
return '';
}
if ((typeof locale === 'undefined' ? 'undefined' : (0, _typeof3.default)(locale)) !== 'object' || !locale.hasOwnProperty('country') || !locale.hasOwnProperty('language')) {
throw new Error('Expected locale to be an object with country and language properties.');
}
if (typeof defaultLocale !== 'string' || defaultLocale.length !== 5 || defaultLocale[2] !== '-') {
throw new Error('Expected defaultLocale to be a locale string.');
}
var localeArr = [];
var _defaultLocale$split = defaultLocale.split('-'),
_defaultLocale$split2 = (0, _slicedToArray3.default)(_defaultLocale$split, 2),
defaultLanguage = _defaultLocale$split2[0],
defaultCountry = _defaultLocale$split2[1];
var country = locale.country,
language = locale.language;
if (language !== defaultLanguage) {
localeArr.push(language);
}
if (country !== defaultCountry) {
localeArr.push(country);
}
if (localeArr.length > 0) {
return '/' + localeArr.join('-');
}
return '';
};
var ensurePath = exports.ensurePath = function ensurePath(url, locale, defaultLocale) {
var localePrefix = toPath(locale, defaultLocale);
if (localePrefix === '') {
return url;
}
var _parseUrl = (0, _url.parse)(url),
pathname = _parseUrl.pathname;
var _pathname$substr$spli = pathname.substr(1).split('/'),
_pathname$substr$spli2 = (0, _slicedToArray3.default)(_pathname$substr$spli, 1),
localeSegment = _pathname$substr$spli2[0];
if (localeSegment) {
var _defaultLocale$split3 = defaultLocale.split('-'),
_defaultLocale$split4 = (0, _slicedToArray3.default)(_defaultLocale$split3, 2),
defaultCountry = _defaultLocale$split4[1];
var _localeSegment$split = localeSegment.split('-'),
_localeSegment$split2 = (0, _slicedToArray3.default)(_localeSegment$split, 2),
language = _localeSegment$split2[0],
_localeSegment$split3 = _localeSegment$split2[1],
country = _localeSegment$split3 === undefined ? defaultCountry : _localeSegment$split3;
if (language === locale.language && country === locale.country) {
return url;
}
}
return localePrefix + url;
};
var trimPath = exports.trimPath = function trimPath(url, defaultLocale, siteLocales) {
if (!defaultLocale || !siteLocales) {
return url;
}
var _parseUrl2 = (0, _url.parse)(url),
pathname = _parseUrl2.pathname;
var _pathname$substr$spli3 = pathname.substr(1).split('/'),
_pathname$substr$spli4 = (0, _slicedToArray3.default)(_pathname$substr$spli3, 1),
localeSegment = _pathname$substr$spli4[0];
if (localeSegment) {
var _defaultLocale$split5 = defaultLocale.split('-'),
_defaultLocale$split6 = (0, _slicedToArray3.default)(_defaultLocale$split5, 2),
defaultCountry = _defaultLocale$split6[1];
var _localeSegment$split4 = localeSegment.split('-'),
_localeSegment$split5 = (0, _slicedToArray3.default)(_localeSegment$split4, 2),
language = _localeSegment$split5[0],
_localeSegment$split6 = _localeSegment$split5[1],
country = _localeSegment$split6 === undefined ? defaultCountry : _localeSegment$split6;
if (siteLocales.indexOf(language + '-' + country) !== -1) {
return url.substr(localeSegment.length + 1);
}
}
return url;
};
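// Illustrative sketch only (not part of the compiled module): expected
// behaviour of the exports above, assuming a default locale of 'en-US'.
//   toPath({ language: 'fr', country: 'CA' }, 'en-US');               // => '/fr-CA'
//   toPath({ language: 'en', country: 'US' }, 'en-US');               // => ''
//   ensurePath('/about', { language: 'fr', country: 'CA' }, 'en-US'); // => '/fr-CA/about'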
|
import {graphql, useStaticQuery} from 'gatsby';
import _ from 'lodash/fp';
const useTextFile = () => {
const data = useStaticQuery(graphql`
query {
allTextFile {
nodes {
content
name
}
}
}
`);
return _.keyBy('name', data.allTextFile.nodes);
};
export default useTextFile;
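// Illustrative usage (not part of the hook), assuming a node whose `name` is
// "about": nodes are keyed by their `name` field, so
//   const files = useTextFile();
//   const aboutContent = files.about && files.about.content;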
|
/**************************************************************************/
/* */
/* WWIV Version 5 */
/* Copyright (C)1998-2020, WWIV Software Services */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); */
/* you may not use this file except in compliance with the License. */
/* You may obtain a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, */
/* software distributed under the License is distributed on an */
/* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, */
/* either express or implied. See the License for the specific */
/* language governing permissions and limitations under the License. */
/* */
/**************************************************************************/
#ifndef INCLUDED_LOCAL_IO_UNIX_CONSOLE_H
#define INCLUDED_LOCAL_IO_UNIX_CONSOLE_H
#include <cstdio>
#include <string>
#include <termios.h>
#include "local_io/local_io.h"
class UnixConsoleIO : public LocalIO {
public:
// Constructor/Destructor
UnixConsoleIO();
explicit UnixConsoleIO(const LocalIO& copy) = delete;
virtual ~UnixConsoleIO();
virtual void LocalGotoXY(int x, int y) override;
[[nodiscard]] int WhereX() const noexcept override;
[[nodiscard]] int WhereY() const noexcept override;
virtual void LocalLf() override;
virtual void LocalCr() override;
virtual void LocalCls() override;
virtual void LocalClrEol() override;
virtual void LocalBackspace() override;
virtual void LocalPutchRaw(unsigned char ch) override;
// Overridden by TestLocalIO in tests
virtual void LocalPutch(unsigned char ch) override;
virtual void LocalPuts(const std::string& text) override;
virtual void LocalXYPuts(int x, int y, const std::string& text) override;
virtual void set_protect(int l) override;
virtual void savescreen() override;
virtual void restorescreen() override;
virtual bool LocalKeyPressed() override;
virtual unsigned char LocalGetChar() override;
virtual void SaveCurrentLine(char *cl, char *atr, char *xl, char *cc) override;
virtual void MakeLocalWindow(int x, int y, int xlen, int ylen) override;
virtual void SetCursor(int cursorStyle) override;
virtual void LocalWriteScreenBuffer(const char *pszBuffer) override;
virtual int GetDefaultScreenBottom() override;
virtual void UpdateNativeTitleBar(const std::string& system_name, int instance_number) override;
private:
virtual void LocalFastPuts(const std::string &text) override;
int m_cursorPositionX = 0;
int m_cursorPositionY = 0;
FILE *ttyf = nullptr;
struct termios ttysav;
void set_attr_xy(int x, int y, int a);
};
#endif // INCLUDED_LOCAL_IO_UNIX_CONSOLE_H
|
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef GARNET_DRIVERS_WLAN_MEDIATEK_RALINK_DRIVER_H_
#define GARNET_DRIVERS_WLAN_MEDIATEK_RALINK_DRIVER_H_
#include <lib/async/dispatcher.h>
// Retrieves the async_dispatcher_t* for this driver.
//
// This pointer is guaranteed to be valid after the driver .init hook returns and before the driver
// .release hook is called. Therefore any device created and bound by this driver may assume the
// async_dispatcher_t* is initialized and running.
async_dispatcher_t* ralink_async_t();
#endif // GARNET_DRIVERS_WLAN_MEDIATEK_RALINK_DRIVER_H_
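// Illustrative sketch only (not part of this header), assuming the C++
// wrappers in <lib/async/cpp/task.h>: a device bound by this driver can
// post work onto the shared dispatcher between the .init and .release hooks:
//
//   async::PostTask(ralink_async_t(), [] { /* handle a wlan event */ });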
|
#pragma once
#include "FEServiceInterface.h"
#include "FETextFontEffectFile.h"
#include "Kernel/ServiceBase.h"
#include "Kernel/Factory.h"
#include "Kernel/Hashtable2.h"
namespace Mengine
{
class FEService
: public ServiceBase<FEServiceInterface>
{
public:
FEService();
~FEService() override;
public:
bool _initializeService() override;
void _finalizeService() override;
public:
TextFontEffectInterfacePtr createTextFontEffect( const FileGroupInterfacePtr & _fileGroup, const FilePath & _filePath, const ConstString & _name, uint32_t _sample ) override;
protected:
typedef Hashtable2<ConstString, FilePath, FETextFontEffectFilePtr> HashtableTextFontEffects;
HashtableTextFontEffects m_textFontEffects;
FactoryPtr m_factoryFETextFontEffectFile;
FactoryPtr m_factoryFETextFontEffectCustom;
};
}
|
/*global define */
/*
| Copyright 2012 Esri
|
| Licensed under the Apache License, Version 2.0 (the "License");
| you may not use this file except in compliance with the License.
| You may obtain a copy of the License at
|
| http://www.apache.org/licenses/LICENSE-2.0
|
| Unless required by applicable law or agreed to in writing, software
| distributed under the License is distributed on an "AS IS" BASIS,
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| See the License for the specific language governing permissions and
| limitations under the License.
*/
define(
({
map: {
error: "ไม่สามารถสร้างแผนที่"
},
tooltips: {
search: "ค้นหา", // Command button to open a dialog box for finding a feature or an address (depending on app)
locate: "ตำแหน่งปัจจุบัน", // Command button to zoom and pan to the current geographical position reported by the browser
markup: "ส่งการแก้ไข", // Command button to submit a correction to the app's host
collect: "ตัวกรอง/แก้ไข", // Command button to open a filter and template picker to add features to the map and to edit them afterwards
dijitLegend: "แสดงคำอธิบาย", //Display the legend
filter: "คัดกรองชั้นข้อมูลแผนที่", // Explains purpose of type-in box affiliated with template picker
basemap: "เปลี่ยนแผนที่ฐาน", // Command button to open a dialog box for switching basemaps
share: "แชร์", // Command button to open a dropdown menu for picking a type of sharing
shareViaEmail: "แชร์ผ่านอีเมล์", // Command button to share the current map extents via email
shareViaFacebook: "แชร์ผ่านเฟสบุค", // Command button to share the current map extents via a Facebook post: a URL is opened that permits the user to log into Facebook with a post that is ready to go
shareViaTwitter: "แชร์ผ่านทวิตเตอร์", // Command button to share the current map extents via a Twitter tweet: a URL is opened that permits the user to log into Twitter with a tweet that is ready to go
print: "พิมพ์แผนที่", // Command button to open a dialog box for specifying print orientation, title, and author before printing; also used inside print dialog box
fetchPrint: "เปิดดูแผนที่ที่พิมพ์แล้ว", // Command button to open a PDF containing a map that was just printed by the print map command
landscape: "แนวนอน", // Command button in the print map dialog box to select the landscape page orientation
portrait: "แสดงหน้ากระดาษในแนวตั้ง", // Command button in the print map dialog box to select the portrait page orientation
help: "ช่วยเหลือ" // Command button to open a dialog box with a short description of the app
},
labels: {
email: "อีเมล์", // Shown next to icon for sharing the current map extents via email; works with shareViaEmail tooltip
Facebook: "เฟสบุค", // Shown next to icon for sharing the current map extents via a Facebook post; works with shareViaFacebook tooltip
Twitter: "ทวิตเตอร์", // Shown next to icon for sharing the current map extents via a Twitter tweet; works with shareViaTwitter tooltip
title: "ชื่อเรื่อง", // Shown as title hint in print specification box if a title hint is not configured
author: "ผู้แต่ง" // Shown as author hint in print specification box if an author hint is not configured
},
prompts: {
search: "ค้นหา:", // Appears before a find text field in dialog box for searching for a feature
markup: "วาด", // Appears before a set of tools for drawing on the map
mapLayers: "ชั้นข้อมูลของแผนที่:", // Appears before a list of map layers; shown when the app is not configured with the layer to use for the find command; works with the searchLayerMissing message
layerFields: "ค้นหาฟิลด์ของชั้นข้อมูล" // Appears before a list of fields in the configured map find layer; shown when the app cannot find one or more of the fields that were configured for the find command; works with the searchFieldMissing message
},
messages: {
geolocationDenied: "ไม่ได้รับอนุญาตให้ค้นหาตำแหน่งปัจจุบัน", // Shown when the browser does not permit the app to get the current geographical position
geolocationUnavailable: "บราวเซอร์ไม่สามารถหาตำแหน่งปัจจุบันได้", // Shown when the browser returns an error instead of the current geographical position
geolocationTimeout: "บราวเซอร์ไม่สามารถหาตำแหน่งปัจจุบันได้ทันที", // Shown when the browser does not return within a configured time limit when asked for the current geographical position
noSearchLayerConfigured: "ไม่พบชั้นข้อมูลที่ถูกปรับแต่ง", // Appears before a list of map layers; shown when the app is not configured with any layers to use for the find command; works with the mapLayers prompt
searchLayerMissing: "ไม่พบชั้นข้อมูลที่ต้องการค้นหาในแผนที่", // Appears before a list of map layers; shown when the app is not configured with the layer to use for the find command; works with the mapLayers prompt
searchLayerNotSearchable: "ไม่พบฟิลด์ที่ค้นหาบนชั้นข้อมูลแผนที่ <br><br> ตรวจสอบว่าชั้นข้อมูลนี้อยู่ที่ส่วนเนื้อหาของแผนที่หรือไม่ เซอร์วิสที่ทับซ้อนกัน เช่น ArcGIS for Server dynamic map services ต้องเพิ่มไปยังแผนที่ 1 ชั้นข้อมูล (รวมถึงชั้นข้อมูลสารบัญ) เพื่อใช้สำหรับชั้นข้อมูลที่ค้นหา สำหรับไทล์เซอร์วิสไม่สามารถใช้เป็นชั้นข้อมูลที่ค้นหาได้",
searchFieldMissing: "ไม่พบฟิลด์ที่ต้องการค้นหาในชั้นข้อมูลของแผนที่", // Appears before a list of fields in the configured map find layer; shown when the app cannot find one or more of the fields that were configured for the find command; works with the layerFields prompt
allSearchFieldsMissing: "ไม่พบข้อมูลฟิลด์ใดๆ จากการค้นหาชั้นข้อมูลในแผนที่นี้", // Appears before a list of fields in the configured map find layer; shown when the app cannot find any of the fields that were configured for the find command; works with the layerFields prompt
fieldNotFound: "ฟิลด์นี้จะไม่อยู่ในที่ใด ๆ ของชั้นแผนที่", // Appears when a field used in the configuration was not found in any map layer
yourContentSubmitted: "ข้อมูลของคุณได้ถูกส่งแล้ว ขอบคุณ", // Appears after content has been added to the map and successfully submitted to the server
noConfiguration: "ไม่สามารถเข้าถึงการตั้งค่าของแอพพลิเคชั่นได้", // Appears if the app, during startup, cannot get access to or find the configuration information; without the information, it cannot build the UI
unableToLaunchApp: "ไม่สามารถเริ่มใช้แอพพลิเคชั่นได้" // Appears for any failure to build the user interface
}
})
);
|
#!/usr/bin/env python3
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os

import yaml
import requests

from magpye import GeoMap


def gallery(method, data, style, background):
    fname = "gallery/styles/{}.py".format(style)
    name = os.path.basename(data)
    script = '''
"""
{style}
==================

| style = "{style}"

.. image:: /_static/styles/{style}.png
    :width: 400

| **magpye** has a list of predefined styles, that can be used to visualise your data.
| More options are available to customise your visualisation.

The data for this example can be downloaded from one of our repositories:
`<https://get.ecmwf.int/repository/magpye/data/{data}>`_
"""

from magpye import GeoMap

map = GeoMap(area_name="europe")
{background}
map.{method}("{data}", style="{style}")
map.gridlines(line_style="dash", labels=False)
map.coastlines()
map.legend()
map.save("{style}.png")

# sphinx_gallery_thumbnail_path = '_static/styles/{style}.png'
'''.format(
        style=style,
        background=background,
        method=method,
        data=name,
    )
    with open(fname, "w") as stream:
        stream.write(script)


def contour_shaded(data, style):
    map = GeoMap(area_name="europe")
    # map.coastlines(land_colour="grey")
    map.contour_shaded(data, style=style)
    map.gridlines(line_style="dash", labels=False)
    map.coastlines()
    map.legend()
    map.save("_static/styles/{}.png".format(style))
    gallery("contour_shaded", data, style, background="")


def contour_lines(data, style):
    map = GeoMap(area_name="europe")
    map.coastlines(land_colour="grey")
    map.contour_lines(data, style=style)
    map.gridlines(line_style="dash", labels=False)
    map.coastlines()
    map.legend()
    map.save("_static/styles/{}.png".format(style))
    gallery("contour_lines", data, style, background="map.coastlines(land_colour='grey')")


url = "http://get.ecmwf.int/repository/magpye/data"


def example(name, method, style):
    methods = {
        "contour_shaded": contour_shaded,
        "contour_lines": contour_lines,
    }
    if "data" in style:
        print(name, style["data"])
        r = requests.get('{}/{}'.format(url, style["data"]))
        print(r.status_code)
        if r.status_code == 200:
            data = "data/{}".format(style["data"])
            with open(data, "wb") as stream:
                stream.write(r.content)
            # Only render when the data file was actually downloaded,
            # so `data` is always bound before use.
            if method in methods:
                methods[method](data, name)


for root, _, files in os.walk("../magpye/static/styles"):
    for f in files:
        method = os.path.basename(root)
        style = os.path.join(root, f)
        with open(style) as stream:
            s = yaml.safe_load(stream)
        name, ext = f.split(".")
        example(name, method, s)
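
# Illustrative sketch only (not part of the script): the walk above expects a
# layout like
#   ../magpye/static/styles/contour_shaded/<style>.yaml
# where each YAML file may carry a "data" key naming a sample file in the
# magpye data repository, e.g. (hypothetical file name):
#   data: 2t_era5.grib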
|
from . import config
import pythreejs as p3
import numpy as np


def axes_1d(x, y, color="#000000", linewidth=1.5):
    N = len(x)
    pts = np.zeros([N, 3])  # one row per data point; z stays 0
    xmin = np.amin(x)
    xmax = np.amax(x)
    ymin = np.amin(y)
    ymax = np.amax(y)
    pts[:, 0] = x
    pts[:, 1] = y
    geometry = p3.BufferGeometry(attributes={
        'position': p3.BufferAttribute(array=pts),
    })
    material = p3.LineBasicMaterial(color=color, linewidth=linewidth)
    line = p3.Line(geometry=geometry, material=material)
    width = 800
    height = 500
|
/*
* Copyright 2013 The Polymer Authors. All rights reserved.
* Use of this source code is governed by a BSD-style
* license that can be found in the LICENSE file.
*/
suite('HTMLLegendElement', function() {
test('form', function() {
var form = document.createElement('form');
var fieldSet = document.createElement('fieldset');
var legend = document.createElement('legend');
form.appendChild(fieldSet);
fieldSet.appendChild(legend);
assert.equal(legend.form, form);
});
});
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config_options
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_bcrypt import Bcrypt

db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'main.login'


def create_app(config_name):
    app = Flask(__name__)

    # Creating the app configurations
    app.config.from_object(config_options[config_name])

    # Initializing flask extensions
    db.init_app(app)
    login_manager.init_app(app)
    bootstrap = Bootstrap(app)
    bcrypt.init_app(app)

    # Registering the blueprint
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    return app
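
# Illustrative usage (not part of the module), assuming config_options defines
# a 'development' key:
#   app = create_app('development')
#   app.run()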
|
test ( "accessing cells on a 10x10 board", function() {
var life = Life({
xmax:10, // Sets the max x length
ymax:10, // Sets the max y length
initial: "zeros", // Starting values, "zeros", "ones", "random"
});
// Tests boundaries
ok ( 0 == life.left_cell(1));
ok ( -1 == life.left_cell(10));
ok ( 10 == life.left_cell(11));
ok ( 9 == life.right_cell(8));
ok ( -1 == life.right_cell(9));
ok ( -1 == life.right_cell(19));
ok ( -1 == life.top_cell(0));
ok ( 0 == life.top_cell(10));
ok ( 9 == life.top_cell(19));
ok ( 10 == life.bottom_cell(0));
ok ( -1 == life.bottom_cell(95));
ok ( 0 == life.top_left_cell(11));
ok ( -1 == life.top_left_cell(0));
ok ( -1 == life.top_left_cell(10));
ok ( -1 == life.top_left_cell(1));
ok ( -1 == life.bottom_left_cell(0));
ok ( 10 == life.bottom_left_cell(1));
ok ( -1 == life.bottom_left_cell(98));
ok ( -1 == life.top_right_cell(8));
ok ( -1 == life.top_right_cell(19));
ok ( 9 == life.top_right_cell(18));
ok ( -1 == life.bottom_right_cell(9));
ok ( -1 == life.bottom_right_cell(95));
ok ( 11 == life.bottom_right_cell(0));
});
test ( "Accessing cells on a 6x5 board", function() {
var life = new Life({
xmax:6,
ymax:5,
initial: "zeros"
});
ok (3 == life.top_cell(9));
ok (4 == life.top_right_cell(9));
ok (8 == life.left_cell(9));
ok (10 == life.right_cell(9));
ok (14 == life.bottom_left_cell(9));
ok (15 == life.bottom_cell(9));
ok (16 == life.bottom_right_cell(9));
});
test ( "Counting of neighboring cells", function() {
var life = new Life({
xmax:10, // Sets the max x length
ymax:10, // Sets the max y length
initial: "zeros", // Starting values, "zeros", "ones", "random"
});
var counter = [];
life.set(1);
counter = life.run_counter();
ok (counter[0] == 1);
ok (counter[1] == undefined);
ok (counter[2] == 1);
ok (counter[10] == 1);
ok (counter[11] == 1);
ok (counter[12] == 1);
ok (counter[13] == undefined);
ok (counter[14] == undefined);
ok (counter[3] == undefined);
life.unset(1);
life.set(12);
counter = life.run_counter();
ok (counter[12] == undefined);
ok (counter[13] == 1);
ok (counter[11] == 1);
ok (counter[2] == 1);
ok (counter[1] == 1);
ok (counter[3] == 1);
ok (counter[22] == 1);
ok (counter[23] == 1);
ok (counter[21] == 1);
});
test ( "test a full run with 3 top neighboring cells on", function() {
    var life = new Life({
xmax:10, // Sets the max x length
ymax:10, // Sets the max y length
initial: "zeros", // Starting values, "zeros", "ones", "random"
});
life.set(1);
life.set(2);
life.set(3);
life.run();
ok (undefined == life.get(0));
ok (undefined == life.get(1));
ok (1 == life.get(2));
ok (undefined == life.get(3));
ok (undefined == life.get(11));
ok (1 == life.get(12));
ok (undefined == life.get(13));
ok (undefined == life.get(10));
ok (undefined == life.get(14));
});
test ( "Static beehive pattern", function() {
var life = new Life({
xmax: 6,
ymax:5,
initial: "zeros"
});
life.set(8);
life.set(9);
life.set(13);
life.set(16);
life.set(20);
life.set(21);
life.run();
ok (1 == life.get(8));
ok (1 == life.get(9));
ok (1 == life.get(13));
ok (1 == life.get(16));
ok (1 == life.get(20));
ok (1 == life.get(21));
life.run();
ok (1 == life.get(8));
ok (1 == life.get(9));
ok (1 == life.get(13));
ok (1 == life.get(16));
ok (1 == life.get(20));
ok (1 == life.get(21));
});
test ( "Static block pattern", function() {
var life = new Life({
xmax:4,
ymax:4,
initial: "zeros"
});
console.log("start of block pattern");
life.set(5);
life.set(6);
life.set(9);
life.set(10);
ok (1 == life.get(5));
ok (1 == life.get(6));
ok (1 == life.get(9));
ok (1 == life.get(10));
console.log(life.get_board());
life.run();
ok (1 == life.get(5));
ok (1 == life.get(6));
ok (1 == life.get(9));
ok (1 == life.get(10));
console.log(life.get_board());
life.run();
console.log(life.get_board());
ok (1 == life.get(5));
ok (1 == life.get(6));
ok (1 == life.get(9));
ok (1 == life.get(10));
console.log("end of block pattern");
});
test ( "The blinker pattern", function() {
var life = new Life({
xmax:5,
ymax:5,
initial: "zeros"
});
life.set(7);
life.set(12);
life.set(17);
life.run();
ok (1 == life.get(11));
ok (1 == life.get(12));
ok (1 == life.get(13));
ok (undefined == life.get(7));
ok (undefined == life.get(17));
life.run();
ok (undefined == life.get(11));
ok (1 == life.get(12));
ok (undefined == life.get(13));
ok (1 == life.get(7));
ok (1 == life.get(17));
life.run();
ok (1 == life.get(11));
ok (1 == life.get(12));
ok (1 == life.get(13));
ok (undefined == life.get(7));
ok (undefined == life.get(17));
});
|
function _refresh() {
let xhttp = new XMLHttpRequest();
xhttp.onreadystatechange = () => {
if (xhttp.readyState == 4 && xhttp.status == 200) {
let data = JSON.parse(xhttp.responseText);
let table = document.getElementById("messages");
table.innerHTML = "";
for (let i = 0; i < data.messages.length; i++) {
let _div = document.createElement("DIV");
_div.className = "message";
_div.innerText = data.messages[i];
table.appendChild(_div);
}
}
};
xhttp.open("GET", "/messages", true);
xhttp.send();
}
function _post() {
if (msg.value == "") return;
let xhttp = new XMLHttpRequest();
xhttp.onreadystatechange = () => {
if (xhttp.readyState == 4 && xhttp.status == 201)
_refresh();
};
xhttp.open("POST", "/messages", true);
xhttp.setRequestHeader("Content-Type", "application/json");
xhttp.send(JSON.stringify({
message: msg.value
}));
msg.value = "";
}
_refresh();
|
define(["exports", "@kinkajou/kinkajou/Kinkajou", "@kinkajou/svg-icon/SvgIcon"], function (_exports, _Kinkajou, _SvgIcon2) {
"use strict";
Object.defineProperty(_exports, "__esModule", {
value: true
});
_exports.Looks = void 0;
function _typeof(obj) { if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; } return _typeof(obj); }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }
function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }
function _possibleConstructorReturn(self, call) { if (call && (_typeof(call) === "object" || typeof call === "function")) { return call; } return _assertThisInitialized(self); }
function _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return self; }
function _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }
function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }
var Looks =
/*#__PURE__*/
function (_SvgIcon) {
_inherits(Looks, _SvgIcon);
function Looks() {
_classCallCheck(this, Looks);
return _possibleConstructorReturn(this, _getPrototypeOf(Looks).apply(this, arguments));
}
_createClass(Looks, [{
key: "renderSVG",
value: function renderSVG() {
return _Kinkajou.Kinkajou.createElement("g", null, _Kinkajou.Kinkajou.createElement("path", {
d: "M12 10c-3.86 0-7 3.14-7 7h2c0-2.76 2.24-5 5-5s5 2.24 5 5h2c0-3.86-3.14-7-7-7zm0-4C5.93 6 1 10.93 1 17h2c0-4.96 4.04-9 9-9s9 4.04 9 9h2c0-6.07-4.93-11-11-11z"
}));
}
}], [{
key: "is",
get: function get() {
return 'material.image.Looks';
}
}]);
return Looks;
}(_SvgIcon2.SvgIcon);
_exports.Looks = Looks;
});
//# sourceMappingURL=Looks.js.map
|
import CodigoJava from "show-sintax/src/modulos/CodigoJava"
(() => {
    const inicializar = (config) => {
        CodigoJava.iniciar(config)
    }
    const Java = {
        iniciar: (config) => {
            inicializar(config)
        }
    }
    window.Java = Java
})()
// `Java` is scoped to the IIFE above, so the export must go through the global
export default window.Java
|
# Generated by Django 3.0.9 on 2020-08-14 13:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shopping', '0014_auto_20200814_1138'),
]
operations = [
migrations.RemoveField(
model_name='price',
name='product',
),
migrations.AddField(
model_name='product',
name='price',
field=models.OneToOneField(default='', on_delete=django.db.models.deletion.CASCADE, to='shopping.Price'),
preserve_default=False,
),
migrations.RemoveField(
model_name='product',
name='picture',
),
migrations.AddField(
model_name='product',
name='picture',
field=models.ManyToManyField(related_name='pictures', to='shopping.ProductPicture'),
),
]
|
from .common import BaseTask
import json
from paperboy.utils import name_to_class
from luigi.parameter import Parameter, ParameterVisibility
class DokkuTask(BaseTask):
report = Parameter(visibility=ParameterVisibility.HIDDEN)
config = Parameter()
def __init__(self, *args, **kwargs):
super(DokkuTask, self).__init__(*args, **kwargs)
config = json.loads(kwargs.get('config', '{}'))  # default must be a JSON string, not a dict
self._config = name_to_class(config.get('config')).from_json(config)
self._report = json.loads(self.report)
def run(self):
self.log.critical('report-post')
fp = self.input().open('r')
output_nb = fp.read()
fp.close()
self.log.critical(output_nb)
outputter = self._config.clazz(self._config)  # the parsed config object carries the output class; self.config is the raw JSON string
outputter.write(self._report, output_nb, task_id=self.task_id)
|
# https://www.hackerrank.com/challenges/abbr/problem?h_l=interview&playlist_slugs%5B%5D%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D%5B%5D=dynamic-programming
#
class Solution():
    def abbreviation(self, a, b):
        # dp[i][j] is True when b[:i] can be formed from a[:j] by
        # capitalising some lowercase letters of a and deleting the rest.
        m, n = len(a), len(b)
        dp = [[False]*(m+1) for _ in range(n+1)]
        dp[0][0] = True
        for i in range(n+1):
            for j in range(m+1):
                if i == 0 and j != 0:
                    # b is empty: every character of a[:j] must be a deletable lowercase
                    dp[i][j] = a[j-1].islower() and dp[i][j-1]
                elif i != 0 and j != 0:
                    if a[j-1] == b[i-1]:
                        # exact uppercase match: consume both characters
                        dp[i][j] = dp[i-1][j-1]
                    elif a[j-1].upper() == b[i-1]:
                        # lowercase match: either capitalise it or delete it
                        dp[i][j] = dp[i-1][j-1] or dp[i][j-1]
                    elif not (a[j-1].isupper() and b[i-1].isupper()):
                        # non-matching lowercase: delete it
                        dp[i][j] = dp[i][j-1]
        return "YES" if dp[n][m] else "NO"
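# Minimal usage sketch; the sample inputs below are illustrative and not part
# of the original file ("daBcd" -> "ABC" by capitalising 'a'/'c' and deleting
# the lowercase 'd's).
if __name__ == "__main__":
    print(Solution().abbreviation("daBcd", "ABC"))  # YES
    print(Solution().abbreviation("dBcd", "ABC"))   # NO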
|
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehuman.org/
**Code Home Page:** http://code.google.com/p/makehuman/
**Authors:** Thomas Larsson
**Copyright(c):** MakeHuman Team 2001-2009
**Licensing:** GPL3 (see also http://sites.google.com/site/makehumandocs/licensing)
**Coding Standards:** See http://sites.google.com/site/makehumandocs/developers-guide
Abstract
--------
Finger bone definitions
"""
import mhx_globals as the
from mhx_globals import *
from mhx_rig import addPoseBone
FingerJoints = [
#('l-hand-2', 'j', 'l-hand-2'),
#('l-hand-3', 'j', 'l-hand-3'),
#('r-hand-2', 'j', 'r-hand-2'),
#('r-hand-3', 'j', 'r-hand-3'),
('r-hand-2', 'l', ((0.7, 'r-hand'), (0.3, 'r-finger-5-1'))),
('r-hand-3', 'l', ((0.7, 'r-hand'), (0.3, 'r-finger-2-1'))),
('l-hand-2', 'l', ((0.7, 'l-hand'), (0.3, 'l-finger-5-1'))),
('l-hand-3', 'l', ((0.7, 'l-hand'), (0.3, 'l-finger-2-1'))),
]
FingerHeadsTails = [
('Finger-1-1_L', 'r-finger-1-1', 'r-finger-1-2'),
('Finger-1-2_L', 'r-finger-1-2', 'r-finger-1-3'),
('Finger-1-3_L', 'r-finger-1-3', 'r-finger-1-4'),
('Finger-2-1_L', 'r-finger-2-1', 'r-finger-2-2'),
('Finger-2-2_L', 'r-finger-2-2', 'r-finger-2-3'),
('Finger-2-3_L', 'r-finger-2-3', 'r-finger-2-4'),
('Finger-3-1_L', 'r-finger-3-1', 'r-finger-3-2'),
('Finger-3-2_L', 'r-finger-3-2', 'r-finger-3-3'),
('Finger-3-3_L', 'r-finger-3-3', 'r-finger-3-4'),
('Finger-4-1_L', 'r-finger-4-1', 'r-finger-4-2'),
('Finger-4-2_L', 'r-finger-4-2', 'r-finger-4-3'),
('Finger-4-3_L', 'r-finger-4-3', 'r-finger-4-4'),
('Finger-5-1_L', 'r-finger-5-1', 'r-finger-5-2'),
('Finger-5-2_L', 'r-finger-5-2', 'r-finger-5-3'),
('Finger-5-3_L', 'r-finger-5-3', 'r-finger-5-4'),
('Finger-1-1_R', 'l-finger-1-1', 'l-finger-1-2'),
('Finger-1-2_R', 'l-finger-1-2', 'l-finger-1-3'),
('Finger-1-3_R', 'l-finger-1-3', 'l-finger-1-4'),
('Finger-2-1_R', 'l-finger-2-1', 'l-finger-2-2'),
('Finger-2-2_R', 'l-finger-2-2', 'l-finger-2-3'),
('Finger-2-3_R', 'l-finger-2-3', 'l-finger-2-4'),
('Finger-3-1_R', 'l-finger-3-1', 'l-finger-3-2'),
('Finger-3-2_R', 'l-finger-3-2', 'l-finger-3-3'),
('Finger-3-3_R', 'l-finger-3-3', 'l-finger-3-4'),
('Finger-4-1_R', 'l-finger-4-1', 'l-finger-4-2'),
('Finger-4-2_R', 'l-finger-4-2', 'l-finger-4-3'),
('Finger-4-3_R', 'l-finger-4-3', 'l-finger-4-4'),
('Finger-5-1_R', 'l-finger-5-1', 'l-finger-5-2'),
('Finger-5-2_R', 'l-finger-5-2', 'l-finger-5-3'),
('Finger-5-3_R', 'l-finger-5-3', 'l-finger-5-4'),
('Finger-1_R', 'l-finger-1-2', 'l-finger-1-4'),
('Finger-2_R', 'l-finger-2-1', 'l-finger-2-4'),
('Finger-3_R', 'l-finger-3-1', 'l-finger-3-4'),
('Finger-4_R', 'l-finger-4-1', 'l-finger-4-4'),
('Finger-5_R', 'l-finger-5-1', 'l-finger-5-4'),
('Finger-1_L', 'r-finger-1-2', 'r-finger-1-4'),
('Finger-2_L', 'r-finger-2-1', 'r-finger-2-4'),
('Finger-3_L', 'r-finger-3-1', 'r-finger-3-4'),
('Finger-4_L', 'r-finger-4-1', 'r-finger-4-4'),
('Finger-5_L', 'r-finger-5-1', 'r-finger-5-4'),
('Wrist-1_L', 'r-hand', 'r-hand-3'),
('Wrist-2_L', 'r-hand', 'r-hand-2'),
('Palm-1_L', 'r-hand', 'r-finger-1-1'),
('Palm-2_L', 'r-hand-3', 'r-finger-2-1'),
('Palm-3_L', 'r-hand-3', 'r-finger-3-1'),
('Palm-4_L', 'r-hand-2', 'r-finger-4-1'),
('Palm-5_L', 'r-hand-2', 'r-finger-5-1'),
('Wrist-1_R', 'l-hand', 'l-hand-3'),
('Wrist-2_R', 'l-hand', 'l-hand-2'),
('Palm-1_R', 'l-hand', 'l-finger-1-1'),
('Palm-2_R', 'l-hand-3', 'l-finger-2-1'),
('Palm-3_R', 'l-hand-3', 'l-finger-3-1'),
('Palm-4_R', 'l-hand-2', 'l-finger-4-1'),
('Palm-5_R', 'l-hand-2', 'l-finger-5-1'),
]
#
# FingerArmature
#
ThumbRoll = 90*D
FingerArmature = [
# Palm
('Wrist-1_L', 0.0, 'Hand_L', F_DEF, L_LPALM, NoBB),
('Wrist-2_L', 0.0, 'Hand_L', F_DEF, L_LPALM, NoBB),
('Palm-1_L', 0.0, 'Hand_L', F_DEF, L_LPALM, NoBB),
('Palm-2_L', 0.0, 'Wrist-1_L', F_DEF, L_LPALM, NoBB),
('Palm-3_L', 0.0, 'Wrist-1_L', F_DEF, L_LPALM, NoBB),
('Palm-4_L', 0.0, 'Wrist-2_L', F_DEF, L_LPALM, NoBB),
('Palm-5_L', 0.0, 'Wrist-2_L', F_DEF, L_LPALM, NoBB),
('Wrist-1_R', 0.0, 'Hand_R', F_DEF, L_RPALM, NoBB),
('Wrist-2_R', 0.0, 'Hand_R', F_DEF, L_RPALM, NoBB),
('Palm-1_R', 0.0, 'Hand_R', F_DEF, L_RPALM, NoBB),
('Palm-2_R', 0.0, 'Wrist-1_R', F_DEF, L_RPALM, NoBB),
('Palm-3_R', 0.0, 'Wrist-1_R', F_DEF, L_RPALM, NoBB),
('Palm-4_R', 0.0, 'Wrist-2_R', F_DEF, L_RPALM, NoBB),
('Palm-5_R', 0.0, 'Wrist-2_R', F_DEF, L_RPALM, NoBB),
# Fingers
('Finger-1-1_L', ThumbRoll, 'Palm-1_L', F_DEF, L_LHANDFK+L_LHANDIK, NoBB),
('Finger-1-2_L', ThumbRoll, 'Finger-1-1_L', F_DEF, L_LHANDFK, NoBB),
('Finger-1-3_L', ThumbRoll, 'Finger-1-2_L', F_DEF, L_LHANDFK, NoBB),
('Finger-2-1_L', 0.0, 'Palm-2_L', F_DEF, L_LHANDFK, NoBB),
('Finger-2-2_L', 0.0, 'Finger-2-1_L', F_DEF, L_LHANDFK, NoBB),
('Finger-2-3_L', 0.0, 'Finger-2-2_L', F_DEF, L_LHANDFK, NoBB),
('Finger-3-1_L', 0.0, 'Palm-3_L', F_DEF, L_LHANDFK, NoBB),
('Finger-3-2_L', 0.0, 'Finger-3-1_L', F_DEF, L_LHANDFK, NoBB),
('Finger-3-3_L', 0.0, 'Finger-3-2_L', F_DEF, L_LHANDFK, NoBB),
('Finger-4-1_L', 0.0, 'Palm-4_L', F_DEF, L_LHANDFK, NoBB),
('Finger-4-2_L', 0.0, 'Finger-4-1_L', F_DEF, L_LHANDFK, NoBB),
('Finger-4-3_L', 0.0, 'Finger-4-2_L', F_DEF, L_LHANDFK, NoBB),
('Finger-5-1_L', 0.0, 'Palm-5_L', F_DEF, L_LHANDFK, NoBB),
('Finger-5-2_L', 0.0, 'Finger-5-1_L', F_DEF, L_LHANDFK, NoBB),
('Finger-5-3_L', 0.0, 'Finger-5-2_L', F_DEF, L_LHANDFK, NoBB),
('Finger-1-1_R', -ThumbRoll, 'Palm-1_R', F_DEF, L_RHANDFK+L_RHANDIK, NoBB),
('Finger-1-2_R', -ThumbRoll, 'Finger-1-1_R', F_DEF, L_RHANDFK, NoBB),
('Finger-1-3_R', -ThumbRoll, 'Finger-1-2_R', F_DEF, L_RHANDFK, NoBB),
('Finger-2-1_R', 0.0, 'Palm-2_R', F_DEF, L_RHANDFK, NoBB),
('Finger-2-2_R', 0.0, 'Finger-2-1_R', F_DEF, L_RHANDFK, NoBB),
('Finger-2-3_R', 0.0, 'Finger-2-2_R', F_DEF, L_RHANDFK, NoBB),
('Finger-3-1_R', 0.0, 'Palm-3_R', F_DEF, L_RHANDFK, NoBB),
('Finger-3-2_R', 0.0, 'Finger-3-1_R', F_DEF, L_RHANDFK, NoBB),
('Finger-3-3_R', 0.0, 'Finger-3-2_R', F_DEF, L_RHANDFK, NoBB),
('Finger-4-1_R', 0.0, 'Palm-4_R', F_DEF, L_RHANDFK, NoBB),
('Finger-4-2_R', 0.0, 'Finger-4-1_R', F_DEF, L_RHANDFK, NoBB),
('Finger-4-3_R', 0.0, 'Finger-4-2_R', F_DEF, L_RHANDFK, NoBB),
('Finger-5-1_R', 0.0, 'Palm-5_R', F_DEF, L_RHANDFK, NoBB),
('Finger-5-2_R', 0.0, 'Finger-5-1_R', F_DEF, L_RHANDFK, NoBB),
('Finger-5-3_R', 0.0, 'Finger-5-2_R', F_DEF, L_RHANDFK, NoBB),
# Finger controls
('Finger-1_L', ThumbRoll, 'Finger-1-1_L', F_WIR, L_LHANDIK, NoBB),
('Finger-2_L', 0.0, 'Palm-2_L', F_WIR, L_LHANDIK, NoBB),
('Finger-3_L', 0.0, 'Palm-3_L', F_WIR, L_LHANDIK, NoBB),
('Finger-4_L', 0.0, 'Palm-4_L', F_WIR, L_LHANDIK, NoBB),
('Finger-5_L', 0.0, 'Palm-5_L', F_WIR, L_LHANDIK, NoBB),
('Finger-1_R', -ThumbRoll, 'Finger-1-1_R', F_WIR, L_RHANDIK, NoBB),
('Finger-2_R', 0.0, 'Palm-2_R', F_WIR, L_RHANDIK, NoBB),
('Finger-3_R', 0.0, 'Palm-3_R', F_WIR, L_RHANDIK, NoBB),
('Finger-4_R', 0.0, 'Palm-4_R', F_WIR, L_RHANDIK, NoBB),
('Finger-5_R', 0.0, 'Palm-5_R', F_WIR, L_RHANDIK, NoBB),
]
#
# defineFingerConstraints():
#
limitRotThumb = ('LimitRot', C_OW_LOCAL, 1, ['LimitRot', (-1.37,0.5, 0,0, -60*D,60*D), (1,0,1)])
limitRotFingers = ('LimitRot', C_OW_LOCAL, 1, ['LimitRot', (-1.37,0.5, 0,0, -30*D,30*D), (1,0,1)])
def defineFingerConstraints():
fconstraints = {}
for fnum in range(1,6):
for suffix in ["_L", "_R"]:
finger = "Finger-%d%s" % (fnum, suffix)
for lnum in range(1,4):
if fnum == 1:
if lnum == 1:
cnss = []
else:
cnss = [ ('CopyRot', C_LOCAL, 1, ['Rot', finger, (1,0,0), (0,0,0), True]) ]
cnss.append( limitRotThumb )
else:
if lnum == 1:
cnss = [ ('CopyRot', C_LOCAL, 1, ['Rot', finger, (1,0,1), (0,0,0), True]) ]
else:
cnss = [ ('CopyRot', C_LOCAL, 1, ['Rot', finger, (1,0,0), (0,0,0), True]) ]
cnss.append( limitRotFingers )
fconstraints["%d-%d%s" % (fnum, lnum, suffix)] = cnss
return fconstraints
fconstraints = defineFingerConstraints()
#
# FingerControlPoses(fp):
#
customShape = 'MHCircle05'
customShape = None   # overrides the line above: custom bone shape disabled
def FingerControlPoses(fp):
for suffix in ['_L', '_R']:
for fnum in range(1,6):
fing = 'Finger-%d%s' % (fnum, suffix)
if fnum == 1:
lim = limitRotThumb
else:
lim = limitRotFingers
addPoseBone(fp, fing, 'MHKnuckle', None, (1,1,1), (0,1,0), (1,0,1), (1,1,1), 0, [lim])
for lnum in range(1,4):
if (fnum == 1 and lnum <= 2) or (fnum >= 2 and lnum == 1):
rot = (0,1,0)
ik = (1,0,1)
else:
rot = (0,1,1)
ik = (0,0,1)
fing = 'Finger-%d-%d%s' % (fnum, lnum, suffix)
addPoseBone(fp, fing, customShape, None, (1,1,1), rot, (1,1,1), ik, 0,
fconstraints["%d-%d%s" % (fnum, lnum, suffix)])
palm = 'Palm-%d%s' % (fnum, suffix)
addPoseBone(fp, palm, None, None, (1,1,1), (0,0,0), (1,1,1), (1,1,1), 0, [])
addPoseBone(fp, 'Wrist-1%s' % suffix, None, None, (1,1,1), (0,0,0), (1,1,1), (1,1,1), 0, [])
addPoseBone(fp, 'Wrist-2%s' % suffix, None, None, (1,1,1), (0,0,0), (1,1,1), (1,1,1), 0, [])
return
#
# getFingerPropDrivers():
#
def getFingerPropDrivers():
drivers = []
for fnum in range(1,6):
for lnum in range(1,4):
if (fnum != 1) or (lnum != 1):
finger = 'Finger-%d-%d' % (fnum,lnum)
drivers.append( (finger, 'Rot', ['FingerControl'], 'x1') )
return drivers
|
# PYTRIS™ Copyright (c) 2017 Jason Kim All Rights Reserved.
import pygame
from mino import *
from random import *
from pygame.locals import *
# Define
block_size = 17 # Height, width of single block
width = 10 # Board width
height = 20 # Board height
framerate = 30 # Bigger -> Slower
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((300, 374))
pygame.time.set_timer(pygame.USEREVENT, framerate * 10)
pygame.display.set_caption("PYTRIS™")
class ui_variables:
# Fonts
font_path = "./assets/fonts/OpenSans-Light.ttf"
h1 = pygame.font.Font(font_path, 50)
h2 = pygame.font.Font(font_path, 30)
h4 = pygame.font.Font(font_path, 20)
h5 = pygame.font.Font(font_path, 13)
h6 = pygame.font.Font(font_path, 10)
# Sounds
click_sound = pygame.mixer.Sound("assets/sounds/SFX_ButtonUp.wav")
move_sound = pygame.mixer.Sound("assets/sounds/SFX_PieceMoveLR.wav")
drop_sound = pygame.mixer.Sound("assets/sounds/SFX_PieceHardDrop.wav")
single_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialLineClearSingle.wav")
double_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialLineClearDouble.wav")
triple_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialLineClearTriple.wav")
tetris_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialTetris.wav")
# Background colors
black = (10, 10, 10) #rgb(10, 10, 10)
white = (255, 255, 255) #rgb(255, 255, 255)
grey_1 = (26, 26, 26) #rgb(26, 26, 26)
grey_2 = (35, 35, 35) #rgb(35, 35, 35)
grey_3 = (55, 55, 55) #rgb(55, 55, 55)
# Tetrimino colors
cyan = (69, 206, 204) #rgb(69, 206, 204) # I
blue = (64, 111, 249) #rgb(64, 111, 249) # J
orange = (253, 189, 53) #rgb(253, 189, 53) # L
yellow = (246, 227, 90) #rgb(246, 227, 90) # O
green = (98, 190, 68) #rgb(98, 190, 68) # S
pink = (242, 64, 235) #rgb(242, 64, 235) # T
red = (225, 13, 27) #rgb(225, 13, 27) # Z
t_color = [grey_2, cyan, blue, orange, yellow, green, pink, red, grey_3]
# Draw single block
def draw_block(x, y, color):
pygame.draw.rect(
screen,
color,
Rect(x, y, block_size, block_size)
)
pygame.draw.rect(
screen,
ui_variables.grey_1,
Rect(x, y, block_size, block_size),
1
)
# Draw game screen
def draw_board(next, hold, score, level, goal):
screen.fill(ui_variables.grey_1)
pygame.draw.rect(
screen,
ui_variables.white,
Rect(204, 0, 96, 374)
)
# Draw next mino
grid_n = tetrimino.mino_map[next - 1][0]
for i in range(4):
for j in range(4):
dx = 220 + block_size * j
dy = 150 + block_size * i
if grid_n[i][j] != 0:
pygame.draw.rect(
screen,
ui_variables.t_color[grid_n[i][j]],
Rect(dx, dy, block_size, block_size)
)
# Draw hold mino
    if hold != -1:
        grid_h = tetrimino.mino_map[hold - 1][0]
        for i in range(4):
            for j in range(4):
                dx = 220 + block_size * j
                dy = 50 + block_size * i
                if grid_h[i][j] != 0:
                    pygame.draw.rect(
                        screen,
                        ui_variables.t_color[grid_h[i][j]],
                        Rect(dx, dy, block_size, block_size)
                    )
# Set max score
if score > 999999:
score = 999999
# Draw texts
text_hold = ui_variables.h5.render("HOLD", 1, ui_variables.black)
text_next = ui_variables.h5.render("NEXT", 1, ui_variables.black)
text_score = ui_variables.h5.render("SCORE", 1, ui_variables.black)
score_value = ui_variables.h4.render(str(score), 1, ui_variables.black)
text_level = ui_variables.h5.render("LEVEL", 1, ui_variables.black)
level_value = ui_variables.h4.render(str(level), 1, ui_variables.black)
text_goal = ui_variables.h5.render("GOAL", 1, ui_variables.black)
goal_value = ui_variables.h4.render(str(goal), 1, ui_variables.black)
# Place texts
screen.blit(text_hold, (215, 14))
screen.blit(text_next, (215, 114))
screen.blit(text_score, (215, 214))
screen.blit(score_value, (220, 230))
screen.blit(text_level, (215, 264))
screen.blit(level_value, (220, 280))
screen.blit(text_goal, (215, 314))
screen.blit(goal_value, (220, 330))
# Draw board
for x in range(width):
for y in range(height):
dx = 17 + block_size * x
dy = 17 + block_size * y
draw_block(dx, dy, ui_variables.t_color[matrix[x][y]])
# Draw a tetrimino
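# (the while loop below finds the landing row of a hard drop and marks those
# cells with 8, the "ghost" shadow colour index in ui_variables.t_color)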
def draw_mino(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
tx, ty = x, y
while not is_bottom(tx, ty, mino, r):
ty += 1
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
matrix[tx + j][ty + i] = 8
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
matrix[x + j][y + i] = grid[i][j]
# Erase a tetrimino
def erase_mino(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
for j in range(20):
for i in range(10):
if matrix[i][j] == 8:
matrix[i][j] = 0
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
matrix[x + j][y + i] = 0
def is_bottom(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (y + i + 1) > 19:
return True
elif matrix[x + j][y + i + 1] != 0 and matrix[x + j][y + i + 1] != 8:
return True
return False
def is_leftedge(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (x + j - 1) < 0:
return True
elif matrix[x + j - 1][y + i] != 0:
return True
return False
def is_rightedge(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (x + j + 1) > 9:
return True
elif matrix[x + j + 1][y + i] != 0:
return True
return False
def is_turnable(x, y, mino, r):
if r != 3:
grid = tetrimino.mino_map[mino - 1][r + 1]
else:
grid = tetrimino.mino_map[mino - 1][0]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (x + j) < 0 or (x + j) > 9 or (y + i) < 0 or (y + i) > 19:
return False
elif matrix[x + j][y + i] != 0:
return False
return True
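# is_stackable checks whether the next mino fits into the spawn area
# (columns 3-6 of the top four rows); when it does not, the stack has
# reached the top and the game is over.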
def is_stackable(mino):
grid = tetrimino.mino_map[mino - 1][0]
for i in range(4):
for j in range(4):
#print(grid[i][j], matrix[3 + j][i])
if grid[i][j] != 0 and matrix[3 + j][i] != 0:
return False
return True
# Initial values
blink = True
start = False
done = False
game_over = False
score = 0
level = 1
goal = level * 5
bottom_count = 0
hard_drop = False
dx, dy = 3, 0
rotation = 0
mino = randint(1, 7)
next_mino = randint(1, 7)
hold = False
hold_mino = -1
matrix = [[0 for y in range(height)] for x in range(width)]
###########################################################
# Loop Start
###########################################################
while not done:
# Game screen
if start:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
# Set speed
pygame.time.set_timer(pygame.USEREVENT, framerate * 10)
# Draw a mino
draw_mino(dx, dy, mino, rotation)
draw_board(next_mino, hold_mino, score, level, goal)
# Erase a mino
erase_mino(dx, dy, mino, rotation)
# Move mino down
if not is_bottom(dx, dy, mino, rotation):
dy += 1
# Create new mino
else:
if hard_drop or bottom_count == 4:
hard_drop = False
bottom_count = 0
score += 10 * level
draw_mino(dx, dy, mino, rotation)
draw_board(next_mino, hold_mino, score, level, goal)
if is_stackable(next_mino):
mino = next_mino
next_mino = randint(1, 7)
dx, dy = 3, 0
rotation = 0
hold = False
else:
start = False
game_over = True
else:
bottom_count += 1
# Erase line
erase_count = 0
for j in range(20):
is_full = True
for i in range(10):
if matrix[i][j] == 0:
is_full = False
if is_full:
erase_count += 1
k = j
while k > 0:
for i in range(10):
matrix[i][k] = matrix[i][k - 1]
k -= 1
if erase_count == 1:
ui_variables.single_sound.play()
score += 50 * level
elif erase_count == 2:
ui_variables.double_sound.play()
score += 150 * level
elif erase_count == 3:
ui_variables.triple_sound.play()
score += 350 * level
elif erase_count == 4:
ui_variables.tetris_sound.play()
score += 1000 * level
# Increase level
goal -= erase_count
if goal < 1 and level < 15:
level += 1
goal += level * 5
framerate = int(framerate * 0.8)
elif event.type == KEYDOWN:
erase_mino(dx, dy, mino, rotation)
if event.key == K_SPACE:
ui_variables.drop_sound.play()
while not is_bottom(dx, dy, mino, rotation):
dy += 1
hard_drop = True
pygame.time.set_timer(pygame.USEREVENT, 5)
elif event.key == K_LSHIFT:
if not hold:
ui_variables.move_sound.play()
if hold_mino == -1:
hold_mino = mino
mino = next_mino
next_mino = randint(1, 7)
else:
hold_mino, mino = mino, hold_mino
dx, dy = 3, 0
rotation = 0
hold = True
elif event.key == K_UP:
if is_turnable(dx, dy, mino, rotation):
ui_variables.move_sound.play()
rotation += 1
if rotation == 4:
rotation = 0
elif event.key == K_DOWN:
if not is_bottom(dx, dy, mino, rotation):
ui_variables.move_sound.play()
dy += 1
elif event.key == K_LEFT:
if not is_leftedge(dx, dy, mino, rotation):
ui_variables.move_sound.play()
dx -= 1
elif event.key == K_RIGHT:
if not is_rightedge(dx, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 1
draw_mino(dx, dy, mino, rotation)
draw_board(next_mino, hold_mino, score, level, goal)
pygame.display.update()
# Game over screen
elif game_over:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == KEYDOWN:
if event.key == K_SPACE:
ui_variables.click_sound.play()
game_over = False
hold = False
dx, dy = 3, 0
rotation = 0
mino = randint(1, 7)
next_mino = randint(1, 7)
hold_mino = -1
score = 0
matrix = [[0 for y in range(height)] for x in range(width)]
over_text = ui_variables.h2.render("GAME OVER", 1, ui_variables.white)
over_start = ui_variables.h5.render("Press space to continue", 1, ui_variables.white)
if game_over:
draw_board(next_mino, hold_mino, score, level, goal)
screen.blit(over_text, (20, 100))
if blink:
screen.blit(over_start, (32, 160))
blink = False
else:
blink = True
pygame.display.update()
clock.tick(3)
# Start screen
else:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == KEYDOWN:
if event.key == K_SPACE:
ui_variables.click_sound.play()
start = True
screen.fill(ui_variables.white)
pygame.draw.rect(
screen,
ui_variables.grey_1,
Rect(0, 187, 300, 187)
)
title = ui_variables.h1.render("PYTRIS™", 1, ui_variables.grey_1)
title_start = ui_variables.h5.render("Press space to start", 1, ui_variables.white)
title_info = ui_variables.h6.render("Copyright (c) 2017 Jason Kim All Rights Reserved.", 1, ui_variables.white)
if blink:
screen.blit(title_start, (92, 195))
blink = False
else:
blink = True
screen.blit(title, (65, 120))
screen.blit(title_info, (40, 335))
if not start:
pygame.display.update()
clock.tick(3)
pygame.quit()
|
// components/check-info2/index.js
Component({
/**
* List of component properties
*/
properties: {
info: Object
},
/**
* Initial data of the component
*/
data: {
},
/**
* List of component methods
*/
methods: {
}
})
|
// Imports
import { createReadStream, writeFileSync } from 'fs';
import { readFile } from 'fs/promises';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import OCL from 'openchemlib';
import { molecules } from 'sdf-parser';
// Import CID Spectra
const cidHigh = JSON.parse(await readFile('./hmdb/cidSpectra/cidHigh.json'));
const cidLow = JSON.parse(await readFile('./hmdb/cidSpectra/cidLow.json'));
const cidMed = JSON.parse(await readFile('./hmdb/cidSpectra/cidMed.json'));
// import structures from the SDF file
const __dirname = dirname(fileURLToPath(import.meta.url));
const entries = [];
const stream = createReadStream(join(__dirname, 'hmdb/structures.sdf')).pipe(
molecules(),
);
// convert each structure to an oclID and keep it together with its database ID
for await (let entry of stream) {
const molecule = OCL.Molecule.fromMolfile(entry.molfile);
entries.push({
oclID: molecule.getIDCode(),
id: entry.DATABASE_ID,
});
}
// Collect the structures that were measured in CID and separate them by energy level
const cidLowStructures = [];
const cidMedStructures = [];
const cidHighStructures = [];
for (let i = 0; i < entries.length; i++) {
for (let s = 0; s < cidLow.length; s++) {
if (entries[i].id === cidLow[s].id) {
cidLowStructures.push(entries[i]);
}
}
for (let a = 0; a < cidMed.length; a++) {
if (entries[i].id === cidMed[a].id) {
cidMedStructures.push(entries[i]);
}
}
for (let b = 0; b < cidHigh.length; b++) {
if (entries[i].id === cidHigh[b].id) {
cidHighStructures.push(entries[i]);
}
}
}
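// Note: the nested loops above are O(n*m) per energy level. A sketch of a
// faster lookup, assuming each id appears only once per spectra list
// (otherwise the original's duplicate pushes would be lost):
//
//   const lowIds = new Set(cidLow.map((s) => s.id));
//   const cidLowStructuresFast = entries.filter((e) => lowIds.has(e.id));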
// Write 3 JSON files containing the structures of each CID spectrum, by energy level
writeFileSync(
join(__dirname, 'hmdb/molecules/cidLowStructures.json'),
JSON.stringify(cidLowStructures),
'utf8',
);
writeFileSync(
join(__dirname, 'hmdb/molecules/cidMedStructures.json'),
JSON.stringify(cidMedStructures),
'utf8',
);
writeFileSync(
join(__dirname, 'hmdb/molecules/cidHighStructures.json'),
JSON.stringify(cidHighStructures),
'utf8',
);
|
"""Config flow for Modern Forms."""
from __future__ import annotations
from typing import Any, cast
from aiomodernforms import ModernFormsConnectionError, ModernFormsDevice
import voluptuous as vol
from homeassistant.components import zeroconf
from homeassistant.config_entries import SOURCE_ZEROCONF, ConfigFlow
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DOMAIN
class ModernFormsFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle a ModernForms config flow."""
VERSION = 1
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle setup by user for Modern Forms integration."""
return await self._handle_config_flow(user_input)
async def async_step_zeroconf(
self, discovery_info: zeroconf.ZeroconfServiceInfo
) -> FlowResult:
"""Handle zeroconf discovery."""
host = discovery_info[zeroconf.ATTR_HOSTNAME].rstrip(".")
name, _ = host.rsplit(".", 1)
self.context.update(
{
CONF_HOST: discovery_info[zeroconf.ATTR_HOST],
CONF_NAME: name,
CONF_MAC: discovery_info[zeroconf.ATTR_PROPERTIES].get(CONF_MAC),
"title_placeholders": {"name": name},
}
)
# Prepare configuration flow
return await self._handle_config_flow(cast(dict, discovery_info), True)
async def async_step_zeroconf_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initiated by zeroconf."""
return await self._handle_config_flow(user_input)
async def _handle_config_flow(
self, user_input: dict[str, Any] | None = None, prepare: bool = False
) -> FlowResult:
"""Config flow handler for ModernForms."""
source = self.context.get("source")
# Request user input, unless we are preparing discovery flow
if user_input is None:
user_input = {}
if not prepare:
if source == SOURCE_ZEROCONF:
return self._show_confirm_dialog()
return self._show_setup_form()
if source == SOURCE_ZEROCONF:
user_input[CONF_HOST] = self.context.get(CONF_HOST)
user_input[CONF_MAC] = self.context.get(CONF_MAC)
if user_input.get(CONF_MAC) is None or not prepare:
session = async_get_clientsession(self.hass)
device = ModernFormsDevice(user_input[CONF_HOST], session=session)
try:
device = await device.update()
except ModernFormsConnectionError:
if source == SOURCE_ZEROCONF:
return self.async_abort(reason="cannot_connect")
return self._show_setup_form({"base": "cannot_connect"})
user_input[CONF_MAC] = device.info.mac_address
user_input[CONF_NAME] = device.info.device_name
# Check if already configured
await self.async_set_unique_id(user_input[CONF_MAC])
self._abort_if_unique_id_configured(updates={CONF_HOST: user_input[CONF_HOST]})
title = device.info.device_name
if source == SOURCE_ZEROCONF:
title = self.context.get(CONF_NAME)
if prepare:
return await self.async_step_zeroconf_confirm()
return self.async_create_entry(
title=title,
data={CONF_HOST: user_input[CONF_HOST], CONF_MAC: user_input[CONF_MAC]},
)
def _show_setup_form(self, errors: dict | None = None) -> FlowResult:
"""Show the setup form to the user."""
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({vol.Required(CONF_HOST): str}),
errors=errors or {},
)
def _show_confirm_dialog(self, errors: dict | None = None) -> FlowResult:
"""Show the confirm dialog to the user."""
name = self.context.get(CONF_NAME)
return self.async_show_form(
step_id="zeroconf_confirm",
description_placeholders={"name": name},
errors=errors or {},
)
|
from app import app
from config import host, port, debug
if __name__ == "__main__":
app.run(host=host, port=port, debug=debug)
|
/* Code for WATS 3020 FizzBuzz Assignment */
let isInteger,
maxNumber,
fbResults,
fbText;
// Initialize `isInteger` as the control value for the `while` loop.
isInteger = false;

// Prompt the user for a number until they enter a good one. The loop is
// controlled by `isInteger`: when it becomes `true`, the loop stops looping.
while (!isInteger) {
  // Prompt the user for the `maxNumber` value.
  maxNumber = parseInt(prompt("Please enter a whole number greater than zero:"), 10);
  // Verify that `maxNumber` is a suitable integer (an integer greater than
  // zero); if so, flip `isInteger` so the `while` loop stops.
  if (Number.isSafeInteger(maxNumber) && maxNumber > 0) {
    isInteger = true;
  }
}

// Initialize `fbResults` to an empty Array.
fbResults = [];

// Calculate FizzBuzz for 1 through `maxNumber` using the `%` operator,
// storing the results in `fbResults`.
for (let i = 1; i <= maxNumber; i++) {
  if (i % 3 === 0 && i % 5 === 0) {
    fbResults.push("FizzBuzz");
  } else if (i % 3 === 0) {
    fbResults.push("Fizz");
  } else if (i % 5 === 0) {
    fbResults.push("Buzz");
  } else {
    fbResults.push(i);
  }
}

// Concatenate the values from `fbResults` into `fbText`.
// ("<br>" assumes the output element renders HTML; see `output.innerHTML` below.)
fbText = "";
for (let value of fbResults) {
  fbText += value + "<br>";
}
///////////////////////////////////////////////////////////////////////
// Do not edit below this line unless you are doing something fancy!
//////////////////////////////////////////////////////////////////////
let numDisplay = document.querySelector("#max-number");
numDisplay.innerHTML = maxNumber;
let output = document.querySelector("#output");
output.innerHTML = fbText;
|
import operator
from operator import le, lt
import textwrap
from typing import TYPE_CHECKING, Optional, Tuple, Union, cast
import numpy as np
from pandas._config import get_option
from pandas._libs.interval import (
VALID_CLOSED,
Interval,
IntervalMixin,
intervals_to_interval_bounds,
)
from pandas._libs.missing import NA
from pandas._typing import ArrayLike, Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import maybe_convert_platform
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_dtype_equal,
is_float_dtype,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.core.dtypes.generic import (
ABCDatetimeIndex,
ABCIntervalIndex,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna
from pandas.core.algorithms import take, value_counts
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.arrays.categorical import Categorical
import pandas.core.common as com
from pandas.core.construction import array, extract_array
from pandas.core.indexers import check_array_indexer
from pandas.core.indexes.base import ensure_index
from pandas.core.ops import invalid_comparison, unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
from pandas.core.arrays import DatetimeArray, TimedeltaArray
_interval_shared_docs = {}
_shared_docs_kwargs = dict(
klass="IntervalArray", qualname="arrays.IntervalArray", name=""
)
_interval_shared_docs[
"class"
] = """
%(summary)s
.. versionadded:: %(versionadded)s
Parameters
----------
data : array-like (1-dimensional)
Array-like containing Interval objects from which to build the
%(klass)s.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both or
neither.
dtype : dtype or None, default None
If None, dtype will be inferred.
copy : bool, default False
Copy the input data.
%(name)s\
verify_integrity : bool, default True
Verify that the %(klass)s is valid.
Attributes
----------
left
right
closed
mid
length
is_empty
is_non_overlapping_monotonic
%(extra_attributes)s\
Methods
-------
from_arrays
from_tuples
from_breaks
contains
overlaps
set_closed
to_tuples
%(extra_methods)s\
See Also
--------
Index : The base pandas Index type.
Interval : A bounded slice-like interval; the elements of an %(klass)s.
interval_range : Function to create a fixed frequency IntervalIndex.
cut : Bin values into discrete Intervals.
qcut : Bin values into equal-sized Intervals based on rank or sample quantiles.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`_
for more.
%(examples)s\
"""
@Appender(
_interval_shared_docs["class"]
% dict(
klass="IntervalArray",
summary="Pandas array for interval data that are closed on the same side.",
versionadded="0.24.0",
name="",
extra_attributes="",
extra_methods="",
examples=textwrap.dedent(
"""\
Examples
--------
A new ``IntervalArray`` can be constructed directly from an array-like of
``Interval`` objects:
>>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
<IntervalArray>
[(0, 1], (1, 5]]
Length: 2, closed: right, dtype: interval[int64]
It may also be constructed using one of the constructor
methods: :meth:`IntervalArray.from_arrays`,
:meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.
"""
),
)
)
class IntervalArray(IntervalMixin, ExtensionArray):
ndim = 1
can_hold_na = True
_na_value = _fill_value = np.nan
# ---------------------------------------------------------------------
# Constructors
def __new__(
cls,
data,
closed=None,
dtype=None,
copy: bool = False,
verify_integrity: bool = True,
):
if isinstance(data, (ABCSeries, ABCIntervalIndex)) and is_interval_dtype(
data.dtype
):
data = data._values # TODO: extract_array?
if isinstance(data, cls):
left = data._left
right = data._right
closed = closed or data.closed
if dtype is None or data.dtype == dtype:
# This path will preserve id(result._combined)
# TODO: could also validate dtype before going to simple_new
combined = data._combined
if copy:
combined = combined.copy()
result = cls._simple_new(combined, closed=closed)
if verify_integrity:
result._validate()
return result
else:
# don't allow scalars
if is_scalar(data):
msg = (
f"{cls.__name__}(...) must be called with a collection "
f"of some kind, {data} was passed"
)
raise TypeError(msg)
# might need to convert empty or purely na data
data = maybe_convert_platform_interval(data)
left, right, infer_closed = intervals_to_interval_bounds(
data, validate_closed=closed is None
)
closed = closed or infer_closed
closed = closed or "right"
left, right = _maybe_cast_inputs(left, right, copy, dtype)
combined = _get_combined_data(left, right)
result = cls._simple_new(combined, closed=closed)
if verify_integrity:
result._validate()
return result
@classmethod
def _simple_new(cls, data, closed="right"):
result = IntervalMixin.__new__(cls)
result._combined = data
result._left = data[:, 0]
result._right = data[:, 1]
result._closed = closed
return result
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_factorized(cls, values, original):
if len(values) == 0:
# An empty array returns object-dtype here. We can't create
# a new IA from an (empty) object-dtype array, so turn it into the
# correct dtype.
values = values.astype(original.dtype.subtype)
return cls(values, closed=original.closed)
_interval_shared_docs["from_breaks"] = textwrap.dedent(
"""
Construct an %(klass)s from an array of splits.
Parameters
----------
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
Copy the data.
dtype : dtype or None, default None
If None, dtype will be inferred.
Returns
-------
%(klass)s
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_arrays : Construct from a left and right array.
%(klass)s.from_tuples : Construct from a sequence of tuples.
%(examples)s\
"""
)
@classmethod
@Appender(
_interval_shared_docs["from_breaks"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, closed: right, dtype: interval[int64]
"""
),
)
)
def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
breaks = maybe_convert_platform_interval(breaks)
return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)
_interval_shared_docs["from_arrays"] = textwrap.dedent(
"""
Construct from two arrays defining the left and right bounds.
Parameters
----------
left : array-like (1-dimensional)
Left bounds for each interval.
right : array-like (1-dimensional)
Right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
Copy the data.
dtype : dtype, optional
If None, dtype will be inferred.
Returns
-------
%(klass)s
Raises
------
ValueError
When a value is missing in only one of `left` or `right`.
When a value in `left` is greater than the corresponding value
in `right`.
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
%(klass)s.from_tuples : Construct an %(klass)s from an
array-like of tuples.
Notes
-----
Each element of `left` must be less than or equal to the `right`
element at the same position. If an element is missing, it must be
missing in both `left` and `right`. A TypeError is raised when
using an unsupported type for `left` or `right`. At the moment,
'category', 'object', and 'string' subtypes are not supported.
%(examples)s\
"""
)
@classmethod
@Appender(
_interval_shared_docs["from_arrays"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
>>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, closed: right, dtype: interval[int64]
"""
),
)
)
def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
left = maybe_convert_platform_interval(left)
right = maybe_convert_platform_interval(right)
if len(left) != len(right):
raise ValueError("left and right must have the same length")
closed = closed or "right"
left, right = _maybe_cast_inputs(left, right, copy, dtype)
combined = _get_combined_data(left, right)
result = cls._simple_new(combined, closed)
result._validate()
return result
_interval_shared_docs["from_tuples"] = textwrap.dedent(
"""
Construct an %(klass)s from an array-like of tuples.
Parameters
----------
data : array-like (1-dimensional)
Array of tuples.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
Whether to copy the data; kept for compat only and currently ignored.
dtype : dtype or None, default None
If None, dtype will be inferred.
Returns
-------
%(klass)s
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_arrays : Construct an %(klass)s from a left and
right array.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
%(examples)s\
"""
)
@classmethod
@Appender(
_interval_shared_docs["from_tuples"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
<IntervalArray>
[(0, 1], (1, 2]]
Length: 2, closed: right, dtype: interval[int64]
"""
),
)
)
def from_tuples(cls, data, closed="right", copy=False, dtype=None):
if len(data):
left, right = [], []
else:
# ensure that empty data keeps input dtype
left = right = data
for d in data:
if isna(d):
lhs = rhs = np.nan
else:
name = cls.__name__
try:
# need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
lhs, rhs = d
except ValueError as err:
msg = f"{name}.from_tuples requires tuples of length 2, got {d}"
raise ValueError(msg) from err
except TypeError as err:
msg = f"{name}.from_tuples received an invalid item, {d}"
raise TypeError(msg) from err
left.append(lhs)
right.append(rhs)
return cls.from_arrays(left, right, closed, copy=False, dtype=dtype)
def _validate(self):
"""
Verify that the IntervalArray is valid.
Checks that
* closed is valid
* left and right match lengths
* left and right have the same missing values
* left is always below right
"""
if self.closed not in VALID_CLOSED:
msg = f"invalid option for 'closed': {self.closed}"
raise ValueError(msg)
if len(self._left) != len(self._right):
msg = "left and right must have the same length"
raise ValueError(msg)
left_mask = notna(self._left)
right_mask = notna(self._right)
if not (left_mask == right_mask).all():
msg = (
"missing values must be missing in the same "
"location both left and right sides"
)
raise ValueError(msg)
if not (self._left[left_mask] <= self._right[left_mask]).all():
msg = "left side of interval must be <= right side"
raise ValueError(msg)
# ---------------------------------------------------------------------
# Descriptive
@property
def dtype(self):
return IntervalDtype(self.left.dtype)
@property
def nbytes(self) -> int:
return self.left.nbytes + self.right.nbytes
@property
def size(self) -> int:
# Avoid materializing self.values
return self.left.size
# ---------------------------------------------------------------------
# EA Interface
def __iter__(self):
return iter(np.asarray(self))
def __len__(self) -> int:
return len(self._left)
def __getitem__(self, key):
key = check_array_indexer(self, key)
result = self._combined[key]
if is_integer(key):
left, right = result[0], result[1]
if isna(left):
return self._fill_value
return Interval(left, right, self.closed)
# TODO: need to watch out for incorrectly-reducing getitem
if np.ndim(result) > 2:
# GH#30588 multi-dimensional indexer disallowed
raise ValueError("multi-dimensional indexing not allowed")
return type(self)._simple_new(result, closed=self.closed)
def __setitem__(self, key, value):
value_left, value_right = self._validate_setitem_value(value)
key = check_array_indexer(self, key)
self._left[key] = value_left
self._right[key] = value_right
def _cmp_method(self, other, op):
# ensure pandas array for list-like and eliminate non-interval scalars
if is_list_like(other):
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
other = array(other)
elif not isinstance(other, Interval):
# non-interval scalar -> no matches
return invalid_comparison(self, other, op)
# determine the dtype of the elements we want to compare
if isinstance(other, Interval):
other_dtype = pandas_dtype("interval")
elif not is_categorical_dtype(other.dtype):
other_dtype = other.dtype
else:
# for categorical defer to categories for dtype
other_dtype = other.categories.dtype
# extract intervals if we have interval categories with matching closed
if is_interval_dtype(other_dtype):
if self.closed != other.categories.closed:
return invalid_comparison(self, other, op)
other = other.categories.take(
other.codes, allow_fill=True, fill_value=other.categories._na_value
)
# interval-like -> need same closed and matching endpoints
if is_interval_dtype(other_dtype):
if self.closed != other.closed:
return invalid_comparison(self, other, op)
elif not isinstance(other, Interval):
other = type(self)(other)
if op is operator.eq:
return (self._left == other.left) & (self._right == other.right)
elif op is operator.ne:
return (self._left != other.left) | (self._right != other.right)
elif op is operator.gt:
return (self._left > other.left) | (
(self._left == other.left) & (self._right > other.right)
)
elif op is operator.ge:
return (self == other) | (self > other)
elif op is operator.lt:
return (self._left < other.left) | (
(self._left == other.left) & (self._right < other.right)
)
else:
# operator.le
return (self == other) | (self < other)
# non-interval/non-object dtype -> no matches
if not is_object_dtype(other_dtype):
return invalid_comparison(self, other, op)
# object dtype -> iteratively check for intervals
result = np.zeros(len(self), dtype=bool)
for i, obj in enumerate(other):
try:
result[i] = op(self[i], obj)
except TypeError:
if obj is NA:
# comparison with np.nan returns NA
# github.com/pandas-dev/pandas/pull/37124#discussion_r509095092
result[i] = op is operator.ne
else:
raise
return result
@unpack_zerodim_and_defer("__eq__")
def __eq__(self, other):
return self._cmp_method(other, operator.eq)
@unpack_zerodim_and_defer("__ne__")
def __ne__(self, other):
return self._cmp_method(other, operator.ne)
@unpack_zerodim_and_defer("__gt__")
def __gt__(self, other):
return self._cmp_method(other, operator.gt)
@unpack_zerodim_and_defer("__ge__")
def __ge__(self, other):
return self._cmp_method(other, operator.ge)
@unpack_zerodim_and_defer("__lt__")
def __lt__(self, other):
return self._cmp_method(other, operator.lt)
@unpack_zerodim_and_defer("__le__")
def __le__(self, other):
return self._cmp_method(other, operator.le)
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should be either Interval objects or NA/NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
(Not implemented yet for IntervalArray)
Method to use for filling holes in reindexed Series
limit : int, default None
(Not implemented yet for IntervalArray)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : IntervalArray with NA/NaN filled
"""
if method is not None:
raise TypeError("Filling by method is not supported for IntervalArray.")
if limit is not None:
raise TypeError("limit is not supported for IntervalArray.")
value_left, value_right = self._validate_fillna_value(value)
left = self.left.fillna(value=value_left)
right = self.right.fillna(value=value_right)
combined = _get_combined_data(left, right)
return type(self)._simple_new(combined, closed=self.closed)
def astype(self, dtype, copy=True):
"""
Cast to an ExtensionArray or NumPy array with dtype 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ExtensionArray or ndarray
ExtensionArray or NumPy ndarray with 'dtype' for its dtype.
"""
from pandas import Index
from pandas.core.arrays.string_ import StringDtype
if dtype is not None:
dtype = pandas_dtype(dtype)
if is_interval_dtype(dtype):
if dtype == self.dtype:
return self.copy() if copy else self
# need to cast to different subtype
try:
# We need to use Index rules for astype to prevent casting
# np.nan entries to int subtypes
new_left = Index(self._left, copy=False).astype(dtype.subtype)
new_right = Index(self._right, copy=False).astype(dtype.subtype)
except TypeError as err:
msg = (
f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
)
raise TypeError(msg) from err
# TODO: do astype directly on self._combined
combined = _get_combined_data(new_left, new_right)
return type(self)._simple_new(combined, closed=self.closed)
elif is_categorical_dtype(dtype):
return Categorical(np.asarray(self))
elif isinstance(dtype, StringDtype):
return dtype.construct_array_type()._from_sequence(self, copy=False)
# TODO: This try/except will be repeated.
try:
return np.asarray(self).astype(dtype, copy=copy)
except (TypeError, ValueError) as err:
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg) from err
def equals(self, other) -> bool:
if type(self) != type(other):
return False
return bool(
self.closed == other.closed
and self.left.equals(other.left)
and self.right.equals(other.right)
)
@classmethod
def _concat_same_type(cls, to_concat):
"""
Concatenate multiple IntervalArray
Parameters
----------
to_concat : sequence of IntervalArray
Returns
-------
IntervalArray
"""
closed = {interval.closed for interval in to_concat}
if len(closed) != 1:
raise ValueError("Intervals must all be closed on the same side.")
closed = closed.pop()
# TODO: will this mess up on dt64tz?
left = np.concatenate([interval.left for interval in to_concat])
right = np.concatenate([interval.right for interval in to_concat])
combined = _get_combined_data(left, right) # TODO: 1-stage concat
return cls._simple_new(combined, closed=closed)
def copy(self):
"""
Return a copy of the array.
Returns
-------
IntervalArray
"""
combined = self._combined.copy()
return type(self)._simple_new(combined, closed=self.closed)
def isna(self) -> np.ndarray:
return isna(self._left)
def shift(self, periods: int = 1, fill_value: object = None) -> "IntervalArray":
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
# ExtensionArray.shift doesn't work for two reasons
# 1. IntervalArray.dtype.na_value may not be correct for the dtype.
# 2. IntervalArray._from_sequence only accepts NaN for missing values,
# not other values like NaT
empty_len = min(abs(periods), len(self))
if isna(fill_value):
from pandas import Index
fill_value = Index(self._left, copy=False)._na_value
empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1))
else:
empty = self._from_sequence([fill_value] * empty_len)
if periods > 0:
a = empty
b = self[:-periods]
else:
a = self[abs(periods) :]
b = empty
return self._concat_same_type([a, b])
def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):
"""
Take elements from the IntervalArray.
Parameters
----------
indices : sequence of integers
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
fill_value : Interval or NA, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type, ``self.dtype.na_value``, is used.
For many ExtensionArrays, there will be two representations of
`fill_value`: a user-facing "boxed" scalar, and a low-level
physical NA value. `fill_value` should be the user-facing version,
and the implementation should handle translating that to the
physical version for processing the take if necessary.
axis : any, default None
Present for compat with IntervalIndex; does nothing.
Returns
-------
IntervalArray
Raises
------
IndexError
When the indices are out of bounds for the array.
ValueError
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
"""
nv.validate_take(tuple(), kwargs)
fill_left = fill_right = fill_value
if allow_fill:
fill_left, fill_right = self._validate_fill_value(fill_value)
left_take = take(
self._left, indices, allow_fill=allow_fill, fill_value=fill_left
)
right_take = take(
self._right, indices, allow_fill=allow_fill, fill_value=fill_right
)
combined = _get_combined_data(left_take, right_take)
return type(self)._simple_new(combined, closed=self.closed)
def _validate_listlike(self, value):
# list-like of intervals
try:
array = IntervalArray(value)
# TODO: self._check_closed_matches(array, name="value")
value_left, value_right = array.left, array.right
except TypeError as err:
# wrong type: not interval or NA
msg = f"'value' should be an interval type, got {type(value)} instead."
raise TypeError(msg) from err
return value_left, value_right
def _validate_scalar(self, value):
if isinstance(value, Interval):
self._check_closed_matches(value, name="value")
left, right = value.left, value.right
elif is_valid_nat_for_dtype(value, self.left.dtype):
# GH#18295
left = right = value
else:
raise ValueError(
"can only insert Interval objects and NA into an IntervalArray"
)
return left, right
def _validate_fill_value(self, value):
return self._validate_scalar(value)
def _validate_fillna_value(self, value):
# This mirrors Datetimelike._validate_fill_value
try:
return self._validate_scalar(value)
except ValueError as err:
msg = (
"'IntervalArray.fillna' only supports filling with a "
f"scalar 'pandas.Interval'. Got a '{type(value).__name__}' instead."
)
raise TypeError(msg) from err
def _validate_insert_value(self, value):
return self._validate_scalar(value)
def _validate_setitem_value(self, value):
needs_float_conversion = False
if is_valid_nat_for_dtype(value, self.left.dtype):
# na value: need special casing to set directly on numpy arrays
if is_integer_dtype(self.dtype.subtype):
# can't set NaN on a numpy integer array
needs_float_conversion = True
elif is_datetime64_any_dtype(self.dtype.subtype):
# need proper NaT to set directly on the numpy array
value = np.datetime64("NaT")
elif is_timedelta64_dtype(self.dtype.subtype):
# need proper NaT to set directly on the numpy array
value = np.timedelta64("NaT")
value_left, value_right = value, value
elif is_interval_dtype(value) or isinstance(value, Interval):
# scalar interval
self._check_closed_matches(value, name="value")
value_left, value_right = value.left, value.right
else:
return self._validate_listlike(value)
if needs_float_conversion:
raise ValueError("Cannot set float NaN to integer-backed IntervalArray")
return value_left, value_right
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each interval.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
# TODO: implement this in a non-naive way!
return value_counts(np.asarray(self), dropna=dropna)
# ---------------------------------------------------------------------
# Rendering Methods
def _format_data(self):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10)
formatter = str
if n == 0:
summary = "[]"
elif n == 1:
first = formatter(self[0])
summary = f"[{first}]"
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = f"[{first}, {last}]"
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
head_str = ", ".join(head)
tail_str = ", ".join(tail)
summary = f"[{head_str} ... {tail_str}]"
else:
tail = [formatter(x) for x in self]
tail_str = ", ".join(tail)
summary = f"[{tail_str}]"
return summary
def __repr__(self) -> str:
# the short repr has no trailing newline, while the truncated
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
data = self._format_data()
class_name = f"<{type(self).__name__}>\n"
template = (
f"{class_name}"
f"{data}\n"
f"Length: {len(self)}, closed: {self.closed}, dtype: {self.dtype}"
)
return template
def _format_space(self):
space = " " * (len(type(self).__name__) + 1)
return f"\n{space}"
# ---------------------------------------------------------------------
# Vectorized Interval Properties/Attributes
@property
def left(self):
"""
Return the left endpoints of each Interval in the IntervalArray as
an Index.
"""
from pandas import Index
return Index(self._left, copy=False)
@property
def right(self):
"""
Return the right endpoints of each Interval in the IntervalArray as
an Index.
"""
from pandas import Index
return Index(self._right, copy=False)
@property
def length(self):
"""
Return an Index with entries denoting the length of each Interval in
the IntervalArray.
"""
try:
return self.right - self.left
except TypeError as err:
# length not defined for some types, e.g. string
msg = (
"IntervalArray contains Intervals without defined length, "
"e.g. Intervals with string endpoints"
)
raise TypeError(msg) from err
@property
def mid(self):
"""
Return the midpoint of each Interval in the IntervalArray as an Index.
"""
try:
return 0.5 * (self.left + self.right)
except TypeError:
# datetime safe version
return self.left + 0.5 * self.length
_interval_shared_docs["overlaps"] = textwrap.dedent(
"""
Check elementwise if an Interval overlaps the values in the %(klass)s.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
.. versionadded:: 0.24.0
Parameters
----------
other : %(klass)s
Interval to check against for an overlap.
Returns
-------
ndarray
Boolean array positionally indicating where an overlap occurs.
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
Examples
--------
%(examples)s
>>> intervals.overlaps(pd.Interval(0.5, 1.5))
array([ True, True, False])
Intervals that share closed endpoints overlap:
>>> intervals.overlaps(pd.Interval(1, 3, closed='left'))
array([ True, True, True])
Intervals that only have an open endpoint in common do not overlap:
>>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
array([False, True, False])
"""
)
@Appender(
_interval_shared_docs["overlaps"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
>>> data = [(0, 1), (1, 3), (2, 4)]
>>> intervals = pd.arrays.IntervalArray.from_tuples(data)
>>> intervals
<IntervalArray>
[(0, 1], (1, 3], (2, 4]]
Length: 3, closed: right, dtype: interval[int64]
"""
),
)
)
def overlaps(self, other):
if isinstance(other, (IntervalArray, ABCIntervalIndex)):
raise NotImplementedError
elif not isinstance(other, Interval):
msg = f"`other` must be Interval-like, got {type(other).__name__}"
raise TypeError(msg)
# equality is okay if both endpoints are closed (overlap at a point)
op1 = le if (self.closed_left and other.closed_right) else lt
op2 = le if (other.closed_left and self.closed_right) else lt
# overlaps is equivalent to the negation of the two intervals being disjoint:
# disjoint = (A.left > B.right) or (B.left > A.right)
# (simplifying the negation allows this to be done in fewer operations)
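# e.g. for self = (0, 2] and other = (1, 3] (both closed='right'):
# op1 = op2 = lt, and 0 < 3 & 1 < 2 -> True, so the intervals overlap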
return op1(self.left, other.right) & op2(other.left, self.right)
# ---------------------------------------------------------------------
@property
def closed(self):
"""
Whether the intervals are closed on the left-side, right-side, both or
neither.
"""
return self._closed
_interval_shared_docs["set_closed"] = textwrap.dedent(
"""
Return an %(klass)s identical to the current one, but closed on the
specified side.
.. versionadded:: 0.24.0
Parameters
----------
closed : {'left', 'right', 'both', 'neither'}
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
new_index : %(klass)s
%(examples)s\
"""
)
@Appender(
_interval_shared_docs["set_closed"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
Examples
--------
>>> index = pd.arrays.IntervalArray.from_breaks(range(4))
>>> index
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, closed: right, dtype: interval[int64]
>>> index.set_closed('both')
<IntervalArray>
[[0, 1], [1, 2], [2, 3]]
Length: 3, closed: both, dtype: interval[int64]
"""
),
)
)
def set_closed(self, closed):
if closed not in VALID_CLOSED:
msg = f"invalid option for 'closed': {closed}"
raise ValueError(msg)
return type(self)._simple_new(self._combined, closed=closed)
_interval_shared_docs[
"is_non_overlapping_monotonic"
] = """
Return True if the %(klass)s is non-overlapping (no Intervals share
points) and is either monotonic increasing or monotonic decreasing,
else False.
"""
# https://github.com/python/mypy/issues/1362
# Mypy does not support decorated properties
@property # type: ignore[misc]
@Appender(
_interval_shared_docs["is_non_overlapping_monotonic"] % _shared_docs_kwargs
)
def is_non_overlapping_monotonic(self):
# must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
# or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
# we already require left <= right
# strict inequality for closed == 'both'; equality implies overlapping
# at a point when both sides of intervals are included
if self.closed == "both":
return bool(
(self._right[:-1] < self._left[1:]).all()
or (self._left[:-1] > self._right[1:]).all()
)
# non-strict inequality when closed != 'both'; at least one side is
# not included in the intervals, so equality does not imply overlapping
return bool(
(self._right[:-1] <= self._left[1:]).all()
or (self._left[:-1] >= self._right[1:]).all()
)
# ---------------------------------------------------------------------
# Conversion
def __array__(self, dtype=None) -> np.ndarray:
"""
Return the IntervalArray's data as a numpy array of Interval
objects (with dtype='object')
"""
left = self._left
right = self._right
mask = self.isna()
closed = self._closed
result = np.empty(len(left), dtype=object)
for i in range(len(left)):
if mask[i]:
result[i] = np.nan
else:
result[i] = Interval(left[i], right[i], closed)
return result
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import ArrowIntervalType
try:
subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)
except TypeError as err:
raise TypeError(
f"Conversion to arrow with subtype '{self.dtype.subtype}' "
"is not supported"
) from err
interval_type = ArrowIntervalType(subtype, self.closed)
storage_array = pyarrow.StructArray.from_arrays(
[
pyarrow.array(self._left, type=subtype, from_pandas=True),
pyarrow.array(self._right, type=subtype, from_pandas=True),
],
names=["left", "right"],
)
mask = self.isna()
if mask.any():
# if there are missing values, set validity bitmap also on the array level
null_bitmap = pyarrow.array(~mask).buffers()[1]
storage_array = pyarrow.StructArray.from_buffers(
storage_array.type,
len(storage_array),
[null_bitmap],
children=[storage_array.field(0), storage_array.field(1)],
)
if type is not None:
if type.equals(interval_type.storage_type):
return storage_array
elif isinstance(type, ArrowIntervalType):
# ensure we have the same subtype and closed attributes
if not type.equals(interval_type):
raise TypeError(
"Not supported to convert IntervalArray to type with "
f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) "
f"and 'closed' ({self.closed} vs {type.closed}) attributes"
)
else:
raise TypeError(
f"Not supported to convert IntervalArray to '{type}' type"
)
return pyarrow.ExtensionArray.from_storage(interval_type, storage_array)
_interval_shared_docs[
"to_tuples"
] = """
Return an %(return_type)s of tuples of the form (left, right).
Parameters
----------
na_tuple : bool, default True
Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA
value itself if False, ``nan``.
Returns
-------
tuples: %(return_type)s
%(examples)s\
"""
@Appender(
_interval_shared_docs["to_tuples"] % dict(return_type="ndarray", examples="")
)
def to_tuples(self, na_tuple=True):
tuples = com.asarray_tuplesafe(zip(self._left, self._right))
if not na_tuple:
# GH 18756
tuples = np.where(~self.isna(), tuples, np.nan)
return tuples
# ---------------------------------------------------------------------
@Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
combined = self._combined.repeat(repeats, 0)
return type(self)._simple_new(combined, closed=self.closed)
_interval_shared_docs["contains"] = textwrap.dedent(
"""
Check elementwise if the Intervals contain the value.
Return a boolean mask whether the value is contained in the Intervals
of the %(klass)s.
.. versionadded:: 0.25.0
Parameters
----------
other : scalar
The value to check whether it is contained in the Intervals.
Returns
-------
boolean array
See Also
--------
Interval.contains : Check whether Interval object contains value.
%(klass)s.overlaps : Check if an Interval overlaps the values in the
%(klass)s.
Examples
--------
%(examples)s
>>> intervals.contains(0.5)
array([ True, False, False])
"""
)
@Appender(
_interval_shared_docs["contains"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
>>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])
>>> intervals
<IntervalArray>
[(0, 1], (1, 3], (2, 4]]
Length: 3, closed: right, dtype: interval[int64]
"""
),
)
)
def contains(self, other):
if isinstance(other, Interval):
raise NotImplementedError("contains not implemented for two intervals")
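# e.g. for the interval (0, 1] (closed='right'):
# contains(1) -> True, contains(0) -> False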
return (self._left < other if self.open_left else self._left <= other) & (
other < self._right if self.open_right else other <= self._right
)
def maybe_convert_platform_interval(values):
"""
Try to do platform conversion, with special casing for IntervalArray.
Wrapper around maybe_convert_platform that alters the default return
dtype in certain cases to be compatible with IntervalArray. For example,
empty lists return with integer dtype instead of object dtype, which is
prohibited for IntervalArray.
Parameters
----------
values : array-like
Returns
-------
array
"""
if isinstance(values, (list, tuple)) and len(values) == 0:
# GH 19016
# empty lists/tuples get object dtype by default, but this is
# prohibited for IntervalArray, so coerce to integer instead
return np.array([], dtype=np.int64)
elif is_categorical_dtype(values):
values = np.asarray(values)
return maybe_convert_platform(values)
def _maybe_cast_inputs(
left_orig: Union["Index", ArrayLike],
right_orig: Union["Index", ArrayLike],
copy: bool,
dtype: Optional[Dtype],
) -> Tuple["Index", "Index"]:
left = ensure_index(left_orig, copy=copy)
right = ensure_index(right_orig, copy=copy)
if dtype is not None:
# GH#19262: dtype must be an IntervalDtype to override inferred
dtype = pandas_dtype(dtype)
if not is_interval_dtype(dtype):
msg = f"dtype must be an IntervalDtype, got {dtype}"
raise TypeError(msg)
dtype = cast(IntervalDtype, dtype)
if dtype.subtype is not None:
left = left.astype(dtype.subtype)
right = right.astype(dtype.subtype)
# coerce dtypes to match if needed
if is_float_dtype(left) and is_integer_dtype(right):
right = right.astype(left.dtype)
elif is_float_dtype(right) and is_integer_dtype(left):
left = left.astype(right.dtype)
if type(left) != type(right):
msg = (
f"must not have differing left [{type(left).__name__}] and "
f"right [{type(right).__name__}] types"
)
raise ValueError(msg)
elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype):
# GH#19016
msg = (
"category, object, and string subtypes are not supported "
"for IntervalArray"
)
raise TypeError(msg)
elif isinstance(left, ABCPeriodIndex):
msg = "Period dtypes are not supported, use a PeriodIndex instead"
raise ValueError(msg)
elif isinstance(left, ABCDatetimeIndex) and not is_dtype_equal(
left.dtype, right.dtype
):
left_arr = cast("DatetimeArray", left._data)
right_arr = cast("DatetimeArray", right._data)
msg = (
"left and right must have the same time zone, got "
f"'{left_arr.tz}' and '{right_arr.tz}'"
)
raise ValueError(msg)
return left, right
def _get_combined_data(
left: Union["Index", ArrayLike], right: Union["Index", ArrayLike]
) -> Union[np.ndarray, "DatetimeArray", "TimedeltaArray"]:
# For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray
from pandas.core.ops.array_ops import maybe_upcast_datetimelike_array
left = maybe_upcast_datetimelike_array(left)
left = extract_array(left, extract_numpy=True)
right = maybe_upcast_datetimelike_array(right)
right = extract_array(right, extract_numpy=True)
lbase = getattr(left, "_ndarray", left).base
rbase = getattr(right, "_ndarray", right).base
if lbase is not None and lbase is rbase:
# If these share data, then setitem could corrupt our IA
right = right.copy()
if isinstance(left, np.ndarray):
assert isinstance(right, np.ndarray) # for mypy
combined = np.concatenate(
[left.reshape(-1, 1), right.reshape(-1, 1)],
axis=1,
)
else:
left = cast(Union["DatetimeArray", "TimedeltaArray"], left)
right = cast(Union["DatetimeArray", "TimedeltaArray"], right)
combined = type(left)._concat_same_type(
[left.reshape(-1, 1), right.reshape(-1, 1)],
axis=1,
)
return combined
|
from django.contrib import admin
from .models import Item, Trait, VotedTrait
class VotedTraitInLine(admin.TabularInline):
model = Item.traits.through
extra = 3
class ItemAdmin(admin.ModelAdmin):
inlines = [VotedTraitInLine]
admin.site.register(Item, ItemAdmin)
admin.site.register(Trait)
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import copy
import numpy as np
import itertools
from ppdet.metrics.map_utils import draw_pr_curve
from ppdet.metrics.json_results import get_det_res, get_det_poly_res, get_seg_res, get_solov2_segm_res
import paddlex.utils.logging as logging
def get_infer_results(outs, catid, bias=0):
"""
Get result at the stage of inference.
The output format is dictionary containing bbox or mask result.
For example, bbox result is a list and each element contains
image_id, category_id, bbox and score.
"""
if outs is None or len(outs) == 0:
raise ValueError(
'The number of valid detection results is zero. Please use a reasonable model and check the input data.'
)
im_id = outs['im_id']
infer_res = {}
if 'bbox' in outs:
if len(outs['bbox']) > 0 and len(outs['bbox'][0]) > 6:
infer_res['bbox'] = get_det_poly_res(
outs['bbox'], outs['bbox_num'], im_id, catid, bias=bias)
else:
infer_res['bbox'] = get_det_res(
outs['bbox'], outs['bbox_num'], im_id, catid, bias=bias)
if 'mask' in outs:
# mask post process
infer_res['mask'] = get_seg_res(outs['mask'], outs['bbox'],
outs['bbox_num'], im_id, catid)
if 'segm' in outs:
infer_res['segm'] = get_solov2_segm_res(outs, im_id, catid)
return infer_res
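# A single entry of infer_res['bbox'] follows the COCO detection format
# (the values below are purely illustrative; bbox is [x, y, width, height]):
#   {'image_id': 1, 'category_id': 3, 'bbox': [10.0, 20.0, 50.0, 40.0], 'score': 0.87}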
def cocoapi_eval(anns,
style,
coco_gt=None,
anno_file=None,
max_dets=(100, 300, 1000),
classwise=False):
"""
Args:
anns: Evaluation result.
style (str): COCOeval style, can be `bbox` , `segm` and `proposal`.
coco_gt (str): Whether to load COCOAPI through anno_file,
eg: coco_gt = COCO(anno_file)
anno_file (str): COCO annotations file.
max_dets (tuple): COCO evaluation maxDets.
classwise (bool): Whether per-category AP and draw P-R Curve or not.
"""
assert coco_gt is not None or anno_file is not None
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
if coco_gt is None:
coco_gt = COCO(anno_file)
logging.info("Start evaluate...")
coco_dt = loadRes(coco_gt, anns)
if style == 'proposal':
coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.params.useCats = 0
coco_eval.params.maxDets = list(max_dets)
else:
coco_eval = COCOeval(coco_gt, coco_dt, style)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
if classwise:
# Compute per-category AP and PR curve
try:
from terminaltables import AsciiTable
except Exception as e:
logging.error(
'terminaltables not found, please install terminaltables. '
'for example: `pip install terminaltables`.')
raise e
precisions = coco_eval.eval['precision']
cat_ids = coco_gt.getCatIds()
# precision: (iou, recall, cls, area range, max dets)
assert len(cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = coco_gt.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(str(nm["name"]), '{:0.3f}'.format(float(ap))))
pr_array = precisions[0, :, idx, 0, 2]
recall_array = np.arange(0.0, 1.01, 0.01)
draw_pr_curve(
pr_array,
recall_array,
out_dir=style + '_pr_curve',
file_name='{}_precision_recall_curve.jpg'.format(nm["name"]))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(
*[results_flatten[i::num_columns] for i in range(num_columns)])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
logging.info('Per-category of {} AP: \n{}'.format(style, table.table))
logging.info("per-category PR curve has output to {} folder.".format(
style + '_pr_curve'))
# flush coco evaluation result
sys.stdout.flush()
return coco_eval.stats
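# Usage sketch (hypothetical annotation path; `anns` as produced by
# get_infer_results):
#   stats = cocoapi_eval(infer_res['bbox'], 'bbox',
#                        anno_file='annotations/instances_val.json')
#   # stats[0] is the COCO mAP averaged over IoU=0.50:0.95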
def loadRes(coco_obj, anns):
"""
Load detection results and return a result api object.
:param coco_obj (COCO) : COCO api object holding the ground truth
:param anns (list) : list of detection result dicts
:return: res (COCO) : result api object
"""
# This function has the same functionality as pycocotools.COCO.loadRes,
# except that the input anns is a list of results rather than a json file.
# Refer to
# https://github.com/cocodataset/cocoapi/blob/8c9bcc3cf640524c4c20a9c40e89cb6a2f2fa0e9/PythonAPI/pycocotools/coco.py#L305,
# matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
# or matplotlib.backends is imported for the first time;
# pycocotools imports matplotlib, so force a non-interactive backend here
import matplotlib
matplotlib.use('Agg')
from pycocotools.coco import COCO
import pycocotools.mask as maskUtils
import time
res = COCO()
res.dataset['images'] = [img for img in coco_obj.dataset['images']]
tic = time.time()
assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(coco_obj.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set(
[ann['image_id'] for ann in anns])
res.dataset['images'] = [
img for img in res.dataset['images'] if img['id'] in imgIds
]
for id, ann in enumerate(anns):
ann['id'] = id + 1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
'categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2] * bb[3]
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
'categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
'categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1 - x0) * (y1 - y0)
ann['id'] = id + 1
ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]
res.dataset['annotations'] = anns
res.createIndex()
return res
|
"""
Django settings for classDeck project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from django.contrib.messages import constants as messages
from dotenv import load_dotenv
import environ
load_dotenv()
env = environ.Env()
# reading .env file
environ.Env.read_env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.humanize',
'classroom',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'classDeck.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'classDeck.wsgi.application'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
]
SOCIALACCOUNT_PROVIDERS = {
'google': {
'SCOPE': [
'profile',
'email',
],
'AUTH_PARAMS': {
'access_type': 'online',
}
}
}
SITE_ID = 2
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
AUTH_USER_MODEL = 'classroom.User'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
MESSAGE_TAGS = {
messages.DEBUG: 'alert-secondary',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-warning',
messages.ERROR: 'alert-danger',
}
CRISPY_TEMPLATE_PACK = 'bootstrap4'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
WHITENOISE_USE_FINDERS = True
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
DEFAULT_FILE_STORAGE = 'cloudinary_storage.storage.MediaCloudinaryStorage'
import dj_database_url
prod_db = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(prod_db)
CLOUDINARY_STORAGE ={
'CLOUD_NAME': 'defmhlaju',
'API_KEY': env('API_KEY'),
'API_SECRET': env('API_SECRET')
}
EMAIL_HOST = env('EMAIL_HOST')
EMAIL_USE_TLS = True
EMAIL_PORT = env('EMAIL_PORT')
EMAIL_HOST_USER = env('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD')
|
from __future__ import division
from __future__ import print_function
# Preprocessing of Option Quotes
# ==============================
#
# This notebook demonstrates the preprocessing of equity options, in preparation for the estimation of the parameters of a stochastic model.
# A number of preliminary calculations must be performed:
#
# 1. Calculation of implied risk-free rate and dividend yield, and derivation of forward prices
# 2. Calculation of forward at-the-money volatility. There is probably no option struck at the forward price, so this item must be computed by interpolation.
# 3. Calculation of the Black-Scholes implied bid and ask volatility, given bid and ask option prices.
# 4. Calculation of 'Quick Delta': this is a common measure of moneyness, useful for representing the volatility smile.
#
# Each step is now described.
#
# Calculation of implied dividend yield and risk-free rate
# --------------------------------------------------------
#
# Recall the put-call parity relationship with continuous dividends:
#
# $$
# C_t - P_t = S_t e^{-d (T-t)} - K e^{-r (T-t)}
# $$
#
# where
#
# * $C_t$ price of call at time $t$
# * $P_t$ price of put at time $t$
# * $S_t$ spot price of underlying asset
# * $d$ continuous dividend yield
# * $r$ risk-free rate
# * $T$ expiry
#
# For each maturity, we estimate the linear regression:
#
# $$
# C_t - P_t = a_0 + a_1 K
# $$
#
# which yields
#
# $$
# r = - \frac{1}{T} \ln (-a_1)
# $$
# $$
# d = \frac{1}{T} \ln \left( \frac{S_t}{a_0} \right)
# $$
#
# Calculation of forward at-the-money volatility
# ----------------------------------------------
#
# We next want to estimate the implied volatility of an option struck at the forward price. In general, such an option is not traded, so its volatility must be estimated. The calculation involves 3 steps, performed separately on calls and puts:
#
# 1. Estimate the bid ($\sigma_b(K)$) and ask ($\sigma_a(K)$) Black-Scholes volatility for each quote.
# 2. Compute a mid-market implied volatility for each quote:
# $$
# \sigma(K) = \frac{\sigma_b(K)+\sigma_a(K)}{2}
# $$
# 3. Let $F$ be the forward price; the corresponding mid-market implied volatility is computed by linear interpolation between the two quotes bracketing $F$.
#
# The forward ATM volatility is the average of the volatilities computed on calls and puts.
#
# Quick Delta
# -----------
#
# Recall that the delta of a European call is defined as $N(d_1)$, where
#
# $$
# d_{1} = \frac{1}{\sigma \sqrt{T}} \left[ \ln \left( \frac{S}{K} \right) + \left( r + \frac{1}{2}\sigma^2 \right)T \right]
# $$
#
# The "Quick Delta" (QD) is a popular measure of moneyness, inspired from the definition of delta:
#
# $$
# QD(K) = N \left( \frac{1}{\sigma \sqrt{T}} \ln \left( \frac{F_T}{K} \right) \right)
# $$
#
# Note that $QD(F_T)=0.5$, for all maturities, while the regular forward delta is a function of time to expiry. This property of Quick Delta makes it convenient for representing the volatility smile.
#
# Data Filters
# ------------
#
# A number of filters may be applied, in an attempt to exclude inconsistent or erroneous data.
#
# 1. Exclusion of maturities shorter than $tMin$
# 2. Exclusion of maturities with less than $nMin$ quotes
# 3. Exclusion of quotes with Quick Delta less than $QDMin$ or higher than $QDMax$
#
# Implementation
# --------------
#
# This logic is implemented in the function `Compute_IV`, presented below. The function takes as argument a `pandas DataFrame` and returns another
# `DataFrame`, with one row per quote and 14 columns:
#
# 1. Type: 'C'/'P'
# 2. Strike
# 3. dtExpiry
# 4. dtTrade
# 5. Spot
# 6. IVBid: Black-Scholes implied volatility (bid)
# 7. IVAsk: Black-Scholes implied volatility (ask)
# 8. QD: Quick Delta
# 9. iRate: risk-free rate (continuously compounded)
# 10. iDiv: dividend yield (continuously compounded)
# 11. Fwd: Forward price
# 12. TTM: Time to maturity, in fraction of years (ACT/365)
# 13. PBid: Premium (bid)
# 14. PAsk: Premium (ask)
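# <markdowncell>
# Before the full implementation, the two core calculations can be checked on
# synthetic data. The sketch below uses purely hypothetical numbers: call/put
# mid prices are fabricated to satisfy put-call parity exactly, the regression
# recovers $r$ and $d$, and the Quick Delta is computed from the implied
# forward.
# <codecell>
def _demo_parity_and_quick_delta():
    import numpy as np
    from scipy.stats import norm

    spot, T, r_true, d_true = 100.0, 0.5, 0.02, 0.01
    strikes = np.linspace(80, 120, 9)
    # put-call parity: C - P = S exp(-d T) - K exp(-r T)
    c_minus_p = spot * np.exp(-d_true * T) - strikes * np.exp(-r_true * T)
    # regress C - P on K: slope a1 = -exp(-r T), intercept a0 = S exp(-d T)
    A = np.vstack((strikes, np.ones_like(strikes))).T
    a1, a0 = np.linalg.lstsq(A, c_minus_p, rcond=None)[0]
    r = -np.log(-a1) / T                # recovers r_true
    d = np.log(spot / a0) / T           # recovers d_true
    fwd = spot * np.exp((r - d) * T)    # implied forward
    # Quick Delta at each strike, using a hypothetical ATM volatility
    atm_vol = 0.20
    qd = norm.cdf(np.log(fwd / strikes) / (atm_vol * np.sqrt(T)))
    print(r, d, fwd, qd[4])             # QD at K=100 is close to 0.5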
# <codecell>
import pandas
import dateutil
import re
import datetime
import numpy as np
from pandas import DataFrame
from scipy.interpolate import interp1d
from scipy.stats import norm
from scipy.linalg import lstsq
import quantlib.pricingengines.blackformula
from quantlib.pricingengines.blackformula import blackFormulaImpliedStdDev
from quantlib.instruments.option import Call, Put
def Compute_IV(optionDataFrame, tMin=0, nMin=0, QDMin=0, QDMax=1, keepOTMData=True):
"""
Pre-processing of a standard European option quote file.
- Calculation of implied risk-free rate and dividend yield
- Calculation of implied volatility
- Estimate ATM volatility for each expiry
- Compute implied volatility and Quick Delta for each quote
Options for filtering the input data set:
- maturities with less than nMin strikes are ignored
- maturities shorter than tMin (ACT/365 daycount) are ignored
- strikes with Quick Delta < QDMin or > QDMax are ignored
"""
grouped = optionDataFrame.groupby('dtExpiry')
isFirst = True
for spec, group in grouped:
print('processing group %s' % spec)
# implied vol for this type/expiry group
indx = group.index
dtTrade = group['dtTrade'][indx[0]]
dtExpiry = group['dtExpiry'][indx[0]]
spot = group['Spot'][indx[0]]
daysToExpiry = (dtExpiry-dtTrade).days
timeToMaturity = daysToExpiry/365.0
# exclude groups with too short time to maturity
if timeToMaturity < tMin:
continue
# exclude groups with too few data points
df_call = group[group['Type'] == 'C']
df_put = group[group['Type'] == 'P']
if (len(df_call) < nMin) | (len(df_put) < nMin):
continue
# calculate forward, implied interest rate and implied div. yield
df_C = DataFrame((df_call['PBid']+df_call['PAsk'])/2,
columns=['PremiumC'])
df_C.index = df_call['Strike']
df_P = DataFrame((df_put['PBid']+df_put['PAsk'])/2,
columns=['PremiumP'])
df_P.index = df_put['Strike']
# use 'inner' join because some strikes are not quoted for C and P
df_all = df_C.join(df_P, how='inner')
df_all['Strike'] = df_all.index
df_all['C-P'] = df_all['PremiumC'] - df_all['PremiumP']
y = np.array(df_all['C-P'])
x = np.array(df_all['Strike'])
A = np.vstack((x, np.ones(x.shape))).T
b = np.linalg.lstsq(A, y, rcond=None)[0]
# intercept is last coef
iRate = -np.log(-b[0])/timeToMaturity
dRate = np.log(spot/b[1])/timeToMaturity
discountFactor = np.exp(-iRate*timeToMaturity)
Fwd = spot * np.exp((iRate-dRate)*timeToMaturity)
print('Fwd: %f int rate: %f div yield: %f' % (Fwd, iRate, dRate))
# mid-market ATM volatility
def impvol(cp, strike, premium):
try:
vol = blackFormulaImpliedStdDev(cp, strike,
forward=Fwd, blackPrice=premium, discount=discountFactor,
TTM=timeToMaturity)
except RuntimeError:
vol = np.nan
return vol/np.sqrt(timeToMaturity)
# implied bid/ask vol for all options
df_call = df_call.assign(IVBid = [impvol(Call, strike, price) for strike, price
in zip(df_call['Strike'], df_call['PBid'])],
IVAsk = [impvol(Call, strike, price) for strike, price
in zip(df_call['Strike'], df_call['PAsk'])])
df_call = df_call.assign(IVMid = (df_call.IVBid + df_call.IVAsk)/2)
df_put = df_put.assign(IVBid = [impvol(Put, strike, price) for strike, price
in zip(df_put['Strike'], df_put['PBid'])],
IVAsk = [impvol(Put, strike, price) for strike, price
in zip(df_put['Strike'], df_put['PAsk'])])
df_put = df_put.assign(IVMid = (df_put['IVBid'] + df_put['IVAsk'])/2)
f_call = interp1d(df_call['Strike'].values, df_call['IVMid'].values)
f_put = interp1d(df_put['Strike'].values, df_put['IVMid'].values)
atmVol = (f_call(Fwd)+f_put(Fwd))/2
print('ATM vol: %f' % atmVol)
# Quick Delta, computed with ATM vol
rv = norm()
df_call = (df_call.
assign(QuickDelta=
rv.cdf(np.log(Fwd/df_call.Strike.values) / (atmVol*np.sqrt(timeToMaturity)))))
df_put = (df_put.
assign(QuickDelta=
rv.cdf(np.log(Fwd/df_put.Strike.values)/(atmVol*np.sqrt(timeToMaturity)))))
# keep data within QD range
df_call = df_call[(df_call['QuickDelta'] >= QDMin) & \
(df_call['QuickDelta'] <= QDMax) ]
df_put = df_put[ (df_put['QuickDelta'] >= QDMin) & \
(df_put['QuickDelta'] <= QDMax) ]
# final assembly...
df_cp = df_call.append(df_put, ignore_index=True)
df_cp['iRate'] = iRate
df_cp['iDiv'] = dRate
df_cp['ATMVol'] = atmVol
df_cp['Fwd'] = Fwd
df_cp['TTM'] = timeToMaturity
df_cp['CP'] = [1 if t == 'C' else -1 for t in df_cp['Type']]
# keep only OTM data ?
if keepOTMData:
df_cp = df_cp[((df_cp['Strike']>=Fwd) & (df_cp['Type'] == 'C')) |
((df_cp['Strike']<Fwd) & (df_cp['Type'] == 'P'))]
if isFirst:
df_final = df_cp
isFirst = False
else:
df_final = df_final.append(df_cp, ignore_index=True)
return df_final
# <markdowncell>
# Example
# -------
#
# Using the SPX data set found in the data folder, the above procedure generates a `DataFrame` suited for use in a calibration program.
# <codecell>
if __name__ == '__main__':
option_data_frame = pandas.read_pickle('../data/df_SPX_24jan2011.pkl')
df_final = Compute_IV(option_data_frame, tMin=1.0/12, nMin=6, QDMin=.2, QDMax=.8)
# save a csv file and pickled data frame
df_final.to_csv('../data/df_options_SPX_24jan2011.csv', index=False)
df_final.to_pickle('../data/df_options_SPX_24jan2011.pkl')
|
var onePerRound = require('../index.js')({
maxPerRound: 1
})
var twoPerRound = require('../index.js')({
maxPerRound: 2
})
var odd = {
participants: [
{ id: 1, seed: 1000 },
{ id: 2, seed: 1050 },
{ id: 3, seed: 950 }
],
matches: [
{
round: 1,
home: { id: 1, points: 1 },
away: { id: 3, points: 1 }
},
{
round: 1,
home: { id: 2, points: 0 },
away: { id: null, points: 0 }
}
]
}
var even = {
participants: [
{ id: 'ID 1', seed: 700 },
{ id: 'ID 2', seed: 625 },
{ id: 'ID 3', seed: 950 },
{ id: 'ID 4', seed: 800 }
],
matches: [
{
round: 1,
home: { id: 'ID 3', points: 1 },
away: { id: 'ID 4', points: 0 }
},
{
round: 1,
home: { id: 'ID 1', points: 0 },
away: { id: 'ID 2', points: 1 }
},
{
round: 2,
home: { id: 'ID 3', points: 1 },
away: { id: 'ID 2', points: 0 }
},
{
round: 2,
home: { id: 'ID 4', points: 0 },
away: { id: 'ID 1', points: 1 }
},
]
}
var byeTest = {
participants: [
{ id: 'Team 1',
seed: 3636,
disbanded: false,
droppedOut: false },
{ id: 'Team 2',
seed: 4001,
disbanded: false,
droppedOut: false },
{ id: 'Team 3',
seed: 4001,
disbanded: false,
droppedOut: false },
{ id: 'Team 4',
seed: 4011,
disbanded: false,
droppedOut: false },
{ id: 'Team 5',
seed: 4029,
disbanded: false,
droppedOut: false },
{ id: 'Team 6',
seed: 4030,
disbanded: false,
droppedOut: false },
{ id: 'Team 7',
seed: 4043,
disbanded: false,
droppedOut: false },
{ id: 'Team 8',
seed: 4044,
disbanded: false,
droppedOut: false },
{ id: 'Team 9',
seed: 4066,
disbanded: false,
droppedOut: false },
{ id: 'Team 10',
seed: 4142,
disbanded: false,
droppedOut: false },
{ id: 'Team 11',
seed: 4174,
disbanded: false,
droppedOut: false },
{ id: 'Team 12',
seed: 4179,
disbanded: false,
droppedOut: false },
{ id: 'Team 13',
seed: 4183,
disbanded: false,
droppedOut: false },
{ id: 'Team 14',
seed: 4194,
disbanded: false,
droppedOut: false },
{ id: 'Team 15',
seed: 4199,
disbanded: false,
droppedOut: false },
{ id: 'Team 16',
seed: 4209,
disbanded: false,
droppedOut: false },
{ id: 'Team 17',
seed: 4233,
disbanded: false,
droppedOut: false },
{ id: 'Team 18',
seed: 4270,
disbanded: false,
droppedOut: false },
{ id: 'Team 19',
seed: 4362,
disbanded: false,
droppedOut: false }
],
matches: [
{ round: 1,
home: { id: 'Team 19', points: 1 },
away: { id: 'Team 17', points: 1 } },
{ round: 1,
home: { id: 'Team 18', points: 0 },
away: { id: 'Team 14', points: 2 } },
{ round: 1,
home: { id: 'Team 15', points: 1 },
away: { id: 'Team 13', points: 1 } },
{ round: 1,
home: { id: 'Team 11', points: 1 },
away: { id: 'Team 10', points: 1 } },
{ round: 1,
home: { id: 'Team 16', points: 1 },
away: { id: 'Team 7', points: 1 } },
{ round: 1,
home: { id: 'Team 9', points: 1 },
away: { id: 'Team 4', points: 1 } },
{ round: 1,
home: { id: 'Team 8', points: 2 },
away: { id: 'Team 6', points: 0 } },
{ round: 1,
home: { id: 'Team 2', points: 0 },
away: { id: 'Team 3', points: 2 } },
{ round: 1,
home: { id: 'Team 5', points: 1 },
away: { id: 'Team 1', points: 1 } },
{ round: 1,
home: { id: 'Team 12', points: 2 },
away: { id: null, points: 0 } },
{ round: 2,
home: { id: 'Team 7', points: 2 },
away: { id: 'Team 19', points: 0 } },
{ round: 2,
home: { id: 'Team 10', points: 1 },
away: { id: 'Team 17', points: 1 } },
{ round: 2,
home: { id: 'Team 11', points: 2 },
away: { id: 'Team 15', points: 0 } },
{ round: 2,
home: { id: 'Team 9', points: 1 },
away: { id: 'Team 18', points: 1 } },
{ round: 2,
home: { id: 'Team 14', points: 2 },
away: { id: 'Team 8', points: 0 } },
{ round: 2,
home: { id: 'Team 16', points: 0 },
away: { id: 'Team 4', points: 2 } },
{ round: 2,
home: { id: 'Team 6', points: 1 },
away: { id: 'Team 12', points: 1 } },
{ round: 2,
home: { id: 'Team 3', points: 1 },
away: { id: 'Team 5', points: 1 } },
{ round: 2,
home: { id: 'Team 13', points: 2 },
away: { id: 'Team 1', points: 0 } },
{ round: 2,
home: { id: 'Team 2', points: 2 },
away: { id: null, points: 0 } }
]
}
var oddModifiedMedian = twoPerRound.getModifiedMedianScores(2, odd.participants, odd.matches)
var evenModifiedMedian = onePerRound.getModifiedMedianScores(3, even.participants, even.matches)
var oddStandings = twoPerRound.getStandings(2, odd.participants, odd.matches)
var evenStandings = onePerRound.getStandings(3, even.participants, even.matches)
var oddMatchups = twoPerRound.getMatchups(2, odd.participants, odd.matches)
var evenMatchups = onePerRound.getMatchups(3, even.participants, even.matches)
var byeMatchups = twoPerRound.getMatchups(3, byeTest.participants, byeTest.matches)
console.log(byeMatchups)
if (Object.entries(oddModifiedMedian).length !== 3) {
throw new Error('getModifiedMedian incorrect for odd data')
}
if (Object.entries(evenModifiedMedian).length !== 4) {
throw new Error('getModifiedMedian incorrect for even data')
}
if (oddStandings.length !== 3) {
throw new Error('getStandings incorrect for odd data')
}
if (evenStandings.length !== 4) {
throw new Error('getStandings incorrect for even data')
}
if (oddMatchups.length !== 2) {
throw new Error('getMatchups incorrect for odd data')
}
if (evenMatchups.length !== 2) {
throw new Error('getMatchups incorrect for even data')
}
|
/**
* @name EmojiStatistics
* @author DevilBro
* @authorId 278543574059057154
* @version 2.9.7
* @description Shows you an Overview of Emojis and Emoji Servers
* @invite Jx3TjNS
* @donate https://www.paypal.me/MircoWittrien
* @patreon https://www.patreon.com/MircoWittrien
* @website https://mwittrien.github.io/
* @source https://github.com/mwittrien/BetterDiscordAddons/tree/master/Plugins/EmojiStatistics/
* @updateUrl https://mwittrien.github.io/BetterDiscordAddons/Plugins/EmojiStatistics/EmojiStatistics.plugin.js
*/
module.exports = (_ => {
const config = {
"info": {
"name": "EmojiStatistics",
"author": "DevilBro",
"version": "2.9.7",
"description": "Shows you an Overview of Emojis and Emoji Servers"
}
};
return !window.BDFDB_Global || (!window.BDFDB_Global.loaded && !window.BDFDB_Global.started) ? class {
getName () {return config.info.name;}
getAuthor () {return config.info.author;}
getVersion () {return config.info.version;}
getDescription () {return `The Library Plugin needed for ${config.info.name} is missing. Open the Plugin Settings to download it. \n\n${config.info.description}`;}
downloadLibrary () {
require("request").get("https://mwittrien.github.io/BetterDiscordAddons/Library/0BDFDB.plugin.js", (e, r, b) => {
if (!e && b && r.statusCode == 200) require("fs").writeFile(require("path").join(BdApi.Plugins.folder, "0BDFDB.plugin.js"), b, _ => BdApi.showToast("Finished downloading BDFDB Library", {type: "success"}));
else BdApi.alert("Error", "Could not download BDFDB Library Plugin. Try again later or download it manually from GitHub: https://mwittrien.github.io/downloader/?library");
});
}
load () {
if (!window.BDFDB_Global || !Array.isArray(window.BDFDB_Global.pluginQueue)) window.BDFDB_Global = Object.assign({}, window.BDFDB_Global, {pluginQueue: []});
if (!window.BDFDB_Global.downloadModal) {
window.BDFDB_Global.downloadModal = true;
BdApi.showConfirmationModal("Library Missing", `The Library Plugin needed for ${config.info.name} is missing. Please click "Download Now" to install it.`, {
confirmText: "Download Now",
cancelText: "Cancel",
onCancel: _ => {delete window.BDFDB_Global.downloadModal;},
onConfirm: _ => {
delete window.BDFDB_Global.downloadModal;
this.downloadLibrary();
}
});
}
if (!window.BDFDB_Global.pluginQueue.includes(config.info.name)) window.BDFDB_Global.pluginQueue.push(config.info.name);
}
start () {this.load();}
stop () {}
getSettingsPanel () {
let template = document.createElement("template");
template.innerHTML = `<div style="color: var(--header-primary); font-size: 16px; font-weight: 300; white-space: pre; line-height: 22px;">The Library Plugin needed for ${config.info.name} is missing.\nPlease click <a style="font-weight: 500;">Download Now</a> to install it.</div>`;
template.content.firstElementChild.querySelector("a").addEventListener("click", this.downloadLibrary);
return template.content.firstElementChild;
}
} : (([Plugin, BDFDB]) => {
var emojiReplicaList;
return class EmojiStatistics extends Plugin {
onLoad () {
this.patchedModules = {
after: {
EmojiPicker: "type"
}
};
this.css = `
.${this.name}-table ${BDFDB.dotCN._emojistatisticsiconcell} {
justify-content: center;
width: 48px;
padding: 0;
}
.${this.name}-table ${BDFDB.dotCN._emojistatisticsnamecell} {
width: 300px;
}
.${this.name}-table ${BDFDB.dotCN._emojistatisticsamountcell} {
width: 120px;
}
${BDFDB.dotCNS.emojipicker + BDFDB.dotCN.emojipickerheader} {
grid-template-columns: auto 24px 24px;
}
${BDFDB.dotCNS.emojipicker + BDFDB.dotCN._emojistatisticsstatisticsbutton} {
width: 24px;
height: 24px;
grid-column: 3/4;
}
`;
}
onStart () {
BDFDB.PatchUtils.forceAllUpdates(this);
}
onStop () {
BDFDB.PatchUtils.forceAllUpdates(this);
}
processEmojiPicker (e) {
this.loadEmojiList();
let [children, index] = BDFDB.ReactUtils.findParent(e.returnvalue, {name: "DiversitySelector"});
if (index > -1) children.push(BDFDB.ReactUtils.createElement(BDFDB.LibraryComponents.TooltipContainer, {
text: this.labels.modal_header,
children: BDFDB.ReactUtils.createElement(BDFDB.LibraryComponents.Clickable, {
className: BDFDB.disCN._emojistatisticsstatisticsbutton,
children: BDFDB.ReactUtils.createElement("div", {
className: BDFDB.disCN.emojipickerdiversityemojiitemimage,
style: {backgroundImage: `url(${BDFDB.LibraryModules.EmojiStateUtils.getURL(BDFDB.LibraryModules.EmojiUtils.convertNameToSurrogate("mag_right"))})`}
})
}),
onClick: _ => {
this.showEmojiInformationModal();
e.instance.props.closePopout();
}
}));
}
loadEmojiList () {
emojiReplicaList = {};
let guilds = BDFDB.LibraryModules.GuildStore.getGuilds();
for (let id in guilds) for (let emoji of BDFDB.LibraryModules.GuildEmojiStore.getGuildEmoji(id)) {
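// the first managed emoji seen with a given name stores false; any later
// occurrence stores true, flagging that name as a duplicate ("copy")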
if (emoji.managed) emojiReplicaList[emoji.name] = emojiReplicaList[emoji.name] != undefined;
}
}
showEmojiInformationModal () {
BDFDB.ModalUtils.open(this, {
size: "LARGE",
header: this.labels.modal_header,
children: BDFDB.ReactUtils.createElement(BDFDB.LibraryComponents.Table, {
className: `${this.name}-table`,
stickyHeader: true,
sortData: false,
columns: [
{key: "icon", cell: "icon", sortKey: "index"},
{key: "name", cell: "name"},
{key: "total", cell: "amount", reverse: true},
{key: "global", cell: "amount", reverse: true},
{key: "local", cell: "amount", reverse: true},
{key: "copies", cell: "amount", reverse: true}
].map(data => ({
key: data.sortKey || data.key,
sort: true,
reverse: data.reverse,
cellClassName: BDFDB.disCN[`_emojistatistics${data.cell}cell`],
renderHeader: _ => this.labels[`modal_titles${data.key}`],
render: item => {
if (data.key == "icon") return BDFDB.ReactUtils.createElement(BDFDB.LibraryComponents.GuildComponents.Guild, {
guild: item.guild,
menu: false,
tooltip: false
});
else if (data.key == "name") return BDFDB.ReactUtils.createElement(BDFDB.LibraryComponents.TextScroller, {
children: item.guild.name
});
else return item[data.key];
}
})),
data: BDFDB.LibraryModules.FolderStore.getFlattenedGuilds().map((guild, i) => {
let itemData = {
index: i,
guild: guild,
global: 0,
local: 0,
copies: 0
}
for (let emoji of BDFDB.LibraryModules.GuildEmojiStore.getGuildEmoji(guild.id)) {
if (emoji.managed) {
itemData.global++;
if (emojiReplicaList[emoji.name]) itemData.copies++;
}
else itemData.local++;
}
itemData.total = itemData.global + itemData.local;
return itemData;
})
})
});
}
setLabelsByLanguage () {
switch (BDFDB.LanguageUtils.getLanguage().id) {
case "bg": // Bulgarian
return {
modal_header: "Статистика на емотикони",
modal_titlescopies: "Копия",
modal_titlesglobal: "Глобален",
modal_titlesicon: "Икона",
modal_titleslocal: "Местен",
modal_titlesname: "Име на сървъра",
modal_titlestotal: "Обща сума"
};
case "da": // Danish
return {
modal_header: "Statistik over emojis",
modal_titlescopies: "Kopier",
modal_titlesglobal: "Global",
modal_titlesicon: "Ikon",
modal_titleslocal: "Lokal",
modal_titlesname: "Server navn",
modal_titlestotal: "Total"
};
case "de": // German
return {
modal_header: "Emoji Statistiken",
modal_titlescopies: "Kopien",
modal_titlesglobal: "Global",
modal_titlesicon: "Symbol",
modal_titleslocal: "Lokal",
modal_titlesname: "Servername",
modal_titlestotal: "Gesamt"
};
case "el": // Greek
return {
modal_header: "Στατιστικά στοιχεία emoji",
modal_titlescopies: "Αντίγραφα",
modal_titlesglobal: "Παγκόσμια",
modal_titlesicon: "Εικόνισμα",
modal_titleslocal: "Τοπικός",
modal_titlesname: "Ονομα διακομιστή",
modal_titlestotal: "Σύνολο"
};
case "es": // Spanish
return {
modal_header: "Estadísticas de emojis",
modal_titlescopies: "Copias",
modal_titlesglobal: "Global",
modal_titlesicon: "Icono",
modal_titleslocal: "Local",
modal_titlesname: "Nombre del servidor",
modal_titlestotal: "Total"
};
case "fi": // Finnish
return {
modal_header: "Emojien tilastot",
modal_titlescopies: "Kopiot",
modal_titlesglobal: "Maailmanlaajuinen",
modal_titlesicon: "Kuvake",
modal_titleslocal: "Paikallinen",
modal_titlesname: "Palvelimen nimi",
modal_titlestotal: "Kaikki yhteensä"
};
case "fr": // French
return {
modal_header: "Statistiques des emojis",
modal_titlescopies: "Copies",
modal_titlesglobal: "Global",
modal_titlesicon: "Icône",
modal_titleslocal: "Local",
modal_titlesname: "Nom du serveur",
modal_titlestotal: "Total"
};
case "hr": // Croatian
return {
modal_header: "Statistika emojija",
modal_titlescopies: "Kopije",
modal_titlesglobal: "Globalno",
modal_titlesicon: "Ikona",
modal_titleslocal: "Lokalno",
modal_titlesname: "Ime poslužitelja",
modal_titlestotal: "Ukupno"
};
case "hu": // Hungarian
return {
modal_header: "A hangulatjelek statisztikája",
modal_titlescopies: "Másolatok",
modal_titlesglobal: "Globális",
modal_titlesicon: "Ikon",
modal_titleslocal: "Helyi",
modal_titlesname: "Szerver név",
modal_titlestotal: "Teljes"
};
case "it": // Italian
return {
modal_header: "Statistiche di emoji",
modal_titlescopies: "Copie",
modal_titlesglobal: "Globale",
modal_titlesicon: "Icona",
modal_titleslocal: "Locale",
modal_titlesname: "Nome del server",
modal_titlestotal: "Totale"
};
case "ja": // Japanese
return {
modal_header: "絵文字の統計",
modal_titlescopies: "コピー",
modal_titlesglobal: "グローバル",
modal_titlesicon: "アイコン",
modal_titleslocal: "地元",
modal_titlesname: "サーバーの名前",
modal_titlestotal: "合計"
};
case "ko": // Korean
return {
modal_header: "이모티콘 통계",
modal_titlescopies: "사본",
modal_titlesglobal: "글로벌",
modal_titlesicon: "상",
modal_titleslocal: "현지",
modal_titlesname: "서버 이름",
modal_titlestotal: "합계"
};
case "lt": // Lithuanian
return {
modal_header: "Emoji statistika",
modal_titlescopies: "Kopijos",
modal_titlesglobal: "Visuotinis",
modal_titlesicon: "Piktograma",
modal_titleslocal: "Vietinis",
modal_titlesname: "Serverio pavadinimas",
modal_titlestotal: "Iš viso"
};
case "nl": // Dutch
return {
modal_header: "Statistieken van emoji's",
modal_titlescopies: "Kopieën",
modal_titlesglobal: "Globaal",
modal_titlesicon: "Icoon",
modal_titleslocal: "Lokaal",
modal_titlesname: "Server naam",
modal_titlestotal: "Totaal"
};
case "no": // Norwegian
return {
modal_header: "Statistikk over emoji",
modal_titlescopies: "Kopier",
modal_titlesglobal: "Global",
modal_titlesicon: "Ikon",
modal_titleslocal: "Lokalt",
modal_titlesname: "Server navn",
modal_titlestotal: "Total"
};
case "pl": // Polish
return {
modal_header: "Statystyki emotikonów",
modal_titlescopies: "Kopie",
modal_titlesglobal: "Światowy",
modal_titlesicon: "Ikona",
modal_titleslocal: "Lokalny",
modal_titlesname: "Nazwa serwera",
modal_titlestotal: "Całkowity"
};
case "pt-BR": // Portuguese (Brazil)
return {
modal_header: "Estatísticas de emojis",
modal_titlescopies: "Cópias",
modal_titlesglobal: "Global",
modal_titlesicon: "Ícone",
modal_titleslocal: "Local",
modal_titlesname: "Nome do servidor",
modal_titlestotal: "Total"
};
case "ro": // Romanian
return {
modal_header: "Statistici ale emoji-urilor",
modal_titlescopies: "Copii",
modal_titlesglobal: "Global",
modal_titlesicon: "Pictogramă",
modal_titleslocal: "Local",
modal_titlesname: "Numele serverului",
modal_titlestotal: "Total"
};
case "ru": // Russian
return {
modal_header: "Статистика смайлов",
modal_titlescopies: "Копии",
modal_titlesglobal: "Глобальный",
modal_titlesicon: "Икона",
modal_titleslocal: "Местный",
modal_titlesname: "Название сервера",
modal_titlestotal: "Всего"
};
case "sv": // Swedish
return {
modal_header: "Statistik för emojis",
modal_titlescopies: "Kopior",
modal_titlesglobal: "Global",
modal_titlesicon: "Ikon",
modal_titleslocal: "Lokal",
modal_titlesname: "Server namn",
modal_titlestotal: "Total"
};
case "th": // Thai
return {
modal_header: "สถิติของอิโมจิ",
modal_titlescopies: "สำเนา",
modal_titlesglobal: "ทั่วโลก",
modal_titlesicon: "ไอคอน",
modal_titleslocal: "ท้องถิ่น",
modal_titlesname: "ชื่อเซิร์ฟเวอร์",
modal_titlestotal: "รวม"
};
case "tr": // Turkish
return {
modal_header: "Emojilerin istatistikleri",
modal_titlescopies: "Kopya sayısı",
modal_titlesglobal: "Küresel",
modal_titlesicon: "Simge",
modal_titleslocal: "Yerel",
modal_titlesname: "Sunucu adı",
modal_titlestotal: "Toplam"
};
case "uk": // Ukrainian
return {
modal_header: "Статистика смайликів",
modal_titlescopies: "Копії",
modal_titlesglobal: "Глобальний",
modal_titlesicon: "Піктограма",
modal_titleslocal: "Місцеві",
modal_titlesname: "Ім'я сервера",
modal_titlestotal: "Разом"
};
case "vi": // Vietnamese
return {
modal_header: "Thống kê biểu tượng cảm xúc",
modal_titlescopies: "Bản sao",
modal_titlesglobal: "Toàn cầu",
modal_titlesicon: "Biểu tượng",
modal_titleslocal: "Địa phương",
modal_titlesname: "Tên máy chủ",
modal_titlestotal: "Toàn bộ"
};
case "zh-CN": // Chinese (China)
return {
modal_header: "表情符号统计",
modal_titlescopies: "份数",
modal_titlesglobal: "全球",
modal_titlesicon: "图标",
modal_titleslocal: "本地",
modal_titlesname: "服务器名称",
modal_titlestotal: "总"
};
case "zh-TW": // Chinese (Taiwan)
return {
modal_header: "表情符號統計",
modal_titlescopies: "份數",
modal_titlesglobal: "全球",
modal_titlesicon: "圖標",
modal_titleslocal: "本地",
modal_titlesname: "服務器名稱",
modal_titlestotal: "總"
};
default: // English
return {
modal_header: "Emoji Statistics",
modal_titlescopies: "Copies",
modal_titlesglobal: "Global",
modal_titlesicon: "Icon",
modal_titleslocal: "Local",
modal_titlesname: "Server Name",
modal_titlestotal: "Total"
};
}
}
};
})(window.BDFDB_Global.PluginUtils.buildPlugin(config));
})();
|
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.utils.dateparse import parse_duration
from payments.models import Payment
class Command(BaseCommand):
help = "Cancels expired Payments"
def add_arguments(self, parser):
parser.add_argument('-n', dest='sim', action='store_true', help="Simulate")
parser.add_argument('-e', '--exp-time', action='store',
help="Expiration time.", default='3 00:00:00')
def handle(self, *args, **options):
now = timezone.now()
expdate = now - parse_duration(options['exp_time'])
self.stdout.write("Now: " + now.isoformat())
self.stdout.write("Exp: " + expdate.isoformat())
expired = Payment.objects.filter(created__lte=expdate, status='new',
paid_amount=0)
for p in expired:
self.stdout.write("Payment #%d (%s): %s" % (p.id, p.user.username, p.created))
if not options['sim']:
p.status = 'cancelled'
p.save()
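# Illustrative invocation (the command name is hypothetical; it is whatever this
# module is called under management/commands/):
#   python manage.py cancel_expired_payments -n -e "1 12:00:00"
# parse_duration() accepts Django's "DD HH:MM:SS" duration syntax, so the
# default '3 00:00:00' expires payments older than three days.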
|
#
# Generated with CustomWizardBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.moao import MOAOBlueprint
class CustomWizardBlueprint(MOAOBlueprint):
""""""
def __init__(self, name="CustomWizard", package_path="sima/custom", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(Attribute("title","string","",default=""))
self.attributes.append(Attribute("selectionType","string","When an object of the given type is selected a popup menu will be enabled,",default=""))
self.attributes.append(Attribute("menuLabel","string","Menu label shown in the popup",default=""))
self.attributes.append(BlueprintAttribute("pages","sima/custom/CustomWizardPage","",True,Dimension("*")))
self.attributes.append(Attribute("inline","boolean","Use inline script or external",default=True))
self.attributes.append(Attribute("path","string","Path to the output file.",default=""))
self.attributes.append(Attribute("finishScript","string","This script will be run when finishing the wizard. Use the variable selection to get hold of the object selected in the navigator",default=""))
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Compare two or more genesisds to each other.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
TestNode behaves as follows:
Configure with a BlockStore and TxStore
on_inv: log the message but don't request
on_headers: log the chain tip
on_pong: update ping response map (for synchronization)
on_getheaders: provide headers via BlockStore
on_getdata: provide blocks via BlockStore
"""
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port, wait_until
import logging
logger = logging.getLogger("TestFramework.comptool")
global mininode_lock
class RejectResult():
"""Outcome that expects rejection of a transaction or block."""
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(P2PInterface):
def __init__(self, block_store, tx_store):
super().__init__()
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self):
self.closed = True
def on_headers(self, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
self.send_message(response)
def on_getdata(self, message):
[self.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[self.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1 or i.type == 1 | (1 << 30): # MSG_TX or MSG_WITNESS_TX
self.tx_request_map[i.hash] = True
elif i.type == 2 or i.type == 2 | (1 << 30): # MSG_BLOCK or MSG_WITNESS_BLOCK
self.block_request_map[i.hash] = True
def on_inv(self, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.send_message(m)
def send_header(self, header):
m = msg_headers()
m.headers.append(header)
self.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
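# For illustration only (block_a and tx_bad are hypothetical objects a concrete
# test would build): a get_tests() generator might yield
#   TestInstance([[block_a, True]])                          # expect acceptance
#   TestInstance([[tx_bad, RejectResult(16)]], sync_every_tx=True)
#   TestInstance([[block_a, None]], sync_every_block=False)  # only compare nodes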
class TestInstance():
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
class TestManager():
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.p2p_connections= []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
node = TestNode(self.block_store, self.tx_store)
node.peer_connect('127.0.0.1', p2p_port(i))
self.p2p_connections.append(node)
def clear_all_connections(self):
self.p2p_connections = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.p2p_connections)
wait_until(disconnected, timeout=10, lock=mininode_lock)
def wait_for_verack(self):
return all(node.wait_for_verack() for node in self.p2p_connections)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.p2p_connections)
wait_until(received_pongs, lock=mininode_lock)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.p2p_connections
)
# --> error if not requested
wait_until(blocks_requested, attempts=20*num_blocks, lock=mininode_lock)
# Send getheaders message
[ c.send_getheaders() for c in self.p2p_connections ]
# Send ping and wait for response -- synchronization hack
[ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.p2p_connections
)
# --> error if not requested
wait_until(transaction_requested, attempts=20*num_events, lock=mininode_lock)
# Get the mempool
[ c.send_mempool() for c in self.p2p_connections ]
# Send ping and wait for response -- synchronization hack
[ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.lastInv.sort() for c in self.p2p_connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.p2p_connections:
if outcome is None:
if c.bestblockhash != self.p2p_connections[0].bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.bestblockhash == blockhash:
return False
if blockhash not in c.block_reject_map:
logger.error('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.block_reject_map[blockhash]):
logger.error('Block rejected with %s instead of expected %s: %064x' % (c.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.bestblockhash == blockhash) != outcome):
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.p2p_connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.lastInv != self.p2p_connections[0].lastInv:
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.lastInv:
return False
if txhash not in c.tx_reject_map:
logger.error('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.tx_reject_map[txhash]):
logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.lastInv) != outcome):
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 0
tests = self.test_generator.get_tests()
for test_instance in tests:
test_number += 1
logger.info("Running test %d: %s line %s" % (test_number, tests.gi_code.co_filename, tests.gi_frame.f_lineno))
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.p2p_connections:
if first_block_with_hash and block.sha256 in c.block_request_map and c.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
# if we expect success, send inv and sync every block
# if we expect failure, just push the block and see what happens.
if outcome == True:
[ c.send_inv(block) for c in self.p2p_connections ]
self.sync_blocks(block.sha256, 1)
else:
[ c.send_message(msg_block(block)) for c in self.p2p_connections ]
[ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
[ c.send_header(block_header) for c in self.p2p_connections ]
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.p2p_connections:
c.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.send_inv(tx) for c in self.p2p_connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
[ c.disconnect_node() for c in self.p2p_connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
|
//Copyright 2012-2016 <>< Charles Lohr Under the MIT/x11 License, NewBSD License or ColorChord License. You Choose.
#include "cnhttp.h"
#include <string.h>
#include <stdio.h>
#include "sha1.h"
struct HTTPConnection HTTPConnections[HTTP_CONNECTIONS];
#define HTDEBUG( x... ) printf( x )
//#define HTDEBUG( x... )
//#define ISKEEPALIVE "keep-alive"
#define ISKEEPALIVE "close"
struct HTTPConnection * curhttp;
uint8_t * curdata;
uint16_t curlen;
uint8_t wsmask[4];
uint8_t wsmaskplace;
void CloseEvent();
void InternalStartHTTP( );
void HTTPHandleInternalCallback( );
void HTTPClose( )
{
//This is dead code, but it is a testament to Charles.
//Do not do this here. Wait for the ESP to tell us the
//socket is successfully closed.
//curhttp->state = HTTP_STATE_NONE;
curhttp->state = HTTP_WAIT_CLOSE;
et_espconn_disconnect( curhttp->socket );
CloseEvent();
}
void HTTPGotData( )
{
uint8_t c;
curhttp->timeout = 0;
while( curlen-- )
{
c = HTTPPOP;
// sendhex2( h->state ); sendchr( ' ' );
switch( curhttp->state )
{
case HTTP_STATE_WAIT_METHOD:
if( c == ' ' )
{
curhttp->state = HTTP_STATE_WAIT_PATH;
curhttp->state_deets = 0;
}
break;
case HTTP_STATE_WAIT_PATH:
curhttp->pathbuffer[curhttp->state_deets++] = c;
if( curhttp->state_deets == MAX_HTTP_PATHLEN )
{
curhttp->state_deets--;
}
if( c == ' ' )
{
//Tricky: If we're a websocket, we need the whole header.
curhttp->pathbuffer[curhttp->state_deets-1] = 0;
curhttp->state_deets = 0;
if( strncmp( (const char*)curhttp->pathbuffer, "/d/ws", 5 ) == 0 )
{
curhttp->state = HTTP_STATE_DATA_WEBSOCKET;
curhttp->state_deets = 0;
}
else
{
curhttp->state = HTTP_STATE_WAIT_PROTO;
}
}
break;
case HTTP_STATE_WAIT_PROTO:
if( c == '\n' )
{
curhttp->state = HTTP_STATE_WAIT_FLAG;
}
break;
case HTTP_STATE_WAIT_FLAG:
if( c == '\n' )
{
curhttp->state = HTTP_STATE_DATA_XFER;
InternalStartHTTP( );
}
else if( c != '\r' )
{
curhttp->state = HTTP_STATE_WAIT_INFLAG;
}
break;
case HTTP_STATE_WAIT_INFLAG:
if( c == '\n' )
{
curhttp->state = HTTP_STATE_WAIT_FLAG;
curhttp->state_deets = 0;
}
break;
case HTTP_STATE_DATA_XFER:
//Ignore any further data?
curlen = 0;
break;
case HTTP_STATE_DATA_WEBSOCKET:
WebSocketGotData( c );
break;
case HTTP_WAIT_CLOSE:
if( curhttp->keep_alive )
{
curhttp->state = HTTP_STATE_WAIT_METHOD;
}
else
{
HTTPClose( );
}
break;
default:
break;
};
}
}
static void DoHTTP( uint8_t timed )
{
switch( curhttp->state )
{
case HTTP_STATE_NONE: //do nothing if no state.
curhttp->send_pending = 0;
break;
case HTTP_STATE_DATA_XFER:
curhttp->send_pending = 1;
if( TCPCanSend( curhttp->socket, 1300 ) ) //TCPDoneSend
{
if( curhttp->is_dynamic )
{
HTTPCustomCallback( );
}
else
{
HTTPHandleInternalCallback( );
}
}
break;
case HTTP_WAIT_CLOSE:
curhttp->send_pending = 0;
if( TCPDoneSend( curhttp->socket ) )
{
if( curhttp->keep_alive )
{
curhttp->state = HTTP_STATE_WAIT_METHOD;
}
else
{
HTTPClose( );
}
}
break;
case HTTP_STATE_DATA_WEBSOCKET:
curhttp->send_pending = 0;
if( TCPCanSend( curhttp->socket, 1300 ) ) //TCPDoneSend
{
WebSocketTickInternal();
}
break;
default:
if( timed )
{
if( curhttp->timeout++ > HTTP_SERVER_TIMEOUT )
{
HTTPClose( );
}
}
}
}
void HTTPTick( uint8_t timed )
{
uint8_t i;
for( i = 0; i < HTTP_CONNECTIONS; i++ )
{
if( curhttp ) { HTDEBUG( "HTTPRXQ\n" ); break; }
curhttp = &HTTPConnections[i];
DoHTTP( timed );
curhttp = 0;
}
}
void HTTPHandleInternalCallback( )
{
uint16_t i, bytestoread;
if( curhttp->isdone )
{
HTTPClose( );
return;
}
if( curhttp->is404 )
{
DataStartPacket();
PushString("HTTP/1.1 404 Not Found\r\nConnection: close\r\n\r\nFile not found.");
EndTCPWrite( curhttp->socket );
curhttp->isdone = 1;
return;
}
if( curhttp->isfirst )
{
		char stto[12]; //room for a 10-digit uint32 plus NUL
uint8_t slen = strlen( curhttp->pathbuffer );
const char * k;
		DataStartPacket();
//TODO: Content Length? MIME-Type?
PushString("HTTP/1.1 200 Ok\r\n");
#ifdef CUSTOM_HTTPHEADER_CODE
CUSTOM_HTTPHEADER_CODE
#endif
if( curhttp->bytesleft < 0xfffffffe )
{
PushString("Connection: "ISKEEPALIVE"\r\nContent-Length: ");
Uint32To10Str( stto, curhttp->bytesleft );
PushBlob( stto, strlen( stto ) );
curhttp->keep_alive = 1;
}
else
{
PushString("Connection: close");
curhttp->keep_alive = 0;
}
PushString( "\r\nContent-Type: " );
//Content-Type?
while( slen && ( curhttp->pathbuffer[--slen] != '.' ) );
k = &curhttp->pathbuffer[slen+1];
if( strcmp( k, "mp3" ) == 0 ) PushString( "audio/mpeg3" );
else if( strcmp( k, "jpg" ) == 0 ) PushString( "image/jpeg" );
else if( strcmp( k, "png" ) == 0 ) PushString( "image/png" );
else if( strcmp( k, "css" ) == 0 ) PushString( "text/css" );
else if( strcmp( k, "js" ) == 0 ) PushString( "text/javascript" );
else if( strcmp( k, "gz" ) == 0 ) PushString( "text/plain\r\nContent-Encoding: gzip\r\nCache-Control: public, max-age=3600" );
else if( curhttp->bytesleft == 0xfffffffe ) PushString( "text/plain" );
else PushString( "text/html" );
PushString( "\r\n\r\n" );
EndTCPWrite( curhttp->socket );
curhttp->isfirst = 0;
return;
}
DataStartPacket();
for( i = 0; i < 2 && curhttp->bytesleft; i++ )
{
int bpt = curhttp->bytesleft;
if( bpt > MFS_SECTOR ) bpt = MFS_SECTOR;
curhttp->bytesleft = MFSReadSector( databuff_ptr, &curhttp->data.filedescriptor );
databuff_ptr += bpt;
}
EndTCPWrite( curhttp->socket );
if( !curhttp->bytesleft )
curhttp->isdone = 1;
}
void InternalStartHTTP( )
{
int32_t clusterno;
int8_t i;
char * path = &curhttp->pathbuffer[0];
if( curhttp->pathbuffer[0] == '/' )
path++;
if( path[0] == 'd' && path[1] == '/' )
{
curhttp->is_dynamic = 1;
curhttp->isfirst = 1;
curhttp->isdone = 0;
curhttp->is404 = 0;
HTTPCustomStart();
return;
}
if( !path[0] )
{
path = "index.html";
}
for( i = 0; path[i]; i++ )
if( path[i] == '?' ) path[i] = 0;
i = MFSOpenFile( path, &curhttp->data.filedescriptor );
curhttp->bytessofar = 0;
if( i < 0 )
{
HTDEBUG( "404(%s)\n", path );
curhttp->is404 = 1;
curhttp->isfirst = 1;
curhttp->isdone = 0;
curhttp->is_dynamic = 0;
}
else
{
curhttp->isfirst = 1;
curhttp->isdone = 0;
curhttp->is404 = 0;
curhttp->is_dynamic = 0;
curhttp->bytesleft = curhttp->data.filedescriptor.filelen;
}
}
void http_disconnetcb( int conn )
{
int r = conn;
if( r>=0 )
{
if( !HTTPConnections[r].is_dynamic ) MFSClose( &HTTPConnections[r].data.filedescriptor );
HTTPConnections[r].state = 0;
}
}
void http_recvcb(int conn, char *pusrdata, unsigned short length)
{
int whichhttp = conn;
//Though it might be possible for this to interrupt the other
	//tick task, I don't know if this is actually a problem.
//I'm adding this back-up-the-register just in case.
if( curhttp ) { HTDEBUG( "Unexpected Race Condition\n" ); return; }
curhttp = &HTTPConnections[whichhttp];
curdata = (uint8_t*)pusrdata;
curlen = length;
HTTPGotData();
	curhttp = 0;
}
int httpserver_connectcb( int socket )
{
int i;
for( i = 0; i < HTTP_CONNECTIONS; i++ )
{
		if( HTTPConnections[i].state == 0 )
		{
			HTTPConnections[i].socket = socket;
			HTTPConnections[i].state = HTTP_STATE_WAIT_METHOD;
			//Reset the per-connection fields for the freshly claimed slot.
			HTTPConnections[i].data.filedescriptor.file = 0;
			HTTPConnections[i].rcb = 0;
			HTTPConnections[i].ccb = 0;
			HTTPConnections[i].rcbDat = 0;
			HTTPConnections[i].corked_data_place = 0;
			break;
		}
	}
	if( i == HTTP_CONNECTIONS )
	{
		return -1;
	}
return i;
}
int URLDecode( char * decodeinto, int maxlen, const char * buf )
{
int i = 0;
for( ; buf && *buf; buf++ )
{
char c = *buf;
if( c == '+' )
{
decodeinto[i++] = ' ';
}
else if( c == '?' || c == '&' )
{
break;
}
else if( c == '%' )
{
if( *(buf+1) && *(buf+2) )
{
decodeinto[i++] = hex2byte( buf+1 );
buf += 2;
}
}
else
{
decodeinto[i++] = c;
}
if( i >= maxlen -1 ) break;
}
decodeinto[i] = 0;
return i;
}
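/* Example (illustrative): URLDecode( out, sizeof(out), "a%20b+c?x=1" )
   writes "a b c" into out (decoding '%20' and '+' to spaces, stopping at '?')
   and returns 5. */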
#ifndef SHA1_HASH_LEN
#define SHA1_HASH_LEN SHA1_DIGEST_SIZE
#endif
void WebSocketGotData( uint8_t c )
{
switch( curhttp->state_deets )
{
case 0:
{
int i = 0;
char inkey[120];
unsigned char hash[SHA1_HASH_LEN];
			SHA1_CTX sha_ctx; //distinct name so it does not shadow the byte parameter c
int inkeylen = 0;
curhttp->is_dynamic = 1;
while( curlen > 20 )
{
curdata++; curlen--;
				if( strncmp( (const char*)curdata, "Sec-WebSocket-Key: ", 19 ) == 0 )
{
break;
}
}
if( curlen <= 21 )
{
HTDEBUG( "No websocket key found.\n" );
curhttp->state = HTTP_WAIT_CLOSE;
return;
}
curdata+= 19;
curlen -= 19;
#define WS_KEY_LEN 36
#define WS_KEY "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
#define WS_RETKEY_SIZEM1 32
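			//Handshake refresher (RFC 6455): the Sec-WebSocket-Accept value is
			//base64( SHA1( client key concatenated with the WS_KEY GUID ) );
			//that is what the code below assembles in inkey, hashes, and
			//stashes at the tail of pathbuffer.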
while( curlen > 1 )
{
uint8_t lc = *(curdata++);
inkey[i] = lc;
curlen--;
if( lc == '\r' )
{
inkey[i] = 0;
break;
}
i++;
if( i >= sizeof( inkey ) - WS_KEY_LEN - 5 )
{
HTDEBUG( "Websocket key too big.\n" );
curhttp->state = HTTP_WAIT_CLOSE;
return;
}
}
if( curlen <= 1 )
{
HTDEBUG( "Invalid websocket key found.\n" );
curhttp->state = HTTP_WAIT_CLOSE;
return;
}
if( i + WS_KEY_LEN + 1 >= sizeof( inkey ) )
{
HTDEBUG( "WSKEY Too Big.\n" );
curhttp->state = HTTP_WAIT_CLOSE;
return;
}
memcpy( &inkey[i], WS_KEY, WS_KEY_LEN + 1 );
i += WS_KEY_LEN;
			SHA1_Init( &sha_ctx );
			SHA1_Update( &sha_ctx, inkey, i );
			SHA1_Final( hash, &sha_ctx );
#if (WS_RETKEY_SIZEM1 > MAX_HTTP_PATHLEN - 10 )
#error MAX_HTTP_PATHLEN too short.
#endif
my_base64_encode( hash, SHA1_HASH_LEN, curhttp->pathbuffer + (MAX_HTTP_PATHLEN-WS_RETKEY_SIZEM1) );
curhttp->bytessofar = 0;
curhttp->bytesleft = 0;
NewWebSocket();
//Respond...
curhttp->state_deets = 1;
break;
}
case 1:
if( c == '\n' ) curhttp->state_deets = 2;
break;
case 2:
if( c == '\r' ) curhttp->state_deets = 3;
else curhttp->state_deets = 1;
break;
case 3:
if( c == '\n' ) curhttp->state_deets = 4;
else curhttp->state_deets = 1;
break;
case 5: //Established connection.
{
//XXX TODO: Seems to malfunction on large-ish packets. I know it has problems with 140-byte payloads.
if( curlen < 5 ) //Can't interpret packet.
break;
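			//First byte: FIN flag and opcode. Note the shifted/truncated
			//arithmetic here instead of the usual (c>>7) and (c&0x0f) masks;
			//e.g. for a close frame c=0x88, (uint8_t)(c<<4) == 128, which is
			//what the opcode == 128 check below relies on.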
uint8_t fin = c & 1;
uint8_t opcode = c << 4;
uint16_t payloadlen = *(curdata++);
curlen--;
if( !(payloadlen & 0x80) )
{
HTDEBUG( "Unmasked packet.\n" );
curhttp->state = HTTP_WAIT_CLOSE;
break;
}
if( opcode == 128 )
{
//Close connection.
//HTDEBUG( "CLOSE\n" );
//curhttp->state = HTTP_WAIT_CLOSE;
//break;
}
payloadlen &= 0x7f;
if( payloadlen == 127 )
{
//Very long payload.
//Not supported.
HTDEBUG( "Unsupported payload packet.\n" );
curhttp->state = HTTP_WAIT_CLOSE;
break;
}
else if( payloadlen == 126 )
{
payloadlen = (curdata[0] << 8) | curdata[1];
curdata += 2;
curlen -= 2;
}
wsmask[0] = curdata[0];
wsmask[1] = curdata[1];
wsmask[2] = curdata[2];
wsmask[3] = curdata[3];
curdata += 4;
curlen -= 4;
wsmaskplace = 0;
//XXX Warning: When packets get larger, they may split the
//websockets packets into multiple parts. We could handle this
			//but at the cost of precious RAM. I am choosing to just drop those
//packets on the floor, and restarting the connection.
if( curlen < payloadlen )
{
extern int cork_binary_rx;
cork_binary_rx = 1;
//HTDEBUG( "Websocket Fragmented. %d %d\n", curlen, payloadlen );
//curhttp->state = HTTP_WAIT_CLOSE;
HTDEBUG( "Websocket Fragmented. %d %d\n", curlen, payloadlen );
curhttp->state = HTTP_WAIT_CLOSE;
return;
}
WebSocketData( payloadlen );
curlen -= payloadlen;
curdata += payloadlen;
break;
}
default:
break;
}
}
void WebSocketTickInternal()
{
switch( curhttp->state_deets )
{
case 4: //Has key full HTTP header, etc. wants response.
		DataStartPacket();
PushString( "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: " );
PushString( curhttp->pathbuffer + (MAX_HTTP_PATHLEN-WS_RETKEY_SIZEM1) );
PushString( "\r\n\r\n" );
EndTCPWrite( curhttp->socket );
curhttp->state_deets = 5;
curhttp->keep_alive = 0;
break;
case 5:
WebSocketTick();
break;
}
}
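//Frame layout sketch (RFC 6455; server-to-client frames are unmasked):
//byte 0 = 0x80 (FIN) | opcode (0x2 = binary), byte 1 = payload length, or
//126 followed by a 16-bit big-endian length. Payloads needing the 127
//(64-bit length) marker are not produced here.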
void WebSocketSend( uint8_t * data, int size )
{
	DataStartPacket();
PushByte( 0x82 ); //0x81 is text.
if( size >= 126 )
{
PushByte( 0x00 | 126 );
PushByte( size>>8 );
PushByte( size&0xff );
}
else
{
PushByte( 0x00 | size );
}
PushBlob( data, size );
EndTCPWrite( curhttp->socket );
curhttp->send_pending = 1;
}
uint8_t WSPOPMASK()
{
uint8_t mask = wsmask[wsmaskplace];
wsmaskplace = (wsmaskplace+1)&3;
return (*curdata++)^(mask);
}
|
# -*- coding: utf-8 -*-
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments setuptools plugin interface. The methods defined
here also work if setuptools isn't installed but they just
return nothing.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
syntax plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugin::
[pygments.filter]
yourfilter = yourfilter:YourFilter
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def iter_entry_points(group_name):
try:
import pkg_resources
except (ImportError, IOError):
return []
return pkg_resources.iter_entry_points(group_name)
def find_plugin_lexers():
for entrypoint in iter_entry_points(LEXER_ENTRY_POINT):
yield entrypoint.load()
def find_plugin_formatters():
for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_styles():
for entrypoint in iter_entry_points(STYLE_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_filters():
for entrypoint in iter_entry_points(FILTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
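# Illustrative registration that these helpers would discover (hypothetical
# project; the entry-point group names are the constants above):
#
#     from setuptools import setup
#
#     setup(
#         name='your-pygments-plugin',
#         py_modules=['yourmodule'],
#         entry_points={
#             'pygments.lexers': ['yourlexer = yourmodule:YourLexer'],
#             'pygments.styles': ['yourstyle = yourstyle:YourStyle'],
#         },
#     )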
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def eval(x,y):
dat=[]
c = 0
for i in [0.3,0.5,0.7,1.0]:
for j in [1.3,1.5,1.7,2.0]:
c+=1
dat.append([c,i,j,x,y,(i-x)*(j-y)])
return dat
def run(xin):
inx = open(xin,'r')
for line in inx:
if line.startswith('x =' ):
x=float(line.split('=')[1])
elif line.startswith('case =' ):
case=line.split('=')[1].strip()
elif line.startswith('auxfile ='):
aux=line.split('=')[1].strip()
iny = open(aux,'r')
for line in iny:
if line.startswith('y ='):
y=float(line.split('=')[1])
dat = eval(x,y)
# here we simulate the hardcoded output file
outf = open('fixed_output'+'.csv','w')
outf.writelines('step,i,j,x,y,poly\n')
for e in dat:
outf.writelines(','.join(str(i) for i in e)+'\n')
outf.close()
if __name__=='__main__':
import sys
args = sys.argv
inp1 = args[args.index('-i')+1] if '-i' in args else None
run(inp1)
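# Illustrative input files for run() (file names are hypothetical):
#   main.inp:  x = 0.5
#              case = demo
#              auxfile = aux.inp
#   aux.inp:   y = 1.5
# Invoked as `python <this_script> -i main.inp`, this writes fixed_output.csv
# with one row per (i, j) grid point and poly = (i-x)*(j-y).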
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import os
import re
import requests
import json
import time
import hashlib
from hashlib import sha256
from urlparse import urljoin
from urllib import quote
import electrum
from electrum import bitcoin
from electrum.bitcoin import *
from electrum.mnemonic import Mnemonic
from electrum import version
from electrum.wallet import Multisig_Wallet, BIP32_Wallet
from electrum.i18n import _
from electrum.plugins import BasePlugin, run_hook, hook
from electrum.util import NotEnoughFunds
# signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server
signing_xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
billing_xpub = "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
SEED_PREFIX = version.SEED_PREFIX_2FA
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class TrustedCoinCosignerClient(object):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/', debug=False):
self.base_url = base_url
self.debug = debug
self.user_agent = user_agent
    def send_request(self, method, relative_url, data=None, headers=None):
        kwargs = {'headers': dict(headers) if headers else {}}
if self.user_agent:
kwargs['headers']['user-agent'] = self.user_agent
if method == 'get' and data:
kwargs['params'] = data
elif method == 'post' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['content-type'] = 'application/json'
url = urljoin(self.base_url, relative_url)
if self.debug:
print '%s %s %s' % (method, url, data)
response = requests.request(method, url, **kwargs)
if self.debug:
print response.text
print
if response.status_code != 200:
message = str(response.text)
if response.headers.get('content-type') == 'application/json':
r = response.json()
if 'message' in r:
message = r['message']
raise TrustedCoinException(message, response.status_code)
if response.headers.get('content-type') == 'application/json':
return response.json()
else:
return response.text
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
"""
        Retrieve a cosigner resource.
        :param id: the id of the cosigner
"""
return self.send_request('get', 'cosigner/%s' % quote(id))
def sign(self, id, transaction, otp):
"""
        Have the cosigner sign a [partially signed] transaction.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
        Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
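# Illustrative use of the cosigner client (the id and OTP values are made up):
#   client = TrustedCoinCosignerClient(user_agent="Electrum/x.y")
#   tos = client.get_terms_of_service()
#   client.auth('0123abcd', '123456')   # raises TrustedCoinException on failure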
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
def __init__(self, storage):
BIP32_Wallet.__init__(self, storage)
self.wallet_type = '2fa'
self.m = 2
self.n = 3
self.is_billing = False
self.billing_info = None
def get_action(self):
xpub1 = self.master_public_keys.get("x1/")
xpub2 = self.master_public_keys.get("x2/")
xpub3 = self.master_public_keys.get("x3/")
if xpub2 is None and not self.storage.get('use_trustedcoin'):
return 'show_disclaimer'
if xpub2 is None:
return 'create_extended_seed'
if xpub3 is None:
return 'create_remote_key'
def make_seed(self):
return Mnemonic('english').make_seed(num_bits=256, prefix=SEED_PREFIX)
def can_sign_without_server(self):
return self.master_private_keys.get('x2/') is not None
def get_max_amount(self, config, inputs, recipient, fee):
from electrum.transaction import Transaction
sendable = sum(map(lambda x:x['value'], inputs))
for i in inputs:
self.add_input_info(i)
xf = self.extra_fee()
if xf and sendable >= xf:
billing_address = self.billing_info['billing_address']
sendable -= xf
outputs = [(TYPE_ADDRESS, recipient, sendable),
(TYPE_ADDRESS, billing_address, xf)]
else:
outputs = [(TYPE_ADDRESS, recipient, sendable)]
dummy_tx = Transaction.from_io(inputs, outputs)
if fee is None:
fee = self.estimate_fee(config, dummy_tx.estimated_size())
amount = max(0, sendable - fee)
return amount, fee
def extra_fee(self):
if self.can_sign_without_server():
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
price = int(self.price_per_tx.get(1))
assert price <= 100000
return price
def make_unsigned_transaction(self, coins, outputs, config,
fixed_fee=None, change_addr=None):
mk_tx = lambda o: BIP32_Wallet.make_unsigned_transaction(
self, coins, o, config, fixed_fee, change_addr)
fee = self.extra_fee()
if fee:
address = self.billing_info['billing_address']
fee_output = (TYPE_ADDRESS, address, fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
# trustedcoin won't charge if the total inputs is
# lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= fee:
raise
self.print_error("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def sign_transaction(self, tx, password):
BIP32_Wallet.sign_transaction(self, tx, password)
if tx.is_complete():
return
if not self.auth_code:
self.print_error("sign_transaction: no auth code")
return
long_user_id, short_id = self.get_user_id()
tx_dict = tx.as_dict()
raw_tx = tx_dict["hex"]
r = server.sign(short_id, raw_tx, self.auth_code)
if r:
raw_tx = r.get('transaction')
tx.update(raw_tx)
self.print_error("twofactor: is complete", tx.is_complete())
def get_user_id(self):
def make_long_id(xpub_hot, xpub_cold):
return bitcoin.sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub_hot = self.master_public_keys["x1/"]
xpub_cold = self.master_public_keys["x2/"]
long_id = make_long_id(xpub_hot, xpub_cold)
short_id = hashlib.sha256(long_id).hexdigest()
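        # i.e. long_id = sha256 over the two sorted xpubs concatenated, and
        # short_id = hex(sha256(long_id)); the server identifies this wallet
        # by short_id alone.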
return long_id, short_id
# Utility functions
def make_xpub(xpub, s):
_, _, _, c, cK = deserialize_xkey(xpub)
cK2, c2 = bitcoin._CKD_pub(cK, c, s)
xpub2 = ("0488B21E" + "00" + "00000000" + "00000000").decode("hex") + c2 + cK2
return EncodeBase58Check(xpub2)
def restore_third_key(wallet):
long_user_id, short_id = wallet.get_user_id()
xpub3 = make_xpub(signing_xpub, long_user_id)
wallet.add_master_public_key('x3/', xpub3)
def make_billing_address(wallet, num):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(billing_xpub, long_id)
_, _, _, c, cK = deserialize_xkey(xpub)
cK, c = bitcoin.CKD_pub(cK, c, num)
address = public_key_to_bc_address( cK )
return address
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
@staticmethod
def is_valid_seed(seed):
return bitcoin.is_new_seed(seed, SEED_PREFIX)
def is_available(self):
return True
def set_enabled(self, wallet, enabled):
wallet.storage.put('use_' + self.name, enabled)
def is_enabled(self):
return True
@hook
def get_additional_fee(self, wallet, tx):
address = wallet.billing_info['billing_address']
for _type, addr, amount in tx.outputs():
if _type == TYPE_ADDRESS and addr == address:
return amount
def request_billing_info(self, wallet):
billing_info = server.get(wallet.get_user_id()[1])
billing_address = make_billing_address(wallet, billing_info['billing_index'])
assert billing_address == billing_info['billing_address']
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
return True
def create_extended_seed(self, wallet, window):
seed = wallet.make_seed()
window.show_and_verify_seed(seed, is_valid=self.is_valid_seed)
password = window.request_password()
wallet.storage.put('seed_version', wallet.seed_version)
wallet.storage.put('use_encryption', password is not None)
words = seed.split()
n = len(words)/2
wallet.add_xprv_from_seed(' '.join(words[0:n]), 'x1/', password)
wallet.add_xpub_from_seed(' '.join(words[n:]), 'x2/')
wallet.storage.write()
msg = [
_("Your wallet file is: %s.")%os.path.abspath(wallet.storage.path),
_("You need to be online in order to complete the creation of "
"your wallet. If you generated your seed on an offline "
'computer, click on "%s" to close this window, move your '
"wallet file to an online computer, and reopen it with "
"Electrum.") % _('Cancel'),
_('If you are online, click on "%s" to continue.') % _('Next')
]
msg = '\n\n'.join(msg)
self.confirm(window, msg)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def on_restore_wallet(self, wallet, wizard):
assert isinstance(wallet, self.wallet_class)
seed = wizard.request_seed(RESTORE_MSG, is_valid=self.is_valid_seed)
password = wizard.request_password()
wallet.add_seed(seed, password)
words = seed.split()
n = len(words)/2
wallet.add_xprv_from_seed(' '.join(words[0:n]), 'x1/', password)
wallet.add_xprv_from_seed(' '.join(words[n:]), 'x2/', password)
restore_third_key(wallet)
wallet.create_main_account()
return wallet
def create_remote_key(self, wallet, window):
email = self.accept_terms_of_use(window)
xpub_hot = wallet.master_public_keys["x1/"]
xpub_cold = wallet.master_public_keys["x2/"]
# Generate third key deterministically.
long_user_id, short_id = wallet.get_user_id()
xpub3 = make_xpub(signing_xpub, long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub_hot, xpub_cold, email)
except socket.error:
window.show_message('Server not reachable, aborting')
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
raise e
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
window.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
try:
assert _id == short_id, ("user id error", _id, short_id)
assert xpub3 == _xpub3, ("xpub3 error", xpub3, _xpub3)
except Exception as e:
window.show_message(str(e))
return
if self.setup_google_auth(window, short_id, otp_secret):
wallet.add_master_public_key('x3/', xpub3)
wallet.create_main_account()
|
"""Support for Buienradar.nl weather service."""
import logging
from buienradar.constants import (
CONDCODE,
CONDITION,
DATETIME,
MAX_TEMP,
MIN_TEMP,
RAIN,
WINDAZIMUTH,
WINDSPEED,
)
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
from homeassistant.helpers import config_validation as cv
# Reuse data and API logic from the sensor implementation
from .const import DEFAULT_TIMEFRAME
from .util import BrData
_LOGGER = logging.getLogger(__name__)
DATA_CONDITION = "buienradar_condition"
CONF_FORECAST = "forecast"
CONDITION_CLASSES = {
"cloudy": ["c", "p"],
"fog": ["d", "n"],
"hail": [],
"lightning": ["g"],
"lightning-rainy": ["s"],
"partlycloudy": ["b", "j", "o", "r"],
"pouring": ["l", "q"],
"rainy": ["f", "h", "k", "m"],
"snowy": ["u", "i", "v", "t"],
"snowy-rainy": ["w"],
"sunny": ["a"],
"windy": [],
"windy-variant": [],
"exceptional": [],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_FORECAST, default=True): cv.boolean,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the buienradar platform."""
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
if None in (latitude, longitude):
_LOGGER.error("Latitude or longitude not set in Home Assistant config")
return False
coordinates = {CONF_LATITUDE: float(latitude), CONF_LONGITUDE: float(longitude)}
# create weather data:
data = BrData(hass, coordinates, DEFAULT_TIMEFRAME, None)
# create weather device:
_LOGGER.debug("Initializing buienradar weather: coordinates %s", coordinates)
# create condition helper
if DATA_CONDITION not in hass.data:
cond_keys = [str(chr(x)) for x in range(97, 123)]
hass.data[DATA_CONDITION] = dict.fromkeys(cond_keys)
for cond, condlst in CONDITION_CLASSES.items():
for condi in condlst:
hass.data[DATA_CONDITION][condi] = cond
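        # e.g. after this inversion hass.data[DATA_CONDITION]["a"] == "sunny";
        # condition codes with no mapping stay None.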
async_add_entities([BrWeather(data, config)])
# schedule the first update in 1 minute from now:
await data.schedule_update(1)
class BrWeather(WeatherEntity):
"""Representation of a weather condition."""
def __init__(self, data, config):
"""Initialise the platform with a data instance and station name."""
self._stationname = config.get(CONF_NAME)
self._forecast = config.get(CONF_FORECAST)
self._data = data
@property
def attribution(self):
"""Return the attribution."""
return self._data.attribution
@property
def name(self):
"""Return the name of the sensor."""
return (
self._stationname or f"BR {self._data.stationname or '(unknown station)'}"
)
@property
def condition(self):
"""Return the current condition."""
if self._data and self._data.condition:
ccode = self._data.condition.get(CONDCODE)
if ccode:
conditions = self.hass.data.get(DATA_CONDITION)
if conditions:
return conditions.get(ccode)
@property
def temperature(self):
"""Return the current temperature."""
return self._data.temperature
@property
def pressure(self):
"""Return the current pressure."""
return self._data.pressure
@property
def humidity(self):
"""Return the name of the sensor."""
return self._data.humidity
@property
def visibility(self):
"""Return the current visibility in km."""
if self._data.visibility is None:
return None
return round(self._data.visibility / 1000, 1)
@property
def wind_speed(self):
"""Return the current windspeed in km/h."""
if self._data.wind_speed is None:
return None
return round(self._data.wind_speed * 3.6, 1)
@property
def wind_bearing(self):
"""Return the current wind bearing (degrees)."""
return self._data.wind_bearing
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def forecast(self):
"""Return the forecast array."""
if not self._forecast:
return None
fcdata_out = []
cond = self.hass.data[DATA_CONDITION]
if not self._data.forecast:
return None
for data_in in self._data.forecast:
# remap keys from external library to
# keys understood by the weather component:
            condcode = data_in.get(CONDITION, {}).get(CONDCODE)
data_out = {
ATTR_FORECAST_TIME: data_in.get(DATETIME),
ATTR_FORECAST_CONDITION: cond[condcode],
ATTR_FORECAST_TEMP_LOW: data_in.get(MIN_TEMP),
ATTR_FORECAST_TEMP: data_in.get(MAX_TEMP),
ATTR_FORECAST_PRECIPITATION: data_in.get(RAIN),
ATTR_FORECAST_WIND_BEARING: data_in.get(WINDAZIMUTH),
ATTR_FORECAST_WIND_SPEED: round(data_in.get(WINDSPEED) * 3.6, 1),
}
fcdata_out.append(data_out)
return fcdata_out
|
/*!
* YieldFarming
* Boilerplate for a Static website using EJS and SASS
* https://yieldfarming.info
* @author Jongseung Lim -- https://yieldfarming.info
* Copyright 2021. MIT Licensed.
*/
$(function() {
consoleInit();
start(main);
});
const ALCX_POOL_ABI = [{"inputs":[{"internalType":"contract IMintableERC20","name":"_reward","type":"address"},{"internalType":"address","name":"_governance","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"governance","type":"address"}],"name":"GovernanceUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"pendingGovernance","type":"address"}],"name":"PendingGovernanceUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"poolId","type":"uint256"},{"indexed":true,"internalType":"contract IERC20","name":"token","type":"address"}],"name":"PoolCreated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"poolId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"rewardWeight","type":"uint256"}],"name":"PoolRewardWeightUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"rewardRate","type":"uint256"}],"name":"RewardRateUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"user","type":"address"},{"indexed":true,"internalType":"uint256","name":"poolId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"TokensClaimed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"user","type":"address"},{"indexed":true,"internalType":"uint256","name":"poolId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"TokensDeposited","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"user","type":"address"},{"indexed":true,"internalType":"uint256","name":"poolId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"TokensWithdrawn","type":"event"},{"inputs":[],"name":"acceptGovernance","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_poolId","type":"uint256"}],"name":"claim","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"contract IERC20","name":"_token","type":"address"}],"name":"createPool","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_poolId","type":"uint256"},{"internalType":"uint256","name":"_depositAmount","type":"uint256"}],"name":"deposit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_poolId","type":"uint256"}],"name":"exit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_poolId","type":"uint256"}],"name":"getPoolRewardRate","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_poolId","type":"uint256"}],"name":"getPoolRewardWeight","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_poolId","type":"uint256"}],"name":"getPoolToken","outputs":[{"internalType":"contract IERC20","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_poolId","type":"uint256"}],"name":"getPoolTotalDeposited","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_account","type":"address"},{"internalType":"uint256","name":"_poolId","type":"uint256"}],"name":"getStakeTotalDeposited","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_account","type":"address"},{"internalType":"uint256","name":"_poolId","type":"uint256"}],"name":"getStakeTotalUnclaimed","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"governance","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"pendingGovernance","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"poolCount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"reward","outputs":[{"internalType":"contract IMintableERC20","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"rewardRate","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_pendingGovernance","type":"address"}],"name":"setPendingGovernance","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_rewardRate","type":"uint256"}],"name":"setRewardRate","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256[]","name":"_rewardWeights","type":"uint256[]"}],"name":"setRewardWeights","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"contract IERC20","name":"","type":"address"}],"name":"tokenPoolIds","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalRewardWeight","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_poolId","type":"uint256"},{"internalType":"uint256","name":"_withdrawAmount","type":"uint256"}],"name":"withdraw","outputs":[],"stateMutability":"nonpayable","type":"function"}]
async function getAlcxPoolInfo(App, pool, poolIndex) {
const [token, rewardRate, totalDeposited, userStaked, userUnclaimed] =
await App.ethcallProvider.all([
pool.getPoolToken(poolIndex), pool.getPoolRewardRate(poolIndex),
pool.getPoolTotalDeposited(poolIndex), pool.getStakeTotalDeposited(App.YOUR_ADDRESS, poolIndex),
pool.getStakeTotalUnclaimed(App.YOUR_ADDRESS, poolIndex)
]);
const poolToken = await getToken(App, token, pool.address);
return {
poolToken,
rewardsPerWeek : rewardRate / 1e18 * 604800 / 13.5,
totalDeposited : totalDeposited / 1e18,
userStaked : userStaked / 1e18,
userUnclaimed : userUnclaimed / 1e18
}
}
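// Note on rewardsPerWeek above: getPoolRewardRate returns ALCX wei per block,
// so the conversion divides by 1e18, scales by 604800 seconds per week, and
// assumes an average Ethereum block time of 13.5 seconds. For example, a rate
// of 0.5 ALCX per block gives 0.5 * 604800 / 13.5 = 22400 ALCX per week.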
const alcxContract_deposit = async function(alcxAbi, alcxAddress, poolIndex, stakeTokenAddr, App) {
const signer = App.provider.getSigner()
const STAKING_TOKEN = new ethers.Contract(stakeTokenAddr, ERC20_ABI, signer)
const ALCX_CONTRACT = new ethers.Contract(alcxAddress, alcxAbi, signer)
const currentTokens = await STAKING_TOKEN.balanceOf(App.YOUR_ADDRESS)
const allowedTokens = await STAKING_TOKEN.allowance(App.YOUR_ADDRESS, alcxAddress)
let allow = Promise.resolve()
if (allowedTokens / 1e18 < currentTokens / 1e18) {
showLoading()
allow = STAKING_TOKEN.approve(alcxAddress, ethers.constants.MaxUint256)
.then(function(t) {
return App.provider.waitForTransaction(t.hash)
})
.catch(function() {
hideLoading()
alert('Try resetting your approval to 0 first')
})
}
if (currentTokens / 1e18 > 0) {
showLoading()
allow
.then(async function() {
ALCX_CONTRACT.deposit(poolIndex, currentTokens, {gasLimit: 500000})
.then(function(t) {
App.provider.waitForTransaction(t.hash).then(function() {
hideLoading()
})
})
.catch(function() {
hideLoading()
_print('Something went wrong.')
})
})
.catch(function() {
hideLoading()
_print('Something went wrong.')
})
} else {
alert('You have no tokens to stake!!')
}
}
const alcxContract_withdraw = async function(alcxAbi, alcxAddress, poolIndex, App) {
const signer = App.provider.getSigner()
const ALCX_CONTRACT = new ethers.Contract(alcxAddress, alcxAbi, signer)
const currentStakedAmount = await ALCX_CONTRACT.getStakeTotalDeposited(App.YOUR_ADDRESS, poolIndex)
if (currentStakedAmount / 1e18 > 0) {
showLoading()
const t = await ALCX_CONTRACT.withdraw(poolIndex, currentStakedAmount, {gasLimit: 500000});
return App.provider.waitForTransaction(t.hash);
}
}
const alcxContract_claim = async function(alcxAbi, alcxAddress, poolIndex, App) {
const signer = App.provider.getSigner()
const ALCX_CONTRACT = new ethers.Contract(alcxAddress, alcxAbi, signer)
const earnedTokenAmount = await ALCX_CONTRACT.getStakeTotalUnclaimed(App.YOUR_ADDRESS, poolIndex) / 1e18
if (earnedTokenAmount > 0) {
showLoading()
const t = await ALCX_CONTRACT.claim(poolIndex, {gasLimit: 500000});
return App.provider.waitForTransaction(t.hash);
}
}
function printAlcxContractLinks(App, alcxAbi, alcxAddr, poolIndex, poolAddress,
rewardTokenTicker, stakeTokenTicker, unstaked, userStaked, pendingRewardTokens, rewardTokenPrice) {
let fixedDecimals = 2;
const approveAndDeposit = async function() {
return alcxContract_deposit(alcxAbi, alcxAddr, poolIndex, poolAddress, App)
}
const withdraw = async function() {
return alcxContract_withdraw(alcxAbi, alcxAddr, poolIndex, App)
}
const claim = async function() {
    return alcxContract_claim(alcxAbi, alcxAddr, poolIndex, App)
}
const etherscanUrl = `<a href='https://etherscan.io/address/${poolAddress}' target='_blank'>Staking Contract</a>`;
_print(etherscanUrl);
_print_link(`Deposit ${unstaked.toFixed(fixedDecimals)} ${stakeTokenTicker}`, approveAndDeposit)
_print_link(`Withdraw ${userStaked.toFixed(fixedDecimals)} ${stakeTokenTicker}`, withdraw)
_print_link(`Claim ${pendingRewardTokens.toFixed(fixedDecimals)} ${rewardTokenTicker} ($${formatMoney(pendingRewardTokens*rewardTokenPrice)})`, claim)
_print(`Staking or unstaking also claims rewards.`)
_print(`\n`);
}
function printAlcxPool(App, alcxAbi, alcxAddr, prices, poolInfo, poolIndex, poolPrices,
rewardTokenTicker, rewardTokenAddress) {
const rewardPrice = getParameterCaseInsensitive(prices, rewardTokenAddress)?.usd;
poolPrices.print_price();
printAPR(rewardTokenTicker, rewardPrice, poolInfo.rewardsPerWeek, poolPrices.stakeTokenTicker,
poolPrices.staked_tvl, poolInfo.userStaked, poolPrices.price, 2);
if (poolInfo.userStaked > 0) poolPrices.print_contained_price(poolInfo.userStaked);
printAlcxContractLinks(App, alcxAbi, alcxAddr, poolIndex, poolInfo.poolToken.address,
rewardTokenTicker, poolPrices.stakeTokenTicker, poolInfo.poolToken.unstaked,
poolInfo.userStaked, poolInfo.userUnclaimed, rewardPrice);
}
async function main() {
const App = await init_ethers();
const tokens = {}
_print(`Initialized ${App.YOUR_ADDRESS}\n`);
_print("Reading smart contracts...\n");
const ALCX_POOL_ADDRESS = "0xab8e74017a8cc7c15ffccd726603790d26d7deca";
const rewardTokenTicker = "ALCX";
const rewardTokenAddress = "0xdbdb4d16eda451d0503b854cf79d55697f90c8df";
const ALCX_POOL = new ethcall.Contract(ALCX_POOL_ADDRESS, ALCX_POOL_ABI);
const [poolCount] = await App.ethcallProvider.all([ALCX_POOL.poolCount()]);
const poolInfos = await Promise.all([...Array(poolCount / 1).keys()].map(async (x) =>
await getAlcxPoolInfo(App, ALCX_POOL, x)));
var tokenAddresses = [].concat.apply([], poolInfos.filter(x => x.poolToken).map(x => x.poolToken.tokens));
var prices = await lookUpTokenPrices(tokenAddresses);
prices["0xBC6DA0FE9aD5f3b0d58160288917AA56653660E9"] = { usd : 1 } //temporary of course
await Promise.all(tokenAddresses.map(async (address) => {
tokens[address] = await getToken(App, address, ALCX_POOL_ADDRESS);
}));
const poolPrices = poolInfos.map(poolInfo => getPoolPrices(tokens, prices, poolInfo.poolToken));
_print("Finished reading smart contracts.\n");
  for (let i = 0; i < poolCount; i++) {
    if (i != 3) { // skip pool 3 (the TIME pool)
printAlcxPool(App, ALCX_POOL_ABI, ALCX_POOL_ADDRESS, prices,
poolInfos[i], i, poolPrices[i], rewardTokenTicker, rewardTokenAddress);
}
}
hideLoading();
}
|
document.querySelector(".hamburguer").addEventListener("click", () =>
document.querySelector(".container").classList.toggle("show-menu")
);
document.querySelector("#qtde").addEventListener("change", atualizarPreco)
document.querySelector("#js").addEventListener("change", atualizarPreco)
document.querySelector("#layout-sim").addEventListener("change", atualizarPreco)
document.querySelector("#layout-nao").addEventListener("change", atualizarPreco)
document.querySelector("#prazo").addEventListener("change", function() {
const prazo = document.querySelector("#prazo").value
document.querySelector("label[for=prazo]").innerHTML = `Prazo: ${prazo} semanas`
atualizarPreco()
})
function atualizarPreco(){
  const qtde = document.querySelector("#qtde").value
  const temJS = document.querySelector("#js").checked
  const incluiLayout = document.querySelector("#layout-sim").checked
  const prazo = document.querySelector("#prazo").value
  let preco = qtde * 100; // base price scales with the quantity
  if (temJS) preco *= 1.1 // JavaScript behavior adds 10%
  if (incluiLayout) preco += 500 // layout design is a flat 500
  // Urgency fee: shorter deadlines raise the multiplier, e.g. 1 week
  // gives 1.9x, 10 weeks gives 1.0x, and longer deadlines discount.
  let taxaUrgencia = 1 - prazo*0.1
  preco *= 1 + taxaUrgencia
  document.querySelector("#preco").innerHTML = `R$ ${preco.toFixed(2)}`
}
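// Worked example (illustrative inputs): qtde = 5, JS checked, layout included,
// prazo = 4 weeks:
//   5 * 100 = 500; * 1.1 = 550; + 500 = 1050;
//   taxaUrgencia = 1 - 0.4 = 0.6, so preco = 1050 * 1.6 = 1680.00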
|
module.exports.publishPrice = function(productId,productPrice)
{
    var kafka = require('kafka-node')
    var Producer = kafka.Producer
    var KeyedMessage = kafka.KeyedMessage
    var client = new kafka.KafkaClient()
    var producer = new Producer(client)
    // Key the message by product id so updates for the same product
    // land in the same partition.
    var record = new KeyedMessage(productId,productPrice)
    var payloads = [
        {
            topic : 'product',
            messages : record
        }
    ];
    producer.on('ready', function () {
        producer.send(payloads, function (err, data) {
            // Values returned from this callback are discarded, so log
            // instead to make failures visible.
            if(err)
            {
                console.error('Failed to publish price:', err);
            }
            else
            {
                console.log('Published price:', data);
            }
        });
    });
    producer.on('error', function (err) { console.error('Kafka producer error:', err) })
}
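// A minimal callback-based variant (a sketch, not part of the original API):
// publishPrice above cannot report success or failure to its caller, so a
// hypothetical publishPriceAsync could accept a Node-style callback instead.
module.exports.publishPriceAsync = function(productId, productPrice, done)
{
    var kafka = require('kafka-node')
    var producer = new kafka.Producer(new kafka.KafkaClient())
    var payloads = [
        { topic : 'product', messages : new kafka.KeyedMessage(productId, productPrice) }
    ];
    producer.on('ready', function () {
        producer.send(payloads, done) // done(err, data)
    });
    producer.on('error', done)
}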
|
from datetime import datetime
from flask import current_app
from website import db, login_manager
from flask_login import UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
image_file = db.Column(db.String(20), nullable=False, default='default.png')
    password = db.Column(db.String(60), nullable=False)  # 60 chars fits a bcrypt hash
posts = db.relationship('Post', backref='author', lazy=True)
def get_reset_token(self, expires_sec=600):
s = Serializer(current_app.config['SECRET_KEY'], expires_sec)
return s.dumps({'user_id': self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        except Exception:  # invalid or expired token
            return None
return User.query.get(user_id)
def __repr__(self):
return f"User('{self.name}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.now)
content = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
image_file = db.Column(db.String(20), nullable=False, default='default.png')
def __repr__(self):
return f"Post('{self.title}', '{self.date_posted}', '{self.image_file}')"
|
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
num_classes = 2
img_height, img_width = 32, 32
channel = 3
GPU = True
torch.manual_seed(0)
class Mynet(torch.nn.Module):
def __init__(self):
super(Mynet, self).__init__()
self.enc1 = torch.nn.Conv2d(channel, 32, kernel_size=3, padding=1)
self.enc2 = torch.nn.Conv2d(32, 16, kernel_size=3, padding=1)
self.dec2 = torch.nn.ConvTranspose2d(16, 32, kernel_size=2, stride=2)
self.dec1 = torch.nn.ConvTranspose2d(32, channel, kernel_size=2, stride=2)
def forward(self, x):
x = self.enc1(x)
x = F.max_pool2d(x, 2)
x = self.enc2(x)
x = F.max_pool2d(x, 2)
x = self.dec2(x)
x = self.dec1(x)
return x
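# Shape walkthrough for a 32x32 input (a sketch matching the layers above):
# enc1 + pool: (N, 3, 32, 32) -> (N, 32, 16, 16); enc2 + pool: -> (N, 16, 8, 8)
# dec2 (stride 2): -> (N, 32, 16, 16); dec1 (stride 2): -> (N, 3, 32, 32)
# e.g. Mynet()(torch.zeros(1, channel, img_height, img_width)).shape == (1, 3, 32, 32)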
import pickle
import os
def load_cifar10():
    path = 'cifar-10-batches-py'
    if not os.path.exists(path):
        # fetch and unpack the official CIFAR-10 python archive
        archive = 'cifar-10-python.tar.gz'
        os.system("wget https://www.cs.toronto.edu/~kriz/{}".format(archive))
        os.system("tar xvf {}".format(archive))
# train data
train_x = np.ndarray([0, 32, 32, 3], dtype=np.float32)
    train_y = np.ndarray([0, ], dtype=np.int64)  # np.int is removed in modern NumPy
for i in range(1, 6):
data_path = path + '/data_batch_{}'.format(i)
with open(data_path, 'rb') as f:
datas = pickle.load(f, encoding='bytes')
print(data_path)
x = datas[b'data']
x = x.reshape(x.shape[0], 3, 32, 32)
x = x.transpose(0, 2, 3, 1)
train_x = np.vstack((train_x, x))
        y = np.array(datas[b'labels'], dtype=np.int64)
train_y = np.hstack((train_y, y))
# test data
data_path = path + '/test_batch'
with open(data_path, 'rb') as f:
datas = pickle.load(f, encoding='bytes')
print(data_path)
x = datas[b'data']
x = x.reshape(x.shape[0], 3, 32, 32)
test_x = x.transpose(0, 2, 3, 1)
    test_y = np.array(datas[b'labels'], dtype=np.int64)
return train_x, train_y, test_x, test_y
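# Layout note: each CIFAR-10 batch stores 'data' as a uint8 array of shape
# (N, 3072), with the 3072 bytes ordered channel-first (R, G, B planes), which
# is why the code reshapes to (N, 3, 32, 32) and transposes to (N, 32, 32, 3).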
# train
def train():
# GPU
device = torch.device("cuda" if GPU else "cpu")
# model
model = Mynet().to(device)
opt = torch.optim.Adam(model.parameters(), lr=0.001)
model.train()
train_x, train_y, test_x, test_y = load_cifar10()
xs = train_x / 255
xs = xs.transpose(0, 3, 1, 2)
# training
mb = 512
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
for i in range(5000):
if mbi + mb > len(xs):
mb_ind = train_ind[mbi:]
np.random.shuffle(train_ind)
mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
mbi = mb - (len(xs) - mbi)
else:
mb_ind = train_ind[mbi: mbi+mb]
mbi += mb
x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
t = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
opt.zero_grad()
y = model(x)
loss = torch.nn.MSELoss()(y, t)
loss.backward()
opt.step()
        #pred = y.argmax(dim=1, keepdim=True)
        # exact float equality is meaningless for reconstructions, so report the loss only
        if (i+1) % 100 == 0:
            print("iter >>", i+1, ',loss >>', loss.item())
torch.save(model.state_dict(), 'cnn.pt')
# test
def test():
device = torch.device("cuda" if GPU else "cpu")
    model = Mynet().to(device)
    model.load_state_dict(torch.load('cnn.pt'))
    model.eval()
train_x, train_y, test_x, test_y = load_cifar10()
xs = test_x / 255
xs = xs.transpose(0, 3, 1, 2)
for i in range(10):
x = xs[i]
x = np.expand_dims(x, axis=0)
x = torch.tensor(x, dtype=torch.float).to(device)
pred = model(x)
pred = pred.view(channel, img_height, img_width)
pred = pred.detach().cpu().numpy()
pred -= pred.min()
pred /= pred.max()
pred = pred.transpose(1,2,0)
_x = x.detach().cpu().numpy()[0]
#_x = (_x + 1) / 2
if channel == 1:
pred = pred[..., 0]
_x = _x[0]
cmap = 'gray'
else:
_x = _x.transpose(1,2,0)
cmap = None
plt.subplot(1,2,1)
plt.title("input")
plt.imshow(_x, cmap=cmap)
plt.subplot(1,2,2)
plt.title("predicted")
plt.imshow(pred, cmap=cmap)
plt.show()
def arg_parse():
    parser = argparse.ArgumentParser(description='Convolutional autoencoder implemented with PyTorch')
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='test', action='store_true')
args = parser.parse_args()
return args
# main
if __name__ == '__main__':
args = arg_parse()
if args.train:
train()
if args.test:
test()
if not (args.train or args.test):
print("please select train or test flag")
print("train: python main.py --train")
print("test: python main.py --test")
print("both: python main.py --train --test")
|
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING, Iterable, cast
from ezdxf import ARROWS
from ezdxf.entities import factory
from ezdxf.lldxf.const import BYBLOCK
from ezdxf.math import Vec3, fit_points_to_cad_cv
if TYPE_CHECKING:
from ezdxf.eztypes import DXFGraphic, Leader, Insert, Spline
def virtual_entities(leader: 'Leader') -> Iterable['DXFGraphic']:
# Source: https://atlight.github.io/formats/dxf-leader.html
# GDAL: DXF LEADER implementation:
# https://github.com/OSGeo/gdal/blob/master/gdal/ogr/ogrsf_frmts/dxf/ogrdxf_leader.cpp
# LEADER DXF Reference:
# http://help.autodesk.com/view/OARX/2018/ENU/?guid=GUID-396B2369-F89F-47D7-8223-8B7FB794F9F3
from ezdxf.entities import DimStyleOverride
assert leader.dxftype() == 'LEADER'
vertices = Vec3.list(leader.vertices) # WCS
    if len(vertices) < 2:
        # Such LEADER entities should be removed by the auditor at loading,
        # or ignored at export if created by an ezdxf user (logged).
        raise ValueError('LEADER requires at least two vertices.')
dxf = leader.dxf
doc = leader.doc
# Some default values depend on the measurement system
# 0/1 = imperial/metric
if doc:
measurement = doc.header.get('$MEASUREMENT', 0)
else:
measurement = 0
# Set default styling attributes values:
dimtad = 1
dimgap = 0.625 if measurement else 0.0625
dimscale = 1.0
dimclrd = dxf.color
dimltype = dxf.linetype
dimlwd = dxf.lineweight
override = None
if doc:
# get styling attributes from associated DIMSTYLE and/or XDATA override
override = DimStyleOverride(cast('Dimension', leader))
dimtad = override.get('dimtad', dimtad)
dimgap = override.get('dimgap', dimgap)
dimscale = override.get('dimscale', dimscale)
if dimscale == 0.0: # special but unknown meaning
dimscale = 1.0
dimclrd = override.get('dimclrd', dimclrd)
dimltype = override.get('dimltype', dimltype)
dimlwd = override.get('dimlwd', dimlwd)
text_width = dxf.text_width
hook_line_vector = Vec3(dxf.horizontal_direction)
has_text_annotation = dxf.annotation_type == 0
if has_text_annotation and dxf.has_hookline:
if dxf.hookline_direction == 1:
hook_line_vector = -hook_line_vector
if dimtad != 0 and text_width > 0:
hook_line = hook_line_vector * (dimgap * dimscale + text_width)
vertices.append(vertices[-1] + hook_line)
dxfattribs = leader.graphic_properties()
dxfattribs['color'] = dimclrd
dxfattribs['linetype'] = dimltype
dxfattribs['lineweight'] = dimlwd
if dxfattribs.get('color') == BYBLOCK:
dxfattribs['color'] = dxf.block_color
if dxf.path_type == 1: # Spline
start_tangent = (vertices[1] - vertices[0])
end_tangent = (vertices[-1] - vertices[-2])
bspline = fit_points_to_cad_cv(
vertices,
degree=3,
tangents=[start_tangent, end_tangent]
)
spline = cast('Spline', factory.new('SPLINE', doc=doc))
spline.apply_construction_tool(bspline)
yield spline
else:
attribs = dict(dxfattribs)
prev = vertices[0]
for vertex in vertices[1:]:
attribs['start'] = prev
attribs['end'] = vertex
yield factory.new(dxftype='LINE', dxfattribs=attribs, doc=doc)
prev = vertex
if dxf.has_arrowhead and override:
arrow_name = override.get('dimldrblk', '')
if arrow_name is None:
return
size = override.get('dimasz', 2.5 if measurement else 0.1875) * dimscale
rotation = (vertices[0] - vertices[1]).angle_deg
if doc and arrow_name in doc.blocks:
dxfattribs.update({
'name': arrow_name,
'insert': vertices[0],
'rotation': rotation,
'xscale': size,
'yscale': size,
'zscale': size,
})
# create a virtual block reference
insert = cast('Insert',
factory.new('INSERT', dxfattribs=dxfattribs, doc=doc))
yield from insert.virtual_entities()
else: # render standard arrows
yield from ARROWS.virtual_entities(
name=arrow_name,
insert=vertices[0],
size=size,
rotation=rotation,
dxfattribs=dxfattribs,
)
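# Usage sketch (assumes an ezdxf document with LEADER entities in its
# modelspace; 'doc' and 'msp' are illustrative names):
#   msp = doc.modelspace()
#   for leader in msp.query('LEADER'):
#       for entity in virtual_entities(leader):
#           msp.add_entity(entity)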
|
# sqlalchemy/sql/events.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .base import SchemaEventTarget
from .. import event
class DDLEvents(event.Events):
"""
Define event listeners for schema objects,
that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget`
subclasses, including :class:`_schema.MetaData`, :class:`_schema.Table`,
:class:`_schema.Column`.
:class:`_schema.MetaData` and :class:`_schema.Table` support events
specifically regarding when CREATE and DROP
DDL is emitted to the database.
Attachment events are also provided to customize
behavior whenever a child schema element is associated
with a parent, such as, when a :class:`_schema.Column` is associated
with its :class:`_schema.Table`, when a
:class:`_schema.ForeignKeyConstraint`
is associated with a :class:`_schema.Table`, etc.
Example using the ``after_create`` event::
from sqlalchemy import event
from sqlalchemy import text
from sqlalchemy import Table, Column, MetaData, Integer
m = MetaData()
some_table = Table('some_table', m, Column('data', Integer))
def after_create(target, connection, **kw):
connection.execute(text(
"ALTER TABLE %s SET name=foo_%s" % (target.name, target.name)
))
event.listen(some_table, "after_create", after_create)
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.DDLElement` hierarchy
of DDL clause constructs, which are themselves appropriate
as listener callables::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
)
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
For all :class:`.DDLEvent` events, the ``propagate=True`` keyword argument
will ensure that a given event handler is propagated to copies of the
object, which are made when using the :meth:`_schema.Table.to_metadata`
method::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s"),
propagate=True
)
new_table = some_table.to_metadata(new_metadata)
The above :class:`.DDL` object will also be associated with the
:class:`_schema.Table` object represented by ``new_table``.
.. seealso::
:ref:`event_toplevel`
:class:`.DDLElement`
:class:`.DDL`
:ref:`schema_ddl_sequences`
"""
_target_class_doc = "SomeSchemaClassOrObject"
_dispatch_target = SchemaEventTarget
def before_create(self, target, connection, **kw):
r"""Called before CREATE statements are emitted.
:param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
object which is the target of the event.
:param connection: the :class:`_engine.Connection` where the
CREATE statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
"""
def after_create(self, target, connection, **kw):
r"""Called after CREATE statements are emitted.
:param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
object which is the target of the event.
:param connection: the :class:`_engine.Connection` where the
CREATE statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
"""
def before_drop(self, target, connection, **kw):
r"""Called before DROP statements are emitted.
:param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
object which is the target of the event.
:param connection: the :class:`_engine.Connection` where the
DROP statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
"""
def after_drop(self, target, connection, **kw):
r"""Called after DROP statements are emitted.
:param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
object which is the target of the event.
:param connection: the :class:`_engine.Connection` where the
DROP statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
"""
def before_parent_attach(self, target, parent):
"""Called before a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
"""
def after_parent_attach(self, target, parent):
"""Called after a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
"""
def column_reflect(self, inspector, table, column_info):
"""Called for each unit of 'column info' retrieved when
a :class:`_schema.Table` is being reflected.
This event is most easily used by applying it to a specific
:class:`_schema.MetaData` instance, where it will take effect for
all :class:`_schema.Table` objects within that
:class:`_schema.MetaData` that undergo reflection::
metadata = MetaData()
@event.listens_for(metadata, 'column_reflect')
def receive_column_reflect(inspector, table, column_info):
# receives for all Table objects that are reflected
# under this MetaData
# will use the above event hook
my_table = Table("my_table", metadata, autoload_with=some_engine)
.. versionadded:: 1.4.0b2 The :meth:`_events.DDLEvents.column_reflect`
hook may now be applied to a :class:`_schema.MetaData` object as
well as the :class:`_schema.MetaData` class itself where it will
take place for all :class:`_schema.Table` objects associated with
the targeted :class:`_schema.MetaData`.
It may also be applied to the :class:`_schema.Table` class across
the board::
from sqlalchemy import Table
@event.listens_for(Table, 'column_reflect')
def receive_column_reflect(inspector, table, column_info):
# receives for all Table objects that are reflected
It can also be applied to a specific :class:`_schema.Table` at the
point that one is being reflected using the
:paramref:`_schema.Table.listeners` parameter::
t1 = Table(
"my_table",
autoload_with=some_engine,
listeners=[
('column_reflect', receive_column_reflect)
]
)
A future release will allow it to be associated with a specific
:class:`_schema.MetaData` object as well.
The dictionary of column information as returned by the
dialect is passed, and can be modified. The dictionary
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`:
* ``name`` - the column's name, is applied to the
:paramref:`_schema.Column.name` parameter
* ``type`` - the type of this column, which should be an instance
of :class:`~sqlalchemy.types.TypeEngine`, is applied to the
:paramref:`_schema.Column.type` parameter
* ``nullable`` - boolean flag if the column is NULL or NOT NULL,
is applied to the :paramref:`_schema.Column.nullable` parameter
* ``default`` - the column's server default value. This is
normally specified as a plain string SQL expression, however the
event can pass a :class:`.FetchedValue`, :class:`.DefaultClause`,
or :func:`_expression.text` object as well. Is applied to the
:paramref:`_schema.Column.server_default` parameter
The event is called before any action is taken against
this dictionary, and the contents can be modified; the following
additional keys may be added to the dictionary to further modify
how the :class:`_schema.Column` is constructed:
* ``key`` - the string key that will be used to access this
:class:`_schema.Column` in the ``.c`` collection; will be applied
to the :paramref:`_schema.Column.key` parameter. Is also used
for ORM mapping. See the section
:ref:`mapper_automated_reflection_schemes` for an example.
* ``quote`` - force or un-force quoting on the column name;
is applied to the :paramref:`_schema.Column.quote` parameter.
* ``info`` - a dictionary of arbitrary data to follow along with
the :class:`_schema.Column`, is applied to the
:paramref:`_schema.Column.info` parameter.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
.. seealso::
:ref:`mapper_automated_reflection_schemes` -
in the ORM mapping documentation
:ref:`automap_intercepting_columns` -
in the :ref:`automap_toplevel` documentation
:ref:`metadata_reflection_dbagnostic_types` - in
the :ref:`metadata_reflection_toplevel` documentation
"""
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition. # noqa: E501
The version of the OpenAPI document: 1.0.0-dev.1
Contact: kubeflow-pipelines@google.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kfp_server_api.configuration import Configuration
class ApiListExperimentsResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'experiments': 'list[ApiExperiment]',
'total_size': 'int',
'next_page_token': 'str'
}
attribute_map = {
'experiments': 'experiments',
'total_size': 'total_size',
'next_page_token': 'next_page_token'
}
def __init__(self, experiments=None, total_size=None, next_page_token=None, local_vars_configuration=None): # noqa: E501
"""ApiListExperimentsResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._experiments = None
self._total_size = None
self._next_page_token = None
self.discriminator = None
if experiments is not None:
self.experiments = experiments
if total_size is not None:
self.total_size = total_size
if next_page_token is not None:
self.next_page_token = next_page_token
@property
def experiments(self):
"""Gets the experiments of this ApiListExperimentsResponse. # noqa: E501
A list of experiments returned. # noqa: E501
:return: The experiments of this ApiListExperimentsResponse. # noqa: E501
:rtype: list[ApiExperiment]
"""
return self._experiments
@experiments.setter
def experiments(self, experiments):
"""Sets the experiments of this ApiListExperimentsResponse.
A list of experiments returned. # noqa: E501
:param experiments: The experiments of this ApiListExperimentsResponse. # noqa: E501
:type: list[ApiExperiment]
"""
self._experiments = experiments
@property
def total_size(self):
"""Gets the total_size of this ApiListExperimentsResponse. # noqa: E501
The total number of experiments for the given query. # noqa: E501
:return: The total_size of this ApiListExperimentsResponse. # noqa: E501
:rtype: int
"""
return self._total_size
@total_size.setter
def total_size(self, total_size):
"""Sets the total_size of this ApiListExperimentsResponse.
The total number of experiments for the given query. # noqa: E501
:param total_size: The total_size of this ApiListExperimentsResponse. # noqa: E501
:type: int
"""
self._total_size = total_size
@property
def next_page_token(self):
"""Gets the next_page_token of this ApiListExperimentsResponse. # noqa: E501
The token to list the next page of experiments. # noqa: E501
:return: The next_page_token of this ApiListExperimentsResponse. # noqa: E501
:rtype: str
"""
return self._next_page_token
@next_page_token.setter
def next_page_token(self, next_page_token):
"""Sets the next_page_token of this ApiListExperimentsResponse.
The token to list the next page of experiments. # noqa: E501
:param next_page_token: The next_page_token of this ApiListExperimentsResponse. # noqa: E501
:type: str
"""
self._next_page_token = next_page_token
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiListExperimentsResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ApiListExperimentsResponse):
return True
return self.to_dict() != other.to_dict()
|
from django.urls import path
from . import views
# Routes within the api app
urlpatterns = [
path('', views.index, name='index'),
path('update_config/', views.update_config, name='update_config'),
path('obtener_informacion/', views.get_public_information, name='obtener_informacion'),
]
|
import tensorflow as tf
import numpy as np
data = [
[ [0,0], [0] ],
[ [0,1], [1] ],
[ [1,0], [1] ],
[ [1,1], [0] ],
]
features = [i[0] for i in data]
labels = [i[1] for i in data]
model = tf.keras.Sequential([
tf.keras.layers.Dense(units=8, input_shape=(2,), activation='tanh'),
tf.keras.layers.Dense(units=1, activation='sigmoid')
])
model.compile(optimizer=tf.keras.optimizers.SGD(0.6), loss='mean_squared_error')
model.fit(features, labels, batch_size=1, epochs=5000)
print(
model.predict( np.array([ [0,0], [0,1], [1,0], [1,1] ]) )
)
# epochs=5000
# [[0.00407845]
# [0.9950259 ]
# [0.9946092 ]
# [0.00599459]]
# epochs=2000
# [[0.00601724]
# [0.9888445 ]
# [0.9894794 ]
# [0.0120151 ]]
# epochs=1000
# [[0.00819433]
# [0.9847765 ]
# [0.9835819 ]
# [0.01765227]]
# epochs=500
# [[0.01448798]
# [0.97655636]
# [0.97679883]
# [0.02622622]]
|
/**
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
* @format
* @flow strict-local
*/
'use strict';
import type {IncomingMessage, ServerResponse} from 'http';
import type {CacheStore} from 'metro-cache';
import typeof MetroCache from 'metro-cache';
import type {CustomResolver} from 'metro-resolver';
import type {MixedSourceMap} from 'metro-source-map';
import type {JsTransformerConfig} from 'metro-transform-worker';
import type {TransformResult} from 'metro/src/DeltaBundler';
import type {
DeltaResult,
Graph,
Module,
SerializerOptions,
} from 'metro/src/DeltaBundler/types.flow.js';
import type {Reporter} from 'metro/src/lib/reporting';
import type {TransformVariants} from 'metro/src/ModuleGraph/types.flow.js';
import type Server from 'metro/src/Server';
export type PostProcessBundleSourcemap = ({
code: Buffer | string,
map: MixedSourceMap,
outFileName: string,
...
}) => {
code: Buffer | string,
map: MixedSourceMap | string,
...
};
type ExtraTransformOptions = {
+preloadedModules: {[path: string]: true, ...} | false,
+ramGroups: Array<string>,
+transform: {|
+experimentalImportSupport: boolean,
+inlineRequires: {+blockList: {[string]: true, ...}, ...} | boolean,
+nonInlinedRequires?: $ReadOnlyArray<string>,
+unstable_disableES6Transforms?: boolean,
|},
...
};
export type GetTransformOptionsOpts = {|
dev: boolean,
hot: boolean,
platform: ?string,
|};
export type GetTransformOptions = (
entryPoints: $ReadOnlyArray<string>,
options: GetTransformOptionsOpts,
getDependenciesOf: (string) => Promise<Array<string>>,
) => Promise<ExtraTransformOptions>;
export type Middleware = (
IncomingMessage,
ServerResponse,
((e: ?Error) => mixed),
) => mixed;
type ResolverConfigT = {|
assetExts: $ReadOnlyArray<string>,
assetResolutions: $ReadOnlyArray<string>,
blacklistRE?: RegExp | Array<RegExp>,
blockList: RegExp | Array<RegExp>,
disableHierarchicalLookup: boolean,
dependencyExtractor: ?string,
emptyModulePath: string,
extraNodeModules: {[name: string]: string, ...},
hasteImplModulePath: ?string,
unstable_hasteMapModulePath: ?string,
nodeModulesPaths: $ReadOnlyArray<string>,
platforms: $ReadOnlyArray<string>,
resolveRequest: ?CustomResolver,
resolverMainFields: $ReadOnlyArray<string>,
sourceExts: $ReadOnlyArray<string>,
useWatchman: boolean,
|};
type SerializerConfigT = {|
createModuleIdFactory: () => (path: string) => number,
customSerializer: ?(
entryPoint: string,
preModules: $ReadOnlyArray<Module<>>,
graph: Graph<>,
options: SerializerOptions,
) => Promise<string | {|code: string, map: string|}>,
experimentalSerializerHook: (graph: Graph<>, delta: DeltaResult<>) => mixed,
getModulesRunBeforeMainModule: (entryFilePath: string) => Array<string>,
getPolyfills: ({platform: ?string, ...}) => $ReadOnlyArray<string>,
getRunModuleStatement: (number | string) => string,
polyfillModuleNames: $ReadOnlyArray<string>,
postProcessBundleSourcemap: PostProcessBundleSourcemap,
processModuleFilter: (modules: Module<>) => boolean,
|};
type TransformerConfigT = {|
...JsTransformerConfig,
getTransformOptions: GetTransformOptions,
transformVariants: TransformVariants,
workerPath: string,
publicPath: string,
experimentalImportBundleSupport: boolean,
|};
type MetalConfigT = {|
cacheStores: $ReadOnlyArray<CacheStore<TransformResult<>>>,
cacheVersion: string,
hasteMapCacheDirectory?: string,
maxWorkers: number,
projectRoot: string,
stickyWorkers: boolean,
transformerPath: string,
reporter: Reporter,
resetCache: boolean,
watchFolders: $ReadOnlyArray<string>,
|};
type ServerConfigT = {|
enhanceMiddleware: (Middleware, Server) => Middleware,
useGlobalHotkey: boolean,
port: number,
unstable_serverRoot: ?string,
rewriteRequestUrl: string => string,
runInspectorProxy: boolean,
verifyConnections: boolean,
|};
type SymbolicatorConfigT = {|
customizeFrame: ({
+file: ?string,
+lineNumber: ?number,
+column: ?number,
+methodName: ?string,
...
}) => ?{|+collapse?: boolean|} | Promise<?{|+collapse?: boolean|}>,
|};
export type InputConfigT = $Shape<{|
...MetalConfigT,
...$ReadOnly<{|
cacheStores:
| $ReadOnlyArray<CacheStore<TransformResult<>>>
| (MetroCache => $ReadOnlyArray<CacheStore<TransformResult<>>>),
resolver: $Shape<ResolverConfigT>,
server: $Shape<ServerConfigT>,
serializer: $Shape<SerializerConfigT>,
symbolicator: $Shape<SymbolicatorConfigT>,
transformer: $Shape<TransformerConfigT>,
|}>,
|}>;
export type IntermediateConfigT = {|
...MetalConfigT,
...{|
resolver: ResolverConfigT,
server: ServerConfigT,
serializer: SerializerConfigT,
symbolicator: SymbolicatorConfigT,
transformer: TransformerConfigT,
|},
|};
export type ConfigT = $ReadOnly<{|
...$ReadOnly<MetalConfigT>,
...$ReadOnly<{|
resolver: $ReadOnly<ResolverConfigT>,
server: $ReadOnly<ServerConfigT>,
serializer: $ReadOnly<SerializerConfigT>,
symbolicator: $ReadOnly<SymbolicatorConfigT>,
transformer: $ReadOnly<TransformerConfigT>,
|}>,
|}>;
export type YargArguments = {
config?: string,
cwd?: string,
port?: string | number,
host?: string,
projectRoot?: string,
watchFolders?: Array<string>,
assetExts?: Array<string>,
sourceExts?: Array<string>,
platforms?: Array<string>,
'max-workers'?: string | number,
maxWorkers?: string | number,
transformer?: string,
'reset-cache'?: boolean,
resetCache?: boolean,
runInspectorProxy?: boolean,
verbose?: boolean,
...
};
|
module.exports = require('./src/nested-set')
|
/**
* Module dependencies
*/
var start = require('./common')
, Aggregate = require('../lib/aggregate')
, mongoose = start.mongoose
, Schema = mongoose.Schema
, assert = require('assert');
/**
* Test data
*/
var EmployeeSchema = new Schema({
name: String,
sal: Number,
dept: String,
customers: [String]
});
mongoose.model('Employee', EmployeeSchema);
function setupData(callback) {
var saved = 0
, emps = [
{ name: "Alice", sal: 18000, dept: "sales", customers: [ 'Eve', 'Fred' ] }
, { name: "Bob", sal: 15000, dept: "sales", customers: [ 'Gary', 'Herbert', 'Isaac' ] }
, { name: "Carol", sal: 14000, dept: "r&d" }
, { name: "Dave", sal: 14500, dept: "r&d" }
]
, db = start()
, Employee = db.model('Employee');
emps.forEach(function(data) {
var emp = new Employee(data);
emp.save(function() {
if (++saved === emps.length) {
callback(db);
}
});
});
}
function clearData(db, callback) {
db.model('Employee').remove(function() {
db.close(callback);
});
}
/**
* Test.
*/
describe('aggregate: ', function() {
describe('append', function() {
it('(pipeline)', function(done) {
var aggregate = new Aggregate();
assert.equal(aggregate.append({ $a: 1 }, { $b: 2 }, { $c: 3 }), aggregate);
assert.deepEqual(aggregate._pipeline, [{ $a: 1 }, { $b: 2 }, { $c: 3 }]);
aggregate.append({ $d: 4 }, { $c: 5 });
assert.deepEqual(aggregate._pipeline, [{ $a: 1 }, { $b: 2 }, { $c: 3 }, { $d: 4 }, { $c: 5 }]);
done();
});
it('throws if non-operator parameter is passed', function(done) {
var aggregate = new Aggregate()
, regexp = /Arguments must be aggregate pipeline operators/;
assert.throws(function() {
aggregate.append({ $a: 1 }, "string");
}, regexp);
assert.throws(function() {
aggregate.append({ $a: 1 }, ["array"]);
}, regexp);
assert.throws(function() {
aggregate.append({ $a: 1 }, { a: 1 });
}, regexp);
done();
});
it('does not throw when 0 args passed', function(done) {
var aggregate = new Aggregate();
assert.doesNotThrow(function() {
aggregate.append();
});
done();
});
it('called from constructor', function(done) {
var aggregate = new Aggregate({ $a: 1 }, { $b: 2 }, { $c: 3 });
assert.deepEqual(aggregate._pipeline, [{ $a: 1 }, { $b: 2 }, { $c: 3 }]);
done();
});
});
describe('project', function() {
it('(object)', function(done) {
var aggregate = new Aggregate();
assert.equal(aggregate.project({ a: 1, b: 1, c: 0 }), aggregate);
assert.deepEqual(aggregate._pipeline, [{ $project: { a: 1, b: 1, c: 0 } }]);
aggregate.project({ b: 1 });
assert.deepEqual(aggregate._pipeline, [{ $project: { a: 1, b: 1, c: 0 } }, { $project: { b: 1 } }]);
done();
});
it('(string)', function(done) {
var aggregate = new Aggregate();
aggregate.project(" a b -c ");
assert.deepEqual(aggregate._pipeline, [{ $project: { a: 1, b: 1, c: 0 } }]);
aggregate.project("b");
assert.deepEqual(aggregate._pipeline, [{ $project: { a: 1, b: 1, c: 0 } }, { $project: { b: 1 } }]);
done();
});
it('("a","b","c")', function(done) {
assert.throws(function() {
var aggregate = new Aggregate();
aggregate.project("a", "b", "c");
}, /Invalid project/);
done();
});
it('["a","b","c"]', function(done) {
assert.throws(function() {
var aggregate = new Aggregate();
aggregate.project(["a", "b", "c"]);
}, /Invalid project/);
done();
});
});
describe('group', function() {
it('works', function(done) {
var aggregate = new Aggregate();
assert.equal(aggregate.group({ a: 1, b: 2 }), aggregate);
assert.deepEqual(aggregate._pipeline, [{ $group: { a: 1, b: 2 } }]);
aggregate.group({ c: 3 });
assert.deepEqual(aggregate._pipeline, [{ $group: { a: 1, b: 2 } }, { $group: { c: 3 } }]);
done();
});
});
describe('skip', function() {
it('works', function(done) {
var aggregate = new Aggregate();
assert.equal(aggregate.skip(42), aggregate);
assert.deepEqual(aggregate._pipeline, [{ $skip: 42 }]);
aggregate.skip(42);
assert.deepEqual(aggregate._pipeline, [{ $skip: 42 }, { $skip: 42 }]);
done();
});
});
describe('limit', function() {
it('works', function(done) {
var aggregate = new Aggregate();
assert.equal(aggregate.limit(42), aggregate);
assert.deepEqual(aggregate._pipeline, [{ $limit: 42 }]);
aggregate.limit(42);
assert.deepEqual(aggregate._pipeline, [{ $limit: 42 }, { $limit: 42 }]);
done();
});
});
describe('unwind', function() {
it('("field")', function(done) {
var aggregate = new Aggregate();
assert.equal(aggregate.unwind("field"), aggregate);
assert.deepEqual(aggregate._pipeline, [{ $unwind: "$field" }]);
aggregate.unwind("a", "b", "c");
assert.deepEqual(aggregate._pipeline, [
{ $unwind: "$field" }
, { $unwind: "$a" }
, { $unwind: "$b" }
, { $unwind: "$c" }
]);
done();
});
});
describe('match', function() {
it('works', function(done) {
var aggregate = new Aggregate();
assert.equal(aggregate.match({ a: 1 }), aggregate);
assert.deepEqual(aggregate._pipeline, [{ $match: { a: 1 } }]);
aggregate.match({ b: 2 });
assert.deepEqual(aggregate._pipeline, [{ $match: { a: 1 } }, { $match: { b: 2 } }]);
done();
});
});
describe('sort', function() {
it('(object)', function(done) {
var aggregate = new Aggregate();
assert.equal(aggregate.sort({ a: 1, b: 'asc', c: 'descending' }), aggregate);
assert.deepEqual(aggregate._pipeline, [{ $sort: { a: 1, b: 1, c: -1 } }]);
aggregate.sort({ b: 'desc' });
assert.deepEqual(aggregate._pipeline, [{ $sort: { a: 1, b: 1, c: -1 } }, { $sort: { b: -1 } }]);
done();
});
it('(string)', function(done) {
var aggregate = new Aggregate();
aggregate.sort(" a b -c ");
assert.deepEqual(aggregate._pipeline, [{ $sort: { a: 1, b: 1, c: -1 } }]);
aggregate.sort("b");
assert.deepEqual(aggregate._pipeline, [{ $sort: { a: 1, b: 1, c: -1 } }, { $sort: { b: 1 } }]);
done();
});
it('("a","b","c")', function(done) {
assert.throws(function() {
var aggregate = new Aggregate();
aggregate.sort("a", "b", "c");
}, /Invalid sort/);
done();
});
it('["a","b","c"]', function(done) {
assert.throws(function() {
var aggregate = new Aggregate();
aggregate.sort(["a", "b", "c"]);
}, /Invalid sort/);
done();
});
});
describe('near', function() {
it('works', function(done) {
var aggregate = new Aggregate();
assert.equal(aggregate.near({ a: 1 }), aggregate);
assert.deepEqual(aggregate._pipeline, [{ $geoNear: { a: 1 } }]);
aggregate.near({ b: 2 });
assert.deepEqual(aggregate._pipeline, [{ $geoNear: { a: 1 } }, { $geoNear: { b: 2 } }]);
done();
});
it('works with discriminators (gh-3304)', function(done) {
var aggregate = new Aggregate();
var stub = {
schema: {
discriminatorMapping: {
key: '__t',
value: 'subschema',
isRoot: false
}
}
};
aggregate._model = stub;
assert.equal(aggregate.near({ a: 1 }), aggregate);
// Run exec so we apply discriminator pipeline
assert.throws(function() {
aggregate.exec();
}, /Cannot read property 'aggregate' of undefined|Cannot call method 'aggregate' of undefined/);
assert.deepEqual(aggregate._pipeline,
[{ $geoNear: { a: 1, query: { __t: 'subschema' } } }]);
aggregate = new Aggregate();
aggregate._model = stub;
aggregate.near({ b: 2, query: { x: 1 } });
assert.throws(function() {
aggregate.exec();
}, /Cannot read property 'aggregate' of undefined|Cannot call method 'aggregate' of undefined/);
assert.deepEqual(aggregate._pipeline,
[{ $geoNear: { b: 2, query: { x: 1, __t: 'subschema' } } }]);
done();
});
});
describe('bind', function() {
it('works', function(done) {
var aggregate = new Aggregate()
, model = { foo: 42 };
assert.equal(aggregate.model(model), aggregate);
assert.equal(aggregate._model, model);
done();
});
});
describe('exec', function() {
it('project', function(done) {
var aggregate = new Aggregate();
setupData(function(db) {
aggregate
.model(db.model('Employee'))
.project({ sal: 1, sal_k: { $divide: [ "$sal", 1000 ] } })
.exec(function(err, docs) {
assert.ifError(err);
docs.forEach(function(doc) {
assert.equal(doc.sal / 1000, doc.sal_k);
});
clearData(db, function() { done(); });
});
});
});
it('group', function(done) {
var aggregate = new Aggregate();
setupData(function(db) {
aggregate
.model(db.model('Employee'))
.group({ _id: "$dept" })
.exec(function(err, docs) {
var depts;
assert.ifError(err);
assert.equal(docs.length, 2);
depts = docs.map(function(doc) { return doc._id; });
assert.notEqual(depts.indexOf("sales"), -1);
assert.notEqual(depts.indexOf("r&d"), -1);
clearData(db, function() { done(); });
});
});
});
it('skip', function(done) {
var aggregate = new Aggregate();
setupData(function(db) {
aggregate
.model(db.model('Employee'))
.skip(1)
.exec(function(err, docs) {
assert.ifError(err);
assert.equal(docs.length, 3);
clearData(db, function() { done(); });
});
});
});
it('limit', function(done) {
var aggregate = new Aggregate();
setupData(function(db) {
aggregate
.model(db.model('Employee'))
.limit(3)
.exec(function(err, docs) {
assert.ifError(err);
assert.equal(docs.length, 3);
clearData(db, function() { done(); });
});
});
});
it('unwind', function(done) {
var aggregate = new Aggregate();
setupData(function(db) {
aggregate
.model(db.model('Employee'))
.unwind('customers')
.exec(function(err, docs) {
assert.ifError(err);
assert.equal(docs.length, 5);
clearData(db, function() { done(); });
});
});
});
it('match', function(done) {
var aggregate = new Aggregate();
setupData(function(db) {
aggregate
.model(db.model('Employee'))
.match({ sal: { $gt: 15000 } })
.exec(function(err, docs) {
assert.ifError(err);
assert.equal(docs.length, 1);
clearData(db, function() { done(); });
});
});
});
it('sort', function(done) {
var aggregate = new Aggregate();
setupData(function(db) {
aggregate
.model(db.model('Employee'))
.sort("sal")
.exec(function(err, docs) {
assert.ifError(err);
assert.equal(docs[0].sal, 14000);
clearData(db, function() { done(); });
});
});
});
it('complex pipeline', function(done) {
var aggregate = new Aggregate();
setupData(function(db) {
aggregate
.model(db.model('Employee'))
.match({ sal: { $lt: 16000 } })
.unwind('customers')
.project({ emp: "$name", cust: "$customers" })
.sort('-cust')
.skip(2)
.exec(function(err, docs) {
assert.ifError(err);
assert.equal(docs.length, 1);
assert.equal(docs[0].cust, 'Gary');
assert.equal(docs[0].emp, 'Bob');
clearData(db, function() { done(); });
});
});
});
it('explain()', function(done) {
var aggregate = new Aggregate();
start.mongodVersion(function(err, version) {
if (err) {
return done(err);
}
var mongo26 = 2 < version[0] || (2 == version[0] && 6 <= version[1]);
if (!mongo26) {
return done();
}
setupData(function(db) {
aggregate.
model(db.model('Employee')).
match({ sal: { $lt: 16000 } }).
explain(function(err, output) {
assert.ifError(err);
assert.ok(output);
// make sure we got explain output
assert.ok(output.stages);
clearData(db, function() { done(); });
});
});
});
});
describe('error when empty pipeline', function() {
it('without a callback', function(done) {
var agg = new Aggregate;
setupData(function(db) {
agg.model(db.model('Employee'));
var promise = agg.exec();
assert.ok(promise instanceof mongoose.Promise);
promise.onResolve(function(err) {
assert.ok(err);
assert.equal(err.message, "Aggregate has empty pipeline");
done();
});
});
});
it('with a callback', function(done) {
var aggregate = new Aggregate()
, callback;
setupData(function(db) {
aggregate.model(db.model('Employee'));
callback = function(err) {
assert.ok(err);
assert.equal(err.message, "Aggregate has empty pipeline");
done();
};
aggregate.exec(callback);
});
});
});
describe('error when not bound to a model', function() {
it('with callback', function(done) {
var aggregate = new Aggregate();
aggregate.skip(0);
assert.throws(function() {
aggregate.exec();
}, 'Aggregate not bound to any Model');
done();
});
});
it('handles aggregation options', function(done) {
setupData(function(db) {
start.mongodVersion(function(err, version) {
if (err) throw err;
var mongo26_or_greater = 2 < version[0] || (2 == version[0] && 6 <= version[1]);
var m = db.model('Employee');
var match = { $match: { sal: { $gt: 15000 }}};
var pref = 'primaryPreferred';
var aggregate = m.aggregate(match).read(pref);
if (mongo26_or_greater) {
aggregate.allowDiskUse(true);
}
assert.equal(aggregate.options.readPreference.mode, pref);
if (mongo26_or_greater) {
assert.equal(aggregate.options.allowDiskUse, true);
}
aggregate
.exec(function(err, docs) {
assert.ifError(err);
assert.equal(1, docs.length);
assert.equal(docs[0].sal, 18000);
clearData(db, done);
});
});
});
});
});
it('cursor (gh-3160)', function(done) {
var db = start();
var MyModel = db.model('gh3160', { name: String });
MyModel.
aggregate([{ $match: { name: 'test' } }]).
cursor({ async: true }).
exec(function(error, cursor) {
assert.ifError(error);
assert.ok(cursor);
db.close(done);
});
});
});
|
# Bubble sort in descending order, with a stack #
from sys import maxsize
lista = [1,2,3,4,5,6,7,8,9,10]
def bubbleDecrescente(lista):
    # classic bubble sort; comparing with '<' sorts in descending order
    for i in range(len(lista)-1, 0, -1):
        for j in range(i):
            if lista[j] < lista[j+1]:
                lista[j], lista[j+1] = lista[j+1], lista[j]
    return lista
print(bubbleDecrescente(lista))
def criaPilha():
    # create an empty stack backed by a Python list
    pilha = []
    return pilha
def isEmpty(pilha):
    return len(pilha) == 0
def push(pilha, item):
    pilha.append(item)
def pop(pilha):
    if (isEmpty(pilha)):
        return str(-maxsize - 1)  # sentinel value for an empty stack
    return pilha.pop()
def peek(pilha):
    if (isEmpty(pilha)):
        return str(-maxsize - 1)
    return pilha[len(pilha) - 1]
pilha = criaPilha()
x = []
for i in range(10):
push(pilha, str(lista[i]))
for i in range(10):
x.append((pop(pilha)))
print(x)
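# Worked example: bubbleDecrescente sorts lista to [10, 9, ..., 1]; pushing
# each element (as a string) and then popping all ten reverses that order,
# so x prints ['1', '2', ..., '10'].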
|
// Contact menu
let ghost = document.getElementsByClassName('ghost')[0]
let main = document.getElementsByTagName('main')[0]
let teste = false
var ghost_disable = () =>{
document.body.style.backgroundColor = "whitesmoke"
main.style.display = "block";
ghost.style.display = "none";
}
var ghost_enable = () =>{
document.body.style.backgroundColor = "#222f3e"
main.style.display = "none";
ghost.style.display = "block";
}
function show_ghost(){
animation()
document.body.style.overflow = "hidden"
ghost_enable()
}
function hidden_ghost(){
animation()
document.body.style.overflow = "auto"
setTimeout(() => {
ghost_disable()
}, 600);
}
function animation(){
ghost.classList.toggle('off', teste)
teste = !teste
}
// Portfolio
let square = document.getElementsByClassName('square')
for (var item = 0; item < square.length; item++){
square[item].addEventListener('mouseover', show_square)
square[item].addEventListener('mouseout', hidden_square)
}
function show_square(){
let link = this.children[1]
link.style.gridRow = '1/3'
link.children[0].style.display = "none"
link.children[1].style.display = "block"
}
function hidden_square(){
let link = this.children[1]
link.style.gridRow = '2/3'
link.children[0].style.display = "block"
link.children[1].style.display = "none"
}
// Menu toggle
var show = true
var menu = document.querySelector('.menu_section')
var btn = document.querySelector('.toggle')
var ul = document.getElementById('ul_menu')
btn.addEventListener('click', () =>{
document.body.style.overflow = show ? "hidden" : "initial"
menu.classList.toggle('on', show)
show = !show
})
for (var li = 0; li < ul.children.length; li++){
ul.children[li].addEventListener('click', () =>{
document.body.style.overflow = "initial"
menu.setAttribute('class', "menu_section")
show = !show
})
}
|
import logging
import sys
from config.configs import get_config
from src.pretrain import Pretrain_Trainer
#from src.finetune import Finetune_Trainer
def main(parser, usage_mode):
# TO BE UPDATED
supported_tasks = ['classification','summarization']
if usage_mode == 'pretrain':
sys.stdout.write('#################################################\n')
sys.stdout.write('You have entered PreTrain mode.\n')
sys.stdout.write('#################################################\n')
# train with Pretrain_Trainer
trainer = Pretrain_Trainer(parser)
trainer.train_test()
elif usage_mode == 'finetune':
sys.stdout.write('#################################################\n')
sys.stdout.write('You have entered Finetune mode.\n')
sys.stdout.write('#################################################\n')
# train with Finetune_Trainer
#trainer = Finetune_Trainer(parser)
#trainer.train_test()
    elif usage_mode in supported_tasks:
        raise NotImplementedError("Task '{}' is not supported yet!".format(usage_mode))
    else:
        raise ValueError("Unknown mode: '{}'".format(usage_mode))
if __name__ == "__main__":
sys.stdout.write('#################################################\n')
sys.stdout.write('You have entered __main__.\n')
sys.stdout.write('#################################################\n')
# define ArgumentParser
parser = get_config()
# get user input of whether purpose is train or inference
usage_mode = input('Enter the mode you want to use :')
# run main
main(parser, usage_mode)
sys.stdout.write('#################################################\n')
sys.stdout.write('You are exiting __main__.\n')
sys.stdout.write('#################################################\n')
|
/* Copyright (C) 1997-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#ifndef _BYTESWAP_H
#define _BYTESWAP_H 1
#include <sgx/features.h>
/* Get the machine specific, optimized definitions. */
#include <sgx/bits/byteswap.h>
/* The following definitions must all be macros since otherwise some
of the possible optimizations are not possible. */
/* Return a value with all bytes in the 16 bit argument swapped. */
#define bswap_16(x) __bswap_16 (x)
/* Return a value with all bytes in the 32 bit argument swapped. */
#define bswap_32(x) __bswap_32 (x)
/* Return a value with all bytes in the 64 bit argument swapped. */
#define bswap_64(x) __bswap_64 (x)
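/* Worked examples (illustrative only, not part of the original header):
   bswap_16 (0x1234)             == 0x3412
   bswap_32 (0x12345678)         == 0x78563412
   bswap_64 (0x1122334455667788) == 0x8877665544332211 */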
#endif /* byteswap.h */
|
# encoding: UTF-8
from vnpy.trader import vtConstant
from xtpGateway import XtpGateway
gatewayClass = XtpGateway
gatewayName = 'XTP'
gatewayDisplayName = 'XTP'
gatewayType = vtConstant.GATEWAYTYPE_EQUITY
gatewayQryEnabled = True
|
# Generated by Django 2.1.7 on 2019-03-25 23:40
from django.db import migrations, models
import newsWebsite.models
class Migration(migrations.Migration):
dependencies = [
('newsWebsite', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='author',
name='picture',
field=models.ImageField(default='authorPictures/default_author.png', upload_to=newsWebsite.models.picture_upload_path),
),
]
|
function unitTests(doneCallback) {
console.log('[Tester] unit test completed');
doneCallback(0);
}
function styleCheck(doneCallback) {
var exec = require('child_process').exec;
var cmd = 'jscs . --preset=google';
exec(cmd, function(error, stdout, stderr) {
if (stdout !== '' || stderr !== '') {
console.log('[Tester] style check completed with errors');
console.log(stdout);
console.log(stderr);
doneCallback(1);
} else {
console.log('[Tester] style check completed');
doneCallback(0);
}
});
}
function runTests(code) {
exitCode = exitCode || code;
currentTested++;
if (currentTested >= TEST_NUMBER) {
process.exit(exitCode);
}
TEST_METHODS[currentTested](runTests);
}
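// Illustrative flow: runTests(0) -> unitTests -> runTests(0) -> styleCheck
// -> runTests(code) -> process.exit(exitCode) once all TEST_METHODS have run.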
var exitCode = 0;
var currentTested = -1;
const TEST_METHODS = [unitTests, styleCheck];
const TEST_NUMBER = TEST_METHODS.length;
runTests(0);
|
/*
* This file is part of the Nautilus AeroKernel developed
* by the Hobbes and V3VEE Projects with funding from the
* United States National Science Foundation and the Department of Energy.
*
* The V3VEE Project is a joint project between Northwestern University
* and the University of New Mexico. The Hobbes Project is a collaboration
* led by Sandia National Laboratories that includes several national
* laboratories and universities. You can find out more at:
* http://www.v3vee.org and
* http://xstack.sandia.gov/hobbes
*
* Copyright (c) 2015, Kyle C. Hale <kh@u.northwestern.edu>
* Copyright (c) 2015, The V3VEE Project <http://www.v3vee.org>
* The Hobbes Project <http://xstack.sandia.gov/hobbes>
* All rights reserved.
*
* Author: Kyle C. Hale <kh@u.northwestern.edu>
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "LICENSE.txt".
*/
#ifndef __LOWLEVEL_H__
#define __LOWLEVEL_H__
#ifdef NAUT_CONFIG_RISCV_HOST
#define ENTRY(x) \
.globl x; \
.align 4, 0x00,0x01;\
x:
#define PTRLOG 3
#define SZREG 8
#define REG_S sd
#define REG_L ld
#define REG_SC sc.d
#define ROFF(N, R) N*SZREG(R)
#else
#define GEN_NOP(x) .byte x
#define NOP_1BYTE 0x90
#define NOP_2BYTE 0x66,0x90
#define NOP_3BYTE 0x0f,0x1f,0x00
#define NOP_4BYTE 0x0f,0x1f,0x40,0
#define NOP_5BYTE 0x0f,0x1f,0x44,0x00,0
#define NOP_6BYTE 0x66,0x0f,0x1f,0x44,0x00,0
#define NOP_7BYTE 0x0f,0x1f,0x80,0,0,0,0
#define NOP_8BYTE 0x0f,0x1f,0x84,0x00,0,0,0,0
#define ENTRY(x) \
.globl x; \
.align 4, 0x90;\
x:
#endif
#define GLOBAL(x) \
.globl x; \
x:
#define END(x) \
.size x, .-x
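/* Usage sketch (illustrative, GNU as syntax; my_stub is a made-up name):
 *   ENTRY(my_stub)    -- exported, aligned entry point
 *       ret
 *   END(my_stub)      -- emits the matching .size directive
 */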
#endif
|
"""
ANSI - Gives colour to text.
Use the codes defined in ANSIPARSER in your text
to apply colour to text according to the ANSI standard.
Examples:
This is |rRed text|n and this is normal again.
This is {rRed text{n and this is normal again. # soon to be deprecated
This is %crRed text%cn and this is normal again. # deprecated
Mostly you should not need to call parse_ansi() explicitly;
it is run by Evennia just before returning data to/from the
user. Deprecated example forms are available by extending
the ansi mapping.
"""
from builtins import object, range
import re
from django.conf import settings
from evennia.utils import utils
from evennia.utils.utils import to_str, to_unicode
from future.utils import with_metaclass
# ANSI definitions
ANSI_BEEP = "\07"
ANSI_ESCAPE = "\033"
ANSI_NORMAL = "\033[0m"
ANSI_UNDERLINE = "\033[4m"
ANSI_HILITE = "\033[1m"
ANSI_UNHILITE = "\033[22m"
ANSI_BLINK = "\033[5m"
ANSI_INVERSE = "\033[7m"
ANSI_INV_HILITE = "\033[1;7m"
ANSI_INV_BLINK = "\033[7;5m"
ANSI_BLINK_HILITE = "\033[1;5m"
ANSI_INV_BLINK_HILITE = "\033[1;5;7m"
# Foreground colors
ANSI_BLACK = "\033[30m"
ANSI_RED = "\033[31m"
ANSI_GREEN = "\033[32m"
ANSI_YELLOW = "\033[33m"
ANSI_BLUE = "\033[34m"
ANSI_MAGENTA = "\033[35m"
ANSI_CYAN = "\033[36m"
ANSI_WHITE = "\033[37m"
# Background colors
ANSI_BACK_BLACK = "\033[40m"
ANSI_BACK_RED = "\033[41m"
ANSI_BACK_GREEN = "\033[42m"
ANSI_BACK_YELLOW = "\033[43m"
ANSI_BACK_BLUE = "\033[44m"
ANSI_BACK_MAGENTA = "\033[45m"
ANSI_BACK_CYAN = "\033[46m"
ANSI_BACK_WHITE = "\033[47m"
# Formatting Characters
ANSI_RETURN = "\r\n"
ANSI_TAB = "\t"
ANSI_SPACE = " "
# Escapes
ANSI_ESCAPES = ("{{", "\\\\", "\|\|")
from collections import OrderedDict
_PARSE_CACHE = OrderedDict()
_PARSE_CACHE_SIZE = 10000
class ANSIParser(object):
"""
A class that parses ANSI markup
to ANSI command sequences
We also allow escaping colour codes
by prepending a \ for xterm256 codes,
or an extra | for Merc-style codes
"""
def sub_ansi(self, ansimatch):
"""
Replacer used by `re.sub` to replace ANSI
markers with correct ANSI sequences
Args:
ansimatch (re.matchobject): The match.
Returns:
processed (str): The processed match string.
"""
return self.ansi_map.get(ansimatch.group(), "")
def sub_brightbg(self, ansimatch):
"""
Replacer used by `re.sub` to replace ANSI
bright background markers with Xterm256 replacement
Args:
ansimatch (re.matchobject): The match.
Returns:
processed (str): The processed match string.
"""
return self.ansi_bright_bgs_map.get(ansimatch.group(), "")
def sub_xterm256(self, rgbmatch, use_xterm256=False):
"""
This is a replacer method called by `re.sub` with the matched
tag. It must return the correct ansi sequence.
It checks `self.do_xterm256` to determine if conversion
to standard ANSI should be done or not.
Args:
rgbmatch (re.matchobject): The match.
use_xterm256 (bool, optional): Don't convert 256-colors to 16.
Returns:
processed (str): The processed match string.
"""
if not rgbmatch:
return ""
# get tag, stripping the initial marker
rgbtag = rgbmatch.group()[1:]
background = rgbtag[0] == '['
grayscale = rgbtag[0 + int(background)] == '='
if not grayscale:
# 6x6x6 color-cube (xterm indexes 16-231)
if background:
red, green, blue = int(rgbtag[1]), int(rgbtag[2]), int(rgbtag[3])
else:
red, green, blue = int(rgbtag[0]), int(rgbtag[1]), int(rgbtag[2])
else:
# grayscale values (xterm indexes 16, 232-255, 231) for full spectrum
letter = rgbtag[int(background) + 1]
if letter == 'a':
colval = 16 # pure black @ index 16 (first color cube entry)
elif letter == 'z':
colval = 231 # pure white @ index 231 (last color cube entry)
else:
# letter in range [b..y] (exactly 24 values!)
colval = 134 + ord(letter)
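# worked example: letter 'b' -> 134 + ord('b') = 134 + 98 = 232 (darkest gray entry)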
# ansi fallback logic expects r,g,b values in [0..5] range
gray = (ord(letter)-97)/5.0
red, green, blue = gray, gray, gray
if use_xterm256:
if not grayscale:
colval = 16 + (red * 36) + (green * 6) + blue
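# worked example: r,g,b = 5,0,0 -> 16 + 5*36 = 196 (bright red in the 6x6x6 cube)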
return "\033[%s8;5;%sm" % (3 + int(background), colval)
# replaced since some clients (like Potato) do not accept codes with leading zeroes, see issue #1024.
# return "\033[%s8;5;%s%s%sm" % (3 + int(background), colval // 100, (colval % 100) // 10, colval%10)
else:
# xterm256 not supported, convert the rgb value to ansi instead
if red == green == blue and red < 3:
if background:
return ANSI_BACK_BLACK
elif red >= 1:
return ANSI_HILITE + ANSI_BLACK
else:
return ANSI_NORMAL + ANSI_BLACK
elif red == green == blue:
if background:
return ANSI_BACK_WHITE
elif red >= 4:
return ANSI_HILITE + ANSI_WHITE
else:
return ANSI_NORMAL + ANSI_WHITE
elif red > green and red > blue:
if background:
return ANSI_BACK_RED
elif red >= 3:
return ANSI_HILITE + ANSI_RED
else:
return ANSI_NORMAL + ANSI_RED
elif red == green and red > blue:
if background:
return ANSI_BACK_YELLOW
elif red >= 3:
return ANSI_HILITE + ANSI_YELLOW
else:
return ANSI_NORMAL + ANSI_YELLOW
elif red == blue and red > green:
if background:
return ANSI_BACK_MAGENTA
elif red >= 3:
return ANSI_HILITE + ANSI_MAGENTA
else:
return ANSI_NORMAL + ANSI_MAGENTA
elif green > blue:
if background:
return ANSI_BACK_GREEN
elif green >= 3:
return ANSI_HILITE + ANSI_GREEN
else:
return ANSI_NORMAL + ANSI_GREEN
elif green == blue:
if background:
return ANSI_BACK_CYAN
elif green >= 3:
return ANSI_HILITE + ANSI_CYAN
else:
return ANSI_NORMAL + ANSI_CYAN
else: # mostly blue
if background:
return ANSI_BACK_BLUE
elif blue >= 3:
return ANSI_HILITE + ANSI_BLUE
else:
return ANSI_NORMAL + ANSI_BLUE
def strip_raw_codes(self, string):
"""
Strips raw ANSI codes from a string.
Args:
string (str): The string to strip.
Returns:
string (str): The processed string.
"""
return self.ansi_regex.sub("", string)
def strip_mxp(self, string):
"""
Strips all MXP codes from a string.
Args:
string (str): The string to strip.
Returns:
string (str): The processed string.
"""
return self.mxp_sub.sub(r'\2', string)
def parse_ansi(self, string, strip_ansi=False, xterm256=False, mxp=False):
"""
Parses a string, subbing color codes according to the stored
mapping.
Args:
string (str): The string to parse.
strip_ansi (boolean, optional): Strip all found ansi markup.
xterm256 (boolean, optional): If actually using xterm256 or if
these values should be converted to 16-color ANSI.
mxp (boolean, optional): Parse MXP commands in string.
Returns:
string (str): The parsed string.
"""
if hasattr(string, '_raw_string'):
if strip_ansi:
return string.clean()
else:
return string.raw()
if not string:
return ''
# check cached parsings
global _PARSE_CACHE
cachekey = "%s-%s-%s-%s" % (string, strip_ansi, xterm256, mxp)
if cachekey in _PARSE_CACHE:
return _PARSE_CACHE[cachekey]
# pre-convert bright colors to xterm256 color tags
string = self.brightbg_sub.sub(self.sub_brightbg, string)
def do_xterm256(part):
return self.sub_xterm256(part, xterm256)
in_string = utils.to_str(string)
# do string replacement
parsed_string = ""
parts = self.ansi_escapes.split(in_string) + [" "]
for part, sep in zip(parts[::2], parts[1::2]):
pstring = self.xterm256_sub.sub(do_xterm256, part)
pstring = self.ansi_sub.sub(self.sub_ansi, pstring)
parsed_string += "%s%s" % (pstring, sep[0].strip())
if not mxp:
parsed_string = self.strip_mxp(parsed_string)
if strip_ansi:
# remove all ansi codes (including those manually
# inserted in string)
return self.strip_raw_codes(parsed_string)
# cache and crop old cache
_PARSE_CACHE[cachekey] = parsed_string
if len(_PARSE_CACHE) > _PARSE_CACHE_SIZE:
_PARSE_CACHE.popitem(last=False)
return parsed_string
# Mapping using {r {n etc
hilite = ANSI_HILITE
unhilite = ANSI_UNHILITE
ext_ansi_map = [
(r'{n', ANSI_NORMAL), # reset
(r'{/', ANSI_RETURN), # line break
(r'{-', ANSI_TAB), # tab
(r'{_', ANSI_SPACE), # space
(r'{*', ANSI_INVERSE), # invert
(r'{^', ANSI_BLINK), # blinking text (very annoying and not supported by all clients)
(r'{u', ANSI_UNDERLINE), # underline
(r'{r', hilite + ANSI_RED),
(r'{g', hilite + ANSI_GREEN),
(r'{y', hilite + ANSI_YELLOW),
(r'{b', hilite + ANSI_BLUE),
(r'{m', hilite + ANSI_MAGENTA),
(r'{c', hilite + ANSI_CYAN),
(r'{w', hilite + ANSI_WHITE), # pure white
(r'{x', hilite + ANSI_BLACK), # dark grey
(r'{R', unhilite + ANSI_RED),
(r'{G', unhilite + ANSI_GREEN),
(r'{Y', unhilite + ANSI_YELLOW),
(r'{B', unhilite + ANSI_BLUE),
(r'{M', unhilite + ANSI_MAGENTA),
(r'{C', unhilite + ANSI_CYAN),
(r'{W', unhilite + ANSI_WHITE), # light grey
(r'{X', unhilite + ANSI_BLACK), # pure black
# hilight-able colors
(r'{h', hilite),
(r'{H', unhilite),
(r'{!R', ANSI_RED),
(r'{!G', ANSI_GREEN),
(r'{!Y', ANSI_YELLOW),
(r'{!B', ANSI_BLUE),
(r'{!M', ANSI_MAGENTA),
(r'{!C', ANSI_CYAN),
(r'{!W', ANSI_WHITE), # light grey
(r'{!X', ANSI_BLACK), # pure black
# normal ANSI backgrounds
(r'{[R', ANSI_BACK_RED),
(r'{[G', ANSI_BACK_GREEN),
(r'{[Y', ANSI_BACK_YELLOW),
(r'{[B', ANSI_BACK_BLUE),
(r'{[M', ANSI_BACK_MAGENTA),
(r'{[C', ANSI_BACK_CYAN),
(r'{[W', ANSI_BACK_WHITE), # light grey background
(r'{[X', ANSI_BACK_BLACK), # pure black background
# alternative |-format
(r'|n', ANSI_NORMAL), # reset
(r'|/', ANSI_RETURN), # line break
(r'|-', ANSI_TAB), # tab
(r'|_', ANSI_SPACE), # space
(r'|*', ANSI_INVERSE), # invert
(r'|^', ANSI_BLINK), # blinking text (very annoying and not supported by all clients)
(r'|u', ANSI_UNDERLINE), # underline
(r'|r', hilite + ANSI_RED),
(r'|g', hilite + ANSI_GREEN),
(r'|y', hilite + ANSI_YELLOW),
(r'|b', hilite + ANSI_BLUE),
(r'|m', hilite + ANSI_MAGENTA),
(r'|c', hilite + ANSI_CYAN),
(r'|w', hilite + ANSI_WHITE), # pure white
(r'|x', hilite + ANSI_BLACK), # dark grey
(r'|R', unhilite + ANSI_RED),
(r'|G', unhilite + ANSI_GREEN),
(r'|Y', unhilite + ANSI_YELLOW),
(r'|B', unhilite + ANSI_BLUE),
(r'|M', unhilite + ANSI_MAGENTA),
(r'|C', unhilite + ANSI_CYAN),
(r'|W', unhilite + ANSI_WHITE), # light grey
(r'|X', unhilite + ANSI_BLACK), # pure black
# hilight-able colors
(r'|h', hilite),
(r'|H', unhilite),
(r'|!R', ANSI_RED),
(r'|!G', ANSI_GREEN),
(r'|!Y', ANSI_YELLOW),
(r'|!B', ANSI_BLUE),
(r'|!M', ANSI_MAGENTA),
(r'|!C', ANSI_CYAN),
(r'|!W', ANSI_WHITE), # light grey
(r'|!X', ANSI_BLACK), # pure black
# normal ANSI backgrounds
(r'|[R', ANSI_BACK_RED),
(r'|[G', ANSI_BACK_GREEN),
(r'|[Y', ANSI_BACK_YELLOW),
(r'|[B', ANSI_BACK_BLUE),
(r'|[M', ANSI_BACK_MAGENTA),
(r'|[C', ANSI_BACK_CYAN),
(r'|[W', ANSI_BACK_WHITE), # light grey background
(r'|[X', ANSI_BACK_BLACK) # pure black background
]
ext_ansi_map += settings.COLOR_ANSI_EXTRA_MAP
ansi_bright_bgs = [
# "bright" ANSI backgrounds using xterm256 since ANSI
# standard does not support it (will
# fallback to dark ANSI background colors if xterm256
# is not supported by client)
(r'{[r', r'{[500'),
(r'{[g', r'{[050'),
(r'{[y', r'{[550'),
(r'{[b', r'{[005'),
(r'{[m', r'{[505'),
(r'{[c', r'{[055'),
(r'{[w', r'{[555'), # white background
(r'{[x', r'{[222'), # dark grey background
# |-style variations
(r'|[r', r'|[500'),
(r'|[g', r'|[050'),
(r'|[y', r'|[550'),
(r'|[b', r'|[005'),
(r'|[m', r'|[505'),
(r'|[c', r'|[055'),
(r'|[w', r'|[555'), # white background
(r'|[x', r'|[222')] # dark grey background
# xterm256 {123, %c134. These are replaced directly by
# the sub_xterm256 method
xterm256_map = [
(r'\{[0-5]{3}', ""), # {123 - foreground colour
(r'\{\[[0-5]{3}', ""), # {[123 - background colour
# |-style
(r'\|[0-5]{3}', ""), # |123 - foreground colour
(r'\|\[[0-5]{3}', ""), # |[123 - background colour
# grayscale entries including ansi extremes: {=a .. {=z
(r'\{=[a-z]', ""),
(r'\{\[=[a-z]', ""),
(r'\|=[a-z]', ""),
(r'\|\[=[a-z]', ""),
]
mxp_re = r'\|lc(.*?)\|lt(.*?)\|le'
# prepare regex matching
brightbg_sub = re.compile(r"|".join([r"(?<!\|)%s" % re.escape(tup[0]) for tup in ansi_bright_bgs]), re.DOTALL)
xterm256_sub = re.compile(r"|".join([tup[0] for tup in xterm256_map]), re.DOTALL)
ansi_sub = re.compile(r"|".join([re.escape(tup[0]) for tup in ext_ansi_map]), re.DOTALL)
mxp_sub = re.compile(mxp_re, re.DOTALL)
# used by regex replacer to correctly map ansi sequences
ansi_map = dict(ext_ansi_map)
ansi_bright_bgs_map = dict(ansi_bright_bgs)
# prepare matching ansi codes overall
ansi_re = r"\033\[[0-9;]+m"
ansi_regex = re.compile(ansi_re)
# escapes - these double-chars will be replaced with a single
# instance of each
ansi_escapes = re.compile(r"(%s)" % "|".join(ANSI_ESCAPES), re.DOTALL)
ANSI_PARSER = ANSIParser()
#
# Access function
#
def parse_ansi(string, strip_ansi=False, parser=ANSI_PARSER, xterm256=False, mxp=False):
"""
Parses a string, subbing color codes as needed.
Args:
string (str): The string to parse.
strip_ansi (bool, optional): Strip all ANSI sequences.
parser (ansi.AnsiParser, optional): A parser instance to use.
xterm256 (bool, optional): Support xterm256 or not.
mxp (bool, optional): Support MXP markup or not.
Returns:
string (str): The parsed string.
"""
return parser.parse_ansi(string, strip_ansi=strip_ansi, xterm256=xterm256, mxp=mxp)
def strip_ansi(string, parser=ANSI_PARSER):
"""
Strip all ansi from the string. This handles the Evennia-specific
markup.
Args:
string (str): The string to strip.
parser (ansi.AnsiParser, optional): The parser to use.
Returns:
string (str): The stripped string.
"""
return parser.parse_ansi(string, strip_ansi=True)
def strip_raw_ansi(string, parser=ANSI_PARSER):
"""
Remove raw ansi codes from string. This assumes pure
ANSI-bytecodes in the string.
Args:
string (str): The string to parse.
parser (bool, optional): The parser to use.
Returns:
string (str): the stripped string.
"""
return parser.strip_raw_codes(string)
def raw(string):
"""
Escapes a string into a form which won't be colorized by the ansi
parser.
Returns:
string (str): The raw, escaped string.
"""
return string.replace('{', '{{').replace('|', '||')
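# Usage sketch (illustrative; exact escape bytes depend on the active ansi map):
#   parse_ansi("|rAlert|n")  ->  "\x1b[1m\x1b[31mAlert\x1b[0m"
#   strip_ansi("|rAlert|n")  ->  "Alert"
#   raw("50|50")             ->  "50||50"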
def group(lst, n):
for i in range(0, len(lst), n):
val = lst[i:i+n]
if len(val) == n:
yield tuple(val)
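# Example (illustrative): list(group([1, 2, 3, 4, 5], 2)) -> [(1, 2), (3, 4)]
# (a trailing partial group is dropped)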
def _spacing_preflight(func):
"""
This wrapper function is used to do some preflight checks on
functions used for padding ANSIStrings.
"""
def wrapped(self, width, fillchar=None):
if fillchar is None:
fillchar = " "
if (len(fillchar) != 1) or (not isinstance(fillchar, basestring)):
raise TypeError("must be char, not %s" % type(fillchar))
if not isinstance(width, int):
raise TypeError("integer argument expected, got %s" % type(width))
difference = width - len(self)
if difference <= 0:
return self
return func(self, width, fillchar, difference)
return wrapped
def _query_super(func_name):
"""
Have the string class handle this with the cleaned string instead
of ANSIString.
"""
def wrapped(self, *args, **kwargs):
return getattr(self.clean(), func_name)(*args, **kwargs)
return wrapped
def _on_raw(func_name):
"""
Like query_super, but makes the operation run on the raw string.
"""
def wrapped(self, *args, **kwargs):
args = list(args)
try:
string = args.pop(0)
if hasattr(string, '_raw_string'):
args.insert(0, string.raw())
else:
args.insert(0, string)
except IndexError:
# just skip out if there are no more strings
pass
result = getattr(self._raw_string, func_name)(*args, **kwargs)
if isinstance(result, basestring):
return ANSIString(result, decoded=True)
return result
return wrapped
def _transform(func_name):
"""
Some string functions, like those manipulating capital letters,
return a string the same length as the original. This function
allows us to do the same, replacing all the non-coded characters
with the resulting string.
"""
def wrapped(self, *args, **kwargs):
replacement_string = _query_super(func_name)(self, *args, **kwargs)
to_string = []
char_counter = 0
for index in range(0, len(self._raw_string)):
if index in self._code_indexes:
to_string.append(self._raw_string[index])
elif index in self._char_indexes:
to_string.append(replacement_string[char_counter])
char_counter += 1
return ANSIString(
''.join(to_string), decoded=True,
code_indexes=self._code_indexes, char_indexes=self._char_indexes,
clean_string=replacement_string)
return wrapped
class ANSIMeta(type):
"""
Many functions on ANSIString are just light wrappers around the unicode
base class. We apply them here, as part of the class's construction.
"""
def __init__(cls, *args, **kwargs):
for func_name in [
'count', 'startswith', 'endswith', 'find', 'index', 'isalnum',
'isalpha', 'isdigit', 'islower', 'isspace', 'istitle', 'isupper',
'rfind', 'rindex', '__len__']:
setattr(cls, func_name, _query_super(func_name))
for func_name in [
'__mod__', 'expandtabs', 'decode', 'replace', 'format',
'encode']:
setattr(cls, func_name, _on_raw(func_name))
for func_name in [
'capitalize', 'translate', 'lower', 'upper', 'swapcase']:
setattr(cls, func_name, _transform(func_name))
super(ANSIMeta, cls).__init__(*args, **kwargs)
class ANSIString(with_metaclass(ANSIMeta, unicode)):
"""
String-like object that is aware of ANSI codes.
This isn't especially efficient, as it doesn't really have an
understanding of what the codes mean in order to eliminate
redundant characters. This could be made as an enhancement to ANSI_PARSER.
If one is going to use ANSIString, one should generally avoid converting
away from it until one is about to send information on the wire. This is
because escape sequences in the string may otherwise already be decoded,
and taken literally the second time around.
Please refer to the Metaclass, ANSIMeta, which is used to apply wrappers
for several of the methods that need not be defined directly here.
"""
def __new__(cls, *args, **kwargs):
"""
When creating a new ANSIString, you may use a custom parser that has
the same attributes as the standard one, and you may declare the
string to be handled as already decoded. It is important not to double
decode strings, as escapes can only be respected once.
Internally, ANSIString can also pass itself precached code/character
indexes and clean strings to avoid doing extra work when combining
ANSIStrings.
"""
string = args[0]
if not isinstance(string, basestring):
string = to_str(string, force_string=True)
parser = kwargs.get('parser', ANSI_PARSER)
decoded = kwargs.get('decoded', False) or hasattr(string, '_raw_string')
code_indexes = kwargs.pop('code_indexes', None)
char_indexes = kwargs.pop('char_indexes', None)
clean_string = kwargs.pop('clean_string', None)
# All True, or All False, not just one.
checks = [x is None for x in [code_indexes, char_indexes, clean_string]]
if not len(set(checks)) == 1:
raise ValueError("You must specify code_indexes, char_indexes, "
"and clean_string together, or not at all.")
if not all(checks):
decoded = True
if not decoded:
# Completely new ANSI String
clean_string = to_unicode(parser.parse_ansi(string, strip_ansi=True, mxp=True))
string = parser.parse_ansi(string, xterm256=True, mxp=True)
elif clean_string is not None:
# We have an explicit clean string.
pass
elif hasattr(string, '_clean_string'):
# It's already an ANSIString
clean_string = string._clean_string
code_indexes = string._code_indexes
char_indexes = string._char_indexes
string = string._raw_string
else:
# It's a string that has been pre-ansi decoded.
clean_string = parser.strip_raw_codes(string)
if not isinstance(string, unicode):
string = string.decode('utf-8')
ansi_string = super(ANSIString, cls).__new__(ANSIString, to_str(clean_string), "utf-8")
ansi_string._raw_string = string
ansi_string._clean_string = clean_string
ansi_string._code_indexes = code_indexes
ansi_string._char_indexes = char_indexes
return ansi_string
def __str__(self):
return self._raw_string.encode('utf-8')
def __unicode__(self):
"""
Unfortunately, this is not called during print() statements
due to a bug in the Python interpreter. You can always do
unicode() or str() around the resulting ANSIString and print
that.
"""
return self._raw_string
def __repr__(self):
"""
Let's make the repr the command that would actually be used to
construct this object, for convenience and reference.
"""
return "ANSIString(%s, decoded=True)" % repr(self._raw_string)
def __init__(self, *_, **kwargs):
"""
When the ANSIString is first initialized, a few internal variables
have to be set.
The first is the parser. It is possible to replace Evennia's standard
ANSI parser with one of your own syntax if you wish, so long as it
implements the same interface.
The second is the _raw_string. It should be noted that the ANSIStrings
are unicode based. This seemed more reasonable than basing it off of
the string class, because if someone were to use a unicode character,
the benefits of knowing the indexes of the ANSI characters would be
negated by the fact that a character within the string might require
more than one byte to be represented. The raw string is, then, a
unicode object rather than a true encoded string. If you need the
encoded string for sending over the wire, try using the .encode()
method.
The third thing to set is the _clean_string. This is a unicode object
that is devoid of all ANSI Escapes.
Finally, _code_indexes and _char_indexes are defined. These are lookup
tables for which characters in the raw string are related to ANSI
escapes, and which are for the readable text.
"""
self.parser = kwargs.pop('parser', ANSI_PARSER)
super(ANSIString, self).__init__()
if self._code_indexes is None:
self._code_indexes, self._char_indexes = self._get_indexes()
@staticmethod
def _shifter(iterable, offset):
"""
Takes a list of integers, and produces a new one incrementing all
by a number.
"""
return [i + offset for i in iterable]
@classmethod
def _adder(cls, first, second):
"""
Joins two ANSIStrings, preserving calculated info.
"""
raw_string = first._raw_string + second._raw_string
clean_string = first._clean_string + second._clean_string
code_indexes = first._code_indexes[:]
char_indexes = first._char_indexes[:]
code_indexes.extend(
cls._shifter(second._code_indexes, len(first._raw_string)))
char_indexes.extend(
cls._shifter(second._char_indexes, len(first._raw_string)))
return ANSIString(raw_string, code_indexes=code_indexes,
char_indexes=char_indexes,
clean_string=clean_string)
def __add__(self, other):
"""
We have to be careful when adding two strings not to reprocess things
that don't need to be reprocessed, lest we end up with escapes being
interpreted literally.
"""
if not isinstance(other, basestring):
return NotImplemented
if not isinstance(other, ANSIString):
other = ANSIString(other)
return self._adder(self, other)
def __radd__(self, other):
"""
Likewise, if we're on the other end.
"""
if not isinstance(other, basestring):
return NotImplemented
if not isinstance(other, ANSIString):
other = ANSIString(other)
return self._adder(other, self)
def __getslice__(self, i, j):
"""
This function is deprecated, so we just make it call the proper
function.
"""
return self.__getitem__(slice(i, j))
def _slice(self, slc):
"""
This function takes a slice() object.
Slices have to be handled specially. Not only are they able to specify
a start and end with [x:y], but many forget that they can also specify
an interval with [x:y:z]. As a result, not only do we have to track
the ANSI Escapes that have played before the start of the slice, we
must also replay any in these intervals, should they exist.
Thankfully, slicing the _char_indexes table gives us the actual
indexes that need slicing in the raw string. We can check between
those indexes to figure out what escape characters need to be
replayed.
"""
slice_indexes = self._char_indexes[slc]
# If it's the end of the string, we need to append final color codes.
if not slice_indexes:
return ANSIString('')
try:
string = self[slc.start]._raw_string
except IndexError:
return ANSIString('')
last_mark = slice_indexes[0]
# Check between the slice intervals for escape sequences.
i = None
for i in slice_indexes[1:]:
for index in range(last_mark, i):
if index in self._code_indexes:
string += self._raw_string[index]
last_mark = i
try:
string += self._raw_string[i]
except IndexError:
# raw_string not long enough
pass
if i is not None:
append_tail = self._get_interleving(self._char_indexes.index(i) + 1)
else:
append_tail = ''
return ANSIString(string + append_tail, decoded=True)
def __getitem__(self, item):
"""
Gateway for slices and getting specific indexes in the ANSIString. If
this is a regexable ANSIString, it will get the data from the raw
string instead, bypassing ANSIString's intelligent escape skipping,
for reasons explained in the __new__ method's docstring.
"""
if isinstance(item, slice):
# Slices must be handled specially.
return self._slice(item)
try:
self._char_indexes[item]
except IndexError:
raise IndexError("ANSIString Index out of range")
# Get character codes after the index as well.
if self._char_indexes[-1] == self._char_indexes[item]:
append_tail = self._get_interleving(item + 1)
else:
append_tail = ''
item = self._char_indexes[item]
clean = self._raw_string[item]
result = ''
# Get the character they're after, and replay all escape sequences
# previous to it.
for index in range(0, item + 1):
if index in self._code_indexes:
result += self._raw_string[index]
return ANSIString(result + clean + append_tail, decoded=True)
def clean(self):
"""
Return a unicode object without the ANSI escapes.
"""
return self._clean_string
def raw(self):
"""
Return a unicode object with the ANSI escapes.
"""
return self._raw_string
def partition(self, sep, reverse=False):
"""
Similar to split, but always creates a tuple with three items:
1. The part before the separator
2. The separator itself.
3. The part after.
We use the same techniques we used in split() to make sure each are
colored.
"""
if hasattr(sep, '_clean_string'):
sep = sep.clean()
if reverse:
parent_result = self._clean_string.rpartition(sep)
else:
parent_result = self._clean_string.partition(sep)
current_index = 0
result = tuple()
for section in parent_result:
result += (self[current_index:current_index + len(section)],)
current_index += len(section)
return result
def _get_indexes(self):
"""
Two tables need to be made, one which contains the indexes of all
readable characters, and one which contains the indexes of all ANSI
escapes. It's important to remember that ANSI escapes require more
than one character at a time, though no readable character needs more
than one character, since the unicode base class abstracts that away
from us. However, several readable characters can be placed in a row.
We must use regexes here to figure out where all the escape sequences
are hiding in the string. Then we use the ranges of their starts and
ends to create a final, comprehensive list of all indexes which are
dedicated to code, and all dedicated to text.
It's possible that only one of these tables is actually needed, the
other assumed to be what isn't in the first.
"""
code_indexes = []
for match in self.parser.ansi_regex.finditer(self._raw_string):
code_indexes.extend(range(match.start(), match.end()))
if not code_indexes:
# Plain string, no ANSI codes.
return code_indexes, list(range(0, len(self._raw_string)))
# all indexes not occupied by ansi codes are normal characters
char_indexes = [i for i in range(len(self._raw_string)) if i not in code_indexes]
return code_indexes, char_indexes
def _get_interleving(self, index):
"""
Get the code characters from the given slice end to the next
character.
"""
try:
index = self._char_indexes[index - 1]
except IndexError:
return ''
s = ''
while True:
index += 1
if index in self._char_indexes:
break
elif index in self._code_indexes:
s += self._raw_string[index]
else:
break
return s
def __mul__(self, other):
"""
Multiplication method. Implemented for performance reasons.
"""
if not isinstance(other, int):
return NotImplemented
raw_string = self._raw_string * other
clean_string = self._clean_string * other
code_indexes = self._code_indexes[:]
char_indexes = self._char_indexes[:]
# repetition 0 is already covered by the initial index copies above
for i in range(1, other):
code_indexes.extend(
self._shifter(self._code_indexes, i * len(self._raw_string)))
char_indexes.extend(
self._shifter(self._char_indexes, i * len(self._raw_string)))
return ANSIString(
raw_string, code_indexes=code_indexes, char_indexes=char_indexes,
clean_string=clean_string)
def __rmul__(self, other):
return self.__mul__(other)
def split(self, by=None, maxsplit=-1):
"""
Stolen from PyPy's pure Python string implementation, tweaked for
ANSIString.
PyPy is distributed under the MIT licence.
http://opensource.org/licenses/MIT
"""
drop_spaces = by is None
if drop_spaces:
by = " "
bylen = len(by)
if bylen == 0:
raise ValueError("empty separator")
res = []
start = 0
while maxsplit != 0:
next = self._clean_string.find(by, start)
if next < 0:
break
# Get character codes after the index as well.
res.append(self[start:next])
start = next + bylen
maxsplit -= 1 # NB. if it's already < 0, it stays < 0
res.append(self[start:len(self)])
if drop_spaces:
return [part for part in res if part != ""]
return res
def rsplit(self, by=None, maxsplit=-1):
"""
Stolen from PyPy's pure Python string implementation, tweaked for
ANSIString.
PyPy is distributed under the MIT licence.
http://opensource.org/licenses/MIT
"""
res = []
end = len(self)
drop_spaces = by is None
if drop_spaces:
by = " "
bylen = len(by)
if bylen == 0:
raise ValueError("empty separator")
while maxsplit != 0:
next = self._clean_string.rfind(by, 0, end)
if next < 0:
break
# Get character codes after the index as well.
res.append(self[next+bylen:end])
end = next
maxsplit -= 1 # NB. if it's already < 0, it stays < 0
res.append(self[:end])
res.reverse()
if drop_spaces:
return [part for part in res if part != ""]
return res
def strip(self, chars=None):
"""
Strip from both ends, taking ANSI markers into account.
"""
clean = self._clean_string
raw = self._raw_string
# count continuous sequence of chars from left and right
nlen = len(clean)
nlstripped = nlen - len(clean.lstrip(chars))
nrstripped = nlen - len(clean.rstrip(chars))
# within the stripped regions, only retain parts of the raw
# string *not* matching the clean string (these are ansi/mxp tags)
lstripped = ""
ic, ir1 = 0, 0
while nlstripped:
if ic >= nlstripped:
break
elif raw[ir1] != clean[ic]:
lstripped += raw[ir1]
else:
ic += 1
ir1 += 1
rstripped = ""
ic, ir2 = nlen-1, len(raw)-1
while nrstripped:
if nlen - ic > nrstripped:
break
elif raw[ir2] != clean[ic]:
rstripped += raw[ir2]
else:
ic -= 1
ir2 -= 1
rstripped = rstripped[::-1]
return ANSIString(lstripped + raw[ir1:ir2+1] + rstripped)
def lstrip(self, chars=None):
"""
Strip from the left, taking ANSI markers into account.
"""
clean = self._clean_string
raw = self._raw_string
# count continuous sequence of chars from left and right
nlen = len(clean)
nlstripped = nlen - len(clean.lstrip(chars))
# within the stripped regions, only retain parts of the raw
# string *not* matching the clean string (these are ansi/mxp tags)
lstripped = ""
ic, ir1 = 0, 0
while nlstripped:
if ic >= nlstripped:
break
elif raw[ir1] != clean[ic]:
lstripped += raw[ir1]
else:
ic += 1
ir1 += 1
return ANSIString(lstripped + raw[ir1:])
def rstrip(self, chars=None):
"""
Strip from the right, taking ANSI markers into account.
"""
clean = self._clean_string
raw = self._raw_string
nlen = len(clean)
nrstripped = nlen - len(clean.rstrip(chars))
rstripped = ""
ic, ir2 = nlen-1, len(raw)-1
while nrstripped:
if nlen - ic > nrstripped:
break
elif raw[ir2] != clean[ic]:
rstripped += raw[ir2]
else:
ic -= 1
ir2 -= 1
rstripped = rstripped[::-1]
return ANSIString(raw[:ir2+1] + rstripped)
def join(self, iterable):
"""
Joins together strings in an iterable.
"""
result = ANSIString('')
last_item = None
for item in iterable:
if last_item is not None:
result += self._raw_string
if not isinstance(item, ANSIString):
item = ANSIString(item)
result += item
last_item = item
return result
def _filler(self, char, amount):
"""
Generate a line of characters in a more efficient way than just adding
ANSIStrings.
"""
if not isinstance(char, ANSIString):
line = char * amount
# clean_string must be the full filled line, not the single fill character
return ANSIString(
line, code_indexes=[], char_indexes=list(range(0, len(line))),
clean_string=line)
try:
start = char._code_indexes[0]
except IndexError:
start = None
end = char._char_indexes[0]
prefix = char._raw_string[start:end]
postfix = char._raw_string[end + 1:]
line = char._clean_string * amount
code_indexes = [i for i in range(0, len(prefix))]
length = len(prefix) + len(line)
code_indexes.extend([i for i in range(length, length + len(postfix))])
char_indexes = self._shifter(range(0, len(line)), len(prefix))
raw_string = prefix + line + postfix
return ANSIString(
raw_string, clean_string=line, char_indexes=char_indexes,
code_indexes=code_indexes)
@_spacing_preflight
def center(self, width, fillchar, difference):
"""
Center some text with some spaces padding both sides.
"""
remainder = difference % 2
difference //= 2
spacing = self._filler(fillchar, difference)
result = spacing + self + spacing + self._filler(fillchar, remainder)
return result
@_spacing_preflight
def ljust(self, width, fillchar, difference):
"""
Left justify some text.
"""
return self + self._filler(fillchar, difference)
@_spacing_preflight
def rjust(self, width, fillchar, difference):
"""
Right justify some text.
"""
return self._filler(fillchar, difference) + self
|
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import io
import shutil
import traceback
from collections import Counter
from collections import defaultdict
from functools import partial
from itertools import chain
from operator import itemgetter
from os.path import abspath
from os.path import dirname
from os.path import exists
from os.path import join
import re
import attr
import saneyaml
from license_expression import ExpressionError
from license_expression import Licensing
from commoncode.fileutils import copyfile
from commoncode.fileutils import file_base_name
from commoncode.fileutils import file_name
from commoncode.fileutils import resource_iter
from licensedcode import MIN_MATCH_HIGH_LENGTH
from licensedcode import MIN_MATCH_LENGTH
from licensedcode import SMALL_RULE
from licensedcode.tokenize import index_tokenizer
from licensedcode.tokenize import key_phrase_tokenizer
from licensedcode.tokenize import KEY_PHRASE_OPEN
from licensedcode.tokenize import KEY_PHRASE_CLOSE
from licensedcode.spans import Span
from textcode.analysis import numbered_text_lines
"""
Reference License and license Rule structures persisted as a combo of a YAML
data file and one or more text files containing license or notice texts.
"""
# Set to True to print more detailed representations of objects when tracing
TRACE_REPR = False
# these are globals but always side-by-side with the code so do not move them around
data_dir = join(abspath(dirname(__file__)), 'data')
licenses_data_dir = join(data_dir, 'licenses')
rules_data_dir = join(data_dir, 'rules')
FOSS_CATEGORIES = set([
'Copyleft',
'Copyleft Limited',
'Patent License',
'Permissive',
'Public Domain',
])
OTHER_CATEGORIES = set([
'Commercial',
'Proprietary Free',
'Free Restricted',
'Source-available',
'Unstated License',
])
CATEGORIES = FOSS_CATEGORIES | OTHER_CATEGORIES
@attr.s(slots=True)
class License(object):
"""
A license consists of these files, where <key> is the license key:
- <key>.yml : the license data in YAML
- <key>.LICENSE: the license text
A License object is identified by a unique `key` and its data stored in the
`src_dir` directory. Key is a lower-case unique ascii string.
"""
__attrib = partial(attr.ib, repr=False)
# mandatory unique key: lower case ASCII characters, digits, underscore and dots.
key = attr.ib(repr=True)
src_dir = __attrib(default=licenses_data_dir)
# if this is a deprecated license, add also notes explaining why
is_deprecated = __attrib(default=False)
# if this license text is not in English, set this field to a two letter
# ISO 639-1 language code https://en.wikipedia.org/wiki/ISO_639-1
# NOTE: this is not yet supported.
# NOTE: each translation of a license text MUST have a different license key
language = __attrib(default='en')
# commonly used short name, often abbreviated.
short_name = __attrib(default=None)
# full name.
name = __attrib(default=None)
# Permissive, Copyleft, etc
category = __attrib(default=None)
owner = __attrib(default=None)
homepage_url = __attrib(default=None)
notes = __attrib(default=None)
# if this is a license exception, the license key this exception applies to
is_exception = __attrib(default=False)
# if the license falls in the unknown category then this flag should be set to true
is_unknown = __attrib(default=False)
# SPDX key for SPDX licenses
spdx_license_key = __attrib(default=None)
# list of other keys, such as deprecated ones
other_spdx_license_keys = __attrib(default=attr.Factory(list))
# OSI License Key
osi_license_key = __attrib(default=None)
# Various URLs for info
text_urls = __attrib(default=attr.Factory(list))
osi_url = __attrib(default=None)
faq_url = __attrib(default=None)
other_urls = __attrib(default=attr.Factory(list))
# various alternate keys for this license
key_aliases = __attrib(default=attr.Factory(list))
minimum_coverage = __attrib(default=0)
standard_notice = __attrib(default=None)
# lists of copyrights, emails and URLs that can be ignored when detected
# in this license as they are part of the license or rule text itself
ignorable_copyrights = __attrib(default=attr.Factory(list))
ignorable_authors = __attrib(default=attr.Factory(list))
ignorable_holders = __attrib(default=attr.Factory(list))
ignorable_urls = __attrib(default=attr.Factory(list))
ignorable_emails = __attrib(default=attr.Factory(list))
# data file paths and known extensions
data_file = __attrib(default=None)
text_file = __attrib(default=None)
def __attrs_post_init__(self, *args, **kwargs):
if self.src_dir:
self.set_file_paths()
if exists(self.data_file):
self.load()
def set_file_paths(self):
self.data_file = join(self.src_dir, f'{self.key}.yml')
self.text_file = join(self.src_dir, f'{self.key}.LICENSE')
def relocate(self, target_dir, new_key=None):
"""
Return a copy of this License object relocated to a new ``target_dir``
with data and license text files saved to the new ``target_dir``.
Use the ``new_key`` as license key if provided.
"""
if not target_dir:
raise ValueError(
f'Cannot relocate {self.key} License to empty directory '
)
if target_dir == self.src_dir:
raise ValueError(
f'Cannot relocate {self.key} License to its current directory.'
)
if new_key:
key = new_key
else:
key = self.key
newl = License(key=key, src_dir=target_dir)
# copy fields
excluded_fields = ('key', 'src_dir', 'data_file', 'text_file',)
all_fields = attr.fields(self.__class__)
attrs = [f.name for f in all_fields if f.name not in excluded_fields]
for name in attrs:
setattr(newl, name, getattr(self, name))
# save it all to files
if self.text:
copyfile(self.text_file, newl.text_file)
newl.dump()
return newl
def update(self, mapping):
for k, v in mapping.items():
setattr(self, k, v)
def __copy__(self):
oldl = self.to_dict()
newl = License(key=self.key)
newl.update(oldl)
return newl
@property
def text(self):
"""
License text, re-loaded on demand.
"""
return self._read_text(self.text_file)
def to_dict(self):
"""
Return an ordered mapping of license data (excluding texts).
Fields with empty values are not included.
"""
# do not dump false, empties and paths
def dict_fields(attr, value):
if not value:
return False
if attr.name in ('data_file', 'text_file', 'src_dir',):
return False
# default to English
if attr.name == 'language' and value == 'en':
return False
if attr.name == 'minimum_coverage' and value == 100:
return False
return True
data = attr.asdict(self, filter=dict_fields, dict_factory=dict)
cv = data.get('minimum_coverage', 0)
if cv:
data['minimum_coverage'] = as_int(cv)
return data
def dump(self):
"""
Dump a representation of this license as two files:
- <key>.yml : the license data in YAML
- <key>.LICENSE: the license text
"""
def write(location, byte_string):
# we write as binary because rules and licenses texts and data are UTF-8-encoded bytes
with io.open(location, 'wb') as of:
of.write(byte_string)
as_yaml = saneyaml.dump(self.to_dict(), indent=4, encoding='utf-8')
write(self.data_file, as_yaml)
if self.text:
write(self.text_file, self.text.encode('utf-8'))
def load(self):
"""
Populate license data from a YAML file stored in self.src_dir.
Does not load text files yet.
Unknown fields are ignored and not bound to the License object.
"""
try:
with io.open(self.data_file, encoding='utf-8') as f:
data = saneyaml.load(f.read(), allow_duplicate_keys=False)
for k, v in data.items():
if k == 'minimum_coverage':
v = as_int(v)
if k == 'key':
assert self.key == v, (
'The license "key" attribute in the .yml file MUST ' +
'be the same as the base name of this license .LICENSE ' +
'and .yml data files. ' +
f'Yet file name = {self.key} and license key = {v}'
)
setattr(self, k, v)
except Exception as e:
# this is a rare case: fail loudly
print()
print('#############################')
print('INVALID LICENSE YAML FILE:', f'file://{self.data_file}')
print('#############################')
print(e)
print('#############################')
raise e
def _read_text(self, location):
if not exists(location):
text = ''
else:
with io.open(location, encoding='utf-8') as f:
text = f.read()
return text
def spdx_keys(self):
"""
Yield SPDX keys for this license.
"""
if self.spdx_license_key:
yield self.spdx_license_key
for key in self.other_spdx_license_keys:
yield key
@staticmethod
def validate(licenses, verbose=False, no_dupe_urls=False):
"""
Check that ``licenses``, a mapping of {key: License}, are valid.
Return dictionaries of infos, errors and warnings mapping a license key
to validation issue messages. Print messages if ``verbose`` is True.
NOTE: we DO NOT run this validation as part of loading or constructing
License objects. Instead this is invoked ONLY as part of the test suite.
"""
infos = defaultdict(list)
warnings = defaultdict(list)
errors = defaultdict(list)
# used for global dedupe of texts
by_spdx_key = defaultdict(list)
by_text = defaultdict(list)
by_short_name = defaultdict(list)
by_name = defaultdict(list)
for key, lic in licenses.items():
warn = warnings[key].append
info = infos[key].append
error = errors[key].append
by_name[lic.name].append(lic)
by_short_name[lic.short_name].append(lic)
if not lic.short_name:
error('No short name')
elif len(lic.short_name) > 50:
error('short name must be under 50 characters.')
if not lic.name:
error('No name')
if not lic.category:
error('No category')
if lic.category and lic.category not in CATEGORIES:
cats = '\n'.join(sorted(CATEGORIES))
error(
f'Unknown license category: {lic.category}.\n' +
f'Use one of these valid categories:\n{cats}'
)
if not lic.owner:
error('No owner')
if lic.is_unknown:
if not "unknown" in lic.key:
error('is_unknown should not be true')
# URLS dedupe and consistency
if no_dupe_urls:
if lic.text_urls and not all(lic.text_urls):
warn('Some empty text_urls values')
if lic.other_urls and not all(lic.other_urls):
warn('Some empty other_urls values')
# redundant URLs used multiple times
if lic.homepage_url:
if lic.homepage_url in lic.text_urls:
warn('Homepage URL also in text_urls')
if lic.homepage_url in lic.other_urls:
warn('Homepage URL also in other_urls')
if lic.homepage_url == lic.faq_url:
warn('Homepage URL same as faq_url')
if lic.homepage_url == lic.osi_url:
warn('Homepage URL same as osi_url')
if lic.osi_url or lic.faq_url:
if lic.osi_url == lic.faq_url:
warn('osi_url same as faq_url')
all_urls = lic.text_urls + lic.other_urls
for url in lic.osi_url, lic.faq_url, lic.homepage_url:
if url:
all_urls.append(url)
if len(all_urls) != len(set(all_urls)):
warn('Some duplicated URLs')
# local text consistency
text = lic.text
license_qtokens = tuple(index_tokenizer(text))
if not license_qtokens:
info('No license text')
else:
# for global dedupe
by_text[license_qtokens].append(f'{key}: TEXT')
# SPDX consistency
if lic.spdx_license_key:
by_spdx_key[lic.spdx_license_key].append(key)
else:
# SPDX license key is now mandatory
error('No SPDX license key')
for oslk in lic.other_spdx_license_keys:
by_spdx_key[oslk].append(key)
# global SPDX consistency
multiple_spdx_keys_used = {
k: v for k, v in by_spdx_key.items()
if len(v) > 1
}
if multiple_spdx_keys_used:
for k, lkeys in multiple_spdx_keys_used.items():
errors['GLOBAL'].append(
f'SPDX key: {k} used in multiple licenses: ' +
', '.join(sorted(lkeys)))
# global text dedupe
multiple_texts = {k: v for k, v in by_text.items() if len(v) > 1}
if multiple_texts:
for k, msgs in multiple_texts.items():
errors['GLOBAL'].append(
'Duplicate texts in multiple licenses: ' +
', '.join(sorted(msgs))
)
# global short_name dedupe
for short_name, licenses in by_short_name.items():
if len(licenses) == 1:
continue
errors['GLOBAL'].append(
f'Duplicate short name: {short_name} in licenses: ' +
', '.join(l.key for l in licenses)
)
# global name dedupe
for name, licenses in by_name.items():
if len(licenses) == 1:
continue
errors['GLOBAL'].append(
f'Duplicate name: {name} in licenses: ' +
', '.join(l.key for l in licenses)
)
errors = {k: v for k, v in errors.items() if v}
warnings = {k: v for k, v in warnings.items() if v}
infos = {k: v for k, v in infos.items() if v}
if verbose:
print('Licenses validation errors:')
for key, msgs in sorted(errors.items()):
print(f'ERRORS for: {key}:', '\n'.join(msgs))
print('Licenses validation warnings:')
for key, msgs in sorted(warnings.items()):
print(f'WARNINGS for: {key}:', '\n'.join(msgs))
print('Licenses validation infos:')
for key, msgs in sorted(infos.items()):
print(f'INFOS for: {key}:', '\n'.join(msgs))
return errors, warnings, infos
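# Usage sketch (illustrative):
#   errors, warnings, infos = License.validate(load_licenses(), verbose=True)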
def ignore_editor_tmp_files(location):
return location.endswith('.swp')
def load_licenses(licenses_data_dir=licenses_data_dir, with_deprecated=False):
"""
Return a mapping of {key: License} loaded from license data and text files
found in ``licenses_data_dir``. Raise Exceptions if there are dangling or
orphaned files. Optionally include deprecated licenses if ``with_deprecated``
is True.
"""
licenses = {}
used_files = set()
all_files = set(resource_iter(
licenses_data_dir,
ignored=ignore_editor_tmp_files,
with_dirs=False,
follow_symlinks=True,
))
for data_file in sorted(all_files):
if data_file.endswith('.yml'):
key = file_base_name(data_file)
lic = License(key=key, src_dir=licenses_data_dir)
used_files.add(data_file)
if exists(lic.text_file):
used_files.add(lic.text_file)
if not with_deprecated and lic.is_deprecated:
continue
licenses[key] = lic
dangling = all_files.difference(used_files)
if dangling:
msg = (
f'Some License files are orphaned in {licenses_data_dir!r}.\n' +
'\n'.join(f'file://{f}' for f in sorted(dangling))
)
raise Exception(msg)
if not licenses:
msg = (
'No licenses were loaded. Check to see if the license data files '
f'are available at "{licenses_data_dir}".'
)
raise Exception(msg)
return licenses
def get_rules(
licenses_db=None,
licenses_data_dir=licenses_data_dir,
rules_data_dir=rules_data_dir
):
"""
Yield Rule objects loaded from a ``licenses_db`` and license files found in
``licenses_data_dir`` and rule files found in ``rules_data_dir``. Raise an
Exception if a rule is inconsistent or incorrect.
"""
licenses_db = licenses_db or load_licenses(licenses_data_dir=licenses_data_dir)
rules = list(load_rules(rules_data_dir=rules_data_dir))
validate_rules(rules, licenses_db)
licenses_as_rules = build_rules_from_licenses(licenses_db)
return chain(licenses_as_rules, rules)
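# Usage sketch (illustrative):
#   all_rules = list(get_rules())  # license texts as rules + standalone rules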
class InvalidRule(Exception):
pass
def _validate_all_rules(rules, licenses_by_key):
"""
Return a mapping of {error message: [list of Rule]} from validating a list
of ``rules`` Rule integrity and correctness using known licenses from a
mapping of ``licenses_by_key`` {key: License}.
"""
licensing = Licensing(licenses_by_key.values())
errors = defaultdict(list)
for rule in rules:
for err_msg in rule.validate(licensing):
errors[err_msg].append(rule)
return errors
def validate_rules(rules, licenses_by_key, with_text=False):
"""
Return a mapping of {error message: [list of Rule]} from validating a list
of ``rules`` Rule integrity and correctness using known licenses from a
mapping of ``licenses_by_key`` {key: License}.
"""
errors = _validate_all_rules(rules, licenses_by_key)
if errors:
message = ['Errors while validating rules:']
for msg, rules in errors.items():
message.append('')
message.append(msg)
for rule in rules:
message.append(f' {rule!r}')
if rule.text_file:
message.append(f' file://{rule.text_file}')
if rule.data_file:
message.append(f' file://{rule.data_file}')
if with_text:
txt = rule.text()[:100].strip()
message.append(f' {txt}...')
raise InvalidRule('\n'.join(message))
def build_rules_from_licenses(licenses):
"""
Return an iterable of rules built from each license text from a ``licenses``
iterable of License objects.
"""
for license_key, license_obj in licenses.items():
text_file = join(license_obj.src_dir, license_obj.text_file)
if exists(text_file):
minimum_coverage = license_obj.minimum_coverage or 0
yield Rule(
text_file=text_file,
license_expression=license_key,
# a license text is always 100% relevant
has_stored_relevance=True,
relevance=100,
has_stored_minimum_coverage=bool(minimum_coverage),
minimum_coverage=minimum_coverage,
is_from_license=True,
is_license_text=True,
ignorable_copyrights=license_obj.ignorable_copyrights,
ignorable_holders=license_obj.ignorable_holders,
ignorable_authors=license_obj.ignorable_authors,
ignorable_urls=license_obj.ignorable_urls,
ignorable_emails=license_obj.ignorable_emails,
)
def get_all_spdx_keys(licenses_db):
"""
Return an iterable of SPDX license keys collected from a `licenses_db`
mapping of {key: License} objects.
"""
for lic in licenses_db.values():
for spdx_key in lic.spdx_keys():
yield spdx_key
def get_essential_spdx_tokens():
"""
Yield essential SPDX tokens.
"""
yield 'spdx'
yield 'license'
yield 'licence'
yield 'identifier'
yield 'licenseref'
def get_all_spdx_key_tokens(licenses_db):
"""
Yield SPDX token strings collected from a ``licenses_db`` mapping of {key:
License} objects.
"""
for tok in get_essential_spdx_tokens():
yield tok
for spdx_key in get_all_spdx_keys(licenses_db):
for token in index_tokenizer(spdx_key):
yield token
def load_rules(rules_data_dir=rules_data_dir):
"""
Return an iterable of rules loaded from rule files in ``rules_data_dir``.
"""
# TODO: OPTIMIZE: create a graph of rules to account for containment and
# similarity clusters?
seen_files = set()
processed_files = set()
lower_case_files = set()
case_problems = set()
space_problems = []
model_errors = []
for data_file in resource_iter(rules_data_dir, with_dirs=False):
if data_file.endswith('.yml'):
base_name = file_base_name(data_file)
if ' ' in base_name:
space_problems.append(data_file)
rule_file = join(rules_data_dir, f'{base_name}.RULE')
try:
rule = Rule(data_file=data_file, text_file=rule_file)
yield rule
except Exception as re:
model_errors.append(str(re))
# accumulate sets to ensure we do not have illegal names or extra
# orphaned files
data_lower = data_file.lower()
if data_lower in lower_case_files:
case_problems.add(data_lower)
else:
lower_case_files.add(data_lower)
rule_lower = rule_file.lower()
if rule_lower in lower_case_files:
case_problems.add(rule_lower)
else:
lower_case_files.add(rule_lower)
processed_files.update([data_file, rule_file])
if not data_file.endswith('~'):
seen_files.add(data_file)
unknown_files = seen_files - processed_files
if unknown_files or case_problems or model_errors or space_problems:
msg = ''
if model_errors:
errors = '\n'.join(model_errors)
msg += (
'\nInvalid rule YAML file in directory: '
f'{rules_data_dir!r}\n{errors}'
)
if unknown_files:
files = '\n'.join(sorted(f'f"ile://{f}"' for f in unknown_files))
msg += (
'\nOrphaned files in rule directory: '
f'{rules_data_dir!r}\n{files}'
)
if case_problems:
files = '\n'.join(sorted(f'"file://{f}"' for f in case_problems))
msg += (
'\nRule files with non-unique name in rule directory: '
f'{rules_data_dir!r}\n{files}'
)
if space_problems:
files = '\n'.join(sorted(f'"file://{f}"' for f in space_problems))
msg += (
'\nRule filename cannot contain spaces: '
f'{rules_data_dir!r}\n{files}'
)
raise InvalidRule(msg)
@attr.s(slots=True)
class BasicRule(object):
"""
A detection rule object is a text to use for detection and corresponding
detected licenses and metadata. This is a basic metadata object that does
not have specific support for data and text files.
"""
licensing = Licensing()
###########
# FIXME: !!! TWO RULES MAY DIFFER BECAUSE THEY ARE UPDATED BY INDEXING
###########
# optional rule id int typically assigned at indexing time
rid = attr.ib(default=None, repr=TRACE_REPR)
# unique identifier
identifier = attr.ib(default=None)
# License expression string
license_expression = attr.ib(default=None)
# License expression object, created at build time
license_expression_object = attr.ib(default=None, repr=False)
# An indication of what this rule importance is (e.g. how important is its
# text when detected as a licensing clue) as one of several "is_license_xxx"
# flags. These flags are mutually exclusive and a license can only have one
# of these as "yes"/True.
# A license full text: this provides the highest level of confidence wrt
# detection
is_license_text = attr.ib(default=False, repr=False)
# A license notice: this provides a strong confidence wrt detection
is_license_notice = attr.ib(default=False, repr=False)
# A reference is for a mere short license reference such as its bare name or
# a URL that provides a weaker clue when detected
is_license_reference = attr.ib(default=False, repr=False)
# A tag is for a structured licensing tag such as a package manifest
# metadata or an SPDX license identifier or similar package manifest tags. A
# tag provides a strong clue with high confidence when detected even though
# it may be very short.
is_license_tag = attr.ib(default=False, repr=False)
    # An intro is a short introductory statement that may be placed before an
    # actual license text, notice or reference. For instance "This file is
    # licensed under ...". An intro is a weak clue that there is some license
# afterwards. It should be ignored or merged with the following license
# detected immediately after.
is_license_intro = attr.ib(default=False, repr=False)
    # Is this rule text a false positive when matched exactly? If yes, it will
    # be filtered out at the end if matched (unless it is part of a larger match)
is_false_positive = attr.ib(default=False, repr=False)
# Is this rule text only to be matched with a minimum coverage e.g. a
# minimum proportion of tokens as a float between 0 and 100 where 100 means
    # all tokens must be matched and a smaller value means a smaller proportion
    # of matched tokens is acceptable. This is computed unless it is provided
    # here.
minimum_coverage = attr.ib(default=0)
has_stored_minimum_coverage = attr.ib(default=False, repr=False)
# same as minimum_coverage but divided/100
_minimum_containment = attr.ib(default=0, repr=False)
# Can this rule be matched if there are unknown words in its matched range?
# The default is to allow known and unknown words. Unknown words are words
# that do not exist in the text of any indexed license or license detection
# rule.
only_known_words = attr.ib(default=False)
# what is the relevance of a match to this rule text? a float between 0 and
    # 100 where 100 means highly relevant and 0 means not relevant at all.
# For instance a match to the "gpl" or the "cpol" words have a fairly low
# relevance as they are a weak indication of an actual license and could be
    # a false positive. In some cases, this may even be used to discard obvious
# false positive matches automatically.
relevance = attr.ib(default=100)
has_stored_relevance = attr.ib(default=False, repr=False)
    # The rule contains a reference to some file name that contains the text
referenced_filenames = attr.ib(default=attr.Factory(list), repr=False)
# optional, free text
notes = attr.ib(default=None, repr=False)
# set to True if the rule is built from a .LICENSE full text
is_from_license = attr.ib(default=False, repr=False)
    # lists of copyrights, emails and URLs that can be ignored when detected
# in this license as they are part of the license or rule text itself
ignorable_copyrights = attr.ib(default=attr.Factory(list), repr=False)
ignorable_holders = attr.ib(default=attr.Factory(list), repr=False)
ignorable_authors = attr.ib(default=attr.Factory(list), repr=False)
ignorable_urls = attr.ib(default=attr.Factory(list), repr=False)
ignorable_emails = attr.ib(default=attr.Factory(list), repr=False)
###########################################################################
# path to the YAML data file for this rule
data_file = attr.ib(default=None, repr=False)
# path to the rule text file
text_file = attr.ib(default=None, repr=False)
# text of this rule for special cases where the rule is not backed by a file:
# for SPDX license expression dynamic rules or testing
stored_text = attr.ib(default=None, repr=False)
    # Spans of token positions that must be present in the license match for
    # this rule to be considered a valid match
key_phrase_spans = attr.ib(default=attr.Factory(list), repr=False)
# These attributes are computed upon text loading or setting the thresholds
###########################################################################
# lengths in tokens
length = attr.ib(default=0)
min_matched_length = attr.ib(default=0, repr=TRACE_REPR)
high_length = attr.ib(default=0, repr=TRACE_REPR)
min_high_matched_length = attr.ib(default=0, repr=TRACE_REPR)
    # lengths in unique tokens.
length_unique = attr.ib(default=0, repr=TRACE_REPR)
min_matched_length_unique = attr.ib(default=0, repr=TRACE_REPR)
high_length_unique = attr.ib(default=0, repr=TRACE_REPR)
min_high_matched_length_unique = attr.ib(default=0, repr=TRACE_REPR)
is_small = attr.ib(default=False, repr=TRACE_REPR)
has_computed_thresholds = attr.ib(default=False, repr=False)
def __attrs_post_init__(self, *args, **kwargs):
self.setup()
def setup(self):
"""
Setup a few basic computed attributes after instance creation.
"""
self.relevance = as_int(float(self.relevance or 100))
self.minimum_coverage = as_int(float(self.minimum_coverage or 0))
if self.license_expression:
try:
expression = self.licensing.parse(self.license_expression)
            except Exception:
exp = self.license_expression
trace = traceback.format_exc()
raise InvalidRule(
f'Unable to parse rule License expression: {exp!r} '
f'for: file://{self.data_file}\n{trace}'
)
if expression is None:
raise InvalidRule(
f'Invalid rule License expression parsed to empty: '
f'{self.license_expression!r} for: file://{self.data_file}'
)
self.license_expression = expression.render()
self.license_expression_object = expression
@property
def has_unknown(self):
"""
        Return True if any of this rule's licenses is an unknown license.
"""
# TODO: consider using the license_expression_object and the is_unknown
# license flag instead
return self.license_expression and 'unknown' in self.license_expression
def validate(self, licensing=None):
"""
Validate this rule using the provided ``licensing`` Licensing and yield
one error message for each type of error detected.
"""
is_false_positive = self.is_false_positive
license_flags = (
self.is_license_notice,
self.is_license_text,
self.is_license_reference,
self.is_license_tag,
self.is_license_intro,
)
has_license_flags = any(license_flags)
has_many_license_flags = len([l for l in license_flags if l]) != 1
license_expression = self.license_expression
ignorables = (
self.ignorable_copyrights,
self.ignorable_holders,
self.ignorable_authors,
self.ignorable_urls,
self.ignorable_emails,
)
if is_false_positive:
if not self.notes:
yield 'is_false_positive rule must have notes.'
if has_license_flags:
yield 'is_false_positive rule cannot have is_license_* flags.'
if license_expression:
yield 'is_false_positive rule cannot have a license_expression.'
if self.has_stored_relevance:
yield 'is_false_positive rule cannot have a stored relevance.'
if self.referenced_filenames:
yield 'is_false_positive rule cannot have referenced_filenames.'
if any(ignorables):
yield 'is_false_positive rule cannot have ignorable_* attributes.'
if not (0 <= self.minimum_coverage <= 100):
yield 'Invalid rule minimum_coverage. Should be between 0 and 100.'
if not is_false_positive:
if not (0 <= self.relevance <= 100):
yield 'Invalid rule relevance. Should be between 0 and 100.'
if has_many_license_flags:
yield 'Invalid rule is_license_* flags. Only one allowed.'
if not has_license_flags:
yield 'At least one is_license_* flag is needed.'
if not check_is_list_of_strings(self.referenced_filenames):
yield 'referenced_filenames must be a list of strings'
if not all(check_is_list_of_strings(i) for i in ignorables):
yield 'ignorables must be a list of strings'
if not license_expression:
yield 'Missing license_expression.'
else:
if licensing:
try:
licensing.parse(license_expression, validate=True, simple=True)
except ExpressionError as e:
yield f'Failed to parse and validate license_expression: {license_expression} with error: {e}'
if self.referenced_filenames:
if len(set(self.referenced_filenames)) != len(self.referenced_filenames):
yield 'referenced_filenames cannot contain duplicates.'
def license_keys(self, unique=True):
"""
Return a list of license keys for this rule.
"""
if not self.license_expression:
return []
return self.licensing.license_keys(
self.license_expression_object,
unique=unique,
)
def same_licensing(self, other):
"""
Return True if the other rule has the same licensing as this rule.
"""
if self.license_expression and other.license_expression:
return self.licensing.is_equivalent(
self.license_expression_object,
other.license_expression_object,
)
def licensing_contains(self, other):
"""
Return True if this rule licensing contains the other rule licensing.
"""
if self.license_expression and other.license_expression:
return self.licensing.contains(
self.license_expression_object,
other.license_expression_object,
)
def spdx_license_expression(self, licensing=None):
if not licensing:
from licensedcode.cache import get_licensing
licensing = get_licensing()
parsed = licensing.parse(self.license_expression)
return parsed.render(template='{symbol.spdx_license_key}')
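    # Example sketch (hypothetical license keys): a rule expression such as
    # 'apache-2.0 OR mit' renders each symbol through its SPDX key via the
    # '{symbol.spdx_license_key}' template, e.g. 'Apache-2.0 OR MIT'.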
def get_length(self, unique=False):
return self.length_unique if unique else self.length
def get_min_matched_length(self, unique=False):
return (self.min_matched_length_unique if unique
else self.min_matched_length)
def get_high_length(self, unique=False):
return self.high_length_unique if unique else self.high_length
def get_min_high_matched_length(self, unique=False):
return (self.min_high_matched_length_unique if unique
else self.min_high_matched_length)
def to_dict(self):
"""
Return an ordered mapping of self, excluding texts. Used for
serialization. Empty values are not included.
"""
data = {}
is_false_positive = self.is_false_positive
if self.license_expression:
data['license_expression'] = self.license_expression
flags = (
'is_false_positive',
'is_license_text',
'is_license_notice',
'is_license_reference',
'is_license_tag',
'is_license_intro',
'only_known_words',
)
for flag in flags:
tag_value = getattr(self, flag, False)
if tag_value:
data[flag] = tag_value
if self.has_stored_relevance and self.relevance and not is_false_positive:
data['relevance'] = as_int(self.relevance)
if self.has_stored_minimum_coverage and self.minimum_coverage > 0 and not is_false_positive:
data['minimum_coverage'] = as_int(self.minimum_coverage)
if self.referenced_filenames and not is_false_positive:
data['referenced_filenames'] = self.referenced_filenames
if self.notes:
data['notes'] = self.notes
if not is_false_positive:
ignorables = (
'ignorable_copyrights',
'ignorable_holders',
'ignorable_authors',
'ignorable_urls',
'ignorable_emails',
)
for igno in ignorables:
tag_value = getattr(self, igno, False)
if tag_value:
data[igno] = tag_value
return data
def text(self):
"""
Return the rule text loaded from its text file.
"""
if self.text_file and exists(self.text_file):
# IMPORTANT: use the same process as query text loading for symmetry
numbered_lines = numbered_text_lines(
self.text_file,
demarkup=False,
plain_text=True,
)
return ''.join(l for _, l in numbered_lines)
# used for non-file backed rules
elif self.stored_text:
return self.stored_text
else:
raise InvalidRule(
f'Inconsistent rule text for: {self.identifier}\n'
f'file://{self.text_file}'
)
def check_is_list_of_strings(l):
"""
Return True if `l` is a list of strings or an empty list, False otherwise.
"""
if isinstance(l, list):
if l:
return all(isinstance(i, str) for i in l)
return True
return False
def as_int(num):
"""
Convert ``num`` to int if ``num`` is not an int and this would not lead to
loss of information, e.g. when ``num`` is an int stored as a float type.
"""
if isinstance(num, str):
num = float(num)
if isinstance(num, float):
n = int(num)
if n == num:
return n
return num
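# A minimal demo (not part of the original module) pinning down as_int's
# behavior on representative inputs; it is defined but never called here.
def _demo_as_int():
    assert as_int('4.0') == 4   # string parsed to float, then narrowed to int
    assert as_int(4.0) == 4     # float with no fractional part becomes an int
    assert as_int(4.5) == 4.5   # narrowing would lose information: keep float
    assert as_int(7) == 7       # ints pass through unchanged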
@attr.s(slots=True)
class Rule(BasicRule):
"""
A detection rule object with support for data and text files.
"""
def __attrs_post_init__(self, *args, **kwargs):
self.load_data()
self.setup()
def load_data(self):
"""
Load data from data file. Check presence of text file.
"""
if not self.text_file:
# for SPDX or tests only
            if not self.stored_text:
raise InvalidRule(
f'Invalid rule without its corresponding text file: {self}')
self.identifier = '_tst_' + str(len(self.stored_text))
else:
self.identifier = file_name(self.text_file)
if self.data_file:
try:
self.load()
except Exception:
data_file = self.data_file
trace = traceback.format_exc()
raise InvalidRule(f'While loading: file://{data_file}\n{trace}')
def tokens(self):
"""
Return an iterable of token strings for this rule. Length, relevance and
minimum_coverage may be recomputed as a side effect.
"""
length = 0
text = self.text()
text = text.strip()
# We tag this rule as being a bare URL if it starts with a scheme and is
# on one line: this is used to determine a matching approach
if (
text.startswith(('http://', 'https://', 'ftp://'))
and '\n' not in text[:1000]
):
self.minimum_coverage = 100
        for token in index_tokenizer(text):
length += 1
yield token
self.length = length
self.compute_relevance()
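    # Example sketch: a one-line rule text such as
    # 'https://example.com/LICENSE' starts with a URL scheme and contains no
    # newline, so minimum_coverage is forced to 100: bare-URL rules must be
    # matched in full.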
def key_phrases(self):
"""
Return an iterable of Spans marking the positions of key phrases that must
be present for this rule to be a valid match.
"""
yield from get_key_phrases(self.text())
def compute_thresholds(self, small_rule=SMALL_RULE):
"""
Compute and set thresholds either considering the occurrence of all
        tokens or the occurrence of unique tokens.
"""
min_cov, self.min_matched_length, self.min_high_matched_length = (
compute_thresholds_occurences(
self.minimum_coverage,
self.length,
self.high_length,
)
)
if not self.has_stored_minimum_coverage:
self.minimum_coverage = min_cov
self._minimum_containment = self.minimum_coverage / 100
self.min_matched_length_unique, self.min_high_matched_length_unique = (
compute_thresholds_unique(
self.minimum_coverage,
self.length,
self.length_unique, self.high_length_unique,
)
)
self.is_small = self.length < small_rule
def dump(self):
"""
Dump a representation of this rule as two files:
- a .yml for the rule data in YAML (self.data_file)
- a .RULE: the rule text as a UTF-8 file (self.text_file)
Does nothing if this rule was created from a License (e.g.
`is_from_license` is True)
"""
if self.is_from_license:
return
def write(location, byte_string):
# we write as binary because rules and licenses texts and data are
# UTF-8-encoded bytes
with io.open(location, 'wb') as of:
of.write(byte_string)
if self.data_file:
as_yaml = saneyaml.dump(self.to_dict(), indent=4, encoding='utf-8')
write(self.data_file, as_yaml)
write(self.text_file, self.text().encode('utf-8'))
def load(self):
"""
Load self from a .RULE YAML file stored in self.data_file.
Does not load the rule text file.
Unknown fields are ignored and not bound to the Rule object.
"""
try:
with io.open(self.data_file, encoding='utf-8') as f:
data = saneyaml.load(f.read(), allow_duplicate_keys=False)
except Exception as e:
print('#############################')
print('INVALID LICENSE RULE FILE:', f'file://{self.data_file}')
print('#############################')
print(e)
print('#############################')
# this is a rare case, but yes we abruptly stop.
raise e
known_attributes = set(attr.fields_dict(self.__class__))
data_file_attributes = set(data)
unknown_attributes = data_file_attributes.difference(known_attributes)
if unknown_attributes:
unknown_attributes = ', '.join(sorted(unknown_attributes))
msg = 'License rule {} data file has unknown attributes: {}'
raise InvalidRule(msg.format(self, unknown_attributes))
self.license_expression = data.get('license_expression')
self.is_false_positive = data.get('is_false_positive', False)
relevance = as_int(float(data.get('relevance') or 0))
        # Keep track of whether we have a stored relevance or not.
if relevance:
self.relevance = relevance
self.has_stored_relevance = True
else:
self.relevance = 100
self.has_stored_relevance = False
minimum_coverage = as_int(float(data.get('minimum_coverage') or 0))
self._minimum_containment = minimum_coverage / 100
if minimum_coverage:
            # Keep track of whether we have a stored minimum_coverage or not.
self.minimum_coverage = minimum_coverage
self.has_stored_minimum_coverage = True
else:
self.minimum_coverage = 0
self.has_stored_minimum_coverage = False
self.is_license_text = data.get('is_license_text', False)
self.is_license_notice = data.get('is_license_notice', False)
self.is_license_tag = data.get('is_license_tag', False)
self.is_license_reference = data.get('is_license_reference', False)
self.is_license_intro = data.get('is_license_intro', False)
self.only_known_words = data.get('only_known_words', False)
self.referenced_filenames = data.get('referenced_filenames', []) or []
# these are purely informational and not used at run time
notes = data.get('notes')
if notes:
self.notes = notes.strip()
self.ignorable_copyrights = data.get('ignorable_copyrights', [])
self.ignorable_holders = data.get('ignorable_holders', [])
self.ignorable_authors = data.get('ignorable_authors', [])
self.ignorable_urls = data.get('ignorable_urls', [])
self.ignorable_emails = data.get('ignorable_emails', [])
return self
def compute_relevance(self, _threshold=18.0):
"""
Compute and set the `relevance` attribute for this rule. The relevance
is a float between 0 and 100 where 100 means highly relevant and 0 means
not relevant at all.
For instance a match to the "gpl" or the "cpol" words have a fairly low
relevance as they are a weak indication of an actual license and could
be a false positive and should therefore be assigned a low relevance. In
contrast a match to most or all of the apache-2.0 license text is highly
relevant. The Rule relevance is used as the basis to compute a match
score.
The relevance is either pre-defined in the rule YAML data file with the
"relevance" attribute or computed base on the rule length here using
this approach:
- false positive rule has 100 relevance.
        - rule length equal to or larger than the threshold has 100 relevance
        - rule length smaller than the threshold has a relevance of
          length * (100 / threshold), rounded down.
The current threshold is 18 words.
"""
# false positive rules with no license and their matches are never returned
if isinstance(self, SpdxRule) or self.is_false_positive:
# use the default max relevance of 100
self.relevance = 100
self.has_stored_relevance = True
return
relevance_of_one_word = round((1 / _threshold) * 100, 2)
computed = int(self.length * relevance_of_one_word)
computed_relevance = min([100, computed])
if self.has_stored_relevance:
if self.relevance == computed_relevance:
self.has_stored_relevance = False
else:
self.relevance = computed_relevance
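    # Worked example (a sketch, using the default 18-word threshold): a rule
    # of 9 tokens gets relevance int(9 * round(100 / 18, 2)) == int(9 * 5.56)
    # == 50, while any rule of 18 or more tokens saturates at 100.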
def rule_dir(self):
"""
Return the directory of this rule.
"""
if not (self.text_file and self.data_file):
raise Exception(f'Cannot obtain rule directory for: {self!r}')
return dirname(self.data_file)
def rename_and_relocate(self, name_prefix):
"""
Generate a new rule name and relocate the rule files to this new name
using the ``name_prefix`` prefix. The new rule name is guaranteed to be
unique and not conflicting with any existing rule name.
"""
new_base_loc = find_rule_base_location(
name_prefix=name_prefix,
rules_directory=self.rule_dir()
)
new_data_file = f'{new_base_loc}.yml'
shutil.move(self.data_file, new_data_file)
self.data_file = new_data_file
new_text_file = f'{new_base_loc}.RULE'
shutil.move(self.text_file, new_text_file)
self.text_file = new_text_file
def compute_thresholds_occurences(
minimum_coverage,
length,
high_length,
_MIN_MATCH_HIGH_LENGTH=MIN_MATCH_HIGH_LENGTH,
_MIN_MATCH_LENGTH=MIN_MATCH_LENGTH,
):
"""
Compute and return thresholds considering the occurrence of all tokens.
"""
if minimum_coverage == 100:
min_matched_length = length
min_high_matched_length = high_length
return minimum_coverage, min_matched_length, min_high_matched_length
if length < 3:
min_high_matched_length = high_length
min_matched_length = length
minimum_coverage = 100
elif length < 10:
min_matched_length = length
min_high_matched_length = high_length
minimum_coverage = 80
elif length < 30:
min_matched_length = length // 2
min_high_matched_length = min(high_length, _MIN_MATCH_HIGH_LENGTH)
minimum_coverage = 50
elif length < 200:
min_matched_length = _MIN_MATCH_LENGTH
min_high_matched_length = min(high_length, _MIN_MATCH_HIGH_LENGTH)
# minimum_coverage = max(15, int(length//10))
else: # if length >= 200:
min_matched_length = length // 10
min_high_matched_length = high_length // 10
# minimum_coverage = int(length//10)
return minimum_coverage, min_matched_length, min_high_matched_length
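# Worked example (a sketch; MIN_MATCH_HIGH_LENGTH and MIN_MATCH_LENGTH are
# module constants defined elsewhere in this module): a rule with length=25,
# high_length=12 and no stored minimum_coverage falls in the `length < 30`
# branch, returning:
#   (50, 25 // 2, min(12, MIN_MATCH_HIGH_LENGTH)) == (50, 12, ...)
# i.e. at least half of the rule tokens must be matched.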
def compute_thresholds_unique(
minimum_coverage,
length,
length_unique,
high_length_unique,
_MIN_MATCH_HIGH_LENGTH=MIN_MATCH_HIGH_LENGTH,
_MIN_MATCH_LENGTH=MIN_MATCH_LENGTH,
):
"""
Compute and set thresholds considering the occurrence of only unique tokens.
"""
if minimum_coverage == 100:
min_matched_length_unique = length_unique
min_high_matched_length_unique = high_length_unique
return min_matched_length_unique, min_high_matched_length_unique
if length > 200:
min_matched_length_unique = length // 10
min_high_matched_length_unique = high_length_unique // 10
elif length < 5:
min_matched_length_unique = length_unique
min_high_matched_length_unique = high_length_unique
elif length < 10:
if length_unique < 2:
min_matched_length_unique = length_unique
else:
min_matched_length_unique = length_unique - 1
min_high_matched_length_unique = high_length_unique
elif length < 20:
min_matched_length_unique = high_length_unique
min_high_matched_length_unique = high_length_unique
else:
min_matched_length_unique = _MIN_MATCH_LENGTH
highu = (int(high_length_unique // 2)) or high_length_unique
min_high_matched_length_unique = min(highu, _MIN_MATCH_HIGH_LENGTH)
return min_matched_length_unique, min_high_matched_length_unique
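# Worked example (a sketch): with minimum_coverage == 100 the unique-token
# thresholds are simply (length_unique, high_length_unique): every unique
# token, including every unique "high" token, must be matched.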
@attr.s(slots=True, repr=False)
class SpdxRule(Rule):
"""
A specialized rule object that is used for the special case of SPDX license
expressions.
    Since there is an infinite number of possible SPDX expressions and these
    are not backed by a traditional rule text file, we use this class to
    handle the specifics of these rules that are built at matching time: one
    rule is created for each detected SPDX license expression.
"""
def __attrs_post_init__(self, *args, **kwargs):
self.identifier = f'spdx-license-identifier: {self.license_expression}'
expression = None
try:
expression = self.licensing.parse(self.license_expression)
        except Exception:
raise InvalidRule(
'Unable to parse License rule expression: '
f'{self.license_expression!r} for: SPDX rule: '
f'{self.stored_text}\n' + traceback.format_exc()
)
if expression is None:
raise InvalidRule(
'Unable to parse License rule expression: '
f'{self.license_expression!r} for: {self.data_file!r}'
)
self.license_expression = expression.render()
self.license_expression_object = expression
self.is_license_tag = True
self.is_small = False
self.relevance = 100
self.has_stored_relevance = True
def load(self):
raise NotImplementedError
def dump(self):
raise NotImplementedError
def _print_rule_stats():
"""
Print rules statistics.
"""
from licensedcode.cache import get_index
idx = get_index()
rules = idx.rules_by_rid
sizes = Counter(r.length for r in rules)
print('Top 15 lengths: ', sizes.most_common(15))
print(
'15 smallest lengths: ',
sorted(sizes.items(),
key=itemgetter(0))[:15],
)
high_sizes = Counter(r.high_length for r in rules)
print('Top 15 high lengths: ', high_sizes.most_common(15))
print(
'15 smallest high lengths: ',
sorted(high_sizes.items(),
key=itemgetter(0))[:15],
)
def update_ignorables(licensish, verbose=False):
"""
Update ignorables and return the ``licensish`` Rule or License using the
latest values detected in its text.
Display progress messages if ``verbose`` is True.
"""
if verbose:
print(f'Processing: file://{licensish.text_file}')
if not exists(licensish.text_file):
return licensish
ignorables = get_ignorables(text_file=licensish.text_file, verbose=verbose)
set_ignorables(licensish, ignorables, verbose=verbose)
return licensish
def set_ignorables(licensish, ignorables, verbose=False):
"""
Update ``licensish`` Rule or License using the mapping of ``ignorables``
attributes.
Display progress messages if ``verbose`` is True.
"""
for key, value in ignorables.items():
if verbose:
existing = getattr(licensish, key, None)
print(f'Updating ignorable: {key} from: {existing!r} to: {value!r}')
setattr(licensish, key, value)
return licensish
def get_ignorables(text_file, verbose=False):
"""
Return a mapping of ignorable clues lists found in a ``text_file`` for
copyrights, holders, authors, urls, emails. Do not include items with empty
values.
Display progress messages if ``verbose`` is True.
"""
from cluecode.copyrights import detect_copyrights
from cluecode.finder import find_urls
from cluecode.finder import find_emails
# redundant clues found in a license or rule text
# collect and set ignorable copyrights, holders and authors
copyrights = set()
holders = set()
authors = set()
for dtype, value, _start, _end in detect_copyrights(text_file):
if verbose:
print(f' Found {dtype}: {value}')
if dtype == 'copyrights':
copyrights.add(value)
elif dtype == 'holders':
holders.add(value)
elif dtype == 'authors':
authors.add(value)
# collect and set ignorable emails and urls
urls = set(u for (u, _ln) in find_urls(text_file) if u)
if verbose:
print(f' Found urls: {urls}')
emails = set(e for (e, _ln) in find_emails(text_file) if e)
if verbose:
print(f' Found emails: {emails}')
ignorables = build_ignorables_mapping(
copyrights, holders, authors, urls, emails)
if verbose:
print(f' Found ignorables: {ignorables}')
return ignorables
def get_normalized_ignorables(licensish):
"""
Return a sorted mapping of ignorables built from a licensish Rule or License.
"""
return build_ignorables_mapping(
copyrights=licensish.ignorable_copyrights,
holders=licensish.ignorable_holders,
authors=licensish.ignorable_authors,
urls=licensish.ignorable_urls,
emails=licensish.ignorable_emails,
)
def build_ignorables_mapping(copyrights, holders, authors, urls, emails):
"""
Return a sorted mapping of ignorables built from lists of ignorable clues.
"""
ignorables = dict(
ignorable_copyrights=sorted(copyrights or []),
ignorable_holders=sorted(holders or []),
ignorable_authors=sorted(authors or []),
ignorable_urls=sorted(urls or []),
ignorable_emails=sorted(emails or []),
)
return {k: v for k, v in sorted(ignorables.items()) if v}
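# Shape sketch (hypothetical values; not part of the original module): empty
# lists are dropped and keys come back sorted, e.g.
#   build_ignorables_mapping({'(c) ACME'}, set(), set(), {'http://acme.example'}, set())
#   -> {'ignorable_copyrights': ['(c) ACME'], 'ignorable_urls': ['http://acme.example']}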
def find_rule_base_location(name_prefix, rules_directory=rules_data_dir):
"""
Return a new, unique and non-existing base location in ``rules_directory``
with a file name but without an extension suitable to create a new rule
without overwriting any existing rule. Use the ``name_prefix`` string as a
prefix for this name.
"""
cleaned = (
name_prefix
.lower()
.strip()
.replace(' ', '_')
.replace('(', '')
.replace(')', '')
.strip('_-')
)
template = cleaned + '_{idx}'
idx = 1
while True:
base_name = template.format(idx=idx)
base_loc = join(rules_directory, base_name)
if not exists(f'{base_loc}.RULE'):
return base_loc
idx += 1
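# Usage sketch (hypothetical prefix and directory): probing starts at suffix
# _1 and increments until a free .RULE name is found, e.g.
#   find_rule_base_location('Apache 2.0 (deref)', '/tmp/rules')
#   -> '/tmp/rules/apache_2.0_deref_1'  # if that .RULE does not exist yet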
def get_key_phrases(text):
"""
Return an iterable of Spans marking the positions of key phrases in the given
text string. Words are considered to be key phrases if they are enclosed in the
KEY_PHRASE_OPEN and KEY_PHRASE_CLOSE characters.
"""
key_phrase_iterator = key_phrase_tokenizer(text)
key_phrase_index = 0
for token in key_phrase_iterator:
        if token.startswith(KEY_PHRASE_OPEN):
            # guard: if the text ends right after an opening marker, the inner
            # loop never runs and key_phrase would otherwise be unbound
            key_phrase = token
            span_positions = []
            # keep appending key phrase positions until we hit KEY_PHRASE_CLOSE
            for key_phrase in key_phrase_iterator:
                if key_phrase.endswith(KEY_PHRASE_CLOSE):
                    break
                span_positions.append(key_phrase_index)
                key_phrase_index += 1
            if not key_phrase.endswith(KEY_PHRASE_CLOSE):
                span_start_position = span_positions[0] if span_positions else 0
                raise InvalidRule(
                    f'Key phrase definition started at token {span_start_position} '
                    'is not closed'
                )
            if span_positions:
                yield Span(span_positions)
else:
key_phrase_index += 1
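# Example sketch (assuming the tokenizer emits the KEY_PHRASE_OPEN/CLOSE
# markers, e.g. '{{' and '}}', as their own tokens): in the rule text
#   'licensed under the {{Apache License}} version 2.0'
# the words "apache" and "license" occupy positions 3 and 4 (markers do not
# consume positions), so this yields a single Span([3, 4]).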
|
$(function() {
if( !!$.prototype.dropzone ){
$('.field_show .images-field .dropzone').dropzone({
url: "add-image",
dictDefaultMessage: '',
addRemoveLinks: false,
init: function(){
var field_value = this.element.attributes.field_value.nodeValue;
var images_url = 'list-images?value=' + field_value;
var thisDropzone = this;
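                // Preload previously uploaded images: fetch their metadata,
                // then replay Dropzone's addedfile/thumbnail/complete events
                // with mock file objects so they render like fresh uploads.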
$.getJSON(images_url, function(data) {
$.each(data, function(index, val) {
var upload = { bytesSent: val.size }
var mockFile = {
name: val.name,
size: val.size,
item_id: val.item_id,
dataURL: val.url,
accepted: true
};
thisDropzone.files.push(mockFile);
thisDropzone.emit('addedfile', mockFile);
thisDropzone.createThumbnailFromUrl(
mockFile,
thisDropzone.options.thumbnailWidth,
thisDropzone.options.thumbnailHeight,
thisDropzone.options.thumbnailMethod,
true,
function(thumbnail) {
thisDropzone.emit('thumbnail', mockFile, thumbnail);
}
);
thisDropzone.emit('complete', mockFile);
});
});
thisDropzone.disable();
}
});
$('.field_edit .images-field .dropzone').dropzone({
url: 'add-image',
dictDefaultMessage: 'drop images here or click to upload',
dictRemoveFile: 'remove',
dictCancelUpload: 'cancel upload',
acceptedFiles: 'image/jpeg,image/png,image/gif',
addRemoveLinks: true,
init: function(){
var field_value = this.element.attributes.field_value.nodeValue;
var field_name = this.element.attributes.field_name.nodeValue;
var images_url = 'list-images?value=' + field_value;
var thisDropzone = this;
this.on('success', function(file, item_id) {
file.item_id = item_id;
});
$.getJSON(images_url, function(data) {
$.each(data, function(index, val) {
var upload = { bytesSent: val.size }
var mockFile = {
name: val.name,
size: val.size,
item_id: val.item_id,
field_name: field_name,
dataURL: val.url,
accepted: true
};
thisDropzone.files.push(mockFile);
thisDropzone.emit('addedfile', mockFile);
thisDropzone.createThumbnailFromUrl(
mockFile,
thisDropzone.options.thumbnailWidth,
thisDropzone.options.thumbnailHeight,
thisDropzone.options.thumbnailMethod,
true,
function(thumbnail) {
thisDropzone.emit('thumbnail', mockFile, thumbnail);
}
);
thisDropzone.emit('complete', mockFile);
});
});
},
sending: function(file, xhr, formData) {
var field_name = this.element.attributes.field_name.nodeValue;
var field_value = this.element.attributes.field_value.nodeValue;
var csrf_token = this.element.attributes.token.nodeValue;
                console.log('sending');
formData.append("field_name", field_name);
formData.append("field_value", field_value);
formData.append("csrf_token", csrf_token);
                console.log('added token');
},
removedfile: function(file) {
var id = file.item_id,
name = file.name,
formData = new FormData();
var csrf_token = this.element.attributes.token.nodeValue;
console.log(csrf_token);
formData.append('id', id);
formData.append('name', encodeURIComponent(name));
                formData.append('csrf_token', csrf_token);
$.ajax({
type: 'POST',
url: 'remove-image',
processData: false,
contentType: false,
// data: "id=" + id + '&name=' + encodeURIComponent(name),
data: formData,
// dataType: 'html'
});
var _ref;
return (_ref = file.previewElement) != null ? _ref.parentNode.removeChild(file.previewElement) : void 0;
},
});
    }
});
|
"""test_34111 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "test"
admin.site.site_title = "test Admin Portal"
admin.site.index_title = "test Admin"
# swagger
api_info = openapi.Info(
title="test API",
default_version="v1",
description="API documentation for test App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
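# Routing note (a sketch of the behavior above): the re_path catch-all matches
# any URL not handled earlier and serves the SPA's index.html, so it must stay
# last; any pattern appended after it would be unreachable.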
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.gkehub_v1alpha2.types import membership
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-gke-hub',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class GkeHubTransport(abc.ABC):
"""Abstract transport class for GkeHub."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
)
DEFAULT_HOST: str = 'gkehub.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# If the credentials are service account credentials, then always try to use self signed JWT.
if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
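    # Illustration (a sketch): with google-auth >= 1.25.0 the kwargs become
    # {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} so user-supplied
    # scopes can coexist with the defaults; on older versions the two collapse
    # into {"scopes": scopes or cls.AUTH_SCOPES}.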
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_memberships: gapic_v1.method.wrap_method(
self.list_memberships,
default_timeout=None,
client_info=client_info,
),
self.get_membership: gapic_v1.method.wrap_method(
self.get_membership,
default_timeout=None,
client_info=client_info,
),
self.create_membership: gapic_v1.method.wrap_method(
self.create_membership,
default_timeout=None,
client_info=client_info,
),
self.delete_membership: gapic_v1.method.wrap_method(
self.delete_membership,
default_timeout=None,
client_info=client_info,
),
self.update_membership: gapic_v1.method.wrap_method(
self.update_membership,
default_timeout=None,
client_info=client_info,
),
self.generate_connect_manifest: gapic_v1.method.wrap_method(
self.generate_connect_manifest,
default_timeout=None,
client_info=client_info,
),
self.initialize_hub: gapic_v1.method.wrap_method(
self.initialize_hub,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def list_memberships(self) -> Callable[
[membership.ListMembershipsRequest],
Union[
membership.ListMembershipsResponse,
Awaitable[membership.ListMembershipsResponse]
]]:
raise NotImplementedError()
@property
def get_membership(self) -> Callable[
[membership.GetMembershipRequest],
Union[
membership.Membership,
Awaitable[membership.Membership]
]]:
raise NotImplementedError()
@property
def create_membership(self) -> Callable[
[membership.CreateMembershipRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
@property
def delete_membership(self) -> Callable[
[membership.DeleteMembershipRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
@property
def update_membership(self) -> Callable[
[membership.UpdateMembershipRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
@property
def generate_connect_manifest(self) -> Callable[
[membership.GenerateConnectManifestRequest],
Union[
membership.GenerateConnectManifestResponse,
Awaitable[membership.GenerateConnectManifestResponse]
]]:
raise NotImplementedError()
@property
def initialize_hub(self) -> Callable[
[membership.InitializeHubRequest],
Union[
membership.InitializeHubResponse,
Awaitable[membership.InitializeHubResponse]
]]:
raise NotImplementedError()
__all__ = (
'GkeHubTransport',
)
|
from typing import Any, Dict, List, Optional
from watchmen_model.chart import ChartTruncationType
from watchmen_model.console import Report, ReportDimension, ReportIndicator
from watchmen_utilities import ArrayHelper, is_blank
class ReportSchema:
def __init__(self, report: Report):
self.report = report
def get_report(self) -> Report:
return self.report
# noinspection PyMethodMayBeStatic
def as_indicator_name(self, indicator: ReportIndicator, index: int) -> str:
if is_blank(indicator.name):
return f'indicator_{index + 1}'
else:
return indicator.name
# noinspection PyMethodMayBeStatic
def as_dimension_name(self, dimension: ReportDimension, index: int) -> str:
if is_blank(dimension.name):
return f'dimension_{index + 1}'
else:
return dimension.name
def get_result_columns(self) -> List[str]:
return [
*ArrayHelper(self.get_report().indicators).map_with_index(
lambda x, index: self.as_indicator_name(x, index)).to_list(),
*ArrayHelper(self.get_report().dimensions).map_with_index(
lambda x, index: self.as_dimension_name(x, index)).to_list()
]
def get_sort_type(self) -> ChartTruncationType:
chart = self.get_report().chart
if chart is not None and chart.settings is not None and chart.settings.truncation is not None:
return chart.settings.truncation.type
return ChartTruncationType.NONE
def get_truncation_count(self) -> Optional[int]:
chart = self.get_report().chart
if chart is not None and chart.settings is not None and chart.settings.truncation is not None:
if is_blank(chart.settings.truncation.count):
return None
return chart.settings.truncation.count
return None
def translate_to_array_row(self, row: Dict[str, Any]) -> List[Any]:
return [
*ArrayHelper(self.get_report().indicators).map_with_index(
lambda x, index: row.get(self.as_indicator_name(x, index))).to_list(),
*ArrayHelper(self.get_report().dimensions).map_with_index(
lambda x, index: row.get(self.as_dimension_name(x, index))).to_list()
]
def translate_to_array_table(self, data: List[Dict[str, Any]]) -> List[List[Any]]:
return ArrayHelper(data).map(self.translate_to_array_row).to_list()
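# Minimal sketch (hypothetical report with one unnamed indicator and one
# unnamed dimension): unnamed columns fall back to positional names, so the
# row {'indicator_1': 42, 'dimension_1': 'north'} translates to [42, 'north'],
# indicators first, dimensions after.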
|
// Import React
import React from "react";
// Import Spectacle Core tags
import {
Appear,
BlockQuote,
Cite,
Code,
CodePane,
Deck,
Fill,
Fit,
Heading,
Image,
Layout,
Link,
ListItem,
List,
Markdown,
Quote,
Slide,
Spectacle,
Text
} from "spectacle";
// Import image preloader util
import preloader from "spectacle/lib/utils/preloader";
// Import theme
import createTheme from "spectacle/lib/themes/default";
// Import custom component
import Interactive from "../assets/interactive";
// Require CSS
require("normalize.css");
require("spectacle/lib/themes/default/index.css");
const images = {
city: require("../assets/city.jpg"),
kat: require("../assets/kat.png"),
logo: require("../assets/formidable-logo.svg"),
markdown: require("../assets/markdown.png")
};
preloader(images);
const theme = createTheme({
primary: "#4a90e2"
}, {
primary: "Helvetica",
tertiary: 'Fira Code'
});
export default class Presentation extends React.Component {
render() {
return (
<Spectacle theme={theme}>
<Deck transition={["slide"]} transitionDuration={500}>
<Slide>
<Heading><Code>undefined is not an option</Code></Heading>
<Text italic textSize={20}>— or —</Text>
<Text textSize={30}>How I Am Learning to Stop Making the Same Dumb Mistakes and Love Immutability and Pure Functions</Text>
</Slide>
<Slide>
<Text>Browsers constantly remind us what <Undefined /> is NOT.</Text>
</Slide>
<Slide transition={['fade']} bgImage='http://i.stack.imgur.com/PpQe2.gif'>
</Slide>
<Slide transition={['fade']} bgImage='http://i.stack.imgur.com/PpQe2.gif' bgDarken={0.2}>
<Appear>
<Text><Markdown>Can we say what `undefined` _is?_</Markdown></Text>
</Appear>
</Slide>
<Slide>
<Text><Undefined /> is...</Text>
<List>
<ClickListItem>A hard-to-find bug</ClickListItem>
<ClickListItem>A late night hotfix</ClickListItem>
<ClickListItem>An angry customer</ClickListItem>
</List>
</Slide>
<Slide>
<Text>Where does <Undefined /> come from?</Text>
<Appear>
<Text>Some dumb mistakes I make:</Text>
</Appear>
<List>
<ClickListItem>Trying to access data that isn't there</ClickListItem>
<ClickListItem>Trying to call a function that I think is there but isn't</ClickListItem>
<ClickListItem>Modifying data without realizing it</ClickListItem>
<ClickListItem>Relying on globals that aren't there</ClickListItem>
<ClickListItem>Failing to account for every possible state</ClickListItem>
<ClickListItem>Avoiding writing tests because it's a lot of work for the seeming payoff</ClickListItem>
</List>
</Slide>
<Slide>
<Text>An example</Text>
<CodePane lang='js' source={require('raw!./examples/01-react-component.example')} />
</Slide>
<Slide>
<Text>Problems</Text>
<List>
<ListItem><Markdown>`componentDidMount` is called _after_ initial render, so the program will have already tried to map 'undefined' and will fail</Markdown></ListItem>
<ListItem>Network timeout</ListItem>
<ListItem>Auth error</ListItem>
<ListItem>FOIT</ListItem>
</List>
</Slide>
<Slide>
<Text>Testing our code</Text>
<CodePane lang='js' source={require('raw!./examples/01-react-component.test.example')} />
</Slide>
<Slide>
<Text>Shortcomings of our test</Text>
<List>
<ClickListItem>`componentDidMount` is not _really_ covered. Functionality is assumed by setting state directly.</ClickListItem>
<ClickListItem>`vFetch` is not covered at all, but it is coupled to `componentDidMount` which in turn is coupled to `ListOfThings`. To test one method, the whole class is needed.</ClickListItem>
<ClickListItem>How do we test `vFetch`? Hit a real API server? Hit a mock server? Mock out `vFetch` itself and force it to return the data we want?</ClickListItem>
</List>
</Slide>
<Slide>
<Text>A different approach</Text>
<CodePane lang='js' source={require('raw!./examples/02-react-component.example')} />
</Slide>
<Slide>
<Text>The action creator</Text>
<CodePane lang='js' source={require('raw!./examples/02-action.example')} />
</Slide>
<Slide>
<Text>The reducer</Text>
<CodePane lang='js' source={require('raw!./examples/02-reducer.example')} />
</Slide>
<Slide>
<Text>Observations</Text>
<List>
<ClickListItem>We have an initial state (`state === []`) so we won't have any errors from mapping `undefined`, even when no data has been fetched yet. An empty array is mappable.</ClickListItem>
<ClickListItem>We're mutating the `state` param in-place, which means that it can now be changed in multiple ways. Yay, multiple sources of truth!</ClickListItem>
</List>
</Slide>
<Slide>
<Text>But Ben, ain't nobody write code like that. We all know that you're just supposed to return a new state object, not modify it in-place!</Text>
</Slide>
<Slide>
<Text>This is how we'd actually write that reducer</Text>
<CodePane lang="js" source={require('raw!./examples/03-reducer.example')} />
</Slide>
<Slide>
<Text>But the crazy thing is I've written code like that elsewhere</Text>
<CodePane lang="js" source={require('raw!./examples/03-bad-code.example')} />
</Slide>
<Slide>
<Text>It's actually easier to just not mutate our data.</Text>
<CodePane lang="js" source={require('raw!./examples/03-good-code.example')} />
</Slide>
<Slide>
<Text>Since we've separated our concerns out, testing this is crazy easy now.</Text>
<List>
<ClickListItem>Component: if I give you _this_ array of things, do you produce _that_ DOM tree?</ClickListItem>
<ClickListItem>Reducer: if I pass you _this_ action object, do you return _that_ state tree?</ClickListItem>
<ClickListItem>Action creator: if I call you, do you fetch data and then dispatch _this_ action?</ClickListItem>
<ClickListItem>^^ rats, this one still is a little complicated</ClickListItem>
</List>
</Slide>
<Slide>
<Text>Reducer test</Text>
<CodePane lang="js" source={require('raw!./examples/03-reducer.test.example')} />
</Slide>
<Slide>
<Text>Component test</Text>
<CodePane lang="js" source={require('raw!./examples/03-react-component.test.example')} />
</Slide>
<Slide>
<Markdown>Easy-to-test code is also _better_ code.</Markdown>
<List>
<ClickListItem>Testing _computation_ is easy; testing _mutation_ is harder.</ClickListItem>
<ClickListItem>Separate out your computations from your mutations.</ClickListItem>
<ClickListItem>Immutable data means fewer guards, and accidental mutation is impossible.</ClickListItem>
<ClickListItem>Immutability opens up performance optimizations that are impossible with mutability.</ClickListItem>
</List>
</Slide>
<Slide>
<Markdown>You barely have to be awake to test pure functions.</Markdown>
<List>
<ClickListItem>Imagine if `2 + 2` tried to mutate `2` in place?</ClickListItem>
              <ClickListItem>Imagine if the implementation of `2 + 2` was `(a, b) => Google(a + '+' + b)[0]`.</ClickListItem>
<ClickListItem>With pure functions, the data you have at your disposal are params. That's it. The only way to assign your computations to a variable is through `return`.</ClickListItem>
</List>
</Slide>
<Slide>
<Markdown>Functional purity allows us to do neat things like currying, partial application, and composition.</Markdown>
</Slide>
<Slide>
<Text>Sanitize and sort an array of people procedurally:</Text>
<CodePane lang="js" source={require('raw!./examples/04-sort-people.example')} />
</Slide>
<Slide>
<Text>Sanitize and sort an array of people OOP-ly:</Text>
<CodePane lang="js" source={require('raw!./examples/04-sort-people-oop.example')} />
</Slide>
<Slide>
<Text>Sanitize and sort an array of people functionally:</Text>
<CodePane lang="js" source={require('raw!./examples/04-sort-people-fp.example')} />
</Slide>
<Slide>
<Text>Suggestions</Text>
<List>
<ClickListItem>Read Dr. Boolean's [Mostly Adequate Guide to Functional Programming in Javascript](https://drboolean.gitbooks.io/mostly-adequate-guide)</ClickListItem>
<ClickListItem>Subscribe to the [Deterministic Newsletter](https://deterministic.curated.co/).</ClickListItem>
<ClickListItem>Work through the Knowthen [Elm for Beginners](http://courses.knowthen.com/courses/elm-for-beginners) course</ClickListItem>
<ClickListItem>Read through [Elm in Action](https://www.manning.com/books/elm-in-action) with us.</ClickListItem>
</List>
</Slide>
<Slide>
<Text>Questions?</Text>
</Slide>
</Deck>
</Spectacle>
)
}
}
const Undefined = () => <Code>Undefined</Code>
const ClickListItem = ({ children }) => <Appear>
<ListItem><Markdown>{children}</Markdown></ListItem>
</Appear>
|