text
stringlengths 3
1.05M
|
|---|
const puppeteer = require('puppeteer');
const { format } = require('url');
const { join } = require('path');

// Headless test runner: loads test.html in Chromium and proxies its output
// and exit code back to this host process.
(async () => {
  // Sandbox flags are required when running Chromium as root in containers.
  // https://github.com/Googlechrome/puppeteer/issues/290
  const browser = await puppeteer.launch({
    args: ['--no-sandbox', '--disable-setuid-sandbox']
  });
  const page = await browser.newPage();
  // A page crash should fail the whole run, not hang it.
  page.on('error', e => {
    console.error(e.stack);
    process.exit(1);
  });
  // Bridge from the in-page harness to the host's stdout.
  await page.exposeFunction('hostWrite', d => {
    process.stdout.write(Buffer.from(d.data).toString());
  });
  // The page reports completion with an exit code; close the browser so the
  // process can exit naturally with that code.
  await page.exposeFunction('hostDone', async code => {
    process.exitCode = code;
    await browser.close();
  });
  await page.goto(format({
    protocol: 'file',
    pathname: join(__dirname, 'test.html')
  }));
})().catch(err => {
  // Fix: the async IIFE's rejection was previously unhandled, so a failed
  // launch/goto would exit 0 silently. Fail loudly instead.
  console.error(err);
  process.exit(1);
});
|
import torch
from torch.autograd import Variable
def train(epoch, dataloader, net, criterion, optimizer, opt):
    """Run one training epoch of the gated graph network.

    Args:
        epoch: current epoch index (used only for progress output).
        dataloader: iterable of (adj_matrix, annotation, target) batches.
        net: the model; called as net(init_input, annotation, adj_matrix).
        criterion: loss function comparing net output to target.
        optimizer: optimizer over net's parameters.
        opt: options namespace; reads n_node, state_dim, annotation_dim,
            cuda, verbal and niter.
    """
    net.train()
    for i, (adj_matrix, annotation, target) in enumerate(dataloader, 0):
        net.zero_grad()
        # Zero-pad the annotations so each node state has width opt.state_dim.
        padding = torch.zeros(len(annotation), opt.n_node, opt.state_dim - opt.annotation_dim).double()
        init_input = torch.cat((annotation, padding), 2)
        if opt.cuda:
            init_input = init_input.cuda()
            adj_matrix = adj_matrix.cuda()
            annotation = annotation.cuda()
            target = target.cuda()
        # NOTE: the deprecated Variable() wrappers were removed; since the
        # tensor/Variable merge they are identity no-ops and autograd tracks
        # plain tensors directly.
        output = net(init_input, annotation, adj_matrix)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        # Emit roughly ten progress lines per epoch.
        if i % int(len(dataloader) / 10 + 1) == 0 and opt.verbal:
            # Fix: loss.item() replaces loss.data[0], which raises IndexError
            # on 0-dim loss tensors in PyTorch >= 0.5.
            print('[%d/%d][%d/%d] Loss: %.4f' % (epoch, opt.niter, i, len(dataloader), loss.item()))
|
import requests as req


def insertion(url='http://127.0.0.1:8000/user/ben/glossner/bgslide/hello', timeout=5):
    """POST to the local dev endpoint and return the Response.

    Args:
        url: endpoint to hit; defaults to the original hard-coded URL.
        timeout: seconds before giving up, so a down server doesn't hang us.

    Returns:
        The requests.Response from the POST.
    """
    response = req.post(url, timeout=timeout)
    return response


if __name__ == '__main__':
    # Guarded so importing this module no longer fires a network request.
    insertion()
|
// TypeScript-emitted helper: implements down-level class inheritance.
// Reused from an outer scope if one already defined it.
var __extends = (this && this.__extends) || (function () {
    var extendStatics = function (d, b) {
        // Resolve the best static-inheritance strategy once, on first call:
        // setPrototypeOf, __proto__ assignment, or own-property copying.
        extendStatics = Object.setPrototypeOf ||
            ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
            function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
        return extendStatics(d, b);
    };
    return function (d, b) {
        extendStatics(d, b);
        // Temporary-constructor trick: links d.prototype to b.prototype
        // without invoking b's constructor.
        function __() { this.constructor = d; }
        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
    };
})();
// TypeScript-emitted helper: applies an array of decorators to a class or
// class member. Prefers Reflect.decorate when present; otherwise composes
// the decorators right-to-left by hand.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    // c < 3: class decorator; otherwise property/method decorator with
    // an (optionally supplied) property descriptor.
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    // Re-install the (possibly replaced) descriptor for member decorators.
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
import { _, AgPanel, Autowired, Component, PostConstruct, Promise } from "@ag-grid-community/core";
import { TabbedChartMenu } from "./tabbedChartMenu";
// Compiled (down-leveled) component: the toolbar/menu overlaid on a chart,
// giving access to settings, data, format, unlink and download actions.
var ChartMenu = /** @class */ (function (_super) {
    __extends(ChartMenu, _super);
    function ChartMenu(eChartContainer, eMenuPanelContainer, chartController) {
        var _this = _super.call(this, ChartMenu.TEMPLATE) || this;
        _this.eChartContainer = eChartContainer;
        _this.eMenuPanelContainer = eMenuPanelContainer;
        _this.chartController = chartController;
        // Toolbar-button id -> [icon name, click handler].
        _this.buttons = {
            chartSettings: ['menu', function () { return _this.showMenu("chartSettings"); }],
            chartData: ['menu', function () { return _this.showMenu("chartData"); }],
            chartFormat: ['menu', function () { return _this.showMenu("chartFormat"); }],
            chartUnlink: ['linked', function (e) { return _this.toggleDetached(e); }],
            chartDownload: ['save', function () { return _this.saveChart(); }]
        };
        _this.tabs = [];
        _this.menuVisible = false;
        return _this;
    }
    // Runs after dependency injection (wired via the @PostConstruct decorator
    // at the bottom of this class).
    ChartMenu.prototype.postConstruct = function () {
        this.createButtons();
        this.refreshMenuClasses();
    };
    ChartMenu.prototype.isVisible = function () {
        return this.menuVisible;
    };
// Builds the list of toolbar items to render, honouring the user-supplied
// getChartToolbarItemsFunc override and pivot-chart restrictions. Also
// records which of the options become tabs inside the menu panel.
ChartMenu.prototype.getToolbarOptions = function () {
    var _this = this;
    var tabOptions = [
        'chartSettings',
        'chartData',
        'chartFormat',
        'chartUnlink',
        'chartDownload'
    ];
    var toolbarItemsFunc = this.gridOptionsWrapper.getChartToolbarItemsFunc();
    if (toolbarItemsFunc) {
        var params = {
            api: this.gridOptionsWrapper.getApi(),
            columnApi: this.gridOptionsWrapper.getColumnApi(),
            defaultItems: tabOptions
        };
        // Drop unknown option names, warning once per bad entry.
        tabOptions = toolbarItemsFunc(params).filter(function (option) {
            if (!_this.buttons[option]) {
                // Fix: the closing quote after the option name was missing,
                // producing a lopsided message like "ag-Grid: 'foo is not...".
                console.warn("ag-Grid: '" + option + "' is not a valid Chart Toolbar Option");
                return false;
            }
            return true;
        });
    }
    // pivot charts use the column tool panel instead of the data panel
    if (this.chartController.isPivotChart()) {
        tabOptions = tabOptions.filter(function (option) { return option !== 'chartData'; });
    }
    // Unlink/download are plain buttons; everything else becomes a menu tab.
    var ignoreOptions = ['chartUnlink', 'chartDownload'];
    this.tabs = tabOptions.filter(function (option) { return ignoreOptions.indexOf(option) === -1; });
    // Render the non-tab buttons plus one button for the first tab.
    return tabOptions.filter(function (value) {
        return ignoreOptions.indexOf(value) !== -1 ||
            (_this.tabs.length && value === _this.tabs[0]);
    });
};
// Toggles the chart's attachment to the grid's cell range: swaps the
// linked/unlinked icon on the clicked button and notifies the controller.
ChartMenu.prototype.toggleDetached = function (e) {
    var target = e.target;
    var active = _.containsClass(target, 'ag-icon-linked');
    _.addOrRemoveCssClass(target, 'ag-icon-linked', !active);
    _.addOrRemoveCssClass(target, 'ag-icon-unlinked', active);
    this.chartController.detachChartRange();
};
// Renders one icon button per toolbar option into this component's GUI.
ChartMenu.prototype.createButtons = function () {
    var _this = this;
    var chartToolbarOptions = this.getToolbarOptions();
    var gui = this.getGui();
    chartToolbarOptions.forEach(function (button) {
        var buttonConfig = _this.buttons[button];
        var iconName = buttonConfig[0], callback = buttonConfig[1];
        var buttonEl = _.createIconNoSpan(iconName, _this.gridOptionsWrapper, undefined, true);
        _.addCssClass(buttonEl, 'ag-chart-menu-icon');
        _this.addDestroyableEventListener(buttonEl, 'click', callback);
        gui.appendChild(buttonEl);
    });
};
// Emits the download event; the listening chart component performs the save.
ChartMenu.prototype.saveChart = function () {
    var event = { type: ChartMenu.EVENT_DOWNLOAD_CHART };
    this.dispatchEvent(event);
};
// Lazily builds the sliding side panel that hosts the tabbed menu and
// resolves with the panel once the tab content has been attached.
ChartMenu.prototype.createMenuPanel = function (defaultTab) {
    var _this = this;
    var width = this.gridOptionsWrapper.chartMenuPanelWidth();
    var menuPanel = this.menuPanel = this.wireBean(new AgPanel({
        minWidth: width,
        width: width,
        height: '100%',
        closable: true,
        hideTitleBar: true,
        cssIdentifier: 'chart-menu'
    }));
    menuPanel.setParentComponent(this);
    this.eMenuPanelContainer.appendChild(menuPanel.getGui());
    this.tabbedMenu = this.wireBean(new TabbedChartMenu({
        controller: this.chartController,
        type: this.chartController.getChartType(),
        panels: this.tabs
    }));
    // Tear down the tabbed menu together with its hosting panel.
    this.addDestroyableEventListener(menuPanel, Component.EVENT_DESTROYED, function () { return _this.tabbedMenu.destroy(); });
    return new Promise(function (res) {
        // Deferred so the panel is in the DOM before the tabs are shown.
        window.setTimeout(function () {
            menuPanel.setBodyComponent(_this.tabbedMenu);
            _this.tabbedMenu.showTab(defaultTab);
            // Clicking the chart area outside the menu closes the menu.
            _this.addDestroyableEventListener(_this.eChartContainer, 'click', function (event) {
                if (_this.getGui().contains(event.target)) {
                    return;
                }
                if (_this.menuVisible) {
                    _this.hideMenu();
                }
            });
            res(menuPanel);
        }, 100);
    });
};
// Slides the (already created) panel into view and updates CSS state.
ChartMenu.prototype.showContainer = function () {
    if (!this.menuPanel) {
        return;
    }
    this.menuVisible = true;
    this.showParent(this.menuPanel.getWidth());
    this.refreshMenuClasses();
};
// Opens the menu on the given tab, building the panel on first use.
ChartMenu.prototype.showMenu = function (tabName) {
    var tab = this.tabs.indexOf(tabName);
    if (!this.menuPanel) {
        this.createMenuPanel(tab).then(this.showContainer.bind(this));
    }
    else {
        this.showContainer();
    }
};
ChartMenu.prototype.hideMenu = function () {
    var _this = this;
    this.hideParent();
    // Flip the CSS classes only after the slide-out animation window.
    window.setTimeout(function () {
        _this.menuVisible = false;
        _this.refreshMenuClasses();
    }, 500);
};
// Keeps the chart container's visible/hidden classes in sync with state.
ChartMenu.prototype.refreshMenuClasses = function () {
    _.addOrRemoveCssClass(this.eChartContainer, 'ag-chart-menu-visible', this.menuVisible);
    _.addOrRemoveCssClass(this.eChartContainer, 'ag-chart-menu-hidden', !this.menuVisible);
};
// Widening/narrowing the panel container is what slides the menu in/out.
ChartMenu.prototype.showParent = function (width) {
    this.eMenuPanelContainer.style.minWidth = width + "px";
};
ChartMenu.prototype.hideParent = function () {
    this.eMenuPanelContainer.style.minWidth = '0';
};
ChartMenu.prototype.destroy = function () {
    _super.prototype.destroy.call(this);
    // The panel is created lazily, so it may not exist (or may already be
    // destroyed) at this point.
    if (this.menuPanel && this.menuPanel.isAlive()) {
        this.menuPanel.destroy();
    }
};
ChartMenu.EVENT_DOWNLOAD_CHART = "downloadChart";
ChartMenu.TEMPLATE = "<div class=\"ag-chart-menu\"></div>";
// Compiled decorator wiring: injects gridOptionsWrapper and registers
// postConstruct as a post-construction hook.
__decorate([
    Autowired("gridOptionsWrapper")
], ChartMenu.prototype, "gridOptionsWrapper", void 0);
__decorate([
    PostConstruct
], ChartMenu.prototype, "postConstruct", null);
return ChartMenu;
}(Component));
export { ChartMenu };
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.WrapTight = void 0;
const xml_components_1 = require("file/xml-components");
// Attribute component for <wp:wrapTight>: xmlKeys maps the distT/distB
// properties onto their serialized XML attribute names.
class WrapTightAttributes extends xml_components_1.XmlAttributeComponent {
    constructor() {
        super(...arguments);
        this.xmlKeys = {
            distT: "distT",
            distB: "distB",
        };
    }
}
// <wp:wrapTight> element: tight text wrapping around a drawing, with
// top/bottom wrap distances taken from `margins`.
class WrapTight extends xml_components_1.XmlComponent {
    /**
     * @param {{top?: number, bottom?: number}} [margins] - wrap distances.
     *   Fix: defaults are now applied per property, so a partial object such
     *   as `{ top: 5 }` no longer serializes `distB` as `undefined`.
     */
    constructor(margins = {}) {
        super("wp:wrapTight");
        const { top = 0, bottom = 0 } = margins;
        this.root.push(new WrapTightAttributes({
            distT: top,
            distB: bottom,
        }));
    }
}
exports.WrapTight = WrapTight;
//# sourceMappingURL=wrap-tight.js.map
|
import re
from typing import List
from urllib.parse import urlparse
import pytest
from bs4 import BeautifulSoup
from google.appengine.ext import ndb
from werkzeug.test import Client
from backend.common.consts.account_permission import AccountPermission
from backend.common.consts.media_type import MediaType
from backend.common.consts.suggestion_state import SuggestionState
from backend.common.models.media import Media
from backend.common.models.suggestion import Suggestion
from backend.common.models.team import Team
from backend.common.suggestions.suggestion_creator import (
SuggestionCreationStatus,
SuggestionCreator,
)
@pytest.fixture
def login_user_with_permission(login_user):
    # Grant the media-review permission so the review page is reachable.
    login_user.permissions = [AccountPermission.REVIEW_MEDIA]
    return login_user
def _find_review_button(suggestion, action: str):
    """Return the accept/reject <input> inside one suggestion item, or None."""
    return suggestion.find(
        "input",
        attrs={
            "name": re.compile("accept_reject-.*"),
            "value": re.compile(f"{action}::.*"),
        },
    )


def get_suggestion_queue(web_client: Client) -> List[str]:
    """Fetch the social-media review page and return the queued suggestion keys.

    Asserts that the page renders and that every listed suggestion carries
    both an accept and a reject button.
    """
    response = web_client.get("/suggest/team/social/review")
    assert response.status_code == 200
    soup = BeautifulSoup(response.data, "html.parser")
    review_form = soup.find(id="review_social")
    assert review_form is not None
    queue = []
    for suggestion in review_form.find_all(class_="suggestion-item"):
        # The accept/reject lookups only differ by the value prefix, so they
        # share the _find_review_button helper.
        accept_button = _find_review_button(suggestion, "accept")
        assert accept_button is not None
        reject_button = _find_review_button(suggestion, "reject")
        assert reject_button is not None
        # Button value is "accept::<suggestion key>"; keep just the key.
        queue.append(accept_button["value"].split("::")[1])
    return queue
def createSuggestion(logged_in_user) -> str:
    """Create a Twitter team-media suggestion for frc1124 and return its key name."""
    status = SuggestionCreator.createTeamMediaSuggestion(
        logged_in_user.account_key,
        "http://twitter.com/frc1124",
        "frc1124",
        None,
        None,
        True,
    )
    # status is a tuple; the first element is the creation status code.
    assert status[0] == SuggestionCreationStatus.SUCCESS
    return Suggestion.render_media_key_name(
        None, "team", "frc1124", "twitter-profile", "frc1124"
    )
def test_login_redirect(web_client: Client) -> None:
    """An anonymous request to the review page bounces to the login page."""
    resp = web_client.get("/suggest/team/social/review")
    assert resp.status_code == 302
    redirect_path = urlparse(resp.headers["Location"]).path
    assert redirect_path == "/account/login"
def test_no_permissions(login_user, web_client: Client) -> None:
    """A logged-in user without REVIEW_MEDIA is refused with a 401."""
    resp = web_client.get("/suggest/team/social/review")
    assert resp.status_code == 401
def test_nothing_to_review(login_user_with_permission, web_client: Client) -> None:
    """With no pending suggestions the review queue renders empty."""
    assert get_suggestion_queue(web_client) == []
def test_accept_suggestion(
    login_user_with_permission,
    ndb_stub,
    web_client: Client,
    taskqueue_stub,
) -> None:
    # End-to-end: a queued suggestion, once accepted through the review form,
    # is marked REVIEW_ACCEPTED and materialises a Media entity for the team.
    suggestion_id = createSuggestion(login_user_with_permission)
    queue = get_suggestion_queue(web_client)
    assert queue == [suggestion_id]
    # The form encodes the decision as "accept::<key>" under a per-suggestion
    # field name.
    response = web_client.post(
        "/suggest/team/social/review",
        data={
            f"accept_reject-{suggestion_id}": f"accept::{suggestion_id}",
        },
        follow_redirects=True,
    )
    assert response.status_code == 200
    suggestion = Suggestion.get_by_id(suggestion_id)
    assert suggestion is not None
    assert suggestion.review_state == SuggestionState.REVIEW_ACCEPTED
    # Accepting creates the corresponding Media entity referencing the team.
    media = Media.get_by_id(Media.render_key_name(MediaType.TWITTER_PROFILE, "frc1124"))
    assert media is not None
    assert media.foreign_key == "frc1124"
    assert media.media_type_enum == MediaType.TWITTER_PROFILE
    assert ndb.Key(Team, "frc1124") in media.references
    assert media.preferred_references == []
def test_reject_suggestion(
    login_user_with_permission, ndb_stub, web_client: Client
) -> None:
    # End-to-end: rejecting a queued suggestion marks it REVIEW_REJECTED and
    # creates no Media entity.
    suggestion_id = createSuggestion(login_user_with_permission)
    queue = get_suggestion_queue(web_client)
    assert queue == [suggestion_id]
    response = web_client.post(
        "/suggest/team/social/review",
        data={
            f"accept_reject-{suggestion_id}": f"reject::{suggestion_id}",
        },
        follow_redirects=True,
    )
    assert response.status_code == 200
    suggestion = Suggestion.get_by_id(suggestion_id)
    assert suggestion is not None
    assert suggestion.review_state == SuggestionState.REVIEW_REJECTED
    # No media should have been written for a rejected suggestion.
    medias = Media.query().fetch()
    assert medias == []
|
/** Populate the options form from chrome.storage.sync, using defaults for
 *  anything not yet stored. (handlePDF is a disabled, not-yet-shipped option.) */
function loadOptions() {
  const defaults = {
    /*handlePDF: false,*/
    saveLocally: false,
    saveChromeBookmarks: true
  };
  chrome.storage.sync.get(defaults, (items) => {
    //document.getElementById('handlePDF').checked = items.handlePDF;
    document.getElementById('saveLocally').checked = items.saveLocally;
    document.getElementById('saveChromeBookmarks').checked = items.saveChromeBookmarks;
  });
}
/** Persist the current state of the form checkboxes to chrome.storage.sync. */
function saveOptions() {
  //var handlePDF = document.getElementById("handlePDF").checked;
  const settings = {
    /*handlePDF: handlePDF,*/
    saveLocally: document.getElementById('saveLocally').checked,
    saveChromeBookmarks: document.getElementById('saveChromeBookmarks').checked
  };
  chrome.storage.sync.set(settings, () => {
    // Deliberately no window.close() — the user may keep editing options.
  });
}
/** Reset stored options to their defaults and mirror that in the form. */
function restoreOptions() {
  const defaults = {
    /*handlePDF: false,*/
    saveLocally: false,
    saveChromeBookmarks: true
  };
  chrome.storage.sync.set(defaults, () => {
    //document.getElementById("handlePDF").checked = false;
    document.getElementById('saveLocally').checked = defaults.saveLocally;
    document.getElementById('saveChromeBookmarks').checked = defaults.saveChromeBookmarks;
  });
}
/*function donateOptions() {
chrome.tabs.create({ url: 'https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=R9JRASMAABUUE&item_name=Yawas+Web+and+PDF+Highlighter¤cy_code=USD&source=yawasextension' });
}*/
// Load stored options into the form once the DOM is ready.
document.addEventListener('DOMContentLoaded', loadOptions);
//document.querySelector('#save').addEventListener('click', saveOptions);
document.querySelector('#restore').addEventListener('click', restoreOptions);
//document.querySelector('#donate').addEventListener('click', donateOptions);
//let handlePDFElem = document.getElementById("handlePDF");
//handlePDFElem.addEventListener('change',saveOptions);
// Autosave: persist whenever either checkbox changes (no explicit Save button).
let saveLocallyElem = document.getElementById("saveLocally");
saveLocallyElem.addEventListener('change',saveOptions);
let saveChromeBookmarksElem = document.getElementById("saveChromeBookmarks");
saveChromeBookmarksElem.addEventListener('change',saveOptions);
// Kick off the bookmark import in the background page; progress messages
// are reflected on the button label below.
let importButton = document.getElementById('importChromeBookmarks');
importButton.addEventListener('click', () => chrome.runtime.sendMessage({ msg: "startImportFunc" }));
chrome.runtime.onMessage.addListener(function requestCallback(request, sender, sendResponse) {
  if (request.msg === 'importMessage')
    importButton.textContent = request.n + ' imported';
});
|
/******************************************************************************
* Simulator.h
*
* Copyright (C) 2017 WANG YIFU
*
* Author: Wang Yifu
* E-mail: 340064514@qq.com
*
* Note:
*
******************************************************************************/
#ifndef __SIMULATOR_H
#define __SIMULATOR_H

#include <ncurses.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h>

/* SIMULATOR_EXT expands to nothing inside the translation unit that defines
 * __SIMULATOR_C (Simulator.c) and to `extern` everywhere else, so the
 * shared globals below are defined exactly once. */
#ifdef __SIMULATOR_C
#define SIMULATOR_EXT
#else
#define SIMULATOR_EXT extern
#endif

/* ncurses colour-pair identifiers used by the simulator. */
#define NCURSES_COLOR_BK_WHITE 1
#define NCURSES_COLOR_BK_BLACK 2

/* Shared state of the ncurses LED-screen simulator. */
typedef struct led_sim_dat {
    WINDOW *ledScreenWin;              /* ncurses window the LED dots are drawn into */
    volatile unsigned char simRunFlag; /* NOTE(review): presumably a run/stop flag
                                        * polled by the simulator thread — confirm
                                        * in Simulator.c */
} LED_SIM_DAT;

SIMULATOR_EXT LED_SIM_DAT LedSimDat;

/* Simulator API — implemented in Simulator.c. */
void Sim_Init(void);
void Sim_DrawDotsStart(void);
void Sim_DrawDot(int ledCoordX, int ledCoordY);
void Sim_DrawDotsEnd(void);
void Sim_ClearDot(int ledCoordX, int ledCoordY);
void Sim_LedScreenRefresh(void);

#endif
|
"use strict";
// Babel helper: installs prototype/static members for compiled ES6 classes
// via Object.defineProperty (non-enumerable, configurable).
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
// Babel helper: returns super()'s result when it is an object/function,
// otherwise falls back to `this`; throws if super() was never called.
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
// Babel helper: wires up prototype and static inheritance for compiled classes.
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
// Babel helper: throws when a compiled class constructor is invoked without `new`.
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
/*
*
* More info at [www.dropzonejs.com](https://www.dropzonejs.com)
*
* Copyright (c) 2012, Matias Meno
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
// The Emitter class provides the ability to call `.on()` on Dropzone to listen
// to events.
// It is strongly based on component's emitter class, and I removed the
// functionality because of the dependency hell with different frameworks.
// Minimal event emitter (compiled from ES6). Dropzone extends this so
// consumers can call `.on()` / `.emit()` / `.off()` on instances.
var Emitter = function () {
    function Emitter() {
        _classCallCheck(this, Emitter);
    }
    _createClass(Emitter, [{
        key: "on",
        // Add an event listener for given event
        value: function on(event, fn) {
            // _callbacks is created lazily on first use.
            this._callbacks = this._callbacks || {};
            // Create namespace for this event
            if (!this._callbacks[event]) {
                this._callbacks[event] = [];
            }
            this._callbacks[event].push(fn);
            return this;
        }
    }, {
        key: "emit",
        // Invoke every listener of `event` with the remaining arguments.
        value: function emit(event) {
            this._callbacks = this._callbacks || {};
            var callbacks = this._callbacks[event];
            if (callbacks) {
                // Compiled rest-parameter: collect the trailing arguments.
                for (var _len = arguments.length, args = Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {
                    args[_key - 1] = arguments[_key];
                }
                // Compiled for-of loop: handles both arrays and iterables.
                for (var _iterator = callbacks, _isArray = true, _i = 0, _iterator = _isArray ? _iterator : _iterator[Symbol.iterator]();;) {
                    var _ref;
                    if (_isArray) {
                        if (_i >= _iterator.length) break;
                        _ref = _iterator[_i++];
                    } else {
                        _i = _iterator.next();
                        if (_i.done) break;
                        _ref = _i.value;
                    }
                    var callback = _ref;
                    callback.apply(this, args);
                }
            }
            return this;
        }
        // Remove event listener for given event. If fn is not provided, all event
        // listeners for that event will be removed. If neither is provided, all
        // event listeners will be removed.
    }, {
        key: "off",
        value: function off(event, fn) {
            if (!this._callbacks || arguments.length === 0) {
                this._callbacks = {};
                return this;
            }
            // specific event
            var callbacks = this._callbacks[event];
            if (!callbacks) {
                return this;
            }
            // remove all handlers
            if (arguments.length === 1) {
                delete this._callbacks[event];
                return this;
            }
            // remove specific handler
            // (only the first matching handler is removed, by design)
            for (var i = 0; i < callbacks.length; i++) {
                var callback = callbacks[i];
                if (callback === fn) {
                    callbacks.splice(i, 1);
                    break;
                }
            }
            return this;
        }
    }]);
    return Emitter;
}();
var Dropzone = function (_Emitter) {
_inherits(Dropzone, _Emitter);
_createClass(Dropzone, null, [{
key: "initClass",
value: function initClass() {
// Exposing the emitter class, mainly for tests
this.prototype.Emitter = Emitter;
/*
This is a list of all available events you can register on a dropzone object.
You can register an event handler like this:
dropzone.on("dragEnter", function() { });
*/
this.prototype.events = ["drop", "dragstart", "dragend", "dragenter", "dragover", "dragleave", "addedfile", "addedfiles", "removedfile", "thumbnail", "error", "errormultiple", "processing", "processingmultiple", "uploadprogress", "totaluploadprogress", "sending", "sendingmultiple", "success", "successmultiple", "canceled", "canceledmultiple", "complete", "completemultiple", "reset", "maxfilesexceeded", "maxfilesreached", "queuecomplete"];
this.prototype.defaultOptions = {
/**
* Has to be specified on elements other than form (or when the form
* doesn't have an `action` attribute). You can also
* provide a function that will be called with `files` and
* must return the url (since `v3.12.0`)
*/
url: null,
/**
* Can be changed to `"put"` if necessary. You can also provide a function
* that will be called with `files` and must return the method (since `v3.12.0`).
*/
method: "post",
/**
* Will be set on the XHRequest.
*/
withCredentials: false,
/**
* The timeout for the XHR requests in milliseconds (since `v4.4.0`).
*/
timeout: 30000,
/**
* How many file uploads to process in parallel (See the
* Enqueuing file uploads* documentation section for more info)
*/
parallelUploads: 2,
/**
* Whether to send multiple files in one request. If
* this it set to true, then the fallback file input element will
* have the `multiple` attribute as well. This option will
* also trigger additional events (like `processingmultiple`). See the events
* documentation section for more information.
*/
uploadMultiple: false,
/**
* Whether you want files to be uploaded in chunks to your server. This can't be
* used in combination with `uploadMultiple`.
*
* See [chunksUploaded](#config-chunksUploaded) for the callback to finalise an upload.
*/
chunking: false,
/**
* If `chunking` is enabled, this defines whether **every** file should be chunked,
* even if the file size is below chunkSize. This means, that the additional chunk
* form data will be submitted and the `chunksUploaded` callback will be invoked.
*/
forceChunking: false,
/**
* If `chunking` is `true`, then this defines the chunk size in bytes.
*/
chunkSize: 2000000,
/**
* If `true`, the individual chunks of a file are being uploaded simultaneously.
*/
parallelChunkUploads: false,
/**
* Whether a chunk should be retried if it fails.
*/
retryChunks: false,
/**
* If `retryChunks` is true, how many times should it be retried.
*/
retryChunksLimit: 3,
/**
* If not `null` defines how many files this Dropzone handles. If it exceeds,
* the event `maxfilesexceeded` will be called. The dropzone element gets the
* class `dz-max-files-reached` accordingly so you can provide visual feedback.
*/
maxFilesize: 256,
/**
* The name of the file param that gets transferred.
* **NOTE**: If you have the option `uploadMultiple` set to `true`, then
* Dropzone will append `[]` to the name.
*/
paramName: "file",
/**
* Whether thumbnails for images should be generated
*/
createImageThumbnails: true,
/**
* In MB. When the filename exceeds this limit, the thumbnail will not be generated.
*/
maxThumbnailFilesize: 10,
/**
* If `null`, the ratio of the image will be used to calculate it.
*/
thumbnailWidth: 120,
/**
* The same as `thumbnailWidth`. If both are null, images will not be resized.
*/
thumbnailHeight: 120,
/**
* How the images should be scaled down in case both, `thumbnailWidth` and `thumbnailHeight` are provided.
* Can be either `contain` or `crop`.
*/
thumbnailMethod: 'crop',
/**
* If set, images will be resized to these dimensions before being **uploaded**.
* If only one, `resizeWidth` **or** `resizeHeight` is provided, the original aspect
* ratio of the file will be preserved.
*
* The `options.transformFile` function uses these options, so if the `transformFile` function
* is overridden, these options don't do anything.
*/
resizeWidth: null,
/**
* See `resizeWidth`.
*/
resizeHeight: null,
/**
* The mime type of the resized image (before it gets uploaded to the server).
* If `null` the original mime type will be used. To force jpeg, for example, use `image/jpeg`.
* See `resizeWidth` for more information.
*/
resizeMimeType: null,
/**
* The quality of the resized images. See `resizeWidth`.
*/
resizeQuality: 0.8,
/**
* How the images should be scaled down in case both, `resizeWidth` and `resizeHeight` are provided.
* Can be either `contain` or `crop`.
*/
resizeMethod: 'contain',
/**
* The base that is used to calculate the filesize. You can change this to
* 1024 if you would rather display kibibytes, mebibytes, etc...
* 1024 is technically incorrect, because `1024 bytes` are `1 kibibyte` not `1 kilobyte`.
* You can change this to `1024` if you don't care about validity.
*/
filesizeBase: 1000,
/**
* Can be used to limit the maximum number of files that will be handled by this Dropzone
*/
maxFiles: null,
/**
* An optional object to send additional headers to the server. Eg:
* `{ "My-Awesome-Header": "header value" }`
*/
headers: null,
/**
* If `true`, the dropzone element itself will be clickable, if `false`
* nothing will be clickable.
*
* You can also pass an HTML element, a CSS selector (for multiple elements)
* or an array of those. In that case, all of those elements will trigger an
* upload when clicked.
*/
clickable: true,
/**
* Whether hidden files in directories should be ignored.
*/
ignoreHiddenFiles: true,
/**
* The default implementation of `accept` checks the file's mime type or
* extension against this list. This is a comma separated list of mime
* types or file extensions.
*
* Eg.: `image/*,application/pdf,.psd`
*
* If the Dropzone is `clickable` this option will also be used as
* [`accept`](https://developer.mozilla.org/en-US/docs/HTML/Element/input#attr-accept)
* parameter on the hidden file input as well.
*/
acceptedFiles: null,
/**
* **Deprecated!**
* Use acceptedFiles instead.
*/
acceptedMimeTypes: null,
/**
* If false, files will be added to the queue but the queue will not be
* processed automatically.
* This can be useful if you need some additional user input before sending
* files (or if you want want all files sent at once).
* If you're ready to send the file simply call `myDropzone.processQueue()`.
*
* See the [enqueuing file uploads](#enqueuing-file-uploads) documentation
* section for more information.
*/
autoProcessQueue: true,
/**
* If false, files added to the dropzone will not be queued by default.
* You'll have to call `enqueueFile(file)` manually.
*/
autoQueue: true,
/**
* If `true`, this will add a link to every file preview to remove or cancel (if
* already uploading) the file. The `dictCancelUpload`, `dictCancelUploadConfirmation`
* and `dictRemoveFile` options are used for the wording.
*/
addRemoveLinks: false,
/**
* Defines where to display the file previews – if `null` the
* Dropzone element itself is used. Can be a plain `HTMLElement` or a CSS
* selector. The element should have the `dropzone-previews` class so
* the previews are displayed properly.
*/
previewsContainer: null,
/**
* This is the element the hidden input field (which is used when clicking on the
* dropzone to trigger file selection) will be appended to. This might
* be important in case you use frameworks to switch the content of your page.
*/
hiddenInputContainer: "body",
/**
* If null, no capture type will be specified
* If camera, mobile devices will skip the file selection and choose camera
* If microphone, mobile devices will skip the file selection and choose the microphone
* If camcorder, mobile devices will skip the file selection and choose the camera in video mode
* On apple devices multiple must be set to false. AcceptedFiles may need to
* be set to an appropriate mime type (e.g. "image/*", "audio/*", or "video/*").
*/
capture: null,
/**
* **Deprecated**. Use `renameFile` instead.
*/
renameFilename: null,
/**
* A function that is invoked before the file is uploaded to the server and renames the file.
* This function gets the `File` as argument and can use the `file.name`. The actual name of the
* file that gets used during the upload can be accessed through `file.upload.filename`.
*/
renameFile: null,
/**
* If `true` the fallback will be forced. This is very useful to test your server
* implementations first and make sure that everything works as
* expected without dropzone if you experience problems, and to test
* how your fallbacks will look.
*/
forceFallback: false,
/**
* The text used before any files are dropped.
*/
dictDefaultMessage: "Drop files here to upload",
/**
* The text that replaces the default message text it the browser is not supported.
*/
dictFallbackMessage: "Your browser does not support drag'n'drop file uploads.",
/**
* The text that will be added before the fallback form.
* If you provide a fallback element yourself, or if this option is `null` this will
* be ignored.
*/
dictFallbackText: "Please use the fallback form below to upload your files like in the olden days.",
/**
* If the filesize is too big.
* `{{filesize}}` and `{{maxFilesize}}` will be replaced with the respective configuration values.
*/
dictFileTooBig: "File is too big ({{filesize}}MiB). Max filesize: {{maxFilesize}}MiB.",
/**
* If the file doesn't match the file type.
*/
dictInvalidFileType: "You can't upload files of this type.",
/**
* If the server response was invalid.
* `{{statusCode}}` will be replaced with the servers status code.
*/
dictResponseError: "Server responded with {{statusCode}} code.",
/**
* If `addRemoveLinks` is true, the text to be used for the cancel upload link.
*/
dictCancelUpload: "Cancel upload",
/**
* If `addRemoveLinks` is true, the text to be used for confirmation when cancelling upload.
*/
dictCancelUploadConfirmation: "Are you sure you want to cancel this upload?",
/**
* If `addRemoveLinks` is true, the text to be used to remove a file.
*/
dictRemoveFile: "Remove file",
/**
* If this is not null, then the user will be prompted before removing a file.
*/
dictRemoveFileConfirmation: null,
/**
* Displayed if `maxFiles` is st and exceeded.
* The string `{{maxFiles}}` will be replaced by the configuration value.
*/
dictMaxFilesExceeded: "You can not upload any more files.",
/**
* Allows you to translate the different units. Starting with `tb` for terabytes and going down to
* `b` for bytes.
*/
dictFileSizeUnits: { tb: "TB", gb: "GB", mb: "MB", kb: "KB", b: "b" },
/**
* Called when dropzone initialized
* You can add event listeners here
*/
init: function init() {},
/**
* Can be an **object** of additional parameters to transfer to the server, **or** a `Function`
* that gets invoked with the `files`, `xhr` and, if it's a chunked upload, `chunk` arguments. In case
* of a function, this needs to return a map.
*
* The default implementation does nothing for normal uploads, but adds relevant information for
* chunked uploads.
*
* This is the same as adding hidden input fields in the form element.
*/
params: function params(files, xhr, chunk) {
if (chunk) {
return {
dzuuid: chunk.file.upload.uuid,
dzchunkindex: chunk.index,
dztotalfilesize: chunk.file.size,
dzchunksize: this.options.chunkSize,
dztotalchunkcount: chunk.file.upload.totalChunkCount,
dzchunkbyteoffset: chunk.index * this.options.chunkSize
};
}
},
/**
* A function that gets a [file](https://developer.mozilla.org/en-US/docs/DOM/File)
* and a `done` function as parameters.
*
* If the done function is invoked without arguments, the file is "accepted" and will
* be processed. If you pass an error message, the file is rejected, and the error
* message will be displayed.
* This function will not be called if the file is too big or doesn't match the mime types.
*/
accept: function accept(file, done) {
return done();
},
/**
* The callback that will be invoked when all chunks have been uploaded for a file.
* It gets the file for which the chunks have been uploaded as the first parameter,
* and the `done` function as second. `done()` needs to be invoked when everything
* needed to finish the upload process is done.
*/
chunksUploaded: function chunksUploaded(file, done) {
done();
},
/**
* Gets called when the browser is not supported.
* The default implementation shows the fallback input field and adds
* a text.
*/
fallback: function fallback() {
// This code should pass in IE7... :(
var messageElement = void 0;
this.element.className = this.element.className + " dz-browser-not-supported";
for (var _iterator2 = this.element.getElementsByTagName("div"), _isArray2 = true, _i2 = 0, _iterator2 = _isArray2 ? _iterator2 : _iterator2[Symbol.iterator]();;) {
var _ref2;
if (_isArray2) {
if (_i2 >= _iterator2.length) break;
_ref2 = _iterator2[_i2++];
} else {
_i2 = _iterator2.next();
if (_i2.done) break;
_ref2 = _i2.value;
}
var child = _ref2;
if (/(^| )dz-message($| )/.test(child.className)) {
messageElement = child;
child.className = "dz-message"; // Removes the 'dz-default' class
break;
}
}
if (!messageElement) {
messageElement = Dropzone.createElement("<div class=\"dz-message\"><span></span></div>");
this.element.appendChild(messageElement);
}
var span = messageElement.getElementsByTagName("span")[0];
if (span) {
if (span.textContent != null) {
span.textContent = this.options.dictFallbackMessage;
} else if (span.innerText != null) {
span.innerText = this.options.dictFallbackMessage;
}
}
return this.element.appendChild(this.getFallbackForm());
},
/**
* Gets called to calculate the thumbnail dimensions.
*
* It gets `file`, `width` and `height` (both may be `null`) as parameters and must return an object containing:
*
* - `srcWidth` & `srcHeight` (required)
* - `trgWidth` & `trgHeight` (required)
* - `srcX` & `srcY` (optional, default `0`)
* - `trgX` & `trgY` (optional, default `0`)
*
* Those values are going to be used by `ctx.drawImage()`.
*/
resize: function resize(file, width, height, resizeMethod) {
var info = {
srcX: 0,
srcY: 0,
srcWidth: file.width,
srcHeight: file.height
};
var srcRatio = file.width / file.height;
// Automatically calculate dimensions if not specified
if (width == null && height == null) {
width = info.srcWidth;
height = info.srcHeight;
} else if (width == null) {
width = height * srcRatio;
} else if (height == null) {
height = width / srcRatio;
}
// Make sure images aren't upscaled
width = Math.min(width, info.srcWidth);
height = Math.min(height, info.srcHeight);
var trgRatio = width / height;
if (info.srcWidth > width || info.srcHeight > height) {
// Image is bigger and needs rescaling
if (resizeMethod === 'crop') {
if (srcRatio > trgRatio) {
info.srcHeight = file.height;
info.srcWidth = info.srcHeight * trgRatio;
} else {
info.srcWidth = file.width;
info.srcHeight = info.srcWidth / trgRatio;
}
} else if (resizeMethod === 'contain') {
// Method 'contain'
if (srcRatio > trgRatio) {
height = width / srcRatio;
} else {
width = height * srcRatio;
}
} else {
throw new Error("Unknown resizeMethod '" + resizeMethod + "'");
}
}
info.srcX = (file.width - info.srcWidth) / 2;
info.srcY = (file.height - info.srcHeight) / 2;
info.trgWidth = width;
info.trgHeight = height;
return info;
},
/**
* Can be used to transform the file (for example, resize an image if necessary).
*
* The default implementation uses `resizeWidth` and `resizeHeight` (if provided) and resizes
* images according to those dimensions.
*
* Gets the `file` as the first parameter, and a `done()` function as the second, that needs
* to be invoked with the file when the transformation is done.
*/
transformFile: function transformFile(file, done) {
if ((this.options.resizeWidth || this.options.resizeHeight) && file.type.match(/image.*/)) {
return this.resizeImage(file, this.options.resizeWidth, this.options.resizeHeight, this.options.resizeMethod, done);
} else {
return done(file);
}
},
/**
* A string that contains the template used for each dropped
* file. Change it to fulfill your needs but make sure to properly
* provide all elements.
*
* If you want to use an actual HTML element instead of providing a String
* as a config option, you could create a div with the id `tpl`,
* put the template inside it and provide the element like this:
*
* document
* .querySelector('#tpl')
* .innerHTML
*
*/
previewTemplate: "<div class=\"dz-preview dz-file-preview\">\n <div class=\"dz-image\"><img data-dz-thumbnail /></div>\n <div class=\"dz-details\">\n <div class=\"dz-size\"><span data-dz-size></span></div>\n <div class=\"dz-filename\"><span data-dz-name></span></div>\n </div>\n <div class=\"dz-progress\"><span class=\"dz-upload\" data-dz-uploadprogress></span></div>\n <div class=\"dz-error-message\"><span data-dz-errormessage></span></div>\n <div class=\"dz-success-mark\">\n <svg width=\"54px\" height=\"54px\" viewBox=\"0 0 54 54\" version=\"1.1\" xmlns=\"https//www.w3.org/2000/svg\" xmlns:xlink=\"https//www.w3.org/1999/xlink\" xmlns:sketch=\"https//www.bohemiancoding.com/sketch/ns\">\n <title>Check</title>\n <defs></defs>\n <g id=\"Page-1\" stroke=\"none\" stroke-width=\"1\" fill=\"none\" fill-rule=\"evenodd\" sketch:type=\"MSPage\">\n <path d=\"M23.5,31.8431458 L17.5852419,25.9283877 C16.0248253,24.3679711 13.4910294,24.366835 11.9289322,25.9289322 C10.3700136,27.4878508 10.3665912,30.0234455 11.9283877,31.5852419 L20.4147581,40.0716123 C20.5133999,40.1702541 20.6159315,40.2626649 20.7218615,40.3488435 C22.2835669,41.8725651 24.794234,41.8626202 26.3461564,40.3106978 L43.3106978,23.3461564 C44.8771021,21.7797521 44.8758057,19.2483887 43.3137085,17.6862915 C41.7547899,16.1273729 39.2176035,16.1255422 37.6538436,17.6893022 L23.5,31.8431458 Z M27,53 C41.3594035,53 53,41.3594035 53,27 C53,12.6405965 41.3594035,1 27,1 C12.6405965,1 1,12.6405965 1,27 C1,41.3594035 12.6405965,53 27,53 Z\" id=\"Oval-2\" stroke-opacity=\"0.198794158\" stroke=\"#747474\" fill-opacity=\"0.816519475\" fill=\"#FFFFFF\" sketch:type=\"MSShapeGroup\"></path>\n </g>\n </svg>\n </div>\n <div class=\"dz-error-mark\">\n <svg width=\"54px\" height=\"54px\" viewBox=\"0 0 54 54\" version=\"1.1\" xmlns=\"https//www.w3.org/2000/svg\" xmlns:xlink=\"https//www.w3.org/1999/xlink\" xmlns:sketch=\"https//www.bohemiancoding.com/sketch/ns\">\n <title>Error</title>\n <defs></defs>\n <g id=\"Page-1\" 
stroke=\"none\" stroke-width=\"1\" fill=\"none\" fill-rule=\"evenodd\" sketch:type=\"MSPage\">\n <g id=\"Check-+-Oval-2\" sketch:type=\"MSLayerGroup\" stroke=\"#747474\" stroke-opacity=\"0.198794158\" fill=\"#FFFFFF\" fill-opacity=\"0.816519475\">\n <path d=\"M32.6568542,29 L38.3106978,23.3461564 C39.8771021,21.7797521 39.8758057,19.2483887 38.3137085,17.6862915 C36.7547899,16.1273729 34.2176035,16.1255422 32.6538436,17.6893022 L27,23.3431458 L21.3461564,17.6893022 C19.7823965,16.1255422 17.2452101,16.1273729 15.6862915,17.6862915 C14.1241943,19.2483887 14.1228979,21.7797521 15.6893022,23.3461564 L21.3431458,29 L15.6893022,34.6538436 C14.1228979,36.2202479 14.1241943,38.7516113 15.6862915,40.3137085 C17.2452101,41.8726271 19.7823965,41.8744578 21.3461564,40.3106978 L27,34.6568542 L32.6538436,40.3106978 C34.2176035,41.8744578 36.7547899,41.8726271 38.3137085,40.3137085 C39.8758057,38.7516113 39.8771021,36.2202479 38.3106978,34.6538436 L32.6568542,29 Z M27,53 C41.3594035,53 53,41.3594035 53,27 C53,12.6405965 41.3594035,1 27,1 C12.6405965,1 1,12.6405965 1,27 C1,41.3594035 12.6405965,53 27,53 Z\" id=\"Oval-2\" sketch:type=\"MSShapeGroup\"></path>\n </g>\n </g>\n </svg>\n </div>\n</div>",
// END OPTIONS
// (Required by the dropzone documentation parser)
/*
Those functions register themselves to the events on init and handle all
the user interface specific stuff. Overwriting them won't break the upload
but can break the way it's displayed.
You can overwrite them if you don't like the default behavior. If you just
want to add an additional event handler, register it on the dropzone object
and don't overwrite those options.
*/
// Those are self explanatory and simply concern the DragnDrop.
// The `dz-drag-hover` class drives the hover styling while a drag is in progress.
drop: function drop(e) {
  return this.element.classList.remove("dz-drag-hover");
},
dragstart: function dragstart(e) {},
dragend: function dragend(e) {
  return this.element.classList.remove("dz-drag-hover");
},
dragenter: function dragenter(e) {
  return this.element.classList.add("dz-drag-hover");
},
dragover: function dragover(e) {
  return this.element.classList.add("dz-drag-hover");
},
dragleave: function dragleave(e) {
  return this.element.classList.remove("dz-drag-hover");
},
// No default paste handling; see the instance `paste()` method for the plumbing.
paste: function paste(e) {},
// Called whenever there are no files left in the dropzone anymore, and the
// dropzone should be displayed as if in the initial state.
reset: function reset() {
  return this.element.classList.remove("dz-started");
},
// Called when a file is added to the queue
// Receives `file`
addedfile: function addedfile(file) {
var _this2 = this;
if (this.element === this.previewsContainer) {
this.element.classList.add("dz-started");
}
if (this.previewsContainer) {
file.previewElement = Dropzone.createElement(this.options.previewTemplate.trim());
file.previewTemplate = file.previewElement; // Backwards compatibility
this.previewsContainer.appendChild(file.previewElement);
for (var _iterator3 = file.previewElement.querySelectorAll("[data-dz-name]"), _isArray3 = true, _i3 = 0, _iterator3 = _isArray3 ? _iterator3 : _iterator3[Symbol.iterator]();;) {
var _ref3;
if (_isArray3) {
if (_i3 >= _iterator3.length) break;
_ref3 = _iterator3[_i3++];
} else {
_i3 = _iterator3.next();
if (_i3.done) break;
_ref3 = _i3.value;
}
var node = _ref3;
node.textContent = file.name;
}
for (var _iterator4 = file.previewElement.querySelectorAll("[data-dz-size]"), _isArray4 = true, _i4 = 0, _iterator4 = _isArray4 ? _iterator4 : _iterator4[Symbol.iterator]();;) {
if (_isArray4) {
if (_i4 >= _iterator4.length) break;
node = _iterator4[_i4++];
} else {
_i4 = _iterator4.next();
if (_i4.done) break;
node = _i4.value;
}
node.innerHTML = this.filesize(file.size);
}
if (this.options.addRemoveLinks) {
file._removeLink = Dropzone.createElement("<a class=\"dz-remove\" href=\"javascript:undefined;\" data-dz-remove>" + this.options.dictRemoveFile + "</a>");
file.previewElement.appendChild(file._removeLink);
}
var removeFileEvent = function removeFileEvent(e) {
e.preventDefault();
e.stopPropagation();
if (file.status === Dropzone.UPLOADING) {
return Dropzone.confirm(_this2.options.dictCancelUploadConfirmation, function () {
return _this2.removeFile(file);
});
} else {
if (_this2.options.dictRemoveFileConfirmation) {
return Dropzone.confirm(_this2.options.dictRemoveFileConfirmation, function () {
return _this2.removeFile(file);
});
} else {
return _this2.removeFile(file);
}
}
};
for (var _iterator5 = file.previewElement.querySelectorAll("[data-dz-remove]"), _isArray5 = true, _i5 = 0, _iterator5 = _isArray5 ? _iterator5 : _iterator5[Symbol.iterator]();;) {
var _ref4;
if (_isArray5) {
if (_i5 >= _iterator5.length) break;
_ref4 = _iterator5[_i5++];
} else {
_i5 = _iterator5.next();
if (_i5.done) break;
_ref4 = _i5.value;
}
var removeLink = _ref4;
removeLink.addEventListener("click", removeFileEvent);
}
}
},
// Called whenever a file is removed.
removedfile: function removedfile(file) {
if (file.previewElement != null && file.previewElement.parentNode != null) {
file.previewElement.parentNode.removeChild(file.previewElement);
}
return this._updateMaxFilesReachedClass();
},
// Called when a thumbnail has been generated
// Receives `file` and `dataUrl`
thumbnail: function thumbnail(file, dataUrl) {
if (file.previewElement) {
file.previewElement.classList.remove("dz-file-preview");
for (var _iterator6 = file.previewElement.querySelectorAll("[data-dz-thumbnail]"), _isArray6 = true, _i6 = 0, _iterator6 = _isArray6 ? _iterator6 : _iterator6[Symbol.iterator]();;) {
var _ref5;
if (_isArray6) {
if (_i6 >= _iterator6.length) break;
_ref5 = _iterator6[_i6++];
} else {
_i6 = _iterator6.next();
if (_i6.done) break;
_ref5 = _i6.value;
}
var thumbnailElement = _ref5;
thumbnailElement.alt = file.name;
thumbnailElement.src = dataUrl;
}
return setTimeout(function () {
return file.previewElement.classList.add("dz-image-preview");
}, 1);
}
},
// Called whenever an error occurs.
// Receives `file` and `message` — either a string or a server response
// object with an `error` property.
error: function error(file, message) {
  if (file.previewElement) {
    file.previewElement.classList.add("dz-error");
    // BUGFIX: `typeof` yields the lower-case "string"; the original compared
    // against "String", which `typeof` can never return, so the type guard
    // was a no-op and only `message.error` truthiness was ever tested.
    if (typeof message !== "string" && message.error) {
      message = message.error;
    }
    // Show the message in every [data-dz-errormessage] node of the preview.
    var nodes = file.previewElement.querySelectorAll("[data-dz-errormessage]");
    for (var i = 0; i < nodes.length; i++) {
      nodes[i].textContent = message;
    }
  }
},
errormultiple: function errormultiple() {},
// Called when a file gets processed. Since there is a cue, not all added
// files are processed immediately.
// Receives `file`
processing: function processing(file) {
if (file.previewElement) {
file.previewElement.classList.add("dz-processing");
if (file._removeLink) {
return file._removeLink.textContent = this.options.dictCancelUpload;
}
}
},
processingmultiple: function processingmultiple() {},
// Called whenever the upload progress gets updated.
// Receives `file`, `progress` (percentage 0-100) and `bytesSent`.
// To get the total number of bytes of the file, use `file.size`
uploadprogress: function uploadprogress(file, progress, bytesSent) {
if (file.previewElement) {
for (var _iterator8 = file.previewElement.querySelectorAll("[data-dz-uploadprogress]"), _isArray8 = true, _i8 = 0, _iterator8 = _isArray8 ? _iterator8 : _iterator8[Symbol.iterator]();;) {
var _ref7;
if (_isArray8) {
if (_i8 >= _iterator8.length) break;
_ref7 = _iterator8[_i8++];
} else {
_i8 = _iterator8.next();
if (_i8.done) break;
_ref7 = _i8.value;
}
var node = _ref7;
node.nodeName === 'PROGRESS' ? node.value = progress : node.style.width = progress + "%";
}
}
},
// Called whenever the total upload progress gets updated.
// Called with totalUploadProgress (0-100), totalBytes and totalBytesSent
totaluploadprogress: function totaluploadprogress() {},
// Called just before the file is sent. Gets the `xhr` object as second
// parameter, so you can modify it (for example to add a CSRF token) and a
// `formData` object to add additional information.
sending: function sending() {},
sendingmultiple: function sendingmultiple() {},
// When the complete upload is finished and successful
// Receives `file`
success: function success(file) {
  if (file.previewElement) {
    return file.previewElement.classList.add("dz-success");
  }
},
successmultiple: function successmultiple() {},
// When the upload is canceled.
// Routed through the regular error pipeline so the preview shows the reason.
canceled: function canceled(file) {
  return this.emit("error", file, "Upload canceled.");
},
canceledmultiple: function canceledmultiple() {},
// When the upload is finished, either with success or an error.
// Receives `file`
complete: function complete(file) {
  if (file._removeLink) {
    // Restore the remove label (it read "cancel" while the upload ran).
    file._removeLink.textContent = this.options.dictRemoveFile;
  }
  if (file.previewElement) {
    return file.previewElement.classList.add("dz-complete");
  }
},
completemultiple: function completemultiple() {},
maxfilesexceeded: function maxfilesexceeded() {},
maxfilesreached: function maxfilesreached() {},
queuecomplete: function queuecomplete() {},
addedfiles: function addedfiles() {}
};
// Queue state for asynchronous thumbnail generation: thumbnails are produced
// one at a time, in FIFO order.
this.prototype._thumbnailQueue = [];
this.prototype._processingThumbnail = false;
}
// global utility
}, {
key: "extend",
value: function extend(target) {
for (var _len2 = arguments.length, objects = Array(_len2 > 1 ? _len2 - 1 : 0), _key2 = 1; _key2 < _len2; _key2++) {
objects[_key2 - 1] = arguments[_key2];
}
for (var _iterator9 = objects, _isArray9 = true, _i9 = 0, _iterator9 = _isArray9 ? _iterator9 : _iterator9[Symbol.iterator]();;) {
var _ref8;
if (_isArray9) {
if (_i9 >= _iterator9.length) break;
_ref8 = _iterator9[_i9++];
} else {
_i9 = _iterator9.next();
if (_i9.done) break;
_ref8 = _i9.value;
}
var object = _ref8;
for (var key in object) {
var val = object[key];
target[key] = val;
}
}
return target;
}
}]);
/**
 * Dropzone constructor.
 *
 * @param {HTMLElement|string} el - The drop target element, or a CSS selector for it.
 * @param {Object} [options] - Overrides merged over `defaultOptions` and any
 *   options registered for the element (via `Dropzone.optionsForElement`).
 * @throws {Error} If the element is invalid, already has a dropzone attached,
 *   both acceptedFiles and acceptedMimeTypes are given, uploadMultiple is
 *   combined with chunking, or no upload URL can be determined.
 */
function Dropzone(el, options) {
  _classCallCheck(this, Dropzone);
  // Transpiled `super()` call — Dropzone extends the Emitter base class.
  var _this = _possibleConstructorReturn(this, (Dropzone.__proto__ || Object.getPrototypeOf(Dropzone)).call(this));
  var fallback = void 0,
      left = void 0;
  _this.element = el;
  // For backwards compatibility since the version was in the prototype previously
  _this.version = Dropzone.version;
  // Strip newlines from the preview template once, before options merging.
  _this.defaultOptions.previewTemplate = _this.defaultOptions.previewTemplate.replace(/\n*/g, "");
  _this.clickableElements = [];
  _this.listeners = [];
  _this.files = []; // All files
  if (typeof _this.element === "string") {
    _this.element = document.querySelector(_this.element);
  }
  // Not checking if instance of HTMLElement or Element since IE9 is extremely weird.
  if (!_this.element || _this.element.nodeType == null) {
    throw new Error("Invalid dropzone element.");
  }
  if (_this.element.dropzone) {
    throw new Error("Dropzone already attached.");
  }
  // Now add this dropzone to the instances.
  Dropzone.instances.push(_this);
  // Put the dropzone inside the element itself.
  _this.element.dropzone = _this;
  // Precedence: explicit `options` > element-registered options > defaults.
  var elementOptions = (left = Dropzone.optionsForElement(_this.element)) != null ? left : {};
  _this.options = Dropzone.extend({}, _this.defaultOptions, elementOptions, options != null ? options : {});
  // If the browser failed, just call the fallback and leave
  if (_this.options.forceFallback || !Dropzone.isBrowserSupported()) {
    var _ret;
    return _ret = _this.options.fallback.call(_this), _possibleConstructorReturn(_this, _ret);
  }
  // @options.url = @element.getAttribute "action" unless @options.url?
  if (_this.options.url == null) {
    _this.options.url = _this.element.getAttribute("action");
  }
  if (!_this.options.url) {
    throw new Error("No URL provided.");
  }
  if (_this.options.acceptedFiles && _this.options.acceptedMimeTypes) {
    throw new Error("You can't provide both 'acceptedFiles' and 'acceptedMimeTypes'. 'acceptedMimeTypes' is deprecated.");
  }
  if (_this.options.uploadMultiple && _this.options.chunking) {
    throw new Error('You cannot set both: uploadMultiple and chunking.');
  }
  // Backwards compatibility: acceptedMimeTypes is the deprecated spelling.
  if (_this.options.acceptedMimeTypes) {
    _this.options.acceptedFiles = _this.options.acceptedMimeTypes;
    delete _this.options.acceptedMimeTypes;
  }
  // Backwards compatibility: renameFilename(name, file) wraps into renameFile(file).
  if (_this.options.renameFilename != null) {
    _this.options.renameFile = function (file) {
      return _this.options.renameFilename.call(_this, file.name, file);
    };
  }
  _this.options.method = _this.options.method.toUpperCase();
  if ((fallback = _this.getExistingFallback()) && fallback.parentNode) {
    // Remove the fallback — the browser is supported, so it isn't needed.
    fallback.parentNode.removeChild(fallback);
  }
  // Display previews in the previewsContainer element or the Dropzone element unless explicitly set to false
  if (_this.options.previewsContainer !== false) {
    if (_this.options.previewsContainer) {
      _this.previewsContainer = Dropzone.getElement(_this.options.previewsContainer, "previewsContainer");
    } else {
      _this.previewsContainer = _this.element;
    }
  }
  if (_this.options.clickable) {
    if (_this.options.clickable === true) {
      _this.clickableElements = [_this.element];
    } else {
      _this.clickableElements = Dropzone.getElements(_this.options.clickable, "clickable");
    }
  }
  _this.init();
  return _this;
}
// Returns all files that have been accepted
_createClass(Dropzone, [{
key: "getAcceptedFiles",
value: function getAcceptedFiles() {
return this.files.filter(function (file) {
return file.accepted;
}).map(function (file) {
return file;
});
}
// Returns all files that have been rejected
// Not sure when that's going to be useful, but added for completeness.
}, {
key: "getRejectedFiles",
value: function getRejectedFiles() {
return this.files.filter(function (file) {
return !file.accepted;
}).map(function (file) {
return file;
});
}
}, {
key: "getFilesWithStatus",
value: function getFilesWithStatus(status) {
return this.files.filter(function (file) {
return file.status === status;
}).map(function (file) {
return file;
});
}
// Returns all files that are in the queue
}, {
key: "getQueuedFiles",
value: function getQueuedFiles() {
  return this.getFilesWithStatus(Dropzone.QUEUED);
}
// Returns all files currently being uploaded
}, {
key: "getUploadingFiles",
value: function getUploadingFiles() {
  return this.getFilesWithStatus(Dropzone.UPLOADING);
}
// Returns all files that were added but not yet queued or uploaded
}, {
key: "getAddedFiles",
value: function getAddedFiles() {
  return this.getFilesWithStatus(Dropzone.ADDED);
}
// Files that are either queued or uploading
}, {
key: "getActiveFiles",
value: function getActiveFiles() {
return this.files.filter(function (file) {
return file.status === Dropzone.UPLOADING || file.status === Dropzone.QUEUED;
}).map(function (file) {
return file;
});
}
// The function that gets called when Dropzone is initialized. You
// can (and should) setup event listeners inside this function.
}, {
key: "init",
value: function init() {
var _this3 = this;
// In case it isn't set already
if (this.element.tagName === "form") {
this.element.setAttribute("enctype", "multipart/form-data");
}
if (this.element.classList.contains("dropzone") && !this.element.querySelector(".dz-message")) {
this.element.appendChild(Dropzone.createElement("<div class=\"dz-default dz-message\"><span>" + this.options.dictDefaultMessage + "</span></div>"));
}
if (this.clickableElements.length) {
var setupHiddenFileInput = function setupHiddenFileInput() {
if (_this3.hiddenFileInput) {
_this3.hiddenFileInput.parentNode.removeChild(_this3.hiddenFileInput);
}
_this3.hiddenFileInput = document.createElement("input");
_this3.hiddenFileInput.setAttribute("type", "file");
if (_this3.options.maxFiles === null || _this3.options.maxFiles > 1) {
_this3.hiddenFileInput.setAttribute("multiple", "multiple");
}
_this3.hiddenFileInput.className = "dz-hidden-input";
if (_this3.options.acceptedFiles !== null) {
_this3.hiddenFileInput.setAttribute("accept", _this3.options.acceptedFiles);
}
if (_this3.options.capture !== null) {
_this3.hiddenFileInput.setAttribute("capture", _this3.options.capture);
}
// Not setting `display="none"` because some browsers don't accept clicks
// on elements that aren't displayed.
_this3.hiddenFileInput.style.visibility = "hidden";
_this3.hiddenFileInput.style.position = "absolute";
_this3.hiddenFileInput.style.top = "0";
_this3.hiddenFileInput.style.left = "0";
_this3.hiddenFileInput.style.height = "0";
_this3.hiddenFileInput.style.width = "0";
document.querySelector(_this3.options.hiddenInputContainer).appendChild(_this3.hiddenFileInput);
return _this3.hiddenFileInput.addEventListener("change", function () {
var files = _this3.hiddenFileInput.files;
if (files.length) {
for (var _iterator10 = files, _isArray10 = true, _i10 = 0, _iterator10 = _isArray10 ? _iterator10 : _iterator10[Symbol.iterator]();;) {
var _ref9;
if (_isArray10) {
if (_i10 >= _iterator10.length) break;
_ref9 = _iterator10[_i10++];
} else {
_i10 = _iterator10.next();
if (_i10.done) break;
_ref9 = _i10.value;
}
var file = _ref9;
_this3.addFile(file);
}
}
_this3.emit("addedfiles", files);
return setupHiddenFileInput();
});
};
setupHiddenFileInput();
}
this.URL = window.URL !== null ? window.URL : window.webkitURL;
// Setup all event listeners on the Dropzone object itself.
// They're not in @setupEventListeners() because they shouldn't be removed
// again when the dropzone gets disabled.
for (var _iterator11 = this.events, _isArray11 = true, _i11 = 0, _iterator11 = _isArray11 ? _iterator11 : _iterator11[Symbol.iterator]();;) {
var _ref10;
if (_isArray11) {
if (_i11 >= _iterator11.length) break;
_ref10 = _iterator11[_i11++];
} else {
_i11 = _iterator11.next();
if (_i11.done) break;
_ref10 = _i11.value;
}
var eventName = _ref10;
this.on(eventName, this.options[eventName]);
}
this.on("uploadprogress", function () {
return _this3.updateTotalUploadProgress();
});
this.on("removedfile", function () {
return _this3.updateTotalUploadProgress();
});
this.on("canceled", function (file) {
return _this3.emit("complete", file);
});
// Emit a `queuecomplete` event if all files finished uploading.
this.on("complete", function (file) {
if (_this3.getAddedFiles().length === 0 && _this3.getUploadingFiles().length === 0 && _this3.getQueuedFiles().length === 0) {
// This needs to be deferred so that `queuecomplete` really triggers after `complete`
return setTimeout(function () {
return _this3.emit("queuecomplete");
}, 0);
}
});
var noPropagation = function noPropagation(e) {
e.stopPropagation();
if (e.preventDefault) {
return e.preventDefault();
} else {
return e.returnValue = false;
}
};
// Create the listeners
this.listeners = [{
element: this.element,
events: {
"dragstart": function dragstart(e) {
return _this3.emit("dragstart", e);
},
"dragenter": function dragenter(e) {
noPropagation(e);
return _this3.emit("dragenter", e);
},
"dragover": function dragover(e) {
// Makes it possible to drag files from chrome's download bar
// https//stackoverflow.com/questions/19526430/drag-and-drop-file-uploads-from-chrome-downloads-bar
// Try is required to prevent bug in Internet Explorer 11 (SCRIPT65535 exception)
var efct = void 0;
try {
efct = e.dataTransfer.effectAllowed;
} catch (error) {}
e.dataTransfer.dropEffect = 'move' === efct || 'linkMove' === efct ? 'move' : 'copy';
noPropagation(e);
return _this3.emit("dragover", e);
},
"dragleave": function dragleave(e) {
return _this3.emit("dragleave", e);
},
"drop": function drop(e) {
noPropagation(e);
return _this3.drop(e);
},
"dragend": function dragend(e) {
return _this3.emit("dragend", e);
}
// This is disabled right now, because the browsers don't implement it properly.
// "paste": (e) =>
// noPropagation e
// @paste e
} }];
this.clickableElements.forEach(function (clickableElement) {
return _this3.listeners.push({
element: clickableElement,
events: {
"click": function click(evt) {
// Only the actual dropzone or the message element should trigger file selection
if (clickableElement !== _this3.element || evt.target === _this3.element || Dropzone.elementInside(evt.target, _this3.element.querySelector(".dz-message"))) {
_this3.hiddenFileInput.click(); // Forward the click
}
return true;
}
}
});
});
this.enable();
return this.options.init.call(this);
}
// Not fully tested yet
}, {
key: "destroy",
value: function destroy() {
this.disable();
this.removeAllFiles(true);
if (this.hiddenFileInput != null ? this.hiddenFileInput.parentNode : undefined) {
this.hiddenFileInput.parentNode.removeChild(this.hiddenFileInput);
this.hiddenFileInput = null;
}
delete this.element.dropzone;
return Dropzone.instances.splice(Dropzone.instances.indexOf(this), 1);
}
}, {
key: "updateTotalUploadProgress",
value: function updateTotalUploadProgress() {
var totalUploadProgress = void 0;
var totalBytesSent = 0;
var totalBytes = 0;
var activeFiles = this.getActiveFiles();
if (activeFiles.length) {
for (var _iterator12 = this.getActiveFiles(), _isArray12 = true, _i12 = 0, _iterator12 = _isArray12 ? _iterator12 : _iterator12[Symbol.iterator]();;) {
var _ref11;
if (_isArray12) {
if (_i12 >= _iterator12.length) break;
_ref11 = _iterator12[_i12++];
} else {
_i12 = _iterator12.next();
if (_i12.done) break;
_ref11 = _i12.value;
}
var file = _ref11;
totalBytesSent += file.upload.bytesSent;
totalBytes += file.upload.total;
}
totalUploadProgress = 100 * totalBytesSent / totalBytes;
} else {
totalUploadProgress = 100;
}
return this.emit("totaluploadprogress", totalUploadProgress, totalBytes, totalBytesSent);
}
// @options.paramName can be a function taking one parameter rather than a string.
// A parameter name for a file is obtained simply by calling this with an index number.
}, {
key: "_getParamName",
value: function _getParamName(n) {
if (typeof this.options.paramName === "function") {
return this.options.paramName(n);
} else {
return "" + this.options.paramName + (this.options.uploadMultiple ? "[" + n + "]" : "");
}
}
// If @options.renameFile is a function,
// the function will be used to rename the file.name before appending it to the formData
}, {
key: "_renameFile",
value: function _renameFile(file) {
if (typeof this.options.renameFile !== "function") {
return file.name;
}
return this.options.renameFile(file);
}
// Returns a form that can be used as fallback if the browser does not support DragnDrop
//
// If the dropzone is already a form, only the input field and button are returned. Otherwise a complete form element is provided.
// This code has to pass in IE7 :(
}, {
key: "getFallbackForm",
value: function getFallbackForm() {
var existingFallback = void 0,
form = void 0;
if (existingFallback = this.getExistingFallback()) {
return existingFallback;
}
var fieldsString = "<div class=\"dz-fallback\">";
if (this.options.dictFallbackText) {
fieldsString += "<p>" + this.options.dictFallbackText + "</p>";
}
fieldsString += "<input type=\"file\" name=\"" + this._getParamName(0) + "\" " + (this.options.uploadMultiple ? 'multiple="multiple"' : undefined) + " /><input type=\"submit\" value=\"Upload!\"></div>";
var fields = Dropzone.createElement(fieldsString);
if (this.element.tagName !== "FORM") {
form = Dropzone.createElement("<form action=\"" + this.options.url + "\" enctype=\"multipart/form-data\" method=\"" + this.options.method + "\"></form>");
form.appendChild(fields);
} else {
// Make sure that the enctype and method attributes are set properly
this.element.setAttribute("enctype", "multipart/form-data");
this.element.setAttribute("method", this.options.method);
}
return form != null ? form : fields;
}
// Returns the fallback elements if they exist already
//
// This code has to pass in IE7 :(
}, {
key: "getExistingFallback",
value: function getExistingFallback() {
var getFallback = function getFallback(elements) {
for (var _iterator13 = elements, _isArray13 = true, _i13 = 0, _iterator13 = _isArray13 ? _iterator13 : _iterator13[Symbol.iterator]();;) {
var _ref12;
if (_isArray13) {
if (_i13 >= _iterator13.length) break;
_ref12 = _iterator13[_i13++];
} else {
_i13 = _iterator13.next();
if (_i13.done) break;
_ref12 = _i13.value;
}
var el = _ref12;
if (/(^| )fallback($| )/.test(el.className)) {
return el;
}
}
};
var _arr = ["div", "form"];
for (var _i14 = 0; _i14 < _arr.length; _i14++) {
var tagName = _arr[_i14];
var fallback;
if (fallback = getFallback(this.element.getElementsByTagName(tagName))) {
return fallback;
}
}
}
// Activates all listeners stored in @listeners
}, {
key: "setupEventListeners",
value: function setupEventListeners() {
return this.listeners.map(function (elementListeners) {
return function () {
var result = [];
for (var event in elementListeners.events) {
var listener = elementListeners.events[event];
result.push(elementListeners.element.addEventListener(event, listener, false));
}
return result;
}();
});
}
// Deactivates all listeners stored in @listeners
}, {
key: "removeEventListeners",
value: function removeEventListeners() {
return this.listeners.map(function (elementListeners) {
return function () {
var result = [];
for (var event in elementListeners.events) {
var listener = elementListeners.events[event];
result.push(elementListeners.element.removeEventListener(event, listener, false));
}
return result;
}();
});
}
// Removes all event listeners and cancels all files in the queue or being processed.
}, {
key: "disable",
value: function disable() {
var _this4 = this;
this.clickableElements.forEach(function (element) {
return element.classList.remove("dz-clickable");
});
this.removeEventListeners();
return this.files.map(function (file) {
return _this4.cancelUpload(file);
});
}
}, {
key: "enable",
value: function enable() {
this.clickableElements.forEach(function (element) {
return element.classList.add("dz-clickable");
});
return this.setupEventListeners();
}
// Returns a nicely formatted filesize
}, {
key: "filesize",
value: function filesize(size) {
var selectedSize = 0;
var selectedUnit = "b";
if (size > 0) {
var units = ['tb', 'gb', 'mb', 'kb', 'b'];
for (var i = 0; i < units.length; i++) {
var unit = units[i];
var cutoff = Math.pow(this.options.filesizeBase, 4 - i) / 10;
if (size >= cutoff) {
selectedSize = size / Math.pow(this.options.filesizeBase, 4 - i);
selectedUnit = unit;
break;
}
}
selectedSize = Math.round(10 * selectedSize) / 10; // Cutting of digits
}
return "<strong>" + selectedSize + "</strong> " + this.options.dictFileSizeUnits[selectedUnit];
}
// Adds or removes the `dz-max-files-reached` class from the form.
}, {
key: "_updateMaxFilesReachedClass",
value: function _updateMaxFilesReachedClass() {
if (this.options.maxFiles != null && this.getAcceptedFiles().length >= this.options.maxFiles) {
if (this.getAcceptedFiles().length === this.options.maxFiles) {
this.emit('maxfilesreached', this.files);
}
return this.element.classList.add("dz-max-files-reached");
} else {
return this.element.classList.remove("dz-max-files-reached");
}
}
}, {
key: "drop",
value: function drop(e) {
if (!e.dataTransfer) {
return;
}
this.emit("drop", e);
var files = e.dataTransfer.files;
this.emit("addedfiles", files);
// Even if it's a folder, files.length will contain the folders.
if (files.length) {
var items = e.dataTransfer.items;
if (items && items.length && items[0].webkitGetAsEntry != null) {
// The browser supports dropping of folders, so handle items instead of files
this._addFilesFromItems(items);
} else {
this.handleFiles(files);
}
}
}
}, {
key: "paste",
value: function paste(e) {
if (__guard__(e != null ? e.clipboardData : undefined, function (x) {
return x.items;
}) == null) {
return;
}
this.emit("paste", e);
var items = e.clipboardData.items;
if (items.length) {
return this._addFilesFromItems(items);
}
}
}, {
key: "handleFiles",
value: function handleFiles(files) {
var _this5 = this;
return files.map(function (file) {
return _this5.addFile(file);
});
}
  // When a folder is dropped (or files are pasted), items must be handled
  // instead of files.
  }, {
    key: "_addFilesFromItems",
    value: function _addFilesFromItems(items) {
      var _this6 = this;
      return function () {
        var result = [];
        // Transpiler-generated for...of loop over the DataTransferItemList.
        for (var _iterator14 = items, _isArray14 = true, _i15 = 0, _iterator14 = _isArray14 ? _iterator14 : _iterator14[Symbol.iterator]();;) {
          var _ref13;
          if (_isArray14) {
            if (_i15 >= _iterator14.length) break;
            _ref13 = _iterator14[_i15++];
          } else {
            _i15 = _iterator14.next();
            if (_i15.done) break;
            _ref13 = _i15.value;
          }
          var item = _ref13;
          var entry;
          // Prefer the filesystem entry API when available, so directories
          // can be distinguished from plain files.
          if (item.webkitGetAsEntry != null && (entry = item.webkitGetAsEntry())) {
            if (entry.isFile) {
              result.push(_this6.addFile(item.getAsFile()));
            } else if (entry.isDirectory) {
              // Append all files from that directory to files
              result.push(_this6._addFilesFromDirectory(entry, entry.name));
            } else {
              result.push(undefined);
            }
          } else if (item.getAsFile != null) {
            // Fallback path: only items of kind "file" can be converted.
            if (item.kind == null || item.kind === "file") {
              result.push(_this6.addFile(item.getAsFile()));
            } else {
              result.push(undefined);
            }
          } else {
            result.push(undefined);
          }
        }
        return result;
      }();
    }
  // Goes through the directory, and adds each file it finds recursively
  }, {
    key: "_addFilesFromDirectory",
    value: function _addFilesFromDirectory(directory, path) {
      var _this7 = this;
      var dirReader = directory.createReader();
      // Best-effort error reporting: logs via console.log when it exists
      // (guarded for environments without a console).
      var errorHandler = function errorHandler(error) {
        return __guardMethod__(console, 'log', function (o) {
          return o.log(error);
        });
      };
      var readEntries = function readEntries() {
        return dirReader.readEntries(function (entries) {
          if (entries.length > 0) {
            // Transpiler-generated for...of loop over this batch of entries.
            for (var _iterator15 = entries, _isArray15 = true, _i16 = 0, _iterator15 = _isArray15 ? _iterator15 : _iterator15[Symbol.iterator]();;) {
              var _ref14;
              if (_isArray15) {
                if (_i16 >= _iterator15.length) break;
                _ref14 = _iterator15[_i16++];
              } else {
                _i16 = _iterator15.next();
                if (_i16.done) break;
                _ref14 = _i16.value;
              }
              var entry = _ref14;
              if (entry.isFile) {
                // entry.file() is asynchronous; the callback receives the File.
                entry.file(function (file) {
                  if (_this7.options.ignoreHiddenFiles && file.name.substring(0, 1) === '.') {
                    return;
                  }
                  // Record where in the dropped tree this file came from.
                  file.fullPath = path + "/" + file.name;
                  return _this7.addFile(file);
                });
              } else if (entry.isDirectory) {
                _this7._addFilesFromDirectory(entry, path + "/" + entry.name);
              }
            }
            // Recursively call readEntries() again, since browser only handle
            // the first 100 entries.
            // See: https://developer.mozilla.org/en-US/docs/Web/API/DirectoryReader#readEntries
            readEntries();
          }
          return null;
        }, errorHandler);
      };
      return readEntries();
    }
  // If `done()` is called without argument the file is accepted
  // If you call it with an error message, the file is rejected
  // (This allows for asynchronous validation)
  //
  // This function checks the filesize, and if the file.type passes the
  // `acceptedFiles` check.
  }, {
    key: "accept",
    value: function accept(file, done) {
      // maxFilesize is expressed in megabytes (hence the * 1024 * 1024).
      if (file.size > this.options.maxFilesize * 1024 * 1024) {
        // size / 1024 / 10.24 / 100 === size / 1048576 rounded to two decimals.
        return done(this.options.dictFileTooBig.replace("{{filesize}}", Math.round(file.size / 1024 / 10.24) / 100).replace("{{maxFilesize}}", this.options.maxFilesize));
      } else if (!Dropzone.isValidFile(file, this.options.acceptedFiles)) {
        return done(this.options.dictInvalidFileType);
      } else if (this.options.maxFiles != null && this.getAcceptedFiles().length >= this.options.maxFiles) {
        done(this.options.dictMaxFilesExceeded.replace("{{maxFiles}}", this.options.maxFiles));
        return this.emit("maxfilesexceeded", file);
      } else {
        // Finally delegate to the user-provided (possibly async) accept hook.
        return this.options.accept.call(this, file, done);
      }
    }
}, {
key: "addFile",
value: function addFile(file) {
var _this8 = this;
file.upload = {
uuid: Dropzone.uuidv4(),
progress: 0,
// Setting the total upload size to file.size for the beginning
// It's actual different than the size to be transmitted.
total: file.size,
bytesSent: 0,
filename: this._renameFile(file),
chunked: this.options.chunking && (this.options.forceChunking || file.size > this.options.chunkSize),
totalChunkCount: Math.ceil(file.size / this.options.chunkSize)
};
this.files.push(file);
file.status = Dropzone.ADDED;
this.emit("addedfile", file);
this._enqueueThumbnail(file);
return this.accept(file, function (error) {
if (error) {
file.accepted = false;
_this8._errorProcessing([file], error); // Will set the file.status
} else {
file.accepted = true;
if (_this8.options.autoQueue) {
_this8.enqueueFile(file);
} // Will set .accepted = true
}
return _this8._updateMaxFilesReachedClass();
});
}
// Wrapper for enqueueFile
}, {
key: "enqueueFiles",
value: function enqueueFiles(files) {
for (var _iterator16 = files, _isArray16 = true, _i17 = 0, _iterator16 = _isArray16 ? _iterator16 : _iterator16[Symbol.iterator]();;) {
var _ref15;
if (_isArray16) {
if (_i17 >= _iterator16.length) break;
_ref15 = _iterator16[_i17++];
} else {
_i17 = _iterator16.next();
if (_i17.done) break;
_ref15 = _i17.value;
}
var file = _ref15;
this.enqueueFile(file);
}
return null;
}
  }, {
    key: "enqueueFile",
    value: function enqueueFile(file) {
      var _this9 = this;
      // Only files that were added AND passed accept() may be queued.
      if (file.status === Dropzone.ADDED && file.accepted === true) {
        file.status = Dropzone.QUEUED;
        if (this.options.autoProcessQueue) {
          return setTimeout(function () {
            return _this9.processQueue();
          }, 0); // Deferring the call
        }
      } else {
        // NOTE(review): this message also fires for files that were merely
        // rejected by accept(), not only already-processed ones.
        throw new Error("This file can't be queued because it has already been processed or was rejected.");
      }
    }
}, {
key: "_enqueueThumbnail",
value: function _enqueueThumbnail(file) {
var _this10 = this;
if (this.options.createImageThumbnails && file.type.match(/image.*/) && file.size <= this.options.maxThumbnailFilesize * 1024 * 1024) {
this._thumbnailQueue.push(file);
return setTimeout(function () {
return _this10._processThumbnailQueue();
}, 0); // Deferring the call
}
}
}, {
key: "_processThumbnailQueue",
value: function _processThumbnailQueue() {
var _this11 = this;
if (this._processingThumbnail || this._thumbnailQueue.length === 0) {
return;
}
this._processingThumbnail = true;
var file = this._thumbnailQueue.shift();
return this.createThumbnail(file, this.options.thumbnailWidth, this.options.thumbnailHeight, this.options.thumbnailMethod, true, function (dataUrl) {
_this11.emit("thumbnail", file, dataUrl);
_this11._processingThumbnail = false;
return _this11._processThumbnailQueue();
});
}
// Can be called by the user to remove a file
}, {
key: "removeFile",
value: function removeFile(file) {
if (file.status === Dropzone.UPLOADING) {
this.cancelUpload(file);
}
this.files = without(this.files, file);
this.emit("removedfile", file);
if (this.files.length === 0) {
return this.emit("reset");
}
}
// Removes all files that aren't currently processed from the list
}, {
key: "removeAllFiles",
value: function removeAllFiles(cancelIfNecessary) {
// Create a copy of files since removeFile() changes the @files array.
if (cancelIfNecessary == null) {
cancelIfNecessary = false;
}
for (var _iterator17 = this.files.slice(), _isArray17 = true, _i18 = 0, _iterator17 = _isArray17 ? _iterator17 : _iterator17[Symbol.iterator]();;) {
var _ref16;
if (_isArray17) {
if (_i18 >= _iterator17.length) break;
_ref16 = _iterator17[_i18++];
} else {
_i18 = _iterator17.next();
if (_i18.done) break;
_ref16 = _i18.value;
}
var file = _ref16;
if (file.status !== Dropzone.UPLOADING || cancelIfNecessary) {
this.removeFile(file);
}
}
return null;
}
  // Resizes an image before it gets sent to the server. This function is the default behavior of
  // `options.transformFile` if `resizeWidth` or `resizeHeight` are set. The callback is invoked with
  // the resized blob.
  }, {
    key: "resizeImage",
    value: function resizeImage(file, width, height, resizeMethod, callback) {
      var _this12 = this;
      return this.createThumbnail(file, width, height, resizeMethod, false, function (dataUrl, canvas) {
        if (canvas === null) {
          // The image has not been resized
          return callback(file);
        } else {
          var resizeMimeType = _this12.options.resizeMimeType;
          if (resizeMimeType == null) {
            // Fall back to the original file's mime type.
            resizeMimeType = file.type;
          }
          var resizedDataURL = canvas.toDataURL(resizeMimeType, _this12.options.resizeQuality);
          if (resizeMimeType === 'image/jpeg' || resizeMimeType === 'image/jpg') {
            // Now add the original EXIF information
            resizedDataURL = ExifRestore.restore(file.dataURL, resizedDataURL);
          }
          // The callback receives a Blob, not a File, when resizing happened.
          return callback(Dropzone.dataURItoBlob(resizedDataURL));
        }
      });
    }
  }, {
    key: "createThumbnail",
    value: function createThumbnail(file, width, height, resizeMethod, fixOrientation, callback) {
      var _this13 = this;
      // Read the file into a data URL first; the thumbnail is rendered from it.
      var fileReader = new FileReader();
      // NOTE(review): no onerror handler — if the read fails the callback is
      // never invoked; confirm whether that is acceptable for callers.
      fileReader.onload = function () {
        file.dataURL = fileReader.result;
        // Don't bother creating a thumbnail for SVG images since they're vector
        if (file.type === "image/svg+xml") {
          if (callback != null) {
            callback(fileReader.result);
          }
          return;
        }
        return _this13.createThumbnailFromUrl(file, width, height, resizeMethod, fixOrientation, callback);
      };
      return fileReader.readAsDataURL(file);
    }
  }, {
    key: "createThumbnailFromUrl",
    value: function createThumbnailFromUrl(file, width, height, resizeMethod, fixOrientation, callback, crossOrigin) {
      var _this14 = this;
      // Not using `new Image` here because of a bug in latest Chrome versions.
      // See https://github.com/enyo/dropzone/pull/226
      var img = document.createElement("img");
      if (crossOrigin) {
        img.crossOrigin = crossOrigin;
      }
      img.onload = function () {
        // By default behave as if the EXIF orientation is 1 (no transform).
        var loadExif = function loadExif(callback) {
          return callback(1);
        };
        // Only consult EXIF data when the library is present and requested.
        if (typeof EXIF !== 'undefined' && EXIF !== null && fixOrientation) {
          loadExif = function loadExif(callback) {
            return EXIF.getData(img, function () {
              return callback(EXIF.getTag(this, 'Orientation'));
            });
          };
        }
        return loadExif(function (orientation) {
          file.width = img.width;
          file.height = img.height;
          // The resize option computes source/target rectangles for drawing.
          var resizeInfo = _this14.options.resize.call(_this14, file, width, height, resizeMethod);
          var canvas = document.createElement("canvas");
          var ctx = canvas.getContext("2d");
          canvas.width = resizeInfo.trgWidth;
          canvas.height = resizeInfo.trgHeight;
          // Orientations 5-8 swap width and height (90° rotations).
          if (orientation > 4) {
            canvas.width = resizeInfo.trgHeight;
            canvas.height = resizeInfo.trgWidth;
          }
          // Apply the canvas transform that undoes the EXIF orientation.
          switch (orientation) {
            case 2:
              // horizontal flip
              ctx.translate(canvas.width, 0);
              ctx.scale(-1, 1);
              break;
            case 3:
              // 180° rotate left
              ctx.translate(canvas.width, canvas.height);
              ctx.rotate(Math.PI);
              break;
            case 4:
              // vertical flip
              ctx.translate(0, canvas.height);
              ctx.scale(1, -1);
              break;
            case 5:
              // vertical flip + 90 rotate right
              ctx.rotate(0.5 * Math.PI);
              ctx.scale(1, -1);
              break;
            case 6:
              // 90° rotate right
              ctx.rotate(0.5 * Math.PI);
              ctx.translate(0, -canvas.height);
              break;
            case 7:
              // horizontal flip + 90 rotate right
              ctx.rotate(0.5 * Math.PI);
              ctx.translate(canvas.width, -canvas.height);
              ctx.scale(-1, 1);
              break;
            case 8:
              // 90° rotate left
              ctx.rotate(-0.5 * Math.PI);
              ctx.translate(-canvas.width, 0);
              break;
          }
          // This is a bugfix for iOS' scaling bug.
          drawImageIOSFix(ctx, img, resizeInfo.srcX != null ? resizeInfo.srcX : 0, resizeInfo.srcY != null ? resizeInfo.srcY : 0, resizeInfo.srcWidth, resizeInfo.srcHeight, resizeInfo.trgX != null ? resizeInfo.trgX : 0, resizeInfo.trgY != null ? resizeInfo.trgY : 0, resizeInfo.trgWidth, resizeInfo.trgHeight);
          var thumbnail = canvas.toDataURL("image/png");
          if (callback != null) {
            return callback(thumbnail, canvas);
          }
        });
      };
      if (callback != null) {
        img.onerror = callback;
      }
      // Assigning src starts the load; onload above does the actual work.
      return img.src = file.dataURL;
    }
// Goes through the queue and processes files if there aren't too many already.
}, {
key: "processQueue",
value: function processQueue() {
var parallelUploads = this.options.parallelUploads;
var processingLength = this.getUploadingFiles().length;
var i = processingLength;
// There are already at least as many files uploading than should be
if (processingLength >= parallelUploads) {
return;
}
var queuedFiles = this.getQueuedFiles();
if (!(queuedFiles.length > 0)) {
return;
}
if (this.options.uploadMultiple) {
// The files should be uploaded in one request
return this.processFiles(queuedFiles.slice(0, parallelUploads - processingLength));
} else {
while (i < parallelUploads) {
if (!queuedFiles.length) {
return;
} // Nothing left to process
this.processFile(queuedFiles.shift());
i++;
}
}
}
// Wrapper for `processFiles`
}, {
key: "processFile",
value: function processFile(file) {
return this.processFiles([file]);
}
// Loads the file, then calls finishedLoading()
}, {
key: "processFiles",
value: function processFiles(files) {
for (var _iterator18 = files, _isArray18 = true, _i19 = 0, _iterator18 = _isArray18 ? _iterator18 : _iterator18[Symbol.iterator]();;) {
var _ref17;
if (_isArray18) {
if (_i19 >= _iterator18.length) break;
_ref17 = _iterator18[_i19++];
} else {
_i19 = _iterator18.next();
if (_i19.done) break;
_ref17 = _i19.value;
}
var file = _ref17;
file.processing = true; // Backwards compatibility
file.status = Dropzone.UPLOADING;
this.emit("processing", file);
}
if (this.options.uploadMultiple) {
this.emit("processingmultiple", files);
}
return this.uploadFiles(files);
}
}, {
key: "_getFilesWithXhr",
value: function _getFilesWithXhr(xhr) {
var files = void 0;
return files = this.files.filter(function (file) {
return file.xhr === xhr;
}).map(function (file) {
return file;
});
}
  // Cancels the file upload and sets the status to CANCELED
  // **if** the file is actually being uploaded.
  // If it's still in the queue, the file is being removed from it and the status
  // set to CANCELED.
  }, {
    key: "cancelUpload",
    value: function cancelUpload(file) {
      if (file.status === Dropzone.UPLOADING) {
        // All files sharing this xhr are canceled together.
        var groupedFiles = this._getFilesWithXhr(file.xhr);
        // First pass: mark everything CANCELED *before* aborting, so the
        // abort-triggered handlers see the canceled status.
        for (var _iterator19 = groupedFiles, _isArray19 = true, _i20 = 0, _iterator19 = _isArray19 ? _iterator19 : _iterator19[Symbol.iterator]();;) {
          var _ref18;
          if (_isArray19) {
            if (_i20 >= _iterator19.length) break;
            _ref18 = _iterator19[_i20++];
          } else {
            _i20 = _iterator19.next();
            if (_i20.done) break;
            _ref18 = _i20.value;
          }
          var groupedFile = _ref18;
          groupedFile.status = Dropzone.CANCELED;
        }
        if (typeof file.xhr !== 'undefined') {
          file.xhr.abort();
        }
        // Second pass: notify listeners for each canceled file.
        for (var _iterator20 = groupedFiles, _isArray20 = true, _i21 = 0, _iterator20 = _isArray20 ? _iterator20 : _iterator20[Symbol.iterator]();;) {
          var _ref19;
          if (_isArray20) {
            if (_i21 >= _iterator20.length) break;
            _ref19 = _iterator20[_i21++];
          } else {
            _i21 = _iterator20.next();
            if (_i21.done) break;
            _ref19 = _i21.value;
          }
          var _groupedFile = _ref19;
          this.emit("canceled", _groupedFile);
        }
        if (this.options.uploadMultiple) {
          this.emit("canceledmultiple", groupedFiles);
        }
      } else if (file.status === Dropzone.ADDED || file.status === Dropzone.QUEUED) {
        // Not uploading yet: no xhr to abort, just flag and notify.
        file.status = Dropzone.CANCELED;
        this.emit("canceled", file);
        if (this.options.uploadMultiple) {
          this.emit("canceledmultiple", [file]);
        }
      }
      // A canceled upload frees a parallel slot, so keep the queue moving.
      if (this.options.autoProcessQueue) {
        return this.processQueue();
      }
    }
}, {
key: "resolveOption",
value: function resolveOption(option) {
if (typeof option === 'function') {
for (var _len3 = arguments.length, args = Array(_len3 > 1 ? _len3 - 1 : 0), _key3 = 1; _key3 < _len3; _key3++) {
args[_key3 - 1] = arguments[_key3];
}
return option.apply(this, args);
}
return option;
}
}, {
key: "uploadFile",
value: function uploadFile(file) {
return this.uploadFiles([file]);
}
  }, {
    key: "uploadFiles",
    value: function uploadFiles(files) {
      var _this15 = this;
      // Transform (e.g. resize) first; the upload itself starts in the callback.
      this._transformFiles(files, function (transformedFiles) {
        if (files[0].upload.chunked) {
          // This file should be sent in chunks!
          // If the chunking option is set, we **know** that there can only be **one** file, since
          // uploadMultiple is not allowed with this option.
          var file = files[0];
          var transformedFile = transformedFiles[0];
          var startedChunkCount = 0;
          file.upload.chunks = [];
          // Starts the upload of the first chunk that has no entry yet in
          // file.upload.chunks (a sparse array indexed by chunk number).
          var handleNextChunk = function handleNextChunk() {
            var chunkIndex = 0;
            // Find the next item in file.upload.chunks that is not defined yet.
            while (file.upload.chunks[chunkIndex] !== undefined) {
              chunkIndex++;
            }
            // This means, that all chunks have already been started.
            if (chunkIndex >= file.upload.totalChunkCount) return;
            startedChunkCount++;
            var start = chunkIndex * _this15.options.chunkSize;
            var end = Math.min(start + _this15.options.chunkSize, file.size);
            var dataBlock = {
              name: _this15._getParamName(0),
              data: transformedFile.webkitSlice ? transformedFile.webkitSlice(start, end) : transformedFile.slice(start, end),
              filename: file.upload.filename,
              chunkIndex: chunkIndex
            };
            file.upload.chunks[chunkIndex] = {
              file: file,
              index: chunkIndex,
              dataBlock: dataBlock, // In case we want to retry.
              status: Dropzone.UPLOADING,
              progress: 0,
              retries: 0 // The number of times this block has been retried.
            };
            _this15._uploadData(files, [dataBlock]);
          };
          // Invoked by _finishedUploading() whenever one chunk completes.
          file.upload.finishedChunkUpload = function (chunk) {
            var allFinished = true;
            chunk.status = Dropzone.SUCCESS;
            // Clear the data from the chunk
            chunk.dataBlock = null;
            for (var i = 0; i < file.upload.totalChunkCount; i++) {
              if (file.upload.chunks[i] === undefined) {
                // There is still an unstarted chunk: start it instead of finishing.
                return handleNextChunk();
              }
              if (file.upload.chunks[i].status !== Dropzone.SUCCESS) {
                allFinished = false;
              }
            }
            if (allFinished) {
              // Give the user a chance to run post-processing before the file
              // is marked as finished.
              _this15.options.chunksUploaded(file, function () {
                _this15._finished(files, '', null);
              });
            }
          };
          if (_this15.options.parallelChunkUploads) {
            for (var i = 0; i < file.upload.totalChunkCount; i++) {
              handleNextChunk();
            }
          } else {
            handleNextChunk();
          }
        } else {
          // Regular (non-chunked) upload: one data block per file.
          var dataBlocks = [];
          for (var _i22 = 0; _i22 < files.length; _i22++) {
            dataBlocks[_i22] = {
              name: _this15._getParamName(_i22),
              data: transformedFiles[_i22],
              filename: files[_i22].upload.filename
            };
          }
          _this15._uploadData(files, dataBlocks);
        }
      });
    }
/// Returns the right chunk for given file and xhr
}, {
key: "_getChunk",
value: function _getChunk(file, xhr) {
for (var i = 0; i < file.upload.totalChunkCount; i++) {
if (file.upload.chunks[i] !== undefined && file.upload.chunks[i].xhr === xhr) {
return file.upload.chunks[i];
}
}
}
  // This function actually uploads the file(s) to the server.
  // If dataBlocks contains the actual data to upload (meaning, that this could either be transformed
  // files, or individual chunks for chunked upload).
  }, {
    key: "_uploadData",
    value: function _uploadData(files, dataBlocks) {
      var _this16 = this;
      var xhr = new XMLHttpRequest();
      // Put the xhr object in the file objects to be able to reference it later.
      for (var _iterator21 = files, _isArray21 = true, _i23 = 0, _iterator21 = _isArray21 ? _iterator21 : _iterator21[Symbol.iterator]();;) {
        var _ref20;
        if (_isArray21) {
          if (_i23 >= _iterator21.length) break;
          _ref20 = _iterator21[_i23++];
        } else {
          _i23 = _iterator21.next();
          if (_i23.done) break;
          _ref20 = _i23.value;
        }
        var file = _ref20;
        file.xhr = xhr;
      }
      if (files[0].upload.chunked) {
        // Put the xhr object in the right chunk object, so it can be associated later, and found with _getChunk
        files[0].upload.chunks[dataBlocks[0].chunkIndex].xhr = xhr;
      }
      // method and url options may be plain values or functions of the files.
      var method = this.resolveOption(this.options.method, files);
      var url = this.resolveOption(this.options.url, files);
      xhr.open(method, url, true);
      // Setting the timeout after open because of IE11 issue: https://gitlab.com/meno/dropzone/issues/8
      xhr.timeout = this.resolveOption(this.options.timeout, files);
      // Has to be after `.open()`. See https://github.com/enyo/dropzone/issues/179
      xhr.withCredentials = !!this.options.withCredentials;
      xhr.onload = function (e) {
        _this16._finishedUploading(files, xhr, e);
      };
      xhr.onerror = function () {
        _this16._handleUploadError(files, xhr);
      };
      // Some browsers do not have the .upload property
      var progressObj = xhr.upload != null ? xhr.upload : xhr;
      progressObj.onprogress = function (e) {
        return _this16._updateFilesUploadProgress(files, xhr, e);
      };
      var headers = {
        "Accept": "application/json",
        "Cache-Control": "no-cache",
        "X-Requested-With": "XMLHttpRequest"
      };
      // User-supplied headers override/extend the defaults above.
      if (this.options.headers) {
        Dropzone.extend(headers, this.options.headers);
      }
      for (var headerName in headers) {
        var headerValue = headers[headerName];
        if (headerValue) {
          xhr.setRequestHeader(headerName, headerValue);
        }
      }
      var formData = new FormData();
      // Adding all @options parameters
      if (this.options.params) {
        var additionalParams = this.options.params;
        if (typeof additionalParams === 'function') {
          additionalParams = additionalParams.call(this, files, xhr, files[0].upload.chunked ? this._getChunk(files[0], xhr) : null);
        }
        for (var key in additionalParams) {
          var value = additionalParams[key];
          formData.append(key, value);
        }
      }
      // Let the user add additional data if necessary
      for (var _iterator22 = files, _isArray22 = true, _i24 = 0, _iterator22 = _isArray22 ? _iterator22 : _iterator22[Symbol.iterator]();;) {
        var _ref21;
        if (_isArray22) {
          if (_i24 >= _iterator22.length) break;
          _ref21 = _iterator22[_i24++];
        } else {
          _i24 = _iterator22.next();
          if (_i24.done) break;
          _ref21 = _i24.value;
        }
        var _file = _ref21;
        this.emit("sending", _file, xhr, formData);
      }
      if (this.options.uploadMultiple) {
        this.emit("sendingmultiple", files, xhr, formData);
      }
      this._addFormElementData(formData);
      // Finally add the files
      // Has to be last because some servers (eg: S3) expect the file to be the last parameter
      for (var i = 0; i < dataBlocks.length; i++) {
        var dataBlock = dataBlocks[i];
        formData.append(dataBlock.name, dataBlock.data, dataBlock.filename);
      }
      this.submitRequest(xhr, formData, files);
    }
  // Transforms all files with this.options.transformFile and invokes done with the transformed files when done.
  }, {
    key: "_transformFiles",
    value: function _transformFiles(files, done) {
      var _this17 = this;
      var transformedFiles = [];
      // Clumsy way of handling asynchronous calls, until I get to add a proper Future library.
      var doneCounter = 0;
      // _loop captures `i` per iteration so out-of-order completions still
      // store their result at the right index.
      var _loop = function _loop(i) {
        _this17.options.transformFile.call(_this17, files[i], function (transformedFile) {
          transformedFiles[i] = transformedFile;
          // Invoke done() only after every transform has reported back.
          if (++doneCounter === files.length) {
            done(transformedFiles);
          }
        });
      };
      for (var i = 0; i < files.length; i++) {
        _loop(i);
      }
    }
  // Takes care of adding other input elements of the form to the AJAX request
  }, {
    key: "_addFormElementData",
    value: function _addFormElementData(formData) {
      // Take care of other input elements
      if (this.element.tagName === "FORM") {
        for (var _iterator23 = this.element.querySelectorAll("input, textarea, select, button"), _isArray23 = true, _i25 = 0, _iterator23 = _isArray23 ? _iterator23 : _iterator23[Symbol.iterator]();;) {
          var _ref22;
          if (_isArray23) {
            if (_i25 >= _iterator23.length) break;
            _ref22 = _iterator23[_i25++];
          } else {
            _i25 = _iterator23.next();
            if (_i25.done) break;
            _ref22 = _i25.value;
          }
          var input = _ref22;
          var inputName = input.getAttribute("name");
          var inputType = input.getAttribute("type");
          if (inputType) inputType = inputType.toLowerCase();
          // If the input doesn't have a name, we can't use it.
          if (typeof inputName === 'undefined' || inputName === null) continue;
          if (input.tagName === "SELECT" && input.hasAttribute("multiple")) {
            // Possibly multiple values
            for (var _iterator24 = input.options, _isArray24 = true, _i26 = 0, _iterator24 = _isArray24 ? _iterator24 : _iterator24[Symbol.iterator]();;) {
              var _ref23;
              if (_isArray24) {
                if (_i26 >= _iterator24.length) break;
                _ref23 = _iterator24[_i26++];
              } else {
                _i26 = _iterator24.next();
                if (_i26.done) break;
                _ref23 = _i26.value;
              }
              var option = _ref23;
              if (option.selected) {
                formData.append(inputName, option.value);
              }
            }
          } else if (!inputType || inputType !== "checkbox" && inputType !== "radio" || input.checked) {
            // Plain inputs are always included; checkboxes/radios only when checked.
            formData.append(inputName, input.value);
          }
        }
      }
    }
  // Invoked when there is new progress information about given files.
  // If e is not provided, it is assumed that the upload is finished.
  }, {
    key: "_updateFilesUploadProgress",
    value: function _updateFilesUploadProgress(files, xhr, e) {
      var progress = void 0;
      if (typeof e !== 'undefined') {
        // Percentage of *this request* (which may be a single chunk).
        progress = 100 * e.loaded / e.total;
        if (files[0].upload.chunked) {
          var file = files[0];
          // Since this is a chunked upload, we need to update the appropriate chunk progress.
          var chunk = this._getChunk(file, xhr);
          chunk.progress = progress;
          chunk.total = e.total;
          chunk.bytesSent = e.loaded;
          var fileProgress = 0,
              fileTotal = void 0,
              fileBytesSent = void 0;
          // Recompute the file-level totals by summing over all started chunks.
          file.upload.progress = 0;
          file.upload.total = 0;
          file.upload.bytesSent = 0;
          for (var i = 0; i < file.upload.totalChunkCount; i++) {
            if (file.upload.chunks[i] !== undefined && file.upload.chunks[i].progress !== undefined) {
              file.upload.progress += file.upload.chunks[i].progress;
              file.upload.total += file.upload.chunks[i].total;
              file.upload.bytesSent += file.upload.chunks[i].bytesSent;
            }
          }
          // Average chunk progress over the total chunk count.
          file.upload.progress = file.upload.progress / file.upload.totalChunkCount;
        } else {
          // Non-chunked: the request progress applies to every file in it.
          for (var _iterator25 = files, _isArray25 = true, _i27 = 0, _iterator25 = _isArray25 ? _iterator25 : _iterator25[Symbol.iterator]();;) {
            var _ref24;
            if (_isArray25) {
              if (_i27 >= _iterator25.length) break;
              _ref24 = _iterator25[_i27++];
            } else {
              _i27 = _iterator25.next();
              if (_i27.done) break;
              _ref24 = _i27.value;
            }
            var _file2 = _ref24;
            _file2.upload.progress = progress;
            _file2.upload.total = e.total;
            _file2.upload.bytesSent = e.loaded;
          }
        }
        for (var _iterator26 = files, _isArray26 = true, _i28 = 0, _iterator26 = _isArray26 ? _iterator26 : _iterator26[Symbol.iterator]();;) {
          var _ref25;
          if (_isArray26) {
            if (_i28 >= _iterator26.length) break;
            _ref25 = _iterator26[_i28++];
          } else {
            _i28 = _iterator26.next();
            if (_i28.done) break;
            _ref25 = _i28.value;
          }
          var _file3 = _ref25;
          this.emit("uploadprogress", _file3, _file3.upload.progress, _file3.upload.bytesSent);
        }
      } else {
        // Called when the file finished uploading
        var allFilesFinished = true;
        progress = 100;
        for (var _iterator27 = files, _isArray27 = true, _i29 = 0, _iterator27 = _isArray27 ? _iterator27 : _iterator27[Symbol.iterator]();;) {
          var _ref26;
          if (_isArray27) {
            if (_i29 >= _iterator27.length) break;
            _ref26 = _iterator27[_i29++];
          } else {
            _i29 = _iterator27.next();
            if (_i29.done) break;
            _ref26 = _i29.value;
          }
          var _file4 = _ref26;
          if (_file4.upload.progress !== 100 || _file4.upload.bytesSent !== _file4.upload.total) {
            allFilesFinished = false;
          }
          // Force every file to the completed state.
          _file4.upload.progress = progress;
          _file4.upload.bytesSent = _file4.upload.total;
        }
        // Nothing to do, all files already at 100%
        if (allFilesFinished) {
          return;
        }
        for (var _iterator28 = files, _isArray28 = true, _i30 = 0, _iterator28 = _isArray28 ? _iterator28 : _iterator28[Symbol.iterator]();;) {
          var _ref27;
          if (_isArray28) {
            if (_i30 >= _iterator28.length) break;
            _ref27 = _iterator28[_i30++];
          } else {
            _i30 = _iterator28.next();
            if (_i30.done) break;
            _ref27 = _i30.value;
          }
          var _file5 = _ref27;
          this.emit("uploadprogress", _file5, progress, _file5.upload.bytesSent);
        }
      }
    }
  }, {
    key: "_finishedUploading",
    value: function _finishedUploading(files, xhr, e) {
      var response = void 0;
      // Canceled requests still fire onload; ignore them here.
      if (files[0].status === Dropzone.CANCELED) {
        return;
      }
      if (xhr.readyState !== 4) {
        return;
      }
      // Parse the response body unless it's a binary responseType.
      if (xhr.responseType !== 'arraybuffer' && xhr.responseType !== 'blob') {
        response = xhr.responseText;
        if (xhr.getResponseHeader("content-type") && ~xhr.getResponseHeader("content-type").indexOf("application/json")) {
          try {
            response = JSON.parse(response);
          } catch (error) {
            e = error;
            response = "Invalid JSON response from server.";
          }
        }
      }
      // Without an event argument this marks the upload as 100% complete.
      this._updateFilesUploadProgress(files);
      if (!(200 <= xhr.status && xhr.status < 300)) {
        this._handleUploadError(files, xhr, response);
      } else {
        if (files[0].upload.chunked) {
          // Chunked uploads finish chunk by chunk; the file itself completes
          // inside finishedChunkUpload once every chunk succeeded.
          files[0].upload.finishedChunkUpload(this._getChunk(files[0], xhr));
        } else {
          this._finished(files, response, e);
        }
      }
    }
}, {
key: "_handleUploadError",
value: function _handleUploadError(files, xhr, response) {
if (files[0].status === Dropzone.CANCELED) {
return;
}
if (files[0].upload.chunked && this.options.retryChunks) {
var chunk = this._getChunk(files[0], xhr);
if (chunk.retries++ < this.options.retryChunksLimit) {
this._uploadData(files, [chunk.dataBlock]);
return;
} else {
console.warn('Retried this chunk too often. Giving up.');
}
}
for (var _iterator29 = files, _isArray29 = true, _i31 = 0, _iterator29 = _isArray29 ? _iterator29 : _iterator29[Symbol.iterator]();;) {
var _ref28;
if (_isArray29) {
if (_i31 >= _iterator29.length) break;
_ref28 = _iterator29[_i31++];
} else {
_i31 = _iterator29.next();
if (_i31.done) break;
_ref28 = _i31.value;
}
var file = _ref28;
this._errorProcessing(files, response || this.options.dictResponseError.replace("{{statusCode}}", xhr.status), xhr);
}
}
  }, {
    key: "submitRequest",
    value: function submitRequest(xhr, formData, files) {
      // Final hand-off: send the fully prepared form data.
      // NOTE(review): kept as its own tiny method — presumably so users can
      // override how/when the request is sent; confirm against the docs.
      xhr.send(formData);
    }
// Called internally when processing is finished.
// Individual callbacks have to be called in the appropriate sections.
}, {
key: "_finished",
value: function _finished(files, responseText, e) {
for (var _iterator30 = files, _isArray30 = true, _i32 = 0, _iterator30 = _isArray30 ? _iterator30 : _iterator30[Symbol.iterator]();;) {
var _ref29;
if (_isArray30) {
if (_i32 >= _iterator30.length) break;
_ref29 = _iterator30[_i32++];
} else {
_i32 = _iterator30.next();
if (_i32.done) break;
_ref29 = _i32.value;
}
var file = _ref29;
file.status = Dropzone.SUCCESS;
this.emit("success", file, responseText, e);
this.emit("complete", file);
}
if (this.options.uploadMultiple) {
this.emit("successmultiple", files, responseText, e);
this.emit("completemultiple", files);
}
if (this.options.autoProcessQueue) {
return this.processQueue();
}
}
// Called internally when processing is finished.
// Individual callbacks have to be called in the appropriate sections.
}, {
key: "_errorProcessing",
value: function _errorProcessing(files, message, xhr) {
for (var _iterator31 = files, _isArray31 = true, _i33 = 0, _iterator31 = _isArray31 ? _iterator31 : _iterator31[Symbol.iterator]();;) {
var _ref30;
if (_isArray31) {
if (_i33 >= _iterator31.length) break;
_ref30 = _iterator31[_i33++];
} else {
_i33 = _iterator31.next();
if (_i33.done) break;
_ref30 = _i33.value;
}
var file = _ref30;
file.status = Dropzone.ERROR;
this.emit("error", file, message, xhr);
this.emit("complete", file);
}
if (this.options.uploadMultiple) {
this.emit("errormultiple", files, message, xhr);
this.emit("completemultiple", files);
}
if (this.options.autoProcessQueue) {
return this.processQueue();
}
}
}], [{
key: "uuidv4",
value: function uuidv4() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
var r = Math.random() * 16 | 0,
v = c === 'x' ? r : r & 0x3 | 0x8;
return v.toString(16);
});
}
}]);
return Dropzone;
}(Emitter);
// NOTE(review): initClass() is defined elsewhere in this file — presumably it
// finishes the static class setup; confirm before relying on it.
Dropzone.initClass();
Dropzone.version = "5.2.0";
// This is a map of options for your different dropzones. Add configurations
// to this object for your different dropzone elements.
//
// Example:
//
// Dropzone.options.myDropzoneElementId = { maxFilesize: 1 };
//
// To disable autoDiscover for a specific element, you can set `false` as an option:
//
// Dropzone.options.myDisabledElementId = false;
//
// And in html:
//
// <form action="/upload" id="my-dropzone-element-id" class="dropzone"></form>
Dropzone.options = {};
// Returns the options for an element or undefined if none available.
Dropzone.optionsForElement = function (element) {
  // Options are registered under the camelized element id (see the example
  // with Dropzone.options above).
  var elementId = element.getAttribute("id");
  if (!elementId) {
    return undefined;
  }
  return Dropzone.options[camelize(elementId)];
};
// Holds a list of all dropzone instances
Dropzone.instances = [];
// Returns the dropzone for given element if any
Dropzone.forElement = function (element) {
  // A string argument is treated as a CSS selector.
  if (typeof element === "string") {
    element = document.querySelector(element);
  }
  var dropzone = element != null ? element.dropzone : undefined;
  if (dropzone == null) {
    throw new Error("No Dropzone found for given element. This is probably because you're trying to access it before Dropzone had the time to initialize. Use the `init` option to setup any additional observers on your Dropzone.");
  }
  return dropzone;
};
// Set to false if you don't want Dropzone to automatically find and attach to .dropzone elements.
Dropzone.autoDiscover = true;
// Looks for all .dropzone elements and creates a dropzone for them
Dropzone.discover = function () {
  var dropzones = void 0;
  if (document.querySelectorAll) {
    dropzones = document.querySelectorAll(".dropzone");
  } else {
    dropzones = [];
    // IE :(
    // Fallback for browsers without querySelectorAll: scan divs and forms
    // and collect elements carrying the "dropzone" class manually.
    var checkElements = function checkElements(elements) {
      return function () {
        var result = [];
        for (var _iterator32 = elements, _isArray32 = true, _i34 = 0, _iterator32 = _isArray32 ? _iterator32 : _iterator32[Symbol.iterator]();;) {
          var _ref31;
          if (_isArray32) {
            if (_i34 >= _iterator32.length) break;
            _ref31 = _iterator32[_i34++];
          } else {
            _i34 = _iterator32.next();
            if (_i34.done) break;
            _ref31 = _i34.value;
          }
          var el = _ref31;
          // Whole-word match of "dropzone" inside the space-separated class list.
          if (/(^| )dropzone($| )/.test(el.className)) {
            result.push(dropzones.push(el));
          } else {
            result.push(undefined);
          }
        }
        return result;
      }();
    };
    checkElements(document.getElementsByTagName("div"));
    checkElements(document.getElementsByTagName("form"));
  }
  // Instantiate a Dropzone per discovered element, unless auto discovery was
  // disabled for it via `Dropzone.options.<id> = false`.
  return function () {
    var result = [];
    for (var _iterator33 = dropzones, _isArray33 = true, _i35 = 0, _iterator33 = _isArray33 ? _iterator33 : _iterator33[Symbol.iterator]();;) {
      var _ref32;
      if (_isArray33) {
        if (_i35 >= _iterator33.length) break;
        _ref32 = _iterator33[_i35++];
      } else {
        _i35 = _iterator33.next();
        if (_i35.done) break;
        _ref32 = _i35.value;
      }
      var dropzone = _ref32;
      // Create a dropzone unless auto discover has been disabled for specific element
      if (Dropzone.optionsForElement(dropzone) !== false) {
        result.push(new Dropzone(dropzone));
      } else {
        result.push(undefined);
      }
    }
    return result;
  }();
};
// Since the whole Drag'n'Drop API is pretty new, some browsers implement it,
// but not correctly.
// So I created a blacklist of userAgents. Yes, yes. Browser sniffing, I know.
// But what to do when browsers *theoretically* support an API, but crash
// when using it.
//
// This is a list of regular expressions tested against navigator.userAgent
//
// ** It should only be used on browser that *do* support the API, but
// incorrectly **
//
Dropzone.blacklistedBrowsers = [
// The mac os and windows phone version of opera 12 seems to have a problem with the File drag'n'drop API.
/opera.*(Macintosh|Windows Phone).*version\/12/i];
// Checks if the browser is supported
Dropzone.isBrowserSupported = function () {
  // Feature-detect the required File / drag'n'drop APIs, then exclude
  // browsers that expose the API but are known broken (blacklistedBrowsers).
  var capableBrowser = true;
  if (window.File && window.FileReader && window.FileList && window.Blob && window.FormData && document.querySelector) {
    if (!("classList" in document.createElement("a"))) {
      capableBrowser = false;
    } else {
      // The browser supports the API, but may be blacklisted.
      // blacklistedBrowsers is a plain array of RegExps, so a simple index
      // loop suffices. Fixed: the original used `continue` after marking the
      // browser unsupported, pointlessly testing the remaining patterns.
      for (var i = 0; i < Dropzone.blacklistedBrowsers.length; i++) {
        if (Dropzone.blacklistedBrowsers[i].test(navigator.userAgent)) {
          capableBrowser = false;
          break;
        }
      }
    }
  } else {
    capableBrowser = false;
  }
  return capableBrowser;
};
Dropzone.dataURItoBlob = function (dataURI) {
  // convert base64 to raw binary data held in a string
  // doesn't handle URLEncoded DataURIs - see SO answer #6850276 for code that does this
  var byteString = atob(dataURI.split(',')[1]);
  // separate out the mime component, e.g. "image/png" from "data:image/png;base64".
  var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
  // write the bytes of the string to an ArrayBuffer
  var ab = new ArrayBuffer(byteString.length);
  var ia = new Uint8Array(ab);
  // Fixed off-by-one: the original loop ran `i <= length`, reading one
  // character past the end of the string (harmless only because the
  // out-of-range typed-array write is silently ignored).
  for (var i = 0; i < byteString.length; i++) {
    ia[i] = byteString.charCodeAt(i);
  }
  // write the ArrayBuffer to a blob
  return new Blob([ab], { type: mimeString });
};
// Returns a new array with every occurrence of `rejectedItem` removed.
var without = function without(list, rejectedItem) {
  // The original chained a no-op `.map(item => item)` after the filter;
  // filter already returns a fresh array, so the map was dropped.
  return list.filter(function (item) {
    return item !== rejectedItem;
  });
};
// abc-def_ghi -> abcDefGhi
var camelize = function camelize(str) {
  // Uppercase the word character following every dash/underscore and drop
  // the separator itself.
  return str.replace(/[\-_](\w)/g, function (_match, letter) {
    return letter.toUpperCase();
  });
};
// Creates an element from string
Dropzone.createElement = function (string) {
  // Let the browser parse the markup inside a detached div, then hand back
  // the first parsed node.
  var container = document.createElement("div");
  container.innerHTML = string;
  return container.childNodes[0];
};
// Tests if given element is inside (or simply is) the container
Dropzone.elementInside = function (element, container) {
  // Walk up the parent chain until we either hit the container or run out
  // of ancestors.
  do {
    if (element === container) {
      return true;
    }
  } while (element = element.parentNode);
  return false;
};
Dropzone.getElement = function (el, name) {
  // Resolve `el` to a DOM node: a string is treated as a CSS selector,
  // anything exposing nodeType is assumed to already be a node.
  var element = null;
  if (typeof el === "string") {
    element = document.querySelector(el);
  } else if (el.nodeType != null) {
    element = el;
  }
  if (element == null) {
    throw new Error("Invalid `" + name + "` option provided. Please provide a CSS selector or a plain HTML element.");
  }
  return element;
};
Dropzone.getElements = function (els, name) {
  // Resolves `els` (array of nodes/selectors, a selector string, or a single
  // node) to a non-empty array of DOM nodes; throws otherwise.
  var el = void 0,
      elements = void 0;
  if (els instanceof Array) {
    elements = [];
    try {
      for (var _iterator35 = els, _isArray35 = true, _i37 = 0, _iterator35 = _isArray35 ? _iterator35 : _iterator35[Symbol.iterator]();;) {
        if (_isArray35) {
          if (_i37 >= _iterator35.length) break;
          el = _iterator35[_i37++];
        } else {
          _i37 = _iterator35.next();
          if (_i37.done) break;
          el = _i37.value;
        }
        elements.push(this.getElement(el, name));
      }
    } catch (e) {
      // Any invalid entry invalidates the whole list (reported below).
      elements = null;
    }
  } else if (typeof els === "string") {
    elements = [];
    for (var _iterator36 = document.querySelectorAll(els), _isArray36 = true, _i38 = 0, _iterator36 = _isArray36 ? _iterator36 : _iterator36[Symbol.iterator]();;) {
      if (_isArray36) {
        if (_i38 >= _iterator36.length) break;
        el = _iterator36[_i38++];
      } else {
        _i38 = _iterator36.next();
        if (_i38.done) break;
        el = _i38.value;
      }
      elements.push(el);
    }
  } else if (els.nodeType != null) {
    // A single DOM node becomes a one-element list.
    elements = [els];
  }
  if (elements == null || !elements.length) {
    throw new Error("Invalid `" + name + "` option provided. Please provide a CSS selector, a plain HTML element or a list of those.");
  }
  return elements;
};
// Asks the user the question and calls accepted or rejected accordingly
//
// The default implementation just uses `window.confirm` and then calls the
// appropriate callback.
Dropzone.confirm = function (question, accepted, rejected) {
  var confirmed = window.confirm(question);
  if (confirmed) {
    return accepted();
  }
  // `rejected` is optional; only invoke it when provided.
  if (rejected != null) {
    return rejected();
  }
};
// Validates the mime type like this:
//
// https://developer.mozilla.org/en-US/docs/HTML/Element/input#attr-accept
Dropzone.isValidFile = function (file, acceptedFiles) {
  // No restriction configured means every file is acceptable.
  if (!acceptedFiles) {
    return true;
  }
  var mimeType = file.type;
  var baseMimeType = mimeType.replace(/\/.*$/, "");
  var candidates = acceptedFiles.split(",");
  for (var i = 0; i < candidates.length; i++) {
    var validType = candidates[i].trim();
    if (validType.charAt(0) === ".") {
      // File-extension entry, e.g. ".pdf": match case-insensitively against
      // the end of the file name.
      if (file.name.toLowerCase().indexOf(validType.toLowerCase(), file.name.length - validType.length) !== -1) {
        return true;
      }
    } else if (/\/\*$/.test(validType)) {
      // Wildcard entry like "image/*": compare only the base mime type.
      if (baseMimeType === validType.replace(/\/.*$/, "")) {
        return true;
      }
    } else if (mimeType === validType) {
      // Exact mime-type entry.
      return true;
    }
  }
  return false;
};
// Augment jQuery
// Registers the $(...).dropzone(options) plugin when jQuery is on the page.
if (typeof jQuery !== 'undefined' && jQuery !== null) {
  jQuery.fn.dropzone = function (options) {
    return this.each(function () {
      return new Dropzone(this, options);
    });
  };
}
// Export for CommonJS bundlers; otherwise expose Dropzone as a global.
if (typeof module !== 'undefined' && module !== null) {
  module.exports = Dropzone;
} else {
  window.Dropzone = Dropzone;
}
// Dropzone file status codes
Dropzone.ADDED = "added";
Dropzone.QUEUED = "queued";
// For backwards compatibility. Now, if a file is accepted, it's either queued
// or uploading.
Dropzone.ACCEPTED = Dropzone.QUEUED;
Dropzone.UPLOADING = "uploading";
Dropzone.PROCESSING = Dropzone.UPLOADING; // alias
Dropzone.CANCELED = "canceled";
Dropzone.ERROR = "error";
Dropzone.SUCCESS = "success";
/*
Bugfix for iOS 6 and 7
Source: https//stackoverflow.com/questions/11929099/html5-canvas-drawimage-ratio-bug-ios
based on the work of https://github.com/stomita/ios-imagefile-megapixel
*/
// Detecting vertical squash in loaded image.
// Fixes a bug which squash image vertically while drawing into canvas for some images.
// This is a bug in iOS6 devices. This function from https://github.com/stomita/ios-imagefile-megapixel
var detectVerticalSquash = function detectVerticalSquash(img) {
  var iw = img.naturalWidth;
  var ih = img.naturalHeight;
  // Draw the image into a 1px-wide canvas so a single column of alpha
  // values can be inspected.
  var canvas = document.createElement("canvas");
  canvas.width = 1;
  canvas.height = ih;
  var ctx = canvas.getContext("2d");
  ctx.drawImage(img, 0, 0);
  var _ctx$getImageData = ctx.getImageData(1, 0, 1, ih),
      data = _ctx$getImageData.data;
  // search image edge pixel position in case it is squashed vertically.
  // Binary search for the boundary between drawn (alpha > 0) and empty
  // (alpha === 0) rows.
  var sy = 0;
  var ey = ih;
  var py = ih;
  while (py > sy) {
    var alpha = data[(py - 1) * 4 + 3];
    if (alpha === 0) {
      ey = py;
    } else {
      sy = py;
    }
    py = ey + sy >> 1;
  }
  // Ratio of actually-drawn height to natural height; 1 means no squash.
  var ratio = py / ih;
  if (ratio === 0) {
    return 1;
  } else {
    return ratio;
  }
};
// A replacement for context.drawImage
// (args are for source and destination).
var drawImageIOSFix = function drawImageIOSFix(ctx, img, sx, sy, sw, sh, dx, dy, dw, dh) {
  // Compensate for the iOS6 vertical-squash bug by stretching the
  // destination height with the detected ratio.
  var ratio = detectVerticalSquash(img);
  return ctx.drawImage(img, sx, sy, sw, sh, dx, dy, dw, dh / ratio);
};
// Based on MinifyJpeg
// Source: https//www.perry.cz/files/ExifRestorer.js
// https//elicon.blog57.fc2.com/blog-entry-206.html
var ExifRestore = function () {
  function ExifRestore() {
    _classCallCheck(this, ExifRestore);
  }
  _createClass(ExifRestore, null, [{
    key: "initClass",
    value: function initClass() {
      // Base64 alphabet shared by encode64/decode64 ('=' at index 64 is padding).
      this.KEY_STR = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=';
    }
  }, {
    key: "encode64",
    // Base64-encode an array of byte values.
    value: function encode64(input) {
      var output = '';
      var chr1 = undefined;
      var chr2 = undefined;
      var chr3 = '';
      var enc1 = undefined;
      var enc2 = undefined;
      var enc3 = undefined;
      var enc4 = '';
      var i = 0;
      while (true) {
        chr1 = input[i++];
        chr2 = input[i++];
        chr3 = input[i++];
        enc1 = chr1 >> 2;
        enc2 = (chr1 & 3) << 4 | chr2 >> 4;
        enc3 = (chr2 & 15) << 2 | chr3 >> 6;
        enc4 = chr3 & 63;
        // Pad with '=' (index 64) when fewer than 3 bytes remain.
        if (isNaN(chr2)) {
          enc3 = enc4 = 64;
        } else if (isNaN(chr3)) {
          enc4 = 64;
        }
        output = output + this.KEY_STR.charAt(enc1) + this.KEY_STR.charAt(enc2) + this.KEY_STR.charAt(enc3) + this.KEY_STR.charAt(enc4);
        chr1 = chr2 = chr3 = '';
        enc1 = enc2 = enc3 = enc4 = '';
        if (!(i < input.length)) {
          break;
        }
      }
      return output;
    }
  }, {
    key: "restore",
    // Copy the EXIF segment of the original JPEG into the resized JPEG.
    // Returns the resized data URI unchanged when the original isn't a JPEG.
    value: function restore(origFileBase64, resizedFileBase64) {
      if (!origFileBase64.match('data:image/jpeg;base64,')) {
        return resizedFileBase64;
      }
      var rawImage = this.decode64(origFileBase64.replace('data:image/jpeg;base64,', ''));
      var segments = this.slice2Segments(rawImage);
      var image = this.exifManipulation(resizedFileBase64, segments);
      return "data:image/jpeg;base64," + this.encode64(image);
    }
  }, {
    key: "exifManipulation",
    value: function exifManipulation(resizedFileBase64, segments) {
      var exifArray = this.getExifArray(segments);
      var newImageArray = this.insertExif(resizedFileBase64, exifArray);
      var aBuffer = new Uint8Array(newImageArray);
      return aBuffer;
    }
  }, {
    key: "getExifArray",
    // Find the APP1 (EXIF) segment: marker bytes 0xFF 0xE1 (255, 225).
    value: function getExifArray(segments) {
      var seg = undefined;
      var x = 0;
      while (x < segments.length) {
        seg = segments[x];
        // Fixed: logical && instead of accidental bitwise & (the result was
        // the same only because both operands are booleans).
        if (seg[0] === 255 && seg[1] === 225) {
          return seg;
        }
        x++;
      }
      return [];
    }
  }, {
    key: "insertExif",
    // Splice the EXIF segment into the resized JPEG right after the first
    // 0xFF marker following the SOI bytes.
    value: function insertExif(resizedFileBase64, exifArray) {
      var imageData = resizedFileBase64.replace('data:image/jpeg;base64,', '');
      var buf = this.decode64(imageData);
      var separatePoint = buf.indexOf(255, 3);
      var mae = buf.slice(0, separatePoint);
      var ato = buf.slice(separatePoint);
      var array = mae;
      array = array.concat(exifArray);
      array = array.concat(ato);
      return array;
    }
  }, {
    key: "slice2Segments",
    // Split the raw JPEG byte array into its marker segments, stopping at
    // the SOS (start-of-scan, 0xFF 0xDA) marker.
    value: function slice2Segments(rawImageArray) {
      var head = 0;
      var segments = [];
      while (true) {
        var length;
        // Fixed: && instead of bitwise & (see getExifArray).
        if (rawImageArray[head] === 255 && rawImageArray[head + 1] === 218) {
          break;
        }
        if (rawImageArray[head] === 255 && rawImageArray[head + 1] === 216) {
          // SOI marker has no length field; just skip the two marker bytes.
          head += 2;
        } else {
          // Segment length is big-endian and excludes the two marker bytes.
          length = rawImageArray[head + 2] * 256 + rawImageArray[head + 3];
          var endPoint = head + length + 2;
          var seg = rawImageArray.slice(head, endPoint);
          segments.push(seg);
          head = endPoint;
        }
        if (head > rawImageArray.length) {
          break;
        }
      }
      return segments;
    }
  }, {
    key: "decode64",
    // Decode a base64 string into an array of byte values.
    value: function decode64(input) {
      var output = '';
      var chr1 = undefined;
      var chr2 = undefined;
      var chr3 = '';
      var enc1 = undefined;
      var enc2 = undefined;
      var enc3 = undefined;
      var enc4 = '';
      var i = 0;
      var buf = [];
      // remove all characters that are not A-Z, a-z, 0-9, +, /, or =
      var base64test = /[^A-Za-z0-9\+\/\=]/g;
      if (base64test.exec(input)) {
        console.warn('There were invalid base64 characters in the input text.\nValid base64 characters are A-Z, a-z, 0-9, \'+\', \'/\',and \'=\'\nExpect errors in decoding.');
      }
      input = input.replace(/[^A-Za-z0-9\+\/\=]/g, '');
      while (true) {
        enc1 = this.KEY_STR.indexOf(input.charAt(i++));
        enc2 = this.KEY_STR.indexOf(input.charAt(i++));
        enc3 = this.KEY_STR.indexOf(input.charAt(i++));
        enc4 = this.KEY_STR.indexOf(input.charAt(i++));
        chr1 = enc1 << 2 | enc2 >> 4;
        chr2 = (enc2 & 15) << 4 | enc3 >> 2;
        chr3 = (enc3 & 3) << 6 | enc4;
        buf.push(chr1);
        // '=' padding (index 64) means the corresponding byte doesn't exist.
        if (enc3 !== 64) {
          buf.push(chr2);
        }
        if (enc4 !== 64) {
          buf.push(chr3);
        }
        chr1 = chr2 = chr3 = '';
        enc1 = enc2 = enc3 = enc4 = '';
        if (!(i < input.length)) {
          break;
        }
      }
      return buf;
    }
  }]);
  return ExifRestore;
}();
ExifRestore.initClass();
/*
* contentloaded.js
*
* Author: Diego Perini (diego.perini at gmail.com)
* Summary: cross-browser wrapper for DOMContentLoaded
* Updated: 20101020
* License: MIT
* Version: 1.2
*
* URL:
* https//javascript.nwbox.com/ContentLoaded/
* https//javascript.nwbox.com/ContentLoaded/MIT-LICENSE
*/
// @win window reference
// @fn function reference
var contentLoaded = function contentLoaded(win, fn) {
  // Cross-browser DOMContentLoaded: addEventListener where available,
  // attachEvent + doScroll polling on legacy IE.
  var done = false;
  var top = true;
  var doc = win.document;
  var root = doc.documentElement;
  var add = doc.addEventListener ? "addEventListener" : "attachEvent";
  var rem = doc.addEventListener ? "removeEventListener" : "detachEvent";
  var pre = doc.addEventListener ? "" : "on";
  var init = function init(e) {
    // Ignore readystatechange events fired before the document is complete.
    if (e.type === "readystatechange" && doc.readyState !== "complete") {
      return;
    }
    (e.type === "load" ? win : doc)[rem](pre + e.type, init, false);
    // Guard so fn only ever fires once even when several events arrive.
    if (!done && (done = true)) {
      return fn.call(win, e.type || e);
    }
  };
  var poll = function poll() {
    // IE trick: doScroll throws until the DOM is ready, so retry until it
    // succeeds, then treat that as the ready signal.
    try {
      root.doScroll("left");
    } catch (e) {
      setTimeout(poll, 50);
      return;
    }
    return init("poll");
  };
  if (doc.readyState !== "complete") {
    if (doc.createEventObject && root.doScroll) {
      try {
        // Only poll in the top window; frames can't use the doScroll trick.
        top = !win.frameElement;
      } catch (error) {}
      if (top) {
        poll();
      }
    }
    doc[add](pre + "DOMContentLoaded", init, false);
    doc[add](pre + "readystatechange", init, false);
    return win[add](pre + "load", init, false);
  }
};
// As a single function to be able to write tests.
Dropzone._autoDiscoverFunction = function () {
  // Respect the global opt-out flag.
  if (!Dropzone.autoDiscover) {
    return;
  }
  return Dropzone.discover();
};
contentLoaded(window, Dropzone._autoDiscoverFunction);
function __guard__(value, transform) {
  // Apply `transform` only when `value` is neither undefined nor null;
  // otherwise yield undefined (CoffeeScript existential-operator helper).
  if (typeof value === 'undefined' || value === null) {
    return undefined;
  }
  return transform(value);
}
function __guardMethod__(obj, methodName, transform) {
  // Run `transform(obj, methodName)` only when `obj` exists and actually
  // exposes a callable `methodName`; otherwise yield undefined.
  var callable = typeof obj !== 'undefined' && obj !== null && typeof obj[methodName] === 'function';
  return callable ? transform(obj, methodName) : undefined;
}
|
import unittest
from sim.battle import Battle
from data import dex
class TestDecisions(unittest.TestCase):
    """Integration test for switch decisions in a simulated battle."""

    def test_switch(self):
        """Switching side 0 to slot 2 brings magnezone in; it then takes peck damage."""
        battle = Battle(debug=False, rng=False)
        battle.join(0, [{'species': 'mew'}, {'species': 'mewtwo'}, {'species': 'magnezone'}])
        battle.join(1, [{'species': 'pidgey', 'moves': ['peck']}])
        battle.choose(0, dex.Decision('switch', 2))
        battle.choose(1, dex.Decision('move', 0))
        battle.do_turn()
        # Only the pokemon actually inspected are bound to names; the original
        # also bound mewtwo/pidgey but never used them.
        mew = battle.sides[0].pokemon[0]
        magnezone = battle.sides[0].pokemon[2]
        self.assertEqual(battle.sides[0].active_pokemon[0].species, 'magnezone')
        # mew switched out before the attack, so it keeps full HP.
        self.assertEqual(mew.hp, mew.maxhp)
        # magnezone absorbed one peck (3 HP with rng disabled).
        self.assertEqual(magnezone.hp, magnezone.maxhp - 3)

    def runTest(self):
        # Kept for callers that run this case directly without a test loader.
        self.test_switch()
|
'use strict';
// Prompt steps exposed to the generator lifecycle; each is invoked with the
// generator instance as `this`.
module.exports = {
  askForModuleName,
  askForClientSideOpts,
  askFori18n
};
function askForModuleName() {
  // Nothing to ask once a base name is already configured.
  if (this.baseName) {
    return;
  }
  this.askModuleName(this);
}
function askForClientSideOpts() {
  // Existing projects keep their configuration; skip the prompt entirely.
  if (this.existingProject) return;
  var done = this.async();
  var getNumberedQuestion = this.getNumberedQuestion.bind(this);
  // Single confirm prompt: opt into the LibSass preprocessor.
  var useSassPrompt = {
    type: 'confirm',
    name: 'useSass',
    message: function (response) {
      return getNumberedQuestion('Would you like to use the LibSass stylesheet preprocessor for your CSS?', true);
    },
    default: false
  };
  this.prompt([useSassPrompt]).then(function (props) {
    this.useSass = props.useSass;
    done();
  }.bind(this));
}
function askFori18n() {
  // Skipped for existing projects or when the i18n question is disabled.
  if (this.existingProject || this.configOptions.skipI18nQuestion) {
    return;
  }
  this.aski18n(this);
}
|
// Canvas bootstrap: bind a 700x1400 ArtShip drawing surface to #curves.
// NOTE(review): canvas/size/img/context are assigned without const/let and
// become implicit globals — presumably shared with other scripts; verify.
canvas = document.getElementById('curves');
size = 700;
img = new ArtShip(size, 2 * size, canvas);
context = img.context;
let step = img.ratio(10);
let dens = img.ratio(20);
function brush_hair (coordinates) {
  // Paint one "hair": a jittered copy of the coordinate path, rendered as
  // faint dots plus a connecting curve, repeated for each brush stroke.
  for (let brush_stroke = 0; brush_stroke < img.ratio(1); brush_stroke++) {
    let points = [];
    for (let coordinate of coordinates) {
      // Fixed: x/y previously leaked as implicit globals (no declaration).
      let x = img.random(coordinate[0] - dens, coordinate[0] + dens);
      let y = img.random(coordinate[1] - dens, coordinate[1] + dens);
      points.push([x, y]);
    }
    for (let point of points) {
      img.circle(point[0], point[1], img.ratio(size));
      img.fill(255, 255, 0, 0.05);
    }
    img.curve(points);
    img.stroke(0.2, 255, 255, 255, 0.3);
  }
}
function draw() {
  img.background(0, 0, 0);
  // Lay down the brush strokes from random coordinate paths.
  for (let i = 0; i < img.ratio(100); i++) {
    let coordinates = [];
    for (let j = 0; j < img.ratio(150); j++) {
      let x = img.random(0, 1);
      let y = img.random(-0.5, 1.5);
      coordinates.push([x * img.width, y * img.height]);
    }
    brush_hair(coordinates);
  }
  // Punch a regular grid of opaque black pixels over the result.
  // Fixed: pixel_image, pixels and i previously leaked as implicit globals.
  const pixel_image = img.pixel_image();
  const pixels = img.pixels(pixel_image);
  step = img.ratio(100); // intentionally reassigns the module-level `step`
  for (let x = step; x < pixel_image.width; x += step) {
    for (let y = step; y < pixel_image.height; y += step) {
      const i = img.index(x, y);
      pixels[i + 0] = 0; // R value
      pixels[i + 1] = 0; // G value
      pixels[i + 2] = 0; // B value
      pixels[i + 3] = 255; // A value
    }
  }
  img.pixel_to_vector(pixel_image);
}
draw();
|
import os
import sys
from typing import List
from setuptools import find_packages, setup
try:
from biomass import __author__, __email__, __maintainer__
except ImportError:
__author__ = __maintainer__ = "Hiroaki Imoto"
__email__ = "himoto@protein.osaka-u.ac.jp"
def get_version() -> str:
    """Extract ``__version__`` from biomass/version.py without importing it.

    Returns:
        The version string, e.g. ``"0.5.2"``.
    """
    version_filepath = os.path.join(os.path.dirname(__file__), "biomass", "version.py")
    with open(version_filepath) as f:
        for line in f:
            if line.startswith("__version__"):
                # Line looks like: __version__ = "x.y.z" -> strip the quotes.
                return line.strip().split()[-1][1:-1]
    # Same AssertionError as before, but now with a diagnostic message.
    assert False, "__version__ not found in biomass/version.py"
def get_long_description() -> str:
    """Read README.md (used as the PyPI long description)."""
    readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
    with open(readme_filepath) as f:
        contents = f.read()
    return contents
def get_install_requires() -> List[str]:
    """Read requirements.txt, one requirement string per list entry."""
    requirements_filepath = os.path.join(os.path.dirname(__file__), "requirements.txt")
    with open(requirements_filepath) as f:
        lines = f.read().splitlines()
    return lines
def setup_package():
    """Register the biomass package with setuptools (all metadata inline)."""
    # Python version check.
    if sys.version_info[:2] < (3, 7):
        raise RuntimeError("biomass requires at least Python version 3.7")
    setup(
        name="biomass",
        version=get_version(),
        description="A Python Framework for Modeling and Analysis of Signaling Systems",
        long_description=get_long_description(),
        long_description_content_type="text/markdown",
        license="Apache 2.0",
        author=__author__,
        author_email=__email__,
        maintainer=__maintainer__,
        maintainer_email=__email__,
        url="https://github.com/biomass-dev/biomass",
        download_url="https://github.com/biomass-dev/biomass/releases",
        project_urls={
            "Documentation": "https://biomass-core.readthedocs.io/en/latest/",
            "Source Code": "https://github.com/biomass-dev/biomass",
            "Bug Tracker": "https://github.com/biomass-dev/biomass/issues",
        },
        packages=find_packages(exclude=["tests", "docs"]),
        install_requires=get_install_requires(),
        # Optional dependency groups: `pip install biomass[dev]` / `[docs]`.
        extras_require={
            "dev": [
                "black>=20.8b1",
                "flake8",
                "isort",
                "pre-commit",
                "pytest",
            ],
            "docs": [
                "sphinx>=1.7",
                "sphinx_rtd_theme>=0.3",
                "sphinx_autodoc_typehints>=1.10",
                "sphinxcontrib-bibtex>=2.2",
            ],
        },
        python_requires=">=3.7",
        keywords=[
            "systems",
            "biology",
            "modeling",
            "optimization",
            "sensitivity",
            "analysis",
        ],
        classifiers=[
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: Apache Software License",
            "Natural Language :: English",
            "Operating System :: OS Independent",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3 :: Only",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Programming Language :: Python :: 3.9",
            "Topic :: Scientific/Engineering",
            "Topic :: Scientific/Engineering :: Bio-Informatics",
            "Topic :: Software Development",
            "Topic :: Software Development :: Libraries",
            "Topic :: Software Development :: Libraries :: Python Modules",
        ],
    )
if __name__ == "__main__":
setup_package()
|
# Coin partitions
# Problem 78
# Let p(n) represent the number of different ways in which n coins can be separated into piles.
# For example, five coins can be separated into piles in exactly seven different ways, so p(5)=7.
# OOOOO
# OOOO O
# OOO OO
# OOO O O
# OO OO O
# OO O O O
# O O O O O
# Find the least value of n for which p(n) is divisible by one million.
# https://projecteuler.net/problem=78
# https://en.wikipedia.org/wiki/Partition_%28number_theory%29
import datetime
import Utilities
def find_least_n(divisor=1000000):
    """Return the least n for which p(n), the partition count of n, is
    divisible by ``divisor``.

    Uses Euler's pentagonal number theorem:
        p(n) = sum_j (-1)^(j-1) * p(n - g_j),  g_j = j(3j-1)/2,
    with j = 1, -1, 2, -2, ...  All arithmetic is done modulo ``divisor``
    since only divisibility matters.
    """
    p = [1]  # p[0] = 1 by convention
    n = 1
    while True:
        p.append(0)
        i = 0
        penta = 1  # generalized pentagonal number g_j, starting at j = 1
        while penta <= n:
            # Signs follow the pattern +, +, -, -, ...
            sign = -1 if i % 4 > 1 else 1
            p[n] += sign * p[n - penta]
            p[n] %= divisor
            i += 1
            # Generate j = 1, -1, 2, -2, ... and the next pentagonal number.
            j = i // 2 + 1 if i % 2 == 0 else -(i // 2 + 1)
            penta = j * (3 * j - 1) // 2
        if p[n] == 0:
            return n
        n += 1


if __name__ == "__main__":
    start_time = datetime.datetime.now()
    n = find_least_n(1000000)
    stop_time = datetime.datetime.now()
    print(stop_time - start_time)
    print(n)
|
"""
General func/tools used in pbsmrtpipe
"""
import os
import sys
import re
import time
import logging
import logging.config
import logging.handlers
from jinja2 import Environment, PackageLoader
from pbsmrtpipe.decos import ignored
from pbsmrtpipe.constants import SLOG_PREFIX
HTML_TEMPLATE_ENV = Environment(loader=PackageLoader('pbsmrtpipe', 'html_templates'))
log = logging.getLogger(__name__)
slog = logging.getLogger(SLOG_PREFIX + __name__)
def get_or_else(option_t, default):
    """Return ``option_t`` unless it is None, in which case return ``default``.

    Port of Option[T].getOrElse from scala. Note that falsy-but-not-None
    values (0, '', []) are returned as-is.
    """
    return default if option_t is None else option_t
def validate_type_or_raise(obj, klasses, msg=None):
    """Ensure ``obj`` is an instance of ``klasses`` (a type or tuple of types).

    :param obj: value to check
    :param klasses: expected type(s), as accepted by isinstance
    :param msg: optional extra context appended to the error message
    :returns: ``obj`` unchanged on success
    :raises TypeError: when the isinstance check fails
    """
    if isinstance(obj, klasses):
        return obj
    emsg = "{o} Got type {x}, expected type {y}.".format(o=obj, x=type(obj), y=klasses)
    if msg is not None:
        emsg = " ".join([emsg, msg])
    raise TypeError(emsg)
def log_timing(func):
    """Decorator that logs the wall-clock runtime of each call to ``func``.

    Bug fix: the original sampled time.time() at decoration time and logged
    once while the module was being imported, so call durations were never
    actually measured. Timing now happens inside the wrapper, per call.
    """
    def wrapper(*args, **kw):
        started_at = time.time()
        result = func(*args, **kw)
        run_time = time.time() - started_at
        name = func.__name__
        log.info("Func {f} took {s:.2f} sec ({m:.2f} min)".format(f=name, s=run_time, m=run_time / 60.0))
        return result
    return wrapper
class StdOutStatusLogFilter(logging.Filter):
    # Passes only records emitted via status loggers (names starting with
    # SLOG_PREFIX); used to restrict the console handler to status output.
    def filter(self, record):
        return record.name.startswith(SLOG_PREFIX)
def is_verified(path, max_nfs_refresh=3):
    """Validate that a file exists. Force NFS refresh if necessary.

    :param path: file path to check
    :param max_nfs_refresh: number of refresh attempts before giving up
    :returns: True if the path exists, False otherwise
    """
    # Fixed: `xrange` is Python-2-only; `range` behaves the same here.
    for _ in range(max_nfs_refresh):
        with ignored(OSError):
            # Listing the parent directory forces an NFS attribute refresh.
            os.listdir(os.path.dirname(path))
            if os.path.exists(path):
                return True
        # time.sleep(0.25)
    return False
def get_default_logging_config_dict(master_log, master_level, pb_log, stdout_level):
    """Returns a dict configuration of the logger.

    :param master_log: path of the full debug log file
    :param master_level: logging level for the master log
    :param pb_log: path of the INFO-level status log file
    :param stdout_level: logging level for console output
    :returns: dict suitable for logging.config.dictConfig
    """
    d = {
        'version': 1,
        'disable_existing_loggers': False,  # this fixes the problem
        'formatters': {
            'console': {
                'format': '%(message)s'
            },
            'standard': {
                'format': '[%(levelname)s] %(asctime)-15sZ [%(name)s] %(message)s'
            },
            'full': {
                'format': '[%(levelname)s] %(asctime)-15sZ [%(name)s %(funcName)s %(lineno)d] %(message)s'
            }
        },
        'filters': {
            "slog_filter": {
                '()': StdOutStatusLogFilter,
            }
        },
        'handlers': {
            'console': {
                'level': logging.getLevelName(stdout_level),
                'class': 'logging.StreamHandler',
                'formatter': 'console',
                'stream': 'ext://sys.stdout',
                'filters': ['slog_filter']
            },
            "debug_file_handler": {
                "class": 'logging.handlers.RotatingFileHandler',
                "level": logging.getLevelName(master_level),
                "formatter": "full",
                "filename": master_log,
                # Fixed: RotatingFileHandler expects integer maxBytes and
                # backupCount; the original passed strings, which silently
                # disabled size-based rollover.
                "maxBytes": 10485760,
                "backupCount": 20,
                "encoding": "utf8"
            },
            "info_file_handler": {
                "class": 'logging.handlers.RotatingFileHandler',
                "level": "INFO",
                "formatter": "standard",
                "filename": pb_log,
                "maxBytes": 10485760,
                "backupCount": 20,
                "encoding": "utf8",
                "filters": ['slog_filter']
            }
        },
        'loggers': {
            '': {
                'handlers': ['console', 'info_file_handler', 'debug_file_handler'],
                'level': 'DEBUG',
                'propagate': True
            }
        },
        'root': {
            'level': 'DEBUG',
            'handlers': ['console', 'debug_file_handler', 'info_file_handler']
        }
    }
    return d
def setup_internal_logs(master_log, master_level, pb_log, stdout_level):
    """Configure the master/status/console loggers and switch timestamps to UTC."""
    config_d = get_default_logging_config_dict(master_log, master_level, pb_log, stdout_level)
    logging.config.dictConfig(config_d)
    # Emit all log timestamps in UTC rather than local time.
    logging.Formatter.converter = time.gmtime
    return config_d
|
/*
* Copyright 2009-2017 Alibaba Cloud All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ALIBABACLOUD_TDSR_MODEL_DELETEPROJECTRESULT_H_
#define ALIBABACLOUD_TDSR_MODEL_DELETEPROJECTRESULT_H_
#include <string>
#include <vector>
#include <utility>
#include <alibabacloud/core/ServiceResult.h>
#include <alibabacloud/tdsr/TdsrExport.h>
namespace AlibabaCloud
{
namespace Tdsr
{
namespace Model
{
// Result payload of the TDSR DeleteProject API call.
class ALIBABACLOUD_TDSR_EXPORT DeleteProjectResult : public ServiceResult
{
public:
	DeleteProjectResult();
	// Construct and immediately parse a raw JSON response payload.
	explicit DeleteProjectResult(const std::string &payload);
	~DeleteProjectResult();
	// Error message returned by the service (empty on success).
	std::string getErrMessage()const;
	// Whether the call succeeded.
	bool getSuccess()const;
protected:
	// Populate errMessage_/success_ from the JSON payload.
	void parse(const std::string &payload);
private:
	std::string errMessage_;
	bool success_;
};
}
}
}
#endif // !ALIBABACLOUD_TDSR_MODEL_DELETEPROJECTRESULT_H_
|
""" MICROSOFT FORUM PAGE SCRAPER
Website: answers.microsoft.com
Example:
> python 1_getsites.py --language de-de --product xbox
"""
import argparse
import re
import sys
import os
# CLI arguments: forum language and product vertical to scrape.
parser = argparse.ArgumentParser()
parser.add_argument("--language",
                    default="de-de",
                    type=str,
                    help="'en-us' or 'de-de")
parser.add_argument('--product',
                    default='list',
                    type=str,
                    help="'windows', 'msoffice', 'xbox', 'outlook_com', 'skype', 'surface', 'protect', 'edge', 'ie', 'musicandvideo'")
args = parser.parse_args()
# Import and set driver
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
driver = webdriver.Chrome(ChromeDriverManager().install())
# Set product
productsel = args.product
language = args.language
# Select products to be scraped
if productsel == "list": # default option, entire product list
    products = ['windows', 'msoffice', 'xbox', 'outlook_com', 'skype', 'surface', 'protect', 'edge', 'musicandvideo', 'msteams', 'microsoftedge'] #
    # Check at which position we are
    # Resume support: scrape.log stores "<product>/<page>", so skip any
    # products that were already finished in a previous run.
    if os.path.exists('scrape.log'):
        with open('scrape.log', 'r') as log:
            products = products[products.index(log.read().split('/')[0]):]
else: # in case a specific product should be scraped
    products = [productsel]
# Loop through product list and pages
for product in products:
    print(f'[START] {language}, {product}.')
    # Logfile handling
    # scrape.log format is "<product>/<next page>"; resume at that page when
    # the logged product matches, otherwise start at page 1.
    if os.path.exists('scrape.log'):
        with open('scrape.log', 'r') as log:
            line = log.read()
            i = int(line.split('/')[1])
            j = line.split('/')[0]
            log.close()
        if j != product:
            i = 1
    else:
        i = 1
    # Go to next product, in case value has been set to 0
    if i == 0:
        continue
    # Scrape sites
    for x in range(i, 10000):
        if i == 0: break
        driver.get(f'https://answers.microsoft.com/{language}/{product}/forum?sort=LastReplyDate&dir=Desc&tab=All&status=all&mod=&modAge=&advFil=&postedAfter=&postedBefore=&threadType=All&isFilterExpanded=false&page=' + str(x))
        # Try it three times until breaking the scrape
        for tries in range(0, 3):
            try:
                html = driver.page_source
                # Localized "no results" markers signal the last page: record
                # product as finished (page 0) and stop paging.
                if ('Es wurden keine Ergebnisse gefunden' in html) or ('No results found' in html) or ('Aucun résultat trouvé' in html) or ('Nessun risultato trovato' in html) or ('No se han encontrado resultados' in html) or ('Pubblica domande, segui le discussioni, condividi le tue conoscenze' in html) or ('Posten Sie Fragen, folgen Sie Diskussionen und teilen Sie Ihr Wissen' in html) or ('Post questions, follow discussions, share your knowledge' in html) or ('Publiez des questions, suivez des discussions et partagez vos connaissances' in html) or ('Publique preguntas, siga conversaciones y comparta sus conocimientos' in html):
                    print(f'[EXIT] EMPTY PAGE REACHED -> - {language}, {product}.')
                    with open(f'scrape.log', 'w') as logfile:
                        logfile.write(f'{product}/0')
                        logfile.close()
                    i = 0
                    break
                # Wait until the thread list has rendered before extracting links.
                element = WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CLASS_NAME, 'thread-title')))
                html = driver.page_source
                # NOTE(review): the list comprehensions below rebind `x`,
                # shadowing the page counter inside this iteration.
                url_temp = re.findall(r'(https?://answers.microsoft.com/' + language + '/' + product + '/forum/[^\s]+)', html)
                url_temp2 = [s.strip('"') for s in url_temp]
                url_list = [x for x in url_temp2 if not x.endswith('LastReply')]
                with open(f'output-{product}-{language}.txt', 'a', encoding='utf-8') as outfile:
                    # Prepare Links
                    outfile.write("\n".join(url_list) + "\n")
                    outfile.close()
                if len(url_list) > 0:
                    print(f'[STATUS] - {product.capitalize()} page {str(x)} -> {len(url_list)} urls extracted.')
                    with open(f'scrape.log', 'w') as logfile:
                        logfile.write(f'{product}/{x+1}')
                        logfile.close()
                    break
                elif len(url_list) == 0:
                    print(f'[WARNING] - page {str(x)} has an empty url list.')
                    break
            except Exception as e:
                print(f'[EXCEPT] - try {tries}/3 -> page {str(x)} took too long -> retrying...')
                continue
|
"use strict";
var glob = require("glob");
var path = require("path");
module.exports = function (content, sourceMap) {
this.cacheable && this.cacheable();
var resourceDir = path.dirname(this.resourcePath);
var pattern = content.trim();
var files = glob.sync(pattern, {
cwd: resourceDir
});
if (!files.length) {
this.emitWarning('Did not find anything for glob "' + pattern + '" in directory "' + resourceDir + '"');
}
return "module.exports = {\n" + files.map(function (file) {
this.addDependency(path.resolve(resourceDir, file));
var fileName = path.basename(file, path.extname(file));
var stringifiedFileName = JSON.stringify(fileName);
var stringifiedFile = JSON.stringify(file);
return " " + stringifiedFileName + ": require(" + stringifiedFile + ")";
}, this).join(",\n") + "\n};";
};
|
import React from 'react';
import ReactDOM from 'react-dom';
import { createStore, combineReducers, applyMiddleware, compose } from 'redux';
import { Provider, connect } from 'react-redux';
import { createHashHistory } from 'history';
import { NAMESPACE_SEP } from './constant';
import createSagaMiddleware from 'redux-saga';
import * as sagaEffects from 'redux-saga/effects';
import { routerRedux } from './router';
import Plugin, { filterHooks } from './plugin';
export { connect }
let { routerMiddleware, connectRouter } = routerRedux;
let hashHistory = createHashHistory();
/**
 * Create a dva-style app: registers models, wires redux + redux-saga and a
 * connected router together, and renders the React tree on start().
 * @param {Object} opts - optional { history, initialState } plus plugin hooks.
 * @returns {Object} app with model(), router(), use() and start().
 */
export default function (opts = {}) {
    let history = opts.history || hashHistory;
    let app = {
        _history: history,
        _models: [],
        model,
        _router: null,
        router,
        start
    }
    function model(m) {
        const prefixedModel = prefixNamespace(m);// prefix reducer/effect names with the namespace first
        app._models.push(prefixedModel);// then record the model
        return prefixedModel;
    }
    function router(router) {
        app._router = router;// register the router render function
    }
    // Passed to combineReducers: each key names one slice of the combined state.
    let initialReducers = {// initial reducers (connected-react-router)
        // When the path changes an action is dispatched and this slice updates: router:{location,action}
        router: connectRouter(app._history)
    };
    let plugin = new Plugin();
    plugin.use(filterHooks(opts));
    app.use = plugin.use.bind(plugin);
    function start(container) {
        for (const model of app._models) {
            // initialReducers={counter:(state,action)=>newState}
            initialReducers[model.namespace] = getReducer(model, plugin._handleActions);
        }
        let rootReducer = createReducer();// build the root reducer
        let sagas = getSagas(app);
        // app._store = createStore(reducers);
        let sagaMiddleware = createSagaMiddleware();
        const extraMiddlewares = plugin.get('onAction');
        const extraEnhancers = plugin.get('extraEnhancers');
        // applyMiddleware returns an enhancer that augments createStore
        const enhancers = [...extraEnhancers, applyMiddleware(routerMiddleware(history),
            sagaMiddleware, ...extraMiddlewares)];
        // let store = applyMiddleware(routerMiddleware(history),sagaMiddleware, ...extraMiddlewares)(createStore)(rootReducer, opts.initialState);
        let store = createStore(rootReducer, opts.initialState, compose(...enhancers));
        app._store = store;
        let onStateChange = plugin.get('onStateChange');
        store.subscribe(() => {
            onStateChange.forEach(listener => listener(store.getState()))
        });
        // subscriptions
        for (const model of app._models) {
            runSubscription(model.subscriptions);
        }
        // NOTE(review): forEach passes (saga, index, array) into run(), so the
        // extra arguments reach each saga — confirm this is harmless here.
        sagas.forEach(sagaMiddleware.run);// run() starts each saga
        ReactDOM.render(
            <Provider store={app._store}>
                {app._router({ app, history })}
            </Provider>
            , document.querySelector(container));
        // Inject a model (state, reducers, subscriptions, effects) into the running app.
        app.model = injectModel.bind(app);
        function injectModel(m) {
            m = model(m);// prefix reducer/effect names and push into app._models
            initialReducers[m.namespace] = getReducer(m, plugin._handleActions);
            store.replaceReducer(createReducer());// swap in the new root reducer; the init action then fills the new slice's default state
            if (m.effects) {
                sagaMiddleware.run(getSaga(m.effects, m));
            }
            if (m.subscriptions) {
                runSubscription(m.subscriptions);
            }
        }
        function runSubscription(subscriptions = {}) {
            // Each subscription gets history + dispatch and an error callback
            // that fans out to every registered onError hook.
            for (let key in subscriptions) {
                let subscription = subscriptions[key];
                subscription({ history, dispatch: app._store.dispatch }, error => {
                    let onError = plugin.get('onError');
                    onError.forEach(fn => fn(error));
                });
            }
        }
        function createReducer() {
            const reducerEnhancer = plugin.get('onReducer');
            let extraReducers = plugin.get('extraReducers');
            return reducerEnhancer(combineReducers({
                ...initialReducers,
                ...extraReducers
            }));
        }
        function getSagas(app) {
            let sagas = [];
            for (const model of app._models) {
                // turn each model's effects object into one saga
                sagas.push(getSaga(model.effects, model, plugin.get('onEffect')));
            }
            return sagas;
        }
        function getSaga(effects, model) {
            return function* () {
                // key=asyncAdd key=asyncMinus
                for (const key in effects) {
                    const watcher = getWatcher(key, effects[key], model, plugin.get('onEffect'), plugin.get('onError'));
                    // fork runs the watcher as a separate task instead of blocking this saga
                    const task = yield sagaEffects.fork(watcher);
                    // Allow cancelling all of a namespace's effects via a special action.
                    yield sagaEffects.fork(function* () {
                        yield sagaEffects.take(`${model.namespace}/@@CANCEL_EFFECTS`);
                        yield sagaEffects.cancel(task);
                    });
                }
            }
        }
    }
    return app;
}
/**
 * Build the reducer for one model: delegate to the plugin-supplied
 * handleActions factory when present, otherwise dispatch on action.type
 * against the model's reducers map, starting from the model's state.
 */
function getReducer(model, handleActions) {
    const { reducers = {}, state: defaultState } = model;
    if (handleActions) {
        return handleActions(reducers, defaultState);
    }
    return (state = defaultState, action) => {
        const handler = reducers[action.type];// action.type= "counter/add"
        return handler ? handler(state, action) : state;
    };
}
/**
 * Prefix an action type with the model's namespace when it has none.
 * Warns (and leaves the type alone) when the caller already prefixed it.
 */
function prefixType(type, model) {
    if (!type.includes('/')) {
        return `${model.namespace}${NAMESPACE_SEP}${type}`;
    }
    if (type.startsWith(model.namespace)) {
        console.error(`Warning: [sagaEffects.put] ${type} should not be prefixed with namespace ${model.namespace}`);
    }
    return type;
}
/**
 * Build a watcher saga for one effect: wraps the effect with every
 * registered onEffect hook, then takeEvery's its action type, routing
 * thrown errors through the onError hooks. put() auto-prefixes the
 * dispatched action type with the model's namespace.
 */
function getWatcher(key, effect, model, onEffect, onError) {
    const put = (action) =>
        sagaEffects.put({ ...action, type: prefixType(action.type, model) });
    return function* () {
        if (onEffect) {// onEffect=[onEffect(effect, { put }, model, actionType)]
            for (const wrap of onEffect) {
                effect = wrap(effect, { ...sagaEffects, put }, model, key);
            }
        }
        // key=counter/asyncAdd counter/
        yield sagaEffects.takeEvery(key, function* (...args) {
            try {
                yield effect(...args, { ...sagaEffects, put });
            } catch (err) {
                onError.forEach(fn => fn(err));
            }
        });
    };
}
// Rewrite every key of `obj` from e.g. "add" to "counter/add".
function prefix(obj, namespace) {
    const prefixed = {};
    for (const key of Object.keys(obj)) {
        prefixed[`${namespace}${NAMESPACE_SEP}${key}`] = obj[key];
    }
    return prefixed;
}
/**
 * Prefix a model's reducer and effect keys with its namespace, in place.
 * Returns the same (mutated) model object.
 */
function prefixNamespace(model) {
    const { namespace, reducers, effects } = model;
    if (reducers) {
        model.reducers = prefix(reducers, namespace);
    }
    if (effects) {
        model.effects = prefix(effects, namespace);
    }
    return model;
}
|
/*
Copyright (c) Uber Technologies, Inc.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
*/
// @flow
import * as React from 'react';
import { isFragment } from 'react-is';
export const flattenFragments = (
children?: React.Node,
ChildWrapper?: React.ComponentType<{}>,
depth: number = 0
): React.Node[] =>
React.Children.toArray(children).reduce(
(acc: React.Node[], child: React.Node, i: number): React.Node[] => {
if (isFragment(child)) {
acc.push(
// $FlowFixMe
...flattenFragments(child.props.children, ChildWrapper, depth + 1)
);
} else if (React.isValidElement(child)) {
if (ChildWrapper) {
acc.push(<ChildWrapper key={`${depth}.${i}`}>{child}</ChildWrapper>);
} else {
acc.push(child);
}
}
return acc;
},
[]
);
|
import React from 'react'
import { View, Text, Image, StyleSheet } from 'react-native'
import { Colors, Metrics, Media, Fonts } from '../theme'
type Props = {
title: string,
style?: number | Object | Array<number>,
textStyle?: number | Object | Array<number>,
middleComponent?: Object
}
export default function Header(props: Props) {
const { middleComponent } = props
return (
<View style={[styles.container, props.style]}>
<View style={styles.left}>
<Image resizeMode="contain" source={Media.icons.elephant} style={styles.logo} />
</View>
<View style={styles.middle}>{middleComponent}</View>
<View style={styles.right}>
<Text style={[styles.text, props.textStyle]}>{props.title.toUpperCase()}</Text>
</View>
</View>
)
}
Header.defaultProps = {
style: undefined,
textStyle: undefined,
middleComponent: <View />
}
// Static styles: a horizontal bar split 1 : 2.5 : 1 between logo,
// middle slot and title, padded below the native nav bar.
const styles = StyleSheet.create({
  container: {
    flexDirection: 'row',
    alignItems: 'center',
    height: Metrics.headerHeight,
    width: Metrics.screenWidth,
    paddingHorizontal: Metrics.baseMargin,
    paddingTop: Metrics.navBarHeight,
    paddingBottom: 0
  },
  left: {
    flex: 1,
    alignItems: 'flex-start'
  },
  middle: {
    flex: 2.5,
    justifyContent: 'center',
    alignItems: 'center'
  },
  right: {
    flex: 1,
    alignItems: 'flex-end'
  },
  logo: {
    height: 35,
    width: 35
  },
  text: {
    color: Colors.greenLight,
    fontSize: Fonts.size.regular,
    fontWeight: 'bold'
  }
})
|
const User = require('../model/user')
class App {
    // GET / — render the landing page.
    getIndex = (req, res, next) => {
        res.render("index")
    }

    // POST / — store a contact message unless the name/email already exists.
    // Fixes: the original sent res.redirect() *and* res.json() on success
    // ("headers already sent"), sent no response at all when a duplicate
    // user was found (request hung), and ran an unused `User.find({})`.
    postIndex = async (req, res, next) => {
        try {
            const { name, email, phone, message } = req.body
            // NOTE(review): the duplicate check matches `username` against
            // `name` — confirm the User schema really has a `username` field.
            const existing = await User.find({ $or: [{ email: email }, { username: name }] })
            if (existing.length > 0) {
                return res.status(409).json({ message: 'User with this name or email already exists.', status: 409 })
            }
            const newUser = new User({
                email: email,
                name: name,
                phone: phone,
                message: message,
            })
            const saveUser = await newUser.save()
            if (!saveUser) {
                throw "Error sending message."
            }
            // Exactly one response on success.
            res.redirect('/')
        } catch (error) {
            console.log(error)
            res.status(400)
            res.json({ message: error, status: 400 })
        }
    }
}
const returnApp = new App()
module.exports = returnApp
|
'use strict';
const joi = require('joi');
const delay = require('delay');

/** Register the two example bullish jobs on the server. */
const register = (server) => {
    // add5: waits 5 seconds, then resolves with input + 5.
    server.bullish.job({
        name: 'add5',
        handler: async (job) => {
            await delay(5000);
            return job.data.input + 5;
        },
        config: {
            validate: {
                input: joi.number(),
            }
        }
    });

    // testPre: demonstrates a pre-processor whose result arrives in job.pre[0].
    server.bullish.job({
        name: 'testPre',
        handler: (job) => {
            if (job.simulated === true) console.log('The cake is a lie');
            return job.data.input + job.pre[0];
        },
        config: {
            pre: [(job) => job.data.input * 2],
            validate: {
                input: joi.number(),
            }
        }
    });
};

module.exports.plugin = {
    name: 'bullish-example-jobs',
    version: '1.0.0',
    register,
};
|
/**
Groups Controller
*/
// native modules
// 3rd party modules
// own modules
const DefaultController = require('./');
// Controller for the "Group" resource; all behaviour is inherited from
// DefaultController, parameterised with the model name.
class GroupsController extends DefaultController {
  // Bind the inherited controller to the 'Group' model.
  constructor() { super('Group'); }
}
module.exports = GroupsController;
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 13:55:10 2019
@author: Subhasis
"""
import sys
import os
import shutil
import numpy as np
import h5py as h5
import pandas as pd
import yaml
import argparse
import network_data_analysis as nda
"""Find the highest spiking KCs so we can disconnect them and repeat the
network model simulation"""
# jids = [
# 22072442,
# 22087964,
# 22087965,
# 22087966,
# 22087967,
# 22087970,
# 22087971,
# 22087972,
# 22087973,
# ]
# datadir = 'Y:/Subhasis/ggn_model_data/olfactory_network'
DATADIR = '/data/rays3/ggn/fixed_net'
TEMPLATEDIR = '/data/rays3/ggn/fixed_net_templates'
def make_parser():
    """Create the command-line parser for the KC-disconnection script."""
    parser = argparse.ArgumentParser(
        description='Disconnect high firing KCs from data file to create a template network')
    parser.add_argument('--limit', type=int, default=5,
                        help='Upper limit of allowed KC spike count')
    # The remaining options are plain string arguments.
    string_options = {
        '--sdir': 'source directory',
        '--tdir': 'target directory',
        '--jid': 'JID of source dataset',
    }
    for flag, description in string_options.items():
        parser.add_argument(flag, type=str, help=description)
    return parser
def remove_high_firing_kcs(jid, limit, sdir, tdir):
    """Remove KCs firing more than `limit` from dataset of `jid`. `sdir`
    points to directory containing the data file, `tdir` is where the
    output file will be written. If source file is called `x.h5`,
    output file will be `x_kc{limit}.h5`

    Returns the number of PN->KC synapses whose conductance was zeroed,
    or 0 when the output file already exists.
    """
    fpath = nda.find_h5_file(jid, sdir)
    kc_spike_count = []
    with h5.File(fpath, 'r') as fd:
        # Collect (kc_name, spike_count) for every KC spike train.
        for kc, spikes in fd[nda.kc_st_path].items():
            kc_spike_count.append((spikes.attrs['source'], len(spikes)))
        try:
            # Data files generated from a template record the template's path.
            forig_path = fd.attrs['original']
            print(jid, ': original template:', forig_path)
        except KeyError:
            forig_path = fpath
        syn = fd[nda.pn_kc_syn_path]
        # Synapse dataset may be 2D (rows x columns) or 1D of records.
        if len(syn.shape) == 2:
            syn_orig = pd.DataFrame(data=syn[:, 0])
        else:
            syn_orig = pd.DataFrame(data=syn[:])
    kc_spikes = pd.DataFrame(kc_spike_count, columns=['kc', 'spikes'])
    # NOTE(review): the two summaries below use hard-coded thresholds 5 and
    # 10, independent of `limit` — confirm that is intended.
    print('# kcs > 5 spikes:', len(kc_spikes[kc_spikes['spikes'] > 5]))
    print('# kcs > 10 spikes:', len(kc_spikes[kc_spikes['spikes'] > 10]))
    print('# spiking kcs:', len(kc_spikes[kc_spikes['spikes'] > 0]))
    orig_0 = set(np.where(syn_orig['gmax'] == 0)[0])
    # KCs whose spike count exceeds the allowed limit.
    over = kc_spikes[kc_spikes['spikes'] > limit]
    print('{} kcs spiked more than {} spikes'.format(len(over), limit))
    over_syn = pd.merge(syn_orig, over, left_on='post', right_on='kc')
    print('Synapses to > limit kcs', len(over_syn))
    print('Synapse to these kcs set to 0?', len(np.flatnonzero(over_syn['gmax'].values == 0)))
    # This is tricky - a simulation based on a template used to
    # generate a datafile with external reference to the synapse
    # datasets in the template. So attempt to generate another
    # template by updating synapses in the produced datafile
    # referred back to the original template.
    fname = os.path.basename(fpath).rpartition('.')[0]
    out_fname = '{}_kc{}.h5'.format(fname, limit)
    outfile = os.path.join(tdir, out_fname)
    if os.path.exists(outfile):
        print(f'File already exists: {outfile}')
        return 0
    print('Copying data from {} as {} to update using {} KC spiking'.format(forig_path, outfile, fpath))
    shutil.copyfile(forig_path, outfile)
    print('Disabling PN synapses to KCs firing > {} spikes'.format(limit))
    changed_syn_count = 0
    with h5.File(outfile, 'a') as ofd:
        syndf = ofd[nda.pn_kc_syn_path]
        # Set the conductances to each KC spiking more than `limit` spikes to 0
        for row in over.itertuples():
            idx = np.where(syn_orig['post'] == row.kc)[0]
            # print('Common 0 syn:', len(set(idx).intersection(orig_0)))
            changed_syn_count += len(idx)
            syndf[idx, 0, 'gmax'] = 0.0
        print('Modified: synapses set to 0 conductance:', changed_syn_count)
        # Redundant: the `with` block already closes the file on exit.
        ofd.close()
    print('Original: synapses with 0 conductance:', len(syn_orig[syn_orig['gmax'] == 0.0]))
    # Re-open the output read-only and verify the edit took effect.
    with h5.File(outfile, 'r') as o2:
        syn_new = pd.DataFrame(data=o2[nda.pn_kc_syn_path][:, 0])
        print('# synapses in updated file', len(syn_new))
        print('# shape of synapse data in updated file', syn_new.shape)
        print('# synapses with 0 conductance', len(syn_new[syn_new['gmax'] == 0.0]))
        assert (len(syn_new[syn_new['gmax'] == 0.0]) - len(syn_orig[syn_orig['gmax'] == 0.0])) == changed_syn_count
        print('Finished checking updated file')
    return changed_syn_count
if __name__ == '__main__':
    # Parse CLI args and run; exit nonzero when nothing was changed so
    # calling scripts can detect a no-op.
    parser = make_parser()
    args = parser.parse_args()
    changed_syn_count = remove_high_firing_kcs(args.jid, args.limit, args.sdir, args.tdir)
    if changed_syn_count == 0:
        sys.exit(1)
|
#encoding=utf-8
import codecs
import jieba
path='F:/pycharm/data/TextSimilarity/dict/synonym/同义词库.txt'
def construction_wordpairs(filepath):
    """Build a synonym lookup table from a cilin-style thesaurus file.

    Each line looks like ``<code>= w1 w2 w3`` ('=' marks true synonyms) or
    ``<code># w1 w2 w3`` ('#' marks related words).  For every word in a
    group the returned dict maps it to the *other* words of its group,
    skipping single-character words.

    The original duplicated the '='/'#' branches verbatim and crashed with
    IndexError on lines containing the marker character without its
    trailing space; malformed lines are now skipped.

    :param filepath: path to the UTF-8 encoded thesaurus file
    :returns: dict mapping word -> list of its synonyms
    """
    wordpairs = {}

    def add_group(words):
        # Map every word of a group to the other multi-character members.
        for current in words:
            wordpairs[current] = [w for w in words if w != current and len(w) > 1]

    with codecs.open(filepath, mode='r', encoding='utf8') as fr:
        for line in fr.readlines():
            # A line containing both markers is processed by both, matching
            # the original behaviour.
            for marker in ('= ', '# '):
                if marker[0] in line:
                    parts = line.replace('\n', '').split(marker)
                    if len(parts) > 1:
                        add_group(parts[1].split(' '))
    return wordpairs
# Build the synonym table once at import time.
# NOTE(review): `path` is a hard-coded absolute Windows path; importing this
# module fails wherever that file is missing — consider lazy loading.
dic = construction_wordpairs(path)
def get_cilinsynonyms(word):
    # Return the synonym list for `word`, or None when it is unknown.
    return dic.get(word)
def get_synonyms_overlap(sent1, sent2):
    """Score the keyword overlap of two sentences using cilin synonyms.

    Top keywords are extracted from both sentences; sent1's keywords are
    expanded with their synonyms, and the score is the overlap with sent2's
    keywords normalised by the smaller keyword count.

    :returns: overlap ratio in [0, 1]; 0.0 on no overlap or missing synonyms
    """
    # Shared POS filter.  BUG FIX: the original wrote ('…','vn''r') — Python
    # concatenates adjacent string literals, producing the invalid tag 'vnr'
    # and silently dropping both the verb-noun and pronoun filters.
    allow_pos = ('n', 'nr', 'nt', 'ns', 'nz', 'v', 'vn', 'r')
    seg1 = jieba.analyse.extract_tags(sent1, topK=3, withWeight=False, allowPOS=allow_pos)
    seg2 = jieba.analyse.extract_tags(sent2, topK=3, withWeight=False, allowPOS=allow_pos)
    s1 = set()
    s2 = set()
    for word in seg1:
        words = get_cilinsynonyms(word=word)
        if words is None:
            # NOTE(review): bailing out entirely when any single keyword has
            # no synonym entry looks surprising but is preserved as-is.
            return 0.0
        else:
            for w in words:
                s1.add(w)
            s1.add(word)
    for word in seg2:
        s2.add(word)
    res = s1.intersection(s2)
    if len(res) > 0:
        return len(res) / (min(len(seg1), len(seg2)) + 0.00000000001)
    else:
        return 0.0
if __name__ == '__main__':
    # Smoke test: look up the synonyms of one sample word.
    print(get_cilinsynonyms(word='手机号'))
|
#pragma once
namespace Utility
{
    // Reports whether this translation unit was compiled with _DEBUG
    // defined (i.e. a debug build configuration).
    inline bool IsDebug()
    {
#if defined(_DEBUG)
        const bool debugBuild = true;
#else
        const bool debugBuild = false;
#endif
        return debugBuild;
    }
}
|
import torch

SEED = 1234
# Fix the RNG and force deterministic cuDNN kernels so runs are reproducible.
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True

import DataSet
import torch.nn as nn
class TextRNN(nn.Module):
    """Bidirectional two-layer LSTM text classifier.

    Embeds token ids, runs them through a 2-layer bidirectional LSTM and
    classifies from the final timestep's output.
    """
    def __init__(self):
        super(TextRNN, self).__init__()
        Vocab = len(DataSet.getTEXT().vocab) ## vocabulary size (number of known tokens)
        Dim = 100 ## embedding dimension per token
        dropout = 0.5
        hidden_size = 256 # LSTM hidden units per direction
        num_classes = 3 ## number of output classes
        num_layers = 2 ## stacked bidirectional LSTM layers
        self.embedding = nn.Embedding(Vocab, Dim) ## embeddings are randomly initialised here
        self.lstm = nn.LSTM(Dim, hidden_size, num_layers,
                            bidirectional=True, batch_first=True, dropout=dropout)
        # *2: forward and backward LSTM outputs are concatenated.
        self.fc = nn.Linear(hidden_size * 2, num_classes)

    def forward(self, x):
        # x: [batch size, text size] of token ids
        x = self.embedding(x)
        # [batch size, text size, embedding]
        output, (hidden, cell) = self.lstm(x)
        # output = [batch size, text size, num_directions * hidden_size]
        output = self.fc(output[:, -1, :])  # LSTM output at the sentence's last timestep
        # output = [batch size, num_classes]
        return output
|
"use strict";
(function() {
angular
.module("FormBuilderApp")
.factory("FormService", FormService);
function FormService($http) {
var api = {
createFormForUser : createFormForUser,
findAllFormsForUser : findAllFormsForUser,
deleteFormById : deleteFormById,
updateFormById : updateFormById,
findAllFormsForUserByName : findAllFormsForUserByName,
findFormById: findFormById
};
return api;
function createFormForUser(userId, form) {
console.log("In createFormForUser");
return $http.post("/api/assignment/user/"+userId+"/form",form);
}
function findAllFormsForUser(userId) {
console.log("In findAllFormsForUser");
return $http.get("/api/assignment/user/"+userId+"/form");
}
function findAllFormsForUserByName(userId, form1) {
console.log("In findAllFormsForUserByName");
return $http.get("/api/assignment/user/"+userId+"/form/title/"+form1.title);
}
function deleteFormById(formId) {
console.log("In deleteFormById");
return $http.delete("/api/assignment/form/"+formId);
}
function updateFormById(formId, form) {
console.log("In updateFormById");
return $http.put("/api/assignment/form/"+formId,form);
}
function findFormById(formId) {
console.log("In findFormById");
return $http.get("/api/assignment/form/"+formId);
}
}
})();
|
/*-
* Copyright (c) 2012 Hamrouni Ghassen.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
*
* Recommender system using matrix factorization (MF)
* Computing the product recommendation using latent factor models
*
*/
#include "learned_factors.h"
#include "model_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <assert.h>
#include "utils.h"
struct learned_factors*
init_learned_factors (struct model_parameters * params)
{
struct learned_factors* lfactors =
malloc (sizeof (struct learned_factors) );
if (!lfactors)
{
return NULL;
}
lfactors->x=NULL;
lfactors->y = NULL;
lfactors->ratings_average = 0;
lfactors->item_factor_vectors = generate_random_matrix (params->items_number, params->dimensionality, params->seed);
if(params->algoithm_type == NEIGHBOURS_MF)
{
lfactors->y = generate_random_matrix (params->items_number, params->dimensionality, params->seed);
lfactors->x = generate_random_matrix (params->items_number, params->items_number, params->seed);
}
lfactors->user_factor_vectors = generate_random_matrix (params->users_number, params->dimensionality, params->seed);
lfactors->user_bias = malloc (sizeof (double) *params->users_number);
lfactors->item_bias = malloc (sizeof (double) *params->items_number);
memset (lfactors->user_bias, 0.0, params->users_number*sizeof (double) );
memset (lfactors->item_bias, 0.0, params->items_number*sizeof (double) );
lfactors->R = NULL;
lfactors->R_K = NULL;
if (!lfactors->item_factor_vectors ||
!lfactors->user_factor_vectors ||
!lfactors->item_bias ||
!lfactors->user_bias)
{
return lfactors;
}
return lfactors;
}
/*
 * free_learned_factors: delete the learned factors from memory.
 * Safe to call with NULL.
 */
void
free_learned_factors (learned_factors_t* lfactors)
{
	size_t i;

	if (!lfactors)
	{
		return;
	}

	/* Per-item rows, plus the optional neighbours-MF matrices. */
	for (i = 0; i < lfactors->items_number; i++)
	{
		free (lfactors->item_factor_vectors[i]);
		if (lfactors->x)
			free (lfactors->x[i]);
		if (lfactors->y)
			free (lfactors->y[i]);
	}
	free (lfactors->item_factor_vectors);
	if (lfactors->x)
		free (lfactors->x);
	if (lfactors->y)
		free (lfactors->y);

	for (i = 0; i < lfactors->users_number; i++)
	{
		free (lfactors->user_factor_vectors[i]);
	}
	free (lfactors->user_factor_vectors);

	free (lfactors->item_bias);
	free (lfactors->user_bias);

	if (lfactors->R)
	{
		for (i = 0; i < lfactors->users_number; i++)
		{
			free (lfactors->R[i].ratings_order);
		}
		free (lfactors->R);
	}

	/* BUG FIX: the original guarded this loop with `if (lfactors->R)`,
	 * dereferencing R_K when it could be NULL and skipping the per-item
	 * frees whenever R happened to be NULL but R_K was not. */
	if (lfactors->R_K)
	{
		for (i = 0; i < lfactors->items_number; i++)
		{
			free (lfactors->R_K[i].ratings_order);
		}
	}
	free (lfactors->R_K);

	free (lfactors);
}
|
# -*- coding: utf-8 -*-
"""
Copyright () 2018
All rights reserved
FILE: poker.py
AUTHOR: tianyuningmou
DATE CREATED: @Time : 2018/3/15 下午4:37
DESCRIPTION: .
VERSION: : #1
CHANGED By: : tianyuningmou
CHANGE: :
MODIFIED: : @Time : 2018/3/15 下午4:37
"""
# Find the largest straight (run of 5+ consecutive ranks) in a hand.
def find_max_straight(num_list):
    """Return the highest 5+-card straight as card labels, or [] if none.

    Aces (1) count high only: a 1 is rewritten to 14, and straights may
    not start from 1 or 2.  The result is sorted ascending and uses the
    labels 'J', 'Q', 'K', 'A' for ranks 11-14.
    """
    # Deduplicate the input ranks first.
    num_list = list(set(num_list))
    # 1 plays as an ace (14); straights starting with 1 do not count.
    if 1 in num_list:
        num_list.append(14)
        num_list.remove(1)
    # Straights starting with 2 do not count either.
    if 2 in num_list:
        num_list.remove(2)
    # Sort descending so we can scan for consecutive ranks high-to-low.
    num_list.sort(reverse=True)
    result_list = list()
    # Scan adjacent pairs, collecting members of the current run.
    for num in range(1, len(num_list)):
        # Difference of 1: the larger rank extends the current run.
        if num_list[num-1] - num_list[num] == 1:
            result_list.append(num_list[num-1])
            # At the final pair, also include the smallest rank itself.
            if num == len(num_list) -1:
                result_list.append(num_list[num])
        # Run broken: keep the previous rank, then decide whether to reset.
        else:
            result_list.append(num_list[num-1])
            # Fewer than five collected so far: discard and keep scanning;
            # otherwise the largest straight has been found.
            if len(result_list) < 5:
                result_list = []
                continue
            else:
                break
    # After the scan, anything shorter than five cards is not a straight.
    if len(result_list) < 5:
        result_list = []
    # Sort the result ascending.
    result_list.sort()
    # Convert back to card-label notation.
    result_list = [str(x) for x in result_list]
    result_list = ['A' if x == '14' else x for x in result_list]
    result_list = ['K' if x == '13' else x for x in result_list]
    result_list = ['Q' if x == '12' else x for x in result_list]
    result_list = ['J' if x == '11' else x for x in result_list]
    return result_list
# Find the longest straight and return it together with its length.
def find_max_length_straight(num_list):
    """Return (longest straight as labels, its length); ([], 0) if none.

    Finds the highest straight first, then searches the ranks strictly
    below it for a second straight and returns whichever is longer.
    NOTE(review): mutates the caller's `num_list` in place (sort/append/
    remove) — confirm callers do not rely on the original ordering.
    """
    straight_one = find_max_straight(num_list)
    num_list.sort()
    # 1 plays as an ace (14); straights starting with 1 do not count.
    if 1 in num_list:
        num_list.append(14)
        num_list.remove(1)
    # Straights starting with 2 do not count.
    if 2 in num_list:
        num_list.remove(2)
    if len(straight_one) > 0:
        # Only ranks below the found straight can form another one.
        # int(straight_one[0]) is safe: a 5-card straight's lowest card is
        # at most 10, so the label is always numeric.
        new_num_list = [x for x in num_list if x < int(straight_one[0])]
    else:
        return [], 0
    if len(new_num_list) > 0:
        straight_two = find_max_straight(new_num_list)
        if len(straight_one) >= len(straight_two):
            return straight_one, len(straight_one)
        else:
            return straight_two, len(straight_two)
    else:
        return straight_one, len(straight_one)
if __name__ == '__main__':
    # Demo: report the highest and the longest straight of a sample hand.
    num_list = [1, 12, 11, 13, 10, 8]
    result_one = find_max_straight(num_list)
    print('此扑克牌中不存在顺子') if len(result_one) == 0 else print('扑克牌中最大的顺子为:{}'.format(result_one))
    result_two, length = find_max_length_straight(num_list)
    print('此扑克牌中不存在顺子') if length == 0 else print('此扑克牌中最长的顺子为:{},其长度为:{}'.format(result_two, length))
|
// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_CHROME_BROWSER_SYNC_SYNC_OBSERVER_BRIDGE_H_
#define IOS_CHROME_BROWSER_SYNC_SYNC_OBSERVER_BRIDGE_H_
#import <Foundation/Foundation.h>
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/scoped_observation.h"
#include "components/sync/driver/sync_service.h"
#include "components/sync/driver/sync_service_observer.h"
// Protocol adopted by Objective-C objects that want sync-state updates
// relayed from the C++ SyncServiceObserver below.
@protocol SyncObserverModelBridge<NSObject>
- (void)onSyncStateChanged;
@optional
- (void)onSyncConfigurationCompleted;
@end
// C++ class to monitor profile sync status in Objective-C type.
class SyncObserverBridge : public syncer::SyncServiceObserver {
 public:
  // |service| must outlive the SyncObserverBridge.
  SyncObserverBridge(id<SyncObserverModelBridge> delegate,
                     syncer::SyncService* service);
  ~SyncObserverBridge() override;
  // syncer::SyncServiceObserver implementation:
  // (presumably forwarded to the delegate's corresponding selectors —
  // implementation lives in the .mm file, not visible here)
  void OnStateChanged(syncer::SyncService* sync) override;
  void OnSyncConfigurationCompleted(syncer::SyncService* sync) override;
 private:
  // Weak so the bridge never extends the delegate's lifetime.
  __weak id<SyncObserverModelBridge> delegate_ = nil;
  base::ScopedObservation<syncer::SyncService, syncer::SyncServiceObserver>
      scoped_observation_{this};
  DISALLOW_COPY_AND_ASSIGN(SyncObserverBridge);
};
#endif // IOS_CHROME_BROWSER_SYNC_SYNC_OBSERVER_BRIDGE_H_
|
from flask import Flask, jsonify, request
from Models import db, Dispositivo
from logging import exception
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///database\\dispositivo.db"
# BUG FIX: the key was misspelled "SQLALCHEMY_TRAK_MODIFICATIONS", so
# Flask-SQLAlchemy ignored it and modification tracking kept its default.
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(app)
# Routes
@app.route("/")
def home():
    # Simple landing/health endpoint.
    return "<h1>Welcome</h1>"
@app.route("/api/dispositivos", methods=["GET"])
def getDispositivos():
try:
dispositivos = Dispositivo.query.all()
toReturn = [dispositivo.serialize() for dispositivo in dispositivos]
return jsonify(toReturn), 200
except Exception as e:
exception("[SERVER]: Error")
return jsonify({"message": "Ha ocurrido un error"}), 500
@app.route("/api/dispositivo", methods=["GET"])
def getDispositivoById():
try:
name = request.args["nombre_de_equipo"]
dispositivo = Dispositivo.query.filter_by(nombre_de_equipo=name).first()
if not dispositivo:
return jsonify({"message": "No existe el dispositivo"}), 404
else:
return jsonify(dispositivo.serialize()), 200
except Exception as e:
exception("[SERVER]: Error")
return jsonify({"message": "Ha ocurrido un error"}), 500
@app.route("/api/find-dispositivo", methods=["GET"])
def getDispositivo():
try:
fields = {}
for arg in request.args:
print(arg)
fields[arg] = request.args[arg]
dispositivo = Dispositivo.query.filter_by(**fields).first()
if not dispositivo:
return jsonify({"message": "No existe el dispositivo"}), 404
else:
return jsonify(dispositivo.serialize()), 200
except Exception as e:
exception("[SERVER]: Error")
return jsonify({"message": "Ha ocurrido un error"}), 500
if __name__ == "__main__":
app.run(debug = True, port=4666)
|
// This is a manifest file that'll be compiled into application.js, which will include all the files
// listed below.
//
// Any JavaScript/Coffee file within this directory, lib/assets/javascripts, or any plugin's
// vendor/assets/javascripts directory can be referenced here using a relative path.
//
// It's not advisable to add code directly here, but if you do, it'll appear at the bottom of the
// compiled file. JavaScript code in this file should be added after the last require_* statement.
//
// Read Sprockets README (https://github.com/rails/sprockets#sprockets-directives) for details
// about supported directives.
//
//= require sip/application
//= require_tree .
|
"""
Row-level correlations for spreadsheets used in the spreadsheet visualizer.
"""
from nest_py.core.data_types.tablelike_schema import TablelikeSchema
COLLECTION_NAME = 'ssviz_feature_correlations'
def generate_schema():
    """Build the tablelike schema for per-row feature correlation records."""
    schema = TablelikeSchema(COLLECTION_NAME)
    # the spreadsheet whose correlation values are stored in this record
    schema.add_foreignid_attribute('ssviz_spreadsheet_id')
    # next two columns identify the spreadsheet row that defines the grouping
    # NOTE: needed to shorten these names so auto-generated index name is
    # short enough for psql
    schema.add_foreignid_attribute('g_spreadsheet_id')
    schema.add_int_attribute('g_feature_idx')
    # the scores, ordered by row index
    # note this field can contain NaNs
    # if serializing to json, use something like...
    # [None if np.isnan(val) else val for val in scores]
    # we cap pval scores (-10log10(pval)) at 200, and precision might as well
    # be a multiple of 4, because the storage cost is 2 bytes per four decimal
    # digits
    schema.add_numeric_list_attribute('scores', precision=24, scale=23)
    # Composite index used to look up a record by spreadsheet + grouping row.
    schema.add_index(\
        ['ssviz_spreadsheet_id', 'g_spreadsheet_id', 'g_feature_idx'])
    return schema
|
/*
* Platform level USB initialization for FS USB OTG controller on omap1 and 24xx
*
* Copyright (C) 2004 Texas Instruments, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <plat/mux.h>
#include <mach/usb.h>
#include "common.h"
/* These routines should handle the standard chip-specific modes
* for usb0/1/2 ports, covering basic mux and transceiver setup.
*
* Some board-*.c files will need to set up additional mux options,
* like for suspend handling, vbus sensing, GPIOs, and the D+ pullup.
*/
/* TESTED ON:
* - 1611B H2 (with usb1 mini-AB) using standard Mini-B or OTG cables
* - 5912 OSK OHCI (with usb0 standard-A), standard A-to-B cables
* - 5912 OSK UDC, with *nonstandard* A-to-A cable
* - 1510 Innovator UDC with bundled usb0 cable
* - 1510 Innovator OHCI with bundled usb1/usb2 cable
* - 1510 Innovator OHCI with custom usb0 cable, feeding 5V VBUS
* - 1710 custom development board using alternate pin group
* - 1710 H3 (with usb1 mini-AB) using standard Mini-B or OTG cables
*/
/* Parenthesize the expansions so operator precedence stays correct when
 * these macros appear inside larger expressions. */
#define INT_USB_IRQ_GEN		(IH2_BASE + 20)
#define INT_USB_IRQ_NISO	(IH2_BASE + 30)
#define INT_USB_IRQ_ISO		(IH2_BASE + 29)
#define INT_USB_IRQ_HGEN	INT_USB_HHC_1
#define INT_USB_IRQ_OTG		(IH2_BASE + 8)
#ifdef CONFIG_ARCH_OMAP_OTG
/*
 * omap_otg_init - configure the OTG controller and register child devices
 * @config: board-specific USB configuration
 *
 * Programs OTG_SYSCON_1/2 from the per-port pin configuration, gates the
 * USB clocks off until a driver needs them, and registers the UDC, OHCI
 * and OTG platform devices as requested by @config (and permitted by the
 * kernel configuration).
 */
void __init
omap_otg_init(struct omap_usb_config *config)
{
	u32 syscon;
	int alt_pingroup = 0;

	/* NOTE: no bus or clock setup (yet?) */
	syscon = omap_readl(OTG_SYSCON_1) & 0xffff;
	if (!(syscon & OTG_RESET_DONE))
		pr_debug("USB resets not complete?\n");

	//omap_writew(0, OTG_IRQ_EN);

	/* pin muxing and transceiver pinouts */
	if (config->pins[0] > 2)	/* alt pingroup 2 */
		alt_pingroup = 1;
	syscon |= config->usb0_init(config->pins[0], is_usb0_device(config));
	syscon |= config->usb1_init(config->pins[1]);
	syscon |= config->usb2_init(config->pins[2], alt_pingroup);
	pr_debug("OTG_SYSCON_1 = %08x\n", omap_readl(OTG_SYSCON_1));
	omap_writel(syscon, OTG_SYSCON_1);

	syscon = config->hmc_mode;
	syscon |= USBX_SYNCHRO | (4 << 16) /* B_ASE0_BRST */;
#ifdef CONFIG_USB_OTG
	if (config->otg)
		syscon |= OTG_EN;
#endif
	if (cpu_class_is_omap1())
		pr_debug("USB_TRANSCEIVER_CTRL = %03x\n",
			 omap_readl(USB_TRANSCEIVER_CTRL));
	pr_debug("OTG_SYSCON_2 = %08x\n", omap_readl(OTG_SYSCON_2));
	omap_writel(syscon, OTG_SYSCON_2);

	printk("USB: hmc %d", config->hmc_mode);
	/* BUGFIX: this test was inverted (!alt_pingroup), which printed the
	 * "usb2 alt" line in the normal-pingroup case (duplicating the plain
	 * "usb2 N wires" line below) and made the usb0 line unreachable.
	 */
	if (alt_pingroup)
		printk(", usb2 alt %d wires", config->pins[2]);
	else if (config->pins[0])
		printk(", usb0 %d wires%s", config->pins[0],
		       is_usb0_device(config) ? " (dev)" : "");
	if (config->pins[1])
		printk(", usb1 %d wires", config->pins[1]);
	if (!alt_pingroup && config->pins[2])
		printk(", usb2 %d wires", config->pins[2]);
	if (config->otg)
		printk(", Mini-AB on usb%d", config->otg - 1);
	printk("\n");

	if (cpu_class_is_omap1()) {
		u16 w;

		/* leave USB clocks/controllers off until needed */
		w = omap_readw(ULPD_SOFT_REQ);
		w &= ~SOFT_USB_CLK_REQ;
		omap_writew(w, ULPD_SOFT_REQ);

		w = omap_readw(ULPD_CLOCK_CTRL);
		w &= ~USB_MCLK_EN;
		w |= DIS_USB_PVCI_CLK;
		omap_writew(w, ULPD_CLOCK_CTRL);
	}
	syscon = omap_readl(OTG_SYSCON_1);
	syscon |= HST_IDLE_EN|DEV_IDLE_EN|OTG_IDLE_EN;

#ifdef CONFIG_USB_GADGET_OMAP
	if (config->otg || config->register_dev) {
		struct platform_device *udc_device = config->udc_device;
		int status;

		syscon &= ~DEV_IDLE_EN;
		udc_device->dev.platform_data = config;
		status = platform_device_register(udc_device);
		if (status)
			pr_debug("can't register UDC device, %d\n", status);
	}
#endif

#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
	if (config->otg || config->register_host) {
		struct platform_device *ohci_device = config->ohci_device;
		int status;

		syscon &= ~HST_IDLE_EN;
		ohci_device->dev.platform_data = config;
		status = platform_device_register(ohci_device);
		if (status)
			pr_debug("can't register OHCI device, %d\n", status);
	}
#endif

#ifdef CONFIG_USB_OTG
	if (config->otg) {
		struct platform_device *otg_device = config->otg_device;
		int status;

		syscon &= ~OTG_IDLE_EN;
		otg_device->dev.platform_data = config;
		status = platform_device_register(otg_device);
		if (status)
			pr_debug("can't register OTG device, %d\n", status);
	}
#endif
	pr_debug("OTG_SYSCON_1 = %08x\n", omap_readl(OTG_SYSCON_1));
	omap_writel(syscon, OTG_SYSCON_1);
}

#else
void omap_otg_init(struct omap_usb_config *config) {}
#endif
#ifdef CONFIG_USB_GADGET_OMAP

/* Register window and IRQ lines for the omap_udc device controller.
 * The omap_udc driver indexes these by position, so the order matters.
 */
static struct resource udc_resources[] = {
	/* order is significant! */
	{		/* registers */
		.start	= UDC_BASE,
		.end	= UDC_BASE + 0xff,
		.flags	= IORESOURCE_MEM,
	}, {		/* general IRQ */
		.start	= INT_USB_IRQ_GEN,
		.flags	= IORESOURCE_IRQ,
	}, {		/* PIO IRQ */
		.start	= INT_USB_IRQ_NISO,
		.flags	= IORESOURCE_IRQ,
	}, {		/* SOF IRQ */
		.start	= INT_USB_IRQ_ISO,
		.flags	= IORESOURCE_IRQ,
	},
};

/* Full 32-bit DMA addressing for the UDC. */
static u64 udc_dmamask = ~(u32)0;

static struct platform_device udc_device = {
	.name		= "omap_udc",
	.id		= -1,
	.dev = {
		.dma_mask		= &udc_dmamask,
		.coherent_dma_mask	= 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(udc_resources),
	.resource	= udc_resources,
};

/* Hook the UDC platform device into the board config, fixing up the
 * three IRQ numbers for omap7xx parts first.
 */
static inline void udc_device_init(struct omap_usb_config *pdata)
{
	/* IRQ numbers for omap7xx */
	if(cpu_is_omap7xx()) {
		udc_resources[1].start = INT_7XX_USB_GENI;
		udc_resources[2].start = INT_7XX_USB_NON_ISO;
		udc_resources[3].start = INT_7XX_USB_ISO;
	}
	pdata->udc_device = &udc_device;
}

#else
/* UDC support not configured: no-op stub. */
static inline void udc_device_init(struct omap_usb_config *pdata)
{
}
#endif
#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)

/* The dmamask must be set for OHCI to work */
static u64 ohci_dmamask = ~(u32)0;

/* Register window and interrupt for the on-chip OHCI host controller. */
static struct resource ohci_resources[] = {
	{
		.start	= OMAP_OHCI_BASE,
		.end	= OMAP_OHCI_BASE + 0xff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= INT_USB_IRQ_HGEN,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device ohci_device = {
	.name		= "ohci",
	.id		= -1,
	.dev = {
		.dma_mask		= &ohci_dmamask,
		.coherent_dma_mask	= 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(ohci_resources),
	.resource	= ohci_resources,
};

/* Hook the OHCI platform device (and the OCPI enable helper) into the
 * board config, fixing up the host IRQ for omap7xx parts.
 */
static inline void ohci_device_init(struct omap_usb_config *pdata)
{
	if (cpu_is_omap7xx())
		ohci_resources[1].start = INT_7XX_USB_HHC_1;
	pdata->ohci_device = &ohci_device;
	pdata->ocpi_enable = &ocpi_enable;
}

#else
/* OHCI support not configured: no-op stub. */
static inline void ohci_device_init(struct omap_usb_config *pdata)
{
}
#endif
#if defined(CONFIG_USB_OTG) && defined(CONFIG_ARCH_OMAP_OTG)

/* Register window and interrupt for the OTG controller. */
static struct resource otg_resources[] = {
	/* order is significant! */
	{
		.start	= OTG_BASE,
		.end	= OTG_BASE + 0xff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= INT_USB_IRQ_OTG,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device otg_device = {
	.name		= "omap_otg",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(otg_resources),
	.resource	= otg_resources,
};

/* Hook the OTG platform device into the board config, fixing up the
 * IRQ number for omap7xx parts.
 */
static inline void otg_device_init(struct omap_usb_config *pdata)
{
	if (cpu_is_omap7xx())
		otg_resources[1].start = INT_7XX_USB_OTG;
	pdata->otg_device = &otg_device;
}

#else
/* OTG support not configured: no-op stub. */
static inline void otg_device_init(struct omap_usb_config *pdata)
{
}
#endif
/*
 * omap1_usb0_init - pin mux / transceiver setup for port usb0
 * @nwires: number of transceiver wires (0 = port unused)
 * @is_device: nonzero when usb0 operates as a device (peripheral) port
 *
 * Returns the usb0 transceiver-select bits for OTG_SYSCON_1
 * (value << 16).
 */
u32 __init omap1_usb0_init(unsigned nwires, unsigned is_device)
{
	u32 syscon1 = 0;

	if (nwires == 0) {
		if (!cpu_is_omap15xx()) {
			u32 l;

			/* pulldown D+/D- */
			l = omap_readl(USB_TRANSCEIVER_CTRL);
			l &= ~(3 << 1);
			omap_writel(l, USB_TRANSCEIVER_CTRL);
		}
		return 0;
	}

	if (is_device) {
		/* device mode needs the D+ pullup (and VBUS sensing on 7xx) */
		if (cpu_is_omap7xx()) {
			omap_cfg_reg(AA17_7XX_USB_DM);
			omap_cfg_reg(W16_7XX_USB_PU_EN);
			omap_cfg_reg(W17_7XX_USB_VBUSI);
			omap_cfg_reg(W18_7XX_USB_DMCK_OUT);
			omap_cfg_reg(W19_7XX_USB_DCRST);
		} else
			omap_cfg_reg(W4_USB_PUEN);
	}

	/* internal transceiver (2-wire mode) */
	if (nwires == 2) {
		u32 l;

		// omap_cfg_reg(P9_USB_DP);
		// omap_cfg_reg(R8_USB_DM);

		if (cpu_is_omap15xx()) {
			/* This works on 1510-Innovator */
			return 0;
		}

		/* NOTES:
		 * - peripheral should configure VBUS detection!
		 * - only peripherals may use the internal D+/D- pulldowns
		 * - OTG support on this port not yet written
		 */

		/* Don't do this for omap7xx -- it causes USB to not work correctly */
		if (!cpu_is_omap7xx()) {
			l = omap_readl(USB_TRANSCEIVER_CTRL);
			l &= ~(7 << 4);
			if (!is_device)
				l |= (3 << 1);
			omap_writel(l, USB_TRANSCEIVER_CTRL);
		}

		return 3 << 16;
	}

	/* alternate pin config, external transceiver */
	if (cpu_is_omap15xx()) {
		printk(KERN_ERR "no usb0 alt pin config on 15xx\n");
		return 0;
	}

	omap_cfg_reg(V6_USB0_TXD);
	omap_cfg_reg(W9_USB0_TXEN);
	omap_cfg_reg(W5_USB0_SE0);
	if (nwires != 3)
		omap_cfg_reg(Y5_USB0_RCV);

	/* NOTE: SPEED and SUSP aren't configured here. OTG hosts
	 * may be able to use I2C requests to set those bits along
	 * with VBUS switching and overcurrent detection.
	 */

	if (nwires != 6) {
		u32 l;

		l = omap_readl(USB_TRANSCEIVER_CTRL);
		l &= ~CONF_USB2_UNI_R;
		omap_writel(l, USB_TRANSCEIVER_CTRL);
	}

	/* map wire count onto the transceiver-select value */
	switch (nwires) {
	case 3:
		syscon1 = 2;
		break;
	case 4:
		syscon1 = 1;
		break;
	case 6:
		syscon1 = 3;
		{
			u32 l;

			omap_cfg_reg(AA9_USB0_VP);
			omap_cfg_reg(R9_USB0_VM);
			l = omap_readl(USB_TRANSCEIVER_CTRL);
			l |= CONF_USB2_UNI_R;
			omap_writel(l, USB_TRANSCEIVER_CTRL);
		}
		break;
	default:
		printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
			0, nwires);
	}

	return syscon1 << 16;
}
/*
 * omap1_usb1_init - pin mux / transceiver setup for port usb1
 * @nwires: number of transceiver wires (0 = port unused)
 *
 * Returns the usb1 transceiver-select bits for OTG_SYSCON_1
 * (value << 20).
 */
u32 __init omap1_usb1_init(unsigned nwires)
{
	u32 syscon1 = 0;

	if (!cpu_is_omap15xx() && nwires != 6) {
		u32 l;

		l = omap_readl(USB_TRANSCEIVER_CTRL);
		l &= ~CONF_USB1_UNI_R;
		omap_writel(l, USB_TRANSCEIVER_CTRL);
	}
	if (nwires == 0)
		return 0;

	/* external transceiver */
	omap_cfg_reg(USB1_TXD);
	omap_cfg_reg(USB1_TXEN);
	if (nwires != 3)
		omap_cfg_reg(USB1_RCV);

	/* chip-specific SE0/SPEED pin groups */
	if (cpu_is_omap15xx()) {
		omap_cfg_reg(USB1_SEO);
		omap_cfg_reg(USB1_SPEED);
		// SUSP
	} else if (cpu_is_omap1610() || cpu_is_omap5912()) {
		omap_cfg_reg(W13_1610_USB1_SE0);
		omap_cfg_reg(R13_1610_USB1_SPEED);
		// SUSP
	} else if (cpu_is_omap1710()) {
		omap_cfg_reg(R13_1710_USB1_SE0);
		// SUSP
	} else {
		pr_debug("usb%d cpu unrecognized\n", 1);
		return 0;
	}

	/* map wire count onto the transceiver-select value; 2-wire is
	 * not supported on this port
	 */
	switch (nwires) {
	case 2:
		goto bad;
	case 3:
		syscon1 = 2;
		break;
	case 4:
		syscon1 = 1;
		break;
	case 6:
		syscon1 = 3;
		omap_cfg_reg(USB1_VP);
		omap_cfg_reg(USB1_VM);
		if (!cpu_is_omap15xx()) {
			u32 l;

			l = omap_readl(USB_TRANSCEIVER_CTRL);
			l |= CONF_USB1_UNI_R;
			omap_writel(l, USB_TRANSCEIVER_CTRL);
		}
		break;
	default:
	bad:
		printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
			1, nwires);
	}

	return syscon1 << 20;
}
/*
 * omap1_usb2_init - pin mux / transceiver setup for port usb2
 * @nwires: number of transceiver wires (0 = port unused)
 * @alt_pingroup: nonzero when usb0 claimed the alternate pin group, in
 *	which case usb2 must stay unconfigured
 *
 * Returns the usb2 transceiver-select bits for OTG_SYSCON_1
 * (value << 24).
 */
u32 __init omap1_usb2_init(unsigned nwires, unsigned alt_pingroup)
{
	u32 syscon1 = 0;

	/* NOTE omap1 erratum: must leave USB2_UNI_R set if usb0 in use */
	if (alt_pingroup || nwires == 0)
		return 0;

	if (!cpu_is_omap15xx() && nwires != 6) {
		u32 l;

		l = omap_readl(USB_TRANSCEIVER_CTRL);
		l &= ~CONF_USB2_UNI_R;
		omap_writel(l, USB_TRANSCEIVER_CTRL);
	}

	/* external transceiver */
	if (cpu_is_omap15xx()) {
		omap_cfg_reg(USB2_TXD);
		omap_cfg_reg(USB2_TXEN);
		omap_cfg_reg(USB2_SEO);
		if (nwires != 3)
			omap_cfg_reg(USB2_RCV);
		/* there is no USB2_SPEED */
	} else if (cpu_is_omap16xx()) {
		omap_cfg_reg(V6_USB2_TXD);
		omap_cfg_reg(W9_USB2_TXEN);
		omap_cfg_reg(W5_USB2_SE0);
		if (nwires != 3)
			omap_cfg_reg(Y5_USB2_RCV);
		// FIXME omap_cfg_reg(USB2_SPEED);
	} else {
		/* BUGFIX: this is the usb2 port; the message used to report
		 * port 1 (copy/paste from omap1_usb1_init).
		 */
		pr_debug("usb%d cpu unrecognized\n", 2);
		return 0;
	}
	// omap_cfg_reg(USB2_SUSP);

	/* map wire count onto the transceiver-select value; 2- and 5-wire
	 * are not supported on this port
	 */
	switch (nwires) {
	case 2:
		goto bad;
	case 3:
		syscon1 = 2;
		break;
	case 4:
		syscon1 = 1;
		break;
	case 5:
		goto bad;
	case 6:
		syscon1 = 3;
		if (cpu_is_omap15xx()) {
			omap_cfg_reg(USB2_VP);
			omap_cfg_reg(USB2_VM);
		} else {
			u32 l;

			omap_cfg_reg(AA9_USB2_VP);
			omap_cfg_reg(R9_USB2_VM);
			l = omap_readl(USB_TRANSCEIVER_CTRL);
			l |= CONF_USB2_UNI_R;
			omap_writel(l, USB_TRANSCEIVER_CTRL);
		}
		break;
	default:
	bad:
		printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
			2, nwires);
	}

	return syscon1 << 24;
}
#ifdef CONFIG_ARCH_OMAP15XX

/* ULPD_DPLL_CTRL */
#define DPLL_IOB		(1 << 13)
#define DPLL_PLL_ENABLE		(1 << 4)
#define DPLL_LOCK		(1 << 0)

/* ULPD_APLL_CTRL */
#define APLL_NDPLL_SWITCH	(1 << 0)

/*
 * omap_1510_usb_init - OMAP 1510-specific USB setup
 * @config: board-specific USB configuration
 *
 * Runs the per-port pin setup, programs the host-mode mux field in
 * MOD_CONF_CTRL_0, switches the 48 MHz function clock onto the DPLL
 * (spinning until it locks), and registers the UDC and/or OHCI platform
 * devices as requested by @config.
 */
static void __init omap_1510_usb_init(struct omap_usb_config *config)
{
	unsigned int val;
	u16 w;

	config->usb0_init(config->pins[0], is_usb0_device(config));
	config->usb1_init(config->pins[1]);
	config->usb2_init(config->pins[2], 0);

	val = omap_readl(MOD_CONF_CTRL_0) & ~(0x3f << 1);
	val |= (config->hmc_mode << 1);
	omap_writel(val, MOD_CONF_CTRL_0);

	printk("USB: hmc %d", config->hmc_mode);
	if (config->pins[0])
		printk(", usb0 %d wires%s", config->pins[0],
			is_usb0_device(config) ? " (dev)" : "");
	if (config->pins[1])
		printk(", usb1 %d wires", config->pins[1]);
	if (config->pins[2])
		printk(", usb2 %d wires", config->pins[2]);
	printk("\n");

	/* use DPLL for 48 MHz function clock */
	pr_debug("APLL %04x DPLL %04x REQ %04x\n", omap_readw(ULPD_APLL_CTRL),
		omap_readw(ULPD_DPLL_CTRL), omap_readw(ULPD_SOFT_REQ));

	w = omap_readw(ULPD_APLL_CTRL);
	w &= ~APLL_NDPLL_SWITCH;
	omap_writew(w, ULPD_APLL_CTRL);

	w = omap_readw(ULPD_DPLL_CTRL);
	w |= DPLL_IOB | DPLL_PLL_ENABLE;
	omap_writew(w, ULPD_DPLL_CTRL);

	w = omap_readw(ULPD_SOFT_REQ);
	w |= SOFT_UDC_REQ | SOFT_DPLL_REQ;
	omap_writew(w, ULPD_SOFT_REQ);

	/* busy-wait for DPLL lock before handing the clock to drivers */
	while (!(omap_readw(ULPD_DPLL_CTRL) & DPLL_LOCK))
		cpu_relax();

#ifdef CONFIG_USB_GADGET_OMAP
	if (config->register_dev) {
		int status;

		udc_device.dev.platform_data = config;
		status = platform_device_register(&udc_device);
		if (status)
			pr_debug("can't register UDC device, %d\n", status);
		/* udc driver gates 48MHz by D+ pullup */
	}
#endif

#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
	if (config->register_host) {
		int status;

		ohci_device.dev.platform_data = config;
		status = platform_device_register(&ohci_device);
		if (status)
			pr_debug("can't register OHCI device, %d\n", status);
		/* hcd explicitly gates 48MHz */
	}
#endif
}

#else
/* Not a 15xx build: no-op stub. */
static inline void omap_1510_usb_init(struct omap_usb_config *config) {}
#endif
/*
 * omap1_usb_init - common OMAP1 USB setup entry point
 * @pdata: board-specific USB configuration
 *
 * Installs the per-port init hooks and the platform devices provided by
 * the kernel configuration, then dispatches to the chip-specific init.
 */
void __init omap1_usb_init(struct omap_usb_config *pdata)
{
	pdata->usb0_init = omap1_usb0_init;
	pdata->usb1_init = omap1_usb1_init;
	pdata->usb2_init = omap1_usb2_init;
	udc_device_init(pdata);
	ohci_device_init(pdata);
	otg_device_init(pdata);

	/* 7xx and 16xx have the OTG controller; 15xx has its own path */
	if (cpu_is_omap7xx() || cpu_is_omap16xx())
		omap_otg_init(pdata);
	else if (cpu_is_omap15xx())
		omap_1510_usb_init(pdata);
	else
		printk(KERN_ERR "USB: No init for your chip yet\n");
}
|
from terminal import *

# Demo script: exercise Terminal.xprint with mixed payloads, scrolling,
# and a second terminal using custom foreground/background colors.
term = Terminal()

messages = [
    "Short lines are OK.",
    "But long lines are OK too because they will be wrapped.",
    "It does not have to be text.",
    42,
    "That is a number and it is OK.",
    "We handle scrolling too.",
]
for message in messages:
    term.xprint(message)

# Enough numbered lines to force the terminal to scroll.
for i in range(10):
    term.xprint(i)

# A fresh terminal with cyan background and red text.
term = Terminal(bg=(0, 255, 255), fg=(255, 0, 0))
term.xprint("But we can choose other colors too!")
|
class CircularQueue:
    """A FIFO queue backed by a ring buffer that doubles in capacity
    whenever an enqueue would overflow it.

    Attributes:
        len: current capacity of the backing list.
        size: number of elements currently stored.
        circular_queue: the backing list.
        start: index of the oldest element (next to dequeue).
        end: index of the next free write slot.
    """

    def __init__(self, n):
        """Create an empty queue with initial capacity ``n``."""
        self.len = n
        self.size = 0
        self.circular_queue = [0] * n
        self.start = 0
        self.end = 0

    def enqueue(self, data):
        """Append ``data`` to the tail, growing the buffer if full."""
        self.size += 1
        if self.size > self.len:
            # Double capacity; resize() linearizes the contents so that
            # start == 0 and end == old capacity afterwards.
            self.resize(self.len)
        self.circular_queue[self.end] = data
        self.end = (self.end + 1) % self.len

    def dequeue(self):
        """Remove and return the oldest element.

        Raises:
            IndexError: if the queue is empty.  (Previously an empty
                dequeue returned stale buffer contents and drove ``size``
                negative, silently corrupting the queue.)
        """
        if self.size == 0:
            raise IndexError("dequeue from empty queue")
        deq = self.circular_queue[self.start]
        self.size -= 1
        self.start = (self.start + 1) % self.len
        return deq

    def num_elts(self):
        """Return the number of stored elements."""
        return self.size

    def resize(self, num):
        """Grow the backing buffer to ``2 * num`` slots.

        Copies the current capacity's worth of elements in FIFO order to
        the front of the new buffer, so callers must pass the current
        capacity (``self.len``) as ``num``.
        """
        temp = [0] * num
        idx = self.start
        for i in range(self.len):
            temp[i] = self.circular_queue[idx % self.len]
            idx += 1
        self.circular_queue = temp + [0] * num
        self.start = 0
        self.end = self.len
        self.len = 2 * num
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Coupled spin-1/2 (Static dipolar spectrum)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
¹³C-¹H static dipolar coupling simulation.
"""
# %%
import matplotlib.pyplot as plt
from mrsimulator import Simulator, SpinSystem, Site, Coupling
from mrsimulator.methods import BlochDecaySpectrum
from mrsimulator import signal_processing as sp
from mrsimulator.spin_system.tensors import SymmetricTensor

# sphinx_gallery_thumbnail_number = 1

# %%
# **Spin Systems**
#
# Create a 13C-1H coupled spin system.
# Both sites sit at 0 ppm; the dipolar coupling constant D is -2e4 Hz.
spin_system = SpinSystem(
    sites=[
        Site(isotope="13C", isotropic_chemical_shift=0.0),
        Site(isotope="1H", isotropic_chemical_shift=0.0),
    ],
    couplings=[Coupling(site_index=[0, 1], dipolar=SymmetricTensor(D=-2e4))],
)

# %%
# **Methods**
#
# Create a BlochDecaySpectrum method, observing the 13C channel.
method = BlochDecaySpectrum(
    channels=["13C"],
    magnetic_flux_density=9.4,  # in T
    spectral_dimensions=[dict(count=2048, spectral_width=8.0e4)],
)

# %%
# **Simulator**
#
# Create the Simulator object and add the method and the spin system object.
sim = Simulator()
sim.spin_systems = [spin_system]  # add the spin system.
sim.methods = [method]  # add the method.
sim.run()

# %%
# **Post-Simulation Processing**
#
# Add post-simulation signal processing: apodize with a 500 Hz
# exponential (Lorentzian line broadening) in the time domain.
processor = sp.SignalProcessor(
    operations=[
        sp.IFFT(),
        sp.apodization.Exponential(FWHM="500 Hz"),
        sp.FFT(),
    ]
)
processed_data = processor.apply_operations(data=sim.methods[0].simulation)

# %%
# **Plot**
#
plt.figure(figsize=(4.25, 3.0))
ax = plt.subplot(projection="csdm")
ax.plot(processed_data.real, color="black", linewidth=1)
ax.invert_xaxis()
plt.tight_layout()
plt.show()
|
const test = require('ava')
const AbortController = require('abort-controller')
const pDefer = require('p-defer')
const anySignal = require('./')
test('should abort from any signal', async t => {
  const controllers = Array.from({ length: 5 }, () => new AbortController())
  const combined = anySignal(controllers.map(c => c.signal))
  t.is(combined.aborted, false)

  const firstAbort = pDefer()
  let fired = 0
  combined.addEventListener('abort', () => {
    fired += 1
    firstAbort.resolve()
  })

  // Aborting any single (randomly chosen) controller must abort the
  // combined signal.
  const victim = controllers[Math.floor(Math.random() * controllers.length)]
  victim.abort()

  await firstAbort.promise
  t.is(fired, 1)
  t.is(combined.aborted, true)
})
test('ignores non signals', async t => {
  const controllers = Array.from({ length: 5 }, () => new AbortController())
  // Include a non-signal entry to prove it is tolerated.
  const inputs = controllers.map(c => c.signal)
  inputs.push(undefined)
  const combined = anySignal(inputs)
  t.is(combined.aborted, false)

  const firstAbort = pDefer()
  let fired = 0
  combined.addEventListener('abort', () => {
    fired += 1
    firstAbort.resolve()
  })

  const victim = controllers[Math.floor(Math.random() * controllers.length)]
  victim.abort()

  await firstAbort.promise
  t.is(fired, 1)
  t.is(combined.aborted, true)
})
test('should only abort once', async t => {
  const controllers = Array.from({ length: 5 }, () => new AbortController())
  const combined = anySignal(controllers.map(c => c.signal))
  t.is(combined.aborted, false)

  const firstAbort = pDefer()
  let fired = 0
  combined.addEventListener('abort', () => {
    fired += 1
    firstAbort.resolve()
  })

  // Abort every controller; the combined signal must still fire its
  // abort event exactly once.
  for (const controller of controllers) {
    controller.abort()
  }

  await firstAbort.promise
  t.is(fired, 1)
  t.is(combined.aborted, true)
})
test('should abort if a provided signal is already aborted', t => {
  const controllers = Array.from({ length: 5 }, () => new AbortController())
  // Abort one randomly chosen controller *before* combining: the
  // resulting signal must be born aborted.
  controllers[Math.floor(Math.random() * controllers.length)].abort()
  const combined = anySignal(controllers.map(c => c.signal))
  t.is(combined.aborted, true)
})
|
// BROBOT STEP CONSTANTS
// Stepper-motor calibration values; tune per robot build.
var STEPSMETER = 11428; // STEPS/METERS default: 11428
var STEPSTURN = 4675; // STEPS/TURN (360 deg) default:4675
|
const express = require('express');
const router = express.Router();
const config = require('../config.json');
const database = require('../controllers/database.js');
// GET /: render the players page with all player rows plus per-player
// join counts (the query returns two result sets).
router.get('/', (req, res, next) => {
    database.query(`SELECT * FROM playerdata; SELECT pd.username as 'username', COUNT(ses.id) as 'joins' FROM gamesessions ses left join playerdata pd on pd.id = ses.player_id group by pd.username;`, function (error, results, fields) {
        if (error) {
            // BUGFIX: the original `throw error` made the 500 render below
            // unreachable and, because this is an async callback, escaped
            // Express's error pipeline entirely (crashing the process
            // instead of answering the request).
            console.error(error);
            res.status(500).render('errorviews/500', {
                "pagetitle": "500: Internal Server Error"
            });
            return;
        }
        res.render('players', {
            "pagetitle": "Players",
            objdata: results
        });
    });
});
module.exports = router;
|
import { connect } from "react-redux";
import Container from "./container";
import { push } from "react-router-redux"
import { actionCreators as userActions } from "redux/modules/user"
// Expose the logged-in user's name from the redux store to the container.
const mapStateToProps = (state, ownProps) => {
  const { user: { username } } = state;
  return {
    username
  };
};

const mapDispatchToProps = (dispatch, ownProps) => {
  return {
    // Navigate to the search-results route for the given term.
    goToSearch : searchTerm => {
      dispatch(push(`/search/${searchTerm}`));
    },
    // NOTE(review): the `notifications` parameter is ignored; the action is
    // always dispatched with `ownProps.notifications`. This looks
    // unintentional -- confirm whether callers expect their argument to be
    // honored before changing it.
    getNotifications : notifications => {
      dispatch(userActions.getNotifications(ownProps.notifications))
    }
  };
};

// Connect both state and dispatch maps to the container component.
export default connect(mapStateToProps, mapDispatchToProps)(Container);
|
import torch.nn as nn
from lightconvpoint.nn.deprecated import Module
class Identity(Module):
    """Pass-through layer compatible with LightConvPoint.

    Returns its feature tensor unchanged; useful as a structural
    placeholder in network definitions.

    # Forward arguments
    input: 3-D torch tensor.
        Input feature tensor. Dimensions are (B, I, N) with B the batch size,
        I the number of input channels and N the number of input points.
    points: 3-D torch tensor.
        The input points. Dimensions are (B, D, N) with B the batch size, D the
        dimension of the spatial space and N the number of input points.
    support_points: (optional) 3-D torch tensor.
        The support points to project features on. If not provided, use the `search`
        object of the layer to compute them.
        Dimensions are (B, D, N) with B the batch size, D the dimenstion of the
        spatial space and N the number of input points.
    indices: (optional) 3-D torch tensor.
        The indices of the neighboring points with respect to the support points.
        If not provided, use the `search` object of the layer to compute them.

    # Returns
    (input, support_points, indices)
    """

    def __init__(self):
        super().__init__()

    def forward_without_features(self, pos):
        # An identity layer cannot synthesize features from positions alone.
        raise NotImplementedError

    def forward_with_features(self, x, pos, support_points, indices):
        # Features flow through untouched.
        return x
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import keras.backend as K
from time import sleep
def switch(condition, t, e):
    """Backend-agnostic elementwise select: `t` where `condition` holds,
    otherwise `e`.

    Raises:
        RuntimeError: if the active Keras backend is neither TensorFlow
            nor Theano.  (Previously this case silently returned None,
            deferring the failure to a confusing downstream error.)
    """
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        return tf.where(condition, t, e)
    elif K.backend() == 'theano':
        import theano.tensor as tt
        return tt.switch(condition, t, e)
    raise RuntimeError('Unsupported Keras backend: %s' % K.backend())
def _ternarize(W, H=1):
    '''Project the weights onto the three values {-H, 0, +H}.

    Values above 0.7 * mean(|W/H|) map to +H, below the negated cutoff to
    -H, and everything in between to 0.

    # References:
    - [Recurrent Neural Networks with Limited Numerical Precision](http://arxiv.org/abs/1608.06902)
    - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
    '''
    scaled = W / H
    cutoff = 0.7 * K.mean(K.abs(scaled))  # # TODO: is this ok??
    positive = K.ones_like(scaled)
    neutral = K.zeros_like(scaled)
    negatives = switch(scaled <= -cutoff, -positive, neutral)
    return switch(scaled > cutoff, positive, negatives) * H
def ternarize(W, H=1):
    '''Straight-through ternarization: the forward pass sees the
    ternarized weights while gradients flow to W as if it were identity.

    # References:
    - [Recurrent Neural Networks with Limited Numerical Precision](http://arxiv.org/abs/1608.06902)
    - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
    '''
    return W + K.stop_gradient(_ternarize(W, H) - W)
def ternarize_dot(x, W):
    '''Dot product against ternarized weights with straight-through
    gradients for RNNs (maybe Dense or Conv too).

    Refer to 'Recurrent Neural Networks with Limited Numerical Precision'
    Section 3.1.
    '''
    return K.dot(x, W) + K.stop_gradient(K.dot(x, _ternarize(W) - W))
def ternary_tanh(x):
    '''Hard-clip activations to [-1, 1], then apply straight-through
    ternarization.'''
    return ternarize(K.clip(x, -1, 1))
|
// Vector rendering emulation modes
(function () { "use strict";
Flynn.VectorMode = {
    PLAIN:   0,  // No Vector rendering emulation (plain lines)
    V_THIN:  1,  // Thin Vector rendering
    V_THICK: 2,  // Thick Vector rendering
};

if (typeof Flynn.Config == "undefined") {
    Flynn.Config = {};  // Create Configuration object
}

Flynn.Config.MAX_PACE_RECOVERY_TICKS = 5; // Max elapsed 60Hz frames to apply pacing (beyond this, just jank)

// Vector graphics simulation
//
// Per-mode rendering parameters, keyed by Flynn.VectorMode name:
//   lineBrightness:   brightness shift applied to the line color
//                     (0 = color unchanged)
//   vertexBrightness: overdrive applied to the vertex highlight color
//   lineSize:         stroke width, in pixels
//   vertexSize:       size of the vertex highlight square, in pixels
Flynn.Config.VectorRender = {
    PLAIN: {
        lineBrightness: 0.0,
        vertexBrightness: 0.0,
        lineSize: 1.0,
        vertexSize: 1.0,
    },
    V_THIN: {
        lineBrightness: -0.15,
        vertexBrightness: 0.6,
        lineSize: 1.0,
        vertexSize: 1.0,
    },
    V_THICK: {
        lineBrightness: -0.15,
        vertexBrightness: 0.4,
        lineSize: 2.0,
        vertexSize: 2.0,
    },
};
Flynn.Canvas = Class.extend({
init: function(width, height, canvas, background_color) {
this.showMetrics = false;
if(typeof(canvas)==='undefined'){
this.canvas = document.getElementById("gameCanvas");
}
else{
this.canvas = canvas;
}
if(typeof(background_color)==='undefined'){
this.canvas.style.backgroundColor = '#000';
}
else{
this.canvas.style.backgroundColor = background_color;
}
this.canvas.width = width;
this.canvas.height = height;
this.previousTimestamp = 0;
this.constrained = false;
this.devLowFpsElapsedTicks = 0;
this.devLowFpsFrameCount = 0;
//---------------------------
// Add performance gauges
//---------------------------
var gauge_x = width - 131;
var gauge_spacing = 8;
this.gaugeFps = new Flynn.Gauge(
new Victor(gauge_x, height - 80),
120, // num_samples
60, // range
1, // scale
10, // tick_interval
Flynn.Colors.YELLOW,
'FPS'
);
this.gaugeGameLogicTime = new Flynn.Gauge(
// new Victor(gauge_x, this.gaugeFps.position.y - 90),
new Victor(gauge_x, 100),
120, // num_samples
34, // range
2, // scale
16.6, // tick_interval
Flynn.Colors.DODGERBLUE,
'Input/Update/Render'
);
this.gaugeGameLogicTime.moveTo( new Victor(
gauge_x,
this.gaugeFps.rect.top - this.gaugeGameLogicTime.rect.height - gauge_spacing
));
this.gaugePixiTime = new Flynn.Gauge(
new Victor(
gauge_x,
this.gaugeGameLogicTime.rect.top - 93),
120, // num_samples
34, // range
2, // scale
16.6, // tick_interval
Flynn.Colors.CYAN,
'Pixi'
);
this.gaugePixiTime.moveTo( new Victor(
gauge_x,
this.gaugeGameLogicTime.rect.top - this.gaugePixiTime.rect.height - gauge_spacing
));
this.gaugeTotalAnimation = new Flynn.Gauge(
new Victor(
gauge_x,
this.gaugePixiTime.rect.top - 93),
120, // num_samples
34, // range
2, // scale
16.6, // tick_interval
Flynn.Colors.MAGENTA,
'Animation'
);
this.gaugeTotalAnimation.moveTo( new Victor(
gauge_x,
this.gaugePixiTime.rect.top - this.gaugeTotalAnimation.rect.height - gauge_spacing
));
self = this;
this.ctx = (function(canvas) {
// NOTE: The "Context" (ctx) used in Flynn is (for legacy reasons) NOT
// an HTML5 graphics context. It is, rather, a custom object
// which contains data and functions related to rendering.
//
var ctx = {};
//----------------------------
// Initialize PixiJS Renderer
//----------------------------
// Identify renderer type
var type = "WebGL";
if(!PIXI.utils.isWebGLSupported()){
type = "canvas";
}
PIXI.utils.sayHello(type);
ctx.renderer = PIXI.autoDetectRenderer({
width: Game.CANVAS_WIDTH,
height: Game.CANVAS_HEIGHT,
view:canvas,
antialias:true
});
PIXI.settings.SPRITE_MAX_TEXTURES = Math.min(PIXI.settings.SPRITE_MAX_TEXTURES, 16);
PIXI.settings.PRECISION_FRAGMENT = PIXI.PRECISION.HIGH;
ctx.renderer.backgroundColor = 0x000000;
ctx.stage = new PIXI.Container();
ctx.graphics = new PIXI.Graphics();
ctx.stage.addChild(ctx.graphics);
// Deprecation warnings
ctx.clearAll_deprecation_error_reported = false;
ctx.vectorText_deprecation_error_reported = false;
ctx.vectorTextArc_deprecation_error_reported = false;
ctx.width = canvas.width;
ctx.height = canvas.height;
ctx.fps = 0;
ctx.fpsFrameAverage = 10; // Number of frames to average over
ctx.fpsFrameCount = 0;
ctx.fpsMsecCount = 0;
ctx.ticks = 0;
ctx.is_world = false;
ctx.lineSize = 1;
ctx.vertexSize = 1;
ctx.MAX_VERTICIES_x2 = 200;
ctx.vector_vertices = new Array(ctx.MAX_VERTICIES_x2);
ctx.index_vector_vertex = 0;
ctx.ACODE = "A".charCodeAt(0);
ctx.ZEROCODE = "0".charCodeAt(0);
ctx.SPACECODE = " ".charCodeAt(0);
ctx.EXCLAMATIONCODE = "!".charCodeAt(0);
ctx.ACCENTCODE = '`'.charCodeAt(0);
ctx.LOWERCASE_A = 'a'.charCodeAt(0);
ctx.LOWERCASE_Z = 'z'.charCodeAt(0);
ctx.TILDE = '~'.charCodeAt(0);
ctx.UPPER_TO_LOWER = 0x20;
ctx.world_wrap_enabled = false;
ctx.world_bounds = null;
ctx.world_wrap_offset_x = 0;
ctx.world_wrap_offset_y = 0;
ctx.drawPolygon = function(p, x, y) {
// .drawPolypon is deprecated. No world support.
throw "drawPolygon() is obsolete. Use polygon's .render() method.";
};
ctx.drawFpsGague = function(position, color, percentage){
if(arguments.length == 4){
throw "drawFpsGague(): API has changed.";
}
var draw_pos = position.clone().addScalar(0.5);
this.beginPath();
var length = 60;
var height = 6;
var x_needle = percentage * length;
this.strokeStyle = Flynn.Colors.GRAY;
ctx.fillStyle="#FFFFFF";
ctx.rect(draw_pos.x, draw_pos.y,length,height);
ctx.fillStyle=color;
ctx.fillRect(draw_pos.x, draw_pos.y, x_needle, height);
this.stroke();
};
//-----------------------------
// Vector graphic simulation
//-----------------------------
ctx.vectorStart = function(color, is_world, constrained, alpha){
// Args:
// color: A color string with any of the following forms:
// '#RRGGBB', '#RRGGBBAA'
// is_world: true if vector is using world (rather than screen) coordinates.
// constrained: If true, then x/y will be truncated to land on integer
// pixel locations.
// alpha: Alpha transparency in range 1.0 (solid) to 0.0 (transparent)
//
// If alpha is supplied as part of the color string and as a function
// parameter then the two alphas will be multiplied.
//
if(typeof(is_world)==='undefined'){
this.is_world = false;
}
else{
this.is_world = is_world;
}
if(typeof(constrained)==='undefined'){
this.constrained = false;
}
else{
this.constrained = constrained;
}
// Extract alpha from color string (if present).
// If alpha is supplied in the color string AND as a function
// parameter then the two alphas are multiplied.
alpha = alpha == undefined ? 1.0 : alpha;
var length = color.length;
if(length == 9){
// Color is '#RRGGBBAA
alpha = alpha * parseInt(color.substring(7), 16)/255;
color = color.substring(0, 7);
}
var config;
switch(Flynn.mcp.options.vectorMode){
case Flynn.VectorMode.PLAIN:
config = Flynn.Config.VectorRender.PLAIN;
break;
case Flynn.VectorMode.V_THIN:
config = Flynn.Config.VectorRender.V_THIN;
break;
case Flynn.VectorMode.V_THICK:
config = Flynn.Config.VectorRender.V_THICK;
break;
}
var line_color = Flynn.Util.shadeColor(color, config.lineBrightness);
this.vectorVertexColor = Flynn.Util.colorOverdrive(color, config.vertexBrightness);
this.vectorVertexAlpha = alpha * 0.7;
this.index_vector_vertex = 0;
this.lineSize = config.lineSize;
this.vertexSize = config.vertexSize;
this.graphics.lineStyle(
config.lineSize,
Flynn.Util.parseColor(line_color, true),
alpha);
};
ctx.vectorLineToUnconstrained = function(x, y){
throw "vectorLineToUnconstrained() deprecated. Pass constrained to vectorStart().";
};
ctx.vectorMoveToUnconstrained = function(x, y){
throw "vectorMoveToUnconstrained() deprecated. Pass constrained to vectorStart().";
};
ctx.vectorLineTo = function(x, y){
if(this.constrained){
x = Math.floor(x);
y = Math.floor(y);
}
if(this.is_world){
// World coordinates
var world = this.worldToScreen(x, y, true);
x = world.x;
y = world.y;
}
if(this.index_vector_vertex < ctx.MAX_VERTICIES_x2){
this.vector_vertices[this.index_vector_vertex++] = x;
this.vector_vertices[this.index_vector_vertex++] = y;
}
this.graphics.lineTo(x+0.5, y+0.5);
// This "moveTo" keeps PixiJS from drawing ugly long un-mitered corners
// for vertices with very acute angles.
this.graphics.moveTo(x+0.5, y+0.5);
};
ctx.vectorMoveTo = function(x, y){
if(this.constrained){
x = Math.floor(x);
y = Math.floor(y);
}
if(this.is_world){
// World coordinates
var world = this.worldToScreen(x, y, false);
x = world.x;
y = world.y;
}
if(this.index_vector_vertex < ctx.MAX_VERTICIES_x2){
this.vector_vertices[this.index_vector_vertex++] = x;
this.vector_vertices[this.index_vector_vertex++] = y;
}
this.graphics.moveTo(x+0.5, y+0.5);
};
ctx.vectorEnd = function(){
// Draw the (bright) vector vertex points
var offset = this.vertexSize / 2;
this.graphics.lineStyle();
this.graphics.beginFill(Flynn.Util.parseColor(this.vectorVertexColor, true), this.vectorVertexAlpha);
for(var i=0; i<this.index_vector_vertex; i+=2) {
this.graphics.drawRect(
this.vector_vertices[i] - offset + 0.5,
this.vector_vertices[i + 1] - offset + 0.5,
this.vertexSize,
this.vertexSize);
}
this.graphics.endFill();
};
ctx.worldToScreen = function(x, y, preserve){
// Convert a location from world coordinates to screen coordinates
x = (x - Flynn.mcp.viewport.x) * Flynn.mcp.viewportZoom;
y =(y - Flynn.mcp.viewport.y) * Flynn.mcp.viewportZoom;
if(!this.world_wrap_enabled){
return new Victor(x,y);
}
var wrap_margin = 40;
if(!preserve){
if(x < -wrap_margin){
this.world_wrap_offset_x = this.world_bounds.width * Flynn.mcp.viewportZoom;
}
else if(x > this.width + wrap_margin){
this.world_wrap_offset_x = -this.world_bounds.width * Flynn.mcp.viewportZoom;
}
else{
this.world_wrap_offset_x = 0;
}
if(y < -wrap_margin){
this.world_wrap_offset_y = this.world_bounds.height * Flynn.mcp.viewportZoom;
}
else if(y > this.height + wrap_margin){
this.world_wrap_offset_y = -this.world_bounds.height * Flynn.mcp.viewportZoom;
}
else{
this.world_wrap_offset_y = 0;
}
}
return new Victor(
(x + this.world_wrap_offset_x),
(y + this.world_wrap_offset_y)
);
};
ctx.fillRect = function(x, y, width, height, alpha){
if(alpha == undefined){
alpha = 1;
}
this.graphics.lineStyle();
this.graphics.beginFill(Flynn.Util.parseColor(ctx.fillStyle, true), alpha);
this.graphics.drawRect(x, y, width, height);
this.graphics.endFill();
};
ctx.vectorRectR = function(rect, color, fill_color, is_world){
this.vectorRect(
rect.left, rect.top, rect.width, rect.height,
color, fill_color, is_world);
};
ctx.vectorRect = function(x, y, width, height, color, fill_color, is_world, alpha){
if(typeof(fill_color)!=='undefined' && fill_color){
this.fillStyle = fill_color;
this.fillRect(x, y, width, height);
}
is_world = is_world == undefined ? false: is_world;
alpha = alpha == undefined ? 1.0 : alpha;
// Draw a rect using vectors
this.vectorStart(color, is_world, false, alpha);
this.vectorMoveTo(x, y);
this.vectorLineTo(x+width-1, y);
this.vectorLineTo(x+width-1, y+height-1);
this.vectorLineTo(x, y+height-1);
this.vectorLineTo(x, y);
this.vectorEnd();
};
ctx.vectorLine = function(x1, y1, x2, y2, color, is_world, alpha){
if(typeof(is_world)==='undefined'){
is_world = false;
}
alpha = alpha == undefined ? 1.0 : alpha;
// Draw a vector line
this.vectorStart(color, is_world, false, alpha);
this.vectorMoveTo(x1, y1);
this.vectorLineTo(x2, y2);
this.vectorEnd();
};
ctx.charToPolygon = function(ch, font){
var p;
if ((ch >= this.EXCLAMATIONCODE) && (ch <= this.TILDE)){
if(Flynn.mcp.forceUpperCase && ch >= this.LOWERCASE_A && ch <= this.LOWERCASE_Z){
ch -= this.UPPER_TO_LOWER;
}
p = font.Points.ASCII[ch - this.EXCLAMATIONCODE];
if(p==null){
p = font.Points.UNIMPLEMENTED_CHAR;
}
}
else{
p = font.Points.UNIMPLEMENTED_CHAR;
}
return p;
};
ctx.vectorCircle = function(x, y, radius, num_sides, color, is_world){
// Draw a circle using vectors
var angle;
this.vectorStart(color, is_world);
for(angle = 0; angle <= Math.PI * 2 + 0.001; angle += Math.PI*2/num_sides){
if(angle === 0){
this.vectorMoveTo(x + radius, y);
}
else{
this.vectorLineTo(x + Math.cos(angle) * radius, y + Math.sin(angle) * radius);
}
}
this.vectorEnd();
};
// Render a text string with a Flynn vector font.
//
// Options:
//    text: String (the text to display)
//    scale: float, scales the size (1.0 is no scaling)
//    x: number or null
//       number: The x location to display the text
//       null: Center text horizontally on screen
//    y: number or null
//       number: The y location to display the text
//       null: Center text vertically on screen
//    justify: String. 'left', 'right', or 'center'
//       If x is null, then justify can be null (it is ignored)
//    color: String. Text color.
//    is_world: Boolean
//       true: Use world coordinates
//       false: Use screen coordinates
//    font: Flynn font object (Flynn.Font.Normal, Flynn.Font.Block, etc.)
//    angle: Rotation angle (radians). Set null (or do not pass it) for
//       un-rotated text. Rotated text does not support justification.
//       Note: Un-rotated text has better rendering performance than text with
//       an angle of "0", so pass null (or do not pass) for un-rotated use.
//    aspect_ratio: Stretches the font height. Value is width/height, so
//       1.0 causes no stretching, 0.5 doubles the height, 2.0 halves the
//       height, etc.
//       Tip: An aspect_ratio of 0.75 mimics most old-school vector games.
//    spacing: scale the inter-character spacing. 1.0 = normal spacing.
//    transform_f: Vertex transformation callback function. Defaults to null if
//       no transform_f argument is passed.
//       If it exists, for each font vertex the callback function will be passed
//       a Victor object representing the x/y vertex
//       and should return a Victor object representing the new
//       (transformed) vertex.
//    is_constrained: Boolean (optional). Whether line drawing is constrained;
//       defaults to true for un-rotated text and false for rotated text.
//
ctx.vectorText2 = function(opts){
    opts = opts || {};
    opts.text = Flynn.Util.defaultText(opts.text, '<TEXT>');
    opts.scale = opts.scale || 1.0;
    opts.x = Flynn.Util.defaultArg(opts.x, null);
    opts.y = Flynn.Util.defaultArg(opts.y, null);
    opts.justify = opts.justify || 'left';
    opts.color = opts.color || Flynn.Colors.WHITE;
    opts.is_world = opts.is_world || false;
    opts.font = opts.font || Flynn.Font.Normal;
    opts.angle = Flynn.Util.defaultArg(opts.angle, null);
    opts.aspect_ratio = opts.aspect_ratio || 1.0;
    opts.spacing = opts.spacing || 1.0;
    opts.transform_f = opts.transform_f || null;
    if(opts.is_constrained == undefined){
        if (opts.angle == null){
            // Un-rotated text defaults to constrained
            opts.is_constrained = true;
        } else{
            // Rotated text defaults to unconstrained
            opts.is_constrained = false;
        }
    }
    // Force opts.text to be a string representation
    opts.text = String(opts.text);
    var i, len, j, len2, character, polygon, pen_up;
    var string_x = opts.x;
    var string_y = opts.y;
    var character_x = 0;
    var character_y = 0;
    // Horizontal advance per character, in (scaled) pixels.
    var step = opts.scale * opts.font.CharacterSpacing * opts.spacing;
    if (opts.angle == null){
        //----------------------------
        // Non-rotated text
        //----------------------------
        // Center x/y if they are not numbers
        if (string_x == null){
            string_x = Math.round((this.width - (opts.text.length*step-(opts.font.CharacterGap*opts.scale)))/2);
        }
        if (string_y == null){
            string_y = Math.round((this.height - step)/2);
        }
        // Justification: shift the pen start so the anchor point lands on
        // the requested edge/center of the rendered string.
        switch(opts.justify){
            case 'right':
                character_x -= step * opts.text.length - opts.scale * opts.font.CharacterGap;
                break;
            case 'center':
                character_x -= step * opts.text.length / 2 - opts.scale * opts.font.CharacterGap / 2;
                break;
            case 'left':
                break;
            case null:
                break;
            default:
                throw 'opts.justify must be one of null, "left", "right", or "center".';
        }
        for(i = 0, len = opts.text.length; i<len; i++){
            character = opts.text.charCodeAt(i);
            if (character === this.SPACECODE){
                character_x += step;
                continue;
            }
            polygon = this.charToPolygon(character, opts.font);
            pen_up = false;
            this.vectorStart(
                opts.color,
                opts.is_world,
                opts.is_constrained
            );
            for (j=0, len2=polygon.length; j<len2; j+=2){
                if(polygon[j]==Flynn.PEN_COMMAND){
                    // Pen-up marker: next vertex starts a new sub-path.
                    pen_up = true;
                }
                else{
                    var vertex_v = new Victor(
                        polygon[j] * opts.scale + character_x,
                        polygon[j+1] / opts.aspect_ratio * opts.scale + character_y );
                    if(opts.transform_f !== null){
                        vertex_v = opts.transform_f(vertex_v);
                    }
                    if(j===0 || pen_up){
                        this.vectorMoveTo(
                            vertex_v.x + string_x,
                            vertex_v.y + string_y);
                        pen_up = false;
                    }
                    else{
                        this.vectorLineTo(
                            vertex_v.x + string_x,
                            vertex_v.y + string_y);
                    }
                }
            }
            this.vectorEnd();
            character_x += step;
        }
    }
    else{
        //----------------------
        // Rotated text
        //----------------------
        // Center x or y (on screen) if they are not numbers.
        // Bug fix: this previously tested character_x/character_y, which are
        // always 0 at this point, so passing x/y as null produced NaN draw
        // coordinates. Center on screen, matching the non-rotated semantics.
        if (string_x == null){
            string_x = Math.round(this.width / 2);
        }
        if (string_y == null){
            string_y = Math.round(this.height / 2);
        }
        var pen_v = new Victor(character_x, character_y);
        // Unit vectors along the rotated text baseline and its normal.
        var unit_x_v = new Victor(1, 0).rotate(opts.angle);
        var unit_y_v = new Victor(0, 1).rotate(opts.angle);
        // Move to start of text (rotated text is always centered on x/y)
        var start_v = new Victor(
            -(opts.text.length*step-(opts.font.CharacterGap*opts.scale))/2,
            -(opts.scale / opts.aspect_ratio * opts.font.CharacterHeight)/2
        );
        start_v.rotate(opts.angle);
        pen_v.add(start_v);
        for(i = 0, len = opts.text.length; i<len; i++){
            character = opts.text.charCodeAt(i);
            if (character === this.SPACECODE){
                pen_v.add(unit_x_v.clone().multiplyScalar(opts.scale*opts.font.CharacterSpacing));
                continue;
            }
            polygon = this.charToPolygon(character, opts.font);
            pen_up = false;
            this.vectorStart(
                opts.color,
                opts.is_world,
                opts.is_constrained
            );
            for (j=0, len2=polygon.length; j<len2; j+=2){
                if(polygon[j]==Flynn.PEN_COMMAND){
                    pen_up = true;
                }
                else{
                    // Project the font-space vertex onto the rotated axes.
                    var draw_v = pen_v.clone();
                    draw_v.add(unit_x_v.clone().multiplyScalar(polygon[j]*opts.scale));
                    draw_v.add(unit_y_v.clone().multiplyScalar(polygon[j+1]*opts.scale / opts.aspect_ratio));
                    if(opts.transform_f !== null){
                        draw_v = opts.transform_f(draw_v);
                    }
                    if(j===0 || pen_up){
                        this.vectorMoveTo(draw_v.x + string_x, draw_v.y + string_y);
                        pen_up = false;
                    }
                    else{
                        this.vectorLineTo(draw_v.x + string_x, draw_v.y + string_y);
                    }
                }
            }
            this.vectorEnd();
            pen_v.add(unit_x_v.clone().multiplyScalar(opts.scale*opts.font.CharacterSpacing));
        }
    }
};
ctx.vectorText = function(text, scale, x, y, justify, color, is_world, font, angle, aspect_ratio){
    // ************************************************
    // *** Deprecated. Use .vectorText2() instead.  ***
    // ************************************************
    // Thin shim: repackages the positional arguments into the options
    // object expected by vectorText2(). Logs a one-time deprecation error.
    if(!this.vectorText_deprecation_error_reported){
        this.vectorText_deprecation_error_reported = true;
        // Bug fix: the suggested replacement was misspelled "vectortText2".
        console.error('ctx.vectorText() has been deprecated. Use ctx.vectorText2() instead.');
    }
    ctx.vectorText2(
        {
            text: text,
            scale: scale,
            x: x,
            y: y,
            justify: justify,
            color: color,
            is_world: is_world,
            font: font,
            angle: angle,
            aspect_ratio: aspect_ratio
        }
    );
};
// Render a text string bent along a circular arc.
ctx.vectorTextArc2 = function(opts){
    // Options:
    //    text: String (the text to display)
    //    scale: float, scales the size (1.0 is no scaling)
    //    center_x: X axis center of the arc.
    //    center_y: Y axis center of the arc.
    //    angle: Angle at which to draw the text.
    //    radius: Radius of the text arc along which to draw.
    //    color: String. Text color.
    //    is_centered: If true then center the text at angle.  Else
    //       left justify the text at angle.
    //    is_reversed: If true then the bottom of the characters will
    //       face outward from the center.  Used when text is at the
    //       bottom of an arc and needs to appear "right side up".
    //    is_world: Boolean
    //       true: Use world coordinates
    //       false: Use screen coordinates
    //    font: Flynn font object (Flynn.Font.Normal, Flynn.Font.Block, etc.)
    //    stretch: Value by which to vertically stretch the font, else null
    //       for no stretching.
    opts = opts || {};
    opts.text = Flynn.Util.defaultText(opts.text, '<TEXT>');
    opts.scale = opts.scale || 1.0;
    opts.center_x = opts.center_x || this.width/2;
    opts.center_y = opts.center_y || this.height/2;
    opts.angle = opts.angle || 0.0;
    opts.radius = opts.radius || 100;
    opts.color = opts.color || Flynn.Colors.WHITE;
    opts.is_centered = Flynn.Util.defaultTrue(opts.is_centered);
    opts.is_reversed = opts.is_reversed || false;
    opts.is_world = opts.is_world || false;
    opts.font = opts.font || Flynn.Font.Normal;
    opts.stretch = opts.stretch || null;
    var draw_x, draw_y;
    var text = String(opts.text);
    var render_angle = opts.angle;
    // Angular advance per character: the chord subtended by one character
    // cell at the given radius.
    var render_angle_step = Math.asin(opts.font.CharacterSpacing * opts.scale / opts.radius);
    if(opts.stretch){
        // NOTE(review): 1.4 looks like an empirically-chosen widening factor
        // for stretched text -- confirm against the font metrics.
        render_angle_step *= 1.4;
    }
    var renderAngleOffset = 0;
    if (opts.is_centered){
        // Back up half the total arc length so the string is centered on angle.
        renderAngleOffset = render_angle_step * (text.length / 2 - 0.5);
        if(opts.is_reversed){
            renderAngleOffset = -renderAngleOffset;
        }
    }
    render_angle -= renderAngleOffset;
    // Each glyph is rotated 90 degrees from its radial position so it
    // stands upright along the arc.
    var character_angle = render_angle + Math.PI/2;
    if(opts.is_reversed){
        // Flip glyphs and walk the arc in the opposite direction.
        character_angle += Math.PI;
        render_angle_step = - render_angle_step;
    }
    for(var i = 0, len = text.length; i<len; i++){
        this.vectorStart(opts.color, opts.is_world, false);
        var character = text.charCodeAt(i);
        if (character === this.SPACECODE){
            render_angle += render_angle_step;
            character_angle += render_angle_step;
            continue;
        }
        // Get the character vector points
        var polygon = this.charToPolygon(character, opts.font);
        // Render character
        var pen_up = false;
        for (var j=0, len2=polygon.length; j<len2; j+=2){
            if(polygon[j]==Flynn.PEN_COMMAND){
                // Pen-up marker: next vertex starts a new sub-path.
                pen_up = true;
            }
            else{
                if(opts.stretch){
                    var sign = 1;
                    if (opts.is_reversed){
                        sign = -sign;
                    }
                    // Remap x coordinate onto a logarithmic scale
                    var character_x = polygon[j+1];
                    if (!opts.is_reversed){
                        character_x = opts.font.CharacterHeight - character_x;
                    }
                    var x_log = Flynn.Util.logish(
                        character_x,
                        0, // min
                        opts.font.CharacterHeight, // max
                        1.2 // power
                    );
                    // Stretched glyphs: radial position comes from the
                    // (remapped) glyph y coordinate, angular position from x.
                    var draw_radius = opts.radius + (x_log - opts.font.CharacterHeight/2) * opts.scale * opts.stretch;
                    var draw_angle = render_angle +
                        sign * (polygon[j] - opts.font.CharacterWidth/2) * opts.font.CharacterSpacing * opts.scale / (opts.font.CharacterWidth * opts.radius);
                    draw_x = Math.cos(draw_angle) * draw_radius + opts.center_x;
                    draw_y = Math.sin(draw_angle) * draw_radius + opts.center_y;
                }
                else{
                    // Unstretched glyphs: rotate the glyph-local vertex by
                    // character_angle, then translate onto the arc.
                    var x = polygon[j] - opts.font.CharacterWidth/2;
                    var y = polygon[j+1] - opts.font.CharacterHeight/2;
                    var c = Math.cos(character_angle);
                    var s = Math.sin(character_angle);
                    draw_x = (c*x - s*y) * opts.scale + Math.cos(render_angle) * opts.radius + opts.center_x;
                    draw_y = (s*x + c*y) * opts.scale + Math.sin(render_angle) * opts.radius + opts.center_y;
                }
                if(j===0 || pen_up){
                    this.vectorMoveTo(draw_x, draw_y);
                    pen_up = false;
                }
                else{
                    this.vectorLineTo(draw_x, draw_y);
                }
            }
        }
        this.vectorEnd();
        render_angle += render_angle_step;
        character_angle += render_angle_step;
    }
};
ctx.vectorTextArc = function(text, scale, center_x, center_y, angle, radius, color, is_centered, is_reversed, is_world, font, stretch){
    // ***************************************************
    // *** Deprecated. Use .vectorTextArc2() instead.  ***
    // ***************************************************
    // Thin shim: repackages the positional arguments into the options
    // object expected by vectorTextArc2(). Logs a one-time deprecation error.
    if(!this.vectorTextArc_deprecation_error_reported){
        this.vectorTextArc_deprecation_error_reported = true;
        // Bug fix: the suggested replacement was misspelled "vectortTextArc2".
        console.error('ctx.vectorTextArc() has been deprecated. Use ctx.vectorTextArc2() instead.');
    }
    ctx.vectorTextArc2(
        {
            text: text,
            scale: scale,
            center_x: center_x,
            center_y: center_y,
            angle: angle,
            radius: radius,
            color: color,
            is_centered: is_centered,
            is_reversed: is_reversed,
            is_world: is_world,
            font: font,
            // Bug fix: was passed as 'is_stretched', which vectorTextArc2()
            // never reads (it expects 'stretch'), silently dropping the
            // stretch option for all legacy callers.
            stretch: stretch
        }
    );
};
// Deprecated no-op retained for API compatibility; warns once per context.
ctx.clearAll = function(){
    if(this.clearAll_deprecation_error_reported){
        return;
    }
    this.clearAll_deprecation_error_reported = true;
    console.error('ctx.clearAll() has been deprecated and should not be called.');
};
return ctx;
})(this.canvas);
this.ctx.strokeStyle = Flynn.Colors.WHITE;
},
// Drive the main render loop: invoke animation_callback_f once per display
// frame (until Flynn.mcp.halted), maintaining FPS statistics, frame pacing,
// and the developer slow-motion / low-FPS simulation modes.
animate: function(animation_callback_f) {
    // Resolve the best available animation-frame scheduler, falling back
    // through vendor prefixes to a plain ~60Hz setTimeout.
    var refresh_f = (function() {
        return window.requestAnimationFrame ||
            window.webkitRequestAnimationFrame ||
            window.mozRequestAnimationFrame ||
            window.oRequestAnimationFrame ||
            window.msRequestAnimationFrame ||
            // probably excessive fallback
            function(cb, el){
                window.setTimeout(cb, 1000/60);
            };
    })();
    var self = this;
    var callback_f = function(timeStamp) {
        //---------------------------
        // Calculate FPS and pacing
        //---------------------------
        var timeNow;
        var performance_proxy;
        if(Flynn.mcp.browserSupportsPerformance){
            timeNow = performance.now();
            performance_proxy = performance;
        }
        else{
            // No Performance API: use the callback timestamp and a null
            // proxy whose now() always returns 0 (timing metrics disabled).
            timeNow = timeStamp;
            performance_proxy = Flynn.PerformanceNull;
        }
        var deltaMsec = timeNow - self.previousTimestamp;
        self.ctx.fpsMsecCount += deltaMsec;
        // elapsed_ticks represents the (possibly fractional) number of
        // 60FPS game ticks which have elapsed.
        // If a game is running at 30FPS then elapsed_ticks will be 2.0, At 15FPS it will be 4.0
        var elapsed_ticks = (60*(timeNow - self.previousTimestamp))/1000;
        if (elapsed_ticks > Flynn.Config.MAX_PACE_RECOVERY_TICKS) {
            // Too long since the last frame (e.g. the tab was backgrounded):
            // do not try to catch up; advance a single tick instead.
            elapsed_ticks = 1;
        }
        elapsed_ticks *= Flynn.mcp.gameSpeedFactor;
        self.ctx.ticks += 1;
        ++self.ctx.fpsFrameCount;
        if (self.ctx.fpsFrameCount >= self.ctx.fpsFrameAverage){
            // Publish an averaged FPS reading once per averaging window.
            self.ctx.fpsFrameCount = 0;
            self.ctx.fps = Math.round(1000/(self.ctx.fpsMsecCount/self.ctx.fpsFrameAverage));
            self.ctx.fpsMsecCount = 0;
        }
        self.previousTimestamp = timeNow;
        //---------------------------
        // Apply Developer Speed/Pacing options
        //---------------------------
        var label = '';
        var skip_this_frame = false;
        switch(Flynn.mcp.devPacingMode){
            case Flynn.DevPacingMode.NORMAL:
                self.gaugeFps.record(1000/deltaMsec);
                break;
            case Flynn.DevPacingMode.SLOW_MO:
                // Run game logic at 1/5 speed.
                elapsed_ticks *= 0.2;
                label = "SLOW_MO";
                self.gaugeFps.record(1000/deltaMsec);
                break;
            case Flynn.DevPacingMode.FPS_20:
                // Simulate ~20FPS by rendering only every third frame,
                // accumulating the skipped frames' elapsed ticks.
                ++self.devLowFpsFrameCount;
                self.devLowFpsElapsedTicks += elapsed_ticks;
                if(self.devLowFpsFrameCount === 3){
                    self.devLowFpsFrameCount = 0;
                    elapsed_ticks = self.devLowFpsElapsedTicks;
                    self.devLowFpsElapsedTicks = 0;
                    self.gaugeFps.record(60/elapsed_ticks);
                }
                else{
                    // Skip this frame (to simulate low frame rate)
                    skip_this_frame = true;
                }
                label = "FPS_20";
                break;
        }
        //---------------------------
        // Do animation
        //---------------------------
        if(!skip_this_frame){
            var render_start=0;
            var render_end=0;
            render_start = performance_proxy.now();
            // ***** Clear the PixiJS Graphics object ******
            self.ctx.stage.removeChildren();
            self.ctx.graphics.clear();
            self.ctx.stage.addChild(self.ctx.graphics);
            animation_callback_f(elapsed_ticks);
            if(label){
                // Show the active developer pacing mode on screen.
                self.ctx.vectorText2({
                    text:label,
                    scale: 1.5,
                    x: 10,
                    y: self.canvas.height-20,
                    color:Flynn.Colors.RED
                });
            }
            if (self.showMetrics){
                self.gaugeFps.render(self.ctx);
                self.gaugeGameLogicTime.render(self.ctx);
                self.gaugePixiTime.render(self.ctx);
                self.gaugeTotalAnimation.render(self.ctx);
            }
            render_end = performance_proxy.now();
            self.gaugeGameLogicTime.record(render_end - render_start);
            var pixi_start, pixi_end;
            pixi_start = performance_proxy.now();
            // ***** Render the PixiJS Graphics object ******
            self.ctx.renderer.render(self.ctx.stage);
            pixi_end = performance_proxy.now();
            self.gaugePixiTime.record(pixi_end - pixi_start);
            self.gaugeTotalAnimation.record(
                render_end - render_start +
                pixi_end - pixi_start
            );
        }
        // Update screen and request callback
        if(!Flynn.mcp.halted){
            refresh_f(callback_f, self.canvas);
        }
    };
    refresh_f(callback_f, this.canvas );
},
});
// Stand-in for window.performance on browsers without the Performance API:
// every timestamp is reported as 0, which effectively disables the engine's
// timing metrics without requiring callers to branch.
Flynn.PerformanceNull = {
    now: function(){ return 0; },
};
}()); // "use strict" wrapper
|
# Django settings for the CST research API project.
import os
from pathlib import Path
# import django_on_heroku
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# NOTE(review): this Path-based BASE_DIR is shadowed by the str-based
# reassignment further below and is effectively dead -- consider removing
# one of the two definitions.
BASE_DIR = Path(__file__).resolve().parent.parent
# NOTE(review): hard-coded secret key checked into source -- should be read
# from the environment in production (see the commented alternatives below).
SECRET_KEY = 'django-insecure-k^8bcdroc@(jkkq%va22otz1n2(rrd68s!c9a$m(*rq%x0lqry'
# SECRET_KEY = os.environ['SECRET_KEY']
# SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag')
# DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', 'cst-research-api.herokuapp.com']
# ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # Project apps
    'research',
    # 'users',
    'teaching_activities',
    'cst_data',
    # 'publications',
    # Third-party apps
    'isbn_field',
    'djmoney',
    'rest_auth.registration',
    'rest_framework',
    'allauth',
    'allauth.account',
    'rest_auth',
    'rest_framework.authtoken',
    'allauth.socialaccount',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # WhiteNoise serves static files directly from the app process (Heroku).
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# NOTE(review): this setting belongs to django-cors-headers, but neither the
# 'corsheaders' app nor its middleware is installed above -- as written it
# appears to have no effect. Verify CORS actually works for API clients.
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# WSGI_APPLICATION = 'home.wsgi.dev.application'
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Local development defaults; overridden below from $DATABASE_URL when set.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'cst_updated',
        'USER': 'postgres',
        'PASSWORD': 'root',
        'HOST': 'localhost',
        'PORT': '5432'
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Required by django.contrib.sites / allauth.
SITE_ID = 1
# BASE_DIR = Path(__file__).resolve().parent.parent
# NOTE(review): redefines BASE_DIR as a str, shadowing the pathlib.Path
# version at the top of this file. os.path.join below works with either;
# pick one representation for consistency.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Heroku: Update database configuration from $DATABASE_URL.
# When DATABASE_URL is unset (local dev) this returns {} and the local
# Postgres settings above are left untouched.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# # Activate Django-Heroku.
# django_on_heroku.settings(locals())
|
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from qiskit_acqua.ui.run._model import Model
from qiskit_acqua import get_qconfig,QuantumAlgorithm
from qiskit_acqua.ui.run._customwidgets import EntryPopup, ComboboxPopup, TextPopup
import psutil
import os
import subprocess
import threading
import queue
import tempfile
import tkinter as tk
from tkinter import messagebox
from qiskit_acqua.parser import InputParser
import json
import ast
import sys
import logging
# Module-level logger for this UI controller module.
logger = logging.getLogger(__name__)
class Controller(object):
_START, _STOP = 'Start', 'Stop'
def __init__(self,view):
self._view = view
self._model = Model()
self._filemenu = None
self._title = tk.StringVar()
self._sectionsView = None
self._emptyView = None
self._sectionView_title = tk.StringVar()
self._propertiesView = None
self._textView = None
self._outputView = None
self._progress = None
self._button_text = None
self._start_button = None
self._thread_queue = queue.Queue()
self._thread = None
self._command = Controller._START
self._process_stop = False
self._validate_integer_command = self._view.register(Controller._validate_integer)
self._validate_float_command = self._view.register(Controller._validate_float)
self._available_backends = []
self._backendsthread = None
self.get_available_backends()
@staticmethod
def _validate_integer(action, index, value_if_allowed,
prior_value, text, validation_type, trigger_type, widget_name):
# action=1 -> insert
if action != '1':
return True
if value_if_allowed == '+' or value_if_allowed == '-':
return True
try:
int(value_if_allowed)
return True
except ValueError:
return False
@staticmethod
def _validate_float(action, index, value_if_allowed,
prior_value, text, validation_type, trigger_type, widget_name):
# action=1 -> insert
if action != '1':
return True
if value_if_allowed == '+' or value_if_allowed == '-':
return True
if value_if_allowed is not None:
index = value_if_allowed.find('e')
if index == 0:
return False
if index > 0:
try:
float(value_if_allowed[:index])
except ValueError:
return False
if index < len(value_if_allowed) - 1:
right = value_if_allowed[index+1:]
if right == '+' or right == '-':
return True
try:
int(right)
except ValueError:
return False
return True
try:
float(value_if_allowed)
return True
except ValueError:
return False
def get_available_backends(self):
if self._backendsthread is not None:
return
self._backendsthread = threading.Thread(target=self._get_available_backends,
name='Chemistry remote backends')
self._backendsthread.daemon = True
self._backendsthread.start()
def _get_available_backends(self):
try:
qconfig = get_qconfig()
if qconfig is None or \
qconfig.APItoken is None or \
len(qconfig.APItoken) == 0 or \
'url' not in qconfig.config:
qconfig = None
self._available_backends = QuantumAlgorithm.register_and_get_operational_backends(qconfig)
except Exception as e:
logger.debug(str(e))
finally:
self._backendsthread = None
def new_input(self):
try:
self.stop()
self._outputView.clear()
self._start_button.state(['disabled'])
self._title.set('')
self._sectionsView.clear()
self._sectionsView.show_add_button(True)
self._sectionsView.show_remove_button(False)
self._textView.clear()
self._sectionView_title.set('')
self._propertiesView.clear()
self._propertiesView.show_remove_button(False)
self._emptyView.tkraise()
section_names = self._model.new()
self._sectionsView.populate(section_names)
self._start_button.state(['!disabled'])
missing = self.get_sections_names_missing()
self._sectionsView.show_add_button(True if missing else False)
return True
except Exception as e:
self._outputView.clear()
self._outputView.write_line(str(e))
return False
def open_file(self,filename):
try:
self.stop()
self._outputView.clear()
self._start_button.state(['disabled'])
self._title.set('')
self._sectionsView.clear()
self._sectionsView.show_add_button(True)
self._sectionsView.show_remove_button(False)
self._textView.clear()
self._sectionView_title.set('')
self._propertiesView.clear()
self._propertiesView.show_remove_button(False)
self._emptyView.tkraise()
section_names = self._model.load_file(filename)
self._title.set(os.path.basename(filename))
if len(section_names) == 0:
self._outputView.write_line('No sections found on file')
return
self._sectionsView.populate(section_names)
self._start_button.state(['!disabled'])
missing = self.get_sections_names_missing()
self._sectionsView.show_add_button(True if missing else False)
return True
except Exception as e:
self._outputView.clear()
self._outputView.write_line(str(e))
return False
def is_empty(self):
return self._model.is_empty()
def save_file(self):
filename = self._model.get_filename()
if filename is None or len(filename) == 0:
self._outputView.write_line("No file to save.")
return False
try:
self._model.save_to_file(filename)
self._outputView.write_line("Saved file: {}".format(filename))
return True
except Exception as e:
messagebox.showerror("Error",str(e))
return False
def save_file_as(self,filename):
try:
self._model.save_to_file(filename)
self.open_file(filename)
return True
except Exception as e:
messagebox.showerror("Error",str(e))
return False
def on_section_select(self,section_name):
self._sectionsView.show_remove_button(True)
self._sectionView_title.set(section_name)
if self._model.section_is_text(section_name):
self._textView.populate(self._model.get_section_text(section_name))
self._textView.section_name = section_name
self._textView.show_add_button(False)
self._textView.show_remove_button(False)
self._textView.show_defaults_button(not self._model.default_properties_equals_properties(section_name))
self._textView.tkraise()
else:
self._propertiesView.show_add_button(self.shows_add_button(section_name))
self._propertiesView.populate(self._model.get_section_properties(section_name))
self._propertiesView.section_name = section_name
self._propertiesView.show_remove_button(False)
self._propertiesView.show_defaults_button(not self._model.default_properties_equals_properties(section_name))
self._propertiesView.tkraise()
def on_property_select(self,section_name,property_name):
self._propertiesView.show_remove_button(property_name != InputParser.NAME)
def on_section_add(self,section_name):
try:
if section_name is None:
section_name = ''
section_name = section_name.lower().strip()
if len(section_name) == 0:
return False
self._model.set_section(section_name)
missing = self.get_sections_names_missing()
self._sectionsView.show_add_button(True if missing else False)
except Exception as e:
messagebox.showerror("Error",str(e))
return False
return True
def validate_section_add(self,section_name):
try:
if section_name in self._model.get_section_names():
return'Duplicate section name'
except Exception as e:
return e.message
return None
def on_section_remove(self,section_name):
try:
self._sectionsView.show_remove_button(False)
self._model.delete_section(section_name)
missing = self.get_sections_names_missing()
self._sectionsView.show_add_button(True if missing else False)
self._sectionView_title.set('')
self._propertiesView.clear()
self._textView.clear()
self._emptyView.tkraise()
except Exception as e:
messagebox.showerror("Error",str(e))
return False
return True
def on_section_defaults(self,section_name):
try:
self._model.set_default_properties_for_name(section_name)
self.on_section_select(section_name)
return True
except Exception as e:
messagebox.showerror("Error",str(e))
return False
def get_sections_names_missing(self):
try:
section_names = self._model.get_section_names()
default_sections = self._model.get_default_sections()
return list(set(default_sections.keys()) - set(section_names))
except Exception as e:
self._outputView.write_line(str(e))
def get_property_names_missing(self,section_name):
try:
properties = self._model.get_section_properties(section_name)
default_properties = self._model.get_section_default_properties(section_name)
if default_properties is None:
return None
return list(set(default_properties.keys()) - set(properties.keys()))
except Exception as e:
self._outputView.write_line(str(e))
def shows_add_button(self,section_name):
if self._model.allows_additional_properties(section_name):
return True
missing = self.get_property_names_missing(section_name)
return missing is None or len(missing) > 0
def on_property_add(self,section_name,property_name):
try:
value = self._model.get_property_default_value(section_name,property_name)
if value is None:
value = ''
return self.on_property_set(section_name,property_name,value)
except Exception as e:
messagebox.showerror("Error",str(e))
return False
def on_property_set(self,section_name,property_name,value):
try:
self._model.set_section_property(section_name,property_name,value)
except Exception as e:
messagebox.showerror("Error",str(e))
return False
try:
self._propertiesView.populate(self._model.get_section_properties(section_name))
self._propertiesView.show_add_button(self.shows_add_button(section_name))
self._propertiesView.show_remove_button(
property_name != InputParser.NAME and self._propertiesView.has_selection())
self._propertiesView.show_defaults_button(not self._model.default_properties_equals_properties(section_name))
section_names = self._model.get_section_names()
self._sectionsView.populate(section_names,section_name)
missing = self.get_sections_names_missing()
self._sectionsView.show_add_button(True if missing else False)
return True
except Exception as e:
messagebox.showerror("Error",str(e))
return False
def validate_property_add(self,section_name,property_name):
try:
value = self._model.get_section_property(section_name,property_name)
if value is not None:
return 'Duplicate property name'
except Exception as e:
return e.message
return None
def on_section_property_remove(self,section_name,property_name):
try:
self._model.delete_section_property(section_name,property_name)
self._propertiesView.populate(self._model.get_section_properties(section_name))
self._propertiesView.show_add_button(self.shows_add_button(section_name))
self._propertiesView.show_remove_button(False)
self._propertiesView.show_defaults_button(not self._model.default_properties_equals_properties(section_name))
except Exception as e:
self._outputView.write_line(str(e))
def on_text_set(self,section_name,value):
try:
self._model.set_section_text(section_name,value)
self._textView.show_defaults_button(not self._model.default_properties_equals_properties(section_name))
except Exception as e:
self._outputView.write_line(str(e))
return False
return True
def create_popup(self,section_name,property_name,parent,value):
values = None
types = ['string']
if InputParser.NAME == property_name and InputParser.INPUT == section_name:
values = self._model.get_input_section_names()
elif InputParser.NAME == property_name and Model.is_pluggable_section(section_name):
values = self._model.get_pluggable_section_names(section_name)
elif InputParser.BACKEND == section_name and InputParser.NAME == property_name:
values = self._available_backends
else:
values = self._model.get_property_default_values(section_name,property_name)
types = self._model.get_property_types(section_name,property_name)
if values is not None:
value = '' if value is None else str(value)
values = [str(v) for v in values]
widget = ComboboxPopup(self,section_name,
property_name,
parent,
exportselection=0,
state='readonly',
values=values)
widget._text = value
if len(values) > 0:
if value in values:
widget.current(values.index(value))
else:
widget.current(0)
return widget
value = '' if value is None else value
if 'number' in types or 'integer' in types:
vcmd = self._validate_integer_command if 'integer' in types else self._validate_float_command
vcmd = (vcmd,'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
widget = EntryPopup(self,
section_name,
property_name,
parent,
value,
validate='all',
validatecommand=vcmd,
state=tk.NORMAL)
widget.selectAll()
return widget
if 'object' in types or 'array' in types:
try:
if isinstance(value,str):
value = value.strip()
if len(value) > 0:
value = ast.literal_eval(value)
if isinstance(value,dict) or isinstance(value,list):
value = json.dumps(value, sort_keys=True, indent=4)
except:
pass
widget = TextPopup(self,
section_name,
property_name,
parent,
value)
widget.selectAll()
return widget
def toggle(self):
if self._model.is_empty():
self._outputView.write_line("Missing Input")
return
self._start_button.state(['disabled'])
self._filemenu.entryconfig(0,state='disabled')
self._filemenu.entryconfig(1,state='disabled')
self._filemenu.entryconfig(2,state='disabled')
self._view.after(100, self._process_thread_queue)
try:
if self._command is Controller._START:
self._outputView.clear()
self._thread = AlgoritthmThread(self._model,self._outputView,self._thread_queue)
self._thread.daemon = True
self._thread.start()
else:
self.stop()
except Exception as e:
self._thread = None
self._thread_queue.put(None)
self._outputView.write_line("Failure: {}".format(str(e)))
self._start_button.state(['!disabled'])
self._filemenu.entryconfig(0,state='normal')
self._filemenu.entryconfig(1,state='normal')
self._filemenu.entryconfig(2,state='normal')
def stop(self):
    """Request termination of the currently running algorithm thread."""
    running = self._thread
    if running is not None:
        # Kill on a helper thread so the UI never blocks on process teardown.
        killer = threading.Thread(target=Controller._stop,
                                  name='Chemistry stop thread',
                                  args=(running,))
        killer.daemon = True
        killer.start()
    self._outputView.clear_buffer()
    self._thread = None
    self._process_stop = True
    self._thread_queue.put(Controller._STOP)
@staticmethod
def _stop(thread):
    """Best-effort stop of a worker thread; every failure is swallowed."""
    if thread is None:
        return
    try:
        thread.stop()
    except:
        pass
def _process_thread_queue(self):
    """Poll the worker queue and update the UI accordingly.

    Runs on the Tk event loop: reschedules itself every 100 ms until
    either the None sentinel (failed start) or a fully-processed
    Controller._STOP is read.
    """
    try:
        line = self._thread_queue.get_nowait()
        if line is None:
            # Sentinel posted on start failure: stop polling.
            return
        elif line is Controller._START:
            # Worker is up: animate progress and flip the button to Stop.
            self._progress.start(500)
            self._command = Controller._STOP
            self._button_text.set(self._command)
            self._start_button.state(['!disabled'])
        elif line is Controller._STOP:
            if not self._outputView.buffer_empty():
                # repost stop
                self._thread_queue.put(Controller._STOP)
            else:
                # Output fully flushed: restore the idle UI state.
                self._thread = None
                self._progress.stop()
                self._command = Controller._START
                self._button_text.set(self._command)
                self._start_button.state(['!disabled'])
                self._filemenu.entryconfig(0,state='normal')
                self._filemenu.entryconfig(1,state='normal')
                self._filemenu.entryconfig(2,state='normal')
                if self._process_stop:
                    self._process_stop = False
                    self._outputView.write_line('Process stopped.')
                # Final stop handled: cease polling.
                return
        self._view.update_idletasks()
    except:
        # Queue empty (raises queue.Empty) or UI briefly unavailable:
        # just poll again later.
        pass
    self._view.after(100, self._process_thread_queue)
class AlgoritthmThread(threading.Thread):
    """Worker thread that runs the configured algorithm in a subprocess.

    Subprocess output lines are forwarded to the output view, and
    Controller._START/_STOP markers are posted on the thread queue so
    the GUI can track the run's lifecycle.  (The class name keeps its
    historical misspelling because callers construct it by that name.)
    """
    def __init__(self,model,output,queue):
        super(AlgoritthmThread, self).__init__(name='Algorithm run thread')
        self._model = model              # input model to run
        self._output = output            # view receiving subprocess output
        self._thread_queue = queue       # lifecycle notifications to the GUI
        self._popen = None               # running subprocess, if any
    def stop(self):
        """Detach from the GUI and kill the subprocess tree, if running."""
        self._output = None
        self._thread_queue = None
        if self._popen is not None:
            p = self._popen
            self._kill(p.pid)
            p.stdout.close()
    def _kill(self,proc_pid):
        # Kill the subprocess and all of its children; report failures
        # to the output view while still attached.
        try:
            process = psutil.Process(proc_pid)
            for proc in process.children(recursive=True):
                proc.kill()
            process.kill()
        except Exception as e:
            if self._output is not None:
                self._output.write_line('Process kill has failed: {}'.format(str(e)))
    def run(self):
        """Save the model (to a temp file if needed), run it, stream output."""
        input_file = None
        temp_input = False
        try:
            # The runnable algorithms package lives two levels above this file.
            algorithms_directory = os.path.dirname(os.path.realpath(__file__))
            algorithms_directory = os.path.abspath(os.path.join(algorithms_directory,'../..'))
            input_file = self._model.get_filename()
            if input_file is None or self._model.is_modified():
                # Unsaved or modified model: write it to a temporary file
                # that is removed again in the finally block.
                fd,input_file = tempfile.mkstemp(suffix='.in')
                os.close(fd)
                temp_input = True
                self._model.save_to_file(input_file)
            startupinfo = None
            process_name = psutil.Process().exe()
            if process_name is None or len(process_name) == 0:
                process_name = 'python'
            else:
                if sys.platform == 'win32' and process_name.endswith('pythonw.exe'):
                    # pythonw.exe cannot pipe console output; look for a
                    # console python interpreter in the same directory.
                    path = os.path.dirname(process_name)
                    files = [f for f in os.listdir(path) if f != 'pythonw.exe' and f.startswith('python') and f.endswith('.exe')]
                    # sort reverse to have higher python versions first: python3.exe before python2.exe
                    files = sorted(files,key=str.lower, reverse=True)
                    new_process = None
                    for file in files:
                        p = os.path.join(path,file)
                        if os.path.isfile(p):
                            # python.exe takes precedence
                            if file.lower() == 'python.exe':
                                new_process = p
                                break
                            # use first found
                            if new_process is None:
                                new_process = p
                    if new_process is not None:
                        # Hide the console window of the child process.
                        startupinfo = subprocess.STARTUPINFO()
                        startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
                        startupinfo.wShowWindow = subprocess.SW_HIDE
                        process_name = new_process
            if self._output is not None and logger.getEffectiveLevel() == logging.DEBUG:
                self._output.write('Process: {}\n'.format(process_name))
            self._popen = subprocess.Popen([process_name,
                                            algorithms_directory,
                                            input_file],
                                           stdin=subprocess.DEVNULL,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.STDOUT,
                                           universal_newlines=True,
                                           startupinfo=startupinfo)
            if self._thread_queue is not None:
                self._thread_queue.put(Controller._START)
            # Forward subprocess output line by line until EOF.
            for line in iter(self._popen.stdout.readline,''):
                if self._output is not None:
                    self._output.write(str(line))
            self._popen.stdout.close()
            self._popen.wait()
        except Exception as e:
            if self._output is not None:
                self._output.write('Process has failed: {}'.format(str(e)))
        finally:
            self._popen = None
            if self._thread_queue is not None:
                # Tell the GUI the run is over (success or failure).
                self._thread_queue.put(Controller._STOP)
            if temp_input and input_file is not None:
                os.remove(input_file)
                input_file = None
|
#!/usr/bin/env python
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Runner for standalone program tests of Nuitka.
These tests aim at showing that one specific module works in standalone
mode, trying to find issues with that packaging.
"""
import os
import sys
# Find nuitka package relative to us. The replacement is for POSIX python
# and Windows paths on command line.
sys.path.insert(
0,
os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__.replace("\\", os.sep))), "..", ".."
)
),
)
# isort:start
from nuitka.tools.testing.Common import (
checkLoadedFileAccesses,
checkRequirements,
compareWithCPython,
createSearchMode,
decideFilenameVersionSkip,
displayFileContents,
displayFolderContents,
displayRuntimeTraces,
getRuntimeTraceOfLoadedFiles,
reportSkip,
setup,
test_logger,
)
from nuitka.utils.FileOperations import removeDirectory
from nuitka.utils.Timing import TimerReport
from nuitka.utils.Utils import getOS
def displayError(dirname, filename):
    """Dump the dist folder and the inclusion log of a failed test."""
    assert dirname is None
    stem = filename[:-3]
    displayFolderContents("dist folder", stem + ".dist")
    displayFileContents("inclusion log", stem + ".py.inclusion.log")
def main():
    """Run every standalone-mode test in the current directory.

    For each eligible *.py file: compile it standalone, compare the
    compiled program's output with CPython's, verify no glibc libraries
    were shipped into the dist folder, trace run-time file accesses of
    the binary, and remove the dist folder again.
    """
    # Complex stuff, even more should become common code or project options though.
    # pylint: disable=too-many-branches,too-many-statements
    python_version = setup(needs_io_encoding=True)
    search_mode = createSearchMode()
    for filename in sorted(os.listdir(".")):
        if not filename.endswith(".py"):
            continue
        if not decideFilenameVersionSkip(filename):
            continue
        active = search_mode.consider(dirname=None, filename=filename)
        if not active:
            test_logger.info("Skipping %s" % filename)
            continue
        extra_flags = [
            "expect_success",
            "--standalone",
            "remove_output",
            # Cache the CPython results for re-use, they will normally not change.
            "cpython_cache",
            # To understand what is slow.
            "timing",
            # TODO: This plugin probably ought to be on by default.
            "plugin_enable:pkg-resources",
        ]
        # skip each test if their respective requirements are not met
        requirements_met, error_message = checkRequirements(filename)
        if not requirements_met:
            reportSkip(error_message, ".", filename)
            continue
        # catch error
        if filename == "Boto3Using.py":
            reportSkip("boto3 test not fully working yet", ".", filename)
            continue
        if "Idna" in filename:
            # For the warnings of Python2.
            if python_version < (3,):
                extra_flags.append("ignore_stderr")
        if filename == "CtypesUsing.py":
            extra_flags.append("plugin_disable:pylint-warnings")
        if filename == "GtkUsing.py":
            # Don't test on platforms not supported by current Debian testing, and
            # which should be considered irrelevant by now.
            if python_version < (2, 7):
                reportSkip("irrelevant Python version", ".", filename)
                continue
            # For the warnings.
            extra_flags.append("ignore_warnings")
        if filename.startswith("Win"):
            if os.name != "nt":
                reportSkip("Windows only test", ".", filename)
                continue
        if filename == "TkInterUsing.py":
            if getOS() == "Darwin":
                reportSkip("Not working macOS yet", ".", filename)
                continue
            if getOS() == "Windows":
                reportSkip("Can hang on Windows CI.", ".", filename)
                continue
            # For the plug-in information.
            extra_flags.append("plugin_enable:tk-inter")
        if filename == "FlaskUsing.py":
            # For the warnings.
            extra_flags.append("ignore_warnings")
        if filename == "NumpyUsing.py":
            extra_flags.append("plugin_enable:numpy")
            # TODO: Disabled for now.
            reportSkip("numpy.test not fully working yet", ".", filename)
            continue
        if filename == "PandasUsing.py":
            extra_flags.append("plugin_enable:numpy")
            extra_flags.append("plugin_disable:pylint-warnings")
            extra_flags.append("plugin_disable:pyqt5")
            extra_flags.append("plugin_disable:pyside2")
            extra_flags.append("plugin_disable:pyside6")
        if filename == "PmwUsing.py":
            extra_flags.append("plugin_enable:pmw-freezer")
        if filename == "OpenGLUsing.py":
            # For the warnings.
            extra_flags.append("ignore_warnings")
        if filename == "GlfwUsing.py":
            # For the warnings.
            extra_flags.append("plugin_enable:glfw")
            extra_flags.append("plugin_enable:numpy")
        if filename == "PasslibUsing.py":
            # For the warnings.
            extra_flags.append("ignore_warnings")
        if filename == "Win32ComUsing.py":
            # For the warnings.
            extra_flags.append("ignore_warnings")
        if filename.startswith(("PySide2", "PySide6", "PyQt5", "PyQt6")):
            # Don't test on platforms not supported by current Debian testing, and
            # which should be considered irrelevant by now.
            if python_version < (2, 7) or ((3,) <= python_version < (3, 7)):
                reportSkip("irrelevant Python version", ".", filename)
                continue
            # For the plug-in information
            if filename.startswith("PySide2"):
                # Fixed: previously enabled the "pyside6" plugin here,
                # which does not match PySide2 tests (see the parallel
                # branches below and the pyside2 disable for pandas).
                extra_flags.append("plugin_enable:pyside2")
            elif filename.startswith("PySide6"):
                extra_flags.append("plugin_enable:pyside6")
            elif filename.startswith("PyQt5"):
                extra_flags.append("plugin_enable:pyqt5")
            elif filename.startswith("PyQt6"):
                extra_flags.append("plugin_enable:pyqt6")
        test_logger.info(
            "Consider output of standalone mode compiled program: %s" % filename
        )
        # First compare so we know the program behaves identical.
        compareWithCPython(
            dirname=None,
            filename=filename,
            extra_flags=extra_flags,
            search_mode=search_mode,
            needs_2to3=False,
            on_error=displayError,
        )
        # Second check if glibc libraries haven't been accidentally
        # shipped with the standalone executable
        found_glibc_libs = []
        for dist_filename in os.listdir(os.path.join(filename[:-3] + ".dist")):
            if os.path.basename(dist_filename).startswith(
                (
                    "ld-linux-x86-64.so",
                    "libc.so.",
                    "libpthread.so.",
                    "libm.so.",
                    "libdl.so.",
                    "libBrokenLocale.so.",
                    "libSegFault.so",
                    "libanl.so.",
                    "libcidn.so.",
                    "libcrypt.so.",
                    "libmemusage.so",
                    "libmvec.so.",
                    "libnsl.so.",
                    "libnss_compat.so.",
                    "libnss_db.so.",
                    "libnss_dns.so.",
                    "libnss_files.so.",
                    "libnss_hesiod.so.",
                    "libnss_nis.so.",
                    "libnss_nisplus.so.",
                    "libpcprofile.so",
                    "libresolv.so.",
                    "librt.so.",
                    "libthread_db-1.0.so",
                    "libthread_db.so.",
                    "libutil.so.",
                )
            ):
                found_glibc_libs.append(dist_filename)
        if found_glibc_libs:
            test_logger.warning(
                "Should not ship glibc libraries with the standalone executable (found %s)"
                % found_glibc_libs
            )
            sys.exit(1)
        binary_filename = os.path.join(
            filename[:-3] + ".dist", filename[:-3] + (".exe" if os.name == "nt" else "")
        )
        # Then use "strace" on the result.
        with TimerReport(
            "Determining run time loaded files took %.2f", logger=test_logger
        ):
            loaded_filenames = getRuntimeTraceOfLoadedFiles(
                logger=test_logger, path=binary_filename
            )
        illegal_accesses = checkLoadedFileAccesses(
            loaded_filenames=loaded_filenames, current_dir=os.getcwd()
        )
        if illegal_accesses:
            displayError(None, filename)
            displayRuntimeTraces(test_logger, binary_filename)
            test_logger.warning(
                "Should not access these file(s): '%r'." % illegal_accesses
            )
            search_mode.onErrorDetected(1)
        removeDirectory(filename[:-3] + ".dist", ignore_errors=True)
    search_mode.finish()
# Allow running this test runner directly as a script.
if __name__ == "__main__":
    main()
|
// Namespace bootstrap: reuse an existing tabageos object or start a new one.
tabageos = tabageos || {};
/*
* Scales the game based on window.innerWidth/Height and gameWidth/Height
*
* gameWidth - the width of the game
* gameHeight - the height of the game
* divideScaleXBy - amount to divide the scaleX by (1.0 to 1.9)
* divideScaleYBy - amount to divide the scaleY by
* container - reference to the container div element that holds the game and controller canvas element
* if null is passed no resizing happens
* controller - reference to the controller canvas element
* (container through the end are optional params)
* showController - Boolean
* controllerStyle - 1 = 'basicController', 2 = 'directionalsController' or you can pass your own String.
* sets the controllers canvas elements style id. (see ControllerPad.css and ControllerPad.show())
* dontPositionController - Boolean, optional if you have placed the controller in your own specific way.
* Otherwise the controllers canvas elements style top will be set as controller.y and the left as controller.x.
* cW = controller width - default is 640
* cH = controller height - default is 192
*
*
*
*/
tabageos.ResizeGame = function(gameWidth, gameHeight, divideScaleXBy, divideScaleYBy, container, controller, showController, controllerStyle, scaleRectReference, dontPositionController, cW, cH, camera, cmScaleX, cmScaleY) {
    // NOTE(review): dontPositionController is documented above but never
    // read in this body - TODO confirm whether positioning was meant here.
    // Scale factors required to fill the current window.
    var scaleX = window.innerWidth / gameWidth;
    var scaleY = window.innerHeight / gameHeight;
    // Show or hide the on-screen controller before resizing.
    if (controller && !showController) {
        controller.hide();
    } else if (showController && controller) {
        // (Removed leftover debug console.log of cW/cH here.)
        controller.show(cW || 640, cH || 192, controllerStyle || 1);
    }
    if (container) {
        container.style.transformOrigin = "0 0";
        container.style.transform = "scale(" + (divideScaleXBy ? scaleX / divideScaleXBy : 1) + "," + (divideScaleYBy ? scaleY / divideScaleYBy : 1) + ")";
        var scaleRect = container.getBoundingClientRect();
        if (camera) {
            // The camera's render layer is scaled independently with cmScaleX/cmScaleY.
            camera.layerToRender.canvas.style.transformOrigin = "0 0";
            camera.layerToRender.canvas.style.transform = "scale(" + (divideScaleXBy ? cmScaleX / divideScaleXBy : 1) + "," + (divideScaleYBy ? cmScaleY / divideScaleYBy : 1) + ")";
        }
        if (scaleRectReference) {
            scaleRectReference.width = scaleRect.width;
            scaleRectReference.height = scaleRect.height;
        }
        // Keep mouse coordinates aligned with the scaled canvas.
        tabageos.MouseController.defineMousePositionOffset(gameWidth, gameHeight, scaleRect.width, scaleRect.height);
    }
};
tabageos.seekTouch = function() {
    // 1 when any touch capability is detectable, otherwise 0.
    if ('ontouchstart' in window) {
        return 1;
    }
    return navigator.maxTouchPoints ? 1 : 0;
};
tabageos.combineTwoNumbers = function(a, b) {
    // Pack two 16-bit values into one 32-bit integer: a in the high half,
    // b in the low half.  b is masked to 16 bits so that
    // getAFromCombined/getBFromCombined always round-trip; previously an
    // out-of-range or negative b bled into a's bits.  In-range inputs are
    // unaffected.
    return a << 16 | (b & 0xFFFF);
};
tabageos.getAFromCombined = function(num) {
    // High 16 bits of a value packed by combineTwoNumbers (arithmetic shift).
    var high = num >> 16;
    return high;
};
tabageos.getBFromCombined = function(num) {
    // Low 16 bits of a value packed by combineTwoNumbers.
    var low = num & 0xFFFF;
    return low;
};
tabageos.splitNumberAtDecimal = function(num, direction) {
    var mag = Math.abs(num);
    var whole = Math.floor(mag);
    // direction 1: whole part, carrying the original sign.
    if (direction === 1) {
        return num < 0 ? -whole : whole;
    }
    // Otherwise return the fractional digits, preferring the shortest
    // representation (tenths, then hundredths, then thousandths).
    var frac = mag - whole;
    var tenths = Math.round(frac * 10);
    if (whole + tenths / 10 === mag) {
        return tenths;
    }
    var hundredths = Math.round(frac * 100);
    var approx = whole + hundredths / 100;
    if (approx <= mag + .001 && approx >= mag) {
        return hundredths;
    }
    return Math.round(frac * 1000);
};
tabageos.stringSplitNumberAtDecimal = function(num, direction) {
    // direction 1: whole part, carrying the original sign.
    if (direction === 1) {
        var whole = Math.floor(Math.abs(num));
        return num < 0 ? -whole : whole;
    }
    // Otherwise: digits after the decimal point, read from the string form.
    var parts = (num + "").split(".");
    return Number(parts[1]);
};
|
/*
* Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <config.h>
#include "ofp-actions.h"
#include "bundle.h"
#include "byte-order.h"
#include "compiler.h"
#include "dynamic-string.h"
#include "hmap.h"
#include "learn.h"
#include "meta-flow.h"
#include "multipath.h"
#include "nx-match.h"
#include "ofp-parse.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "unaligned.h"
#include "util.h"
#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(ofp_actions);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
struct ofp_action_header;
/* Raw identifiers for OpenFlow actions.
*
* Decoding and encoding OpenFlow actions across multiple versions is difficult
* to do in a clean, consistent way. This enumeration lays out all of the
* forms of actions that Open vSwitch supports.
*
* The comments here must follow a stylized form because the
* "extract-ofp-actions" program parses them at build time to generate data
* tables.
*
* - The first part of each comment specifies the vendor, OpenFlow versions,
* and type for each protocol that supports the action:
*
* # The vendor is OF for standard OpenFlow actions, NX for Nicira
* extension actions. (Support for other vendors can be added, but
* it can't be done just based on a vendor ID definition alone
* because OpenFlow doesn't define a standard way to specify a
* subtype for vendor actions, so other vendors might do it different
* from Nicira.)
*
* # The version can specify a specific OpenFlow version, a version
* range delimited by "-", or an open-ended range with "+".
*
* # The type, in parentheses, is the action type number (for standard
* OpenFlow actions) or subtype (for vendor extension actions).
*
* # Optionally one may add "is deprecated" followed by a
* human-readable reason in parentheses (which will be used in log
* messages), if a particular action should no longer be used.
*
* Multiple such specifications may be separated by commas.
*
* - The second part describes the action's wire format. It may be:
*
* # "struct <name>": The struct fully specifies the wire format. The
* action is exactly the size of the struct. (Thus, the struct must
* be an exact multiple of 8 bytes in size.)
*
* # "struct <name>, ...": The struct specifies the beginning of the
* wire format. An instance of the action is either the struct's
* exact size, or a multiple of 8 bytes longer.
*
* # "uint<N>_t" or "ovs_be<N>": The action consists of a (standard or
* vendor extension) header, followed by 0 or more pad bytes to align
* to a multiple of <N> bits, followed by an argument of the given
* type, followed by 0 or more pad bytes to bring the total action up
* to a multiple of 8 bytes.
*
* # "void": The action is just a (standard or vendor extension)
* header.
*
* - Optional additional text enclosed in square brackets is commentary for
* the human reader.
*/
enum ofp_raw_action_type {
/* ## ----------------- ## */
/* ## Standard actions. ## */
/* ## ----------------- ## */
/* OF1.0(0): struct ofp10_action_output. */
OFPAT_RAW10_OUTPUT,
/* OF1.1+(0): struct ofp11_action_output. */
OFPAT_RAW11_OUTPUT,
/* OF1.0(1): uint16_t. */
OFPAT_RAW10_SET_VLAN_VID,
/* OF1.0(2): uint8_t. */
OFPAT_RAW10_SET_VLAN_PCP,
/* OF1.1(1), OF1.2+(1) is deprecated (use Set-Field): uint16_t.
*
* [Semantics differ slightly between the 1.0 and 1.1 versions of the VLAN
* modification actions: the 1.0 versions push a VLAN header if none is
* present, but the 1.1 versions do not. That is the only reason that we
* distinguish their raw action types.] */
OFPAT_RAW11_SET_VLAN_VID,
/* OF1.1(2), OF1.2+(2) is deprecated (use Set-Field): uint8_t. */
OFPAT_RAW11_SET_VLAN_PCP,
/* OF1.1+(17): ovs_be16.
*
* [The argument is the Ethertype, e.g. ETH_TYPE_VLAN_8021Q, not the VID or
* TCI.] */
OFPAT_RAW11_PUSH_VLAN,
/* OF1.0(3): void. */
OFPAT_RAW10_STRIP_VLAN,
/* OF1.1+(18): void. */
OFPAT_RAW11_POP_VLAN,
/* OF1.0(4), OF1.1(3), OF1.2+(3) is deprecated (use Set-Field): struct
* ofp_action_dl_addr. */
OFPAT_RAW_SET_DL_SRC,
/* OF1.0(5), OF1.1(4), OF1.2+(4) is deprecated (use Set-Field): struct
* ofp_action_dl_addr. */
OFPAT_RAW_SET_DL_DST,
/* OF1.0(6), OF1.1(5), OF1.2+(5) is deprecated (use Set-Field):
* ovs_be32. */
OFPAT_RAW_SET_NW_SRC,
/* OF1.0(7), OF1.1(6), OF1.2+(6) is deprecated (use Set-Field):
* ovs_be32. */
OFPAT_RAW_SET_NW_DST,
/* OF1.0(8), OF1.1(7), OF1.2+(7) is deprecated (use Set-Field): uint8_t. */
OFPAT_RAW_SET_NW_TOS,
/* OF1.1(8), OF1.2+(8) is deprecated (use Set-Field): uint8_t. */
OFPAT_RAW11_SET_NW_ECN,
/* OF1.0(9), OF1.1(9), OF1.2+(9) is deprecated (use Set-Field):
* ovs_be16. */
OFPAT_RAW_SET_TP_SRC,
/* OF1.0(10), OF1.1(10), OF1.2+(10) is deprecated (use Set-Field):
* ovs_be16. */
OFPAT_RAW_SET_TP_DST,
/* OF1.0(11): struct ofp10_action_enqueue. */
OFPAT_RAW10_ENQUEUE,
/* NX1.0(30), OF1.1(13), OF1.2+(13) is deprecated (use Set-Field):
* ovs_be32. */
OFPAT_RAW_SET_MPLS_LABEL,
/* NX1.0(31), OF1.1(14), OF1.2+(14) is deprecated (use Set-Field):
* uint8_t. */
OFPAT_RAW_SET_MPLS_TC,
/* NX1.0(25), OF1.1(15), OF1.2+(15) is deprecated (use Set-Field):
* uint8_t. */
OFPAT_RAW_SET_MPLS_TTL,
/* NX1.0(26), OF1.1+(16): void. */
OFPAT_RAW_DEC_MPLS_TTL,
/* NX1.0(23), OF1.1+(19): ovs_be16.
*
* [The argument is the Ethertype, e.g. ETH_TYPE_MPLS, not the label.] */
OFPAT_RAW_PUSH_MPLS,
/* NX1.0(24), OF1.1+(20): ovs_be16.
*
* [The argument is the Ethertype, e.g. ETH_TYPE_IPV4 if at BoS or
* ETH_TYPE_MPLS otherwise, not the label.] */
OFPAT_RAW_POP_MPLS,
/* NX1.0(4), OF1.1+(21): uint32_t. */
OFPAT_RAW_SET_QUEUE,
/* OF1.1+(22): uint32_t. */
OFPAT_RAW11_GROUP,
/* OF1.1+(23): uint8_t. */
OFPAT_RAW11_SET_NW_TTL,
/* NX1.0(18), OF1.1+(24): void. */
OFPAT_RAW_DEC_NW_TTL,
/* NX1.0+(21): struct nx_action_cnt_ids, ... */
NXAST_RAW_DEC_TTL_CNT_IDS,
/* OF1.2-1.4(25): struct ofp12_action_set_field, ... */
OFPAT_RAW12_SET_FIELD,
/* OF1.5+(25): struct ofp12_action_set_field, ... */
OFPAT_RAW15_SET_FIELD,
/* NX1.0-1.4(7): struct nx_action_reg_load.
*
* [In OpenFlow 1.5, set_field is a superset of reg_load functionality, so
* we drop reg_load.] */
NXAST_RAW_REG_LOAD,
/* NX1.0-1.4(33): struct nx_action_reg_load2, ...
*
* [In OpenFlow 1.5, set_field is a superset of reg_load2 functionality, so
* we drop reg_load2.] */
NXAST_RAW_REG_LOAD2,
/* OF1.5+(28): struct ofp15_action_copy_field, ... */
OFPAT_RAW15_COPY_FIELD,
/* ONF1.3-1.4(3200): struct onf_action_copy_field, ... */
ONFACT_RAW13_COPY_FIELD,
/* NX1.0-1.4(6): struct nx_action_reg_move, ... */
NXAST_RAW_REG_MOVE,
/* ## ------------------------- ## */
/* ## Nicira extension actions. ## */
/* ## ------------------------- ## */
/* Actions similar to standard actions are listed with the standard actions. */
/* NX1.0+(1): uint16_t. */
NXAST_RAW_RESUBMIT,
/* NX1.0+(14): struct nx_action_resubmit. */
NXAST_RAW_RESUBMIT_TABLE,
/* NX1.0+(2): uint32_t. */
NXAST_RAW_SET_TUNNEL,
/* NX1.0+(9): uint64_t. */
NXAST_RAW_SET_TUNNEL64,
/* NX1.0+(5): void. */
NXAST_RAW_POP_QUEUE,
/* NX1.0+(8): struct nx_action_note, ... */
NXAST_RAW_NOTE,
/* NX1.0+(10): struct nx_action_multipath. */
NXAST_RAW_MULTIPATH,
/* NX1.0+(12): struct nx_action_bundle, ... */
NXAST_RAW_BUNDLE,
/* NX1.0+(13): struct nx_action_bundle, ... */
NXAST_RAW_BUNDLE_LOAD,
/* NX1.0+(15): struct nx_action_output_reg. */
NXAST_RAW_OUTPUT_REG,
/* NX1.0+(32): struct nx_action_output_reg2. */
NXAST_RAW_OUTPUT_REG2,
/* NX1.0+(16): struct nx_action_learn, ... */
NXAST_RAW_LEARN,
/* NX1.0+(17): void. */
NXAST_RAW_EXIT,
/* NX1.0+(19): struct nx_action_fin_timeout. */
NXAST_RAW_FIN_TIMEOUT,
/* NX1.0+(20): struct nx_action_controller. */
NXAST_RAW_CONTROLLER,
/* NX1.0+(22): struct nx_action_write_metadata. */
NXAST_RAW_WRITE_METADATA,
/* NX1.0+(27): struct nx_action_stack. */
NXAST_RAW_STACK_PUSH,
/* NX1.0+(28): struct nx_action_stack. */
NXAST_RAW_STACK_POP,
/* NX1.0+(29): struct nx_action_sample. */
NXAST_RAW_SAMPLE,
/* NX1.0+(34): struct nx_action_conjunction. */
NXAST_RAW_CONJUNCTION,
};
/* OpenFlow actions are always a multiple of 8 bytes in length. */
#define OFP_ACTION_ALIGN 8
/* Define a few functions for working with instructions. */
/* For every OpenFlow instruction listed in OVS_INSTRUCTIONS, generate
 * three inline helpers:
 *   - instruction_get_<ENUM>():  checked downcast from a generic
 *     ofp11_instruction pointer to the instruction's specific struct.
 *   - instruction_init_<ENUM>(): zero the struct and fill in its type
 *     and length header fields.
 *   - instruction_put_<ENUM>():  append a fresh, initialized instance
 *     to an ofpbuf and return a pointer to it. */
#define DEFINE_INST(ENUM, STRUCT, EXTENSIBLE, NAME)             \
    static inline const struct STRUCT * OVS_UNUSED              \
    instruction_get_##ENUM(const struct ofp11_instruction *inst)\
    {                                                           \
        ovs_assert(inst->type == htons(ENUM));                  \
        return ALIGNED_CAST(struct STRUCT *, inst);             \
    }                                                           \
                                                                \
    static inline void OVS_UNUSED                               \
    instruction_init_##ENUM(struct STRUCT *s)                   \
    {                                                           \
        memset(s, 0, sizeof *s);                                \
        s->type = htons(ENUM);                                  \
        s->len = htons(sizeof *s);                              \
    }                                                           \
                                                                \
    static inline struct STRUCT * OVS_UNUSED                    \
    instruction_put_##ENUM(struct ofpbuf *buf)                  \
    {                                                           \
        struct STRUCT *s = ofpbuf_put_uninit(buf, sizeof *s);   \
        instruction_init_##ENUM(s);                             \
        return s;                                               \
    }
/* Instantiate the helpers above for every known instruction. */
OVS_INSTRUCTIONS
#undef DEFINE_INST
static void ofpacts_update_instruction_actions(struct ofpbuf *openflow,
size_t ofs);
static void pad_ofpat(struct ofpbuf *openflow, size_t start_ofs);
static enum ofperr ofpacts_verify(const struct ofpact[], size_t ofpacts_len,
uint32_t allowed_ovsinsts);
static void ofpact_put_set_field(struct ofpbuf *openflow, enum ofp_version,
enum mf_field_id, uint64_t value);
static enum ofperr ofpact_pull_raw(struct ofpbuf *, enum ofp_version,
enum ofp_raw_action_type *, uint64_t *arg);
static void *ofpact_put_raw(struct ofpbuf *, enum ofp_version,
enum ofp_raw_action_type, uint64_t arg);
static char *OVS_WARN_UNUSED_RESULT ofpacts_parse(
char *str, struct ofpbuf *ofpacts, enum ofputil_protocol *usable_protocols,
bool allow_instructions);
#include "ofp-actions.inc1"
/* Output actions. */
/* Action structure for OFPAT10_OUTPUT, which sends packets out 'port'.
 * When the 'port' is the OFPP_CONTROLLER, 'max_len' indicates the max
 * number of bytes to send.  A 'max_len' of zero means no bytes of the
 * packet should be sent.
 *
 * All multi-byte fields are on the wire in network byte order. */
struct ofp10_action_output {
    ovs_be16 type;                  /* OFPAT10_OUTPUT. */
    ovs_be16 len;                   /* Length is 8. */
    ovs_be16 port;                  /* Output port. */
    ovs_be16 max_len;               /* Max length to send to controller. */
};
OFP_ASSERT(sizeof(struct ofp10_action_output) == 8);
/* Action structure for OFPAT_OUTPUT, which sends packets out 'port'.
 * When the 'port' is the OFPP_CONTROLLER, 'max_len' indicates the max
 * number of bytes to send.  A 'max_len' of zero means no bytes of the
 * packet should be sent.
 *
 * OpenFlow 1.1+ form: the port number widens to 32 bits and the action
 * is padded out to 16 bytes.  Multi-byte fields are network byte order. */
struct ofp11_action_output {
    ovs_be16 type;                  /* OFPAT11_OUTPUT. */
    ovs_be16 len;                   /* Length is 16. */
    ovs_be32 port;                  /* Output port. */
    ovs_be16 max_len;               /* Max length to send to controller. */
    uint8_t pad[6];                 /* Pad to 64 bits. */
};
OFP_ASSERT(sizeof(struct ofp11_action_output) == 16);
/* Converts an OpenFlow 1.0 output action to its ofpact form. */
static enum ofperr
decode_OFPAT_RAW10_OUTPUT(const struct ofp10_action_output *oao,
                          struct ofpbuf *out)
{
    struct ofpact_output *act = ofpact_put_OUTPUT(out);

    act->max_len = ntohs(oao->max_len);
    act->port = u16_to_ofp(ntohs(oao->port));
    return ofpact_check_output_port(act->port, OFPP_MAX);
}
static enum ofperr
decode_OFPAT_RAW11_OUTPUT(const struct ofp11_action_output *oao,
struct ofpbuf *out)
{
struct ofpact_output *output;
enum ofperr error;
output = ofpact_put_OUTPUT(out);
output->max_len = ntohs(oao->max_len);
error = ofputil_port_from_ofp11(oao->port, &output->port);
if (error) {
return error;
}
return ofpact_check_output_port(output->port, OFPP_MAX);
}
/* Serializes an output ofpact into the wire form for 'ofp_version'. */
static void
encode_OUTPUT(const struct ofpact_output *output,
              enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version != OFP10_VERSION) {
        struct ofp11_action_output *out11 = put_OFPAT11_OUTPUT(out);

        out11->port = ofputil_port_to_ofp11(output->port);
        out11->max_len = htons(output->max_len);
    } else {
        struct ofp10_action_output *out10 = put_OFPAT10_OUTPUT(out);

        out10->port = htons(ofp_to_u16(output->port));
        out10->max_len = htons(output->max_len);
    }
}
/* Parses an "output" argument; "[" selects the output-to-register form. */
static char * OVS_WARN_UNUSED_RESULT
parse_OUTPUT(const char *arg, struct ofpbuf *ofpacts,
             enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    if (strchr(arg, '[') != NULL) {
        struct ofpact_output_reg *oreg = ofpact_put_OUTPUT_REG(ofpacts);

        oreg->max_len = UINT16_MAX;
        return mf_parse_subfield(&oreg->src, arg);
    }

    struct ofpact_output *output = ofpact_put_OUTPUT(ofpacts);
    if (!ofputil_port_from_string(arg, &output->port)) {
        return xasprintf("%s: output to unknown port", arg);
    }
    output->max_len = output->port == OFPP_CONTROLLER ? UINT16_MAX : 0;
    return NULL;
}
/* Formats an output ofpact for display. */
static void
format_OUTPUT(const struct ofpact_output *a, struct ds *s)
{
    if (ofp_to_u16(a->port) >= ofp_to_u16(OFPP_MAX)) {
        /* Reserved port: show its symbolic name. */
        ofputil_format_port(a->port, s);
        if (a->port == OFPP_CONTROLLER) {
            ds_put_format(s, ":%"PRIu16, a->max_len);
        }
        return;
    }
    ds_put_format(s, "output:%"PRIu16, a->port);
}
/* Group actions. */
/* Converts an OpenFlow 1.1+ group action to its ofpact form. */
static enum ofperr
decode_OFPAT_RAW11_GROUP(uint32_t group_id, struct ofpbuf *out)
{
    struct ofpact_group *group = ofpact_put_GROUP(out);

    group->group_id = group_id;
    return 0;
}
/* Serializes a group ofpact; OpenFlow 1.0 has no group action. */
static void
encode_GROUP(const struct ofpact_group *group,
             enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version != OFP10_VERSION) {
        put_OFPAT11_GROUP(out, group->group_id);
    }
    /* XXX: nothing can be emitted for OpenFlow 1.0. */
}
/* Parses a "group" argument (a decimal group id). */
static char * OVS_WARN_UNUSED_RESULT
parse_GROUP(char *arg, struct ofpbuf *ofpacts,
            enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    struct ofpact_group *group = ofpact_put_GROUP(ofpacts);

    return str_to_u32(arg, &group->group_id);
}
/* Formats a group ofpact as "group:ID". */
static void
format_GROUP(const struct ofpact_group *a, struct ds *s)
{
    ds_put_format(s, "group:%"PRIu32, a->group_id);
}
/* Action structure for NXAST_CONTROLLER.
 *
 * This generalizes using OFPAT_OUTPUT to send a packet to OFPP_CONTROLLER.  In
 * addition to the 'max_len' that OFPAT_OUTPUT supports, it also allows
 * specifying:
 *
 *    - 'reason': The reason code to use in the ofp_packet_in or nx_packet_in.
 *
 *    - 'controller_id': The ID of the controller connection to which the
 *      ofp_packet_in should be sent.  The ofp_packet_in or nx_packet_in is
 *      sent only to controllers that have the specified controller connection
 *      ID.  See "struct nx_controller_id" for more information.
 *
 * Multi-byte fields are on the wire in network byte order. */
struct nx_action_controller {
    ovs_be16 type;                  /* OFPAT_VENDOR. */
    ovs_be16 len;                   /* Length is 16. */
    ovs_be32 vendor;                /* NX_VENDOR_ID. */
    ovs_be16 subtype;               /* NXAST_CONTROLLER. */
    ovs_be16 max_len;               /* Maximum length to send to controller. */
    ovs_be16 controller_id;         /* Controller ID to send packet-in. */
    uint8_t reason;                 /* enum ofp_packet_in_reason (OFPR_*). */
    uint8_t zero;                   /* Must be zero. */
};
OFP_ASSERT(sizeof(struct nx_action_controller) == 16);
/* Converts a Nicira controller action to its ofpact form. */
static enum ofperr
decode_NXAST_RAW_CONTROLLER(const struct nx_action_controller *nac,
                            struct ofpbuf *out)
{
    struct ofpact_controller *oc = ofpact_put_CONTROLLER(out);

    oc->reason = nac->reason;
    oc->controller_id = ntohs(nac->controller_id);
    oc->max_len = ntohs(nac->max_len);
    return 0;
}
/* Serializes a controller ofpact; the wire form is version independent. */
static void
encode_CONTROLLER(const struct ofpact_controller *controller,
                  enum ofp_version ofp_version OVS_UNUSED,
                  struct ofpbuf *out)
{
    struct nx_action_controller *nac = put_NXAST_CONTROLLER(out);

    nac->reason = controller->reason;
    nac->controller_id = htons(controller->controller_id);
    nac->max_len = htons(controller->max_len);
}
/* Parses a "controller" action argument.  Accepts three forms: empty
 * (all defaults), a plain number ("controller:MAX_LEN"), or a key-value
 * list ("controller(reason=...,max_len=...,id=...)").  Returns NULL on
 * success or a malloc'd error message the caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_CONTROLLER(char *arg, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    /* Defaults used for any key the argument omits. */
    enum ofp_packet_in_reason reason = OFPR_ACTION;
    uint16_t controller_id = 0;
    uint16_t max_len = UINT16_MAX;

    if (!arg[0]) {
        /* Use defaults. */
    } else if (strspn(arg, "0123456789") == strlen(arg)) {
        /* Plain number: only max_len was given. */
        char *error = str_to_u16(arg, "max_len", &max_len);
        if (error) {
            return error;
        }
    } else {
        /* Key-value form. */
        char *name, *value;

        while (ofputil_parse_key_value(&arg, &name, &value)) {
            if (!strcmp(name, "reason")) {
                if (!ofputil_packet_in_reason_from_string(value, &reason)) {
                    return xasprintf("unknown reason \"%s\"", value);
                }
            } else if (!strcmp(name, "max_len")) {
                char *error = str_to_u16(value, "max_len", &max_len);
                if (error) {
                    return error;
                }
            } else if (!strcmp(name, "id")) {
                char *error = str_to_u16(value, "id", &controller_id);
                if (error) {
                    return error;
                }
            } else {
                return xasprintf("unknown key \"%s\" parsing controller "
                                 "action", name);
            }
        }
    }

    if (reason == OFPR_ACTION && controller_id == 0) {
        /* Degenerate case: identical to a plain output-to-controller. */
        struct ofpact_output *output;

        output = ofpact_put_OUTPUT(ofpacts);
        output->port = OFPP_CONTROLLER;
        output->max_len = max_len;
    } else {
        struct ofpact_controller *controller;

        controller = ofpact_put_CONTROLLER(ofpacts);
        controller->max_len = max_len;
        controller->reason = reason;
        controller->controller_id = controller_id;
    }
    return NULL;
}
/* Formats a controller ofpact, mirroring the syntax parse_CONTROLLER()
 * accepts: the short "CONTROLLER:MAX_LEN" form when only max_len is
 * non-default, otherwise the "controller(...)" key-value form. */
static void
format_CONTROLLER(const struct ofpact_controller *a, struct ds *s)
{
    if (a->reason == OFPR_ACTION && a->controller_id == 0) {
        ds_put_format(s, "CONTROLLER:%"PRIu16, a->max_len);
    } else {
        enum ofp_packet_in_reason reason = a->reason;

        ds_put_cstr(s, "controller(");
        if (reason != OFPR_ACTION) {
            char reasonbuf[OFPUTIL_PACKET_IN_REASON_BUFSIZE];

            ds_put_format(s, "reason=%s,",
                          ofputil_packet_in_reason_to_string(
                              reason, reasonbuf, sizeof reasonbuf));
        }
        if (a->max_len != UINT16_MAX) {
            ds_put_format(s, "max_len=%"PRIu16",", a->max_len);
        }
        if (a->controller_id != 0) {
            ds_put_format(s, "id=%"PRIu16",", a->controller_id);
        }
        /* Drop the trailing comma left by whichever key printed last. */
        ds_chomp(s, ',');
        ds_put_char(s, ')');
    }
}
/* Enqueue action. */

/* OpenFlow 1.0 OFPAT10_ENQUEUE: output via a specific queue on a port. */
struct ofp10_action_enqueue {
    ovs_be16 type;            /* OFPAT10_ENQUEUE. */
    ovs_be16 len;             /* Len is 16. */
    ovs_be16 port;            /* Port that queue belongs. Should
                                 refer to a valid physical port
                                 (i.e. < OFPP_MAX) or OFPP_IN_PORT. */
    uint8_t pad[6];           /* Pad for 64-bit alignment. */
    ovs_be32 queue_id;        /* Where to enqueue the packets. */
};
OFP_ASSERT(sizeof(struct ofp10_action_enqueue) == 16);
/* Decodes an OpenFlow 1.0 enqueue action 'oae' into 'out'.
 *
 * Returns 0 on success, otherwise OFPERR_OFPBAC_BAD_OUT_PORT when the port
 * is neither a physical port (< OFPP_MAX) nor OFPP_IN_PORT nor OFPP_LOCAL. */
static enum ofperr
decode_OFPAT_RAW10_ENQUEUE(const struct ofp10_action_enqueue *oae,
                           struct ofpbuf *out)
{
    struct ofpact_enqueue *enqueue = ofpact_put_ENQUEUE(out);

    enqueue->port = u16_to_ofp(ntohs(oae->port));
    enqueue->queue = ntohl(oae->queue_id);

    if (enqueue->port != OFPP_IN_PORT
        && enqueue->port != OFPP_LOCAL
        && ofp_to_u16(enqueue->port) >= ofp_to_u16(OFPP_MAX)) {
        return OFPERR_OFPBAC_BAD_OUT_PORT;
    }
    return 0;
}
/* Encodes 'enqueue' for 'ofp_version' into 'out'.  Only OpenFlow 1.0 has a
 * native enqueue action; later versions are not yet handled here. */
static void
encode_ENQUEUE(const struct ofpact_enqueue *enqueue,
               enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version != OFP10_VERSION) {
        /* XXX No OF1.1+ equivalent emitted yet. */
        return;
    }

    struct ofp10_action_enqueue *oae = put_OFPAT10_ENQUEUE(out);
    oae->port = htons(ofp_to_u16(enqueue->port));
    oae->queue_id = htonl(enqueue->queue);
}
/* Parses "enqueue:PORT:QUEUE" or "enqueue(PORT,QUEUE)" from 'arg' into
 * 'ofpacts'.
 *
 * Returns NULL on success, otherwise a malloc()'d error message that the
 * caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_ENQUEUE(char *arg, struct ofpbuf *ofpacts,
              enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    char *saveptr = NULL;
    char *port = strtok_r(arg, ":q,", &saveptr);
    char *queue = strtok_r(NULL, "", &saveptr);

    if (!port || !queue) {
        return xstrdup("\"enqueue\" syntax is \"enqueue:PORT:QUEUE\" or "
                       "\"enqueue(PORT,QUEUE)\"");
    }

    struct ofpact_enqueue *enqueue = ofpact_put_ENQUEUE(ofpacts);
    if (!ofputil_port_from_string(port, &enqueue->port)) {
        return xasprintf("%s: enqueue to unknown port", port);
    }
    return str_to_u32(queue, &enqueue->queue);
}
/* Appends "enqueue:PORT:QUEUE" for 'a' to 's'. */
static void
format_ENQUEUE(const struct ofpact_enqueue *a, struct ds *s)
{
    ds_put_cstr(s, "enqueue:");
    ofputil_format_port(a->port, s);
    ds_put_format(s, ":%"PRIu32, a->queue);
}
/* Action structure for NXAST_OUTPUT_REG.
*
* Outputs to the OpenFlow port number written to src[ofs:ofs+nbits].
*
* The format and semantics of 'src' and 'ofs_nbits' are similar to those for
* the NXAST_REG_LOAD action.
*
* The acceptable nxm_header values for 'src' are the same as the acceptable
* nxm_header values for the 'src' field of NXAST_REG_MOVE.
*
* The 'max_len' field indicates the number of bytes to send when the chosen
* port is OFPP_CONTROLLER. Its semantics are equivalent to the 'max_len'
* field of OFPAT_OUTPUT.
*
* The 'zero' field is required to be zeroed for forward compatibility. */
struct nx_action_output_reg {
    ovs_be16 type;              /* OFPAT_VENDOR. */
    ovs_be16 len;               /* Length is 24. */
    ovs_be32 vendor;            /* NX_VENDOR_ID. */
    ovs_be16 subtype;           /* NXAST_OUTPUT_REG. */
    ovs_be16 ofs_nbits;         /* (ofs << 6) | (n_bits - 1). */
    ovs_be32 src;               /* Source field, as an NXM header. */
    ovs_be16 max_len;           /* Max length to send to controller. */
    uint8_t zero[6];            /* Reserved, must be zero. */
};
OFP_ASSERT(sizeof(struct nx_action_output_reg) == 24);
/* Action structure for NXAST_OUTPUT_REG2.
*
* Like the NXAST_OUTPUT_REG but organized so that there is room for a 64-bit
* experimenter OXM as 'src'.
*/
struct nx_action_output_reg2 {
    ovs_be16 type;              /* OFPAT_VENDOR. */
    ovs_be16 len;               /* Length is 24. */
    ovs_be32 vendor;            /* NX_VENDOR_ID. */
    ovs_be16 subtype;           /* NXAST_OUTPUT_REG2. */
    ovs_be16 ofs_nbits;         /* (ofs << 6) | (n_bits - 1). */
    ovs_be16 max_len;           /* Max length to send to controller. */
    /* Followed by:
     * - 'src', as an OXM/NXM header (either 4 or 8 bytes).
     * - Enough 0-bytes to pad the action out to 24 bytes. */
    uint8_t pad[10];            /* Start of the variable-length tail above. */
};
OFP_ASSERT(sizeof(struct nx_action_output_reg2) == 24);
/* Decodes an NXAST_OUTPUT_REG action 'naor' into 'out'.
 *
 * Returns 0 on success, otherwise an OFPERR_* value (nonzero reserved bytes
 * or an invalid source subfield). */
static enum ofperr
decode_NXAST_RAW_OUTPUT_REG(const struct nx_action_output_reg *naor,
                            struct ofpbuf *out)
{
    struct ofpact_output_reg *output_reg;
    if (!is_all_zeros(naor->zero, sizeof naor->zero)) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    output_reg = ofpact_put_OUTPUT_REG(out);
    /* Remember the raw form so that encoding round-trips. */
    output_reg->ofpact.raw = NXAST_RAW_OUTPUT_REG;
    output_reg->src.field = mf_from_nxm_header(ntohl(naor->src));
    output_reg->src.ofs = nxm_decode_ofs(naor->ofs_nbits);
    output_reg->src.n_bits = nxm_decode_n_bits(naor->ofs_nbits);
    output_reg->max_len = ntohs(naor->max_len);
    return mf_check_src(&output_reg->src, NULL);
}
/* Decodes an NXAST_OUTPUT_REG2 action 'naor' into 'out'.  Unlike the older
 * form, the source field is a variable-length OXM/NXM header that follows
 * the fixed part of the action.
 *
 * Returns 0 on success, otherwise an OFPERR_* value. */
static enum ofperr
decode_NXAST_RAW_OUTPUT_REG2(const struct nx_action_output_reg2 *naor,
                             struct ofpbuf *out)
{
    struct ofpact_output_reg *output_reg;
    enum ofperr error;
    struct ofpbuf b;
    output_reg = ofpact_put_OUTPUT_REG(out);
    output_reg->ofpact.raw = NXAST_RAW_OUTPUT_REG2;
    output_reg->src.ofs = nxm_decode_ofs(naor->ofs_nbits);
    output_reg->src.n_bits = nxm_decode_n_bits(naor->ofs_nbits);
    output_reg->max_len = ntohs(naor->max_len);
    /* Parse the trailing OXM/NXM header for the source field. */
    ofpbuf_use_const(&b, naor, ntohs(naor->len));
    ofpbuf_pull(&b, OBJECT_OFFSETOF(naor, pad));
    error = nx_pull_header(&b, &output_reg->src.field, NULL);
    if (error) {
        return error;
    }
    /* Any bytes after the header are padding and must be zero. */
    if (!is_all_zeros(b.data, b.size)) {
        return OFPERR_NXBRC_MUST_BE_ZERO;
    }
    return mf_check_src(&output_reg->src, NULL);
}
/* Encodes 'output_reg' for 'ofp_version' into 'out', choosing between the
 * NXAST_OUTPUT_REG and NXAST_OUTPUT_REG2 wire forms. */
static void
encode_OUTPUT_REG(const struct ofpact_output_reg *output_reg,
                  enum ofp_version ofp_version OVS_UNUSED,
                  struct ofpbuf *out)
{
    /* If 'output_reg' came in as an NXAST_RAW_OUTPUT_REG2 action, or if it
     * cannot be encoded in the older form, encode it as
     * NXAST_RAW_OUTPUT_REG2. */
    if (output_reg->ofpact.raw == NXAST_RAW_OUTPUT_REG2
        || !mf_nxm_header(output_reg->src.field->id)) {
        struct nx_action_output_reg2 *naor = put_NXAST_OUTPUT_REG2(out);
        size_t size = out->size;
        naor->ofs_nbits = nxm_encode_ofs_nbits(output_reg->src.ofs,
                                               output_reg->src.n_bits);
        naor->max_len = htons(output_reg->max_len);
        /* Rewind over the 'pad' bytes so that the OXM/NXM header is written
         * in place of them, then restore the full action size. */
        out->size = size - sizeof naor->pad;
        nx_put_header(out, output_reg->src.field->id, 0, false);
        out->size = size;
    } else {
        struct nx_action_output_reg *naor = put_NXAST_OUTPUT_REG(out);
        naor->ofs_nbits = nxm_encode_ofs_nbits(output_reg->src.ofs,
                                               output_reg->src.n_bits);
        naor->src = htonl(mf_nxm_header(output_reg->src.field->id));
        naor->max_len = htons(output_reg->max_len);
    }
}
/* Parses an output-to-register action; the syntax is shared with the
 * ordinary "output" action, so delegate to its parser. */
static char * OVS_WARN_UNUSED_RESULT
parse_OUTPUT_REG(const char *arg, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return parse_OUTPUT(arg, ofpacts, usable_protocols);
}
/* Appends "output:SUBFIELD" for 'a' to 's'. */
static void
format_OUTPUT_REG(const struct ofpact_output_reg *a, struct ds *s)
{
    ds_put_cstr(s, "output:");
    mf_format_subfield(&a->src, s);
}
/* Action structure for NXAST_BUNDLE and NXAST_BUNDLE_LOAD.
*
* The bundle actions choose a slave from a supplied list of options.
* NXAST_BUNDLE outputs to its selection. NXAST_BUNDLE_LOAD writes its
* selection to a register.
*
* The list of possible slaves follows the nx_action_bundle structure. The size
* of each slave is governed by its type as indicated by the 'slave_type'
* parameter. The list of slaves should be padded at its end with zeros to make
* the total length of the action a multiple of 8.
*
* Switches infer from the 'slave_type' parameter the size of each slave. All
* implementations must support the NXM_OF_IN_PORT 'slave_type' which indicates
* that the slaves are OpenFlow port numbers with NXM_LENGTH(NXM_OF_IN_PORT) ==
* 2 byte width. Switches should reject actions which indicate unknown or
* unsupported slave types.
*
* Switches use a strategy dictated by the 'algorithm' parameter to choose a
* slave. If the switch does not support the specified 'algorithm' parameter,
* it should reject the action.
*
* Several algorithms take into account liveness when selecting slaves. The
* liveness of a slave is implementation defined (with one exception), but will
* generally take into account things like its carrier status and the results
* of any link monitoring protocols which happen to be running on it. In order
* to give controllers a place-holder value, the OFPP_NONE port is always
* considered live.
*
* Some slave selection strategies require the use of a hash function, in which
* case the 'fields' and 'basis' parameters should be populated. The 'fields'
* parameter (one of NX_HASH_FIELDS_*) designates which parts of the flow to
* hash. Refer to the definition of "enum nx_hash_fields" for details. The
* 'basis' parameter is used as a universal hash parameter. Different values
* of 'basis' yield different hash results.
*
* The 'zero' parameter at the end of the action structure is reserved for
* future use. Switches are required to reject actions which have nonzero
* bytes in the 'zero' field.
*
* NXAST_BUNDLE actions should have 'ofs_nbits' and 'dst' zeroed. Switches
* should reject actions which have nonzero bytes in either of these fields.
*
* NXAST_BUNDLE_LOAD stores the OpenFlow port number of the selected slave in
* dst[ofs:ofs+n_bits]. The format and semantics of 'dst' and 'ofs_nbits' are
* similar to those for the NXAST_REG_LOAD action. */
struct nx_action_bundle {
    ovs_be16 type;              /* OFPAT_VENDOR. */
    ovs_be16 len;               /* Length including slaves. */
    ovs_be32 vendor;            /* NX_VENDOR_ID. */
    ovs_be16 subtype;           /* NXAST_BUNDLE or NXAST_BUNDLE_LOAD. */
    /* Slave choice algorithm to apply to hash value. */
    ovs_be16 algorithm;         /* One of NX_BD_ALG_*. */
    /* What fields to hash and how. */
    ovs_be16 fields;            /* One of NX_HASH_FIELDS_*. */
    ovs_be16 basis;             /* Universal hash parameter. */
    ovs_be32 slave_type;        /* NXM_OF_IN_PORT. */
    ovs_be16 n_slaves;          /* Number of slaves. */
    ovs_be16 ofs_nbits;         /* (ofs << 6) | (n_bits - 1). */
    ovs_be32 dst;               /* Destination (BUNDLE_LOAD only). */
    uint8_t zero[4];            /* Reserved. Must be zero. */
};
OFP_ASSERT(sizeof(struct nx_action_bundle) == 32);
/* Decodes an NXAST_BUNDLE or NXAST_BUNDLE_LOAD action 'nab' into 'ofpacts';
 * 'load' is true for the BUNDLE_LOAD variant.  Validates the hash fields,
 * slave count, algorithm, slave type, reserved bytes, and slave list length,
 * logging each problem (rate limited) before returning.
 *
 * Returns 0 on success, otherwise an OFPERR_* value.  Validation deliberately
 * continues after the first failure so all problems get logged. */
static enum ofperr
decode_bundle(bool load, const struct nx_action_bundle *nab,
              struct ofpbuf *ofpacts)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
    struct ofpact_bundle *bundle;
    uint32_t slave_type;
    size_t slaves_size, i;
    enum ofperr error;
    bundle = ofpact_put_BUNDLE(ofpacts);
    bundle->n_slaves = ntohs(nab->n_slaves);
    bundle->basis = ntohs(nab->basis);
    bundle->fields = ntohs(nab->fields);
    bundle->algorithm = ntohs(nab->algorithm);
    slave_type = ntohl(nab->slave_type);
    /* The slave list occupies whatever follows the fixed-size header. */
    slaves_size = ntohs(nab->len) - sizeof *nab;
    error = OFPERR_OFPBAC_BAD_ARGUMENT;
    if (!flow_hash_fields_valid(bundle->fields)) {
        VLOG_WARN_RL(&rl, "unsupported fields %d", (int) bundle->fields);
    } else if (bundle->n_slaves > BUNDLE_MAX_SLAVES) {
        VLOG_WARN_RL(&rl, "too many slaves");
    } else if (bundle->algorithm != NX_BD_ALG_HRW
               && bundle->algorithm != NX_BD_ALG_ACTIVE_BACKUP) {
        VLOG_WARN_RL(&rl, "unsupported algorithm %d", (int) bundle->algorithm);
    } else if (slave_type != mf_nxm_header(MFF_IN_PORT)) {
        /* 'slave_type' is 32 bits wide, so log it with PRIu32 (the previous
         * PRIu16 silently truncated the logged value). */
        VLOG_WARN_RL(&rl, "unsupported slave type %"PRIu32, slave_type);
    } else {
        error = 0;
    }
    if (!is_all_zeros(nab->zero, sizeof nab->zero)) {
        VLOG_WARN_RL(&rl, "reserved field is nonzero");
        error = OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    if (load) {
        /* BUNDLE_LOAD writes the chosen port into dst[ofs:ofs+n_bits]. */
        bundle->dst.field = mf_from_nxm_header(ntohl(nab->dst));
        bundle->dst.ofs = nxm_decode_ofs(nab->ofs_nbits);
        bundle->dst.n_bits = nxm_decode_n_bits(nab->ofs_nbits);
        if (bundle->dst.n_bits < 16) {
            VLOG_WARN_RL(&rl, "bundle_load action requires at least 16 bit "
                         "destination.");
            error = OFPERR_OFPBAC_BAD_ARGUMENT;
        }
    } else {
        /* Plain BUNDLE must leave the load-only fields zeroed. */
        if (nab->ofs_nbits || nab->dst) {
            VLOG_WARN_RL(&rl, "bundle action has nonzero reserved fields");
            error = OFPERR_OFPBAC_BAD_ARGUMENT;
        }
    }
    if (slaves_size < bundle->n_slaves * sizeof(ovs_be16)) {
        VLOG_WARN_RL(&rl, "Nicira action %s only has %"PRIuSIZE" bytes "
                     "allocated for slaves.  %"PRIuSIZE" bytes are required "
                     "for %"PRIu16" slaves.",
                     load ? "bundle_load" : "bundle", slaves_size,
                     bundle->n_slaves * sizeof(ovs_be16), bundle->n_slaves);
        error = OFPERR_OFPBAC_BAD_LEN;
    }
    /* Append the slave ports after the fixed part of the ofpact.
     * NOTE(review): each slave is appended as a 16-bit value; confirm that
     * struct ofpact_bundle declares its slave array with 16-bit elements,
     * otherwise these entries should be widened to ofp_port_t. */
    for (i = 0; i < bundle->n_slaves; i++) {
        uint16_t ofp_port = ntohs(((ovs_be16 *)(nab + 1))[i]);
        ofpbuf_put(ofpacts, &ofp_port, sizeof ofp_port);
    }
    /* ofpbuf_put() may have reallocated; refresh 'bundle' before fixing up
     * the ofpact's length to cover the appended slaves. */
    bundle = ofpacts->header;
    ofpact_update_len(ofpacts, &bundle->ofpact);
    if (!error) {
        error = bundle_check(bundle, OFPP_MAX, NULL);
    }
    return error;
}
/* Decodes a plain NXAST_BUNDLE action (output to the chosen slave). */
static enum ofperr
decode_NXAST_RAW_BUNDLE(const struct nx_action_bundle *nab, struct ofpbuf *out)
{
    return decode_bundle(false, nab, out);
}
/* Decodes an NXAST_BUNDLE_LOAD action (store chosen slave to a register). */
static enum ofperr
decode_NXAST_RAW_BUNDLE_LOAD(const struct nx_action_bundle *nab,
                             struct ofpbuf *out)
{
    return decode_bundle(true, nab, out);
}
/* Encodes 'bundle' as NXAST_BUNDLE (or NXAST_BUNDLE_LOAD when a destination
 * field is set) into 'out', appending the slave port list padded to an
 * 8-byte boundary. */
static void
encode_BUNDLE(const struct ofpact_bundle *bundle,
              enum ofp_version ofp_version OVS_UNUSED,
              struct ofpbuf *out)
{
    /* Each slave is 2 bytes on the wire; pad the list to action alignment. */
    int slaves_len = ROUND_UP(2 * bundle->n_slaves, OFP_ACTION_ALIGN);
    struct nx_action_bundle *nab;
    ovs_be16 *slaves;
    size_t i;
    nab = (bundle->dst.field
           ? put_NXAST_BUNDLE_LOAD(out)
           : put_NXAST_BUNDLE(out));
    /* Extend the action length to cover the slave list appended below. */
    nab->len = htons(ntohs(nab->len) + slaves_len);
    nab->algorithm = htons(bundle->algorithm);
    nab->fields = htons(bundle->fields);
    nab->basis = htons(bundle->basis);
    nab->slave_type = htonl(mf_nxm_header(MFF_IN_PORT));
    nab->n_slaves = htons(bundle->n_slaves);
    if (bundle->dst.field) {
        nab->ofs_nbits = nxm_encode_ofs_nbits(bundle->dst.ofs,
                                              bundle->dst.n_bits);
        nab->dst = htonl(mf_nxm_header(bundle->dst.field->id));
    }
    slaves = ofpbuf_put_zeros(out, slaves_len);
    for (i = 0; i < bundle->n_slaves; i++) {
        slaves[i] = htons(ofp_to_u16(bundle->slaves[i]));
    }
}
/* Parses a "bundle" action from 'arg'; the heavy lifting lives in the
 * bundle module. */
static char * OVS_WARN_UNUSED_RESULT
parse_BUNDLE(const char *arg, struct ofpbuf *ofpacts,
             enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return bundle_parse(arg, ofpacts);
}
/* Parses a "bundle_load" action from 'arg'. */
static char * OVS_WARN_UNUSED_RESULT
parse_bundle_load(const char *arg, struct ofpbuf *ofpacts)
{
    return bundle_parse_load(arg, ofpacts);
}
/* Appends a textual representation of bundle action 'a' to 's'. */
static void
format_BUNDLE(const struct ofpact_bundle *a, struct ds *s)
{
    bundle_format(a, s);
}
/* Set VLAN actions. */

/* Shared decoder for the OF1.0/OF1.1 set-VLAN-VID actions.  'vid' must fit
 * in 12 bits; 'push_vlan_if_needed' records the OF1.0 semantics of pushing
 * a tag when none is present.
 *
 * Returns 0 on success, otherwise OFPERR_OFPBAC_BAD_ARGUMENT. */
static enum ofperr
decode_set_vlan_vid(uint16_t vid, bool push_vlan_if_needed, struct ofpbuf *out)
{
    struct ofpact_vlan_vid *vlan_vid;

    if (vid & ~0xfff) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }

    vlan_vid = ofpact_put_SET_VLAN_VID(out);
    vlan_vid->vlan_vid = vid;
    vlan_vid->push_vlan_if_needed = push_vlan_if_needed;
    return 0;
}
/* OF1.0 form: implicitly pushes a VLAN tag if the packet has none. */
static enum ofperr
decode_OFPAT_RAW10_SET_VLAN_VID(uint16_t vid, struct ofpbuf *out)
{
    return decode_set_vlan_vid(vid, true, out);
}
/* OF1.1 form: modifies an existing tag only, never pushes one. */
static enum ofperr
decode_OFPAT_RAW11_SET_VLAN_VID(uint16_t vid, struct ofpbuf *out)
{
    return decode_set_vlan_vid(vid, false, out);
}
/* Encodes 'vlan_vid' for 'ofp_version' into 'out', emitting an explicit
 * push_vlan first when the OF1.0-style "push if needed" semantics apply and
 * the flow carries no VLAN tag. */
static void
encode_SET_VLAN_VID(const struct ofpact_vlan_vid *vlan_vid,
                    enum ofp_version ofp_version, struct ofpbuf *out)
{
    uint16_t vid = vlan_vid->vlan_vid;

    /* OpenFlow 1.1+ has no implicit push, so materialize one here. */
    if (vlan_vid->push_vlan_if_needed
        && !vlan_vid->flow_has_vlan
        && ofp_version > OFP10_VERSION) {
        put_OFPAT11_PUSH_VLAN(out, htons(ETH_TYPE_VLAN_8021Q));
    }

    switch (ofp_version) {
    case OFP10_VERSION:
        put_OFPAT10_SET_VLAN_VID(out, vid);
        break;
    case OFP11_VERSION:
        put_OFPAT11_SET_VLAN_VID(out, vid);
        break;
    default:
        /* OF1.2+ expresses this as a set-field; the VID must carry the
         * "present" bit on the wire. */
        ofpact_put_set_field(out, ofp_version,
                             MFF_VLAN_VID, vid | OFPVID12_PRESENT);
        break;
    }
}
/* Shared parser for set_vlan_vid/mod_vlan_vid.  Reads a VID from 'arg' and
 * appends a SET_VLAN_VID ofpact to 'ofpacts'.
 *
 * Returns NULL on success, otherwise a malloc()'d error message that the
 * caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_set_vlan_vid(char *arg, struct ofpbuf *ofpacts, bool push_vlan_if_needed)
{
    uint16_t vid;
    char *error = str_to_u16(arg, "VLAN VID", &vid);

    if (error) {
        return error;
    }
    if (vid & ~VLAN_VID_MASK) {
        return xasprintf("%s: not a valid VLAN VID", arg);
    }

    struct ofpact_vlan_vid *vlan_vid = ofpact_put_SET_VLAN_VID(ofpacts);
    vlan_vid->push_vlan_if_needed = push_vlan_if_needed;
    vlan_vid->vlan_vid = vid;
    return NULL;
}
/* Parses "set_vlan_vid" (the OF1.1 modify-only form). */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_VLAN_VID(char *arg, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return parse_set_vlan_vid(arg, ofpacts, false);
}
/* Appends "mod_vlan_vid:VID" or "set_vlan_vid:VID" for 'a' to 's'. */
static void
format_SET_VLAN_VID(const struct ofpact_vlan_vid *a, struct ds *s)
{
    const char *verb = a->push_vlan_if_needed ? "mod_vlan_vid"
                                              : "set_vlan_vid";

    ds_put_format(s, "%s:%"PRIu16, verb, a->vlan_vid);
}
/* Set PCP actions. */

/* Shared decoder for the OF1.0/OF1.1 set-VLAN-PCP actions.  'pcp' must fit
 * in 3 bits.
 *
 * Returns 0 on success, otherwise OFPERR_OFPBAC_BAD_ARGUMENT. */
static enum ofperr
decode_set_vlan_pcp(uint8_t pcp, bool push_vlan_if_needed, struct ofpbuf *out)
{
    struct ofpact_vlan_pcp *vlan_pcp;

    if (pcp & ~7) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }

    vlan_pcp = ofpact_put_SET_VLAN_PCP(out);
    vlan_pcp->vlan_pcp = pcp;
    vlan_pcp->push_vlan_if_needed = push_vlan_if_needed;
    return 0;
}
/* OF1.0 form: implicitly pushes a VLAN tag if the packet has none. */
static enum ofperr
decode_OFPAT_RAW10_SET_VLAN_PCP(uint8_t pcp, struct ofpbuf *out)
{
    return decode_set_vlan_pcp(pcp, true, out);
}
/* OF1.1 form: modifies an existing tag only, never pushes one. */
static enum ofperr
decode_OFPAT_RAW11_SET_VLAN_PCP(uint8_t pcp, struct ofpbuf *out)
{
    return decode_set_vlan_pcp(pcp, false, out);
}
/* Encodes 'vlan_pcp' for 'ofp_version' into 'out', emitting an explicit
 * push_vlan first when the OF1.0-style "push if needed" semantics apply and
 * the flow carries no VLAN tag. */
static void
encode_SET_VLAN_PCP(const struct ofpact_vlan_pcp *vlan_pcp,
                    enum ofp_version ofp_version, struct ofpbuf *out)
{
    uint8_t pcp = vlan_pcp->vlan_pcp;

    /* OpenFlow 1.1+ has no implicit push, so materialize one here. */
    if (vlan_pcp->push_vlan_if_needed
        && !vlan_pcp->flow_has_vlan
        && ofp_version > OFP10_VERSION) {
        put_OFPAT11_PUSH_VLAN(out, htons(ETH_TYPE_VLAN_8021Q));
    }

    switch (ofp_version) {
    case OFP10_VERSION:
        put_OFPAT10_SET_VLAN_PCP(out, pcp);
        break;
    case OFP11_VERSION:
        put_OFPAT11_SET_VLAN_PCP(out, pcp);
        break;
    default:
        /* OF1.2+ expresses this as a set-field. */
        ofpact_put_set_field(out, ofp_version, MFF_VLAN_PCP, pcp);
        break;
    }
}
/* Shared parser for set_vlan_pcp/mod_vlan_pcp.  Reads a 3-bit priority from
 * 'arg' and appends a SET_VLAN_PCP ofpact to 'ofpacts'.
 *
 * Returns NULL on success, otherwise a malloc()'d error message that the
 * caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_set_vlan_pcp(char *arg, struct ofpbuf *ofpacts, bool push_vlan_if_needed)
{
    uint8_t pcp;
    char *error = str_to_u8(arg, "VLAN PCP", &pcp);

    if (error) {
        return error;
    }
    if (pcp & ~7) {
        return xasprintf("%s: not a valid VLAN PCP", arg);
    }

    struct ofpact_vlan_pcp *vlan_pcp = ofpact_put_SET_VLAN_PCP(ofpacts);
    vlan_pcp->push_vlan_if_needed = push_vlan_if_needed;
    vlan_pcp->vlan_pcp = pcp;
    return NULL;
}
/* Parses "set_vlan_pcp" (the OF1.1 modify-only form). */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_VLAN_PCP(char *arg, struct ofpbuf *ofpacts,
                   enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return parse_set_vlan_pcp(arg, ofpacts, false);
}
/* Appends "mod_vlan_pcp:PCP" or "set_vlan_pcp:PCP" for 'a' to 's'. */
static void
format_SET_VLAN_PCP(const struct ofpact_vlan_pcp *a, struct ds *s)
{
    const char *verb = a->push_vlan_if_needed ? "mod_vlan_pcp"
                                              : "set_vlan_pcp";

    ds_put_format(s, "%s:%"PRIu8, verb, a->vlan_pcp);
}
/* Strip VLAN actions. */
/* Decodes OF1.0 strip_vlan, remembering the raw form for round-tripping. */
static enum ofperr
decode_OFPAT_RAW10_STRIP_VLAN(struct ofpbuf *out)
{
    ofpact_put_STRIP_VLAN(out)->ofpact.raw = OFPAT_RAW10_STRIP_VLAN;
    return 0;
}
/* Decodes OF1.1+ pop_vlan, remembering the raw form for round-tripping. */
static enum ofperr
decode_OFPAT_RAW11_POP_VLAN(struct ofpbuf *out)
{
    ofpact_put_STRIP_VLAN(out)->ofpact.raw = OFPAT_RAW11_POP_VLAN;
    return 0;
}
/* Encodes a strip/pop VLAN action for 'ofp_version' into 'out'. */
static void
encode_STRIP_VLAN(const struct ofpact_null *null OVS_UNUSED,
                  enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version != OFP10_VERSION) {
        put_OFPAT11_POP_VLAN(out);
    } else {
        put_OFPAT10_STRIP_VLAN(out);
    }
}
/* Parses "strip_vlan"; records the OF1.0 raw form so it formats back the
 * same way. */
static char * OVS_WARN_UNUSED_RESULT
parse_STRIP_VLAN(char *arg OVS_UNUSED, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    ofpact_put_STRIP_VLAN(ofpacts)->ofpact.raw = OFPAT_RAW10_STRIP_VLAN;
    return NULL;
}
/* Parses "pop_vlan"; records the OF1.1 raw form so it formats back the
 * same way. */
static char * OVS_WARN_UNUSED_RESULT
parse_pop_vlan(struct ofpbuf *ofpacts)
{
    ofpact_put_STRIP_VLAN(ofpacts)->ofpact.raw = OFPAT_RAW11_POP_VLAN;
    return NULL;
}
/* Appends "pop_vlan" or "strip_vlan" to 's', matching how 'a' was decoded
 * or parsed. */
static void
format_STRIP_VLAN(const struct ofpact_null *a, struct ds *s)
{
    const char *name = (a->ofpact.raw == OFPAT_RAW11_POP_VLAN
                        ? "pop_vlan"
                        : "strip_vlan");

    ds_put_cstr(s, name);
}
/* Push VLAN action. */

/* Decodes an OF1.1+ push_vlan action.  Only the 802.1Q ethertype is
 * accepted.
 *
 * Returns 0 on success, otherwise OFPERR_OFPBAC_BAD_ARGUMENT. */
static enum ofperr
decode_OFPAT_RAW11_PUSH_VLAN(ovs_be16 eth_type, struct ofpbuf *out)
{
    if (eth_type == htons(ETH_TYPE_VLAN_8021Q)) {
        ofpact_put_PUSH_VLAN(out);
        return 0;
    }
    /* XXX 802.1AD(QinQ) isn't supported at the moment */
    return OFPERR_OFPBAC_BAD_ARGUMENT;
}
/* Encodes a push_vlan action for 'ofp_version' into 'out'. */
static void
encode_PUSH_VLAN(const struct ofpact_null *null OVS_UNUSED,
                 enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version == OFP10_VERSION) {
        /* PUSH is a side effect of a SET_VLAN_VID/PCP, which should
         * follow this action. */
    } else {
        /* XXX ETH_TYPE_VLAN_8021AD case */
        put_OFPAT11_PUSH_VLAN(out, htons(ETH_TYPE_VLAN_8021Q));
    }
}
/* Parses "push_vlan:ETHERTYPE" from 'arg' into 'ofpacts'.  Only the 802.1Q
 * ethertype is accepted for now.  Narrows 'usable_protocols' because
 * push_vlan needs OpenFlow 1.1 or later.  (The parameter had a bogus
 * OVS_UNUSED annotation even though it is written below; the corrupted
 * "&ethertype" argument on the str_to_u16() call is also repaired.)
 *
 * Returns NULL on success, otherwise a malloc()'d error message that the
 * caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_PUSH_VLAN(char *arg, struct ofpbuf *ofpacts,
                enum ofputil_protocol *usable_protocols)
{
    uint16_t ethertype;
    char *error;

    /* A push_vlan action only exists in OpenFlow 1.1 and later. */
    *usable_protocols &= OFPUTIL_P_OF11_UP;

    error = str_to_u16(arg, "ethertype", &ethertype);
    if (error) {
        return error;
    }
    if (ethertype != ETH_TYPE_VLAN_8021Q) {
        /* XXX ETH_TYPE_VLAN_8021AD case isn't supported */
        return xasprintf("%s: not a valid VLAN ethertype", arg);
    }
    ofpact_put_PUSH_VLAN(ofpacts);
    return NULL;
}
/* Appends "push_vlan:0x8100" to 's'; only 802.1Q is representable today. */
static void
format_PUSH_VLAN(const struct ofpact_null *a OVS_UNUSED, struct ds *s)
{
    /* XXX 802.1AD case*/
    ds_put_format(s, "push_vlan:%#"PRIx16, ETH_TYPE_VLAN_8021Q);
}
/* Action structure for OFPAT10_SET_DL_SRC/DST and OFPAT11_SET_DL_SRC/DST. */
struct ofp_action_dl_addr {
    ovs_be16 type;                  /* Type. */
    ovs_be16 len;                   /* Length is 16. */
    uint8_t dl_addr[OFP_ETH_ALEN];  /* Ethernet address. */
    uint8_t pad[6];                 /* Pad to 64-bit boundary. */
};
OFP_ASSERT(sizeof(struct ofp_action_dl_addr) == 16);
/* Decodes a set-Ethernet-source action into 'out'. */
static enum ofperr
decode_OFPAT_RAW_SET_DL_SRC(const struct ofp_action_dl_addr *a,
                            struct ofpbuf *out)
{
    memcpy(ofpact_put_SET_ETH_SRC(out)->mac, a->dl_addr, ETH_ADDR_LEN);
    return 0;
}
/* Decodes a set-Ethernet-destination action into 'out'. */
static enum ofperr
decode_OFPAT_RAW_SET_DL_DST(const struct ofp_action_dl_addr *a,
                            struct ofpbuf *out)
{
    memcpy(ofpact_put_SET_ETH_DST(out)->mac, a->dl_addr, ETH_ADDR_LEN);
    return 0;
}
/* Helper shared by the Ethernet source/destination encoders: emits 'mac' as
 * the legacy fixed-format action 'raw' before OpenFlow 1.2, or as a
 * set-field on 'field' from OpenFlow 1.2 onward. */
static void
encode_SET_ETH_addr(const struct ofpact_mac *mac, enum ofp_version ofp_version,
                    enum ofp_raw_action_type raw, enum mf_field_id field,
                    struct ofpbuf *out)
{
    if (ofp_version >= OFP12_VERSION) {
        ofpact_put_set_field(out, ofp_version, field,
                             eth_addr_to_uint64(mac->mac));
    } else {
        struct ofp_action_dl_addr *oada = ofpact_put_raw(out, ofp_version,
                                                         raw, 0);
        memcpy(oada->dl_addr, mac->mac, ETH_ADDR_LEN);
    }
}
/* Encodes a set-Ethernet-source action for 'ofp_version' into 'out'. */
static void
encode_SET_ETH_SRC(const struct ofpact_mac *mac, enum ofp_version ofp_version,
                   struct ofpbuf *out)
{
    encode_SET_ETH_addr(mac, ofp_version, OFPAT_RAW_SET_DL_SRC, MFF_ETH_SRC,
                        out);
}
/* Encodes a set-Ethernet-destination action for 'ofp_version' into 'out'. */
static void
encode_SET_ETH_DST(const struct ofpact_mac *mac,
                   enum ofp_version ofp_version,
                   struct ofpbuf *out)
{
    encode_SET_ETH_addr(mac, ofp_version, OFPAT_RAW_SET_DL_DST, MFF_ETH_DST,
                        out);
}
/* Parses "mod_dl_src:MAC"; returns NULL or a malloc()'d error message. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_ETH_SRC(char *arg, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return str_to_mac(arg, ofpact_put_SET_ETH_SRC(ofpacts)->mac);
}
/* Parses "mod_dl_dst:MAC"; returns NULL or a malloc()'d error message. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_ETH_DST(char *arg, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return str_to_mac(arg, ofpact_put_SET_ETH_DST(ofpacts)->mac);
}
/* Appends "mod_dl_src:MAC" for 'a' to 's'. */
static void
format_SET_ETH_SRC(const struct ofpact_mac *a, struct ds *s)
{
    ds_put_format(s, "mod_dl_src:"ETH_ADDR_FMT, ETH_ADDR_ARGS(a->mac));
}
/* Appends "mod_dl_dst:MAC" for 'a' to 's'. */
static void
format_SET_ETH_DST(const struct ofpact_mac *a, struct ds *s)
{
    ds_put_format(s, "mod_dl_dst:"ETH_ADDR_FMT, ETH_ADDR_ARGS(a->mac));
}
/* Set IPv4 address actions. */
/* Decodes a set-IPv4-source action into 'out'; 'ipv4' stays network order. */
static enum ofperr
decode_OFPAT_RAW_SET_NW_SRC(ovs_be32 ipv4, struct ofpbuf *out)
{
    ofpact_put_SET_IPV4_SRC(out)->ipv4 = ipv4;
    return 0;
}
/* Decodes a set-IPv4-destination action into 'out'. */
static enum ofperr
decode_OFPAT_RAW_SET_NW_DST(ovs_be32 ipv4, struct ofpbuf *out)
{
    ofpact_put_SET_IPV4_DST(out)->ipv4 = ipv4;
    return 0;
}
/* Helper shared by the IPv4 source/destination encoders: emits the address
 * as legacy action 'raw' before OpenFlow 1.2, or as a set-field on 'field'
 * from OpenFlow 1.2 onward.  Either way the value goes out in host order
 * as the put helpers expect. */
static void
encode_SET_IPV4_addr(const struct ofpact_ipv4 *ipv4,
                     enum ofp_version ofp_version,
                     enum ofp_raw_action_type raw, enum mf_field_id field,
                     struct ofpbuf *out)
{
    uint32_t addr = ntohl(ipv4->ipv4);

    if (ofp_version >= OFP12_VERSION) {
        ofpact_put_set_field(out, ofp_version, field, addr);
    } else {
        ofpact_put_raw(out, ofp_version, raw, addr);
    }
}
/* Encodes a set-IPv4-source action for 'ofp_version' into 'out'. */
static void
encode_SET_IPV4_SRC(const struct ofpact_ipv4 *ipv4,
                    enum ofp_version ofp_version, struct ofpbuf *out)
{
    encode_SET_IPV4_addr(ipv4, ofp_version, OFPAT_RAW_SET_NW_SRC, MFF_IPV4_SRC,
                         out);
}
/* Encodes a set-IPv4-destination action for 'ofp_version' into 'out'. */
static void
encode_SET_IPV4_DST(const struct ofpact_ipv4 *ipv4,
                    enum ofp_version ofp_version, struct ofpbuf *out)
{
    encode_SET_IPV4_addr(ipv4, ofp_version, OFPAT_RAW_SET_NW_DST, MFF_IPV4_DST,
                         out);
}
/* Parses "mod_nw_src:IP"; returns NULL or a malloc()'d error message. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_IPV4_SRC(char *arg, struct ofpbuf *ofpacts,
                   enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return str_to_ip(arg, &ofpact_put_SET_IPV4_SRC(ofpacts)->ipv4);
}
/* Parses "mod_nw_dst:IP"; returns NULL or a malloc()'d error message. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_IPV4_DST(char *arg, struct ofpbuf *ofpacts,
                   enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return str_to_ip(arg, &ofpact_put_SET_IPV4_DST(ofpacts)->ipv4);
}
/* Appends "mod_nw_src:IP" for 'a' to 's'. */
static void
format_SET_IPV4_SRC(const struct ofpact_ipv4 *a, struct ds *s)
{
    ds_put_format(s, "mod_nw_src:"IP_FMT, IP_ARGS(a->ipv4));
}
/* Appends "mod_nw_dst:IP" for 'a' to 's'. */
static void
format_SET_IPV4_DST(const struct ofpact_ipv4 *a, struct ds *s)
{
    ds_put_format(s, "mod_nw_dst:"IP_FMT, IP_ARGS(a->ipv4));
}
/* Set IPv4/v6 TOS actions. */

/* Decodes a set-TOS action; 'dscp' must contain only DSCP bits.
 *
 * Returns 0 on success, otherwise OFPERR_OFPBAC_BAD_ARGUMENT. */
static enum ofperr
decode_OFPAT_RAW_SET_NW_TOS(uint8_t dscp, struct ofpbuf *out)
{
    if (dscp & ~IP_DSCP_MASK) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    ofpact_put_SET_IP_DSCP(out)->dscp = dscp;
    return 0;
}
/* Encodes a set-DSCP action for 'ofp_version' into 'out'. */
static void
encode_SET_IP_DSCP(const struct ofpact_dscp *dscp,
                   enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version >= OFP12_VERSION) {
        /* OF1.2+ uses set-field on the shifted 6-bit DSCP field. */
        ofpact_put_set_field(out, ofp_version,
                             MFF_IP_DSCP_SHIFTED, dscp->dscp >> 2);
    } else {
        put_OFPAT_SET_NW_TOS(out, ofp_version, dscp->dscp);
    }
}
/* Parses "mod_nw_tos:TOS" from 'arg' into 'ofpacts'.
 *
 * Returns NULL on success, otherwise a malloc()'d error message that the
 * caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_IP_DSCP(char *arg, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    uint8_t tos;
    char *error = str_to_u8(arg, "TOS", &tos);

    if (error) {
        return error;
    }
    if (tos & ~IP_DSCP_MASK) {
        return xasprintf("%s: not a valid TOS", arg);
    }
    ofpact_put_SET_IP_DSCP(ofpacts)->dscp = tos;
    return NULL;
}
/* Appends "mod_nw_tos:TOS" for 'a' to 's'. */
static void
format_SET_IP_DSCP(const struct ofpact_dscp *a, struct ds *s)
{
    ds_put_format(s, "mod_nw_tos:%d", a->dscp);
}
/* Set IPv4/v6 ECN actions. */

/* Decodes a set-ECN action; 'ecn' must fit in the 2-bit ECN field.
 *
 * Returns 0 on success, otherwise OFPERR_OFPBAC_BAD_ARGUMENT. */
static enum ofperr
decode_OFPAT_RAW11_SET_NW_ECN(uint8_t ecn, struct ofpbuf *out)
{
    if (ecn & ~IP_ECN_MASK) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    ofpact_put_SET_IP_ECN(out)->ecn = ecn;
    return 0;
}
/* Encodes a set-ECN action for 'ofp_version' into 'out'. */
static void
encode_SET_IP_ECN(const struct ofpact_ecn *ip_ecn,
                  enum ofp_version ofp_version, struct ofpbuf *out)
{
    switch (ofp_version) {
    case OFP10_VERSION:
        /* XXX OpenFlow 1.0 cannot express a bare ECN change. */
        break;
    case OFP11_VERSION:
        put_OFPAT11_SET_NW_ECN(out, ip_ecn->ecn);
        break;
    default:
        ofpact_put_set_field(out, ofp_version, MFF_IP_ECN, ip_ecn->ecn);
        break;
    }
}
/* Parses "mod_nw_ecn:ECN" from 'arg' into 'ofpacts'.
 *
 * Returns NULL on success, otherwise a malloc()'d error message that the
 * caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_IP_ECN(char *arg, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    uint8_t ecn;
    char *error = str_to_u8(arg, "ECN", &ecn);

    if (error) {
        return error;
    }
    if (ecn & ~IP_ECN_MASK) {
        return xasprintf("%s: not a valid ECN", arg);
    }
    ofpact_put_SET_IP_ECN(ofpacts)->ecn = ecn;
    return NULL;
}
/* Appends "mod_nw_ecn:ECN" for 'a' to 's'. */
static void
format_SET_IP_ECN(const struct ofpact_ecn *a, struct ds *s)
{
    ds_put_format(s, "mod_nw_ecn:%d", a->ecn);
}
/* Set IPv4/v6 TTL actions. */
/* Decodes a set-TTL action into 'out'; all 8-bit values are valid. */
static enum ofperr
decode_OFPAT_RAW11_SET_NW_TTL(uint8_t ttl, struct ofpbuf *out)
{
    ofpact_put_SET_IP_TTL(out)->ttl = ttl;
    return 0;
}
/* Encodes a set-TTL action for 'ofp_version' into 'out'.  OpenFlow 1.0 has
 * no such action, so nothing is emitted there. */
static void
encode_SET_IP_TTL(const struct ofpact_ip_ttl *ttl,
                  enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version < OFP11_VERSION) {
        /* XXX Not expressible in OpenFlow 1.0. */
        return;
    }
    put_OFPAT11_SET_NW_TTL(out, ttl->ttl);
}
/* Parses "mod_nw_ttl:TTL" from 'arg' into 'ofpacts'.
 *
 * Returns NULL on success, otherwise a malloc()'d error message that the
 * caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_IP_TTL(char *arg, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    uint8_t ttl;
    char *error = str_to_u8(arg, "TTL", &ttl);

    if (!error) {
        ofpact_put_SET_IP_TTL(ofpacts)->ttl = ttl;
    }
    return error;
}
/* Appends "mod_nw_ttl:TTL" for 'a' to 's'. */
static void
format_SET_IP_TTL(const struct ofpact_ip_ttl *a, struct ds *s)
{
    ds_put_format(s, "mod_nw_ttl:%d", a->ttl);
}
/* Set TCP/UDP/SCTP port actions. */
/* Decodes a set-L4-source-port action into 'out' (host byte order). */
static enum ofperr
decode_OFPAT_RAW_SET_TP_SRC(ovs_be16 port, struct ofpbuf *out)
{
    ofpact_put_SET_L4_SRC_PORT(out)->port = ntohs(port);
    return 0;
}
/* Decodes a set-L4-destination-port action into 'out' (host byte order). */
static enum ofperr
decode_OFPAT_RAW_SET_TP_DST(ovs_be16 port, struct ofpbuf *out)
{
    ofpact_put_SET_L4_DST_PORT(out)->port = ntohs(port);
    return 0;
}
/* Helper shared by the L4 port encoders: emits a set-field on 'field' when
 * the version supports it and the flow's IP protocol pinned down a concrete
 * field, otherwise falls back to the legacy fixed-format action 'raw'. */
static void
encode_SET_L4_port(const struct ofpact_l4_port *l4_port,
                   enum ofp_version ofp_version, enum ofp_raw_action_type raw,
                   enum mf_field_id field, struct ofpbuf *out)
{
    uint16_t port = l4_port->port;

    if (ofp_version < OFP12_VERSION || field == MFF_N_IDS) {
        ofpact_put_raw(out, ofp_version, raw, port);
    } else {
        ofpact_put_set_field(out, ofp_version, field, port);
    }
}
/* Encodes a set-L4-source-port action, picking the TCP/UDP/SCTP source
 * field that matches the flow's IP protocol (MFF_N_IDS if unknown). */
static void
encode_SET_L4_SRC_PORT(const struct ofpact_l4_port *l4_port,
                       enum ofp_version ofp_version, struct ofpbuf *out)
{
    uint8_t proto = l4_port->flow_ip_proto;
    enum mf_field_id field = (proto == IPPROTO_TCP ? MFF_TCP_SRC
                              : proto == IPPROTO_UDP ? MFF_UDP_SRC
                              : proto == IPPROTO_SCTP ? MFF_SCTP_SRC
                              : MFF_N_IDS);
    encode_SET_L4_port(l4_port, ofp_version, OFPAT_RAW_SET_TP_SRC, field, out);
}
/* Encodes a set-L4-destination-port action, picking the TCP/UDP/SCTP
 * destination field that matches the flow's IP protocol. */
static void
encode_SET_L4_DST_PORT(const struct ofpact_l4_port *l4_port,
                       enum ofp_version ofp_version,
                       struct ofpbuf *out)
{
    uint8_t proto = l4_port->flow_ip_proto;
    enum mf_field_id field = (proto == IPPROTO_TCP ? MFF_TCP_DST
                              : proto == IPPROTO_UDP ? MFF_UDP_DST
                              : proto == IPPROTO_SCTP ? MFF_SCTP_DST
                              : MFF_N_IDS);
    encode_SET_L4_port(l4_port, ofp_version, OFPAT_RAW_SET_TP_DST, field, out);
}
/* Parses "mod_tp_src:PORT"; returns NULL or a malloc()'d error message. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_L4_SRC_PORT(char *arg, struct ofpbuf *ofpacts,
                      enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return str_to_u16(arg, "source port",
                      &ofpact_put_SET_L4_SRC_PORT(ofpacts)->port);
}
/* Parses "mod_tp_dst:PORT"; returns NULL or a malloc()'d error message. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_L4_DST_PORT(char *arg, struct ofpbuf *ofpacts,
                      enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return str_to_u16(arg, "destination port",
                      &ofpact_put_SET_L4_DST_PORT(ofpacts)->port);
}
/* Appends "mod_tp_src:PORT" for 'a' to 's'. */
static void
format_SET_L4_SRC_PORT(const struct ofpact_l4_port *a, struct ds *s)
{
    ds_put_format(s, "mod_tp_src:%d", a->port);
}
/* Appends "mod_tp_dst:PORT" for 'a' to 's'. */
static void
format_SET_L4_DST_PORT(const struct ofpact_l4_port *a, struct ds *s)
{
    ds_put_format(s, "mod_tp_dst:%d", a->port);
}
/* Action structure for OFPAT_COPY_FIELD. */
struct ofp15_action_copy_field {
    ovs_be16 type;              /* OFPAT_COPY_FIELD. */
    ovs_be16 len;               /* Length is padded to 64 bits. */
    ovs_be16 n_bits;            /* Number of bits to copy. */
    ovs_be16 src_offset;        /* Starting bit offset in source. */
    ovs_be16 dst_offset;        /* Starting bit offset in destination. */
    uint8_t pad[2];             /* Align the variable part to 32 bits. */
    /* Followed by:
     * - OXM header for source field.
     * - OXM header for destination field.
     * - Padding with 0-bytes to a multiple of 8 bytes.
     * The "pad2" member is the beginning of the above. */
    uint8_t pad2[4];
};
OFP_ASSERT(sizeof(struct ofp15_action_copy_field) == 16);
/* Action structure for the OpenFlow 1.3 extension copy-field action. */
struct onf_action_copy_field {
    ovs_be16 type;              /* OFPAT_EXPERIMENTER. */
    ovs_be16 len;               /* Length is padded to 64 bits. */
    ovs_be32 experimenter;      /* ONF_VENDOR_ID. */
    ovs_be16 exp_type;          /* 3200. */
    uint8_t pad[2];             /* Not used. */
    ovs_be16 n_bits;            /* Number of bits to copy. */
    ovs_be16 src_offset;        /* Starting bit offset in source. */
    ovs_be16 dst_offset;        /* Starting bit offset in destination. */
    uint8_t pad2[2];            /* Not used. */
    /* Followed by:
     * - OXM header for source field.
     * - OXM header for destination field.
     * - Padding with 0-bytes (either 0 or 4 of them) to a multiple of 8 bytes.
     * The "pad3" member is the beginning of the above. */
    uint8_t pad3[4];            /* Not used. */
};
OFP_ASSERT(sizeof(struct onf_action_copy_field) == 24);
/* Action structure for NXAST_REG_MOVE.
*
* Copies src[src_ofs:src_ofs+n_bits] to dst[dst_ofs:dst_ofs+n_bits], where
* a[b:c] denotes the bits within 'a' numbered 'b' through 'c' (not including
* bit 'c'). Bit numbering starts at 0 for the least-significant bit, 1 for
* the next most significant bit, and so on.
*
* 'src' and 'dst' are nxm_header values with nxm_hasmask=0. (It doesn't make
* sense to use nxm_hasmask=1 because the action does not do any kind of
* matching; it uses the actual value of a field.)
*
* The following nxm_header values are potentially acceptable as 'src':
*
* - NXM_OF_IN_PORT
* - NXM_OF_ETH_DST
* - NXM_OF_ETH_SRC
* - NXM_OF_ETH_TYPE
* - NXM_OF_VLAN_TCI
* - NXM_OF_IP_TOS
* - NXM_OF_IP_PROTO
* - NXM_OF_IP_SRC
* - NXM_OF_IP_DST
* - NXM_OF_TCP_SRC
* - NXM_OF_TCP_DST
* - NXM_OF_UDP_SRC
* - NXM_OF_UDP_DST
* - NXM_OF_ICMP_TYPE
* - NXM_OF_ICMP_CODE
* - NXM_OF_ARP_OP
* - NXM_OF_ARP_SPA
* - NXM_OF_ARP_TPA
* - NXM_NX_TUN_ID
* - NXM_NX_ARP_SHA
* - NXM_NX_ARP_THA
* - NXM_NX_ICMPV6_TYPE
* - NXM_NX_ICMPV6_CODE
* - NXM_NX_ND_SLL
* - NXM_NX_ND_TLL
* - NXM_NX_REG(idx) for idx in the switch's accepted range.
* - NXM_NX_PKT_MARK
* - NXM_NX_TUN_IPV4_SRC
* - NXM_NX_TUN_IPV4_DST
*
* The following nxm_header values are potentially acceptable as 'dst':
*
* - NXM_OF_ETH_DST
* - NXM_OF_ETH_SRC
* - NXM_OF_IP_TOS
* - NXM_OF_IP_SRC
* - NXM_OF_IP_DST
* - NXM_OF_TCP_SRC
* - NXM_OF_TCP_DST
* - NXM_OF_UDP_SRC
* - NXM_OF_UDP_DST
* - NXM_NX_ARP_SHA
* - NXM_NX_ARP_THA
* - NXM_OF_ARP_OP
* - NXM_OF_ARP_SPA
* - NXM_OF_ARP_TPA
* Modifying any of the above fields changes the corresponding packet
* header.
*
* - NXM_OF_IN_PORT
*
* - NXM_NX_REG(idx) for idx in the switch's accepted range.
*
* - NXM_NX_PKT_MARK
*
* - NXM_OF_VLAN_TCI. Modifying this field's value has side effects on the
* packet's 802.1Q header. Setting a value with CFI=0 removes the 802.1Q
* header (if any), ignoring the other bits. Setting a value with CFI=1
* adds or modifies the 802.1Q header appropriately, setting the TCI field
* to the field's new value (with the CFI bit masked out).
*
* - NXM_NX_TUN_ID, NXM_NX_TUN_IPV4_SRC, NXM_NX_TUN_IPV4_DST. Modifying
* any of these values modifies the corresponding tunnel header field used
* for the packet's next tunnel encapsulation, if allowed by the
* configuration of the output tunnel port.
*
* A given nxm_header value may be used as 'src' or 'dst' only on a flow whose
* nx_match satisfies its prerequisites. For example, NXM_OF_IP_TOS may be
* used only if the flow's nx_match includes an nxm_entry that specifies
* nxm_type=NXM_OF_ETH_TYPE, nxm_hasmask=0, and nxm_value=0x0800.
*
* The switch will reject actions for which src_ofs+n_bits is greater than the
* width of 'src' or dst_ofs+n_bits is greater than the width of 'dst' with
* error type OFPET_BAD_ACTION, code OFPBAC_BAD_ARGUMENT.
*
* This action behaves properly when 'src' overlaps with 'dst', that is, it
* behaves as if 'src' were copied out to a temporary buffer, then the
* temporary buffer copied to 'dst'.
*/
/* Wire format of the NXAST_REG_MOVE action described at length in the
 * comment above.  The fixed part is 16 bytes; the total length (24 with two
 * 4-byte NXM headers) is rounded up to a multiple of 8. */
struct nx_action_reg_move {
    ovs_be16 type;                  /* OFPAT_VENDOR. */
    ovs_be16 len;                   /* Length is 24. */
    ovs_be32 vendor;                /* NX_VENDOR_ID. */
    ovs_be16 subtype;               /* NXAST_REG_MOVE. */
    ovs_be16 n_bits;                /* Number of bits. */
    ovs_be16 src_ofs;               /* Starting bit offset in source. */
    ovs_be16 dst_ofs;               /* Starting bit offset in destination. */
    /* Followed by:
     * - OXM/NXM header for source field (4 or 8 bytes).
     * - OXM/NXM header for destination field (4 or 8 bytes).
     * - Padding with 0-bytes to a multiple of 8 bytes, if necessary. */
};
OFP_ASSERT(sizeof(struct nx_action_reg_move) == 16);
/* Decodes the body shared by the OpenFlow 1.5 and ONF OpenFlow 1.3
 * "copy field" actions into an OFPACT_REG_MOVE.  'oxm_offset' is the byte
 * offset within 'action' at which the source and destination OXM/NXM headers
 * begin; anything following them must be zero padding.
 *
 * Returns 0 if successful, otherwise an OFPERR_* value. */
static enum ofperr
decode_copy_field__(ovs_be16 src_offset, ovs_be16 dst_offset, ovs_be16 n_bits,
                    const void *action, ovs_be16 action_len, size_t oxm_offset,
                    struct ofpbuf *ofpacts)
{
    struct ofpact_reg_move *move;
    enum ofperr error;
    struct ofpbuf b;
    move = ofpact_put_REG_MOVE(ofpacts);
    move->ofpact.raw = ONFACT_RAW13_COPY_FIELD;
    move->src.ofs = ntohs(src_offset);
    move->src.n_bits = ntohs(n_bits);
    move->dst.ofs = ntohs(dst_offset);
    move->dst.n_bits = ntohs(n_bits);
    /* Walk the variable-length tail of the action: first the source field
     * header, then the destination field header. */
    ofpbuf_use_const(&b, action, ntohs(action_len));
    ofpbuf_pull(&b, oxm_offset);
    error = nx_pull_header(&b, &move->src.field, NULL);
    if (error) {
        return error;
    }
    error = nx_pull_header(&b, &move->dst.field, NULL);
    if (error) {
        return error;
    }
    /* Any remaining bytes must be zero padding. */
    if (!is_all_zeros(b.data, b.size)) {
        return OFPERR_NXBRC_MUST_BE_ZERO;
    }
    return nxm_reg_move_check(move, NULL);
}
/* Decodes the OpenFlow 1.5 standard copy-field action. */
static enum ofperr
decode_OFPAT_RAW15_COPY_FIELD(const struct ofp15_action_copy_field *oacf,
                              struct ofpbuf *ofpacts)
{
    /* The OXM headers begin where the fixed part's 'pad2' member starts. */
    return decode_copy_field__(oacf->src_offset, oacf->dst_offset,
                               oacf->n_bits, oacf, oacf->len,
                               OBJECT_OFFSETOF(oacf, pad2), ofpacts);
}
/* Decodes the ONF extension copy-field action for OpenFlow 1.3. */
static enum ofperr
decode_ONFACT_RAW13_COPY_FIELD(const struct onf_action_copy_field *oacf,
                               struct ofpbuf *ofpacts)
{
    /* The OXM headers begin where the fixed part's 'pad3' member starts. */
    return decode_copy_field__(oacf->src_offset, oacf->dst_offset,
                               oacf->n_bits, oacf, oacf->len,
                               OBJECT_OFFSETOF(oacf, pad3), ofpacts);
}
/* Decodes the Nicira NXAST_REG_MOVE action into an OFPACT_REG_MOVE.
 * Mirrors decode_copy_field__() but pulls the field headers from just past
 * the fixed 'struct nx_action_reg_move'.
 *
 * Returns 0 if successful, otherwise an OFPERR_* value. */
static enum ofperr
decode_NXAST_RAW_REG_MOVE(const struct nx_action_reg_move *narm,
                          struct ofpbuf *ofpacts)
{
    struct ofpact_reg_move *move;
    enum ofperr error;
    struct ofpbuf b;
    move = ofpact_put_REG_MOVE(ofpacts);
    move->ofpact.raw = NXAST_RAW_REG_MOVE;
    move->src.ofs = ntohs(narm->src_ofs);
    move->src.n_bits = ntohs(narm->n_bits);
    move->dst.ofs = ntohs(narm->dst_ofs);
    move->dst.n_bits = ntohs(narm->n_bits);
    /* Source then destination OXM/NXM headers follow the fixed part. */
    ofpbuf_use_const(&b, narm, ntohs(narm->len));
    ofpbuf_pull(&b, sizeof *narm);
    error = nx_pull_header(&b, &move->src.field, NULL);
    if (error) {
        return error;
    }
    error = nx_pull_header(&b, &move->dst.field, NULL);
    if (error) {
        return error;
    }
    /* Trailing bytes must be zero padding. */
    if (!is_all_zeros(b.data, b.size)) {
        return OFPERR_NXBRC_MUST_BE_ZERO;
    }
    return nxm_reg_move_check(move, NULL);
}
/* Encodes an OFPACT_REG_MOVE into 'out' in the wire format appropriate for
 * 'ofp_version': OFPAT_COPY_FIELD for OF1.5+, the ONF extension for OF1.3
 * (only when the action originally arrived that way), otherwise the Nicira
 * NXAST_REG_MOVE extension. */
static void
encode_REG_MOVE(const struct ofpact_reg_move *move,
                enum ofp_version ofp_version, struct ofpbuf *out)
{
    /* For OpenFlow 1.3, the choice of ONFACT_RAW13_COPY_FIELD versus
     * NXAST_RAW_REG_MOVE is somewhat difficult.  Neither one is guaranteed to
     * be supported by every OpenFlow 1.3 implementation.  It would be ideal to
     * probe for support.  Until we have that ability, we currently prefer
     * NXAST_RAW_REG_MOVE for backward compatibility with older Open vSwitch
     * versions. */
    size_t start_ofs = out->size;
    if (ofp_version >= OFP15_VERSION) {
        struct ofp15_action_copy_field *copy = put_OFPAT15_COPY_FIELD(out);
        copy->n_bits = htons(move->dst.n_bits);
        copy->src_offset = htons(move->src.ofs);
        copy->dst_offset = htons(move->dst.ofs);
        /* Rewind over the fixed part's padding so that the OXM headers are
         * appended in its place. */
        out->size = out->size - sizeof copy->pad2;
        nx_put_header(out, move->src.field->id, ofp_version, false);
        nx_put_header(out, move->dst.field->id, ofp_version, false);
    } else if (ofp_version == OFP13_VERSION
               && move->ofpact.raw == ONFACT_RAW13_COPY_FIELD) {
        struct onf_action_copy_field *copy = put_ONFACT13_COPY_FIELD(out);
        copy->n_bits = htons(move->dst.n_bits);
        copy->src_offset = htons(move->src.ofs);
        copy->dst_offset = htons(move->dst.ofs);
        /* Same padding-rewind trick as above. */
        out->size = out->size - sizeof copy->pad3;
        nx_put_header(out, move->src.field->id, ofp_version, false);
        nx_put_header(out, move->dst.field->id, ofp_version, false);
    } else {
        struct nx_action_reg_move *narm = put_NXAST_REG_MOVE(out);
        narm->n_bits = htons(move->dst.n_bits);
        narm->src_ofs = htons(move->src.ofs);
        narm->dst_ofs = htons(move->dst.ofs);
        nx_put_header(out, move->src.field->id, 0, false);
        nx_put_header(out, move->dst.field->id, 0, false);
    }
    /* Pad the action back out to a multiple of 8 bytes and fix up 'len'. */
    pad_ofpat(out, start_ofs);
}
/* Parses a "move" action argument of the form "src->dst", where each side is
 * a subfield specification, appending an OFPACT_REG_MOVE to 'ofpacts'.
 *
 * Returns NULL if successful, otherwise a malloc()'d string describing the
 * error.  The caller is responsible for freeing the returned string. */
static char * OVS_WARN_UNUSED_RESULT
parse_REG_MOVE(const char *arg, struct ofpbuf *ofpacts,
               enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    struct ofpact_reg_move *move = ofpact_put_REG_MOVE(ofpacts);
    const char *full_arg = arg;
    char *err;

    /* Parse the source subfield; this advances 'arg' past it. */
    err = mf_parse_subfield__(&move->src, &arg);
    if (err) {
        return err;
    }

    /* The source must be followed by a literal "->". */
    if (strncmp(arg, "->", 2) != 0) {
        return xasprintf("%s: missing `->' following source", full_arg);
    }
    arg += 2;

    /* The remainder of the argument is the destination subfield. */
    err = mf_parse_subfield(&move->dst, arg);
    if (err) {
        return err;
    }

    /* A move only makes sense between subfields of equal width. */
    if (move->src.n_bits != move->dst.n_bits) {
        return xasprintf("%s: source field is %d bits wide but destination is "
                         "%d bits wide", full_arg,
                         move->src.n_bits, move->dst.n_bits);
    }
    return NULL;
}
/* Appends a string representation of OFPACT_REG_MOVE 'a' to 's'. */
static void
format_REG_MOVE(const struct ofpact_reg_move *a, struct ds *s)
{
    nxm_format_reg_move(a, s);
}
/* Action structure for OFPAT12_SET_FIELD. */
struct ofp12_action_set_field {
    ovs_be16 type;                  /* OFPAT12_SET_FIELD. */
    ovs_be16 len;                   /* Length is padded to 64 bits. */
    /* Followed by:
     * - An OXM header, value, and (in OpenFlow 1.5+) optionally a mask.
     * - Enough 0-bytes to pad out to a multiple of 64 bits.
     *
     * The "pad" member is the beginning of the above. */
    uint8_t pad[4];
};
OFP_ASSERT(sizeof(struct ofp12_action_set_field) == 8);
/* Action structure for NXAST_REG_LOAD.
*
* Copies value[0:n_bits] to dst[ofs:ofs+n_bits], where a[b:c] denotes the bits
* within 'a' numbered 'b' through 'c' (not including bit 'c'). Bit numbering
* starts at 0 for the least-significant bit, 1 for the next most significant
* bit, and so on.
*
* 'dst' is an nxm_header with nxm_hasmask=0. See the documentation for
* NXAST_REG_MOVE, above, for the permitted fields and for the side effects of
* loading them.
*
* The 'ofs' and 'n_bits' fields are combined into a single 'ofs_nbits' field
* to avoid enlarging the structure by another 8 bytes. To allow 'n_bits' to
* take a value between 1 and 64 (inclusive) while taking up only 6 bits, it is
* also stored as one less than its true value:
*
* 15 6 5 0
* +------------------------------+------------------+
* | ofs | n_bits - 1 |
* +------------------------------+------------------+
*
* The switch will reject actions for which ofs+n_bits is greater than the
* width of 'dst', or in which any bits in 'value' with value 2**n_bits or
* greater are set to 1, with error type OFPET_BAD_ACTION, code
* OFPBAC_BAD_ARGUMENT.
*/
/* Wire format of NXAST_REG_LOAD, documented in the comment above. */
struct nx_action_reg_load {
    ovs_be16 type;                  /* OFPAT_VENDOR. */
    ovs_be16 len;                   /* Length is 24. */
    ovs_be32 vendor;                /* NX_VENDOR_ID. */
    ovs_be16 subtype;               /* NXAST_REG_LOAD. */
    ovs_be16 ofs_nbits;             /* (ofs << 6) | (n_bits - 1). */
    ovs_be32 dst;                   /* Destination register. */
    ovs_be64 value;                 /* Immediate value. */
};
OFP_ASSERT(sizeof(struct nx_action_reg_load) == 24);
/* Action structure for NXAST_REG_LOAD2.
 *
 * Compared to OFPAT_SET_FIELD, we can use this to set whole or partial fields
 * in any OpenFlow version.  Compared to NXAST_REG_LOAD, we can use this to set
 * OXM experimenter fields. */
struct nx_action_reg_load2 {
    ovs_be16 type;                  /* OFPAT_VENDOR. */
    ovs_be16 len;                   /* At least 16. */
    ovs_be32 vendor;                /* NX_VENDOR_ID. */
    ovs_be16 subtype;               /* NXAST_SET_FIELD. */
    /* Followed by:
     * - An NXM/OXM header, value, and optionally a mask.
     * - Enough 0-bytes to pad out to a multiple of 64 bits.
     *
     * The "pad" member is the beginning of the above. */
    uint8_t pad[6];
};
OFP_ASSERT(sizeof(struct nx_action_reg_load2) == 16);
/* Decodes an OFPAT_SET_FIELD action body 'oasf' into an OFPACT_SET_FIELD
 * appended to 'ofpacts'.  'may_mask' is true for OpenFlow 1.5+, where the
 * OXM entry may carry a mask; for earlier versions the mask is forced to
 * all-ones (the whole field is set).
 *
 * Returns 0 if successful, otherwise an OFPERR_* value. */
static enum ofperr
decode_ofpat_set_field(const struct ofp12_action_set_field *oasf,
                       bool may_mask, struct ofpbuf *ofpacts)
{
    struct ofpact_set_field *sf;
    enum ofperr error;
    struct ofpbuf b;
    sf = ofpact_put_SET_FIELD(ofpacts);
    /* Pull the OXM entry (header, value, optional mask) that follows the
     * fixed part of the action. */
    ofpbuf_use_const(&b, oasf, ntohs(oasf->len));
    ofpbuf_pull(&b, OBJECT_OFFSETOF(oasf, pad));
    error = nx_pull_entry(&b, &sf->field, &sf->value,
                          may_mask ? &sf->mask : NULL);
    if (error) {
        /* Report mask problems with the Set-Field-specific error code. */
        return (error == OFPERR_OFPBMC_BAD_MASK
                ? OFPERR_OFPBAC_BAD_SET_MASK
                : error);
    }
    if (!may_mask) {
        memset(&sf->mask, 0xff, sf->field->n_bytes);
    }
    /* Trailing bytes must be zero padding. */
    if (!is_all_zeros(b.data, b.size)) {
        return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
    }
    /* OpenFlow says specifically that one may not set OXM_OF_IN_PORT via
     * Set-Field. */
    if (sf->field->id == MFF_IN_PORT_OXM) {
        return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
    }
    /* oxm_length is now validated to be compatible with mf_value. */
    if (!sf->field->writable) {
        VLOG_WARN_RL(&rl, "destination field %s is not writable",
                     sf->field->name);
        return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
    }
    /* The value must be valid for match.  OpenFlow 1.5 also says,
     * "In an OXM_OF_VLAN_VID set-field action, the OFPVID_PRESENT bit must be
     * a 1-bit in oxm_value and in oxm_mask." */
    if (!mf_is_value_valid(sf->field, &sf->value)
        || (sf->field->id == MFF_VLAN_VID
            && (!(sf->mask.be16 & htons(OFPVID12_PRESENT))
                || !(sf->value.be16 & htons(OFPVID12_PRESENT))))) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        mf_format(sf->field, &sf->value, NULL, &ds);
        VLOG_WARN_RL(&rl, "Invalid value for set field %s: %s",
                     sf->field->name, ds_cstr(&ds));
        ds_destroy(&ds);
        return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
    }
    return 0;
}
/* Decodes an OpenFlow 1.2-1.4 Set-Field action (no mask allowed). */
static enum ofperr
decode_OFPAT_RAW12_SET_FIELD(const struct ofp12_action_set_field *oasf,
                             struct ofpbuf *ofpacts)
{
    return decode_ofpat_set_field(oasf, false, ofpacts);
}
/* Decodes an OpenFlow 1.5+ Set-Field action (mask allowed). */
static enum ofperr
decode_OFPAT_RAW15_SET_FIELD(const struct ofp12_action_set_field *oasf,
                             struct ofpbuf *ofpacts)
{
    return decode_ofpat_set_field(oasf, true, ofpacts);
}
/* Decodes an NXAST_REG_LOAD action into an OFPACT_SET_FIELD whose value and
 * mask cover only the loaded subfield.
 *
 * Returns 0 if successful, otherwise an OFPERR_* value. */
static enum ofperr
decode_NXAST_RAW_REG_LOAD(const struct nx_action_reg_load *narl,
                          struct ofpbuf *out)
{
    struct ofpact_set_field *sf = ofpact_put_reg_load(out);
    struct mf_subfield dst;
    enum ofperr error;
    sf->ofpact.raw = NXAST_RAW_REG_LOAD;
    dst.field = mf_from_nxm_header(ntohl(narl->dst));
    dst.ofs = nxm_decode_ofs(narl->ofs_nbits);
    dst.n_bits = nxm_decode_n_bits(narl->ofs_nbits);
    error = mf_check_dst(&dst, NULL);
    if (error) {
        return error;
    }
    /* Reject 'narl' if a bit numbered 'n_bits' or higher is set to 1 in
     * narl->value. */
    if (dst.n_bits < 64 && ntohll(narl->value) >> dst.n_bits) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    /* Express the load as a Set-Field on the destination subfield: write the
     * immediate value into 'value' and all-ones into 'mask' at the subfield's
     * position. */
    sf->field = dst.field;
    bitwise_put(ntohll(narl->value),
                &sf->value, dst.field->n_bytes, dst.ofs,
                dst.n_bits);
    bitwise_put(UINT64_MAX,
                &sf->mask, dst.field->n_bytes, dst.ofs,
                dst.n_bits);
    return 0;
}
/* Decodes an NXAST_REG_LOAD2 action, which carries a full NXM/OXM entry
 * (header, value, optional mask), into an OFPACT_SET_FIELD.
 *
 * Returns 0 if successful, otherwise an OFPERR_* value. */
static enum ofperr
decode_NXAST_RAW_REG_LOAD2(const struct nx_action_reg_load2 *narl,
                           struct ofpbuf *out)
{
    struct ofpact_set_field *sf;
    enum ofperr error;
    struct ofpbuf b;
    sf = ofpact_put_SET_FIELD(out);
    sf->ofpact.raw = NXAST_RAW_REG_LOAD2;
    /* Pull the NXM/OXM entry that follows the fixed part. */
    ofpbuf_use_const(&b, narl, ntohs(narl->len));
    ofpbuf_pull(&b, OBJECT_OFFSETOF(narl, pad));
    error = nx_pull_entry(&b, &sf->field, &sf->value, &sf->mask);
    if (error) {
        return error;
    }
    /* Trailing bytes must be zero padding. */
    if (!is_all_zeros(b.data, b.size)) {
        return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
    }
    if (!sf->field->writable) {
        VLOG_WARN_RL(&rl, "destination field %s is not writable",
                     sf->field->name);
        return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
    }
    return 0;
}
/* Appends an OFPAT_SET_FIELD action for 'field' with unmasked value 'value_'
 * to wire buffer 'openflow'.  'value_' holds the field's value in the
 * low-order bytes; it is shifted into big-endian position for the field's
 * width before encoding. */
static void
ofpact_put_set_field(struct ofpbuf *openflow, enum ofp_version ofp_version,
                     enum mf_field_id field, uint64_t value_)
{
    struct ofp12_action_set_field *oasf OVS_UNUSED;
    int n_bytes = mf_from_id(field)->n_bytes;
    size_t start_ofs = openflow->size;
    union mf_value value;
    /* Left-align the value so its most significant byte lands in value.be64's
     * first byte, as the OXM encoding expects. */
    value.be64 = htonll(value_ << (8 * (8 - n_bytes)));
    oasf = put_OFPAT12_SET_FIELD(openflow);
    /* Rewind over the fixed part's padding so the OXM entry replaces it. */
    openflow->size = openflow->size - sizeof oasf->pad;
    nx_put_entry(openflow, field, ofp_version, &value, NULL);
    pad_ofpat(openflow, start_ofs);
}
/* Iterator over the contiguous runs of 1-bits in 'sf''s mask, up to 64 bits
 * per run.  Starting from bit 'dst->ofs + dst->n_bits', finds the next run,
 * stores its position in '*dst' and the corresponding bits of 'sf->value' in
 * '*value', and returns true.  Returns false once no 1-bits remain.
 *
 * Begin iteration with dst->ofs == dst->n_bits == 0. */
static bool
next_load_segment(const struct ofpact_set_field *sf,
                  struct mf_subfield *dst, uint64_t *value)
{
    const int n_bits = sf->field->n_bits;
    const int n_bytes = sf->field->n_bytes;
    int start = dst->ofs + dst->n_bits;

    if (start >= n_bits) {
        return false;
    }

    /* Find the next 1-bit at or after 'start'. */
    dst->field = sf->field;
    dst->ofs = bitwise_scan(&sf->mask, n_bytes, 1, start, n_bits);
    if (dst->ofs >= n_bits) {
        return false;
    }

    /* Extend the run to the next 0-bit, capped at 64 bits. */
    dst->n_bits = bitwise_scan(&sf->mask, n_bytes, 0, dst->ofs + 1,
                               MIN(dst->ofs + 64, n_bits)) - dst->ofs;
    *value = bitwise_get(&sf->value, n_bytes, dst->ofs, dst->n_bits);
    return true;
}
/* Convert 'sf' to a series of REG_LOADs. */
static void
set_field_to_nxast(const struct ofpact_set_field *sf, struct ofpbuf *openflow)
{
    /* If 'sf' cannot be encoded as NXAST_REG_LOAD because it requires an
     * experimenter OXM (or if it came in as NXAST_REG_LOAD2), encode as
     * NXAST_REG_LOAD2.  Otherwise use NXAST_REG_LOAD, which is backward
     * compatible. */
    if (sf->ofpact.raw == NXAST_RAW_REG_LOAD2
        || !mf_nxm_header(sf->field->id)) {
        struct nx_action_reg_load2 *narl OVS_UNUSED;
        size_t start_ofs = openflow->size;
        narl = put_NXAST_REG_LOAD2(openflow);
        /* Rewind over the fixed part's padding so the NXM/OXM entry replaces
         * it, then re-pad the whole action to a multiple of 8 bytes. */
        openflow->size = openflow->size - sizeof narl->pad;
        nx_put_entry(openflow, sf->field->id, 0, &sf->value, &sf->mask);
        pad_ofpat(openflow, start_ofs);
    } else {
        struct mf_subfield dst;
        uint64_t value;
        /* Emit one NXAST_REG_LOAD per contiguous run of mask bits. */
        dst.ofs = dst.n_bits = 0;
        while (next_load_segment(sf, &dst, &value)) {
            struct nx_action_reg_load *narl = put_NXAST_REG_LOAD(openflow);
            narl->ofs_nbits = nxm_encode_ofs_nbits(dst.ofs, dst.n_bits);
            narl->dst = htonl(mf_nxm_header(dst.field->id));
            narl->value = htonll(value);
        }
    }
}
/* Convert 'sf', which must set an entire field, to standard OpenFlow 1.0/1.1
 * actions, if we can, falling back to Nicira extensions if we must.
 *
 * We check only meta-flow types that can appear within set field actions and
 * that have a mapping to compatible action types.  These struct mf_field
 * definitions have a defined OXM or NXM header value and specify the field as
 * writable. */
static void
set_field_to_legacy_openflow(const struct ofpact_set_field *sf,
                             enum ofp_version ofp_version,
                             struct ofpbuf *out)
{
    switch ((int) sf->field->id) {
    case MFF_VLAN_TCI: {
        ovs_be16 tci = sf->value.be16;
        bool cfi = (tci & htons(VLAN_CFI)) != 0;
        uint16_t vid = vlan_tci_to_vid(tci);
        uint8_t pcp = vlan_tci_to_pcp(tci);
        if (ofp_version < OFP11_VERSION) {
            /* NXM_OF_VLAN_TCI to OpenFlow 1.0 mapping:
             *
             * If CFI=1, Add or modify VLAN VID & PCP.
             * If CFI=0, strip VLAN header, if any.
             */
            if (cfi) {
                put_OFPAT10_SET_VLAN_VID(out, vid);
                put_OFPAT10_SET_VLAN_PCP(out, pcp);
            } else {
                put_OFPAT10_STRIP_VLAN(out);
            }
        } else {
            /* NXM_OF_VLAN_TCI to OpenFlow 1.1 mapping:
             *
             * If CFI=1, Add or modify VLAN VID & PCP.
             *    OpenFlow 1.1 set actions only apply if the packet
             *    already has VLAN tags.  To be sure that is the case
             *    we have to push a VLAN header.  As we do not support
             *    multiple layers of VLANs, this is a no-op, if a VLAN
             *    header already exists.  This may backfire, however,
             *    when we start supporting multiple layers of VLANs.
             * If CFI=0, strip VLAN header, if any.
             */
            if (cfi) {
                /* Push a VLAN tag, if one was not seen at action validation
                 * time. */
                if (!sf->flow_has_vlan) {
                    put_OFPAT11_PUSH_VLAN(out, htons(ETH_TYPE_VLAN_8021Q));
                }
                put_OFPAT11_SET_VLAN_VID(out, vid);
                put_OFPAT11_SET_VLAN_PCP(out, pcp);
            } else {
                /* If the flow did not match on vlan, we have no way of
                 * knowing if the vlan tag exists, so we must POP just to be
                 * sure. */
                put_OFPAT11_POP_VLAN(out);
            }
        }
        break;
    }
    case MFF_VLAN_VID: {
        uint16_t vid = ntohs(sf->value.be16) & VLAN_VID_MASK;
        if (ofp_version == OFP10_VERSION) {
            put_OFPAT10_SET_VLAN_VID(out, vid);
        } else {
            put_OFPAT11_SET_VLAN_VID(out, vid);
        }
        break;
    }
    case MFF_VLAN_PCP:
        if (ofp_version == OFP10_VERSION) {
            put_OFPAT10_SET_VLAN_PCP(out, sf->value.u8);
        } else {
            put_OFPAT11_SET_VLAN_PCP(out, sf->value.u8);
        }
        break;
    case MFF_ETH_SRC:
        memcpy(put_OFPAT_SET_DL_SRC(out, ofp_version)->dl_addr,
               sf->value.mac, ETH_ADDR_LEN);
        break;
    case MFF_ETH_DST:
        memcpy(put_OFPAT_SET_DL_DST(out, ofp_version)->dl_addr,
               sf->value.mac, ETH_ADDR_LEN);
        break;
    case MFF_IPV4_SRC:
        put_OFPAT_SET_NW_SRC(out, ofp_version, sf->value.be32);
        break;
    case MFF_IPV4_DST:
        put_OFPAT_SET_NW_DST(out, ofp_version, sf->value.be32);
        break;
    case MFF_IP_DSCP:
        put_OFPAT_SET_NW_TOS(out, ofp_version, sf->value.u8);
        break;
    case MFF_IP_DSCP_SHIFTED:
        /* The legacy action takes the DSCP in the TOS byte's upper bits. */
        put_OFPAT_SET_NW_TOS(out, ofp_version, sf->value.u8 << 2);
        break;
    case MFF_TCP_SRC:
    case MFF_UDP_SRC:
        put_OFPAT_SET_TP_SRC(out, sf->value.be16);
        break;
    case MFF_TCP_DST:
    case MFF_UDP_DST:
        put_OFPAT_SET_TP_DST(out, sf->value.be16);
        break;
    default:
        /* No legacy equivalent: fall back to Nicira reg_load encoding. */
        set_field_to_nxast(sf, out);
        break;
    }
}
/* Encodes 'sf' as a standard OFPAT_SET_FIELD action appended to 'out'. */
static void
set_field_to_set_field(const struct ofpact_set_field *sf,
                       enum ofp_version ofp_version, struct ofpbuf *out)
{
    struct ofp12_action_set_field *oasf OVS_UNUSED;
    size_t start_ofs = out->size;
    oasf = put_OFPAT12_SET_FIELD(out);
    /* Rewind over the fixed part's padding so the OXM entry replaces it. */
    out->size = out->size - sizeof oasf->pad;
    nx_put_entry(out, sf->field->id, ofp_version, &sf->value, &sf->mask);
    pad_ofpat(out, start_ofs);
}
/* Encodes an OFPACT_SET_FIELD into 'out', choosing among OFPAT_SET_FIELD,
 * the legacy OF1.0/1.1 per-field actions, and the Nicira reg_load extensions
 * depending on 'ofp_version', how the action originally arrived, and whether
 * the mask covers the whole field. */
static void
encode_SET_FIELD(const struct ofpact_set_field *sf,
                 enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version >= OFP15_VERSION) {
        /* OF1.5+ only has Set-Field (reg_load is redundant so we drop it
         * entirely). */
        set_field_to_set_field(sf, ofp_version, out);
    } else if (sf->ofpact.raw == NXAST_RAW_REG_LOAD ||
               sf->ofpact.raw == NXAST_RAW_REG_LOAD2) {
        /* It came in as reg_load, send it out the same way. */
        set_field_to_nxast(sf, out);
    } else if (ofp_version < OFP12_VERSION) {
        /* OpenFlow 1.0 and 1.1 don't have Set-Field. */
        set_field_to_legacy_openflow(sf, ofp_version, out);
    } else if (is_all_ones((const uint8_t *) &sf->mask, sf->field->n_bytes)) {
        /* We're encoding to OpenFlow 1.2, 1.3, or 1.4.  The action sets an
         * entire field, so encode it as OFPAT_SET_FIELD. */
        set_field_to_set_field(sf, ofp_version, out);
    } else {
        /* We're encoding to OpenFlow 1.2, 1.3, or 1.4.  The action cannot be
         * encoded as OFPAT_SET_FIELD because it does not set an entire field,
         * so encode it as reg_load. */
        set_field_to_nxast(sf, out);
    }
}
/* Parses a "set_field" action with argument 'arg', appending the parsed
 * action to 'ofpacts'.
 *
 * 'arg' must be a writable string of the form "value->field"; it is modified
 * in place (the "->" is overwritten with a NUL to split value from field).
 *
 * Returns NULL if successful, otherwise a malloc()'d string describing the
 * error.  The caller is responsible for freeing the returned string. */
static char * OVS_WARN_UNUSED_RESULT
set_field_parse__(char *arg, struct ofpbuf *ofpacts,
                  enum ofputil_protocol *usable_protocols)
{
    struct ofpact_set_field *sf = ofpact_put_SET_FIELD(ofpacts);
    char *value;
    char *delim;
    char *key;
    const struct mf_field *mf;
    char *error;
    value = arg;
    delim = strstr(arg, "->");
    if (!delim) {
        return xasprintf("%s: missing `->'", arg);
    }
    if (strlen(delim) <= strlen("->")) {
        return xasprintf("%s: missing field name following `->'", arg);
    }
    /* The field name is everything after "->". */
    key = delim + strlen("->");
    mf = mf_from_name(key);
    if (!mf) {
        return xasprintf("%s is not a valid OXM field name", key);
    }
    if (!mf->writable) {
        return xasprintf("%s is read-only", key);
    }
    sf->field = mf;
    /* Terminate the value part so mf_parse() sees only the value text. */
    delim[0] = '\0';
    error = mf_parse(mf, value, &sf->value, &sf->mask);
    if (error) {
        return error;
    }
    if (!mf_is_value_valid(mf, &sf->value)) {
        return xasprintf("%s is not a valid value for field %s", value, key);
    }
    *usable_protocols &= mf->usable_protocols_exact;
    return NULL;
}
/* Parses 'arg' as the argument to a "set_field" action, and appends such an
 * action to 'ofpacts'.
 *
 * Returns NULL if successful, otherwise a malloc()'d string describing the
 * error.  The caller is responsible for freeing the returned string. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_FIELD(const char *arg, struct ofpbuf *ofpacts,
                enum ofputil_protocol *usable_protocols)
{
    /* set_field_parse__() modifies its argument in place, so hand it a
     * heap-allocated copy of 'arg'. */
    char *writable = xstrdup(arg);
    char *error = set_field_parse__(writable, ofpacts, usable_protocols);

    free(writable);
    return error;
}
/* Parses a "load" action argument of the form "value->dst", where 'dst' is a
 * subfield specification, appending the equivalent Set-Field (reg_load
 * flavored) action to 'ofpacts'.
 *
 * Returns NULL if successful, otherwise a malloc()'d string describing the
 * error.  The caller is responsible for freeing the returned string. */
static char * OVS_WARN_UNUSED_RESULT
parse_reg_load(char *arg, struct ofpbuf *ofpacts)
{
    struct ofpact_set_field *sf = ofpact_put_reg_load(ofpacts);
    const char *full_arg = arg;
    uint64_t value = strtoull(arg, (char **) &arg, 0);
    struct mf_subfield dst;
    char *error;
    if (strncmp(arg, "->", 2)) {
        return xasprintf("%s: missing `->' following value", full_arg);
    }
    arg += 2;
    error = mf_parse_subfield(&dst, arg);
    if (error) {
        return error;
    }
    /* The immediate value must fit in the destination subfield. */
    if (dst.n_bits < 64 && (value >> dst.n_bits) != 0) {
        return xasprintf("%s: value %"PRIu64" does not fit into %d bits",
                         full_arg, value, dst.n_bits);
    }
    sf->field = dst.field;
    /* Write 'value' and an all-ones mask at the subfield's position. */
    memset(&sf->value, 0, sizeof sf->value);
    bitwise_put(value, &sf->value, dst.field->n_bytes, dst.ofs, dst.n_bits);
    bitwise_put(UINT64_MAX, &sf->mask,
                dst.field->n_bytes, dst.ofs, dst.n_bits);
    return NULL;
}
/* Appends a string representation of OFPACT_SET_FIELD 'a' to 's', using
 * "load:VALUE->DST" syntax for actions that arrived as NXAST_REG_LOAD and
 * "set_field:VALUE->FIELD" syntax otherwise. */
static void
format_SET_FIELD(const struct ofpact_set_field *a, struct ds *s)
{
    if (a->ofpact.raw == NXAST_RAW_REG_LOAD) {
        struct mf_subfield dst;
        uint64_t value;
        /* One "load:" clause per contiguous run of mask bits. */
        dst.ofs = dst.n_bits = 0;
        while (next_load_segment(a, &dst, &value)) {
            ds_put_format(s, "load:%#"PRIx64"->", value);
            mf_format_subfield(&dst, s);
            ds_put_char(s, ',');
        }
        ds_chomp(s, ',');
    } else {
        ds_put_cstr(s, "set_field:");
        mf_format(a->field, &a->value, &a->mask, s);
        ds_put_format(s, "->%s", a->field->name);
    }
}
/* Appends an OFPACT_SET_FIELD ofpact to 'ofpacts' and returns it.  The ofpact
 * is marked such that, if possible, it will be translated to OpenFlow as
 * NXAST_REG_LOAD extension actions rather than OFPAT_SET_FIELD, either because
 * that was the way that the action was expressed when it came into OVS or for
 * backward compatibility. */
struct ofpact_set_field *
ofpact_put_reg_load(struct ofpbuf *ofpacts)
{
    struct ofpact_set_field *sf = ofpact_put_SET_FIELD(ofpacts);
    sf->ofpact.raw = NXAST_RAW_REG_LOAD;
    return sf;
}
/* Action structure for NXAST_STACK_PUSH and NXAST_STACK_POP.
 *
 * Pushes (or pops) field[offset: offset + n_bits] to (or from)
 * top of the stack.
 */
struct nx_action_stack {
    ovs_be16 type;                  /* OFPAT_VENDOR. */
    ovs_be16 len;                   /* Length is 16. */
    ovs_be32 vendor;                /* NX_VENDOR_ID. */
    ovs_be16 subtype;               /* NXAST_STACK_PUSH or NXAST_STACK_POP. */
    ovs_be16 offset;                /* Bit offset into the field. */
    /* Followed by:
     * - OXM/NXM header for field to push or pop (4 or 8 bytes).
     * - ovs_be16 'n_bits', the number of bits to extract from the field.
     * - Enough 0-bytes to pad out the action to 24 bytes. */
    uint8_t pad[12];                /* See above. */
};
OFP_ASSERT(sizeof(struct nx_action_stack) == 24);
/* Decodes the subfield (field header, offset, n_bits) shared by
 * NXAST_STACK_PUSH and NXAST_STACK_POP from 'nasp' into 'stack_action'.
 *
 * Returns 0 if successful, otherwise an OFPERR_* value. */
static enum ofperr
decode_stack_action(const struct nx_action_stack *nasp,
                    struct ofpact_stack *stack_action)
{
    enum ofperr error;
    struct ofpbuf b;
    stack_action->subfield.ofs = ntohs(nasp->offset);
    /* The variable part lives inside 'pad': first the OXM/NXM field header,
     * then a 2-byte n_bits, then zero padding. */
    ofpbuf_use_const(&b, nasp, sizeof *nasp);
    ofpbuf_pull(&b, OBJECT_OFFSETOF(nasp, pad));
    error = nx_pull_header(&b, &stack_action->subfield.field, NULL);
    if (error) {
        return error;
    }
    stack_action->subfield.n_bits = ntohs(*(const ovs_be16 *) b.data);
    ofpbuf_pull(&b, 2);
    if (!is_all_zeros(b.data, b.size)) {
        return OFPERR_NXBRC_MUST_BE_ZERO;
    }
    return 0;
}
/* Decodes an NXAST_STACK_PUSH action and validates the pushed subfield. */
static enum ofperr
decode_NXAST_RAW_STACK_PUSH(const struct nx_action_stack *nasp,
                            struct ofpbuf *ofpacts)
{
    struct ofpact_stack *push = ofpact_put_STACK_PUSH(ofpacts);
    enum ofperr error = decode_stack_action(nasp, push);
    return error ? error : nxm_stack_push_check(push, NULL);
}
/* Decodes an NXAST_STACK_POP action and validates the popped-into subfield. */
static enum ofperr
decode_NXAST_RAW_STACK_POP(const struct nx_action_stack *nasp,
                           struct ofpbuf *ofpacts)
{
    struct ofpact_stack *pop = ofpact_put_STACK_POP(ofpacts);
    enum ofperr error = decode_stack_action(nasp, pop);
    return error ? error : nxm_stack_pop_check(pop, NULL);
}
/* Fills in the variable part ('pad') of a freshly appended stack push/pop
 * action 'nasp': the subfield's OXM/NXM header followed by its n_bits.
 * The remaining pad bytes were zeroed when 'nasp' was put. */
static void
encode_STACK_op(const struct ofpact_stack *stack_action,
                struct nx_action_stack *nasp)
{
    struct ofpbuf b;
    ovs_be16 n_bits;
    nasp->offset = htons(stack_action->subfield.ofs);
    /* Build the variable part in place over 'nasp' using a stack ofpbuf. */
    ofpbuf_use_stack(&b, nasp, ntohs(nasp->len));
    ofpbuf_put_uninit(&b, OBJECT_OFFSETOF(nasp, pad));
    nx_put_header(&b, stack_action->subfield.field->id, 0, false);
    n_bits = htons(stack_action->subfield.n_bits);
    ofpbuf_put(&b, &n_bits, sizeof n_bits);
}
/* Encodes an OFPACT_STACK_PUSH as NXAST_STACK_PUSH. */
static void
encode_STACK_PUSH(const struct ofpact_stack *stack,
                  enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
    encode_STACK_op(stack, put_NXAST_STACK_PUSH(out));
}
/* Encodes an OFPACT_STACK_POP as NXAST_STACK_POP. */
static void
encode_STACK_POP(const struct ofpact_stack *stack,
                 enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
    encode_STACK_op(stack, put_NXAST_STACK_POP(out));
}
/* Parses a "push" action argument; returns NULL or a malloc()'d error. */
static char * OVS_WARN_UNUSED_RESULT
parse_STACK_PUSH(char *arg, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return nxm_parse_stack_action(ofpact_put_STACK_PUSH(ofpacts), arg);
}
/* Parses a "pop" action argument; returns NULL or a malloc()'d error. */
static char * OVS_WARN_UNUSED_RESULT
parse_STACK_POP(char *arg, struct ofpbuf *ofpacts,
                enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return nxm_parse_stack_action(ofpact_put_STACK_POP(ofpacts), arg);
}
/* Appends a string representation of OFPACT_STACK_PUSH 'a' to 's'. */
static void
format_STACK_PUSH(const struct ofpact_stack *a, struct ds *s)
{
    nxm_format_stack_push(a, s);
}
/* Appends a string representation of OFPACT_STACK_POP 'a' to 's'. */
static void
format_STACK_POP(const struct ofpact_stack *a, struct ds *s)
{
    nxm_format_stack_pop(a, s);
}
/* Action structure for NXAST_DEC_TTL_CNT_IDS.
*
* If the packet is not IPv4 or IPv6, does nothing. For IPv4 or IPv6, if the
* TTL or hop limit is at least 2, decrements it by 1. Otherwise, if TTL or
* hop limit is 0 or 1, sends a packet-in to the controllers with each of the
* 'n_controllers' controller IDs specified in 'cnt_ids'.
*
* (This differs from NXAST_DEC_TTL in that for NXAST_DEC_TTL the packet-in is
* sent only to controllers with id 0.)
*/
/* Wire format of NXAST_DEC_TTL_CNT_IDS, described in the comment above. */
struct nx_action_cnt_ids {
    ovs_be16 type;              /* OFPAT_VENDOR. */
    ovs_be16 len;               /* Length including slaves. */
    ovs_be32 vendor;            /* NX_VENDOR_ID. */
    ovs_be16 subtype;           /* NXAST_DEC_TTL_CNT_IDS. */
    ovs_be16 n_controllers;     /* Number of controllers. */
    uint8_t zeros[4];           /* Must be zero. */
    /* Followed by 1 or more controller ids.
     *
     * uint16_t cnt_ids[];      // Controller ids.
     * uint8_t pad[];           // Must be 0 to 8-byte align cnt_ids[].
     */
};
OFP_ASSERT(sizeof(struct nx_action_cnt_ids) == 16);
/* Decodes the standard OpenFlow "dec_nw_ttl" action into an OFPACT_DEC_TTL
 * whose single controller id is 0 (the standard action's packet-ins go only
 * to controller 0 -- see the NXAST_DEC_TTL_CNT_IDS comment above).
 *
 * Always succeeds, returning 0. */
static enum ofperr
decode_OFPAT_RAW_DEC_NW_TTL(struct ofpbuf *out)
{
    uint16_t id = 0;
    struct ofpact_cnt_ids *ids;
    ids = ofpact_put_DEC_TTL(out);
    ids->n_controllers = 1;
    /* Append the single controller id to the variable-length part, then
     * re-fetch 'ids': ofpbuf_put() may reallocate 'out'. */
    ofpbuf_put(out, &id, sizeof id);
    ids = out->header;
    ofpact_update_len(out, &ids->ofpact);
    /* No failure paths, so return success directly (the previous version
     * carried a dead 'enum ofperr error = 0' local). */
    return 0;
}
/* Decodes an NXAST_DEC_TTL_CNT_IDS action, which carries an explicit list of
 * controller ids to receive the packet-in when the TTL reaches 0 or 1.
 *
 * Returns 0 if successful, otherwise an OFPERR_* value. */
static enum ofperr
decode_NXAST_RAW_DEC_TTL_CNT_IDS(const struct nx_action_cnt_ids *nac_ids,
                                 struct ofpbuf *out)
{
    struct ofpact_cnt_ids *ids;
    size_t ids_size;
    int i;
    ids = ofpact_put_DEC_TTL(out);
    ids->ofpact.raw = NXAST_RAW_DEC_TTL_CNT_IDS;
    ids->n_controllers = ntohs(nac_ids->n_controllers);
    /* Bytes available after the fixed part for the id array. */
    ids_size = ntohs(nac_ids->len) - sizeof *nac_ids;
    if (!is_all_zeros(nac_ids->zeros, sizeof nac_ids->zeros)) {
        return OFPERR_NXBRC_MUST_BE_ZERO;
    }
    if (ids_size < ids->n_controllers * sizeof(ovs_be16)) {
        VLOG_WARN_RL(&rl, "Nicira action dec_ttl_cnt_ids only has %"PRIuSIZE" "
                     "bytes allocated for controller ids.  %"PRIuSIZE" bytes "
                     "are required for %"PRIu16" controllers.",
                     ids_size, ids->n_controllers * sizeof(ovs_be16),
                     ids->n_controllers);
        return OFPERR_OFPBAC_BAD_LEN;
    }
    for (i = 0; i < ids->n_controllers; i++) {
        uint16_t id = ntohs(((ovs_be16 *)(nac_ids + 1))[i]);
        /* ofpbuf_put() may reallocate 'out', so refresh 'ids' each time. */
        ofpbuf_put(out, &id, sizeof id);
        ids = out->header;
    }
    ofpact_update_len(out, &ids->ofpact);
    return 0;
}
/* Encodes an OFPACT_DEC_TTL into 'out'.  Uses the Nicira extension when the
 * action came in that way or names any controller other than a single id 0;
 * otherwise the standard dec_nw_ttl action suffices. */
static void
encode_DEC_TTL(const struct ofpact_cnt_ids *dec_ttl,
               enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (dec_ttl->ofpact.raw == NXAST_RAW_DEC_TTL_CNT_IDS
        || dec_ttl->n_controllers != 1
        || dec_ttl->cnt_ids[0] != 0) {
        struct nx_action_cnt_ids *nac_ids = put_NXAST_DEC_TTL_CNT_IDS(out);
        /* Space for the id array, padded to the action alignment. */
        int ids_len = ROUND_UP(2 * dec_ttl->n_controllers, OFP_ACTION_ALIGN);
        ovs_be16 *ids;
        size_t i;
        nac_ids->len = htons(ntohs(nac_ids->len) + ids_len);
        nac_ids->n_controllers = htons(dec_ttl->n_controllers);
        ids = ofpbuf_put_zeros(out, ids_len);
        for (i = 0; i < dec_ttl->n_controllers; i++) {
            ids[i] = htons(dec_ttl->cnt_ids[i]);
        }
    } else {
        put_OFPAT_DEC_NW_TTL(out, ofp_version);
    }
}
/* Appends a plain "dec_ttl" (no controller list) to 'ofpacts' with the single
 * default controller id 0. */
static void
parse_noargs_dec_ttl(struct ofpbuf *ofpacts)
{
    struct ofpact_cnt_ids *ids;
    uint16_t id = 0;
    ofpact_put_DEC_TTL(ofpacts);
    /* Append the id, then re-fetch the header: ofpbuf_put() may reallocate. */
    ofpbuf_put(ofpacts, &id, sizeof id);
    ids = ofpacts->header;
    ids->n_controllers++;
    ofpact_update_len(ofpacts, &ids->ofpact);
}
/* Parses a "dec_ttl" action: either bare (controller id 0) or with a
 * comma/space-separated list of controller ids.
 *
 * Returns NULL if successful, otherwise a malloc()'d string describing the
 * error.  The caller is responsible for freeing the returned string. */
static char * OVS_WARN_UNUSED_RESULT
parse_DEC_TTL(char *arg, struct ofpbuf *ofpacts,
              enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    if (*arg == '\0') {
        parse_noargs_dec_ttl(ofpacts);
    } else {
        struct ofpact_cnt_ids *ids;
        char *cntr;
        ids = ofpact_put_DEC_TTL(ofpacts);
        ids->ofpact.raw = NXAST_RAW_DEC_TTL_CNT_IDS;
        for (cntr = strtok_r(arg, ", ", &arg); cntr != NULL;
             cntr = strtok_r(NULL, ", ", &arg)) {
            uint16_t id = atoi(cntr);
            /* ofpbuf_put() may reallocate, so refresh 'ids' each time. */
            ofpbuf_put(ofpacts, &id, sizeof id);
            ids = ofpacts->header;
            ids->n_controllers++;
        }
        if (!ids->n_controllers) {
            return xstrdup("dec_ttl_cnt_ids: expected at least one controller "
                           "id.");
        }
        ofpact_update_len(ofpacts, &ids->ofpact);
    }
    return NULL;
}
/* Appends a string representation of OFPACT_DEC_TTL 'a' to 's': bare
 * "dec_ttl", or "dec_ttl(id1,id2,...)" for the cnt_ids flavor. */
static void
format_DEC_TTL(const struct ofpact_cnt_ids *a, struct ds *s)
{
    ds_put_cstr(s, "dec_ttl");
    if (a->ofpact.raw == NXAST_RAW_DEC_TTL_CNT_IDS) {
        size_t i;

        ds_put_cstr(s, "(");
        for (i = 0; i < a->n_controllers; i++) {
            if (i > 0) {
                ds_put_cstr(s, ",");
            }
            ds_put_format(s, "%"PRIu16, a->cnt_ids[i]);
        }
        ds_put_cstr(s, ")");
    }
}
/* Set MPLS label actions. */

/* Decodes a set-MPLS-label action; 'label' is kept in network byte order. */
static enum ofperr
decode_OFPAT_RAW_SET_MPLS_LABEL(ovs_be32 label, struct ofpbuf *out)
{
    ofpact_put_SET_MPLS_LABEL(out)->label = label;
    return 0;
}
/* Encodes OFPACT_SET_MPLS_LABEL: the dedicated action before OF1.2,
 * Set-Field from OF1.2 on. */
static void
encode_SET_MPLS_LABEL(const struct ofpact_mpls_label *label,
                      enum ofp_version ofp_version,
                      struct ofpbuf *out)
{
    if (ofp_version < OFP12_VERSION) {
        put_OFPAT_SET_MPLS_LABEL(out, ofp_version, label->label);
    } else {
        ofpact_put_set_field(out, ofp_version, MFF_MPLS_LABEL,
                             ntohl(label->label));
    }
}
/* Parses a "set_mpls_label" argument; returns NULL or a malloc()'d error. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_MPLS_LABEL(char *arg, struct ofpbuf *ofpacts,
                     enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    struct ofpact_mpls_label *mpls_label = ofpact_put_SET_MPLS_LABEL(ofpacts);
    if (*arg == '\0') {
        return xstrdup("set_mpls_label: expected label.");
    }
    /* Stored in network byte order. */
    mpls_label->label = htonl(atoi(arg));
    return NULL;
}
/* Appends "set_mpls_label(N)" to 's'. */
static void
format_SET_MPLS_LABEL(const struct ofpact_mpls_label *a, struct ds *s)
{
    ds_put_format(s, "set_mpls_label(%"PRIu32")", ntohl(a->label));
}
/* Set MPLS TC actions. */

/* Decodes a set-MPLS-TC (traffic class) action. */
static enum ofperr
decode_OFPAT_RAW_SET_MPLS_TC(uint8_t tc, struct ofpbuf *out)
{
    ofpact_put_SET_MPLS_TC(out)->tc = tc;
    return 0;
}
/* Encodes OFPACT_SET_MPLS_TC: the dedicated action before OF1.2, Set-Field
 * from OF1.2 on. */
static void
encode_SET_MPLS_TC(const struct ofpact_mpls_tc *tc,
                   enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version < OFP12_VERSION) {
        put_OFPAT_SET_MPLS_TC(out, ofp_version, tc->tc);
    } else {
        ofpact_put_set_field(out, ofp_version, MFF_MPLS_TC, tc->tc);
    }
}
/* Parses a "set_mpls_tc" argument; returns NULL or a malloc()'d error. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_MPLS_TC(char *arg, struct ofpbuf *ofpacts,
                  enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    struct ofpact_mpls_tc *mpls_tc = ofpact_put_SET_MPLS_TC(ofpacts);
    if (*arg == '\0') {
        return xstrdup("set_mpls_tc: expected tc.");
    }
    mpls_tc->tc = atoi(arg);
    return NULL;
}
/* Appends a string representation of OFPACT_SET_MPLS_TC to 's'.
 *
 * This is the TC (traffic class) action, so print it under the "set_mpls_tc"
 * keyword that parse_SET_MPLS_TC() accepts; the previous code mistakenly
 * printed "set_mpls_ttl", which could not round-trip through the parser. */
static void
format_SET_MPLS_TC(const struct ofpact_mpls_tc *a, struct ds *s)
{
    ds_put_format(s, "set_mpls_tc(%"PRIu8")", a->tc);
}
/* Set MPLS TTL actions. */

/* Decodes a set-MPLS-TTL action. */
static enum ofperr
decode_OFPAT_RAW_SET_MPLS_TTL(uint8_t ttl, struct ofpbuf *out)
{
    ofpact_put_SET_MPLS_TTL(out)->ttl = ttl;
    return 0;
}
/* Encodes OFPACT_SET_MPLS_TTL in the form appropriate for 'ofp_version'. */
static void
encode_SET_MPLS_TTL(const struct ofpact_mpls_ttl *ttl,
                    enum ofp_version ofp_version, struct ofpbuf *out)
{
    put_OFPAT_SET_MPLS_TTL(out, ofp_version, ttl->ttl);
}
/* Parses 'arg' as the argument to a "set_mpls_ttl" action, and appends such an
 * action to 'ofpacts'.
 *
 * Returns NULL if successful, otherwise a malloc()'d string describing the
 * error.  The caller is responsible for freeing the returned string. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_MPLS_TTL(char *arg, struct ofpbuf *ofpacts,
                   enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    struct ofpact_mpls_ttl *mpls_ttl = ofpact_put_SET_MPLS_TTL(ofpacts);
    if (*arg == '\0') {
        return xstrdup("set_mpls_ttl: expected ttl.");
    }
    mpls_ttl->ttl = atoi(arg);
    return NULL;
}
/* Appends "set_mpls_ttl(N)" to 's'. */
static void
format_SET_MPLS_TTL(const struct ofpact_mpls_ttl *a, struct ds *s)
{
    ds_put_format(s, "set_mpls_ttl(%"PRIu8")", a->ttl);
}
/* Decrement MPLS TTL actions. */

/* Decodes a dec-MPLS-TTL action (no arguments). */
static enum ofperr
decode_OFPAT_RAW_DEC_MPLS_TTL(struct ofpbuf *out)
{
    ofpact_put_DEC_MPLS_TTL(out);
    return 0;
}
/* Encodes OFPACT_DEC_MPLS_TTL in the form appropriate for 'ofp_version'. */
static void
encode_DEC_MPLS_TTL(const struct ofpact_null *null OVS_UNUSED,
                    enum ofp_version ofp_version, struct ofpbuf *out)
{
    put_OFPAT_DEC_MPLS_TTL(out, ofp_version);
}
/* Parses "dec_mpls_ttl" (takes no argument); always succeeds. */
static char * OVS_WARN_UNUSED_RESULT
parse_DEC_MPLS_TTL(char *arg OVS_UNUSED, struct ofpbuf *ofpacts,
                   enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    ofpact_put_DEC_MPLS_TTL(ofpacts);
    return NULL;
}
/* Prints the fixed keyword for the decrement-MPLS-TTL action. */
static void
format_DEC_MPLS_TTL(const struct ofpact_null *a OVS_UNUSED, struct ds *s)
{
    ds_put_cstr(s, "dec_mpls_ttl");
}
/* Push MPLS label action. */
/* Decodes a push-MPLS action; 'ethertype' must be one of the MPLS
 * ethertypes or the action is rejected. */
static enum ofperr
decode_OFPAT_RAW_PUSH_MPLS(ovs_be16 ethertype, struct ofpbuf *out)
{
    if (!eth_type_mpls(ethertype)) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    ofpact_put_PUSH_MPLS(out)->ethertype = ethertype;
    return 0;
}
static void
encode_PUSH_MPLS(const struct ofpact_push_mpls *push_mpls,
enum ofp_version ofp_version, struct ofpbuf *out)
{
put_OFPAT_PUSH_MPLS(out, ofp_version, push_mpls->ethertype);
}
/* Parses "push_mpls:ETHERTYPE" and appends the action to 'ofpacts'.
 * Returns NULL on success or a malloc()'d error string on failure. */
static char * OVS_WARN_UNUSED_RESULT
parse_PUSH_MPLS(char *arg, struct ofpbuf *ofpacts,
                enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    uint16_t ethertype;
    char *error = str_to_u16(arg, "push_mpls", &ethertype);

    if (error) {
        return error;
    }
    ofpact_put_PUSH_MPLS(ofpacts)->ethertype = htons(ethertype);
    return NULL;
}
/* Formats 'a' as "push_mpls:0xNNNN". */
static void
format_PUSH_MPLS(const struct ofpact_push_mpls *a, struct ds *s)
{
    uint16_t ethertype = ntohs(a->ethertype);
    ds_put_format(s, "push_mpls:0x%04"PRIx16, ethertype);
}
/* Pop MPLS label action. */
/* Decodes a pop-MPLS action; any successor ethertype is accepted. */
static enum ofperr
decode_OFPAT_RAW_POP_MPLS(ovs_be16 ethertype, struct ofpbuf *out)
{
    struct ofpact_pop_mpls *pm = ofpact_put_POP_MPLS(out);
    pm->ethertype = ethertype;
    return 0;
}
static void
encode_POP_MPLS(const struct ofpact_pop_mpls *pop_mpls,
enum ofp_version ofp_version, struct ofpbuf *out)
{
put_OFPAT_POP_MPLS(out, ofp_version, pop_mpls->ethertype);
}
/* Parses "pop_mpls:ETHERTYPE" and appends the action to 'ofpacts'.
 * Returns NULL on success or a malloc()'d error string on failure. */
static char * OVS_WARN_UNUSED_RESULT
parse_POP_MPLS(char *arg, struct ofpbuf *ofpacts,
               enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    uint16_t ethertype;
    char *error = str_to_u16(arg, "pop_mpls", &ethertype);

    if (error) {
        return error;
    }
    ofpact_put_POP_MPLS(ofpacts)->ethertype = htons(ethertype);
    return NULL;
}
/* Formats 'a' as "pop_mpls:0xNNNN". */
static void
format_POP_MPLS(const struct ofpact_pop_mpls *a, struct ds *s)
{
    uint16_t ethertype = ntohs(a->ethertype);
    ds_put_format(s, "pop_mpls:0x%04"PRIx16, ethertype);
}
/* Set tunnel ID actions. */
/* Decodes the 32-bit Nicira set-tunnel action, remembering which raw
 * variant it came from so re-encoding can round-trip. */
static enum ofperr
decode_NXAST_RAW_SET_TUNNEL(uint32_t tun_id, struct ofpbuf *out)
{
    struct ofpact_tunnel *t = ofpact_put_SET_TUNNEL(out);
    t->tun_id = tun_id;
    t->ofpact.raw = NXAST_RAW_SET_TUNNEL;
    return 0;
}
/* Decodes the 64-bit Nicira set-tunnel action, remembering which raw
 * variant it came from so re-encoding can round-trip. */
static enum ofperr
decode_NXAST_RAW_SET_TUNNEL64(uint64_t tun_id, struct ofpbuf *out)
{
    struct ofpact_tunnel *t = ofpact_put_SET_TUNNEL(out);
    t->tun_id = tun_id;
    t->ofpact.raw = NXAST_RAW_SET_TUNNEL64;
    return 0;
}
/* Encodes a set-tunnel action.  From OF1.2 on it becomes a set-field on the
 * tunnel-ID field; before OF1.2 the Nicira extension action is used,
 * choosing the 64-bit variant when the value or the decoded origin
 * requires it. */
static void
encode_SET_TUNNEL(const struct ofpact_tunnel *tunnel,
                  enum ofp_version ofp_version, struct ofpbuf *out)
{
    uint64_t tun_id = tunnel->tun_id;

    if (ofp_version >= OFP12_VERSION) {
        ofpact_put_set_field(out, ofp_version, MFF_TUN_ID, tun_id);
    } else if (tun_id > UINT32_MAX
               || tunnel->ofpact.raw == NXAST_RAW_SET_TUNNEL64) {
        put_NXAST_SET_TUNNEL64(out, tun_id);
    } else {
        put_NXAST_SET_TUNNEL(out, tun_id);
    }
}
/* Shared parser for "set_tunnel" and "set_tunnel64"; 'raw' records which
 * spelling was used so the encoder can round-trip it. */
static char * OVS_WARN_UNUSED_RESULT
parse_set_tunnel(char *arg, struct ofpbuf *ofpacts,
                 enum ofp_raw_action_type raw)
{
    struct ofpact_tunnel *t = ofpact_put_SET_TUNNEL(ofpacts);
    t->ofpact.raw = raw;
    return str_to_u64(arg, &t->tun_id);
}
/* Parses the 32-bit "set_tunnel" spelling via the shared helper. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_TUNNEL(char *arg, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return parse_set_tunnel(arg, ofpacts, NXAST_RAW_SET_TUNNEL);
}
/* Formats 'a' as "set_tunnel:ID" or "set_tunnel64:ID".  The "64" suffix
 * appears when the value does not fit in 32 bits or the action originated
 * as the 64-bit variant. */
static void
format_SET_TUNNEL(const struct ofpact_tunnel *a, struct ds *s)
{
    bool wide = (a->tun_id > UINT32_MAX
                 || a->ofpact.raw == NXAST_RAW_SET_TUNNEL64);
    ds_put_format(s, "set_tunnel%s:%#"PRIx64, wide ? "64" : "", a->tun_id);
}
/* Set queue action. */
static enum ofperr
decode_OFPAT_RAW_SET_QUEUE(uint32_t queue_id, struct ofpbuf *out)
{
ofpact_put_SET_QUEUE(out)->queue_id = queue_id;
return 0;
}
/* Encodes 'queue' into 'out' for 'ofp_version'. */
static void
encode_SET_QUEUE(const struct ofpact_queue *queue,
                 enum ofp_version ofp_version, struct ofpbuf *out)
{
    uint32_t queue_id = queue->queue_id;
    put_OFPAT_SET_QUEUE(out, ofp_version, queue_id);
}
/* Parses "set_queue:QUEUE_ID".  Returns NULL on success or a malloc()'d
 * error string on failure. */
static char * OVS_WARN_UNUSED_RESULT
parse_SET_QUEUE(char *arg, struct ofpbuf *ofpacts,
                enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    struct ofpact_queue *oq = ofpact_put_SET_QUEUE(ofpacts);
    return str_to_u32(arg, &oq->queue_id);
}
/* Formats 'a' as "set_queue:QUEUE_ID". */
static void
format_SET_QUEUE(const struct ofpact_queue *a, struct ds *s)
{
    uint32_t queue_id = a->queue_id;
    ds_put_format(s, "set_queue:%"PRIu32, queue_id);
}
/* Pop queue action. */
/* Decodes a pop-queue action, which carries no arguments. */
static enum ofperr
decode_NXAST_RAW_POP_QUEUE(struct ofpbuf *out)
{
    (void) ofpact_put_POP_QUEUE(out);
    return 0;
}
/* Emits the (argument-free) Nicira pop-queue action into 'out'. */
static void
encode_POP_QUEUE(const struct ofpact_null *null OVS_UNUSED,
                 enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
    put_NXAST_POP_QUEUE(out);
}
/* "pop_queue" takes no argument; just append the action. */
static char * OVS_WARN_UNUSED_RESULT
parse_POP_QUEUE(const char *arg OVS_UNUSED, struct ofpbuf *ofpacts,
                enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    (void) ofpact_put_POP_QUEUE(ofpacts);
    return NULL;
}
/* Prints the fixed keyword for the pop-queue action. */
static void
format_POP_QUEUE(const struct ofpact_null *a OVS_UNUSED, struct ds *s)
{
    ds_put_cstr(s, "pop_queue");
}
/* Action structure for NXAST_FIN_TIMEOUT.
*
* This action changes the idle timeout or hard timeout, or both, of this
* OpenFlow rule when the rule matches a TCP packet with the FIN or RST flag.
* When such a packet is observed, the action reduces the rule's idle timeout
* to 'fin_idle_timeout' and its hard timeout to 'fin_hard_timeout'. This
* action has no effect on an existing timeout that is already shorter than the
* one that the action specifies. A 'fin_idle_timeout' or 'fin_hard_timeout'
* of zero has no effect on the respective timeout.
*
* 'fin_idle_timeout' and 'fin_hard_timeout' are measured in seconds.
* 'fin_hard_timeout' specifies time since the flow's creation, not since the
* receipt of the FIN or RST.
*
* This is useful for quickly discarding learned TCP flows that otherwise will
* take a long time to expire.
*
* This action is intended for use with an OpenFlow rule that matches only a
* single TCP flow. If the rule matches multiple TCP flows (e.g. it wildcards
* all TCP traffic, or all TCP traffic to a particular port), then any FIN or
* RST in any of those flows will cause the entire OpenFlow rule to expire
* early, which is not normally desirable.
*/
/* Wire format for NXAST_FIN_TIMEOUT; see the description above. */
struct nx_action_fin_timeout {
    ovs_be16 type;              /* OFPAT_VENDOR. */
    ovs_be16 len;               /* Length is 16. */
    ovs_be32 vendor;            /* NX_VENDOR_ID. */
    ovs_be16 subtype;           /* NXAST_FIN_TIMEOUT. */
    ovs_be16 fin_idle_timeout;  /* New idle timeout, if nonzero. */
    ovs_be16 fin_hard_timeout;  /* New hard timeout, if nonzero. */
    ovs_be16 pad;               /* Must be zero. */
};
OFP_ASSERT(sizeof(struct nx_action_fin_timeout) == 16);
/* Decodes an NXAST_FIN_TIMEOUT action into 'out'. */
static enum ofperr
decode_NXAST_RAW_FIN_TIMEOUT(const struct nx_action_fin_timeout *naft,
                             struct ofpbuf *out)
{
    struct ofpact_fin_timeout *oft = ofpact_put_FIN_TIMEOUT(out);
    oft->fin_hard_timeout = ntohs(naft->fin_hard_timeout);
    oft->fin_idle_timeout = ntohs(naft->fin_idle_timeout);
    return 0;
}
/* Encodes 'fin_timeout' as an NXAST_FIN_TIMEOUT action in 'out'. */
static void
encode_FIN_TIMEOUT(const struct ofpact_fin_timeout *fin_timeout,
                   enum ofp_version ofp_version OVS_UNUSED,
                   struct ofpbuf *out)
{
    struct nx_action_fin_timeout *naft = put_NXAST_FIN_TIMEOUT(out);
    naft->fin_hard_timeout = htons(fin_timeout->fin_hard_timeout);
    naft->fin_idle_timeout = htons(fin_timeout->fin_idle_timeout);
}
/* Parses "fin_timeout(idle_timeout=N,hard_timeout=N)"-style arguments.
 * Returns NULL on success or a malloc()'d error string on failure. */
static char * OVS_WARN_UNUSED_RESULT
parse_FIN_TIMEOUT(char *arg, struct ofpbuf *ofpacts,
                  enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    struct ofpact_fin_timeout *oft = ofpact_put_FIN_TIMEOUT(ofpacts);
    char *name, *value;

    while (ofputil_parse_key_value(&arg, &name, &value)) {
        char *error;

        if (!strcmp(name, "idle_timeout")) {
            error = str_to_u16(value, name, &oft->fin_idle_timeout);
        } else if (!strcmp(name, "hard_timeout")) {
            error = str_to_u16(value, name, &oft->fin_hard_timeout);
        } else {
            error = xasprintf("invalid key '%s' in 'fin_timeout' argument",
                              name);
        }
        if (error) {
            return error;
        }
    }
    return NULL;
}
/* Formats 'a' as "fin_timeout(...)", omitting timeouts that are zero. */
static void
format_FIN_TIMEOUT(const struct ofpact_fin_timeout *a, struct ds *s)
{
    ds_put_cstr(s, "fin_timeout(");
    if (a->fin_idle_timeout) {
        ds_put_format(s, "idle_timeout=%"PRIu16",", a->fin_idle_timeout);
    }
    if (a->fin_hard_timeout) {
        ds_put_format(s, "hard_timeout=%"PRIu16",", a->fin_hard_timeout);
    }
    ds_chomp(s, ',');           /* Drop the trailing comma, if any. */
    ds_put_char(s, ')');
}
/* Action structures for NXAST_RESUBMIT and NXAST_RESUBMIT_TABLE.
*
* These actions search one of the switch's flow tables:
*
* - For NXAST_RESUBMIT_TABLE only, if the 'table' member is not 255, then
* it specifies the table to search.
*
* - Otherwise (for NXAST_RESUBMIT_TABLE with a 'table' of 255, or for
* NXAST_RESUBMIT regardless of 'table'), it searches the current flow
* table, that is, the OpenFlow flow table that contains the flow from
* which this action was obtained. If this action did not come from a
* flow table (e.g. it came from an OFPT_PACKET_OUT message), then table 0
* is the current table.
*
* The flow table lookup uses a flow that may be slightly modified from the
* original lookup:
*
* - For NXAST_RESUBMIT, the 'in_port' member of struct nx_action_resubmit
* is used as the flow's in_port.
*
* - For NXAST_RESUBMIT_TABLE, if the 'in_port' member is not OFPP_IN_PORT,
* then its value is used as the flow's in_port. Otherwise, the original
* in_port is used.
*
* - If actions that modify the flow (e.g. OFPAT_SET_VLAN_VID) precede the
* resubmit action, then the flow is updated with the new values.
*
* Following the lookup, the original in_port is restored.
*
* If the modified flow matched in the flow table, then the corresponding
* actions are executed. Afterward, actions following the resubmit in the
* original set of actions, if any, are executed; any changes made to the
* packet (e.g. changes to VLAN) by secondary actions persist when those
* actions are executed, although the original in_port is restored.
*
* Resubmit actions may be used any number of times within a set of actions.
*
* Resubmit actions may nest to an implementation-defined depth. Beyond this
* implementation-defined depth, further resubmit actions are simply ignored.
*
* NXAST_RESUBMIT ignores 'table' and 'pad'. NXAST_RESUBMIT_TABLE requires
* 'pad' to be all-bits-zero.
*
* Open vSwitch 1.0.1 and earlier did not support recursion. Open vSwitch
* before 1.2.90 did not support NXAST_RESUBMIT_TABLE.
*/
struct nx_action_resubmit {
    ovs_be16 type;              /* OFPAT_VENDOR. */
    ovs_be16 len;               /* Length is 16. */
    ovs_be32 vendor;            /* NX_VENDOR_ID. */
    ovs_be16 subtype;           /* NXAST_RESUBMIT or NXAST_RESUBMIT_TABLE. */
    ovs_be16 in_port;           /* New in_port for checking flow table. */
    uint8_t table;              /* NXAST_RESUBMIT_TABLE: table to use;
                                 * 255 means the current table. */
    uint8_t pad[3];             /* NXAST_RESUBMIT_TABLE: must be zero. */
};
OFP_ASSERT(sizeof(struct nx_action_resubmit) == 16);
/* Decodes the short NXAST_RESUBMIT form, which carries only an in_port;
 * the table defaults to 255 ("current table"). */
static enum ofperr
decode_NXAST_RAW_RESUBMIT(uint16_t port, struct ofpbuf *out)
{
    struct ofpact_resubmit *r = ofpact_put_RESUBMIT(out);
    r->table_id = 0xff;
    r->in_port = u16_to_ofp(port);
    r->ofpact.raw = NXAST_RAW_RESUBMIT;
    return 0;
}
/* Decodes NXAST_RESUBMIT_TABLE; the padding bytes must all be zero. */
static enum ofperr
decode_NXAST_RAW_RESUBMIT_TABLE(const struct nx_action_resubmit *nar,
                                struct ofpbuf *out)
{
    struct ofpact_resubmit *r;

    if (nar->pad[0] || nar->pad[1] || nar->pad[2]) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    r = ofpact_put_RESUBMIT(out);
    r->ofpact.raw = NXAST_RAW_RESUBMIT_TABLE;
    r->table_id = nar->table;
    r->in_port = u16_to_ofp(ntohs(nar->in_port));
    return 0;
}
/* Encodes 'resubmit', preferring the short NXAST_RESUBMIT form when the
 * table is the default and the action did not originate as
 * NXAST_RESUBMIT_TABLE. */
static void
encode_RESUBMIT(const struct ofpact_resubmit *resubmit,
                enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
    uint16_t in_port = ofp_to_u16(resubmit->in_port);
    bool short_form = (resubmit->table_id == 0xff
                       && resubmit->ofpact.raw != NXAST_RAW_RESUBMIT_TABLE);

    if (short_form) {
        put_NXAST_RESUBMIT(out, in_port);
    } else {
        struct nx_action_resubmit *nar = put_NXAST_RESUBMIT_TABLE(out);
        nar->in_port = htons(in_port);
        nar->table = resubmit->table_id;
    }
}
/* Parses "resubmit:PORT" or "resubmit(PORT,TABLE)"; either part may be
 * omitted but not both.  Returns NULL on success or a malloc()'d error
 * string on failure. */
static char * OVS_WARN_UNUSED_RESULT
parse_RESUBMIT(char *arg, struct ofpbuf *ofpacts,
               enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(ofpacts);
    char *port_str = strsep(&arg, ",");
    char *table_str;

    if (port_str && port_str[0]) {
        if (!ofputil_port_from_string(port_str, &resubmit->in_port)) {
            return xasprintf("%s: resubmit to unknown port", port_str);
        }
    } else {
        resubmit->in_port = OFPP_IN_PORT;   /* Keep the packet's in_port. */
    }

    table_str = strsep(&arg, ",");
    if (table_str && table_str[0]) {
        uint32_t table_id = 0;
        char *error = str_to_u32(table_str, &table_id);
        if (error) {
            return error;
        }
        resubmit->table_id = table_id;
    } else {
        resubmit->table_id = 255;           /* "Current table" sentinel. */
    }

    /* Both defaults at once would make the action a no-op request. */
    if (resubmit->in_port == OFPP_IN_PORT && resubmit->table_id == 255) {
        return xstrdup("at least one \"in_port\" or \"table\" must be "
                       "specified on resubmit");
    }
    return NULL;
}
/* Formats 'a' in the short "resubmit:PORT" form when only the port is
 * non-default, otherwise as "resubmit(PORT,TABLE)" with defaulted members
 * left empty. */
static void
format_RESUBMIT(const struct ofpact_resubmit *a, struct ds *s)
{
    bool short_form = (a->in_port != OFPP_IN_PORT && a->table_id == 255);

    if (short_form) {
        ds_put_cstr(s, "resubmit:");
        ofputil_format_port(a->in_port, s);
        return;
    }

    ds_put_format(s, "resubmit(");
    if (a->in_port != OFPP_IN_PORT) {
        ofputil_format_port(a->in_port, s);
    }
    ds_put_char(s, ',');
    if (a->table_id != 255) {
        ds_put_format(s, "%"PRIu8, a->table_id);
    }
    ds_put_char(s, ')');
}
/* Action structure for NXAST_LEARN.
*
* This action adds or modifies a flow in an OpenFlow table, similar to
* OFPT_FLOW_MOD with OFPFC_MODIFY_STRICT as 'command'. The new flow has the
* specified idle timeout, hard timeout, priority, cookie, and flags. The new
* flow's match criteria and actions are built by applying each of the series
* of flow_mod_spec elements included as part of the action.
*
* A flow_mod_spec starts with a 16-bit header. A header that is all-bits-0 is
* a no-op used for padding the action as a whole to a multiple of 8 bytes in
* length. Otherwise, the flow_mod_spec can be thought of as copying 'n_bits'
* bits from a source to a destination. In this case, the header contains
* multiple fields:
*
* 15 14 13 12 11 10 0
* +------+---+------+---------------------------------+
* | 0 |src| dst | n_bits |
* +------+---+------+---------------------------------+
*
* The meaning and format of a flow_mod_spec depends on 'src' and 'dst'. The
* following table summarizes the meaning of each possible combination.
* Details follow the table:
*
* src dst meaning
* --- --- ----------------------------------------------------------
* 0 0 Add match criteria based on value in a field.
* 1 0 Add match criteria based on an immediate value.
* 0 1 Add NXAST_REG_LOAD action to copy field into a different field.
* 1 1 Add NXAST_REG_LOAD action to load immediate value into a field.
* 0 2 Add OFPAT_OUTPUT action to output to port from specified field.
* All other combinations are undefined and not allowed.
*
* The flow_mod_spec header is followed by a source specification and a
* destination specification. The format and meaning of the source
* specification depends on 'src':
*
* - If 'src' is 0, the source bits are taken from a field in the flow to
* which this action is attached. (This should be a wildcarded field. If
* its value is fully specified then the source bits being copied have
* constant values.)
*
* The source specification is an ovs_be32 'field' and an ovs_be16 'ofs'.
* 'field' is an nxm_header with nxm_hasmask=0, and 'ofs' the starting bit
* offset within that field. The source bits are field[ofs:ofs+n_bits-1].
* 'field' and 'ofs' are subject to the same restrictions as the source
* field in NXAST_REG_MOVE.
*
* - If 'src' is 1, the source bits are a constant value. The source
* specification is (n_bits+15)/16*2 bytes long. Taking those bytes as a
* number in network order, the source bits are the 'n_bits'
* least-significant bits. The switch will report an error if other bits
* in the constant are nonzero.
*
* The flow_mod_spec destination specification, for 'dst' of 0 or 1, is an
* ovs_be32 'field' and an ovs_be16 'ofs'. 'field' is an nxm_header with
* nxm_hasmask=0 and 'ofs' is a starting bit offset within that field. The
* meaning of the flow_mod_spec depends on 'dst':
*
* - If 'dst' is 0, the flow_mod_spec specifies match criteria for the new
* flow. The new flow matches only if bits field[ofs:ofs+n_bits-1] in a
* packet equal the source bits. 'field' may be any nxm_header with
* nxm_hasmask=0 that is allowed in NXT_FLOW_MOD.
*
* Order is significant. Earlier flow_mod_specs must satisfy any
* prerequisites for matching fields specified later, by copying constant
* values into prerequisite fields.
*
* The switch will reject flow_mod_specs that do not satisfy NXM masking
* restrictions.
*
* - If 'dst' is 1, the flow_mod_spec specifies an NXAST_REG_LOAD action for
* the new flow. The new flow copies the source bits into
* field[ofs:ofs+n_bits-1]. Actions are executed in the same order as the
* flow_mod_specs.
*
* A single NXAST_REG_LOAD action writes no more than 64 bits, so n_bits
* greater than 64 yields multiple NXAST_REG_LOAD actions.
*
* The flow_mod_spec destination spec for 'dst' of 2 (when 'src' is 0) is
* empty. It has the following meaning:
*
* - The flow_mod_spec specifies an OFPAT_OUTPUT action for the new flow.
* The new flow outputs to the OpenFlow port specified by the source field.
* Of the special output ports with value OFPP_MAX or larger, OFPP_IN_PORT,
* OFPP_FLOOD, OFPP_LOCAL, and OFPP_ALL are supported. Other special ports
* may not be used.
*
* Resource Management
* -------------------
*
* A switch has a finite amount of flow table space available for learning.
* When this space is exhausted, no new learning table entries will be learned
* until some existing flow table entries expire. The controller should be
* prepared to handle this by flooding (which can be implemented as a
* low-priority flow).
*
* If a learned flow matches a single TCP stream with a relatively long
* timeout, one may make the best of resource constraints by setting
* 'fin_idle_timeout' or 'fin_hard_timeout' (both measured in seconds), or
* both, to shorter timeouts. When either of these is specified as a nonzero
* value, OVS adds a NXAST_FIN_TIMEOUT action, with the specified timeouts, to
* the learned flow.
*
* Examples
* --------
*
* The following examples give a prose description of the flow_mod_specs along
* with informal notation for how those would be represented and a hex dump of
* the bytes that would be required.
*
* These examples could work with various nx_action_learn parameters. Typical
* values would be idle_timeout=OFP_FLOW_PERMANENT, hard_timeout=60,
* priority=OFP_DEFAULT_PRIORITY, flags=0, table_id=10.
*
* 1. Learn input port based on the source MAC, with lookup into
* NXM_NX_REG1[16:31] by resubmit to in_port=99:
*
* Match on in_port=99:
* ovs_be16(src=1, dst=0, n_bits=16), 20 10
* ovs_be16(99), 00 63
* ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00
*
* Match Ethernet destination on Ethernet source from packet:
* ovs_be16(src=0, dst=0, n_bits=48), 00 30
* ovs_be32(NXM_OF_ETH_SRC), ovs_be16(0) 00 00 04 06 00 00
* ovs_be32(NXM_OF_ETH_DST), ovs_be16(0) 00 00 02 06 00 00
*
* Set NXM_NX_REG1[16:31] to the packet's input port:
* ovs_be16(src=0, dst=1, n_bits=16), 08 10
* ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00
* ovs_be32(NXM_NX_REG1), ovs_be16(16) 00 01 02 04 00 10
*
* Given a packet that arrived on port A with Ethernet source address B,
* this would set up the flow "in_port=99, dl_dst=B,
* actions=load:A->NXM_NX_REG1[16..31]".
*
* In syntax accepted by ovs-ofctl, this action is: learn(in_port=99,
* NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],
* load:NXM_OF_IN_PORT[]->NXM_NX_REG1[16..31])
*
* 2. Output to input port based on the source MAC and VLAN VID, with lookup
* into NXM_NX_REG1[16:31]:
*
* Match on same VLAN ID as packet:
* ovs_be16(src=0, dst=0, n_bits=12), 00 0c
* ovs_be32(NXM_OF_VLAN_TCI), ovs_be16(0) 00 00 08 02 00 00
* ovs_be32(NXM_OF_VLAN_TCI), ovs_be16(0) 00 00 08 02 00 00
*
* Match Ethernet destination on Ethernet source from packet:
* ovs_be16(src=0, dst=0, n_bits=48), 00 30
* ovs_be32(NXM_OF_ETH_SRC), ovs_be16(0) 00 00 04 06 00 00
* ovs_be32(NXM_OF_ETH_DST), ovs_be16(0) 00 00 02 06 00 00
*
* Output to the packet's input port:
* ovs_be16(src=0, dst=2, n_bits=16), 10 10
* ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00
*
* Given a packet that arrived on port A with Ethernet source address B in
* VLAN C, this would set up the flow "dl_dst=B, vlan_vid=C,
* actions=output:A".
*
* In syntax accepted by ovs-ofctl, this action is:
* learn(NXM_OF_VLAN_TCI[0..11], NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],
* output:NXM_OF_IN_PORT[])
*
* 3. Here's a recipe for a very simple-minded MAC learning switch. It uses a
 *    10-second MAC expiration time to make it easier to see what's going on.
*
* ovs-vsctl del-controller br0
* ovs-ofctl del-flows br0
* ovs-ofctl add-flow br0 "table=0 actions=learn(table=1, \
hard_timeout=10, NXM_OF_VLAN_TCI[0..11], \
NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], \
output:NXM_OF_IN_PORT[]), resubmit(,1)"
* ovs-ofctl add-flow br0 "table=1 priority=0 actions=flood"
*
* You can then dump the MAC learning table with:
*
* ovs-ofctl dump-flows br0 table=1
*
* Usage Advice
* ------------
*
* For best performance, segregate learned flows into a table that is not used
* for any other flows except possibly for a lowest-priority "catch-all" flow
* (a flow with no match criteria). If different learning actions specify
* different match criteria, use different tables for the learned flows.
*
* The meaning of 'hard_timeout' and 'idle_timeout' can be counterintuitive.
* These timeouts apply to the flow that is added, which means that a flow with
* an idle timeout will expire when no traffic has been sent *to* the learned
* address. This is not usually the intent in MAC learning; instead, we want
* the MAC learn entry to expire when no traffic has been sent *from* the
* learned address. Use a hard timeout for that.
*/
/* Wire format for NXAST_LEARN; see the long description above. */
struct nx_action_learn {
    ovs_be16 type;              /* OFPAT_VENDOR. */
    ovs_be16 len;               /* At least 24. */
    ovs_be32 vendor;            /* NX_VENDOR_ID. */
    ovs_be16 subtype;           /* NXAST_LEARN. */
    ovs_be16 idle_timeout;      /* Idle time before discarding (seconds). */
    ovs_be16 hard_timeout;      /* Max time before discarding (seconds). */
    ovs_be16 priority;          /* Priority level of flow entry. */
    ovs_be64 cookie;            /* Cookie for new flow. */
    ovs_be16 flags;             /* NX_LEARN_F_*. */
    uint8_t table_id;           /* Table to insert flow entry; the decoder
                                 * rejects 255. */
    uint8_t pad;                /* Must be zero. */
    ovs_be16 fin_idle_timeout;  /* Idle timeout after FIN, if nonzero. */
    ovs_be16 fin_hard_timeout;  /* Hard timeout after FIN, if nonzero. */
    /* Followed by a sequence of flow_mod_spec elements, as described above,
     * until the end of the action is reached. */
};
OFP_ASSERT(sizeof(struct nx_action_learn) == 32);
/* Reads an ovs_be16 from '*pp' and advances '*pp' past it.
 *
 * NOTE(review): unlike get_be32() below, this dereferences the pointer
 * directly instead of using an unaligned accessor, which assumes the data
 * at '*pp' is at least 2-byte aligned -- confirm for all callers. */
static ovs_be16
get_be16(const void **pp)
{
    const ovs_be16 *p = *pp;
    ovs_be16 value = *p;
    *pp = p + 1;
    return value;
}
/* Reads an ovs_be32 from '*pp' (which need not be 4-byte aligned) and
 * advances '*pp' past it. */
static ovs_be32
get_be32(const void **pp)
{
    const ovs_be32 *src = *pp;
    ovs_be32 v = get_unaligned_be32(src);
    *pp = src + 1;
    return v;
}
/* Reads the (field, ofs) pair that encodes a subfield reference from '*p'
 * and fills in 'sf' with it plus the caller-supplied width 'n_bits'. */
static void
get_subfield(int n_bits, const void **p, struct mf_subfield *sf)
{
    sf->n_bits = n_bits;
    /* The field header comes first on the wire, then the bit offset. */
    sf->field = mf_from_nxm_header(ntohl(get_be32(p)));
    sf->ofs = ntohs(get_be16(p));
}
/* Returns the minimum number of bytes that must follow a flow_mod_spec
 * header with value 'header', based on its src/dst types and bit count. */
static unsigned int
learn_min_len(uint16_t header)
{
    int n_bits = header & NX_LEARN_N_BITS_MASK;
    int src_type = header & NX_LEARN_SRC_MASK;
    int dst_type = header & NX_LEARN_DST_MASK;
    unsigned int min_len;
    min_len = 0;
    if (src_type == NX_LEARN_SRC_FIELD) {
        min_len += sizeof(ovs_be32); /* src_field */
        min_len += sizeof(ovs_be16); /* src_ofs */
    } else {
        /* An immediate occupies (n_bits + 15) / 16 * 2 bytes on the wire
         * (see the flow_mod_spec description above), and the decoder reads
         * exactly 2 * DIV_ROUND_UP(n_bits, 16) bytes for it.
         *
         * Bug fix: this previously counted only DIV_ROUND_UP(n_bits, 16)
         * bytes -- half the real size -- so the decoder's length check
         * could pass and then read past the end of the action. */
        min_len += 2 * DIV_ROUND_UP(n_bits, 16);
    }
    if (dst_type == NX_LEARN_DST_MATCH ||
        dst_type == NX_LEARN_DST_LOAD) {
        min_len += sizeof(ovs_be32); /* dst_field */
        min_len += sizeof(ovs_be16); /* dst_ofs */
    }
    return min_len;
}
/* Converts 'nal' into a "struct ofpact_learn" and appends that struct to
* 'ofpacts'. Returns 0 if successful, otherwise an OFPERR_*. */
static enum ofperr
decode_NXAST_RAW_LEARN(const struct nx_action_learn *nal,
                       struct ofpbuf *ofpacts)
{
    struct ofpact_learn *learn;
    const void *p, *end;
    /* The wire format requires the padding byte to be zero. */
    if (nal->pad) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    learn = ofpact_put_LEARN(ofpacts);
    /* Fixed-size header fields, converted to host byte order. */
    learn->idle_timeout = ntohs(nal->idle_timeout);
    learn->hard_timeout = ntohs(nal->hard_timeout);
    learn->priority = ntohs(nal->priority);
    learn->cookie = nal->cookie;
    learn->table_id = nal->table_id;
    learn->fin_idle_timeout = ntohs(nal->fin_idle_timeout);
    learn->fin_hard_timeout = ntohs(nal->fin_hard_timeout);
    learn->flags = ntohs(nal->flags);
    /* Only these two flags are defined; reject anything else. */
    if (learn->flags & ~(NX_LEARN_F_SEND_FLOW_REM |
                         NX_LEARN_F_DELETE_LEARNED)) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    /* Table 255 is not a valid target for the learned flow. */
    if (learn->table_id == 0xff) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    /* Walk the variable-length flow_mod_spec sequence that follows the
     * fixed header.  An all-zero spec header terminates the list early. */
    end = (char *) nal + ntohs(nal->len);
    for (p = nal + 1; p != end; ) {
        struct ofpact_learn_spec *spec;
        uint16_t header = ntohs(get_be16(&p));
        if (!header) {
            break;
        }
        spec = ofpbuf_put_zeros(ofpacts, sizeof *spec);
        /* ofpbuf_put_zeros() may reallocate 'ofpacts', so re-derive 'learn'
         * from the buffer header rather than using the stale pointer. */
        learn = ofpacts->header;
        learn->n_specs++;
        spec->src_type = header & NX_LEARN_SRC_MASK;
        spec->dst_type = header & NX_LEARN_DST_MASK;
        spec->n_bits = header & NX_LEARN_N_BITS_MASK;
        /* Check for valid src and dst type combination. */
        if (spec->dst_type == NX_LEARN_DST_MATCH ||
            spec->dst_type == NX_LEARN_DST_LOAD ||
            (spec->dst_type == NX_LEARN_DST_OUTPUT &&
             spec->src_type == NX_LEARN_SRC_FIELD)) {
            /* OK. */
        } else {
            return OFPERR_OFPBAC_BAD_ARGUMENT;
        }
        /* Check that the arguments don't overrun the end of the action.
         * NOTE(review): for an immediate source, learn_min_len() counts
         * DIV_ROUND_UP(n_bits, 16) bytes but 'p_bytes' below reads twice
         * that -- verify the length check is strict enough. */
        if ((char *) end - (char *) p < learn_min_len(header)) {
            return OFPERR_OFPBAC_BAD_LEN;
        }
        /* Get the source. */
        if (spec->src_type == NX_LEARN_SRC_FIELD) {
            get_subfield(spec->n_bits, &p, &spec->src);
        } else {
            /* Immediate value: round up to a whole number of be16 words. */
            int p_bytes = 2 * DIV_ROUND_UP(spec->n_bits, 16);
            bitwise_copy(p, p_bytes, 0,
                         &spec->src_imm, sizeof spec->src_imm, 0,
                         spec->n_bits);
            p = (const uint8_t *) p + p_bytes;
        }
        /* Get the destination.  Output specs carry no destination bytes. */
        if (spec->dst_type == NX_LEARN_DST_MATCH ||
            spec->dst_type == NX_LEARN_DST_LOAD) {
            get_subfield(spec->n_bits, &p, &spec->dst);
        }
    }
    /* Account for the specs appended after the ofpact_learn header. */
    ofpact_update_len(ofpacts, &learn->ofpact);
    /* Any remaining bytes must be zero padding. */
    if (!is_all_zeros(p, (char *) end - (char *) p)) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    return 0;
}
/* Appends the 2-byte big-endian value 'x' to 'b'. */
static void
put_be16(struct ofpbuf *b, ovs_be16 x)
{
    ofpbuf_put(b, &x, sizeof x);
}
/* Appends the 4-byte big-endian value 'x' to 'b'. */
static void
put_be32(struct ofpbuf *b, ovs_be32 x)
{
    ofpbuf_put(b, &x, sizeof x);
}
static void
put_u16(struct ofpbuf *b, uint16_t x)
{
put_be16(b, htons(x));
}
static void
put_u32(struct ofpbuf *b, uint32_t x)
{
put_be32(b, htonl(x));
}
/* Encodes 'learn' as an NXAST_LEARN action appended to 'out'. */
static void
encode_LEARN(const struct ofpact_learn *learn,
             enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
    const struct ofpact_learn_spec *spec;
    struct nx_action_learn *nal;
    size_t start_ofs;
    /* Remember where the action starts so it can be padded to a multiple
     * of 8 bytes afterward. */
    start_ofs = out->size;
    nal = put_NXAST_LEARN(out);
    /* Fixed-size header fields, converted to network byte order. */
    nal->idle_timeout = htons(learn->idle_timeout);
    nal->hard_timeout = htons(learn->hard_timeout);
    nal->fin_idle_timeout = htons(learn->fin_idle_timeout);
    nal->fin_hard_timeout = htons(learn->fin_hard_timeout);
    nal->priority = htons(learn->priority);
    nal->cookie = learn->cookie;
    nal->flags = htons(learn->flags);
    nal->table_id = learn->table_id;
    /* Append one flow_mod_spec per decoded/parsed spec. */
    for (spec = learn->specs; spec < &learn->specs[learn->n_specs]; spec++) {
        put_u16(out, spec->n_bits | spec->dst_type | spec->src_type);
        /* Source: either a (field, ofs) reference or an immediate value
         * padded to a whole number of 16-bit words. */
        if (spec->src_type == NX_LEARN_SRC_FIELD) {
            put_u32(out, mf_nxm_header(spec->src.field->id));
            put_u16(out, spec->src.ofs);
        } else {
            size_t n_dst_bytes = 2 * DIV_ROUND_UP(spec->n_bits, 16);
            uint8_t *bits = ofpbuf_put_zeros(out, n_dst_bytes);
            bitwise_copy(&spec->src_imm, sizeof spec->src_imm, 0,
                         bits, n_dst_bytes, 0,
                         spec->n_bits);
        }
        /* Destination: match and load specs carry a (field, ofs) pair;
         * output specs carry nothing. */
        if (spec->dst_type == NX_LEARN_DST_MATCH ||
            spec->dst_type == NX_LEARN_DST_LOAD) {
            put_u32(out, mf_nxm_header(spec->dst.field->id));
            put_u16(out, spec->dst.ofs);
        }
    }
    /* Zero-pad the action to a multiple of 8 bytes. */
    pad_ofpat(out, start_ofs);
}
/* Delegates the (complex) learn-action grammar to learn_parse().
 * Returns NULL on success or a malloc()'d error string on failure. */
static char * OVS_WARN_UNUSED_RESULT
parse_LEARN(char *arg, struct ofpbuf *ofpacts,
            enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return learn_parse(arg, ofpacts);
}
/* Delegates formatting of 'a' to learn_format(). */
static void
format_LEARN(const struct ofpact_learn *a, struct ds *s)
{
    learn_format(a, s);
}
/* Action structure for NXAST_CONJUNCTION. */
struct nx_action_conjunction {
    ovs_be16 type;              /* OFPAT_VENDOR. */
    ovs_be16 len;               /* At least 16. */
    ovs_be32 vendor;            /* NX_VENDOR_ID. */
    ovs_be16 subtype;           /* See enum ofp_raw_action_type. */
    uint8_t clause;             /* Clause index, 0-based; < n_clauses. */
    uint8_t n_clauses;          /* Total clauses; the decoder accepts
                                 * 2 to 64. */
    ovs_be32 id;                /* Conjunction ID. */
};
OFP_ASSERT(sizeof(struct nx_action_conjunction) == 16);
/* Appends a conjunction ofpact with the given parameters to 'out'. */
static void
add_conjunction(struct ofpbuf *out,
                uint32_t id, uint8_t clause, uint8_t n_clauses)
{
    struct ofpact_conjunction *oc = ofpact_put_CONJUNCTION(out);
    oc->n_clauses = n_clauses;
    oc->clause = clause;
    oc->id = id;
}
/* Validates and decodes an NXAST_CONJUNCTION action. */
static enum ofperr
decode_NXAST_RAW_CONJUNCTION(const struct nx_action_conjunction *nac,
                             struct ofpbuf *out)
{
    /* A conjunction needs 2..64 clauses and an in-range clause index. */
    if (nac->n_clauses < 2 || nac->n_clauses > 64
        || nac->clause >= nac->n_clauses) {
        return OFPERR_NXBAC_BAD_CONJUNCTION;
    }
    add_conjunction(out, ntohl(nac->id), nac->clause, nac->n_clauses);
    return 0;
}
/* Encodes 'oc' as an NXAST_CONJUNCTION action in 'out'. */
static void
encode_CONJUNCTION(const struct ofpact_conjunction *oc,
                   enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
    struct nx_action_conjunction *nac = put_NXAST_CONJUNCTION(out);
    nac->id = htonl(oc->id);
    nac->clause = oc->clause;
    nac->n_clauses = oc->n_clauses;
}
/* Formats 'oc' as "conjunction(id,i/n)".  The stored clause index is
 * 0-based; the printed one is 1-based. */
static void
format_CONJUNCTION(const struct ofpact_conjunction *oc, struct ds *s)
{
    ds_put_format(s, "conjunction(%"PRIu32",%"PRIu8"/%"PRIu8")",
                  oc->id, oc->clause + 1, oc->n_clauses);
}
/* Parses "id,i/n" for a conjunction action.  Clause numbers are 1-based on
 * the command line but stored 0-based.  Returns NULL on success or a
 * malloc()'d error string on failure. */
static char * OVS_WARN_UNUSED_RESULT
parse_CONJUNCTION(const char *arg, struct ofpbuf *ofpacts,
                  enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    uint32_t id;
    uint8_t clause;
    uint8_t n_clauses;
    int n;

    /* '%n' lets us verify that the whole argument was consumed. */
    if (!ovs_scan(arg, "%"SCNi32" , %"SCNu8" / %"SCNu8" %n",
                  &id, &clause, &n_clauses, &n) || n != strlen(arg)) {
        return xstrdup("\"conjunction\" syntax is \"conjunction(id,i/n)\"");
    }

    if (n_clauses < 2) {
        return xstrdup("conjunction must have at least 2 clauses");
    }
    if (n_clauses > 64) {
        return xstrdup("conjunction must have at most 64 clauses");
    }
    if (clause < 1) {
        return xstrdup("clause index must be positive");
    }
    if (clause > n_clauses) {
        return xstrdup("clause index must be less than or equal to "
                       "number of clauses");
    }

    add_conjunction(ofpacts, id, clause - 1, n_clauses);
    return NULL;
}
/* Action structure for NXAST_MULTIPATH.
*
* This action performs the following steps in sequence:
*
* 1. Hashes the fields designated by 'fields', one of NX_HASH_FIELDS_*.
* Refer to the definition of "enum nx_mp_fields" for details.
*
* The 'basis' value is used as a universal hash parameter, that is,
* different values of 'basis' yield different hash functions. The
* particular universal hash function used is implementation-defined.
*
* The hashed fields' values are drawn from the current state of the
* flow, including all modifications that have been made by actions up to
* this point.
*
* 2. Applies the multipath link choice algorithm specified by 'algorithm',
* one of NX_MP_ALG_*. Refer to the definition of "enum nx_mp_algorithm"
* for details.
*
* The output of the algorithm is 'link', an unsigned integer less than
* or equal to 'max_link'.
*
* Some algorithms use 'arg' as an additional argument.
*
* 3. Stores 'link' in dst[ofs:ofs+n_bits]. The format and semantics of
* 'dst' and 'ofs_nbits' are similar to those for the NXAST_REG_LOAD
* action.
*
* The switch will reject actions that have an unknown 'fields', or an unknown
* 'algorithm', or in which ofs+n_bits is greater than the width of 'dst', or
* in which 'max_link' is greater than or equal to 2**n_bits, with error type
* OFPET_BAD_ACTION, code OFPBAC_BAD_ARGUMENT.
*/
struct nx_action_multipath {
    ovs_be16 type;              /* OFPAT_VENDOR. */
    ovs_be16 len;               /* Length is 32. */
    ovs_be32 vendor;            /* NX_VENDOR_ID. */
    ovs_be16 subtype;           /* NXAST_MULTIPATH. */
    /* What fields to hash and how. */
    ovs_be16 fields;            /* One of NX_HASH_FIELDS_*. */
    ovs_be16 basis;             /* Universal hash parameter. */
    ovs_be16 pad0;              /* Ignored by the decoder. */
    /* Multipath link choice algorithm to apply to hash value. */
    ovs_be16 algorithm;         /* One of NX_MP_ALG_*. */
    ovs_be16 max_link;          /* Number of output links, minus 1. */
    ovs_be32 arg;               /* Algorithm-specific argument. */
    ovs_be16 pad1;              /* Ignored by the decoder. */
    /* Where to store the result. */
    ovs_be16 ofs_nbits;         /* (ofs << 6) | (n_bits - 1). */
    ovs_be32 dst;               /* Destination field (NXM header). */
};
OFP_ASSERT(sizeof(struct nx_action_multipath) == 32);
/* Decodes and validates an NXAST_MULTIPATH action into 'out'. */
static enum ofperr
decode_NXAST_RAW_MULTIPATH(const struct nx_action_multipath *nam,
                           struct ofpbuf *out)
{
    /* 'dst' must be wide enough to hold any link number in 0..max_link. */
    uint32_t n_links = ntohs(nam->max_link) + 1;
    size_t min_n_bits = log_2_ceil(n_links);
    struct ofpact_multipath *mp;
    mp = ofpact_put_MULTIPATH(out);
    /* Copy the wire fields into host byte order. */
    mp->fields = ntohs(nam->fields);
    mp->basis = ntohs(nam->basis);
    mp->algorithm = ntohs(nam->algorithm);
    mp->max_link = ntohs(nam->max_link);
    mp->arg = ntohl(nam->arg);
    mp->dst.field = mf_from_nxm_header(ntohl(nam->dst));
    mp->dst.ofs = nxm_decode_ofs(nam->ofs_nbits);
    mp->dst.n_bits = nxm_decode_n_bits(nam->ofs_nbits);
    /* Validate after decoding so the log messages can show host-order
     * values.  Each failure is rate-limited via 'rl'. */
    if (!flow_hash_fields_valid(mp->fields)) {
        VLOG_WARN_RL(&rl, "unsupported fields %d", (int) mp->fields);
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    } else if (mp->algorithm != NX_MP_ALG_MODULO_N
               && mp->algorithm != NX_MP_ALG_HASH_THRESHOLD
               && mp->algorithm != NX_MP_ALG_HRW
               && mp->algorithm != NX_MP_ALG_ITER_HASH) {
        VLOG_WARN_RL(&rl, "unsupported algorithm %d", (int) mp->algorithm);
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    } else if (mp->dst.n_bits < min_n_bits) {
        VLOG_WARN_RL(&rl, "multipath action requires at least %"PRIuSIZE" bits for "
                     "%"PRIu32" links", min_n_bits, n_links);
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }
    /* Remaining destination checks are shared with the parser. */
    return multipath_check(mp, NULL);
}
/* Encodes OFPACT_MULTIPATH 'mp' as an NXAST_MULTIPATH action appended to
 * 'out'.  The encoding is version-independent, hence ofp_version is unused. */
static void
encode_MULTIPATH(const struct ofpact_multipath *mp,
                 enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
    struct nx_action_multipath *na = put_NXAST_MULTIPATH(out);

    /* Destination first, then the hashing parameters. */
    na->ofs_nbits = nxm_encode_ofs_nbits(mp->dst.ofs, mp->dst.n_bits);
    na->dst = htonl(mf_nxm_header(mp->dst.field->id));
    na->fields = htons(mp->fields);
    na->basis = htons(mp->basis);
    na->algorithm = htons(mp->algorithm);
    na->max_link = htons(mp->max_link);
    na->arg = htonl(mp->arg);
}
/* Parses a "multipath(...)" argument string into an OFPACT_MULTIPATH appended
 * to 'ofpacts'.  Returns NULL on success, otherwise a malloc()'d error string
 * that the caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_MULTIPATH(const char *arg, struct ofpbuf *ofpacts,
                enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    return multipath_parse(ofpact_put_MULTIPATH(ofpacts), arg);
}

/* Formats OFPACT_MULTIPATH 'a' as a "multipath(...)" string appended to 's'. */
static void
format_MULTIPATH(const struct ofpact_multipath *a, struct ds *s)
{
    multipath_format(a, s);
}
/* Action structure for NXAST_NOTE.
 *
 * This action has no effect.  It is variable length.  The switch does not
 * attempt to interpret the user-defined 'note' data in any way.  A controller
 * can use this action to attach arbitrary metadata to a flow.
 *
 * This action might go away in the future.
 */
struct nx_action_note {
    ovs_be16 type;                  /* OFPAT_VENDOR. */
    ovs_be16 len;                   /* A multiple of 8, but at least 16. */
    ovs_be32 vendor;                /* NX_VENDOR_ID. */
    ovs_be16 subtype;               /* NXAST_NOTE. */
    uint8_t note[6];                /* Start of user-defined data. */
    /* Possibly followed by additional user-defined data. */
};
OFP_ASSERT(sizeof(struct nx_action_note) == 16);
/* Converts an NXAST_NOTE wire action 'nan' into an OFPACT_NOTE ofpact that
 * carries a copy of its user data, appended to 'out'. */
static enum ofperr
decode_NXAST_RAW_NOTE(const struct nx_action_note *nan, struct ofpbuf *out)
{
    /* User data is everything past the fixed action header. */
    unsigned int data_len = ntohs(nan->len) - offsetof(struct nx_action_note,
                                                       note);
    struct ofpact_note *note = ofpact_put(out, OFPACT_NOTE,
                                          offsetof(struct ofpact_note, data)
                                          + data_len);

    note->length = data_len;
    memcpy(note->data, nan->note, data_len);
    return 0;
}
/* Encodes OFPACT_NOTE 'note' as an NXAST_NOTE action appended to 'out',
 * padding the action to a multiple of OFP_ACTION_ALIGN bytes and fixing up
 * its length field afterward. */
static void
encode_NOTE(const struct ofpact_note *note,
            enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
    size_t start_ofs = out->size;
    struct nx_action_note *nan;
    unsigned int remainder;
    unsigned int len;
    put_NXAST_NOTE(out);
    /* Rewind over the struct's fixed 6-byte 'note' member so the real user
     * data replaces it instead of following it. */
    out->size = out->size - sizeof nan->note;
    ofpbuf_put(out, note->data, note->length);
    len = out->size - start_ofs;
    remainder = len % OFP_ACTION_ALIGN;
    if (remainder) {
        /* Zero-pad up to the required action alignment. */
        ofpbuf_put_zeros(out, OFP_ACTION_ALIGN - remainder);
    }
    /* Re-locate the header (ofpbuf_put() may have reallocated) and patch in
     * the final, padded length. */
    nan = ofpbuf_at(out, start_ofs, sizeof *nan);
    nan->len = htons(out->size - start_ofs);
}
/* Parses 'arg' as a "note" action argument: pairs of hex digits, optionally
 * separated by '.' characters, appended as raw bytes to an OFPACT_NOTE.
 *
 * Returns NULL on success, otherwise a malloc()'d error string that the
 * caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_NOTE(const char *arg, struct ofpbuf *ofpacts,
           enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    struct ofpact_note *note;

    note = ofpact_put_NOTE(ofpacts);
    while (*arg != '\0') {
        uint8_t byte;
        bool ok;

        if (*arg == '.') {
            arg++;
        }
        if (*arg == '\0') {
            break;
        }

        byte = hexits_value(arg, 2, &ok);
        if (!ok) {
            return xstrdup("bad hex digit in `note' argument");
        }
        ofpbuf_put(ofpacts, &byte, 1);

        /* ofpbuf_put() may reallocate the buffer, invalidating 'note', so
         * refresh the pointer from the buffer header before using it. */
        note = ofpacts->header;
        note->length++;

        arg += 2;
    }
    /* (Fixed: this argument was corrupted to "¬e->ofpact" by a bad
     * HTML-entity conversion of "&note"; the address-of is required.) */
    ofpact_update_len(ofpacts, &note->ofpact);
    return NULL;
}
/* Formats OFPACT_NOTE 'a' as "note:XX.XX...." (dot-separated hex bytes)
 * appended to 's'. */
static void
format_NOTE(const struct ofpact_note *a, struct ds *s)
{
    size_t ofs;

    ds_put_cstr(s, "note:");
    for (ofs = 0; ofs < a->length; ofs++) {
        /* Separate every byte after the first with a dot. */
        if (ofs > 0) {
            ds_put_char(s, '.');
        }
        ds_put_format(s, "%02"PRIx8, a->data[ofs]);
    }
}
/* Exit action. */

/* Decodes NXAST_EXIT, which has no arguments, into an OFPACT_EXIT. */
static enum ofperr
decode_NXAST_RAW_EXIT(struct ofpbuf *out)
{
    ofpact_put_EXIT(out);
    return 0;
}

/* Encodes OFPACT_EXIT as an NXAST_EXIT action (same form in all versions). */
static void
encode_EXIT(const struct ofpact_null *null OVS_UNUSED,
            enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
    put_NXAST_EXIT(out);
}

/* Parses the argument-less "exit" action. */
static char * OVS_WARN_UNUSED_RESULT
parse_EXIT(char *arg OVS_UNUSED, struct ofpbuf *ofpacts,
           enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    ofpact_put_EXIT(ofpacts);
    return NULL;
}

/* Formats OFPACT_EXIT as the literal string "exit". */
static void
format_EXIT(const struct ofpact_null *a OVS_UNUSED, struct ds *s)
{
    ds_put_cstr(s, "exit");
}
/* Unroll xlate action. */

/* OFPACT_UNROLL_XLATE is never encoded to the wire: both encode and parse
 * abort via OVS_NOT_REACHED(), so it is presumably generated and consumed
 * only internally during translation -- confirm against the callers. */
static void
encode_UNROLL_XLATE(const struct ofpact_unroll_xlate *unroll OVS_UNUSED,
                    enum ofp_version ofp_version OVS_UNUSED,
                    struct ofpbuf *out OVS_UNUSED)
{
    OVS_NOT_REACHED();
}

/* "unroll_xlate" cannot appear in a textual flow specification. */
static char * OVS_WARN_UNUSED_RESULT
parse_UNROLL_XLATE(char *arg OVS_UNUSED, struct ofpbuf *ofpacts OVS_UNUSED,
                   enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    OVS_NOT_REACHED();
    return NULL;            /* Not reached; satisfies the return type. */
}

/* Formats OFPACT_UNROLL_XLATE as "unroll_xlate" (for debugging output). */
static void
format_UNROLL_XLATE(const struct ofpact_unroll_xlate *a OVS_UNUSED,
                    struct ds *s)
{
    ds_put_cstr(s, "unroll_xlate");
}
/* Action structure for NXAST_SAMPLE.
 *
 * Samples matching packets with the given probability and sends them
 * each to the set of collectors identified with the given ID.  The
 * probability is expressed as a number of packets to be sampled out
 * of USHRT_MAX packets, and must be >0.
 *
 * When sending packet samples to IPFIX collectors, the IPFIX flow
 * record sent for each sampled packet is associated with the given
 * observation domain ID and observation point ID.  Each IPFIX flow
 * record contain the sampled packet's headers when executing this
 * rule.  If a sampled packet's headers are modified by previous
 * actions in the flow, those modified headers are sent. */
struct nx_action_sample {
    ovs_be16 type;                  /* OFPAT_VENDOR. */
    ovs_be16 len;                   /* Length is 24. */
    ovs_be32 vendor;                /* NX_VENDOR_ID. */
    ovs_be16 subtype;               /* NXAST_SAMPLE. */
    ovs_be16 probability;           /* Fraction of packets to sample. */
    ovs_be32 collector_set_id;      /* ID of collector set in OVSDB. */
    ovs_be32 obs_domain_id;         /* ID of sampling observation domain. */
    ovs_be32 obs_point_id;          /* ID of sampling observation point. */
};
OFP_ASSERT(sizeof(struct nx_action_sample) == 24);
/* Converts an NXAST_SAMPLE wire action 'nas' into an OFPACT_SAMPLE appended
 * to 'out'.  Rejects a zero probability, which the wire format forbids. */
static enum ofperr
decode_NXAST_RAW_SAMPLE(const struct nx_action_sample *nas, struct ofpbuf *out)
{
    struct ofpact_sample *os = ofpact_put_SAMPLE(out);

    os->probability = ntohs(nas->probability);
    os->collector_set_id = ntohl(nas->collector_set_id);
    os->obs_domain_id = ntohl(nas->obs_domain_id);
    os->obs_point_id = ntohl(nas->obs_point_id);

    return os->probability == 0 ? OFPERR_OFPBAC_BAD_ARGUMENT : 0;
}
/* Encodes OFPACT_SAMPLE 'sample' as an NXAST_SAMPLE action appended to
 * 'out'; the encoding does not depend on the OpenFlow version. */
static void
encode_SAMPLE(const struct ofpact_sample *sample,
              enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
    struct nx_action_sample *na = put_NXAST_SAMPLE(out);

    na->probability = htons(sample->probability);
    na->collector_set_id = htonl(sample->collector_set_id);
    na->obs_domain_id = htonl(sample->obs_domain_id);
    na->obs_point_id = htonl(sample->obs_point_id);
}
/* Parses 'arg' as the argument to a "sample" action, and appends such an
 * action to 'ofpacts'.
 *
 * Accepted keys: probability (required, nonzero), collector_set_id,
 * obs_domain_id, obs_point_id.
 *
 * Returns NULL if successful, otherwise a malloc()'d string describing the
 * error.  The caller is responsible for freeing the returned string. */
static char * OVS_WARN_UNUSED_RESULT
parse_SAMPLE(char *arg, struct ofpbuf *ofpacts,
             enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    struct ofpact_sample *os = ofpact_put_SAMPLE(ofpacts);
    char *key, *value;
    /* ofputil_parse_key_value() tokenizes 'arg' destructively in place. */
    while (ofputil_parse_key_value(&arg, &key, &value)) {
        char *error = NULL;
        if (!strcmp(key, "probability")) {
            error = str_to_u16(value, "probability", &os->probability);
            if (!error && os->probability == 0) {
                error = xasprintf("invalid probability value \"%s\"", value);
            }
        } else if (!strcmp(key, "collector_set_id")) {
            error = str_to_u32(value, &os->collector_set_id);
        } else if (!strcmp(key, "obs_domain_id")) {
            error = str_to_u32(value, &os->obs_domain_id);
        } else if (!strcmp(key, "obs_point_id")) {
            error = str_to_u32(value, &os->obs_point_id);
        } else {
            error = xasprintf("invalid key \"%s\" in \"sample\" argument",
                              key);
        }
        if (error) {
            return error;
        }
    }
    /* Zero (the default) means "probability" was missing or explicitly 0. */
    if (os->probability == 0) {
        return xstrdup("non-zero \"probability\" must be specified on sample");
    }
    return NULL;
}
/* Formats OFPACT_SAMPLE 'a' as "sample(probability=...,...)" onto 's',
 * mirroring the key=value syntax accepted by parse_SAMPLE(). */
static void
format_SAMPLE(const struct ofpact_sample *a, struct ds *s)
{
    ds_put_format(s, "sample(probability=%"PRIu16",collector_set_id=%"PRIu32
                  ",obs_domain_id=%"PRIu32",obs_point_id=%"PRIu32")",
                  a->probability, a->collector_set_id,
                  a->obs_domain_id, a->obs_point_id);
}
/* Meter instruction. */

/* Encodes OFPACT_METER as an OpenFlow 1.3+ Meter instruction.  Meters do
 * not exist before OF1.3, so the instruction is silently omitted for
 * earlier versions. */
static void
encode_METER(const struct ofpact_meter *meter,
             enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version >= OFP13_VERSION) {
        instruction_put_OFPIT13_METER(out)->meter_id = htonl(meter->meter_id);
    }
}

/* Parses "meter:N", restricting usable protocols to OpenFlow 1.3+. */
static char * OVS_WARN_UNUSED_RESULT
parse_METER(char *arg, struct ofpbuf *ofpacts,
            enum ofputil_protocol *usable_protocols)
{
    *usable_protocols &= OFPUTIL_P_OF13_UP;
    return str_to_u32(arg, &ofpact_put_METER(ofpacts)->meter_id);
}

/* Formats OFPACT_METER 'a' as "meter:N" onto 's'. */
static void
format_METER(const struct ofpact_meter *a, struct ds *s)
{
    ds_put_format(s, "meter:%"PRIu32, a->meter_id);
}
/* Clear-Actions instruction. */

/* Encodes OFPACT_CLEAR_ACTIONS.  OpenFlow 1.0 has no instructions, so
 * nothing is emitted for it. */
static void
encode_CLEAR_ACTIONS(const struct ofpact_null *null OVS_UNUSED,
                     enum ofp_version ofp_version OVS_UNUSED,
                     struct ofpbuf *out OVS_UNUSED)
{
    if (ofp_version > OFP10_VERSION) {
        instruction_put_OFPIT11_CLEAR_ACTIONS(out);
    }
}

/* Parses the argument-less "clear_actions" instruction. */
static char * OVS_WARN_UNUSED_RESULT
parse_CLEAR_ACTIONS(char *arg OVS_UNUSED, struct ofpbuf *ofpacts,
                    enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    ofpact_put_CLEAR_ACTIONS(ofpacts);
    return NULL;
}

/* Formats OFPACT_CLEAR_ACTIONS as "clear_actions". */
static void
format_CLEAR_ACTIONS(const struct ofpact_null *a OVS_UNUSED, struct ds *s)
{
    ds_put_cstr(s, "clear_actions");
}
/* Write-Actions instruction. */

/* Encodes OFPACT_WRITE_ACTIONS and its nested actions as an OpenFlow 1.1+
 * Write-Actions instruction.  OpenFlow 1.0 has no instructions, so nothing
 * is emitted for it. */
static void
encode_WRITE_ACTIONS(const struct ofpact_nest *actions,
                     enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version > OFP10_VERSION) {
        const size_t ofs = out->size;
        instruction_put_OFPIT11_WRITE_ACTIONS(out);
        ofpacts_put_openflow_actions(actions->actions,
                                     ofpact_nest_get_action_len(actions),
                                     out, ofp_version);
        /* Patch the instruction length now that the nested actions'
         * encoded size is known. */
        ofpacts_update_instruction_actions(out, ofs);
    }
}
/* Parses the nested actions of a "write_actions(...)" instruction into
 * 'ofpacts'.  Returns NULL on success, otherwise a malloc()'d error string
 * that the caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_WRITE_ACTIONS(char *arg, struct ofpbuf *ofpacts,
                    enum ofputil_protocol *usable_protocols)
{
    struct ofpact_nest *on;
    char *error;
    size_t ofs;
    /* Pull off existing actions or instructions. */
    ofpact_pad(ofpacts);
    ofs = ofpacts->size;
    ofpbuf_pull(ofpacts, ofs);
    /* Add a Write-Actions instruction and then pull it off. */
    ofpact_put(ofpacts, OFPACT_WRITE_ACTIONS, sizeof *on);
    ofpbuf_pull(ofpacts, sizeof *on);
    /* Parse nested actions.
     *
     * We pulled off "write-actions" and the previous actions because the
     * OFPACT_WRITE_ACTIONS is only partially constructed: its length is such
     * that it doesn't actually include the nested actions.  That means that
     * ofpacts_parse() would reject them as being part of an Apply-Actions that
     * follows a Write-Actions, which is an invalid order. */
    error = ofpacts_parse(arg, ofpacts, usable_protocols, false);
    /* Put the Write-Actions back on and update its length.  (This runs even
     * on error so the buffer is restored to a consistent state.) */
    on = ofpbuf_push_uninit(ofpacts, sizeof *on);
    on->ofpact.len = ofpacts->size;
    /* Put any previous actions or instructions back on. */
    ofpbuf_push_uninit(ofpacts, ofs);
    return error;
}
/* Formats OFPACT_WRITE_ACTIONS 'a' as "write_actions(...)" with its nested
 * actions formatted inside the parentheses. */
static void
format_WRITE_ACTIONS(const struct ofpact_nest *a, struct ds *s)
{
    ds_put_cstr(s, "write_actions(");
    ofpacts_format(a->actions, ofpact_nest_get_action_len(a), s);
    ds_put_char(s, ')');
}
/* Action structure for NXAST_WRITE_METADATA.
 *
 * Modifies the 'mask' bits of the metadata value.  (Used to express the
 * OpenFlow 1.1+ Write-Metadata instruction under OpenFlow 1.0, which lacks
 * instructions.) */
struct nx_action_write_metadata {
    ovs_be16 type;                  /* OFPAT_VENDOR. */
    ovs_be16 len;                   /* Length is 32. */
    ovs_be32 vendor;                /* NX_VENDOR_ID. */
    ovs_be16 subtype;               /* NXAST_WRITE_METADATA. */
    uint8_t zeros[6];               /* Must be zero. */
    ovs_be64 metadata;              /* Metadata register. */
    ovs_be64 mask;                  /* Metadata mask. */
};
OFP_ASSERT(sizeof(struct nx_action_write_metadata) == 32);
/* Converts an NXAST_WRITE_METADATA wire action 'nawm' into an
 * OFPACT_WRITE_METADATA appended to 'out'.  The padding bytes must be zero
 * on the wire. */
static enum ofperr
decode_NXAST_RAW_WRITE_METADATA(const struct nx_action_write_metadata *nawm,
                                struct ofpbuf *out)
{
    struct ofpact_metadata *md;

    if (!is_all_zeros(nawm->zeros, sizeof nawm->zeros)) {
        return OFPERR_NXBRC_MUST_BE_ZERO;
    }

    md = ofpact_put_WRITE_METADATA(out);
    md->metadata = nawm->metadata;
    md->mask = nawm->mask;
    return 0;
}
/* Encodes OFPACT_WRITE_METADATA.  OpenFlow 1.1+ has a native Write-Metadata
 * instruction; OpenFlow 1.0 falls back to the NXAST_WRITE_METADATA vendor
 * action. */
static void
encode_WRITE_METADATA(const struct ofpact_metadata *metadata,
                      enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version != OFP10_VERSION) {
        struct ofp11_instruction_write_metadata *oiwm;

        oiwm = instruction_put_OFPIT11_WRITE_METADATA(out);
        oiwm->metadata = metadata->metadata;
        oiwm->metadata_mask = metadata->mask;
    } else {
        struct nx_action_write_metadata *nawm;

        nawm = put_NXAST_WRITE_METADATA(out);
        nawm->metadata = metadata->metadata;
        nawm->mask = metadata->mask;
    }
}
/* Parses "write_metadata:VALUE[/MASK]" into an OFPACT_WRITE_METADATA.
 * Without an explicit mask, all metadata bits are written.  Returns NULL on
 * success, otherwise a malloc()'d error string the caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_WRITE_METADATA(char *arg, struct ofpbuf *ofpacts,
                     enum ofputil_protocol *usable_protocols)
{
    struct ofpact_metadata *om;
    char *mask_s = strchr(arg, '/');

    *usable_protocols &= OFPUTIL_P_NXM_OF11_UP;
    om = ofpact_put_WRITE_METADATA(ofpacts);

    if (!mask_s) {
        om->mask = OVS_BE64_MAX;        /* No mask given: write all bits. */
    } else {
        char *error;

        *mask_s = '\0';                 /* Split value from mask in place. */
        error = str_to_be64(mask_s + 1, &om->mask);
        if (error) {
            return error;
        }
    }

    return str_to_be64(arg, &om->metadata);
}
/* Formats OFPACT_WRITE_METADATA 'a' as "write_metadata:VALUE[/MASK]",
 * omitting the mask when it is all-ones (the default). */
static void
format_WRITE_METADATA(const struct ofpact_metadata *a, struct ds *s)
{
    ds_put_format(s, "write_metadata:%#"PRIx64, ntohll(a->metadata));
    if (a->mask != OVS_BE64_MAX) {
        ds_put_format(s, "/%#"PRIx64, ntohll(a->mask));
    }
}
/* Goto-Table instruction. */

/* Encodes OFPACT_GOTO_TABLE.  OpenFlow 1.0 has no Goto-Table instruction, so
 * the same effect is approximated with a Nicira resubmit-to-table action on
 * the packet's original in_port. */
static void
encode_GOTO_TABLE(const struct ofpact_goto_table *goto_table,
                  enum ofp_version ofp_version, struct ofpbuf *out)
{
    if (ofp_version == OFP10_VERSION) {
        struct nx_action_resubmit *nar;
        nar = put_NXAST_RESUBMIT_TABLE(out);
        nar->table = goto_table->table_id;
        nar->in_port = htons(ofp_to_u16(OFPP_IN_PORT));
    } else {
        struct ofp11_instruction_goto_table *oigt;
        oigt = instruction_put_OFPIT11_GOTO_TABLE(out);
        oigt->table_id = goto_table->table_id;
        memset(oigt->pad, 0, sizeof oigt->pad);
    }
}
/* Parses "goto_table:N" into an OFPACT_GOTO_TABLE.  Returns NULL on
 * success, otherwise a malloc()'d error string the caller must free. */
static char * OVS_WARN_UNUSED_RESULT
parse_GOTO_TABLE(char *arg, struct ofpbuf *ofpacts,
                 enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
    struct ofpact_goto_table *ogt = ofpact_put_GOTO_TABLE(ofpacts);
    char *table_s = strsep(&arg, ",");
    if (!table_s || !table_s[0]) {
        return xstrdup("instruction goto-table needs table id");
    }
    return str_to_u8(table_s, "table", &ogt->table_id);
}

/* Formats OFPACT_GOTO_TABLE 'a' as "goto_table:N" onto 's'. */
static void
format_GOTO_TABLE(const struct ofpact_goto_table *a, struct ds *s)
{
    ds_put_format(s, "goto_table:%"PRIu8, a->table_id);
}
static void
log_bad_action(const struct ofp_action_header *actions, size_t actions_len,
const struct ofp_action_header *bad_action, enum ofperr error)
{
if (!VLOG_DROP_WARN(&rl)) {
struct ds s;
ds_init(&s);
ds_put_hex_dump(&s, actions, actions_len, 0, false);
VLOG_WARN("bad action at offset %#"PRIxPTR" (%s):\n%s",
(char *)bad_action - (char *)actions,
ofperr_get_name(error), ds_cstr(&s));
ds_destroy(&s);
}
}
/* Decodes 'actions_len' bytes of raw OpenFlow actions at 'actions' (wire
 * format for 'ofp_version') into ofpacts appended to 'ofpacts'.  Stops and
 * returns an OFPERR_* (after logging the offending action) on the first
 * failure; returns 0 on success. */
static enum ofperr
ofpacts_decode(const void *actions, size_t actions_len,
               enum ofp_version ofp_version, struct ofpbuf *ofpacts)
{
    struct ofpbuf openflow;
    ofpbuf_use_const(&openflow, actions, actions_len);
    while (openflow.size) {
        const struct ofp_action_header *action = openflow.data;
        enum ofp_raw_action_type raw;
        enum ofperr error;
        uint64_t arg;
        /* First identify the raw action, then convert it to an ofpact. */
        error = ofpact_pull_raw(&openflow, ofp_version, &raw, &arg);
        if (!error) {
            error = ofpact_decode(action, raw, arg, ofpacts);
        }
        if (error) {
            log_bad_action(actions, actions_len, action, error);
            return error;
        }
    }
    ofpact_pad(ofpacts);
    return 0;
}
/* Common helper for pulling 'actions_len' bytes of OpenFlow actions off the
 * front of 'openflow', decoding them into 'ofpacts' (replacing its previous
 * contents), and verifying them against the instruction bitmap
 * 'allowed_ovsinsts'.  On failure 'ofpacts' is cleared and an OFPERR_* is
 * returned. */
static enum ofperr
ofpacts_pull_openflow_actions__(struct ofpbuf *openflow,
                                unsigned int actions_len,
                                enum ofp_version version,
                                uint32_t allowed_ovsinsts,
                                struct ofpbuf *ofpacts)
{
    const struct ofp_action_header *actions;
    enum ofperr error;
    ofpbuf_clear(ofpacts);
    if (actions_len % OFP_ACTION_ALIGN != 0) {
        VLOG_WARN_RL(&rl, "OpenFlow message actions length %u is not a "
                     "multiple of %d", actions_len, OFP_ACTION_ALIGN);
        return OFPERR_OFPBRC_BAD_LEN;
    }
    actions = ofpbuf_try_pull(openflow, actions_len);
    if (actions == NULL) {
        VLOG_WARN_RL(&rl, "OpenFlow message actions length %u exceeds "
                     "remaining message length (%"PRIu32")",
                     actions_len, openflow->size);
        return OFPERR_OFPBRC_BAD_LEN;
    }
    error = ofpacts_decode(actions, actions_len, version, ofpacts);
    if (error) {
        ofpbuf_clear(ofpacts);
        return error;
    }
    /* Check ordering/validity of the decoded ofpacts as instructions. */
    error = ofpacts_verify(ofpacts->data, ofpacts->size,
                           allowed_ovsinsts);
    if (error) {
        ofpbuf_clear(ofpacts);
    }
    return error;
}
/* Attempts to convert 'actions_len' bytes of OpenFlow actions from the
 * front of 'openflow' into ofpacts.  On success, replaces any existing content
 * in 'ofpacts' by the converted ofpacts; on failure, clears 'ofpacts'.
 * Returns 0 if successful, otherwise an OpenFlow error.
 *
 * Actions are processed according to their OpenFlow version which
 * is provided in the 'version' parameter.
 *
 * In most places in OpenFlow, actions appear encapsulated in instructions, so
 * you should call ofpacts_pull_openflow_instructions() instead of this
 * function.
 *
 * The parsed actions are valid generically, but they may not be valid in a
 * specific context.  For example, port numbers up to OFPP_MAX are valid
 * generically, but specific datapaths may only support port numbers in a
 * smaller range.  Use ofpacts_check() to additionally check whether actions
 * are valid in a specific context. */
enum ofperr
ofpacts_pull_openflow_actions(struct ofpbuf *openflow,
                              unsigned int actions_len,
                              enum ofp_version version,
                              struct ofpbuf *ofpacts)
{
    /* Bare actions are implicitly an Apply-Actions instruction. */
    return ofpacts_pull_openflow_actions__(openflow, actions_len, version,
                                           1u << OVSINST_OFPIT11_APPLY_ACTIONS,
                                           ofpacts);
}
/* OpenFlow 1.1 actions. */

/* True if an action sets the value of a field
 * in a way that is compatible with the action set.
 * The field can be set via either a set or a move action.
 * False otherwise. */
static bool
ofpact_is_set_or_move_action(const struct ofpact *a)
{
    switch (a->type) {
    case OFPACT_SET_FIELD:
    case OFPACT_REG_MOVE:
    case OFPACT_SET_ETH_DST:
    case OFPACT_SET_ETH_SRC:
    case OFPACT_SET_IP_DSCP:
    case OFPACT_SET_IP_ECN:
    case OFPACT_SET_IP_TTL:
    case OFPACT_SET_IPV4_DST:
    case OFPACT_SET_IPV4_SRC:
    case OFPACT_SET_L4_DST_PORT:
    case OFPACT_SET_L4_SRC_PORT:
    case OFPACT_SET_MPLS_LABEL:
    case OFPACT_SET_MPLS_TC:
    case OFPACT_SET_MPLS_TTL:
    case OFPACT_SET_QUEUE:
    case OFPACT_SET_TUNNEL:
    case OFPACT_SET_VLAN_PCP:
    case OFPACT_SET_VLAN_VID:
        return true;
    case OFPACT_BUNDLE:
    case OFPACT_CLEAR_ACTIONS:
    case OFPACT_CONTROLLER:
    case OFPACT_DEC_MPLS_TTL:
    case OFPACT_DEC_TTL:
    case OFPACT_ENQUEUE:
    case OFPACT_EXIT:
    case OFPACT_UNROLL_XLATE:
    case OFPACT_FIN_TIMEOUT:
    case OFPACT_GOTO_TABLE:
    case OFPACT_GROUP:
    case OFPACT_LEARN:
    case OFPACT_CONJUNCTION:
    case OFPACT_METER:
    case OFPACT_MULTIPATH:
    case OFPACT_NOTE:
    case OFPACT_OUTPUT:
    case OFPACT_OUTPUT_REG:
    case OFPACT_POP_MPLS:
    case OFPACT_POP_QUEUE:
    case OFPACT_PUSH_MPLS:
    case OFPACT_PUSH_VLAN:
    case OFPACT_RESUBMIT:
    case OFPACT_SAMPLE:
    case OFPACT_STACK_POP:
    case OFPACT_STACK_PUSH:
    case OFPACT_STRIP_VLAN:
    case OFPACT_WRITE_ACTIONS:
    case OFPACT_WRITE_METADATA:
        return false;
    default:
        /* Every OFPACT_* must be listed explicitly above. */
        OVS_NOT_REACHED();
    }
}
/* True if an action is allowed in the action set.
 * False otherwise. */
static bool
ofpact_is_allowed_in_actions_set(const struct ofpact *a)
{
    switch (a->type) {
    case OFPACT_DEC_MPLS_TTL:
    case OFPACT_DEC_TTL:
    case OFPACT_GROUP:
    case OFPACT_OUTPUT:
    case OFPACT_POP_MPLS:
    case OFPACT_PUSH_MPLS:
    case OFPACT_PUSH_VLAN:
    case OFPACT_REG_MOVE:
    case OFPACT_SET_FIELD:
    case OFPACT_SET_ETH_DST:
    case OFPACT_SET_ETH_SRC:
    case OFPACT_SET_IP_DSCP:
    case OFPACT_SET_IP_ECN:
    case OFPACT_SET_IP_TTL:
    case OFPACT_SET_IPV4_DST:
    case OFPACT_SET_IPV4_SRC:
    case OFPACT_SET_L4_DST_PORT:
    case OFPACT_SET_L4_SRC_PORT:
    case OFPACT_SET_MPLS_LABEL:
    case OFPACT_SET_MPLS_TC:
    case OFPACT_SET_MPLS_TTL:
    case OFPACT_SET_QUEUE:
    case OFPACT_SET_TUNNEL:
    case OFPACT_SET_VLAN_PCP:
    case OFPACT_SET_VLAN_VID:
    case OFPACT_STRIP_VLAN:
        return true;
    /* In general these actions are excluded because they are not part of
     * the OpenFlow specification nor map to actions that are defined in
     * the specification.  Thus the order in which they should be applied
     * in the action set is undefined. */
    case OFPACT_BUNDLE:
    case OFPACT_CONTROLLER:
    case OFPACT_ENQUEUE:
    case OFPACT_EXIT:
    case OFPACT_UNROLL_XLATE:
    case OFPACT_FIN_TIMEOUT:
    case OFPACT_LEARN:
    case OFPACT_CONJUNCTION:
    case OFPACT_MULTIPATH:
    case OFPACT_NOTE:
    case OFPACT_OUTPUT_REG:
    case OFPACT_POP_QUEUE:
    case OFPACT_RESUBMIT:
    case OFPACT_SAMPLE:
    case OFPACT_STACK_POP:
    case OFPACT_STACK_PUSH:
    /* The action set may only include actions and thus
     * may not include any instructions */
    case OFPACT_CLEAR_ACTIONS:
    case OFPACT_GOTO_TABLE:
    case OFPACT_METER:
    case OFPACT_WRITE_ACTIONS:
    case OFPACT_WRITE_METADATA:
        return false;
    default:
        /* Every OFPACT_* must be listed explicitly above. */
        OVS_NOT_REACHED();
    }
}
/* Append ofpact 'a' onto the tail of 'out', including its alignment
 * padding. */
static void
ofpact_copy(struct ofpbuf *out, const struct ofpact *a)
{
    ofpbuf_put(out, a, OFPACT_ALIGN(a->len));
}
/* Copies the last ofpact whose type is 'filter' from 'in' to 'out'.
 * Returns true if one was found and copied, false otherwise. */
static bool
ofpacts_copy_last(struct ofpbuf *out, const struct ofpbuf *in,
                  enum ofpact_type filter)
{
    const struct ofpact *last = NULL;
    const struct ofpact *a;

    /* Scan the whole buffer, remembering the most recent match. */
    OFPACT_FOR_EACH (a, in->data, in->size) {
        if (a->type == filter) {
            last = a;
        }
    }

    if (!last) {
        return false;
    }
    ofpact_copy(out, last);
    return true;
}
/* Append all ofpacts, for which 'filter' returns true, from 'in' to 'out'.
 * The order of appended ofpacts is preserved between 'in' and 'out'. */
static void
ofpacts_copy_all(struct ofpbuf *out, const struct ofpbuf *in,
                 bool (*filter)(const struct ofpact *))
{
    const struct ofpact *a;

    OFPACT_FOR_EACH (a, in->data, in->size) {
        if (!filter(a)) {
            continue;
        }
        ofpact_copy(out, a);
    }
}
/* Reads 'action_set', which contains ofpacts accumulated by
 * OFPACT_WRITE_ACTIONS instructions, and writes equivalent actions to be
 * executed directly into 'action_list'.  (These names correspond to the
 * "Action Set" and "Action List" terms used in OpenFlow 1.1+.)
 *
 * In general this involves appending the last instance of each action that is
 * admissible in the action set in the order described in the OpenFlow
 * specification.
 *
 * Exceptions:
 * + output action is only appended if no group action was present in 'in'.
 * + As a simplification all set actions are copied in the order they are
 *   provided in 'in' as many set actions applied to a field has the same
 *   effect as only applying the last action that sets a field and
 *   duplicates are removed by do_xlate_actions().
 *   This has an unwanted side-effect of composing multiple
 *   LOAD_REG actions that touch different regions of the same field. */
void
ofpacts_execute_action_set(struct ofpbuf *action_list,
                           const struct ofpbuf *action_set)
{
    /* The OpenFlow spec "Action Set" section specifies this order. */
    ofpacts_copy_last(action_list, action_set, OFPACT_STRIP_VLAN);
    ofpacts_copy_last(action_list, action_set, OFPACT_POP_MPLS);
    ofpacts_copy_last(action_list, action_set, OFPACT_PUSH_MPLS);
    ofpacts_copy_last(action_list, action_set, OFPACT_PUSH_VLAN);
    ofpacts_copy_last(action_list, action_set, OFPACT_DEC_TTL);
    ofpacts_copy_last(action_list, action_set, OFPACT_DEC_MPLS_TTL);
    ofpacts_copy_all(action_list, action_set, ofpact_is_set_or_move_action);
    ofpacts_copy_last(action_list, action_set, OFPACT_SET_QUEUE);
    /* If both OFPACT_GROUP and OFPACT_OUTPUT are present, OpenFlow says that
     * we should execute only OFPACT_GROUP.
     *
     * If neither OFPACT_GROUP nor OFPACT_OUTPUT is present, then we can drop
     * all the actions because there's no point in modifying a packet that will
     * not be sent anywhere. */
    if (!ofpacts_copy_last(action_list, action_set, OFPACT_GROUP) &&
        !ofpacts_copy_last(action_list, action_set, OFPACT_OUTPUT) &&
        !ofpacts_copy_last(action_list, action_set, OFPACT_RESUBMIT)) {
        ofpbuf_clear(action_list);
    }
}
/* Decodes 'n_in' bytes of OpenFlow actions 'in' into ofpacts appended to
 * 'out', then verifies that every newly decoded action is permitted inside
 * an OpenFlow action set.  Returns 0 on success, otherwise an OFPERR_*. */
static enum ofperr
ofpacts_decode_for_action_set(const struct ofp_action_header *in,
                              size_t n_in, enum ofp_version version,
                              struct ofpbuf *out)
{
    enum ofperr error;
    struct ofpact *a;
    size_t start = out->size;   /* Only check actions added below. */
    error = ofpacts_decode(in, n_in, version, out);
    if (error) {
        return error;
    }
    OFPACT_FOR_EACH (a, ofpact_end(out->data, start), out->size - start) {
        if (!ofpact_is_allowed_in_actions_set(a)) {
            VLOG_WARN_RL(&rl, "disallowed action in action set");
            return OFPERR_OFPBAC_BAD_TYPE;
        }
    }
    return 0;
}
/* OpenFlow 1.1 instructions. */

/* Maps an OVSINST_* instruction type to its human-readable name. */
struct instruction_type_info {
    enum ovs_instruction_type type;
    const char *name;
};
/* Table generated from OVS_INSTRUCTIONS, indexed by OVSINST_* value. */
static const struct instruction_type_info inst_info[] = {
#define DEFINE_INST(ENUM, STRUCT, EXTENSIBLE, NAME)    {OVSINST_##ENUM, NAME},
OVS_INSTRUCTIONS
#undef DEFINE_INST
};

/* Returns the name of instruction 'type' (e.g. "apply_actions"). */
const char *
ovs_instruction_name_from_type(enum ovs_instruction_type type)
{
    return inst_info[type].name;
}
/* Returns the OVSINST_* value whose name matches 'name' (case-insensitive),
 * or -1 if there is none. */
int
ovs_instruction_type_from_name(const char *name)
{
    size_t i;

    for (i = 0; i < ARRAY_SIZE(inst_info); i++) {
        if (!strcasecmp(name, inst_info[i].name)) {
            return inst_info[i].type;
        }
    }
    return -1;
}
/* Returns the instruction that ofpact 'type' belongs to: the dedicated
 * instruction for the few ofpacts that map 1:1 to an instruction, otherwise
 * Apply-Actions (all ordinary actions fall through to the default). */
enum ovs_instruction_type
ovs_instruction_type_from_ofpact_type(enum ofpact_type type)
{
    switch (type) {
    case OFPACT_METER:
        return OVSINST_OFPIT13_METER;
    case OFPACT_CLEAR_ACTIONS:
        return OVSINST_OFPIT11_CLEAR_ACTIONS;
    case OFPACT_WRITE_ACTIONS:
        return OVSINST_OFPIT11_WRITE_ACTIONS;
    case OFPACT_WRITE_METADATA:
        return OVSINST_OFPIT11_WRITE_METADATA;
    case OFPACT_GOTO_TABLE:
        return OVSINST_OFPIT11_GOTO_TABLE;
    case OFPACT_OUTPUT:
    case OFPACT_GROUP:
    case OFPACT_CONTROLLER:
    case OFPACT_ENQUEUE:
    case OFPACT_OUTPUT_REG:
    case OFPACT_BUNDLE:
    case OFPACT_SET_VLAN_VID:
    case OFPACT_SET_VLAN_PCP:
    case OFPACT_STRIP_VLAN:
    case OFPACT_PUSH_VLAN:
    case OFPACT_SET_ETH_SRC:
    case OFPACT_SET_ETH_DST:
    case OFPACT_SET_IPV4_SRC:
    case OFPACT_SET_IPV4_DST:
    case OFPACT_SET_IP_DSCP:
    case OFPACT_SET_IP_ECN:
    case OFPACT_SET_IP_TTL:
    case OFPACT_SET_L4_SRC_PORT:
    case OFPACT_SET_L4_DST_PORT:
    case OFPACT_REG_MOVE:
    case OFPACT_SET_FIELD:
    case OFPACT_STACK_PUSH:
    case OFPACT_STACK_POP:
    case OFPACT_DEC_TTL:
    case OFPACT_SET_MPLS_LABEL:
    case OFPACT_SET_MPLS_TC:
    case OFPACT_SET_MPLS_TTL:
    case OFPACT_DEC_MPLS_TTL:
    case OFPACT_PUSH_MPLS:
    case OFPACT_POP_MPLS:
    case OFPACT_SET_TUNNEL:
    case OFPACT_SET_QUEUE:
    case OFPACT_POP_QUEUE:
    case OFPACT_FIN_TIMEOUT:
    case OFPACT_RESUBMIT:
    case OFPACT_LEARN:
    case OFPACT_CONJUNCTION:
    case OFPACT_MULTIPATH:
    case OFPACT_NOTE:
    case OFPACT_EXIT:
    case OFPACT_UNROLL_XLATE:
    case OFPACT_SAMPLE:
    default:
        return OVSINST_OFPIT11_APPLY_ACTIONS;
    }
}
/* Converts the wire instruction type number 'inst_type' into the
 * corresponding OVSINST_* value in '*instruction_type'.  Returns 0 on
 * success or OFPERR_OFPBIC_UNKNOWN_INST if 'inst_type' is not recognized.
 * (The cases are generated from the OVS_INSTRUCTIONS table.) */
enum ofperr
ovs_instruction_type_from_inst_type(enum ovs_instruction_type *instruction_type,
                                    const uint16_t inst_type)
{
    switch (inst_type) {
#define DEFINE_INST(ENUM, STRUCT, EXTENSIBLE, NAME) \
    case ENUM:                                      \
        *instruction_type = OVSINST_##ENUM;         \
        return 0;
OVS_INSTRUCTIONS
#undef DEFINE_INST
    default:
        return OFPERR_OFPBIC_UNKNOWN_INST;
    }
}
/* Two-way translation between OVS's internal "OVSINST_*" representation of
 * instructions and the "OFPIT_*" representation used in OpenFlow. */
struct ovsinst_map {
    enum ovs_instruction_type ovsinst; /* Internal name for instruction. */
    int ofpit;                         /* OFPIT_* number from OpenFlow spec. */
};

/* Returns the OVSINST_* <-> OFPIT_* mapping table appropriate for
 * 'version'.  Each table ends with a sentinel entry whose ofpit is -1. */
static const struct ovsinst_map *
get_ovsinst_map(enum ofp_version version)
{
    /* OpenFlow 1.1 and 1.2 instructions. */
    static const struct ovsinst_map of11[] = {
        { OVSINST_OFPIT11_GOTO_TABLE, 1 },
        { OVSINST_OFPIT11_WRITE_METADATA, 2 },
        { OVSINST_OFPIT11_WRITE_ACTIONS, 3 },
        { OVSINST_OFPIT11_APPLY_ACTIONS, 4 },
        { OVSINST_OFPIT11_CLEAR_ACTIONS, 5 },
        { 0, -1 },
    };
    /* OpenFlow 1.3+ instructions (adds Meter). */
    static const struct ovsinst_map of13[] = {
        { OVSINST_OFPIT11_GOTO_TABLE, 1 },
        { OVSINST_OFPIT11_WRITE_METADATA, 2 },
        { OVSINST_OFPIT11_WRITE_ACTIONS, 3 },
        { OVSINST_OFPIT11_APPLY_ACTIONS, 4 },
        { OVSINST_OFPIT11_CLEAR_ACTIONS, 5 },
        { OVSINST_OFPIT13_METER, 6 },
        { 0, -1 },
    };
    return version < OFP13_VERSION ? of11 : of13;
}
/* Converts 'ovsinst_bitmap', a bitmap whose bits correspond to OVSINST_*
 * values, into a bitmap of instructions suitable for OpenFlow 'version'
 * (OFP11_VERSION or later), and returns the result in network byte order. */
ovs_be32
ovsinst_bitmap_to_openflow(uint32_t ovsinst_bitmap, enum ofp_version version)
{
    const struct ovsinst_map *m;
    uint32_t ofpit_bitmap = 0;

    for (m = get_ovsinst_map(version); m->ofpit >= 0; m++) {
        /* Translate each set OVSINST_* bit into its OFPIT_* bit. */
        if ((ovsinst_bitmap >> m->ovsinst) & 1) {
            ofpit_bitmap |= 1u << m->ofpit;
        }
    }
    return htonl(ofpit_bitmap);
}
/* Converts 'ofpit_bitmap', a bitmap of instructions from an OpenFlow message
 * with the given 'version' (OFP11_VERSION or later) into a bitmap whose bits
 * correspond to OVSINST_* values, and returns the result.  Unknown OFPIT_*
 * bits are simply ignored. */
uint32_t
ovsinst_bitmap_from_openflow(ovs_be32 ofpit_bitmap, enum ofp_version version)
{
    const struct ovsinst_map *m;
    uint32_t ovsinst_bitmap = 0;

    for (m = get_ovsinst_map(version); m->ofpit >= 0; m++) {
        ovs_be32 bit = htonl(1u << m->ofpit);

        if (ofpit_bitmap & bit) {
            ovsinst_bitmap |= 1u << m->ovsinst;
        }
    }
    return ovsinst_bitmap;
}
/* Returns a pointer to the instruction following 'inst', based on 'inst''s
 * own length field. */
static inline struct ofp11_instruction *
instruction_next(const struct ofp11_instruction *inst)
{
    return ((struct ofp11_instruction *) (void *)
            ((uint8_t *) inst + ntohs(inst->len)));
}

/* Returns true if 'inst''s claimed length is plausible: properly aligned,
 * at least a full header, and no longer than the 'n_instructions' units
 * remaining in the buffer.  (The division avoids integer overflow.) */
static inline bool
instruction_is_valid(const struct ofp11_instruction *inst,
                     size_t n_instructions)
{
    uint16_t len = ntohs(inst->len);
    return (!(len % OFP11_INSTRUCTION_ALIGN)
            && len >= sizeof *inst
            && len / sizeof *inst <= n_instructions);
}

/* This macro is careful to check for instructions with bad lengths. */
#define INSTRUCTION_FOR_EACH(ITER, LEFT, INSTRUCTIONS, N_INSTRUCTIONS)  \
    for ((ITER) = (INSTRUCTIONS), (LEFT) = (N_INSTRUCTIONS);            \
         (LEFT) > 0 && instruction_is_valid(ITER, LEFT);                \
         ((LEFT) -= (ntohs((ITER)->len)                                 \
                     / sizeof(struct ofp11_instruction)),               \
          (ITER) = instruction_next(ITER)))
/* Identifies the OpenFlow 1.1+ instruction 'inst', storing its OVSINST_*
 * type in '*type'.  Validates the instruction's length against its struct:
 * extensible instructions may be longer than the base struct, others must
 * match it exactly.  Returns 0 on success, otherwise an OFPERR_*. */
static enum ofperr
decode_openflow11_instruction(const struct ofp11_instruction *inst,
                              enum ovs_instruction_type *type)
{
    uint16_t len = ntohs(inst->len);
    switch (inst->type) {
    case CONSTANT_HTONS(OFPIT11_EXPERIMENTER):
        return OFPERR_OFPBIC_BAD_EXPERIMENTER;
#define DEFINE_INST(ENUM, STRUCT, EXTENSIBLE, NAME)     \
        case CONSTANT_HTONS(ENUM):                      \
            if (EXTENSIBLE                              \
                ? len >= sizeof(struct STRUCT)          \
                : len == sizeof(struct STRUCT)) {       \
                *type = OVSINST_##ENUM;                 \
                return 0;                               \
            } else {                                    \
                return OFPERR_OFPBIC_BAD_LEN;           \
            }
OVS_INSTRUCTIONS
#undef DEFINE_INST
    default:
        return OFPERR_OFPBIC_UNKNOWN_INST;
    }
}
/* Scans the 'n_insts' alignment units of instructions in 'insts', storing a
 * pointer to each instruction found into 'out', which is indexed by
 * OVSINST_* type (NULL for absent instructions).  Rejects duplicate or
 * malformed instructions.  Returns 0 on success, otherwise an OFPERR_*. */
static enum ofperr
decode_openflow11_instructions(const struct ofp11_instruction insts[],
                               size_t n_insts,
                               const struct ofp11_instruction *out[])
{
    const struct ofp11_instruction *inst;
    size_t left;
    memset(out, 0, N_OVS_INSTRUCTIONS * sizeof *out);
    INSTRUCTION_FOR_EACH (inst, left, insts, n_insts) {
        enum ovs_instruction_type type;
        enum ofperr error;
        error = decode_openflow11_instruction(inst, &type);
        if (error) {
            return error;
        }
        if (out[type]) {
            /* Each instruction may appear at most once. */
            return OFPERR_OFPBIC_DUP_INST;
        }
        out[type] = inst;
    }
    /* 'left' is nonzero if the loop stopped on an invalid length. */
    if (left) {
        VLOG_WARN_RL(&rl, "bad instruction format at offset %"PRIuSIZE,
                     (n_insts - left) * sizeof *inst);
        return OFPERR_OFPBIC_BAD_LEN;
    }
    return 0;
}
/* Extracts the actions embedded in instruction 'inst' (an Apply-Actions or
 * Write-Actions instruction): '*actions' points just past the instruction
 * header and '*actions_len' is the remaining byte count. */
static void
get_actions_from_instruction(const struct ofp11_instruction *inst,
                             const struct ofp_action_header **actions,
                             size_t *actions_len)
{
    *actions = ALIGNED_CAST(const struct ofp_action_header *, inst + 1);
    *actions_len = ntohs(inst->len) - sizeof *inst;
}
/* Parses the 'instructions_len' bytes at the front of 'openflow' as OpenFlow
 * instructions (or, for OpenFlow 1.0, as a flat list of actions), converting
 * them to ofpacts appended to 'ofpacts' (which is cleared first).
 *
 * Returns 0 on success, otherwise an OFPERR_* value.  On error, 'ofpacts' is
 * cleared. */
enum ofperr
ofpacts_pull_openflow_instructions(struct ofpbuf *openflow,
                                   unsigned int instructions_len,
                                   enum ofp_version version,
                                   struct ofpbuf *ofpacts)
{
    const struct ofp11_instruction *instructions;
    const struct ofp11_instruction *insts[N_OVS_INSTRUCTIONS];
    enum ofperr error;
    /* OpenFlow 1.0 has no instructions, only actions; every instruction kind
     * is allowed so the equivalent actions can be expressed. */
    if (version == OFP10_VERSION) {
        return ofpacts_pull_openflow_actions__(openflow, instructions_len,
                                               version,
                                               (1u << N_OVS_INSTRUCTIONS) - 1,
                                               ofpacts);
    }
    ofpbuf_clear(ofpacts);
    /* Instructions must be a whole number of 8-byte units. */
    if (instructions_len % OFP11_INSTRUCTION_ALIGN != 0) {
        VLOG_WARN_RL(&rl, "OpenFlow message instructions length %u is not a "
                     "multiple of %d",
                     instructions_len, OFP11_INSTRUCTION_ALIGN);
        error = OFPERR_OFPBIC_BAD_LEN;
        goto exit;
    }
    instructions = ofpbuf_try_pull(openflow, instructions_len);
    if (instructions == NULL) {
        VLOG_WARN_RL(&rl, "OpenFlow message instructions length %u exceeds "
                     "remaining message length (%"PRIu32")",
                     instructions_len, openflow->size);
        error = OFPERR_OFPBIC_BAD_LEN;
        goto exit;
    }
    /* Index each instruction by its OVSINST_* type, rejecting duplicates and
     * unknown instructions. */
    error = decode_openflow11_instructions(
        instructions, instructions_len / OFP11_INSTRUCTION_ALIGN,
        insts);
    if (error) {
        goto exit;
    }
    /* Convert each instruction present, in the canonical order that
     * ofpacts_verify() later enforces: meter, apply-actions, clear-actions,
     * write-actions, write-metadata, goto-table. */
    if (insts[OVSINST_OFPIT13_METER]) {
        const struct ofp13_instruction_meter *oim;
        struct ofpact_meter *om;
        oim = ALIGNED_CAST(const struct ofp13_instruction_meter *,
                           insts[OVSINST_OFPIT13_METER]);
        om = ofpact_put_METER(ofpacts);
        om->meter_id = ntohl(oim->meter_id);
    }
    if (insts[OVSINST_OFPIT11_APPLY_ACTIONS]) {
        const struct ofp_action_header *actions;
        size_t actions_len;
        get_actions_from_instruction(insts[OVSINST_OFPIT11_APPLY_ACTIONS],
                                     &actions, &actions_len);
        error = ofpacts_decode(actions, actions_len, version, ofpacts);
        if (error) {
            goto exit;
        }
    }
    if (insts[OVSINST_OFPIT11_CLEAR_ACTIONS]) {
        /* The call validates the instruction's fixed layout; the instruction
         * itself carries no arguments. */
        instruction_get_OFPIT11_CLEAR_ACTIONS(
            insts[OVSINST_OFPIT11_CLEAR_ACTIONS]);
        ofpact_put_CLEAR_ACTIONS(ofpacts);
    }
    if (insts[OVSINST_OFPIT11_WRITE_ACTIONS]) {
        struct ofpact_nest *on;
        const struct ofp_action_header *actions;
        size_t actions_len;
        size_t start;
        /* Decode the nested actions directly into 'ofpacts', then patch the
         * enclosing OFPACT_WRITE_ACTIONS length to cover them. */
        ofpact_pad(ofpacts);
        start = ofpacts->size;
        on = ofpact_put(ofpacts, OFPACT_WRITE_ACTIONS,
                        offsetof(struct ofpact_nest, actions));
        get_actions_from_instruction(insts[OVSINST_OFPIT11_WRITE_ACTIONS],
                                     &actions, &actions_len);
        error = ofpacts_decode_for_action_set(actions, actions_len,
                                              version, ofpacts);
        if (error) {
            goto exit;
        }
        /* Re-fetch 'on': decoding may have reallocated 'ofpacts'. */
        on = ofpbuf_at_assert(ofpacts, start, sizeof *on);
        on->ofpact.len = ofpacts->size - start;
    }
    if (insts[OVSINST_OFPIT11_WRITE_METADATA]) {
        const struct ofp11_instruction_write_metadata *oiwm;
        struct ofpact_metadata *om;
        oiwm = ALIGNED_CAST(const struct ofp11_instruction_write_metadata *,
                            insts[OVSINST_OFPIT11_WRITE_METADATA]);
        om = ofpact_put_WRITE_METADATA(ofpacts);
        om->metadata = oiwm->metadata;
        om->mask = oiwm->metadata_mask;
    }
    if (insts[OVSINST_OFPIT11_GOTO_TABLE]) {
        const struct ofp11_instruction_goto_table *oigt;
        struct ofpact_goto_table *ogt;
        oigt = instruction_get_OFPIT11_GOTO_TABLE(
            insts[OVSINST_OFPIT11_GOTO_TABLE]);
        ogt = ofpact_put_GOTO_TABLE(ofpacts);
        ogt->table_id = oigt->table_id;
    }
    /* Sanity-check the converted ofpacts' ordering. */
    error = ofpacts_verify(ofpacts->data, ofpacts->size,
                           (1u << N_OVS_INSTRUCTIONS) - 1);
exit:
    if (error) {
        ofpbuf_clear(ofpacts);
    }
    return error;
}
/* Updates the length of the instruction that begins at offset 'ofs' within
 * 'openflow' and contains nested actions that extend to the end of
 * 'openflow'.  If the instruction contains no nested actions, deletes it
 * entirely. */
static void
ofpacts_update_instruction_actions(struct ofpbuf *openflow, size_t ofs)
{
    struct ofp11_instruction_actions *oia
        = ofpbuf_at_assert(openflow, ofs, sizeof *oia);
    size_t header_end = ofs + sizeof *oia;

    if (openflow->size > header_end) {
        /* At least one nested action follows the header: record the total
         * encoded length of the instruction. */
        oia->len = htons(openflow->size - ofs);
    } else {
        /* Header only: drop the empty instruction from the buffer. */
        openflow->size = ofs;
    }
}
/* Checks that 'port' is a valid output port for OFPACT_OUTPUT, given that the
 * switch will never have more than 'max_ports' ports.  Returns 0 if 'port' is
 * valid, otherwise an OpenFlow error code. */
enum ofperr
ofpact_check_output_port(ofp_port_t port, ofp_port_t max_ports)
{
    /* Any ordinary port number below the switch's limit is acceptable. */
    if (ofp_to_u16(port) < ofp_to_u16(max_ports)) {
        return 0;
    }

    /* Otherwise, only the reserved OpenFlow port values are valid. */
    switch (port) {
    case OFPP_IN_PORT:
    case OFPP_TABLE:
    case OFPP_NORMAL:
    case OFPP_FLOOD:
    case OFPP_ALL:
    case OFPP_CONTROLLER:
    case OFPP_NONE:
    case OFPP_LOCAL:
        return 0;

    default:
        return OFPERR_OFPBAC_BAD_OUT_PORT;
    }
}
/* Removes the protocols that require consistency between match and actions
 * (that's everything but OpenFlow 1.0) from '*usable_protocols'.
 *
 * (An example of an inconsistency between match and actions is a flow that
 * does not match on an MPLS Ethertype but has an action that pops an MPLS
 * label.) */
static void
inconsistent_match(enum ofputil_protocol *usable_protocols)
{
    /* OpenFlow 1.0 is the only dialect that tolerates such inconsistency. */
    *usable_protocols = *usable_protocols & OFPUTIL_P_OF10_ANY;
}
/* Checks that ofpact 'a' is valid and consistent with 'flow', for a switch
 * with at most 'max_ports' ports and 'n_tables' flow tables, when installed
 * in table 'table_id'.  Returns 0 if OK, otherwise an OFPERR_* value.
 *
 * May modify flow->dl_type, flow->nw_proto and flow->vlan_tci,
 * caller must restore them.
 *
 * Modifies some actions, filling in fields that could not be properly set
 * without context. */
static enum ofperr
ofpact_check__(enum ofputil_protocol *usable_protocols, struct ofpact *a,
               struct flow *flow, ofp_port_t max_ports,
               uint8_t table_id, uint8_t n_tables)
{
    const struct ofpact_enqueue *enqueue;
    const struct mf_field *mf;
    switch (a->type) {
    case OFPACT_OUTPUT:
        return ofpact_check_output_port(ofpact_get_OUTPUT(a)->port,
                                        max_ports);
    case OFPACT_CONTROLLER:
        return 0;
    case OFPACT_ENQUEUE:
        /* Enqueueing is only valid on real ports, the ingress port, or the
         * local port. */
        enqueue = ofpact_get_ENQUEUE(a);
        if (ofp_to_u16(enqueue->port) >= ofp_to_u16(max_ports)
            && enqueue->port != OFPP_IN_PORT
            && enqueue->port != OFPP_LOCAL) {
            return OFPERR_OFPBAC_BAD_OUT_PORT;
        }
        return 0;
    case OFPACT_OUTPUT_REG:
        return mf_check_src(&ofpact_get_OUTPUT_REG(a)->src, flow);
    case OFPACT_BUNDLE:
        return bundle_check(ofpact_get_BUNDLE(a), max_ports, flow);
    case OFPACT_SET_VLAN_VID:
        /* Remember if we saw a vlan tag in the flow to aid translating to
         * OpenFlow 1.1+ if need be. */
        ofpact_get_SET_VLAN_VID(a)->flow_has_vlan =
            (flow->vlan_tci & htons(VLAN_CFI)) == htons(VLAN_CFI);
        if (!(flow->vlan_tci & htons(VLAN_CFI)) &&
            !ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
            inconsistent_match(usable_protocols);
        }
        /* Temporary mark that we have a vlan tag. */
        flow->vlan_tci |= htons(VLAN_CFI);
        return 0;
    case OFPACT_SET_VLAN_PCP:
        /* Remember if we saw a vlan tag in the flow to aid translating to
         * OpenFlow 1.1+ if need be. */
        ofpact_get_SET_VLAN_PCP(a)->flow_has_vlan =
            (flow->vlan_tci & htons(VLAN_CFI)) == htons(VLAN_CFI);
        if (!(flow->vlan_tci & htons(VLAN_CFI)) &&
            !ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
            inconsistent_match(usable_protocols);
        }
        /* Temporary mark that we have a vlan tag. */
        flow->vlan_tci |= htons(VLAN_CFI);
        return 0;
    case OFPACT_STRIP_VLAN:
        if (!(flow->vlan_tci & htons(VLAN_CFI))) {
            inconsistent_match(usable_protocols);
        }
        /* Temporary mark that we have no vlan tag. */
        flow->vlan_tci = htons(0);
        return 0;
    case OFPACT_PUSH_VLAN:
        if (flow->vlan_tci & htons(VLAN_CFI)) {
            /* Multiple VLAN headers not supported. */
            return OFPERR_OFPBAC_BAD_TAG;
        }
        /* Temporary mark that we have a vlan tag. */
        flow->vlan_tci |= htons(VLAN_CFI);
        return 0;
    case OFPACT_SET_ETH_SRC:
    case OFPACT_SET_ETH_DST:
        return 0;
    case OFPACT_SET_IPV4_SRC:
    case OFPACT_SET_IPV4_DST:
        /* Rewriting IPv4 addresses only makes sense for IPv4 packets. */
        if (flow->dl_type != htons(ETH_TYPE_IP)) {
            inconsistent_match(usable_protocols);
        }
        return 0;
    case OFPACT_SET_IP_DSCP:
    case OFPACT_SET_IP_ECN:
    case OFPACT_SET_IP_TTL:
    case OFPACT_DEC_TTL:
        if (!is_ip_any(flow)) {
            inconsistent_match(usable_protocols);
        }
        return 0;
    case OFPACT_SET_L4_SRC_PORT:
    case OFPACT_SET_L4_DST_PORT:
        /* L4 ports exist only in the first fragment of TCP, UDP, or SCTP. */
        if (!is_ip_any(flow) || (flow->nw_frag & FLOW_NW_FRAG_LATER) ||
            (flow->nw_proto != IPPROTO_TCP && flow->nw_proto != IPPROTO_UDP
             && flow->nw_proto != IPPROTO_SCTP)) {
            inconsistent_match(usable_protocols);
        }
        /* Note on which transport protocol the port numbers are set.
         * This allows this set action to be converted to an OF1.2 set field
         * action. */
        if (a->type == OFPACT_SET_L4_SRC_PORT) {
            ofpact_get_SET_L4_SRC_PORT(a)->flow_ip_proto = flow->nw_proto;
        } else {
            ofpact_get_SET_L4_DST_PORT(a)->flow_ip_proto = flow->nw_proto;
        }
        return 0;
    case OFPACT_REG_MOVE:
        return nxm_reg_move_check(ofpact_get_REG_MOVE(a), flow);
    case OFPACT_SET_FIELD:
        mf = ofpact_get_SET_FIELD(a)->field;
        /* Require OXM_OF_VLAN_VID to have an existing VLAN header. */
        if (!mf_are_prereqs_ok(mf, flow) ||
            (mf->id == MFF_VLAN_VID && !(flow->vlan_tci & htons(VLAN_CFI)))) {
            VLOG_WARN_RL(&rl, "set_field %s lacks correct prerequisities",
                         mf->name);
            return OFPERR_OFPBAC_MATCH_INCONSISTENT;
        }
        /* Remember if we saw a vlan tag in the flow to aid translating to
         * OpenFlow 1.1 if need be. */
        ofpact_get_SET_FIELD(a)->flow_has_vlan =
            (flow->vlan_tci & htons(VLAN_CFI)) == htons(VLAN_CFI);
        if (mf->id == MFF_VLAN_TCI) {
            /* The set field may add or remove the vlan tag,
             * Mark the status temporarily. */
            flow->vlan_tci = ofpact_get_SET_FIELD(a)->value.be16;
        }
        return 0;
    case OFPACT_STACK_PUSH:
        return nxm_stack_push_check(ofpact_get_STACK_PUSH(a), flow);
    case OFPACT_STACK_POP:
        return nxm_stack_pop_check(ofpact_get_STACK_POP(a), flow);
    case OFPACT_SET_MPLS_LABEL:
    case OFPACT_SET_MPLS_TC:
    case OFPACT_SET_MPLS_TTL:
    case OFPACT_DEC_MPLS_TTL:
        if (!eth_type_mpls(flow->dl_type)) {
            inconsistent_match(usable_protocols);
        }
        return 0;
    case OFPACT_SET_TUNNEL:
    case OFPACT_SET_QUEUE:
    case OFPACT_POP_QUEUE:
    case OFPACT_RESUBMIT:
        return 0;
    case OFPACT_FIN_TIMEOUT:
        /* FIN timeouts only make sense for TCP flows. */
        if (flow->nw_proto != IPPROTO_TCP) {
            inconsistent_match(usable_protocols);
        }
        return 0;
    case OFPACT_LEARN:
        return learn_check(ofpact_get_LEARN(a), flow);
    case OFPACT_CONJUNCTION:
        return 0;
    case OFPACT_MULTIPATH:
        return multipath_check(ofpact_get_MULTIPATH(a), flow);
    case OFPACT_NOTE:
    case OFPACT_EXIT:
        return 0;
    case OFPACT_PUSH_MPLS:
        flow->dl_type = ofpact_get_PUSH_MPLS(a)->ethertype;
        /* The packet is now MPLS and the MPLS payload is opaque.
         * Thus nothing can be assumed about the network protocol.
         * Temporarily mark that we have no nw_proto. */
        flow->nw_proto = 0;
        return 0;
    case OFPACT_POP_MPLS:
        if (!eth_type_mpls(flow->dl_type)) {
            inconsistent_match(usable_protocols);
        }
        flow->dl_type = ofpact_get_POP_MPLS(a)->ethertype;
        return 0;
    case OFPACT_SAMPLE:
        return 0;
    case OFPACT_CLEAR_ACTIONS:
        return 0;
    case OFPACT_WRITE_ACTIONS: {
        /* Use a temporary copy of 'usable_protocols' because we can't check
         * consistency of an action set. */
        struct ofpact_nest *on = ofpact_get_WRITE_ACTIONS(a);
        enum ofputil_protocol p = *usable_protocols;
        return ofpacts_check(on->actions, ofpact_nest_get_action_len(on),
                             flow, max_ports, table_id, n_tables, &p);
    }
    case OFPACT_WRITE_METADATA:
        return 0;
    case OFPACT_METER: {
        /* Meter IDs are 1-based; values above OFPM13_MAX are reserved. */
        uint32_t mid = ofpact_get_METER(a)->meter_id;
        if (mid == 0 || mid > OFPM13_MAX) {
            return OFPERR_OFPMMFC_INVALID_METER;
        }
        return 0;
    }
    case OFPACT_GOTO_TABLE: {
        /* Goto-table must move strictly forward and stay within the
         * switch's table range (255 means "unknown", so skip that check). */
        uint8_t goto_table = ofpact_get_GOTO_TABLE(a)->table_id;
        if ((table_id != 255 && goto_table <= table_id)
            || (n_tables != 255 && goto_table >= n_tables)) {
            return OFPERR_OFPBIC_BAD_TABLE_ID;
        }
        return 0;
    }
    case OFPACT_GROUP:
        return 0;
    case OFPACT_UNROLL_XLATE:
        /* UNROLL is an internal action that should never be seen via
         * OpenFlow. */
        return OFPERR_OFPBAC_BAD_TYPE;
    default:
        OVS_NOT_REACHED();
    }
}
/* Checks that the 'ofpacts_len' bytes of actions in 'ofpacts' are
 * appropriate for a packet with the prerequisites satisfied by 'flow' in a
 * switch with no more than 'max_ports' ports.
 *
 * If 'ofpacts' and 'flow' are inconsistent with one another, un-sets in
 * '*usable_protocols' the protocols that forbid the inconsistency.  (An
 * example of an inconsistency between match and actions is a flow that does
 * not match on an MPLS Ethertype but has an action that pops an MPLS label.)
 *
 * May annotate ofpacts with information gathered from the 'flow'.
 *
 * May temporarily modify 'flow', but restores the changes before returning. */
enum ofperr
ofpacts_check(struct ofpact ofpacts[], size_t ofpacts_len,
              struct flow *flow, ofp_port_t max_ports,
              uint8_t table_id, uint8_t n_tables,
              enum ofputil_protocol *usable_protocols)
{
    /* ofpact_check__() scribbles on these three 'flow' members as it goes;
     * save them so they can be put back before returning. */
    const ovs_be16 saved_dl_type = flow->dl_type;
    const ovs_be16 saved_vlan_tci = flow->vlan_tci;
    const uint8_t saved_nw_proto = flow->nw_proto;
    enum ofperr error = 0;
    struct ofpact *a;

    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        error = ofpact_check__(usable_protocols, a, flow,
                               max_ports, table_id, n_tables);
        if (error) {
            break;
        }
    }

    /* Undo any temporary modifications made while checking. */
    flow->dl_type = saved_dl_type;
    flow->vlan_tci = saved_vlan_tci;
    flow->nw_proto = saved_nw_proto;
    return error;
}
/* Like ofpacts_check(), but reports inconsistencies as
 * OFPERR_OFPBAC_MATCH_INCONSISTENT rather than clearing bits. */
enum ofperr
ofpacts_check_consistency(struct ofpact ofpacts[], size_t ofpacts_len,
                          struct flow *flow, ofp_port_t max_ports,
                          uint8_t table_id, uint8_t n_tables,
                          enum ofputil_protocol usable_protocols)
{
    enum ofputil_protocol remaining = usable_protocols;
    enum ofperr error;

    error = ofpacts_check(ofpacts, ofpacts_len, flow, max_ports,
                          table_id, n_tables, &remaining);
    if (error) {
        return error;
    }

    /* If any protocol bit was cleared, the actions are inconsistent with the
     * match under at least one of the requested protocols. */
    if (remaining != usable_protocols) {
        return OFPERR_OFPBAC_MATCH_INCONSISTENT;
    }
    return 0;
}
/* Verifies that the 'ofpacts_len' bytes of actions in 'ofpacts' are in the
 * appropriate order as defined by the OpenFlow spec and as required by Open
 * vSwitch.
 *
 * 'allowed_ovsinsts' is a bitmap of OVSINST_* values, in which 1-bits indicate
 * instructions that are allowed within 'ofpacts[]'.
 *
 * Returns 0 if the ordering is valid, otherwise an OFPERR_* value. */
static enum ofperr
ofpacts_verify(const struct ofpact ofpacts[], size_t ofpacts_len,
               uint32_t allowed_ovsinsts)
{
    const struct ofpact *a;
    enum ovs_instruction_type inst;
    /* Start from the first instruction type in the canonical order; each
     * subsequent ofpact must map to the same or a later instruction. */
    inst = OVSINST_OFPIT13_METER;
    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        enum ovs_instruction_type next;
        if (a->type == OFPACT_CONJUNCTION) {
            /* "conjunction" is special: if present, it must be the only kind
             * of action, so rescan the whole list to confirm that. */
            OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
                if (a->type != OFPACT_CONJUNCTION) {
                    VLOG_WARN("when %s action is present, it must be the only "
                              "kind of action used", ofpact_name(a->type));
                    return OFPERR_NXBAC_BAD_CONJUNCTION;
                }
            }
            return 0;
        }
        next = ovs_instruction_type_from_ofpact_type(a->type);
        /* Apply-actions may repeat (many actions map to it); every other
         * instruction must be strictly later than its predecessor. */
        if (a > ofpacts
            && (inst == OVSINST_OFPIT11_APPLY_ACTIONS
                ? next < inst
                : next <= inst)) {
            const char *name = ovs_instruction_name_from_type(inst);
            const char *next_name = ovs_instruction_name_from_type(next);
            if (next == inst) {
                VLOG_WARN("duplicate %s instruction not allowed, for OpenFlow "
                          "1.1+ compatibility", name);
            } else {
                VLOG_WARN("invalid instruction ordering: %s must appear "
                          "before %s, for OpenFlow 1.1+ compatibility",
                          next_name, name);
            }
            return OFPERR_OFPBAC_UNSUPPORTED_ORDER;
        }
        if (!((1u << next) & allowed_ovsinsts)) {
            const char *name = ovs_instruction_name_from_type(next);
            VLOG_WARN("%s instruction not allowed here", name);
            return OFPERR_OFPBIC_UNSUP_INST;
        }
        inst = next;
    }
    return 0;
}
/* Converting ofpacts to OpenFlow. */
/* Appends the OpenFlow encoding of the single ofpact 'a' to 'out', in
 * 'ofp_version' format, by dispatching to the per-type encode_<ENUM>()
 * function generated from the OFPACTS macro table. */
static void
encode_ofpact(const struct ofpact *a, enum ofp_version ofp_version,
              struct ofpbuf *out)
{
    switch (a->type) {
#define OFPACT(ENUM, STRUCT, MEMBER, NAME)                              \
        case OFPACT_##ENUM:                                             \
            encode_##ENUM(ofpact_get_##ENUM(a), ofp_version, out);      \
            return;
        OFPACTS
#undef OFPACT
    default:
        OVS_NOT_REACHED();
    }
}
/* Converts the 'ofpacts_len' bytes of ofpacts in 'ofpacts' into OpenFlow
 * actions in 'openflow', appending the actions to any existing data in
 * 'openflow'.  Returns the number of bytes of OpenFlow actions appended. */
size_t
ofpacts_put_openflow_actions(const struct ofpact ofpacts[], size_t ofpacts_len,
                             struct ofpbuf *openflow,
                             enum ofp_version ofp_version)
{
    const size_t old_size = openflow->size;
    const struct ofpact *a;

    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        encode_ofpact(a, ofp_version, openflow);
    }

    return openflow->size - old_size;
}
/* Returns true if 'a' is an ofpact that OpenFlow 1.1+ encodes inside an
 * "apply actions" instruction, false otherwise.
 *
 * Declared 'bool' because the function answers a yes/no question: the
 * previous 'enum ovs_instruction_type' return type was misleading, since the
 * value returned was always the 0-or-1 result of the comparison below, never
 * an actual instruction type. */
static bool
ofpact_is_apply_actions(const struct ofpact *a)
{
    return (ovs_instruction_type_from_ofpact_type(a->type)
            == OVSINST_OFPIT11_APPLY_ACTIONS);
}
/* Converts the 'ofpacts_len' bytes of ofpacts in 'ofpacts' into OpenFlow
 * instructions (or, for OpenFlow 1.0, into a flat list of actions), appending
 * them to any existing data in 'openflow'. */
void
ofpacts_put_openflow_instructions(const struct ofpact ofpacts[],
                                  size_t ofpacts_len,
                                  struct ofpbuf *openflow,
                                  enum ofp_version ofp_version)
{
    const struct ofpact *end = ofpact_end(ofpacts, ofpacts_len);
    const struct ofpact *a;
    /* OpenFlow 1.0 has no instructions: emit plain actions instead. */
    if (ofp_version == OFP10_VERSION) {
        ofpacts_put_openflow_actions(ofpacts, ofpacts_len, openflow,
                                     ofp_version);
        return;
    }
    a = ofpacts;
    while (a < end) {
        if (ofpact_is_apply_actions(a)) {
            /* Group each maximal run of consecutive "apply actions" ofpacts
             * into a single OFPIT11_APPLY_ACTIONS instruction, then fix up
             * (or delete, if empty) that instruction's length. */
            size_t ofs = openflow->size;
            instruction_put_OFPIT11_APPLY_ACTIONS(openflow);
            do {
                encode_ofpact(a, ofp_version, openflow);
                a = ofpact_next(a);
            } while (a < end && ofpact_is_apply_actions(a));
            ofpacts_update_instruction_actions(openflow, ofs);
        } else {
            /* Every other ofpact encodes as one stand-alone instruction. */
            encode_ofpact(a, ofp_version, openflow);
            a = ofpact_next(a);
        }
    }
}
/* Sets of supported actions. */
/* Two-way translation between OVS's internal "OFPACT_*" representation of
 * actions and the "OFPAT_*" representation used in some OpenFlow version.
 * (OFPAT_* numbering varies from one OpenFlow version to another, so a given
 * instance is specific to one OpenFlow version.)
 *
 * An entry with 'ofpat' == -1 terminates an array of ofpact_maps. */
struct ofpact_map {
    enum ofpact_type ofpact; /* Internal name for action type. */
    int ofpat;               /* OFPAT_* number from OpenFlow spec. */
};
/* Returns the OFPACT_* to OFPAT_* translation table appropriate for OpenFlow
 * 'version'.  The returned array is terminated by an entry whose 'ofpat' is
 * -1.  Unknown or future versions fall back to the OpenFlow 1.2+ table. */
static const struct ofpact_map *
get_ofpact_map(enum ofp_version version)
{
    /* OpenFlow 1.0 actions. */
    static const struct ofpact_map of10[] = {
        { OFPACT_OUTPUT, 0 },
        { OFPACT_SET_VLAN_VID, 1 },
        { OFPACT_SET_VLAN_PCP, 2 },
        { OFPACT_STRIP_VLAN, 3 },
        { OFPACT_SET_ETH_SRC, 4 },
        { OFPACT_SET_ETH_DST, 5 },
        { OFPACT_SET_IPV4_SRC, 6 },
        { OFPACT_SET_IPV4_DST, 7 },
        { OFPACT_SET_IP_DSCP, 8 },
        { OFPACT_SET_L4_SRC_PORT, 9 },
        { OFPACT_SET_L4_DST_PORT, 10 },
        { OFPACT_ENQUEUE, 11 },
        { 0, -1 },
    };
    /* OpenFlow 1.1 actions. */
    static const struct ofpact_map of11[] = {
        { OFPACT_OUTPUT, 0 },
        { OFPACT_SET_VLAN_VID, 1 },
        { OFPACT_SET_VLAN_PCP, 2 },
        { OFPACT_SET_ETH_SRC, 3 },
        { OFPACT_SET_ETH_DST, 4 },
        { OFPACT_SET_IPV4_SRC, 5 },
        { OFPACT_SET_IPV4_DST, 6 },
        { OFPACT_SET_IP_DSCP, 7 },
        { OFPACT_SET_IP_ECN, 8 },
        { OFPACT_SET_L4_SRC_PORT, 9 },
        { OFPACT_SET_L4_DST_PORT, 10 },
        /* OFPAT_COPY_TTL_OUT (11) not supported. */
        /* OFPAT_COPY_TTL_IN (12) not supported. */
        { OFPACT_SET_MPLS_LABEL, 13 },
        { OFPACT_SET_MPLS_TC, 14 },
        { OFPACT_SET_MPLS_TTL, 15 },
        { OFPACT_DEC_MPLS_TTL, 16 },
        { OFPACT_PUSH_VLAN, 17 },
        { OFPACT_STRIP_VLAN, 18 },
        { OFPACT_PUSH_MPLS, 19 },
        { OFPACT_POP_MPLS, 20 },
        { OFPACT_SET_QUEUE, 21 },
        { OFPACT_GROUP, 22 },
        { OFPACT_SET_IP_TTL, 23 },
        { OFPACT_DEC_TTL, 24 },
        { 0, -1 },
    };
    /* OpenFlow 1.2, 1.3, and 1.4 actions. */
    static const struct ofpact_map of12[] = {
        { OFPACT_OUTPUT, 0 },
        /* OFPAT_COPY_TTL_OUT (11) not supported. */
        /* OFPAT_COPY_TTL_IN (12) not supported. */
        { OFPACT_SET_MPLS_TTL, 15 },
        { OFPACT_DEC_MPLS_TTL, 16 },
        { OFPACT_PUSH_VLAN, 17 },
        { OFPACT_STRIP_VLAN, 18 },
        { OFPACT_PUSH_MPLS, 19 },
        { OFPACT_POP_MPLS, 20 },
        { OFPACT_SET_QUEUE, 21 },
        { OFPACT_GROUP, 22 },
        { OFPACT_SET_IP_TTL, 23 },
        { OFPACT_DEC_TTL, 24 },
        { OFPACT_SET_FIELD, 25 },
        /* OF1.3+ OFPAT_PUSH_PBB (26) not supported. */
        /* OF1.3+ OFPAT_POP_PBB (27) not supported. */
        { 0, -1 },
    };
    switch (version) {
    case OFP10_VERSION:
        return of10;
    case OFP11_VERSION:
        return of11;
    case OFP12_VERSION:
    case OFP13_VERSION:
    case OFP14_VERSION:
    case OFP15_VERSION:
    default:
        return of12;
    }
}
/* Converts 'ofpacts_bitmap', a bitmap whose bits correspond to OFPACT_*
 * values, into a bitmap of actions suitable for OpenFlow 'version', and
 * returns the result. */
ovs_be32
ofpact_bitmap_to_openflow(uint64_t ofpacts_bitmap, enum ofp_version version)
{
    const struct ofpact_map *map;
    uint32_t of_bitmap = 0;

    for (map = get_ofpact_map(version); map->ofpat >= 0; map++) {
        uint64_t ofpact_bit = UINT64_C(1) << map->ofpact;
        if (ofpacts_bitmap & ofpact_bit) {
            of_bitmap |= 1u << map->ofpat;
        }
    }
    return htonl(of_bitmap);
}
/* Converts 'ofpat_bitmap', a bitmap of actions from an OpenFlow message with
 * the given 'version' into a bitmap whose bits correspond to OFPACT_* values,
 * and returns the result. */
uint64_t
ofpact_bitmap_from_openflow(ovs_be32 ofpat_bitmap, enum ofp_version version)
{
    const struct ofpact_map *map;
    uint64_t result = 0;

    for (map = get_ofpact_map(version); map->ofpat >= 0; map++) {
        ovs_be32 ofpat_bit = htonl(1u << map->ofpat);
        if (ofpat_bitmap & ofpat_bit) {
            result |= UINT64_C(1) << map->ofpact;
        }
    }
    return result;
}
/* Appends to 's' a string representation of the set of OFPACT_* represented
 * by 'ofpacts_bitmap'.  An empty bitmap is rendered as "<none>"; otherwise
 * the action names are space-separated. */
void
ofpact_bitmap_format(uint64_t ofpacts_bitmap, struct ds *s)
{
    uint64_t remaining = ofpacts_bitmap;

    if (!remaining) {
        ds_put_cstr(s, "<none>");
        return;
    }

    /* Emit one name per set bit, lowest bit first, then trim the trailing
     * separator. */
    while (remaining) {
        ds_put_format(s, "%s ", ofpact_name(rightmost_1bit_idx(remaining)));
        remaining = zero_rightmost_1bit(remaining);
    }
    ds_chomp(s, ' ');
}
/* Returns true if 'ofpact' outputs to 'port', false otherwise.
 *
 * Only OUTPUT, ENQUEUE, and CONTROLLER can output to a port; every other
 * action type is listed explicitly (rather than folded into 'default') so
 * that the compiler warns when a new OFPACT_* is added without updating this
 * switch. */
static bool
ofpact_outputs_to_port(const struct ofpact *ofpact, ofp_port_t port)
{
    switch (ofpact->type) {
    case OFPACT_OUTPUT:
        return ofpact_get_OUTPUT(ofpact)->port == port;
    case OFPACT_ENQUEUE:
        return ofpact_get_ENQUEUE(ofpact)->port == port;
    case OFPACT_CONTROLLER:
        return port == OFPP_CONTROLLER;
    case OFPACT_OUTPUT_REG:
    case OFPACT_BUNDLE:
    case OFPACT_SET_VLAN_VID:
    case OFPACT_SET_VLAN_PCP:
    case OFPACT_STRIP_VLAN:
    case OFPACT_PUSH_VLAN:
    case OFPACT_SET_ETH_SRC:
    case OFPACT_SET_ETH_DST:
    case OFPACT_SET_IPV4_SRC:
    case OFPACT_SET_IPV4_DST:
    case OFPACT_SET_IP_DSCP:
    case OFPACT_SET_IP_ECN:
    case OFPACT_SET_IP_TTL:
    case OFPACT_SET_L4_SRC_PORT:
    case OFPACT_SET_L4_DST_PORT:
    case OFPACT_REG_MOVE:
    case OFPACT_SET_FIELD:
    case OFPACT_STACK_PUSH:
    case OFPACT_STACK_POP:
    case OFPACT_DEC_TTL:
    case OFPACT_SET_MPLS_LABEL:
    case OFPACT_SET_MPLS_TC:
    case OFPACT_SET_MPLS_TTL:
    case OFPACT_DEC_MPLS_TTL:
    case OFPACT_SET_TUNNEL:
    case OFPACT_WRITE_METADATA:
    case OFPACT_SET_QUEUE:
    case OFPACT_POP_QUEUE:
    case OFPACT_FIN_TIMEOUT:
    case OFPACT_RESUBMIT:
    case OFPACT_LEARN:
    case OFPACT_CONJUNCTION:
    case OFPACT_MULTIPATH:
    case OFPACT_NOTE:
    case OFPACT_EXIT:
    case OFPACT_UNROLL_XLATE:
    case OFPACT_PUSH_MPLS:
    case OFPACT_POP_MPLS:
    case OFPACT_SAMPLE:
    case OFPACT_CLEAR_ACTIONS:
    case OFPACT_WRITE_ACTIONS:
    case OFPACT_GOTO_TABLE:
    case OFPACT_METER:
    case OFPACT_GROUP:
    default:
        return false;
    }
}
/* Returns true if any action in the 'ofpacts_len' bytes of 'ofpacts' outputs
 * to 'port', false otherwise. */
bool
ofpacts_output_to_port(const struct ofpact *ofpacts, size_t ofpacts_len,
                       ofp_port_t port)
{
    const struct ofpact *pos;

    OFPACT_FOR_EACH (pos, ofpacts, ofpacts_len) {
        if (ofpact_outputs_to_port(pos, port)) {
            return true;
        }
    }

    return false;
}
/* Returns true if any action in the 'ofpacts_len' bytes of 'ofpacts' outputs
 * to 'group', false otherwise. */
bool
ofpacts_output_to_group(const struct ofpact *ofpacts, size_t ofpacts_len,
                        uint32_t group_id)
{
    const struct ofpact *pos;

    OFPACT_FOR_EACH (pos, ofpacts, ofpacts_len) {
        if (pos->type != OFPACT_GROUP) {
            continue;
        }
        if (ofpact_get_GROUP(pos)->group_id == group_id) {
            return true;
        }
    }

    return false;
}
/* Returns true if the 'a_len' bytes of ofpacts in 'a' are byte-for-byte
 * identical to the 'b_len' bytes in 'b', false otherwise. */
bool
ofpacts_equal(const struct ofpact *a, size_t a_len,
              const struct ofpact *b, size_t b_len)
{
    if (a_len != b_len) {
        return false;
    }
    return memcmp(a, b, a_len) == 0;
}
/* Finds the OFPACT_METER action, if any, in the 'ofpacts_len' bytes of
 * 'ofpacts'.  If found, returns its meter ID; if not, returns 0.
 *
 * This function relies on the order of 'ofpacts' being correct (as checked by
 * ofpacts_verify()). */
uint32_t
ofpacts_get_meter(const struct ofpact ofpacts[], size_t ofpacts_len)
{
    const struct ofpact *a;
    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        enum ovs_instruction_type inst;
        inst = ovs_instruction_type_from_ofpact_type(a->type);
        if (a->type == OFPACT_METER) {
            return ofpact_get_METER(a)->meter_id;
        } else if (inst > OVSINST_OFPIT13_METER) {
            /* Meter is the first instruction in the canonical order, so once
             * a later instruction appears there can be no meter. */
            break;
        }
    }
    return 0;
}
/* Formatting ofpacts. */
/* Appends a human-readable representation of the single ofpact 'a' to 's',
 * dispatching to the per-type format_<ENUM>() function generated from the
 * OFPACTS macro table. */
static void
ofpact_format(const struct ofpact *a, struct ds *s)
{
    switch (a->type) {
#define OFPACT(ENUM, STRUCT, MEMBER, NAME)                              \
        case OFPACT_##ENUM:                                             \
            format_##ENUM(ALIGNED_CAST(const struct STRUCT *, a), s);   \
            break;
        OFPACTS
#undef OFPACT
    default:
        OVS_NOT_REACHED();
    }
}
/* Appends a string representing the 'ofpacts_len' bytes of ofpacts in
 * 'ofpacts' to 'string'.  An empty list of ofpacts is rendered as "drop";
 * otherwise the actions are comma-separated. */
void
ofpacts_format(const struct ofpact *ofpacts, size_t ofpacts_len,
               struct ds *string)
{
    const struct ofpact *a;

    if (!ofpacts_len) {
        ds_put_cstr(string, "drop");
        return;
    }

    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        /* Separate consecutive actions with commas. */
        if (a != ofpacts) {
            ds_put_cstr(string, ",");
        }
        /* XXX write-actions */
        ofpact_format(a, string);
    }
}
/* Internal use by helpers. */
/* Appends a new, zero-initialized ofpact of the given 'type' and 'len' bytes
 * to 'ofpacts' (padding first so that it starts on an OFPACT_ALIGNTO
 * boundary), records it as 'ofpacts->header', and returns it. */
void *
ofpact_put(struct ofpbuf *ofpacts, enum ofpact_type type, size_t len)
{
    struct ofpact *new_ofpact;

    ofpact_pad(ofpacts);
    ofpacts->header = ofpbuf_put_uninit(ofpacts, len);
    new_ofpact = ofpacts->header;
    ofpact_init(new_ofpact, type, len);

    return new_ofpact;
}
/* Initializes the 'len' bytes at 'ofpact' as an ofpact of the given 'type',
 * zeroing everything else. */
void
ofpact_init(struct ofpact *ofpact, enum ofpact_type type, size_t len)
{
    /* Zero the whole ofpact first so padding and unset members are
     * deterministic. */
    memset(ofpact, 0, len);
    ofpact->len = len;
    ofpact->raw = -1;           /* Not decoded from any raw action form. */
    ofpact->type = type;
}
/* Updates 'ofpact->len' to the number of bytes in the tail of 'ofpacts'
 * starting at 'ofpact'.
 *
 * This is the correct way to update a variable-length ofpact's length after
 * adding the variable-length part of the payload.  (See the large comment
 * near the end of ofp-actions.h for more information.) */
void
ofpact_update_len(struct ofpbuf *ofpacts, struct ofpact *ofpact)
{
    char *tail = ofpbuf_tail(ofpacts);

    /* Only the most recently added ofpact may be resized this way. */
    ovs_assert(ofpact == ofpacts->header);
    ofpact->len = tail - (char *) ofpact;
}
/* Pads out 'ofpacts' to a multiple of OFPACT_ALIGNTO bytes in length.  Each
 * ofpact_put_<ENUM>() calls this function automatically beforehand, but the
 * client must call this itself after adding the final ofpact to an array of
 * them.
 *
 * (The consequences of failing to call this function are probably not dire.
 * OFPACT_FOR_EACH will calculate a pointer beyond the end of the ofpacts, but
 * not dereference it.  That's undefined behavior, technically, but it will
 * not cause a real problem on common systems.  Still, it seems better to call
 * it.) */
void
ofpact_pad(struct ofpbuf *ofpacts)
{
    unsigned int misalign = PAD_SIZE(ofpacts->size, OFPACT_ALIGNTO);

    if (misalign != 0) {
        ofpbuf_put_zeros(ofpacts, misalign);
    }
}
/* Parses 'value' as the argument of the action of the given 'type', appending
 * the result to 'ofpacts', by dispatching to the per-type parse_<ENUM>()
 * function generated from the OFPACTS macro table.  Returns NULL on success,
 * otherwise a malloc()'d error string that the caller must free. */
static char * OVS_WARN_UNUSED_RESULT
ofpact_parse(enum ofpact_type type, char *value, struct ofpbuf *ofpacts,
             enum ofputil_protocol *usable_protocols)
{
    switch (type) {
#define OFPACT(ENUM, STRUCT, MEMBER, NAME)                            \
        case OFPACT_##ENUM:                                             \
            return parse_##ENUM(value, ofpacts, usable_protocols);
        OFPACTS
#undef OFPACT
    default:
        OVS_NOT_REACHED();
    }
}
/* Looks up action 'name' (case-insensitively) among the names in the OFPACTS
 * macro table.  On success, stores the corresponding type in '*type' and
 * returns true; otherwise returns false without modifying '*type'. */
static bool
ofpact_type_from_name(const char *name, enum ofpact_type *type)
{
#define OFPACT(ENUM, STRUCT, MEMBER, NAME)                            \
    if (!strcasecmp(name, NAME)) {                                    \
        *type = OFPACT_##ENUM;                                          \
        return true;                                                    \
    }
    OFPACTS
#undef OFPACT
    return false;
}
/* Parses 'str' as a series of instructions, and appends them to 'ofpacts'.
 *
 * If 'allow_instructions' is false, only actions (which all map to the
 * apply-actions instruction) are accepted; true also permits other
 * instructions, each at most once and in the canonical order.
 *
 * Note: 'str' is modified in place by ofputil_parse_key_value().
 *
 * Returns NULL if successful, otherwise a malloc()'d string describing the
 * error.  The caller is responsible for freeing the returned string. */
static char * OVS_WARN_UNUSED_RESULT
ofpacts_parse__(char *str, struct ofpbuf *ofpacts,
                enum ofputil_protocol *usable_protocols,
                bool allow_instructions)
{
    int prev_inst = -1;
    enum ofperr retval;
    char *key, *value;
    bool drop = false;
    char *pos;
    pos = str;
    while (ofputil_parse_key_value(&pos, &key, &value)) {
        /* Default: anything not recognized below as an instruction counts as
         * part of the apply-actions instruction. */
        enum ovs_instruction_type inst = OVSINST_OFPIT11_APPLY_ACTIONS;
        enum ofpact_type type;
        char *error = NULL;
        ofp_port_t port;
        if (ofpact_type_from_name(key, &type)) {
            error = ofpact_parse(type, value, ofpacts, usable_protocols);
            inst = ovs_instruction_type_from_ofpact_type(type);
        } else if (!strcasecmp(key, "mod_vlan_vid")) {
            /* Legacy spellings of VLAN actions that push a tag if needed. */
            error = parse_set_vlan_vid(value, ofpacts, true);
        } else if (!strcasecmp(key, "mod_vlan_pcp")) {
            error = parse_set_vlan_pcp(value, ofpacts, true);
        } else if (!strcasecmp(key, "set_nw_ttl")) {
            error = parse_SET_IP_TTL(value, ofpacts, usable_protocols);
        } else if (!strcasecmp(key, "pop_vlan")) {
            error = parse_pop_vlan(ofpacts);
        } else if (!strcasecmp(key, "set_tunnel64")) {
            error = parse_set_tunnel(value, ofpacts,
                                     NXAST_RAW_SET_TUNNEL64);
        } else if (!strcasecmp(key, "load")) {
            error = parse_reg_load(value, ofpacts);
        } else if (!strcasecmp(key, "bundle_load")) {
            error = parse_bundle_load(value, ofpacts);
        } else if (!strcasecmp(key, "drop")) {
            /* "drop" appends nothing; remembered so we can verify below that
             * it stood alone. */
            drop = true;
        } else if (!strcasecmp(key, "apply_actions")) {
            return xstrdup("apply_actions is the default instruction");
        } else if (ofputil_port_from_string(key, &port)) {
            /* A bare port name or number is shorthand for "output:PORT". */
            ofpact_put_OUTPUT(ofpacts)->port = port;
        } else {
            return xasprintf("unknown action %s", key);
        }
        if (error) {
            return error;
        }
        if (inst != OVSINST_OFPIT11_APPLY_ACTIONS) {
            if (!allow_instructions) {
                return xasprintf("only actions are allowed here (not "
                                 "instruction %s)",
                                 ovs_instruction_name_from_type(inst));
            }
            if (inst == prev_inst) {
                return xasprintf("instruction %s may be specified only once",
                                 ovs_instruction_name_from_type(inst));
            }
        }
        /* Instructions must appear in canonical (ascending) order. */
        if (prev_inst != -1 && inst < prev_inst) {
            return xasprintf("instruction %s must be specified before %s",
                             ovs_instruction_name_from_type(inst),
                             ovs_instruction_name_from_type(prev_inst));
        }
        prev_inst = inst;
    }
    ofpact_pad(ofpacts);
    /* "drop" is only meaningful on its own. */
    if (drop && ofpacts->size) {
        return xstrdup("\"drop\" must not be accompanied by any other action "
                       "or instruction");
    }
    retval = ofpacts_verify(ofpacts->data, ofpacts->size,
                            (allow_instructions
                             ? (1u << N_OVS_INSTRUCTIONS) - 1
                             : 1u << OVSINST_OFPIT11_APPLY_ACTIONS));
    if (retval) {
        return xstrdup("Incorrect instruction ordering");
    }
    return NULL;
}
/* Wrapper around ofpacts_parse__() that, on failure, rolls 'ofpacts' back to
 * its original size so that a parse error leaves no partial output behind.
 * Returns NULL on success, otherwise a malloc()'d error string. */
static char * OVS_WARN_UNUSED_RESULT
ofpacts_parse(char *str, struct ofpbuf *ofpacts,
              enum ofputil_protocol *usable_protocols, bool allow_instructions)
{
    const uint32_t rollback_size = ofpacts->size;
    char *error;

    error = ofpacts_parse__(str, ofpacts, usable_protocols,
                            allow_instructions);
    if (error != NULL) {
        ofpacts->size = rollback_size;
    }
    return error;
}
/* Like ofpacts_parse(), but takes a const string: parses a private copy of
 * 's_' (ofpacts_parse() modifies its input) and initializes
 * '*usable_protocols' to "all protocols" before parsing narrows it. */
static char * OVS_WARN_UNUSED_RESULT
ofpacts_parse_copy(const char *s_, struct ofpbuf *ofpacts,
                   enum ofputil_protocol *usable_protocols,
                   bool allow_instructions)
{
    char *copy;
    char *error;

    *usable_protocols = OFPUTIL_P_ANY;

    copy = xstrdup(s_);
    error = ofpacts_parse(copy, ofpacts, usable_protocols,
                          allow_instructions);
    free(copy);

    return error;
}
/* Parses 's' as a set of OpenFlow actions and appends the actions to
 * 'ofpacts'.
 *
 * Returns NULL if successful, otherwise a malloc()'d string describing the
 * error.  The caller is responsible for freeing the returned string. */
char * OVS_WARN_UNUSED_RESULT
ofpacts_parse_actions(const char *s, struct ofpbuf *ofpacts,
                      enum ofputil_protocol *usable_protocols)
{
    /* 'false': instructions other than apply-actions are rejected here. */
    return ofpacts_parse_copy(s, ofpacts, usable_protocols, false);
}
/* Parses 's' as a set of OpenFlow instructions and appends the instructions
 * to 'ofpacts'.
 *
 * Returns NULL if successful, otherwise a malloc()'d string describing the
 * error.  The caller is responsible for freeing the returned string. */
char * OVS_WARN_UNUSED_RESULT
ofpacts_parse_instructions(const char *s, struct ofpbuf *ofpacts,
                           enum ofputil_protocol *usable_protocols)
{
    /* 'true': the full set of instructions is permitted. */
    return ofpacts_parse_copy(s, ofpacts, usable_protocols, true);
}
/* Returns the human-readable name of ofpact 'type' (e.g. "output"), or
 * "<unknown>" if 'type' is not a recognized OFPACT_* value. */
const char *
ofpact_name(enum ofpact_type type)
{
    switch (type) {
#define OFPACT(ENUM, STRUCT, MEMBER, NAME) case OFPACT_##ENUM: return NAME;
        OFPACTS
#undef OFPACT
    }
    return "<unknown>";
}
/* Low-level action decoding and encoding functions. */
/* Everything needed to identify a particular OpenFlow action. */
struct ofpact_hdrs {
    uint32_t vendor;            /* 0 if standard, otherwise a vendor code. */
    uint16_t type;              /* Type if standard, otherwise subtype. */
    uint8_t ofp_version;        /* From ofp_header. */
};
/* Information about a particular OpenFlow action. */
struct ofpact_raw_instance {
    /* The action's identity. */
    struct ofpact_hdrs hdrs;
    enum ofp_raw_action_type raw;
    /* Looking up the action.  One instance lives in both hash tables at
     * once (see ofpact_decode_hmap() and ofpact_encode_hmap()). */
    struct hmap_node decode_node; /* Based on 'hdrs'. */
    struct hmap_node encode_node; /* Based on 'raw' + 'hdrs.ofp_version'. */
    /* The action's encoded size.
     *
     * If this action is fixed-length, 'min_length' == 'max_length'.
     * If it is variable length, then 'max_length' is ROUND_DOWN(UINT16_MAX,
     * OFP_ACTION_ALIGN) == 65528. */
    unsigned short int min_length;
    unsigned short int max_length;
    /* For actions with a simple integer numeric argument, 'arg_ofs' is the
     * offset of that argument from the beginning of the action and 'arg_len'
     * its length, both in bytes.
     *
     * For actions that take other forms, these are both zero. */
    unsigned short int arg_ofs;
    unsigned short int arg_len;
    /* The name of the action, e.g. "OFPAT_OUTPUT" or "NXAST_RESUBMIT". */
    const char *name;
    /* If this action is deprecated, a human-readable string with a brief
     * explanation. */
    const char *deprecation;
};
/* Action header.  Common prefix of every OpenFlow action on the wire. */
struct ofp_action_header {
    /* The meaning of other values of 'type' generally depends on the OpenFlow
     * version (see enum ofp_raw_action_type).
     *
     * Across all OpenFlow versions, OFPAT_VENDOR indicates that 'vendor'
     * designates an OpenFlow vendor ID and that the remainder of the action
     * structure has a vendor-defined meaning.
     */
#define OFPAT_VENDOR 0xffff
    ovs_be16 type;
    /* Always a multiple of 8. */
    ovs_be16 len;
    /* For type == OFPAT_VENDOR only, this is a vendor ID, e.g. NX_VENDOR_ID or
     * ONF_VENDOR_ID.  Other 'type's use this space for some other purpose. */
    ovs_be32 vendor;
};
OFP_ASSERT(sizeof(struct ofp_action_header) == 8);
/* Header for Nicira-defined actions and for ONF vendor extensions.
 *
 * This cannot be used as an entirely generic vendor extension action header,
 * because OpenFlow does not specify the location or size of the action
 * subtype; it just happens that ONF extensions and Nicira extensions share
 * this format. */
struct ext_action_header {
    ovs_be16 type;                  /* OFPAT_VENDOR. */
    ovs_be16 len;                   /* At least 16. */
    ovs_be32 vendor;                /* NX_VENDOR_ID or ONF_VENDOR_ID. */
    ovs_be16 subtype;               /* See enum ofp_raw_action_type. */
    uint8_t pad[6];                 /* Pads the header out to 16 bytes. */
};
OFP_ASSERT(sizeof(struct ext_action_header) == 16);
/* Returns true if 'a' and 'b' identify the same OpenFlow action, that is,
 * if all three of their identifying members are equal. */
static bool
ofpact_hdrs_equal(const struct ofpact_hdrs *a,
                  const struct ofpact_hdrs *b)
{
    if (a->vendor != b->vendor) {
        return false;
    }
    if (a->type != b->type) {
        return false;
    }
    return a->ofp_version == b->ofp_version;
}
static uint32_t
ofpact_hdrs_hash(const struct ofpact_hdrs *hdrs)
{
return hash_2words(hdrs->vendor, (hdrs->type << 16) | hdrs->ofp_version);
}
#include "ofp-actions.inc2"
static struct hmap *
ofpact_decode_hmap(void)
{
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
static struct hmap hmap;
if (ovsthread_once_start(&once)) {
struct ofpact_raw_instance *inst;
hmap_init(&hmap);
for (inst = all_raw_instances;
inst < &all_raw_instances[ARRAY_SIZE(all_raw_instances)];
inst++) {
hmap_insert(&hmap, &inst->decode_node,
ofpact_hdrs_hash(&inst->hdrs));
}
ovsthread_once_done(&once);
}
return &hmap;
}
static struct hmap *
ofpact_encode_hmap(void)
{
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
static struct hmap hmap;
if (ovsthread_once_start(&once)) {
struct ofpact_raw_instance *inst;
hmap_init(&hmap);
for (inst = all_raw_instances;
inst < &all_raw_instances[ARRAY_SIZE(all_raw_instances)];
inst++) {
hmap_insert(&hmap, &inst->encode_node,
hash_2words(inst->raw, inst->hdrs.ofp_version));
}
ovsthread_once_done(&once);
}
return &hmap;
}
/* Looks up the raw-action metadata for the wire action 'oah' as received in
 * OpenFlow version 'ofp_version'.  'length' is the number of bytes available
 * starting at 'oah'.
 *
 * On success, points '*instp' at the matching instance and returns 0.  On
 * failure, leaves '*instp' NULL and returns an OFPERR_OFPBAC_* error. */
static enum ofperr
ofpact_decode_raw(enum ofp_version ofp_version,
                  const struct ofp_action_header *oah, size_t length,
                  const struct ofpact_raw_instance **instp)
{
    const struct ofpact_raw_instance *inst;
    struct ofpact_hdrs hdrs;
    *instp = NULL;
    if (length < sizeof *oah) {
        return OFPERR_OFPBAC_BAD_LEN;
    }
    /* Get base action type. */
    if (oah->type == htons(OFPAT_VENDOR)) {
        /* Get vendor. */
        hdrs.vendor = ntohl(oah->vendor);
        if (hdrs.vendor == NX_VENDOR_ID || hdrs.vendor == ONF_VENDOR_ID) {
            /* Get extension subtype.  The cast is safe because both Nicira
             * and ONF extensions place a 16-bit subtype right after the
             * basic vendor header (see struct ext_action_header). */
            const struct ext_action_header *nah;
            nah = ALIGNED_CAST(const struct ext_action_header *, oah);
            if (length < sizeof *nah) {
                return OFPERR_OFPBAC_BAD_LEN;
            }
            hdrs.type = ntohs(nah->subtype);
        } else {
            VLOG_WARN_RL(&rl, "OpenFlow action has unknown vendor %#"PRIx32,
                         hdrs.vendor);
            return OFPERR_OFPBAC_BAD_VENDOR;
        }
    } else {
        /* Standard action: vendor 0 denotes "no vendor". */
        hdrs.vendor = 0;
        hdrs.type = ntohs(oah->type);
    }
    hdrs.ofp_version = ofp_version;
    /* Hash lookup among all generated raw-action instances. */
    HMAP_FOR_EACH_WITH_HASH (inst, decode_node, ofpact_hdrs_hash(&hdrs),
                             ofpact_decode_hmap()) {
        if (ofpact_hdrs_equal(&hdrs, &inst->hdrs)) {
            *instp = inst;
            return 0;
        }
    }
    /* Distinguish "unknown vendor subtype" from "unknown standard type" so
     * the controller gets the more specific error code. */
    return (hdrs.vendor
            ? OFPERR_OFPBAC_BAD_VENDOR_TYPE
            : OFPERR_OFPBAC_BAD_TYPE);
}
/* Pulls one action off the front of 'buf', which must contain OpenFlow
 * actions in version 'ofp_version' wire format.
 *
 * On success, sets '*raw' to the decoded raw action type, sets '*arg' to the
 * action's integer argument (0 if the action takes none), advances 'buf' past
 * the action, and returns 0.  On failure returns an OFPERR_OFPBAC_* error
 * and zeroes '*raw' and '*arg'. */
static enum ofperr
ofpact_pull_raw(struct ofpbuf *buf, enum ofp_version ofp_version,
                enum ofp_raw_action_type *raw, uint64_t *arg)
{
    const struct ofp_action_header *oah = buf->data;
    const struct ofpact_raw_instance *action;
    unsigned int length;
    enum ofperr error;
    *raw = *arg = 0;
    error = ofpact_decode_raw(ofp_version, oah, buf->size, &action);
    if (error) {
        return error;
    }
    if (action->deprecation) {
        VLOG_INFO_RL(&rl, "%s is deprecated in %s (%s)",
                     action->name, ofputil_version_to_string(ofp_version),
                     action->deprecation);
    }
    /* Validate the action's self-declared length: it must fit in the buffer,
     * lie within the per-action [min,max] range, and be 8-byte aligned. */
    length = ntohs(oah->len);
    if (length > buf->size) {
        VLOG_WARN_RL(&rl, "OpenFlow action %s length %u exceeds action buffer "
                     "length %"PRIu32, action->name, length, buf->size);
        return OFPERR_OFPBAC_BAD_LEN;
    }
    if (length < action->min_length || length > action->max_length) {
        VLOG_WARN_RL(&rl, "OpenFlow action %s length %u not in valid range "
                     "[%hu,%hu]", action->name, length,
                     action->min_length, action->max_length);
        return OFPERR_OFPBAC_BAD_LEN;
    }
    if (length % 8) {
        VLOG_WARN_RL(&rl, "OpenFlow action %s length %u is not a multiple "
                     "of 8", action->name, length);
        return OFPERR_OFPBAC_BAD_LEN;
    }
    *raw = action->raw;
    *arg = 0;
    if (action->arg_len) {
        const uint8_t *p;
        int i;
        /* Accumulate the argument big-endian, most significant byte first. */
        p = ofpbuf_at_assert(buf, action->arg_ofs, action->arg_len);
        for (i = 0; i < action->arg_len; i++) {
            *arg = (*arg << 8) | p[i];
        }
    }
    ofpbuf_pull(buf, length);
    return 0;
}
static const struct ofpact_raw_instance *
ofpact_raw_lookup(enum ofp_version ofp_version, enum ofp_raw_action_type raw)
{
const struct ofpact_raw_instance *inst;
HMAP_FOR_EACH_WITH_HASH (inst, encode_node, hash_2words(raw, ofp_version),
ofpact_encode_hmap()) {
if (inst->raw == raw && inst->hdrs.ofp_version == ofp_version) {
return inst;
}
}
OVS_NOT_REACHED();
}
/* Appends the minimum-length encoding of raw action 'raw' (with integer
 * argument 'arg') in 'ofp_version' wire format to 'buf', and returns a
 * pointer to the new action header within 'buf'.
 *
 * 'arg' must be 0 for actions that take no integer argument. */
static void *
ofpact_put_raw(struct ofpbuf *buf, enum ofp_version ofp_version,
               enum ofp_raw_action_type raw, uint64_t arg)
{
    const struct ofpact_raw_instance *inst;
    struct ofp_action_header *oah;
    const struct ofpact_hdrs *hdrs;
    inst = ofpact_raw_lookup(ofp_version, raw);
    hdrs = &inst->hdrs;
    /* Zero-fill so that pad bytes and unset fields are deterministic. */
    oah = ofpbuf_put_zeros(buf, inst->min_length);
    oah->type = htons(hdrs->vendor ? OFPAT_VENDOR : hdrs->type);
    oah->len = htons(inst->min_length);
    oah->vendor = htonl(hdrs->vendor);
    switch (hdrs->vendor) {
    case 0:
        break;
    case NX_VENDOR_ID:
    case ONF_VENDOR_ID: {
        /* Both vendors use the shared extension layout with a 16-bit
         * subtype following the vendor ID. */
        struct ext_action_header *nah = (struct ext_action_header *) oah;
        nah->subtype = htons(hdrs->type);
        break;
    }
    default:
        OVS_NOT_REACHED();
    }
    if (inst->arg_len) {
        /* Write 'arg' big-endian: start one past the end of the argument
         * field and store bytes from least to most significant, moving
         * backwards. */
        uint8_t *p = (uint8_t *) oah + inst->arg_ofs + inst->arg_len;
        int i;
        for (i = 0; i < inst->arg_len; i++) {
            *--p = arg;
            arg >>= 8;
        }
    } else {
        ovs_assert(!arg);
    }
    return oah;
}
/* Pads the action that begins at offset 'start_ofs' within 'openflow' out to
 * a multiple of 8 bytes with zeros, then patches the action header's 'len'
 * field to the padded length. */
static void
pad_ofpat(struct ofpbuf *openflow, size_t start_ofs)
{
    size_t pad = PAD_SIZE(openflow->size - start_ofs, 8);
    struct ofp_action_header *oah;

    ofpbuf_put_zeros(openflow, pad);
    oah = ofpbuf_at_assert(openflow, start_ofs, sizeof *oah);
    oah->len = htons(openflow->size - start_ofs);
}
|
from __future__ import print_function, division
from sympy.core import S
from sympy.core.function import _coeff_isneg
from sympy.core.mul import Mul
from sympy.core.numbers import Rational
from sympy.core.power import Pow
from sympy.core.relational import Equality
from sympy.core.symbol import Symbol
from sympy.utilities import group
from sympy.utilities.iterables import has_variety
from sympy.core.sympify import SympifyError
from sympy.core.compatibility import u, range
from sympy.printing.printer import Printer
from sympy.printing.str import sstr
from sympy.printing.conventions import requires_partial
from .stringpict import prettyForm, stringPict
from .pretty_symbology import xstr, hobj, vobj, xobj, xsym, pretty_symbol, \
pretty_atom, pretty_use_unicode, pretty_try_use_unicode, greek_unicode, U, \
annotated
from sympy.utilities import default_sort_key
# rename for usage from outside
pprint_use_unicode = pretty_use_unicode
pprint_try_use_unicode = pretty_try_use_unicode
class PrettyPrinter(Printer):
"""Printer, which converts an expression into 2D ASCII-art figure."""
printmethod = "_pretty"
_default_settings = {
"order": None,
"full_prec": "auto",
"use_unicode": None,
"wrap_line": True,
"num_columns": None,
}
def __init__(self, settings=None):
Printer.__init__(self, settings)
self.emptyPrinter = lambda x: prettyForm(xstr(x))
@property
def _use_unicode(self):
if self._settings['use_unicode']:
return True
else:
return pretty_use_unicode()
def doprint(self, expr):
return self._print(expr).render(**self._settings)
    # empty op so _print(stringPict) returns the same
    def _print_stringPict(self, e):
        # Identity: a stringPict is already in printable 2D form.
        return e
    def _print_basestring(self, e):
        # Plain strings are wrapped verbatim in a prettyForm.
        return prettyForm(e)
def _print_atan2(self, e):
pform = prettyForm(*self._print_seq(e.args).parens())
pform = prettyForm(*pform.left('atan2'))
return pform
def _print_Symbol(self, e):
symb = pretty_symbol(e.name)
return prettyForm(symb)
_print_RandomSymbol = _print_Symbol
def _print_Float(self, e):
# we will use StrPrinter's Float printer, but we need to handle the
# full_prec ourselves, according to the self._print_level
full_prec = self._settings["full_prec"]
if full_prec == "auto":
full_prec = self._print_level == 1
return prettyForm(sstr(e, full_prec=full_prec))
    def _print_Atom(self, e):
        """Print named atoms (Exp1, Pi, oo, ...) via the pretty_atom table."""
        try:
            # print atoms like Exp1 or Pi
            return prettyForm(pretty_atom(e.__class__.__name__))
        except KeyError:
            # Not in the table: fall back to plain stringification.
            return self.emptyPrinter(e)
    # Infinity inherits from Number, so we have to override _print_XXX order
    _print_Infinity = _print_Atom
    _print_NegativeInfinity = _print_Atom
    _print_EmptySet = _print_Atom
    _print_Naturals = _print_Atom
    _print_Integers = _print_Atom
    _print_Reals = _print_Atom
def _print_subfactorial(self, e):
x = e.args[0]
pform = self._print(x)
# Add parentheses if needed
if not ((x.is_Integer and x.is_nonnegative) or x.is_Symbol):
pform = prettyForm(*pform.parens())
pform = prettyForm(*pform.left('!'))
return pform
def _print_factorial(self, e):
x = e.args[0]
pform = self._print(x)
# Add parentheses if needed
if not ((x.is_Integer and x.is_nonnegative) or x.is_Symbol):
pform = prettyForm(*pform.parens())
pform = prettyForm(*pform.right('!'))
return pform
def _print_factorial2(self, e):
x = e.args[0]
pform = self._print(x)
# Add parentheses if needed
if not ((x.is_Integer and x.is_nonnegative) or x.is_Symbol):
pform = prettyForm(*pform.parens())
pform = prettyForm(*pform.right('!!'))
return pform
def _print_binomial(self, e):
n, k = e.args
n_pform = self._print(n)
k_pform = self._print(k)
bar = ' '*max(n_pform.width(), k_pform.width())
pform = prettyForm(*k_pform.above(bar))
pform = prettyForm(*pform.above(n_pform))
pform = prettyForm(*pform.parens('(', ')'))
pform.baseline = (pform.baseline + 1)//2
return pform
def _print_Relational(self, e):
op = prettyForm(' ' + xsym(e.rel_op) + ' ')
l = self._print(e.lhs)
r = self._print(e.rhs)
pform = prettyForm(*stringPict.next(l, op, r))
return pform
    def _print_Not(self, e):
        """Print logical negation: a NOT sign in unicode mode, with special
        slashed forms for negated Equivalent and Implies; ascii mode falls
        back to function notation."""
        from sympy import Equivalent, Implies
        if self._use_unicode:
            arg = e.args[0]
            pform = self._print(arg)
            # Not(Equivalent(..)) and Not(Implies(..)) render as the slashed
            # version of the inner operator rather than a prefixed NOT sign.
            if isinstance(arg, Equivalent):
                return self._print_Equivalent(arg, altchar=u("\N{NOT IDENTICAL TO}"))
            if isinstance(arg, Implies):
                return self._print_Implies(arg, altchar=u("\N{RIGHTWARDS ARROW WITH STROKE}"))
            if arg.is_Boolean and not arg.is_Not:
                pform = prettyForm(*pform.parens())
            return prettyForm(*pform.left(u("\N{NOT SIGN}")))
        else:
            return self._print_Function(e)
    def __print_Boolean(self, e, char, sort=True):
        """Shared helper for n-ary boolean operators: join the printed args
        with ``char``, parenthesizing non-Not boolean sub-expressions.

        If ``sort`` is true the arguments are printed in canonical order.
        """
        args = e.args
        if sort:
            args = sorted(e.args, key=default_sort_key)
        arg = args[0]
        pform = self._print(arg)
        if arg.is_Boolean and not arg.is_Not:
            pform = prettyForm(*pform.parens())
        for arg in args[1:]:
            pform_arg = self._print(arg)
            if arg.is_Boolean and not arg.is_Not:
                pform_arg = prettyForm(*pform_arg.parens())
            pform = prettyForm(*pform.right(u(' %s ') % char))
            pform = prettyForm(*pform.right(pform_arg))
        return pform
def _print_And(self, e):
if self._use_unicode:
return self.__print_Boolean(e, u("\N{LOGICAL AND}"))
else:
return self._print_Function(e, sort=True)
def _print_Or(self, e):
if self._use_unicode:
return self.__print_Boolean(e, u("\N{LOGICAL OR}"))
else:
return self._print_Function(e, sort=True)
def _print_Xor(self, e):
if self._use_unicode:
return self.__print_Boolean(e, u("\N{XOR}"))
else:
return self._print_Function(e, sort=True)
def _print_Nand(self, e):
if self._use_unicode:
return self.__print_Boolean(e, u("\N{NAND}"))
else:
return self._print_Function(e, sort=True)
def _print_Nor(self, e):
if self._use_unicode:
return self.__print_Boolean(e, u("\N{NOR}"))
else:
return self._print_Function(e, sort=True)
def _print_Implies(self, e, altchar=None):
if self._use_unicode:
return self.__print_Boolean(e, altchar or u("\N{RIGHTWARDS ARROW}"), sort=False)
else:
return self._print_Function(e)
def _print_Equivalent(self, e, altchar=None):
if self._use_unicode:
return self.__print_Boolean(e, altchar or u("\N{IDENTICAL TO}"))
else:
return self._print_Function(e, sort=True)
def _print_conjugate(self, e):
pform = self._print(e.args[0])
return prettyForm( *pform.above( hobj('_', pform.width())) )
def _print_Abs(self, e):
pform = self._print(e.args[0])
pform = prettyForm(*pform.parens('|', '|'))
return pform
_print_Determinant = _print_Abs
def _print_floor(self, e):
if self._use_unicode:
pform = self._print(e.args[0])
pform = prettyForm(*pform.parens('lfloor', 'rfloor'))
return pform
else:
return self._print_Function(e)
def _print_ceiling(self, e):
if self._use_unicode:
pform = self._print(e.args[0])
pform = prettyForm(*pform.parens('lceil', 'rceil'))
return pform
else:
return self._print_Function(e)
    def _print_Derivative(self, deriv):
        """Print a derivative as a fraction d^n/(dx dy ...) applied to the
        expression, using the partial symbol when appropriate."""
        if requires_partial(deriv) and self._use_unicode:
            deriv_symbol = U('PARTIAL DIFFERENTIAL')
        else:
            deriv_symbol = r'd'
        syms = list(reversed(deriv.variables))
        x = None
        # Build the denominator: one "d<sym>" (possibly powered) per group of
        # repeated differentiation variables.
        for sym, num in group(syms, multiple=False):
            s = self._print(sym)
            ds = prettyForm(*s.left(deriv_symbol))
            if num > 1:
                ds = ds**prettyForm(str(num))
            if x is None:
                x = ds
            else:
                x = prettyForm(*x.right(' '))
                x = prettyForm(*x.right(ds))
        f = prettyForm(
            binding=prettyForm.FUNC, *self._print(deriv.expr).parens())
        # Numerator: the derivative symbol, powered by the total order.
        pform = prettyForm(deriv_symbol)
        if len(syms) > 1:
            pform = pform**prettyForm(str(len(syms)))
        pform = prettyForm(*pform.below(stringPict.LINE, x))
        pform.baseline = pform.baseline + 1
        pform = prettyForm(*stringPict.next(pform, f))
        pform.binding = prettyForm.MUL
        return pform
def _print_Cycle(self, dc):
from sympy.combinatorics.permutations import Permutation
return self._print_tuple(Permutation(dc.as_list()).cyclic_form)
    def _print_PDF(self, pdf):
        """Print a PDF object as ``PDF((var, lower, upper), density)``-style
        nested tuples."""
        # (variable, lower bound, upper bound)
        lim = self._print(pdf.pdf.args[0])
        lim = prettyForm(*lim.right(', '))
        lim = prettyForm(*lim.right(self._print(pdf.domain[0])))
        lim = prettyForm(*lim.right(', '))
        lim = prettyForm(*lim.right(self._print(pdf.domain[1])))
        lim = prettyForm(*lim.parens())
        # (density expression, limits tuple)
        f = self._print(pdf.pdf.args[1])
        f = prettyForm(*f.right(', '))
        f = prettyForm(*f.right(lim))
        f = prettyForm(*f.parens())
        pform = prettyForm('PDF')
        pform = prettyForm(*pform.right(f))
        return pform
    def _print_Integral(self, integral):
        """Draw an integral: stacked integral signs (with optional limits)
        followed by the integrand and the ``dx dy ...`` differentials."""
        f = integral.function
        # Add parentheses if arg involves addition of terms and
        # create a pretty form for the argument
        prettyF = self._print(f)
        # XXX generalize parens
        if f.is_Add:
            prettyF = prettyForm(*prettyF.parens())
        # dx dy dz ...
        arg = prettyF
        for x in integral.limits:
            prettyArg = self._print(x[0])
            # XXX qparens (parens if needs-parens)
            if prettyArg.width() > 1:
                prettyArg = prettyForm(*prettyArg.parens())
            arg = prettyForm(*arg.right(' d', prettyArg))
        # \int \int \int ...
        firstterm = True
        s = None
        for lim in integral.limits:
            x = lim[0]
            # Create bar based on the height of the argument
            h = arg.height()
            H = h + 2
            # XXX hack!
            ascii_mode = not self._use_unicode
            if ascii_mode:
                H += 2
            vint = vobj('int', H)
            # Construct the pretty form with the integral sign and the argument
            pform = prettyForm(vint)
            pform.baseline = arg.baseline + (
                H - h)//2    # covering the whole argument
            if len(lim) > 1:
                # Create pretty forms for endpoints, if definite integral.
                # Do not print empty endpoints.
                if len(lim) == 2:
                    prettyA = prettyForm("")
                    prettyB = self._print(lim[1])
                if len(lim) == 3:
                    prettyA = self._print(lim[1])
                    prettyB = self._print(lim[2])
                if ascii_mode:  # XXX hack
                    # Add spacing so that endpoint can more easily be
                    # identified with the correct integral sign
                    spc = max(1, 3 - prettyB.width())
                    prettyB = prettyForm(*prettyB.left(' ' * spc))
                    spc = max(1, 4 - prettyA.width())
                    prettyA = prettyForm(*prettyA.right(' ' * spc))
                pform = prettyForm(*pform.above(prettyB))
                pform = prettyForm(*pform.below(prettyA))
            if not ascii_mode:  # XXX hack
                pform = prettyForm(*pform.right(' '))
            if firstterm:
                s = pform   # first term
                firstterm = False
            else:
                # Prepend subsequent integral signs to the left.
                s = prettyForm(*s.left(pform))
        pform = prettyForm(*arg.left(s))
        pform.binding = prettyForm.MUL
        return pform
    def _print_Product(self, expr):
        """Draw a product: a pi-shaped sign per limit, with upper bound above
        and ``var = lower`` below, followed by the term."""
        func = expr.term
        pretty_func = self._print(func)
        horizontal_chr = xobj('_', 1)
        corner_chr = xobj('_', 1)
        vertical_chr = xobj('|', 1)
        if self._use_unicode:
            # use unicode corners
            horizontal_chr = xobj('-', 1)
            corner_chr = u('\N{BOX DRAWINGS LIGHT DOWN AND HORIZONTAL}')
        func_height = pretty_func.height()
        first = True
        max_upper = 0
        sign_height = 0
        for lim in expr.limits:
            # Sign width scales with the height of the term being multiplied.
            width = (func_height + 2) * 5 // 3 - 2
            sign_lines = []
            sign_lines.append(corner_chr + (horizontal_chr*width) + corner_chr)
            for i in range(func_height + 1):
                sign_lines.append(vertical_chr + (' '*width) + vertical_chr)
            pretty_sign = stringPict('')
            pretty_sign = prettyForm(*pretty_sign.stack(*sign_lines))
            pretty_upper = self._print(lim[2])
            pretty_lower = self._print(Equality(lim[0], lim[1]))
            max_upper = max(max_upper, pretty_upper.height())
            if first:
                sign_height = pretty_sign.height()
            pretty_sign = prettyForm(*pretty_sign.above(pretty_upper))
            pretty_sign = prettyForm(*pretty_sign.below(pretty_lower))
            if first:
                pretty_func.baseline = 0
                first = False
            # Pad each sign so the next element sits to its right.
            height = pretty_sign.height()
            padding = stringPict('')
            padding = prettyForm(*padding.stack(*[' ']*(height - 1)))
            pretty_sign = prettyForm(*pretty_sign.right(padding))
            pretty_func = prettyForm(*pretty_sign.right(pretty_func))
        pretty_func.baseline = max_upper + sign_height//2
        pretty_func.binding = prettyForm.MUL
        return pretty_func
    def _print_Sum(self, expr):
        """Draw a summation: a sigma sign per limit (ascii art or unicode),
        with the upper bound above and ``var = lower`` below, followed by the
        summand."""
        ascii_mode = not self._use_unicode
        def asum(hrequired, lower, upper, use_ascii):
            """Build the sigma glyph lines for a sign of height >= hrequired.
            Returns (d, height, lines, adjustment)."""
            def adjust(s, wid=None, how='<^>'):
                # Pad string 's' to width 'wid' with the given alignment.
                if not wid or len(s) > wid:
                    return s
                need = wid - len(s)
                if how == '<^>' or how == "<" or how not in list('<^>'):
                    return s + ' '*need
                half = need//2
                lead = ' '*half
                if how == ">":
                    return " "*need + s
                return lead + s + ' '*(need - len(lead))
            h = max(hrequired, 2)
            d = h//2
            w = d + 1
            more = hrequired % 2
            lines = []
            if use_ascii:
                # Hand-drawn sigma out of /, \ and _ characters.
                lines.append("_"*(w) + ' ')
                lines.append("\%s`" % (' '*(w - 1)))
                for i in range(1, d):
                    lines.append('%s\\%s' % (' '*i, ' '*(w - i)))
                if more:
                    lines.append('%s)%s' % (' '*(d), ' '*(w - d)))
                for i in reversed(range(1, d)):
                    lines.append('%s/%s' % (' '*i, ' '*(w - i)))
                lines.append("/" + "_"*(w - 1) + ',')
                return d, h + more, lines, 0
            else:
                # Unicode sigma from the vobj('sum') character set.
                w = w + more
                d = d + more
                vsum = vobj('sum', 4)
                lines.append("_"*(w))
                for i in range(0, d):
                    lines.append('%s%s%s' % (' '*i, vsum[2], ' '*(w - i - 1)))
                for i in reversed(range(0, d)):
                    lines.append('%s%s%s' % (' '*i, vsum[4], ' '*(w - i - 1)))
                lines.append(vsum[8]*(w))
                return d, h + 2*more, lines, more
        f = expr.function
        prettyF = self._print(f)
        if f.is_Add:  # add parens
            prettyF = prettyForm(*prettyF.parens())
        H = prettyF.height() + 2
        # \sum \sum \sum ...
        first = True
        max_upper = 0
        sign_height = 0
        for lim in expr.limits:
            # Limits may be (var, lower, upper), (var, lower) or (var,).
            if len(lim) == 3:
                prettyUpper = self._print(lim[2])
                prettyLower = self._print(Equality(lim[0], lim[1]))
            elif len(lim) == 2:
                prettyUpper = self._print("")
                prettyLower = self._print(Equality(lim[0], lim[1]))
            elif len(lim) == 1:
                prettyUpper = self._print("")
                prettyLower = self._print(lim[0])
            max_upper = max(max_upper, prettyUpper.height())
            # Create sum sign based on the height of the argument
            d, h, slines, adjustment = asum(
                H, prettyLower.width(), prettyUpper.width(), ascii_mode)
            prettySign = stringPict('')
            prettySign = prettyForm(*prettySign.stack(*slines))
            if first:
                sign_height = prettySign.height()
            prettySign = prettyForm(*prettySign.above(prettyUpper))
            prettySign = prettyForm(*prettySign.below(prettyLower))
            if first:
                # change F baseline so it centers on the sign
                prettyF.baseline -= d - (prettyF.height()//2 -
                                         prettyF.baseline) - adjustment
                first = False
            # put padding to the right
            pad = stringPict('')
            pad = prettyForm(*pad.stack(*[' ']*h))
            prettySign = prettyForm(*prettySign.right(pad))
            # put the present prettyF to the right
            prettyF = prettyForm(*prettySign.right(prettyF))
        prettyF.baseline = max_upper + sign_height//2
        prettyF.binding = prettyForm.MUL
        return prettyF
    def _print_Limit(self, l):
        """Draw a limit: 'lim' with 'z -> z0' (plus a one-sided superscript
        for finite limit points) underneath, followed by the expression."""
        e, z, z0, dir = l.args
        E = self._print(e)
        Lim = prettyForm('lim')
        LimArg = self._print(z)
        if self._use_unicode:
            LimArg = prettyForm(*LimArg.right(u('\N{BOX DRAWINGS LIGHT HORIZONTAL}\N{RIGHTWARDS ARROW}')))
        else:
            LimArg = prettyForm(*LimArg.right('->'))
        LimArg = prettyForm(*LimArg.right(self._print(z0)))
        # No direction superscript for limits at +/- infinity.
        if z0 in (S.Infinity, S.NegativeInfinity):
            dir = ""
        else:
            if self._use_unicode:
                dir = u('\N{SUPERSCRIPT PLUS SIGN}') if str(dir) == "+" else u('\N{SUPERSCRIPT MINUS}')
        LimArg = prettyForm(*LimArg.right(self._print(dir)))
        Lim = prettyForm(*Lim.below(LimArg))
        Lim = prettyForm(*Lim.right(E))
        return Lim
    def _print_matrix_contents(self, e):
        """
        This method factors out what is essentially grid printing.

        Returns the cell grid of matrix ``e`` as a single prettyForm,
        horizontally centered per column, without surrounding brackets.
        """
        M = e   # matrix
        Ms = {}  # i,j -> pretty(M[i,j])
        for i in range(M.rows):
            for j in range(M.cols):
                Ms[i, j] = self._print(M[i, j])
        # h- and v- spacers
        hsep = 2
        vsep = 1
        # max width for columns
        maxw = [-1] * M.cols
        for j in range(M.cols):
            maxw[j] = max([Ms[i, j].width() for i in range(M.rows)] or [0])
        # drawing result
        D = None
        for i in range(M.rows):
            D_row = None
            for j in range(M.cols):
                s = Ms[i, j]
                # reshape s to maxw
                # XXX this should be generalized, and go to stringPict.reshape ?
                assert s.width() <= maxw[j]
                # hcenter it, +0.5 to the right                        2
                # ( it's better to align formula starts for say 0 and r )
                # XXX this is not good in all cases -- maybe introduce vbaseline?
                wdelta = maxw[j] - s.width()
                wleft = wdelta // 2
                wright = wdelta - wleft
                s = prettyForm(*s.right(' '*wright))
                s = prettyForm(*s.left(' '*wleft))
                # we don't need vcenter cells -- this is automatically done in
                # a pretty way because when their baselines are taking into
                # account in .right()
                if D_row is None:
                    D_row = s   # first box in a row
                    continue
                D_row = prettyForm(*D_row.right(' '*hsep))  # h-spacer
                D_row = prettyForm(*D_row.right(s))
            if D is None:
                D = D_row       # first row in a picture
                continue
            # v-spacer
            for _ in range(vsep):
                D = prettyForm(*D.below(' '))
            D = prettyForm(*D.below(D_row))
        if D is None:
            D = prettyForm('')  # Empty Matrix
        return D
def _print_MatrixBase(self, e):
D = self._print_matrix_contents(e)
D = prettyForm(*D.parens('[', ']'))
return D
_print_ImmutableMatrix = _print_MatrixBase
_print_Matrix = _print_MatrixBase
    def _print_MatrixElement(self, expr):
        """Print a matrix element: ``M_ij`` as a subscripted symbol when the
        parent is a MatrixSymbol with numeric indices, else ``M[i, j]``."""
        from sympy.matrices import MatrixSymbol
        from sympy import Symbol
        if (isinstance(expr.parent, MatrixSymbol)
                and expr.i.is_number and expr.j.is_number):
            return self._print(
                Symbol(expr.parent.name + '_%d%d'%(expr.i, expr.j)))
        else:
            prettyFunc = self._print(expr.parent)
            prettyIndices = self._print_seq((expr.i, expr.j), delimiter=', '
                    ).parens(left='[', right=']')[0]
            pform = prettyForm(binding=prettyForm.FUNC,
                    *stringPict.next(prettyFunc, prettyIndices))
            # store pform parts so it can be reassembled e.g. when powered
            pform.prettyFunc = prettyFunc
            pform.prettyArgs = prettyIndices
            return pform
    def _print_MatrixSlice(self, m):
        """Print a matrix slice as ``M[a:b:c, d:e:f]`` with redundant slice
        parts (step 1, singleton ranges, 0 start) elided."""
        # XXX works only for applied functions
        prettyFunc = self._print(m.parent)
        def ppslice(x):
            # Compress (start, stop, step) into the shortest a:b:c form.
            x = list(x)
            if x[2] == 1:
                del x[2]
            if x[1] == x[0] + 1:
                del x[1]
            if x[0] == 0:
                x[0] = ''
            return prettyForm(*self._print_seq(x, delimiter=':'))
        prettyArgs = self._print_seq((ppslice(m.rowslice),
            ppslice(m.colslice)), delimiter=', ').parens(left='[', right=']')[0]
        pform = prettyForm(
            binding=prettyForm.FUNC, *stringPict.next(prettyFunc, prettyArgs))
        # store pform parts so it can be reassembled e.g. when powered
        pform.prettyFunc = prettyFunc
        pform.prettyArgs = prettyArgs
        return pform
def _print_Transpose(self, expr):
pform = self._print(expr.arg)
from sympy.matrices import MatrixSymbol
if not isinstance(expr.arg, MatrixSymbol):
pform = prettyForm(*pform.parens())
pform = pform**(prettyForm('T'))
return pform
def _print_Adjoint(self, expr):
pform = self._print(expr.arg)
if self._use_unicode:
dag = prettyForm(u('\N{DAGGER}'))
else:
dag = prettyForm('+')
from sympy.matrices import MatrixSymbol
if not isinstance(expr.arg, MatrixSymbol):
pform = prettyForm(*pform.parens())
pform = pform**dag
return pform
def _print_BlockMatrix(self, B):
if B.blocks.shape == (1, 1):
return self._print(B.blocks[0, 0])
return self._print(B.blocks)
    def _print_MatAdd(self, expr):
        # Join matrix-addition arguments with ' + '.
        return self._print_seq(expr.args, None, None, ' + ')
    def _print_MatMul(self, expr):
        """Print a matrix product, parenthesizing additive factors."""
        args = list(expr.args)
        from sympy import Add, MatAdd, HadamardProduct
        for i, a in enumerate(args):
            # Sums need parens inside a product (unless it is the only factor).
            if (isinstance(a, (Add, MatAdd, HadamardProduct))
                    and len(expr.args) > 1):
                args[i] = prettyForm(*self._print(a).parens())
            else:
                args[i] = self._print(a)
        return prettyForm.__mul__(*args)
def _print_MatPow(self, expr):
pform = self._print(expr.base)
from sympy.matrices import MatrixSymbol
if not isinstance(expr.base, MatrixSymbol):
pform = prettyForm(*pform.parens())
pform = pform**(self._print(expr.exp))
return pform
def _print_HadamardProduct(self, expr):
from sympy import MatAdd, MatMul
if self._use_unicode:
delim = pretty_atom('Ring')
else:
delim = '.*'
return self._print_seq(expr.args, None, None, delim,
parenthesize=lambda x: isinstance(x, (MatAdd, MatMul)))
_print_MatrixSymbol = _print_Symbol
def _print_FunctionMatrix(self, X):
D = self._print(X.lamda.expr)
D = prettyForm(*D.parens('[', ']'))
return D
    def _print_BasisDependent(self, expr):
        """Print vector-like objects as a sum of coefficient * basis-vector
        terms.  Unicode only; rendering is deferred to a Fake pict whose
        render() builds the string lazily."""
        from sympy.vector import Vector
        if not self._use_unicode:
            raise NotImplementedError("ASCII pretty printing of BasisDependent is not implemented")
        class Fake(object):
            # Stand-in for a prettyForm: render() produces the final string.
            baseline = 0
            # slf to distinguish from self from _print_BasisDependent
            def render(slf, *args, **kwargs):
                if expr == expr.zero:
                    return expr.zero._pretty_form
                o1 = []
                vectstrs = []
                if isinstance(expr, Vector):
                    items = expr.separate().items()
                else:
                    items = [(0, expr)]
                for system, vect in items:
                    inneritems = list(vect.components.items())
                    inneritems.sort(key = lambda x: x[0].__str__())
                    for k, v in inneritems:
                        #if the coef of the basis vector is 1
                        #we skip the 1
                        if v == 1:
                            o1.append(u("") +
                                      k._pretty_form)
                        #Same for -1
                        elif v == -1:
                            o1.append(u("(-1) ") +
                                      k._pretty_form)
                        #For a general expr
                        else:
                            #We always wrap the measure numbers in
                            #parentheses
                            arg_str = self._print(
                                v).parens()[0]
                            o1.append(arg_str + ' ' + k._pretty_form)
                        vectstrs.append(k._pretty_form)
                #outstr = u("").join(o1)
                if o1[0].startswith(u(" + ")):
                    o1[0] = o1[0][3:]
                elif o1[0].startswith(" "):
                    o1[0] = o1[0][1:]
                #Fixing the newlines
                lengths = []
                strs = ['']
                for i, partstr in enumerate(o1):
                    # XXX: What is this hack?
                    if '\n' in partstr:
                        tempstr = partstr
                        tempstr = tempstr.replace(vectstrs[i], '')
                        tempstr = tempstr.replace(u('\N{RIGHT PARENTHESIS UPPER HOOK}'),
                                                  u('\N{RIGHT PARENTHESIS UPPER HOOK}')
                                                  + ' ' + vectstrs[i])
                        o1[i] = tempstr
                o1 = [x.split('\n') for x in o1]
                n_newlines = max([len(x) for x in o1])
                # Stitch the (possibly multi-line) terms together row by row,
                # joining first rows with ' + '.
                for parts in o1:
                    lengths.append(len(parts[0]))
                    for j in range(n_newlines):
                        if j+1 <= len(parts):
                            if j >= len(strs):
                                strs.append(' ' * (sum(lengths[:-1]) +
                                                   3*(len(lengths)-1)))
                            if j == 0:
                                strs[0] += parts[0] + ' + '
                            else:
                                strs[j] += parts[j] + ' '*(lengths[-1] -
                                                           len(parts[j])+
                                                           3)
                        else:
                            if j >= len(strs):
                                strs.append(' ' * (sum(lengths[:-1]) +
                                                   3*(len(lengths)-1)))
                            strs[j] += ' '*(lengths[-1]+3)
                # Strip the trailing ' + ' from every row.
                return u('\n').join([s[:-3] for s in strs])
        return Fake()
    def _print_Piecewise(self, pexpr):
        """Draw a Piecewise as a two-column grid (expression | condition)
        with a single opening curly brace on the left."""
        P = {}
        for n, ec in enumerate(pexpr.args):
            P[n, 0] = self._print(ec.expr)
            if ec.cond == True:
                P[n, 1] = prettyForm('otherwise')
            else:
                P[n, 1] = prettyForm(
                    *prettyForm('for ').right(self._print(ec.cond)))
        hsep = 2
        vsep = 1
        len_args = len(pexpr.args)
        # max widths
        maxw = [max([P[i, j].width() for i in range(len_args)])
                for j in range(2)]
        # FIXME: Refactor this code and matrix into some tabular environment.
        # drawing result
        D = None
        for i in range(len_args):
            D_row = None
            for j in range(2):
                p = P[i, j]
                assert p.width() <= maxw[j]
                # Horizontally center the cell within its column.
                wdelta = maxw[j] - p.width()
                wleft = wdelta // 2
                wright = wdelta - wleft
                p = prettyForm(*p.right(' '*wright))
                p = prettyForm(*p.left(' '*wleft))
                if D_row is None:
                    D_row = p
                    continue
                D_row = prettyForm(*D_row.right(' '*hsep))  # h-spacer
                D_row = prettyForm(*D_row.right(p))
            if D is None:
                D = D_row       # first row in a picture
                continue
            # v-spacer
            for _ in range(vsep):
                D = prettyForm(*D.below(' '))
            D = prettyForm(*D.below(D_row))
        D = prettyForm(*D.parens('{', ''))
        D.baseline = D.height()//2
        D.binding = prettyForm.OPEN
        return D
def _hprint_vec(self, v):
D = None
for a in v:
p = a
if D is None:
D = p
else:
D = prettyForm(*D.right(', '))
D = prettyForm(*D.right(p))
if D is None:
D = stringPict(' ')
return D
def _hprint_vseparator(self, p1, p2):
tmp = prettyForm(*p1.right(p2))
sep = stringPict(vobj('|', tmp.height()), baseline=tmp.baseline)
return prettyForm(*p1.right(sep, p2))
    def _print_hyper(self, e):
        """Draw a hypergeometric function pFq: parameter rows, a vertical
        separator, the argument, all wrapped in parens after an annotated F
        with p and q as corner indices."""
        # FIXME refactor Matrix, Piecewise, and this into a tabular environment
        ap = [self._print(a) for a in e.ap]
        bq = [self._print(b) for b in e.bq]
        P = self._print(e.argument)
        P.baseline = P.height()//2
        # Drawing result - first create the ap, bq vectors
        D = None
        for v in [ap, bq]:
            D_row = self._hprint_vec(v)
            if D is None:
                D = D_row       # first row in a picture
            else:
                D = prettyForm(*D.below(' '))
                D = prettyForm(*D.below(D_row))
        # make sure that the argument `z' is centred vertically
        D.baseline = D.height()//2
        # insert horizontal separator
        P = prettyForm(*P.left(' '))
        D = prettyForm(*D.right(' '))
        # insert separating `|`
        D = self._hprint_vseparator(D, P)
        # add parens
        D = prettyForm(*D.parens('(', ')'))
        # create the F symbol
        above = D.height()//2 - 1
        below = D.height() - above - 1
        sz, t, b, add, img = annotated('F')
        F = prettyForm('\n' * (above - t) + img + '\n' * (below - b),
                       baseline=above + sz)
        add = (sz + 1)//2
        # p and q indices flank the F.
        F = prettyForm(*F.left(self._print(len(e.ap))))
        F = prettyForm(*F.right(self._print(len(e.bq))))
        F.baseline = above + add
        D = prettyForm(*F.right(' ', D))
        return D
    def _print_meijerg(self, e):
        """Draw a Meijer G-function: a 2x2 grid of parameter vectors, a
        vertical separator, the argument, after an annotated G carrying the
        m,n (upper) and p,q (lower) indices."""
        # FIXME refactor Matrix, Piecewise, and this into a tabular environment
        v = {}
        v[(0, 0)] = [self._print(a) for a in e.an]
        v[(0, 1)] = [self._print(a) for a in e.aother]
        v[(1, 0)] = [self._print(b) for b in e.bm]
        v[(1, 1)] = [self._print(b) for b in e.bother]
        P = self._print(e.argument)
        P.baseline = P.height()//2
        vp = {}
        for idx in v:
            vp[idx] = self._hprint_vec(v[idx])
        # Pad each column pair to a common width, centering the narrower one.
        for i in range(2):
            maxw = max(vp[(0, i)].width(), vp[(1, i)].width())
            for j in range(2):
                s = vp[(j, i)]
                left = (maxw - s.width()) // 2
                right = maxw - left - s.width()
                s = prettyForm(*s.left(' ' * left))
                s = prettyForm(*s.right(' ' * right))
                vp[(j, i)] = s
        D1 = prettyForm(*vp[(0, 0)].right('  ', vp[(0, 1)]))
        D1 = prettyForm(*D1.below(' '))
        D2 = prettyForm(*vp[(1, 0)].right('  ', vp[(1, 1)]))
        D = prettyForm(*D1.below(D2))
        # make sure that the argument `z' is centred vertically
        D.baseline = D.height()//2
        # insert horizontal separator
        P = prettyForm(*P.left(' '))
        D = prettyForm(*D.right(' '))
        # insert separating `|`
        D = self._hprint_vseparator(D, P)
        # add parens
        D = prettyForm(*D.parens('(', ')'))
        # create the G symbol
        above = D.height()//2 - 1
        below = D.height() - above - 1
        sz, t, b, add, img = annotated('G')
        F = prettyForm('\n' * (above - t) + img + '\n' * (below - b),
                       baseline=above + sz)
        pp = self._print(len(e.ap))
        pq = self._print(len(e.bq))
        pm = self._print(len(e.bm))
        pn = self._print(len(e.an))
        def adjust(p1, p2):
            # Pad the narrower of p1, p2 on the left so widths match.
            diff = p1.width() - p2.width()
            if diff == 0:
                return p1, p2
            elif diff > 0:
                return p1, prettyForm(*p2.left(' '*diff))
            else:
                return prettyForm(*p1.left(' '*-diff)), p2
        pp, pm = adjust(pp, pm)
        pq, pn = adjust(pq, pn)
        pu = prettyForm(*pm.right(', ', pn))
        pl = prettyForm(*pp.right(', ', pq))
        ht = F.baseline - above - 2
        if ht > 0:
            pu = prettyForm(*pu.below('\n'*ht))
        p = prettyForm(*pu.below(pl))
        F.baseline = above
        F = prettyForm(*F.right(p))
        F.baseline = above + add
        D = prettyForm(*F.right(' ', D))
        return D
def _print_ExpBase(self, e):
# TODO should exp_polar be printed differently?
# what about exp_polar(0), exp_polar(1)?
base = prettyForm(pretty_atom('Exp1', 'e'))
return base ** self._print(e.args[0])
    def _print_Function(self, e, sort=False):
        """Generic applied-function printer: ``name(args...)``, optionally
        with the arguments in canonical sorted order."""
        # XXX works only for applied functions
        func = e.func
        args = e.args
        if sort:
            args = sorted(args, key=default_sort_key)
        func_name = func.__name__
        prettyFunc = self._print(Symbol(func_name))
        prettyArgs = prettyForm(*self._print_seq(args).parens())
        #postioning func_name
        mid = prettyArgs.height()//2
        if mid > 2:
            prettyFunc.baseline = -mid + 1
        pform = prettyForm(
            binding=prettyForm.FUNC, *stringPict.next(prettyFunc, prettyArgs))
        # store pform parts so it can be reassembled e.g. when powered
        pform.prettyFunc = prettyFunc
        pform.prettyArgs = prettyArgs
        return pform
    def _print_GeometryEntity(self, expr):
        # GeometryEntity is based on Tuple but should not print like a Tuple
        return self.emptyPrinter(expr)
def _print_Lambda(self, e):
vars, expr = e.args
if self._use_unicode:
arrow = u(" \N{RIGHTWARDS ARROW FROM BAR} ")
else:
arrow = " -> "
if len(vars) == 1:
var_form = self._print(vars[0])
else:
var_form = self._print(tuple(vars))
return prettyForm(*stringPict.next(var_form, arrow, self._print(expr)), binding=8)
def _print_Order(self, expr):
    """Print a big-O Order term, e.g. ``O(x**2; x -> 0)``.

    The limit annotation (``; vars -> point``) is emitted only when the
    limit point is nonzero or there is more than one variable.
    """
    pform = self._print(expr.expr)
    if (expr.point and any(p != S.Zero for p in expr.point)) or \
            len(expr.variables) > 1:
        pform = prettyForm(*pform.right("; "))
        if len(expr.variables) > 1:
            pform = prettyForm(*pform.right(self._print(expr.variables)))
        elif len(expr.variables):
            pform = prettyForm(*pform.right(self._print(expr.variables[0])))
        if self._use_unicode:
            pform = prettyForm(*pform.right(u(" \N{RIGHTWARDS ARROW} ")))
        else:
            pform = prettyForm(*pform.right(" -> "))
        if len(expr.point) > 1:
            pform = prettyForm(*pform.right(self._print(expr.point)))
        else:
            pform = prettyForm(*pform.right(self._print(expr.point[0])))
    pform = prettyForm(*pform.parens())
    pform = prettyForm(*pform.left("O"))
    return pform
def _print_gamma(self, e):
    """Gamma function: Greek capital Gamma with unicode, else the
    generic function printer."""
    if self._use_unicode:
        pform = self._print(e.args[0])
        pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.left(greek_unicode['Gamma']))
        return pform
    else:
        return self._print_Function(e)

def _print_uppergamma(self, e):
    """Upper incomplete gamma: capital Gamma with both arguments."""
    if self._use_unicode:
        pform = self._print(e.args[0])
        pform = prettyForm(*pform.right(', ', self._print(e.args[1])))
        pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.left(greek_unicode['Gamma']))
        return pform
    else:
        return self._print_Function(e)

def _print_lowergamma(self, e):
    """Lower incomplete gamma: lowercase gamma with both arguments."""
    if self._use_unicode:
        pform = self._print(e.args[0])
        pform = prettyForm(*pform.right(', ', self._print(e.args[1])))
        pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.left(greek_unicode['gamma']))
        return pform
    else:
        return self._print_Function(e)
def _print_expint(self, e):
    """Exponential integral: print E with a subscripted integer order
    (as a synthetic function name ``E_n``) when unicode is enabled and
    the order is an Integer; otherwise fall back to the generic form."""
    from sympy import Function
    if e.args[0].is_Integer and self._use_unicode:
        return self._print_Function(Function('E_%s' % e.args[0])(e.args[1]))
    return self._print_Function(e)

def _print_Chi(self, e):
    # This needs a special case since otherwise it comes out as greek
    # letter chi...
    prettyFunc = prettyForm("Chi")
    prettyArgs = prettyForm(*self._print_seq(e.args).parens())
    pform = prettyForm(
        binding=prettyForm.FUNC, *stringPict.next(prettyFunc, prettyArgs))
    # store pform parts so it can be reassembled e.g. when powered
    pform.prettyFunc = prettyFunc
    pform.prettyArgs = prettyArgs
    return pform
def _print_elliptic_e(self, e):
    """Elliptic integral E; with two arguments they are separated by a
    vertical bar (``E(phi|m)``)."""
    pforma0 = self._print(e.args[0])
    if len(e.args) == 1:
        pform = pforma0
    else:
        pforma1 = self._print(e.args[1])
        pform = self._hprint_vseparator(pforma0, pforma1)
    pform = prettyForm(*pform.parens())
    pform = prettyForm(*pform.left('E'))
    return pform

def _print_elliptic_k(self, e):
    """Complete elliptic integral K(m)."""
    pform = self._print(e.args[0])
    pform = prettyForm(*pform.parens())
    pform = prettyForm(*pform.left('K'))
    return pform

def _print_elliptic_f(self, e):
    """Incomplete elliptic integral F(phi|m)."""
    pforma0 = self._print(e.args[0])
    pforma1 = self._print(e.args[1])
    pform = self._hprint_vseparator(pforma0, pforma1)
    pform = prettyForm(*pform.parens())
    pform = prettyForm(*pform.left('F'))
    return pform

def _print_elliptic_pi(self, e):
    """Elliptic integral Pi: ``Pi(n|m)`` or ``Pi(n; phi|m)``."""
    name = greek_unicode['Pi'] if self._use_unicode else 'Pi'
    pforma0 = self._print(e.args[0])
    pforma1 = self._print(e.args[1])
    if len(e.args) == 2:
        pform = self._hprint_vseparator(pforma0, pforma1)
    else:
        pforma2 = self._print(e.args[2])
        pforma = self._hprint_vseparator(pforma1, pforma2)
        pforma = prettyForm(*pforma.left('; '))
        pform = prettyForm(*pforma.left(pforma0))
    pform = prettyForm(*pform.parens())
    pform = prettyForm(*pform.left(name))
    return pform
def _print_Add(self, expr, order=None):
    """Print a sum, joining terms with ``+``/``-`` so that negative
    terms render as subtraction rather than ``+ (-x)``.

    Rational terms with denominator > 1 are deferred (recorded in
    ``indices``) and printed in a second pass so their layout can match
    the height of the other terms.
    """
    if self.order == 'none':
        terms = list(expr.args)
    else:
        terms = self._as_ordered_terms(expr, order=order)
    pforms, indices = [], []

    def pretty_negative(pform, index):
        """Prepend a minus sign to a pretty form. """
        #TODO: Move this code to prettyForm
        if index == 0:
            if pform.height() > 1:
                pform_neg = '- '
            else:
                pform_neg = '-'
        else:
            pform_neg = ' - '
        if pform.binding > prettyForm.NEG:
            p = stringPict(*pform.parens())
        else:
            p = pform
        p = stringPict.next(pform_neg, p)
        # Lower the binding to NEG, even if it was higher. Otherwise, it
        # will print as a + ( - (b)), instead of a - (b).
        return prettyForm(binding=prettyForm.NEG, *p)

    for i, term in enumerate(terms):
        if term.is_Mul and _coeff_isneg(term):
            coeff, other = term.as_coeff_mul(rational=False)
            pform = self._print(Mul(-coeff, *other, evaluate=False))
            pforms.append(pretty_negative(pform, i))
        elif term.is_Rational and term.q > 1:
            # placeholder; handled in the second pass below
            pforms.append(None)
            indices.append(i)
        elif term.is_Number and term < 0:
            pform = self._print(-term)
            pforms.append(pretty_negative(pform, i))
        elif term.is_Relational:
            pforms.append(prettyForm(*self._print(term).parens()))
        else:
            pforms.append(self._print(term))

    if indices:
        # use the tall fraction layout only if some other term is tall
        large = True
        for pform in pforms:
            if pform is not None and pform.height() > 1:
                break
        else:
            large = False
        for i in indices:
            term, negative = terms[i], False
            if term < 0:
                term, negative = -term, True
            if large:
                pform = prettyForm(str(term.p))/prettyForm(str(term.q))
            else:
                pform = self._print(term)
            if negative:
                pform = pretty_negative(pform, i)
            pforms[i] = pform

    return prettyForm.__add__(*pforms)
def _print_Mul(self, product):
    """Print a product as a (possibly two-level) fraction: factors with
    negative rational exponents and rational denominators go below the
    bar, everything else above."""
    a = []  # items in the numerator
    b = []  # items that are in the denominator (if any)
    if self.order not in ('old', 'none'):
        args = product.as_ordered_factors()
    else:
        args = product.args
    # Gather terms for numerator/denominator
    for item in args:
        if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
            if item.exp != -1:
                b.append(Pow(item.base, -item.exp, evaluate=False))
            else:
                b.append(Pow(item.base, -item.exp))
        elif item.is_Rational and item is not S.Infinity:
            if item.p != 1:
                a.append( Rational(item.p) )
            if item.q != 1:
                b.append( Rational(item.q) )
        else:
            a.append(item)

    from sympy import Integral, Piecewise, Product, Sum

    # Convert to pretty forms. Add parens to Add instances if there
    # is more than one term in the numer/denom
    for i in range(0, len(a)):
        if (a[i].is_Add and len(a) > 1) or (i != len(a) - 1 and
                isinstance(a[i], (Integral, Piecewise, Product, Sum))):
            a[i] = prettyForm(*self._print(a[i]).parens())
        elif a[i].is_Relational:
            a[i] = prettyForm(*self._print(a[i]).parens())
        else:
            a[i] = self._print(a[i])

    for i in range(0, len(b)):
        if (b[i].is_Add and len(b) > 1) or (i != len(b) - 1 and
                isinstance(b[i], (Integral, Piecewise, Product, Sum))):
            b[i] = prettyForm(*self._print(b[i]).parens())
        else:
            b[i] = self._print(b[i])

    # Construct a pretty form
    if len(b) == 0:
        return prettyForm.__mul__(*a)
    else:
        if len(a) == 0:
            a.append( self._print(S.One) )
        return prettyForm.__mul__(*a)/prettyForm.__mul__(*b)
# A helper function for _print_Pow to print x**(1/n)
def _print_nth_root(self, base, expt):
    """Draw ``base ** (1/n)`` as an ASCII radical sign with the index
    ``n`` stacked above the hook (omitted for square roots)."""
    bpretty = self._print(base)

    # Construct root sign, start with the \/ shape
    _zZ = xobj('/', 1)
    rootsign = xobj('\\', 1) + _zZ
    # Make exponent number to put above it
    if isinstance(expt, Rational):
        exp = str(expt.q)
        if exp == '2':
            exp = ''
    else:
        exp = str(expt.args[0])
    exp = exp.ljust(2)
    if len(exp) > 2:
        rootsign = ' '*(len(exp) - 2) + rootsign
    # Stack the exponent
    rootsign = stringPict(exp + '\n' + rootsign)
    rootsign.baseline = 0
    # Diagonal: length is one less than height of base
    linelength = bpretty.height() - 1
    diagonal = stringPict('\n'.join(
        ' '*(linelength - i - 1) + _zZ + ' '*i
        for i in range(linelength)
        ))
    # Put baseline just below lowest line: next to exp
    diagonal.baseline = linelength - 1
    # Make the root symbol
    rootsign = prettyForm(*rootsign.right(diagonal))
    # Set the baseline to match contents to fix the height
    # but if the height of bpretty is one, the rootsign must be one higher
    rootsign.baseline = max(1, bpretty.baseline)
    # build result: overbar above the base, radical sign to its left
    s = prettyForm(hobj('_', 2 + bpretty.width()))
    s = prettyForm(*bpretty.above(s))
    s = prettyForm(*s.left(rootsign))
    return s
def _print_Pow(self, power):
    """Print a power, special-casing (for commutative bases):
    ``b**-1`` as ``1/b``, ``b**(1/n)`` as an n-th root, and negative
    rational exponents as ``1/b**|e|``."""
    from sympy.simplify.simplify import fraction
    b, e = power.as_base_exp()
    if power.is_commutative:
        if e is S.NegativeOne:
            return prettyForm("1")/self._print(b)
        n, d = fraction(e)
        if n is S.One and d.is_Atom and not e.is_Integer:
            return self._print_nth_root(b, e)
        if e.is_Rational and e < 0:
            return prettyForm("1")/self._print(Pow(b, -e, evaluate=False))

    if b.is_Relational:
        return prettyForm(*self._print(b).parens()).__pow__(self._print(e))

    return self._print(b)**self._print(e)
def __print_numer_denom(self, p, q):
    """Shared helper for Rational/Fraction printing.

    Returns a pretty form for ``p/q``, or None when the pair should be
    handled by the generic fallback printer (one-digit numerator or
    denominator, excluding integers).
    """
    if q == 1:
        # plain integer; mark negatives with NEG binding
        binding = prettyForm.NEG if p < 0 else prettyForm.ATOM
        if p < 0:
            return prettyForm(str(p), binding=prettyForm.NEG)
        return prettyForm(str(p))
    if abs(p) >= 10 and abs(q) >= 10:
        # If more than one digit in numer and denom, print larger fraction
        numer = prettyForm(str(p), binding=prettyForm.NEG) if p < 0 \
            else prettyForm(str(p))
        # Old printing method:
        #pform = prettyForm(str(-p))/prettyForm(str(q))
        #return prettyForm(binding=prettyForm.NEG, *pform.left('- '))
        return numer/prettyForm(str(q))
    return None
def _print_Rational(self, expr):
    """Print a sympy Rational via the shared numer/denom helper,
    falling back to the generic printer when the helper declines."""
    result = self.__print_numer_denom(expr.p, expr.q)

    if result is not None:
        return result
    else:
        return self.emptyPrinter(expr)

def _print_Fraction(self, expr):
    """Same as _print_Rational but for (g)mpy/fractions Fraction-like
    objects exposing numerator/denominator attributes."""
    result = self.__print_numer_denom(expr.numerator, expr.denominator)

    if result is not None:
        return result
    else:
        return self.emptyPrinter(expr)
def _print_ProductSet(self, p):
    """Cartesian product: identical factors collapse to a power
    ``S**n``; otherwise factors are joined with a multiplication sign."""
    if len(p.sets) > 1 and not has_variety(p.sets):
        from sympy import Pow
        return self._print(Pow(p.sets[0], len(p.sets), evaluate=False))
    else:
        prod_char = u('\xd7')
        return self._print_seq(p.sets, None, None, ' %s ' % prod_char,
                               parenthesize=lambda set: set.is_Union or set.is_Intersection)

def _print_FiniteSet(self, s):
    # canonical element order inside braces
    items = sorted(s.args, key=default_sort_key)
    return self._print_seq(items, '{', '}', ', ' )
def _print_Range(self, s):
    """Print a Range set inside braces, abbreviating unbounded or long
    ranges with an ellipsis.

    Three cases:
    * start is -oo: show ``{-oo, ..., last - step, last}``;
    * stop is oo or more than 4 elements: show the first two elements,
      an ellipsis, and the last element;
    * otherwise list every element.
    """
    if self._use_unicode:
        dots = u("\N{HORIZONTAL ELLIPSIS}")
    else:
        dots = '...'

    if s.start is S.NegativeInfinity:
        # NOTE: original code also built an unused ``it = iter(s)`` here
        printset = s.start, dots, s._last_element - s.step, s._last_element
    elif s.stop is S.Infinity or len(s) > 4:
        it = iter(s)
        printset = next(it), next(it), dots, s._last_element
    else:
        printset = tuple(s)

    return self._print_seq(printset, '{', '}', ', ' )
def _print_Interval(self, i):
    """Print an Interval with bracket shape chosen by openness:
    ``[a, b]``, ``(a, b)``, ``[a, b)`` or ``(a, b]``; a degenerate
    interval prints as a singleton set ``{a}``."""
    if i.start == i.end:
        return self._print_seq(i.args[:1], '{', '}')

    left = '(' if i.left_open else '['
    right = ')' if i.right_open else ']'
    return self._print_seq(i.args[:2], left, right)
def _print_Intersection(self, u):
    """Join operands with the intersection symbol, parenthesizing
    products and unions (lower binding)."""
    delimiter = ' %s ' % pretty_atom('Intersection')

    return self._print_seq(u.args, None, None, delimiter,
                           parenthesize=lambda set: set.is_ProductSet or set.is_Union)

def _print_Union(self, u):
    """Join operands with the union symbol, parenthesizing products
    and intersections."""
    union_delimiter = ' %s ' % pretty_atom('Union')

    return self._print_seq(u.args, None, None, union_delimiter,
                           parenthesize=lambda set: set.is_ProductSet or set.is_Intersection)
def _print_SymmetricDifference(self, u):
    """Join operands with the symmetric-difference symbol.

    Only a unicode rendering exists; ASCII mode raises
    NotImplementedError.
    """
    if not self._use_unicode:
        raise NotImplementedError("ASCII pretty printing of SymmetricDifference is not implemented")

    sym_delimiter = ' %s ' % pretty_atom('SymmetricDifference')

    return self._print_seq(u.args, None, None, sym_delimiter)
def _print_Complement(self, u):
    """Join operands with a backslash (set difference), parenthesizing
    products, intersections and unions."""
    # ' \\ ' (escaped) -- the original literal ' \ ' relies on an invalid
    # escape sequence, which is deprecated and warns on modern Pythons;
    # the runtime string is identical.
    delimiter = ' \\ '
    return self._print_seq(u.args, None, None, delimiter,
         parenthesize=lambda set: set.is_ProductSet or set.is_Intersection
                       or set.is_Union)
def _print_ImageSet(self, ts):
    """Set-builder notation ``{expr | vars in base_set}`` (unicode
    'element of' when available)."""
    if self._use_unicode:
        inn = u("\N{SMALL ELEMENT OF}")
    else:
        inn = 'in'
    variables = self._print_seq(ts.lamda.variables)
    expr = self._print(ts.lamda.expr)
    bar = self._print("|")
    base = self._print(ts.base_set)

    return self._print_seq((expr, bar, variables, inn, base), "{", "}", ' ')

def _print_Contains(self, e):
    """``var in set`` with the unicode element-of symbol; plain string
    form otherwise."""
    var, set = e.args
    if self._use_unicode:
        el = u(" \N{ELEMENT OF} ")
        return prettyForm(*stringPict.next(self._print(var),
                                           el, self._print(set)), binding=8)
    else:
        return prettyForm(sstr(e))
def _print_seq(self, seq, left=None, right=None, delimiter=', ',
        parenthesize=lambda x: False):
    """Print an iterable delimited by ``delimiter`` and wrapped in
    ``left``/``right`` brackets.

    ``parenthesize(item)`` decides per item whether its pretty form is
    additionally wrapped in parentheses.  An empty sequence yields just
    the brackets.
    """
    s = None
    for item in seq:
        pform = self._print(item)

        if parenthesize(item):
            pform = prettyForm(*pform.parens())
        if s is None:
            # first element
            s = pform
        else:
            s = prettyForm(*stringPict.next(s, delimiter))
            s = prettyForm(*stringPict.next(s, pform))

    if s is None:
        s = stringPict('')

    s = prettyForm(*s.parens(left, right, ifascii_nougly=True))
    return s
def join(self, delimiter, args):
    """Concatenate pretty forms from ``args``, inserting ``delimiter``
    between consecutive items; an empty sequence yields an empty form."""
    result = None
    for arg in args:
        if result is None:
            result = arg
            continue
        result = prettyForm(*result.right(delimiter))
        result = prettyForm(*result.right(arg))

    return prettyForm("") if result is None else result
def _print_list(self, l):
    # Python list: square brackets.
    return self._print_seq(l, '[', ']')

def _print_tuple(self, t):
    """Python tuple; a 1-tuple keeps its trailing comma ``(x,)``."""
    if len(t) == 1:
        ptuple = prettyForm(*stringPict.next(self._print(t[0]), ','))
        return prettyForm(*ptuple.parens('(', ')', ifascii_nougly=True))
    else:
        return self._print_seq(t, '(', ')')

def _print_Tuple(self, expr):
    # sympy Tuple prints exactly like a Python tuple.
    return self._print_tuple(expr)

def _print_dict(self, d):
    """Python dict: ``{key: value, ...}`` with keys in canonical
    sort order."""
    keys = sorted(d.keys(), key=default_sort_key)
    items = []

    for k in keys:
        K = self._print(k)
        V = self._print(d[k])
        s = prettyForm(*stringPict.next(K, ': ', V))

        items.append(s)

    return self._print_seq(items, '{', '}')

def _print_Dict(self, d):
    # sympy Dict prints exactly like a Python dict.
    return self._print_dict(d)
def _print_set(self, s):
    """Python set/frozenset: printed as ``set([...])`` /
    ``frozenset([...])`` using the runtime type's name."""
    items = sorted(s, key=default_sort_key)

    pretty = self._print_seq(items, '[', ']')
    pretty = prettyForm(*pretty.parens('(', ')', ifascii_nougly=True))
    pretty = prettyForm(*stringPict.next(type(s).__name__, pretty))
    return pretty

# frozenset shares the same rendering (type name is taken at runtime)
_print_frozenset = _print_set
def _print_PolyRing(self, ring):
    # Delegate to the plain string printer.
    return prettyForm(sstr(ring))

def _print_FracField(self, field):
    # Delegate to the plain string printer.
    return prettyForm(sstr(field))

def _print_PolyElement(self, poly):
    # Delegate to the plain string printer.
    return prettyForm(sstr(poly))

def _print_FracElement(self, frac):
    # Delegate to the plain string printer.
    return prettyForm(sstr(frac))

def _print_AlgebraicNumber(self, expr):
    """Aliased algebraic numbers print through their polynomial
    representation, others through their expression form."""
    if expr.is_aliased:
        return self._print(expr.as_poly().as_expr())
    else:
        return self._print(expr.as_expr())
def _print_RootOf(self, expr):
    """``RootOf(poly, index)`` with the polynomial printed in lex
    term order."""
    args = [self._print_Add(expr.expr, order='lex'), expr.index]
    pform = prettyForm(*self._print_seq(args).parens())
    pform = prettyForm(*pform.left('RootOf'))
    return pform

def _print_RootSum(self, expr):
    """``RootSum(poly[, fun])``; the mapping function is omitted when
    it is the identity."""
    args = [self._print_Add(expr.expr, order='lex')]

    if expr.fun is not S.IdentityFunction:
        args.append(self._print(expr.fun))

    pform = prettyForm(*self._print_seq(args).parens())
    pform = prettyForm(*pform.left('RootSum'))

    return pform
def _print_FiniteField(self, expr):
    """GF(n): double-struck Z subscripted by the modulus with unicode,
    ``GF(n)`` otherwise."""
    if self._use_unicode:
        form = u('\N{DOUBLE-STRUCK CAPITAL Z}_%d')
    else:
        form = 'GF(%d)'

    return prettyForm(pretty_symbol(form % expr.mod))

def _print_IntegerRing(self, expr):
    # ZZ: double-struck Z with unicode.
    if self._use_unicode:
        return prettyForm(u('\N{DOUBLE-STRUCK CAPITAL Z}'))
    else:
        return prettyForm('ZZ')

def _print_RationalField(self, expr):
    # QQ: double-struck Q with unicode.
    if self._use_unicode:
        return prettyForm(u('\N{DOUBLE-STRUCK CAPITAL Q}'))
    else:
        return prettyForm('QQ')

def _print_RealField(self, domain):
    """RR: double-struck R; non-default precision is appended as a
    subscript."""
    if self._use_unicode:
        prefix = u('\N{DOUBLE-STRUCK CAPITAL R}')
    else:
        prefix = 'RR'

    if domain.has_default_precision:
        return prettyForm(prefix)
    else:
        return self._print(pretty_symbol(prefix + "_" + str(domain.precision)))

def _print_ComplexField(self, domain):
    """CC: double-struck C; non-default precision is appended as a
    subscript."""
    if self._use_unicode:
        prefix = u('\N{DOUBLE-STRUCK CAPITAL C}')
    else:
        prefix = 'CC'

    if domain.has_default_precision:
        return prettyForm(prefix)
    else:
        return self._print(pretty_symbol(prefix + "_" + str(domain.precision)))
def _print_PolynomialRing(self, expr):
    """``domain[gens]``, with an ``order=...`` entry appended when the
    term order is not the default."""
    args = list(expr.symbols)

    if not expr.order.is_default:
        order = prettyForm(*prettyForm("order=").right(self._print(expr.order)))
        args.append(order)

    pform = self._print_seq(args, '[', ']')
    pform = prettyForm(*pform.left(self._print(expr.domain)))

    return pform

def _print_FractionField(self, expr):
    """``domain(gens)`` -- like a polynomial ring but with round
    brackets; non-default order appended the same way."""
    args = list(expr.symbols)

    if not expr.order.is_default:
        order = prettyForm(*prettyForm("order=").right(self._print(expr.order)))
        args.append(order)

    pform = self._print_seq(args, '(', ')')
    pform = prettyForm(*pform.left(self._print(expr.domain)))

    return pform

def _print_PolynomialRingBase(self, expr):
    """Old-style polynomial ring: ``domain[gens]`` with a textual
    ``order=...`` entry when the order differs from the default."""
    g = expr.symbols
    if str(expr.order) != str(expr.default_order):
        g = g + ("order=" + str(expr.order),)
    pform = self._print_seq(g, '[', ']')
    pform = prettyForm(*pform.left(self._print(expr.domain)))

    return pform
def _print_GroebnerBasis(self, basis):
    """``ClassName([exprs], gens, domain=..., order=...)`` with basis
    polynomials printed in the basis' own term order."""
    exprs = [ self._print_Add(arg, order=basis.order)
              for arg in basis.exprs ]
    exprs = prettyForm(*self.join(", ", exprs).parens(left="[", right="]"))

    gens = [ self._print(gen) for gen in basis.gens ]

    domain = prettyForm(
        *prettyForm("domain=").right(self._print(basis.domain)))
    order = prettyForm(
        *prettyForm("order=").right(self._print(basis.order)))

    pform = self.join(", ", [exprs] + gens + [domain, order])

    pform = prettyForm(*pform.parens())
    pform = prettyForm(*pform.left(basis.__class__.__name__))

    return pform
def _print_Subs(self, e):
    """Print a Subs object: the expression in parentheses, a tall
    vertical evaluation bar, and the ``var=value`` substitutions set
    at the bottom of the bar."""
    pform = self._print(e.expr)
    pform = prettyForm(*pform.parens())

    h = pform.height() if pform.height() > 1 else 2
    rvert = stringPict(vobj('|', h), baseline=pform.baseline)
    pform = prettyForm(*pform.right(rvert))

    # temporarily drop the baseline so the substitutions attach at the
    # bottom of the bar, then restore it
    b = pform.baseline
    pform.baseline = pform.height() - 1
    pform = prettyForm(*pform.right(self._print_seq([
        self._print_seq((self._print(v[0]), xsym('=='), self._print(v[1])),
            delimiter='') for v in zip(e.variables, e.point) ])))

    pform.baseline = b
    return pform
def _print_euler(self, e):
    """Euler number: letter ``E`` with the argument typeset below as a
    subscript-like block."""
    pform = prettyForm("E")
    arg = self._print(e.args[0])
    pform_arg = prettyForm(" "*arg.width())
    pform_arg = prettyForm(*pform_arg.below(arg))
    pform = prettyForm(*pform.right(pform_arg))
    return pform

def _print_catalan(self, e):
    """Catalan number: letter ``C`` with the argument typeset below as
    a subscript-like block."""
    pform = prettyForm("C")
    arg = self._print(e.args[0])
    pform_arg = prettyForm(" "*arg.width())
    pform_arg = prettyForm(*pform_arg.below(arg))
    pform = prettyForm(*pform.right(pform_arg))
    return pform
def _print_KroneckerDelta(self, e):
    """Kronecker delta: a delta symbol (``d`` in ASCII mode) with the
    comma-separated index pair typeset below it."""
    pform = self._print(e.args[0])
    pform = prettyForm(*pform.right((prettyForm(','))))
    pform = prettyForm(*pform.right((self._print(e.args[1]))))
    if self._use_unicode:
        a = stringPict(pretty_symbol('delta'))
    else:
        a = stringPict('d')
    b = pform
    # pad both picts so the delta sits above-left of the indices
    top = stringPict(*b.left(' '*a.width()))
    bot = stringPict(*a.right(' '*b.width()))
    return prettyForm(binding=prettyForm.POW, *bot.below(top))
def _print_RandomDomain(self, d):
    """Print a random-variable domain as ``Domain: <condition>`` when
    the domain has a boolean form, otherwise as
    ``Domain: <symbols> in <set>``; fall back to ``None`` if neither
    representation is available.

    The original second fallback used a bare ``except:``, which also
    swallows SystemExit/KeyboardInterrupt; narrowed to ``Exception``.
    """
    try:
        pform = self._print('Domain: ')
        pform = prettyForm(*pform.right(self._print(d.as_boolean())))
        return pform
    except Exception:
        try:
            pform = self._print('Domain: ')
            pform = prettyForm(*pform.right(self._print(d.symbols)))
            pform = prettyForm(*pform.right(self._print(' in ')))
            pform = prettyForm(*pform.right(self._print(d.set)))
            return pform
        except Exception:
            return self._print(None)
def _print_DMP(self, p):
    """Dense multivariate polynomial: print via its ring's sympy
    expression when a ring is attached, else fall back to repr."""
    try:
        if p.ring is not None:
            # TODO incorporate order
            return self._print(p.ring.to_sympy(p))
    except SympifyError:
        pass
    return self._print(repr(p))

def _print_DMF(self, p):
    # Dense multivariate fraction prints like a DMP.
    return self._print_DMP(p)
def _print_Object(self, object):
    # Category-theory object: just its (prettified) name.
    return self._print(pretty_symbol(object.name))

def _print_Morphism(self, morphism):
    """Anonymous morphism: ``domain --> codomain``."""
    arrow = xsym("-->")

    domain = self._print(morphism.domain)
    codomain = self._print(morphism.codomain)
    tail = domain.right(arrow, codomain)[0]

    return prettyForm(tail)

def _print_NamedMorphism(self, morphism):
    """Named morphism: ``name:domain --> codomain``."""
    pretty_name = self._print(pretty_symbol(morphism.name))
    pretty_morphism = self._print_Morphism(morphism)
    return prettyForm(pretty_name.right(":", pretty_morphism)[0])

def _print_IdentityMorphism(self, morphism):
    # Identity is printed as a named morphism called "id".
    from sympy.categories import NamedMorphism
    return self._print_NamedMorphism(
        NamedMorphism(morphism.domain, morphism.codomain, "id"))

def _print_CompositeMorphism(self, morphism):
    """Composite morphism: component names joined right-to-left with a
    composition circle, followed by the overall arrow."""
    circle = xsym(".")

    # All components of the morphism have names and it is thus
    # possible to build the name of the composite.
    component_names_list = [pretty_symbol(component.name) for
                            component in morphism.components]
    component_names_list.reverse()
    component_names = circle.join(component_names_list) + ":"

    pretty_name = self._print(component_names)
    pretty_morphism = self._print_Morphism(morphism)
    return prettyForm(pretty_name.right(pretty_morphism)[0])

def _print_Category(self, category):
    # Category: just its (prettified) name.
    return self._print(pretty_symbol(category.name))
def _print_Diagram(self, diagram):
    """Diagram: premises, then ``==> conclusions`` when conclusions
    exist; an empty diagram prints as the empty set."""
    if not diagram.premises:
        # This is an empty diagram.
        return self._print(S.EmptySet)

    pretty_result = self._print(diagram.premises)
    if diagram.conclusions:
        results_arrow = " %s " % xsym("==>")

        pretty_conclusions = self._print(diagram.conclusions)[0]
        pretty_result = pretty_result.right(
            results_arrow, pretty_conclusions)

    return prettyForm(pretty_result[0])

def _print_DiagramGrid(self, grid):
    """Lay a DiagramGrid out as a matrix, using a one-space Symbol for
    empty cells."""
    from sympy.matrices import Matrix
    from sympy import Symbol
    matrix = Matrix([[grid[i, j] if grid[i, j] else Symbol(" ")
                      for j in range(grid.width)]
                     for i in range(grid.height)])
    return self._print_matrix_contents(matrix)
def _print_FreeModuleElement(self, m):
    # Print as row vector for convenience, for now.
    return self._print_seq(m, '[', ']')

def _print_SubModule(self, M):
    # Submodule: generators between angle brackets.
    return self._print_seq(M.gens, '<', '>')

def _print_FreeModule(self, M):
    # Free module: ring raised to its rank.
    return self._print(M.ring)**self._print(M.rank)

def _print_ModuleImplementedIdeal(self, M):
    # Ideal: unwrap the single-entry generator rows, angle brackets.
    return self._print_seq([x for [x] in M._module.gens], '<', '>')

def _print_QuotientRing(self, R):
    # Quotient ring: ring over its base ideal as a fraction.
    return self._print(R.ring) / self._print(R.base_ideal)

def _print_QuotientRingElement(self, R):
    # Coset representative plus the ideal.
    return self._print(R.data) + self._print(R.ring.base_ideal)

def _print_QuotientModuleElement(self, m):
    # Coset representative plus the killed submodule.
    return self._print(m.data) + self._print(m.module.killed_module)

def _print_QuotientModule(self, M):
    # Quotient module: base module over killed submodule as a fraction.
    return self._print(M.base) / self._print(M.killed_module)

def _print_MatrixHomomorphism(self, h):
    """Module homomorphism: its matrix, centered vertically, followed
    by ``: domain --> codomain``."""
    matrix = self._print(h._sympy_matrix())
    matrix.baseline = matrix.height() // 2
    pform = prettyForm(*matrix.right(' : ', self._print(h.domain),
        ' %s> ' % hobj('-', 2), self._print(h.codomain)))
    return pform
def _print_BaseScalarField(self, field):
    # Coordinate function: the coordinate's own name.
    string = field._coord_sys._names[field._index]
    return self._print(pretty_symbol(string))

def _print_BaseVectorField(self, field):
    # Coordinate vector field: partial-differential symbol subscripted
    # by the coordinate name.
    s = U('PARTIAL DIFFERENTIAL') + '_' + field._coord_sys._names[field._index]
    return self._print(pretty_symbol(s))

def _print_Differential(self, diff):
    """Differential form: double-struck ``d`` followed by either the
    coordinate name (for base scalar fields) or the parenthesized
    field expression."""
    field = diff._form_field
    if hasattr(field, '_coord_sys'):
        string = field._coord_sys._names[field._index]
        return self._print(u('\N{DOUBLE-STRUCK ITALIC SMALL D} ') + pretty_symbol(string))
    else:
        pform = self._print(field)
        pform = prettyForm(*pform.parens())
        return prettyForm(*pform.left(u("\N{DOUBLE-STRUCK ITALIC SMALL D}")))

def _print_Tr(self, p):
    """Trace: ``Tr(arg)`` using the runtime class name."""
    #TODO: Handle indices
    pform = self._print(p.args[0])
    pform = prettyForm(*pform.left('%s(' % (p.__class__.__name__)))
    pform = prettyForm(*pform.right(')'))
    return pform
def pretty(expr, **settings):
    """Returns a string containing the prettified form of expr.

    For information on keyword arguments see pretty_print function.
    """
    printer = PrettyPrinter(settings)

    # XXX: this is an ugly hack, but at least it works -- the global
    # unicode flag must track this printer's setting while it runs
    previous_flag = pretty_use_unicode(printer._settings['use_unicode'])
    try:
        return printer.doprint(expr)
    finally:
        pretty_use_unicode(previous_flag)
def pretty_print(expr, **settings):
    """Prints expr in pretty form.

    pprint is just a shortcut for this function.

    Parameters
    ==========

    expr : expression
        the expression to print

    wrap_line : bool, optional
        line wrapping enabled/disabled, defaults to True

    num_columns : int or None, optional
        number of columns before line breaking (default to None which reads
        the terminal width), useful when using SymPy without terminal.

    use_unicode : bool or None, optional
        use unicode characters, such as the Greek letter pi instead of
        the string pi.

    full_prec : bool or string, optional
        use full precision. Default to "auto"

    order : bool or string, optional
        set to 'none' for long expressions if slow; default is None
    """
    print(pretty(expr, **settings))

# conventional short alias
pprint = pretty_print
def pager_print(expr, **settings):
    """Prints expr using the pager, in pretty form.

    This invokes a pager command using pydoc. Lines are not wrapped
    automatically. This routine is meant to be used with a pager that allows
    sideways scrolling, like ``less -S``.

    Parameters are the same as for ``pretty_print``. If you wish to wrap lines,
    pass ``num_columns=None`` to auto-detect the width of the terminal.
    """
    from pydoc import pager
    from locale import getpreferredencoding
    if 'num_columns' not in settings:
        settings['num_columns'] = 500000  # disable line wrap
    # pydoc.pager historically expects bytes in the locale encoding
    pager(pretty(expr, **settings).encode(getpreferredencoding()))
|
/*
* Project: Mapper 1.0
* Description: Responsive html map area highlighter with area selection
* Author:
* License:
*/
// the semi-colon before function invocation is a safety net against concatenated
// scripts and/or other plugins which may not be closed properly.
;(function ( $, window, document, undefined ) {
// undefined is used here as the undefined global variable in ECMAScript 3 is
// mutable (ie. it can be changed by someone else). undefined isn't really being
// passed in so we can ensure the value of it is truly undefined. In ES5, undefined
// can no longer be modified.
// window is passed through as local variable rather than global
// as this (slightly) quickens the resolution process and can be more efficiently
// minified (especially when both are regularly referenced in your plugin).
// Create the defaults once
// Plugin registration name and the (legacy) default option bag.
var pluginName = 'mapoid',
    defaults = {
        propertyName: "value"
    };
// The actual plugin constructor
// The actual plugin constructor.
// `element` is the <map> DOM node the plugin is attached to; `options`
// is the caller-supplied option object (merged with defaults in _init).
function mapoidPlugin( element, options ) {
    //this.element = element;
    // jQuery has an extend method which merges the contents of two or
    // more objects, storing the result in the first object. The first object
    // is generally empty as we don't want to alter the default options for
    // future instances of the plugin
    this.elem = element;
    this.$elem = $(element);
    // keep a reference to the original jQuery wrapper; _init later
    // repoints this.$elem at the generated wrapper <div>
    this.$elem_original = this.$elem
    this.options = options;

    // This next line takes advantage of HTML5 data attributes
    // to support customization of the plugin on a per-element
    // basis. For example,
    // <div class=item' data-plugin-options='{"message":"Goodbye World!"}'></div>
    //this.metadata = this.$elem.data( 'plugin-options' );
    this.metadata = this.$elem.data( );
    this._init();
}
//Plugin.prototype =
mapoidPlugin.prototype =
{
// Built-in option defaults; user options and data-* attributes
// override these in _init. The false-valued entries are optional
// event callbacks: (event, areaElement, areaId, selectedAreas).
defaults: {
    width : 600,
    strokeColor: 'black',
    strokeWidth: 2,
    fillColor: 'yellow',
    fillOpacity: 0.25,
    fadeTime: 500,
    selectedArea: false,   // area id(s) to pre-select on load
    selectOnClick: true,   // clicking an area toggles its selection
    click:false,
    dblclick:false,
    mousedown:false,
    mouseup:false,
    mouseover:false,
    mouseout:false,
    mousemove:false,
    mouseenter:false,
    mouseleave:false,
    hoverIn:false,
    hoverOut:false,
    select:false,
    deselect:false
},
// UI strings, exposed as functions so they can be overridden per locale.
lang: {
    textSelectAll: function () { return "Select all"; }
},
// One-time setup: merge configuration, locate the <img> bound to this
// <map> via its usemap attribute, wrap both in a positioning <div>,
// then (once the image's natural size is known) scale the map, wire
// events, and re-scale on window resize.
_init: function() {
    // Introduce defaults that can be extended either
    // globally or using an object literal.
    this.config = $.extend({}, this.defaults, this.options,
    this.metadata);

    this.map = this.$elem
    this.selectedAreas = []   // ids of currently selected areas

    var self = this
    var mapName = self.$elem.attr('name')
    // the image that references this map through usemap="#<name>"
    self.image = $(document).find("img[usemap='#"+mapName+"']")

    // wrap map + image together so canvases can be absolutely
    // positioned over the image
    var wrapper = $("<div class='wrp'></div>")
    $("map[name="+mapName+"]").add("img[usemap='#"+mapName+"']").wrapAll(wrapper);

    // NOTE: from here on $elem is the wrapper div, not the <map>
    this.$elem = this.$elem.closest('div')

    // remember whether the map was hidden at init time; a hidden map
    // has no measurable size, so coords are reloaded on first hover
    self.mapHidden = ( self.mapIsVisible() ) ? false : true

    self._onImgLoad( self.image, function(img){
        // pin the image's natural size into its width/height
        // attributes so _redrawCoords can compute the scale factor
        self.image.attr('width', img.width )
        self.image.attr('height', img.height )

        self._loadMap();
        self._initEvents();
        $(window).resize(function() {
            // rescale coords + redraw selections on viewport changes
            self._reloadAreas()
        });
    })

    return this;
},
/**
 * Returns a random integer between min (inclusive) and max (inclusive)
 * Using Math.round() will give you a non-uniform distribution!
 */
_getRandomInt: function(min, max) {
    return Math.floor(Math.random() * (max - min + 1)) + min;
},

// True only when both the <map> and its image are rendered (`:visible`);
// coordinate math is meaningless while either is hidden.
mapIsVisible : function()
{
    var self = this
    return ( self.map.is(':visible') && self.image.is(':visible') )
},
// Prepare the map for drawing: make the wrapper a positioning context,
// rescale the area coordinates to the displayed image size, and apply
// any pre-selected areas from config.selectedArea. No-op if the image
// has no usemap attribute.
_loadMap : function(){
    var self = this

    var i = self._getRandomInt(1,65000)
    var j = self._getRandomInt(1,100)
    var $this = self.image

    var cla = $this.attr('class')

    // canvases are positioned absolutely inside this wrapper
    self.$elem.css({'position':'relative'});

    if (typeof(self.image.attr('usemap')) == 'undefined')
        return;

    self._redrawCoords()

    if(self.config.selectedArea)
        self._selectAreas( self.config.selectedArea )
},
// Rescale every <area>'s coords to the image's current displayed
// width. The pristine coords (for the natural image size) are cached
// in data('coords') on first run, so repeated rescaling never
// accumulates rounding error.
_redrawCoords : function()
{
    var self = this

    var c = 'coords'
    var w = self.image.attr('width')     // natural width (set in _init)
    var h = self.image.attr('height')
    var wd = self.image.width()          // displayed width

    // displayed size as a percentage of natural size
    var proportion = (wd / w) * 100;

    self.map.find('area').each(function()
    {
        var $this = $(this);
        // cache the original coords once
        if (!$this.data(c))
            $this.data(c, $this.attr(c));
        var coords = $this.data(c).split(','),
        coordsPercent = new Array(coords.length);
        for (var i = 0; i < coordsPercent.length; ++i)
        {
            coordsPercent[i] = ((coords[i])*proportion) / 100
        }
        //Change old coordinates to new
        $this.attr(c, coordsPercent.toString());
    });
},
// Dispatch a configured user callback for `event` (e.g. 'click').
// Returns true when a callback was configured (even if it was not
// callable -- a warning is logged in that case), false otherwise.
// Callbacks receive (domEvent, $area, areaId, selectedAreas).
_eventCallback : function(event, e, el){
    var self = this
    if(self.config[event])
    {
        var id = el.data('id')
        if(typeof self.config[event] === 'function')
            self.config[event]( e, el, id, self.selectedAreas )
        else
            console.log( event + ' nie je funkcia!')
        return true
    }
    return false
},
// Lazy fix-up: if the map was hidden when it was initialised, its
// coords were never scaled; do it now (called on first mouseover).
_reloadAreasIfHidden : function(){
    var self = this
    if(self.mapHidden)
    {
        self._reloadAreas()
        self.mapHidden = false;
    }
},

// Drop all drawn canvases, rescale area coords, and redraw every
// selected area. Bails out (and flags mapHidden) while the map is
// not visible, since measurements would be zero.
_reloadAreas : function(){
    var self = this
    self.$elem.find('canvas').remove();
    if( !self.mapIsVisible() )
    {
        self.mapHidden = true
        console.log('mapa nie je vidiet')
        return
    }
    self._redrawCoords()
    var s = self.$elem.find('area.selected').each(function(k,v){
        var id = $(this).data('id')
        var fixed = $(this).hasClass('fixed')
        self._mapover( $(this), true, fixed )
    })
},
// Wire all delegated DOM events on the <map>'s areas. Each handler
// first offers the event to a user callback via _eventCallback; click
// additionally implements navigation vs. toggle-selection behaviour.
_initEvents : function(){
    var self = this
    self.map.on('mouseover', 'area', function(e){
        // hidden-at-init maps get their coords fixed on first hover
        self._reloadAreasIfHidden()
        self._eventCallback('mouseover', e, $(this))
        // draw a temporary highlight canvas for the hovered area
        self._mapover( $(this), false, false, true )
    })
    self.map.on('mouseout', 'area', function(e){
        self._eventCallback('mouseout', e, $(this))
        // fade out the temporary hover highlight
        self.$elem.find('canvas.temp').fadeOut( self.config.fadeTime )
    })
    self.map.on('click', 'area', function(e){
        e.preventDefault()
        // no click callback + selection disabled: behave like a link
        if(!self._eventCallback('click', e, $(this)) && !self.config.selectOnClick)
        {
            var href = $(this).attr('href')
            if(href)
                location.href = href;
        }
        else if(self.config.selectOnClick)
        {
            // toggle this area's membership in selectedAreas
            var id = parseInt( $(this).data('id') )
            var selected = self._inArray(self.selectedAreas, id)
            var fixed = $(this).hasClass('fixed')
            {
                if(!selected)
                {
                    self._selectArea(id,fixed,false)
                }
                else
                {
                    self._deselectArea( id, false )
                }
            }
        }
        return false
    })
    self.map.on('mousedown', 'area', function(e){
        self._eventCallback('mousedown', e, $(this))
    })
    self.map.on('mouseup', 'area', function(e){
        self._eventCallback('mouseup', e, $(this))
    })
    self.map.on('dblclick', 'area', function(e){
        self._eventCallback('dblclick', e, $(this))
    })
    self.map.on('mousemove', 'area', function(e){
        self._eventCallback('mousemove', e, $(this))
    })
    self.map.on('mouseenter', 'area', function(e){
        self._eventCallback('mouseenter', e, $(this))
    })
    self.map.on('mouseleave', 'area', function(e){
        self._eventCallback('mouseleave', e, $(this))
    })
    self.map.find('area').hover(function(e){
        self._eventCallback('hoverIn', e, $(this))
    }, function(e){
        self._eventCallback('hoverOut', e, $(this))
    })
},
_removeItem: function (array, item)
{
for (var i = 0; i < array.length; i++)
{
if(array[i]===item){
array.splice(i,1);
break;
}
}
},
_inArray: function (array, item)
{
for (var i = 0; i < array.length; i++)
{
if(array[i]===item){
return true;
break;
}
}
return false;
},
// Load the picture referenced by `image` into a fresh Image object and
// invoke `cb(img)` once it has finished loading.
// `image` is expected to be a jQuery-wrapped element (image[0] is read).
// NOTE(review): _mapover passes area.data('img') here, which looks like a
// plain string -- string[0] is a character with no getAttribute/src, so the
// loader src would be undefined in that path; verify against callers.
_onImgLoad: function ( image, cb )
{
    var self = this
    var img = new Image;
    //img.src = areaImg;
    // Prefer the raw src attribute (original URL) over the resolved .src.
    img.src = ( image[0].getAttribute ? image[0].getAttribute("src") : false) || image[0].src;
    img.onload = function() {
        cb(img)
    }
    // If the image is already cached, onload may never fire in very old
    // browsers because src is assigned before the handler -- TODO confirm.
    //_getOrigImgSize : function( img ){
    //var t = new Image();
    //t.src = (img.getAttribute ? img.getAttribute("src") : false) || img.src;
    //return {'w':t.width, 'h':t.height};
    //},
},
// Gate for public API calls: run `cb` only after the map image is visible
// and loaded. NOTE(review): when the map is NOT visible, `cb` is never
// invoked (no retry is scheduled) -- public calls on a hidden map are
// silently dropped; confirm this is intended.
_onMapLoaded: function(cb)
{
    var self = this
    //if( this.mapLoaded && !this.coordsLoaded && self.mapIsVisible() )
    //{
    // self._redrawCoords()
    // setTimeout(function(){
    // console.log('0000')
    // self._onMapLoaded( cb )
    //},10)
    //}
    //else if(this.mapLoaded && this.coordsLoaded)
    if( self.mapIsVisible() )
    {
        self._onImgLoad( self.image, function(img){
            //if(self.mapHidden)
            {
                //self._redrawCoords()
                //self.mapHidden = false;
            }
            cb()
            //self._loadMap();
            //self._initEvents();
            //self.imageLoaded = true
        })
    }
    // NOTE(review): the `else` below is commented out, so the following
    // bare block ALWAYS executes. It currently contains only comments, so
    // this is harmless -- but uncommenting any line inside would run it
    // unconditionally.
    //else
    {
        //console.log('Public function denied - hidden')
        //self._onImgLoad( self.image.attr('src'), function(img){
        //if( self.mapIsVisible() )
        //self._loadMap();
        //})
        //setTimeout(function(){
        //console.log('0000')
        //self._onMapLoaded( cb )
        //},10)
    }
},
// Draw a highlight layer (an absolutely-positioned <canvas>) over one
// <area> of the image map.
//   area       - jQuery-wrapped <area> element to highlight
//   selected   - true: persistent highlight (area gains class 'selected');
//                false: transient hover highlight (canvas gains class 'temp')
//   fixed      - when true, also tag the area with class 'fixed'
//   removeTemp - when true, drop any previous transient canvases first
_mapover : function( area, selected, fixed, removeTemp ){
    var self = this
    var area_id = parseInt( area.data('id') )
    // Areas without a numeric data-id get a random one so their canvas
    // layers can be matched back to them later.
    if( isNaN(area_id) )
    {
        area_id = self._getRandomInt(1,99999)
        area.data('id', area_id)
        //alert(id)
    }
    if(removeTemp)
        self.$elem.find('canvas.temp').remove()
    //cId = this._getRandomInt(1,60000)
    var $img = self.image,
        $bg_fill = $img.data('bg_fill'),
        w = $img.width(),
        h = $img.height(),
        shapeSort = area.attr('shape');
    // Canvas is sized to the rendered image so area coords line up 1:1.
    var canvas = $('<canvas data-id="'+area_id+'" width="'+w+'" height="'+h+'"></canvas>');
    //$img.parent().append(makeCanvas);
    //alert(size.h )
    //canvas.width(w)
    //canvas.height(h)
    $img.parent().append(canvas);
    //$('body').prepend(canvas);
    //$img.parent().append('<canvas>blablabla</canvas>');
    //$img.parent().append('<>');
    // 'temp' marks hover-only layers that mouseout fades away.
    if(!selected)
        canvas.addClass('temp')
    //$('#can'+cId).addClass('temp')
    else
        area.addClass('selected')
    if(fixed)
        area.addClass('fixed')
    //var hmap = $(mlAreaID).closest('.hmap')
    //this.$elem.find('.tarea'+tarea_id+' .ttitle').addClass('hovered')
    // NOTE(review): bg_fill/bg_opacity are read but never used below.
    var bg_fill = self.$elem.find('.fill').css('background-color')
    var bg_opacity = self.$elem.find('.fill').css('opacity')
    //console.log( bg_opacity )
    //alert(area)
    //self.$elem.css({'width':'auto','height':'auto',})
    //var o = self.$elem.offset();
    //var x = self.$elem.position();
    //alert(o.top)
    //alert(x.top)
    //alert("Top: " + x.top + " Left: " + x.left);
    /*
     *
     * pointer-events:none makes canvas layer invisible to mouse events in most modern browsers
     * allowing map area to still be clickable through the canvas layer.
     * For IE browsers less than 11 the highlighting is disabled in DrawIt function.
     *
     */
    //canvas.parent().css({'position':'relative'})
    canvas.css({
        'width': w+'px',
        'height': h+'px',
        'display': 'block',
        'position':'absolute',
        'top': '0px',
        'left': '0px',
        'pointer-events':'none',
        //'z-index':'9999'
    });
    //var $canvas = document.getElementById('can'+cId),
    var $canvas = canvas[0],
        context = $canvas.getContext('2d');
    // context.clearRect(0, 0, $canvas.width, $canvas.height);
    //$canvas.width = w;
    //$canvas.height = h;
    var cox,coy
    // Split the area's coords into x (even indices) and y (odd indices).
    var coords = area.attr('coords').split(','),
        cox = [];
    coy = [];
    for(var i = 0; i < coords.length; i++) {
        if(i % 2 == 0) {
            cox.push(coords[i]);
        }
        else {
            coy.push(coords[i]);
        }
    }
    //alert(coords)
    /* Shape coordinate apportioning */
    var showCanvas = true
    var area_img = area.data('img');
    var mainImgSrc = self.image.attr('src')
    //alert(self.image.attr('src'))
    //self.onImgLoad(mainImgSrc, function(im){
    // Polygon areas with a data-img get that picture drawn into their
    // bounding box instead of a flat color fill.
    if(area_img && shapeSort == 'poly')
    {
        showCanvas = false
        //area_img = base_url+area_img;
        self._onImgLoad(area_img, function(img){
            // Bounding box of the polygon = drawing rectangle.
            var min_x = Math.min.apply(null, cox);
            var min_y = Math.min.apply(null, coy);
            var max_x = Math.max.apply(null, cox);
            var max_y = Math.max.apply(null, coy);
            var f_w = max_x - min_x
            var f_h = max_y - min_y
            //console.log(f_w+" "+f_h)
            context.drawImage(img,min_x,min_y, f_w, f_h);
            canvas.hide().fadeIn( self.config.fadeTime )
        })
    }
    else
    {
        // Flat-color fill per configured style.
        context.fillStyle = self.config.fillColor;
        context.lineWidth = self.config.strokeWidth;
        context.strokeStyle = self.config.strokeColor;
        context.globalAlpha = self.config.fillOpacity
        if(shapeSort == 'rect')
        {
            // rect coords are (x1,y1,x2,y2); canvas wants (x,y,w,h).
            context.fillRect(cox[0],coy[0],cox[1]-cox[0],coy[1]-coy[0]);
            if(self.config.strokeWidth > 0)
                context.strokeRect(cox[0],coy[0],cox[1]-cox[0],coy[1]-coy[0]);
        }
        else
        {
            context.beginPath();
            if(shapeSort == 'poly' )
            {
                context.moveTo(cox[0],coy[0]);
                for(var j = 1; j < cox.length; j++) {
                    context.lineTo(cox[j],coy[j]);
                }
                context.closePath();
            }
            else if(shapeSort == 'circle')
            {
                // circle coords are (cx,cy,r); r lands in cox[1].
                context.arc(cox[0],coy[0],cox[1],0,Math.PI*2,true);
                //context.fillStyle = 'rgba('+Math.floor((Math.random() * 255) + 1)+','+Math.floor((Math.random() * 255) + 1)+','+Math.floor((Math.random() * 100) + 1)+',0.3)';
            }
            if(self.config.fillColor)
                context.fill();
            if(self.config.strokeWidth > 0)
                context.stroke();
        }
        if(showCanvas)
            canvas.hide().fadeIn( self.config.fadeTime )
    }
    //})
},
// Mark the area with data-id `id` as selected: tag the <area> and any
// existing canvas layers, record the id in selectedAreas, optionally draw
// a fresh highlight, and fire the 'select' callback.
//   fixed      - when strictly true, the selection cannot be toggled off
//   showCanvas - when true, paint a new persistent canvas via _mapover
_selectArea: function( id, fixed, showCanvas )
{
    var self = this
    id = parseInt( id )
    var el = self.$elem.find("area[data-id='"+id+"']")
    var selected = el.hasClass('selected')
    var cnv = self.$elem.find('canvas[data-id="'+id+'"]')
    //alert(self.mapHidden)
    if( self.mapIsVisible() )
    {
        self._reloadAreasIfHidden()
    }
    else
        // Remember to redraw once the map is shown again.
        self.mapHidden = true
    //{
    //console.log('ID '+id+' nemozem vybrat, mapa nie je viditelna!')
    //return false;
    //}
    if(selected)
    {
        console.log('ID '+id+' je uz vybrane!')
        return false;
    }
    // Anything but the literal boolean true counts as not-fixed.
    if(fixed !== true)
        fixed = false
    if(el.length > 0)
    {
        // Promote the most recent temp (hover) canvas to permanent.
        self.$elem.find('canvas[data-id="'+id+'"]:last').removeClass('temp')
        el.addClass('selected')
        cnv.removeClass('temp')
        self.selectedAreas.push(id)
        if(showCanvas)
            self._mapover( el, true, fixed )
        self._eventCallback('select', false, el )
    }
    else
        console.log('Plocha s ID '+id+' na mape neexistuje!')
},
// Undo a selection made by _selectArea: demote canvases back to 'temp',
// optionally fade them out, clear the 'selected' class, drop the id from
// selectedAreas and fire the 'deselect' callback.
// Areas marked 'fixed' are refused (they stay selected).
_deselectArea: function( id, hideCanvas )
{
    var self = this
    id = parseInt( id )
    var el = self.$elem.find("area[data-id='"+id+"']")
    var cnv = self.$elem.find('canvas[data-id="'+id+'"]')
    var selected = el.hasClass('selected')
    var fixed = el.hasClass('fixed')
    if(!selected || fixed)
    {
        //console.log('ID '+id+' je uz zrusene!')
        return false;
    }
    //if(fixed !== true)
    //fixed = false
    if(el.length > 0)
    {
        cnv.addClass('temp')
        // Fade the newest canvas when asked to, or when several layers
        // have stacked up for this area.
        if( cnv.length > 1 || hideCanvas )
        {
            self.$elem.find('canvas[data-id="'+id+'"]:last').fadeOut( self.config.fadeTime )
        }
        el.removeClass('selected')
        self._removeItem( self.selectedAreas, id )
        self._eventCallback( 'deselect', false, el )
    }
    else
        console.log('Plocha s ID '+id+' na mape neexistuje!')
},
// Select a batch of areas, either all at once (sec falsy) or staggered
// `sec` milliseconds apart. `obj` is an array of {id, fixed} records.
// `callback(map, selectedAreas)` fires once after the last selection.
_selectAreas: function( obj, sec, callback )
{
    var self = this
    // Running offset: each scheduled selection fires `sec` ms after the
    // previous one (sec_init grows by sec per item).
    var sec_init = sec
    var length=obj.length
    // Named helper so the loop variables are captured per-call, not
    // shared across iterations (classic var-in-loop closure pitfall).
    function doSetTimeout(i,id,fixed) {
        setTimeout(function() {
            self._selectArea(id,fixed,true)
            self._doCallback(i,length,sec,callback)
        }, sec_init);
        sec_init += sec
    }
    //console.log('l- '+obj.length)
    for(var i=0; i<length; i++)
    {
        var id = parseInt(obj[i].id)
        // Only the literal boolean true marks an entry as fixed.
        var fixed = (obj[i].fixed === true ? true : false )
        //var fixed = val.fixed
        //alert(id)
        //var isLast = (i == length-1)
        //alert(id + ' is last')
        if(sec)
        {
            doSetTimeout(i,id,fixed)
            //setTimeout(function(){
            //console.log('ssssss')
            //if(isLast)
            //callback()
            //}, sec_init)
            //sec_init += sec
        }
        else
        {
            // No delay requested: select synchronously.
            self._selectArea(id,fixed,true)
            self._doCallback(i,length,sec,callback)
        }
    }
    //$.each( obj, function(key, val){
    //var id = parseInt(key)
    //var fixed = val.fixed
    //alert(val)
    //})
},
_doCallback: function(i,length,sec,callback) {
var self = this
if( i === length-1 && typeof callback === 'function')
{
if(sec>0)
setTimeout(function(){
callback( self.map, self.selectedAreas )
}, sec)
else
callback( self.map, self.selectedAreas )
}
},
// Deselect a batch of areas, either all at once (sec falsy) or staggered
// `sec` milliseconds apart. Mirrors _selectAreas; 'fixed' areas are left
// selected (refused inside _deselectArea).
_deselectAreas: function( obj, sec, callback )
{
    var self = this
    // Running offset for the staggered schedule (grows by sec per item).
    var sec_init = sec
    var length=obj.length
    // Named helper so each timeout captures its own i/id.
    function doSetTimeout(i,id) {
        setTimeout(function() {
            self._deselectArea( id, true )
            self._doCallback(i,length,sec,callback)
        }, sec_init);
        sec_init += sec
    }
    //$.each( obj, function(key, val){
    for(var i=0; i<length; i++)
    {
        var id = parseInt(obj[i].id)
        //var fixed = (obj[i].fixed === true ? true : false )
        //var id = parseInt(key)
        //var fixed = val.fixed
        if(sec)
        {
            doSetTimeout(i,id)
            //setTimeout(function(){
            //self._selectArea(id,fixed,true)
            //self._deselectArea( id, true )
            //}, sec_init)
            //sec_init += sec
        }
        else
        {
            self._deselectArea( id, true )
            self._doCallback(i,length,sec,callback)
        }
    }
},
isSelected: function( id )
{
var self = this
if ( self._inArray( self.selectedAreas, id) )
return true
return false
},
isFixed: function( id )
{
var self = this
var el = self.$elem.find("area[data-id='"+id+"']")
if ( el.hasClass('fixed') )
return true
return false
},
// Public: the list of currently selected area ids.
// Note this is a live reference to internal state, not a copy.
getSelected: function( )
{
    return this.selectedAreas;
},
getAll: function( )
{
var self = this
var ids = []
self.$elem.find('area').each( function(key, val){
var el = $(this)
var id = parseInt( el.data('id') )
//var fixed = val.fixed
//alert(val)
ids.push( {'id':id} )
//ids[id] = {}
//self._selectArea(id,fixed,true)
})
return ids;
},
selectOne: function( id, fixed )
{
var self = this
self._onMapLoaded( function(){
self._selectArea(id,fixed,true)
})
},
selectAll: function( sec, callback )
{
var self = this
var obj = self.getAll()
//alert(obj)
self.selectMany( obj, sec, callback )
},
deselectAll: function( sec, callback )
{
var self = this
var obj = self.getAll()
//alert(obj)
self.deselect( obj, sec, callback )
},
selectMany: function( obj, sec, callback )
{
var self = this
self._onMapLoaded( function(){
self._selectAreas(obj, sec, callback)
//callback()
})
//alert(event)
//alert(id)
},
deselect: function( obj, sec, callback )
{
var self = this
self._deselectAreas(obj, sec, callback)
},
removeFixed: function( obj )
{
var self = this
//self._deselectAreas(obj, sec, callback)
for(var i=0; i<obj.length; i++)
{
var id = parseInt( obj[i].id )
var el = self.$elem.find("area[data-id='"+id+"']")
var cnv = self.$elem.find('canvas[data-id="'+id+'"]')
cnv.remove()
el.removeClass('selected').removeClass('fixed')
self._removeItem( self.selectedAreas, id )
//alert(obj[i].id)
}
},
changeConfig: function( key, value )
{
var self = this
self.config[ key ] = value
}
/*
mlDrawCanvas : function(){
//var mlImageClass = document.getElementsByClassName(imageClass)[0],
var $image = this.image,
//$image = $("."+imageClass),
w = $image.width(),
h = $image.height();
if($image.siblings('canvas')) {
$image.siblings('canvas').remove();
}
mlGetMapName = $image.attr('usemap').replace('#',''),
theMap = $('map[name="'+mlGetMapName+'"]').addClass('mlHighlight'),
wrapped = $('<div id="wrap'+mlGetMapName+'image"></div>');
//wrapped = $('<div class="wrapper"></div>');
if($image.parent('#wrap'+mlGetMapName+'image').length) {
$image.unwrap();
}
$image.wrap(wrapped);
var $wrap = $('#wrap'+mlGetMapName+'image');
var opts = {
'position':'relative',
'margin':'0 auto',
'line-height':'0',
'width':w
}
if($wrap.parent().width() < $image.attr('width'))
opts.width = 'auto'
this.$elem.css(opts);
var index = 0;
//Adds function atts to all areas on image map
theMap.children('area').each(function() {
var $this = $(this);
index++;
$this.attr('id',mlGetMapName+'area'+index);
// World Map add-ons //
$this.attr('target','_blank');
var atTitle = $this.attr('alt');
//$this.attr('href') ? $this.attr('href') : $this.attr('href', 'http://en.wikipedia.org/wiki/'+atTitle);
$this.attr('href') ? $this.attr('href') : $this.attr('href', '');
// End World Map add-ons //
$this.attr('title') ? $this.attr('title') : $this.attr('title',atTitle);
//var cb = 'mapover('+$this.attr('id')+');'
//$this.attr('onmouseover','mapover('+$this.attr('id')+');').attr('onmouseout','mapout('+$this.attr('id')+');');
//$this.attr('onmouseover', cb).attr('onmouseout');
});
//var a = theMap.children('area:first')
//var b = theMap.children('area:last')
//b.mouseover(); mouseover
}
*/
}
// You don't need to change something below:
// A really lightweight plugin wrapper around the constructor,
// preventing against multiple instantiations and allowing any
// public function (ie. a function whose name doesn't start
// with an underscore) to be called via the jQuery plugin,
// e.g. $(element).defaultPluginName('functionName', arg1, arg2)
$.fn[pluginName] = function ( options ) {
    // Keep the raw arguments so method calls can forward extra parameters.
    var args = arguments;
    // Is the first parameter an object (options), or was omitted,
    // instantiate a new instance of the plugin.
    if (options === undefined || typeof options === 'object') {
        return this.each(function () {
            // Only allow the plugin to be instantiated once,
            // so we check that the element has no plugin instantiation yet
            if (!$.data(this, 'plugin_' + pluginName)) {
                // if it has no instance, create a new one,
                // pass options to our plugin constructor,
                // and store the plugin instance
                // in the elements jQuery data object.
                $.data(this, 'plugin_' + pluginName, new mapoidPlugin( this, options ));
            }
        });
    // If the first parameter is a string and it doesn't start
    // with an underscore or "contains" the `init`-function,
    // treat this as a call to a public method.
    } else if (typeof options === 'string' && options[0] !== '_' && options !== 'init') {
        // Cache the method call
        // to make it possible
        // to return a value
        var returns;
        this.each(function () {
            var instance = $.data(this, 'plugin_' + pluginName);
            // Tests that there's already a plugin-instance
            // and checks that the requested public method exists
            if (instance instanceof mapoidPlugin && typeof instance[options] === 'function') {
                // Call the method of our plugin instance,
                // and pass it the supplied arguments (minus the method name).
                returns = instance[options].apply( instance, Array.prototype.slice.call( args, 1 ) );
            }
            // Allow instances to be destroyed via the 'destroy' method
            if (options === 'destroy') {
                $.data(this, 'plugin_' + pluginName, null);
            }
        });
        // If the earlier cached method
        // gives a value back return the value,
        // otherwise return this to preserve chainability.
        // Note: for multi-element sets, `returns` holds the LAST element's result.
        return returns !== undefined ? returns : this;
    }
};
}(jQuery, window, document));
|
/*
* This header is generated by classdump-dyld 1.0
* on Tuesday, November 5, 2019 at 1:50:19 PM Mountain Standard Time
* Operating System: Version 13.0 (Build 17J586)
* Image Source: /Applications/Podcasts.app/Podcasts
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos.
*/
#import <Podcasts/MZUniversalPlaybackPositionDataSource.h>
@class NSString;
// Declaration only (classdump-generated): the implementation lives inside
// the Podcasts binary. Adopts MZUniversalPlaybackPositionDataSource to sync
// per-episode playback positions (UPP) across devices.
@interface MTUniversalPlaybackPositionDataSource : NSObject <MZUniversalPlaybackPositionDataSource>
@property (readonly) unsigned long long hash;
@property (readonly) Class superclass;
@property (copy,readonly) NSString * description;
@property (copy,readonly) NSString * debugDescription;
+(void)updateEpisode:(id)arg1 withUPPMetadata:(id)arg2 ;
+(void)setUppLastSyncTime:(double)arg1 ;
+(id)mediaItemIdentifierForEpisode:(id)arg1 ;
+(void)resetUppLastSyncTimeAndDomainVersion;
+(double)uppLastSyncTime;
-(void)performBlockAndWait:(/*^block*/id)arg1 ;
-(void)cancelUniversalPlaybackPositionTransaction:(id)arg1 ;
-(id)beginTransactionWithItemsToSyncEnumerationBlock:(/*^block*/id)arg1 ;
-(void)commitUniversalPlaybackPositionTransaction:(id)arg1 domainVersion:(id)arg2 metadataEnumerationBlock:(/*^block*/id)arg3 ;
@end
|
import React from 'react'
import renderer from 'react-test-renderer'
import serializer from 'jest-glamor-react'
import { keyframes, sheet } from 'emotion/macro'
import styled from 'react-emotion/macro'
expect.addSnapshotSerializer(serializer(sheet))
// Snapshot tests for emotion's `keyframes` when built via the babel macro.
describe('keyframes - macro', () => {
  test('renders', () => {
    // A standard "bounce" animation; the serialized CSS is snapshot-checked.
    const bounce = keyframes`
      from, 20%, 53%, 80%, to {
        animation-timing-function: cubic-bezier(0.215, 0.610, 0.355, 1.000);
        transform: translate3d(0,0,0);
      }
      40%, 43% {
        animation-timing-function: cubic-bezier(0.755, 0.050, 0.855, 0.060);
        transform: translate3d(0, -30px, 0);
      }
      70% {
        animation-timing-function: cubic-bezier(0.755, 0.050, 0.855, 0.060);
        transform: translate3d(0, -15px, 0);
      }
      90% {
        transform: translate3d(0,-4px,0);
      }
    `
    const H1 = styled.h1`animation: ${bounce} 2s linear infinite;`
    const tree = renderer.create(<H1>hello world</H1>).toJSON()
    expect(tree).toMatchSnapshot()
  })
  test('keyframes with interpolation', () => {
    // JS values interpolated inside the keyframes template must survive
    // the macro transform.
    const endingRotation = '360deg'
    const H1 = styled.h1`
      animation: ${keyframes`
        from {
          transform: rotate(0deg);
        }
        to {
          transform: rotate(${endingRotation});
        }
      `} 2s linear infinite;
    `
    const tree = renderer.create(<H1>hello world</H1>).toJSON()
    expect(tree).toMatchSnapshot()
    // Also snapshot the raw injected stylesheet text, not just the tree.
    expect(
      sheet.tags.map(tag => tag.textContent || '').join('')
    ).toMatchSnapshot()
  })
})
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage Cloud Memorystore Redis resources."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Redis(base.Group):
  """Manage Cloud Memorystore Redis resources."""

  # Groups this command under the Storage section of `gcloud help`.
  category = base.STORAGE_CATEGORY

  def Filter(self, context, args):
    # TODO(b/190538727): Determine if command group works with project number
    base.RequireProjectID(args)
    # Neither value is needed after validation; deleted to make that explicit.
    del context, args
|
# Auto-generated placeholder bindings; presumably assigned elsewhere or used
# only as sentinels -- TODO confirm against the generator that emitted them.
name0_1_1_1_1_3_0 = None
name0_1_1_1_1_3_1 = None
name0_1_1_1_1_3_2 = None
name0_1_1_1_1_3_3 = None
name0_1_1_1_1_3_4 = None
|
"""
tasks.docstrings
~~~~~~~~~~~~~~~~
Check salt code base for for missing or wrong docstrings
"""
import ast
import collections
import os
import pathlib
import re
from invoke import task # pylint: disable=3rd-party-module-not-gated
from tasks import utils
# Repo-layout anchors: this file lives one directory below the repo root.
CODE_DIR = pathlib.Path(__file__).resolve().parent.parent
DOCS_DIR = CODE_DIR / "doc"
SALT_CODE_DIR = CODE_DIR / "salt"

# All relative paths below assume the repo root as CWD.
os.chdir(str(CODE_DIR))

# Bidirectional caches between python modules and their sphinx doc stubs,
# populated by build_path_cache() at import time.
python_module_to_doc_path = {}
doc_path_to_python_module = {}

# Packages whose modules must each have a doc stub. Tuples (not lists) so
# they can be passed straight to str.startswith().
check_paths = (
    "salt/auth",
    "salt/beacons",
    "salt/cache",
    "salt/cloud",
    "salt/engine",
    "salt/executors",
    "salt/fileserver",
    "salt/grains",
    "salt/modules",
    "salt/netapi",
    "salt/output",
    "salt/pillar",
    "salt/proxy",
    "salt/queues",
    "salt/renderers",
    "salt/returners",
    "salt/roster",
    "salt/runners",
    "salt/sdb",
    "salt/serializers",
    "salt/states",
    "salt/thorium",
    "salt/tokens",
    "salt/tops",
    "salt/wheel",
)

# Individual files inside check_paths that are exempt from doc checks.
exclude_paths = (
    "salt/cloud/cli.py",
    "salt/cloud/exceptions.py",
    "salt/cloud/libcloudfuncs.py",
)
def build_path_cache():
    """
    Build a python module to doc module cache

    Fills ``python_module_to_doc_path`` (module -> expected .rst stub) and
    ``doc_path_to_python_module`` (existing stub -> module) for every module
    under ``check_paths`` that is not excluded.
    """
    for path in SALT_CODE_DIR.rglob("*.py"):
        path = path.resolve().relative_to(CODE_DIR)
        strpath = str(path)
        # Package markers and out-of-scope/excluded files have no stubs.
        if strpath.endswith("__init__.py"):
            continue
        if not strpath.startswith(check_paths):
            continue
        if strpath.startswith(exclude_paths):
            continue
        parts = list(path.parts)
        stub_path = DOCS_DIR / "ref"
        # Remove salt from parts
        parts.pop(0)
        # Remove the package from parts
        package = parts.pop(0)
        # Remove the module from parts
        module = parts.pop()
        # Doc tree names differ from package names in a couple of cases.
        if package == "cloud":
            package = "clouds"
        if package == "fileserver":
            package = "file_server"
        if package == "netapi":
            # These are handled differently
            if not parts:
                # This is rest_wsgi
                stub_path = (
                    stub_path
                    / package
                    / "all"
                    / str(path).replace(".py", ".rst").replace(os.sep, ".")
                )
            else:
                # rest_cherrypy, rest_tornado
                # All modules of a netapi subpackage share one stub page.
                subpackage = parts.pop(0)
                stub_path = (
                    stub_path
                    / package
                    / "all"
                    / "salt.netapi.{}.rst".format(subpackage)
                )
        else:
            # Default layout: doc/ref/<package>/all/salt.<package>.<mod>.rst
            stub_path = (
                stub_path
                / package
                / "all"
                / str(path).replace(".py", ".rst").replace(os.sep, ".")
            )
        stub_path = stub_path.relative_to(CODE_DIR)
        python_module_to_doc_path[path] = stub_path
        # Reverse mapping only for modules that actually exist on disk.
        if path.exists():
            doc_path_to_python_module[stub_path] = path
build_path_cache()
def build_file_list(files, extension):
    """Resolve user-supplied paths into CODE_DIR-relative paths with the
    given extension; with no input, scan the whole repo for that extension.
    """
    # Unfortunately invoke does not support nargs.
    # We might have been passed --files="foo.py bar.py"
    # Turn that into a list of paths
    _files = []
    for path in files:
        if not path:
            continue
        # Each argument may itself contain several whitespace-separated paths.
        for spath in path.split():
            if not spath.endswith(extension):
                continue
            _files.append(spath)
    if not _files:
        # No explicit files: fall back to every matching file in the repo.
        _files = CODE_DIR.rglob("*{}".format(extension))
    else:
        _files = [pathlib.Path(fname).resolve() for fname in _files]
    _files = [path.relative_to(CODE_DIR) for path in _files]
    return _files
def build_python_module_paths(files):
    """Return the subset of ``files`` that are doc-audited salt modules.

    Filters out package ``__init__`` files, anything outside the audited
    ``check_paths`` packages, and the explicitly excluded files.
    """
    selected = []
    for path in build_file_list(files, ".py"):
        strpath = str(path)
        if strpath.endswith("__init__.py"):
            continue
        if not strpath.startswith(check_paths):
            continue
        if strpath.startswith(exclude_paths):
            continue
        selected.append(path)
    return selected
def build_docs_paths(files):
    """Resolve ``files`` into ``.rst`` doc paths (all repo docs when empty)."""
    return build_file_list(files, ".rst")
@task(iterable=["files"], positional=["files"])
def check_inline_markup(ctx, files):
    """
    Check docstring for :doc: usage

    We should not be using the ``:doc:`` inline markup option when
    cross-referencing locations. Use ``:ref:`` or ``:mod:`` instead.

    This task checks for reference to ``:doc:`` usage.

    See Issue #12788 for more information.

    https://github.com/saltstack/salt/issues/12788

    Returns the number of offending functions (0 == clean).
    """
    # CD into Salt's repo root directory
    ctx.cd(CODE_DIR)

    files = build_python_module_paths(files)

    exitcode = 0
    for path in files:
        # Only top-level function docstrings are inspected (module body scan).
        module = ast.parse(path.read_text(), filename=str(path))
        funcdefs = [node for node in module.body if isinstance(node, ast.FunctionDef)]
        for funcdef in funcdefs:
            docstring = ast.get_docstring(funcdef, clean=True)
            if not docstring:
                continue
            if ":doc:" in docstring:
                utils.error(
                    "The {} function in {} contains ':doc:' usage", funcdef.name, path
                )
                exitcode += 1
    return exitcode
@task(iterable=["files"])
def check_stubs(ctx, files):
    """Verify every audited python module has a sphinx stub file.

    Returns the number of missing stubs (0 == clean).
    """
    # CD into Salt's repo root directory
    ctx.cd(CODE_DIR)

    files = build_python_module_paths(files)

    exitcode = 0
    for path in files:
        strpath = str(path)
        # NOTE: these three filters repeat what build_python_module_paths
        # already applied; kept as a defensive no-op.
        if strpath.endswith("__init__.py"):
            continue
        if not strpath.startswith(check_paths):
            continue
        if strpath.startswith(exclude_paths):
            continue
        stub_path = python_module_to_doc_path[path]
        if not stub_path.exists():
            exitcode += 1
            utils.error(
                "The module at {} does not have a sphinx stub at {}", path, stub_path
            )
    return exitcode
@task(iterable=["files"])
def check_virtual(ctx, files):
    """
    Check if .rst files for each module contains the text ".. _virtual"
    indicating it is a virtual doc page, and, in case a module exists by
    the same name, it's going to be shaddowed and not accessible

    Returns the number of shadowed virtual pages (0 == clean).
    """
    exitcode = 0
    files = build_docs_paths(files)
    for path in files:
        if path.name == "index.rst":
            continue
        contents = path.read_text()
        if ".. _virtual-" in contents:
            try:
                # A virtual page must NOT map back to a real python module;
                # a cache hit here means the module would shadow the page.
                python_module = doc_path_to_python_module[path]
                utils.error(
                    "The doc file at {} indicates that it's virtual, yet, there's a"
                    " python module at {} that will shaddow it.",
                    path,
                    python_module,
                )
                exitcode += 1
            except KeyError:
                # This is what we're expecting
                continue
    return exitcode
@task(iterable=["files"])
def check_module_indexes(ctx, files):
    """Validate each package's ``index.rst`` autosummary module list.

    Checks that the list is sorted, has no duplicates, and exactly matches
    the python modules on disk. Returns the number of problems found.
    """
    exitcode = 0
    files = build_docs_paths(files)
    for path in files:
        if path.name != "index.rst":
            continue
        contents = path.read_text()
        if ".. autosummary::" not in contents:
            continue
        # Grab the block of module names that follows the autosummary
        # directive (options, blank line(s), then one module per line).
        module_index_block = re.search(
            r"""
            \.\.\s+autosummary::\s*\n
            (\s+:[a-z]+:.*\n)*
            (\s*\n)+
            (?P<mods>(\s*[a-z0-9_\.]+\s*\n)+)
            """,
            contents,
            flags=re.VERBOSE,
        )

        if not module_index_block:
            continue

        module_index = re.findall(
            r"""\s*([a-z0-9_\.]+)\s*\n""", module_index_block.group("mods")
        )
        if module_index != sorted(module_index):
            exitcode += 1
            utils.error(
                "The autosummary mods in {} are not properly sorted. Please sort them.",
                path,
            )

        module_index_duplicates = [
            mod for mod, count in collections.Counter(module_index).items() if count > 1
        ]
        if module_index_duplicates:
            exitcode += 1
            utils.error(
                "Module index {} contains duplicates: {}", path, module_index_duplicates
            )
        # Let's check if all python modules are included in the index
        # Derive the python package path from the doc path:
        # doc/ref/<package>/all/index.rst -> salt/<package>
        path_parts = list(path.parts)
        # drop doc
        path_parts.pop(0)
        # drop ref
        path_parts.pop(0)
        # drop "index.rst"
        path_parts.pop()
        # drop "all"
        path_parts.pop()
        package = path_parts.pop(0)
        # Undo the doc-tree renames applied by build_path_cache().
        if package == "clouds":
            package = "cloud"
        if package == "file_server":
            package = "fileserver"
        if package == "configuration":
            package = "log"
            path_parts = ["handlers"]
        python_package = SALT_CODE_DIR.joinpath(package, *path_parts).relative_to(
            CODE_DIR
        )
        modules = set()
        # Each package family names its index entries differently.
        for module in python_package.rglob("*.py"):
            if package == "netapi":
                # netapi: one entry per subpackage (depth-limited).
                if module.stem == "__init__":
                    continue
                if len(module.parts) > 4:
                    continue
                if len(module.parts) > 3:
                    modules.add(module.parent.stem)
                else:
                    modules.add(module.stem)
            elif package == "cloud":
                if len(module.parts) < 4:
                    continue
                if module.name == "__init__.py":
                    continue
                modules.add(module.stem)
            elif package == "modules":
                if len(module.parts) > 3:
                    # salt.modules.inspeclib
                    # Subpackages index as "<pkg>" or "<pkg>.<mod>".
                    if module.name == "__init__.py":
                        modules.add(module.parent.stem)
                        continue
                    modules.add("{}.{}".format(module.parent.stem, module.stem))
                    continue
                if module.name == "__init__.py":
                    continue
                modules.add(module.stem)
            elif module.name == "__init__.py":
                continue
            elif module.name != "__init__.py":
                modules.add(module.stem)

        missing_modules_in_index = set(modules) - set(module_index)
        if missing_modules_in_index:
            exitcode += 1
            utils.error(
                "The module index at {} is missing the following modules: {}",
                path,
                ", ".join(missing_modules_in_index),
            )
        extra_modules_in_index = set(module_index) - set(modules)
        if extra_modules_in_index:
            exitcode += 1
            utils.error(
                "The module index at {} has extra modules(non existing): {}",
                path,
                ", ".join(extra_modules_in_index),
            )
    return exitcode
@task(iterable=["files"])
def check_stray(ctx, files):
    """Flag doc files under doc/ref that have no backing python module.

    Hand-written pages, virtual pages and index/glossary files are exempt.
    Returns the number of stray docs found.
    """
    exitcode = 0
    # NOTE: this intentionally shadows the module-level ``exclude_paths``
    # (which lists python files); these are doc-tree exclusions.
    exclude_paths = (
        DOCS_DIR / "_inc",
        DOCS_DIR / "ref" / "cli" / "_includes",
        DOCS_DIR / "ref" / "cli",
        DOCS_DIR / "ref" / "configuration",
        DOCS_DIR / "ref" / "file_server" / "backends.rst",
        DOCS_DIR / "ref" / "file_server" / "environments.rst",
        DOCS_DIR / "ref" / "file_server" / "file_roots.rst",
        DOCS_DIR / "ref" / "internals",
        DOCS_DIR / "ref" / "modules" / "all" / "salt.modules.inspectlib.rst",
        DOCS_DIR / "ref" / "peer.rst",
        DOCS_DIR / "ref" / "publisheracl.rst",
        DOCS_DIR / "ref" / "python-api.rst",
        DOCS_DIR / "ref" / "states" / "aggregate.rst",
        DOCS_DIR / "ref" / "states" / "altering_states.rst",
        DOCS_DIR / "ref" / "states" / "backup_mode.rst",
        DOCS_DIR / "ref" / "states" / "compiler_ordering.rst",
        DOCS_DIR / "ref" / "states" / "extend.rst",
        DOCS_DIR / "ref" / "states" / "failhard.rst",
        DOCS_DIR / "ref" / "states" / "global_state_arguments.rst",
        DOCS_DIR / "ref" / "states" / "highstate.rst",
        DOCS_DIR / "ref" / "states" / "include.rst",
        DOCS_DIR / "ref" / "states" / "layers.rst",
        DOCS_DIR / "ref" / "states" / "master_side.rst",
        DOCS_DIR / "ref" / "states" / "ordering.rst",
        DOCS_DIR / "ref" / "states" / "parallel.rst",
        DOCS_DIR / "ref" / "states" / "providers.rst",
        DOCS_DIR / "ref" / "states" / "requisites.rst",
        DOCS_DIR / "ref" / "states" / "startup.rst",
        DOCS_DIR / "ref" / "states" / "testing.rst",
        DOCS_DIR / "ref" / "states" / "top.rst",
        DOCS_DIR / "ref" / "states" / "vars.rst",
        DOCS_DIR / "ref" / "states" / "writing.rst",
        DOCS_DIR / "topics",
    )
    # Stringified, CODE_DIR-relative, for str.startswith() prefix matching.
    exclude_paths = tuple([str(p.relative_to(CODE_DIR)) for p in exclude_paths])
    files = build_docs_paths(files)
    for path in files:
        # Only doc/ref pages are audited here.
        if not str(path).startswith(str((DOCS_DIR / "ref").relative_to(CODE_DIR))):
            continue
        if str(path).startswith(exclude_paths):
            continue
        if path.name in ("index.rst", "glossary.rst", "faq.rst", "README.rst"):
            continue
        try:
            python_module = doc_path_to_python_module[path]
        except KeyError:
            contents = path.read_text()
            if ".. _virtual-" in contents:
                # Virtual pages legitimately have no backing module.
                continue
            exitcode += 1
            utils.error(
                "The doc at {} doesn't have a corresponding python module and is"
                " considered a stray doc. Please remove it.",
                path,
            )
    return exitcode
@task(iterable=["files"])
def check(ctx, files):
    """Run every doc/docstring audit in order and exit with the summed error count."""
    # (info message, task function) pairs -- order matches the original run.
    audits = (
        ("Checking inline :doc: markup", check_inline_markup),
        ("Checking python module stubs", check_stubs),
        ("Checking virtual modules", check_virtual),
        ("Checking stray docs", check_stray),
        ("Checking doc module indexes", check_module_indexes),
    )
    exitcode = 0
    for message, audit in audits:
        utils.info(message)
        exitcode += audit(ctx, files)
    utils.exit_invoke(exitcode)
|
from django.shortcuts import render
from .forms import ForecastForm,UploadFileForm
from .utils import RecurrentNetworks
import tensorflow as tf
import numpy as np
import pandas as pd
import os
from .utils import handle_uploaded_file
from django.http import HttpResponse, Http404
# Create your views here.
def download(request):
    """Serve the previously generated forecast CSV as a file download."""
    # download.csv is written next to this module by the multiforecast view.
    file_path = os.path.join(os.path.dirname(__file__), 'download.csv')
    if os.path.exists(file_path):
        with open(file_path, 'rb') as fh:
            # NOTE(review): content type is Excel although the payload is CSV
            # -- presumably to make browsers open it in Excel; confirm.
            response = HttpResponse(fh.read(), content_type="application/vnd.ms-excel")
            response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)
            return response
    raise Http404
def multiforecast(request):
    """Accept a CSV upload, run the RNN multi-step forecast, and stage the
    result as download.csv for the download view."""
    # `forecast` doubles as a flag for the template: False until a forecast ran.
    forecast = False
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            # handle_uploaded_file saves the upload as file.csv next to this module.
            handle_uploaded_file(request.FILES.get('upload_file'))
            data_path = os.path.join(os.path.dirname(__file__), 'file.csv')
            data = pd.read_csv(data_path)
            model = RecurrentNetworks(0)
            series = model.process_data(data)
            result = model.model_multi_forecast(series)
            forecast = True
            # Stage results for the /download endpoint.
            df = pd.Series(result,name='Temp')
            df.to_csv(os.path.join(os.path.dirname(__file__), 'download.csv'))
    else:
        form = UploadFileForm()
    return render(request, 'multi_forecast.html', {'form': form,'forecast':forecast})
def home(request):
    """Render the static landing page."""
    return render(request,'home.html')
def singleforecast(request):
    """Run a single-step temperature forecast from manually entered values."""
    # False until a valid POST produces a forecast string.
    forecast = False
    if request.method == "POST":
        form = ForecastForm(request.POST)
        if form.is_valid():
            data = list(form.cleaned_data.values())
            model = RecurrentNetworks(0)
            # NOTE(review): the constant +5 looks like a de-normalization /
            # bias correction baked in here -- confirm against model training.
            forecast = str(round(model.model_forecast(data) + 5, 2)) + ' (°C)'
    else:
        form = ForecastForm()
    ctx = {'form': form,'forecast': forecast}
    return render(request,'forecast.html',ctx)
|
from django.urls import path
from . import views
urlpatterns = [
    path("", views.index, name="index"),
    # Literal routes must precede the dynamic pattern: Django's URL
    # dispatcher matches top-down, so "<str:name>" listed first would
    # capture /jeffer and /david and route them to views.greet, making
    # the two routes below unreachable.
    path("jeffer", views.jeffer, name="jeffer"),
    path("david", views.david, name="david"),
    path("<str:name>", views.greet, name="greet"),
]
|
import React from "react";
import "./Button.css";
// Icon button that forwards clicks to props.onClick unless props.DISABLED
// is strictly true; a disabled button renders its icon grayscaled.
class ReactButton extends React.Component {
  constructor(props){
    super(props);
    // Bind once so the handler keeps `this` when passed to onClick.
    this.handleClick=this.handleClick.bind(this);
  }

  handleClick(){
    // Only the literal boolean true disables the button.
    if(this.props.DISABLED !== true){
      this.props.onClick();
    }
  }

  render() {
    return (
      <div onClick={this.handleClick} className="ReactButton">
        {/* Grayscale filter is the only visual disabled state. */}
        <img draggable="false" style={{ filter: ( this.props.DISABLED === true ? ("grayscale(100%)"):("grayscale(0%)")) }} src={this.props.icon} width="25" alt="" />
      </div>
    );
  }
}
export default ReactButton;
|
print('Hello, World')  # Displays the string Hello, World on the Console (see the comments below).
########################################################################
# This line is a COMMENT -- a note to human readers of this file.
# When a program runs, it ignores everything from a # (hash) mark
# to the end of the line with the # mark.
#
# We call files that have Python code in them MODULES. Line 1 of this
# module (look at it now) prints onto the Console the STRING
# Hello, World
# Anything surrounded by quote marks (single or double) is a STRING.
########################################################################
########################################################################
#
# DONE: 1.
# (Yes, that means for YOU to DO things per these instructions:)
#
# Run this module by right clicking anywhere in this window and select
# Run 'name of file'
# After running, find the Console tab (below) and confirm that
# Hello, World
# did indeed get printed (displayed) on the Console.
#
########################################################################
########################################################################
#
# DONE: 2.
# Notice the small horizontal BLUE bars on the scrollbar-like thing
# on the right. Each blue bar indicates a TO DO in this module.
#
# a. You can use the blue bars to go from one TO DO to the next
# by clicking on the blue bars. ** Try that now. **
#
# b. When you have completed a TO DO, you should change the word
# DONE
# to
# DONE.
# Try it now on line 16 above, and note that its blue bar on
# the scrollbar-like thing to the right has gone away.
#
# If you change TODOs to DONEs like this, you can tell when you have
# finished all the exercises in a module -- there will be no blue bars
# left on the scrollbar-like thing to the right.
#
# You have now completed TO DO #2, so change its TO DO on line 29 to DONE
# (and proceed similarly for all forthcoming TODOs in this course).
#
########################################################################
########################################################################
#
# DONE: 3.
# Add another print statement below.
# It should print any string that you want (but keep it G-rated!)
# Test your code by re-running this module using either the right click
# method again or by using the play button in the upper right.
# Look at the Console to be sure that your string printed as expected.
#
########################################################################
print("Let's eat, grandma")
########################################################################
#
# DONE: 4.
# Add yet another print statement.
# This one should print the *product* of 3,607 and 34,227.
# Let the computer do the arithmetic for you (no calculators!).
# You do NOT have to use strings for this, so no quotation marks!
#
# TEST your code by re-running this module, then asking someone
# whom you trust: What number did your print display for TO DO 4?
# (HINT: It is an INTERESTING number.) Get help if your value is wrong.
#
########################################################################
print(3607 * 34227)  # TO DO 4: the computer does the arithmetic; 3607 * 34227 = 123456789.
########################################################################
#
# DONE: 5.
# Look at the list of files in this project to the left.
# Note that this file (m2_hello_world.py) is now displayed in a blue
# font color (if the file is highlighted, select a different file so you can
# see the blue font color). That means that you have made changes to
# this file which have not yet been committed.
#
# COMMIT your work by selecting VCS from the menu bar, then select Commit Changes
# Make sure only the files you want to commit are checked and optionally
# add a quick Commit message to describe your work. Then hover over the
# Commit button and select Commit and Push. Commit saves the work to
# your computer. "and Push" saves a copy of your work up into your Github
# repository (saving to the cloud is a better way to permanently save work).
#
# Oh, one more thing:
# Do you have any blue bars left on the scrollbar-like thing
# to the right? If so, click on each blue bar and change
# its TO DO to DONE. Then run the file (to make sure you didn't break
# anything) then Commit and Sync again.
#
# You can COMMIT as often as you like. DO FREQUENT COMMITS.
#
########################################################################
|
import React from "react"
import Display from "../components/display"
import Layout from "../components/layout"
const data = {
title: "Soli",
rows: [
{
name: "Kenkeni",
nodes: [0, 1, 1, 0, 2, 2, 0, 1, 1, 0, 2, 2],
},
{
name: "Sangban",
nodes: [2, 0, 1, 0, 3, 0, 3, 0, 1, 2, 0, 1],
},
{
name: "Dununba",
nodes: [2, 0, 2, 0, 1, 0, 1, 0, 2, 2, 0, 2],
},
],
}
const Soli = () => (
<Layout>
<Display data={data}></Display>
</Layout>
)
export default Soli
|
/*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <vector>
#include <unordered_set>
#include <unordered_map>
#include <memory>
#include <boost/optional.hpp>
#include "hazelcast/client/address.h"
#include "hazelcast/client/serialization_config.h"
#include "hazelcast/client/socket_interceptor.h"
#include "hazelcast/client/load_balancer.h"
#include "hazelcast/util/SynchronizedMap.h"
#include "hazelcast/client/config/reliable_topic_config.h"
#include "hazelcast/client/config/near_cache_config.h"
#include "hazelcast/client/config/client_network_config.h"
#include "hazelcast/client/config/client_connection_strategy_config.h"
#include "hazelcast/client/config/client_flake_id_generator_config.h"
#include "hazelcast/client/config/matcher/matching_point_config_pattern_matcher.h"
#include "hazelcast/client/internal/config/ConfigUtils.h"
#include "hazelcast/client/config/logger_config.h"
#include "hazelcast/client/serialization/serialization.h"
#include "hazelcast/client/lifecycle_listener.h"
#include "hazelcast/client/membership_listener.h"
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#pragma warning(push)
#pragma warning(disable: 4251) //for dll export
#endif
namespace hazelcast {
namespace client {
class initial_membership_event;
namespace connection {
class ClientConnectionManagerImpl;
};
namespace security {
    /**
     * Base type for the credentials a client presents to the cluster.
     * Concrete subtypes: \username_password_credentials and
     * \token_credentials (see type()).
     */
    class HAZELCAST_API credentials {
    public:
        // Discriminator returned by type() so callers can tell which
        // concrete credentials subtype they are holding.
        enum credential_type {
            username_password,
            token
        };

        credentials(const std::string &name);

        virtual ~credentials();

        // Identifier for these credentials.
        const std::string &name() const;

        virtual const credential_type type() const = 0;

    private:
        std::string name_;
    };

    /**
     * Credentials carrying a name and a password string.
     */
    class HAZELCAST_API username_password_credentials : public credentials {
    public:
        username_password_credentials(const std::string &name, const std::string &password);

        const std::string &password() const;

        const credential_type type() const override;

    private:
        std::string password_;
    };

    /**
     * Credentials carrying an opaque byte token.
     */
    class HAZELCAST_API token_credentials : public credentials {
    public:
        token_credentials(const std::vector<byte> &token);

        const std::vector<byte> &token() const;

        const credential_type type() const override;

    private:
        std::vector<byte> token_;
    };
};
/**
 * hazelcast_client configuration class.
 */
class HAZELCAST_API client_config {
    friend class spi::impl::ClientClusterServiceImpl;
    friend class connection::ClientConnectionManagerImpl;
public:
    /**
     * Constructor with default values.
     * smart(true)
     * redoOperation(false)
     * connectionAttemptLimit(2)
     * attemptPeriod(3000)
     * defaultLoadBalancer: round robin load balancer
     */
    client_config();

    // The config is move-only: copying is deleted.
    client_config(const client_config &rhs) = delete;

    client_config &operator=(const client_config &rhs) = delete;

    client_config(client_config &&rhs);

    client_config &operator=(client_config &&rhs);

    /**
     * Returns the configured cluster name. The name is sent as part of client authentication message and may be verified on the
     * member.
     *
     * \return the configured cluster name
     */
    const std::string &get_cluster_name() const;

    /**
     * Sets the cluster name (sent as part of the client authentication
     * message -- see get_cluster_name()).
     *
     * \param cluster_name the cluster name to connect to
     * \return itself ClientConfig
     */
    client_config &set_cluster_name(const std::string &cluster_name);

    /**
     * There are two types of credentials you can provide, \username_password_credentials and \token_credentials
     *
     * \return itself ClientConfig
     */
    client_config &set_credentials(const std::shared_ptr<security::credentials> &credential);

    const std::shared_ptr<security::credentials> &get_credentials() const;

    /**
     * If true, client will redo the operations that were executing on the server and client lost the connection.
     * This can be because of network, or simply because the member died. However it is not clear whether the
     * application is performed or not. For idempotent operations this is harmless, but for non idempotent ones
     * retrying can cause to undesirable effects. Note that the redo can perform on any member.
     *
     * If false, the operation will throw io_exception.
     *
     * \param redoOperation
     * return itself ClientConfig
     */
    client_config &set_redo_operation(bool redo_operation);

    /**
     *
     * see setRedoOperation
     * returns redoOperation
     */
    bool is_redo_operation() const;

    /**
     * Will be called with the Socket, each time client creates a connection to any Member.
     *
     * \return itself ClientConfig
     */
    client_config &set_socket_interceptor(socket_interceptor &&interceptor);

    /**
     * Will be called with the Socket, each time client creates a connection to any Member.
     */
    const socket_interceptor &get_socket_interceptor() const;

    /**
     * Adds a listener to configuration to be registered when hazelcast_client starts.
     * Warning 1: If listener should do a time consuming operation, off-load the operation to another thread.
     * otherwise it will slow down the system.
     *
     * Warning 2: Do not make a call to hazelcast. It can cause deadlock.
     *
     * \param listener lifecycle_listener
     * \return itself ClientConfig
     */
    client_config &add_listener(lifecycle_listener &&listener);

    /**
     *
     * \return registered lifecycleListeners
     */
    const std::vector<lifecycle_listener> &get_lifecycle_listeners() const;

    /**
     * Adds a listener to configuration to be registered when hazelcast_client starts.
     * Warning 1: If listener should do a time consuming operation, off-load the operation to another thread.
     * otherwise it will slow down the system.
     *
     * Warning 2: Do not make a call to hazelcast. It can cause deadlock.
     *
     * \param listener MembershipListener
     * \return itself ClientConfig
     */
    client_config &add_listener(membership_listener &&listener);

    /**
     * Returns registered membershipListeners
     *
     * \return registered membershipListeners
     */
    const std::vector<membership_listener> &get_membership_listeners() const;

    /**
     * Used to distribute the operations to multiple Endpoints.
     *
     * \return load_balancer
     */
    load_balancer &get_load_balancer();

    /**
     * Used to distribute the operations to multiple connections.
     * If not set, round robin based load balancer is used
     *
     * \param load_balancer
     *
     * \return itself ClientConfig
     */
    client_config &set_load_balancer(load_balancer &&load_balancer);

    /**
     *
     * \return serializationConfig
     */
    serialization_config &get_serialization_config();

    /**
     * SerializationConfig is used to
     * * set version of portable classes in this client (@see versioned_portable_serializer)
     *
     * \param serializationConfig
     * \return itself ClientConfig
     */
    client_config &set_serialization_config(serialization_config const &serialization_config);

    /**
     * Gets a reference to properties map
     *
     * \return properties map
     */
    const std::unordered_map<std::string, std::string> &get_properties() const;

    /**
     * Sets the value of a named property
     *
     * @see client_properties for properties that is used to configure client
     *
     * \param name property name
     * \param value value of the property
     * \return itself ClientConfig
     */
    client_config &set_property(const std::string &name, const std::string &value);

    /**
     * Adds a ClientReliableTopicConfig.
     *
     * \param reliableTopicConfig the ReliableTopicConfig to add
     * \return configured {\link ClientConfig} for chaining
     */
    client_config &add_reliable_topic_config(const config::reliable_topic_config &reliable_topic_config);

    /**
     * Gets the ClientReliableTopicConfig for a given reliable topic name.
     *
     * \param name the name of the reliable topic
     * \return the found config. If none is found, a default configured one is returned.
     */
    const config::reliable_topic_config &get_reliable_topic_config(const std::string &name);

    /**
     * Helper method to add a new NearCacheConfig
     *
     * \param nearCacheConfig NearCacheConfig to be added
     * \return configured client_config for chaining
     * @see NearCacheConfig
     *
     * Memory ownership of the config is passed to the client config
     */
    client_config &add_near_cache_config(const config::near_cache_config &near_cache_config);

    /**
     * Gets the NearCacheConfig configured for the map / cache with name
     *
     * \param name name of the map / cache
     * \return Configured NearCacheConfig
     * @see NearCacheConfig
     */
    const config::near_cache_config *get_near_cache_config(const std::string &name) const;

    /**
     * Gets {\link com.hazelcast.client.config.client_network_config}
     *
     * \return {\link com.hazelcast.client.config.client_network_config}
     * @see com.hazelcast.client.config.client_network_config
     */
    config::client_network_config &get_network_config();

    /**
     * Sets {\link com.hazelcast.client.config.client_network_config}
     *
     * \param networkConfig {\link com.hazelcast.client.config.client_network_config} to be set
     * \return configured client_config for chaining
     * @see com.hazelcast.client.config.client_network_config
     */
    client_config &set_network_config(const config::client_network_config &network_config);

    /**
     * \return the configured instance name, if one was set.
     */
    const boost::optional<std::string> &get_instance_name() const;

    /**
     * Sets a name for this client instance.
     *
     * \param instance_name the name to use
     * \return itself ClientConfig
     */
    client_config &set_instance_name(const std::string &instance_name);

    /**
     * Pool size for internal ExecutorService which handles responses etc.
     *
     * \return int Executor pool size.
     */
    int32_t get_executor_pool_size() const;

    /**
     * Sets Client side Executor pool size.
     *
     * \param executorPoolSize pool size
     * \return configured client_config for chaining
     */
    void set_executor_pool_size(int32_t executor_pool_size);

    /**
     * \return the connection strategy configuration (retry/reconnect behaviour).
     */
    config::client_connection_strategy_config &get_connection_strategy_config();

    /**
     * Sets the connection strategy configuration.
     *
     * \param connection_strategy_config the configuration to use
     * \return configured client_config for chaining
     */
    client_config &
    set_connection_strategy_config(const config::client_connection_strategy_config &connection_strategy_config);

    /**
     * Returns a {\link ClientFlakeIdGeneratorConfig} configuration for the given flake ID generator name.
     * <p>
     * The name is matched by pattern to the configuration and by stripping the
     * partition ID qualifier from the given {@code name}.
     * If there is no config found by the name, it will return the configuration
     * with the name {@code "default"}.
     *
     * \param name name of the flake ID generator config
     * \return the flake ID generator configuration
     * @throws ConfigurationException if ambiguous configurations are found
     * @see StringPartitioningStrategy#getBaseName(std::string)
     * @see #setConfigPatternMatcher(ConfigPatternMatcher)
     * @see #getConfigPatternMatcher()
     */
    const config::client_flake_id_generator_config *find_flake_id_generator_config(const std::string &name);

    /**
     * Returns the {\link ClientFlakeIdGeneratorConfig} for the given name, creating
     * one if necessary and adding it to the collection of known configurations.
     * <p>
     * The configuration is found by matching the the configuration name
     * pattern to the provided {@code name} without the partition qualifier
     * (the part of the name after {@code '@'}).
     * If no configuration matches, it will create one by cloning the
     * {@code "default"} configuration and add it to the configuration
     * collection.
     * <p>
     * This method is intended to easily and fluently create and add
     * configurations more specific than the default configuration without
     * explicitly adding it by invoking {\link #addFlakeIdGeneratorConfig(ClientFlakeIdGeneratorConfig)}.
     * <p>
     * Because it adds new configurations if they are not already present,
     * this method is intended to be used before this config is used to
     * create a hazelcast instance. Afterwards, newly added configurations
     * may be ignored.
     *
     * \param name name of the flake ID generator config
     * \return the cache configuration
     * @throws ConfigurationException if ambiguous configurations are found
     * @see StringPartitioningStrategy#getBaseName(std::string)
     */
    const config::client_flake_id_generator_config *get_flake_id_generator_config(const std::string &name);

    /**
     * Adds a flake ID generator configuration. The configuration is saved under the config
     * name, which may be a pattern with which the configuration will be
     * obtained in the future.
     *
     * \param config the flake ID configuration
     * \return this config instance
     */
    client_config &add_flake_id_generator_config(const config::client_flake_id_generator_config &config);

    /**
     *
     * \return The logger configuration.
     */
    config::logger_config &get_logger_config();

    /**
     * \return the set of labels configured for this client.
     */
    const std::unordered_set<std::string> &get_labels() const;

    /**
     * Replaces the set of labels for this client.
     *
     * \param labels the labels to set
     * \return itself ClientConfig
     */
    client_config &set_labels(const std::unordered_set<std::string> &labels);

    /**
     * Adds a single label for this client.
     *
     * \param label the label to add
     * \return itself ClientConfig
     */
    client_config &add_label(const std::string &label);

    /**
     * This feature reduces number of hops and increase performance for smart clients.
     * It is enabled by default for smart clients.
     * This config has no effect for unisocket clients.
     *
     * @param enabled enables client to get backup acknowledgements directly from the member
     * that backups are applied
     * @return configured \ClientConfig for chaining
     */
    client_config &backup_acks_enabled(bool enabled);

    /**
     * Note that backup acks to client can be enabled only for smart client.
     * This config has no effect for unisocket clients.
     *
     * @return true if backup acknowledgements comes to client
     */
    bool backup_acks_enabled();

private:
    friend class reliable_topic;

    // Looks up the reliable topic config previously added under \p name;
    // used internally by reliable_topic.
    const config::reliable_topic_config *lookup_reliable_topic_config(const std::string &name) const;

    std::string cluster_name_;

    config::client_network_config network_config_;

    serialization_config serialization_config_;

    boost::optional<load_balancer> load_balancer_;

    std::vector<membership_listener> membership_listeners_;

    std::vector<lifecycle_listener> lifecycle_listeners_;

    std::unordered_map<std::string, std::string> properties_;

    bool redo_operation_;

    socket_interceptor socket_interceptor_;

    std::shared_ptr<security::credentials> credentials_;

    std::unordered_map<std::string, config::reliable_topic_config> reliable_topic_config_map_;

    std::unordered_map<std::string, config::near_cache_config> near_cache_config_map_;

    boost::optional<std::string> instance_name_;

    /**
     * pool-size for internal ExecutorService which handles responses etc.
     */
    int32_t executor_pool_size_;

    config::client_connection_strategy_config connection_strategy_config_;

    std::unordered_map<std::string, config::client_flake_id_generator_config> flake_id_generator_config_map_;

    config::matcher::matching_point_config_pattern_matcher config_pattern_matcher_;

    config::logger_config logger_config_;

    std::unordered_set<std::string> labels_;

    // Backup acknowledgements direct to the client are on by default.
    bool backup_acks_enabled_ = true;
};
}
}
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#pragma warning(pop)
#endif
|
// Copyright (c) 2012 Ecma International. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
es5id: 15.2.3.6-4-320-1
description: >
Object.defineProperty - 'O' is an Arguments object of a function
that has formal parameters, 'P' is own data property of 'O', test
TypeError is thrown when updating the [[Configurable]] attribute
value of 'P' which is not configurable (10.6 [[DefineOwnProperty]]
step 4)
includes: [propertyHelper.js]
---*/
(function(a, b, c) {
  // Define a brand-new, non-configurable own data property on the
  // arguments object of a function that has formal parameters.
  Object.defineProperty(arguments, "genericProperty", {
    configurable: false
  });
  try {
    // Redefining [[Configurable]] from false to true must be rejected
    // (10.6 [[DefineOwnProperty]] step 4).
    Object.defineProperty(arguments, "genericProperty", {
      configurable: true
    });
    $ERROR("Expected an exception.");
  } catch (e) {
    // The failed redefinition must leave the property untouched:
    // value undefined, non-writable, non-enumerable, non-configurable.
    verifyEqualTo(arguments, "genericProperty", undefined);
    verifyNotWritable(arguments, "genericProperty");
    verifyNotEnumerable(arguments, "genericProperty");
    verifyNotConfigurable(arguments, "genericProperty");
    // And the thrown error must specifically be a TypeError.
    if (!(e instanceof TypeError)) {
      $ERROR("Expected TypeError, got " + e);
    }
  }
}(1, 2, 3));
|
# requirements.txtの作るコマンド→ (myenv)$ pip freeze > requirements.txt
# 動画や音声はファイルライクなオブジェクトで
from __future__ import unicode_literals
import asyncio
import os
import io
import math
import random
import aiohttp #画像転送系
# import requests #req
import urllib.request
import urllib.parse
import json
import discord
from discord.ext import commands
import random
import youtube_dl
import socket
import platform
import psutil
import cpuid
import time
# Secrets and version info.
# NOTE(review): TOKEN and A3RT_KEY are hardcoded (currently empty) --
# these should be loaded from environment variables, not committed.
TOKEN=''
A3RT_KEY=''
VERSION='v2.2.7'
LOG=''
# Create the objects needed to connect.
description = '''BさんのBBBot (v2.2.7)'''
bot = commands.Bot(command_prefix='?', description=description)
# 起動時に動作する処理
@bot.event
async def on_ready():
    # Fires once the client has connected and finished startup.
    # Login notification (channel announcement currently disabled).
    # await greet('BBBotが起動したよ!')
    print(bot.user.name + ' is logged in.')
    # https://discordpy.readthedocs.io/en/latest/ext/commands/api.html#discord.ext.commands.Bot.change_presence
    # https://discordpy.readthedocs.io/en/latest/api.html#discord.BaseActivity
    # await bot.change_presence(status=discord.Status.idle, activity=discord.CustomActivity(name="B is bot"))
    # await bot.change_presence(status=discord.Status.online, activity=discord.Activity(name="BさんのPC", type=5))
    # await bot.change_presence(activity=discord.Streaming(name="BBBot", url="youtube.com/channel/UC7eDVYgxplpe71AHqyp82Kg/"))
    # Advertise the bot as idle and "playing" B.
    await bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="B", emoji="🍝"))
# メッセージ受信時に動作する処理
# async def on_message(message):
# await bot.send_message(LOG, message)
#---------------------------------------------------------- 計算系
class Calc(commands.Cog):
    """Calculator commands: ?calc, ?add, ?sub, ?mul, ?div, ?ent, ?rand, ?randd."""

    def __init__(self, bot):
        self.bot = bot
        self._last_member = None

    # SECURITY: eval() executes arbitrary user input. Kept for parity with
    # the original behaviour, but this should be replaced with a safe
    # expression parser before any public deployment.
    @commands.command(description='計算 Eval')
    async def calc(self, ctx, inc: str):
        """Calc number Eval"""
        await ctx.send(eval(inc))

    # BUG FIX: this command was missing its @commands.command decorator, so
    # ?add (advertised in the help text) was never registered with the bot.
    @commands.command(description='足し算')
    async def add(self, ctx, left: str, right: str):
        """Add number + number"""
        left = float(left)
        right = float(right)
        await ctx.send(left + right)

    @commands.command(description='引き算')
    async def sub(self, ctx, left: str, right: str):
        """Sub number - number"""
        left = float(left)
        right = float(right)
        await ctx.send(left - right)

    @commands.command(description='掛け算')
    async def mul(self, ctx, left: str, right: str):
        """Mul number * number"""
        left = float(left)
        right = float(right)
        await ctx.send(left * right)

    @commands.command(description='割り算')
    async def div(self, ctx, left: str, right: str):
        """Div number / number"""
        left = float(left)
        right = float(right)
        await ctx.send(left / right)

    @commands.command(description='エントロピー計算')
    async def ent(self, ctx, p: str):
        """Entropy P()"""
        p = float(eval(p))
        # Binary entropy is 0 at both endpoints; the old code only handled
        # p == 0 and raised a math domain error on log2(0) for p == 1.
        if p == 0.0 or p == 1.0:
            await ctx.send(0.0)
        else:
            await ctx.send(-p*math.log2(p)-(1-p)*math.log2(1-p))

    @commands.command(description='乱数(int) 1~x')
    async def rand(self, ctx, p: str):
        """Random(int) 1~x"""
        p = int(eval(p))
        # randint requires low <= high, so swap the bounds around 1.
        if p > 1:
            await ctx.send(random.randint(1, p))
        else:
            await ctx.send(random.randint(p, 1))

    @commands.command(description='乱数(float)) 1.0~x')
    async def randd(self, ctx, p: str):
        """Random(float) 1.0~x"""
        p = float(eval(p))
        if p > 1.0:
            await ctx.send(random.uniform(1.0, p))
        else:
            await ctx.send(random.uniform(p, 1.0))
#---------------------------------------------------------- B系
class B(commands.Cog):
    """Novelty "B" commands (?BLOOP, ?B)."""

    def __init__(self, bot):
        self.bot = bot
        self._last_member = None

    @commands.command(description='Bを連続送信します')
    async def BLOOP(self, ctx, times: int):
        """BLOOP number<=11"""
        # BUG FIX: the limit check was `times > 12`, which let 12 through
        # even though the documented maximum is 11.
        if times > 11:
            await ctx.send('too B!')
            return
        for _ in range(times):
            await ctx.send('B')

    @commands.command(description='greet, hello, help, block')
    async def B(self, ctx, swit: str):
        """B + (is xxx)"""
        if swit == 'greet':
            await ctx.send('こんにちは! BBBot('+VERSION+')だよ。\nよろしくね')
        elif swit == 'sysinfo':
            # Report host name/IP, OS, CPU model and memory usage.
            ipInfo = 'IP :'+socket.gethostname()+': '+socket.gethostbyname(socket.gethostname())
            platInfo = 'OS : '+platform.platform()
            cpuInfo = 'CPU: '+cpuid.cpu_name()
            memInfo = 'MEM: '+str('{:.2f}'.format(psutil.virtual_memory().used/(1024*1024)))+'MB / '+str('{:.2f}'.format(psutil.virtual_memory().total/(1024*1024)))+'MB'
            await ctx.send(ipInfo+"\n"+platInfo+"\n"+cpuInfo+"\n"+memInfo)
        elif swit == 'hello':
            await ctx.send('Hello B!')
        elif swit == 'block':
            await ctx.send('□□□□□□□□\n□■■■■□□□\n□■□□□■□□\n□■□□□■□□\n□■■■■□□□\n□■□□□■□□\n□■□□□□■□\n□■□□□□■□\n□■■■■■□□\n□□□□□□□□')
        elif swit == 'typing':
            async with ctx.typing():
                # BUG FIX: time.sleep(10) blocked the whole event loop for
                # 10 seconds; asyncio.sleep keeps the bot responsive. Also
                # removed a stray un-awaited `ctx.typing()` call (a no-op
                # that only produced a "never awaited" warning).
                await asyncio.sleep(10)
            await ctx.send('B')
        else:
            await ctx.send('B!')
#----------------------------------------------------------画像系
class Image(commands.Cog):
    """Commands that post predefined pictures/GIFs to the channel.

    All commands delegate to the module-level get_pic() helper, which
    downloads the URL and uploads the bytes as a Discord attachment.
    """
    def __init__(self, bot):
        self.bot = bot
        self._last_member = None

    @commands.command(description='melt picture')
    async def melt(self, ctx):
        """melt picture"""
        await get_pic(self, ctx, 'https://dic.nicovideo.jp/oekaki/674964.png', 'melt.png')

    @commands.command(description='abya picture')
    async def abya(self, ctx):
        """abya picture"""
        await get_pic(self, ctx, 'https://livedoor.blogimg.jp/mn726/imgs/0/3/03812153.jpg', 'abya.png')

    @commands.command(description='shiran kedo~ picture')
    async def shiran(self, ctx):
        """shiran kedo~ picture"""
        await get_pic(self, ctx, 'https://pbs.twimg.com/media/DoGwbj0UwAALenI.jpg', 'shiran.jpg')

    @commands.command(description='party parrot GIF')
    async def party(self, ctx):
        """party parrot GIF"""
        await get_pic(self, ctx, 'https://cdn.discordapp.com/attachments/705099416083890281/766528750456012841/parrot.gif', 'party_parrot.gif')

    @commands.command(description='B picture')
    async def b_pic(self, ctx):
        """B picture"""
        await get_pic(self, ctx, 'https://cdn.discordapp.com/attachments/705099416083890281/766668684188975114/letter-b-clipart-158558-5546542.jpg', 'b_picture.jpg')

    @commands.command(description='gaming presentation GIF')
    async def presen(self, ctx):
        """gaming presentation GIF"""
        await get_pic(self, ctx, 'https://cdn.discordapp.com/attachments/733937061199085610/768300192818135040/GPW.gif', 'gaming_presentation.gif')

    # Generic variant: user supplies the URL and attachment name.
    @commands.command(description='send photo')
    async def b_img(self, ctx, url: str, file_name: str):
        """b_img url file_name"""
        await get_pic(self, ctx, url, file_name)
async def get_pic(self, ctx, url: str, file_name: str):
    """Fetch `url` and upload the bytes to the channel as `file_name`.

    Module-level helper shared by the Image cog's commands; the unused
    `self` parameter mirrors the cog-method call sites.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            if response.status != 200:
                # Upstream failure: report it instead of uploading.
                return await ctx.send('server error... b')
            payload = io.BytesIO(await response.read())
            await ctx.send(file=discord.File(payload, file_name))
#----------------------------------------------------------AI系
class AI(commands.Cog):
    """Chat command backed by the A3RT small-talk API."""

    def __init__(self, bot):
        self.bot = bot
        self._last_member = None

    @commands.command(description='a3rt AI TalkAPI')
    async def ai(self, ctx, talk: str):
        """a3rt AI TalkAPI"""
        # POST the API key and the user's utterance, then relay the reply.
        payload = urllib.parse.urlencode({"apikey": A3RT_KEY, "query": talk}).encode('utf-8')
        api_request = urllib.request.Request('https://api.a3rt.recruit-tech.co.jp/talk/v1/smalltalk', payload)
        response = urllib.request.urlopen(api_request)
        parsed = json.load(response)
        await ctx.send(parsed['results'][0]['reply'])
#----------------------------------------------------------youtube-dl
class youtube(commands.Cog):
    """Media download commands built on youtube-dl.

    Each command downloads to tmp/, uploads the file to the channel
    (Discord caps attachments at 8 MB), then removes the temp file.

    BUG FIX: the original commands ran `await os.remove(...)` inside the
    try block; os.remove returns None, so awaiting it raised TypeError,
    which the bare `except` caught -- making every successful download
    also report "...: end.". The duplicated per-command pipelines are now
    factored into _fetch_and_send with the remove done synchronously.
    """

    def __init__(self, bot):
        self.bot = bot
        self._last_member = None

    async def _fetch_and_send(self, ctx, url, tag, out_ext, open_ext, extra_opts):
        """Shared download/upload pipeline.

        tag        -- command name used in progress messages
        out_ext    -- extension appended to the outtmpl ('' lets youtube-dl
                      append it itself, e.g. after merging into .mp4)
        open_ext   -- extension of the file that is finally uploaded
        extra_opts -- per-command youtube-dl options (format, postprocessors)
        """
        async with ctx.typing():
            # Probe metadata first; the temp file name is based on the id.
            with youtube_dl.YoutubeDL({}) as ydl:
                meta = ydl.extract_info(url, download=False)
            await ctx.send(tag + ': step1 comp.')
            base = 'tmp/' + meta['id']
            opts = {'outtmpl': base + out_ext}
            opts.update(extra_opts)
            target = base + open_ext
            try:
                with youtube_dl.YoutubeDL(opts) as ydl:
                    ydl.download([url])
                with open(target, 'rb') as fp:
                    await ctx.send(file=discord.File(fp, target))
            except:
                # Download or upload failed (e.g. file exceeds Discord's limit).
                await ctx.send(tag + ': end.')
            # Best-effort temp-file cleanup; os.remove is synchronous --
            # never `await` it.
            try:
                os.remove(target)
            except:
                print('ok')

    # Typo fixed in the description: "noconico" -> "niconico".
    @commands.command(description='niconico-dl (.mp4)')
    async def ndl(self, ctx, url: str):
        """niconico-dl(β) url : DL [.mp4 / max 8MB]"""
        await self._fetch_and_send(ctx, url, 'ndl', '.mp4', '.mp4', {})

    @commands.command(description='youtube-dl (.mp4)')
    async def ydl(self, ctx, url: str):
        """youtube-dl url : DL [.mp4 / max 8MB]"""
        # outtmpl has no extension here: youtube-dl appends .mp4 itself
        # after merging the best video and audio streams.
        await self._fetch_and_send(
            ctx, url, 'ydl', '', '.mp4',
            {'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best'})

    @commands.command(description='youtube-dl audio only')
    async def ydl_m(self, ctx, url: str):
        """youtube-dl url : DL audio [.mp3 / max 8MB]"""
        await self._fetch_and_send(
            ctx, url, 'ydl_m', '.mp3', '.mp3',
            {'format': 'bestaudio/best',
             'postprocessors': [{
                 'key': 'FFmpegExtractAudio',
                 'preferredcodec': 'mp3',
                 'preferredquality': '320',
             }]})

    @commands.command(description='youtube-dl audio only(m4a_best)')
    async def ydl_m4a(self, ctx, url: str):
        """youtube-dl url : DL audio [.m4a / max 8MB]"""
        await self._fetch_and_send(ctx, url, 'ydl_m4a', '.m4a', '.m4a',
                                   {'format': 'bestaudio[ext=m4a]/best'})

    @commands.command(description='youtube-dl audio only(webm_best)')
    async def ydl_webm(self, ctx, url: str):
        """youtube-dl url : DL audio [.webm / max 8MB]"""
        await self._fetch_and_send(ctx, url, 'ydl_webm', '.webm', '.webm',
                                   {'format': 'bestaudio[ext=webm]/best'})
#----------------------------------------------------------Discord_VoiceChat
class VoiceChat(commands.Cog):
    """Cog with voice-channel connect/disconnect commands."""

    def __init__(self, bot):
        self.bot = bot
        self._last_member = None

    @commands.command(description='Discord_VoiceChat Connect')
    async def v_connect(self, ctx):
        """VoiceConnect"""
        # BUG FIX: the old body used ``author.voice_channel`` and
        # ``client.join_voice_channel`` — both removed in discord.py 1.0 —
        # and ``client`` was never defined here (NameError at runtime).
        # Modern API: join the channel the invoking member is currently in.
        voice_state = ctx.author.voice
        if voice_state is None or voice_state.channel is None:
            await ctx.send('You are not connected to a voice channel.')
            return
        await voice_state.channel.connect()

    @commands.command(description='Discord_VoiceChat Disconnect')
    async def v_disconnect(self, ctx):
        """VoiceDisconnect"""
        vc = ctx.message.guild.voice_client
        # Guard against not being connected (old code raised AttributeError).
        if vc is not None:
            await vc.disconnect()
# @bot.event
# async def on_message(message):
# # メッセージ送信者がBotだった場合は無視する
# if message.author.bot:
# return
# if message.content == '/B!':
# await message.channel.send('B!')
# if message.content == '/B greet':
# await message.channel.send('こんにちは! BBBot('+VERSION+')だよ.\nよろしくね')
# if message.content == '/B hello':
# await message.channel.send('Hello B!')
# if message.content == '/B help':
# await message.channel.send('/B! : B!で応答します\n /B block : ■と□でBを表現します\n /B greet : 挨拶をします\n /B hello : Hello B!\n /B help : コマンド一覧を表示します')
# # await message.channel.send('/B! : B!で応答します\n /B block : ■と□でBを表現します\n /B greet : 挨拶をします\n /B hello : Hello B!\n /B help : コマンド一覧を表示します\n /B melt : :melt:\n /B abya : :abya:')
# if message.content == '/B block':
# await message.channel.send('□□□□□□□□\n□■■■■□□□\n□■□□□■□□\n□■□□□■□□\n□■■■■□□□\n□■□□□■□□\n□■□□□□■□\n□■□□□□■□\n□■■■■■□□\n□□□□□□□□')
# if '知らんけど(画像略' in message.content:
# filepath = 'https://pbs.twimg.com/media/DoGwbj0UwAALenI.jpg'
# await message.channel.send(file=discord.File(filepath)
# if message.content == '/B melt':
# filepath = 'https://dic.nicovideo.jp/oekaki/674964.png'
# await message.channel.send_file(filepath)
# if message.content == '/B abya':
# filepath = 'https://livedoor.blogimg.jp/mn726/imgs/0/3/03812153.jpg'
# await message.channel.send_file(filepath)
#----------------------------------------------------------ASCII Encode
class Encode(commands.Cog):
    """Cog providing ASCII (code point) encode/decode commands."""

    def __init__(self, bot):
        self.bot = bot
        self._last_member = None

    @commands.command(description='ASCII Encode')
    async def asc_enc(self, ctx, text: str):
        """ASCII Encode"""
        # One leading space before every code, matching the original output.
        encoded = ''.join(' ' + str(ord(ch)) for ch in text)
        await ctx.send(encoded)

    @commands.command(description='ASCII Decode')
    async def asc_dec(self, ctx, text: int):
        """ASCII Decode"""
        await ctx.send(chr(text))
# Start the bot and connect to the Discord server.
# Register every command Cog with the bot before running.
bot.add_cog(Calc(bot))
bot.add_cog(B(bot))
bot.add_cog(Image(bot))
bot.add_cog(AI(bot))
bot.add_cog(youtube(bot))
# bot.add_cog(VoiceChat(bot))
bot.add_cog(Encode(bot))
# Blocking call: runs the event loop until the bot shuts down.
bot.run(TOKEN)
|
/*
wl_types.h - Library for Arduino Wifi shield.
Copyright (c) 2011-2014 Arduino. All right reserved.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* wl_types.h
*
* Created on: Jul 30, 2010
* Author: dlafauci
*/
#ifndef _WL_TYPES_H_
#define _WL_TYPES_H_

#include <inttypes.h>

/* Generic result code returned by WiFi shield driver calls. */
typedef enum {
	WL_FAILURE = -1,
	WL_SUCCESS = 1,
} wl_error_code_t;

/* Authentication modes */
enum wl_auth_mode {
	AUTH_MODE_INVALID,
	AUTH_MODE_AUTO,
	AUTH_MODE_OPEN_SYSTEM,
	AUTH_MODE_SHARED_KEY,
	AUTH_MODE_WPA,
	AUTH_MODE_WPA2,
	AUTH_MODE_WPA_PSK,
	AUTH_MODE_WPA2_PSK
};

/* Outcome of an ICMP ping request; negative values are failures. */
typedef enum {
	WL_PING_DEST_UNREACHABLE = -1,
	WL_PING_TIMEOUT = -2,
	WL_PING_UNKNOWN_HOST = -3,
	WL_PING_ERROR = -4
} wl_ping_result_t;

#endif //_WL_TYPES_H_
|
// BUG FIX: `loginByThirdparty` is called in the LoginByThirdparty action
// below but was never imported, causing a ReferenceError at runtime.
import { loginByEmail, loginByThirdparty, logout, getInfo } from 'api/login';
import Cookies from 'js-cookie';

/**
 * Vuex "user" module: authentication/session state plus the login,
 * user-info, logout and role-switching actions.
 */
const user = {
  state: {
    user: '',
    status: '',
    email: '',
    code: '',
    uid: undefined,
    userType: '',
    auth_type: '',
    // Restore the session token from the cookie on page load.
    token: Cookies.get('Admin-Token'),
    name: '',
    avatar: '',
    introduction: '',
    roles: [],
    setting: {
      articlePlatform: []
    }
  },

  mutations: {
    SET_AUTH_TYPE: (state, type) => {
      state.auth_type = type;
    },
    SET_CODE: (state, code) => {
      state.code = code;
    },
    SET_TOKEN: (state, token) => {
      state.token = token;
    },
    SET_UID: (state, uid) => {
      state.uid = uid;
    },
    SET_USERTYPE: (state, userType) => {
      state.userType = userType;
    },
    SET_EMAIL: (state, email) => {
      state.email = email;
    },
    SET_INTRODUCTION: (state, introduction) => {
      state.introduction = introduction;
    },
    SET_SETTING: (state, setting) => {
      state.setting = setting;
    },
    SET_STATUS: (state, status) => {
      state.status = status;
    },
    SET_NAME: (state, name) => {
      state.name = name;
    },
    SET_AVATAR: (state, avatar) => {
      state.avatar = avatar;
    },
    SET_ROLES: (state, roles) => {
      state.roles = roles;
    },
    LOGIN_SUCCESS: () => {
      console.log('login success');
    },
    LOGOUT_USER: (state) => {
      state.user = '';
    }
  },

  actions: {
    // Login with email + password; persists the token in a cookie.
    LoginByEmail({ commit }, userInfo) {
      const email = userInfo.email.trim();
      return new Promise((resolve, reject) => {
        loginByEmail(email, userInfo.password).then((response) => {
          const data = response.data;
          console.log(response.data);
          Cookies.set('Admin-Token', data.token);
          commit('SET_TOKEN', data.token);
          commit('SET_EMAIL', email);
          resolve();
        }).catch((error) => {
          reject(error);
        });
      });
    },

    // Fetch the current user's profile using the stored token.
    GetInfo({ commit, state }) {
      return new Promise((resolve, reject) => {
        getInfo(state.token).then((response) => {
          const data = response.data;
          commit('SET_ROLES', data.role);
          commit('SET_NAME', data.name);
          commit('SET_AVATAR', data.avatar);
          commit('SET_UID', data.uid);
          commit('SET_USERTYPE', data.userType);
          commit('SET_INTRODUCTION', data.introduction);
          resolve(response);
        }).catch((error) => {
          reject(error);
        });
      });
    },

    // Third-party (OAuth) login: exchange the provider code for a token.
    LoginByThirdparty({ commit, state }, code) {
      return new Promise((resolve, reject) => {
        commit('SET_CODE', code);
        loginByThirdparty(state.status, state.email, state.code, state.auth_type).then((response) => {
          commit('SET_TOKEN', response.data.token);
          Cookies.set('Admin-Token', response.data.token);
          resolve();
        }).catch((error) => {
          reject(error);
        });
      });
    },

    // Server-side logout: invalidate the token, clear local session state.
    LogOut({ commit, state }) {
      return new Promise((resolve, reject) => {
        logout(state.token).then(() => {
          commit('SET_TOKEN', '');
          commit('SET_ROLES', []);
          Cookies.remove('Admin-Token');
          resolve();
        }).catch((error) => {
          reject(error);
        });
      });
    },

    // Front-end-only logout (no server call).
    FedLogOut({ commit }) {
      return new Promise((resolve) => {
        commit('SET_TOKEN', '');
        Cookies.remove('Admin-Token');
        // NOTE(review): leftover debug alert — kept for behavioral
        // compatibility; consider removing.
        alert("has logout");
        resolve();
      });
    },

    // Switch the active role at runtime (the role doubles as the token here).
    ChangeRole({ commit }, role) {
      return new Promise((resolve) => {
        commit('SET_ROLES', [role]);
        commit('SET_TOKEN', role);
        Cookies.set('Admin-Token', role);
        resolve();
      });
    }
  }
};

export default user;
|
ace.define("ace/mode/red_highlight_rules",[], function(require, exports, module) {
"use strict";

var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;

// Syntax highlighting rules for the Red programming language.
var RedHighlightRules = function() {
    this.$rules = {
        "start" : [
            // Infix operators surrounded by whitespace.
            // BUG FIX: the "**" / "//" alternation was corrupted to
            // `\*\*\|\/\/` (a literal "**|"); restored to `\*\*|\/\/`.
            {token : "keyword.operator",
             regex: /\s([\-+%/=<>*]|(?:\*\*|\/\/|==|>>>?|<>|<<|=>|<=|=\?))(\s|(?=:))/},
            {token : "string.email", regex : /\w[-\w._]*\@\w[-\w._]*/},
            {token : "value.time", regex : /\b\d+:\d+(:\d+)?/},
            {token : "string.url", regex : /\w[-\w_]*\:(\/\/)?\w[-\w._]*(:\d+)?/},
            {token : "value.date", regex : /(\b\d{1,4}[-/]\d{1,2}[-/]\d{1,2}|\d{1,2}[-/]\d{1,2}[-/]\d{1,4})\b/},
            {token : "value.tuple", regex : /\b\d{1,3}\.\d{1,3}\.\d{1,3}(\.\d{1,3}){0,9}/},
            {token : "value.pair", regex: /[+-]?\d+x[-+]?\d+/},
            {token : "value.binary", regex : /\b2#{([01]{8})+}/},
            {token : "value.binary", regex : /\b64#{([\w/=+])+}/},
            {token : "value.binary", regex : /(16)?#{([\dabcdefABCDEF][\dabcdefABCDEF])*}/},
            {token : "value.issue", regex : /#\w[-\w'*.]*/},
            {token : "value.numeric", regex: /[+-]?\d['\d]*(?:\.\d+)?e[-+]?\d{1,3}\%?(?!\w)/},
            {token : "invalid.illegal", regex: /[+-]?\d['\d]*(?:\.\d+)?\%?[a-zA-Z]/},
            {token : "value.numeric", regex: /[+-]?\d['\d]*(?:\.\d+)?\%?(?![a-zA-Z])/},
            {token : "value.character", regex : /#"(\^[-@/_~^"HKLM\[]|.)"/},
            {token : "string.file", regex : /%[-\w\.\/]+/},
            {token : "string.tag", regex : /</, next : "tag"},
            {token : "string", regex : /"/, next : "string"},
            {token : "string.other", regex : "{", next : "string.other"},
            {token : "comment", regex : "comment [{]", next : "comment"},
            {token : "comment", regex : /;.+$/},
            {token : "paren.map-start", regex : "#\\("},
            {token : "paren.block-start", regex : "[\\[]"},
            {token : "paren.block-end", regex : "[\\]]"},
            {token : "paren.parens-start", regex : "[(]"},
            {token : "paren.parens-end", regex : "\\)"},
            {token : "keyword", regex : "/local|/external"},
            // BUG FIX: "macrolocal" lost its separator — the Red
            // preprocessor defines #macro and #local as two directives.
            {token : "keyword.preprocessor", regex : "#(if|either|" +
                "switch|case|include|do|macro|local|reset|process|trace)"},
            {token : "constant.datatype!", regex :
                "(?:datatype|unset|none|logic|block|paren|string|" +
                "file|url|char|integer|float|word|set-word|lit-word|" +
                "get-word|refinement|issue|native|action|op|function|" +
                "path|lit-path|set-path|get-path|routine|bitset|point|" +
                "object|typeset|error|vector|hash|pair|percent|tuple|" +
                "map|binary|time|tag|email|handle|date|image|event|" +
                "series|any-type|number|any-object|scalar|" +
                "any-string|any-word|any-function|any-block|any-list|" +
                "any-path|immediate|all-word|internal|external|default)!(?![-!?\\w~])"},
            {token : "keyword.function", regex :
                "\\b(?:collect|quote|on-parse-event|math|last|source|expand|" +
                "show|context|object|input|quit|dir|make-dir|cause-error|" +
                "error\\?|none\\?|block\\?|any-list\\?|word\\?|char\\?|" +
                "any-string\\?|series\\?|binary\\?|attempt|url\\?|" +
                "string\\?|suffix\\?|file\\?|object\\?|body-of|first|" +
                "second|third|mod|clean-path|dir\\?|to-red-file|" +
                "normalize-dir|list-dir|pad|empty\\?|dirize|offset\\?|" +
                "what-dir|expand-directives|load|split-path|change-dir|" +
                "to-file|path-thru|save|load-thru|View|float\\?|to-float|" +
                "charset|\\?|probe|set-word\\?|q|words-of|replace|repend|" +
                "react|function\\?|spec-of|unset\\?|halt|op\\?|" +
                "any-function\\?|to-paren|tag\\?|routine|class-of|" +
                "size-text|draw|handle\\?|link-tabs-to-parent|" +
                "link-sub-to-parent|on-face-deep-change*|" +
                "update-font-faces|do-actor|do-safe|do-events|pair\\?|" +
                "foreach-face|hex-to-rgb|issue\\?|alter|path\\?|" +
                "typeset\\?|datatype\\?|set-flag|layout|extract|image\\?|" +
                "get-word\\?|to-logic|to-set-word|to-block|center-face|" +
                "dump-face|request-font|request-file|request-dir|rejoin|" +
                "ellipsize-at|any-block\\?|any-object\\?|map\\?|keys-of|" +
                "a-an|also|parse-func-spec|help-string|what|routine\\?|" +
                "action\\?|native\\?|refinement\\?|common-substr|" +
                "red-complete-file|red-complete-path|unview|comment|\\?\\?|" +
                "fourth|fifth|values-of|bitset\\?|email\\?|get-path\\?|" +
                "hash\\?|integer\\?|lit-path\\?|lit-word\\?|logic\\?|" +
                "paren\\?|percent\\?|set-path\\?|time\\?|tuple\\?|date\\?|" +
                "vector\\?|any-path\\?|any-word\\?|number\\?|immediate\\?|" +
                "scalar\\?|all-word\\?|to-bitset|to-binary|to-char|to-email|" +
                "to-get-path|to-get-word|to-hash|to-integer|to-issue|" +
                "to-lit-path|to-lit-word|to-map|to-none|to-pair|to-path|" +
                "to-percent|to-refinement|to-set-path|to-string|to-tag|" +
                "to-time|to-typeset|to-tuple|to-unset|to-url|to-word|" +
                "to-image|to-date|parse-trace|modulo|eval-set-path|" +
                "extract-boot-args|flip-exe-flag|split|do-file|" +
                "exists-thru\\?|read-thru|do-thru|cos|sin|tan|acos|asin|" +
                "atan|atan2|sqrt|clear-reactions|dump-reactions|react\\?|" +
                "within\\?|overlap\\?|distance\\?|face\\?|metrics\\?|" +
                "get-scroller|insert-event-func|remove-event-func|" +
                "set-focus|help|fetch-help|about|ls|ll|pwd|cd|" +
                "red-complete-input|matrix)(?![-!?\\w~])"},
            {token : "keyword.action", regex :
                "\\b(?:to|remove|copy|insert|change|clear|move|poke|put|" +
                "random|reverse|sort|swap|take|trim|add|subtract|" +
                "divide|multiply|make|reflect|form|mold|modify|" +
                "absolute|negate|power|remainder|round|even\\?|odd\\?|" +
                "and~|complement|or~|xor~|append|at|back|find|skip|" +
                "tail|head|head\\?|index\\?|length\\?|next|pick|" +
                "select|tail\\?|delete|read|write)(?![-_!?\\w~])"
            },
            {token : "keyword.native", regex :
                "\\b(?:not|any|set|uppercase|lowercase|checksum|" +
                "try|catch|browse|throw|all|as|" +
                "remove-each|func|function|does|has|do|reduce|" +
                "compose|get|print|prin|equal\\?|not-equal\\?|" +
                "strict-equal\\?|lesser\\?|greater\\?|lesser-or-equal\\?|" +
                "greater-or-equal\\?|same\\?|type\\?|stats|bind|in|parse|" +
                "union|unique|intersect|difference|exclude|" +
                "complement\\?|dehex|negative\\?|positive\\?|max|min|" +
                "shift|to-hex|sine|cosine|tangent|arcsine|arccosine|" +
                "arctangent|arctangent2|NaN\\?|zero\\?|log-2|log-10|log-e|" +
                "exp|square-root|construct|value\\?|as-pair|" +
                "extend|debase|enbase|to-local-file|" +
                "wait|unset|new-line|new-line\\?|context\\?|set-env|" +
                "get-env|list-env|now|sign\\?|call|size\\?)(?![-!?\\w~])"
            },
            {token : "keyword", regex :
                "\\b(?:Red(?=\\s+\\[)|object|context|make|self|keep)(?![-!?\\w~])"
            },
            {token: "variable.language", regex : "this"},
            {token: "keyword.control", regex :
                "(?:while|if|return|case|unless|either|until|loop|repeat|" +
                "forever|foreach|forall|switch|break|continue|exit)(?![-!?\\w~])"},
            {token: "constant.language", regex :
                "\\b(?:true|false|on|off|yes|none|no)(?![-!?\\w~])"},
            {token: "constant.numeric", regex : /\bpi(?![^-_])/},
            {token: "constant.character", regex : "\\b(space|tab|newline|cr|lf)(?![-!?\\w~])"},
            // BUG FIX: "\s" in a JS string literal is just "s", so this rule
            // matched "s(or|and|xor|is)s"; the backslashes must be escaped.
            {token: "keyword.operator", regex : "\\s(or|and|xor|is)\\s"},
            {token : "variable.get-path", regex : /:\w[-\w'*.?!]*(\/\w[-\w'*.?!]*)(\/\w[-\w'*.?!]*)*/},
            {token : "variable.set-path", regex : /\w[-\w'*.?!]*(\/\w[-\w'*.?!]*)(\/\w[-\w'*.?!]*)*:/},
            {token : "variable.lit-path", regex : /'\w[-\w'*.?!]*(\/\w[-\w'*.?!]*)(\/\w[-\w'*.?!]*)*/},
            {token : "variable.path", regex : /\w[-\w'*.?!]*(\/\w[-\w'*.?!]*)(\/\w[-\w'*.?!]*)*/},
            {token : "variable.refinement", regex : /\/\w[-\w'*.?!]*/},
            {token : "keyword.view.style", regex :
                "\\b(?:window|base|button|text|field|area|check|" +
                "radio|progress|slider|camera|text-list|" +
                "drop-list|drop-down|panel|group-box|" +
                "tab-panel|h1|h2|h3|h4|h5|box|image|init)(?![-!?\\w~])"},
            {token : "keyword.view.event", regex :
                "\\b(?:detect|on-detect|time|on-time|drawing|on-drawing|" +
                "scroll|on-scroll|down|on-down|up|on-up|mid-down|" +
                "on-mid-down|mid-up|on-mid-up|alt-down|on-alt-down|" +
                "alt-up|on-alt-up|aux-down|on-aux-down|aux-up|" +
                "on-aux-up|wheel|on-wheel|drag-start|on-drag-start|" +
                "drag|on-drag|drop|on-drop|click|on-click|dbl-click|" +
                "on-dbl-click|over|on-over|key|on-key|key-down|" +
                "on-key-down|key-up|on-key-up|ime|on-ime|focus|" +
                "on-focus|unfocus|on-unfocus|select|on-select|" +
                "change|on-change|enter|on-enter|menu|on-menu|close|" +
                "on-close|move|on-move|resize|on-resize|moving|" +
                "on-moving|resizing|on-resizing|zoom|on-zoom|pan|" +
                "on-pan|rotate|on-rotate|two-tap|on-two-tap|" +
                "press-tap|on-press-tap|create|on-create|created|on-created)(?![-!?\\w~])"},
            {token : "keyword.view.option", regex :
                "\\b(?:all-over|center|color|default|disabled|down|" +
                "flags|focus|font|font-color|font-name|" +
                "font-size|hidden|hint|left|loose|name|" +
                "no-border|now|rate|react|select|size|space)(?![-!?\\w~])"},
            {token : "constant.other.colour", regex : "\\b(?:Red|white|transparent|" +
                "black|gray|aqua|beige|blue|brick|brown|coal|coffee|" +
                "crimson|cyan|forest|gold|green|ivory|khaki|leaf|linen|" +
                "magenta|maroon|mint|navy|oldrab|olive|orange|papaya|" +
                "pewter|pink|purple|reblue|rebolor|sienna|silver|sky|" +
                "snow|tanned|teal|violet|water|wheat|yello|yellow|glass)(?![-!?\\w~])"},
            {token : "variable.get-word", regex : /\:\w[-\w'*.?!]*/},
            {token : "variable.set-word", regex : /\w[-\w'*.?!]*\:/},
            {token : "variable.lit-word", regex : /'\w[-\w'*.?!]*/},
            {token : "variable.word", regex : /\b\w+[-\w'*.!?]*/},
            {caseInsensitive: true}
        ],
        // Multi-line states entered from "start"; each returns on its closer.
        "string" : [
            {token : "string", regex : /"/, next : "start"},
            {defaultToken : "string"}
        ],
        "string.other" : [
            {token : "string.other", regex : /}/, next : "start"},
            {defaultToken : "string.other"}
        ],
        "tag" : [
            {token : "string.tag", regex : />/, next : "start"},
            {defaultToken : "string.tag"}
        ],
        "comment" : [
            {token : "comment", regex : /}/, next : "start"},
            {defaultToken : "comment"}
        ]
    };
};
oop.inherits(RedHighlightRules, TextHighlightRules);
exports.RedHighlightRules = RedHighlightRules;
});
ace.define("ace/mode/folding/cstyle",[], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var Range = require("../../range").Range;
var BaseFoldMode = require("./fold_mode").FoldMode;
var FoldMode = exports.FoldMode = function(commentRegex) {
if (commentRegex) {
this.foldingStartMarker = new RegExp(
this.foldingStartMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.start)
);
this.foldingStopMarker = new RegExp(
this.foldingStopMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.end)
);
}
};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
this.foldingStartMarker = /([\{\[\(])[^\}\]\)]*$|^\s*(\/\*)/;
this.foldingStopMarker = /^[^\[\{\(]*([\}\]\)])|^[\s\*]*(\*\/)/;
this.singleLineBlockCommentRe= /^\s*(\/\*).*\*\/\s*$/;
this.tripleStarBlockCommentRe = /^\s*(\/\*\*\*).*\*\/\s*$/;
this.startRegionRe = /^\s*(\/\*|\/\/)#?region\b/;
this._getFoldWidgetBase = this.getFoldWidget;
this.getFoldWidget = function(session, foldStyle, row) {
var line = session.getLine(row);
if (this.singleLineBlockCommentRe.test(line)) {
if (!this.startRegionRe.test(line) && !this.tripleStarBlockCommentRe.test(line))
return "";
}
var fw = this._getFoldWidgetBase(session, foldStyle, row);
if (!fw && this.startRegionRe.test(line))
return "start"; // lineCommentRegionStart
return fw;
};
this.getFoldWidgetRange = function(session, foldStyle, row, forceMultiline) {
var line = session.getLine(row);
if (this.startRegionRe.test(line))
return this.getCommentRegionBlock(session, line, row);
var match = line.match(this.foldingStartMarker);
if (match) {
var i = match.index;
if (match[1])
return this.openingBracketBlock(session, match[1], row, i);
var range = session.getCommentFoldRange(row, i + match[0].length, 1);
if (range && !range.isMultiLine()) {
if (forceMultiline) {
range = this.getSectionRange(session, row);
} else if (foldStyle != "all")
range = null;
}
return range;
}
if (foldStyle === "markbegin")
return;
var match = line.match(this.foldingStopMarker);
if (match) {
var i = match.index + match[0].length;
if (match[1])
return this.closingBracketBlock(session, match[1], row, i);
return session.getCommentFoldRange(row, i, -1);
}
};
this.getSectionRange = function(session, row) {
var line = session.getLine(row);
var startIndent = line.search(/\S/);
var startRow = row;
var startColumn = line.length;
row = row + 1;
var endRow = row;
var maxRow = session.getLength();
while (++row < maxRow) {
line = session.getLine(row);
var indent = line.search(/\S/);
if (indent === -1)
continue;
if (startIndent > indent)
break;
var subRange = this.getFoldWidgetRange(session, "all", row);
if (subRange) {
if (subRange.start.row <= startRow) {
break;
} else if (subRange.isMultiLine()) {
row = subRange.end.row;
} else if (startIndent == indent) {
break;
}
}
endRow = row;
}
return new Range(startRow, startColumn, endRow, session.getLine(endRow).length);
};
this.getCommentRegionBlock = function(session, line, row) {
var startColumn = line.search(/\s*$/);
var maxRow = session.getLength();
var startRow = row;
var re = /^\s*(?:\/\*|\/\/|--)#?(end)?region\b/;
var depth = 1;
while (++row < maxRow) {
line = session.getLine(row);
var m = re.exec(line);
if (!m) continue;
if (m[1]) depth--;
else depth++;
if (!depth) break;
}
var endRow = row;
if (endRow > startRow) {
return new Range(startRow, startColumn, endRow, line.length);
}
};
}).call(FoldMode.prototype);
});
ace.define("ace/mode/matching_brace_outdent",[], function(require, exports, module) {
"use strict";

var Range = require("../range").Range;

// Auto-outdents a line when the user types "}", re-aligning it with the
// indentation of the matching opening brace (vendored from Ace).
var MatchingBraceOutdent = function() {};

(function() {

    // Outdent only if the line is all whitespace and the input adds "}".
    this.checkOutdent = function(line, input) {
        if (! /^\s+$/.test(line))
            return false;

        return /^\s*\}/.test(input);
    };

    this.autoOutdent = function(doc, row) {
        var line = doc.getLine(row);
        var match = line.match(/^(\s*\})/);

        if (!match) return 0;

        var column = match[1].length;
        var openBracePos = doc.findMatchingBracket({row: row, column: column});

        if (!openBracePos || openBracePos.row == row) return 0;

        // Replace the leading whitespace with the opening brace's indent.
        var indent = this.$getIndent(doc.getLine(openBracePos.row));
        doc.replace(new Range(row, 0, row, column-1), indent);
    };

    this.$getIndent = function(line) {
        return line.match(/^\s*/)[0];
    };

}).call(MatchingBraceOutdent.prototype);

exports.MatchingBraceOutdent = MatchingBraceOutdent;
});
ace.define("ace/mode/red",[], function(require, exports, module) {
"use strict";

var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var RedHighlightRules = require("./red_highlight_rules").RedHighlightRules;
var RedFoldMode = require("./folding/cstyle").FoldMode;
var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent;
var Range = require("../range").Range;

// Ace editor mode for the Red language: highlighting, folding, outdent.
var Mode = function() {
    this.HighlightRules = RedHighlightRules;
    this.foldingRules = new RedFoldMode();
    this.$outdent = new MatchingBraceOutdent();
    this.$behaviour = this.$defaultBehaviour;
};
oop.inherits(Mode, TextMode);

(function() {

    // Red uses ";" line comments and "comment { ... }" block comments.
    this.lineCommentStart = ";";
    this.blockComment = { start: "comment {", end: "}" };

    // Indent one level after a line ending with an open bracket/paren.
    this.getNextLineIndent = function(state, line, tab) {
        var indent = this.$getIndent(line);

        var tokenizedLine = this.getTokenizer().getLineTokens(line, state);
        var tokens = tokenizedLine.tokens;
        var endState = tokenizedLine.state;

        if (tokens.length && tokens[tokens.length-1].type == "comment") {
            return indent;
        }

        if (state == "start") {
            var match = line.match(/^.*[\{\[\(]\s*$/);
            if (match) {
                indent += tab;
            }
        } else if (state == "doc-start") {
            if (endState == "start") {
                return "";
            }
            var match = line.match(/^\s*(\/?)\*/);
            if (match) {
                if (match[1]) {
                    indent += " ";
                }
                indent += "* ";
            }
        }

        return indent;
    };

    this.checkOutdent = function(state, line, input) {
        return this.$outdent.checkOutdent(line, input);
    };

    this.autoOutdent = function(state, doc, row) {
        this.$outdent.autoOutdent(doc, row);
    };

    this.$id = "ace/mode/red";
}).call(Mode.prototype);

exports.Mode = Mode;
});

// Loader shim: expose the mode through CommonJS when available.
(function() {
    ace.require(["ace/mode/red"], function(m) {
        if (typeof module == "object" && typeof exports == "object" && module) {
            module.exports = m;
        }
    });
})();
|
'''
Created on Nov 11, 2018
@author: nilson.nieto
'''
def custumgen(x, y):
    """Yield successive values starting at x, incrementing by 1, while < y."""
    current = x
    while current < y:
        yield current
        current += 1


for i in custumgen(1, 4):
    print(i)

# A fresh generator is created here, so this prints its first value (7).
print(next(custumgen(7, 13)))
|
from django.db import models
import django.contrib.auth.models as auth
import channels
class Room(models.Model):
    """A lecture room: a uniquely named channel owned by a lecturer."""
    name = models.CharField(max_length=50,unique=True)
    # Users allowed to moderate (e.g. delete) posts in this room.
    moderator = models.ManyToManyField(auth.User,related_name='moderates')
    lecturer = models.ForeignKey(auth.User,on_delete=models.CASCADE)
    def __str__(self):
        return self.name
    def get_posts(self):
        """Return serialized dicts (via Post.get) for all posts in this room."""
        return [post.get() for post in Post.objects.filter(room=self)]
    def channel(self):
        # Channels group used to broadcast question updates for this room.
        return channels.Group('interlecture.questions.room%d'%self.id)
    def request_access_rights(self,user,rights):
        # No room-level restriction is enforced — presumably everyone may
        # act on rooms; TODO confirm this is intentional.
        pass
    def get(self):
        """Return a JSON-serializable summary of this room."""
        return {'id':self.id,'name':self.name,'lecturer':self.lecturer.username}
class Post(models.Model):
    """A question/comment posted in a Room, optionally replying to a parent."""
    room = models.ForeignKey(Room,on_delete=models.CASCADE)
    user = models.ForeignKey(auth.User,on_delete=models.CASCADE)
    text = models.CharField(max_length=255)
    # Null parent means a top-level post; non-null means a reply.
    parent_post = models.ForeignKey('self',on_delete=models.CASCADE,null=True)
    supporters = models.ManyToManyField(auth.User,related_name='supported')
    datetime = models.DateTimeField(auto_now_add=True)
    def get(self,user=None):
        """Return a JSON-serializable dict of this post.

        The ``user`` parameter is currently unused.
        """
        return {
            'id':self.id,
            'room':self.room.name,
            'user':self.user.username,
            'text':self.text,
            'datetime':str(self.datetime),
            'parent_post':self.parent_post.id if self.parent_post else None,
            'supporters':self.supporters.count()
        }
    def request_access_rights(self,user,rights):
        """Raise NoAccessRightsException unless ``user`` may delete this post.

        Deletion is allowed for the post's author and room moderators.
        Rights other than 'delete' are not checked.
        """
        if rights=='delete':
            if not (self.user==user or self.room.moderator.filter(id=user.id).exists()):
                from engine.access import NoAccessRightsException
                raise NoAccessRightsException(rights,self.id)
|
/**
 * @jsx React.DOM
 * @copyright Prometheus Research, LLC 2014
 */
'use strict';

var Grid = require('./Grid');
var Row = require('./Row');
var Cell = require('./Cell');

// Export Grid as the package entry point with Row and Cell attached as
// properties (module.exports === Grid, so this matches the original
// `module.exports.Row = Row` form exactly).
Grid.Row = Row;
Grid.Cell = Cell;
module.exports = Grid;
|
"""
Test Command
------------
$ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot
"""
import os
from collections import deque
import multiprocessing as mp
import time
import shutil
import sys
from nose.tools import assert_equal, assert_true, assert_false, \
assert_greater, assert_not_equal
import pandas as pd
from IPython.display import display, HTML
from plot_playground.stats import linux_stats_plot
from plot_playground.common import jupyter_helper
from plot_playground.common import selenium_helper
from plot_playground.common import img_helper
from plot_playground.common import d3_helper
from plot_playground.common import settings
TMP_TEST_LOG_DIR = './log_plotplayground_stats/test/'
def test__get_log_file_path():
    """
    Verify that the helper builds a CSV path inside the given directory.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__get_log_file_path --skip_jupyter 1
    """
    log_file_path = linux_stats_plot._get_log_file_path(
        log_dir_path='./log/')
    assert_true(log_file_path.startswith('./log/'))
    assert_true(log_file_path.endswith('.csv'))
def test__exec_gpustat_command():
    """
    Verify the disabled-flag short circuit: when GPU stats are disabled
    the command must return an empty string without touching gpustat.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__exec_gpustat_command --skip_jupyter 1

    Notes
    -----
    Since this function depends on the OS and GPU, most of the functions
    are tested manually in each environment.
    """
    # Save and restore the module-level flag so other tests are unaffected.
    pre_bool = linux_stats_plot.is_gpu_stats_disabled
    linux_stats_plot.is_gpu_stats_disabled = True
    command_result = linux_stats_plot._exec_gpustat_command()
    assert_equal(command_result, '')
    linux_stats_plot.is_gpu_stats_disabled = pre_bool
def test__get_gpu_num():
    """
    Verify GPU counting against stubbed gpustat output: empty output and
    error output give 0 GPUs; one or two device lines give 1 or 2.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__get_gpu_num --skip_jupyter 1
    """
    # Monkeypatch the command executor; restored at the end of the test.
    pre_func = linux_stats_plot._exec_gpustat_command

    def test_func_1():
        # No output at all -> no GPUs.
        return ''
    linux_stats_plot._exec_gpustat_command = test_func_1
    gpu_num = linux_stats_plot._get_gpu_num()
    assert_equal(gpu_num, 0)

    def test_func_2():
        # gpustat error message -> treated as no GPUs.
        return 'Error on querying NVIDIA devices. Use --debug flag for details'
    linux_stats_plot._exec_gpustat_command = test_func_2
    gpu_num = linux_stats_plot._get_gpu_num()
    assert_equal(gpu_num, 0)

    def test_func_3():
        # Header line plus one "[0] ..." device line -> one GPU.
        return "28cb5cca2ca4 Wed Feb 20 07:04:22 2019\n[0] Tesla K80 | 31'C, 0 % | 0 / 11441 MB |\n"
    linux_stats_plot._exec_gpustat_command = test_func_3
    gpu_num = linux_stats_plot._get_gpu_num()
    assert_equal(gpu_num, 1)

    def test_func_4():
        # Two device lines -> two GPUs.
        return "28cb5cca2ca4 Wed Feb 20 07:04:22 2019\n[0] Tesla K80 | 31'C, 0 % | 0 / 11441 MB |\n[1] Tesla K80 | 31'C, 0 % | 0 / 11441 MB |\n"
    linux_stats_plot._exec_gpustat_command = test_func_4
    gpu_num = linux_stats_plot._get_gpu_num()
    assert_equal(gpu_num, 2)

    linux_stats_plot._exec_gpustat_command = pre_func
def test__get_memory_usage():
    """
    Smoke-test: memory usage is reported as a positive integer.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__get_memory_usage --skip_jupyter 1
    """
    memory_usage = linux_stats_plot._get_memory_usage()
    assert_true(isinstance(memory_usage, int))
    assert_greater(memory_usage, 0)
def test__get_disk_usage():
    """
    Smoke-test: disk usage is reported as a positive float (GB).

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__get_disk_usage --skip_jupyter 1
    """
    disk_usage_gb = linux_stats_plot._get_disk_usage()
    assert_true(isinstance(disk_usage_gb, float))
    assert_greater(disk_usage_gb, 0)
def test__get_gpustat_line_str_by_gpu_idx():
    """
    Verify that the device line matching a GPU index is extracted from
    stubbed gpustat output.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__get_gpustat_line_str_by_gpu_idx --skip_jupyter 1
    """
    # Monkeypatch the command executor; restored at the end of the test.
    pre_func = linux_stats_plot._exec_gpustat_command

    def test_func():
        return "28cb5cca2ca4 Wed Feb 20 07:04:22 2019\n[0] Tesla K80 | 31'C, 0 % | 0 / 11441 MB |\n[1] Tesla K80 | 31'C, 0 % | 0 / 11441 MB |\n"
    linux_stats_plot._exec_gpustat_command = test_func
    target_line_str = linux_stats_plot._get_gpustat_line_str_by_gpu_idx(
        gpu_idx=1
    )
    assert_equal(
        target_line_str,
        "[1] Tesla K80 | 31'C, 0 % | 0 / 11441 MB |"
    )

    linux_stats_plot._exec_gpustat_command = pre_func
def test__get_gpu_memory_usage():
    """
    Verify that the used-memory figure (MB) is parsed per GPU index from
    stubbed gpustat output.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__get_gpu_memory_usage --skip_jupyter 1
    """
    # Monkeypatch the command executor; restored at the end of the test.
    pre_func = linux_stats_plot._exec_gpustat_command

    def test_func():
        return "28cb5cca2ca4 Wed Feb 20 07:04:22 2019\n[0] Tesla K80 | 31'C, 0 % | 110 / 11441 MB |\n[1] Tesla K80 | 31'C, 0 % | 250 / 11441 MB |\n"
    linux_stats_plot._exec_gpustat_command = test_func
    gpu_memory_usage_mb = linux_stats_plot._get_gpu_memory_usage(
        gpu_idx=0
    )
    assert_equal(gpu_memory_usage_mb, 110)
    gpu_memory_usage_mb = linux_stats_plot._get_gpu_memory_usage(
        gpu_idx=1
    )
    assert_equal(gpu_memory_usage_mb, 250)

    linux_stats_plot._exec_gpustat_command = pre_func
def test__save_csv():
    """
    Verify that metric deques are written to a CSV with one column per
    metric (memory, disk, and one per GPU) and one row per sample.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__save_csv --skip_jupyter 1
    """
    log_dir_path = TMP_TEST_LOG_DIR
    os.makedirs(log_dir_path, exist_ok=True)
    log_file_path = linux_stats_plot._get_log_file_path(
        log_dir_path=log_dir_path)
    # Start from a clean slate.
    if os.path.exists(log_file_path):
        os.remove(log_file_path)
    memory_usage_deque = deque([1, 2, 3], maxlen=3)
    disk_usage_deque = deque([4, 5, 6], maxlen=3)
    gpu_memory_usage_deque_list = [
        deque([5, 6, 7], maxlen=3),
        deque([8, 9, 10], maxlen=3),
    ]
    linux_stats_plot._save_csv(
        memory_usage_deque=memory_usage_deque,
        disk_usage_deque=disk_usage_deque,
        gpu_memory_usage_deque_list=gpu_memory_usage_deque_list,
        log_file_path=log_file_path)
    assert_true(
        os.path.exists(log_file_path)
    )
    df = pd.read_csv(log_file_path)
    assert_equal(len(df), 3)
    assert_equal(
        df[linux_stats_plot._COLUMN_NAME_MEMORY_USAGE].tolist(),
        [1, 2, 3]
    )
    assert_equal(
        df[linux_stats_plot._COLUMN_NAME_DISK_USAGE].tolist(),
        [4, 5, 6]
    )
    # Per-GPU columns are named from a format string keyed by gpu_idx.
    gpu_column_name_1 = linux_stats_plot.\
        _COLUMN_NAME_GPU_MEMORY_USAGE_FORMAT.format(gpu_idx=0)
    assert_equal(
        df[gpu_column_name_1].tolist(),
        [5, 6, 7]
    )
    gpu_column_name_2 = linux_stats_plot.\
        _COLUMN_NAME_GPU_MEMORY_USAGE_FORMAT.format(gpu_idx=1)
    assert_equal(
        df[gpu_column_name_2].tolist(),
        [8, 9, 10]
    )
    # Clean up the file created by this test.
    if os.path.exists(log_file_path):
        os.remove(log_file_path)
def test__start_plot_data_updating():
    """
    Test for _start_plot_data_updating: run the updater in a child
    process for a while and check that the CSV log it writes has the
    expected row count and stat columns.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__start_plot_data_updating --skip_jupyter 1
    """
    log_dir_path = TMP_TEST_LOG_DIR
    os.makedirs(log_dir_path, exist_ok=True)
    log_file_path = linux_stats_plot._get_log_file_path(
        log_dir_path=log_dir_path)
    # Remove any previous log so the row-count assertion is reliable.
    if os.path.exists(log_file_path):
        os.remove(log_file_path)
    parent_pid = os.getpid()
    # Pin the GPU flag to a known value for the test; restored at the end.
    pre_disabled_val = linux_stats_plot.is_gpu_stats_disabled
    linux_stats_plot.is_gpu_stats_disabled = False
    process = mp.Process(
        target=linux_stats_plot._start_plot_data_updating,
        kwargs={
            'interval_seconds': 1,
            'buffer_size': 2,
            'log_dir_path': log_dir_path,
            'parent_pid': parent_pid,
        })
    # Bug fix: the attribute is ``daemon``; the original assigned a
    # misspelled ``deamon`` attribute, which had no effect, so the child
    # was never actually daemonized.
    process.daemon = True
    process.start()
    # Give the updater time to fill its buffer (size 2) and flush the CSV.
    time.sleep(25)
    process.terminate()
    df = pd.read_csv(log_file_path)
    # buffer_size rows are expected in the flushed log.
    assert_equal(len(df), 2)
    is_in = linux_stats_plot._COLUMN_NAME_MEMORY_USAGE in df.columns
    assert_true(is_in)
    is_in = linux_stats_plot._COLUMN_NAME_DISK_USAGE in df.columns
    assert_true(is_in)
    linux_stats_plot.is_gpu_stats_disabled = pre_disabled_val
    shutil.rmtree(log_dir_path, ignore_errors=True)
def test__exit_if_parent_process_has_died():
    """
    Test for _exit_if_parent_process_has_died: the watcher process must
    keep running while its parent PID is alive and exit when it is not.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__exit_if_parent_process_has_died --skip_jupyter 1
    """
    # Case 1: a live parent PID -> the watcher stays alive.
    parent_pid = os.getpid()
    kwargs = {'parent_pid': parent_pid}
    process = mp.Process(
        target=linux_stats_plot._exit_if_parent_process_has_died,
        kwargs=kwargs
    )
    process.start()
    assert_true(process.is_alive())
    process.terminate()
    # Case 2: PID -1 can never be a live parent -> the watcher must exit
    # on its own within the sleep window.
    kwargs['parent_pid'] = -1
    process = mp.Process(
        target=linux_stats_plot._exit_if_parent_process_has_died,
        kwargs=kwargs
    )
    process.start()
    time.sleep(10)
    assert_false(process.is_alive())
def test__fill_deque_by_initial_value():
    """
    Test for _fill_deque_by_initial_value: a non-empty deque must be left
    untouched, while an empty one is padded to buffer_size with the
    initial value.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__fill_deque_by_initial_value --skip_jupyter 1
    """
    buffer_size = 3
    # Already has an element -> no filling should happen.
    deque_obj = deque([100], maxlen=buffer_size)
    deque_obj = linux_stats_plot._fill_deque_by_initial_value(
        deque_obj=deque_obj,
        initial_value=200,
        buffer_size=buffer_size)
    assert_equal(len(deque_obj), 1)
    # Empty deque -> filled to buffer_size with initial_value.
    deque_obj = deque([], maxlen=buffer_size)
    deque_obj = linux_stats_plot._fill_deque_by_initial_value(
        deque_obj=deque_obj,
        initial_value=200,
        buffer_size=buffer_size
    )
    assert_equal(len(deque_obj), 3)
    for value in deque_obj:
        assert_equal(value, 200)
def _error_func():
    """
    A function to generate an error and to confirm that error contents
    are added to the file.

    Intended to be run in a child process (see test__set_error_setting):
    it redirects stderr to a file via _set_error_setting and then raises.
    """
    linux_stats_plot._set_error_setting(
        log_dir_path=TMP_TEST_LOG_DIR, save_error_to_file=True)
    raise Exception('error test.')
def test__set_error_setting():
    """
    Test for _set_error_setting: with save_error_to_file=False stderr
    stays under the test runner's control; with True (exercised in a
    child process via _error_func) errors must land in the log file.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__set_error_setting --skip_jupyter 1
    """
    linux_stats_plot._set_error_setting(
        log_dir_path=TMP_TEST_LOG_DIR,
        save_error_to_file=False)
    # When not saving to file, stderr should still be the nose-managed
    # stream (its repr contains 'nose').
    is_in = 'nose' in str(sys.stderr)
    assert_true(is_in)
    # Run the raising helper in a child process so its stderr redirection
    # cannot pollute this process.
    process = mp.Process(target=_error_func)
    process.start()
    process.join()
    error_log_path = os.path.join(
        TMP_TEST_LOG_DIR, linux_stats_plot.ERR_FILE_NAME)
    with open(error_log_path, 'r') as f:
        error_log = f.read()
    # The child's traceback must have been written to the error file.
    assert_not_equal(error_log, '')
    # Restore the real stderr for subsequent tests.
    sys.stderr = sys.__stderr__
def test__print_error_if_exists():
    """
    Test for _print_error_if_exists: when an error log file exists its
    contents must be printed to stdout. stdout is temporarily redirected
    to a file so the printed text can be asserted on.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__print_error_if_exists --skip_jupyter 1
    """
    error_log_path = os.path.join(
        TMP_TEST_LOG_DIR, linux_stats_plot.ERR_FILE_NAME)
    if os.path.exists(error_log_path):
        os.remove(error_log_path)
    tmp_stdout_path = os.path.join(TMP_TEST_LOG_DIR, 'tmp.log')
    if os.path.exists(tmp_stdout_path):
        os.remove(tmp_stdout_path)
    os.makedirs(TMP_TEST_LOG_DIR, exist_ok=True)
    # Capture everything printed from here on into tmp.log.
    sys.stdout = open(tmp_stdout_path, 'w')
    with open(error_log_path, 'w') as f:
        f.write('test error message')
    linux_stats_plot._print_error_if_exists(
        log_dir_path=TMP_TEST_LOG_DIR)
    # Flush/close the capture file and restore the real stdout before
    # asserting, so test output itself is visible again.
    sys.stdout.close()
    sys.stdout = sys.__stdout__
    with open(tmp_stdout_path, 'r') as f:
        printed_log = f.read()
    is_in = 'test error message' in printed_log
    assert_true(is_in)
    if os.path.exists(tmp_stdout_path):
        os.remove(tmp_stdout_path)
def test_display_plot():
    """
    Test for display_plot: render the plot in a test Jupyter notebook,
    screenshot the SVG via selenium, and compare it against the expected
    image by histogram similarity.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test_display_plot
    """
    source_code = """
from plot_playground.tests.test_linux_stats_plot import display_test_plot
display_test_plot()
"""
    jupyter_helper.update_ipynb_test_source_code(
        source_code=source_code
    )
    jupyter_helper.open_test_jupyter_note_book()
    jupyter_helper.run_test_code(sleep_seconds=10)
    # Hide notebook chrome so only the plot is screenshotted.
    jupyter_helper.hide_header()
    jupyter_helper.hide_input_cell()
    selenium_helper.driver.set_window_size(width=1400, height=1300)
    # Poll for the SVG element; rendering is asynchronous, so retry up to
    # 5 times with a 3-second pause between attempts.
    count = 0
    while True:
        try:
            svg_elem = selenium_helper.driver.find_element_by_id(
                settings.TEST_SVG_ELEM_ID
            )
            break
        except Exception:
            count += 1
            if count > 5:
                break
            time.sleep(3)
            continue
    selenium_helper.save_target_elem_screenshot(
        target_elem=svg_elem)
    expected_img_path = img_helper.get_test_expected_img_path(
        file_name='stats_linux_stats_plot_display_plot')
    similarity = img_helper.compare_img_hist(
        img_path_1=selenium_helper.DEFAULT_TEST_IMG_PATH,
        img_path_2=expected_img_path)
    # Loose threshold: the screenshot only needs to broadly match.
    assert_greater(similarity, 0.8)
    selenium_helper.exit_webdriver()
    # The function itself must also return plot metadata.
    plot_meta = display_test_plot()
    assert_true(
        isinstance(plot_meta, d3_helper.PlotMeta)
    )
    jupyter_helper.empty_test_ipynb_code_cell()
def display_test_plot():
    """
    Display a test plot.

    Resets the module's _is_displayed flag first so display_plot renders
    again even if a previous test already displayed it.

    Returns
    -------
    plot_meta : plot_playground.common.d3_helper.PlotMeta
        An object that stores the metadata of the plot.
    """
    linux_stats_plot._is_displayed = False
    plot_meta = linux_stats_plot.display_plot(
        log_dir_path=TMP_TEST_LOG_DIR,
        svg_id=settings.TEST_SVG_ELEM_ID)
    return plot_meta
def test__update_gpu_disabled_bool():
    """
    Test for _update_gpu_disabled_bool: the flag must flip to True when
    the gpustat command raises, and stay False when it succeeds.

    Test Command
    ------------
    $ python run_tests.py --module_name plot_playground.tests.test_linux_stats_plot:test__update_gpu_disabled_bool --skip_jupyter 1
    """
    # Save module state so it can be restored at the end.
    pre_bool = linux_stats_plot.is_gpu_stats_disabled
    pre_func = linux_stats_plot._exec_gpustat_command
    linux_stats_plot.is_gpu_stats_disabled = False
    # Monkeypatch the command with one that always fails.
    def raise_error():
        raise Exception()
    linux_stats_plot._exec_gpustat_command = raise_error
    linux_stats_plot._update_gpu_disabled_bool()
    assert_true(linux_stats_plot.is_gpu_stats_disabled)
    # Monkeypatch with one that succeeds (no-op).
    def pass_func():
        pass
    linux_stats_plot._exec_gpustat_command = pass_func
    linux_stats_plot.is_gpu_stats_disabled = False
    linux_stats_plot._update_gpu_disabled_bool()
    assert_false(linux_stats_plot.is_gpu_stats_disabled)
    # Restore the original module state.
    linux_stats_plot.is_gpu_stats_disabled = pre_bool
    linux_stats_plot._exec_gpustat_command = pre_func
|
'use strict';
// Initial state for the showLoginPage reducer: the login page starts hidden.
const pageState = {
  loginPageVisible: false,
}
// Initial state for the login-status reducers (isLogin / logout).
const loginStatus = {
  status: 'NOT_LOGGED_IN',
}
/**
 * Reducer tracking login status.
 *
 * Fix: removed the `break` statements that followed each `return` —
 * they were unreachable dead code.
 *
 * @param {Object} state current status state ({ status }).
 * @param {Object} action dispatched action; only `action.type` is read.
 * @returns {Object} a new state object on a handled action, otherwise the
 *   same state reference.
 */
export function isLogin(state=loginStatus, action) {
  switch (action.type) {
    case 'LOGGED_IN':
      return {
        ...state,
        status: 'LOGGED_IN',
      }
    case 'NOT_LOGGED_IN':
      return {
        ...state,
        status: 'NOT_LOGGED_IN',
      }
    default:
      return state;
  }
}
/**
 * Reducer handling logout.
 *
 * Fix: removed the unreachable `break` after `return`.
 *
 * @param {Object} state current status state ({ status }).
 * @param {Object} action dispatched action; only `action.type` is read.
 * @returns {Object} a new state object for NOT_LOGGED_IN, otherwise the
 *   same state reference.
 */
export function logout(state=loginStatus, action) {
  switch (action.type) {
    case 'NOT_LOGGED_IN':
      return {
        ...state,
        status: 'NOT_LOGGED_IN',
      }
    default:
      return state;
  }
}
/**
 * Reducer controlling login page visibility.
 *
 * Fix: removed the `break` statements that followed each `return` —
 * they were unreachable dead code.
 *
 * @param {Object} state current page state ({ loginPageVisible }).
 * @param {Object} action dispatched action; only `action.type` is read.
 * @returns {Object} a new state object on a handled action, otherwise the
 *   same state reference.
 */
export function showLoginPage(state=pageState, action) {
  switch (action.type) {
    case 'LOGIN_PAGE_VISIBLE':
      return {
        ...state,
        loginPageVisible: true,
      }
    case 'LOGIN_PAGE_INVISIBLE':
      return {
        ...state,
        loginPageVisible: false,
      }
    default:
      return state;
  }
}
// export function login(state=initialState, action) {
// switch (action.type) {
// case 'LOGGIN_DONING':
// return {
// ...state,
// status: 'LOGGIN_DONING'
// }
// break;
// case 'LOGGIN_DONE':
// return {
// ...state,
// status: 'LOGGIN_DONE',
// user: action.user,
// }
// break;
// case 'LOGGIN_ERROR':
// return {
// ...state,
// status: 'LOGGIN_ERROR',
// user: null,
// }
// break;
// default:
// return state;
//
// }
// }
|
// Copyright 2009 the Sputnik authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
info: The production x >>= y is the same as x = x >> y
es5id: 11.13.2_A4.7_T2.8
description: >
Type(x) is different from Type(y) and both types vary between
Boolean (primitive or object) and Undefined
---*/
// Each check exercises `x >>= y` mixing Boolean (primitive or object)
// with undefined. Per the spec, ToInt32(undefined) is 0 and
// ToInt32(true) / ToInt32(new Boolean(true)) is 1, so the expected
// results are 1 >> 0 === 1 and 0 >> 1 === 0.
var x;
//CHECK#1
x = true;
x >>= undefined;
if (x !== 1) {
  throw new Test262Error('#1: x = true; x >>= undefined; x === 1. Actual: ' + (x));
}
//CHECK#2
x = undefined;
x >>= true;
if (x !== 0) {
  throw new Test262Error('#2: x = undefined; x >>= true; x === 0. Actual: ' + (x));
}
//CHECK#3
x = new Boolean(true);
x >>= undefined;
if (x !== 1) {
  throw new Test262Error('#3: x = new Boolean(true); x >>= undefined; x === 1. Actual: ' + (x));
}
//CHECK#4
x = undefined;
x >>= new Boolean(true);
if (x !== 0) {
  throw new Test262Error('#4: x = undefined; x >>= new Boolean(true); x === 0. Actual: ' + (x));
}
|
// Copyright (c) 2012-2017, The CryptoNote developers, The Bytecoin developers
// Copyright (c) 2018-2019, The DeployCoin Developers
//
// Please see the included LICENSE file for more information.
#pragma once
#include "BlockchainExplorerData.h"
#include "ITransaction.h"
#include "crypto/crypto.h"
#include "cryptonotecore/CryptoNoteBasic.h"
#include "cryptonoteprotocol/CryptoNoteProtocolDefinitions.h"
#include "rpc/CoreRpcServerCommandsDefinitions.h"
#include <WalletTypes.h>
#include <cstdint>
#include <functional>
#include <memory>
#include <system_error>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace CryptoNote
{
    /* Observer interface for node-side events. Every callback has an
       empty default implementation, so subclasses only override the
       notifications they care about. */
    class INodeObserver
    {
      public:
        virtual ~INodeObserver() {}
        /* The number of connected peers changed. */
        virtual void peerCountUpdated(size_t count) {}
        /* The locally stored blockchain grew/changed; height is the new tip. */
        virtual void localBlockchainUpdated(uint32_t height) {}
        /* The best height known from the network changed. */
        virtual void lastKnownBlockHeightUpdated(uint32_t height) {}
        /* The transaction pool contents changed. */
        virtual void poolChanged() {}
        /* Local chain caught up with the network at topHeight. */
        virtual void blockchainSynchronized(uint32_t topHeight) {}
    };
    /* One spendable output: its global index plus its one-time public key. */
    struct OutEntry
    {
        uint32_t outGlobalIndex;
        Crypto::PublicKey outKey;
    };
    /* All known outputs for a given denomination (amount). */
    struct OutsForAmount
    {
        uint64_t amount;
        std::vector<OutEntry> outs;
    };
    /* Minimal transaction record: hash plus prefix (no signatures). */
    struct TransactionShortInfo
    {
        Crypto::Hash txId;
        TransactionPrefix txPrefix;
    };
    /* Lightweight block record; `block` is only meaningful when
       hasBlock is true. */
    struct BlockShortEntry
    {
        Crypto::Hash blockHash;
        bool hasBlock;
        CryptoNote::BlockTemplate block;
        std::vector<TransactionShortInfo> txsShortInfo;
    };
    /* Header-level metadata for a single block. */
    struct BlockHeaderInfo
    {
        uint32_t index;
        uint8_t majorVersion;
        uint8_t minorVersion;
        uint64_t timestamp;
        Crypto::Hash hash;
        Crypto::Hash prevHash;
        uint32_t nonce;
        bool isAlternative;
        uint32_t depth; // last block index = current block index + depth
        uint64_t difficulty;
        uint64_t reward;
    };
    /* Abstract client interface to a CryptoNote node. Asynchronous calls
       fill their output parameters and then invoke the supplied Callback
       with an std::error_code; outputs are only valid on success. */
    class INode
    {
      public:
        /* Completion handler for all asynchronous calls below. */
        typedef std::function<void(std::error_code)> Callback;
        virtual ~INode() {}
        /* Observer registration; returns whether the operation took effect. */
        virtual bool addObserver(INodeObserver *observer) = 0;
        virtual bool removeObserver(INodeObserver *observer) = 0;
        // precondition: must be called in dispatcher's thread
        virtual void init(const Callback &callback) = 0;
        // precondition: must be called in dispatcher's thread
        virtual bool shutdown() = 0;
        // precondition: all of following methods must not be invoked in dispatcher's thread
        virtual size_t getPeerCount() const = 0;
        virtual uint32_t getLastLocalBlockHeight() const = 0;
        virtual uint32_t getLastKnownBlockHeight() const = 0;
        virtual uint32_t getLocalBlockCount() const = 0;
        virtual uint32_t getKnownBlockCount() const = 0;
        virtual uint64_t getNodeHeight() const = 0;
        virtual void getFeeInfo() = 0;
        /* Collect hashes of blocks whose timestamps fall in
           [timestampBegin, timestampBegin + secondsCount). */
        virtual void getBlockHashesByTimestamps(
            uint64_t timestampBegin,
            size_t secondsCount,
            std::vector<Crypto::Hash> &blockHashes,
            const Callback &callback) = 0;
        virtual void getTransactionHashesByPaymentId(
            const Crypto::Hash &paymentId,
            std::vector<Crypto::Hash> &transactionHashes,
            const Callback &callback) = 0;
        virtual BlockHeaderInfo getLastLocalBlockHeaderInfo() const = 0;
        /* Broadcast a transaction to the network. */
        virtual void relayTransaction(const Transaction &transaction, const Callback &callback) = 0;
        /* Fetch outsCount random outputs for each requested denomination
           (used for ring-signature mixins). */
        virtual void getRandomOutsByAmounts(
            std::vector<uint64_t> &&amounts,
            uint16_t outsCount,
            std::vector<RandomOuts> &result,
            const Callback &callback) = 0;
        virtual void getTransactionOutsGlobalIndices(
            const Crypto::Hash &transactionHash,
            std::vector<uint32_t> &outsGlobalIndices,
            const Callback &callback) = 0;
        /* Global output indexes for every transaction in the height range
           [startHeight, endHeight], keyed by transaction hash. */
        virtual void getGlobalIndexesForRange(
            const uint64_t startHeight,
            const uint64_t endHeight,
            std::unordered_map<Crypto::Hash, std::vector<uint64_t>> &indexes,
            const Callback &callback) = 0;
        /* Partition the given hashes into pool / in-block / unknown sets. */
        virtual void getTransactionsStatus(
            const std::unordered_set<Crypto::Hash> transactionHashes,
            std::unordered_set<Crypto::Hash> &transactionsInPool,
            std::unordered_set<Crypto::Hash> &transactionsInBlock,
            std::unordered_set<Crypto::Hash> &transactionsUnknown,
            const Callback &callback) = 0;
        /* Sync helper: given known block ids, return the blocks the caller
           is missing and the height the returned range starts at. */
        virtual void queryBlocks(
            std::vector<Crypto::Hash> &&knownBlockIds,
            uint64_t timestamp,
            std::vector<BlockShortEntry> &newBlocks,
            uint32_t &startHeight,
            const Callback &callback) = 0;
        virtual void getWalletSyncData(
            std::vector<Crypto::Hash> &&knownBlockIds,
            uint64_t startHeight,
            uint64_t startTimestamp,
            std::vector<WalletTypes::WalletBlockInfo> &newBlocks,
            const Callback &callback) = 0;
        /* Diff the caller's view of the tx pool against the node's;
           isBcActual reports whether knownBlockId is still the tip. */
        virtual void getPoolSymmetricDifference(
            std::vector<Crypto::Hash> &&knownPoolTxIds,
            Crypto::Hash knownBlockId,
            bool &isBcActual,
            std::vector<std::unique_ptr<ITransactionReader>> &newTxs,
            std::vector<Crypto::Hash> &deletedTxIds,
            const Callback &callback) = 0;
        virtual void getBlocks(
            const std::vector<uint32_t> &blockHeights,
            std::vector<std::vector<BlockDetails>> &blocks,
            const Callback &callback) = 0;
        virtual void getBlocks(
            const std::vector<Crypto::Hash> &blockHashes,
            std::vector<BlockDetails> &blocks,
            const Callback &callback) = 0;
        virtual void getBlock(const uint32_t blockHeight, BlockDetails &block, const Callback &callback) = 0;
        virtual void getTransactions(
            const std::vector<Crypto::Hash> &transactionHashes,
            std::vector<TransactionDetails> &transactions,
            const Callback &callback) = 0;
        virtual void isSynchronized(bool &syncStatus, const Callback &callback) = 0;
        /* Fee configuration advertised by the remote node. */
        virtual std::string feeAddress() = 0;
        virtual uint32_t feeAmount() = 0;
    };
} // namespace CryptoNote
|
// ** I18N
// Calendar SV language (Swedish, svenska)
// Author: Mihai Bazon, <mihai_bazon@yahoo.com>
// Translation team: <sv@li.org>
// Translator: Leonard Norrg�rd <leonard.norrgard@refactor.fi>
// Last translator: Leonard Norrg�rd <leonard.norrgard@refactor.fi>
// Encoding: iso-latin-1
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array("s�ndag", "m�ndag", "tisdag", "onsdag", "torsdag", "fredag", "l�rdag", "s�ndag");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
Calendar._SDN_len = 2;
Calendar._SMN_len = 3;
// full month names
Calendar._MN = new Array("januari", "februari", "mars", "april", "maj", "juni", "juli", "augusti", "september", "oktober", "november", "december");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Om kalendern";
Calendar._TT["ABOUT"] =
"DHTML Datum/tid-v�ljare\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" +
"F�r senaste version g� till: http://www.dynarch.com/projects/calendar/\n" +
"Distribueras under GNU LGPL. Se http://gnu.org/licenses/lgpl.html f�r detaljer." +
"\n\n" +
"Val av datum:\n" +
"- Anv�nd knapparna \xab, \xbb f�r att v�lja �r\n" +
"- Anv�nd knapparna " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " f�r att v�lja m�nad\n" +
"- H�ll musknappen nedtryckt p� n�gon av ovanst�ende knappar f�r snabbare val.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Val av tid:\n" +
"- Klicka p� en del av tiden f�r att �ka den delen\n" +
"- eller skift-klicka f�r att minska den\n" +
"- eller klicka och drag f�r snabbare val.";
Calendar._TT["PREV_YEAR"] = "F�reg�ende �r (h�ll f�r menu)";
Calendar._TT["PREV_MONTH"] = "F�reg�ende m�nad (h�ll f�r menu)";
Calendar._TT["GO_TODAY"] = "G� till dagens datum";
Calendar._TT["NEXT_MONTH"] = "F�ljande m�nad (h�ll f�r menu)";
Calendar._TT["NEXT_YEAR"] = "F�ljande �r (h�ll f�r menu)";
Calendar._TT["SEL_DATE"] = "V�lj datum";
Calendar._TT["DRAG_TO_MOVE"] = "Drag f�r att flytta";
Calendar._TT["PART_TODAY"] = " (idag)";
Calendar._TT["MON_FIRST"] = "Visa m�ndag f�rst";
Calendar._TT["SUN_FIRST"] = "Visa s�ndag f�rst";
Calendar._TT["CLOSE"] = "St�ng";
Calendar._TT["TODAY"] = "Idag";
Calendar._TT["TIME_PART"] = "(Skift-)klicka eller drag f�r att �ndra tid";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%Y-%m-%d";
Calendar._TT["TT_DATE_FORMAT"] = "%A %d %b %Y";
Calendar._TT["WK"] = "vecka";
Calendar._TT["TIME"] = "Tid:";
Calendar._TT["WEEKEND"] = "0,6";
|
import os
import cv2
import shutil
import numpy as np
# Human-readable (unaccented Vietnamese) names for each traffic-sign class,
# indexed by the integer class id stored in the label files.
NAME_TRAFIC_SIGN = ["Cam dung va do xe", "Cam do xe", "Cam bop coi", "Cam xe tai",
                    "Cam xe tai tu 2,5 tan", "Cam re trai", "Cam re phai",
                    "Cam do xe ngay le", "Cam do xe ngay chan",
                    "Cam di nguoc chieu", "Han che trong luong xe",
                    "Toc do toi da cho phep", "Nguoi di bo cat ngang", "Tre em",
                    "Giao nhau voi duong uu tien", "Cho ngoat nguy hiem",
                    "Noi giao nhau cua duong dong cap",
                    "Giao nhau voi duong khong uu tien", "Di cham",
                    "Duong di thang phai theo", "Cho", "Diem dung xe buyt",
                    "Tram xang", "Chi huong duong", "Benh vien", "Duong di bo",
                    "Noi do xe"]
def get_all_file_image_and_label(
        path_image='/home/vuong/Videos/17:07:37220898/image_left/',
        path_label='/home/vuong/Videos/17:07:37220898/label_left/'):
    """
    Recursively collect image (.jpg) and label (.txt) file paths.

    The directories were previously hard-coded; they are now parameters
    with the original values as defaults, so existing callers are
    unaffected. Matching was also tightened from a substring test
    (``'.jpg' in file``, which would match e.g. ``a.jpg.bak``) to a
    proper extension check.

    Parameters
    ----------
    path_image : str
        Directory walked for ``.jpg`` image files.
    path_label : str
        Directory walked for ``.txt`` label files.

    Returns
    -------
    tuple of (list of str, list of str)
        Full paths of the image files and of the label files. Either
        list is empty when its directory does not exist.
    """
    list_files_image = []
    list_files_label = []
    # r=root, d=directories, f = files
    for r, d, f in os.walk(path_image):
        for file in f:
            if file.endswith('.jpg'):
                list_files_image.append(os.path.join(r, file))
    for r, d, f in os.walk(path_label):
        for file in f:
            if file.endswith('.txt'):
                list_files_label.append(os.path.join(r, file))
    return list_files_image, list_files_label
def show_label_select():
    """
    Interactively display each image with its labels: for every
    (image, label) pair, parse the normalized YOLO-style boxes, print
    them, and show both the full image and the cropped box region in
    OpenCV windows (press any key to advance).
    """
    list_files_image, list_files_label = get_all_file_image_and_label()
    # Sorting both lists keeps image i aligned with label i
    # (assumes matching file name ordering -- TODO confirm).
    list_files_label.sort()
    list_files_image.sort()
    for i in range(len(list_files_image)):
        image_file = list_files_image[i]
        label_file = list_files_label[i]
        img = cv2.imread(image_file)
        (H, W) = img.shape[:2]
        print('label_file', label_file)
        with open(label_file) as fr:
            lines = fr.readlines()
            print(lines)
            print(len(lines))
            for line in lines:
                # Label format per line: class_id x_center y_center w h,
                # all but class_id normalized to [0, 1]; scale back to pixels.
                class_id = int(float(line.split(' ')[0]))
                x = int(float(line.split(' ')[1]) * W)
                y = int(float(line.split(' ')[2]) * H)
                w = int(float(line.split(' ')[3]) * W)
                h = int(float(line.split(' ')[4]) * H)
                # x = int(float(line.split(' ')[1]))
                # y = int(float(line.split(' ')[2]))
                # w = int(float(line.split(' ')[3]))
                # h = int(float(line.split(' ')[4]))
                print('x, y, w, h ', x, y, w, h)
                # [('Tram dung xe buyt', [(685, 182), (724, 182), (724, 245), (685, 245)], None, None, False),
                # ('Giao nhau voi duong khong uu tien', [(412, 141), (442, 141), (442, 164), (412, 164)], None, None, False)]
                print(
                    '[({}, [({}, {}), ({}, {}), ({}, {}), ({}, {})]'.format(NAME_TRAFIC_SIGN[class_id], x, y, x + w, y,
                                                                            0, 0, y, y + h))
                # (x, y) is the box center, so crop half the size each way.
                crop_img = img[y - int(h / 2):y + int(h / 2), x - int(w / 2):x + int(w / 2)]
                cv2.imshow("image", img)
                cv2.imshow("cropped", crop_img)
                cv2.waitKey(0)
                # key = cv2.waitKey(1) & 0xFF
                # if key == ord("q"):
                #     break
def copy_file_image_and_label_with_class_id(class_id_compare):
    """
    Copy every image/label pair whose label file contains the given
    class id into a fixed destination directory pair, recreating the
    destination directories from scratch each run.

    Parameters
    ----------
    class_id_compare : int
        Class id to search for in the label files.
    """
    list_files_image, list_files_label = get_all_file_image_and_label()
    list_files_label.sort()
    list_files_image.sort()
    path_class_id_image = '/home/vuong/Pictures/BBGT/image/'
    path_class_id_label = '/home/vuong/Pictures/BBGT/label/'
    # Wipe and recreate the output directories so each run starts clean.
    if os.path.exists(path_class_id_label) and os.path.exists(path_class_id_image):
        shutil.rmtree(path_class_id_label)
        shutil.rmtree(path_class_id_image)
    if not os.path.isdir(path_class_id_image):
        os.makedirs(path_class_id_image)
    if not os.path.isdir(path_class_id_label):
        os.makedirs(path_class_id_label)
    for i in range(len(list_files_image)):
        image_file = list_files_image[i]
        label_file = list_files_label[i]
        # NOTE(review): this compares the extension segment of a .jpg path
        # with that of a .txt path, so it is always true; the np.asarray
        # conversion and `a = 0` below appear to be leftover debug code --
        # verify the intended pairing check (likely the basename stem).
        if image_file.split('.')[1] != label_file.split('.')[1]:
            list_files_label = np.asarray(list_files_label)
            list_files_image = np.asarray(list_files_image)
            a = 0
        list_class_id = []
        with open(label_file) as fr:
            lines = fr.readlines()
            for line in lines:
                class_id = int(float(line.split(' ')[0]))
                list_class_id.append(class_id)
        # Copy the pair when the wanted class id appears anywhere in it.
        if class_id_compare in list_class_id:
            print(class_id_compare)
            print(list_class_id)
            print(image_file)
            print(label_file)
            shutil.copy2(image_file, path_class_id_image)
            shutil.copy2(label_file, path_class_id_label)
            # a = 0
            # img = cv2.imread(image_file)
            # cv2.imshow("image", img)
            # cv2.waitKey(0)
if __name__ == "__main__":
    # Extract all pairs containing class id 2 ("Cam bop coi").
    copy_file_image_and_label_with_class_id(class_id_compare=2)
    # show_label_select()
|
/* @flow */
import StackFrame from './stack-frame'
const regexExtractLocation = /\(?(.+?)(?::(\d+))?(?::(\d+))?\)?$/
function extractLocation(token: string): [string, number, number] {
return regexExtractLocation
.exec(token)
.slice(1)
.map(v => {
const p = Number(v)
if (!isNaN(p)) {
return p
}
return v
})
}
const regexValidFrame_Chrome = /^\s*(at|in)\s.+(:\d+)/
const regexValidFrame_FireFox = /(^|@)\S+:\d+|.+line\s+\d+\s+>\s+(eval|Function).+/

/**
 * Converts raw stack-trace lines into StackFrame objects, handling both
 * the Chrome/V8 ("at fn (file:line:col)") and Firefox ("fn@file:line:col")
 * frame formats. Lines matching neither format are dropped.
 */
function parseStack(stack: string[]): StackFrame[] {
  const frames = stack
    .filter(
      e => regexValidFrame_Chrome.test(e) || regexValidFrame_FireFox.test(e),
    )
    .map(e => {
      if (regexValidFrame_FireFox.test(e)) {
        // Strip eval, we don't care about it
        let isEval = false
        if (/ > (eval|Function)/.test(e)) {
          e = e.replace(
            / line (\d+)(?: > eval line \d+)* > (eval|Function):\d+:\d+/g,
            ':$1',
          )
          isEval = true
        }
        // Firefox frames are "functionName@location".
        const data = e.split(/[@]/g)
        const last = data.pop()
        return new StackFrame(
          data.join('@') || (isEval ? 'eval' : null),
          ...extractLocation(last),
        )
      } else {
        // Strip eval, we don't care about it
        if (e.indexOf('(eval ') !== -1) {
          e = e.replace(/(\(eval at [^()]*)|(\),.*$)/g, '')
        }
        if (e.indexOf('(at ') !== -1) {
          e = e.replace(/\(at /, '(')
        }
        // Chrome frames are "at functionName location"; drop the leading
        // "at" and take the final token as the location.
        const data = e
          .trim()
          .split(/\s+/g)
          .slice(1)
        const last = data.pop()
        return new StackFrame(data.join(' ') || null, ...extractLocation(last))
      }
    })
  return frames
}
/**
 * Turns an <code>Error</code>, or similar object, into a set of <code>StackFrame</code>s.
 * @alias parse
 */
function parseError(error: Error | string | string[]): StackFrame[] {
  if (error == null) {
    throw new Error('You cannot pass a null object.')
  }
  // An array is assumed to already be split into individual trace lines.
  if (Array.isArray(error)) {
    return parseStack(error)
  }
  // Otherwise obtain the raw multi-line trace text, either directly or
  // from the object's `stack` property.
  const stackText =
    typeof error === 'string'
      ? error
      : typeof error.stack === 'string'
        ? error.stack
        : null
  if (stackText === null) {
    throw new Error('The error you provided does not contain a stack trace.')
  }
  return parseStack(stackText.split('\n'))
}
export {parseError as parse}
export default parseError
|
# Generated by Django 2.2.13 on 2020-08-18 09:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header). Widens the choice sets of the
    # RouteToMarkets `promote` and `route` fields; avoid hand-editing the
    # operations, as that can desync the recorded migration state.
    dependencies = [
        ('exportplan', '0017_targetmarketdocuments'),
    ]
    operations = [
        migrations.AlterField(
            model_name='routetomarkets',
            name='promote',
            field=models.CharField(blank=True, choices=[('DIRECT_SALES', 'Direct sales'), ('INTERNATIONAL_E_COMMERCE', 'International e-commerce'), ('AGENT_OR_DISTRIBUTOR', 'Agent or distributor'), ('LICENSING', 'Licensing'), ('FRANCHISING', 'Franchising'), ('JOINT_VENTURES', 'Joint ventures'), ('SET_UP_A_BUSINESS_ABROAD', 'Set up a business abroad'), ('OTHER', 'Other')], default='', max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='routetomarkets',
            name='route',
            field=models.CharField(blank=True, choices=[('MARKETING_AT_EVENTS', 'Marketing at events'), ('ONLINE_MARKETING', 'Online marketing'), ('OTHER', 'Other')], default='', max_length=30, null=True),
        ),
    ]
|
// eslint-disable-next-line strict
'use strict';
const Model = require('../model.js');
const schema = require('./recipient-schema.js');
// Recipient data model: all behaviour comes from the generic Model base
// class; this module exports a singleton bound to the recipient schema.
class Recipient extends Model {}
module.exports = new Recipient(schema);
|
// Auto-generated apidoc project metadata (see the "generator" stanza
// below); regenerate with apidoc rather than editing by hand.
define({
  "name": "SimCam Api documentation",
  "version": "0.0.7",
  "description": "Documentation for the REST api access provided at SimCam SDK",
  "title": "SimCam Api Documentation",
  "url": "http://127.0.0.1/simcam",
  "template": {
    "withCompare": true,
    "withGenerator": true,
    "aloneDisplay": false
  },
  "sampleUrl": false,
  "defaultVersion": "0.0.0",
  "apidoc": "0.3.0",
  "generator": {
    "name": "apidoc",
    "time": "2020-08-16T10:21:31.241Z",
    "url": "http://apidocjs.com",
    "version": "0.22.1"
  }
});
|
/**
* Copyright 2018 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
const {createCacheUrl} = require('amp-toolbox-cache-url');
const Signature = require('./Signature');
const Caches = require('amp-toolbox-cache-list');
const {URL} = require('url');
/**
* Generates update-cache URLs, according to the specification available at:
* https://developers.google.com/amp/cache/update-ping#update-cache-request
*/
class UpdateCacheUrlProvider {
  /**
   * @param {Signature} signature signer used for the amp_url_signature param.
   * @param {Caches} caches provider of the known AMP caches list.
   */
  constructor(signature, caches) {
    this._caches = caches;
    this._sig = signature;
  }
  /**
   * Generates update-cache URLs for each known AMP cache.
   *
   * @param {string} originUrl the URL for the content on the origin (ex: https://example.com)
   * @param {Number} [timestamp] as a UNIX Epoch in seconds
   * @returns {Promise<Array<Object>>} an array with objects containing the cache ID, cache name and
   * update-cache url.
   */
  calculateFromOriginUrl(originUrl, timestamp = defaultTimestamp_()) {
    // For every cache: map the origin URL to that cache's domain, then
    // sign it; results for all caches are awaited together.
    return this._caches.list().then((caches) => Promise.all(
      caches.map((cache) =>
        createCacheUrl(cache.updateCacheApiDomainSuffix, originUrl)
          .then((cacheUrl) => this.calculateFromCacheUrl(cacheUrl, timestamp))
          .then((updateCacheUrl) => {
            return {
              cacheId: cache.id,
              cacheName: cache.name,
              updateCacheUrl: updateCacheUrl,
            };
          })
      )
    ));
  }
  /**
   * Generates a signed update-cache request URL from an the AMP Cache URL, as documented here:
   * https://developers.google.com/amp/cache/update-ping#update-cache-request
   *
   * Note: throws synchronously (rather than rejecting) if cacheUrl is not
   * a valid URL.
   *
   * @param {String} cacheUrl the URL for the content on an AMP Cache
   *  (eg: https://example_com.cdn.ampproject.org/example.com/)
   * @param {Number} [timestamp] as a UNIX Epoch in seconds
   * @return {Promise<String>} the signed update-cache URL.
   */
  calculateFromCacheUrl(cacheUrl, timestamp = defaultTimestamp_()) {
    const url = new URL(cacheUrl);
    // Create the Cache Refresh URL to be signed.
    url.pathname = '/update-cache' + url.pathname;
    url.searchParams.append('amp_action', 'flush');
    url.searchParams.append('amp_ts', timestamp);
    // Append the signature to the Cache Refresh Url.
    // The signature covers the path plus all query params added above.
    const urlSignature = this._sig.generate(url.pathname + url.search);
    url.searchParams.append('amp_url_signature', urlSignature);
    return Promise.resolve(url.toString());
  }
  /**
   * Creates an instance of UpdateCacheUrlProvider that uses the privateKey
   * to sign the Urls.
   *
   * @param {string} privateKey Private Key to be used when signing Urls.
   * @returns {UpdateCacheUrlProvider} an instance of UpdateCacheUrlProvider.
   */
  static create(privateKey) {
    const signature = new Signature(privateKey);
    const caches = new Caches();
    return new UpdateCacheUrlProvider(signature, caches);
  }
}
/**
 * Current time as a UNIX epoch in whole seconds.
 * (Math.floor is equivalent to the former `| 0` truncation for these
 * positive, 32-bit-range values.)
 * @returns {Number}
 */
function defaultTimestamp_() {
  return Math.floor(Date.now() / 1000);
}
/** @module UpdateCacheUrlProvider */
module.exports = UpdateCacheUrlProvider;
|
const fs = require('fs')
const axios = require('axios')
const d3 = require('d3')
const Chess = require('chess.js').Chess
// Opening tree exported by the chess-visualisation post; each node holds a
// SAN move plus a popularity count.
const fangol = JSON.parse(
  fs.readFileSync(
    '../../content/2016-02-27-visual-look-chess/chess/fangol.json'
  )
).openings
// Build a d3 hierarchy where each leaf contributes its own count, sorted
// the same way the visualisation sorts it.
const openingsFromData = d3
  .hierarchy(fangol)
  .sum(d => (!d.children.length ? d.count : 0))
  .sort((a, b) => b.height - a.height || b.value - a.value)
// Replay each node's move sequence on a fresh board and record the
// resulting position (piece-placement field of the FEN only).
const allOpenings = []
openingsFromData.each(d => {
  const chess = new Chess()
  d.ancestors()
    .reverse()
    .slice(1) // remove 'start' position
    .forEach(n => {
      chess.move(n.data.san)
    })
  const fen = chess.fen().split(' ')[0]
  allOpenings.push(fen)
})
/**
 * Downloads the ECO opening classification TSVs (a.tsv through e.tsv),
 * matches them against the positions in `allOpenings` by FEN
 * piece-placement, and writes a { fen: "ECO - name" } lookup to ecos.json.
 */
const collectEco = async () => {
  const urls = 'abcde'
    .split('')
    .map(char => `https://github.com/niklasf/eco/raw/master/${char}.tsv`)
  // Fetch all five files in parallel.
  const files = await Promise.all(urls.map(url => axios.get(url)))
  // Flatten every TSV into { eco, name, fen, moves } rows, skipping the
  // header line of each file.
  const ecos = files.flatMap(({ data: str }) => {
    return str
      .split('\n')
      .slice(1)
      .filter(Boolean)
      .map(line => {
        const [eco, name, fen, moves] = line.split('\t')
        return { eco, name, fen, moves }
      })
  })
  // Keep only the ECO entries whose position occurs in our opening tree.
  const neededEcos = allOpenings
    .map(fen => {
      const matchingEco = ecos.find(d => {
        return d.fen.split(' ')[0] === fen
      })
      if (!matchingEco) return null
      return matchingEco
    })
    .filter(Boolean)
  // Reduce to a plain { fen: "ECO - name" } lookup object.
  const fileContent = neededEcos.reduce(
    (obj, val) => ({
      ...obj,
      [val.fen.split(' ')[0]]: val.eco + ' - ' + val.name,
    }),
    {}
  )
  // fs.writeFileSync('ecos.json', JSON.stringify(fileContent, null, 2))
  fs.writeFileSync('ecos.json', JSON.stringify(fileContent))
}
// Surface failures: without a catch, a network or parse error would be an
// unhandled promise rejection and the script could exit 0 having written
// nothing.
collectEco().catch(err => {
  console.error(err)
  process.exitCode = 1
})
|
import gulp from "gulp";
import { spawn } from "child_process";
import browserSync from "browser-sync";
import autoprefix from "gulp-autoprefixer";
import minify from "gulp-clean-css";
import sass from "gulp-sass";
const siteRoot = "_site";
const mainStylesheet = "_sass/main.scss"; /* Main stylesheet (pre-build) */
const jekyll =
process.platform === "win32"
? "jekyll.bat"
: "jekyll"; /* Fix Windows compatibility issue */
/**
* Build Jekyll Site
*/
const buildJekyll = () => {
  // Surface build progress in connected browsers.
  browserSync.notify("Running: $ jekyll build");
  // Returning the ChildProcess lets gulp treat the task as async and
  // wait for the build to finish.
  return spawn(jekyll, ["build"]);
};
/**
* Compile styles
*/
const compileStyles = () => {
  // Sass -> autoprefix -> minify -> assets/css, as one gulp stream.
  return gulp
    .src(mainStylesheet)
    .pipe(
      sass({
        includePaths: ["scss"],
        // Report Sass errors through BrowserSync instead of killing watch.
        onError: browserSync.notify
      })
    )
    .pipe(
      autoprefix({
        browsers: ["last 2 versions"],
        cascade: false
      })
    )
    .pipe(minify())
    .pipe(gulp.dest("assets/css/"));
};
/**
* Build Jekyll and compile styles
*/
const buildSite = done => {
  // Styles first so the Jekyll build picks up the freshly compiled CSS.
  gulp.series(compileStyles, buildJekyll)(done);
};
/**
* Start BrowserSync server
*/
const startServer = () => {
  browserSync.init({
    // Reload whenever anything in the built site changes.
    files: [siteRoot + "/**"],
    port: 4000,
    open: "local",
    server: {
      baseDir: siteRoot
    }
  });
};
/**
* Build site and start BrowserSync server
*/
const serve = gulp.series(buildSite, startServer);
// Rebuild on any source change, excluding the generated _site output to
// avoid an infinite rebuild loop.
const watch = () => {
  gulp.watch(
    [
      "**/*.scss",
      "**/**/*.scss",
      "**/*.html",
      "**/*.md",
      "**/*.yml",
      "!_site/**/*"
    ],
    buildSite
  );
};
// Default task: serve the site and watch for changes concurrently.
const build = done => {
  gulp.parallel(serve, watch)(done);
};
export default build;
|
/* Two-Factor Auth Tutorial Code Sample
`nexmo.verify.request` to send a temp code to a user's phone, then
`nexmo.verify.check` to validate the code entered by the user (on te web interface)
In this sample app, upon a user registartion, store the user's phone number
(as a key) and the generated request ID (as the value) in the persist storage.
When the user enter the PIN code, look the info up and match the PIN with the
requerst ID fromt he storage to verify.
Verify API Reference: https://docs.nexmo.com/verify/api-reference/api-reference
*/
'use strict';
// Express application setup for the Nexmo two-factor-auth demo.
const express = require('express');
const bodyParser = require('body-parser');
const ejs = require('ejs');
const app = express();
app.use(bodyParser.json()); // for parsing POST req
app.use(bodyParser.urlencoded({ extended: true }));
app.set('views', __dirname + '/views'); // Render on browser
app.set('view engine', 'html');
// Use EJS to render plain `.html` templates from the views directory.
app.engine('html', ejs.renderFile);
app.use(express.static(__dirname + '/views'));
// Listen on $PORT when provided (e.g. by the hosting platform), otherwise 5000.
const server = app.listen(process.env.PORT || 5000, () => {
  console.log('Express server listening on port %d in %s mode', server.address().port, app.settings.env);
});
// Nexmo client; credentials come from the environment (never hard-code them).
const Nexmo = require('nexmo');
const nexmo = new Nexmo({
  apiKey: process.env.API_KEY,
  apiSecret: process.env.API_SECRET
});
// Web UI ("Registration Form")
app.get('/', (req, res) => {
  res.render('index');
});
// A user registers with a mobile phone number; start a Verify request that
// sends a PIN code to that number.
app.post('/register', (req, res) => {
  const rawNumber = req.body.number;
  // Guard: without a number, `.replace` below would throw and crash the request.
  if (!rawNumber) {
    return res.render('status', {message: 'Phone number is required'});
  }
  // Strip everything that is not a digit (spaces, dashes, parentheses, "+").
  const phoneNumber = rawNumber.replace(/\D/g, '');
  console.log(phoneNumber);
  nexmo.verify.request({number: phoneNumber, brand: 'Awesome Company'}, (err, result) => {
    if (err) {
      console.error(err); // surface the failure in the server log
      res.render('status', {message: 'Server Error'});
    } else {
      console.log(result);
      // status '0' means the request was accepted; keep the request_id so the
      // PIN can be matched in the /verify handler.
      const requestId = result.request_id;
      if (result.status == '0') {
        res.render('verify', {requestId: requestId});
      } else {
        res.render('status', {message: result.error_text, requestId: requestId});
      }
    }
  });
});
// Checking to see if the submitted PIN matches the pending Verify request.
app.post('/verify', (req, res) => {
  const pin = req.body.pin;
  const requestId = req.body.requestId;
  nexmo.verify.check({request_id: requestId, code: pin}, (err, result) => {
    if (err) {
      console.error(err); // surface the failure in the server log
      res.render('status', {message: 'Server Error'});
    } else {
      console.log(result);
      // Error status codes: https://docs.nexmo.com/verify/api-reference/api-reference#check
      if (result && result.status == '0') {
        res.render('status', {message: 'Account verified! 🎉'});
      } else {
        // Bug fix: `result.error_text` was previously read even when `result`
        // was missing, which crashed the callback instead of reporting failure.
        const message = result ? result.error_text : 'Verification failed';
        res.render('status', {message: message, requestId: requestId});
      }
    }
  });
});
|
// Word pools used to build random row labels of the form "adjective colour noun".
const ADJECTIVES = ["pretty", "large", "big", "small", "tall", "short", "long", "handsome", "plain", "quaint", "clean", "elegant", "easy", "angry", "crazy", "helpful", "mushy", "odd", "unsightly", "adorable", "important", "inexpensive", "cheap", "expensive", "fancy"];
const COLOURS = ["red", "yellow", "blue", "green", "pink", "brown", "purple", "brown", "white", "black", "orange"];
const NOUNS = ["table", "chair", "house", "bbq", "desk", "car", "pony", "cookie", "sandwich", "burger", "pizza", "mouse", "keyboard"];
// Cached lengths so the generation loop avoids repeated property lookups.
const len_ADJECTIVES = ADJECTIVES.length;
const len_COLOURS = COLOURS.length;
const len_NOUNS = NOUNS.length;
// Monotonically increasing row id, shared across all calls to the generator.
let nextId = 1;
/**
 * Build `count` rows of benchmark data. Each row gets a globally unique,
 * increasing `id` and a random "adjective colour noun" `label`.
 */
export default function (count) {
  const rows = [];
  for (let i = 0; i < count; i++) {
    const label =
      ADJECTIVES[random(len_ADJECTIVES)] +
      " " +
      COLOURS[random(len_COLOURS)] +
      " " +
      NOUNS[random(len_NOUNS)];
    rows.push({ id: nextId++, label: label });
  }
  return rows;
}
// Uniform pseudo-random integer in [0, max); `| 0` truncates toward zero
// (adequate here because `max` is a small array length).
function random(max) {
  const sample = Math.random() * max;
  return sample | 0;
}
|
// Copyright (c) 2019-2020, The Worktips Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include <vector>
#include <functional>
#include <cstring>
#include <sstream>
#include <ostream>
#include <string>
#include <string_view>
#include <variant>
#include <cstdint>
#include <limits>
#include <stdexcept>
#include <type_traits>
#include <utility>
#include <tuple>
#include <algorithm>
#include "bt_value.h"
namespace worktipsmq {
using namespace std::literals;
/** \file
* WorktipsMQ serialization for internal commands is very simple: we support two primitive types,
* strings and integers, and two container types, lists and dicts with string keys. On the wire
* these go in BitTorrent byte encoding as described in BEP-0003
* (https://www.bittorrent.org/beps/bep_0003.html#bencoding).
*
* On the C++ side, on input we allow strings, integral types, STL-like containers of these types,
* and STL-like containers of pairs with a string first value and any of these types as second
* value. We also accept std::variants of these.
*
* One minor deviation from BEP-0003 is that we don't support serializing values that don't fit in a
* 64-bit integer (BEP-0003 specifies arbitrary precision integers).
*
 * On deserialization we can either deserialize into a special bt_value type that supports everything
* (with arbitrary nesting), or we can fill a container of your given type (though this fails if the
* container isn't compatible with the deserialized data).
*
* There is also a stream deserialization that allows you to deserialize without needing heap
* allocations (as long as you know the precise data structure layout).
*/
/// Exception thrown if deserialization fails
class bt_deserialize_invalid : public std::invalid_argument {
    using std::invalid_argument::invalid_argument;
};
/// A more specific subclass that is thrown if the serialization type is an initial mismatch: for
/// example, trying deserializing an int but the next thing in input is a list. This is not,
/// however, thrown if the type initially looks fine but, say, a nested serialization fails. This
/// error will only be thrown when the input stream has not been advanced (and so can be tried for a
/// different type).
class bt_deserialize_invalid_type : public bt_deserialize_invalid {
    using bt_deserialize_invalid::bt_deserialize_invalid;
};
namespace detail {
/// Reads digits into an unsigned 64-bit int.
uint64_t extract_unsigned(std::string_view& s);
// (Provide non-constant lvalue and rvalue ref functions so that we only accept explicit
// string_views but not implicitly converted ones)
inline uint64_t extract_unsigned(std::string_view&& s) { return extract_unsigned(s); }
// Fallback base case; we only get here if none of the partial specializations below work.
// The static_assert condition depends on T so it only fires on instantiation, producing a
// readable compile error for unsupported types.
template <typename T, typename SFINAE = void>
struct bt_serialize { static_assert(!std::is_same_v<T, T>, "Cannot serialize T: unsupported type for bt serialization"); };
template <typename T, typename SFINAE = void>
struct bt_deserialize { static_assert(!std::is_same_v<T, T>, "Cannot deserialize T: unsupported type for bt deserialization"); };
/// Checks that we aren't at the end of a string view and throws if we are.
inline void bt_need_more(const std::string_view& s) {
    if (!s.empty())
        return;
    throw bt_deserialize_invalid{"Unexpected end of string while deserializing"};
}
/// Deserializes a signed or unsigned 64-bit integer from a string. Sets the second bool to true
/// iff the value read was negative, false if positive; in either case the unsigned magnitude is
/// returned in .first. Throws an exception if the read value doesn't fit in a int64_t (if
/// negative) or a uint64_t (if positive). Removes consumed characters from the string_view.
std::pair<uint64_t, bool> bt_deserialize_integer(std::string_view& s);
/// Integer specializations: encodes any integral value as `i<digits>e`.
template <typename T>
struct bt_serialize<T, std::enable_if_t<std::is_integral_v<T>>> {
    static_assert(sizeof(T) <= sizeof(uint64_t), "Serialization of integers larger than uint64_t is not supported");
    void operator()(std::ostream& os, const T& val) {
        // Promote 1-byte types so iostream prints a number rather than a raw character.
        if constexpr (sizeof(T) > 1)
            os << 'i' << val << 'e';
        else if constexpr (std::is_signed_v<T>)
            os << 'i' << static_cast<int>(val) << 'e';
        else
            os << 'i' << static_cast<unsigned>(val) << 'e';
    }
};
/// Integer deserialization: parses a bt-encoded integer ("i...e") into `val`, range-checking it
/// against T's limits and throwing bt_deserialize_invalid when the encoded value does not fit.
template <typename T>
struct bt_deserialize<T, std::enable_if_t<std::is_integral_v<T>>> {
    void operator()(std::string_view& s, T &val) {
        constexpr uint64_t umax = static_cast<uint64_t>(std::numeric_limits<T>::max());
        constexpr int64_t smin = static_cast<int64_t>(std::numeric_limits<T>::min());
        auto [magnitude, negative] = bt_deserialize_integer(s);
        // `if constexpr` (the condition is a compile-time constant) so that only the branch
        // relevant to T's signedness is instantiated, avoiding dead mixed-sign comparisons.
        if constexpr (std::is_signed_v<T>) {
            if (!negative) {
                if (magnitude > umax)
                    throw bt_deserialize_invalid("Integer deserialization failed: found too-large value " + std::to_string(magnitude) + " > " + std::to_string(umax));
                val = static_cast<T>(magnitude);
            } else {
                auto sval = -static_cast<int64_t>(magnitude);
                if (!std::is_same_v<T, int64_t> && sval < smin)
                    throw bt_deserialize_invalid("Integer deserialization failed: found too-low value " + std::to_string(sval) + " < " + std::to_string(smin));
                val = static_cast<T>(sval);
            }
        } else {
            if (negative)
                throw bt_deserialize_invalid("Integer deserialization failed: found negative value -" + std::to_string(magnitude) + " but type is unsigned");
            if (!std::is_same_v<T, uint64_t> && magnitude > umax)
                throw bt_deserialize_invalid("Integer deserialization failed: found too-large value " + std::to_string(magnitude) + " > " + std::to_string(umax));
            val = static_cast<T>(magnitude);
        }
    }
};
// Explicit instantiations for the common 64-bit cases live in the implementation file.
extern template struct bt_deserialize<int64_t>;
extern template struct bt_deserialize<uint64_t>;
/// string_view specialization: writes the value as `<length>:<bytes>`.
template <>
struct bt_serialize<std::string_view> {
    void operator()(std::ostream &os, const std::string_view &val) { os << val.size(); os.put(':'); os.write(val.data(), val.size()); }
};
/// string_view deserialization: `val` is set to a view into the input buffer (no copy).
template <>
struct bt_deserialize<std::string_view> {
    void operator()(std::string_view& s, std::string_view& val);
};
/// String specialization
template <>
struct bt_serialize<std::string> {
    void operator()(std::ostream &os, const std::string &val) { bt_serialize<std::string_view>{}(os, val); }
};
template <>
struct bt_deserialize<std::string> {
    // Deserialize as a view, then copy the viewed bytes into the owning std::string.
    void operator()(std::string_view& s, std::string& val) { std::string_view view; bt_deserialize<std::string_view>{}(s, view); val = {view.data(), view.size()}; }
};
/// char * and string literals -- we allow serialization for convenience, but not deserialization
template <>
struct bt_serialize<char *> {
    void operator()(std::ostream &os, const char *str) { bt_serialize<std::string_view>{}(os, {str, std::strlen(str)}); }
};
template <size_t N>
struct bt_serialize<char[N]> {
    // N includes a string literal's terminating NUL, hence only N-1 bytes are written.
    void operator()(std::ostream &os, const char *str) { bt_serialize<std::string_view>{}(os, {str, N-1}); }
};
/// Partial dict validity; we don't check the second type for serializability, that will be handled
/// via the base case static_assert if invalid.
template <typename T, typename = void> struct is_bt_input_dict_container_impl : std::false_type {};
template <typename T>
struct is_bt_input_dict_container_impl<T, std::enable_if_t<
    std::is_same_v<std::string, std::remove_cv_t<typename T::value_type::first_type>> ||
    std::is_same_v<std::string_view, std::remove_cv_t<typename T::value_type::first_type>>,
    std::void_t<typename T::const_iterator /* is const iterable */,
                typename T::value_type::second_type /* has a second type */>>>
: std::true_type {};
/// Determines whether the type looks like something we can insert into (using `v.insert(v.end(), x)`)
template <typename T, typename = void> struct is_bt_insertable_impl : std::false_type {};
template <typename T>
struct is_bt_insertable_impl<T,
    std::void_t<decltype(std::declval<T>().insert(std::declval<T>().end(), std::declval<typename T::value_type>()))>>
: std::true_type {};
template <typename T>
constexpr bool is_bt_insertable = is_bt_insertable_impl<T>::value;
/// Determines whether the given type looks like a compatible map (i.e. has std::string keys) that
/// we can insert into.
template <typename T, typename = void> struct is_bt_output_dict_container_impl : std::false_type {};
template <typename T>
struct is_bt_output_dict_container_impl<T, std::enable_if_t<
    std::is_same_v<std::string, std::remove_cv_t<typename T::value_type::first_type>> && is_bt_insertable<T>,
    std::void_t<typename T::value_type::second_type /* has a second type */>>>
: std::true_type {};
template <typename T>
constexpr bool is_bt_output_dict_container = is_bt_output_dict_container_impl<T>::value;
// Bug fix: this previously aliased is_bt_output_dict_container_impl, which wrongly required
// insertability and rejected std::string_view keys for *input* (serialization-side) dicts,
// contradicting is_bt_input_dict_container_impl above.
template <typename T>
constexpr bool is_bt_input_dict_container = is_bt_input_dict_container_impl<T>::value;
// Sanity checks:
static_assert(is_bt_input_dict_container<bt_dict>);
static_assert(is_bt_output_dict_container<bt_dict>);
/// Specialization for a dict-like container (such as an unordered_map). We accept anything for a
/// dict that is const iterable over something that looks like a pair with std::string for first
/// value type. The value (i.e. second element of the pair) also must be serializable.
template <typename T>
struct bt_serialize<T, std::enable_if_t<is_bt_input_dict_container<T>>> {
    using second_type = typename T::value_type::second_type;
    using ref_pair = std::reference_wrapper<const typename T::value_type>;
    void operator()(std::ostream &os, const T &dict) {
        os << 'd';
        // Collect references to the pairs and sort them by key so that unordered
        // containers serialize deterministically (keys are emitted in sorted order).
        std::vector<ref_pair> pairs;
        pairs.reserve(dict.size());
        for (const auto &pair : dict)
            pairs.emplace(pairs.end(), pair);
        std::sort(pairs.begin(), pairs.end(), [](ref_pair a, ref_pair b) { return a.get().first < b.get().first; });
        for (auto &ref : pairs) {
            bt_serialize<std::string>{}(os, ref.get().first);
            bt_serialize<second_type>{}(os, ref.get().second);
        }
        os << 'e';
    }
};
/// Deserialization into a dict-like container: reads `d<key><value>...e`, clearing `dict` first
/// and inserting each parsed key/value pair at the container's end.
template <typename T>
struct bt_deserialize<T, std::enable_if_t<is_bt_output_dict_container<T>>> {
    using second_type = typename T::value_type::second_type;
    void operator()(std::string_view& s, T& dict) {
        // Smallest dict is 2 bytes "de", for an empty dict.
        if (s.size() < 2) throw bt_deserialize_invalid("Deserialization failed: end of string found where dict expected");
        if (s[0] != 'd') throw bt_deserialize_invalid_type("Deserialization failed: expected 'd', found '"s + s[0] + "'"s);
        s.remove_prefix(1);
        dict.clear();
        bt_deserialize<std::string> key_deserializer;
        bt_deserialize<second_type> val_deserializer;
        while (!s.empty() && s[0] != 'e') {
            std::string key;
            second_type val;
            key_deserializer(s, key);
            val_deserializer(s, val);
            dict.insert(dict.end(), typename T::value_type{std::move(key), std::move(val)});
        }
        if (s.empty())
            throw bt_deserialize_invalid("Deserialization failed: encountered end of string before dict was finished");
        s.remove_prefix(1); // Consume the 'e'
    }
};
/// Accept anything that looks iterable; value serialization validity isn't checked here (it fails
/// via the base case static assert).
template <typename T, typename = void> struct is_bt_input_list_container_impl : std::false_type {};
template <typename T>
struct is_bt_input_list_container_impl<T, std::enable_if_t<
    !std::is_same_v<T, std::string> && !std::is_same_v<T, std::string_view> && !is_bt_input_dict_container<T>,
    std::void_t<typename T::const_iterator, typename T::value_type>>>
: std::true_type {};
/// Output (deserialization-side) list trait: insertable, and neither a string nor a dict.
template <typename T, typename = void> struct is_bt_output_list_container_impl : std::false_type {};
template <typename T>
struct is_bt_output_list_container_impl<T, std::enable_if_t<
    !std::is_same_v<T, std::string> && !is_bt_output_dict_container<T> && is_bt_insertable<T>>>
: std::true_type {};
template <typename T>
constexpr bool is_bt_output_list_container = is_bt_output_list_container_impl<T>::value;
template <typename T>
constexpr bool is_bt_input_list_container = is_bt_input_list_container_impl<T>::value;
// Sanity checks:
static_assert(is_bt_input_list_container<bt_list>);
static_assert(is_bt_output_list_container<bt_list>);
/// List specialization: serializes any const-iterable container as `l<elem>...<elem>e`.
template <typename T>
struct bt_serialize<T, std::enable_if_t<is_bt_input_list_container<T>>> {
    void operator()(std::ostream& os, const T& list) {
        os.put('l');
        bt_serialize<std::remove_cv_t<typename T::value_type>> serialize_elem;
        for (const auto& elem : list)
            serialize_elem(os, elem);
        os.put('e');
    }
};
/// Deserialization into a list-like container: reads `l<value>...e`, clearing `list` first and
/// inserting each parsed element at the container's end.
template <typename T>
struct bt_deserialize<T, std::enable_if_t<is_bt_output_list_container<T>>> {
    using value_type = typename T::value_type;
    void operator()(std::string_view& s, T& list) {
        // Smallest list is 2 bytes "le", for an empty list.
        if (s.size() < 2) throw bt_deserialize_invalid("Deserialization failed: end of string found where list expected");
        if (s[0] != 'l') throw bt_deserialize_invalid_type("Deserialization failed: expected 'l', found '"s + s[0] + "'"s);
        s.remove_prefix(1);
        list.clear();
        bt_deserialize<value_type> deserializer;
        while (!s.empty() && s[0] != 'e') {
            value_type v;
            deserializer(s, v);
            list.insert(list.end(), std::move(v));
        }
        if (s.empty())
            throw bt_deserialize_invalid("Deserialization failed: encountered end of string before list was finished");
        s.remove_prefix(1); // Consume the 'e'
    }
};
/// Serializes a tuple or pair of serializable values (as a list on the wire)
/// Common implementation for both tuple and pair:
template <template<typename...> typename Tuple, typename... T>
struct bt_serialize_tuple {
private:
    // Fold over the index pack, serializing each element in order between 'l' and 'e'.
    template <size_t... Is>
    void operator()(std::ostream& os, const Tuple<T...>& elems, std::index_sequence<Is...>) {
        os << 'l';
        (bt_serialize<T>{}(os, std::get<Is>(elems)), ...);
        os << 'e';
    }
public:
    void operator()(std::ostream& os, const Tuple<T...>& elems) {
        operator()(os, elems, std::index_sequence_for<T...>{});
    }
};
/// Deserializes a wire list into a fixed-size tuple/pair; each element must deserialize as its
/// corresponding tuple type and the list must contain exactly sizeof...(T) elements.
template <template<typename...> typename Tuple, typename... T>
struct bt_deserialize_tuple {
private:
    template <size_t... Is>
    void operator()(std::string_view& s, Tuple<T...>& elems, std::index_sequence<Is...>) {
        // Smallest list is 2 bytes "le", for an empty list.
        if (s.size() < 2) throw bt_deserialize_invalid("Deserialization failed: end of string found where tuple expected");
        if (s[0] != 'l') throw bt_deserialize_invalid_type("Deserialization of tuple failed: expected 'l', found '"s + s[0] + "'"s);
        s.remove_prefix(1);
        (bt_deserialize<T>{}(s, std::get<Is>(elems)), ...);
        if (s.empty())
            throw bt_deserialize_invalid("Deserialization failed: encountered end of string before tuple was finished");
        if (s[0] != 'e')
            throw bt_deserialize_invalid("Deserialization failed: expected end of tuple but found something else");
        s.remove_prefix(1); // Consume the 'e'
    }
public:
    void operator()(std::string_view& s, Tuple<T...>& elems) {
        operator()(s, elems, std::index_sequence_for<T...>{});
    }
};
// Hook std::tuple and std::pair into the serialization machinery via the common helpers above.
template <typename... T>
struct bt_serialize<std::tuple<T...>> : bt_serialize_tuple<std::tuple, T...> {};
template <typename... T>
struct bt_deserialize<std::tuple<T...>> : bt_deserialize_tuple<std::tuple, T...> {};
template <typename S, typename T>
struct bt_serialize<std::pair<S, T>> : bt_serialize_tuple<std::pair, S, T> {};
template <typename S, typename T>
struct bt_deserialize<std::pair<S, T>> : bt_deserialize_tuple<std::pair, S, T> {};
// Trait: true for std::tuple and std::pair of any element types.
template <typename T>
constexpr bool is_bt_tuple = false;
template <typename... T>
constexpr bool is_bt_tuple<std::tuple<T...>> = true;
template <typename S, typename T>
constexpr bool is_bt_tuple<std::pair<S, T>> = true;
// Trait: the set of types we know how to deserialize (used by the variant machinery below).
template <typename T>
constexpr bool is_bt_deserializable = std::is_same_v<T, std::string> || std::is_integral_v<T> ||
    is_bt_output_dict_container<T> || is_bt_output_list_container<T> || is_bt_tuple<T>;
// General template and base case; this base will only actually be invoked when Ts... is empty,
// which means we reached the end without finding any variant type capable of holding the value.
template <typename SFINAE, typename Variant, typename... Ts>
struct bt_deserialize_try_variant_impl {
    void operator()(std::string_view&, Variant&) {
        throw bt_deserialize_invalid("Deserialization failed: could not deserialize value into any variant type");
    }
};
// Tries each Ts in order, deserializing into the first one whose expected leading byte matches.
template <typename... Ts, typename Variant>
void bt_deserialize_try_variant(std::string_view& s, Variant& variant) {
    bt_deserialize_try_variant_impl<void, Variant, Ts...>{}(s, variant);
}
// Case: T is deserializable. If the next input byte matches T's encoding marker, deserialize
// into T; otherwise recurse to try the remaining Ts.
// NOTE(review): s[0] is read without an emptiness check; callers appear to guarantee a non-empty
// view here -- confirm that invariant before reusing this elsewhere.
template <typename Variant, typename T, typename... Ts>
struct bt_deserialize_try_variant_impl<std::enable_if_t<is_bt_deserializable<T>>, Variant, T, Ts...> {
    void operator()(std::string_view& s, Variant& variant) {
        if ( is_bt_output_list_container<T> ? s[0] == 'l' :
             is_bt_tuple<T> ? s[0] == 'l' :
             is_bt_output_dict_container<T> ? s[0] == 'd' :
             std::is_integral_v<T> ? s[0] == 'i' :
             std::is_same_v<T, std::string> ? s[0] >= '0' && s[0] <= '9' :
             false) {
            T val;
            bt_deserialize<T>{}(s, val);
            variant = std::move(val);
        } else {
            bt_deserialize_try_variant<Ts...>(s, variant);
        }
    }
};
// Case: T is not deserializable -- skip it and try the remaining Ts.
template <typename Variant, typename T, typename... Ts>
struct bt_deserialize_try_variant_impl<std::enable_if_t<!is_bt_deserializable<T>>, Variant, T, Ts...> {
    void operator()(std::string_view& s, Variant& variant) {
        // Unsupported deserialization type, skip it
        bt_deserialize_try_variant<Ts...>(s, variant);
    }
};
// Serialization of a variant; all variant types must be bt-serializable.
template <typename... Ts>
struct bt_serialize<std::variant<Ts...>, std::void_t<bt_serialize<Ts>...>> {
    void operator()(std::ostream &os, const std::variant<Ts...>& val) {
        // Dispatch on the active alternative and serialize it with its own specialization.
        std::visit(
            [&os] (const auto& val) {
                using T = std::remove_cv_t<std::remove_reference_t<decltype(val)>>;
                bt_serialize<T>{}(os, val);
            },
            val);
    }
};
// Deserialization to a variant; at least one variant type must be bt-deserializable.
template <typename... Ts>
struct bt_deserialize<std::variant<Ts...>, std::enable_if_t<(is_bt_deserializable<Ts> || ...)>> {
    void operator()(std::string_view& s, std::variant<Ts...>& val) {
        bt_deserialize_try_variant<Ts...>(s, val);
    }
};
// bt_value (from bt_value.h) serializes via its underlying bt_variant; its deserialization is
// implemented out of line.
template <>
struct bt_serialize<bt_value> : bt_serialize<bt_variant> {};
template <>
struct bt_deserialize<bt_value> {
    void operator()(std::string_view& s, bt_value& val);
};
// Wrapper that serializes `val` when streamed into a std::ostream (see bt_serializer below).
// Holds only a reference: the wrapped value must outlive the wrapper.
template <typename T>
struct bt_stream_serializer {
    const T &val;
    explicit bt_stream_serializer(const T &val) : val{val} {}
    // Implicit string conversion: serializes through an intermediate ostringstream.
    operator std::string() const {
        std::ostringstream oss;
        oss << *this;
        return oss.str();
    }
};
// Streaming a bt_stream_serializer writes the bt-encoding of its wrapped value to `os`.
template <typename T>
std::ostream &operator<<(std::ostream &os, const bt_stream_serializer<T> &s) {
    bt_serialize<T>{}(os, s.val);
    return os;
}
} // namespace detail
/// Returns a wrapper around a value reference that can serialize the value directly to an output
/// stream. This class is intended to be used inline (i.e. without being stored) as in:
///
///     std::list<int> my_list{{1,2,3}};
///     std::cout << bt_serializer(my_list);
///
/// While it is possible to store the returned object and use it, such as:
///
///     auto encoded = bt_serializer(42);
///     std::cout << encoded;
///
/// this approach is not generally recommended: the returned object stores a reference to the
/// passed-in type, which may not survive. If doing this note that it is the caller's
/// responsibility to ensure the serializer is not used past the end of the lifetime of the value
/// being serialized.
///
/// Also note that serializing directly to an output stream is more efficient as no intermediate
/// string containing the entire serialization has to be constructed.
///
/// \param val the value to wrap; held by reference (see lifetime note above).
template <typename T>
detail::bt_stream_serializer<T> bt_serializer(const T &val) { return detail::bt_stream_serializer<T>{val}; }
/// Serializes the given value into a std::string.
///
///     int number = 42;
///     std::string encoded = bt_serialize(number);
///     // Equivalent:
///     //auto encoded = (std::string) bt_serialize(number);
///
/// This takes any serializable type: integral types, strings, lists of serializable types, and
/// string->value maps of serializable types.
///
/// The conversion goes through bt_stream_serializer's `operator std::string`.
template <typename T>
std::string bt_serialize(const T &val) { return bt_serializer(val); }
/// Deserializes the given string view directly into `val`. Usage:
///
///     std::string encoded = "i42e";
///     int value;
///     bt_deserialize(encoded, value); // Sets value to 42
///
/// Throws bt_deserialize_invalid (or a subclass) on malformed input.
template <typename T, std::enable_if_t<!std::is_const_v<T>, int> = 0>
void bt_deserialize(std::string_view s, T& val) {
    return detail::bt_deserialize<T>{}(s, val);
}
/// Deserializes the given string_view into a `T`, which is returned.
///
///     std::string encoded = "li1ei2ei3ee"; // bt-encoded list of ints: [1,2,3]
///     auto mylist = bt_deserialize<std::list<int>>(encoded);
///
template <typename T>
T bt_deserialize(std::string_view s) {
    T val;
    bt_deserialize(s, val);
    return val;
}
/// Deserializes the given value into a generic `bt_value` type (wrapped std::variant) which is
/// capable of holding all possible BT-encoded values (including recursion).
///
/// Example:
///
///     std::string encoded = "i42e";
///     auto val = bt_get(encoded);
///     int v = get_int<int>(val); // fails unless the encoded value was actually an integer that
///                                // fits into an `int`
///
inline bt_value bt_get(std::string_view s) {
    return bt_deserialize<bt_value>(s);
}
/// Helper functions to extract a value of some integral type from a bt_value which contains either
/// a int64_t or uint64_t. Does range checking, throwing std::overflow_error if the stored value is
/// outside the range of the target type.
///
/// Example:
///
///     std::string encoded = "i123456789e";
///     auto val = bt_get(encoded);
///     auto v = get_int<uint32_t>(val); // throws if the decoded value doesn't fit in a uint32_t
template <typename IntType, std::enable_if_t<std::is_integral_v<IntType>, int> = 0>
IntType get_int(const bt_value &v) {
    // If the variant holds uint64_t, only the upper bound can be exceeded.
    if (std::holds_alternative<uint64_t>(v)) {
        uint64_t value = std::get<uint64_t>(v);
        if constexpr (!std::is_same_v<IntType, uint64_t>)
            if (value > static_cast<uint64_t>(std::numeric_limits<IntType>::max()))
                throw std::overflow_error("Unable to extract integer value: stored value is too large for the requested type");
        return static_cast<IntType>(value);
    }
    // Otherwise it must hold int64_t (std::get throws std::bad_variant_access if not).
    int64_t value = std::get<int64_t>(v);
    if constexpr (!std::is_same_v<IntType, int64_t>)
        if (value > static_cast<int64_t>(std::numeric_limits<IntType>::max())
                || value < static_cast<int64_t>(std::numeric_limits<IntType>::min()))
            throw std::overflow_error("Unable to extract integer value: stored value is outside the range of the requested type");
    return static_cast<IntType>(value);
}
namespace detail {
// Forward declaration; defined below (after get_tuple, which calls it and which is called
// back for nested tuples).
template <typename Tuple, size_t... Is>
void get_tuple_impl(Tuple& t, const bt_list& l, std::index_sequence<Is...>);
}
/// Converts a bt_list into the given template std::tuple or std::pair. Throws a
/// std::invalid_argument if the list has the wrong size or wrong element types. Supports recursion
/// (i.e. if the tuple itself contains tuples or pairs). The tuple (or nested tuples) may only
/// contain integral types, strings, string_views, bt_list, bt_dict, and tuples/pairs of those.
template <typename Tuple>
Tuple get_tuple(const bt_list& x) {
    Tuple t;
    detail::get_tuple_impl(t, x, std::make_index_sequence<std::tuple_size_v<Tuple>>{});
    return t;
}
/// Overload taking a generic bt_value: extracts the contained bt_list (std::get throws if the
/// value is not a list) and converts it as above.
template <typename Tuple>
Tuple get_tuple(const bt_value& x) {
    return get_tuple<Tuple>(std::get<bt_list>(static_cast<const bt_variant&>(x)));
}
namespace detail {
/// Extracts the list element at `it` into tuple element `t`, advancing the iterator. Handles
/// integral extraction (via get_int), nested tuples/pairs, and string/string_view
/// cross-conversion; any other type must match the stored variant alternative exactly.
template <typename T, typename It>
void get_tuple_impl_one(T& t, It& it) {
    const bt_variant& v = *it++;
    if constexpr (std::is_integral_v<T>) {
        t = worktipsmq::get_int<T>(v);
    } else if constexpr (is_bt_tuple<T>) {
        // Bug fix: this condition was inverted -- it threw precisely when the element *was* a
        // bt_list (the only convertible case), so nested tuple extraction always failed.
        if (!std::holds_alternative<bt_list>(v))
            throw std::invalid_argument{"Unable to convert tuple: cannot create sub-tuple from non-bt_list"};
        t = get_tuple<T>(std::get<bt_list>(v));
    } else if constexpr (std::is_same_v<std::string, T> || std::is_same_v<std::string_view, T>) {
        // If we request a string/string_view, we might have the other one and need to copy/view it.
        if (std::holds_alternative<std::string_view>(v))
            t = std::get<std::string_view>(v);
        else
            t = std::get<std::string>(v);
    } else {
        t = std::get<T>(v);
    }
}
/// Unpacks bt_list `l` element-by-element into tuple `t`; throws std::invalid_argument if the
/// list size does not match the tuple arity.
template <typename Tuple, size_t... Is>
void get_tuple_impl(Tuple& t, const bt_list& l, std::index_sequence<Is...>) {
    if (l.size() != sizeof...(Is))
        throw std::invalid_argument{"Unable to convert tuple: bt_list has wrong size"};
    auto it = l.begin();
    (get_tuple_impl_one(std::get<Is>(t), it), ...);
}
} // namespace detail
/// Class that allows you to walk through a bt-encoded list in memory without copying or allocating
/// memory. It accesses existing memory directly and so the caller must ensure that the referenced
/// memory stays valid for the lifetime of the bt_list_consumer object.
///
/// NOTE(review): the `is_*` predicates below read `data.front()` without an emptiness check, so
/// they assume the buffer is non-empty (e.g. at least the list's closing 'e' remains) -- confirm
/// callers maintain that invariant.
class bt_list_consumer {
protected:
    // The unconsumed remainder of the input; each consume_*()/skip_value() advances this view.
    std::string_view data;
    bt_list_consumer() = default;
public:
    bt_list_consumer(std::string_view data_);
    /// Copy constructor. Making a copy copies the current position so can be used for multipass
    /// iteration through a list.
    bt_list_consumer(const bt_list_consumer&) = default;
    bt_list_consumer& operator=(const bt_list_consumer&) = default;
    /// Get a copy of the current buffer
    std::string_view current_buffer() const { return data; }
    /// Returns true if the next value indicates the end of the list
    bool is_finished() const { return data.front() == 'e'; }
    /// Returns true if the next element looks like an encoded string
    bool is_string() const { return data.front() >= '0' && data.front() <= '9'; }
    /// Returns true if the next element looks like an encoded integer
    bool is_integer() const { return data.front() == 'i'; }
    /// Returns true if the next element looks like an encoded negative integer
    bool is_negative_integer() const { return is_integer() && data.size() >= 2 && data[1] == '-'; }
    /// Returns true if the next element looks like an encoded list
    bool is_list() const { return data.front() == 'l'; }
    /// Returns true if the next element looks like an encoded dict
    bool is_dict() const { return data.front() == 'd'; }
    /// Attempt to parse the next value as a string (and advance just past it). Throws if the next
    /// value is not a string.
    std::string consume_string();
    /// As consume_string(), but returns a view into the original buffer instead of a copy.
    std::string_view consume_string_view();
    /// Attempts to parse the next value as an integer (and advance just past it). Throws if the
    /// next value is not an integer.
    template <typename IntType>
    IntType consume_integer() {
        if (!is_integer()) throw bt_deserialize_invalid_type{"next value is not an integer"};
        // Parse into a local copy of the view; `data` only advances if parsing succeeds.
        std::string_view next{data};
        IntType ret;
        detail::bt_deserialize<IntType>{}(next, ret);
        data = next;
        return ret;
    }
    /// Consumes a list, return it as a list-like type. Can also be used for tuples/pairs. This
    /// typically requires dynamic allocation, but only has to parse the data once. Compare with
    /// consume_list_data() which allows alloc-free traversal, but requires parsing twice (if the
    /// contents are to be used).
    template <typename T = bt_list>
    T consume_list() {
        T list;
        consume_list(list);
        return list;
    }
    /// Same as above, but takes a pre-existing list-like data type.
    template <typename T>
    void consume_list(T& list) {
        if (!is_list()) throw bt_deserialize_invalid_type{"next bt value is not a list"};
        std::string_view n{data};
        detail::bt_deserialize<T>{}(n, list);
        data = n;
    }
    /// Consumes a dict, return it as a dict-like type. This typically requires dynamic allocation,
    /// but only has to parse the data once. Compare with consume_dict_data() which allows
    /// alloc-free traversal, but requires parsing twice (if the contents are to be used).
    template <typename T = bt_dict>
    T consume_dict() {
        T dict;
        consume_dict(dict);
        return dict;
    }
    /// Same as above, but takes a pre-existing dict-like data type.
    template <typename T>
    void consume_dict(T& dict) {
        if (!is_dict()) throw bt_deserialize_invalid_type{"next bt value is not a dict"};
        std::string_view n{data};
        detail::bt_deserialize<T>{}(n, dict);
        data = n;
    }
    /// Consumes a value without returning it.
    void skip_value();
    /// Attempts to parse the next value as a list and returns the string_view that contains the
    /// entire thing. This is recursive into both lists and dicts and likely to be quite
    /// inefficient for large, nested structures (unless the values only need to be skipped but
    /// aren't separately needed). This, however, does not require dynamic memory allocation.
    std::string_view consume_list_data();
    /// Attempts to parse the next value as a dict and returns the string_view that contains the
    /// entire thing. This is recursive into both lists and dicts and likely to be quite
    /// inefficient for large, nested structures (unless the values only need to be skipped but
    /// aren't separately needed). This, however, does not require dynamic memory allocation.
    std::string_view consume_dict_data();
};
/// Class that allows you to walk through key-value pairs of a bt-encoded dict in memory without
/// copying or allocating memory.  It accesses existing memory directly and so the caller must
/// ensure that the referenced memory stays valid for the lifetime of the bt_dict_consumer object.
class bt_dict_consumer : private bt_list_consumer {
    /// Cached key of the next pair; cleared (via flush_key) once the pair's value is consumed.
    std::string_view key_;

    /// Consume the key if not already consumed and there is a key present (rather than 'e').
    /// Throws exception if what should be a key isn't a string, or if the key consumes the entire
    /// data (i.e. requires that it be followed by something).  Returns true if the key was consumed
    /// (either now or previously and cached).
    bool consume_key();

    /// Clears the cached key and returns it.  Must have already called consume_key directly or
    /// indirectly via one of the `is_{...}` methods.
    std::string_view flush_key() {
        std::string_view k;
        k.swap(key_);
        return k;
    }

public:
    bt_dict_consumer(std::string_view data_);

    /// Copy constructor.  Making a copy copies the current position so can be used for multipass
    /// iteration through a dict.
    bt_dict_consumer(const bt_dict_consumer&) = default;
    bt_dict_consumer& operator=(const bt_dict_consumer&) = default;

    /// Returns true if the next value indicates the end of the dict
    bool is_finished() { return !consume_key() && data.front() == 'e'; }
    /// Operator bool is an alias for `!is_finished()`
    operator bool() { return !is_finished(); }
    /// Returns true if the next value looks like an encoded string
    bool is_string() { return consume_key() && data.front() >= '0' && data.front() <= '9'; }
    /// Returns true if the next element looks like an encoded integer
    bool is_integer() { return consume_key() && data.front() == 'i'; }
    /// Returns true if the next element looks like an encoded negative integer
    bool is_negative_integer() { return is_integer() && data.size() >= 2 && data[1] == '-'; }
    /// Returns true if the next element looks like an encoded list
    bool is_list() { return consume_key() && data.front() == 'l'; }
    /// Returns true if the next element looks like an encoded dict
    bool is_dict() { return consume_key() && data.front() == 'd'; }

    /// Returns the key of the next pair.  This does not have to be called; it is also returned by
    /// all of the other consume_* methods.  The value is cached whether called here or by some
    /// other method; accessing it multiple times simply accesses the cache until the next value is
    /// consumed.
    std::string_view key() {
        if (!consume_key())
            throw bt_deserialize_invalid{"Cannot access next key: at the end of the dict"};
        return key_;
    }

    /// Attempt to parse the next value as a string->string pair (and advance just past it).  Throws
    /// if the next value is not a string.
    std::pair<std::string_view, std::string_view> next_string();

    /// Attempts to parse the next value as a string->integer pair (and advance just past it).
    /// Throws if the next value is not an integer.
    template <typename IntType>
    std::pair<std::string_view, IntType> next_integer() {
        if (!is_integer()) throw bt_deserialize_invalid_type{"next bt dict value is not an integer"};
        std::pair<std::string_view, IntType> ret;
        ret.second = bt_list_consumer::consume_integer<IntType>();
        ret.first = flush_key();
        return ret;
    }

    /// Consumes a string->list pair, return it as a list-like type.  This typically requires
    /// dynamic allocation, but only has to parse the data once.  Compare with consume_list_data()
    /// which allows alloc-free traversal, but requires parsing twice (if the contents are to be
    /// used).
    template <typename T = bt_list>
    std::pair<std::string_view, T> next_list() {
        std::pair<std::string_view, T> pair;
        pair.first = next_list(pair.second);
        return pair;
    }

    /// Same as above, but takes a pre-existing list-like data type.  Returns the key.
    template <typename T>
    std::string_view next_list(T& list) {
        if (!is_list()) throw bt_deserialize_invalid_type{"next bt value is not a list"};
        bt_list_consumer::consume_list(list);
        return flush_key();
    }

    /// Consumes a string->dict pair, return it as a dict-like type.  This typically requires
    /// dynamic allocation, but only has to parse the data once.  Compare with consume_dict_data()
    /// which allows alloc-free traversal, but requires parsing twice (if the contents are to be
    /// used).
    template <typename T = bt_dict>
    std::pair<std::string_view, T> next_dict() {
        std::pair<std::string_view, T> pair;
        // Bug fix: this previously called `consume_dict(pair.second)`, whose void return cannot
        // supply the key (and does not compile when instantiated); use next_dict(...) which
        // returns the key, mirroring next_list() above.
        pair.first = next_dict(pair.second);
        return pair;
    }

    /// Same as above, but takes a pre-existing dict-like data type.  Returns the key.
    template <typename T>
    std::string_view next_dict(T& dict) {
        if (!is_dict()) throw bt_deserialize_invalid_type{"next bt value is not a dict"};
        bt_list_consumer::consume_dict(dict);
        return flush_key();
    }

    /// Attempts to parse the next value as a string->list pair and returns the string_view that
    /// contains the entire thing.  This is recursive into both lists and dicts and likely to be
    /// quite inefficient for large, nested structures (unless the values only need to be skipped
    /// but aren't separately needed).  This, however, does not require dynamic memory allocation.
    std::pair<std::string_view, std::string_view> next_list_data() {
        if (data.size() < 2 || !is_list()) throw bt_deserialize_invalid_type{"next bt dict value is not a list"};
        return {flush_key(), bt_list_consumer::consume_list_data()};
    }

    /// Same as next_list_data(), but wraps the value in a bt_list_consumer for convenience
    std::pair<std::string_view, bt_list_consumer> next_list_consumer() { return next_list_data(); }

    /// Attempts to parse the next value as a string->dict pair and returns the string_view that
    /// contains the entire thing.  This is recursive into both lists and dicts and likely to be
    /// quite inefficient for large, nested structures (unless the values only need to be skipped
    /// but aren't separately needed).  This, however, does not require dynamic memory allocation.
    std::pair<std::string_view, std::string_view> next_dict_data() {
        if (data.size() < 2 || !is_dict()) throw bt_deserialize_invalid_type{"next bt dict value is not a dict"};
        return {flush_key(), bt_list_consumer::consume_dict_data()};
    }

    /// Same as next_dict_data(), but wraps the value in a bt_dict_consumer for convenience
    std::pair<std::string_view, bt_dict_consumer> next_dict_consumer() { return next_dict_data(); }

    /// Skips ahead until we find the first key >= the given key or reach the end of the dict.
    /// Returns true if we found an exact match, false if we reached some greater value or the end.
    /// If we didn't hit the end, the next `consume_*()` call will return the key-value pair we
    /// found (either the exact match or the first key greater than the requested key).
    ///
    /// Two important notes:
    ///
    /// - properly encoded bt dicts must have lexicographically sorted keys, and this method assumes
    ///   that the input is correctly sorted (and thus if we find a greater value then your key does
    ///   not exist).
    /// - this is irreversible; you cannot return to skipped values without reparsing.  (You *can*,
    ///   however, make a copy of the bt_dict_consumer before calling and use the copy to return to
    ///   the pre-skipped position).
    bool skip_until(std::string_view find) {
        while (consume_key() && key_ < find) {
            flush_key();
            skip_value();
        }
        return key_ == find;
    }

    /// The `consume_*` functions are wrappers around next_whatever that discard the returned key.
    ///
    /// Intended for use with skip_until such as:
    ///
    ///     std::string value;
    ///     if (d.skip_until("key"))
    ///         value = d.consume_string();
    ///
    auto consume_string_view() { return next_string().second; }
    auto consume_string() { return std::string{consume_string_view()}; }

    template <typename IntType>
    auto consume_integer() { return next_integer<IntType>().second; }

    template <typename T = bt_list>
    auto consume_list() { return next_list<T>().second; }

    template <typename T>
    void consume_list(T& list) { next_list(list); }

    template <typename T = bt_dict>
    auto consume_dict() { return next_dict<T>().second; }

    template <typename T>
    void consume_dict(T& dict) { next_dict(dict); }

    std::string_view consume_list_data() { return next_list_data().second; }
    std::string_view consume_dict_data() { return next_dict_data().second; }

    bt_list_consumer consume_list_consumer() { return consume_list_data(); }
    bt_dict_consumer consume_dict_consumer() { return consume_dict_data(); }
};
} // namespace worktipsmq
|
# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.pytorch_transformers.modeling_bert import (BertEmbeddings,
BertSelfAttention, BertAttention, BertEncoder, BertLayer,
BertSelfOutput, BertIntermediate, BertOutput,
BertPooler, BertLayerNorm, BertPreTrainedModel,
BertPredictionHeadTransform, BertOnlyMLMHead, BertLMPredictionHead,
BertConfig, BERT_PRETRAINED_MODEL_ARCHIVE_MAP, GELU,
load_tf_weights_in_bert)
from .modeling_utils import CaptionPreTrainedModel, ImgPreTrainedModel
from ..utils.cbs import ConstrainedBeamSearch, select_best_beam_with_constraints
import time, random, copy
import numpy as np
# from .clipp.clip import *
# from .clipp.model import *
logger = logging.getLogger(__name__)
def soft_cross_entropy(target, input_prob, reduction='mean'):
    """Binary cross entropy against soft labels.

    Args:
        target: per-sample soft probability of the positive class (shape ``(batch,)``).
        input_prob: unnormalized scores (logits) with 2 classes on dim 1; softmax is
            applied internally via ``log_softmax``.
        reduction: 'none' | 'mean' | 'sum'.

    Returns:
        Per-sample losses ('none') or the reduced scalar loss.

    Raises:
        NotImplementedError: for an unrecognized ``reduction``.
    """
    log_probs = nn.functional.log_softmax(input_prob, dim=1)
    pos = target.float()
    # Build the soft 2-class distribution [P(neg), P(pos)] per sample.
    soft_targets = torch.stack([1 - pos, pos], dim=1)
    per_sample = -torch.sum(soft_targets.view(soft_targets.shape[0], -1) * log_probs, dim=1)
    if reduction == 'none':
        return per_sample
    if reduction == 'mean':
        return torch.mean(per_sample)
    if reduction == 'sum':
        return torch.sum(per_sample)
    raise NotImplementedError('Unsupported reduction mode.')
def get_parameter_dtype(parameter):
    """Return the dtype of a module's first parameter.

    Falls back to scanning plain tensor attributes when the module exposes no
    parameters (nn.DataParallel compatibility in PyTorch 1.5).
    """
    try:
        return next(parameter.parameters()).dtype
    except StopIteration:
        # No parameters registered: look for the first tensor stored directly
        # on the module's __dict__ instead.
        def tensor_attrs(module):
            return [(name, value) for name, value in module.__dict__.items()
                    if torch.is_tensor(value)]

        members = parameter._named_members(get_members_fn=tensor_attrs)
        _, first_tensor = next(members)
        return first_tensor.dtype
class CaptionBertSelfAttention(BertSelfAttention):
    """
    Modified from BertSelfAttention to support an optional `history_state`:
    pre-computed hidden states that are prepended to the key/value context
    (queries still come only from the current hidden states).
    """
    def __init__(self, config):
        super(CaptionBertSelfAttention, self).__init__(config)

    def forward(self, hidden_states, attention_mask, head_mask=None,
                history_state=None):
        # Keys/values attend over history + current tokens when history is given.
        if history_state is not None:
            kv_input = torch.cat([history_state, hidden_states], dim=1)
        else:
            kv_input = hidden_states
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(kv_input))
        v = self.transpose_for_scores(self.value(kv_input))

        # Scaled dot-product attention; attention_mask is additive (-10000 at
        # masked positions), precomputed by the caller.
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        probs = nn.Softmax(dim=-1)(scores)
        # Dropout on attention probabilities (drops whole tokens to attend to,
        # as in the original Transformer paper).
        probs = self.dropout(probs)
        if head_mask is not None:
            probs = probs * head_mask

        context = torch.matmul(probs, v)
        context = context.permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        context = context.view(*merged_shape)
        return (context, probs) if self.output_attentions else (context,)
class CaptionBertAttention(BertAttention):
    """
    Modified from BertAttention to route `history_state` through to the
    history-aware self-attention layer.
    """
    def __init__(self, config):
        super(CaptionBertAttention, self).__init__(config)
        self.self = CaptionBertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask, head_mask=None,
                history_state=None):
        attn = self.self(input_tensor, attention_mask, head_mask, history_state)
        projected = self.output(attn[0], input_tensor)
        # Keep any attention probabilities behind the projected output.
        return (projected,) + attn[1:]
class CaptionBertEncoder(BertEncoder):
    """
    Modified from BertEncoder to add support for output_hidden_states, a per-phase
    list of attention masks, and optionally returning an intermediate layer's output.
    """
    def __init__(self, config):
        super(CaptionBertEncoder, self).__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.num_layers = config.num_hidden_layers
        # Replace the stock layer stack with history-state-aware layers.
        self.layer = nn.ModuleList([CaptionBertLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, head_mask=None,
                encoder_history_states=None, return_at_layer=None):
        # attention_mask may be a single extended mask, or a list of masks; in list
        # mode the layers are split into len(attention_mask) consecutive phases and
        # each phase uses its own mask.
        all_hidden_states = ()
        all_attentions = ()
        stage_output = None   # hidden states at the end of the first phase (list-mask mode only)
        mid_output = None     # hidden states right after layer `return_at_layer`, if requested
        if isinstance(attention_mask, list):
            num_layer_per_phase = math.ceil(self.num_layers / len(attention_mask))
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            history_state = None if encoder_history_states is None else encoder_history_states[i]
            if isinstance(attention_mask, list):
                # Select this layer's phase mask by integer division of the layer index.
                layer_outputs = layer_module(
                        hidden_states, attention_mask[i//num_layer_per_phase], head_mask[i],
                        history_state)
                if i == num_layer_per_phase-1:
                    stage_output = layer_outputs[0]
            else:
                layer_outputs = layer_module(
                        hidden_states, attention_mask, head_mask[i],
                        history_state)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            if return_at_layer is not None and i==return_at_layer:
                mid_output = hidden_states
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        if stage_output is not None:
            outputs = outputs + (stage_output,)
        # NOTE: returns a 2-tuple (outputs, mid_output) -- callers must unpack accordingly.
        return outputs, mid_output  # outputs, (hidden states), (attentions), (stage outputs)
class CaptionBertLayer(BertLayer):
    """
    Modified from BertLayer to pass `history_state` down to the attention block.
    """
    def __init__(self, config):
        super(CaptionBertLayer, self).__init__(config)
        self.attention = CaptionBertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, head_mask=None,
                history_state=None):
        attn_outputs = self.attention(hidden_states, attention_mask,
                                      head_mask, history_state)
        attn_out = attn_outputs[0]
        # Standard BERT feed-forward block with residual handled inside self.output.
        ffn_hidden = self.intermediate(attn_out)
        layer_out = self.output(ffn_hidden, attn_out)
        return (layer_out,) + attn_outputs[1:]
class BertImgModel(BertPreTrainedModel):
    """ Expand from BertModel to handle image region features as input.

    Image features are embedded (via a code embedding for the 'dis_code*' variants,
    otherwise a linear projection) and concatenated after the token embeddings before
    running the shared encoder.
    """
    def __init__(self, config):
        super(BertImgModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = CaptionBertEncoder(config)
        self.pooler = BertPooler(config)

        self.img_dim = config.img_feature_dim
        logger.info('BertImgModel Image Dimension: {}'.format(self.img_dim))
        self.img_feature_type = config.img_feature_type
        if hasattr(config, 'use_img_layernorm'):
            self.use_img_layernorm = config.use_img_layernorm
        else:
            self.use_img_layernorm = None

        # Image-feature embedding: discrete code variants or a plain linear projection.
        if config.img_feature_type == 'dis_code':
            self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
            self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True)
        elif config.img_feature_type == 'dis_code_t':  # transpose
            self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
            self.img_embedding = nn.Linear(config.code_size, self.config.hidden_size, bias=True)
        elif config.img_feature_type == 'dis_code_scale':  # scaled
            self.input_embeddings = nn.Linear(config.code_dim, config.code_size, bias=True)
            self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
            self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True)
        else:
            self.img_embedding = nn.Linear(self.img_dim, self.config.hidden_size, bias=True)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if self.use_img_layernorm:
            self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.img_layer_norm_eps)
        self.apply(self.init_weights)

    def _resize_token_embeddings(self, new_num_tokens):
        """Resize the word-embedding table to `new_num_tokens` rows and return it."""
        old_embeddings = self.embeddings.word_embeddings
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.embeddings.word_embeddings = new_embeddings
        return self.embeddings.word_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None,
                position_ids=None, head_mask=None, img_feats=None,
                encoder_history_states=None):
        """Run text (and optional image-region) inputs through the encoder.

        Returns: (sequence_output, pooled_output, *extra encoder outputs).
        """
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        if isinstance(attention_mask, list):
            # One mask per encoder phase (see CaptionBertEncoder list-mask mode).
            extended_attention_mask = []
            for i in range(len(attention_mask)):
                if attention_mask[i].dim() == 2:
                    extended_attention_mask_c = attention_mask[i].unsqueeze(1).unsqueeze(2)
                elif attention_mask[i].dim() == 3:
                    extended_attention_mask_c = attention_mask[i].unsqueeze(1)
                else:
                    raise NotImplementedError
                extended_attention_mask_c = extended_attention_mask_c.to(dtype=self.dtype)  # fp16 compatibility
                extended_attention_mask_c = (1.0 - extended_attention_mask_c) * -10000.0
                extended_attention_mask.append(extended_attention_mask_c)
        else:
            if attention_mask.dim() == 2:
                extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            elif attention_mask.dim() == 3:
                extended_attention_mask = attention_mask.unsqueeze(1)
            else:
                raise NotImplementedError
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        # Prepare head mask if needed: convert [num_heads] or
        # [num_hidden_layers x num_heads] to
        # [num_hidden_layers x batch x num_heads x seq_length x seq_length].
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # per-layer head mask
            head_mask = head_mask.to(dtype=self.dtype)  # fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(input_ids, position_ids=position_ids,
                                           token_type_ids=token_type_ids)
        if encoder_history_states:
            assert img_feats is None, "Cannot take image features while using encoder history states"

        if img_feats is not None:
            if self.img_feature_type == 'dis_code':
                code_emb = self.code_embeddings(img_feats)
                img_embedding_output = self.img_embedding(code_emb)
            elif self.img_feature_type == 'dis_code_t':  # transpose
                code_emb = self.code_embeddings(img_feats)
                code_emb = code_emb.permute(0, 2, 1)
                img_embedding_output = self.img_embedding(code_emb)
            elif self.img_feature_type == 'dis_code_scale':  # left scaled
                code_emb = self.code_embeddings(img_feats)
                img_embedding_output = self.img_embedding(code_emb)
            else:
                img_embedding_output = self.img_embedding(img_feats)
                if self.use_img_layernorm:
                    img_embedding_output = self.LayerNorm(img_embedding_output)
                # add dropout on image embedding
                img_embedding_output = self.dropout(img_embedding_output)
            # concatenate token and image embeddings along the sequence dimension
            embedding_output = torch.cat((embedding_output, img_embedding_output), 1)

        # Bug fix: CaptionBertEncoder.forward returns a 2-tuple (outputs, mid_output);
        # the original code indexed the 2-tuple directly, so `sequence_output` was the
        # inner outputs tuple rather than the final hidden states (breaking the pooler).
        # Unpack it, discarding mid_output (return_at_layer is never passed here).
        encoder_outputs, _ = self.encoder(embedding_output,
                extended_attention_mask, head_mask=head_mask,
                encoder_history_states=encoder_history_states)
        sequence_output = encoder_outputs[0]
        if len(encoder_outputs) > 1:
            stage_outputs = encoder_outputs[1]  # unused; kept for parity with original code
        pooled_output = self.pooler(sequence_output)

        # add hidden_states and attentions if they are here
        outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]
        return outputs

    @property
    def dtype(self):
        # Model parameter dtype (used for fp16-compatible mask casting).
        return get_parameter_dtype(self)
class BiBertImgModel2(BertPreTrainedModel):
""" Expand from BertImgModel to handle 2-stream input
"""
    def __init__(self, config):
        # Two-stream model: each stream (text / vision) gets its own half-depth
        # encoder, and a third half-depth encoder fuses them.
        super(BiBertImgModel2, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        half_config = copy.deepcopy(config)
        half_config.num_hidden_layers = half_config.num_hidden_layers//2  # 2-phases
        self.vis_encoder = CaptionBertEncoder(half_config)
        self.txt_encoder = CaptionBertEncoder(half_config)
        self.mul_encoder = CaptionBertEncoder(half_config)
        # CLIP-style projection matrices for the [CLS] embedding of each stream,
        # initialized with std = hidden_size**-0.5.
        scale = config.hidden_size ** -0.5
        self.txt_proj = nn.Parameter(scale * torch.randn(config.hidden_size, config.hidden_size))
        self.vis_proj = nn.Parameter(scale * torch.randn(config.hidden_size, config.hidden_size))
        self.pooler = BertPooler(config)

        self.img_dim = config.img_feature_dim
        logger.info('BertImgModel Image Dimension: {}'.format(self.img_dim))
        self.img_feature_type = config.img_feature_type
        if hasattr(config, 'use_img_layernorm'):
            self.use_img_layernorm = config.use_img_layernorm
        else:
            self.use_img_layernorm = None

        # Image-feature embedding layers (same variants as BertImgModel).
        if config.img_feature_type == 'dis_code':
            self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
            self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True)
        elif config.img_feature_type == 'dis_code_t':  # transpose
            self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
            self.img_embedding = nn.Linear(config.code_size, self.config.hidden_size, bias=True)
        elif config.img_feature_type == 'dis_code_scale':  # scaled
            self.input_embeddings = nn.Linear(config.code_dim, config.code_size, bias=True)
            self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
            self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True)
        else:
            self.img_embedding = nn.Linear(self.img_dim, self.config.hidden_size, bias=True)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if self.use_img_layernorm:
            self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.img_layer_norm_eps)
        self.apply(self.init_weights)
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        # NOTE(review): this class defines vis_encoder/txt_encoder/mul_encoder but no
        # `self.encoder`, so calling this method would raise AttributeError -- confirm
        # which encoder(s) pruning is meant to target before using it.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
def forward(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None, max_tag_length=None,
position_ids_a=None, input_ids_b=None, token_type_ids_b=None, attention_mask_b=None, phrase_layer=None,
position_ids_b=None, head_mask=None, img_feats=None, encoder_history_states=None, encode_hn=False):
if attention_mask_a is None:
attention_mask_a = torch.ones_like(input_ids_a)
if attention_mask_b is None:
attention_mask_b = torch.ones_like(input_ids_b)
if token_type_ids_a is None:
token_type_ids_a = torch.zeros_like(input_ids_a)
if token_type_ids_b is None:
token_type_ids_b = torch.zeros_like(input_ids_b)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
if attention_mask_a.dim() == 2:
extended_attention_mask_a = attention_mask_a.unsqueeze(1).unsqueeze(2)
elif attention_mask_a.dim() == 3:
extended_attention_mask_a = attention_mask_a.unsqueeze(1)
else:
raise NotImplementedError
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask_a = extended_attention_mask_a.to(dtype=self.dtype) # fp16 compatibility (editted here to adapt to pytorch>=1.5.0)
extended_attention_mask_a = (1.0 - extended_attention_mask_a) * -10000.0
if attention_mask_b.dim() == 2:
extended_attention_mask_b = attention_mask_b.unsqueeze(1).unsqueeze(2)
elif attention_mask_b.dim() == 3:
extended_attention_mask_b = attention_mask_b.unsqueeze(1)
else:
raise NotImplementedError
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask_b = extended_attention_mask_b.to(dtype=self.dtype) # fp16 compatibility (editted here to adapt to pytorch>=1.5.0)
extended_attention_mask_b = (1.0 - extended_attention_mask_b) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers/2, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
# switch to float if needed + fp16 compatibility
# head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility
head_mask = head_mask.to(dtype=self.dtype) # switch to fload if need + fp16 compatibility (edited here to adapt to pytorch>=1.5.0)
else:
head_mask = [None] * (self.config.num_hidden_layers//2)
embedding_output_a = self.embeddings(input_ids_a, position_ids=position_ids_a,
token_type_ids=token_type_ids_a)
embedding_output_b = self.embeddings(input_ids_b, position_ids=position_ids_b,
token_type_ids=token_type_ids_b)
if encoder_history_states:
assert img_feats is None, "Cannot take image features while using encoder history states"
if img_feats is not None:
if self.img_feature_type == 'dis_code':
code_emb = self.code_embeddings(img_feats)
img_embedding_output = self.img_embedding(code_emb)
elif self.img_feature_type == 'dis_code_t': # transpose
code_emb = self.code_embeddings(img_feats)
code_emb = code_emb.permute(0, 2, 1)
img_embedding_output = self.img_embedding(code_emb)
elif self.img_feature_type == 'dis_code_scale': # left scaled
code_emb = self.code_embeddings(img_feats)
img_embedding_output = self.img_embedding(code_emb)
else:
img_embedding_output = self.img_embedding(img_feats)
if self.use_img_layernorm:
img_embedding_output = self.LayerNorm(img_embedding_output)
# add dropout on image embedding
img_embedding_output = self.dropout(img_embedding_output)
# concatenate two embeddings
embedding_output_b = torch.cat((embedding_output_b, img_embedding_output), 1)
# single-stream encoder
txt_encoder_outputs = self.txt_encoder(embedding_output_a, extended_attention_mask_a,
head_mask=head_mask, encoder_history_states=encoder_history_states)[0][0]
vis_encoder_outputs = self.vis_encoder(embedding_output_b, extended_attention_mask_b,
head_mask=head_mask, encoder_history_states=encoder_history_states)[0][0]
only_vis_embeddings = vis_encoder_outputs[:, max_tag_length:, :]
only_vis_mask = extended_attention_mask_b[:, :, :, max_tag_length:]
# VSE-based retrieval loss similar to CLIP
global_txt = F.normalize(txt_encoder_outputs[:, 0, :] @ self.txt_proj, p=2, dim=-1)
global_img = F.normalize(vis_encoder_outputs[:, 0, :] @ self.vis_proj, p=2, dim=-1)
sim_mat = global_txt @ global_img.t()
if encode_hn:
masked_sim_mat = sim_mat - 2 * torch.eye(sim_mat.shape[0], dtype=sim_mat.dtype, device=sim_mat.device)
# hardest in-batch sample
hard_img_index = torch.max(masked_sim_mat, dim=1)[1]
hard_txt_index = torch.max(masked_sim_mat, dim=0)[1]
# text-hard_img pair
hard_img_seq = torch.cat([txt_encoder_outputs,
torch.index_select(only_vis_embeddings, dim=0, index=hard_img_index)], dim=1)
hard_img_mask = torch.cat([extended_attention_mask_a,
torch.index_select(only_vis_mask, dim=0, index=hard_img_index)], dim=-1)
# hard_text-img pair
hard_txt_seq = torch.cat([torch.index_select(txt_encoder_outputs, dim=0, index=hard_txt_index),
only_vis_embeddings], dim=1)
hard_txt_mask = torch.cat([torch.index_select(extended_attention_mask_a, dim=0, index=hard_txt_index),
only_vis_mask], dim=-1)
# select hard-img or hard-text with 0.5/0.5 probability
n_sample = hard_img_seq.shape[0]
dice_index = torch.randperm(n_sample, device=hard_img_seq.device)
hard_seqs = torch.cat([torch.index_select(hard_img_seq, dim=0, index=dice_index[:(n_sample//2)]),
torch.index_select(hard_txt_seq, dim=0, index=dice_index[(n_sample//2):])], dim=0)
hard_mask = torch.cat([torch.index_select(hard_img_mask, dim=0, index=dice_index[:(n_sample//2)]),
torch.index_select(hard_txt_mask, dim=0, index=dice_index[(n_sample//2):])], dim=0)
# arange the hard image and text index for later processing!
# print('hard_txt_index', hard_txt_index, 'dice_index',dice_index)
hard_txt_index_full = torch.cat([torch.index_select(torch.arange(n_sample, device=hard_img_seq.device), dim=0, index=dice_index[:(n_sample//2)]),
torch.index_select(hard_txt_index, dim=0, index=dice_index[(n_sample//2):])], dim=0)
hard_img_index_full = torch.cat([torch.index_select(hard_img_index, dim=0, index=dice_index[:(n_sample//2)]),
torch.index_select(torch.arange(n_sample, device=hard_img_seq.device), dim=0, index=dice_index[(n_sample//2):])], dim=0)
# encoding for hard-negatives
hard_encoder_outputs, mid_hard = self.mul_encoder(hard_seqs, hard_mask, head_mask=head_mask,
encoder_history_states=encoder_history_states, return_at_layer=phrase_layer)
hard_encoder_outputs = hard_encoder_outputs[0]
hard_pooled_output = self.pooler(hard_encoder_outputs)
else:
hard_txt_index_full = None
hard_img_index_full = None
hard_encoder_outputs = None
hard_pooled_output = None
mid_hard = None
joint_bi_output = torch.cat([txt_encoder_outputs, only_vis_embeddings], dim=1)
joint_bi_mask = torch.cat([extended_attention_mask_a, only_vis_mask], dim=-1)
encoder_outputs, mid_joint = self.mul_encoder(joint_bi_output,
joint_bi_mask, head_mask=head_mask, return_at_layer=phrase_layer,
encoder_history_states=encoder_history_states)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
# add hidden_states and attentions if they are here
outputs = (sequence_output, pooled_output,) + (hard_encoder_outputs, hard_pooled_output)
single_stream_output = (txt_encoder_outputs, vis_encoder_outputs, sim_mat)
hard_indexes = (hard_txt_index_full, hard_img_index_full)
mid_output = (mid_joint, mid_hard)
return outputs, single_stream_output, hard_indexes, mid_output
def forward_single(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None, max_tag_length=None,
                   position_ids_a=None, input_ids_b=None, token_type_ids_b=None, attention_mask_b=None,
                   position_ids_b=None, head_mask=None, img_feats=None, encoder_history_states=None):
    """Encode the text stream (a) and the tag+image stream (b) with the two
    single-stream encoders only -- no cross-modal fusion is run.

    Args:
        input_ids_a / input_ids_b: token ids of the text and tag streams.
        img_feats: optional region features appended after the tag tokens
            (embedding depends on ``self.img_feature_type``).
        max_tag_length: unused here; kept for signature parity with forward().

    Returns:
        (global_txt, global_img): L2-normalized projected [CLS] embeddings,
        one per stream, for CLIP-style contrastive retrieval.
    """
    # Fall back to attend-everything masks / all-zero segment ids.
    if attention_mask_a is None:
        attention_mask_a = torch.ones_like(input_ids_a)
    if attention_mask_b is None:
        attention_mask_b = torch.ones_like(input_ids_b)
    if token_type_ids_a is None:
        token_type_ids_a = torch.zeros_like(input_ids_a)
    if token_type_ids_b is None:
        token_type_ids_b = torch.zeros_like(input_ids_b)
    # Broadcast 2D [batch, seq] (or 3D [batch, from, to]) masks to additive
    # attention masks [batch, 1, 1/from, to]: 0.0 where attended, -10000.0
    # where masked (added to attention scores before softmax).
    if attention_mask_a.dim() == 2:
        extended_attention_mask_a = attention_mask_a.unsqueeze(1).unsqueeze(2)
    elif attention_mask_a.dim() == 3:
        extended_attention_mask_a = attention_mask_a.unsqueeze(1)
    else:
        raise NotImplementedError
    extended_attention_mask_a = extended_attention_mask_a.to(dtype=self.dtype)  # fp16 compatibility
    extended_attention_mask_a = (1.0 - extended_attention_mask_a) * -10000.0
    if attention_mask_b.dim() == 2:
        extended_attention_mask_b = attention_mask_b.unsqueeze(1).unsqueeze(2)
    elif attention_mask_b.dim() == 3:
        extended_attention_mask_b = attention_mask_b.unsqueeze(1)
    else:
        raise NotImplementedError
    extended_attention_mask_b = extended_attention_mask_b.to(dtype=self.dtype)  # fp16 compatibility
    extended_attention_mask_b = (1.0 - extended_attention_mask_b) * -10000.0
    # Expand head_mask to [num_layers, batch, num_heads, seq_length, seq_length];
    # 1.0 keeps a head. Only half the layers exist per encoder phase.
    if head_mask is not None:
        if head_mask.dim() == 1:
            head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            # BUGFIX: Tensor.expand() requires int sizes; "num_hidden_layers / 2"
            # is a float in Python 3 and raises a TypeError -- use // instead.
            head_mask = head_mask.expand(self.config.num_hidden_layers // 2, -1, -1, -1, -1)
        elif head_mask.dim() == 2:
            head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # per-layer head_mask
        head_mask = head_mask.to(dtype=self.dtype)  # fp16 compatibility
    else:
        head_mask = [None] * (self.config.num_hidden_layers // 2)
    embedding_output_a = self.embeddings(input_ids_a, position_ids=position_ids_a,
                                         token_type_ids=token_type_ids_a)
    embedding_output_b = self.embeddings(input_ids_b, position_ids=position_ids_b,
                                         token_type_ids=token_type_ids_b)
    if encoder_history_states:
        assert img_feats is None, "Cannot take image features while using encoder history states"
    if img_feats is not None:
        # Embed image region features according to the configured feature type.
        if self.img_feature_type == 'dis_code':
            code_emb = self.code_embeddings(img_feats)
            img_embedding_output = self.img_embedding(code_emb)
        elif self.img_feature_type == 'dis_code_t':  # transpose
            code_emb = self.code_embeddings(img_feats)
            code_emb = code_emb.permute(0, 2, 1)
            img_embedding_output = self.img_embedding(code_emb)
        elif self.img_feature_type == 'dis_code_scale':  # left scaled
            code_emb = self.code_embeddings(img_feats)
            img_embedding_output = self.img_embedding(code_emb)
        else:
            img_embedding_output = self.img_embedding(img_feats)
        if self.use_img_layernorm:
            img_embedding_output = self.LayerNorm(img_embedding_output)
        # add dropout on image embedding
        img_embedding_output = self.dropout(img_embedding_output)
        # Append image region embeddings after the tag-token embeddings.
        embedding_output_b = torch.cat((embedding_output_b, img_embedding_output), 1)
    # Single-stream encoding; the encoder returns (outputs_tuple, ...), so
    # [0][0] selects the last-layer hidden-states tensor.
    txt_encoder_outputs = self.txt_encoder(embedding_output_a, extended_attention_mask_a,
                                           head_mask=head_mask, encoder_history_states=encoder_history_states)[0][0]
    vis_encoder_outputs = self.vis_encoder(embedding_output_b, extended_attention_mask_b,
                                           head_mask=head_mask, encoder_history_states=encoder_history_states)[0][0]
    # VSE-based retrieval similar to CLIP: project the [CLS] token of each
    # stream and L2-normalize.
    global_txt = F.normalize(txt_encoder_outputs[:, 0, :] @ self.txt_proj, p=2, dim=-1)
    global_img = F.normalize(vis_encoder_outputs[:, 0, :] @ self.vis_proj, p=2, dim=-1)
    single_stream_output = (global_txt, global_img)
    return single_stream_output
@property
def dtype(self):
    """torch.dtype of this module's parameters; used to cast attention
    masks so arithmetic matches under fp16 / mixed precision."""
    param_dtype = get_parameter_dtype(self)
    return param_dtype
class BiBertImgModel(BertPreTrainedModel):
    """ Expand from BertImgModel to handle 2-stream input.

    Text and tag+image streams are first encoded by separate single-stream
    encoders (each ``num_hidden_layers // 2`` layers deep), then fused by a
    shared multimodal encoder. CLIP-style projection heads on each stream's
    [CLS] token provide a similarity matrix for contrastive retrieval, and
    ``encode_hn=True`` additionally encodes in-batch hard negatives.
    """

    def __init__(self, config):
        super(BiBertImgModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        # Split the layer budget 50/50 between the single-stream phase and
        # the cross-modal fusion phase.
        half_config = copy.deepcopy(config)
        half_config.num_hidden_layers = half_config.num_hidden_layers // 2  # 2-phases
        self.vis_encoder = CaptionBertEncoder(half_config)
        self.txt_encoder = CaptionBertEncoder(half_config)
        self.mul_encoder = CaptionBertEncoder(half_config)
        self.pooler = BertPooler(config)
        # CLIP-style learned projections for the global [CLS] embeddings.
        scale = config.hidden_size ** -0.5
        self.txt_proj = nn.Parameter(scale * torch.randn(config.hidden_size, config.hidden_size))
        self.vis_proj = nn.Parameter(scale * torch.randn(config.hidden_size, config.hidden_size))
        self.img_dim = config.img_feature_dim
        logger.info('BertImgModel Image Dimension: {}'.format(self.img_dim))
        self.img_feature_type = config.img_feature_type
        if hasattr(config, 'use_img_layernorm'):
            self.use_img_layernorm = config.use_img_layernorm
        else:
            self.use_img_layernorm = None
        # Image-feature embedding layer depends on the feature type.
        if config.img_feature_type == 'dis_code':
            self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
            self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True)
        elif config.img_feature_type == 'dis_code_t':  # transpose
            self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
            self.img_embedding = nn.Linear(config.code_size, self.config.hidden_size, bias=True)
        elif config.img_feature_type == 'dis_code_scale':  # scaled
            self.input_embeddings = nn.Linear(config.code_dim, config.code_size, bias=True)
            self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
            self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True)
        else:
            self.img_embedding = nn.Linear(self.img_dim, self.config.hidden_size, bias=True)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if self.use_img_layernorm:
            self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.img_layer_norm_eps)
        self.apply(self.init_weights)

    def _resize_token_embeddings(self, new_num_tokens):
        """Resize the word-embedding table to ``new_num_tokens`` rows."""
        old_embeddings = self.embeddings.word_embeddings
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.embeddings.word_embeddings = new_embeddings
        return self.embeddings.word_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        # NOTE(review): this class has no ``self.encoder`` (only vis/txt/mul
        # encoders), so calling this would raise AttributeError -- looks like
        # copy-paste from the single-stream model; confirm before use.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None, max_tag_length=None,
                position_ids_a=None, input_ids_b=None, token_type_ids_b=None, attention_mask_b=None,
                position_ids_b=None, head_mask=None, img_feats=None, encoder_history_states=None, encode_hn=False):
        """Two-phase forward: single-stream encoding, then multimodal fusion.

        Args:
            max_tag_length: number of leading tag tokens in stream b; only
                positions after it (the image regions) enter the fusion stage.
            encode_hn: if True, also encode hardest in-batch negative pairs.

        Returns:
            outputs: (sequence_output, pooled_output, hard_encoder_outputs,
                hard_pooled_output) -- the last two are None unless encode_hn.
            single_stream_output: (txt_encoder_outputs, vis_encoder_outputs,
                sim_mat) where sim_mat is the text-image similarity matrix.
        """
        if attention_mask_a is None:
            attention_mask_a = torch.ones_like(input_ids_a)
        if attention_mask_b is None:
            attention_mask_b = torch.ones_like(input_ids_b)
        if token_type_ids_a is None:
            token_type_ids_a = torch.zeros_like(input_ids_a)
        if token_type_ids_b is None:
            token_type_ids_b = torch.zeros_like(input_ids_b)
        # Broadcast 2D/3D masks to additive attention masks [batch, 1, 1/from, to]:
        # 0.0 where attended, -10000.0 where masked.
        if attention_mask_a.dim() == 2:
            extended_attention_mask_a = attention_mask_a.unsqueeze(1).unsqueeze(2)
        elif attention_mask_a.dim() == 3:
            extended_attention_mask_a = attention_mask_a.unsqueeze(1)
        else:
            raise NotImplementedError
        extended_attention_mask_a = extended_attention_mask_a.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask_a = (1.0 - extended_attention_mask_a) * -10000.0
        if attention_mask_b.dim() == 2:
            extended_attention_mask_b = attention_mask_b.unsqueeze(1).unsqueeze(2)
        elif attention_mask_b.dim() == 3:
            extended_attention_mask_b = attention_mask_b.unsqueeze(1)
        else:
            raise NotImplementedError
        extended_attention_mask_b = extended_attention_mask_b.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask_b = (1.0 - extended_attention_mask_b) * -10000.0
        # Expand head_mask to [num_layers, batch, num_heads, seq, seq]; 1.0 keeps a head.
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                # BUGFIX: Tensor.expand() requires int sizes; "/ 2" yields a
                # float in Python 3 and raises a TypeError -- use // instead.
                head_mask = head_mask.expand(self.config.num_hidden_layers // 2, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # per-layer head_mask
            head_mask = head_mask.to(dtype=self.dtype)  # fp16 compatibility
        else:
            head_mask = [None] * (self.config.num_hidden_layers // 2)
        embedding_output_a = self.embeddings(input_ids_a, position_ids=position_ids_a,
                                             token_type_ids=token_type_ids_a)
        embedding_output_b = self.embeddings(input_ids_b, position_ids=position_ids_b,
                                             token_type_ids=token_type_ids_b)
        if encoder_history_states:
            assert img_feats is None, "Cannot take image features while using encoder history states"
        if img_feats is not None:
            # Embed image region features according to the configured feature type.
            if self.img_feature_type == 'dis_code':
                code_emb = self.code_embeddings(img_feats)
                img_embedding_output = self.img_embedding(code_emb)
            elif self.img_feature_type == 'dis_code_t':  # transpose
                code_emb = self.code_embeddings(img_feats)
                code_emb = code_emb.permute(0, 2, 1)
                img_embedding_output = self.img_embedding(code_emb)
            elif self.img_feature_type == 'dis_code_scale':  # left scaled
                code_emb = self.code_embeddings(img_feats)
                img_embedding_output = self.img_embedding(code_emb)
            else:
                img_embedding_output = self.img_embedding(img_feats)
            if self.use_img_layernorm:
                img_embedding_output = self.LayerNorm(img_embedding_output)
            # add dropout on image embedding
            img_embedding_output = self.dropout(img_embedding_output)
            # Append image region embeddings after the tag-token embeddings.
            embedding_output_b = torch.cat((embedding_output_b, img_embedding_output), 1)
        # Single-stream encoding; the encoder returns (outputs_tuple, ...),
        # so [0][0] selects the last-layer hidden-states tensor.
        txt_encoder_outputs = self.txt_encoder(embedding_output_a, extended_attention_mask_a,
                                               head_mask=head_mask, encoder_history_states=encoder_history_states)[0][0]
        vis_encoder_outputs = self.vis_encoder(embedding_output_b, extended_attention_mask_b,
                                               head_mask=head_mask, encoder_history_states=encoder_history_states)[0][0]
        # Drop the tag tokens; only the image-region positions go to fusion.
        only_vis_embeddings = vis_encoder_outputs[:, max_tag_length:, :]
        only_vis_mask = extended_attention_mask_b[:, :, :, max_tag_length:]
        # VSE-based retrieval loss similar to CLIP.
        global_txt = F.normalize(txt_encoder_outputs[:, 0, :] @ self.txt_proj, p=2, dim=-1)
        global_img = F.normalize(vis_encoder_outputs[:, 0, :] @ self.vis_proj, p=2, dim=-1)
        sim_mat = global_txt @ global_img.t()
        if encode_hn:
            # Mask the diagonal (positives) so argmax picks a negative.
            masked_sim_mat = sim_mat - 2 * torch.eye(sim_mat.shape[0], dtype=sim_mat.dtype, device=sim_mat.device)
            # hardest in-batch sample per row (image) / column (text)
            hard_img_index = torch.max(masked_sim_mat, dim=1)[1]
            hard_txt_index = torch.max(masked_sim_mat, dim=0)[1]
            # text-hard_img pair
            hard_img_seq = torch.cat([txt_encoder_outputs,
                                      torch.index_select(only_vis_embeddings, dim=0, index=hard_img_index)], dim=1)
            hard_img_mask = torch.cat([extended_attention_mask_a,
                                       torch.index_select(only_vis_mask, dim=0, index=hard_img_index)], dim=-1)
            # hard_text-img pair
            hard_txt_seq = torch.cat([torch.index_select(txt_encoder_outputs, dim=0, index=hard_txt_index),
                                      only_vis_embeddings], dim=1)
            hard_txt_mask = torch.cat([torch.index_select(extended_attention_mask_a, dim=0, index=hard_txt_index),
                                       only_vis_mask], dim=-1)
            # select hard-img or hard-text with 0.5/0.5 probability
            n_sample = hard_img_seq.shape[0]
            dice_index = torch.randperm(n_sample, device=hard_img_seq.device)
            hard_seqs = torch.cat([torch.index_select(hard_img_seq, dim=0, index=dice_index[:(n_sample // 2)]),
                                   torch.index_select(hard_txt_seq, dim=0, index=dice_index[(n_sample // 2):])], dim=0)
            hard_mask = torch.cat([torch.index_select(hard_img_mask, dim=0, index=dice_index[:(n_sample // 2)]),
                                   torch.index_select(hard_txt_mask, dim=0, index=dice_index[(n_sample // 2):])], dim=0)
            # encoding for hard-negatives
            hard_encoder_outputs, hard_mid = self.mul_encoder(hard_seqs, hard_mask, head_mask=head_mask,
                                                              encoder_history_states=encoder_history_states)
            hard_encoder_outputs = hard_encoder_outputs[0]
            hard_pooled_output = self.pooler(hard_encoder_outputs)
        else:
            hard_encoder_outputs = None
            hard_pooled_output = None
        # Multimodal fusion of text tokens with image-region tokens.
        joint_bi_output = torch.cat([txt_encoder_outputs, only_vis_embeddings], dim=1)
        joint_bi_mask = torch.cat([extended_attention_mask_a, only_vis_mask], dim=-1)
        encoder_outputs, joint_mid = self.mul_encoder(joint_bi_output,
                                                      joint_bi_mask, head_mask=head_mask,
                                                      encoder_history_states=encoder_history_states)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        # add hidden_states and attentions if they are here
        outputs = (sequence_output, pooled_output,) + (hard_encoder_outputs, hard_pooled_output)
        single_stream_output = (txt_encoder_outputs, vis_encoder_outputs, sim_mat)
        return outputs, single_stream_output

    @property
    def dtype(self):
        """torch.dtype of this module's parameters (for fp16-safe mask casts)."""
        return get_parameter_dtype(self)

    def forward_single(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None, max_tag_length=None,
                       position_ids_a=None, input_ids_b=None, token_type_ids_b=None, attention_mask_b=None,
                       position_ids_b=None, head_mask=None, img_feats=None, encoder_history_states=None):
        """Single-stream encoding only (no fusion); returns the CLIP-style
        normalized global embeddings ``(global_txt, global_img)``."""
        if attention_mask_a is None:
            attention_mask_a = torch.ones_like(input_ids_a)
        if attention_mask_b is None:
            attention_mask_b = torch.ones_like(input_ids_b)
        if token_type_ids_a is None:
            token_type_ids_a = torch.zeros_like(input_ids_a)
        if token_type_ids_b is None:
            token_type_ids_b = torch.zeros_like(input_ids_b)
        # Additive attention masks: 0.0 attended, -10000.0 masked.
        if attention_mask_a.dim() == 2:
            extended_attention_mask_a = attention_mask_a.unsqueeze(1).unsqueeze(2)
        elif attention_mask_a.dim() == 3:
            extended_attention_mask_a = attention_mask_a.unsqueeze(1)
        else:
            raise NotImplementedError
        extended_attention_mask_a = extended_attention_mask_a.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask_a = (1.0 - extended_attention_mask_a) * -10000.0
        if attention_mask_b.dim() == 2:
            extended_attention_mask_b = attention_mask_b.unsqueeze(1).unsqueeze(2)
        elif attention_mask_b.dim() == 3:
            extended_attention_mask_b = attention_mask_b.unsqueeze(1)
        else:
            raise NotImplementedError
        extended_attention_mask_b = extended_attention_mask_b.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask_b = (1.0 - extended_attention_mask_b) * -10000.0
        # Expand head_mask to [num_layers, batch, num_heads, seq, seq].
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                # BUGFIX: integer division -- Tensor.expand() rejects floats.
                head_mask = head_mask.expand(self.config.num_hidden_layers // 2, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # per-layer head_mask
            head_mask = head_mask.to(dtype=self.dtype)  # fp16 compatibility
        else:
            head_mask = [None] * (self.config.num_hidden_layers // 2)
        embedding_output_a = self.embeddings(input_ids_a, position_ids=position_ids_a,
                                             token_type_ids=token_type_ids_a)
        embedding_output_b = self.embeddings(input_ids_b, position_ids=position_ids_b,
                                             token_type_ids=token_type_ids_b)
        if encoder_history_states:
            assert img_feats is None, "Cannot take image features while using encoder history states"
        if img_feats is not None:
            if self.img_feature_type == 'dis_code':
                code_emb = self.code_embeddings(img_feats)
                img_embedding_output = self.img_embedding(code_emb)
            elif self.img_feature_type == 'dis_code_t':  # transpose
                code_emb = self.code_embeddings(img_feats)
                code_emb = code_emb.permute(0, 2, 1)
                img_embedding_output = self.img_embedding(code_emb)
            elif self.img_feature_type == 'dis_code_scale':  # left scaled
                code_emb = self.code_embeddings(img_feats)
                img_embedding_output = self.img_embedding(code_emb)
            else:
                img_embedding_output = self.img_embedding(img_feats)
            if self.use_img_layernorm:
                img_embedding_output = self.LayerNorm(img_embedding_output)
            # add dropout on image embedding
            img_embedding_output = self.dropout(img_embedding_output)
            # concatenate two embeddings
            embedding_output_b = torch.cat((embedding_output_b, img_embedding_output), 1)
        # Single-stream encoders. CONSISTENCY FIX: use [0][0] like forward()
        # above -- the encoder returns a 2-tuple (see the mul_encoder unpack),
        # so a bare [0] would yield a tuple and the [:, 0, :] below would fail.
        txt_encoder_outputs = self.txt_encoder(embedding_output_a, extended_attention_mask_a,
                                               head_mask=head_mask, encoder_history_states=encoder_history_states)[0][0]
        vis_encoder_outputs = self.vis_encoder(embedding_output_b, extended_attention_mask_b,
                                               head_mask=head_mask, encoder_history_states=encoder_history_states)[0][0]
        # VSE-based retrieval loss similar to CLIP.
        global_txt = F.normalize(txt_encoder_outputs[:, 0, :] @ self.txt_proj, p=2, dim=-1)
        global_img = F.normalize(vis_encoder_outputs[:, 0, :] @ self.vis_proj, p=2, dim=-1)
        single_stream_output = (global_txt, global_img)
        return single_stream_output
def instance_bce_with_logits(logits, labels, reduction='mean', pos_weight=None):
    """Binary cross-entropy with logits, rescaled so that 'mean' averages
    over instances rather than over every label element (VQA convention).

    Args:
        logits: (batch, num_labels) raw scores.
        labels: (batch, num_labels) binary / soft targets.
        reduction: passed through to F.binary_cross_entropy_with_logits.
        pos_weight: optional per-class positive weight.
    """
    assert logits.dim() == 2
    bce = F.binary_cross_entropy_with_logits(
        logits, labels, reduction=reduction, pos_weight=pos_weight)
    if reduction != 'mean':
        return bce
    # Undo the per-element averaging across the label dimension.
    return bce * labels.size(1)
class ImageBertForSequenceClassification(BertPreTrainedModel):
    """
    Modified from BertForSequenceClassification to support oscar training.

    Wraps BertImgModel (or plain BertModel when img_feature_dim <= 0) with a
    configurable classification head; supports regression, soft-label CE,
    KL, BCE (VQA) and plain cross-entropy losses.
    """
    def __init__(self, config):
        super(ImageBertForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels
        self.loss_type = config.loss_type
        self.config = config
        if config.img_feature_dim > 0:
            self.bert = BertImgModel(config)
        else:
            self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Classification head: linear or 2-layer MLP, per config.classifier.
        if hasattr(config, 'classifier'):
            if not hasattr(config, 'cls_hidden_scale'):
                config.cls_hidden_scale = 2
            if config.classifier == 'linear':
                self.classifier = nn.Linear(config.hidden_size,
                                            self.config.num_labels)
            elif config.classifier == 'mlp':
                self.classifier = nn.Sequential(
                    nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
                    nn.ReLU(),
                    nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels)
                )
        else:
            self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)  # original
        self.apply(self.init_weights)

    def init_code_embedding(self, em):
        """Overwrite the discrete-code embedding table with ``em``."""
        self.bert.code_embeddings.weight.data = em.clone()

    def reinit_cls_head(self):
        # make a re-initialization for the classifier
        self.classifier.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, soft_label=False,
                position_ids=None, head_mask=None, img_feats=None, concep_span=None, loss_weights=None):
        """Classify the pooled [CLS] output; optionally also return the token
        embeddings for the span(s) given by ``concep_span``.

        Returns:
            (loss,) + (logits,) + extras when labels is given, else
            (logits,) + extras; with ``(concept_emb,)`` appended when
            concep_span is not None.
        """
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats)
        pooled_output = outputs[1]
        if concep_span is not None:
            if isinstance(concep_span, list):
                # One shared [start, end) span for the whole batch.
                concept_emb = outputs[0][:, concep_span[0]:concep_span[1]]
            else:
                # Per-instance spans: gather and concatenate along dim 0.
                concept_emb = []
                for i in range(input_ids.shape[0]):
                    concept_emb.append(outputs[0][i, concep_span[i][0]:concep_span[i][1]])
                concept_emb = torch.cat([c.contiguous() for c in concept_emb], dim=0)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            if self.num_labels == 1:  # doing regression
                loss_fct = MSELoss()
                labels = labels.to(torch.float)
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                if soft_label:
                    loss = soft_cross_entropy(labels, logits)
                elif self.loss_type == 'kl':
                    # KL Loss: https://github.com/uclanlp/visualbert/blob/master/pytorch_pretrained_bert/modeling.py
                    loss_fct = torch.nn.KLDivLoss(reduction="batchmean")
                    log_softmax = torch.nn.LogSoftmax(dim=-1)
                    # GENERALIZED: was hard-coded 3129 (the VQA answer-vocab
                    # size); use the configured label count instead.
                    reshaped_logits = logits.contiguous().view(-1, self.num_labels)
                    reshaped_logits = log_softmax(reshaped_logits)
                    loss = loss_fct(reshaped_logits, labels.contiguous())
                elif self.loss_type == 'bce':  # [VQA]
                    loss = instance_bce_with_logits(logits, labels, pos_weight=loss_weights)
                else:  # cross_entropy [GQA, Retrieval, Captioning]
                    loss_fct = CrossEntropyLoss(weight=loss_weights)
                    loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        if concep_span is not None:
            outputs += (concept_emb, )
        return outputs
class ImageBertForSequenceClassification_ML(BertPreTrainedModel):
    """
    Modified from ImageBertForSequenceClassification to support multi-label.
    """
    def __init__(self, config):
        super(ImageBertForSequenceClassification_ML, self).__init__(config)
        self.num_labels = config.num_labels
        self.loss_type = config.loss_type
        self.config = config
        if config.img_feature_dim > 0:
            self.bert = BertImgModel(config)
        else:
            self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Classification head: linear or 2-layer MLP, per config.classifier.
        if hasattr(config, 'classifier'):
            if not hasattr(config, 'cls_hidden_scale'):
                config.cls_hidden_scale = 2
            if config.classifier == 'linear':
                self.classifier = nn.Linear(config.hidden_size,
                                            self.config.num_labels)
            elif config.classifier == 'mlp':
                self.classifier = nn.Sequential(
                    nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
                    nn.ReLU(),
                    nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels)
                )
        else:
            self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)  # original
        self.apply(self.init_weights)

    def init_code_embedding(self, em):
        """Overwrite the discrete-code embedding table with ``em``."""
        self.bert.code_embeddings.weight.data = em.clone()

    def reinit_cls_head(self):
        # make a re-initialization for the classifier
        self.classifier.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, soft_label=False,
                position_ids=None, head_mask=None, img_feats=None, concep_span=None, loss_weights=None):
        """Multi-label classification over the pooled [CLS] output.

        Returns the same tuple structure as ImageBertForSequenceClassification.
        """
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats)
        pooled_output = outputs[1]
        if concep_span is not None:
            if isinstance(concep_span, list):
                # One shared [start, end) span for the whole batch.
                concept_emb = outputs[0][:, concep_span[0]:concep_span[1]]
            else:
                # Per-instance spans: gather and concatenate along dim 0.
                concept_emb = []
                for i in range(input_ids.shape[0]):
                    concept_emb.append(outputs[0][i, concep_span[i][0]:concep_span[i][1]])
                concept_emb = torch.cat([c.contiguous() for c in concept_emb], dim=0)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            if self.num_labels == 1:  # doing regression
                loss_fct = MSELoss()
                labels = labels.to(torch.float)
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                if soft_label:
                    loss = soft_cross_entropy(labels, logits)
                elif self.loss_type == 'kl':
                    # KL Loss: https://github.com/uclanlp/visualbert/blob/master/pytorch_pretrained_bert/modeling.py
                    loss_fct = torch.nn.KLDivLoss(reduction="batchmean")
                    log_softmax = torch.nn.LogSoftmax(dim=-1)
                    # GENERALIZED: was hard-coded 3129 (the VQA answer-vocab
                    # size); use the configured label count instead.
                    reshaped_logits = logits.contiguous().view(-1, self.num_labels)
                    reshaped_logits = log_softmax(reshaped_logits)
                    loss = loss_fct(reshaped_logits, labels.contiguous())
                elif self.loss_type == 'bce':  # [VQA]
                    # NOTE(review): unlike the base class, loss_weights is NOT
                    # passed as pos_weight here -- confirm whether intentional.
                    loss = instance_bce_with_logits(logits, labels)
                else:  # cross_entropy [GQA, Retrieval, Captioning]
                    loss_fct = CrossEntropyLoss(weight=loss_weights)
                    loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        if concep_span is not None:
            outputs += (concept_emb, )
        return outputs
class ImageBertForSequenceClassification2(BertPreTrainedModel):
    """
    Modified from BertForSequenceClassification to support oscar training,
    plus an auxiliary per-concept binary classifier (``con_classifier``)
    trained with BCE and mixed into the main loss via ``con_lambda``.
    """
    def __init__(self, config):
        super(ImageBertForSequenceClassification2, self).__init__(config)
        self.num_labels = config.num_labels
        self.loss_type = config.loss_type
        self.config = config
        if config.img_feature_dim > 0:
            self.bert = BertImgModel(config)
        else:
            self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Classification head: linear or 2-layer MLP, per config.classifier.
        if hasattr(config, 'classifier'):
            if not hasattr(config, 'cls_hidden_scale'):
                config.cls_hidden_scale = 2
            if config.classifier == 'linear':
                self.classifier = nn.Linear(config.hidden_size,
                                            self.config.num_labels)
            elif config.classifier == 'mlp':
                self.classifier = nn.Sequential(
                    nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
                    nn.ReLU(),
                    nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels)
                )
        else:
            self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)  # original
        # Binary (keep/drop) classifier applied to concept-token embeddings.
        self.con_classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_weights)

    def init_code_embedding(self, em):
        """Overwrite the discrete-code embedding table with ``em``."""
        self.bert.code_embeddings.weight.data = em.clone()

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None, img_feats=None, concep_span=None, con_pos=None, con_lab=None, con_lambda=0):
        """Classify the pooled output; optionally add a concept BCE loss.

        Args:
            con_pos: (batch, num_con) positions of concept tokens per instance.
            con_lab: (batch, num_con) binary targets for those concepts.
            con_lambda: weight of the concept loss added to the main loss.
        """
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats)
        pooled_output = outputs[1]
        if con_pos is not None:
            # NOTE(review): outputs[2] is indexed as (batch, seq, hidden) token
            # embeddings here -- confirm against the wrapped model's outputs.
            bs = outputs[2].shape[0]
            con_emb = outputs[2][:, con_pos, :][torch.arange(bs), torch.arange(bs), :]
            num_con_per_ins = con_pos.shape[1]
            con_emb = con_emb.reshape(bs * num_con_per_ins, -1)
            con_lab = con_lab.reshape(-1)
            # BUGFIX: squeeze only the trailing singleton dim. A bare
            # .squeeze() would also drop the batch dim when bs*num_con == 1,
            # making BCELoss fail on mismatched shapes.
            con_logits = self.con_classifier(con_emb).squeeze(-1)
            con_logits = torch.sigmoid(con_logits)
            con_loss_fn = nn.BCELoss()
            con_loss = con_loss_fn(con_logits, con_lab)
        else:
            con_loss = 0
        if concep_span is not None:
            if isinstance(concep_span, list):
                # One shared [start, end) span for the whole batch.
                concept_emb = outputs[0][:, concep_span[0]:concep_span[1]]
            else:
                # Per-instance spans: gather and concatenate along dim 0.
                concept_emb = []
                for i in range(input_ids.shape[0]):
                    concept_emb.append(outputs[0][i, concep_span[i][0]:concep_span[i][1]])
                concept_emb = torch.cat([c.contiguous() for c in concept_emb], dim=0)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # BUGFIX: require con_pos too -- con_logits only exists when con_pos
        # was given; the old "con_lab is not None" alone could NameError.
        if con_pos is not None and con_lab is not None:
            outputs = (logits, con_logits,) + outputs[2:]
        else:
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            if self.num_labels == 1:  # doing regression
                loss_fct = MSELoss()
                labels = labels.to(torch.float)
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                if self.loss_type == 'kl':
                    # KL Loss: https://github.com/uclanlp/visualbert/blob/master/pytorch_pretrained_bert/modeling.py
                    loss_fct = torch.nn.KLDivLoss(reduction="batchmean")
                    log_softmax = torch.nn.LogSoftmax(dim=-1)
                    # GENERALIZED: was hard-coded 3129 (the VQA answer-vocab
                    # size); use the configured label count instead.
                    reshaped_logits = logits.contiguous().view(-1, self.num_labels)
                    reshaped_logits = log_softmax(reshaped_logits)
                    loss = loss_fct(reshaped_logits, labels.contiguous())
                elif self.loss_type == 'bce':  # [VQA]
                    loss = instance_bce_with_logits(logits, labels)
                else:  # cross_entropy [GQA, Retrieval, Captioning]
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # Mix in the auxiliary concept loss (0 when con_pos is None).
            loss += con_lambda * con_loss
            outputs = (loss,) + outputs
        if concep_span is not None:
            outputs += (concept_emb, )
        return outputs
class ImageBertForMultipleChoice(BertPreTrainedModel):
    """
    Modified from BertForMultipleChoice to support oscar training.

    Each example carries `num_choice` candidate sequences; every candidate is
    encoded independently, the pooled outputs are concatenated per example,
    and a single classifier scores the concatenation.
    """
    def __init__(self, config):
        super(ImageBertForMultipleChoice, self).__init__(config)
        self.loss_type = config.loss_type
        # Image-aware encoder when visual features are configured, else plain BERT.
        if config.img_feature_dim > 0:
            self.bert = BertImgModel(config) # ImageBERT
        else:
            self.bert = BertModel(config) # original BERT
        if hasattr(config, 'use_img_layernorm'):
            self.use_img_layernorm = config.use_img_layernorm
        else:
            self.use_img_layernorm = None
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Classifier input is the concatenation of all choices' pooled outputs,
        # hence the num_choice * hidden_size input width in every variant below.
        if hasattr(config, 'classifier'):
            if not hasattr(config, 'cls_hidden_scale'): config.cls_hidden_scale = 2
            if config.classifier == 'linear':
                self.classifier = nn.Linear(config.num_choice*config.hidden_size, self.config.num_labels)
            elif config.classifier == 'mlp':
                if self.use_img_layernorm:
                    self.classifier = nn.Sequential(
                        nn.Linear(config.num_choice*config.hidden_size, config.hidden_size*config.cls_hidden_scale),
                        nn.ReLU(),
                        BertLayerNorm(config.hidden_size*config.cls_hidden_scale, eps=config.layer_norm_eps),
                        nn.Linear(config.hidden_size*config.cls_hidden_scale, self.config.num_labels)
                    )
                else:
                    self.classifier = nn.Sequential(
                        nn.Linear(config.num_choice*config.hidden_size, config.hidden_size*config.cls_hidden_scale),
                        nn.ReLU(),
                        nn.Linear(config.hidden_size*config.cls_hidden_scale, self.config.num_labels)
                    )
        else:
            self.classifier = nn.Linear(config.num_choice*config.hidden_size, self.config.num_labels) # original
        self.apply(self.init_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None, img_feats=None):
        """Score all choices per example.

        Inputs are shaped ``(batch, num_choices, ...)`` and are flattened to
        ``(batch * num_choices, ...)`` before encoding.  Returns
        ``(loss, logits, ...)`` when ``labels`` is given, else ``(logits, ...)``.
        """
        num_choices = input_ids.shape[1]  # NOTE(review): computed but unused below
        # Flatten the choice dimension so each candidate is encoded as its own row.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_img_feats = img_feats.view(-1, img_feats.size(-2), img_feats.size(-1)) if img_feats is not None else None
        # Only the image-aware model accepts img_feats.
        if isinstance(self.bert, BertImgModel):
            outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
                                attention_mask=flat_attention_mask, head_mask=head_mask, img_feats=flat_img_feats)
        else:
            outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
                                attention_mask=flat_attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        # Re-group the per-choice pooled vectors back into one row per example.
        reshaped_pool_output = pooled_output.view(-1, self.config.num_choice*(pooled_output.shape[1]))
        logits = self.classifier(reshaped_pool_output)
        outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
        if labels is not None:
            if self.loss_type == 'bce':
                loss = instance_bce_with_logits(logits, labels.view(-1, self.config.num_labels))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits, labels)
            outputs = (loss,) + outputs
        return outputs
""" Oscar for Multiple Choice """
class OscarForMultipleChoice(BertPreTrainedModel):
    r"""
    Oscar multiple-choice head: each candidate is scored independently
    (per-candidate logits, not one logit vector per example).

    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
            (a) For sequence pairs:
                ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
                ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
            (b) For single sequences:
                ``tokens: [CLS] the dog is hairy . [SEP]``
                ``token_type_ids: 0 0 0 0 0 0 0``
            Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Segment token indices to indicate first and second portions of the inputs.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token
            (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss.
        **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above).
            Classification scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        >>> config = BertConfig.from_pretrained('bert-base-uncased')
        >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        >>>
        >>> model = BertForMultipleChoice(config)
        >>> choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        >>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        >>> labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        >>> outputs = model(input_ids, labels=labels)
        >>> loss, classification_scores = outputs[:2]
    """
    def __init__(self, config):
        super(OscarForMultipleChoice, self).__init__(config)
        self.loss_type = config.loss_type
        # Image-aware encoder when visual features are configured, else plain BERT.
        if config.img_feature_dim > 0:
            self.bert = BertImgModel(config) # ImageBERT
        else:
            self.bert = BertModel(config) # original BERT
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Binary (correct / incorrect) score per flattened candidate, hence the
        # fixed output width of 2 for the 'linear' and 'mlp' variants.
        if hasattr(config, 'classifier'):
            if not hasattr(config, 'cls_hidden_scale'): config.cls_hidden_scale = 2
            if config.classifier == 'linear':
                self.classifier = nn.Linear(config.hidden_size, 2) # original
                #self.classifier = weight_norm(nn.Linear(config.hidden_size, self.config.num_labels), dim=None)
            elif config.classifier == 'mlp':
                self.classifier = nn.Sequential(nn.Linear(config.hidden_size, config.hidden_size*config.cls_hidden_scale),
                                                nn.ReLU(),
                                                nn.Linear(config.hidden_size*config.cls_hidden_scale, 2)) # bce loss
        else:
            self.classifier = nn.Linear(config.hidden_size, config.num_labels) # original
        self.apply(self.init_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None, img_feats=None):
        """Score each flattened candidate; see the class docstring for shapes."""
        num_choices = input_ids.shape[1]  # NOTE(review): computed but unused below
        # Flatten the choice dimension so each candidate is encoded as its own row.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_img_feats = img_feats.view(-1, img_feats.size(-2), img_feats.size(-1)) if img_feats is not None else None
        # Only the image-aware model accepts img_feats.
        if isinstance(self.bert, BertImgModel):
            outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
                                attention_mask=flat_attention_mask, head_mask=head_mask, img_feats=flat_img_feats)
        else:
            outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
                                attention_mask=flat_attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
        #logger.info('pooled_output: {}, reshaped_pool_output: {}, logits: {}'.format(pooled_output.shape, reshaped_pool_output.shape, logits.shape))
        #logger.info('logits: {}, reshaped_logits: {}'.format(logits.shape, reshaped_logits.shape))
        #logger.info('labels: {}, labels.veiw: {}, labels.view(-1, num_labels): {}'.format(labels.shape, labels.view(-1).shape, labels.view(-1, self.config.num_labels).shape))
        if labels is not None:
            if self.loss_type == 'bce': #[batch_size, 2] v1
                #loss = instance_bce_with_logits(reshaped_logits, labels)
                loss = instance_bce_with_logits(logits, labels.view(-1, self.config.num_labels))
            elif self.loss_type == 'bxe':
                # Cross-entropy over the flattened per-candidate labels.
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits, labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                #loss = loss_fct(reshaped_logits, labels)
                loss = loss_fct(logits, labels)
            outputs = (loss,) + outputs
        return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
class BertCaptioningLoss(nn.Module):
    """Label-smoothed captioning loss.

    Builds a smoothed one-hot target distribution and measures KL divergence
    against the predicted log-softmax. Optionally, after ``drop_worst_after``
    calls, the highest-loss fraction (``drop_worst_ratio``) of samples is
    excluded from the mean ("drop worst" robust training).
    """
    def __init__(self, config):
        super().__init__()
        # Every knob defaults to 0 (no smoothing, never drop samples).
        self.label_smoothing = getattr(config, 'label_smoothing', 0)
        self.drop_worst_ratio = getattr(config, 'drop_worst_ratio', 0)
        self.drop_worst_after = getattr(config, 'drop_worst_after', 0)
        self.log_soft = nn.LogSoftmax(dim=1)
        self.kl = nn.KLDivLoss(reduction='none')
        self.iter = 0
    def forward(self, logits, target):
        """Return the scalar loss for ``logits`` (N, C) against ``target`` (N,)."""
        self.iter += 1
        smoothing = self.label_smoothing
        num_classes = logits.size(1)
        # Smoothed one-hot targets: true class gets 1 - eps, the remaining
        # mass eps is spread uniformly over the other classes.
        smoothed = torch.zeros_like(logits).scatter(1, target.view(-1, 1), 1)
        smoothed = smoothed * (1 - smoothing) + (1 - smoothed) * smoothing / (num_classes - 1)
        per_sample = self.kl(self.log_soft(logits), smoothed).sum(1)
        if self.drop_worst_ratio > 0 and self.iter > self.drop_worst_after:
            # Keep only the (1 - ratio) fraction of lowest-loss samples.
            keep = int(per_sample.shape[0] * (1 - self.drop_worst_ratio))
            per_sample, _ = torch.topk(per_sample, k=keep, largest=False)
        return per_sample.mean()
class BertForImageCaptioning(CaptionPreTrainedModel):
    """
    Bert for Image Captioning.

    Training uses masked-token prediction over the caption span
    (``encode_forward``); inference runs autoregressive generation with an
    incremental cache of encoded layers (``generate`` /
    ``prepare_inputs_for_generation``).
    """
    def __init__(self, config):
        super(BertForImageCaptioning, self).__init__(config)
        self.config = config
        self.bert = BertImgModel(config)
        self.cls = BertOnlyMLMHead(config)
        self.loss = BertCaptioningLoss(config)
        self.apply(self.init_weights)
        self.tie_weights()
    def tie_weights(self):
        # Tie MLM decoder to the input word embeddings when configured.
        if hasattr(self.config, 'tie_weights') and self.config.tie_weights:
            self._tie_or_clone_weights(self.cls.predictions.decoder,
                                       self.bert.embeddings.word_embeddings)
        freeze = False
        if hasattr(self.config, 'freeze_embedding'):
            freeze = self.config.freeze_embedding
        self.bert.embeddings.word_embeddings.weight.requires_grad = not freeze
    def forward(self, *args, **kwargs):
        # Dispatch: decoding -> caption generation, otherwise a training/scoring pass.
        is_decode = kwargs.get('is_decode', False)
        if is_decode:
            return self.generate(*args, **kwargs)
        else:
            return self.encode_forward(*args, **kwargs)
    def encode_forward(self, input_ids, img_feats, attention_mask, masked_pos, masked_ids=None,
                       token_type_ids=None, position_ids=None, head_mask=None,
                       is_training=True, encoder_history_states=None):
        """Single encoder pass.

        Training: returns ``(masked_loss, class_logits, ...)`` computed only on
        positions where ``masked_pos == 1``. Inference: returns
        ``(class_logits, ...)`` over the caption span.
        """
        outputs = self.bert(input_ids, img_feats=img_feats, attention_mask=attention_mask,
                            position_ids=position_ids, token_type_ids=token_type_ids,
                            head_mask=head_mask,
                            encoder_history_states=encoder_history_states)
        # NOTE(review): this slice is recomputed in both branches below; the
        # assignment here appears redundant.
        sequence_output = outputs[0][:, :masked_pos.shape[-1], :]
        if is_training:
            sequence_output = outputs[0][:, :masked_pos.shape[-1], :]
            # num_masks_in_batch * hidden_size
            sequence_output_masked = sequence_output[masked_pos==1, :]
            class_logits = self.cls(sequence_output_masked)
            masked_ids = masked_ids[masked_ids != 0] # remove padding masks
            masked_loss = self.loss(class_logits.float(), masked_ids)
            outputs = (masked_loss, class_logits,) + outputs[2:]
        else:
            sequence_output = outputs[0][:, :input_ids.shape[-1], :]
            class_logits = self.cls(sequence_output)
            outputs = (class_logits,) + outputs[2:]
        return outputs
    def prepare_inputs_for_generation(self, curr_ids, past=None):
        """Build the model inputs for one decoding step.

        First step (``past is None``): feed the full context (prompt + [MASK] +
        optional OD labels) with rows/cols for not-yet-generated positions
        removed from the attention mask. Later steps: feed only the last
        generated token plus a fresh [MASK], reusing cached encoder layers.
        """
        # NOTE: if attention is on, it should be the token used to mask words in training
        mask_token_id = self.mask_token_id
        batch_size = curr_ids.shape[0]
        mask_ids = torch.full(
            (batch_size, 1), mask_token_id, dtype=torch.long, device=curr_ids.device
        )
        def _slice(t, start, end):
            # Column slice of a (batch, seq+od_labels) tensor; passes None through.
            if t is None:
                return t
            assert t.shape == (batch_size, self.max_seq_len + self.od_labels_len)
            return t[:, start: end]
        def _remove_elements(t, start, end):
            # Drop columns [start, end) of a (batch, seq+od_labels) tensor.
            if t is None:
                return t
            assert t.shape == (batch_size, self.max_seq_len + self.od_labels_len)
            return torch.cat([t[:, :start], t[:, end:]], dim=1)
        if past is None:
            input_ids = torch.cat([curr_ids, mask_ids], dim=1)
            curr_len = input_ids.shape[1]
            full_len = self.max_seq_len + self.od_labels_len + self.img_seq_len
            assert self.full_attention_mask.shape == (batch_size,
                full_len, full_len)
            def _remove_rows_cols(t, row_start, row_end, col_start, col_end):
                # Remove a rectangular band of rows and columns from a square mask.
                t00 = t[:, :row_start, :col_start]
                t01 = t[:, :row_start, col_end:]
                t10 = t[:, row_end:, :col_start]
                t11 = t[:, row_end:, col_end:]
                res = torch.cat([torch.cat([t00, t01], dim=2), torch.cat([t10, t11],
                    dim=2)], dim=1)
                assert res.shape == (t.shape[0], t.shape[1]-row_end+row_start,
                    t.shape[2]-col_end+col_start)
                return res
            # Trim away the caption positions that have not been generated yet.
            seq_start = curr_len
            seq_end = self.max_seq_len
            attention_mask = _remove_rows_cols(self.full_attention_mask, seq_start,
                seq_end, seq_start, seq_end)
            masked_pos = _remove_elements(self.full_masked_pos, seq_start, seq_end)
            token_type_ids = _remove_elements(self.full_token_type_ids, seq_start, seq_end)
            position_ids = _remove_elements(self.full_position_ids, seq_start, seq_end)
            img_feats = self.img_feats
            if self.add_od_labels:
                assert self.od_label_ids.shape[1] == self.od_labels_len
                input_ids = torch.cat([input_ids, self.od_label_ids], dim=1)
        else:
            last_token = curr_ids[:, -1:]
            # The representation of last token should be re-computed, because
            # it depends on both self-attention context and input tensor
            input_ids = torch.cat([last_token, mask_ids], dim=1)
            start_pos = curr_ids.shape[1] - 1
            end_pos = start_pos + input_ids.shape[1]
            masked_pos = _slice(self.full_masked_pos, start_pos, end_pos)
            token_type_ids = _slice(self.full_token_type_ids, start_pos, end_pos)
            position_ids = _slice(self.full_position_ids, start_pos, end_pos)
            img_feats = None
            assert past[0].shape[0] == batch_size
            if self.prev_encoded_layers is None:
                assert start_pos == 1 # the first token after BOS
                assert past[0].shape[1] == 2 + self.od_labels_len + self.img_seq_len
                # reorder to [od_labels, img_feats, sentence]
                self.prev_encoded_layers = [
                    torch.cat([x[:, 2:, :], x[:, :start_pos,:]], dim=1)
                    for x in past]
                # Reorder the full mask quadrants to match the cache layout above.
                s2s = self.full_attention_mask[:, :self.max_seq_len,
                    :self.max_seq_len]
                s2i = self.full_attention_mask[:, :self.max_seq_len,
                    self.max_seq_len:]
                i2s = self.full_attention_mask[:, self.max_seq_len:,
                    :self.max_seq_len]
                i2i = self.full_attention_mask[:, self.max_seq_len:,
                    self.max_seq_len:]
                self.full_attention_mask = torch.cat(
                    [torch.cat([i2i, i2s], dim=2),
                     torch.cat([s2i, s2s], dim=2)],
                    dim=1)
            else:
                assert start_pos > 1
                assert past[0].shape[1] == 2
                # Append the newly computed token state (dropping the [MASK] slot).
                self.prev_encoded_layers = [torch.cat([x, p[:, :-1, :]], dim=1)
                    for x, p in zip(self.prev_encoded_layers, past)]
            attention_mask = self.full_attention_mask[:,
                self.od_labels_len+self.img_seq_len+start_pos: self.od_labels_len+self.img_seq_len+end_pos,
                :self.od_labels_len+self.img_seq_len+end_pos]
        return {'input_ids': input_ids, 'img_feats': img_feats,
            'masked_pos': masked_pos, 'attention_mask': attention_mask,
            'token_type_ids': token_type_ids, 'position_ids': position_ids,
            'is_training': False,
            'encoder_history_states': self.prev_encoded_layers}
    def get_output_embeddings(self):
        # NOTE(review): `self.decoder` is never assigned in __init__, so calling
        # this raises AttributeError; it likely should return
        # self.cls.predictions.decoder -- confirm before relying on it.
        return self.decoder
    def generate(self, img_feats, attention_mask, masked_pos, token_type_ids=None,
                 position_ids=None, head_mask=None, input_ids=None, max_length=None,
                 do_sample=None, num_beams=None, temperature=None, top_k=None, top_p=None,
                 repetition_penalty=None, bos_token_id=None, pad_token_id=None,
                 eos_token_ids=None, mask_token_id=None, length_penalty=None,
                 num_return_sequences=None,
                 num_keep_best=1, is_decode=None,
                 add_od_labels=False, od_labels_start_posid=None,
                 use_cbs=False, fsm=None, num_constraints=None,
                 min_constraints_to_satisfy=None, use_hypo=False,
                 decoding_constraint_flag=None, bad_ending_ids=None,
                 ):
        """ Generates captions given image features
        """
        assert is_decode
        batch_size = img_feats.shape[0]
        self.img_seq_len = img_feats.shape[1]
        self.max_seq_len = max_length
        self.mask_token_id = mask_token_id
        self.prev_encoded_layers = None
        # NOTE: num_keep_best is not equavilant to num_return_sequences
        # num_keep_best is the number of hypotheses to keep in beam search
        # num_return_sequences is the repeating times of input, coupled with
        # do_sample=True can generate more than one samples per image
        self.num_keep_best = num_keep_best
        vocab_size = self.config.vocab_size
        if not use_cbs:
            num_fsm_states = 1
        else:
            # Constrained beam search: validate the finite-state machine shape.
            b, num_fsm_states, f1, v = fsm.shape
            assert b==batch_size and v==vocab_size and f1==num_fsm_states
        self.add_od_labels = add_od_labels
        # avoid position_ids collision of caption and od labels
        self.od_labels_start_posid = max(od_labels_start_posid, self.max_seq_len)
        if self.add_od_labels:
            # get od labels part from input_ids
            assert input_ids.shape[0] == batch_size
            od_label_ids = input_ids[:, self.max_seq_len:]
            self.od_labels_len = input_ids.shape[1] - self.max_seq_len
            input_ids = None
        else:
            self.od_labels_len = 0
            od_label_ids = None
            assert input_ids.shape == (batch_size, self.max_seq_len)
            input_ids = None
        # Start every caption from a single BOS token.
        if input_ids is None:
            input_ids = torch.full(
                (batch_size, 1), bos_token_id, dtype=torch.long, device=img_feats.device
            )
        else:
            assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
            assert input_ids.shape[0] == batch_size, "Input batch size must match image features"
        cur_len = input_ids.shape[1]
        if num_return_sequences != 1:
            # Expand input to num return sequences
            input_ids = self._expand_for_beams(input_ids, num_return_sequences)
            effective_batch_size = batch_size * num_return_sequences
        else:
            effective_batch_size = batch_size
        if position_ids is None:
            position_ids = torch.arange(self.max_seq_len, dtype=torch.long, device=input_ids.device)
            posids_len = self.max_seq_len
            if self.add_od_labels:
                # OD-label positions start at od_labels_start_posid to avoid overlap.
                od_labels_posids = torch.arange(
                        self.od_labels_start_posid,
                        self.od_labels_start_posid + self.od_labels_len, dtype=torch.long, device=input_ids.device)
                position_ids = torch.cat([position_ids, od_labels_posids])
                posids_len += self.od_labels_len
            position_ids = position_ids.unsqueeze(0).expand([batch_size, posids_len])
        # Replicate all static context tensors once per (beam, fsm state, sequence).
        num_expand = num_beams * num_fsm_states * num_return_sequences
        self.od_label_ids = self._expand_for_beams(od_label_ids, num_expand)
        self.img_feats = self._expand_for_beams(img_feats, num_expand)
        self.full_attention_mask = self._expand_for_beams(attention_mask, num_expand)
        self.full_masked_pos = self._expand_for_beams(masked_pos, num_expand)
        self.full_token_type_ids = self._expand_for_beams(token_type_ids, num_expand)
        self.full_position_ids = self._expand_for_beams(position_ids, num_expand)
        self.full_head_mask = self._expand_for_beams(head_mask, num_expand)
        if not use_cbs:
            if num_beams > 1:
                output = self._generate_beam_search(
                    input_ids,
                    cur_len,
                    max_length,
                    do_sample,
                    temperature,
                    top_k,
                    top_p,
                    repetition_penalty,
                    pad_token_id,
                    eos_token_ids,
                    effective_batch_size,
                    length_penalty,
                    num_beams,
                    vocab_size,
                )
            else:
                output = self._generate_no_beam_search(
                    input_ids,
                    cur_len,
                    max_length,
                    do_sample,
                    temperature,
                    top_k,
                    top_p,
                    repetition_penalty,
                    pad_token_id,
                    eos_token_ids,
                    effective_batch_size,
                )
        else:
            assert self.num_keep_best == 1, 'not supported n_best > 1 for CBS'
            searcher = ConstrainedBeamSearch(eos_token_ids, max_length,
                    num_beams)
            curr_ids, sum_logprobs = searcher.search(
                    input_ids,
                    None,
                    self._decode_step,
                    fsm,
            )
            curr_ids, logprobs = select_best_beam_with_constraints(
                curr_ids,
                sum_logprobs,
                num_constraints,
                min_constraints_to_satisfy,
                eos_token_ids,
            )
            # (batch_size, n_best, max_len), (batch_size, n_best)
            output = (curr_ids.unsqueeze(1), logprobs.unsqueeze(1))
        return output
    def _expand_for_beams(self, x, num_expand):
        """Repeat ``x`` ``num_expand`` times along a new dim-1, then flatten
        into the batch dimension; None and num_expand == 1 pass through."""
        if x is None or num_expand == 1:
            return x
        input_shape = list(x.shape)
        expanded_shape = input_shape[:1] + [num_expand] + input_shape[1:]
        x = x.unsqueeze(1).expand(expanded_shape)
        # (batch_size * num_expand, ...)
        x = x.contiguous().view([input_shape[0] * num_expand] + input_shape[1:])
        return x
    def _do_output_past(self, outputs):
        # The encoder cache ("past") is present whenever extra outputs exist.
        return len(outputs) > 1
class BertPreTrainingHeads(nn.Module):
    """MLM prediction head plus a sequence-relationship (NSP/contrastive) head."""
    def __init__(self, config, only_vocab=False):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, only_vocab=only_vocab)
        # Default to the standard binary NSP head when the config does not
        # declare a contrastive class count.
        num_seq_relations = getattr(config, "num_contrast_classes", 2)
        self.seq_relationship = nn.Linear(config.hidden_size, num_seq_relations)
    def forward(self, sequence_output, pooled_output):
        """Return (token-level vocab scores, sequence-relationship scores)."""
        seq_relationship_score = self.seq_relationship(pooled_output)
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores, seq_relationship_score
class VLBertPreTrainingHeads(nn.Module):
    """VL-BERT pre-training heads: masked language modeling, sequence
    relationship, masked region feature regression (MRF) and masked region
    classification (MRC)."""
    def __init__(self, config, img_emb_weight):
        super(VLBertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config)
        # Default to the standard binary head when no contrastive class count is set.
        num_seq_relations = getattr(config, "num_contrast_classes", 2)
        self.seq_relationship = nn.Linear(config.hidden_size, num_seq_relations)
        self.MRF_predictor = nn.Sequential(
            nn.Linear(config.hidden_size, config.hidden_size),
            GELU(),
            BertLayerNorm(config.hidden_size, eps=config.img_layer_norm_eps))
        self.MRF_bias = nn.Parameter(torch.zeros(config.img_feature_dim))
        # Region-feature regression reuses the (transposed) image embedding matrix.
        self.MRF_weight = img_emb_weight
        self.MRC_predictor = nn.Linear(config.hidden_size, config.od_tag_size)
    def forward(self, sequence_output, pooled_output, txt_length=None):
        """Return (MLM scores, relationship scores, MRF features, MRC scores).

        When ``txt_length`` is given the sequence is split as
        [text tokens | image regions]; otherwise every head sees the full
        sequence.
        """
        if txt_length is None:
            text_part = sequence_output
            image_part = sequence_output
        else:
            text_part = sequence_output[:, :txt_length, :]
            image_part = sequence_output[:, txt_length:, :]
        prediction_scores = self.predictions(text_part)
        seq_relationship_score = self.seq_relationship(pooled_output)
        mrf_hidden = self.MRF_predictor(image_part)
        mrf_feature = F.linear(mrf_hidden, self.MRF_weight.t(), self.MRF_bias)
        mrc_score = self.MRC_predictor(image_part)
        return prediction_scores, seq_relationship_score, mrf_feature, mrc_score
class BertImgForPreTraining(ImgPreTrainedModel):
    r"""
    Image-aware BERT pre-training model (MLM + sentence-relation heads).

    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for computing the masked language modeling loss.
        Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
        Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
        in ``[0, ..., config.vocab_size]``
    **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
        Indices should be in ``[0, 1]``.
        ``0`` indicates sequence B is a continuation of sequence A,
        ``1`` indicates sequence B is a random sequence.
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        >>> config = BertConfig.from_pretrained('bert-base-uncased')
        >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        >>>
        >>> model = BertImgForPreTraining(config)
        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        >>> outputs = model(input_ids)
        >>> prediction_scores, seq_relationship_scores = outputs[:2]
    """
    config_class = BertConfig
    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    def __init__(self, config):
        super(BertImgForPreTraining, self).__init__(config)
        #self.bert = BertModel(config) # original BERT
        self.bert = BertImgModel(config)
        self.cls = BertPreTrainingHeads(config)
        # Number of classes for the sentence-relation head (default binary NSP).
        self.num_seq_relations = config.num_contrast_classes if hasattr(config, "num_contrast_classes") else 2
        # When set, MLM scores/labels are truncated to the first
        # max_text_seq_length positions (the text portion of the sequence).
        self.max_text_seq_length = config.max_text_seq_length if hasattr(config, "max_text_seq_length") else None
        # self.max_text_seq_length = None
        self.apply(self.init_weights)
        self.tie_weights()
    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0,
                                       std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, position_ids=None, head_mask=None, img_feats=None):
        """Pre-training forward pass; see the class docstring for shapes.

        Returns ``(total_loss, prediction_scores, seq_relationship_score, ...,
        masked_lm_loss)`` when both label tensors are given, else
        ``(prediction_scores, seq_relationship_score, ...)``.
        """
        time_start = time.time()  # NOTE(review): timing locals below are never used
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats)
        sequence_length = input_ids.shape[1]  # NOTE(review): unused
        img_length = img_feats.shape[1]  # NOTE(review): unused
        time_1 = time.time()
        sequence_output, pooled_output = outputs[:2]
        # Score only the text portion for MLM when a text length cap is configured.
        if self.max_text_seq_length is not None:
            prediction_scores, seq_relationship_score = self.cls(sequence_output[:, :self.max_text_seq_length,:], pooled_output)
        else:
            prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        outputs = (prediction_scores, seq_relationship_score,) + outputs[2:]  # add hidden states and attention if they are here
        time_2 = time.time()
        if masked_lm_labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            if self.max_text_seq_length is not None:
                # print('before:', masked_lm_labels.shape)
                # Labels must be truncated to match the truncated prediction scores.
                masked_lm_labels = masked_lm_labels[:, :self.max_text_seq_length].contiguous()
                # print('after:', masked_lm_labels.shape)
                # print('prediction:', prediction_scores.shape)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.reshape(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, self.num_seq_relations), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            outputs = (total_loss,) + outputs + (masked_lm_loss,)
        time_3 = time.time()
        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
class ImageBertForSequenceClassification_F(BertPreTrainedModel):
    """
    Modified from BertForSequenceClassification to support oscar training
    for background information fusion.

    NOTE(review): the fusion path is incomplete -- ``self.fusion_layer`` is
    constructed and the background pair is encoded, but the two encodings are
    never actually fused; classification currently uses only the primary
    input's pooled output. Confirm intended behavior before extending.
    """
    def __init__(self, config):
        super(ImageBertForSequenceClassification_F, self).__init__(config)
        self.num_labels = config.num_labels
        self.loss_type = config.loss_type
        self.config = config
        # Image-aware encoder when visual features are configured, else plain BERT.
        if config.img_feature_dim > 0:
            self.bert = BertImgModel(config)
        else:
            self.bert = BertModel(config)
        # Cross-modal fusion layers (currently unused in forward; see class note).
        self.fusion_layer = nn.ModuleList([CaptionBertLayer(config) for _ in range(config.num_fusion_layers)])
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if hasattr(config, 'classifier'):
            if not hasattr(config, 'cls_hidden_scale'):
                config.cls_hidden_scale = 2
            if config.classifier == 'linear':
                self.classifier = nn.Linear(config.hidden_size,
                                            self.config.num_labels)
            elif config.classifier == 'mlp':
                self.classifier = nn.Sequential(
                    nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
                    nn.ReLU(),
                    nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels)
                )
        else:
            self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)  # original
        self.apply(self.init_weights)
    def init_code_embedding(self, em):
        # Replace the code-embedding table with a precomputed matrix.
        self.bert.code_embeddings.weight.data = em.clone()
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None, img_feats=None, bg_input_ids=None, bg_token_type_ids=None,
                bg_attention_mask=None, bg_position_ids=None, bg_head_mask=None, bg_img_feats=None):
        """Classify the (text, image) input; the bg_* inputs encode a
        background pair (encoded but not yet fused -- see class note).

        Returns ``(loss, logits, ...)`` when ``labels`` is given, else
        ``(logits, ...)`` with hidden states/attentions appended when enabled.
        """
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats)
        pooled_output = outputs[1]
        # Background encoding: computed for the planned fusion step but its
        # result is not consumed anywhere yet.
        background = self.bert(bg_input_ids, position_ids=bg_position_ids, token_type_ids=bg_token_type_ids,
                               attention_mask=bg_attention_mask, head_mask=bg_head_mask, img_feats=bg_img_feats)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            if self.num_labels == 1:  # doing regression
                loss_fct = MSELoss()
                labels = labels.to(torch.float)
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                if self.loss_type == 'kl':
                    # KL Loss: https://github.com/uclanlp/visualbert/blob/master/pytorch_pretrained_bert/modeling.py
                    loss_fct = torch.nn.KLDivLoss(reduction="batchmean")
                    log_softmax = torch.nn.LogSoftmax(dim=-1)
                    reshaped_logits = logits.contiguous().view(-1, 3129)
                    reshaped_logits = log_softmax(reshaped_logits)
                    loss = loss_fct(reshaped_logits, labels.contiguous())
                elif self.loss_type == 'bce':  # [VQA]
                    loss = instance_bce_with_logits(logits, labels)
                else:  # cross_entropy [GQA, Retrieval, Captioning]
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        # BUGFIX: the original ended with `if concep_span is not None: outputs +=
        # (concept_emb,)` -- both names are undefined in this method (copied from
        # a sibling class), so every call raised NameError. Removed. The dead
        # local `fused_input = [outputs]` was removed as well.
        return outputs
class VLBertImgForPreTraining(ImgPreTrainedModel):
    r"""
    Image+text BERT pre-training model with a VL-specific head
    (``VLBertPreTrainingHeads``) that, besides masked-LM and sequence-relation
    scores, predicts masked-region features (MRF) and masked-region classes (MRC).

    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for computing the masked language modeling loss.
        Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
        Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
        in ``[0, ..., config.vocab_size]``
    **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
        Indices should be in ``[0, 1]``.
        ``0`` indicates sequence B is a continuation of sequence A,
        ``1`` indicates sequence B is a random sequence.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        >>> config = BertConfig.from_pretrained('bert-base-uncased')
        >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        >>>
        >>> model = VLBertImgForPreTraining(config)
        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        >>> outputs = model(input_ids)
        >>> prediction_scores, seq_relationship_scores = outputs[:2]

    """
    config_class = BertConfig
    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"

    def __init__(self, config):
        super(VLBertImgForPreTraining, self).__init__(config)
        #self.bert = BertModel(config) # original BERT
        self.bert = BertImgModel(config)
        # The head receives the image-embedding weight, presumably so MRF
        # predictions live in the same space as input region embeddings — confirm.
        self.cls = VLBertPreTrainingHeads(config, self.bert.img_embedding.weight)
        # Number of classes for the sequence-relationship head; 2 = vanilla NSP.
        self.num_seq_relations = config.num_contrast_classes if hasattr(config, "num_contrast_classes") else 2
        # When set, LM labels are truncated to this many (text) positions in forward().
        self.max_text_seq_length = config.max_text_seq_length if hasattr(config, "max_text_seq_length") else None
        self.apply(self.init_weights)
        self.tie_weights()

    def init_weights(self, module):
        """ Initialize the weights. Applied to every submodule via ``self.apply``.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0,
                                       std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, position_ids=None, head_mask=None, img_feats=None,
                masked_region_labels=None, masked_target_feature=None, masked_region_id=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats)
        sequence_output, pooled_output = outputs[:2]
        # Head returns LM scores, sequence-relation scores, predicted region
        # features (mrf_feat) and region-class scores (mrc_score).
        if self.max_text_seq_length is not None:
            prediction_scores, seq_relationship_score, mrf_feat, mrc_score = self.cls(sequence_output, pooled_output, self.max_text_seq_length)
        else:
            prediction_scores, seq_relationship_score, mrf_feat, mrc_score = self.cls(sequence_output, pooled_output)
        outputs = (prediction_scores, seq_relationship_score,) + outputs[2:]  # add hidden states and attention if they are here
        if masked_lm_labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            if self.max_text_seq_length is not None:
                # The head only scored the text prefix, so truncate labels to match.
                masked_lm_labels = masked_lm_labels[:, :self.max_text_seq_length].contiguous()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, self.num_seq_relations), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            # Expose the MLM component separately, after any hidden states/attentions.
            outputs = outputs + (masked_lm_loss,)
        else:
            # NOTE(review): without LM/NSP labels the loss slot starts as the plain
            # int 0, not a tensor — callers should expect that when only region
            # labels are supplied.
            total_loss = 0
        if masked_region_labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_mrc_loss = loss_fct(mrc_score.view(-1, mrc_score.shape[-1]), masked_region_labels.view(-1))
            # Select only masked regions; masked_region_id acts as a boolean mask.
            mrf_feat = torch.masked_select(mrf_feat, masked_region_id.unsqueeze(-1).bool()).view(-1, mrf_feat.shape[-1])
            target_feat = torch.masked_select(masked_target_feature, masked_region_id.unsqueeze(-1).bool()).view(-1, mrf_feat.shape[-1])
            # masked_mrf_loss = torch.mean(torch.norm(mrf_feat-target_feat, p=2, dim=-1)**2)
            masked_mrf_loss = F.mse_loss(mrf_feat, target_feat)
            total_loss = total_loss + masked_mrc_loss+masked_mrf_loss
            outputs = outputs + (masked_mrf_loss, masked_mrc_loss)
        outputs = (total_loss,) + outputs
        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
class BertImgForPreTraining2(ImgPreTrainedModel):  # quick version
    r"""
    "Quick" image+text BERT pre-training model: the large-vocabulary LM head is
    applied only to masked positions instead of the whole sequence.

    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for computing the masked language modeling loss.
        Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
        Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
        in ``[0, ..., config.vocab_size]``
    **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
        Indices should be in ``[0, 1]``.
        ``0`` indicates sequence B is a continuation of sequence A,
        ``1`` indicates sequence B is a random sequence.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(num_masked_tokens, config.vocab_size)``
            Prediction scores of the language modeling head for the masked positions only (before SoftMax).
        **seq_relationship_scores**: ``torch.FloatTensor``
            Prediction scores of the sequence relation head (before SoftMax).
        **hidden_states** / **attentions**: (`optional`) as in the other pre-training heads.

    Examples::

        >>> config = BertConfig.from_pretrained('bert-base-uncased')
        >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        >>>
        >>> model = BertImgForPreTraining2(config)
        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        >>> outputs = model(input_ids)
        >>> prediction_scores, seq_relationship_scores = outputs[:2]

    """
    config_class = BertConfig
    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"

    def __init__(self, config):
        super(BertImgForPreTraining2, self).__init__(config)
        #self.bert = BertModel(config) # original BERT
        self.bert = BertImgModel(config)
        self.cls = BertPreTrainingHeads(config)
        # Number of classes for the sequence-relationship head; 2 = vanilla NSP.
        self.num_seq_relations = config.num_contrast_classes if hasattr(config, "num_contrast_classes") else 2
        self.max_text_seq_length = config.max_text_seq_length if hasattr(config, "max_text_seq_length") else None
        # self.max_text_seq_length = None
        self.apply(self.init_weights)
        self.tie_weights()
        # Timestamp of the previous forward() call, for the (commented-out)
        # data-loading-time profiler in forward().
        self.last_time = None

    def init_weights(self, module):
        """ Initialize the weights. Applied to every submodule via ``self.apply``.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0,
                                       std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, position_ids=None, head_mask=None, img_feats=None):
        """Pre-training forward pass.

        Fixes vs. previous revision: ``masked_lm_labels > -1`` is no longer
        evaluated when ``masked_lm_labels`` is None (it raised a TypeError), and
        the ``time_3``/``self.last_time`` bookkeeping now runs on every call
        instead of only when labels were supplied (it raised a NameError otherwise).
        """
        time_start = time.time()
        # Kept for the (commented-out) profiling print below.
        sequence_length = input_ids.shape[1]
        img_length = img_feats.shape[1] if img_feats is not None else 0
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats)
        time_1 = time.time()
        sequence_output, pooled_output = outputs[:2]
        if masked_lm_labels is not None:
            # "Quick" trick: run the (vocab-sized) LM head only on masked positions.
            lm_mask = masked_lm_labels > -1
            masked_sequence_output = torch.masked_select(sequence_output, lm_mask.unsqueeze(-1)).reshape(-1, self.config.hidden_size)
        else:
            # No mask labels (inference): score every position instead of crashing.
            masked_sequence_output = sequence_output.reshape(-1, self.config.hidden_size)
        prediction_scores, seq_relationship_score = self.cls(masked_sequence_output, pooled_output)
        outputs = (prediction_scores, seq_relationship_score,) + outputs[2:]  # add hidden states and attention if they are here
        time_2 = time.time()
        if masked_lm_labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_labels = torch.masked_select(masked_lm_labels, lm_mask).reshape(-1)
            masked_lm_loss = loss_fct(prediction_scores, masked_lm_labels.reshape(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, self.num_seq_relations), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            outputs = (total_loss,) + outputs + (masked_lm_loss,)
        time_3 = time.time()
        if self.last_time is not None:
            data_time = time_start - self.last_time
        else:
            data_time = 0
        self.last_time = time_3
        # print('Token sequence length:{}, Image sequence length {}'.format(sequence_length, img_length), 'Bert encoding time:', time_1-time_start, 'Head time:', time_2-time_1, 'Loss time:', time_3-time_2, 'data time', data_time)
        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
class ImageBertForSequenceClassification_MLM(BertPreTrainedModel):
    """
    Modified from BertForSequenceClassification to support oscar training,
    adding a possible auxiliary MLM loss (weighted by ``config.mlm_weight``).

    Fix vs. previous revision: the final output assembly referenced ``loss``
    unconditionally, raising a NameError whenever ``labels`` was None; losses
    are now only prepended when actually computed.
    """
    def __init__(self, config):
        super(ImageBertForSequenceClassification_MLM, self).__init__(config)
        self.num_labels = config.num_labels
        self.loss_type = config.loss_type
        self.config = config
        # Image-aware backbone only when visual features are configured.
        if config.img_feature_dim > 0:
            self.bert = BertImgModel(config)
        else:
            self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.mlm_head = BertLMPredictionHead(config)
        self.mlm_weight = config.mlm_weight
        if hasattr(config, 'classifier'):
            if not hasattr(config, 'cls_hidden_scale'):
                config.cls_hidden_scale = 2
            # NOTE(review): if config.classifier is neither 'linear' nor 'mlp',
            # no classifier is created (same as before) — confirm configs in use.
            if config.classifier == 'linear':
                self.classifier = nn.Linear(config.hidden_size,
                                            self.config.num_labels)
            elif config.classifier == 'mlp':
                self.classifier = nn.Sequential(
                    nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
                    nn.ReLU(),
                    nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels)
                )
        else:
            self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)  # original
        self.apply(self.init_weights)
        self.tie_weights()

    def init_code_embedding(self, em):
        """Load a pre-computed code-embedding table into the backbone (cloned copy)."""
        self.bert.code_embeddings.weight.data = em.clone()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.mlm_head.decoder,
                                   self.bert.embeddings.word_embeddings)

    def reinit_cls_head(self):
        # make a re-initialization for the classifier
        self.classifier.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, masked_lm_labels=None,
                position_ids=None, head_mask=None, img_feats=None, concep_span=None, loss_weights=None):
        """Classification forward with optional concept-span extraction and MLM loss.

        Returns ``(loss, logits, *extras)`` when any loss was computed, else
        ``(logits, *extras)``; ``concept_emb`` is appended when ``concep_span``
        is given.
        """
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats)
        pooled_output = outputs[1]
        sequence_output = outputs[0]
        if concep_span is not None:
            if isinstance(concep_span, list):
                # One shared [start, end) token span for the whole batch.
                concept_emb = outputs[0][:, concep_span[0]:concep_span[1]]
            else:
                # Per-example spans: gather and concatenate along the token axis.
                concept_emb = []
                for i in range(input_ids.shape[0]):
                    concept_emb.append(outputs[0][i, concep_span[i][0]:concep_span[i][1]])
                concept_emb = torch.cat([c.contiguous() for c in concept_emb], dim=0)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        loss = None
        if labels is not None:
            if self.num_labels == 1:  # doing regression
                loss_fct = MSELoss()
                labels = labels.to(torch.float)
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                if self.loss_type == 'kl':
                    # KL Loss: https://github.com/uclanlp/visualbert/blob/master/pytorch_pretrained_bert/modeling.py
                    loss_fct = torch.nn.KLDivLoss(reduction="batchmean")
                    log_softmax = torch.nn.LogSoftmax(dim=-1)
                    reshaped_logits = logits.contiguous().view(-1, 3129)
                    reshaped_logits = log_softmax(reshaped_logits)
                    loss = loss_fct(reshaped_logits, labels.contiguous())
                elif self.loss_type == 'bce':  # [VQA]
                    loss = instance_bce_with_logits(logits, labels)
                else:  # cross_entropy [GQA, Retrieval, Captioning]
                    loss_fct = CrossEntropyLoss(weight=loss_weights)
                    loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if concep_span is not None:
            outputs += (concept_emb, )
        if masked_lm_labels is not None:
            mlm_lfn = CrossEntropyLoss(ignore_index=-1)
            lm_mask = masked_lm_labels > -1
            # Run the LM head only on masked positions.
            masked_sequence_output = torch.masked_select(sequence_output, lm_mask.unsqueeze(-1)).reshape(-1, self.config.hidden_size)
            prediction_scores = self.mlm_head(masked_sequence_output)
            masked_lm_labels = torch.masked_select(masked_lm_labels, lm_mask).reshape(-1)
            mlm_loss = mlm_lfn(prediction_scores, masked_lm_labels)
            combined = self.mlm_weight * mlm_loss if loss is None else loss + self.mlm_weight * mlm_loss
            outputs = (combined, ) + outputs
        elif loss is not None:
            outputs = (loss,) + outputs
        return outputs
class ImageBertForSequenceClassificationR(BertPreTrainedModel):
    """
    Modified from BertForSequenceClassification to support oscar training.
    Adds an auxiliary multi-label "reason" classifier (``r_cls``, BCE loss
    weighted by ``config.r_lambda``) and an optional penalty term coupling the
    positive-class probability with the strongest predicted reason.
    """
    def __init__(self, config):
        super(ImageBertForSequenceClassificationR, self).__init__(config)
        self.num_labels = config.num_labels
        self.loss_type = config.loss_type
        self.config = config
        if config.img_feature_dim > 0:
            self.bert = BertImgModel(config)
        else:
            self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Weight of the auxiliary reason loss and flag enabling the penalty term.
        self.r_lambda = self.config.r_lambda
        self.reason_penalty = self.config.reason_penalty
        if hasattr(config, 'classifier'):
            if not hasattr(config, 'cls_hidden_scale'):
                config.cls_hidden_scale = 2
            if config.classifier == 'linear':
                self.classifier = nn.Linear(config.hidden_size,
                                            self.config.num_labels)
                self.r_cls = nn.Linear(config.hidden_size,
                                       self.config.num_reasons)
            elif config.classifier == 'mlp':
                self.classifier = nn.Sequential(
                    nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
                    nn.ReLU(),
                    nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels)
                )
                self.r_cls = nn.Sequential(
                    nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
                    nn.ReLU(),
                    nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_reasons)
                )
        else:
            self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)  # original
            self.r_cls = nn.Linear(config.hidden_size, self.config.num_reasons)
        self.apply(self.init_weights)

    def init_code_embedding(self, em):
        """Load a pre-computed code-embedding table into the backbone (cloned copy)."""
        self.bert.code_embeddings.weight.data = em.clone()

    def reinit_cls_head(self):
        # make a re-initialization for the classifier
        self.classifier.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, soft_label=False, r_lamba=None,
                position_ids=None, head_mask=None, img_feats=None, concep_span=None, loss_weights=None, r_labels=None, r_weights=None):
        """Classification forward with auxiliary reason head.

        NOTE(review): the ``r_lamba`` argument is accepted but never used —
        ``self.r_lambda`` from the config is what is applied; parameter kept
        for interface compatibility.
        """
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats)
        pooled_output = outputs[1]
        if concep_span is not None:
            if isinstance(concep_span, list):
                # One shared [start, end) token span for the whole batch.
                concept_emb = outputs[0][:, concep_span[0]:concep_span[1]]
            else:
                # Per-example spans: gather and concatenate along the token axis.
                concept_emb = []
                for i in range(input_ids.shape[0]):
                    concept_emb.append(outputs[0][i, concep_span[i][0]:concep_span[i][1]])
                concept_emb = torch.cat([c.contiguous() for c in concept_emb], dim=0)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        r_logits = self.r_cls(pooled_output)
        outputs = (logits, r_logits) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            if self.num_labels == 1:  # doing regression
                loss_fct = MSELoss()
                labels = labels.to(torch.float)
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                if soft_label:
                    loss = soft_cross_entropy(labels, logits)
                elif self.loss_type == 'kl':
                    # KL Loss: https://github.com/uclanlp/visualbert/blob/master/pytorch_pretrained_bert/modeling.py
                    loss_fct = torch.nn.KLDivLoss(reduction="batchmean")
                    log_softmax = torch.nn.LogSoftmax(dim=-1)
                    reshaped_logits = logits.contiguous().view(-1, 3129)
                    reshaped_logits = log_softmax(reshaped_logits)
                    loss = loss_fct(reshaped_logits, labels.contiguous())
                elif self.loss_type == 'bce':  # [VQA]
                    loss = instance_bce_with_logits(logits, labels, pos_weight=loss_weights)
                else:  # cross_entropy [GQA, Retrieval, Captioning]
                    loss_fct = CrossEntropyLoss(weight=loss_weights)
                    loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            if r_labels is not None:
                loss += self.r_lambda * instance_bce_with_logits(r_logits, r_labels, pos_weight=r_weights)
                if self.reason_penalty:
                    # Fix: pass dim explicitly — implicit-dim F.softmax is deprecated
                    # and ambiguous; dim=-1 matches the (batch, classes) layout.
                    pos_prob = F.softmax(logits, dim=-1)[:, 1]
                    r_prob = torch.sigmoid(r_logits)
                    # Penalize confident positives that also fire a reason strongly.
                    loss += torch.mean(pos_prob * r_prob.max(dim=1)[0])
            outputs = (loss,) + outputs
        if concep_span is not None:
            outputs += (concept_emb, )
        return outputs
class BiBertImgForPreTraining(ImgPreTrainedModel): # a version with weakly-supervised grounding
    r"""
    Bi-encoder image/text pre-training model combining: a CLIP-style retrieval
    loss over the single-stream similarity matrix, a visual-stream MLM loss, a
    cross-stream MLM + sequence-relation loss (with hard negatives), and an
    optional weakly-supervised word-region alignment (WRA) hinge loss.

    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for computing the masked language modeling loss.
        Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
        Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
        in ``[0, ..., config.vocab_size]``
    **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
        Indices should be in ``[0, 1]``.
        ``0`` indicates sequence B is a continuation of sequence A,
        ``1`` indicates sequence B is a random sequence.

    Outputs: `Tuple` ``(total_loss, vis_mlm_loss, retrieval_loss, masked_lm_loss,
    next_sentence_loss[, wra_loss])`` — the WRA loss is appended only when
    ``phrase_index`` is provided.

    Examples::

        >>> config = BertConfig.from_pretrained('bert-base-uncased')
        >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        >>>
        >>> model = BiBertImgForPreTraining(config)

    """
    config_class = BertConfig
    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"

    def __init__(self, config):
        super(BiBertImgForPreTraining, self).__init__(config)
        #self.bert = BertModel(config) # original BERT
        # tmp = BiBertImgModel2
        self.bert = BiBertImgModel(config)
        # Pre-training heads restricted to the word-only vocabulary.
        self.cls = BertPreTrainingHeads(config, only_vocab=True)
        # MLM head for the visual-side ("half") encoder stream.
        self.half_mlm = BertLMPredictionHead(config, only_vocab=True)
        self.only_vocab_size = config.only_word_size
        self.num_seq_relations = config.num_contrast_classes if hasattr(config, "num_contrast_classes") else 2
        self.max_text_seq_length = config.max_text_seq_length if hasattr(config, "max_text_seq_length") else None
        # Learnable temperature for the CLIP-style retrieval loss, init = ln(1/0.07).
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        # self.max_text_seq_length = None
        self.apply(self.init_weights)
        self.tie_weights()

    def init_weights(self, module):
        """ Initialize the weights. Applied to every submodule via ``self.apply``.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0,
                                       std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
            Both decoders are tied to the word-embedding table; the extra kwargs
            presumably restrict tying to the first ``only_vocab_size`` rows — confirm
            against the project's ``_tie_or_clone_weights`` override.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings, only_vocab=True, only_word_size=self.only_vocab_size)
        self._tie_or_clone_weights(self.half_mlm.decoder, self.bert.embeddings.word_embeddings,
                                   only_vocab=True, only_word_size=self.only_vocab_size)

    def forward(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None, masked_lm_labels_a=None,
                input_ids_b=None, token_type_ids_b=None, attention_mask_b=None, masked_lm_labels_b=None, max_tag_length=20,
                position_ids_a=None, position_ids_b=None, head_mask=None, img_feats=None, is_img_match=None, img_index=None, phrase_index=None):
        outputs, single_stream_output = self.bert(input_ids_a=input_ids_a, position_ids_a=position_ids_a, token_type_ids_a=token_type_ids_a,
                                                  attention_mask_a=attention_mask_a, head_mask=head_mask, img_feats=img_feats,
                                                  input_ids_b=input_ids_b, position_ids_b=position_ids_b, token_type_ids_b=token_type_ids_b,
                                                  attention_mask_b=attention_mask_b, max_tag_length=max_tag_length, encode_hn=True)
        txt_encoder_outputs, vis_encoder_outputs, sim_mat = single_stream_output
        ce_loss = CrossEntropyLoss(ignore_index=-1)
        # visual encoder MLM
        vis_lm_mask = masked_lm_labels_b > -1
        vis_masked_seq_output = torch.masked_select(vis_encoder_outputs, vis_lm_mask.unsqueeze(-1)).reshape(-1, self.config.hidden_size)
        vis_lm_score = self.half_mlm(vis_masked_seq_output)
        masked_vis_lm_labels = torch.masked_select(masked_lm_labels_b, vis_lm_mask).reshape(-1)
        vis_mlm_loss = ce_loss(vis_lm_score, masked_vis_lm_labels)
        # VSE cross entropy loss similar to CLIP
        logit_scale = self.logit_scale.exp()
        retrieval_logit_mat = sim_mat * logit_scale
        # Matched pairs lie on the diagonal of the in-batch similarity matrix.
        pseudo_label = torch.arange(sim_mat.shape[0], device=sim_mat.device)
        retrieval_loss = (ce_loss(retrieval_logit_mat, pseudo_label) + ce_loss(retrieval_logit_mat.t(), pseudo_label))/2
        # Cross-stream outputs: clean pair plus hard-negative pair.
        sequence_output, pooled_output, hard_sequence_output, hard_pooled_output = outputs
        lm_mask = masked_lm_labels_a > -1
        # LM head is run only on masked text positions (text prefix of the sequence).
        masked_sequence_output = torch.masked_select(sequence_output[:, :input_ids_a.shape[1], :], lm_mask.unsqueeze(-1)).reshape(-1, self.config.hidden_size)
        prediction_scores, seq_relationship_score = self.cls(masked_sequence_output, torch.cat([pooled_output, hard_pooled_output], dim=0))
        masked_lm_labels = torch.masked_select(masked_lm_labels_a, lm_mask).reshape(-1)
        masked_lm_loss = ce_loss(prediction_scores, masked_lm_labels.reshape(-1))
        # Clean pairs are class 0, hard negatives class 1 for the relation head.
        next_sentence_label = torch.cat([torch.zeros(pooled_output.shape[0], dtype=torch.long), torch.ones(hard_pooled_output.shape[0], dtype=torch.long)], dim=0).to(seq_relationship_score.device)
        next_sentence_loss = ce_loss(seq_relationship_score.view(-1, self.num_seq_relations), next_sentence_label.view(-1))
        # outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
        # total_loss = vis_mlm_loss+retrieval_loss+masked_lm_loss+next_sentence_loss
        # NOTE(review): vis_mlm_loss is computed and reported in the outputs but
        # excluded from total_loss (see the commented line above) — confirm this
        # matches the intended training recipe.
        total_loss = retrieval_loss+masked_lm_loss+next_sentence_loss
        outputs = (vis_mlm_loss, retrieval_loss, masked_lm_loss, next_sentence_loss)
        if phrase_index is not None:
            # weakly supervised phrase grouding
            # start_time = time.time()
            valid_phrases = F.normalize(mask_slice_and_stack(sequence_output, phrase_index), p=2, dim=-1)
            valid_images = F.normalize(mask_slice_and_stack(sequence_output, img_index), p=2, dim=-1)
            # time_1 = time.time()
            full_sims = valid_phrases @ valid_images.t()
            # time_2 = time.time()
            pos_sims, neg_sims = get_pos_neg_sims(full_sims, phrase_index, img_index)
            # time_3 = time.time()
            # Hinge with margin 0.2: a phrase's positive region similarity should
            # beat its negatives by the margin.
            wra_loss = torch.clamp(neg_sims + 0.2 - pos_sims, min=0)
            # wra_loss = torch.max(wra_loss, dim=1)[0]
            # time_4 = time.time()
            # Only phrases with a non-empty [start, end) span contribute.
            wra_valid_mask = (phrase_index[:, 1] - phrase_index[:, 0])>0
            # wra_valid_mask = torch.bitwise_and(wra_valid_mask, is_img_match==0)
            wra_loss = torch.mean(torch.masked_select(wra_loss, wra_valid_mask))
            total_loss = total_loss + wra_loss
            # time_5 = time.time()
            # print('scatter time:', time_1-start_time, 'matmul time:', time_2-time_1, 'pos_neg_sim:', time_3-time_2,
            #       'loss_time', time_4-time_3, 'mask loss time', time_5-time_4)
            outputs = (total_loss,) + outputs + (wra_loss,)
        else:
            outputs = (total_loss,) + outputs
        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
class BiBertImgForPreTraining2(ImgPreTrainedModel): # a version with weakly-supervised grounding
r"""
with a contrastive learning for wrong qa pairs!
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertImgForPreTraining(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_scores, seq_relationship_scores = outputs[:2]
"""
config_class = BertConfig
pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
def __init__(self, config):
    super(BiBertImgForPreTraining2, self).__init__(config)
    #self.bert = BertModel(config) # original BERT
    self.bert = BiBertImgModel2(config)
    # Pre-training heads restricted to the word-only vocabulary.
    self.cls = BertPreTrainingHeads(config, only_vocab=True)
    # MLM head for the visual-side ("half") encoder stream.
    self.half_mlm = BertLMPredictionHead(config, only_vocab=True)
    # self.qa_head = nn.Linear(config.hidden_size, config.qa_answer_size)
    self.only_vocab_size = config.only_word_size
    # Binary head scoring QA-pair correctness (contrastive wrong-QA objective).
    self.qa_seq_relation = nn.Linear(config.hidden_size, 2)
    self.num_seq_relations = config.num_contrast_classes if hasattr(config, "num_contrast_classes") else 2
    self.max_text_seq_length = config.max_text_seq_length if hasattr(config, "max_text_seq_length") else None
    # Learnable temperature for the CLIP-style retrieval loss, init = ln(1/0.07).
    self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
    # self.max_text_seq_length = None
    self.apply(self.init_weights)
    self.tie_weights()
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0,
std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings, only_vocab=True, only_word_size=self.only_vocab_size)
self._tie_or_clone_weights(self.half_mlm.decoder, self.bert.embeddings.word_embeddings,
only_vocab=True, only_word_size=self.only_vocab_size)
def forward(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None, masked_lm_labels_a=None, qa_is_next=None,
input_ids_b=None, token_type_ids_b=None, attention_mask_b=None, masked_lm_labels_b=None, max_tag_length=20, phrase_layer=None,
position_ids_a=None, position_ids_b=None, head_mask=None, img_feats=None,is_img_match=None, img_index=None, phrase_index=None, phrase_mod='sample'):
outputs, single_stream_output, hard_indexes, mid_output = self.bert(input_ids_a=input_ids_a, position_ids_a=position_ids_a, token_type_ids_a=token_type_ids_a,
attention_mask_a=attention_mask_a, head_mask=head_mask, img_feats=img_feats, phrase_layer=phrase_layer,
input_ids_b=input_ids_b, position_ids_b=position_ids_b, token_type_ids_b=token_type_ids_b,
attention_mask_b=attention_mask_b, max_tag_length=max_tag_length, encode_hn=True)
txt_encoder_outputs, vis_encoder_outputs, sim_mat = single_stream_output
ce_loss = CrossEntropyLoss(ignore_index=-1)
# visual encoder MLM
vis_lm_mask = masked_lm_labels_b > -1
vis_masked_seq_output = torch.masked_select(vis_encoder_outputs, vis_lm_mask.unsqueeze(-1)).reshape(-1, self.config.hidden_size)
vis_lm_score = self.half_mlm(vis_masked_seq_output)
masked_vis_lm_labels = torch.masked_select(masked_lm_labels_b, vis_lm_mask).reshape(-1)
vis_mlm_loss = ce_loss(vis_lm_score, masked_vis_lm_labels)
# VSE cross entropy loss similar to CLIP
logit_scale = self.logit_scale.exp()
retrieval_logit_mat = sim_mat * logit_scale
pseudo_label = torch.arange(sim_mat.shape[0], device=sim_mat.device)
retrieval_loss = (ce_loss(retrieval_logit_mat, pseudo_label) + ce_loss(retrieval_logit_mat.t(), pseudo_label))/2
sequence_output, pooled_output, hard_sequence_output, hard_pooled_output = outputs
lm_mask = masked_lm_labels_a > -1
masked_sequence_output = torch.masked_select(sequence_output[:, :input_ids_a.shape[1], :], lm_mask.unsqueeze(-1)).reshape(-1, self.config.hidden_size)
prediction_scores, seq_relationship_score = self.cls(masked_sequence_output, torch.cat([pooled_output, hard_pooled_output], dim=0))
masked_lm_labels = torch.masked_select(masked_lm_labels_a, lm_mask).reshape(-1)
masked_lm_loss = ce_loss(prediction_scores, masked_lm_labels.reshape(-1))
next_sentence_label = torch.cat([torch.zeros(pooled_output.shape[0], dtype=torch.long), torch.ones(hard_pooled_output.shape[0], dtype=torch.long)], dim=0).to(seq_relationship_score.device)
next_sentence_loss = ce_loss(seq_relationship_score.view(-1, self.num_seq_relations), next_sentence_label.view(-1))
# outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
total_loss = vis_mlm_loss+retrieval_loss+masked_lm_loss+next_sentence_loss
outputs = (vis_mlm_loss, retrieval_loss, masked_lm_loss, next_sentence_loss)
# if masked_lm_labels is not None and next_sentence_label is not None:
# loss_fct = CrossEntropyLoss(ignore_index=-1)
# masked_lm_labels = torch.masked_select(masked_lm_labels, lm_mask).reshape(-1)
# masked_lm_loss = loss_fct(prediction_scores, masked_lm_labels.reshape(-1))
# next_sentence_loss = loss_fct(seq_relationship_score.view(-1, self.num_seq_relations), next_sentence_label.view(-1))
# total_loss = masked_lm_loss + next_sentence_loss
# outputs = outputs + (masked_lm_loss,)
if qa_is_next is not None:
qa_logits = self.qa_seq_relation(pooled_output)
qa_rel_loss = ce_loss(qa_logits, qa_is_next)
total_loss += qa_rel_loss
outputs = outputs + (qa_rel_loss,)
if phrase_index is not None:
if phrase_layer is not None:
sequence_output_mid, hard_sequence_output_mid = mid_output
else:
sequence_output_mid, hard_sequence_output_mid = sequence_output, hard_sequence_output
if phrase_mod == 'hard':
hard_txt_index, hard_img_index = hard_indexes
hard_phrase_index = torch.index_select(phrase_index, dim=0, index=hard_txt_index)
hard_object_index = torch.index_select(img_index, dim=0, index=hard_img_index)
pos_sims = get_pos_sims(sequence_output=sequence_output_mid, text_index=phrase_index, img_index=img_index)
neg_sims = get_pos_sims(sequence_output=hard_sequence_output_mid, text_index=hard_phrase_index, img_index=hard_object_index)
wra_loss = torch.clamp(neg_sims + 0.2 - pos_sims, min=0)
wra_valid_mask1 = (phrase_index[:, 1] - phrase_index[:, 0])>0
wra_valid_mask2 = (hard_phrase_index[:, 1] - hard_phrase_index[:, 0])>0
wra_valid_mask = torch.bitwise_and(wra_valid_mask1, wra_valid_mask2)
wra_loss = torch.mean(torch.masked_select(wra_loss, wra_valid_mask))
total_loss = total_loss + wra_loss
outputs = (total_loss,) + outputs + (wra_loss,)
elif phrase_mod == 'sample':
# weakly supervised phrase grouding
# start_time = time.time()
valid_phrases = F.normalize(mask_slice_and_stack(sequence_output_mid, phrase_index), p=2, dim=-1)
valid_images = F.normalize(mask_slice_and_stack(sequence_output_mid, img_index), p=2, dim=-1)
# time_1 = time.time()
full_sims = valid_phrases @ valid_images.t()
# time_2 = time.time()
pos_sims, neg_sims = get_pos_neg_sims(full_sims, phrase_index, img_index)
# time_3 = time.time()
wra_loss = torch.clamp(neg_sims + 0.2 - pos_sims, min=0)
# wra_loss = torch.max(wra_loss, dim=1)[0]
# time_4 = time.time()
wra_valid_mask = (phrase_index[:, 1] - phrase_index[:, 0])>0
# wra_valid_mask = torch.bitwise_and(wra_valid_mask, is_img_match==0)
wra_loss = torch.mean(torch.masked_select(wra_loss, wra_valid_mask))
total_loss = total_loss + wra_loss
# time_5 = time.time()
# print('scatter time:', time_1-start_time, 'matmul time:', time_2-time_1, 'pos_neg_sim:', time_3-time_2,
# 'loss_time', time_4-time_3, 'mask loss time', time_5-time_4)
outputs = (total_loss,) + outputs + (wra_loss,)
else:
raise NotImplementedError
else:
outputs = (total_loss,) + outputs
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
def get_pos_sims(sequence_output, text_index, img_index):
# only calculate the pos similarity in a batch
pos_sims = []
for idx in range(text_index.shape[0]):
cur_valid_features = sequence_output[idx]
cur_valid_txt_index = torch.arange(text_index[idx,0], text_index[idx,1], device=sequence_output.device)
cur_valid_txt_feat = F.normalize(cur_valid_features.index_select(0, cur_valid_txt_index), p=2, dim=-1)
cur_valid_img_index = torch.arange(img_index[idx,0], img_index[idx,1], device=sequence_output.device)
cur_valid_img_feat = F.normalize(cur_valid_features.index_select(0, cur_valid_img_index), p=2, dim=-1)
if cur_valid_txt_feat.shape[0] == 0:
pos_sims.append(torch.zeros(1, dtype=cur_valid_img_feat.dtype, device=cur_valid_img_feat.device).squeeze())
else:
pos_sims.append(t2i_sim(cur_valid_txt_feat @ cur_valid_img_feat.t()))
pos_sims = torch.stack(pos_sims)
return pos_sims
def mask_slice_and_stack(features, valid_index):
stacker = []
for idx in range(features.shape[0]):
cur_valid_features = features[idx]
cur_valid_index = torch.arange(valid_index[idx,0], valid_index[idx,1], device=features.device)
stacker.append(cur_valid_features.index_select(0, cur_valid_index))
return torch.cat(stacker, dim=0)
def t2i_sim(sim_matrix):
if sim_matrix.shape[0] == 0:
return torch.zeros(1, dtype=sim_matrix.dtype, device=sim_matrix.device).squeeze()
# f_sim = sim_matrix.max(dim=1)[0]
f_sim = sim_matrix.topk(3, dim=1)[0]
rand_index = torch.randint(0, 3, (f_sim.shape[0],), device=f_sim.device)
f_sim = f_sim[torch.arange(f_sim.shape[0], device=f_sim.device), rand_index]
return torch.mean(f_sim)
def get_pos_neg_sims(sims, text_index, img_index):
text_n_input = text_index[:, 1] - text_index[:, 0]
img_n_input = img_index[:, 1] - img_index[:, 0]
text_index_border = text_n_input.cumsum(dim=0)
img_index_border = img_n_input.cumsum(dim=0)
my_zero = torch.zeros(1, device=sims.device, dtype=text_n_input.dtype)
text_index_border = torch.cat([my_zero, text_index_border], dim=0)
img_index_border = torch.cat([my_zero, img_index_border], dim=0)
doc2pos_sim = {}
doc2neg_img_sims = {}
for text_idx in range(text_index.shape[0]):
doc2pos_sim[text_idx] = t2i_sim
text_start = text_index_border[text_idx]
text_end = text_index_border[text_idx+1]
img_start = img_index_border[text_idx]
img_end = img_index_border[text_idx+1]
doc2pos_sim[text_idx] = t2i_sim(sims[text_start:text_end, img_start:img_end])
neg_img_indexs = list(range(0, text_idx)) + list(range(text_idx + 1, img_index.shape[0]))
neg_img_idx = random.choice(neg_img_indexs)
neg_img_start = img_index_border[neg_img_idx]
neg_img_end = img_index_border[neg_img_idx+1]
doc2neg_img_sims[text_idx] = t2i_sim(sims[text_start:text_end, neg_img_start:neg_img_end])
# for img_idx in range(img_index.shape[0]):
# text_start = text_index_border[text_idx]
# text_end = text_index_border[text_idx+1]
# img_start = img_index_border[img_idx]
# img_end = img_index_border[img_idx+1]
# c_sim = t2i_sim(sims[text_start:text_end, img_start:img_end])
# if text_idx == img_idx:
# doc2pos_sim[text_idx] = c_sim
# else:
# doc2neg_img_sims[text_idx].append(c_sim)
pos_sims, neg_sims = [], []
for idx in range(text_index.shape[0]):
pos_sims.append(doc2pos_sim[idx])
neg_sims.append(doc2neg_img_sims[idx])
pos_sims = torch.stack(pos_sims)
neg_sims = torch.stack(neg_sims)
return pos_sims, neg_sims
class BiImageBertForRetrieval(BertPreTrainedModel):
"""
Modified from BertForSequenceClassification to support oscar training.
"""
def __init__(self, config):
super(BiImageBertForRetrieval, self).__init__(config)
self.num_labels = 2
self.loss_type = config.loss_type
self.config = config
if config.img_feature_dim > 0:
self.bert = BiBertImgModel2(config)
else:
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.forward_mod = 'train'
if hasattr(config, 'classifier'):
if not hasattr(config, 'cls_hidden_scale'):
config.cls_hidden_scale = 2
if config.classifier == 'linear':
self.classifier = nn.Linear(config.hidden_size,
self.config.num_labels)
elif config.classifier == 'mlp':
self.classifier = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
nn.ReLU(),
nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels)
)
else:
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # original
self.apply(self.init_weights)
def init_code_embedding(self, em):
self.bert.code_embeddings.weight.data = em.clone()
def reinit_cls_head(self):
# make a re-initialization for the classifier
self.classifier.apply(self.init_weights)
def forward(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None, phrase_index=None, img_index=None,
input_ids_b=None, token_type_ids_b=None, attention_mask_b=None, max_tag_length=20, phrase_layer=None, no_itm=False,
position_ids_a=None, position_ids_b=None, head_mask=None, img_feats=None):
if self.forward_mod == 'train':
return self.forward_train(input_ids_a=input_ids_a, token_type_ids_a=token_type_ids_a, attention_mask_a=attention_mask_a, no_itm=no_itm,
input_ids_b=input_ids_b, token_type_ids_b=token_type_ids_b, attention_mask_b=attention_mask_b, img_feats=img_feats, max_tag_length=max_tag_length,
position_ids_a=position_ids_a, position_ids_b=position_ids_b, head_mask=head_mask, phrase_index=phrase_index, img_index=img_index, phrase_layer=phrase_layer)
elif self.forward_mod == 'coarse':
return self.forward_emb(input_ids_a=input_ids_a, token_type_ids_a=token_type_ids_a, attention_mask_a=attention_mask_a,
input_ids_b=input_ids_b, token_type_ids_b=token_type_ids_b, attention_mask_b=attention_mask_b, img_feats=img_feats, max_tag_length=max_tag_length,
position_ids_a=position_ids_a, position_ids_b=position_ids_b, head_mask=head_mask)
elif self.forward_mod == 'fine':
return self.forward_fine(input_ids_a=input_ids_a, token_type_ids_a=token_type_ids_a, attention_mask_a=attention_mask_a,
input_ids_b=input_ids_b, token_type_ids_b=token_type_ids_b, attention_mask_b=attention_mask_b, img_feats=img_feats, max_tag_length=max_tag_length,
position_ids_a=position_ids_a, position_ids_b=position_ids_b, head_mask=head_mask)
else:
raise NotImplementedError
def forward_train(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None, phrase_index=None, img_index=None,
input_ids_b=None, token_type_ids_b=None, attention_mask_b=None, max_tag_length=20, phrase_layer=None,
position_ids_a=None, position_ids_b=None, head_mask=None, img_feats=None, no_itm=False):
# print(input_ids_a.shape)
outputs, single_stream_output, hard_indexes, mid_output = self.bert(input_ids_a=input_ids_a, position_ids_a=position_ids_a, token_type_ids_a=token_type_ids_a,
attention_mask_a=attention_mask_a, head_mask=head_mask, img_feats=img_feats, phrase_layer=phrase_layer,
input_ids_b=input_ids_b, position_ids_b=position_ids_b, token_type_ids_b=token_type_ids_b,
attention_mask_b=attention_mask_b, max_tag_length=max_tag_length, encode_hn=True)
txt_encoder_outputs, vis_encoder_outputs, sim_mat = single_stream_output
ce_loss = CrossEntropyLoss(ignore_index=-1)
# VSE cross entropy loss similar to CLIP
logit_scale = self.logit_scale.exp()
retrieval_logit_mat = sim_mat * logit_scale
pseudo_label = torch.arange(sim_mat.shape[0], device=sim_mat.device)
retrieval_loss = (ce_loss(retrieval_logit_mat, pseudo_label) + ce_loss(retrieval_logit_mat.t(), pseudo_label))/2
sequence_output, pooled_output, hard_sequence_output, hard_pooled_output = outputs
seq_relationship_score = self.classifier(self.dropout(torch.cat([pooled_output, hard_pooled_output], dim=0)))
next_sentence_label = torch.cat([torch.ones(pooled_output.shape[0], dtype=torch.long), torch.zeros(hard_pooled_output.shape[0], dtype=torch.long)], dim=0).to(seq_relationship_score.device)
next_sentence_loss = ce_loss(seq_relationship_score.view(-1, self.num_labels), next_sentence_label.view(-1))
if no_itm:
total_loss = retrieval_loss
else:
total_loss = retrieval_loss + next_sentence_loss
phrase_mod = 'hard'
if phrase_index is not None:
if phrase_layer is not None:
sequence_output_mid, hard_sequence_output_mid = mid_output
else:
sequence_output_mid, hard_sequence_output_mid = sequence_output, hard_sequence_output
if phrase_mod == 'hard':
hard_txt_index, hard_img_index = hard_indexes
hard_phrase_index = torch.index_select(phrase_index, dim=0, index=hard_txt_index)
hard_object_index = torch.index_select(img_index, dim=0, index=hard_img_index)
pos_sims = get_pos_sims(sequence_output=sequence_output_mid, text_index=phrase_index, img_index=img_index)
neg_sims = get_pos_sims(sequence_output=hard_sequence_output_mid, text_index=hard_phrase_index, img_index=hard_object_index)
wra_loss = torch.clamp(neg_sims + 0.2 - pos_sims, min=0)
wra_valid_mask1 = (phrase_index[:, 1] - phrase_index[:, 0])>0
wra_valid_mask2 = (hard_phrase_index[:, 1] - hard_phrase_index[:, 0])>0
wra_valid_mask = torch.bitwise_and(wra_valid_mask1, wra_valid_mask2)
wra_loss = torch.mean(torch.masked_select(wra_loss, wra_valid_mask))
total_loss = total_loss + wra_loss
# outputs = (total_loss,) + outputs + (wra_loss,)
elif phrase_mod == 'sample':
# weakly supervised phrase grouding
# start_time = time.time()
valid_phrases = F.normalize(mask_slice_and_stack(sequence_output_mid, phrase_index), p=2, dim=-1)
valid_images = F.normalize(mask_slice_and_stack(sequence_output_mid, img_index), p=2, dim=-1)
# time_1 = time.time()
full_sims = valid_phrases @ valid_images.t()
# time_2 = time.time()
pos_sims, neg_sims = get_pos_neg_sims(full_sims, phrase_index, img_index)
# time_3 = time.time()
wra_loss = torch.clamp(neg_sims + 0.2 - pos_sims, min=0)
# wra_loss = torch.max(wra_loss, dim=1)[0]
# time_4 = time.time()
wra_valid_mask = (phrase_index[:, 1] - phrase_index[:, 0])>0
# wra_valid_mask = torch.bitwise_and(wra_valid_mask, is_img_match==0)
wra_loss = torch.mean(torch.masked_select(wra_loss, wra_valid_mask))
total_loss = total_loss + wra_loss
# time_5 = time.time()
# print('scatter time:', time_1-start_time, 'matmul time:', time_2-time_1, 'pos_neg_sim:', time_3-time_2,
# 'loss_time', time_4-time_3, 'mask loss time', time_5-time_4)
# outputs = (total_loss,) + outputs + (wra_loss,)
else:
raise NotImplementedError
outputs = (total_loss, seq_relationship_score, retrieval_loss, next_sentence_loss, next_sentence_label)
if phrase_index is not None:
outputs += (wra_loss,)
# outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
def forward_emb(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None,
input_ids_b=None, token_type_ids_b=None, attention_mask_b=None, max_tag_length=20,
position_ids_a=None, position_ids_b=None, head_mask=None, img_feats=None):
global_txt, global_img = self.bert.forward_single(input_ids_a=input_ids_a, position_ids_a=position_ids_a, token_type_ids_a=token_type_ids_a,
attention_mask_a=attention_mask_a, head_mask=head_mask, img_feats=img_feats,
input_ids_b=input_ids_b, position_ids_b=position_ids_b, token_type_ids_b=token_type_ids_b,
attention_mask_b=attention_mask_b, max_tag_length=max_tag_length)
return (global_txt, global_img) # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
def forward_fine(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None,
input_ids_b=None, token_type_ids_b=None, attention_mask_b=None, max_tag_length=20,
position_ids_a=None, position_ids_b=None, head_mask=None, img_feats=None):
outputs, single_stream_output, hard_indexes, mid_output = self.bert(input_ids_a=input_ids_a, position_ids_a=position_ids_a, token_type_ids_a=token_type_ids_a,
attention_mask_a=attention_mask_a, head_mask=head_mask, img_feats=img_feats,
input_ids_b=input_ids_b, position_ids_b=position_ids_b, token_type_ids_b=token_type_ids_b,
attention_mask_b=attention_mask_b, max_tag_length=max_tag_length, encode_hn=False)
sequence_output, pooled_output, hard_sequence_output, hard_pooled_output = outputs
seq_relationship_score = self.classifier(pooled_output)
# outputs = (total_loss, sim_mat, seq_relationship_score)
# outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
return seq_relationship_score # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
class BiImageBertForSequenceClassification(BertPreTrainedModel):
"""
Modified from BertForSequenceClassification to support oscar training.
"""
def __init__(self, config):
super(BiImageBertForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.loss_type = config.loss_type
self.config = config
if config.img_feature_dim > 0:
self.bert = BiBertImgModel(config)
else:
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if hasattr(config, 'classifier'):
if not hasattr(config, 'cls_hidden_scale'):
config.cls_hidden_scale = 2
if config.classifier == 'linear':
self.classifier = nn.Linear(config.hidden_size,
self.config.num_labels)
elif config.classifier == 'mlp':
self.classifier = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
nn.ReLU(),
nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels)
)
else:
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # original
self.apply(self.init_weights)
def init_code_embedding(self, em):
self.bert.code_embeddings.weight.data = em.clone()
def reinit_cls_head(self):
# make a re-initialization for the classifier
self.classifier.apply(self.init_weights)
def forward(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None, labels=None,
input_ids_b=None, token_type_ids_b=None, attention_mask_b=None, max_tag_length=20,
position_ids_a=None, position_ids_b=None, head_mask=None, img_feats=None, soft_label=False):
outputs, single_stream_output = self.bert(input_ids_a=input_ids_a, position_ids_a=position_ids_a, token_type_ids_a=token_type_ids_a,
attention_mask_a=attention_mask_a, head_mask=head_mask, img_feats=img_feats,
input_ids_b=input_ids_b, position_ids_b=position_ids_b, token_type_ids_b=token_type_ids_b,
attention_mask_b=attention_mask_b, max_tag_length=max_tag_length, encode_hn=False)
sequence_output, pooled_output, hard_sequence_output, hard_pooled_output = outputs
pooled_output = self.dropout(pooled_output)
# pooled_output = self.dropout(sequence_output[:,0])
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
# print('in model batch size', logits.shape)
if labels is not None:
if self.num_labels == 1: # doing regression
loss_fct = MSELoss()
labels = labels.to(torch.float)
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
if soft_label:
loss = soft_cross_entropy(labels, logits)
elif self.loss_type == 'kl':
# KL Loss: https://github.com/uclanlp/visualbert/blob/master/pytorch_pretrained_bert/modeling.py
loss_fct = torch.nn.KLDivLoss(reduction="batchmean")
log_softmax = torch.nn.LogSoftmax(dim=-1)
reshaped_logits = logits.contiguous().view(-1, 3129)
reshaped_logits = log_softmax(reshaped_logits)
loss = loss_fct(reshaped_logits, labels.contiguous())
elif self.loss_type == 'bce': # [VQA]
loss = instance_bce_with_logits(logits, labels)
else: # cross_entropy [GQA, Retrieval, Captioning]
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs
class BiImageBertForRE(BertPreTrainedModel):
"""
Modified from BertForSequenceClassification to support oscar training.
"""
def __init__(self, config):
super(BiImageBertForRE, self).__init__(config)
self.num_labels = 1
self.loss_type = config.loss_type
self.config = config
if config.img_feature_dim > 0:
self.bert = BiBertImgModel2(config)
else:
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if hasattr(config, 'classifier'):
if not hasattr(config, 'cls_hidden_scale'):
config.cls_hidden_scale = 2
if config.classifier == 'linear':
self.classifier = nn.Linear(config.hidden_size,
self.config.num_labels)
elif config.classifier == 'mlp':
self.classifier = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
nn.ReLU(),
nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels)
)
else:
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # original
self.apply(self.init_weights)
def init_code_embedding(self, em):
self.bert.code_embeddings.weight.data = em.clone()
def freeze_backbone(self):
for param in self.bert.parameters():
param.requires_grad = False
def unfreeze_backbone(self):
for param in self.bert.parameters():
param.requires_grad = True
def reinit_cls_head(self):
# make a re-initialization for the classifier
self.classifier.apply(self.init_weights)
def forward(self, input_ids_a, token_type_ids_a=None, attention_mask_a=None, labels=None, phrase_layer=None,
input_ids_b=None, token_type_ids_b=None, attention_mask_b=None, max_tag_length=20, mod=1,
position_ids_a=None, position_ids_b=None, head_mask=None, img_feats=None, soft_label=False):
outputs, single_stream_output, hard_indexes, mid_out = self.bert(input_ids_a=input_ids_a, position_ids_a=position_ids_a, token_type_ids_a=token_type_ids_a,
attention_mask_a=attention_mask_a, head_mask=head_mask, img_feats=img_feats, phrase_layer=phrase_layer,
input_ids_b=input_ids_b, position_ids_b=position_ids_b, token_type_ids_b=token_type_ids_b,
attention_mask_b=attention_mask_b, max_tag_length=max_tag_length, encode_hn=False)
if phrase_layer is not None:
sequence_output = mid_out[0]
else:
sequence_output = outputs[0]
seq_a_length = input_ids_a.shape[1]
sequence_output = self.dropout(sequence_output)
vis_sequence_output = sequence_output[:, seq_a_length:]
cls_output = sequence_output[:, 0]
if mod == 1:
# mse with cosine similarity
vis_sequence_output = F.normalize(vis_sequence_output, p=2, dim=-1)
cls_output = F.normalize(cls_output, p=2 ,dim=-1).unsqueeze(-1)
logits = torch.bmm(vis_sequence_output, cls_output).squeeze()
label_mask = labels >= 0
mse = MSELoss()
# mse = torch.nn.L1Loss()
loss = mse(torch.masked_select(labels, label_mask), torch.masked_select(logits, label_mask))
elif mod == 2:
# bce with cls as classifier
logits = torch.bmm(vis_sequence_output, cls_output.unsqueeze(-1)).squeeze()
label_mask = labels >= 0
hard_labels = (labels >= 0.5).float()
loss = F.binary_cross_entropy_with_logits(torch.masked_select(logits, label_mask), torch.masked_select(hard_labels, label_mask))
logits = torch.sigmoid(logits)
elif mod == 3:
logits = self.classifier(vis_sequence_output).squeeze()
label_mask = labels >= 0
hard_labels = (labels >= 0.5).float()
loss = F.binary_cross_entropy_with_logits(torch.masked_select(logits, label_mask), torch.masked_select(labels, label_mask))
else:
raise NotImplementedError
outputs = (loss, logits)
return outputs
|
var fileUpload = angular.module('shop');
fileUpload.directive("fileUpload", ['FileService', 'Upload', '$timeout',
function(FileService, Upload, $timeout) {
return {
restrict: 'EA',
require : '^ngModel',
scope: {
files:'=',
fileIds:'=',
openPicture:'&',
disabled: '=',
control: '=',
filesUpload: '=',
isSaved: '=',
onSelect: '&',
onEdit: '&',
fileType: '=',
multipleFile: '='
},
replace: true,
transclude: true,
templateUrl: baseUrl + '/app/shared/file-upload/views/file-upload.html?v=1',
link: function(scope, element, attrs, ngModel) {
scope.baseUrl = baseUrl;
scope.fileUpload = {};
scope.fileUploaded = [];
scope.fileError = {};
/**
* After save file then begin upload file
* @param {Boolen} newVal
* @param {Boolen} oldVal
*/
scope.$watch('isSaved', function(newVal, oldVal) {
if (angular.isDefined(newVal)) {
scope.upload(scope.selectedFile);
}
})
/**
* Load file when edit
*/
if (angular.isDefined(scope.filesUpload)) {
scope.selectedFile = [];
angular.forEach(scope.filesUpload, function(value, key) {
// Show file in view directive
value.type = scope.fileType;
value.progress = 100;
scope.fileUpload[value['uniId']] = value;
scope.selectedFile.push(value);
// Set to scope file
scope.fileUploaded.push(value);
ngModel.$setViewValue({files: scope.fileUploaded});
// Check user choose image
scope.onSelect({selected: true});
})
}
/**
* Choose file upload
*
* @author Thanh Tuan <tuan@httsolution.com>
*
* @param {File} files File
*
* @return {Void}
*/
scope.chooseFile = function(files) {
scope.selectedFile = [];
if (files && files.length) {
// Check user choose image
scope.onSelect({selected: true});
for (var i = 0; i < files.length; i++) {
(function(i){
var file = files[i];
if (angular.isDefined(window.maxUpload)) {
if(file['size'] > window.maxUpload['size']){
file['uniId'] = getId();
file['proccess'] = 100;
file['error'] = 1;
file['status'] = 0;
scope.fileUpload[file['uniId']] = file;
scope.fileUpload[file['uniId']]['error'] = 'Max file size is ' + window.maxUpload['name'];
scope.fileError[file['uniId']] =file;
return;
}
}
file['uniId'] = getId();
file['proccess'] = 0;
file['error'] = '';
scope.fileUpload[file['uniId']] = file;
// Push to scope file selected
scope.selectedFile.push(file);
})(i);
}
}
}
/**
* Upload file
*
* @author Thanh Tuan <thanhtuancr2011@gmail.com>
*
* @param {File} files File
*
* @return {Void}
*/
scope.upload = function (files) {
// When user edit but no choose file
if (angular.isUndefined(files)) {
scope.onEdit({edited: true});
}
var count = 0;
angular.forEach(files, function(file, key) {
Upload.upload({
url: baseUrl + window.linkUpload,
file: file
}).progress(function(evt) {
if(angular.isDefined(scope.fileUpload[file['uniId']])) {
var progressPercentage = parseInt(100.0 * evt.loaded / evt.total);
} else {
var progressPercentage =100;
}
if(angular.isDefined(scope.fileUpload[file['uniId']])) {
scope.fileUpload[file['uniId']]['proccess'] = progressPercentage;
}
}).error(function(data, status, headers, config) {
files.splice(1, i);
if(angular.isDefined(scope.fileUpload[file['uniId']])) {
if (angular.isDefined(data.message)) {
scope.fileUpload[file['uniId']]['error'] = data.message;
}
scope.fileError[config.file['uniId']] = data;
}
}).success(function(data, status, headers, config) {
if(angular.isDefined(scope.fileUpload[config.file.uniId])){
if (angular.isDefined(data.item)) {
data.item['uniId'] = config.file.uniId;
scope.fileUploaded.push(data.item);
}
}
}).finally(function(){
count++;
if (count == files.length) {
$timeout(function(){
ngModel.$setViewValue({files: scope.fileUploaded});
})
}
});
});
}
ngModel.$render = function(){
$timeout(function(){
scope.filesUpload = ngModel.$viewValue;
})
}
/**
* [checkFile description]
* @param {[type]} type [description]
* @return {[type]} [description]
*/
scope.checkFile = function(type){
return FileService.checkFile(type);
}
/**
* Get id of file
* @return {Void}
*/
function getId() {
return Math.floor((1 + Math.random()) * 0x10000).toString(16).substring(1);
}
/**
* Delete file
* @param {Double} uniId
* @return {Void}
*/
scope.deleteFile = function(uniId) {
delete scope.fileUpload[uniId];
// Delete fileUploaded
angular.forEach(scope.fileUploaded, function(value, key) {
if (uniId == value['uniId']) {
scope.fileUploaded.splice(key, 1);
}
});
// Delete file selected
angular.forEach(scope.selectedFile, function(value, key) {
if (uniId == value['uniId']) {
scope.selectedFile.splice(key, 1);
}
});
$timeout(function(){
ngModel.$setViewValue({files: scope.fileUploaded});
// Set validate file
if (scope.selectedFile.length == 0) {
scope.onSelect({selected: false});
}
});
}
scope.$on("emptyFiles", function (event, args) {
scope.fileUpload = {};
scope.fileUploaded = [];
});
}
}
}
]).filter('bytes', function() {
return function(bytes, precision) {
if (isNaN(parseFloat(bytes)) || !isFinite(bytes)) return '-';
if (typeof precision === 'undefined') precision = 1;
var units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB'],
number = Math.floor(Math.log(bytes) / Math.log(1024));
return (bytes / Math.pow(1024, Math.floor(number))).toFixed(precision) + ' ' + units[number];
}
});
|
// SPDX-License-Identifier: Apache-2.0
import React, { useRef } from "react";
import { useQuery } from "@apollo/react-hooks";
import Container from 'react-bootstrap/Container';
import Nav from 'react-bootstrap/Nav';
import Tab from 'react-bootstrap/Tab';
import Row from 'react-bootstrap/Row';
import Col from 'react-bootstrap/Col';
import NavigationBar from "./components/navigation-bar";
import Dashboard from "./components/dashboard";
import IssueForm from "./components/issue-form";
import TransferForm from "./components/transfer-form";
import RetireForm from "./components/retire-form";
import AccessControlForm from "./components/access-control-form";
import GovernanceDashboard from "./components/governance-dashboard";
import useWeb3Modal from "./hooks/useWeb3Modal";
import { Link, Route, Switch, Redirect, useLocation } from "wouter"
import GET_TRANSFERS from "./graphql/subgraph";
function App() {
const { loading, error, data } = useQuery(GET_TRANSFERS);
const [provider, loadWeb3Modal, logoutOfWeb3Modal, signedInAddress, roles] = useWeb3Modal();
const [location] = useLocation();
const dashboardRef = useRef();
React.useEffect(() => {
if (!loading && !error && data && data.transfers) {
console.log({ transfers: data.transfers });
}
}, [loading, error, data]);
let isOwnerOrDealer = (roles[0] === true || roles[1] === true || roles[2] === true || roles[3] === true);
return (
<>
<NavigationBar
provider={provider}
loadWeb3Modal={loadWeb3Modal}
logoutOfWeb3Modal={logoutOfWeb3Modal}
signedInAddress={signedInAddress}
roles={roles}
/>
{/* Tabs to pages */}
<Nav fill variant="tabs" className="mt-2 mb-4 border-bottom-0">
{/* On dashboard page, click this link to refresh the balances */}
{/* Else on other page, click this link to go to dashboard */}
{(location.substring(1) === "dashboard")
? <Nav.Link onClick={() => dashboardRef.current.refresh()} eventKey="dashboard">Dashboard</Nav.Link>
: <Link href="dashboard"><Nav.Link eventKey="dashboard">Dashboard</Nav.Link></Link>
}
<Link href="governance"><Nav.Link eventKey="governance">Governance</Nav.Link></Link>
<Link href="issue"><Nav.Link eventKey="issue">Issue tokens</Nav.Link></Link>
<Link href="transfer"><Nav.Link eventKey="transfer">Transfer tokens</Nav.Link></Link>
<Link href="retire"><Nav.Link eventKey="retire">Retire tokens</Nav.Link></Link>
{/* Display "Manage Roles" if owner/dealer, "My Roles" otherwise */}
<Link href="access-control"><Nav.Link eventKey="access-control">
{(isOwnerOrDealer)
? "Manage roles"
: "My roles"
}
</Nav.Link></Link>
</Nav>
<Container className="my-2">
<Tab.Container defaultActiveKey={location.substring(1) || "dashboard"}>
<Tab.Content animation="true">
<Switch>
<Route exact path="/"><Redirect to="/dashboard" /></Route>
<Route path="/dashboard">
<Dashboard ref={dashboardRef} provider={provider} signedInAddress={signedInAddress} roles={roles} />
</Route>
<Route path="/governance">
<GovernanceDashboard provider={provider} roles={roles} signedInAddress={signedInAddress} />
</Route>
<Route path="/issue">
<IssueForm provider={provider} roles={roles} signedInAddress={signedInAddress} />
</Route>
<Route path="/transfer">
<TransferForm provider={provider} roles={roles} />
</Route>
<Route path="/retire">
<RetireForm provider={provider} roles={roles} />
</Route>
<Route path="/access-control">
<AccessControlForm provider={provider} signedInAddress={signedInAddress} roles={roles} />
</Route>
</Switch>
</Tab.Content>
</Tab.Container>
<div className="my-5"></div>
</Container>
</>
);
}
export default App;
|
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel, Field
class PapersGetIn(BaseModel):
"""/papers get リクエストクラス
Attributes:
published_at (str): 論文の公開日時(YYYY/MM/DD)
arxiv_query_id (int): ArxivクエリID
is_stocked (bool): ストックのみを表示するか判定するフラグ
page (int): ページネーションに使用する表示したいページ数
"""
published_at: List[str] = Field([str(datetime.now().strftime("%Y/%m/%d")),
str(datetime.now().strftime("%Y/%m/%d"))])
arxiv_query_id: Optional[int]
is_stocked: Optional[bool] = Field(False)
page: int = Field(1)
class Config:
schema_extra = {
'example': {
'published_at': ['2021/09/20', '2021/09/24'],
'arxiv_query_id': 1,
'is_stocked': True,
'page': 1
}
}
|
const { reactive, computed } = Vue
function useWindow(top, left, width, height) {
const rectangle = reactive({ top, left, width, height })
const cmRect = computed(() => ({
top: `${rectangle.top}px`,
left: `${rectangle.left}px`,
width: `${rectangle.width}px`,
height: `${rectangle.height}px`
}))
return {
render(r) {
return Vue.h('div', { class: 'window', style: cmRect.value }, [r({})])
}
}
}
|
"""Top-level package for django-fsmfield."""
__author__ = """dryprojects"""
__email__ = 'rk19931211@hotmail.com'
__version__ = '0.1.0'
from .fields import *
__all__ = [
'FSMField',
'FSMMixin',
'State',
]
|
from nltk.corpus import cmudict
DOUBLERS = ('b', 'd', 'g', 'm', 'n', 'p', 't', 'z')
DOUBLING_EXCEPTIONS = ('mb', 'ng', 'en')
VOWELS = ('a', 'e', 'i', 'o', 'u')
d = cmudict.dict()
def double_up_con(word, suffix):
return word[:-1] + word[-1] * 2 + suffix
def to_past_tense(verb):
""" Takes a present tense verb and changes it to past tense 'ed' or similar. """
if verb in past_tense_exceptions:
return past_tense_exceptions[verb]
elif verb.endswith('e'):
return verb + 'd'
elif verb.endswith(DOUBLING_EXCEPTIONS):
return verb + 'ed'
elif verb.endswith(DOUBLERS):
return double_up_con(verb, 'ed')
else:
return verb + 'ed'
def to_ing_tense(verb):
""" Takes a present tense verb and adds the suffix 'ing' """
if verb.endswith('e'):
return verb[:-1] + 'ing' # Remove the ending e
elif verb.endswith(DOUBLING_EXCEPTIONS):
return verb + 'ing'
elif verb[-3] in VOWELS and verb[-2] in VOWELS: # No doubling for double vowels!
return verb + 'ing'
elif verb.endswith(DOUBLERS) and verb[-2] in VOWELS:
return double_up_con(verb, 'ing')
else:
return verb + 'ing'
def verb_to_noun(verb):
""" Takes a noun and adds the suffix 'er' to transform it into a noun. """
if verb in er_exceptions:
return er_exceptions[verb]
elif verb.endswith('e'):
return verb + 'r'
elif verb.endswith(DOUBLING_EXCEPTIONS):
return verb + 'er'
elif verb[-3] in VOWELS and verb[-2] in VOWELS:
return verb + 'er'
elif verb.endswith(DOUBLERS) and verb[-2] in VOWELS: # No doubling for double vowels!
return double_up_con(verb, 'er')
else:
return verb + 'er'
def pluralize_noun(n):
""" Takes a noun and turns it into it's plural form """
# Check exceptions first:
if n in exceptions:
return exceptions[n]
if n.endswith('us'):
return n[:-2] + 'i'
elif n.endswith('is'):
return n[:-2] + 'es'
elif n.endswith(('s', 'ss', 'sh', 'ch', 'x', 'z', 'o')):
return n + 'es'
elif n.endswith('f'):
return n[:-1] + 'ves'
elif n.endswith('fe'):
return n[:-2] + 'ves'
elif n.endswith('tion'):
return n + 's'
elif n.endswith('on'):
return n[:-2] + 'a'
elif n.endswith('y') and n[-2] in 'bcdfghjklmnpqrstvwxz':
return n[:-1] + 'ies'
else:
return n + 's'
def count_syllables(word):
# print("lookin up: '{}'".format(word))
lookup = d.get(word.lower(), None)
# print("result: {}".format(lookup))
if lookup:
return len(list(x for x in lookup.pop() if x[-1].isdigit()))
else:
return -1
def too_wordy(word1, word2):
word1_ct = count_syllables(word1)
word2_ct = count_syllables(word2)
# print('{} = {}, {} = {}'.format(word1, word1_ct, word2, word2_ct))
if word1_ct == -1 or word2_ct == -1:
return False
elif word1_ct >= 3 and word2_ct >= 4:
return True
else:
return False
er_exceptions = {
'create': 'creator',
'act': 'actor',
'mediate': 'mediator',
'alternate': 'alternator',
'collect': 'collector',
'dictate': 'dictator',
'vend': 'vendor',
'invest': 'investor',
'credit': 'creditor',
'instruct': 'instructor',
'guide': 'guide',
'hurry': 'hurrier'
}
past_tense_exceptions = {
'drive': 'drove',
'dive': 'dove',
'eat': 'ate',
'bite': 'bit',
'lead': 'led',
'swim': 'swam',
'run': 'ran',
'fly': 'flown', # flown
'fight': 'fought',
'sew': 'sewn',
'see': 'saw', # seen
'throw': 'threw', # thrown
}
exceptions = {
'roof': 'roofs',
'belief': 'beliefs',
'chef': 'chefs',
'chief': 'chiefs',
'gas': 'gasses',
'fez': 'fezzes',
'photo': 'photos',
'piano': 'pianos',
'halo': 'halos',
'tooth': 'teeth',
'mouse': 'mice',
'deer': 'deer',
'rodeo': 'rodeos', # Maybe a rule for this...
'ion': 'ions'
}
|
# -*- coding: utf-8 -*-
from pytest import _, type_check_test
@type_check_test()
def test_wrapper(x: int, *args, **kwargs: type('T', (), {})) -> int:
"Test docstring."
@type_check_test(
ok=[
_(1.1)
],
fail=[
(_(1), 'invalid `x`: expected float, got int'),
(_('foo'), 'invalid `x`: expected float, got str'),
(_(0.), 'invalid return value: expected tuple, got float')
]
)
def test_basic(x: float) -> tuple:
return (x, x) if x else x
@type_check_test(
ok=[
_(),
_(x=1),
_(x=1, y=2),
],
fail=[
(_(x='foo'), 'invalid keyword argument `x`: expected int, got str'),
(_(x=1, y=2.2), 'invalid keyword argument `y`: expected int, got float')
]
)
def test_kwargs(**kwargs: int):
...
@type_check_test(
ok=[
_(),
_(1),
_(1, 2)
],
fail=[
(_('a'), r'invalid item #0 of `\*args`: expected int, got str'),
(_(1, 'a'), r'invalid item #1 of `\*args`: expected int, got str')
]
)
def test_varargs(*args: int):
...
@type_check_test(
ok=[
_(1),
_(1, b='a'),
_(1, c=1.1, d=1.2),
_(1, b='a', c=1.1, d=1.2)
],
fail=[
(_('a'), 'invalid `a`: expected int, got str'),
(_(1, b=1), 'invalid `b`: expected str, got int'),
(_(1, c=1.1, d='a'), 'keyword argument `d`: expected float, got str'),
]
)
def test_mixed_args(a: int, *, b: str = 'foo', **kwargs: float):
...
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class OfferLetter(Document):
pass
|
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/ce/CostExplorer_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <aws/ce/model/Coverage.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace CostExplorer
{
namespace Model
{
/**
* <p>A group of reservations that share a set of attributes.</p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/ce-2017-10-25/ReservationCoverageGroup">AWS
* API Reference</a></p>
*/
class AWS_COSTEXPLORER_API ReservationCoverageGroup
{
public:
ReservationCoverageGroup();
ReservationCoverageGroup(Aws::Utils::Json::JsonView jsonValue);
ReservationCoverageGroup& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The attributes for this group of reservations.</p>
*/
inline const Aws::Map<Aws::String, Aws::String>& GetAttributes() const{ return m_attributes; }
/**
* <p>The attributes for this group of reservations.</p>
*/
inline bool AttributesHasBeenSet() const { return m_attributesHasBeenSet; }
/**
* <p>The attributes for this group of reservations.</p>
*/
inline void SetAttributes(const Aws::Map<Aws::String, Aws::String>& value) { m_attributesHasBeenSet = true; m_attributes = value; }
/**
* <p>The attributes for this group of reservations.</p>
*/
inline void SetAttributes(Aws::Map<Aws::String, Aws::String>&& value) { m_attributesHasBeenSet = true; m_attributes = std::move(value); }
/**
* <p>The attributes for this group of reservations.</p>
*/
inline ReservationCoverageGroup& WithAttributes(const Aws::Map<Aws::String, Aws::String>& value) { SetAttributes(value); return *this;}
/**
* <p>The attributes for this group of reservations.</p>
*/
inline ReservationCoverageGroup& WithAttributes(Aws::Map<Aws::String, Aws::String>&& value) { SetAttributes(std::move(value)); return *this;}
/**
* <p>The attributes for this group of reservations.</p>
*/
inline ReservationCoverageGroup& AddAttributes(const Aws::String& key, const Aws::String& value) { m_attributesHasBeenSet = true; m_attributes.emplace(key, value); return *this; }
/**
* <p>The attributes for this group of reservations.</p>
*/
inline ReservationCoverageGroup& AddAttributes(Aws::String&& key, const Aws::String& value) { m_attributesHasBeenSet = true; m_attributes.emplace(std::move(key), value); return *this; }
/**
* <p>The attributes for this group of reservations.</p>
*/
inline ReservationCoverageGroup& AddAttributes(const Aws::String& key, Aws::String&& value) { m_attributesHasBeenSet = true; m_attributes.emplace(key, std::move(value)); return *this; }
/**
* <p>The attributes for this group of reservations.</p>
*/
inline ReservationCoverageGroup& AddAttributes(Aws::String&& key, Aws::String&& value) { m_attributesHasBeenSet = true; m_attributes.emplace(std::move(key), std::move(value)); return *this; }
/**
* <p>The attributes for this group of reservations.</p>
*/
inline ReservationCoverageGroup& AddAttributes(const char* key, Aws::String&& value) { m_attributesHasBeenSet = true; m_attributes.emplace(key, std::move(value)); return *this; }
/**
* <p>The attributes for this group of reservations.</p>
*/
inline ReservationCoverageGroup& AddAttributes(Aws::String&& key, const char* value) { m_attributesHasBeenSet = true; m_attributes.emplace(std::move(key), value); return *this; }
/**
* <p>The attributes for this group of reservations.</p>
*/
inline ReservationCoverageGroup& AddAttributes(const char* key, const char* value) { m_attributesHasBeenSet = true; m_attributes.emplace(key, value); return *this; }
/**
* <p>How much instance usage this group of reservations covered.</p>
*/
inline const Coverage& GetCoverage() const{ return m_coverage; }
/**
* <p>How much instance usage this group of reservations covered.</p>
*/
inline bool CoverageHasBeenSet() const { return m_coverageHasBeenSet; }
/**
* <p>How much instance usage this group of reservations covered.</p>
*/
inline void SetCoverage(const Coverage& value) { m_coverageHasBeenSet = true; m_coverage = value; }
/**
* <p>How much instance usage this group of reservations covered.</p>
*/
inline void SetCoverage(Coverage&& value) { m_coverageHasBeenSet = true; m_coverage = std::move(value); }
/**
* <p>How much instance usage this group of reservations covered.</p>
*/
inline ReservationCoverageGroup& WithCoverage(const Coverage& value) { SetCoverage(value); return *this;}
/**
* <p>How much instance usage this group of reservations covered.</p>
*/
inline ReservationCoverageGroup& WithCoverage(Coverage&& value) { SetCoverage(std::move(value)); return *this;}
private:
Aws::Map<Aws::String, Aws::String> m_attributes;
bool m_attributesHasBeenSet;
Coverage m_coverage;
bool m_coverageHasBeenSet;
};
} // namespace Model
} // namespace CostExplorer
} // namespace Aws
|
import chain_flow
import datetime
import time
class Execute_Cf_Environment():
def __init__(self,cf ):
self.cf = cf
def execute(self):
time_stamp = datetime.datetime.today()
old_hour = time_stamp.hour
old_minute = time_stamp.minute
old_second = time_stamp.second
self.cf.execute_initialize()
while True:
time.sleep(.01)
self.cf.queue_event("SUB_SECOND_TICK",10)
time_stamp = datetime.datetime.today()
hour = time_stamp.hour
minute = time_stamp.minute
second = time_stamp.second
if old_second != second :
self.cf.queue_event( "TIME_TICK", second )
if old_minute != minute :
self.cf.queue_event( "MINUTE_TICK", minute )
if old_hour != hour :
self.cf.queue_event( "HOUR_TICK", minute )
old_hour = hour
old_minute = minute
old_second = second
try:
self.cf.execute( )
except:
print "chain flow exception"
print "current chain is ",self.cf.current_chain["name"]
print "current link is ",self.cf.current_link
raise
class CF_Interpreter(chain_flow.CF_Base_Interpreter ):
def __init__(self):
chain_flow.CF_Base_Interpreter.__init__(self)
def terminate( self, link_name ):
self.opcodes[ "Terminate"] = self.terminate_code
def halt( self, link):
self.insert_link( link ,"Halt", [] )
def one_step( self, link, function, parameters):
self.insert_link( link ,"Reset", [function, parameters] )
def reset( self, link):
self.insert_link( link ,"Reset", [] )
def send_event( self, event_name,data ):
self.insert_link( link ,"SendEvent",[event_name, data] )
#note python dow is Monday 0 Sunday 6
def wait_tod( self, link,dow,hour,minute,second):
self.insert_link( link ,"WaitTod",[dow,hour,minute,second] )
def wait_event( self, link,event_name):
self.insert_link( link ,"WaitEvent",[event_name] )
def wait_time( self, link, time_tick):
self.insert_link( link ,"WaitTime",[time_tick] )
def wait_condition( self, link, function, parameters ):
self.insert_link( link ,"Wait",[function, parameters] )
def wait_tod_reset( self, link):
self.opcodes["WaitTod_Reset" ] = self.wait_tod_code_reset
def wait_event_reset( self, link, event_name):
self.insert_link( link ,"WaitEvent_Reset",[event_name] )
#note python dow is Monday 0 Sunday 6 -- fix later ?
def wait_time_reset( self, link,dow,hour,minute,second ):
self.insert_link( link ,"WaitTime_Reset",[dow,hour,minute,second] )
def wait_condition( self, link,function,parameters ):
self.insert_link( link ,"Wait_Reset",[function, parameters] )
def verify_condition( self, link,function,parameters):
self.insert_link( link ,"Verify",[function, parameters] )
def nop( self, link):
self.insert_link( link ,"Nop" )
def log( self, link, message ):
self.insert_link( link ,"Log",[message] )
def enable_chain( self, link, chain_names ):
self.insert_link(link,"Enable_Chain",[chain_names])
def disable_chain( self, link, chain_names):
self.insert_link(link,"Disable_Chain",[chain_names] )
def init_state( self, link):
self.opcodes["Init_State_Machine"] = self.init_state
pass
def change_state( self, link, chain, state ):
self.insert_link(link,"Change_State",[chain,state] )
def system_reset( self, link):
self.insert_link(link,"RESET_SYSTEM")
# test code
if __name__ == "__main__":
cf = CF_Interpreter()
cf.define_chain( "Chain_1", True )
cf.log( "test1","Chain_1 +++ is printed" )
cf.reset("test2")
cf.define_chain( "Chain_2", True )
cf.log( "test1","Chain_2 +++ is printed" )
cf.reset("test2")
cf.execute_initialize()
for i in range(0,10):
print i
cf.queue_event("TEST", [] )
cf.execute( )
print("done")
|
# ---------------------------------------------------------#
# astroNN.datasets.apogee_distances: APOGEE Distances
# ---------------------------------------------------------#
import numpy as np
from astropy import units as u
from astropy.io import fits
from astroNN.apogee import allstar
from astroNN.apogee.downloader import apogee_distances
from astroNN.gaia import mag_to_absmag, mag_to_fakemag, extinction_correction
# noinspection PyUnresolvedReferences
def load_apogee_distances(dr=None, metric='distance', cuts=True, extinction=True, keepdims=False):
"""
Load apogee distances (absolute magnitude from stellar model)
:param dr: Apogee DR
:type dr: int
:param metric: which metric you want to get back
- "absmag" for absolute magnitude
- "fakemag" for fake magnitude
- "distance" for distance in parsec
:type metric: string
:param cuts: Whether to cut bad data (negative parallax and percentage error more than 20%), or a float to set the threshold
:type cuts: Union[boolean, float]
:param extinction: Whether to take extinction into account, only affect when metric is NOT 'distance'
:type extinction: bool
:param keepdims: Whether to preserve indices the same as APOGEE allstar DR14, no effect when cuts=False, set to -9999 for bad indices when cuts=True keepdims=True
:type keepdims: boolean
:return: numpy array of ra, dec, metrics_array, metrics_err_array
:rtype: ndarrays
:History: 2018-Jan-25 - Written - Henry Leung (University of Toronto)
"""
fullfilename = apogee_distances(dr=dr)
with fits.open(fullfilename) as F:
hdulist = F[1].data
# Convert kpc to pc
distance = hdulist['BPG_dist50'] * 1000
dist_err = (hdulist['BPG_dist84'] - hdulist['BPG_dist16']) * 1000
allstarfullpath = allstar(dr=dr)
with fits.open(allstarfullpath) as F:
k_mag = F[1].data['K']
if extinction:
k_mag = extinction_correction(k_mag, F[1].data['AK_TARG'])
ra = F[1].data['RA']
dec = F[1].data['DEC']
# Bad index refers to nan index
bad_index = np.argwhere(np.isnan(distance))
if metric == 'distance':
# removed astropy units because of -9999. is dimensionless, will have issues
output = distance
output_err = dist_err
elif metric == 'absmag':
absmag, absmag_err = mag_to_absmag(k_mag, 1 / distance * u.arcsec, (1 / distance) * (dist_err / distance))
output = absmag
output_err = absmag_err
elif metric == 'fakemag':
# fakemag requires parallax (mas)
fakemag, fakemag_err = mag_to_fakemag(k_mag, 1000 / distance * u.mas, (1000 / distance) * (dist_err / distance))
output = fakemag
output_err = fakemag_err
else:
raise ValueError('Unknown metric')
# Set the nan index to -9999. as they are bad and unknown. Not magic_number as this is an APOGEE dataset
output[bad_index], output_err[bad_index] = -9999., -9999.
if cuts is False:
pass
else:
distance[bad_index], dist_err[bad_index] = -9999., -9999.
good_idx = ((dist_err / distance < (0.2 if cuts is True else cuts)) & (distance != -9999.))
if not keepdims:
ra = ra[good_idx]
dec = dec[good_idx]
output = output[good_idx]
output_err = output_err[good_idx]
else:
output[(dist_err / distance > (0.2 if cuts is True else cuts))] = -9999.
output_err[(dist_err / distance > (0.2 if cuts is True else cuts))] = -9999.
return ra, dec, output, output_err
|