file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
classifier.d15b2d9b.js | // modules are defined as an array
// [ module function, map of requires ]
//
// map of requires is short require name -> numeric require
//
// anything defined in a previous bundle is accessed via the
// orig method which is the require for previous bundles
// eslint-disable-next-line no-global-assign
parcelRequire = (function (modules, cache, entry, globalName) {
// Save the require from previous bundle to this closure if any
var previousRequire = typeof parcelRequire === 'function' && parcelRequire;
var nodeRequire = typeof require === 'function' && require;
function newRequire(name, jumped) {
if (!cache[name]) {
if (!modules[name]) {
// if we cannot find the module within our internal map or
// cache jump to the current global require ie. the last bundle
// that was added to the page.
var currentRequire = typeof parcelRequire === 'function' && parcelRequire;
if (!jumped && currentRequire) {
return currentRequire(name, true);
}
// If there are other bundles on this page the require from the
// previous one is saved to 'previousRequire'. Repeat this as
// many times as there are bundles until the module is found or
// we exhaust the require chain.
if (previousRequire) {
return previousRequire(name, true);
}
// Try the node require function if it exists.
if (nodeRequire && typeof name === 'string') {
return nodeRequire(name);
}
var err = new Error('Cannot find module \'' + name + '\'');
err.code = 'MODULE_NOT_FOUND';
throw err;
}
localRequire.resolve = resolve;
var module = cache[name] = new newRequire.Module(name);
modules[name][0].call(module.exports, localRequire, module, module.exports, this);
}
return cache[name].exports;
function localRequire(x){
return newRequire(localRequire.resolve(x));
}
function resolve(x){
return modules[name][1][x] || x;
}
}
function Module(moduleName) {
this.id = moduleName;
this.bundle = newRequire;
this.exports = {};
}
newRequire.isParcelRequire = true;
newRequire.Module = Module;
newRequire.modules = modules;
newRequire.cache = cache;
newRequire.parent = previousRequire;
newRequire.register = function (id, exports) {
modules[id] = [function (require, module) {
module.exports = exports;
}, {}];
};
for (var i = 0; i < entry.length; i++) {
newRequire(entry[i]);
}
if (entry.length) {
// Expose entry point to Node, AMD or browser globals
// Based on https://github.com/ForbesLindesay/umd/blob/master/template.js
var mainExports = newRequire(entry[entry.length - 1]);
// CommonJS
if (typeof exports === "object" && typeof module !== "undefined") {
module.exports = mainExports;
// RequireJS
} else if (typeof define === "function" && define.amd) {
define(function () {
return mainExports;
});
// <script>
} else if (globalName) {
this[globalName] = mainExports;
}
}
// Override the current require with this new one
return newRequire;
})({"data\\Emotion_data.json":[function(require,module,exports) {
module.exports="/Emotion_data.e4259c96.json";
},{}],"toClassify\\Emotion_features.json":[function(require,module,exports) {
module.exports="/Emotion_features.09a2a8d7.json";
},{}],"scripts\\ShapeData.ts":[function(require,module,exports) {
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var ShapeData = function () {
function ShapeData() {
this.featuresList = ["tempo", "total_beats", "average_beats", "chroma_stft_mean", "chroma_stft_std", "chroma_stft_var", "chroma_cq_mean", "chroma_cq_std", "chroma_cq_var", "chroma_cens_mean", "chroma_cens_std", "chroma_cens_var", "melspectrogram_mean", "melspectrogram_std", "melspectrogram_var", "mfcc_mean", "mfcc_std", "mfcc_var", "mfcc_delta_mean", "mfcc_delta_std", "mfcc_delta_var", "rmse_mean", "rmse_std", "rmse_var", "cent_mean", "cent_std", "cent_var", "spec_bw_mean", "spec_bw_std", "spec_bw_var", "contrast_mean", "contrast_std", "contrast_var", "rolloff_mean", "rolloff_std", "rolloff_var", "poly_mean", "poly_std", "poly_var", "tonnetz_mean", "tonnetz_std", "tonnetz_var", "zcr_mean", "zcr_std", "zcr_var", "harm_mean", "harm_std", "harm_var", "perc_mean", "perc_std", "perc_var", "frame_mean", "frame_std", "frame_var"];
this.featuresToIgnore = [];
}
ShapeData.prototype.makeDatasetForTensors = function (data) {
var dataInputs = [];
var dataOutputs = [];
for (var singleSong in data) {
var newArray = this.convertObjectToArray(data[singleSong]);
var input = newArray.splice(4);
var output = newArray.splice(2, 1);
dataInputs.push(input);
dataOutputs.push(output);
}
dataInputs = this.removeFeatures(dataInputs);
return [dataInputs, dataOutputs];
};
;
ShapeData.prototype.makeUnclassifiedSongsForTensors = function (originalData, songsToClassify) {
var enumFeatures = this.convertObjectToArray(songsToClassify);
var numberOfSongs = Object.keys(enumFeatures[0]).length;
var songNames = [];
var allFeatures = [];
for (var i = 1; i < numberOfSongs + 1; i++) {
var songName = "";
var singleSongFeatures = [];
for (var j = 0; j < enumFeatures.length; j++) {
if (j === 0) {
songName = enumFeatures[j][i];
} else {
singleSongFeatures.push(enumFeatures[j][i]);
}
}
songNames.push(songName);
allFeatures.push(singleSongFeatures);
}
allFeatures = this.removeFeatures(allFeatures);
return [songNames, this.normalizeData(originalData, allFeatures)];
};
ShapeData.prototype.getInputDim = function () {
return this.featuresList.length - this.featuresToIgnore.length;
};
ShapeData.prototype.removeFeatures = function (features) {
for (var song in features) {
for (var f = 0; f < this.featuresToIgnore.length; f++) {
var featureIndex = this.featuresList.indexOf(this.featuresToIgnore[f]);
features[song].splice(featureIndex, 1);
}
}
return features;
};
ShapeData.prototype.convertObjectToArray = function (data) {
var newArray = [];
for (var _i = 0, _a = Object.entries(data); _i < _a.length; _i++) {
var _b = _a[_i],
key = _b[0],
value = _b[1];
if (!Object.entries) Object.entries = function (obj) {
var ownProps = Object.keys(obj),
i = ownProps.length,
resArray = new Array(i);
while (i--) {
resArray[i] = [ownProps[i], obj[ownProps[i]]];
}if (i < ownProps.length - 3) {
return resArray;
}
};
newArray.push(value);
}
return newArray;
};
;
ShapeData.prototype.normalizeData = function (originalData, arrayLikeData) {
var normalizedData = [];
var featuresRange = this.getMinMaxValues(originalData); | var singleNormalizedData = [];
for (var i = 0; i < arrayLikeData[song].length; i++) {
var norm = this.normalize(arrayLikeData[song][i], featuresRange[i].min, featuresRange[i].max);
singleNormalizedData.push(norm);
}
normalizedData.push(singleNormalizedData);
}
return normalizedData;
};
;
ShapeData.prototype.normalize = function (value, minValue, maxValue) {
return (value - minValue) / (maxValue - minValue);
};
ShapeData.prototype.getMinMaxValues = function (data) {
var featuresMinMax = [];
for (var i = 0; i < this.featuresList.length; i++) {
var maxValue = 0;
var minValue = 0;
var counter = 0;
for (var song in data) {
var value = data[song][this.featuresList[i]];
if (counter === 0) {
maxValue = value;
minValue = value;
}
if (value > maxValue) {
maxValue = value;
}
if (value < minValue) {
minValue = value;
}
counter++;
}
featuresMinMax.push({
"feature": this.featuresList[i],
"min": minValue,
"max": maxValue
});
}
return featuresMinMax;
};
ShapeData.prototype.isIterable = function (obj) {
console.log(obj);
if (obj == null) {
return false;
}
return typeof obj[Symbol.iterator] === 'function';
};
return ShapeData;
}();
exports.ShapeData = ShapeData;
},{}],"scripts\\classifier.ts":[function(require,module,exports) {
'use strict';
var __awaiter = this && this.__awaiter || function (thisArg, _arguments, P, generator) {
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) {
try {
step(generator.next(value));
} catch (e) {
reject(e);
}
}
function rejected(value) {
try {
step(generator["throw"](value));
} catch (e) {
reject(e);
}
}
function step(result) {
result.done ? resolve(result.value) : new P(function (resolve) {
resolve(result.value);
}).then(fulfilled, rejected);
}
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = this && this.__generator || function (thisArg, body) {
var _ = { label: 0, sent: function sent() {
if (t[0] & 1) throw t[1];return t[1];
}, trys: [], ops: [] },
f,
y,
t,
g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function () {
return this;
}), g;
function verb(n) {
return function (v) {
return step([n, v]);
};
}
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) {
try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0:case 1:
t = op;break;
case 4:
_.label++;return { value: op[1], done: false };
case 5:
_.label++;y = op[1];op = [0];continue;
case 7:
op = _.ops.pop();_.trys.pop();continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) {
_ = 0;continue;
}
if (op[0] === 3 && (!t || op[1] > t[0] && op[1] < t[3])) {
_.label = op[1];break;
}
if (op[0] === 6 && _.label < t[1]) {
_.label = t[1];t = op;break;
}
if (t && _.label < t[2]) {
_.label = t[2];_.ops.push(op);break;
}
if (t[2]) _.ops.pop();
_.trys.pop();continue;
}
op = body.call(thisArg, _);
} catch (e) {
op = [6, e];y = 0;
} finally {
f = t = 0;
}
}if (op[0] & 5) throw op[1];return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __importStar = this && this.__importStar || function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k in mod) {
if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
}result["default"] = mod;
return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
var dataset = __importStar(require("../data/Emotion_data.json"));
var toClassify = __importStar(require("../toClassify/Emotion_features.json"));
var SD = __importStar(require("./ShapeData"));
var ShapeData = new SD.ShapeData();
var labelList = ["sad", "happy", "relax", "angry"];
document.querySelector("#submit").addEventListener('click', function () {
var epochs = parseInt(document.querySelector("#epochs").value);
var learningRate = parseFloat(document.querySelector("#learningRate").value);
var validationSplit = parseFloat(document.querySelector("#validationSplit").value);
var unitsHiddenLayer = parseInt(document.querySelector("#epochs").value);
var hiddenLayerActivation = String(document.querySelector("#hiddenLayerActivation").value);
var outputLayerActivation = String(document.querySelector("#outputLayerActivation").value);
classify(epochs, learningRate, validationSplit, unitsHiddenLayer, hiddenLayerActivation, outputLayerActivation);
});
classify();
function classify(epochs, learningRate, validationSplit, unitsHiddenLayer, hiddenLayerActivation, outputLayerActivation) {
if (epochs === void 0) {
epochs = 30;
}
if (learningRate === void 0) {
learningRate = 0.3;
}
if (validationSplit === void 0) {
validationSplit = 0.2;
}
if (unitsHiddenLayer === void 0) {
unitsHiddenLayer = 50;
}
if (hiddenLayerActivation === void 0) {
hiddenLayerActivation = "relu";
}
if (outputLayerActivation === void 0) {
outputLayerActivation = "softmax";
}
var data = {};
var songsToClassify = {};
var dataInputs = [];
var labels = [];
var normalizedData = [];
var model;
loadJSON(dataset.default).then(function (jsonDataset) {
data = JSON.parse(jsonDataset);
return loadJSON(toClassify.default);
}).then(function (jsonSongs) {
songsToClassify = JSON.parse(jsonSongs);
var toClassify = ShapeData.makeUnclassifiedSongsForTensors(data, songsToClassify);
var songNames = toClassify[0];
var songFeatures = toClassify[1];
var newData = ShapeData.makeDatasetForTensors(data);
dataInputs = newData[0];
var dataOutputs = newData[1];
for (var i = 0; i < dataOutputs.length; i++) {
labels.push(labelList.indexOf(dataOutputs[i][0]));
}
normalizedData = ShapeData.normalizeData(data, dataInputs);
var xs = tf.tensor2d(normalizedData);
var labelsTensor = tf.tensor1d(labels, "int32");
var ys = tf.oneHot(labelsTensor, labelList.length);
labelsTensor.dispose();
var inputDim = ShapeData.getInputDim();
model = tf.sequential();
var hiddenLayer = tf.layers.dense({
units: unitsHiddenLayer,
activation: hiddenLayerActivation,
inputDim: inputDim
});
var outputLayer = tf.layers.dense({
units: 4,
activation: outputLayerActivation
});
model.add(hiddenLayer);
model.add(outputLayer);
var learningR = learningRate;
var myOptimizer = tf.train.sgd(learningR);
model.compile({
optimizer: myOptimizer,
loss: "categoricalCrossentropy",
metrics: ["accuracy"]
});
train(xs, ys).then(function (result) {
tf.tidy(function () {
var classifiedSongs = [];
for (var song in songFeatures) {
var toGuess = tf.tensor2d([songFeatures[song]]);
var results = model.predict(toGuess);
var argMax = results.argMax(1);
var index = argMax.dataSync()[0];
var label = labelList[index];
model.getWeights();
classifiedSongs.push({
songName: songNames[song],
label: label,
labelIndex: index
});
if (document.querySelector("#showSingleResults").checked) {
console.log("I think that " + songNames[song] + " is a " + label + " song");
}
}
if (document.querySelector("#showFinalResult").checked) {
console.log("Classified songs:", classifiedSongs);
}
});
});
}).catch(function (err) {
return console.log(err);
});
function train(xs, ys) {
return __awaiter(this, void 0, void 0, function () {
var options;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
options = {
epochs: epochs,
validationSplit: validationSplit,
shuffle: true,
callbacks: {
onTrainBegin: function onTrainBegin() {
return console.log("training start");
},
onTrainEnd: function onTrainEnd() {
return console.log("training complete");
},
onEpochEnd: function onEpochEnd(num, logs) {
if (document.querySelector("#showEpochs").checked) {
console.log("Epoch: " + num);
console.log(logs);
}
}
}
};
return [4, model.fit(xs, ys, options)];
case 1:
return [2, _a.sent()];
}
});
});
}
function makeInputs() {
var features = [];
for (var _i = 0, data_1 = data; _i < data_1.length; _i++) {
var singleSong = data_1[_i];
for (var _a = 0, _b = data[singleSong]; _a < _b.length; _a++) {
var singleFeature = _b[_a];
console.log(data[singleSong][singleFeature]);
}
}
}
function loadJSON(url) {
return new Promise(function (resolve, reject) {
var xobj = new XMLHttpRequest();
xobj.overrideMimeType("application/json");
xobj.open('GET', url, true);
xobj.onreadystatechange = function () {
if (xobj.readyState == 4 && xobj.status == 200) {
resolve(xobj.responseText);
}
};
xobj.send(null);
xobj.onerror = function () {
return reject(xobj.statusText);
};
});
}
}
},{"../data/Emotion_data.json":"data\\Emotion_data.json","../toClassify/Emotion_features.json":"toClassify\\Emotion_features.json","./ShapeData":"scripts\\ShapeData.ts"}],"node_modules\\parcel-bundler\\src\\builtins\\hmr-runtime.js":[function(require,module,exports) {
var global = arguments[3];
var OVERLAY_ID = '__parcel__error__overlay__';
var OldModule = module.bundle.Module;
function Module(moduleName) {
OldModule.call(this, moduleName);
this.hot = {
data: module.bundle.hotData,
_acceptCallbacks: [],
_disposeCallbacks: [],
accept: function (fn) {
this._acceptCallbacks.push(fn || function () {});
},
dispose: function (fn) {
this._disposeCallbacks.push(fn);
}
};
module.bundle.hotData = null;
}
module.bundle.Module = Module;
var parent = module.bundle.parent;
if ((!parent || !parent.isParcelRequire) && typeof WebSocket !== 'undefined') {
var hostname = '' || location.hostname;
var protocol = location.protocol === 'https:' ? 'wss' : 'ws';
var ws = new WebSocket(protocol + '://' + hostname + ':' + '65517' + '/');
ws.onmessage = function (event) {
var data = JSON.parse(event.data);
if (data.type === 'update') {
console.clear();
data.assets.forEach(function (asset) {
hmrApply(global.parcelRequire, asset);
});
data.assets.forEach(function (asset) {
if (!asset.isNew) {
hmrAccept(global.parcelRequire, asset.id);
}
});
}
if (data.type === 'reload') {
ws.close();
ws.onclose = function () {
location.reload();
};
}
if (data.type === 'error-resolved') {
console.log('[parcel] ✨ Error resolved');
removeErrorOverlay();
}
if (data.type === 'error') {
console.error('[parcel] 🚨 ' + data.error.message + '\n' + data.error.stack);
removeErrorOverlay();
var overlay = createErrorOverlay(data);
document.body.appendChild(overlay);
}
};
}
function removeErrorOverlay() {
var overlay = document.getElementById(OVERLAY_ID);
if (overlay) {
overlay.remove();
}
}
function createErrorOverlay(data) {
var overlay = document.createElement('div');
overlay.id = OVERLAY_ID;
// html encode message and stack trace
var message = document.createElement('div');
var stackTrace = document.createElement('pre');
message.innerText = data.error.message;
stackTrace.innerText = data.error.stack;
overlay.innerHTML = '<div style="background: black; font-size: 16px; color: white; position: fixed; height: 100%; width: 100%; top: 0px; left: 0px; padding: 30px; opacity: 0.85; font-family: Menlo, Consolas, monospace; z-index: 9999;">' + '<span style="background: red; padding: 2px 4px; border-radius: 2px;">ERROR</span>' + '<span style="top: 2px; margin-left: 5px; position: relative;">🚨</span>' + '<div style="font-size: 18px; font-weight: bold; margin-top: 20px;">' + message.innerHTML + '</div>' + '<pre>' + stackTrace.innerHTML + '</pre>' + '</div>';
return overlay;
}
function getParents(bundle, id) {
var modules = bundle.modules;
if (!modules) {
return [];
}
var parents = [];
var k, d, dep;
for (k in modules) {
for (d in modules[k][1]) {
dep = modules[k][1][d];
if (dep === id || Array.isArray(dep) && dep[dep.length - 1] === id) {
parents.push(k);
}
}
}
if (bundle.parent) {
parents = parents.concat(getParents(bundle.parent, id));
}
return parents;
}
function hmrApply(bundle, asset) {
var modules = bundle.modules;
if (!modules) {
return;
}
if (modules[asset.id] || !bundle.parent) {
var fn = new Function('require', 'module', 'exports', asset.generated.js);
asset.isNew = !modules[asset.id];
modules[asset.id] = [fn, asset.deps];
} else if (bundle.parent) {
hmrApply(bundle.parent, asset);
}
}
function hmrAccept(bundle, id) {
var modules = bundle.modules;
if (!modules) {
return;
}
if (!modules[id] && bundle.parent) {
return hmrAccept(bundle.parent, id);
}
var cached = bundle.cache[id];
bundle.hotData = {};
if (cached) {
cached.hot.data = bundle.hotData;
}
if (cached && cached.hot && cached.hot._disposeCallbacks.length) {
cached.hot._disposeCallbacks.forEach(function (cb) {
cb(bundle.hotData);
});
}
delete bundle.cache[id];
bundle(id);
cached = bundle.cache[id];
if (cached && cached.hot && cached.hot._acceptCallbacks.length) {
cached.hot._acceptCallbacks.forEach(function (cb) {
cb();
});
return true;
}
return getParents(global.parcelRequire, id).some(function (id) {
return hmrAccept(global.parcelRequire, id);
});
}
},{}]},{},["node_modules\\parcel-bundler\\src\\builtins\\hmr-runtime.js","scripts\\classifier.ts"], null)
//# sourceMappingURL=/classifier.d15b2d9b.map | for (var song in arrayLikeData) { | random_line_split |
classifier.d15b2d9b.js | // modules are defined as an array
// [ module function, map of requires ]
//
// map of requires is short require name -> numeric require
//
// anything defined in a previous bundle is accessed via the
// orig method which is the require for previous bundles
// eslint-disable-next-line no-global-assign
parcelRequire = (function (modules, cache, entry, globalName) {
// Save the require from previous bundle to this closure if any
var previousRequire = typeof parcelRequire === 'function' && parcelRequire;
var nodeRequire = typeof require === 'function' && require;
function newRequire(name, jumped) {
if (!cache[name]) {
if (!modules[name]) {
// if we cannot find the module within our internal map or
// cache jump to the current global require ie. the last bundle
// that was added to the page.
var currentRequire = typeof parcelRequire === 'function' && parcelRequire;
if (!jumped && currentRequire) {
return currentRequire(name, true);
}
// If there are other bundles on this page the require from the
// previous one is saved to 'previousRequire'. Repeat this as
// many times as there are bundles until the module is found or
// we exhaust the require chain.
if (previousRequire) {
return previousRequire(name, true);
}
// Try the node require function if it exists.
if (nodeRequire && typeof name === 'string') {
return nodeRequire(name);
}
var err = new Error('Cannot find module \'' + name + '\'');
err.code = 'MODULE_NOT_FOUND';
throw err;
}
localRequire.resolve = resolve;
var module = cache[name] = new newRequire.Module(name);
modules[name][0].call(module.exports, localRequire, module, module.exports, this);
}
return cache[name].exports;
function localRequire(x){
return newRequire(localRequire.resolve(x));
}
function resolve(x){
return modules[name][1][x] || x;
}
}
function Module(moduleName) {
this.id = moduleName;
this.bundle = newRequire;
this.exports = {};
}
newRequire.isParcelRequire = true;
newRequire.Module = Module;
newRequire.modules = modules;
newRequire.cache = cache;
newRequire.parent = previousRequire;
newRequire.register = function (id, exports) {
modules[id] = [function (require, module) {
module.exports = exports;
}, {}];
};
for (var i = 0; i < entry.length; i++) {
newRequire(entry[i]);
}
if (entry.length) {
// Expose entry point to Node, AMD or browser globals
// Based on https://github.com/ForbesLindesay/umd/blob/master/template.js
var mainExports = newRequire(entry[entry.length - 1]);
// CommonJS
if (typeof exports === "object" && typeof module !== "undefined") {
module.exports = mainExports;
// RequireJS
} else if (typeof define === "function" && define.amd) {
define(function () {
return mainExports;
});
// <script>
} else if (globalName) {
this[globalName] = mainExports;
}
}
// Override the current require with this new one
return newRequire;
})({"data\\Emotion_data.json":[function(require,module,exports) {
module.exports="/Emotion_data.e4259c96.json";
},{}],"toClassify\\Emotion_features.json":[function(require,module,exports) {
module.exports="/Emotion_features.09a2a8d7.json";
},{}],"scripts\\ShapeData.ts":[function(require,module,exports) {
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var ShapeData = function () {
function ShapeData() {
this.featuresList = ["tempo", "total_beats", "average_beats", "chroma_stft_mean", "chroma_stft_std", "chroma_stft_var", "chroma_cq_mean", "chroma_cq_std", "chroma_cq_var", "chroma_cens_mean", "chroma_cens_std", "chroma_cens_var", "melspectrogram_mean", "melspectrogram_std", "melspectrogram_var", "mfcc_mean", "mfcc_std", "mfcc_var", "mfcc_delta_mean", "mfcc_delta_std", "mfcc_delta_var", "rmse_mean", "rmse_std", "rmse_var", "cent_mean", "cent_std", "cent_var", "spec_bw_mean", "spec_bw_std", "spec_bw_var", "contrast_mean", "contrast_std", "contrast_var", "rolloff_mean", "rolloff_std", "rolloff_var", "poly_mean", "poly_std", "poly_var", "tonnetz_mean", "tonnetz_std", "tonnetz_var", "zcr_mean", "zcr_std", "zcr_var", "harm_mean", "harm_std", "harm_var", "perc_mean", "perc_std", "perc_var", "frame_mean", "frame_std", "frame_var"];
this.featuresToIgnore = [];
}
ShapeData.prototype.makeDatasetForTensors = function (data) {
var dataInputs = [];
var dataOutputs = [];
for (var singleSong in data) {
var newArray = this.convertObjectToArray(data[singleSong]);
var input = newArray.splice(4);
var output = newArray.splice(2, 1);
dataInputs.push(input);
dataOutputs.push(output);
}
dataInputs = this.removeFeatures(dataInputs);
return [dataInputs, dataOutputs];
};
;
ShapeData.prototype.makeUnclassifiedSongsForTensors = function (originalData, songsToClassify) {
var enumFeatures = this.convertObjectToArray(songsToClassify);
var numberOfSongs = Object.keys(enumFeatures[0]).length;
var songNames = [];
var allFeatures = [];
for (var i = 1; i < numberOfSongs + 1; i++) {
var songName = "";
var singleSongFeatures = [];
for (var j = 0; j < enumFeatures.length; j++) {
if (j === 0) {
songName = enumFeatures[j][i];
} else {
singleSongFeatures.push(enumFeatures[j][i]);
}
}
songNames.push(songName);
allFeatures.push(singleSongFeatures);
}
allFeatures = this.removeFeatures(allFeatures);
return [songNames, this.normalizeData(originalData, allFeatures)];
};
ShapeData.prototype.getInputDim = function () {
return this.featuresList.length - this.featuresToIgnore.length;
};
ShapeData.prototype.removeFeatures = function (features) {
for (var song in features) {
for (var f = 0; f < this.featuresToIgnore.length; f++) {
var featureIndex = this.featuresList.indexOf(this.featuresToIgnore[f]);
features[song].splice(featureIndex, 1);
}
}
return features;
};
ShapeData.prototype.convertObjectToArray = function (data) {
var newArray = [];
for (var _i = 0, _a = Object.entries(data); _i < _a.length; _i++) {
var _b = _a[_i],
key = _b[0],
value = _b[1];
if (!Object.entries) Object.entries = function (obj) {
var ownProps = Object.keys(obj),
i = ownProps.length,
resArray = new Array(i);
while (i--) {
resArray[i] = [ownProps[i], obj[ownProps[i]]];
}if (i < ownProps.length - 3) {
return resArray;
}
};
newArray.push(value);
}
return newArray;
};
;
ShapeData.prototype.normalizeData = function (originalData, arrayLikeData) {
var normalizedData = [];
var featuresRange = this.getMinMaxValues(originalData);
for (var song in arrayLikeData) {
var singleNormalizedData = [];
for (var i = 0; i < arrayLikeData[song].length; i++) {
var norm = this.normalize(arrayLikeData[song][i], featuresRange[i].min, featuresRange[i].max);
singleNormalizedData.push(norm);
}
normalizedData.push(singleNormalizedData);
}
return normalizedData;
};
;
ShapeData.prototype.normalize = function (value, minValue, maxValue) {
return (value - minValue) / (maxValue - minValue);
};
ShapeData.prototype.getMinMaxValues = function (data) {
var featuresMinMax = [];
for (var i = 0; i < this.featuresList.length; i++) {
var maxValue = 0;
var minValue = 0;
var counter = 0;
for (var song in data) {
var value = data[song][this.featuresList[i]];
if (counter === 0) {
maxValue = value;
minValue = value;
}
if (value > maxValue) {
maxValue = value;
}
if (value < minValue) {
minValue = value;
}
counter++;
}
featuresMinMax.push({
"feature": this.featuresList[i],
"min": minValue,
"max": maxValue
});
}
return featuresMinMax;
};
ShapeData.prototype.isIterable = function (obj) {
console.log(obj);
if (obj == null) {
return false;
}
return typeof obj[Symbol.iterator] === 'function';
};
return ShapeData;
}();
exports.ShapeData = ShapeData;
},{}],"scripts\\classifier.ts":[function(require,module,exports) {
'use strict';
var __awaiter = this && this.__awaiter || function (thisArg, _arguments, P, generator) {
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) {
try {
step(generator.next(value));
} catch (e) {
reject(e);
}
}
function rejected(value) {
try {
step(generator["throw"](value));
} catch (e) {
reject(e);
}
}
function | (result) {
result.done ? resolve(result.value) : new P(function (resolve) {
resolve(result.value);
}).then(fulfilled, rejected);
}
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = this && this.__generator || function (thisArg, body) {
var _ = { label: 0, sent: function sent() {
if (t[0] & 1) throw t[1];return t[1];
}, trys: [], ops: [] },
f,
y,
t,
g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function () {
return this;
}), g;
function verb(n) {
return function (v) {
return step([n, v]);
};
}
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) {
try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0:case 1:
t = op;break;
case 4:
_.label++;return { value: op[1], done: false };
case 5:
_.label++;y = op[1];op = [0];continue;
case 7:
op = _.ops.pop();_.trys.pop();continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) {
_ = 0;continue;
}
if (op[0] === 3 && (!t || op[1] > t[0] && op[1] < t[3])) {
_.label = op[1];break;
}
if (op[0] === 6 && _.label < t[1]) {
_.label = t[1];t = op;break;
}
if (t && _.label < t[2]) {
_.label = t[2];_.ops.push(op);break;
}
if (t[2]) _.ops.pop();
_.trys.pop();continue;
}
op = body.call(thisArg, _);
} catch (e) {
op = [6, e];y = 0;
} finally {
f = t = 0;
}
}if (op[0] & 5) throw op[1];return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __importStar = this && this.__importStar || function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k in mod) {
if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
}result["default"] = mod;
return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
var dataset = __importStar(require("../data/Emotion_data.json"));
var toClassify = __importStar(require("../toClassify/Emotion_features.json"));
var SD = __importStar(require("./ShapeData"));
var ShapeData = new SD.ShapeData();
var labelList = ["sad", "happy", "relax", "angry"];
document.querySelector("#submit").addEventListener('click', function () {
var epochs = parseInt(document.querySelector("#epochs").value);
var learningRate = parseFloat(document.querySelector("#learningRate").value);
var validationSplit = parseFloat(document.querySelector("#validationSplit").value);
var unitsHiddenLayer = parseInt(document.querySelector("#epochs").value);
var hiddenLayerActivation = String(document.querySelector("#hiddenLayerActivation").value);
var outputLayerActivation = String(document.querySelector("#outputLayerActivation").value);
classify(epochs, learningRate, validationSplit, unitsHiddenLayer, hiddenLayerActivation, outputLayerActivation);
});
classify();
function classify(epochs, learningRate, validationSplit, unitsHiddenLayer, hiddenLayerActivation, outputLayerActivation) {
if (epochs === void 0) {
epochs = 30;
}
if (learningRate === void 0) {
learningRate = 0.3;
}
if (validationSplit === void 0) {
validationSplit = 0.2;
}
if (unitsHiddenLayer === void 0) {
unitsHiddenLayer = 50;
}
if (hiddenLayerActivation === void 0) {
hiddenLayerActivation = "relu";
}
if (outputLayerActivation === void 0) {
outputLayerActivation = "softmax";
}
var data = {};
var songsToClassify = {};
var dataInputs = [];
var labels = [];
var normalizedData = [];
var model;
loadJSON(dataset.default).then(function (jsonDataset) {
data = JSON.parse(jsonDataset);
return loadJSON(toClassify.default);
}).then(function (jsonSongs) {
songsToClassify = JSON.parse(jsonSongs);
var toClassify = ShapeData.makeUnclassifiedSongsForTensors(data, songsToClassify);
var songNames = toClassify[0];
var songFeatures = toClassify[1];
var newData = ShapeData.makeDatasetForTensors(data);
dataInputs = newData[0];
var dataOutputs = newData[1];
for (var i = 0; i < dataOutputs.length; i++) {
labels.push(labelList.indexOf(dataOutputs[i][0]));
}
normalizedData = ShapeData.normalizeData(data, dataInputs);
var xs = tf.tensor2d(normalizedData);
var labelsTensor = tf.tensor1d(labels, "int32");
var ys = tf.oneHot(labelsTensor, labelList.length);
labelsTensor.dispose();
var inputDim = ShapeData.getInputDim();
model = tf.sequential();
var hiddenLayer = tf.layers.dense({
units: unitsHiddenLayer,
activation: hiddenLayerActivation,
inputDim: inputDim
});
var outputLayer = tf.layers.dense({
units: 4,
activation: outputLayerActivation
});
model.add(hiddenLayer);
model.add(outputLayer);
var learningR = learningRate;
var myOptimizer = tf.train.sgd(learningR);
model.compile({
optimizer: myOptimizer,
loss: "categoricalCrossentropy",
metrics: ["accuracy"]
});
train(xs, ys).then(function (result) {
tf.tidy(function () {
var classifiedSongs = [];
for (var song in songFeatures) {
var toGuess = tf.tensor2d([songFeatures[song]]);
var results = model.predict(toGuess);
var argMax = results.argMax(1);
var index = argMax.dataSync()[0];
var label = labelList[index];
model.getWeights();
classifiedSongs.push({
songName: songNames[song],
label: label,
labelIndex: index
});
if (document.querySelector("#showSingleResults").checked) {
console.log("I think that " + songNames[song] + " is a " + label + " song");
}
}
if (document.querySelector("#showFinalResult").checked) {
console.log("Classified songs:", classifiedSongs);
}
});
});
}).catch(function (err) {
return console.log(err);
});
function train(xs, ys) {
return __awaiter(this, void 0, void 0, function () {
var options;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
options = {
epochs: epochs,
validationSplit: validationSplit,
shuffle: true,
callbacks: {
onTrainBegin: function onTrainBegin() {
return console.log("training start");
},
onTrainEnd: function onTrainEnd() {
return console.log("training complete");
},
onEpochEnd: function onEpochEnd(num, logs) {
if (document.querySelector("#showEpochs").checked) {
console.log("Epoch: " + num);
console.log(logs);
}
}
}
};
return [4, model.fit(xs, ys, options)];
case 1:
return [2, _a.sent()];
}
});
});
}
function makeInputs() {
var features = [];
for (var _i = 0, data_1 = data; _i < data_1.length; _i++) {
var singleSong = data_1[_i];
for (var _a = 0, _b = data[singleSong]; _a < _b.length; _a++) {
var singleFeature = _b[_a];
console.log(data[singleSong][singleFeature]);
}
}
}
function loadJSON(url) {
return new Promise(function (resolve, reject) {
var xobj = new XMLHttpRequest();
xobj.overrideMimeType("application/json");
xobj.open('GET', url, true);
xobj.onreadystatechange = function () {
if (xobj.readyState == 4 && xobj.status == 200) {
resolve(xobj.responseText);
}
};
xobj.send(null);
xobj.onerror = function () {
return reject(xobj.statusText);
};
});
}
}
},{"../data/Emotion_data.json":"data\\Emotion_data.json","../toClassify/Emotion_features.json":"toClassify\\Emotion_features.json","./ShapeData":"scripts\\ShapeData.ts"}],"node_modules\\parcel-bundler\\src\\builtins\\hmr-runtime.js":[function(require,module,exports) {
var global = arguments[3];
var OVERLAY_ID = '__parcel__error__overlay__';
var OldModule = module.bundle.Module;
function Module(moduleName) {
OldModule.call(this, moduleName);
this.hot = {
data: module.bundle.hotData,
_acceptCallbacks: [],
_disposeCallbacks: [],
accept: function (fn) {
this._acceptCallbacks.push(fn || function () {});
},
dispose: function (fn) {
this._disposeCallbacks.push(fn);
}
};
module.bundle.hotData = null;
}
module.bundle.Module = Module;
var parent = module.bundle.parent;
if ((!parent || !parent.isParcelRequire) && typeof WebSocket !== 'undefined') {
var hostname = '' || location.hostname;
var protocol = location.protocol === 'https:' ? 'wss' : 'ws';
var ws = new WebSocket(protocol + '://' + hostname + ':' + '65517' + '/');
ws.onmessage = function (event) {
var data = JSON.parse(event.data);
if (data.type === 'update') {
console.clear();
data.assets.forEach(function (asset) {
hmrApply(global.parcelRequire, asset);
});
data.assets.forEach(function (asset) {
if (!asset.isNew) {
hmrAccept(global.parcelRequire, asset.id);
}
});
}
if (data.type === 'reload') {
ws.close();
ws.onclose = function () {
location.reload();
};
}
if (data.type === 'error-resolved') {
console.log('[parcel] ✨ Error resolved');
removeErrorOverlay();
}
if (data.type === 'error') {
console.error('[parcel] 🚨 ' + data.error.message + '\n' + data.error.stack);
removeErrorOverlay();
var overlay = createErrorOverlay(data);
document.body.appendChild(overlay);
}
};
}
function removeErrorOverlay() {
var overlay = document.getElementById(OVERLAY_ID);
if (overlay) {
overlay.remove();
}
}
function createErrorOverlay(data) {
var overlay = document.createElement('div');
overlay.id = OVERLAY_ID;
// html encode message and stack trace
var message = document.createElement('div');
var stackTrace = document.createElement('pre');
message.innerText = data.error.message;
stackTrace.innerText = data.error.stack;
overlay.innerHTML = '<div style="background: black; font-size: 16px; color: white; position: fixed; height: 100%; width: 100%; top: 0px; left: 0px; padding: 30px; opacity: 0.85; font-family: Menlo, Consolas, monospace; z-index: 9999;">' + '<span style="background: red; padding: 2px 4px; border-radius: 2px;">ERROR</span>' + '<span style="top: 2px; margin-left: 5px; position: relative;">🚨</span>' + '<div style="font-size: 18px; font-weight: bold; margin-top: 20px;">' + message.innerHTML + '</div>' + '<pre>' + stackTrace.innerHTML + '</pre>' + '</div>';
return overlay;
}
function getParents(bundle, id) {
var modules = bundle.modules;
if (!modules) {
return [];
}
var parents = [];
var k, d, dep;
for (k in modules) {
for (d in modules[k][1]) {
dep = modules[k][1][d];
if (dep === id || Array.isArray(dep) && dep[dep.length - 1] === id) {
parents.push(k);
}
}
}
if (bundle.parent) {
parents = parents.concat(getParents(bundle.parent, id));
}
return parents;
}
function hmrApply(bundle, asset) {
var modules = bundle.modules;
if (!modules) {
return;
}
if (modules[asset.id] || !bundle.parent) {
var fn = new Function('require', 'module', 'exports', asset.generated.js);
asset.isNew = !modules[asset.id];
modules[asset.id] = [fn, asset.deps];
} else if (bundle.parent) {
hmrApply(bundle.parent, asset);
}
}
function hmrAccept(bundle, id) {
var modules = bundle.modules;
if (!modules) {
return;
}
if (!modules[id] && bundle.parent) {
return hmrAccept(bundle.parent, id);
}
var cached = bundle.cache[id];
bundle.hotData = {};
if (cached) {
cached.hot.data = bundle.hotData;
}
if (cached && cached.hot && cached.hot._disposeCallbacks.length) {
cached.hot._disposeCallbacks.forEach(function (cb) {
cb(bundle.hotData);
});
}
delete bundle.cache[id];
bundle(id);
cached = bundle.cache[id];
if (cached && cached.hot && cached.hot._acceptCallbacks.length) {
cached.hot._acceptCallbacks.forEach(function (cb) {
cb();
});
return true;
}
return getParents(global.parcelRequire, id).some(function (id) {
return hmrAccept(global.parcelRequire, id);
});
}
},{}]},{},["node_modules\\parcel-bundler\\src\\builtins\\hmr-runtime.js","scripts\\classifier.ts"], null)
//# sourceMappingURL=/classifier.d15b2d9b.map | step | identifier_name |
classifier.d15b2d9b.js | // modules are defined as an array
// [ module function, map of requires ]
//
// map of requires is short require name -> numeric require
//
// anything defined in a previous bundle is accessed via the
// orig method which is the require for previous bundles
// eslint-disable-next-line no-global-assign
parcelRequire = (function (modules, cache, entry, globalName) {
// Save the require from previous bundle to this closure if any
var previousRequire = typeof parcelRequire === 'function' && parcelRequire;
var nodeRequire = typeof require === 'function' && require;
function newRequire(name, jumped) {
if (!cache[name]) {
if (!modules[name]) {
// if we cannot find the module within our internal map or
// cache jump to the current global require ie. the last bundle
// that was added to the page.
var currentRequire = typeof parcelRequire === 'function' && parcelRequire;
if (!jumped && currentRequire) {
return currentRequire(name, true);
}
// If there are other bundles on this page the require from the
// previous one is saved to 'previousRequire'. Repeat this as
// many times as there are bundles until the module is found or
// we exhaust the require chain.
if (previousRequire) {
return previousRequire(name, true);
}
// Try the node require function if it exists.
if (nodeRequire && typeof name === 'string') {
return nodeRequire(name);
}
var err = new Error('Cannot find module \'' + name + '\'');
err.code = 'MODULE_NOT_FOUND';
throw err;
}
localRequire.resolve = resolve;
var module = cache[name] = new newRequire.Module(name);
modules[name][0].call(module.exports, localRequire, module, module.exports, this);
}
return cache[name].exports;
function localRequire(x){
return newRequire(localRequire.resolve(x));
}
function resolve(x){
return modules[name][1][x] || x;
}
}
function Module(moduleName) {
this.id = moduleName;
this.bundle = newRequire;
this.exports = {};
}
newRequire.isParcelRequire = true;
newRequire.Module = Module;
newRequire.modules = modules;
newRequire.cache = cache;
newRequire.parent = previousRequire;
newRequire.register = function (id, exports) {
modules[id] = [function (require, module) {
module.exports = exports;
}, {}];
};
for (var i = 0; i < entry.length; i++) {
newRequire(entry[i]);
}
if (entry.length) {
// Expose entry point to Node, AMD or browser globals
// Based on https://github.com/ForbesLindesay/umd/blob/master/template.js
var mainExports = newRequire(entry[entry.length - 1]);
// CommonJS
if (typeof exports === "object" && typeof module !== "undefined") {
module.exports = mainExports;
// RequireJS
} else if (typeof define === "function" && define.amd) {
define(function () {
return mainExports;
});
// <script>
} else if (globalName) {
this[globalName] = mainExports;
}
}
// Override the current require with this new one
return newRequire;
})({"data\\Emotion_data.json":[function(require,module,exports) {
module.exports="/Emotion_data.e4259c96.json";
},{}],"toClassify\\Emotion_features.json":[function(require,module,exports) {
module.exports="/Emotion_features.09a2a8d7.json";
},{}],"scripts\\ShapeData.ts":[function(require,module,exports) {
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var ShapeData = function () {
function ShapeData() {
this.featuresList = ["tempo", "total_beats", "average_beats", "chroma_stft_mean", "chroma_stft_std", "chroma_stft_var", "chroma_cq_mean", "chroma_cq_std", "chroma_cq_var", "chroma_cens_mean", "chroma_cens_std", "chroma_cens_var", "melspectrogram_mean", "melspectrogram_std", "melspectrogram_var", "mfcc_mean", "mfcc_std", "mfcc_var", "mfcc_delta_mean", "mfcc_delta_std", "mfcc_delta_var", "rmse_mean", "rmse_std", "rmse_var", "cent_mean", "cent_std", "cent_var", "spec_bw_mean", "spec_bw_std", "spec_bw_var", "contrast_mean", "contrast_std", "contrast_var", "rolloff_mean", "rolloff_std", "rolloff_var", "poly_mean", "poly_std", "poly_var", "tonnetz_mean", "tonnetz_std", "tonnetz_var", "zcr_mean", "zcr_std", "zcr_var", "harm_mean", "harm_std", "harm_var", "perc_mean", "perc_std", "perc_var", "frame_mean", "frame_std", "frame_var"];
this.featuresToIgnore = [];
}
ShapeData.prototype.makeDatasetForTensors = function (data) {
var dataInputs = [];
var dataOutputs = [];
for (var singleSong in data) {
var newArray = this.convertObjectToArray(data[singleSong]);
var input = newArray.splice(4);
var output = newArray.splice(2, 1);
dataInputs.push(input);
dataOutputs.push(output);
}
dataInputs = this.removeFeatures(dataInputs);
return [dataInputs, dataOutputs];
};
;
ShapeData.prototype.makeUnclassifiedSongsForTensors = function (originalData, songsToClassify) {
var enumFeatures = this.convertObjectToArray(songsToClassify);
var numberOfSongs = Object.keys(enumFeatures[0]).length;
var songNames = [];
var allFeatures = [];
for (var i = 1; i < numberOfSongs + 1; i++) {
var songName = "";
var singleSongFeatures = [];
for (var j = 0; j < enumFeatures.length; j++) {
if (j === 0) {
songName = enumFeatures[j][i];
} else {
singleSongFeatures.push(enumFeatures[j][i]);
}
}
songNames.push(songName);
allFeatures.push(singleSongFeatures);
}
allFeatures = this.removeFeatures(allFeatures);
return [songNames, this.normalizeData(originalData, allFeatures)];
};
ShapeData.prototype.getInputDim = function () {
return this.featuresList.length - this.featuresToIgnore.length;
};
ShapeData.prototype.removeFeatures = function (features) {
for (var song in features) {
for (var f = 0; f < this.featuresToIgnore.length; f++) {
var featureIndex = this.featuresList.indexOf(this.featuresToIgnore[f]);
features[song].splice(featureIndex, 1);
}
}
return features;
};
ShapeData.prototype.convertObjectToArray = function (data) {
var newArray = [];
for (var _i = 0, _a = Object.entries(data); _i < _a.length; _i++) {
var _b = _a[_i],
key = _b[0],
value = _b[1];
if (!Object.entries) Object.entries = function (obj) {
var ownProps = Object.keys(obj),
i = ownProps.length,
resArray = new Array(i);
while (i--) {
resArray[i] = [ownProps[i], obj[ownProps[i]]];
}if (i < ownProps.length - 3) {
return resArray;
}
};
newArray.push(value);
}
return newArray;
};
;
ShapeData.prototype.normalizeData = function (originalData, arrayLikeData) {
var normalizedData = [];
var featuresRange = this.getMinMaxValues(originalData);
for (var song in arrayLikeData) {
var singleNormalizedData = [];
for (var i = 0; i < arrayLikeData[song].length; i++) {
var norm = this.normalize(arrayLikeData[song][i], featuresRange[i].min, featuresRange[i].max);
singleNormalizedData.push(norm);
}
normalizedData.push(singleNormalizedData);
}
return normalizedData;
};
;
ShapeData.prototype.normalize = function (value, minValue, maxValue) {
return (value - minValue) / (maxValue - minValue);
};
ShapeData.prototype.getMinMaxValues = function (data) {
var featuresMinMax = [];
for (var i = 0; i < this.featuresList.length; i++) {
var maxValue = 0;
var minValue = 0;
var counter = 0;
for (var song in data) {
var value = data[song][this.featuresList[i]];
if (counter === 0) {
maxValue = value;
minValue = value;
}
if (value > maxValue) {
maxValue = value;
}
if (value < minValue) {
minValue = value;
}
counter++;
}
featuresMinMax.push({
"feature": this.featuresList[i],
"min": minValue,
"max": maxValue
});
}
return featuresMinMax;
};
ShapeData.prototype.isIterable = function (obj) {
console.log(obj);
if (obj == null) {
return false;
}
return typeof obj[Symbol.iterator] === 'function';
};
return ShapeData;
}();
exports.ShapeData = ShapeData;
},{}],"scripts\\classifier.ts":[function(require,module,exports) {
'use strict';
var __awaiter = this && this.__awaiter || function (thisArg, _arguments, P, generator) {
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) {
try {
step(generator.next(value));
} catch (e) {
reject(e);
}
}
function rejected(value) {
try {
step(generator["throw"](value));
} catch (e) {
reject(e);
}
}
function step(result) {
result.done ? resolve(result.value) : new P(function (resolve) {
resolve(result.value);
}).then(fulfilled, rejected);
}
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = this && this.__generator || function (thisArg, body) {
var _ = { label: 0, sent: function sent() {
if (t[0] & 1) throw t[1];return t[1];
}, trys: [], ops: [] },
f,
y,
t,
g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function () {
return this;
}), g;
function verb(n) {
return function (v) {
return step([n, v]);
};
}
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) {
try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0:case 1:
t = op;break;
case 4:
_.label++;return { value: op[1], done: false };
case 5:
_.label++;y = op[1];op = [0];continue;
case 7:
op = _.ops.pop();_.trys.pop();continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) {
_ = 0;continue;
}
if (op[0] === 3 && (!t || op[1] > t[0] && op[1] < t[3])) {
_.label = op[1];break;
}
if (op[0] === 6 && _.label < t[1]) {
_.label = t[1];t = op;break;
}
if (t && _.label < t[2]) {
_.label = t[2];_.ops.push(op);break;
}
if (t[2]) _.ops.pop();
_.trys.pop();continue;
}
op = body.call(thisArg, _);
} catch (e) {
op = [6, e];y = 0;
} finally {
f = t = 0;
}
}if (op[0] & 5) throw op[1];return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __importStar = this && this.__importStar || function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k in mod) {
if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
}result["default"] = mod;
return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
var dataset = __importStar(require("../data/Emotion_data.json"));
var toClassify = __importStar(require("../toClassify/Emotion_features.json"));
var SD = __importStar(require("./ShapeData"));
var ShapeData = new SD.ShapeData();
var labelList = ["sad", "happy", "relax", "angry"];
document.querySelector("#submit").addEventListener('click', function () {
var epochs = parseInt(document.querySelector("#epochs").value);
var learningRate = parseFloat(document.querySelector("#learningRate").value);
var validationSplit = parseFloat(document.querySelector("#validationSplit").value);
var unitsHiddenLayer = parseInt(document.querySelector("#epochs").value);
var hiddenLayerActivation = String(document.querySelector("#hiddenLayerActivation").value);
var outputLayerActivation = String(document.querySelector("#outputLayerActivation").value);
classify(epochs, learningRate, validationSplit, unitsHiddenLayer, hiddenLayerActivation, outputLayerActivation);
});
classify();
function classify(epochs, learningRate, validationSplit, unitsHiddenLayer, hiddenLayerActivation, outputLayerActivation) {
if (epochs === void 0) {
epochs = 30;
}
if (learningRate === void 0) {
learningRate = 0.3;
}
if (validationSplit === void 0) {
validationSplit = 0.2;
}
if (unitsHiddenLayer === void 0) {
unitsHiddenLayer = 50;
}
if (hiddenLayerActivation === void 0) {
hiddenLayerActivation = "relu";
}
if (outputLayerActivation === void 0) {
outputLayerActivation = "softmax";
}
var data = {};
var songsToClassify = {};
var dataInputs = [];
var labels = [];
var normalizedData = [];
var model;
loadJSON(dataset.default).then(function (jsonDataset) {
data = JSON.parse(jsonDataset);
return loadJSON(toClassify.default);
}).then(function (jsonSongs) {
songsToClassify = JSON.parse(jsonSongs);
var toClassify = ShapeData.makeUnclassifiedSongsForTensors(data, songsToClassify);
var songNames = toClassify[0];
var songFeatures = toClassify[1];
var newData = ShapeData.makeDatasetForTensors(data);
dataInputs = newData[0];
var dataOutputs = newData[1];
for (var i = 0; i < dataOutputs.length; i++) {
labels.push(labelList.indexOf(dataOutputs[i][0]));
}
normalizedData = ShapeData.normalizeData(data, dataInputs);
var xs = tf.tensor2d(normalizedData);
var labelsTensor = tf.tensor1d(labels, "int32");
var ys = tf.oneHot(labelsTensor, labelList.length);
labelsTensor.dispose();
var inputDim = ShapeData.getInputDim();
model = tf.sequential();
var hiddenLayer = tf.layers.dense({
units: unitsHiddenLayer,
activation: hiddenLayerActivation,
inputDim: inputDim
});
var outputLayer = tf.layers.dense({
units: 4,
activation: outputLayerActivation
});
model.add(hiddenLayer);
model.add(outputLayer);
var learningR = learningRate;
var myOptimizer = tf.train.sgd(learningR);
model.compile({
optimizer: myOptimizer,
loss: "categoricalCrossentropy",
metrics: ["accuracy"]
});
train(xs, ys).then(function (result) {
tf.tidy(function () {
var classifiedSongs = [];
for (var song in songFeatures) {
var toGuess = tf.tensor2d([songFeatures[song]]);
var results = model.predict(toGuess);
var argMax = results.argMax(1);
var index = argMax.dataSync()[0];
var label = labelList[index];
model.getWeights();
classifiedSongs.push({
songName: songNames[song],
label: label,
labelIndex: index
});
if (document.querySelector("#showSingleResults").checked) {
console.log("I think that " + songNames[song] + " is a " + label + " song");
}
}
if (document.querySelector("#showFinalResult").checked) {
console.log("Classified songs:", classifiedSongs);
}
});
});
}).catch(function (err) {
return console.log(err);
});
function train(xs, ys) {
return __awaiter(this, void 0, void 0, function () {
var options;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
options = {
epochs: epochs,
validationSplit: validationSplit,
shuffle: true,
callbacks: {
onTrainBegin: function onTrainBegin() {
return console.log("training start");
},
onTrainEnd: function onTrainEnd() {
return console.log("training complete");
},
onEpochEnd: function onEpochEnd(num, logs) {
if (document.querySelector("#showEpochs").checked) {
console.log("Epoch: " + num);
console.log(logs);
}
}
}
};
return [4, model.fit(xs, ys, options)];
case 1:
return [2, _a.sent()];
}
});
});
}
function makeInputs() |
function loadJSON(url) {
return new Promise(function (resolve, reject) {
var xobj = new XMLHttpRequest();
xobj.overrideMimeType("application/json");
xobj.open('GET', url, true);
xobj.onreadystatechange = function () {
if (xobj.readyState == 4 && xobj.status == 200) {
resolve(xobj.responseText);
}
};
xobj.send(null);
xobj.onerror = function () {
return reject(xobj.statusText);
};
});
}
}
},{"../data/Emotion_data.json":"data\\Emotion_data.json","../toClassify/Emotion_features.json":"toClassify\\Emotion_features.json","./ShapeData":"scripts\\ShapeData.ts"}],"node_modules\\parcel-bundler\\src\\builtins\\hmr-runtime.js":[function(require,module,exports) {
var global = arguments[3];
var OVERLAY_ID = '__parcel__error__overlay__';
var OldModule = module.bundle.Module;
function Module(moduleName) {
OldModule.call(this, moduleName);
this.hot = {
data: module.bundle.hotData,
_acceptCallbacks: [],
_disposeCallbacks: [],
accept: function (fn) {
this._acceptCallbacks.push(fn || function () {});
},
dispose: function (fn) {
this._disposeCallbacks.push(fn);
}
};
module.bundle.hotData = null;
}
module.bundle.Module = Module;
var parent = module.bundle.parent;
if ((!parent || !parent.isParcelRequire) && typeof WebSocket !== 'undefined') {
var hostname = '' || location.hostname;
var protocol = location.protocol === 'https:' ? 'wss' : 'ws';
var ws = new WebSocket(protocol + '://' + hostname + ':' + '65517' + '/');
ws.onmessage = function (event) {
var data = JSON.parse(event.data);
if (data.type === 'update') {
console.clear();
data.assets.forEach(function (asset) {
hmrApply(global.parcelRequire, asset);
});
data.assets.forEach(function (asset) {
if (!asset.isNew) {
hmrAccept(global.parcelRequire, asset.id);
}
});
}
if (data.type === 'reload') {
ws.close();
ws.onclose = function () {
location.reload();
};
}
if (data.type === 'error-resolved') {
console.log('[parcel] ✨ Error resolved');
removeErrorOverlay();
}
if (data.type === 'error') {
console.error('[parcel] 🚨 ' + data.error.message + '\n' + data.error.stack);
removeErrorOverlay();
var overlay = createErrorOverlay(data);
document.body.appendChild(overlay);
}
};
}
function removeErrorOverlay() {
var overlay = document.getElementById(OVERLAY_ID);
if (overlay) {
overlay.remove();
}
}
function createErrorOverlay(data) {
var overlay = document.createElement('div');
overlay.id = OVERLAY_ID;
// html encode message and stack trace
var message = document.createElement('div');
var stackTrace = document.createElement('pre');
message.innerText = data.error.message;
stackTrace.innerText = data.error.stack;
overlay.innerHTML = '<div style="background: black; font-size: 16px; color: white; position: fixed; height: 100%; width: 100%; top: 0px; left: 0px; padding: 30px; opacity: 0.85; font-family: Menlo, Consolas, monospace; z-index: 9999;">' + '<span style="background: red; padding: 2px 4px; border-radius: 2px;">ERROR</span>' + '<span style="top: 2px; margin-left: 5px; position: relative;">🚨</span>' + '<div style="font-size: 18px; font-weight: bold; margin-top: 20px;">' + message.innerHTML + '</div>' + '<pre>' + stackTrace.innerHTML + '</pre>' + '</div>';
return overlay;
}
function getParents(bundle, id) {
var modules = bundle.modules;
if (!modules) {
return [];
}
var parents = [];
var k, d, dep;
for (k in modules) {
for (d in modules[k][1]) {
dep = modules[k][1][d];
if (dep === id || Array.isArray(dep) && dep[dep.length - 1] === id) {
parents.push(k);
}
}
}
if (bundle.parent) {
parents = parents.concat(getParents(bundle.parent, id));
}
return parents;
}
function hmrApply(bundle, asset) {
var modules = bundle.modules;
if (!modules) {
return;
}
if (modules[asset.id] || !bundle.parent) {
var fn = new Function('require', 'module', 'exports', asset.generated.js);
asset.isNew = !modules[asset.id];
modules[asset.id] = [fn, asset.deps];
} else if (bundle.parent) {
hmrApply(bundle.parent, asset);
}
}
function hmrAccept(bundle, id) {
var modules = bundle.modules;
if (!modules) {
return;
}
if (!modules[id] && bundle.parent) {
return hmrAccept(bundle.parent, id);
}
var cached = bundle.cache[id];
bundle.hotData = {};
if (cached) {
cached.hot.data = bundle.hotData;
}
if (cached && cached.hot && cached.hot._disposeCallbacks.length) {
cached.hot._disposeCallbacks.forEach(function (cb) {
cb(bundle.hotData);
});
}
delete bundle.cache[id];
bundle(id);
cached = bundle.cache[id];
if (cached && cached.hot && cached.hot._acceptCallbacks.length) {
cached.hot._acceptCallbacks.forEach(function (cb) {
cb();
});
return true;
}
return getParents(global.parcelRequire, id).some(function (id) {
return hmrAccept(global.parcelRequire, id);
});
}
},{}]},{},["node_modules\\parcel-bundler\\src\\builtins\\hmr-runtime.js","scripts\\classifier.ts"], null)
//# sourceMappingURL=/classifier.d15b2d9b.map | {
var features = [];
for (var _i = 0, data_1 = data; _i < data_1.length; _i++) {
var singleSong = data_1[_i];
for (var _a = 0, _b = data[singleSong]; _a < _b.length; _a++) {
var singleFeature = _b[_a];
console.log(data[singleSong][singleFeature]);
}
}
} | identifier_body |
wfq.go | package wfq
import (
"container/heap"
"sync"
)
/*****************************************************************************
*
*****************************************************************************/
// An implementation of this interface is passed to NewQueue
// and used to obtain properties of items passed to Queue().
// Each method will be called only once per Queue()/item call
//
type Interface interface {
// Key returns the identity of the flow for the item.
// All items with the same key are placed in the same flow.
//
Key(item interface{}) uint64
// Size returns the size of the item.
// The value returned can be any unit but all items in a queue must be
// sized according to the same unit (e.g. bytes).
Size(item interface{}) uint64
// The weight/priority of the item. A higher value represents a higher
// priority. All items with a specific key should (but are not required to)
// have the same weight. Internally the Queue add 1 to the weight so that
// weight range is shifted from 0-255 to 1-256.
Weight(item interface{}) uint8
}
/*****************************************************************************
*
*****************************************************************************/
type heapItem struct {
fi *flowInfo
value interface{}
size uint64
weight uint8
key uint64
vft uint64
}
var hi_pool sync.Pool
func newHeapItem() interface{} {
return new(heapItem)
}
func getHeapItem() *heapItem {
return hi_pool.Get().(*heapItem)
}
func putHeapItem(hi *heapItem) {
hi.fi = nil
hi.value = nil
hi_pool.Put(hi)
}
/*****************************************************************************
*
*****************************************************************************/
type itemHeap []*heapItem
func (h *itemHeap) Len() int {
return len(*h)
}
func (h *itemHeap) Less(i, j int) bool {
return (*h)[i].vft < (*h)[j].vft
}
func (h *itemHeap) Swap(i, j int) {
(*h)[i], (*h)[j] = (*h)[j], (*h)[i]
}
func (h *itemHeap) Push(x interface{}) {
item := x.(*heapItem)
*h = append(*h, item)
}
func (h *itemHeap) Pop() interface{} {
old := *h
n := len(old)
item := old[n-1]
*h = old[0 : n-1]
old[n-1] = nil
return item
}
/*****************************************************************************
*
*****************************************************************************/
type overflowHeapItem struct {
hi *heapItem
arrord uint64
wg sync.WaitGroup
}
func (i *overflowHeapItem) less(o *overflowHeapItem) bool {
if i.hi.weight > o.hi.weight {
return true
} else if i.hi.weight == o.hi.weight && i.arrord < o.arrord {
return true
}
return false
}
var ohi_pool sync.Pool
func newOverflowHeapItem() interface{} |
func getOverflowHeapItem() *overflowHeapItem {
return ohi_pool.Get().(*overflowHeapItem)
}
func putOverflowHeapItem(ohi *overflowHeapItem) {
ohi.hi = nil
ohi_pool.Put(ohi)
}
/*****************************************************************************
*
*****************************************************************************/
type itemOverflowHeap []*overflowHeapItem
func (h *itemOverflowHeap) Len() int {
return len(*h)
}
func (h *itemOverflowHeap) Less(i, j int) bool {
return (*h)[i].less((*h)[j])
}
func (h *itemOverflowHeap) Swap(i, j int) {
(*h)[i], (*h)[j] = (*h)[j], (*h)[i]
}
func (h *itemOverflowHeap) Push(x interface{}) {
item := x.(*overflowHeapItem)
*h = append(*h, item)
}
func (h *itemOverflowHeap) Pop() interface{} {
old := *h
n := len(old)
ohi := old[n-1]
*h = old[0 : n-1]
old[n-1] = nil
return ohi
}
/*****************************************************************************
*
*****************************************************************************/
type flowInfo struct {
cond sync.Cond
last_vft uint64
size uint64
pendSize uint64
weight uint8
inv_w uint64
}
var fi_pool sync.Pool
func newFlowInfo() interface{} {
return new(flowInfo)
}
func getFlowInfo() *flowInfo {
return fi_pool.Get().(*flowInfo)
}
func putFlowInfo(fi *flowInfo) {
fi.cond.L = nil
}
/*****************************************************************************
*
*****************************************************************************/
func init() {
hi_pool.New = newHeapItem
ohi_pool.New = newOverflowHeapItem
fi_pool.New = newFlowInfo
}
/*****************************************************************************
*
*****************************************************************************/
// A queue that implements a version of the weighted fair queue algorithm.
// When all items have the same weight then each flow's throughput will be
// <total throughput>/<number of flows>.
//
// If items have different weights, then all flows with the same weight will
// share their portion of the throughput evenly.
// Each weight "class" receives a portion of the total throughput according to
// the following the formula RWi/W1 + W2 ... + WN where R = total throughput
// and W1 through WN are the weights of the individual flows.
// If the total size of all items that passed through the queue was 10,000, and
// the weights of each of 3 flows was 1, 4 and 18, then the portion of the total
// that was dedicated to each flow would be 10000*1/(1+4+18) = 435 (4.35%),
// 10000*4/(1+4+18) = 1739 (17.39%) and 10000*18/(1+4+18) = 7826 (78.26%).
//
//
//
type Queue struct {
lock sync.Mutex
cond sync.Cond
closed bool
maxQueueSize uint64
maxFlowSize uint64
helper Interface
items itemHeap
overflow itemOverflowHeap
next_ohi *overflowHeapItem
flows map[uint64]*flowInfo
ovfcnt uint64
vt uint64
size uint64
wsum uint64
inv_wsum uint64
}
const (
scaledOne uint64 = 1 << 16
)
// Create a new Queue instance.
// If maxFlowSize > maxQueueSize or if helper is nil then it will panic.
// The maxFlowSize value limits the total size of all items that can be queued in a single flow.
// The maxQueueSize value limits the total size of all items that can be in the queue.
// It is recomeneded that maxQueueSize be set to maxFlowSize*<Max # of expected flows>, and
// that maxFlowSize be at least twice the largest expected item size.
//
func NewQueue(maxQueueSize, maxFlowSize uint64, helper Interface) *Queue {
if maxFlowSize > maxQueueSize {
panic("MaxFlowSize > MaxQueueSize")
}
if helper == nil {
panic("helper is nil")
}
q := new(Queue)
q.cond.L = &q.lock
q.maxQueueSize = maxQueueSize
q.maxFlowSize = maxFlowSize
q.helper = helper
q.flows = make(map[uint64]*flowInfo)
return q
}
// Place on item on the queue. Queue will not return (i.e. block) until the item can be placed on the queue
// or the queue was closed. If Queue returns true, then DeQueue will eventually return the item.
// If Queue returns false, then the item was not placed on the queue because the queue has been closed.
// Queue will panic if the size of the item is greater then maxFlowSize (set in NewQueue).
// Queue is safe for concurrent use.
//
func (q *Queue) Queue(item interface{}) bool {
hi := getHeapItem()
hi.value = item
hi.key = q.helper.Key(item)
hi.size = q.helper.Size(item)
hi.weight = q.helper.Weight(item)
if hi.size == 0 {
panic("Item size is zero")
}
if hi.size > q.maxFlowSize {
panic("Item size is larger than MaxFlowSize")
}
q.lock.Lock()
if q.closed {
q.lock.Unlock()
return false
}
// Get the flowInfo, or add one if there is none
fi, ok := q.flows[hi.key]
if !ok {
fi = getFlowInfo()
fi.cond.L = &q.lock
fi.last_vft = q.vt
fi.weight = hi.weight + 1
fi.inv_w = scaledOne / uint64(fi.weight)
q.flows[hi.key] = fi
q.wsum += uint64(fi.weight)
q.inv_wsum = scaledOne / uint64(q.wsum)
}
hi.fi = fi
// This prevents DeQueue from deleting the flowInfo from q.flows
// while the flow is till active
fi.pendSize += hi.size
// Wait till there is room in the flow queue
for !q.closed && fi.size+hi.size > q.maxFlowSize {
fi.cond.Wait()
}
if q.closed {
q.lock.Unlock()
return false
}
// Calculate the items virtual finish time
hi.vft = fi.last_vft + hi.size*fi.inv_w
fi.last_vft = hi.vft
// Add the item's size to the flow
fi.size += hi.size
// Subtract it's size from pendSize since it is no longer pending
fi.pendSize -= hi.size
if q.size+hi.size > q.maxQueueSize {
/*
The queue is full, place our request in the overflow heap.
Unlike the main heap, the overflow heap is strictly prioritized by
weight and arrival order. A higher priority flow could completely starve out
a lower priority flow if the incoming rate of the higher priority flow exceeds
the total outgoing rate.
*/
ohi := getOverflowHeapItem()
ohi.hi = hi
ohi.arrord = q.ovfcnt
q.ovfcnt++
ohi.wg.Add(1)
if q.next_ohi == nil {
q.next_ohi = ohi
} else {
if ohi.less(q.next_ohi) {
heap.Push(&q.overflow, q.next_ohi)
q.next_ohi = ohi
} else {
heap.Push(&q.overflow, ohi)
}
}
q.lock.Unlock()
ohi.wg.Wait()
putOverflowHeapItem(ohi)
if q.closed {
return false
}
} else {
q.size += hi.size
// The queue has room, place our item in the main heap
heap.Push(&q.items, hi)
q.cond.Signal()
q.lock.Unlock()
}
return true
}
// DeQueue removes the next item from the queue. DeQueue will not return (i.e. block) until an item can
// be returned or the queue is empty and closed. DeQueue will return an item and true if an item could be
// removed from the queue or nil and false, if the queue is empty and closed.
// DeQueue is safe for concurrent use.
//
func (q *Queue) DeQueue() (interface{}, bool) {
q.lock.Lock()
defer q.lock.Unlock()
if q.closed && q.items.Len() == 0 {
return nil, false
}
for !q.closed && q.items.Len() == 0 {
q.cond.Wait()
}
if q.closed && q.items.Len() == 0 {
return nil, false
}
hi := heap.Pop(&q.items).(*heapItem)
item := hi.value
q.vt += hi.size * q.inv_wsum
hi.fi.size -= hi.size
q.size -= hi.size
if hi.fi.size == 0 && hi.fi.pendSize == 0 {
// The flow is empty (i.e. inactive), delete it
delete(q.flows, hi.key)
q.wsum += uint64(hi.fi.weight)
q.inv_wsum = scaledOne / uint64(q.wsum)
putFlowInfo(hi.fi)
putHeapItem(hi)
} else {
hi.fi.cond.Signal()
putHeapItem(hi)
}
if !q.closed {
// While there is room in the queue move items from the overflow to the main heap.
for q.next_ohi != nil && q.size+q.next_ohi.hi.size <= q.maxQueueSize {
q.size += q.next_ohi.hi.size
heap.Push(&q.items, q.next_ohi.hi)
q.next_ohi.wg.Done()
if q.overflow.Len() > 0 {
q.next_ohi = heap.Pop(&q.overflow).(*overflowHeapItem)
} else {
q.next_ohi = nil
}
}
}
return item, true
}
func (q *Queue) Close() {
q.lock.Lock()
defer q.lock.Unlock()
q.closed = true
// All overflow requests get flushed
for q.next_ohi != nil {
q.next_ohi.wg.Done()
q.next_ohi = q.overflow.Pop().(*overflowHeapItem)
}
// Wake up all those waiting to get into a flow queue
for _, fi := range q.flows {
fi.cond.Broadcast()
}
// Wake up all DeQueue'ers
q.cond.Broadcast()
}
| {
return new(overflowHeapItem)
} | identifier_body |
wfq.go | package wfq
import (
"container/heap"
"sync"
)
/*****************************************************************************
*
*****************************************************************************/
// An implementation of this interface is passed to NewQueue
// and used to obtain properties of items passed to Queue().
// Each method will be called only once per Queue()/item call
//
type Interface interface {
// Key returns the identity of the flow for the item.
// All items with the same key are placed in the same flow.
//
Key(item interface{}) uint64
// Size returns the size of the item.
// The value returned can be any unit but all items in a queue must be
// sized according to the same unit (e.g. bytes).
Size(item interface{}) uint64
// The weight/priority of the item. A higher value represents a higher
// priority. All items with a specific key should (but are not required to)
// have the same weight. Internally the Queue add 1 to the weight so that
// weight range is shifted from 0-255 to 1-256.
Weight(item interface{}) uint8
}
/*****************************************************************************
*
*****************************************************************************/
type heapItem struct {
fi *flowInfo
value interface{}
size uint64
weight uint8
key uint64
vft uint64
}
var hi_pool sync.Pool
func newHeapItem() interface{} {
return new(heapItem)
}
func getHeapItem() *heapItem {
return hi_pool.Get().(*heapItem)
}
func putHeapItem(hi *heapItem) {
hi.fi = nil
hi.value = nil
hi_pool.Put(hi)
}
/*****************************************************************************
*
*****************************************************************************/
type itemHeap []*heapItem
func (h *itemHeap) Len() int {
return len(*h)
}
func (h *itemHeap) Less(i, j int) bool {
return (*h)[i].vft < (*h)[j].vft
}
func (h *itemHeap) Swap(i, j int) {
(*h)[i], (*h)[j] = (*h)[j], (*h)[i]
}
func (h *itemHeap) Push(x interface{}) {
item := x.(*heapItem)
*h = append(*h, item)
}
func (h *itemHeap) Pop() interface{} {
old := *h
n := len(old)
item := old[n-1]
*h = old[0 : n-1]
old[n-1] = nil
return item
}
/*****************************************************************************
*
*****************************************************************************/
type overflowHeapItem struct {
hi *heapItem
arrord uint64
wg sync.WaitGroup
}
func (i *overflowHeapItem) | (o *overflowHeapItem) bool {
if i.hi.weight > o.hi.weight {
return true
} else if i.hi.weight == o.hi.weight && i.arrord < o.arrord {
return true
}
return false
}
var ohi_pool sync.Pool
func newOverflowHeapItem() interface{} {
return new(overflowHeapItem)
}
func getOverflowHeapItem() *overflowHeapItem {
return ohi_pool.Get().(*overflowHeapItem)
}
func putOverflowHeapItem(ohi *overflowHeapItem) {
ohi.hi = nil
ohi_pool.Put(ohi)
}
/*****************************************************************************
*
*****************************************************************************/
type itemOverflowHeap []*overflowHeapItem
func (h *itemOverflowHeap) Len() int {
return len(*h)
}
func (h *itemOverflowHeap) Less(i, j int) bool {
return (*h)[i].less((*h)[j])
}
func (h *itemOverflowHeap) Swap(i, j int) {
(*h)[i], (*h)[j] = (*h)[j], (*h)[i]
}
func (h *itemOverflowHeap) Push(x interface{}) {
item := x.(*overflowHeapItem)
*h = append(*h, item)
}
func (h *itemOverflowHeap) Pop() interface{} {
old := *h
n := len(old)
ohi := old[n-1]
*h = old[0 : n-1]
old[n-1] = nil
return ohi
}
/*****************************************************************************
*
*****************************************************************************/
type flowInfo struct {
cond sync.Cond
last_vft uint64
size uint64
pendSize uint64
weight uint8
inv_w uint64
}
var fi_pool sync.Pool
func newFlowInfo() interface{} {
return new(flowInfo)
}
func getFlowInfo() *flowInfo {
return fi_pool.Get().(*flowInfo)
}
func putFlowInfo(fi *flowInfo) {
fi.cond.L = nil
}
/*****************************************************************************
*
*****************************************************************************/
func init() {
hi_pool.New = newHeapItem
ohi_pool.New = newOverflowHeapItem
fi_pool.New = newFlowInfo
}
/*****************************************************************************
*
*****************************************************************************/
// A queue that implements a version of the weighted fair queue algorithm.
// When all items have the same weight then each flow's throughput will be
// <total throughput>/<number of flows>.
//
// If items have different weights, then all flows with the same weight will
// share their portion of the throughput evenly.
// Each weight "class" receives a portion of the total throughput according to
// the following the formula RWi/W1 + W2 ... + WN where R = total throughput
// and W1 through WN are the weights of the individual flows.
// If the total size of all items that passed through the queue was 10,000, and
// the weights of each of 3 flows was 1, 4 and 18, then the portion of the total
// that was dedicated to each flow would be 10000*1/(1+4+18) = 435 (4.35%),
// 10000*4/(1+4+18) = 1739 (17.39%) and 10000*18/(1+4+18) = 7826 (78.26%).
//
//
//
type Queue struct {
lock sync.Mutex
cond sync.Cond
closed bool
maxQueueSize uint64
maxFlowSize uint64
helper Interface
items itemHeap
overflow itemOverflowHeap
next_ohi *overflowHeapItem
flows map[uint64]*flowInfo
ovfcnt uint64
vt uint64
size uint64
wsum uint64
inv_wsum uint64
}
const (
scaledOne uint64 = 1 << 16
)
// Create a new Queue instance.
// If maxFlowSize > maxQueueSize or if helper is nil then it will panic.
// The maxFlowSize value limits the total size of all items that can be queued in a single flow.
// The maxQueueSize value limits the total size of all items that can be in the queue.
// It is recomeneded that maxQueueSize be set to maxFlowSize*<Max # of expected flows>, and
// that maxFlowSize be at least twice the largest expected item size.
//
func NewQueue(maxQueueSize, maxFlowSize uint64, helper Interface) *Queue {
if maxFlowSize > maxQueueSize {
panic("MaxFlowSize > MaxQueueSize")
}
if helper == nil {
panic("helper is nil")
}
q := new(Queue)
q.cond.L = &q.lock
q.maxQueueSize = maxQueueSize
q.maxFlowSize = maxFlowSize
q.helper = helper
q.flows = make(map[uint64]*flowInfo)
return q
}
// Place on item on the queue. Queue will not return (i.e. block) until the item can be placed on the queue
// or the queue was closed. If Queue returns true, then DeQueue will eventually return the item.
// If Queue returns false, then the item was not placed on the queue because the queue has been closed.
// Queue will panic if the size of the item is greater then maxFlowSize (set in NewQueue).
// Queue is safe for concurrent use.
//
func (q *Queue) Queue(item interface{}) bool {
hi := getHeapItem()
hi.value = item
hi.key = q.helper.Key(item)
hi.size = q.helper.Size(item)
hi.weight = q.helper.Weight(item)
if hi.size == 0 {
panic("Item size is zero")
}
if hi.size > q.maxFlowSize {
panic("Item size is larger than MaxFlowSize")
}
q.lock.Lock()
if q.closed {
q.lock.Unlock()
return false
}
// Get the flowInfo, or add one if there is none
fi, ok := q.flows[hi.key]
if !ok {
fi = getFlowInfo()
fi.cond.L = &q.lock
fi.last_vft = q.vt
fi.weight = hi.weight + 1
fi.inv_w = scaledOne / uint64(fi.weight)
q.flows[hi.key] = fi
q.wsum += uint64(fi.weight)
q.inv_wsum = scaledOne / uint64(q.wsum)
}
hi.fi = fi
// This prevents DeQueue from deleting the flowInfo from q.flows
// while the flow is till active
fi.pendSize += hi.size
// Wait till there is room in the flow queue
for !q.closed && fi.size+hi.size > q.maxFlowSize {
fi.cond.Wait()
}
if q.closed {
q.lock.Unlock()
return false
}
// Calculate the items virtual finish time
hi.vft = fi.last_vft + hi.size*fi.inv_w
fi.last_vft = hi.vft
// Add the item's size to the flow
fi.size += hi.size
// Subtract it's size from pendSize since it is no longer pending
fi.pendSize -= hi.size
if q.size+hi.size > q.maxQueueSize {
/*
The queue is full, place our request in the overflow heap.
Unlike the main heap, the overflow heap is strictly prioritized by
weight and arrival order. A higher priority flow could completely starve out
a lower priority flow if the incoming rate of the higher priority flow exceeds
the total outgoing rate.
*/
ohi := getOverflowHeapItem()
ohi.hi = hi
ohi.arrord = q.ovfcnt
q.ovfcnt++
ohi.wg.Add(1)
if q.next_ohi == nil {
q.next_ohi = ohi
} else {
if ohi.less(q.next_ohi) {
heap.Push(&q.overflow, q.next_ohi)
q.next_ohi = ohi
} else {
heap.Push(&q.overflow, ohi)
}
}
q.lock.Unlock()
ohi.wg.Wait()
putOverflowHeapItem(ohi)
if q.closed {
return false
}
} else {
q.size += hi.size
// The queue has room, place our item in the main heap
heap.Push(&q.items, hi)
q.cond.Signal()
q.lock.Unlock()
}
return true
}
// DeQueue removes the next item from the queue. DeQueue will not return (i.e. block) until an item can
// be returned or the queue is empty and closed. DeQueue will return an item and true if an item could be
// removed from the queue or nil and false, if the queue is empty and closed.
// DeQueue is safe for concurrent use.
//
func (q *Queue) DeQueue() (interface{}, bool) {
q.lock.Lock()
defer q.lock.Unlock()
if q.closed && q.items.Len() == 0 {
return nil, false
}
for !q.closed && q.items.Len() == 0 {
q.cond.Wait()
}
if q.closed && q.items.Len() == 0 {
return nil, false
}
hi := heap.Pop(&q.items).(*heapItem)
item := hi.value
q.vt += hi.size * q.inv_wsum
hi.fi.size -= hi.size
q.size -= hi.size
if hi.fi.size == 0 && hi.fi.pendSize == 0 {
// The flow is empty (i.e. inactive), delete it
delete(q.flows, hi.key)
q.wsum += uint64(hi.fi.weight)
q.inv_wsum = scaledOne / uint64(q.wsum)
putFlowInfo(hi.fi)
putHeapItem(hi)
} else {
hi.fi.cond.Signal()
putHeapItem(hi)
}
if !q.closed {
// While there is room in the queue move items from the overflow to the main heap.
for q.next_ohi != nil && q.size+q.next_ohi.hi.size <= q.maxQueueSize {
q.size += q.next_ohi.hi.size
heap.Push(&q.items, q.next_ohi.hi)
q.next_ohi.wg.Done()
if q.overflow.Len() > 0 {
q.next_ohi = heap.Pop(&q.overflow).(*overflowHeapItem)
} else {
q.next_ohi = nil
}
}
}
return item, true
}
func (q *Queue) Close() {
q.lock.Lock()
defer q.lock.Unlock()
q.closed = true
// All overflow requests get flushed
for q.next_ohi != nil {
q.next_ohi.wg.Done()
q.next_ohi = q.overflow.Pop().(*overflowHeapItem)
}
// Wake up all those waiting to get into a flow queue
for _, fi := range q.flows {
fi.cond.Broadcast()
}
// Wake up all DeQueue'ers
q.cond.Broadcast()
}
| less | identifier_name |
wfq.go | package wfq
import (
"container/heap"
"sync"
)
/*****************************************************************************
*
*****************************************************************************/
// An implementation of this interface is passed to NewQueue
// and used to obtain properties of items passed to Queue().
// Each method will be called only once per Queue()/item call
//
type Interface interface {
// Key returns the identity of the flow for the item.
// All items with the same key are placed in the same flow.
//
Key(item interface{}) uint64
// Size returns the size of the item.
// The value returned can be any unit but all items in a queue must be
// sized according to the same unit (e.g. bytes).
Size(item interface{}) uint64
// The weight/priority of the item. A higher value represents a higher
// priority. All items with a specific key should (but are not required to)
// have the same weight. Internally the Queue add 1 to the weight so that
// weight range is shifted from 0-255 to 1-256.
Weight(item interface{}) uint8
}
/*****************************************************************************
*
*****************************************************************************/
type heapItem struct {
fi *flowInfo
value interface{}
size uint64
weight uint8
key uint64
vft uint64
}
var hi_pool sync.Pool
func newHeapItem() interface{} {
return new(heapItem)
}
func getHeapItem() *heapItem {
return hi_pool.Get().(*heapItem)
}
func putHeapItem(hi *heapItem) {
hi.fi = nil
hi.value = nil
hi_pool.Put(hi)
}
/*****************************************************************************
*
*****************************************************************************/
type itemHeap []*heapItem
func (h *itemHeap) Len() int {
return len(*h)
}
func (h *itemHeap) Less(i, j int) bool {
return (*h)[i].vft < (*h)[j].vft
}
func (h *itemHeap) Swap(i, j int) {
(*h)[i], (*h)[j] = (*h)[j], (*h)[i]
}
func (h *itemHeap) Push(x interface{}) {
item := x.(*heapItem)
*h = append(*h, item)
}
func (h *itemHeap) Pop() interface{} {
old := *h
n := len(old)
item := old[n-1]
*h = old[0 : n-1]
old[n-1] = nil
return item
}
/*****************************************************************************
*
*****************************************************************************/
type overflowHeapItem struct {
hi *heapItem
arrord uint64
wg sync.WaitGroup
}
func (i *overflowHeapItem) less(o *overflowHeapItem) bool {
if i.hi.weight > o.hi.weight {
return true
} else if i.hi.weight == o.hi.weight && i.arrord < o.arrord {
return true
}
return false
}
var ohi_pool sync.Pool
func newOverflowHeapItem() interface{} {
return new(overflowHeapItem)
}
func getOverflowHeapItem() *overflowHeapItem {
return ohi_pool.Get().(*overflowHeapItem)
}
func putOverflowHeapItem(ohi *overflowHeapItem) {
ohi.hi = nil
ohi_pool.Put(ohi)
}
/*****************************************************************************
*
*****************************************************************************/
type itemOverflowHeap []*overflowHeapItem
func (h *itemOverflowHeap) Len() int {
return len(*h)
}
func (h *itemOverflowHeap) Less(i, j int) bool {
return (*h)[i].less((*h)[j])
}
func (h *itemOverflowHeap) Swap(i, j int) {
(*h)[i], (*h)[j] = (*h)[j], (*h)[i]
}
func (h *itemOverflowHeap) Push(x interface{}) {
item := x.(*overflowHeapItem)
*h = append(*h, item)
}
func (h *itemOverflowHeap) Pop() interface{} {
old := *h
n := len(old)
ohi := old[n-1]
*h = old[0 : n-1]
old[n-1] = nil
return ohi
}
/*****************************************************************************
*
*****************************************************************************/
type flowInfo struct {
cond sync.Cond
last_vft uint64
size uint64
pendSize uint64
weight uint8
inv_w uint64
}
var fi_pool sync.Pool
func newFlowInfo() interface{} {
return new(flowInfo)
}
func getFlowInfo() *flowInfo {
return fi_pool.Get().(*flowInfo)
}
func putFlowInfo(fi *flowInfo) {
fi.cond.L = nil
}
/*****************************************************************************
*
*****************************************************************************/
func init() {
hi_pool.New = newHeapItem
ohi_pool.New = newOverflowHeapItem
fi_pool.New = newFlowInfo
}
/*****************************************************************************
*
*****************************************************************************/
// A queue that implements a version of the weighted fair queue algorithm.
// When all items have the same weight then each flow's throughput will be
// <total throughput>/<number of flows>.
//
// If items have different weights, then all flows with the same weight will
// share their portion of the throughput evenly.
// Each weight "class" receives a portion of the total throughput according to
// the following the formula RWi/W1 + W2 ... + WN where R = total throughput
// and W1 through WN are the weights of the individual flows.
// If the total size of all items that passed through the queue was 10,000, and
// the weights of each of 3 flows was 1, 4 and 18, then the portion of the total
// that was dedicated to each flow would be 10000*1/(1+4+18) = 435 (4.35%),
// 10000*4/(1+4+18) = 1739 (17.39%) and 10000*18/(1+4+18) = 7826 (78.26%).
//
//
//
type Queue struct {
lock sync.Mutex
cond sync.Cond
closed bool
maxQueueSize uint64
maxFlowSize uint64
helper Interface
items itemHeap
overflow itemOverflowHeap
next_ohi *overflowHeapItem
flows map[uint64]*flowInfo
ovfcnt uint64
vt uint64
size uint64
wsum uint64
inv_wsum uint64
}
const (
scaledOne uint64 = 1 << 16
)
// Create a new Queue instance.
// If maxFlowSize > maxQueueSize or if helper is nil then it will panic.
// The maxFlowSize value limits the total size of all items that can be queued in a single flow.
// The maxQueueSize value limits the total size of all items that can be in the queue.
// It is recomeneded that maxQueueSize be set to maxFlowSize*<Max # of expected flows>, and
// that maxFlowSize be at least twice the largest expected item size.
//
func NewQueue(maxQueueSize, maxFlowSize uint64, helper Interface) *Queue {
if maxFlowSize > maxQueueSize {
panic("MaxFlowSize > MaxQueueSize")
}
if helper == nil {
panic("helper is nil")
}
q := new(Queue)
q.cond.L = &q.lock
q.maxQueueSize = maxQueueSize
q.maxFlowSize = maxFlowSize
q.helper = helper
q.flows = make(map[uint64]*flowInfo)
return q
}
// Place on item on the queue. Queue will not return (i.e. block) until the item can be placed on the queue
// or the queue was closed. If Queue returns true, then DeQueue will eventually return the item.
// If Queue returns false, then the item was not placed on the queue because the queue has been closed.
// Queue will panic if the size of the item is greater then maxFlowSize (set in NewQueue).
// Queue is safe for concurrent use.
//
func (q *Queue) Queue(item interface{}) bool {
hi := getHeapItem()
hi.value = item
hi.key = q.helper.Key(item)
hi.size = q.helper.Size(item)
hi.weight = q.helper.Weight(item)
if hi.size == 0 {
panic("Item size is zero")
}
if hi.size > q.maxFlowSize {
panic("Item size is larger than MaxFlowSize")
}
q.lock.Lock()
if q.closed {
q.lock.Unlock()
return false
}
// Get the flowInfo, or add one if there is none
fi, ok := q.flows[hi.key]
if !ok {
fi = getFlowInfo()
fi.cond.L = &q.lock
fi.last_vft = q.vt
fi.weight = hi.weight + 1
fi.inv_w = scaledOne / uint64(fi.weight)
q.flows[hi.key] = fi
q.wsum += uint64(fi.weight)
q.inv_wsum = scaledOne / uint64(q.wsum)
}
hi.fi = fi
// This prevents DeQueue from deleting the flowInfo from q.flows
// while the flow is till active
fi.pendSize += hi.size
// Wait till there is room in the flow queue
for !q.closed && fi.size+hi.size > q.maxFlowSize {
fi.cond.Wait()
}
if q.closed {
q.lock.Unlock()
return false
}
// Calculate the items virtual finish time
hi.vft = fi.last_vft + hi.size*fi.inv_w
fi.last_vft = hi.vft
// Add the item's size to the flow
fi.size += hi.size
// Subtract it's size from pendSize since it is no longer pending
fi.pendSize -= hi.size
if q.size+hi.size > q.maxQueueSize {
/*
The queue is full, place our request in the overflow heap.
Unlike the main heap, the overflow heap is strictly prioritized by
weight and arrival order. A higher priority flow could completely starve out
a lower priority flow if the incoming rate of the higher priority flow exceeds
the total outgoing rate.
*/
ohi := getOverflowHeapItem()
ohi.hi = hi
ohi.arrord = q.ovfcnt
q.ovfcnt++
ohi.wg.Add(1)
if q.next_ohi == nil {
q.next_ohi = ohi
} else {
if ohi.less(q.next_ohi) {
heap.Push(&q.overflow, q.next_ohi)
q.next_ohi = ohi
} else {
heap.Push(&q.overflow, ohi)
}
}
q.lock.Unlock()
ohi.wg.Wait()
putOverflowHeapItem(ohi)
if q.closed |
} else {
q.size += hi.size
// The queue has room, place our item in the main heap
heap.Push(&q.items, hi)
q.cond.Signal()
q.lock.Unlock()
}
return true
}
// DeQueue removes the next item from the queue. DeQueue will not return (i.e. block) until an item can
// be returned or the queue is empty and closed. DeQueue will return an item and true if an item could be
// removed from the queue or nil and false, if the queue is empty and closed.
// DeQueue is safe for concurrent use.
//
func (q *Queue) DeQueue() (interface{}, bool) {
q.lock.Lock()
defer q.lock.Unlock()
if q.closed && q.items.Len() == 0 {
return nil, false
}
for !q.closed && q.items.Len() == 0 {
q.cond.Wait()
}
if q.closed && q.items.Len() == 0 {
return nil, false
}
hi := heap.Pop(&q.items).(*heapItem)
item := hi.value
q.vt += hi.size * q.inv_wsum
hi.fi.size -= hi.size
q.size -= hi.size
if hi.fi.size == 0 && hi.fi.pendSize == 0 {
// The flow is empty (i.e. inactive), delete it
delete(q.flows, hi.key)
q.wsum += uint64(hi.fi.weight)
q.inv_wsum = scaledOne / uint64(q.wsum)
putFlowInfo(hi.fi)
putHeapItem(hi)
} else {
hi.fi.cond.Signal()
putHeapItem(hi)
}
if !q.closed {
// While there is room in the queue move items from the overflow to the main heap.
for q.next_ohi != nil && q.size+q.next_ohi.hi.size <= q.maxQueueSize {
q.size += q.next_ohi.hi.size
heap.Push(&q.items, q.next_ohi.hi)
q.next_ohi.wg.Done()
if q.overflow.Len() > 0 {
q.next_ohi = heap.Pop(&q.overflow).(*overflowHeapItem)
} else {
q.next_ohi = nil
}
}
}
return item, true
}
func (q *Queue) Close() {
q.lock.Lock()
defer q.lock.Unlock()
q.closed = true
// All overflow requests get flushed
for q.next_ohi != nil {
q.next_ohi.wg.Done()
q.next_ohi = q.overflow.Pop().(*overflowHeapItem)
}
// Wake up all those waiting to get into a flow queue
for _, fi := range q.flows {
fi.cond.Broadcast()
}
// Wake up all DeQueue'ers
q.cond.Broadcast()
}
| {
return false
} | conditional_block |
wfq.go | package wfq
import (
"container/heap"
"sync"
)
/*****************************************************************************
*
*****************************************************************************/
// An implementation of this interface is passed to NewQueue
// and used to obtain properties of items passed to Queue().
// Each method will be called only once per Queue()/item call
//
type Interface interface {
// Key returns the identity of the flow for the item.
// All items with the same key are placed in the same flow.
//
Key(item interface{}) uint64
// Size returns the size of the item.
// The value returned can be any unit but all items in a queue must be
// sized according to the same unit (e.g. bytes).
Size(item interface{}) uint64
// The weight/priority of the item. A higher value represents a higher
// priority. All items with a specific key should (but are not required to)
// have the same weight. Internally the Queue add 1 to the weight so that
// weight range is shifted from 0-255 to 1-256.
Weight(item interface{}) uint8
}
/*****************************************************************************
*
*****************************************************************************/
type heapItem struct {
fi *flowInfo
value interface{}
size uint64
weight uint8
key uint64
vft uint64
}
var hi_pool sync.Pool
func newHeapItem() interface{} {
return new(heapItem)
}
func getHeapItem() *heapItem {
return hi_pool.Get().(*heapItem)
}
func putHeapItem(hi *heapItem) {
hi.fi = nil
hi.value = nil
hi_pool.Put(hi)
}
/*****************************************************************************
*
*****************************************************************************/
type itemHeap []*heapItem
func (h *itemHeap) Len() int {
return len(*h)
}
func (h *itemHeap) Less(i, j int) bool {
return (*h)[i].vft < (*h)[j].vft
}
func (h *itemHeap) Swap(i, j int) {
(*h)[i], (*h)[j] = (*h)[j], (*h)[i]
}
func (h *itemHeap) Push(x interface{}) {
item := x.(*heapItem)
*h = append(*h, item)
}
func (h *itemHeap) Pop() interface{} {
old := *h
n := len(old)
item := old[n-1]
*h = old[0 : n-1]
old[n-1] = nil
return item
}
/*****************************************************************************
*
*****************************************************************************/
type overflowHeapItem struct {
hi *heapItem
arrord uint64
wg sync.WaitGroup
}
func (i *overflowHeapItem) less(o *overflowHeapItem) bool {
if i.hi.weight > o.hi.weight {
return true
} else if i.hi.weight == o.hi.weight && i.arrord < o.arrord {
return true
}
return false
}
var ohi_pool sync.Pool
func newOverflowHeapItem() interface{} {
return new(overflowHeapItem)
}
func getOverflowHeapItem() *overflowHeapItem {
return ohi_pool.Get().(*overflowHeapItem)
}
func putOverflowHeapItem(ohi *overflowHeapItem) {
ohi.hi = nil
ohi_pool.Put(ohi)
}
/*****************************************************************************
*
*****************************************************************************/
type itemOverflowHeap []*overflowHeapItem
func (h *itemOverflowHeap) Len() int {
return len(*h)
}
func (h *itemOverflowHeap) Less(i, j int) bool {
return (*h)[i].less((*h)[j])
}
func (h *itemOverflowHeap) Swap(i, j int) {
(*h)[i], (*h)[j] = (*h)[j], (*h)[i]
}
func (h *itemOverflowHeap) Push(x interface{}) {
item := x.(*overflowHeapItem)
*h = append(*h, item)
}
func (h *itemOverflowHeap) Pop() interface{} {
old := *h
n := len(old)
ohi := old[n-1]
*h = old[0 : n-1]
old[n-1] = nil
return ohi
}
/*****************************************************************************
*
*****************************************************************************/
type flowInfo struct {
cond sync.Cond
last_vft uint64
size uint64
pendSize uint64
weight uint8
inv_w uint64
}
var fi_pool sync.Pool
func newFlowInfo() interface{} {
return new(flowInfo)
}
func getFlowInfo() *flowInfo {
return fi_pool.Get().(*flowInfo)
}
func putFlowInfo(fi *flowInfo) {
fi.cond.L = nil
}
/*****************************************************************************
*
*****************************************************************************/
func init() {
hi_pool.New = newHeapItem
ohi_pool.New = newOverflowHeapItem
fi_pool.New = newFlowInfo
}
/*****************************************************************************
*
*****************************************************************************/
// A queue that implements a version of the weighted fair queue algorithm.
// When all items have the same weight then each flow's throughput will be
// <total throughput>/<number of flows>.
//
// If items have different weights, then all flows with the same weight will
// share their portion of the throughput evenly.
// Each weight "class" receives a portion of the total throughput according to
// the following the formula RWi/W1 + W2 ... + WN where R = total throughput
// and W1 through WN are the weights of the individual flows.
// If the total size of all items that passed through the queue was 10,000, and
// the weights of each of 3 flows was 1, 4 and 18, then the portion of the total
// that was dedicated to each flow would be 10000*1/(1+4+18) = 435 (4.35%),
// 10000*4/(1+4+18) = 1739 (17.39%) and 10000*18/(1+4+18) = 7826 (78.26%).
//
//
//
type Queue struct {
lock sync.Mutex
cond sync.Cond
closed bool
maxQueueSize uint64
maxFlowSize uint64
helper Interface
items itemHeap
overflow itemOverflowHeap
next_ohi *overflowHeapItem
flows map[uint64]*flowInfo
ovfcnt uint64
vt uint64
size uint64
wsum uint64
inv_wsum uint64
}
const (
scaledOne uint64 = 1 << 16
)
// Create a new Queue instance.
// If maxFlowSize > maxQueueSize or if helper is nil then it will panic.
// The maxFlowSize value limits the total size of all items that can be queued in a single flow.
// The maxQueueSize value limits the total size of all items that can be in the queue.
// It is recomeneded that maxQueueSize be set to maxFlowSize*<Max # of expected flows>, and
// that maxFlowSize be at least twice the largest expected item size.
//
func NewQueue(maxQueueSize, maxFlowSize uint64, helper Interface) *Queue {
if maxFlowSize > maxQueueSize {
panic("MaxFlowSize > MaxQueueSize")
}
if helper == nil {
panic("helper is nil")
}
q := new(Queue)
q.cond.L = &q.lock
q.maxQueueSize = maxQueueSize
q.maxFlowSize = maxFlowSize
q.helper = helper
q.flows = make(map[uint64]*flowInfo)
return q | // Queue will panic if the size of the item is greater then maxFlowSize (set in NewQueue).
// Queue is safe for concurrent use.
//
func (q *Queue) Queue(item interface{}) bool {
hi := getHeapItem()
hi.value = item
hi.key = q.helper.Key(item)
hi.size = q.helper.Size(item)
hi.weight = q.helper.Weight(item)
if hi.size == 0 {
panic("Item size is zero")
}
if hi.size > q.maxFlowSize {
panic("Item size is larger than MaxFlowSize")
}
q.lock.Lock()
if q.closed {
q.lock.Unlock()
return false
}
// Get the flowInfo, or add one if there is none
fi, ok := q.flows[hi.key]
if !ok {
fi = getFlowInfo()
fi.cond.L = &q.lock
fi.last_vft = q.vt
fi.weight = hi.weight + 1
fi.inv_w = scaledOne / uint64(fi.weight)
q.flows[hi.key] = fi
q.wsum += uint64(fi.weight)
q.inv_wsum = scaledOne / uint64(q.wsum)
}
hi.fi = fi
// This prevents DeQueue from deleting the flowInfo from q.flows
// while the flow is till active
fi.pendSize += hi.size
// Wait till there is room in the flow queue
for !q.closed && fi.size+hi.size > q.maxFlowSize {
fi.cond.Wait()
}
if q.closed {
q.lock.Unlock()
return false
}
// Calculate the items virtual finish time
hi.vft = fi.last_vft + hi.size*fi.inv_w
fi.last_vft = hi.vft
// Add the item's size to the flow
fi.size += hi.size
// Subtract it's size from pendSize since it is no longer pending
fi.pendSize -= hi.size
if q.size+hi.size > q.maxQueueSize {
/*
The queue is full, place our request in the overflow heap.
Unlike the main heap, the overflow heap is strictly prioritized by
weight and arrival order. A higher priority flow could completely starve out
a lower priority flow if the incoming rate of the higher priority flow exceeds
the total outgoing rate.
*/
ohi := getOverflowHeapItem()
ohi.hi = hi
ohi.arrord = q.ovfcnt
q.ovfcnt++
ohi.wg.Add(1)
if q.next_ohi == nil {
q.next_ohi = ohi
} else {
if ohi.less(q.next_ohi) {
heap.Push(&q.overflow, q.next_ohi)
q.next_ohi = ohi
} else {
heap.Push(&q.overflow, ohi)
}
}
q.lock.Unlock()
ohi.wg.Wait()
putOverflowHeapItem(ohi)
if q.closed {
return false
}
} else {
q.size += hi.size
// The queue has room, place our item in the main heap
heap.Push(&q.items, hi)
q.cond.Signal()
q.lock.Unlock()
}
return true
}
// DeQueue removes the next item from the queue. DeQueue will not return (i.e. block) until an item can
// be returned or the queue is empty and closed. DeQueue will return an item and true if an item could be
// removed from the queue or nil and false, if the queue is empty and closed.
// DeQueue is safe for concurrent use.
//
func (q *Queue) DeQueue() (interface{}, bool) {
q.lock.Lock()
defer q.lock.Unlock()
if q.closed && q.items.Len() == 0 {
return nil, false
}
for !q.closed && q.items.Len() == 0 {
q.cond.Wait()
}
if q.closed && q.items.Len() == 0 {
return nil, false
}
hi := heap.Pop(&q.items).(*heapItem)
item := hi.value
q.vt += hi.size * q.inv_wsum
hi.fi.size -= hi.size
q.size -= hi.size
if hi.fi.size == 0 && hi.fi.pendSize == 0 {
// The flow is empty (i.e. inactive), delete it
delete(q.flows, hi.key)
q.wsum += uint64(hi.fi.weight)
q.inv_wsum = scaledOne / uint64(q.wsum)
putFlowInfo(hi.fi)
putHeapItem(hi)
} else {
hi.fi.cond.Signal()
putHeapItem(hi)
}
if !q.closed {
// While there is room in the queue move items from the overflow to the main heap.
for q.next_ohi != nil && q.size+q.next_ohi.hi.size <= q.maxQueueSize {
q.size += q.next_ohi.hi.size
heap.Push(&q.items, q.next_ohi.hi)
q.next_ohi.wg.Done()
if q.overflow.Len() > 0 {
q.next_ohi = heap.Pop(&q.overflow).(*overflowHeapItem)
} else {
q.next_ohi = nil
}
}
}
return item, true
}
func (q *Queue) Close() {
q.lock.Lock()
defer q.lock.Unlock()
q.closed = true
// All overflow requests get flushed
for q.next_ohi != nil {
q.next_ohi.wg.Done()
q.next_ohi = q.overflow.Pop().(*overflowHeapItem)
}
// Wake up all those waiting to get into a flow queue
for _, fi := range q.flows {
fi.cond.Broadcast()
}
// Wake up all DeQueue'ers
q.cond.Broadcast()
} | }
// Place on item on the queue. Queue will not return (i.e. block) until the item can be placed on the queue
// or the queue was closed. If Queue returns true, then DeQueue will eventually return the item.
// If Queue returns false, then the item was not placed on the queue because the queue has been closed. | random_line_split |
mod.rs | // Import the rendering contract
use crate::traits::RendererBase;
use crate::traits::UIElement;
use crate::traits::UIEvent;
// We make use of the arguments of the event loop
use piston::input::{UpdateArgs, RenderArgs, Key};
// Used to send events to the application
use std::sync::mpsc;
// Use the GlGraphics backend
use opengl_graphics::GlGraphics;
use graphics::character::CharacterCache;
// Import drawing helper functions
use graphics::{Context, rectangle, text, line, Transformed};
// Font imports
use opengl_graphics::GlyphCache;
use opengl_graphics::TextureSettings;
// Needed to satisfy the trait
use crate::audio::AnalyzedAudio;
// Import UI elements
mod dropdown;
use dropdown::UIDropdown;
mod util;
use util::{cursor_in_rect, format_number, find_font};
// Needed for the timeout
use std::time;
// Needed to resolve the asset path
use std::env::current_dir;
use std::path::Path;
// Necessary to retrieve a list of available input devices.
use crate::audio::util::{fetch_devices, AudioDevice};
static AUDIO_IO_ID: usize = 1;
static RENDERER_ID: usize = 2;
pub struct UI<'a> {
width: u32,
height: u32,
last_cursor_x: f64,
last_cursor_y: f64,
cursor_over_window: bool,
should_display_ui: bool,
menu_display_time: u64, // in seconds
mouse_last_moved: time::Instant,
ui_opacity: f64,
target_opacity: f64,
ui_font: graphics::glyph_cache::rusttype::GlyphCache<'a, (), opengl_graphics::Texture>,
// Displayable settings
available_devices: Vec<AudioDevice>,
available_renderers: Vec<String>,
ui_elements: Vec<Box<dyn UIElement>>,
selected_device: usize,
selected_renderer: usize,
event_sender: Option<mpsc::Sender<UIEvent>>,
device_info: String,
base_font_size: f64,
font_path: String,
input_selector_button_rect: [f64; 4],
renderer_selector_button_rect: [f64; 4],
input_selector_index: i32,
renderer_selector_index: i32,
min_amp: f32,
max_amp: f32
}
impl UI<'static> {
pub fn create () -> Self {
let font_path = find_font();
if let Err(e) = font_path {
use std::io::Write;
let mut file = std::fs::File::create("/Users/hendrik/Desktop/log.txt").unwrap();
file.write_all(b"Could not find the font path!").unwrap();
}
let glyph_cache = GlyphCache::new(find_font().unwrap(), (), TextureSettings::new()).unwrap();
Self {
// General window parameters
width: 0,
height: 0,
last_cursor_x: 0.0,
last_cursor_y: 0.0,
cursor_over_window: false,
mouse_last_moved: time::Instant::now(),
// General UI parameters
should_display_ui: false, // If true, will increase the ui_opacity to the target
ui_opacity: 0.0, // Will increase as long as display var is true, else decrease
target_opacity: 0.7, // The final opacity of the UI when fully shown
menu_display_time: 2,
ui_font: glyph_cache,
// Information
available_devices: fetch_devices(),
available_renderers: Vec::new(),
selected_device: 0,
selected_renderer: 0,
event_sender: None,
device_info: String::from("No device selected"),
font_path: find_font().unwrap(),
ui_elements: Vec::new(),
base_font_size: 12.0,
input_selector_button_rect: [0.0, 0.0, 0.0, 0.0],
renderer_selector_button_rect: [0.0, 0.0, 0.0, 0.0],
input_selector_index: -1,
renderer_selector_index: -1,
min_amp: 0.0,
max_amp: 0.0
}
}
// Helper and utility functions
pub fn selected_audio_device_changed (&mut self, idx: usize) {
self.selected_device = idx;
}
pub fn selected_renderer_changed (&mut self, idx: usize) {
self.selected_renderer = idx;
}
pub fn register_action_callback (&mut self, tx: mpsc::Sender<UIEvent>) {
self.event_sender = Some(tx);
}
pub fn set_available_renderers (&mut self, rend: Vec<String>) {
self.available_renderers = rend;
}
/// Draw a text button and return the actual rectangle where it has been drawn
fn draw_text_button (&mut self, begin_point: [f64; 2], text: String, gl: &mut GlGraphics, context: Context) -> [f64; 4] {
// Draws a text button with the UIs style
let padding = 5.0;
let real_width = self.ui_font.width(self.base_font_size as u32, text.as_str()).unwrap() + 2.0 * padding;
let real_height = self.base_font_size + 2.0 * padding;
let rect = [
begin_point[0],
begin_point[1],
begin_point[0] + real_width,
begin_point[1] + real_height
];
// Hover effect
// if cursor_in_rect([self.last_cursor_x, self.last_cursor_y], self.input_selector_button_rect) {
let line_color = [0.9, 0.9, 0.9, self.ui_opacity as f32];
// Four lines surrounding the button
line(line_color, 0.5, [rect[0], rect[1], rect[2], rect[1]], context.transform, gl);
line(line_color, 0.5, [rect[2], rect[1], rect[2], rect[3]], context.transform, gl);
line(line_color, 0.5, [rect[2], rect[3], rect[0], rect[3]], context.transform, gl);
line(line_color, 0.5, [rect[0], rect[3], rect[0], rect[1]], context.transform, gl);
// }
// Now the text within it
let fg_color = [1.0, 1.0, 1.0, self.ui_opacity as f32];
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
text.as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(begin_point[0] + padding, begin_point[1] + padding + self.base_font_size),
gl
).unwrap();
// Finally return the actual rectangle
[
begin_point[0],
begin_point[1],
real_width,
real_height
]
}
}
impl RendererBase for UI<'static> {
fn render (&mut self, gl: &mut GlGraphics, context: Context, args: &RenderArgs, audio: &AnalyzedAudio) {
if self.ui_opacity == 0.0 {
return // If the opacity is zero, we don't need to waste resources
}
// Window size
self.width = args.draw_size[0];
self.height = args.draw_size[1];
// Overlay size (width is always full)
let overlay_top = self.height as f64 * 0.8;
let overlay_height = self.height as f64 * 0.2;
// Font size relative to UI overlay (always three lines high)
self.base_font_size = (overlay_height / 3.0 * 0.95).floor();
if self.base_font_size > 14.0 {
self.base_font_size = 14.0; // Don't overdo it
}
// Colors
let bg_color = [0.0, 0.0, 0.0, self.ui_opacity as f32];
// Overlay area
let overlay_rect = [
0.0,
overlay_top,
self.width as f64,
overlay_height
];
// Draw the overlay
rectangle(bg_color, overlay_rect, context.transform, gl);
let mut selected_device = String::from("No device selected");
// Check if we have a device selected
if !self.available_devices.is_empty() && self.selected_device < self.available_devices.len() {
selected_device = self.available_devices[self.selected_device].name.clone();
}
self.device_info = format!("IN: {}", selected_device);
// Draw the input selection button
self.input_selector_button_rect = self.draw_text_button([10.0, overlay_rect[1] + 10.0], self.device_info.clone(), gl, context);
// ... and the renderer
self.renderer_selector_button_rect = self.draw_text_button(
[10.0 + self.input_selector_button_rect[2] + 20.0, overlay_rect[1] + 10.0],
format!("Renderer: {}", self.available_renderers[self.selected_renderer].clone()),
gl, context);
let fg_color = [1.0, 1.0, 1.0, self.ui_opacity as f32];
// Draw a small spectrogram to indicate whether audio is actually being received
let amp_bar_height = self.renderer_selector_button_rect[3] as f32;
let start_x = self.renderer_selector_button_rect[0] + self.renderer_selector_button_rect[2] + 10.0;
let start_y = self.renderer_selector_button_rect[1] + self.renderer_selector_button_rect[3];
let w = 50.0 / audio.amplitude[0].len() as f64;
for (i, sample) in audio.amplitude[0].iter().enumerate() {
let h = (sample.abs() * amp_bar_height) as f64;
rectangle(fg_color, [start_x + i as f64 * w, start_y - h, w, h], context.transform, gl);
}
// Now provide audio information in the next lines
let padding = 5.0;
// Sample rate
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!("Sample rate: {} Hz", format_number(audio.sample_rate as f64)).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 2.0 + 3.0 * padding),
gl
).unwrap();
// Buffer size
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!("Buffer size: {} samples", format_number(audio.buffer_size as f64)).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 3.0 + 4.0 * padding),
gl
).unwrap();
let mut max_frequency = 0.0;
for sample in audio.frequency[0].clone() {
if sample > max_frequency {
max_frequency = sample;
}
if sample < self.min_amp |
if sample > self.max_amp {
self.max_amp = sample
}
}
// Min/max frequency
// Buffer size
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!(
"Analyzed frequencies: {} Hz to {} Hz (channels: {})",
format_number(audio.bin_frequency.round() as f64),
format_number((audio.bin_frequency * audio.frequency[0].len() as f32).round() as f64),
audio.channels
).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 4.0 + 5.0 * padding),
gl
).unwrap();
let mut items = Vec::new();
for device in self.available_devices.iter() {
items.push(device.name.clone());
}
// Now display all UI elements
for elem in self.ui_elements.iter_mut() {
elem.render(gl, context, args);
}
}
fn update (&mut self, _args: &UpdateArgs) {
let now = time::Instant::now();
if now.duration_since(self.mouse_last_moved) > time::Duration::new(self.menu_display_time, 0) {
self.should_display_ui = false;
}
// Adapt the animation
if !self.should_display_ui && self.ui_opacity > 0.0 {
self.ui_opacity -= 0.1;
} else if self.should_display_ui && self.ui_opacity < self.target_opacity {
self.ui_opacity += 0.1;
}
}
fn on_cursor_movement (&mut self, x: f64, y: f64) {
self.last_cursor_x = x;
self.last_cursor_y = y;
self.mouse_last_moved = time::Instant::now();
self.should_display_ui = true;
// Now propagate to all UI elements
for elem in self.ui_elements.iter_mut() {
elem.on_cursor_movement(x, y);
}
}
fn on_cursor_state (&mut self, is_over_window: bool) {
self.cursor_over_window = is_over_window;
if !is_over_window {
self.should_display_ui = false;
}
}
fn on_click (&mut self) {
// Check for generated events on the UI Elements
// Now propagate to all UI elements
for elem in self.ui_elements.iter_mut() {
if let Some(event) = elem.on_click() {
if let UIEvent::Selection(idx, id) = event {
// Send event to application
if self.event_sender.is_some() && id == AUDIO_IO_ID {
self.event_sender.as_ref().unwrap().send(UIEvent::RequestChangeAudioDevice(idx)).unwrap();
} else if self.event_sender.is_some() && id == RENDERER_ID {
// let event = match idx {
// 1 => {
// RendererType::Circle
// },
// 2 => {
// RendererType::Tree
// },
// _ => { RendererType::Square } // Everything 0 and non-covered
// };
self.event_sender.as_ref().unwrap().send(UIEvent::RequestChangeRenderer(idx)).unwrap();
}
}
}
}
// Display the dropdown if the cursor is currently in the input device selector button rect
if cursor_in_rect(
[self.last_cursor_x, self.last_cursor_y],
self.input_selector_button_rect
) && self.input_selector_index < 0 {
let mut items = Vec::new();
for device in self.available_devices.iter() {
items.push((device.index, device.name.clone()));
}
self.ui_elements.push(
Box::new(
UIDropdown::create(
AUDIO_IO_ID,
items, true,
[self.input_selector_button_rect[0], self.input_selector_button_rect[1]],
self.input_selector_button_rect[2],
self.base_font_size,
self.font_path.clone()
)
)
);
// Save the index for later
self.input_selector_index = self.ui_elements.len() as i32 - 1;
} else if self.input_selector_index > -1 && !self.ui_elements.is_empty() {
// Remove that thing again
self.ui_elements.remove(self.input_selector_index as usize);
self.input_selector_index = -1;
}
if cursor_in_rect([self.last_cursor_x, self.last_cursor_y],
self.renderer_selector_button_rect) && self.renderer_selector_index < 0 {
let mut items = Vec::new();
for (i, renderer) in self.available_renderers.iter().enumerate() {
items.push((i, renderer.clone()));
}
self.ui_elements.push(
Box::new(
UIDropdown::create(
RENDERER_ID,
items, true,
[self.renderer_selector_button_rect[0], self.renderer_selector_button_rect[1]],
self.renderer_selector_button_rect[2],
self.base_font_size,
self.font_path.clone()
)
)
);
// Save the index for later
self.input_selector_index = self.ui_elements.len() as i32 - 1;
} else if self.renderer_selector_index > -1 && !self.ui_elements.is_empty() {
self.ui_elements.remove(self.renderer_selector_index as usize);
self.renderer_selector_index = -1;
}
}
fn on_keypress (&mut self, _key: Key) {
// ...
}
}
| {
self.min_amp = sample
} | conditional_block |
mod.rs | // Import the rendering contract
use crate::traits::RendererBase;
use crate::traits::UIElement;
use crate::traits::UIEvent;
// We make use of the arguments of the event loop
use piston::input::{UpdateArgs, RenderArgs, Key};
// Used to send events to the application
use std::sync::mpsc;
// Use the GlGraphics backend
use opengl_graphics::GlGraphics;
use graphics::character::CharacterCache;
// Import drawing helper functions
use graphics::{Context, rectangle, text, line, Transformed};
// Font imports
use opengl_graphics::GlyphCache;
use opengl_graphics::TextureSettings;
// Needed to satisfy the trait
use crate::audio::AnalyzedAudio;
// Import UI elements
mod dropdown;
use dropdown::UIDropdown;
mod util;
use util::{cursor_in_rect, format_number, find_font};
// Needed for the timeout
use std::time;
// Needed to resolve the asset path
use std::env::current_dir;
use std::path::Path;
// Necessary to retrieve a list of available input devices.
use crate::audio::util::{fetch_devices, AudioDevice};
static AUDIO_IO_ID: usize = 1;
static RENDERER_ID: usize = 2;
pub struct UI<'a> {
width: u32,
height: u32,
last_cursor_x: f64,
last_cursor_y: f64,
cursor_over_window: bool,
should_display_ui: bool,
menu_display_time: u64, // in seconds
mouse_last_moved: time::Instant,
ui_opacity: f64,
target_opacity: f64,
ui_font: graphics::glyph_cache::rusttype::GlyphCache<'a, (), opengl_graphics::Texture>,
// Displayable settings
available_devices: Vec<AudioDevice>,
available_renderers: Vec<String>,
ui_elements: Vec<Box<dyn UIElement>>,
selected_device: usize,
selected_renderer: usize,
event_sender: Option<mpsc::Sender<UIEvent>>,
device_info: String,
base_font_size: f64,
font_path: String,
input_selector_button_rect: [f64; 4],
renderer_selector_button_rect: [f64; 4],
input_selector_index: i32,
renderer_selector_index: i32,
min_amp: f32,
max_amp: f32
}
impl UI<'static> {
pub fn create () -> Self {
let font_path = find_font();
if let Err(e) = font_path {
use std::io::Write;
let mut file = std::fs::File::create("/Users/hendrik/Desktop/log.txt").unwrap();
file.write_all(b"Could not find the font path!").unwrap();
}
let glyph_cache = GlyphCache::new(find_font().unwrap(), (), TextureSettings::new()).unwrap();
Self {
// General window parameters
width: 0,
height: 0,
last_cursor_x: 0.0,
last_cursor_y: 0.0,
cursor_over_window: false,
mouse_last_moved: time::Instant::now(),
// General UI parameters
should_display_ui: false, // If true, will increase the ui_opacity to the target
ui_opacity: 0.0, // Will increase as long as display var is true, else decrease
target_opacity: 0.7, // The final opacity of the UI when fully shown
menu_display_time: 2,
ui_font: glyph_cache,
// Information
available_devices: fetch_devices(),
available_renderers: Vec::new(),
selected_device: 0,
selected_renderer: 0,
event_sender: None,
device_info: String::from("No device selected"),
font_path: find_font().unwrap(),
ui_elements: Vec::new(),
base_font_size: 12.0,
input_selector_button_rect: [0.0, 0.0, 0.0, 0.0],
renderer_selector_button_rect: [0.0, 0.0, 0.0, 0.0],
input_selector_index: -1,
renderer_selector_index: -1,
min_amp: 0.0,
max_amp: 0.0
}
}
// Helper and utility functions
pub fn selected_audio_device_changed (&mut self, idx: usize) {
self.selected_device = idx;
}
pub fn selected_renderer_changed (&mut self, idx: usize) {
self.selected_renderer = idx;
}
pub fn register_action_callback (&mut self, tx: mpsc::Sender<UIEvent>) {
self.event_sender = Some(tx);
}
pub fn set_available_renderers (&mut self, rend: Vec<String>) {
self.available_renderers = rend;
}
/// Draw a text button and return the actual rectangle where it has been drawn
fn draw_text_button (&mut self, begin_point: [f64; 2], text: String, gl: &mut GlGraphics, context: Context) -> [f64; 4] {
// Draws a text button with the UIs style
let padding = 5.0;
let real_width = self.ui_font.width(self.base_font_size as u32, text.as_str()).unwrap() + 2.0 * padding;
let real_height = self.base_font_size + 2.0 * padding;
let rect = [
begin_point[0],
begin_point[1],
begin_point[0] + real_width,
begin_point[1] + real_height
];
// Hover effect
// if cursor_in_rect([self.last_cursor_x, self.last_cursor_y], self.input_selector_button_rect) {
let line_color = [0.9, 0.9, 0.9, self.ui_opacity as f32];
// Four lines surrounding the button
line(line_color, 0.5, [rect[0], rect[1], rect[2], rect[1]], context.transform, gl);
line(line_color, 0.5, [rect[2], rect[1], rect[2], rect[3]], context.transform, gl);
line(line_color, 0.5, [rect[2], rect[3], rect[0], rect[3]], context.transform, gl);
line(line_color, 0.5, [rect[0], rect[3], rect[0], rect[1]], context.transform, gl);
// }
// Now the text within it
let fg_color = [1.0, 1.0, 1.0, self.ui_opacity as f32];
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
text.as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(begin_point[0] + padding, begin_point[1] + padding + self.base_font_size),
gl
).unwrap();
// Finally return the actual rectangle
[
begin_point[0],
begin_point[1],
real_width,
real_height
]
}
}
impl RendererBase for UI<'static> {
fn render (&mut self, gl: &mut GlGraphics, context: Context, args: &RenderArgs, audio: &AnalyzedAudio) {
if self.ui_opacity == 0.0 {
return // If the opacity is zero, we don't need to waste resources
}
// Window size
self.width = args.draw_size[0];
self.height = args.draw_size[1];
// Overlay size (width is always full)
let overlay_top = self.height as f64 * 0.8;
let overlay_height = self.height as f64 * 0.2;
// Font size relative to UI overlay (always three lines high)
self.base_font_size = (overlay_height / 3.0 * 0.95).floor();
if self.base_font_size > 14.0 {
self.base_font_size = 14.0; // Don't overdo it
}
// Colors
let bg_color = [0.0, 0.0, 0.0, self.ui_opacity as f32];
// Overlay area
let overlay_rect = [
0.0,
overlay_top,
self.width as f64,
overlay_height
];
// Draw the overlay
rectangle(bg_color, overlay_rect, context.transform, gl);
let mut selected_device = String::from("No device selected");
// Check if we have a device selected
if !self.available_devices.is_empty() && self.selected_device < self.available_devices.len() {
selected_device = self.available_devices[self.selected_device].name.clone();
}
self.device_info = format!("IN: {}", selected_device);
// Draw the input selection button
self.input_selector_button_rect = self.draw_text_button([10.0, overlay_rect[1] + 10.0], self.device_info.clone(), gl, context);
// ... and the renderer
self.renderer_selector_button_rect = self.draw_text_button(
[10.0 + self.input_selector_button_rect[2] + 20.0, overlay_rect[1] + 10.0],
format!("Renderer: {}", self.available_renderers[self.selected_renderer].clone()),
gl, context);
let fg_color = [1.0, 1.0, 1.0, self.ui_opacity as f32];
// Draw a small spectrogram to indicate whether audio is actually being received
let amp_bar_height = self.renderer_selector_button_rect[3] as f32;
let start_x = self.renderer_selector_button_rect[0] + self.renderer_selector_button_rect[2] + 10.0;
let start_y = self.renderer_selector_button_rect[1] + self.renderer_selector_button_rect[3];
let w = 50.0 / audio.amplitude[0].len() as f64;
for (i, sample) in audio.amplitude[0].iter().enumerate() {
let h = (sample.abs() * amp_bar_height) as f64;
rectangle(fg_color, [start_x + i as f64 * w, start_y - h, w, h], context.transform, gl);
}
// Now provide audio information in the next lines
let padding = 5.0;
// Sample rate
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!("Sample rate: {} Hz", format_number(audio.sample_rate as f64)).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 2.0 + 3.0 * padding),
gl
).unwrap();
// Buffer size
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!("Buffer size: {} samples", format_number(audio.buffer_size as f64)).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 3.0 + 4.0 * padding),
gl
).unwrap();
let mut max_frequency = 0.0;
for sample in audio.frequency[0].clone() {
if sample > max_frequency {
max_frequency = sample;
}
if sample < self.min_amp {
self.min_amp = sample
}
if sample > self.max_amp {
self.max_amp = sample
}
}
// Min/max frequency
// Buffer size
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!(
"Analyzed frequencies: {} Hz to {} Hz (channels: {})",
format_number(audio.bin_frequency.round() as f64),
format_number((audio.bin_frequency * audio.frequency[0].len() as f32).round() as f64),
audio.channels
).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 4.0 + 5.0 * padding),
gl
).unwrap();
let mut items = Vec::new();
for device in self.available_devices.iter() {
items.push(device.name.clone());
}
// Now display all UI elements
for elem in self.ui_elements.iter_mut() {
elem.render(gl, context, args);
}
}
fn update (&mut self, _args: &UpdateArgs) {
let now = time::Instant::now();
if now.duration_since(self.mouse_last_moved) > time::Duration::new(self.menu_display_time, 0) {
self.should_display_ui = false;
}
// Adapt the animation
if !self.should_display_ui && self.ui_opacity > 0.0 {
self.ui_opacity -= 0.1;
} else if self.should_display_ui && self.ui_opacity < self.target_opacity {
self.ui_opacity += 0.1;
}
}
fn on_cursor_movement (&mut self, x: f64, y: f64) {
self.last_cursor_x = x;
self.last_cursor_y = y;
self.mouse_last_moved = time::Instant::now();
self.should_display_ui = true;
// Now propagate to all UI elements
for elem in self.ui_elements.iter_mut() {
elem.on_cursor_movement(x, y);
}
}
fn on_cursor_state (&mut self, is_over_window: bool) {
self.cursor_over_window = is_over_window;
if !is_over_window {
self.should_display_ui = false;
}
}
fn on_click (&mut self) {
// Check for generated events on the UI Elements
// Now propagate to all UI elements
for elem in self.ui_elements.iter_mut() {
if let Some(event) = elem.on_click() {
if let UIEvent::Selection(idx, id) = event {
// Send event to application
if self.event_sender.is_some() && id == AUDIO_IO_ID {
self.event_sender.as_ref().unwrap().send(UIEvent::RequestChangeAudioDevice(idx)).unwrap();
} else if self.event_sender.is_some() && id == RENDERER_ID {
// let event = match idx {
// 1 => {
// RendererType::Circle
// },
// 2 => {
// RendererType::Tree
// },
// _ => { RendererType::Square } // Everything 0 and non-covered
// };
self.event_sender.as_ref().unwrap().send(UIEvent::RequestChangeRenderer(idx)).unwrap();
}
}
}
}
// Display the dropdown if the cursor is currently in the input device selector button rect
if cursor_in_rect(
[self.last_cursor_x, self.last_cursor_y],
self.input_selector_button_rect
) && self.input_selector_index < 0 {
let mut items = Vec::new();
for device in self.available_devices.iter() {
items.push((device.index, device.name.clone()));
}
self.ui_elements.push(
Box::new(
UIDropdown::create(
AUDIO_IO_ID,
items, true,
[self.input_selector_button_rect[0], self.input_selector_button_rect[1]],
self.input_selector_button_rect[2],
self.base_font_size,
self.font_path.clone()
)
)
);
// Save the index for later
self.input_selector_index = self.ui_elements.len() as i32 - 1;
} else if self.input_selector_index > -1 && !self.ui_elements.is_empty() {
// Remove that thing again
self.ui_elements.remove(self.input_selector_index as usize);
self.input_selector_index = -1;
}
if cursor_in_rect([self.last_cursor_x, self.last_cursor_y],
self.renderer_selector_button_rect) && self.renderer_selector_index < 0 {
let mut items = Vec::new();
for (i, renderer) in self.available_renderers.iter().enumerate() {
items.push((i, renderer.clone()));
}
self.ui_elements.push(
Box::new(
UIDropdown::create(
RENDERER_ID,
items, true,
[self.renderer_selector_button_rect[0], self.renderer_selector_button_rect[1]],
self.renderer_selector_button_rect[2],
self.base_font_size,
self.font_path.clone()
)
)
);
// Save the index for later
self.input_selector_index = self.ui_elements.len() as i32 - 1;
} else if self.renderer_selector_index > -1 && !self.ui_elements.is_empty() {
self.ui_elements.remove(self.renderer_selector_index as usize);
self.renderer_selector_index = -1;
}
}
fn on_keypress (&mut self, _key: Key) |
}
| {
// ...
} | identifier_body |
mod.rs | // Import the rendering contract
use crate::traits::RendererBase;
use crate::traits::UIElement;
use crate::traits::UIEvent;
// We make use of the arguments of the event loop
use piston::input::{UpdateArgs, RenderArgs, Key};
// Used to send events to the application
use std::sync::mpsc;
// Use the GlGraphics backend
use opengl_graphics::GlGraphics;
use graphics::character::CharacterCache;
// Import drawing helper functions
use graphics::{Context, rectangle, text, line, Transformed};
// Font imports
use opengl_graphics::GlyphCache;
use opengl_graphics::TextureSettings;
// Needed to satisfy the trait
use crate::audio::AnalyzedAudio;
// Import UI elements
mod dropdown;
use dropdown::UIDropdown;
mod util;
use util::{cursor_in_rect, format_number, find_font};
// Needed for the timeout
use std::time;
// Needed to resolve the asset path
use std::env::current_dir;
use std::path::Path;
// Necessary to retrieve a list of available input devices.
use crate::audio::util::{fetch_devices, AudioDevice};
static AUDIO_IO_ID: usize = 1;
static RENDERER_ID: usize = 2;
pub struct UI<'a> {
width: u32,
height: u32,
last_cursor_x: f64,
last_cursor_y: f64,
cursor_over_window: bool,
should_display_ui: bool,
menu_display_time: u64, // in seconds
mouse_last_moved: time::Instant,
ui_opacity: f64,
target_opacity: f64,
ui_font: graphics::glyph_cache::rusttype::GlyphCache<'a, (), opengl_graphics::Texture>,
// Displayable settings
available_devices: Vec<AudioDevice>,
available_renderers: Vec<String>,
ui_elements: Vec<Box<dyn UIElement>>,
selected_device: usize,
selected_renderer: usize,
event_sender: Option<mpsc::Sender<UIEvent>>,
device_info: String,
base_font_size: f64,
font_path: String,
input_selector_button_rect: [f64; 4],
renderer_selector_button_rect: [f64; 4],
input_selector_index: i32,
renderer_selector_index: i32,
min_amp: f32,
max_amp: f32
}
impl UI<'static> {
pub fn create () -> Self {
let font_path = find_font();
if let Err(e) = font_path {
use std::io::Write;
let mut file = std::fs::File::create("/Users/hendrik/Desktop/log.txt").unwrap();
file.write_all(b"Could not find the font path!").unwrap();
}
let glyph_cache = GlyphCache::new(find_font().unwrap(), (), TextureSettings::new()).unwrap();
Self {
// General window parameters
width: 0,
height: 0,
last_cursor_x: 0.0,
last_cursor_y: 0.0,
cursor_over_window: false,
mouse_last_moved: time::Instant::now(),
// General UI parameters
should_display_ui: false, // If true, will increase the ui_opacity to the target
ui_opacity: 0.0, // Will increase as long as display var is true, else decrease
target_opacity: 0.7, // The final opacity of the UI when fully shown
menu_display_time: 2,
ui_font: glyph_cache,
// Information
available_devices: fetch_devices(),
available_renderers: Vec::new(),
selected_device: 0,
selected_renderer: 0,
event_sender: None,
device_info: String::from("No device selected"),
font_path: find_font().unwrap(),
ui_elements: Vec::new(),
base_font_size: 12.0,
input_selector_button_rect: [0.0, 0.0, 0.0, 0.0],
renderer_selector_button_rect: [0.0, 0.0, 0.0, 0.0],
input_selector_index: -1,
renderer_selector_index: -1,
min_amp: 0.0,
max_amp: 0.0
}
}
// Helper and utility functions
pub fn selected_audio_device_changed (&mut self, idx: usize) {
self.selected_device = idx;
}
pub fn selected_renderer_changed (&mut self, idx: usize) {
self.selected_renderer = idx;
}
pub fn register_action_callback (&mut self, tx: mpsc::Sender<UIEvent>) {
self.event_sender = Some(tx);
}
pub fn set_available_renderers (&mut self, rend: Vec<String>) {
self.available_renderers = rend;
}
/// Draw a text button and return the actual rectangle where it has been drawn
fn draw_text_button (&mut self, begin_point: [f64; 2], text: String, gl: &mut GlGraphics, context: Context) -> [f64; 4] {
// Draws a text button with the UIs style
let padding = 5.0;
let real_width = self.ui_font.width(self.base_font_size as u32, text.as_str()).unwrap() + 2.0 * padding;
let real_height = self.base_font_size + 2.0 * padding;
let rect = [
begin_point[0],
begin_point[1],
begin_point[0] + real_width,
begin_point[1] + real_height
];
// Hover effect
// if cursor_in_rect([self.last_cursor_x, self.last_cursor_y], self.input_selector_button_rect) {
let line_color = [0.9, 0.9, 0.9, self.ui_opacity as f32];
// Four lines surrounding the button
line(line_color, 0.5, [rect[0], rect[1], rect[2], rect[1]], context.transform, gl);
line(line_color, 0.5, [rect[2], rect[1], rect[2], rect[3]], context.transform, gl);
line(line_color, 0.5, [rect[2], rect[3], rect[0], rect[3]], context.transform, gl);
line(line_color, 0.5, [rect[0], rect[3], rect[0], rect[1]], context.transform, gl);
// }
// Now the text within it
let fg_color = [1.0, 1.0, 1.0, self.ui_opacity as f32];
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
text.as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(begin_point[0] + padding, begin_point[1] + padding + self.base_font_size),
gl
).unwrap();
// Finally return the actual rectangle
[
begin_point[0],
begin_point[1],
real_width,
real_height
]
}
}
impl RendererBase for UI<'static> {
fn render (&mut self, gl: &mut GlGraphics, context: Context, args: &RenderArgs, audio: &AnalyzedAudio) {
if self.ui_opacity == 0.0 {
return // If the opacity is zero, we don't need to waste resources
}
// Window size
self.width = args.draw_size[0];
self.height = args.draw_size[1];
// Overlay size (width is always full) |
// Font size relative to UI overlay (always three lines high)
self.base_font_size = (overlay_height / 3.0 * 0.95).floor();
if self.base_font_size > 14.0 {
self.base_font_size = 14.0; // Don't overdo it
}
// Colors
let bg_color = [0.0, 0.0, 0.0, self.ui_opacity as f32];
// Overlay area
let overlay_rect = [
0.0,
overlay_top,
self.width as f64,
overlay_height
];
// Draw the overlay
rectangle(bg_color, overlay_rect, context.transform, gl);
let mut selected_device = String::from("No device selected");
// Check if we have a device selected
if !self.available_devices.is_empty() && self.selected_device < self.available_devices.len() {
selected_device = self.available_devices[self.selected_device].name.clone();
}
self.device_info = format!("IN: {}", selected_device);
// Draw the input selection button
self.input_selector_button_rect = self.draw_text_button([10.0, overlay_rect[1] + 10.0], self.device_info.clone(), gl, context);
// ... and the renderer
self.renderer_selector_button_rect = self.draw_text_button(
[10.0 + self.input_selector_button_rect[2] + 20.0, overlay_rect[1] + 10.0],
format!("Renderer: {}", self.available_renderers[self.selected_renderer].clone()),
gl, context);
let fg_color = [1.0, 1.0, 1.0, self.ui_opacity as f32];
// Draw a small spectrogram to indicate whether audio is actually being received
let amp_bar_height = self.renderer_selector_button_rect[3] as f32;
let start_x = self.renderer_selector_button_rect[0] + self.renderer_selector_button_rect[2] + 10.0;
let start_y = self.renderer_selector_button_rect[1] + self.renderer_selector_button_rect[3];
let w = 50.0 / audio.amplitude[0].len() as f64;
for (i, sample) in audio.amplitude[0].iter().enumerate() {
let h = (sample.abs() * amp_bar_height) as f64;
rectangle(fg_color, [start_x + i as f64 * w, start_y - h, w, h], context.transform, gl);
}
// Now provide audio information in the next lines
let padding = 5.0;
// Sample rate
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!("Sample rate: {} Hz", format_number(audio.sample_rate as f64)).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 2.0 + 3.0 * padding),
gl
).unwrap();
// Buffer size
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!("Buffer size: {} samples", format_number(audio.buffer_size as f64)).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 3.0 + 4.0 * padding),
gl
).unwrap();
let mut max_frequency = 0.0;
for sample in audio.frequency[0].clone() {
if sample > max_frequency {
max_frequency = sample;
}
if sample < self.min_amp {
self.min_amp = sample
}
if sample > self.max_amp {
self.max_amp = sample
}
}
// Min/max frequency
// Buffer size
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!(
"Analyzed frequencies: {} Hz to {} Hz (channels: {})",
format_number(audio.bin_frequency.round() as f64),
format_number((audio.bin_frequency * audio.frequency[0].len() as f32).round() as f64),
audio.channels
).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 4.0 + 5.0 * padding),
gl
).unwrap();
let mut items = Vec::new();
for device in self.available_devices.iter() {
items.push(device.name.clone());
}
// Now display all UI elements
for elem in self.ui_elements.iter_mut() {
elem.render(gl, context, args);
}
}
fn update (&mut self, _args: &UpdateArgs) {
let now = time::Instant::now();
if now.duration_since(self.mouse_last_moved) > time::Duration::new(self.menu_display_time, 0) {
self.should_display_ui = false;
}
// Adapt the animation
if !self.should_display_ui && self.ui_opacity > 0.0 {
self.ui_opacity -= 0.1;
} else if self.should_display_ui && self.ui_opacity < self.target_opacity {
self.ui_opacity += 0.1;
}
}
fn on_cursor_movement (&mut self, x: f64, y: f64) {
self.last_cursor_x = x;
self.last_cursor_y = y;
self.mouse_last_moved = time::Instant::now();
self.should_display_ui = true;
// Now propagate to all UI elements
for elem in self.ui_elements.iter_mut() {
elem.on_cursor_movement(x, y);
}
}
fn on_cursor_state (&mut self, is_over_window: bool) {
self.cursor_over_window = is_over_window;
if !is_over_window {
self.should_display_ui = false;
}
}
fn on_click (&mut self) {
// Check for generated events on the UI Elements
// Now propagate to all UI elements
for elem in self.ui_elements.iter_mut() {
if let Some(event) = elem.on_click() {
if let UIEvent::Selection(idx, id) = event {
// Send event to application
if self.event_sender.is_some() && id == AUDIO_IO_ID {
self.event_sender.as_ref().unwrap().send(UIEvent::RequestChangeAudioDevice(idx)).unwrap();
} else if self.event_sender.is_some() && id == RENDERER_ID {
// let event = match idx {
// 1 => {
// RendererType::Circle
// },
// 2 => {
// RendererType::Tree
// },
// _ => { RendererType::Square } // Everything 0 and non-covered
// };
self.event_sender.as_ref().unwrap().send(UIEvent::RequestChangeRenderer(idx)).unwrap();
}
}
}
}
// Display the dropdown if the cursor is currently in the input device selector button rect
if cursor_in_rect(
[self.last_cursor_x, self.last_cursor_y],
self.input_selector_button_rect
) && self.input_selector_index < 0 {
let mut items = Vec::new();
for device in self.available_devices.iter() {
items.push((device.index, device.name.clone()));
}
self.ui_elements.push(
Box::new(
UIDropdown::create(
AUDIO_IO_ID,
items, true,
[self.input_selector_button_rect[0], self.input_selector_button_rect[1]],
self.input_selector_button_rect[2],
self.base_font_size,
self.font_path.clone()
)
)
);
// Save the index for later
self.input_selector_index = self.ui_elements.len() as i32 - 1;
} else if self.input_selector_index > -1 && !self.ui_elements.is_empty() {
// Remove that thing again
self.ui_elements.remove(self.input_selector_index as usize);
self.input_selector_index = -1;
}
if cursor_in_rect([self.last_cursor_x, self.last_cursor_y],
self.renderer_selector_button_rect) && self.renderer_selector_index < 0 {
let mut items = Vec::new();
for (i, renderer) in self.available_renderers.iter().enumerate() {
items.push((i, renderer.clone()));
}
self.ui_elements.push(
Box::new(
UIDropdown::create(
RENDERER_ID,
items, true,
[self.renderer_selector_button_rect[0], self.renderer_selector_button_rect[1]],
self.renderer_selector_button_rect[2],
self.base_font_size,
self.font_path.clone()
)
)
);
// Save the index for later
self.input_selector_index = self.ui_elements.len() as i32 - 1;
} else if self.renderer_selector_index > -1 && !self.ui_elements.is_empty() {
self.ui_elements.remove(self.renderer_selector_index as usize);
self.renderer_selector_index = -1;
}
}
fn on_keypress (&mut self, _key: Key) {
// ...
}
} | let overlay_top = self.height as f64 * 0.8;
let overlay_height = self.height as f64 * 0.2; | random_line_split |
mod.rs | // Import the rendering contract
use crate::traits::RendererBase;
use crate::traits::UIElement;
use crate::traits::UIEvent;
// We make use of the arguments of the event loop
use piston::input::{UpdateArgs, RenderArgs, Key};
// Used to send events to the application
use std::sync::mpsc;
// Use the GlGraphics backend
use opengl_graphics::GlGraphics;
use graphics::character::CharacterCache;
// Import drawing helper functions
use graphics::{Context, rectangle, text, line, Transformed};
// Font imports
use opengl_graphics::GlyphCache;
use opengl_graphics::TextureSettings;
// Needed to satisfy the trait
use crate::audio::AnalyzedAudio;
// Import UI elements
mod dropdown;
use dropdown::UIDropdown;
mod util;
use util::{cursor_in_rect, format_number, find_font};
// Needed for the timeout
use std::time;
// Needed to resolve the asset path
use std::env::current_dir;
use std::path::Path;
// Necessary to retrieve a list of available input devices.
use crate::audio::util::{fetch_devices, AudioDevice};
static AUDIO_IO_ID: usize = 1;
static RENDERER_ID: usize = 2;
pub struct UI<'a> {
width: u32,
height: u32,
last_cursor_x: f64,
last_cursor_y: f64,
cursor_over_window: bool,
should_display_ui: bool,
menu_display_time: u64, // in seconds
mouse_last_moved: time::Instant,
ui_opacity: f64,
target_opacity: f64,
ui_font: graphics::glyph_cache::rusttype::GlyphCache<'a, (), opengl_graphics::Texture>,
// Displayable settings
available_devices: Vec<AudioDevice>,
available_renderers: Vec<String>,
ui_elements: Vec<Box<dyn UIElement>>,
selected_device: usize,
selected_renderer: usize,
event_sender: Option<mpsc::Sender<UIEvent>>,
device_info: String,
base_font_size: f64,
font_path: String,
input_selector_button_rect: [f64; 4],
renderer_selector_button_rect: [f64; 4],
input_selector_index: i32,
renderer_selector_index: i32,
min_amp: f32,
max_amp: f32
}
impl UI<'static> {
pub fn create () -> Self {
let font_path = find_font();
if let Err(e) = font_path {
use std::io::Write;
let mut file = std::fs::File::create("/Users/hendrik/Desktop/log.txt").unwrap();
file.write_all(b"Could not find the font path!").unwrap();
}
let glyph_cache = GlyphCache::new(find_font().unwrap(), (), TextureSettings::new()).unwrap();
Self {
// General window parameters
width: 0,
height: 0,
last_cursor_x: 0.0,
last_cursor_y: 0.0,
cursor_over_window: false,
mouse_last_moved: time::Instant::now(),
// General UI parameters
should_display_ui: false, // If true, will increase the ui_opacity to the target
ui_opacity: 0.0, // Will increase as long as display var is true, else decrease
target_opacity: 0.7, // The final opacity of the UI when fully shown
menu_display_time: 2,
ui_font: glyph_cache,
// Information
available_devices: fetch_devices(),
available_renderers: Vec::new(),
selected_device: 0,
selected_renderer: 0,
event_sender: None,
device_info: String::from("No device selected"),
font_path: find_font().unwrap(),
ui_elements: Vec::new(),
base_font_size: 12.0,
input_selector_button_rect: [0.0, 0.0, 0.0, 0.0],
renderer_selector_button_rect: [0.0, 0.0, 0.0, 0.0],
input_selector_index: -1,
renderer_selector_index: -1,
min_amp: 0.0,
max_amp: 0.0
}
}
// Helper and utility functions
pub fn selected_audio_device_changed (&mut self, idx: usize) {
self.selected_device = idx;
}
pub fn selected_renderer_changed (&mut self, idx: usize) {
self.selected_renderer = idx;
}
pub fn register_action_callback (&mut self, tx: mpsc::Sender<UIEvent>) {
self.event_sender = Some(tx);
}
pub fn set_available_renderers (&mut self, rend: Vec<String>) {
self.available_renderers = rend;
}
/// Draw a text button and return the actual rectangle where it has been drawn
fn | (&mut self, begin_point: [f64; 2], text: String, gl: &mut GlGraphics, context: Context) -> [f64; 4] {
// Draws a text button with the UIs style
let padding = 5.0;
let real_width = self.ui_font.width(self.base_font_size as u32, text.as_str()).unwrap() + 2.0 * padding;
let real_height = self.base_font_size + 2.0 * padding;
let rect = [
begin_point[0],
begin_point[1],
begin_point[0] + real_width,
begin_point[1] + real_height
];
// Hover effect
// if cursor_in_rect([self.last_cursor_x, self.last_cursor_y], self.input_selector_button_rect) {
let line_color = [0.9, 0.9, 0.9, self.ui_opacity as f32];
// Four lines surrounding the button
line(line_color, 0.5, [rect[0], rect[1], rect[2], rect[1]], context.transform, gl);
line(line_color, 0.5, [rect[2], rect[1], rect[2], rect[3]], context.transform, gl);
line(line_color, 0.5, [rect[2], rect[3], rect[0], rect[3]], context.transform, gl);
line(line_color, 0.5, [rect[0], rect[3], rect[0], rect[1]], context.transform, gl);
// }
// Now the text within it
let fg_color = [1.0, 1.0, 1.0, self.ui_opacity as f32];
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
text.as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(begin_point[0] + padding, begin_point[1] + padding + self.base_font_size),
gl
).unwrap();
// Finally return the actual rectangle
[
begin_point[0],
begin_point[1],
real_width,
real_height
]
}
}
impl RendererBase for UI<'static> {
fn render (&mut self, gl: &mut GlGraphics, context: Context, args: &RenderArgs, audio: &AnalyzedAudio) {
if self.ui_opacity == 0.0 {
return // If the opacity is zero, we don't need to waste resources
}
// Window size
self.width = args.draw_size[0];
self.height = args.draw_size[1];
// Overlay size (width is always full)
let overlay_top = self.height as f64 * 0.8;
let overlay_height = self.height as f64 * 0.2;
// Font size relative to UI overlay (always three lines high)
self.base_font_size = (overlay_height / 3.0 * 0.95).floor();
if self.base_font_size > 14.0 {
self.base_font_size = 14.0; // Don't overdo it
}
// Colors
let bg_color = [0.0, 0.0, 0.0, self.ui_opacity as f32];
// Overlay area
let overlay_rect = [
0.0,
overlay_top,
self.width as f64,
overlay_height
];
// Draw the overlay
rectangle(bg_color, overlay_rect, context.transform, gl);
let mut selected_device = String::from("No device selected");
// Check if we have a device selected
if !self.available_devices.is_empty() && self.selected_device < self.available_devices.len() {
selected_device = self.available_devices[self.selected_device].name.clone();
}
self.device_info = format!("IN: {}", selected_device);
// Draw the input selection button
self.input_selector_button_rect = self.draw_text_button([10.0, overlay_rect[1] + 10.0], self.device_info.clone(), gl, context);
// ... and the renderer
self.renderer_selector_button_rect = self.draw_text_button(
[10.0 + self.input_selector_button_rect[2] + 20.0, overlay_rect[1] + 10.0],
format!("Renderer: {}", self.available_renderers[self.selected_renderer].clone()),
gl, context);
let fg_color = [1.0, 1.0, 1.0, self.ui_opacity as f32];
// Draw a small spectrogram to indicate whether audio is actually being received
let amp_bar_height = self.renderer_selector_button_rect[3] as f32;
let start_x = self.renderer_selector_button_rect[0] + self.renderer_selector_button_rect[2] + 10.0;
let start_y = self.renderer_selector_button_rect[1] + self.renderer_selector_button_rect[3];
let w = 50.0 / audio.amplitude[0].len() as f64;
for (i, sample) in audio.amplitude[0].iter().enumerate() {
let h = (sample.abs() * amp_bar_height) as f64;
rectangle(fg_color, [start_x + i as f64 * w, start_y - h, w, h], context.transform, gl);
}
// Now provide audio information in the next lines
let padding = 5.0;
// Sample rate
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!("Sample rate: {} Hz", format_number(audio.sample_rate as f64)).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 2.0 + 3.0 * padding),
gl
).unwrap();
// Buffer size
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!("Buffer size: {} samples", format_number(audio.buffer_size as f64)).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 3.0 + 4.0 * padding),
gl
).unwrap();
let mut max_frequency = 0.0;
for sample in audio.frequency[0].clone() {
if sample > max_frequency {
max_frequency = sample;
}
if sample < self.min_amp {
self.min_amp = sample
}
if sample > self.max_amp {
self.max_amp = sample
}
}
// Min/max frequency
// Buffer size
text::Text::new_color(fg_color, self.base_font_size as u32).draw(
format!(
"Analyzed frequencies: {} Hz to {} Hz (channels: {})",
format_number(audio.bin_frequency.round() as f64),
format_number((audio.bin_frequency * audio.frequency[0].len() as f32).round() as f64),
audio.channels
).as_str(),
&mut self.ui_font,
&context.draw_state,
context.transform.trans(10.0 + padding, overlay_rect[1] + 10.0 + self.base_font_size * 4.0 + 5.0 * padding),
gl
).unwrap();
let mut items = Vec::new();
for device in self.available_devices.iter() {
items.push(device.name.clone());
}
// Now display all UI elements
for elem in self.ui_elements.iter_mut() {
elem.render(gl, context, args);
}
}
fn update (&mut self, _args: &UpdateArgs) {
let now = time::Instant::now();
if now.duration_since(self.mouse_last_moved) > time::Duration::new(self.menu_display_time, 0) {
self.should_display_ui = false;
}
// Adapt the animation
if !self.should_display_ui && self.ui_opacity > 0.0 {
self.ui_opacity -= 0.1;
} else if self.should_display_ui && self.ui_opacity < self.target_opacity {
self.ui_opacity += 0.1;
}
}
fn on_cursor_movement (&mut self, x: f64, y: f64) {
self.last_cursor_x = x;
self.last_cursor_y = y;
self.mouse_last_moved = time::Instant::now();
self.should_display_ui = true;
// Now propagate to all UI elements
for elem in self.ui_elements.iter_mut() {
elem.on_cursor_movement(x, y);
}
}
fn on_cursor_state (&mut self, is_over_window: bool) {
self.cursor_over_window = is_over_window;
if !is_over_window {
self.should_display_ui = false;
}
}
fn on_click (&mut self) {
// Check for generated events on the UI Elements
// Now propagate to all UI elements
for elem in self.ui_elements.iter_mut() {
if let Some(event) = elem.on_click() {
if let UIEvent::Selection(idx, id) = event {
// Send event to application
if self.event_sender.is_some() && id == AUDIO_IO_ID {
self.event_sender.as_ref().unwrap().send(UIEvent::RequestChangeAudioDevice(idx)).unwrap();
} else if self.event_sender.is_some() && id == RENDERER_ID {
// let event = match idx {
// 1 => {
// RendererType::Circle
// },
// 2 => {
// RendererType::Tree
// },
// _ => { RendererType::Square } // Everything 0 and non-covered
// };
self.event_sender.as_ref().unwrap().send(UIEvent::RequestChangeRenderer(idx)).unwrap();
}
}
}
}
// Display the dropdown if the cursor is currently in the input device selector button rect
if cursor_in_rect(
[self.last_cursor_x, self.last_cursor_y],
self.input_selector_button_rect
) && self.input_selector_index < 0 {
let mut items = Vec::new();
for device in self.available_devices.iter() {
items.push((device.index, device.name.clone()));
}
self.ui_elements.push(
Box::new(
UIDropdown::create(
AUDIO_IO_ID,
items, true,
[self.input_selector_button_rect[0], self.input_selector_button_rect[1]],
self.input_selector_button_rect[2],
self.base_font_size,
self.font_path.clone()
)
)
);
// Save the index for later
self.input_selector_index = self.ui_elements.len() as i32 - 1;
} else if self.input_selector_index > -1 && !self.ui_elements.is_empty() {
// Remove that thing again
self.ui_elements.remove(self.input_selector_index as usize);
self.input_selector_index = -1;
}
if cursor_in_rect([self.last_cursor_x, self.last_cursor_y],
self.renderer_selector_button_rect) && self.renderer_selector_index < 0 {
let mut items = Vec::new();
for (i, renderer) in self.available_renderers.iter().enumerate() {
items.push((i, renderer.clone()));
}
self.ui_elements.push(
Box::new(
UIDropdown::create(
RENDERER_ID,
items, true,
[self.renderer_selector_button_rect[0], self.renderer_selector_button_rect[1]],
self.renderer_selector_button_rect[2],
self.base_font_size,
self.font_path.clone()
)
)
);
// Save the index for later
self.input_selector_index = self.ui_elements.len() as i32 - 1;
} else if self.renderer_selector_index > -1 && !self.ui_elements.is_empty() {
self.ui_elements.remove(self.renderer_selector_index as usize);
self.renderer_selector_index = -1;
}
}
fn on_keypress (&mut self, _key: Key) {
// ...
}
}
| draw_text_button | identifier_name |
mod.rs | #![allow(dead_code)]
pub mod hypotheses;
use std::collections::HashMap;
use std::hash::Hash;
use std::cmp::{Eq, Ordering};
use std::iter::FromIterator;
use ansi_term::Style;
use triangles::Study;
use inference::triangle::hypotheses::BasicHypothesis;
use inference::triangle::hypotheses::JoinedHypothesis;
pub use inference::triangle::hypotheses::standard_basics::standard_basic_hypotheses;
pub trait Hypothesis {
fn predicts_the_property(&self, study: &Study) -> bool;
fn description(&self) -> String;
}
#[derive(Debug)]
pub struct Distribution<H: Hypothesis + Hash + Eq>(HashMap<H, f64>);
impl<H: Hypothesis + Hash + Eq + Copy> Distribution<H> {
pub fn new() -> Self {
let backing = HashMap::<H, f64>::new();
Distribution(backing)
}
pub fn ignorance_prior(hypotheses: Vec<H>) -> Self {
let mut backing = HashMap::<H, f64>::new();
let probability_each: f64 = 1.0/(hypotheses.len() as f64);
for hypothesis in hypotheses.into_iter() {
backing.insert(hypothesis, probability_each);
}
Distribution(backing)
}
fn backing(&self) -> &HashMap<H, f64> {
&self.0
}
fn mut_backing(&mut self) -> &mut HashMap<H, f64> {
&mut self.0
}
pub fn len(&self) -> usize {
self.backing().len()
}
pub fn hypotheses(&self) -> Vec<&H> {
self.backing().keys().collect::<Vec<_>>()
}
pub fn belief(&self, hypothesis: H) -> f64 {
*self.backing().get(&hypothesis).unwrap_or(&0.0f64)
}
pub fn entropy(&self) -> f64 {
self.backing().values().map(|p| -p * p.log2()).sum()
}
pub fn completely_certain(&self) -> Option<H> {
if self.backing().len() != 1 {
None
} else {
Some(*self.backing().keys().nth(0).expect("should have one entry"))
}
}
pub fn predict(&self, study: &Study, verdict: bool) -> f64 {
self.backing().iter()
.filter(|hp| {
let h = hp.0;
h.predicts_the_property(study) == verdict
})
.map(|hp| {
let p = hp.1;
p
}).sum()
}
pub fn updated(&self, study: &Study, verdict: bool) -> Self {
let normalization_factor = 1.0/self.predict(study, verdict);
let rebacking_pairs = self.backing()
.into_iter().filter(|hp| {
let h = hp.0;
h.predicts_the_property(study) == verdict
}).map(|hp| {
let (h, p) = hp;
(*h, normalization_factor * p)
});
let rebacking = HashMap::from_iter(rebacking_pairs);
Distribution(rebacking)
}
pub fn value_of_information(&self, study: &Study) -> f64 {
let mut entropy = 0.;
let mut probability_of_the_property = 0.;
let mut probability_of_the_negation = 0.;
for (&hypothesis, &probability) in self.backing().iter() {
if hypothesis.predicts_the_property(study) {
probability_of_the_property += probability;
} else {
probability_of_the_negation += probability;
}
entropy += -probability * probability.log2();
}
let property_normalization_factor = 1./probability_of_the_property;
let negation_normalization_factor = 1./probability_of_the_negation;
let mut entropy_given_the_property = 0.;
let mut entropy_given_the_negation = 0.;
for (&hypothesis, &probability) in self.backing().iter() {
if hypothesis.predicts_the_property(study) {
let p = property_normalization_factor * probability;
entropy_given_the_property += -p * p.log2();
} else {
let p = negation_normalization_factor * probability;
entropy_given_the_negation += -p * p.log2();
}
}
let expected_entropy =
probability_of_the_property * entropy_given_the_property +
probability_of_the_negation * entropy_given_the_negation;
entropy - expected_entropy
}
pub fn burning_question(&self, desired_bits: f64, sample_cap: usize)
-> Study {
let mut study = Study::sample();
let mut value = self.value_of_information(&study);
let mut top_study = study.clone();
let mut top_value = value;
let mut samples = 1;
loop {
if value > top_value {
top_value = value;
top_study = study;
}
if (top_value > desired_bits) || (samples >= sample_cap) {
break;
}
study = Study::sample();
value = self.value_of_information(&study);
samples += 1;
}
top_study
}
pub fn inspect(&self, n: usize) {
let mut backing = self.backing().iter().collect::<Vec<_>>();
backing.sort_by(|a, b| b.1.partial_cmp(a.1).unwrap_or(Ordering::Equal));
let total_probability_mass: f64 = backing.iter()
.map(|hp| { hp.1 }).sum();
println!("Total probability mass: {:.6}", total_probability_mass);
println!("Top {} hypotheses:", n);
for &(&hypothesis, &probability) in backing.iter().take(n) {
wrapln!(" * {}: {}", hypothesis.description(),
Style::new().bold().paint(&format!("{:.4}", probability)));
}
}
}
pub fn complexity_prior(basic_hypotheses: Vec<BasicHypothesis>)
-> Distribution<JoinedHypothesis> {
let mut prebacking = HashMap::<JoinedHypothesis, f64>::new();
// just a guess; we'll have to normalize later to get a real probability
let weight_each_basic = (2./3.)/(basic_hypotheses.len() as f64);
let weight_each_joined = (1./3.)/(basic_hypotheses.len().pow(2) as f64);
for &basic in &basic_hypotheses {
prebacking.insert(JoinedHypothesis::full_stop(basic),
weight_each_basic);
}
for (i, &one_basic) in basic_hypotheses.iter().enumerate() {
for (j, &another_basic) in basic_hypotheses.iter().enumerate() {
if j <= i {
continue;
}
if one_basic.obviates(&another_basic) ||
another_basic.obviates(&one_basic) {
continue;
}
let conjunction = JoinedHypothesis::and(one_basic, another_basic);
let disjunction = JoinedHypothesis::or(one_basic, another_basic);
for &junction in &vec![conjunction, disjunction] {
if junction.check_substantiality(100) {
prebacking.insert(junction, weight_each_joined);
}
}
}
}
let total_mass: f64 = prebacking.iter().map(|hp| { hp.1 }).sum();
let normalization_factor = 1.0/total_mass;
let backing_pairs = prebacking.into_iter()
.map(|hp| {
let (h, p) = hp;
(h, normalization_factor * p)
});
let backing = HashMap::from_iter(backing_pairs);
Distribution(backing)
}
#[cfg(test)]
mod tests {
use test::Bencher;
use super::*;
use triangles::{Color, Size, Stack, Study, Triangle};
use inference::triangle::hypotheses::{BasicHypothesis, JoinedHypothesis};
use inference::triangle::hypotheses::color_count_boundedness::ColorCountBoundednessHypothesis;
#[test]
fn concerning_updating_your_bayesian_distribution() {
// Suppose we think the hypotheses "A study has the property if it has
// at least 1 triangle of color C" for C in {Red, Green, Blue, Yellow}
// are all equally likely, and that we aren't considering any other
// alternatives.
let hypotheses = vec![Color::Red, Color::Green,
Color::Blue, Color::Yellow].iter()
.map(|&c| ColorCountBoundednessHypothesis::new_lower(c, 1))
.collect::<Vec<_>>();
let prior = Distribution::ignorance_prior(hypotheses);
// If we learn that a study consisting of Red and Yellow triangles does
// not have the property, then we think that C = Green or Blue are
// equally likely.
let beliefs = prior.updated(
&study!(stack!(Triangle::new(Color::Red, Size::One),
Triangle::new(Color::Yellow, Size::One))), false);
let probability_c_is_blue = beliefs.belief(
ColorCountBoundednessHypothesis::new_lower(Color::Blue, 1));
let probability_c_is_green = beliefs.belief(
ColorCountBoundednessHypothesis::new_lower(Color::Green, 1));
assert_eq!(probability_c_is_blue, 0.5);
assert_eq!(probability_c_is_green, 0.5);
}
#[ignore] // TODO investigate and repair test
#[test]
fn concerning_soundness_of_our_complexity_penalty() {
// ⎲ ∞
// ⎳ i=1 1/2^i = 1
//
// So ... I want to give conjunctions and disjunctions a lower prior
// probability, but I'm running into the same philosophical difficulty
// that I ran into when I was first sketching out the number game, as
// accounted in the README: if the true meaning of the complexity
// penalty is that the hypothesis "A" gets to sum over the unspecified
// details borne by the more complicated hypotheses "A ∧ B" and "A ∧
// C", then it's not clear how this insight translates to this setting,
// where we want to represent our knowledge as a collection of mutually
// exclusive hypotheses: we don't care about being able to refine a
// true-but-vague theory to a true-but-more-precise theory; we want to
// say that the precise theory is true and that all others are false.
//
// Probably the real answer is that this game just isn't very
// philosophically interesting: we should have a complexity penalty to
// exactly the extent that we think the human property-specifiers the
// engine will face are going to choose disjunctions or disjunctions
// less often than a uniform sample over distinct hypotheses would.
let basics = vec![
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(Color::Blue, 1)),
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(Color::Red, 1))
];
let distribution = complexity_prior(basics);
assert_eq!(1./3.,
distribution.belief(JoinedHypothesis::full_stop(
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Blue, 1)))));
assert_eq!(1./12.,
distribution.belief(JoinedHypothesis::and(
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Blue, 1)),
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Red, 1)))));
}
#[bench]
fn concerning_the_expense_of_updating(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.updated(&Study::sample(), true);
});
}
#[bench]
fn concerning_the_expense_of_computing_entropy(bencher: &mut Bencher) {
| nch]
fn concerning_the_expense_of_prediction(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.predict(&Study::sample(), true);
});
}
#[bench]
fn concerning_the_expense_of_the_value(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.value_of_information(&Study::sample());
});
}
}
| let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.entropy()
});
}
#[be | identifier_body |
mod.rs | #![allow(dead_code)]
pub mod hypotheses;
use std::collections::HashMap;
use std::hash::Hash;
use std::cmp::{Eq, Ordering};
use std::iter::FromIterator;
use ansi_term::Style;
use triangles::Study;
use inference::triangle::hypotheses::BasicHypothesis;
use inference::triangle::hypotheses::JoinedHypothesis;
pub use inference::triangle::hypotheses::standard_basics::standard_basic_hypotheses;
pub trait Hypothesis {
fn predicts_the_property(&self, study: &Study) -> bool;
fn description(&self) -> String;
}
#[derive(Debug)]
pub struct Distribution<H: Hypothesis + Hash + Eq>(HashMap<H, f64>);
impl<H: Hypothesis + Hash + Eq + Copy> Distribution<H> {
pub fn new() -> Self {
let backing = HashMap::<H, f64>::new();
Distribution(backing)
}
pub fn ignorance_prior(hypotheses: Vec<H>) -> Self {
let mut backing = HashMap::<H, f64>::new();
let probability_each: f64 = 1.0/(hypotheses.len() as f64);
for hypothesis in hypotheses.into_iter() {
backing.insert(hypothesis, probability_each);
}
Distribution(backing)
}
fn backing(&self) -> &HashMap<H, f64> {
&self.0
}
fn mut_backing(&mut self) -> &mut HashMap<H, f64> {
&mut self.0
}
pub fn len(&self) -> usize {
self.backing().len()
}
pub fn hypotheses(&self) -> Vec<&H> {
self.backing().keys().collect::<Vec<_>>()
}
pub fn belief(&self, hypothesis: H) -> f64 {
*self.backing().get(&hypothesis).unwrap_or(&0.0f64)
}
pub fn entropy(&self) -> f64 {
self.backing().values().map(|p| -p * p.log2()).sum()
}
pub fn completely_certain(&self) -> Option<H> {
if self.backing().len() != 1 {
None
} else {
Some(*self.backing().keys().nth(0).expect("should have one entry"))
}
}
pub fn predict(&self, study: &Study, verdict: bool) -> f64 {
self.backing().iter()
.filter(|hp| {
let h = hp.0;
h.predicts_the_property(study) == verdict
})
.map(|hp| {
let p = hp.1;
p
}).sum()
}
pub fn updated(&self, study: &Study, verdict: bool) -> Self {
let normalization_factor = 1.0/self.predict(study, verdict);
let rebacking_pairs = self.backing()
.into_iter().filter(|hp| {
let h = hp.0;
h.predicts_the_property(study) == verdict
}).map(|hp| {
let (h, p) = hp;
(*h, normalization_factor * p)
});
let rebacking = HashMap::from_iter(rebacking_pairs);
Distribution(rebacking)
}
pub fn value_of_information(&self, study: &Study) -> f64 {
let mut entropy = 0.;
let mut probability_of_the_property = 0.;
let mut probability_of_the_negation = 0.;
for (&hypothesis, &probability) in self.backing().iter() {
if hypothesis.predicts_the_property(study) {
probability_of_the_property += probability;
} else {
probability_of_the_negation += probability;
}
entropy += -probability * probability.log2();
}
let property_normalization_factor = 1./probability_of_the_property;
let negation_normalization_factor = 1./probability_of_the_negation;
let mut entropy_given_the_property = 0.;
let mut entropy_given_the_negation = 0.;
for (&hypothesis, &probability) in self.backing().iter() {
if hypothesis.predicts_the_property(study) {
let p = property_normalization_factor * probability;
entropy_given_the_property += -p * p.log2();
} else {
let p = negation_normalization_factor * probability;
entropy_given_the_negation += -p * p.log2();
}
}
let expected_entropy =
probability_of_the_property * entropy_given_the_property +
probability_of_the_negation * entropy_given_the_negation;
entropy - expected_entropy
}
pub fn burning_question(&self, desired_bits: f64, sample_cap: usize)
-> Study {
let mut study = Study::sample();
let mut value = self.value_of_information(&study);
let mut top_study = study.clone();
let mut top_value = value;
let mut samples = 1;
loop {
if value > top_value {
top_value = value;
top_study = study;
}
if (top_value > desired_bits) || (samples >= sample_cap) {
break;
}
study = Study::sample();
value = self.value_of_information(&study);
samples += 1;
}
top_study
}
pub fn inspect(&self, n: usize) {
let mut backing = self.backing().iter().collect::<Vec<_>>();
backing.sort_by(|a, b| b.1.partial_cmp(a.1).unwrap_or(Ordering::Equal));
let total_probability_mass: f64 = backing.iter()
.map(|hp| { hp.1 }).sum();
println!("Total probability mass: {:.6}", total_probability_mass);
println!("Top {} hypotheses:", n);
for &(&hypothesis, &probability) in backing.iter().take(n) {
wrapln!(" * {}: {}", hypothesis.description(),
Style::new().bold().paint(&format!("{:.4}", probability)));
}
}
}
pub fn complexity_prior(basic_hypotheses: Vec<BasicHypothesis>)
-> Distribution<JoinedHypothesis> {
let mut prebacking = HashMap::<JoinedHypothesis, f64>::new();
// just a guess; we'll have to normalize later to get a real probability
let weight_each_basic = (2./3.)/(basic_hypotheses.len() as f64);
let weight_each_joined = (1./3.)/(basic_hypotheses.len().pow(2) as f64);
for &basic in &basic_hypotheses {
prebacking.insert(JoinedHypothesis::full_stop(basic),
weight_each_basic);
}
for (i, &one_basic) in basic_hypotheses.iter().enumerate() {
for (j, &another_basic) in basic_hypotheses.iter().enumerate() {
if j <= i {
continue;
}
if one_basic.obviates(&another_basic) ||
another_basic.obviates(&one_basic) {
continue;
}
let conjunction = JoinedHypothesis::and(one_basic, another_basic);
let disjunction = JoinedHypothesis::or(one_basic, another_basic);
for &junction in &vec![conjunction, disjunction] {
if junction.check_substantiality(100) {
prebacking.insert(junction, weight_each_joined);
}
}
}
}
let total_mass: f64 = prebacking.iter().map(|hp| { hp.1 }).sum();
let normalization_factor = 1.0/total_mass;
let backing_pairs = prebacking.into_iter()
.map(|hp| {
let (h, p) = hp;
(h, normalization_factor * p)
});
let backing = HashMap::from_iter(backing_pairs);
Distribution(backing)
}
#[cfg(test)]
mod tests {
use test::Bencher;
use super::*;
use triangles::{Color, Size, Stack, Study, Triangle};
use inference::triangle::hypotheses::{BasicHypothesis, JoinedHypothesis};
use inference::triangle::hypotheses::color_count_boundedness::ColorCountBoundednessHypothesis;
#[test]
fn concerning_updating_your_bayesian_distribution() {
// Suppose we think the hypotheses "A study has the property if it has
// at least 1 triangle of color C" for C in {Red, Green, Blue, Yellow}
// are all equally likely, and that we aren't considering any other
// alternatives.
let hypotheses = vec![Color::Red, Color::Green,
Color::Blue, Color::Yellow].iter()
.map(|&c| ColorCountBoundednessHypothesis::new_lower(c, 1))
.collect::<Vec<_>>();
let prior = Distribution::ignorance_prior(hypotheses);
// If we learn that a study consisting of Red and Yellow triangles does
// not have the property, then we think that C = Green or Blue are
// equally likely.
let beliefs = prior.updated(
&study!(stack!(Triangle::new(Color::Red, Size::One),
Triangle::new(Color::Yellow, Size::One))), false);
let probability_c_is_blue = beliefs.belief(
ColorCountBoundednessHypothesis::new_lower(Color::Blue, 1));
let probability_c_is_green = beliefs.belief(
ColorCountBoundednessHypothesis::new_lower(Color::Green, 1));
assert_eq!(probability_c_is_blue, 0.5);
assert_eq!(probability_c_is_green, 0.5);
}
#[ignore] // TODO investigate and repair test
#[test]
fn concerning_soundness_of_our_complexity_penalty() {
// ⎲ ∞ | // that I ran into when I was first sketching out the number game, as
// accounted in the README: if the true meaning of the complexity
// penalty is that the hypothesis "A" gets to sum over the unspecified
// details borne by the more complicated hypotheses "A ∧ B" and "A ∧
// C", then it's not clear how this insight translates to this setting,
// where we want to represent our knowledge as a collection of mutually
// exclusive hypotheses: we don't care about being able to refine a
// true-but-vague theory to a true-but-more-precise theory; we want to
// say that the precise theory is true and that all others are false.
//
// Probably the real answer is that this game just isn't very
// philosophically interesting: we should have a complexity penalty to
// exactly the extent that we think the human property-specifiers the
// engine will face are going to choose disjunctions or disjunctions
// less often than a uniform sample over distinct hypotheses would.
let basics = vec![
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(Color::Blue, 1)),
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(Color::Red, 1))
];
let distribution = complexity_prior(basics);
assert_eq!(1./3.,
distribution.belief(JoinedHypothesis::full_stop(
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Blue, 1)))));
assert_eq!(1./12.,
distribution.belief(JoinedHypothesis::and(
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Blue, 1)),
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Red, 1)))));
}
#[bench]
fn concerning_the_expense_of_updating(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.updated(&Study::sample(), true);
});
}
#[bench]
fn concerning_the_expense_of_computing_entropy(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.entropy()
});
}
#[bench]
fn concerning_the_expense_of_prediction(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.predict(&Study::sample(), true);
});
}
#[bench]
fn concerning_the_expense_of_the_value(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.value_of_information(&Study::sample());
});
}
} | // ⎳ i=1 1/2^i = 1
//
// So ... I want to give conjunctions and disjunctions a lower prior
// probability, but I'm running into the same philosophical difficulty | random_line_split |
mod.rs | #![allow(dead_code)]
pub mod hypotheses;
use std::collections::HashMap;
use std::hash::Hash;
use std::cmp::{Eq, Ordering};
use std::iter::FromIterator;
use ansi_term::Style;
use triangles::Study;
use inference::triangle::hypotheses::BasicHypothesis;
use inference::triangle::hypotheses::JoinedHypothesis;
pub use inference::triangle::hypotheses::standard_basics::standard_basic_hypotheses;
pub trait Hypothesis {
fn predicts_the_property(&self, study: &Study) -> bool;
fn description(&self) -> String;
}
#[derive(Debug)]
pub struct Distribution<H: Hypothesis + Hash + Eq>(HashMap<H, f64>);
impl<H: Hypothesis + Hash + Eq + Copy> Distribution<H> {
pub fn new() -> Self {
let backing = HashMap::<H, f64>::new();
Distribution(backing)
}
pub fn ignorance_prior(hypotheses: Vec<H>) -> Self {
let mut backing = HashMap::<H, f64>::new();
let probability_each: f64 = 1.0/(hypotheses.len() as f64);
for hypothesis in hypotheses.into_iter() {
backing.insert(hypothesis, probability_each);
}
Distribution(backing)
}
fn backing(&self) -> &HashMap<H, f64> {
&self.0
}
fn mut_backing(&mut self) -> &mut HashMap<H, f64> {
&mut self.0
}
pub fn len(&self) -> usize {
self.backing().len()
}
pub fn hypotheses(&self) -> Vec<&H> {
self.backing().keys().collect::<Vec<_>>()
}
pub fn belief(&self, hypothesis: H) -> f64 {
*self.backing().get(&hypothesis).unwrap_or(&0.0f64)
}
pub fn entropy(&self) -> f64 {
self.backing().values().map(|p| -p * p.log2()).sum()
}
pub fn completely_certain(&self) -> Option<H> {
if self.backing().len() != 1 {
None
} else {
Some(*self.backing().keys().nth(0).expect("should have one entry"))
}
}
pub fn predict(&self, study: &Study, verdict: bool) -> f64 {
self.backing().iter()
.filter(|hp| {
let h = hp.0;
h.predicts_the_property(study) == verdict
})
.map(|hp| {
let p = hp.1;
p
}).sum()
}
pub fn updated(&self, study: &Study, verdict: bool) -> Self {
let normalization_factor = 1.0/self.predict(study, verdict);
let rebacking_pairs = self.backing()
.into_iter().filter(|hp| {
let h = hp.0;
h.predicts_the_property(study) == verdict
}).map(|hp| {
let (h, p) = hp;
(*h, normalization_factor * p)
});
let rebacking = HashMap::from_iter(rebacking_pairs);
Distribution(rebacking)
}
pub fn value_of_information(&self, study: &Study) -> f64 {
let mut entropy = 0.;
let mut probability_of_the_property = 0.;
let mut probability_of_the_negation = 0.;
for (&hypothesis, &probability) in self.backing().iter() {
if hypothesis.predicts_the_property(study) {
probability_of_the_property += probability;
} else {
probability_of_the_negation += probability;
}
entropy += -probability * probability.log2();
}
let property_normalization_factor = 1./probability_of_the_property;
let negation_normalization_factor = 1./probability_of_the_negation;
let mut entropy_given_the_property = 0.;
let mut entropy_given_the_negation = 0.;
for (&hypothesis, &probability) in self.backing().iter() {
if hypothesis.predicts_the_property(study) {
let p = property_normalization_factor * probability;
entropy_given_the_property += -p * p.log2();
} else {
let p = negation_normalization_factor * probability;
entropy_given_the_negation += -p * p.log2();
}
}
let expected_entropy =
probability_of_the_property * entropy_given_the_property +
probability_of_the_negation * entropy_given_the_negation;
entropy - expected_entropy
}
pub fn | (&self, desired_bits: f64, sample_cap: usize)
-> Study {
let mut study = Study::sample();
let mut value = self.value_of_information(&study);
let mut top_study = study.clone();
let mut top_value = value;
let mut samples = 1;
loop {
if value > top_value {
top_value = value;
top_study = study;
}
if (top_value > desired_bits) || (samples >= sample_cap) {
break;
}
study = Study::sample();
value = self.value_of_information(&study);
samples += 1;
}
top_study
}
pub fn inspect(&self, n: usize) {
let mut backing = self.backing().iter().collect::<Vec<_>>();
backing.sort_by(|a, b| b.1.partial_cmp(a.1).unwrap_or(Ordering::Equal));
let total_probability_mass: f64 = backing.iter()
.map(|hp| { hp.1 }).sum();
println!("Total probability mass: {:.6}", total_probability_mass);
println!("Top {} hypotheses:", n);
for &(&hypothesis, &probability) in backing.iter().take(n) {
wrapln!(" * {}: {}", hypothesis.description(),
Style::new().bold().paint(&format!("{:.4}", probability)));
}
}
}
pub fn complexity_prior(basic_hypotheses: Vec<BasicHypothesis>)
-> Distribution<JoinedHypothesis> {
let mut prebacking = HashMap::<JoinedHypothesis, f64>::new();
// just a guess; we'll have to normalize later to get a real probability
let weight_each_basic = (2./3.)/(basic_hypotheses.len() as f64);
let weight_each_joined = (1./3.)/(basic_hypotheses.len().pow(2) as f64);
for &basic in &basic_hypotheses {
prebacking.insert(JoinedHypothesis::full_stop(basic),
weight_each_basic);
}
for (i, &one_basic) in basic_hypotheses.iter().enumerate() {
for (j, &another_basic) in basic_hypotheses.iter().enumerate() {
if j <= i {
continue;
}
if one_basic.obviates(&another_basic) ||
another_basic.obviates(&one_basic) {
continue;
}
let conjunction = JoinedHypothesis::and(one_basic, another_basic);
let disjunction = JoinedHypothesis::or(one_basic, another_basic);
for &junction in &vec![conjunction, disjunction] {
if junction.check_substantiality(100) {
prebacking.insert(junction, weight_each_joined);
}
}
}
}
let total_mass: f64 = prebacking.iter().map(|hp| { hp.1 }).sum();
let normalization_factor = 1.0/total_mass;
let backing_pairs = prebacking.into_iter()
.map(|hp| {
let (h, p) = hp;
(h, normalization_factor * p)
});
let backing = HashMap::from_iter(backing_pairs);
Distribution(backing)
}
#[cfg(test)]
mod tests {
use test::Bencher;
use super::*;
use triangles::{Color, Size, Stack, Study, Triangle};
use inference::triangle::hypotheses::{BasicHypothesis, JoinedHypothesis};
use inference::triangle::hypotheses::color_count_boundedness::ColorCountBoundednessHypothesis;
#[test]
fn concerning_updating_your_bayesian_distribution() {
// Suppose we think the hypotheses "A study has the property if it has
// at least 1 triangle of color C" for C in {Red, Green, Blue, Yellow}
// are all equally likely, and that we aren't considering any other
// alternatives.
let hypotheses = vec![Color::Red, Color::Green,
Color::Blue, Color::Yellow].iter()
.map(|&c| ColorCountBoundednessHypothesis::new_lower(c, 1))
.collect::<Vec<_>>();
let prior = Distribution::ignorance_prior(hypotheses);
// If we learn that a study consisting of Red and Yellow triangles does
// not have the property, then we think that C = Green or Blue are
// equally likely.
let beliefs = prior.updated(
&study!(stack!(Triangle::new(Color::Red, Size::One),
Triangle::new(Color::Yellow, Size::One))), false);
let probability_c_is_blue = beliefs.belief(
ColorCountBoundednessHypothesis::new_lower(Color::Blue, 1));
let probability_c_is_green = beliefs.belief(
ColorCountBoundednessHypothesis::new_lower(Color::Green, 1));
assert_eq!(probability_c_is_blue, 0.5);
assert_eq!(probability_c_is_green, 0.5);
}
#[ignore] // TODO investigate and repair test
#[test]
fn concerning_soundness_of_our_complexity_penalty() {
// ⎲ ∞
// ⎳ i=1 1/2^i = 1
//
// So ... I want to give conjunctions and disjunctions a lower prior
// probability, but I'm running into the same philosophical difficulty
// that I ran into when I was first sketching out the number game, as
// accounted in the README: if the true meaning of the complexity
// penalty is that the hypothesis "A" gets to sum over the unspecified
// details borne by the more complicated hypotheses "A ∧ B" and "A ∧
// C", then it's not clear how this insight translates to this setting,
// where we want to represent our knowledge as a collection of mutually
// exclusive hypotheses: we don't care about being able to refine a
// true-but-vague theory to a true-but-more-precise theory; we want to
// say that the precise theory is true and that all others are false.
//
// Probably the real answer is that this game just isn't very
// philosophically interesting: we should have a complexity penalty to
// exactly the extent that we think the human property-specifiers the
// engine will face are going to choose disjunctions or disjunctions
// less often than a uniform sample over distinct hypotheses would.
let basics = vec![
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(Color::Blue, 1)),
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(Color::Red, 1))
];
let distribution = complexity_prior(basics);
assert_eq!(1./3.,
distribution.belief(JoinedHypothesis::full_stop(
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Blue, 1)))));
assert_eq!(1./12.,
distribution.belief(JoinedHypothesis::and(
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Blue, 1)),
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Red, 1)))));
}
#[bench]
fn concerning_the_expense_of_updating(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.updated(&Study::sample(), true);
});
}
#[bench]
fn concerning_the_expense_of_computing_entropy(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.entropy()
});
}
#[bench]
fn concerning_the_expense_of_prediction(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.predict(&Study::sample(), true);
});
}
#[bench]
fn concerning_the_expense_of_the_value(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.value_of_information(&Study::sample());
});
}
}
| burning_question | identifier_name |
mod.rs | #![allow(dead_code)]
pub mod hypotheses;
use std::collections::HashMap;
use std::hash::Hash;
use std::cmp::{Eq, Ordering};
use std::iter::FromIterator;
use ansi_term::Style;
use triangles::Study;
use inference::triangle::hypotheses::BasicHypothesis;
use inference::triangle::hypotheses::JoinedHypothesis;
pub use inference::triangle::hypotheses::standard_basics::standard_basic_hypotheses;
pub trait Hypothesis {
fn predicts_the_property(&self, study: &Study) -> bool;
fn description(&self) -> String;
}
#[derive(Debug)]
pub struct Distribution<H: Hypothesis + Hash + Eq>(HashMap<H, f64>);
impl<H: Hypothesis + Hash + Eq + Copy> Distribution<H> {
pub fn new() -> Self {
let backing = HashMap::<H, f64>::new();
Distribution(backing)
}
pub fn ignorance_prior(hypotheses: Vec<H>) -> Self {
let mut backing = HashMap::<H, f64>::new();
let probability_each: f64 = 1.0/(hypotheses.len() as f64);
for hypothesis in hypotheses.into_iter() {
backing.insert(hypothesis, probability_each);
}
Distribution(backing)
}
fn backing(&self) -> &HashMap<H, f64> {
&self.0
}
fn mut_backing(&mut self) -> &mut HashMap<H, f64> {
&mut self.0
}
pub fn len(&self) -> usize {
self.backing().len()
}
pub fn hypotheses(&self) -> Vec<&H> {
self.backing().keys().collect::<Vec<_>>()
}
pub fn belief(&self, hypothesis: H) -> f64 {
*self.backing().get(&hypothesis).unwrap_or(&0.0f64)
}
pub fn entropy(&self) -> f64 {
self.backing().values().map(|p| -p * p.log2()).sum()
}
pub fn completely_certain(&self) -> Option<H> {
if self.backing().len() != 1 {
None
} else {
Some(*self.backing().keys().nth(0).expect("should have one entry"))
}
}
pub fn predict(&self, study: &Study, verdict: bool) -> f64 {
self.backing().iter()
.filter(|hp| {
let h = hp.0;
h.predicts_the_property(study) == verdict
})
.map(|hp| {
let p = hp.1;
p
}).sum()
}
pub fn updated(&self, study: &Study, verdict: bool) -> Self {
let normalization_factor = 1.0/self.predict(study, verdict);
let rebacking_pairs = self.backing()
.into_iter().filter(|hp| {
let h = hp.0;
h.predicts_the_property(study) == verdict
}).map(|hp| {
let (h, p) = hp;
(*h, normalization_factor * p)
});
let rebacking = HashMap::from_iter(rebacking_pairs);
Distribution(rebacking)
}
pub fn value_of_information(&self, study: &Study) -> f64 {
let mut entropy = 0.;
let mut probability_of_the_property = 0.;
let mut probability_of_the_negation = 0.;
for (&hypothesis, &probability) in self.backing().iter() {
if hypothesis.predicts_the_property(study) {
probability_of_the_property += probability;
} else {
probability_of_the_negation += probability;
}
entropy += -probability * probability.log2();
}
let property_normalization_factor = 1./probability_of_the_property;
let negation_normalization_factor = 1./probability_of_the_negation;
let mut entropy_given_the_property = 0.;
let mut entropy_given_the_negation = 0.;
for (&hypothesis, &probability) in self.backing().iter() {
if hypothesis.predicts_the_property(study) {
let p = property_normalization_factor * probability;
entropy_given_the_property += -p * p.log2();
} else {
let p = negation_normalization_factor * probability;
entropy_given_the_negation += -p * p.log2();
}
}
let expected_entropy =
probability_of_the_property * entropy_given_the_property +
probability_of_the_negation * entropy_given_the_negation;
entropy - expected_entropy
}
pub fn burning_question(&self, desired_bits: f64, sample_cap: usize)
-> Study {
let mut study = Study::sample();
let mut value = self.value_of_information(&study);
let mut top_study = study.clone();
let mut top_value = value;
let mut samples = 1;
loop {
if value > top_value {
top_value = value;
top_study = study;
}
if (top_value > desired_bits) || (samples >= sample_cap) |
study = Study::sample();
value = self.value_of_information(&study);
samples += 1;
}
top_study
}
pub fn inspect(&self, n: usize) {
let mut backing = self.backing().iter().collect::<Vec<_>>();
backing.sort_by(|a, b| b.1.partial_cmp(a.1).unwrap_or(Ordering::Equal));
let total_probability_mass: f64 = backing.iter()
.map(|hp| { hp.1 }).sum();
println!("Total probability mass: {:.6}", total_probability_mass);
println!("Top {} hypotheses:", n);
for &(&hypothesis, &probability) in backing.iter().take(n) {
wrapln!(" * {}: {}", hypothesis.description(),
Style::new().bold().paint(&format!("{:.4}", probability)));
}
}
}
pub fn complexity_prior(basic_hypotheses: Vec<BasicHypothesis>)
-> Distribution<JoinedHypothesis> {
let mut prebacking = HashMap::<JoinedHypothesis, f64>::new();
// just a guess; we'll have to normalize later to get a real probability
let weight_each_basic = (2./3.)/(basic_hypotheses.len() as f64);
let weight_each_joined = (1./3.)/(basic_hypotheses.len().pow(2) as f64);
for &basic in &basic_hypotheses {
prebacking.insert(JoinedHypothesis::full_stop(basic),
weight_each_basic);
}
for (i, &one_basic) in basic_hypotheses.iter().enumerate() {
for (j, &another_basic) in basic_hypotheses.iter().enumerate() {
if j <= i {
continue;
}
if one_basic.obviates(&another_basic) ||
another_basic.obviates(&one_basic) {
continue;
}
let conjunction = JoinedHypothesis::and(one_basic, another_basic);
let disjunction = JoinedHypothesis::or(one_basic, another_basic);
for &junction in &vec![conjunction, disjunction] {
if junction.check_substantiality(100) {
prebacking.insert(junction, weight_each_joined);
}
}
}
}
let total_mass: f64 = prebacking.iter().map(|hp| { hp.1 }).sum();
let normalization_factor = 1.0/total_mass;
let backing_pairs = prebacking.into_iter()
.map(|hp| {
let (h, p) = hp;
(h, normalization_factor * p)
});
let backing = HashMap::from_iter(backing_pairs);
Distribution(backing)
}
#[cfg(test)]
mod tests {
use test::Bencher;
use super::*;
use triangles::{Color, Size, Stack, Study, Triangle};
use inference::triangle::hypotheses::{BasicHypothesis, JoinedHypothesis};
use inference::triangle::hypotheses::color_count_boundedness::ColorCountBoundednessHypothesis;
#[test]
fn concerning_updating_your_bayesian_distribution() {
// Suppose we think the hypotheses "A study has the property if it has
// at least 1 triangle of color C" for C in {Red, Green, Blue, Yellow}
// are all equally likely, and that we aren't considering any other
// alternatives.
let hypotheses = vec![Color::Red, Color::Green,
Color::Blue, Color::Yellow].iter()
.map(|&c| ColorCountBoundednessHypothesis::new_lower(c, 1))
.collect::<Vec<_>>();
let prior = Distribution::ignorance_prior(hypotheses);
// If we learn that a study consisting of Red and Yellow triangles does
// not have the property, then we think that C = Green or Blue are
// equally likely.
let beliefs = prior.updated(
&study!(stack!(Triangle::new(Color::Red, Size::One),
Triangle::new(Color::Yellow, Size::One))), false);
let probability_c_is_blue = beliefs.belief(
ColorCountBoundednessHypothesis::new_lower(Color::Blue, 1));
let probability_c_is_green = beliefs.belief(
ColorCountBoundednessHypothesis::new_lower(Color::Green, 1));
assert_eq!(probability_c_is_blue, 0.5);
assert_eq!(probability_c_is_green, 0.5);
}
#[ignore] // TODO investigate and repair test
#[test]
fn concerning_soundness_of_our_complexity_penalty() {
// ⎲ ∞
// ⎳ i=1 1/2^i = 1
//
// So ... I want to give conjunctions and disjunctions a lower prior
// probability, but I'm running into the same philosophical difficulty
// that I ran into when I was first sketching out the number game, as
// accounted in the README: if the true meaning of the complexity
// penalty is that the hypothesis "A" gets to sum over the unspecified
// details borne by the more complicated hypotheses "A ∧ B" and "A ∧
// C", then it's not clear how this insight translates to this setting,
// where we want to represent our knowledge as a collection of mutually
// exclusive hypotheses: we don't care about being able to refine a
// true-but-vague theory to a true-but-more-precise theory; we want to
// say that the precise theory is true and that all others are false.
//
// Probably the real answer is that this game just isn't very
// philosophically interesting: we should have a complexity penalty to
// exactly the extent that we think the human property-specifiers the
// engine will face are going to choose disjunctions or disjunctions
// less often than a uniform sample over distinct hypotheses would.
let basics = vec![
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(Color::Blue, 1)),
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(Color::Red, 1))
];
let distribution = complexity_prior(basics);
assert_eq!(1./3.,
distribution.belief(JoinedHypothesis::full_stop(
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Blue, 1)))));
assert_eq!(1./12.,
distribution.belief(JoinedHypothesis::and(
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Blue, 1)),
BasicHypothesis::from(
ColorCountBoundednessHypothesis::new_lower(
Color::Red, 1)))));
}
#[bench]
fn concerning_the_expense_of_updating(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.updated(&Study::sample(), true);
});
}
#[bench]
fn concerning_the_expense_of_computing_entropy(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.entropy()
});
}
#[bench]
fn concerning_the_expense_of_prediction(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.predict(&Study::sample(), true);
});
}
#[bench]
fn concerning_the_expense_of_the_value(bencher: &mut Bencher) {
let distribution = complexity_prior(standard_basic_hypotheses());
bencher.iter(|| {
distribution.value_of_information(&Study::sample());
});
}
}
| {
break;
} | conditional_block |
ppno.py | # -*- coding: utf-8 -*-
"""PRESSURIZED PIPE NETWORK OPTIMIZER
Andrés García Martínez (ppnoptimizer@gmail.com)
Licensed under the Apache License 2.0. http://www.apache.org/licenses/
"""
from sys import argv
from time import clock, localtime, strftime
import numpy as np
import toolkit as et
import htxt as ht
A_GD = 0
A_DE = 1
A_DA = 2
PENALTY = 1e24
class Ppn():
'''Base class for a presurized pipe network optimization problem
inpfn: str, definition problem file name (.ext)
'''
def __init__(self, problemfn):
'''problemfn: file name
definition problem file name'''
# READ PROBLEM FILE NAME
myht = ht.Htxtf(problemfn)
sections = myht.read()
# READ EPANET MODEL INP FILE
self.inpfn = sections['INP'][0]
# OPEN EPANET MODEL AND HYDRAULIC MODEL
et.ENopen(self.inpfn, self.inpfn[:-4]+'.rpt')
et.ENopenH()
print('-'*80)
print('DATA')
print('Network: %s' %(self.inpfn))
# READ OPTIONS
msg = 'The algorithm selected is: '
for line in sections['OPTIONS']:
key,value = myht.line_to_tuple(line)
if key.upper() == 'ALGORITHM':
if value == 'GD':
self.algorithm = A_GD
msg += 'Gradient Descent.'
elif value == 'DE':
self.algorithm = A_DE
msg += 'Differential Evolution.'
elif value == 'DA':
self.algorithm = A_DA
msg += 'Dual Annaeling.'
elif key.upper() == 'POLISH':
if value.upper() in ['YES', 'Y']:
self.polish = True
msg += ' A final polish was selected.'
else:
self.polish = False
msg += ' A final polish was not selected.'
print(msg)
# READ PIPES
# pipes: numpy array of labeled tuples ('ix','id','length','series'), where
# ix: int, epanet pipe index
# id: str, epanet pipe ID
# length: float, epaneht pipe lenghth
# series: str, pipe series in catalog
dt=np.dtype([('ix','i4'),('id','U16'),('length','f4'),('series','U16')])
tmp = []
for line in sections['PIPES']:
ide,series = myht.line_to_tuple(line)
ix = et.ENgetlinkindex(ide)
l = et.ENgetlinkvalue(ix,et.EN_LENGTH)
tmp.append((ix,ide,l,series))
self.pipes = np.array(tmp,dt)
print('%i pipe/s to size was/were loaded.' %(len(self.pipes)))
# READ PRESSURES
# nodes: numpy array of labeled tuples ('ix','id','pressure'), where
# ix: int, epanet node index
# id: str, epanet node ID
# pressure: float, min pressure required
dt = np.dtype([('ix','i4'),('id','U16'),('pressure','f4')])
tmp = []
for line in sections['PRESSURES']:
ide,p = myht.line_to_tuple(line)
ix = (et.ENgetnodeindex(ide))
tmp.append((ix,ide,p))
self.nodes = np.array(tmp, dtype = dt)
print('%i node/s to check was/were loaded.' %(len(self.nodes)))
# READ CATALOG
dt = np.dtype([('diameter','f4'),('roughness','f4'),('price','f4')])
self.catalog = {}
tmp = set()
for pipe in np.nditer(self.pipes):
tmp.add(str(pipe['series']))
print('%i series/s was/were required.'%(len(tmp)), end='')
for seriesname in tmp:
self.catalog[seriesname] = []
for line in sections['CATALOG']:
sn,d,r,p = myht.line_to_tuple(line)
if sn in tmp:
self.catalog[sn].append((d,r,p))
# READ SERIES
# catalog: dictionary of series {'series' : series}, where
# series: numpy array of labeled numpy tuples ('diameter','roughness','price'),
# where
# diameter: float, pipe diameter
# roughness: float, pipe roughness
# price: float, pipe price
for series in self.catalog:
tmp = self.catalog[series].copy()
self.catalog[series] = np.array(tmp, dtype = dt)
self.catalog[series].sort()
print(' %i series/s was/were loaded.' %(len(self.catalog)))
# DEFINE VARIABLE, DIMENSION AND BOUNDS
self.dimension = len(self.pipes)
self._x = np.zeros(self.dimension, dtype=np.int)
tmp = []
for pipe in self.pipes:
tmp.append(len(self.catalog[pipe['series']])-1)
self.lbound = np.zeros(self.dimension, dtype=np.int)
self.ubound = np.array(tmp, dtype=np.int)
print('-'*80)
def set_x(self, x):
'''Set x updating the hydraulic model
x: numpy array of integers containing the size of the pipes, where
size: int, index of series in catalog.
'''
self._x = x
self._update()
def get_x(self):
'''Return x
'''
return self._x
def _update(self):
'''Update pipe diameter and roughness in the epanet model
'''
for index,pipe in np.ndenumerate(self.pipes):
ix = pipe['ix']
series = self.catalog[pipe['series']]
size = int(self._x[index])
d = series[size]['diameter']
r = series[size]['roughness']
et.ENsetlinkvalue(ix,et.EN_DIAMETER,d)
et.ENsetlinkvalue(ix,et.EN_ROUGHNESS,r)
def check(self, mode='TF'):
'''Run a check of the pressures in the epanet model
mode: str, can be: 'TF', 'GD', 'PD'
Return
------
Accordig to mode, returns:
'TF', status: boolean, calculated pressures are not lower than required
'GD', (status,headlosses): tuple, where
headlosses: numpy descend ordered array by headloss pipe index
where index: int, is the index of pipe in pipes (not epanet ix).
'PD', deficits: numpy array. Nodal pressure deficits, where
deficit: float, = required presure - calculated pressure;
array index corresponds with node in nodes (not epanet ix).
'''
# DEFINE NUMPY ARRAYS
if mode=='PD':
deficits=np.array([np.inf for node in self.nodes],dtype=np.float32)
if mode=='GD':
dt = np.dtype([('index','i4'),('hl','f4')])
pipehls=np.array([(i, 0.0) for i in range(len(self.pipes))],dtype=dt)
# SOLVE HYDRAULIC MODEL
status = True
et.ENinitH(0)
while True:
# RUN A STEP
et.ENrunH()
# CHECK PRESSURES IN NODES
for index,node in np.ndenumerate(self.nodes):
ix = int(node['ix'])
cp = et.ENgetnodevalue(ix,et.EN_PRESSURE)
rp = node['pressure']
nodaldeficit = rp - cp
if nodaldeficit > 0:
status = False
# NOT NECCESSARY RETURN HEADLOSS OR PRESSURE SO EXIT
if mode == 'TF':
return status
# UPDATE DEFICIT ARRAY
if mode == 'PD':
if deficits[index] < nodaldeficit:
deficits[index] = nodaldeficit
# CALCULATE MAXIMUM UNITARY HEADLOSS ARRAY
if mode == 'GD':
for pipe in np.nditer(pipehls):
index = pipe['index']
ix = int(self.pipes[pipe['index']]['ix'])
hl = et.ENgetlinkvalue(ix,et.EN_HEADLOSS)
if pipehls[index]['hl'] < hl:
pipehls[index]['hl'] = hl
# END OF SIMULATON
if et.ENnextH() ==0:
break
# SORT HEADLOSS PIPES
if mode == 'GD':
tmp = np.sort(pipehls, order='hl')[::-1]
headlosses = np.array(tmp[:]['index'], dtype=np.int)
# RESULT
if mode == 'TF':
return status
elif mode == 'GD':
return (status,headlosses)
| return deficits
def save_file(self,fn):
'''Save inp file updating d and roughness'''
# UPDATE AND SAVE MODEL
et.ENsaveinpfile(fn)
def get_cost(self):
'''Return the network cost. Sum of length x price for each pipe'''
acumulate = 0.0
x = self.get_x()
for index,pipe in np.ndenumerate(self.pipes):
l = pipe['length']
p = self.catalog[pipe['series']][x[index]]['price']
acumulate += l*p
return acumulate
# SOLVER
def solve(self):
'''Run the optimization of the pressurized pipe network
Return
------
The best solution found , where
solution: numpy int array, sizes of pipes, according to series.
If no solution is found return None.
The optimized epanet model is saved in a new file.
'''
startime = clock()
solution = None
reducted = False
print('SOLVING')
print('The solver started at: ' + strftime("%H:%M:%S", localtime()))
# SELECT ALGORITHM
if self.algorithm == A_GD:
# GRADIENT DESCENT ALGORITHM
print('*** GRADIENT DESCENT ALGORITHM ***')
# SET TO 0 AND INITIAL PRESSURE CHECKING
self.set_x(np.zeros(self.dimension, dtype=np.int))
while True:
# CHECK PRESSURES
status,headlosses = self.check(mode='GD')
if status:
# PRESSURES OK END OF LOOP
break
# INCREASE DIAMETER
for index in np.nditer(headlosses):
x = self.get_x()
if x[index] < self.ubound[index]:
x[index] += 1
self.set_x(x)
break
if status:
solution = self.get_x().copy()
if self.algorithm in [A_DE, A_DA]:
# DIFFEERENTIAL EVOLUTION / DUAL ANNEALING ALGORITHM
# SET BOUNDS
tmp = list(zip(self.lbound,self.ubound))
self.bounds = np.array(tmp, dtype = np.int)
def objetive(x):
self.set_x(np.array([round(i) for i in x[:]], np.int))
if self.check(mode='TF'):
return self.get_cost()
else:
return PENALTY
# SOLVE
if self.algorithm == A_DE:
# DIFFEERENTIAL EVOLUTION
from scipy.optimize import differential_evolution
print('*** DIFFERENTIAL EVOLUTION ALGORITHM ***')
result = differential_evolution(objetive, self.bounds)
else:
# DUAL ANNEALING ALGORITHM
from scipy.optimize import dual_annealing
print('*** DUAL ANNEALING ALGORITHM ***')
result = dual_annealing(objetive, self.bounds)
# CHECK
tmp = [round(i) for i in result.x[:]]
tmp = np.array(tmp, dtype=np.int)
self.set_x(tmp)
if self.check(mode='TF'):
solution = self.get_x().copy()
else:
solution = None
if self.polish and (type(solution) != type(None)):
# POLISH ALGORITHM
maxredxset = [0.0,[]]
def search_reduc(savings, redxset):
'''
Searh possible reduction of pipe diameters
redxset: list of ordered by index pipe-set which diameter can
be reduced 1-step according to pipe series.
savings: reduction of cost reached applying redxset
If a pipe can be reduced, it is added, starting a recursively
precces that stop when no pipe can be reduced, then the reduction
cost is compared whith previous max reduccion, updating it.
Return
------
Update maxredset
'''
changes = False
# SET TO SOL - REDUCTIONS
newx = solution.copy()
if len(redxset) > 0:
start = redxset[-1]
else:
start = 0
for i in redxset[:]:
newx[i] -=1
# SEARCH FOR A POSSIBLE REDUCIBLE PIPE
for i in range(start,len(self._x)):
if newx[i] > 0:
# REDUCE DIAMETER
newx[i] -= 1
# CHECK PRESSURES
self.set_x(newx)
if self.check(mode='TF'):
# ACEPPT CHANGES
changes = True
series = self.catalog[self.pipes[i]['series']]
c1 = series[newx[i]+1]['price']
c2 = series[newx[i]]['price']
l = self.pipes[i]['length']
newsavings = savings+(c1-c2)*l
newredxset = redxset.copy()
newredxset.append(i)
search_reduc(newsavings, newredxset)
else:
# UNDO
newx[i] += 1
if not changes:
# CHECK AND UPDATE MAX REDUCTION SET
if savings > maxredxset[0]:
maxredxset[0] = savings
maxredxset[1] = redxset
print('+++ POLISH ALGORITHM +++')
search_reduc(0.0, [])
print('The maximum reduction cost is: %.2f'%(maxredxset[0]))
if maxredxset[0] > 0:
reducted = True
for i in maxredxset[1][:]:
solution[i] -=1
# SOLUTION
if type(solution) != type(None):
print('Solving was successful.')
self.set_x(solution)
cost = self.get_cost()
print('Network cost is: %.2f'%(cost))
solvedfn = self.inpfn[:-4]+'_Solved_'
if self.algorithm == A_GD:
solvedfn += 'GD'
elif self.algorithm == A_DE:
solvedfn += 'DE'
elif self.algorithm == A_DA:
solvedfn += 'DA'
if reducted:
solvedfn += '+Polish.inp'
else:
solvedfn += '.inp'
self.save_file(solvedfn)
print('Sized network saved in: %s'%(solvedfn))
else:
print('No solution found.')
# DURATION
print('Finished at:', strftime("%H:%M:%S"),end = '')
print('. Duration = ',clock()-startime)
print('-'*80)
return solution
def pretty_print(self, x):
'''Print the solution in a readable format'''
# PRINT SOLUTION
cost = 0
print('*** SOLUTION ***')
print('-'*80)
m = '{:>16} {:>16} {:>8} {:>9} {:>6} {:>6} {:>10}'.format( \
'Epanet Pipe ID', 'series name', 'diameter',\
'roughness', 'length', 'price', 'amount')
print(m)
print('-'*80)
for i in range(len(self.pipes)):
ide = self.pipes[i]['id']
series = self.pipes[i]['series']
size = int(x[i])
d = self.catalog[series][size]['diameter']
r = self.catalog[series][size]['roughness']
p = self.catalog[series][size]['price']
l = self.pipes[i]['length']
a = p * l
cost += a
m='{:>16} {:>16} {:8.1f} {:9.4f} {:6.1f} {:6.2f} {:10.2f}'.format(\
ide, series, d, r, l, p, a)
print(m)
print('-'*80)
print('Total cost: {:10.2f}'.format(cost))
print('='*80)
def main(argv):
#RUN AN OPTIMIZATION
print('*'*80)
print('PRESSURIZED PIPE NETWORK OPTIMIZER')
print('v0.0', 'ppnoptimizer@gmail.com')
print('Licensed under the Apache License 2.0. http://www.apache.org/licenses/')
print('*'*80)
# LOAD PROBLEM
myopt = Ppn(argv[1])
# SOLVE
solution = myopt.solve()
# PRINT SOLUTION
if type(solution) != type(None):
myopt.pretty_print(solution)
if __name__== "__main__":
main(argv[:]) | elif mode == 'PD':
| random_line_split |
ppno.py | # -*- coding: utf-8 -*-
"""PRESSURIZED PIPE NETWORK OPTIMIZER
Andrés García Martínez (ppnoptimizer@gmail.com)
Licensed under the Apache License 2.0. http://www.apache.org/licenses/
"""
from sys import argv
from time import clock, localtime, strftime
import numpy as np
import toolkit as et
import htxt as ht
A_GD = 0
A_DE = 1
A_DA = 2
PENALTY = 1e24
class Ppn():
'''Base class for a presurized pipe network optimization problem
inpfn: str, definition problem file name (.ext)
'''
def __init__(self, problemfn):
'''problemfn: file name
definition problem file name'''
# READ PROBLEM FILE NAME
myht = ht.Htxtf(problemfn)
sections = myht.read()
# READ EPANET MODEL INP FILE
self.inpfn = sections['INP'][0]
# OPEN EPANET MODEL AND HYDRAULIC MODEL
et.ENopen(self.inpfn, self.inpfn[:-4]+'.rpt')
et.ENopenH()
print('-'*80)
print('DATA')
print('Network: %s' %(self.inpfn))
# READ OPTIONS
msg = 'The algorithm selected is: '
for line in sections['OPTIONS']:
key,value = myht.line_to_tuple(line)
if key.upper() == 'ALGORITHM':
if value == 'GD':
self.algorithm = A_GD
msg += 'Gradient Descent.'
elif value == 'DE':
self.algorithm = A_DE
msg += 'Differential Evolution.'
elif value == 'DA':
self.algorithm = A_DA
msg += 'Dual Annaeling.'
elif key.upper() == 'POLISH':
if value.upper() in ['YES', 'Y']:
self.polish = True
msg += ' A final polish was selected.'
else:
self.polish = False
msg += ' A final polish was not selected.'
print(msg)
# READ PIPES
# pipes: numpy array of labeled tuples ('ix','id','length','series'), where
# ix: int, epanet pipe index
# id: str, epanet pipe ID
# length: float, epaneht pipe lenghth
# series: str, pipe series in catalog
dt=np.dtype([('ix','i4'),('id','U16'),('length','f4'),('series','U16')])
tmp = []
for line in sections['PIPES']:
ide,series = myht.line_to_tuple(line)
ix = et.ENgetlinkindex(ide)
l = et.ENgetlinkvalue(ix,et.EN_LENGTH)
tmp.append((ix,ide,l,series))
self.pipes = np.array(tmp,dt)
print('%i pipe/s to size was/were loaded.' %(len(self.pipes)))
# READ PRESSURES
# nodes: numpy array of labeled tuples ('ix','id','pressure'), where
# ix: int, epanet node index
# id: str, epanet node ID
# pressure: float, min pressure required
dt = np.dtype([('ix','i4'),('id','U16'),('pressure','f4')])
tmp = []
for line in sections['PRESSURES']:
ide,p = myht.line_to_tuple(line)
ix = (et.ENgetnodeindex(ide))
tmp.append((ix,ide,p))
self.nodes = np.array(tmp, dtype = dt)
print('%i node/s to check was/were loaded.' %(len(self.nodes)))
# READ CATALOG
dt = np.dtype([('diameter','f4'),('roughness','f4'),('price','f4')])
self.catalog = {}
tmp = set()
for pipe in np.nditer(self.pipes):
tmp.add(str(pipe['series']))
print('%i series/s was/were required.'%(len(tmp)), end='')
for seriesname in tmp:
self.catalog[seriesname] = []
for line in sections['CATALOG']:
sn,d,r,p = myht.line_to_tuple(line)
if sn in tmp:
self.catalog[sn].append((d,r,p))
# READ SERIES
# catalog: dictionary of series {'series' : series}, where
# series: numpy array of labeled numpy tuples ('diameter','roughness','price'),
# where
# diameter: float, pipe diameter
# roughness: float, pipe roughness
# price: float, pipe price
for series in self.catalog:
tmp = self.catalog[series].copy()
self.catalog[series] = np.array(tmp, dtype = dt)
self.catalog[series].sort()
print(' %i series/s was/were loaded.' %(len(self.catalog)))
# DEFINE VARIABLE, DIMENSION AND BOUNDS
self.dimension = len(self.pipes)
self._x = np.zeros(self.dimension, dtype=np.int)
tmp = []
for pipe in self.pipes:
tmp.append(len(self.catalog[pipe['series']])-1)
self.lbound = np.zeros(self.dimension, dtype=np.int)
self.ubound = np.array(tmp, dtype=np.int)
print('-'*80)
def set_x(self, x):
'''Set x updating the hydraulic model
x: numpy array of integers containing the size of the pipes, where
size: int, index of series in catalog.
'''
self._x = x
self._update()
def get_x(self):
'''Return x
'''
return self._x
def _update(self):
'''Update pipe diameter and roughness in the epanet model
'''
for index,pipe in np.ndenumerate(self.pipes):
ix = pipe['ix']
series = self.catalog[pipe['series']]
size = int(self._x[index])
d = series[size]['diameter']
r = series[size]['roughness']
et.ENsetlinkvalue(ix,et.EN_DIAMETER,d)
et.ENsetlinkvalue(ix,et.EN_ROUGHNESS,r)
def check(self, mode='TF'):
'''Run a check of the pressures in the epanet model
mode: str, can be: 'TF', 'GD', 'PD'
Return
------
Accordig to mode, returns:
'TF', status: boolean, calculated pressures are not lower than required
'GD', (status,headlosses): tuple, where
headlosses: numpy descend ordered array by headloss pipe index
where index: int, is the index of pipe in pipes (not epanet ix).
'PD', deficits: numpy array. Nodal pressure deficits, where
deficit: float, = required presure - calculated pressure;
array index corresponds with node in nodes (not epanet ix).
'''
# DEFINE NUMPY ARRAYS
if mode=='PD':
deficits=np.array([np.inf for node in self.nodes],dtype=np.float32)
if mode=='GD':
dt = np.dtype([('index','i4'),('hl','f4')])
pipehls=np.array([(i, 0.0) for i in range(len(self.pipes))],dtype=dt)
# SOLVE HYDRAULIC MODEL
status = True
et.ENinitH(0)
while True:
# RUN A STEP
et.ENrunH()
# CHECK PRESSURES IN NODES
for index,node in np.ndenumerate(self.nodes):
ix = int(node['ix'])
cp = et.ENgetnodevalue(ix,et.EN_PRESSURE)
rp = node['pressure']
nodaldeficit = rp - cp
if nodaldeficit > 0:
status = False
# NOT NECCESSARY RETURN HEADLOSS OR PRESSURE SO EXIT
if mode == 'TF':
return status
# UPDATE DEFICIT ARRAY
if mode == 'PD':
if deficits[index] < nodaldeficit:
deficits[index] = nodaldeficit
# CALCULATE MAXIMUM UNITARY HEADLOSS ARRAY
if mode == 'GD':
for pipe in np.nditer(pipehls):
index = pipe['index']
ix = int(self.pipes[pipe['index']]['ix'])
hl = et.ENgetlinkvalue(ix,et.EN_HEADLOSS)
if pipehls[index]['hl'] < hl:
pipehls[index]['hl'] = hl
# END OF SIMULATON
if et.ENnextH() ==0:
break
# SORT HEADLOSS PIPES
if mode == 'GD':
tmp = np.sort(pipehls, order='hl')[::-1]
headlosses = np.array(tmp[:]['index'], dtype=np.int)
# RESULT
if mode == 'TF':
return status
elif mode == 'GD':
return (status,headlosses)
elif mode == 'PD':
return deficits
def save_file(self,fn):
'''Save inp file updating d and roughness'''
# UPDATE AND SAVE MODEL
et.ENsaveinpfile(fn)
def get_cost(self):
'''Return the network cost. Sum of length x price for each pipe'''
acumulate = 0.0
x = self.get_x()
for index,pipe in np.ndenumerate(self.pipes):
l = pipe['length']
p = self.catalog[pipe['series']][x[index]]['price']
acumulate += l*p
return acumulate
# SOLVER
def solve(self):
'''Run the optimization of the pressurized pipe network
Return
------
The best solution found , where
solution: numpy int array, sizes of pipes, according to series.
If no solution is found return None.
The optimized epanet model is saved in a new file.
'''
startime = clock()
solution = None
reducted = False
print('SOLVING')
print('The solver started at: ' + strftime("%H:%M:%S", localtime()))
# SELECT ALGORITHM
if self.algorithm == A_GD:
# GRADIENT DESCENT ALGORITHM
print('*** GRADIENT DESCENT ALGORITHM ***')
# SET TO 0 AND INITIAL PRESSURE CHECKING
self.set_x(np.zeros(self.dimension, dtype=np.int))
while True:
# CHECK PRESSURES
status,headlosses = self.check(mode='GD')
if status:
# PRESSURES OK END OF LOOP
break
# INCREASE DIAMETER
for index in np.nditer(headlosses):
x = self.get_x()
if x[index] < self.ubound[index]:
x[index] += 1
self.set_x(x)
break
if status:
solution = self.get_x().copy()
if self.algorithm in [A_DE, A_DA]:
# DIFFEERENTIAL EVOLUTION / DUAL ANNEALING ALGORITHM
# SET BOUNDS
tmp = list(zip(self.lbound,self.ubound))
self.bounds = np.array(tmp, dtype = np.int)
def objetive(x):
self.set_x(np.array([round(i) for i in x[:]], np.int))
if self.check(mode='TF'):
return self.get_cost()
else:
return PENALTY
# SOLVE
if self.algorithm == A_DE:
# DIFFEERENTIAL EVOLUTION
from scipy.optimize import differential_evolution
print('*** DIFFERENTIAL EVOLUTION ALGORITHM ***')
result = differential_evolution(objetive, self.bounds)
else:
# DUAL ANNEALING ALGORITHM
from scipy.optimize import dual_annealing
print('*** DUAL ANNEALING ALGORITHM ***')
result = dual_annealing(objetive, self.bounds)
# CHECK
tmp = [round(i) for i in result.x[:]]
tmp = np.array(tmp, dtype=np.int)
self.set_x(tmp)
if self.check(mode='TF'):
solution = self.get_x().copy()
else:
solution = None
if self.polish and (type(solution) != type(None)):
# POLISH ALGORITHM
maxredxset = [0.0,[]]
def search_reduc(savings, redxset):
'''
Searh possible reduction of pipe diameters
redxset: list of ordered by index pipe-set which diameter can
be reduced 1-step according to pipe series.
savings: reduction of cost reached applying redxset
If a pipe can be reduced, it is added, starting a recursively
precces that stop when no pipe can be reduced, then the reduction
cost is compared whith previous max reduccion, updating it.
Return
------
Update maxredset
'''
changes = False
# SET TO SOL - REDUCTIONS
newx = solution.copy()
if len(redxset) > 0:
start = redxset[-1]
else:
start = 0
for i in redxset[:]:
newx[i] -=1
# SEARCH FOR A POSSIBLE REDUCIBLE PIPE
for i in range(start,len(self._x)):
if newx[i] > 0:
# REDUCE DIAMETER
newx[i] -= 1
# CHECK PRESSURES
self.set_x(newx)
if self.check(mode='TF'):
# ACEPPT CHANGES
changes = True
series = self.catalog[self.pipes[i]['series']]
c1 = series[newx[i]+1]['price']
c2 = series[newx[i]]['price']
l = self.pipes[i]['length']
newsavings = savings+(c1-c2)*l
newredxset = redxset.copy()
newredxset.append(i)
search_reduc(newsavings, newredxset)
else:
# UNDO
newx[i] += 1
if not changes:
# CHECK AND UPDATE MAX REDUCTION SET
if savings > maxredxset[0]:
maxredxset[0] = savings
maxredxset[1] = redxset
print('+++ POLISH ALGORITHM +++')
search_reduc(0.0, [])
print('The maximum reduction cost is: %.2f'%(maxredxset[0]))
if maxredxset[0] > 0:
reducted = True
for i in maxredxset[1][:]:
solution[i] -=1
# SOLUTION
if type(solution) != type(None):
print('Solving was successful.')
self.set_x(solution)
cost = self.get_cost()
print('Network cost is: %.2f'%(cost))
solvedfn = self.inpfn[:-4]+'_Solved_'
if self.algorithm == A_GD:
solvedfn += 'GD'
elif self.algorithm == A_DE:
solvedfn += 'DE'
elif self.algorithm == A_DA:
solvedfn += 'DA'
if reducted:
solvedfn += '+Polish.inp'
else:
solvedfn += '.inp'
self.save_file(solvedfn)
print('Sized network saved in: %s'%(solvedfn))
else:
print('No solution found.')
# DURATION
print('Finished at:', strftime("%H:%M:%S"),end = '')
print('. Duration = ',clock()-startime)
print('-'*80)
return solution
def pretty_print(self, x):
'''Pr |
def main(argv):
#RUN AN OPTIMIZATION
print('*'*80)
print('PRESSURIZED PIPE NETWORK OPTIMIZER')
print('v0.0', 'ppnoptimizer@gmail.com')
print('Licensed under the Apache License 2.0. http://www.apache.org/licenses/')
print('*'*80)
# LOAD PROBLEM
myopt = Ppn(argv[1])
# SOLVE
solution = myopt.solve()
# PRINT SOLUTION
if type(solution) != type(None):
myopt.pretty_print(solution)
if __name__== "__main__":
main(argv[:])
| int the solution in a readable format'''
# PRINT SOLUTION
cost = 0
print('*** SOLUTION ***')
print('-'*80)
m = '{:>16} {:>16} {:>8} {:>9} {:>6} {:>6} {:>10}'.format( \
'Epanet Pipe ID', 'series name', 'diameter',\
'roughness', 'length', 'price', 'amount')
print(m)
print('-'*80)
for i in range(len(self.pipes)):
ide = self.pipes[i]['id']
series = self.pipes[i]['series']
size = int(x[i])
d = self.catalog[series][size]['diameter']
r = self.catalog[series][size]['roughness']
p = self.catalog[series][size]['price']
l = self.pipes[i]['length']
a = p * l
cost += a
m='{:>16} {:>16} {:8.1f} {:9.4f} {:6.1f} {:6.2f} {:10.2f}'.format(\
ide, series, d, r, l, p, a)
print(m)
print('-'*80)
print('Total cost: {:10.2f}'.format(cost))
print('='*80) | identifier_body |
ppno.py | # -*- coding: utf-8 -*-
"""PRESSURIZED PIPE NETWORK OPTIMIZER
Andrés García Martínez (ppnoptimizer@gmail.com)
Licensed under the Apache License 2.0. http://www.apache.org/licenses/
"""
from sys import argv
from time import clock, localtime, strftime
import numpy as np
import toolkit as et
import htxt as ht
A_GD = 0
A_DE = 1
A_DA = 2
PENALTY = 1e24
class Ppn():
'''Base class for a presurized pipe network optimization problem
inpfn: str, definition problem file name (.ext)
'''
def __init__(self, problemfn):
'''problemfn: file name
definition problem file name'''
# READ PROBLEM FILE NAME
myht = ht.Htxtf(problemfn)
sections = myht.read()
# READ EPANET MODEL INP FILE
self.inpfn = sections['INP'][0]
# OPEN EPANET MODEL AND HYDRAULIC MODEL
et.ENopen(self.inpfn, self.inpfn[:-4]+'.rpt')
et.ENopenH()
print('-'*80)
print('DATA')
print('Network: %s' %(self.inpfn))
# READ OPTIONS
msg = 'The algorithm selected is: '
for line in sections['OPTIONS']:
key,value = myht.line_to_tuple(line)
if key.upper() == 'ALGORITHM':
if value == 'GD':
self.algorithm = A_GD
msg += 'Gradient Descent.'
elif value == 'DE':
self.algorithm = A_DE
msg += 'Differential Evolution.'
elif value == 'DA':
self.algorithm = A_DA
msg += 'Dual Annaeling.'
elif key.upper() == 'POLISH':
if value.upper() in ['YES', 'Y']:
self.polish = True
msg += ' A final polish was selected.'
else:
self.polish = False
msg += ' A final polish was not selected.'
print(msg)
# READ PIPES
# pipes: numpy array of labeled tuples ('ix','id','length','series'), where
# ix: int, epanet pipe index
# id: str, epanet pipe ID
# length: float, epaneht pipe lenghth
# series: str, pipe series in catalog
dt=np.dtype([('ix','i4'),('id','U16'),('length','f4'),('series','U16')])
tmp = []
for line in sections['PIPES']:
ide,series = myht.line_to_tuple(line)
ix = et.ENgetlinkindex(ide)
l = et.ENgetlinkvalue(ix,et.EN_LENGTH)
tmp.append((ix,ide,l,series))
self.pipes = np.array(tmp,dt)
print('%i pipe/s to size was/were loaded.' %(len(self.pipes)))
# READ PRESSURES
# nodes: numpy array of labeled tuples ('ix','id','pressure'), where
# ix: int, epanet node index
# id: str, epanet node ID
# pressure: float, min pressure required
dt = np.dtype([('ix','i4'),('id','U16'),('pressure','f4')])
tmp = []
for line in sections['PRESSURES']:
ide,p = myht.line_to_tuple(line)
ix = (et.ENgetnodeindex(ide))
tmp.append((ix,ide,p))
self.nodes = np.array(tmp, dtype = dt)
print('%i node/s to check was/were loaded.' %(len(self.nodes)))
# READ CATALOG
dt = np.dtype([('diameter','f4'),('roughness','f4'),('price','f4')])
self.catalog = {}
tmp = set()
for pipe in np.nditer(self.pipes):
tmp.add(str(pipe['series']))
print('%i series/s was/were required.'%(len(tmp)), end='')
for seriesname in tmp:
self.catalog[seriesname] = []
for line in sections['CATALOG']:
sn,d,r,p = myht.line_to_tuple(line)
if sn in tmp:
self.catalog[sn].append((d,r,p))
# READ SERIES
# catalog: dictionary of series {'series' : series}, where
# series: numpy array of labeled numpy tuples ('diameter','roughness','price'),
# where
# diameter: float, pipe diameter
# roughness: float, pipe roughness
# price: float, pipe price
for series in self.catalog:
tmp = self.catalog[series].copy()
self.catalog[series] = np.array(tmp, dtype = dt)
self.catalog[series].sort()
print(' %i series/s was/were loaded.' %(len(self.catalog)))
# DEFINE VARIABLE, DIMENSION AND BOUNDS
self.dimension = len(self.pipes)
self._x = np.zeros(self.dimension, dtype=np.int)
tmp = []
for pipe in self.pipes:
tmp.append(len(self.catalog[pipe['series']])-1)
self.lbound = np.zeros(self.dimension, dtype=np.int)
self.ubound = np.array(tmp, dtype=np.int)
print('-'*80)
def set_x(self, x):
'''Set x updating the hydraulic model
x: numpy array of integers containing the size of the pipes, where
size: int, index of series in catalog.
'''
self._x = x
self._update()
def get_x(self):
'''Return x
'''
return self._x
def _update(self):
'''Update pipe diameter and roughness in the epanet model
'''
for index,pipe in np.ndenumerate(self.pipes):
ix = pipe['ix']
series = self.catalog[pipe['series']]
size = int(self._x[index])
d = series[size]['diameter']
r = series[size]['roughness']
et.ENsetlinkvalue(ix,et.EN_DIAMETER,d)
et.ENsetlinkvalue(ix,et.EN_ROUGHNESS,r)
def check(self, mode='TF'):
'''Run a check of the pressures in the epanet model
mode: str, can be: 'TF', 'GD', 'PD'
Return
------
Accordig to mode, returns:
'TF', status: boolean, calculated pressures are not lower than required
'GD', (status,headlosses): tuple, where
headlosses: numpy descend ordered array by headloss pipe index
where index: int, is the index of pipe in pipes (not epanet ix).
'PD', deficits: numpy array. Nodal pressure deficits, where
deficit: float, = required presure - calculated pressure;
array index corresponds with node in nodes (not epanet ix).
'''
# DEFINE NUMPY ARRAYS
if mode=='PD':
deficits=np.array([np.inf for node in self.nodes],dtype=np.float32)
if mode=='GD':
dt = np.dtype([('index','i4'),('hl','f4')])
pipehls=np.array([(i, 0.0) for i in range(len(self.pipes))],dtype=dt)
# SOLVE HYDRAULIC MODEL
status = True
et.ENinitH(0)
while True:
# RUN A STEP
et.ENrunH()
# CHECK PRESSURES IN NODES
for index,node in np.ndenumerate(self.nodes):
ix = int(node['ix'])
cp = et.ENgetnodevalue(ix,et.EN_PRESSURE)
rp = node['pressure']
nodaldeficit = rp - cp
if nodaldeficit > 0:
status = False
# NOT NECCESSARY RETURN HEADLOSS OR PRESSURE SO EXIT
if mode == 'TF':
return status
# UPDATE DEFICIT ARRAY
if mode == 'PD':
if deficits[index] < nodaldeficit:
deficits[index] = nodaldeficit
# CALCULATE MAXIMUM UNITARY HEADLOSS ARRAY
if mode == 'GD':
for pipe in np.nditer(pipehls):
index = pipe['index']
ix = int(self.pipes[pipe['index']]['ix'])
hl = et.ENgetlinkvalue(ix,et.EN_HEADLOSS)
if pipehls[index]['hl'] < hl:
pipehls[index]['hl'] = hl
# END OF SIMULATON
if et.ENnextH() ==0:
break
# SORT HEADLOSS PIPES
if mode == 'GD':
tmp = np.sort(pipehls, order='hl')[::-1]
headlosses = np.array(tmp[:]['index'], dtype=np.int)
# RESULT
if mode == 'TF':
return status
elif mode == 'GD':
return (status,headlosses)
elif mode == 'PD':
return deficits
def save_file(self,fn):
'''Save inp file updating d and roughness'''
# UPDATE AND SAVE MODEL
et.ENsaveinpfile(fn)
def get_cost(self):
'''Return the network cost. Sum of length x price for each pipe'''
acumulate = 0.0
x = self.get_x()
for index,pipe in np.ndenumerate(self.pipes):
l = pipe['length']
p = self.catalog[pipe['series']][x[index]]['price']
acumulate += l*p
return acumulate
# SOLVER
def solve(self):
'''Run the optimization of the pressurized pipe network
Return
------
The best solution found , where
solution: numpy int array, sizes of pipes, according to series.
If no solution is found return None.
The optimized epanet model is saved in a new file.
'''
startime = clock()
solution = None
reducted = False
print('SOLVING')
print('The solver started at: ' + strftime("%H:%M:%S", localtime()))
# SELECT ALGORITHM
if self.algorithm == A_GD:
# GRADIENT DESCENT ALGORITHM
print('*** GRADIENT DESCENT ALGORITHM ***')
# SET TO 0 AND INITIAL PRESSURE CHECKING
self.set_x(np.zeros(self.dimension, dtype=np.int))
while True:
# CHECK PRESSURES
status,headlosses = self.check(mode='GD')
if status:
# PRESSURES OK END OF LOOP
break
# INCREASE DIAMETER
for index in np.nditer(headlosses):
x = s |
if status:
solution = self.get_x().copy()
if self.algorithm in [A_DE, A_DA]:
# DIFFEERENTIAL EVOLUTION / DUAL ANNEALING ALGORITHM
# SET BOUNDS
tmp = list(zip(self.lbound,self.ubound))
self.bounds = np.array(tmp, dtype = np.int)
def objetive(x):
self.set_x(np.array([round(i) for i in x[:]], np.int))
if self.check(mode='TF'):
return self.get_cost()
else:
return PENALTY
# SOLVE
if self.algorithm == A_DE:
# DIFFEERENTIAL EVOLUTION
from scipy.optimize import differential_evolution
print('*** DIFFERENTIAL EVOLUTION ALGORITHM ***')
result = differential_evolution(objetive, self.bounds)
else:
# DUAL ANNEALING ALGORITHM
from scipy.optimize import dual_annealing
print('*** DUAL ANNEALING ALGORITHM ***')
result = dual_annealing(objetive, self.bounds)
# CHECK
tmp = [round(i) for i in result.x[:]]
tmp = np.array(tmp, dtype=np.int)
self.set_x(tmp)
if self.check(mode='TF'):
solution = self.get_x().copy()
else:
solution = None
if self.polish and (type(solution) != type(None)):
# POLISH ALGORITHM
maxredxset = [0.0,[]]
def search_reduc(savings, redxset):
'''
Searh possible reduction of pipe diameters
redxset: list of ordered by index pipe-set which diameter can
be reduced 1-step according to pipe series.
savings: reduction of cost reached applying redxset
If a pipe can be reduced, it is added, starting a recursively
precces that stop when no pipe can be reduced, then the reduction
cost is compared whith previous max reduccion, updating it.
Return
------
Update maxredset
'''
changes = False
# SET TO SOL - REDUCTIONS
newx = solution.copy()
if len(redxset) > 0:
start = redxset[-1]
else:
start = 0
for i in redxset[:]:
newx[i] -=1
# SEARCH FOR A POSSIBLE REDUCIBLE PIPE
for i in range(start,len(self._x)):
if newx[i] > 0:
# REDUCE DIAMETER
newx[i] -= 1
# CHECK PRESSURES
self.set_x(newx)
if self.check(mode='TF'):
# ACEPPT CHANGES
changes = True
series = self.catalog[self.pipes[i]['series']]
c1 = series[newx[i]+1]['price']
c2 = series[newx[i]]['price']
l = self.pipes[i]['length']
newsavings = savings+(c1-c2)*l
newredxset = redxset.copy()
newredxset.append(i)
search_reduc(newsavings, newredxset)
else:
# UNDO
newx[i] += 1
if not changes:
# CHECK AND UPDATE MAX REDUCTION SET
if savings > maxredxset[0]:
maxredxset[0] = savings
maxredxset[1] = redxset
print('+++ POLISH ALGORITHM +++')
search_reduc(0.0, [])
print('The maximum reduction cost is: %.2f'%(maxredxset[0]))
if maxredxset[0] > 0:
reducted = True
for i in maxredxset[1][:]:
solution[i] -=1
# SOLUTION
if type(solution) != type(None):
print('Solving was successful.')
self.set_x(solution)
cost = self.get_cost()
print('Network cost is: %.2f'%(cost))
solvedfn = self.inpfn[:-4]+'_Solved_'
if self.algorithm == A_GD:
solvedfn += 'GD'
elif self.algorithm == A_DE:
solvedfn += 'DE'
elif self.algorithm == A_DA:
solvedfn += 'DA'
if reducted:
solvedfn += '+Polish.inp'
else:
solvedfn += '.inp'
self.save_file(solvedfn)
print('Sized network saved in: %s'%(solvedfn))
else:
print('No solution found.')
# DURATION
print('Finished at:', strftime("%H:%M:%S"),end = '')
print('. Duration = ',clock()-startime)
print('-'*80)
return solution
def pretty_print(self, x):
'''Print the solution in a readable format'''
# PRINT SOLUTION
cost = 0
print('*** SOLUTION ***')
print('-'*80)
m = '{:>16} {:>16} {:>8} {:>9} {:>6} {:>6} {:>10}'.format( \
'Epanet Pipe ID', 'series name', 'diameter',\
'roughness', 'length', 'price', 'amount')
print(m)
print('-'*80)
for i in range(len(self.pipes)):
ide = self.pipes[i]['id']
series = self.pipes[i]['series']
size = int(x[i])
d = self.catalog[series][size]['diameter']
r = self.catalog[series][size]['roughness']
p = self.catalog[series][size]['price']
l = self.pipes[i]['length']
a = p * l
cost += a
m='{:>16} {:>16} {:8.1f} {:9.4f} {:6.1f} {:6.2f} {:10.2f}'.format(\
ide, series, d, r, l, p, a)
print(m)
print('-'*80)
print('Total cost: {:10.2f}'.format(cost))
print('='*80)
def main(argv):
#RUN AN OPTIMIZATION
print('*'*80)
print('PRESSURIZED PIPE NETWORK OPTIMIZER')
print('v0.0', 'ppnoptimizer@gmail.com')
print('Licensed under the Apache License 2.0. http://www.apache.org/licenses/')
print('*'*80)
# LOAD PROBLEM
myopt = Ppn(argv[1])
# SOLVE
solution = myopt.solve()
# PRINT SOLUTION
if type(solution) != type(None):
myopt.pretty_print(solution)
if __name__== "__main__":
main(argv[:])
| elf.get_x()
if x[index] < self.ubound[index]:
x[index] += 1
self.set_x(x)
break
| conditional_block |
ppno.py | # -*- coding: utf-8 -*-
"""PRESSURIZED PIPE NETWORK OPTIMIZER
Andrés García Martínez (ppnoptimizer@gmail.com)
Licensed under the Apache License 2.0. http://www.apache.org/licenses/
"""
from sys import argv
from time import clock, localtime, strftime
import numpy as np
import toolkit as et
import htxt as ht
A_GD = 0
A_DE = 1
A_DA = 2
PENALTY = 1e24
class Ppn():
'''Base class for a presurized pipe network optimization problem
inpfn: str, definition problem file name (.ext)
'''
def __init__(self, problemfn):
'''problemfn: file name
definition problem file name'''
# READ PROBLEM FILE NAME
myht = ht.Htxtf(problemfn)
sections = myht.read()
# READ EPANET MODEL INP FILE
self.inpfn = sections['INP'][0]
# OPEN EPANET MODEL AND HYDRAULIC MODEL
et.ENopen(self.inpfn, self.inpfn[:-4]+'.rpt')
et.ENopenH()
print('-'*80)
print('DATA')
print('Network: %s' %(self.inpfn))
# READ OPTIONS
msg = 'The algorithm selected is: '
for line in sections['OPTIONS']:
key,value = myht.line_to_tuple(line)
if key.upper() == 'ALGORITHM':
if value == 'GD':
self.algorithm = A_GD
msg += 'Gradient Descent.'
elif value == 'DE':
self.algorithm = A_DE
msg += 'Differential Evolution.'
elif value == 'DA':
self.algorithm = A_DA
msg += 'Dual Annaeling.'
elif key.upper() == 'POLISH':
if value.upper() in ['YES', 'Y']:
self.polish = True
msg += ' A final polish was selected.'
else:
self.polish = False
msg += ' A final polish was not selected.'
print(msg)
# READ PIPES
# pipes: numpy array of labeled tuples ('ix','id','length','series'), where
# ix: int, epanet pipe index
# id: str, epanet pipe ID
# length: float, epaneht pipe lenghth
# series: str, pipe series in catalog
dt=np.dtype([('ix','i4'),('id','U16'),('length','f4'),('series','U16')])
tmp = []
for line in sections['PIPES']:
ide,series = myht.line_to_tuple(line)
ix = et.ENgetlinkindex(ide)
l = et.ENgetlinkvalue(ix,et.EN_LENGTH)
tmp.append((ix,ide,l,series))
self.pipes = np.array(tmp,dt)
print('%i pipe/s to size was/were loaded.' %(len(self.pipes)))
# READ PRESSURES
# nodes: numpy array of labeled tuples ('ix','id','pressure'), where
# ix: int, epanet node index
# id: str, epanet node ID
# pressure: float, min pressure required
dt = np.dtype([('ix','i4'),('id','U16'),('pressure','f4')])
tmp = []
for line in sections['PRESSURES']:
ide,p = myht.line_to_tuple(line)
ix = (et.ENgetnodeindex(ide))
tmp.append((ix,ide,p))
self.nodes = np.array(tmp, dtype = dt)
print('%i node/s to check was/were loaded.' %(len(self.nodes)))
# READ CATALOG
dt = np.dtype([('diameter','f4'),('roughness','f4'),('price','f4')])
self.catalog = {}
tmp = set()
for pipe in np.nditer(self.pipes):
tmp.add(str(pipe['series']))
print('%i series/s was/were required.'%(len(tmp)), end='')
for seriesname in tmp:
self.catalog[seriesname] = []
for line in sections['CATALOG']:
sn,d,r,p = myht.line_to_tuple(line)
if sn in tmp:
self.catalog[sn].append((d,r,p))
# READ SERIES
# catalog: dictionary of series {'series' : series}, where
# series: numpy array of labeled numpy tuples ('diameter','roughness','price'),
# where
# diameter: float, pipe diameter
# roughness: float, pipe roughness
# price: float, pipe price
for series in self.catalog:
tmp = self.catalog[series].copy()
self.catalog[series] = np.array(tmp, dtype = dt)
self.catalog[series].sort()
print(' %i series/s was/were loaded.' %(len(self.catalog)))
# DEFINE VARIABLE, DIMENSION AND BOUNDS
self.dimension = len(self.pipes)
self._x = np.zeros(self.dimension, dtype=np.int)
tmp = []
for pipe in self.pipes:
tmp.append(len(self.catalog[pipe['series']])-1)
self.lbound = np.zeros(self.dimension, dtype=np.int)
self.ubound = np.array(tmp, dtype=np.int)
print('-'*80)
def set_x(self, x):
'''Set x updating the hydraulic model
x: numpy array of integers containing the size of the pipes, where
size: int, index of series in catalog.
'''
self._x = x
self._update()
def get_x | ):
'''Return x
'''
return self._x
def _update(self):
'''Update pipe diameter and roughness in the epanet model
'''
for index,pipe in np.ndenumerate(self.pipes):
ix = pipe['ix']
series = self.catalog[pipe['series']]
size = int(self._x[index])
d = series[size]['diameter']
r = series[size]['roughness']
et.ENsetlinkvalue(ix,et.EN_DIAMETER,d)
et.ENsetlinkvalue(ix,et.EN_ROUGHNESS,r)
def check(self, mode='TF'):
'''Run a check of the pressures in the epanet model
mode: str, can be: 'TF', 'GD', 'PD'
Return
------
Accordig to mode, returns:
'TF', status: boolean, calculated pressures are not lower than required
'GD', (status,headlosses): tuple, where
headlosses: numpy descend ordered array by headloss pipe index
where index: int, is the index of pipe in pipes (not epanet ix).
'PD', deficits: numpy array. Nodal pressure deficits, where
deficit: float, = required presure - calculated pressure;
array index corresponds with node in nodes (not epanet ix).
'''
# DEFINE NUMPY ARRAYS
if mode=='PD':
deficits=np.array([np.inf for node in self.nodes],dtype=np.float32)
if mode=='GD':
dt = np.dtype([('index','i4'),('hl','f4')])
pipehls=np.array([(i, 0.0) for i in range(len(self.pipes))],dtype=dt)
# SOLVE HYDRAULIC MODEL
status = True
et.ENinitH(0)
while True:
# RUN A STEP
et.ENrunH()
# CHECK PRESSURES IN NODES
for index,node in np.ndenumerate(self.nodes):
ix = int(node['ix'])
cp = et.ENgetnodevalue(ix,et.EN_PRESSURE)
rp = node['pressure']
nodaldeficit = rp - cp
if nodaldeficit > 0:
status = False
# NOT NECCESSARY RETURN HEADLOSS OR PRESSURE SO EXIT
if mode == 'TF':
return status
# UPDATE DEFICIT ARRAY
if mode == 'PD':
if deficits[index] < nodaldeficit:
deficits[index] = nodaldeficit
# CALCULATE MAXIMUM UNITARY HEADLOSS ARRAY
if mode == 'GD':
for pipe in np.nditer(pipehls):
index = pipe['index']
ix = int(self.pipes[pipe['index']]['ix'])
hl = et.ENgetlinkvalue(ix,et.EN_HEADLOSS)
if pipehls[index]['hl'] < hl:
pipehls[index]['hl'] = hl
# END OF SIMULATON
if et.ENnextH() ==0:
break
# SORT HEADLOSS PIPES
if mode == 'GD':
tmp = np.sort(pipehls, order='hl')[::-1]
headlosses = np.array(tmp[:]['index'], dtype=np.int)
# RESULT
if mode == 'TF':
return status
elif mode == 'GD':
return (status,headlosses)
elif mode == 'PD':
return deficits
def save_file(self,fn):
'''Save inp file updating d and roughness'''
# UPDATE AND SAVE MODEL
et.ENsaveinpfile(fn)
def get_cost(self):
'''Return the network cost. Sum of length x price for each pipe'''
acumulate = 0.0
x = self.get_x()
for index,pipe in np.ndenumerate(self.pipes):
l = pipe['length']
p = self.catalog[pipe['series']][x[index]]['price']
acumulate += l*p
return acumulate
# SOLVER
def solve(self):
'''Run the optimization of the pressurized pipe network
Return
------
The best solution found , where
solution: numpy int array, sizes of pipes, according to series.
If no solution is found return None.
The optimized epanet model is saved in a new file.
'''
startime = clock()
solution = None
reducted = False
print('SOLVING')
print('The solver started at: ' + strftime("%H:%M:%S", localtime()))
# SELECT ALGORITHM
if self.algorithm == A_GD:
# GRADIENT DESCENT ALGORITHM
print('*** GRADIENT DESCENT ALGORITHM ***')
# SET TO 0 AND INITIAL PRESSURE CHECKING
self.set_x(np.zeros(self.dimension, dtype=np.int))
while True:
# CHECK PRESSURES
status,headlosses = self.check(mode='GD')
if status:
# PRESSURES OK END OF LOOP
break
# INCREASE DIAMETER
for index in np.nditer(headlosses):
x = self.get_x()
if x[index] < self.ubound[index]:
x[index] += 1
self.set_x(x)
break
if status:
solution = self.get_x().copy()
if self.algorithm in [A_DE, A_DA]:
# DIFFEERENTIAL EVOLUTION / DUAL ANNEALING ALGORITHM
# SET BOUNDS
tmp = list(zip(self.lbound,self.ubound))
self.bounds = np.array(tmp, dtype = np.int)
def objetive(x):
self.set_x(np.array([round(i) for i in x[:]], np.int))
if self.check(mode='TF'):
return self.get_cost()
else:
return PENALTY
# SOLVE
if self.algorithm == A_DE:
# DIFFEERENTIAL EVOLUTION
from scipy.optimize import differential_evolution
print('*** DIFFERENTIAL EVOLUTION ALGORITHM ***')
result = differential_evolution(objetive, self.bounds)
else:
# DUAL ANNEALING ALGORITHM
from scipy.optimize import dual_annealing
print('*** DUAL ANNEALING ALGORITHM ***')
result = dual_annealing(objetive, self.bounds)
# CHECK
tmp = [round(i) for i in result.x[:]]
tmp = np.array(tmp, dtype=np.int)
self.set_x(tmp)
if self.check(mode='TF'):
solution = self.get_x().copy()
else:
solution = None
if self.polish and (type(solution) != type(None)):
# POLISH ALGORITHM
maxredxset = [0.0,[]]
def search_reduc(savings, redxset):
'''
Searh possible reduction of pipe diameters
redxset: list of ordered by index pipe-set which diameter can
be reduced 1-step according to pipe series.
savings: reduction of cost reached applying redxset
If a pipe can be reduced, it is added, starting a recursively
precces that stop when no pipe can be reduced, then the reduction
cost is compared whith previous max reduccion, updating it.
Return
------
Update maxredset
'''
changes = False
# SET TO SOL - REDUCTIONS
newx = solution.copy()
if len(redxset) > 0:
start = redxset[-1]
else:
start = 0
for i in redxset[:]:
newx[i] -=1
# SEARCH FOR A POSSIBLE REDUCIBLE PIPE
for i in range(start,len(self._x)):
if newx[i] > 0:
# REDUCE DIAMETER
newx[i] -= 1
# CHECK PRESSURES
self.set_x(newx)
if self.check(mode='TF'):
# ACEPPT CHANGES
changes = True
series = self.catalog[self.pipes[i]['series']]
c1 = series[newx[i]+1]['price']
c2 = series[newx[i]]['price']
l = self.pipes[i]['length']
newsavings = savings+(c1-c2)*l
newredxset = redxset.copy()
newredxset.append(i)
search_reduc(newsavings, newredxset)
else:
# UNDO
newx[i] += 1
if not changes:
# CHECK AND UPDATE MAX REDUCTION SET
if savings > maxredxset[0]:
maxredxset[0] = savings
maxredxset[1] = redxset
print('+++ POLISH ALGORITHM +++')
search_reduc(0.0, [])
print('The maximum reduction cost is: %.2f'%(maxredxset[0]))
if maxredxset[0] > 0:
reducted = True
for i in maxredxset[1][:]:
solution[i] -=1
# SOLUTION
if type(solution) != type(None):
print('Solving was successful.')
self.set_x(solution)
cost = self.get_cost()
print('Network cost is: %.2f'%(cost))
solvedfn = self.inpfn[:-4]+'_Solved_'
if self.algorithm == A_GD:
solvedfn += 'GD'
elif self.algorithm == A_DE:
solvedfn += 'DE'
elif self.algorithm == A_DA:
solvedfn += 'DA'
if reducted:
solvedfn += '+Polish.inp'
else:
solvedfn += '.inp'
self.save_file(solvedfn)
print('Sized network saved in: %s'%(solvedfn))
else:
print('No solution found.')
# DURATION
print('Finished at:', strftime("%H:%M:%S"),end = '')
print('. Duration = ',clock()-startime)
print('-'*80)
return solution
def pretty_print(self, x):
'''Print the solution in a readable format'''
# PRINT SOLUTION
cost = 0
print('*** SOLUTION ***')
print('-'*80)
m = '{:>16} {:>16} {:>8} {:>9} {:>6} {:>6} {:>10}'.format( \
'Epanet Pipe ID', 'series name', 'diameter',\
'roughness', 'length', 'price', 'amount')
print(m)
print('-'*80)
for i in range(len(self.pipes)):
ide = self.pipes[i]['id']
series = self.pipes[i]['series']
size = int(x[i])
d = self.catalog[series][size]['diameter']
r = self.catalog[series][size]['roughness']
p = self.catalog[series][size]['price']
l = self.pipes[i]['length']
a = p * l
cost += a
m='{:>16} {:>16} {:8.1f} {:9.4f} {:6.1f} {:6.2f} {:10.2f}'.format(\
ide, series, d, r, l, p, a)
print(m)
print('-'*80)
print('Total cost: {:10.2f}'.format(cost))
print('='*80)
def main(argv):
#RUN AN OPTIMIZATION
print('*'*80)
print('PRESSURIZED PIPE NETWORK OPTIMIZER')
print('v0.0', 'ppnoptimizer@gmail.com')
print('Licensed under the Apache License 2.0. http://www.apache.org/licenses/')
print('*'*80)
# LOAD PROBLEM
myopt = Ppn(argv[1])
# SOLVE
solution = myopt.solve()
# PRINT SOLUTION
if type(solution) != type(None):
myopt.pretty_print(solution)
if __name__== "__main__":
main(argv[:])
| (self | identifier_name |
Scanner3D.py | import math
from dataclasses import dataclass
from typing import List, Optional, Tuple
import cv2 as cv
import numpy as np
import open3d as o3d
from sklearn.cluster import DBSCAN
from src.utils import (
ExtremePoints,
Plane,
Point,
Rectangle,
draw_circles,
fit_plane,
line_plane_intersection,
show_image,
threshold_image,
)
@dataclass
class Scanner3D:
debug: bool
K: np.ndarray
K_inv: np.ndarray
dist: np.ndarray
filename: str = "cup1.mp4"
inner_rectangle: np.ndarray = np.array([[[0, 0]], [[23, 0]], [[23, 13]], [[0, 13]]])
lower_red_obj: np.ndarray = np.array([35, 25, 40])
lower_red_planes: np.ndarray = np.array([45, 30, 45])
upper_red: np.ndarray = np.array([100, 255, 255])
dbscan: DBSCAN = DBSCAN(eps=7, min_samples=20)
def get_rectangles_mask(self, thresh: np.ndarray) -> np.ndarray:
"""
Given a thresholded image of the scene (ideally, the first frame),
return the masks for the two known rectangles: one on the wall and one on the desk.
"""
contours = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)[0]
mask = np.zeros(thresh.shape, np.uint8)
good_contours = sorted(
[cnt for cnt in contours if 100000 < cv.contourArea(cnt) < 200000],
key=cv.contourArea,
)
setattr(self, "contour1", good_contours[0])
setattr(
self,
"contour2",
good_contours[1]
if cv.pointPolygonTest(
good_contours[1], tuple(good_contours[0][0][0]), False
)
< 0
else good_contours[2],
)
cv.drawContours(mask, [self.contour1], 0, 255, -1)
cv.drawContours(mask, [self.contour2], 0, 255, -1)
return mask
def sort_corners(self, corners: np.ndarray):
"""
Sort the 4 corners clockwise of a rectangle so that the top-left corner
is the first one.
"""
center = np.sum(corners, axis=0) / 4
sorted_corners = sorted(
corners,
key=lambda p: math.atan2(p[0][0] - center[0][0], p[0][1] - center[0][1]),
reverse=True,
)
return np.roll(sorted_corners, 1, axis=0)
def get_desk_wall_corners(
self, thresh: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""
Given a thresholded image of the scene and a mask representing the two
known rectangles, return the corners of those rectangles (8 in total)
with sub-pixel accuracy. The corners returned are already sorted.
"""
mask = self.get_rectangles_mask(thresh)
assert thresh.shape[:2] == mask.shape[:2]
corners = cv.goodFeaturesToTrack(
thresh,
maxCorners=8,
qualityLevel=0.01,
minDistance=10,
mask=mask,
blockSize=5,
)
criteria = (cv.TERM_CRITERIA_EPS + cv.TermCriteria_COUNT, 20, 0.001)
corners = cv.cornerSubPix(
thresh, corners, winSize=(7, 7), zeroZone=(-1, -1), criteria=criteria
)
y_middle = thresh.shape[0] / 2
desk_corners = np.expand_dims(corners[corners[:, :, 1] > y_middle], axis=1)
wall_corners = np.expand_dims(corners[corners[:, :, 1] <= y_middle], axis=1)
sorted_desk_corners = self.sort_corners(desk_corners)
sorted_wall_corners = self.sort_corners(wall_corners)
return sorted_desk_corners, sorted_wall_corners
def get_H_R_t(self, corners: np.ndarray) -> Plane:
"""
Given 4 sorted corners, compute the homography between the corners
and the rectangle's ground truth and return the information
on the mapped plane.
In other words, this function returns information on a plane
(in particular, the desk's or wall's).
The plane's origin is in the top-left corner of the rectangle,
and the normal is perpendicular to that plane.
"""
H = cv.findHomography(self.inner_rectangle, corners)[0]
result = self.K_inv @ H
result /= cv.norm(result[:, 1])
r0, r1, t = np.hsplit(result, 3)
r2 = np.cross(r0.T, r1.T).T
_, u, vt = cv.SVDecomp(np.hstack([r0, r1, r2]))
R = u @ vt
return Plane(origin=t[:, 0], normal=R[:, 2], R=R)
def get_extreme_points(
self, wall_corners: np.ndarray, desk_corners: np.ndarray
) -> ExtremePoints:
"""
Given the corners of the rectangles on the wall and on the desk,
return the coordinates for a tight bounding box of the area
between the two rectangles.
"""
ymin_wall = int(np.min(wall_corners[:, :, 1]))
ymax_wall = int(np.max(wall_corners[:, :, 1]))
ymin_desk = int(np.min(desk_corners[:, :, 1]))
ymax_desk = int(np.max(desk_corners[:, :, 1]))
xmin = int(np.min(wall_corners[:, :, 0]))
xmax = int(np.max(wall_corners[:, :, 0]))
return ExtremePoints(
wall=Rectangle(
top_left=Point(xmin, ymin_wall), bottom_right=Point(xmax, ymax_wall)
),
desk=Rectangle(
top_left=Point(xmin, ymin_desk), bottom_right=Point(xmax, ymax_desk)
),
)
def get_laser_points_in_region(
self, image: np.ndarray, region: Rectangle, is_obj: bool = False,
) -> Optional[np.ndarray]:
"""
Given an image and a rectangle defining a region, return the laser points
in that region. In case we are considering the wall or the desk, require
at least 30 points for better accuracy.
"""
top_left = region.top_left
bottom_right = region.bottom_right
region_image = image[top_left.y : bottom_right.y, top_left.x : bottom_right.x]
image_inv = cv.cvtColor(~region_image, cv.COLOR_BGR2HSV)
lower_red = self.lower_red_obj if is_obj else self.lower_red_planes
red_mask = cv.inRange(image_inv, lower_red, self.upper_red)
laser_points = cv.findNonZero(red_mask)
if laser_points is None or (not is_obj and len(laser_points) < 30):
return None
return laser_points
def offset_points(self, points: np.ndarray, offset: Point) -> np.ndarray:
"""Given a region of an image and a point, offset the region by that point."""
points[:, :, 0] += offset.x
points[:, :, 1] += offset.y
return points
def make_homogeneous(self, points: np.ndarray) -> np.ndarray:
"""
Given some points, convert them to homogeneous coordinates, i.e. add a trailing [1].
This function can move points from R^n to P^n, for instance:
* in R^2: [x y] --> [x y 1]
* in R^3: [x y z] --> [x y z 1]
"""
return np.hstack((points[:, 0], np.ones(points.shape[0]).reshape(-1, 1),))
def remove_obj_outliers(self, points: np.ndarray) -> Optional[np.ndarray]:
"""
Use the DBSCAN clustering algorithm in order to remove possible outliers from
the points detected as laser in the object. We are basically enforcing
continuity in the laser line on the object, i.e. looking for a dense
cluster of pixels. Interesting points are the ones whose label is not -1,
i.e. the ones belonging to a cluster that is not an outlier one.
"""
dbscan_result = self.dbscan.fit(points[:, 0])
mask = dbscan_result.labels_ != -1
return np.expand_dims(points[:, 0][mask], axis=1)
def get_colors(self, image: np.ndarray, coordinates: np.ndarray) -> np.ndarray:
"""
Given an image and a list of coordinates of shape (n_points, 1, 2),
return the RGB colors of those coordinates in the (0...1) range.
Notice that OpenCV uses BGR instead of RGB by default, thus we need to
flip the columns.
"""
x = coordinates.squeeze(1)
return np.flip(image[x[:, 1], x[:, 0]].astype(np.float64) / 255.0, axis=1)
def | (
self,
original_image: np.ndarray,
image: np.ndarray,
extreme_points: ExtremePoints,
) -> Tuple[
Optional[np.ndarray],
Optional[np.ndarray],
Optional[np.ndarray],
Optional[np.ndarray],
]:
"""
Given the interesting region of an image, containing the wall and desk planes
and the object, return the laser points in the three separate regions:
one for the wall plane, one for the desk plane, one of the object.
"""
height, width = image.shape[:2]
ymin_wall = extreme_points.wall.top_left.y
ymax_wall = extreme_points.wall.bottom_right.y
ymin_desk = extreme_points.desk.top_left.y
xmin = extreme_points.desk.top_left.x
laser_desk = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, ymin_desk - ymin_wall),
bottom_right=Point(width, height),
),
)
if laser_desk is not None:
laser_wall = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, 0),
bottom_right=Point(width, ymax_wall - ymin_wall),
),
)
if laser_wall is not None:
laser_obj = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, ymax_wall - ymin_wall),
bottom_right=Point(width, ymin_desk - ymin_wall),
),
is_obj=True,
)
if laser_obj is not None:
laser_desk = self.offset_points(
points=laser_desk, offset=Point(xmin, ymin_desk)
)
laser_wall = self.offset_points(
points=laser_wall, offset=Point(xmin, ymin_wall)
)
laser_obj = self.remove_obj_outliers(laser_obj)
if laser_obj is not None:
laser_obj = self.offset_points(
points=laser_obj, offset=Point(xmin, ymax_wall)
)
obj_colors = self.get_colors(original_image, laser_obj)
return laser_wall, laser_desk, laser_obj, obj_colors
return None, None, None, None
def save_3d_render(
self, points: List[np.ndarray], colors: List[np.ndarray]
) -> None:
"""
Given points in the 3D world, save the PLY file representing
the point cloud. This function saves both the original file and
a version to which an outlier removal process has been applied.
"""
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np.vstack(points).astype(np.float64))
pcd.colors = o3d.utility.Vector3dVector(np.vstack(colors))
if self.debug:
o3d.visualization.draw_geometries([pcd])
if not self.debug:
o3d.io.write_point_cloud(f"results/{self.filename[:-4]}.ply", pcd)
def read_frame(self, cap) -> Optional[np.ndarray]:
"""
Read a frame from the cap. Return None if there is no frame left.
"""
frame_raw = cap.read()[1]
if frame_raw is None:
cv.destroyAllWindows()
return None
return cv.undistort(frame_raw, self.K, self.dist)
def create_exiting_rays(
self, points: np.ndarray, is_obj: bool = False
) -> List[np.ndarray]:
"""
Given a set of 2D points, get their real world 3D coordinates (direction).
In general, mapping points from the real world [x, y, z, 1]
to the camera's reference world [x, y, 1] we should multiply
the real world coordinates by the 3x4 projection matrix P = K[R|T]
In our case, we want to obtain coordinates in the 3D world starting
from 2D points in the image, i.e. do the opposite.
These points are represented in the camera's reference frame:
this means that R=I and t=[0 0 0]. Only K remains, i.e. the inverse
operation is done by multiplying each point by K^-1.
Notice that points and directions, in such a situation, are really tight
concepts: we can represent a 3D line in space as
line(λ) = P1 + λ(P1 - P2). Since in this case P2 is the camera center,
we have that the line is P1 scaled by a factor λ.
"""
if not is_obj and len(points) > 100:
points = points[np.random.choice(points.shape[0], 100, replace=False,)]
return [self.K_inv @ point for point in points]
def compute_intersections(
self, plane: Plane, directions: List[np.ndarray]
) -> List[np.ndarray]:
"""
Given a plane represented by its origin and a normal
and a list of rays, compute the intersections between
the plane and the rays.
"""
return [
line_plane_intersection(
plane_origin=plane.origin,
plane_normal=plane.normal,
line_direction=direction,
)
for direction in directions
]
def run(self):
cap = cv.VideoCapture(f"videos/{self.filename}")
if not cap.isOpened():
return
first_frame = self.read_frame(cap)
if first_frame is None:
return
first_frame_thresh = threshold_image(first_frame)
desk_corners, wall_corners = self.get_desk_wall_corners(first_frame_thresh)
extreme_points = self.get_extreme_points(wall_corners, desk_corners)
desk_plane = self.get_H_R_t(desk_corners)
wall_plane = self.get_H_R_t(wall_corners)
if self.debug:
first_frame_copy = first_frame.copy()
cv.drawContours(first_frame_copy, [self.contour1], -1, (255, 255, 255), 2)
cv.drawContours(first_frame_copy, [self.contour2], -1, (255, 255, 255), 2)
draw_circles(first_frame_copy, desk_corners, text=True)
draw_circles(first_frame_copy, wall_corners, text=True)
show_image(first_frame_copy)
all_obj_points = []
all_obj_colors = []
while True:
frame = self.read_frame(cap)
if frame is None:
break
frame_copy = frame.copy()
frame_interesting = frame[
extreme_points.wall.top_left.y : extreme_points.desk.bottom_right.y,
extreme_points.wall.top_left.x : extreme_points.wall.bottom_right.x,
]
(laser_wall, laser_desk, laser_obj, obj_colors,) = self.get_laser_points(
first_frame, frame_interesting, extreme_points
)
if laser_wall is not None:
if self.debug:
draw_circles(frame_copy, laser_wall)
draw_circles(frame_copy, laser_desk)
draw_circles(frame_copy, laser_obj)
laser_wall = self.make_homogeneous(laser_wall)
laser_obj = self.make_homogeneous(laser_obj)
laser_desk = self.make_homogeneous(laser_desk)
wall_directions = self.create_exiting_rays(laser_wall, is_obj=False)
desk_directions = self.create_exiting_rays(laser_desk, is_obj=False)
obj_directions = self.create_exiting_rays(laser_obj, is_obj=True)
intersections_wall = self.compute_intersections(
wall_plane, wall_directions
)
intersections_desk = self.compute_intersections(
desk_plane, desk_directions
)
intersections_rects = np.array(intersections_wall + intersections_desk)
laser_plane = fit_plane(intersections_rects)
intersections_objs = self.compute_intersections(
laser_plane, obj_directions
)
all_obj_points.extend(intersections_objs)
all_obj_colors.extend(obj_colors)
if self.debug:
if show_image(frame_copy, continuous=True):
break
else:
if show_image(frame, continuous=True):
break
self.save_3d_render(all_obj_points, all_obj_colors)
cap.release()
cv.destroyAllWindows()
| get_laser_points | identifier_name |
Scanner3D.py | import math
from dataclasses import dataclass
from typing import List, Optional, Tuple
import cv2 as cv
import numpy as np
import open3d as o3d
from sklearn.cluster import DBSCAN
from src.utils import (
ExtremePoints,
Plane,
Point,
Rectangle,
draw_circles,
fit_plane,
line_plane_intersection,
show_image,
threshold_image,
)
@dataclass
class Scanner3D:
debug: bool
K: np.ndarray
K_inv: np.ndarray
dist: np.ndarray
filename: str = "cup1.mp4"
inner_rectangle: np.ndarray = np.array([[[0, 0]], [[23, 0]], [[23, 13]], [[0, 13]]])
lower_red_obj: np.ndarray = np.array([35, 25, 40])
lower_red_planes: np.ndarray = np.array([45, 30, 45])
upper_red: np.ndarray = np.array([100, 255, 255])
dbscan: DBSCAN = DBSCAN(eps=7, min_samples=20)
def get_rectangles_mask(self, thresh: np.ndarray) -> np.ndarray:
|
def sort_corners(self, corners: np.ndarray):
"""
Sort the 4 corners clockwise of a rectangle so that the top-left corner
is the first one.
"""
center = np.sum(corners, axis=0) / 4
sorted_corners = sorted(
corners,
key=lambda p: math.atan2(p[0][0] - center[0][0], p[0][1] - center[0][1]),
reverse=True,
)
return np.roll(sorted_corners, 1, axis=0)
def get_desk_wall_corners(
self, thresh: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""
Given a thresholded image of the scene and a mask representing the two
known rectangles, return the corners of those rectangles (8 in total)
with sub-pixel accuracy. The corners returned are already sorted.
"""
mask = self.get_rectangles_mask(thresh)
assert thresh.shape[:2] == mask.shape[:2]
corners = cv.goodFeaturesToTrack(
thresh,
maxCorners=8,
qualityLevel=0.01,
minDistance=10,
mask=mask,
blockSize=5,
)
criteria = (cv.TERM_CRITERIA_EPS + cv.TermCriteria_COUNT, 20, 0.001)
corners = cv.cornerSubPix(
thresh, corners, winSize=(7, 7), zeroZone=(-1, -1), criteria=criteria
)
y_middle = thresh.shape[0] / 2
desk_corners = np.expand_dims(corners[corners[:, :, 1] > y_middle], axis=1)
wall_corners = np.expand_dims(corners[corners[:, :, 1] <= y_middle], axis=1)
sorted_desk_corners = self.sort_corners(desk_corners)
sorted_wall_corners = self.sort_corners(wall_corners)
return sorted_desk_corners, sorted_wall_corners
def get_H_R_t(self, corners: np.ndarray) -> Plane:
"""
Given 4 sorted corners, compute the homography between the corners
and the rectangle's ground truth and return the information
on the mapped plane.
In other words, this function returns information on a plane
(in particular, the desk's or wall's).
The plane's origin is in the top-left corner of the rectangle,
and the normal is perpendicular to that plane.
"""
H = cv.findHomography(self.inner_rectangle, corners)[0]
result = self.K_inv @ H
result /= cv.norm(result[:, 1])
r0, r1, t = np.hsplit(result, 3)
r2 = np.cross(r0.T, r1.T).T
_, u, vt = cv.SVDecomp(np.hstack([r0, r1, r2]))
R = u @ vt
return Plane(origin=t[:, 0], normal=R[:, 2], R=R)
def get_extreme_points(
self, wall_corners: np.ndarray, desk_corners: np.ndarray
) -> ExtremePoints:
"""
Given the corners of the rectangles on the wall and on the desk,
return the coordinates for a tight bounding box of the area
between the two rectangles.
"""
ymin_wall = int(np.min(wall_corners[:, :, 1]))
ymax_wall = int(np.max(wall_corners[:, :, 1]))
ymin_desk = int(np.min(desk_corners[:, :, 1]))
ymax_desk = int(np.max(desk_corners[:, :, 1]))
xmin = int(np.min(wall_corners[:, :, 0]))
xmax = int(np.max(wall_corners[:, :, 0]))
return ExtremePoints(
wall=Rectangle(
top_left=Point(xmin, ymin_wall), bottom_right=Point(xmax, ymax_wall)
),
desk=Rectangle(
top_left=Point(xmin, ymin_desk), bottom_right=Point(xmax, ymax_desk)
),
)
def get_laser_points_in_region(
self, image: np.ndarray, region: Rectangle, is_obj: bool = False,
) -> Optional[np.ndarray]:
"""
Given an image and a rectangle defining a region, return the laser points
in that region. In case we are considering the wall or the desk, require
at least 30 points for better accuracy.
"""
top_left = region.top_left
bottom_right = region.bottom_right
region_image = image[top_left.y : bottom_right.y, top_left.x : bottom_right.x]
image_inv = cv.cvtColor(~region_image, cv.COLOR_BGR2HSV)
lower_red = self.lower_red_obj if is_obj else self.lower_red_planes
red_mask = cv.inRange(image_inv, lower_red, self.upper_red)
laser_points = cv.findNonZero(red_mask)
if laser_points is None or (not is_obj and len(laser_points) < 30):
return None
return laser_points
def offset_points(self, points: np.ndarray, offset: Point) -> np.ndarray:
"""Given a region of an image and a point, offset the region by that point."""
points[:, :, 0] += offset.x
points[:, :, 1] += offset.y
return points
def make_homogeneous(self, points: np.ndarray) -> np.ndarray:
"""
Given some points, convert them to homogeneous coordinates, i.e. add a trailing [1].
This function can move points from R^n to P^n, for instance:
* in R^2: [x y] --> [x y 1]
* in R^3: [x y z] --> [x y z 1]
"""
return np.hstack((points[:, 0], np.ones(points.shape[0]).reshape(-1, 1),))
def remove_obj_outliers(self, points: np.ndarray) -> Optional[np.ndarray]:
"""
Use the DBSCAN clustering algorithm in order to remove possible outliers from
the points detected as laser in the object. We are basically enforcing
continuity in the laser line on the object, i.e. looking for a dense
cluster of pixels. Interesting points are the ones whose label is not -1,
i.e. the ones belonging to a cluster that is not an outlier one.
"""
dbscan_result = self.dbscan.fit(points[:, 0])
mask = dbscan_result.labels_ != -1
return np.expand_dims(points[:, 0][mask], axis=1)
def get_colors(self, image: np.ndarray, coordinates: np.ndarray) -> np.ndarray:
"""
Given an image and a list of coordinates of shape (n_points, 1, 2),
return the RGB colors of those coordinates in the (0...1) range.
Notice that OpenCV uses BGR instead of RGB by default, thus we need to
flip the columns.
"""
x = coordinates.squeeze(1)
return np.flip(image[x[:, 1], x[:, 0]].astype(np.float64) / 255.0, axis=1)
def get_laser_points(
self,
original_image: np.ndarray,
image: np.ndarray,
extreme_points: ExtremePoints,
) -> Tuple[
Optional[np.ndarray],
Optional[np.ndarray],
Optional[np.ndarray],
Optional[np.ndarray],
]:
"""
Given the interesting region of an image, containing the wall and desk planes
and the object, return the laser points in the three separate regions:
one for the wall plane, one for the desk plane, one of the object.
"""
height, width = image.shape[:2]
ymin_wall = extreme_points.wall.top_left.y
ymax_wall = extreme_points.wall.bottom_right.y
ymin_desk = extreme_points.desk.top_left.y
xmin = extreme_points.desk.top_left.x
laser_desk = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, ymin_desk - ymin_wall),
bottom_right=Point(width, height),
),
)
if laser_desk is not None:
laser_wall = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, 0),
bottom_right=Point(width, ymax_wall - ymin_wall),
),
)
if laser_wall is not None:
laser_obj = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, ymax_wall - ymin_wall),
bottom_right=Point(width, ymin_desk - ymin_wall),
),
is_obj=True,
)
if laser_obj is not None:
laser_desk = self.offset_points(
points=laser_desk, offset=Point(xmin, ymin_desk)
)
laser_wall = self.offset_points(
points=laser_wall, offset=Point(xmin, ymin_wall)
)
laser_obj = self.remove_obj_outliers(laser_obj)
if laser_obj is not None:
laser_obj = self.offset_points(
points=laser_obj, offset=Point(xmin, ymax_wall)
)
obj_colors = self.get_colors(original_image, laser_obj)
return laser_wall, laser_desk, laser_obj, obj_colors
return None, None, None, None
def save_3d_render(
self, points: List[np.ndarray], colors: List[np.ndarray]
) -> None:
"""
Given points in the 3D world, save the PLY file representing
the point cloud. This function saves both the original file and
a version to which an outlier removal process has been applied.
"""
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np.vstack(points).astype(np.float64))
pcd.colors = o3d.utility.Vector3dVector(np.vstack(colors))
if self.debug:
o3d.visualization.draw_geometries([pcd])
if not self.debug:
o3d.io.write_point_cloud(f"results/{self.filename[:-4]}.ply", pcd)
def read_frame(self, cap) -> Optional[np.ndarray]:
"""
Read a frame from the cap. Return None if there is no frame left.
"""
frame_raw = cap.read()[1]
if frame_raw is None:
cv.destroyAllWindows()
return None
return cv.undistort(frame_raw, self.K, self.dist)
def create_exiting_rays(
self, points: np.ndarray, is_obj: bool = False
) -> List[np.ndarray]:
"""
Given a set of 2D points, get their real world 3D coordinates (direction).
In general, mapping points from the real world [x, y, z, 1]
to the camera's reference world [x, y, 1] we should multiply
the real world coordinates by the 3x4 projection matrix P = K[R|T]
In our case, we want to obtain coordinates in the 3D world starting
from 2D points in the image, i.e. do the opposite.
These points are represented in the camera's reference frame:
this means that R=I and t=[0 0 0]. Only K remains, i.e. the inverse
operation is done by multiplying each point by K^-1.
Notice that points and directions, in such a situation, are really tight
concepts: we can represent a 3D line in space as
line(λ) = P1 + λ(P1 - P2). Since in this case P2 is the camera center,
we have that the line is P1 scaled by a factor λ.
"""
if not is_obj and len(points) > 100:
points = points[np.random.choice(points.shape[0], 100, replace=False,)]
return [self.K_inv @ point for point in points]
def compute_intersections(
self, plane: Plane, directions: List[np.ndarray]
) -> List[np.ndarray]:
"""
Given a plane represented by its origin and a normal
and a list of rays, compute the intersections between
the plane and the rays.
"""
return [
line_plane_intersection(
plane_origin=plane.origin,
plane_normal=plane.normal,
line_direction=direction,
)
for direction in directions
]
def run(self):
cap = cv.VideoCapture(f"videos/{self.filename}")
if not cap.isOpened():
return
first_frame = self.read_frame(cap)
if first_frame is None:
return
first_frame_thresh = threshold_image(first_frame)
desk_corners, wall_corners = self.get_desk_wall_corners(first_frame_thresh)
extreme_points = self.get_extreme_points(wall_corners, desk_corners)
desk_plane = self.get_H_R_t(desk_corners)
wall_plane = self.get_H_R_t(wall_corners)
if self.debug:
first_frame_copy = first_frame.copy()
cv.drawContours(first_frame_copy, [self.contour1], -1, (255, 255, 255), 2)
cv.drawContours(first_frame_copy, [self.contour2], -1, (255, 255, 255), 2)
draw_circles(first_frame_copy, desk_corners, text=True)
draw_circles(first_frame_copy, wall_corners, text=True)
show_image(first_frame_copy)
all_obj_points = []
all_obj_colors = []
while True:
frame = self.read_frame(cap)
if frame is None:
break
frame_copy = frame.copy()
frame_interesting = frame[
extreme_points.wall.top_left.y : extreme_points.desk.bottom_right.y,
extreme_points.wall.top_left.x : extreme_points.wall.bottom_right.x,
]
(laser_wall, laser_desk, laser_obj, obj_colors,) = self.get_laser_points(
first_frame, frame_interesting, extreme_points
)
if laser_wall is not None:
if self.debug:
draw_circles(frame_copy, laser_wall)
draw_circles(frame_copy, laser_desk)
draw_circles(frame_copy, laser_obj)
laser_wall = self.make_homogeneous(laser_wall)
laser_obj = self.make_homogeneous(laser_obj)
laser_desk = self.make_homogeneous(laser_desk)
wall_directions = self.create_exiting_rays(laser_wall, is_obj=False)
desk_directions = self.create_exiting_rays(laser_desk, is_obj=False)
obj_directions = self.create_exiting_rays(laser_obj, is_obj=True)
intersections_wall = self.compute_intersections(
wall_plane, wall_directions
)
intersections_desk = self.compute_intersections(
desk_plane, desk_directions
)
intersections_rects = np.array(intersections_wall + intersections_desk)
laser_plane = fit_plane(intersections_rects)
intersections_objs = self.compute_intersections(
laser_plane, obj_directions
)
all_obj_points.extend(intersections_objs)
all_obj_colors.extend(obj_colors)
if self.debug:
if show_image(frame_copy, continuous=True):
break
else:
if show_image(frame, continuous=True):
break
self.save_3d_render(all_obj_points, all_obj_colors)
cap.release()
cv.destroyAllWindows()
| """
Given a thresholded image of the scene (ideally, the first frame),
return the masks for the two known rectangles: one on the wall and one on the desk.
"""
contours = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)[0]
mask = np.zeros(thresh.shape, np.uint8)
good_contours = sorted(
[cnt for cnt in contours if 100000 < cv.contourArea(cnt) < 200000],
key=cv.contourArea,
)
setattr(self, "contour1", good_contours[0])
setattr(
self,
"contour2",
good_contours[1]
if cv.pointPolygonTest(
good_contours[1], tuple(good_contours[0][0][0]), False
)
< 0
else good_contours[2],
)
cv.drawContours(mask, [self.contour1], 0, 255, -1)
cv.drawContours(mask, [self.contour2], 0, 255, -1)
return mask | identifier_body |
Scanner3D.py | import math
from dataclasses import dataclass
from typing import List, Optional, Tuple
import cv2 as cv
import numpy as np
import open3d as o3d
from sklearn.cluster import DBSCAN
from src.utils import (
ExtremePoints,
Plane,
Point,
Rectangle,
draw_circles,
fit_plane,
line_plane_intersection,
show_image,
threshold_image,
)
@dataclass
class Scanner3D:
debug: bool
K: np.ndarray
K_inv: np.ndarray
dist: np.ndarray
filename: str = "cup1.mp4"
inner_rectangle: np.ndarray = np.array([[[0, 0]], [[23, 0]], [[23, 13]], [[0, 13]]])
lower_red_obj: np.ndarray = np.array([35, 25, 40])
lower_red_planes: np.ndarray = np.array([45, 30, 45])
upper_red: np.ndarray = np.array([100, 255, 255])
dbscan: DBSCAN = DBSCAN(eps=7, min_samples=20)
def get_rectangles_mask(self, thresh: np.ndarray) -> np.ndarray:
"""
Given a thresholded image of the scene (ideally, the first frame),
return the masks for the two known rectangles: one on the wall and one on the desk.
"""
contours = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)[0]
mask = np.zeros(thresh.shape, np.uint8)
good_contours = sorted(
[cnt for cnt in contours if 100000 < cv.contourArea(cnt) < 200000],
key=cv.contourArea,
)
setattr(self, "contour1", good_contours[0])
setattr(
self,
"contour2",
good_contours[1]
if cv.pointPolygonTest(
good_contours[1], tuple(good_contours[0][0][0]), False
)
< 0
else good_contours[2],
)
cv.drawContours(mask, [self.contour1], 0, 255, -1)
cv.drawContours(mask, [self.contour2], 0, 255, -1)
return mask
def sort_corners(self, corners: np.ndarray):
"""
Sort the 4 corners clockwise of a rectangle so that the top-left corner
is the first one.
"""
center = np.sum(corners, axis=0) / 4
sorted_corners = sorted(
corners,
key=lambda p: math.atan2(p[0][0] - center[0][0], p[0][1] - center[0][1]),
reverse=True,
)
return np.roll(sorted_corners, 1, axis=0)
def get_desk_wall_corners(
self, thresh: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""
Given a thresholded image of the scene and a mask representing the two
known rectangles, return the corners of those rectangles (8 in total)
with sub-pixel accuracy. The corners returned are already sorted.
"""
mask = self.get_rectangles_mask(thresh)
assert thresh.shape[:2] == mask.shape[:2]
corners = cv.goodFeaturesToTrack(
thresh,
maxCorners=8,
qualityLevel=0.01,
minDistance=10,
mask=mask,
blockSize=5,
)
criteria = (cv.TERM_CRITERIA_EPS + cv.TermCriteria_COUNT, 20, 0.001)
corners = cv.cornerSubPix(
thresh, corners, winSize=(7, 7), zeroZone=(-1, -1), criteria=criteria
)
y_middle = thresh.shape[0] / 2
desk_corners = np.expand_dims(corners[corners[:, :, 1] > y_middle], axis=1)
wall_corners = np.expand_dims(corners[corners[:, :, 1] <= y_middle], axis=1)
sorted_desk_corners = self.sort_corners(desk_corners)
sorted_wall_corners = self.sort_corners(wall_corners)
return sorted_desk_corners, sorted_wall_corners
def get_H_R_t(self, corners: np.ndarray) -> Plane:
"""
Given 4 sorted corners, compute the homography between the corners
and the rectangle's ground truth and return the information
on the mapped plane.
In other words, this function returns information on a plane
(in particular, the desk's or wall's).
The plane's origin is in the top-left corner of the rectangle,
and the normal is perpendicular to that plane.
"""
H = cv.findHomography(self.inner_rectangle, corners)[0]
result = self.K_inv @ H
result /= cv.norm(result[:, 1])
r0, r1, t = np.hsplit(result, 3)
r2 = np.cross(r0.T, r1.T).T
_, u, vt = cv.SVDecomp(np.hstack([r0, r1, r2]))
R = u @ vt
return Plane(origin=t[:, 0], normal=R[:, 2], R=R)
def get_extreme_points(
self, wall_corners: np.ndarray, desk_corners: np.ndarray
) -> ExtremePoints:
"""
Given the corners of the rectangles on the wall and on the desk,
return the coordinates for a tight bounding box of the area
between the two rectangles.
"""
ymin_wall = int(np.min(wall_corners[:, :, 1]))
ymax_wall = int(np.max(wall_corners[:, :, 1]))
ymin_desk = int(np.min(desk_corners[:, :, 1]))
ymax_desk = int(np.max(desk_corners[:, :, 1]))
xmin = int(np.min(wall_corners[:, :, 0]))
xmax = int(np.max(wall_corners[:, :, 0]))
return ExtremePoints(
wall=Rectangle(
top_left=Point(xmin, ymin_wall), bottom_right=Point(xmax, ymax_wall)
),
desk=Rectangle(
top_left=Point(xmin, ymin_desk), bottom_right=Point(xmax, ymax_desk)
),
)
def get_laser_points_in_region(
self, image: np.ndarray, region: Rectangle, is_obj: bool = False,
) -> Optional[np.ndarray]:
"""
Given an image and a rectangle defining a region, return the laser points
in that region. In case we are considering the wall or the desk, require
at least 30 points for better accuracy.
"""
top_left = region.top_left
bottom_right = region.bottom_right
region_image = image[top_left.y : bottom_right.y, top_left.x : bottom_right.x]
image_inv = cv.cvtColor(~region_image, cv.COLOR_BGR2HSV)
lower_red = self.lower_red_obj if is_obj else self.lower_red_planes
red_mask = cv.inRange(image_inv, lower_red, self.upper_red)
laser_points = cv.findNonZero(red_mask)
if laser_points is None or (not is_obj and len(laser_points) < 30):
return None
return laser_points
def offset_points(self, points: np.ndarray, offset: Point) -> np.ndarray:
"""Given a region of an image and a point, offset the region by that point."""
points[:, :, 0] += offset.x
points[:, :, 1] += offset.y
return points
def make_homogeneous(self, points: np.ndarray) -> np.ndarray:
"""
Given some points, convert them to homogeneous coordinates, i.e. add a trailing [1].
This function can move points from R^n to P^n, for instance:
* in R^2: [x y] --> [x y 1]
* in R^3: [x y z] --> [x y z 1]
"""
return np.hstack((points[:, 0], np.ones(points.shape[0]).reshape(-1, 1),))
def remove_obj_outliers(self, points: np.ndarray) -> Optional[np.ndarray]:
"""
Use the DBSCAN clustering algorithm in order to remove possible outliers from
the points detected as laser in the object. We are basically enforcing
continuity in the laser line on the object, i.e. looking for a dense
cluster of pixels. Interesting points are the ones whose label is not -1,
i.e. the ones belonging to a cluster that is not an outlier one.
"""
dbscan_result = self.dbscan.fit(points[:, 0])
mask = dbscan_result.labels_ != -1
return np.expand_dims(points[:, 0][mask], axis=1)
def get_colors(self, image: np.ndarray, coordinates: np.ndarray) -> np.ndarray:
"""
Given an image and a list of coordinates of shape (n_points, 1, 2),
return the RGB colors of those coordinates in the (0...1) range.
Notice that OpenCV uses BGR instead of RGB by default, thus we need to
flip the columns.
"""
x = coordinates.squeeze(1)
return np.flip(image[x[:, 1], x[:, 0]].astype(np.float64) / 255.0, axis=1)
def get_laser_points(
self,
original_image: np.ndarray,
image: np.ndarray,
extreme_points: ExtremePoints,
) -> Tuple[
Optional[np.ndarray],
Optional[np.ndarray],
Optional[np.ndarray],
Optional[np.ndarray],
]:
"""
Given the interesting region of an image, containing the wall and desk planes
and the object, return the laser points in the three separate regions:
one for the wall plane, one for the desk plane, one of the object.
"""
height, width = image.shape[:2]
ymin_wall = extreme_points.wall.top_left.y
ymax_wall = extreme_points.wall.bottom_right.y
ymin_desk = extreme_points.desk.top_left.y
xmin = extreme_points.desk.top_left.x
laser_desk = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, ymin_desk - ymin_wall),
bottom_right=Point(width, height),
),
)
if laser_desk is not None:
laser_wall = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, 0),
bottom_right=Point(width, ymax_wall - ymin_wall),
),
)
if laser_wall is not None:
laser_obj = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, ymax_wall - ymin_wall),
bottom_right=Point(width, ymin_desk - ymin_wall),
),
is_obj=True,
)
if laser_obj is not None:
laser_desk = self.offset_points(
points=laser_desk, offset=Point(xmin, ymin_desk)
)
laser_wall = self.offset_points(
points=laser_wall, offset=Point(xmin, ymin_wall)
)
laser_obj = self.remove_obj_outliers(laser_obj)
if laser_obj is not None:
laser_obj = self.offset_points(
points=laser_obj, offset=Point(xmin, ymax_wall)
)
obj_colors = self.get_colors(original_image, laser_obj)
return laser_wall, laser_desk, laser_obj, obj_colors
return None, None, None, None
def save_3d_render(
self, points: List[np.ndarray], colors: List[np.ndarray]
) -> None:
"""
Given points in the 3D world, save the PLY file representing
the point cloud. This function saves both the original file and
a version to which an outlier removal process has been applied.
"""
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np.vstack(points).astype(np.float64))
pcd.colors = o3d.utility.Vector3dVector(np.vstack(colors))
if self.debug:
o3d.visualization.draw_geometries([pcd])
if not self.debug:
o3d.io.write_point_cloud(f"results/{self.filename[:-4]}.ply", pcd)
def read_frame(self, cap) -> Optional[np.ndarray]:
"""
Read a frame from the cap. Return None if there is no frame left.
"""
frame_raw = cap.read()[1]
if frame_raw is None:
cv.destroyAllWindows()
return None
return cv.undistort(frame_raw, self.K, self.dist)
def create_exiting_rays(
self, points: np.ndarray, is_obj: bool = False
) -> List[np.ndarray]:
"""
Given a set of 2D points, get their real world 3D coordinates (direction).
In general, mapping points from the real world [x, y, z, 1]
to the camera's reference world [x, y, 1] we should multiply
the real world coordinates by the 3x4 projection matrix P = K[R|T]
In our case, we want to obtain coordinates in the 3D world starting
from 2D points in the image, i.e. do the opposite.
These points are represented in the camera's reference frame:
this means that R=I and t=[0 0 0]. Only K remains, i.e. the inverse
operation is done by multiplying each point by K^-1.
Notice that points and directions, in such a situation, are really tight
concepts: we can represent a 3D line in space as
line(λ) = P1 + λ(P1 - P2). Since in this case P2 is the camera center,
we have that the line is P1 scaled by a factor λ.
"""
if not is_obj and len(points) > 100:
points = points[np.random.choice(points.shape[0], 100, replace=False,)]
return [self.K_inv @ point for point in points]
def compute_intersections(
self, plane: Plane, directions: List[np.ndarray]
) -> List[np.ndarray]:
"""
Given a plane represented by its origin and a normal
and a list of rays, compute the intersections between
the plane and the rays.
"""
return [
line_plane_intersection(
plane_origin=plane.origin,
plane_normal=plane.normal,
line_direction=direction,
)
for direction in directions
]
def run(self):
cap = cv.VideoCapture(f"videos/{self.filename}")
if not cap.isOpened():
return
first_frame = self.read_frame(cap)
if first_frame is None:
ret | first_frame_thresh = threshold_image(first_frame)
desk_corners, wall_corners = self.get_desk_wall_corners(first_frame_thresh)
extreme_points = self.get_extreme_points(wall_corners, desk_corners)
desk_plane = self.get_H_R_t(desk_corners)
wall_plane = self.get_H_R_t(wall_corners)
if self.debug:
first_frame_copy = first_frame.copy()
cv.drawContours(first_frame_copy, [self.contour1], -1, (255, 255, 255), 2)
cv.drawContours(first_frame_copy, [self.contour2], -1, (255, 255, 255), 2)
draw_circles(first_frame_copy, desk_corners, text=True)
draw_circles(first_frame_copy, wall_corners, text=True)
show_image(first_frame_copy)
all_obj_points = []
all_obj_colors = []
while True:
frame = self.read_frame(cap)
if frame is None:
break
frame_copy = frame.copy()
frame_interesting = frame[
extreme_points.wall.top_left.y : extreme_points.desk.bottom_right.y,
extreme_points.wall.top_left.x : extreme_points.wall.bottom_right.x,
]
(laser_wall, laser_desk, laser_obj, obj_colors,) = self.get_laser_points(
first_frame, frame_interesting, extreme_points
)
if laser_wall is not None:
if self.debug:
draw_circles(frame_copy, laser_wall)
draw_circles(frame_copy, laser_desk)
draw_circles(frame_copy, laser_obj)
laser_wall = self.make_homogeneous(laser_wall)
laser_obj = self.make_homogeneous(laser_obj)
laser_desk = self.make_homogeneous(laser_desk)
wall_directions = self.create_exiting_rays(laser_wall, is_obj=False)
desk_directions = self.create_exiting_rays(laser_desk, is_obj=False)
obj_directions = self.create_exiting_rays(laser_obj, is_obj=True)
intersections_wall = self.compute_intersections(
wall_plane, wall_directions
)
intersections_desk = self.compute_intersections(
desk_plane, desk_directions
)
intersections_rects = np.array(intersections_wall + intersections_desk)
laser_plane = fit_plane(intersections_rects)
intersections_objs = self.compute_intersections(
laser_plane, obj_directions
)
all_obj_points.extend(intersections_objs)
all_obj_colors.extend(obj_colors)
if self.debug:
if show_image(frame_copy, continuous=True):
break
else:
if show_image(frame, continuous=True):
break
self.save_3d_render(all_obj_points, all_obj_colors)
cap.release()
cv.destroyAllWindows()
| urn
| conditional_block |
Scanner3D.py | import math
from dataclasses import dataclass
from typing import List, Optional, Tuple
import cv2 as cv
import numpy as np
import open3d as o3d
from sklearn.cluster import DBSCAN
from src.utils import (
ExtremePoints,
Plane,
Point,
Rectangle,
draw_circles,
fit_plane,
line_plane_intersection,
show_image,
threshold_image,
)
@dataclass
class Scanner3D:
debug: bool
K: np.ndarray
K_inv: np.ndarray
dist: np.ndarray
filename: str = "cup1.mp4"
inner_rectangle: np.ndarray = np.array([[[0, 0]], [[23, 0]], [[23, 13]], [[0, 13]]])
lower_red_obj: np.ndarray = np.array([35, 25, 40])
lower_red_planes: np.ndarray = np.array([45, 30, 45])
upper_red: np.ndarray = np.array([100, 255, 255])
dbscan: DBSCAN = DBSCAN(eps=7, min_samples=20)
def get_rectangles_mask(self, thresh: np.ndarray) -> np.ndarray:
"""
Given a thresholded image of the scene (ideally, the first frame),
return the masks for the two known rectangles: one on the wall and one on the desk.
"""
contours = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)[0]
mask = np.zeros(thresh.shape, np.uint8)
good_contours = sorted(
[cnt for cnt in contours if 100000 < cv.contourArea(cnt) < 200000],
key=cv.contourArea,
)
setattr(self, "contour1", good_contours[0])
setattr(
self,
"contour2",
good_contours[1]
if cv.pointPolygonTest(
good_contours[1], tuple(good_contours[0][0][0]), False
)
< 0
else good_contours[2],
)
cv.drawContours(mask, [self.contour1], 0, 255, -1)
cv.drawContours(mask, [self.contour2], 0, 255, -1)
return mask
def sort_corners(self, corners: np.ndarray):
"""
Sort the 4 corners clockwise of a rectangle so that the top-left corner
is the first one.
"""
center = np.sum(corners, axis=0) / 4
sorted_corners = sorted(
corners,
key=lambda p: math.atan2(p[0][0] - center[0][0], p[0][1] - center[0][1]),
reverse=True,
)
return np.roll(sorted_corners, 1, axis=0)
def get_desk_wall_corners(
self, thresh: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""
Given a thresholded image of the scene and a mask representing the two
known rectangles, return the corners of those rectangles (8 in total)
with sub-pixel accuracy. The corners returned are already sorted.
"""
mask = self.get_rectangles_mask(thresh)
assert thresh.shape[:2] == mask.shape[:2]
corners = cv.goodFeaturesToTrack(
thresh,
maxCorners=8,
qualityLevel=0.01,
minDistance=10,
mask=mask,
blockSize=5,
)
criteria = (cv.TERM_CRITERIA_EPS + cv.TermCriteria_COUNT, 20, 0.001)
corners = cv.cornerSubPix(
thresh, corners, winSize=(7, 7), zeroZone=(-1, -1), criteria=criteria
)
y_middle = thresh.shape[0] / 2
desk_corners = np.expand_dims(corners[corners[:, :, 1] > y_middle], axis=1)
wall_corners = np.expand_dims(corners[corners[:, :, 1] <= y_middle], axis=1)
sorted_desk_corners = self.sort_corners(desk_corners)
sorted_wall_corners = self.sort_corners(wall_corners)
return sorted_desk_corners, sorted_wall_corners
def get_H_R_t(self, corners: np.ndarray) -> Plane:
"""
Given 4 sorted corners, compute the homography between the corners
and the rectangle's ground truth and return the information
on the mapped plane.
In other words, this function returns information on a plane
(in particular, the desk's or wall's).
The plane's origin is in the top-left corner of the rectangle,
and the normal is perpendicular to that plane.
"""
H = cv.findHomography(self.inner_rectangle, corners)[0]
result = self.K_inv @ H
result /= cv.norm(result[:, 1])
r0, r1, t = np.hsplit(result, 3)
r2 = np.cross(r0.T, r1.T).T
_, u, vt = cv.SVDecomp(np.hstack([r0, r1, r2]))
R = u @ vt
return Plane(origin=t[:, 0], normal=R[:, 2], R=R)
def get_extreme_points(
self, wall_corners: np.ndarray, desk_corners: np.ndarray
) -> ExtremePoints:
"""
Given the corners of the rectangles on the wall and on the desk,
return the coordinates for a tight bounding box of the area
between the two rectangles.
"""
ymin_wall = int(np.min(wall_corners[:, :, 1]))
ymax_wall = int(np.max(wall_corners[:, :, 1]))
ymin_desk = int(np.min(desk_corners[:, :, 1]))
ymax_desk = int(np.max(desk_corners[:, :, 1]))
xmin = int(np.min(wall_corners[:, :, 0]))
xmax = int(np.max(wall_corners[:, :, 0]))
return ExtremePoints(
wall=Rectangle(
top_left=Point(xmin, ymin_wall), bottom_right=Point(xmax, ymax_wall)
),
desk=Rectangle(
top_left=Point(xmin, ymin_desk), bottom_right=Point(xmax, ymax_desk)
),
)
def get_laser_points_in_region(
self, image: np.ndarray, region: Rectangle, is_obj: bool = False,
) -> Optional[np.ndarray]:
"""
Given an image and a rectangle defining a region, return the laser points
in that region. In case we are considering the wall or the desk, require
at least 30 points for better accuracy.
"""
top_left = region.top_left
bottom_right = region.bottom_right
region_image = image[top_left.y : bottom_right.y, top_left.x : bottom_right.x]
image_inv = cv.cvtColor(~region_image, cv.COLOR_BGR2HSV)
lower_red = self.lower_red_obj if is_obj else self.lower_red_planes
red_mask = cv.inRange(image_inv, lower_red, self.upper_red)
laser_points = cv.findNonZero(red_mask)
if laser_points is None or (not is_obj and len(laser_points) < 30):
return None
return laser_points
def offset_points(self, points: np.ndarray, offset: Point) -> np.ndarray:
"""Given a region of an image and a point, offset the region by that point."""
points[:, :, 0] += offset.x
points[:, :, 1] += offset.y
return points
def make_homogeneous(self, points: np.ndarray) -> np.ndarray:
"""
Given some points, convert them to homogeneous coordinates, i.e. add a trailing [1].
This function can move points from R^n to P^n, for instance:
* in R^2: [x y] --> [x y 1]
* in R^3: [x y z] --> [x y z 1]
"""
return np.hstack((points[:, 0], np.ones(points.shape[0]).reshape(-1, 1),))
def remove_obj_outliers(self, points: np.ndarray) -> Optional[np.ndarray]:
"""
Use the DBSCAN clustering algorithm in order to remove possible outliers from
the points detected as laser in the object. We are basically enforcing
continuity in the laser line on the object, i.e. looking for a dense
cluster of pixels. Interesting points are the ones whose label is not -1,
i.e. the ones belonging to a cluster that is not an outlier one.
"""
dbscan_result = self.dbscan.fit(points[:, 0])
mask = dbscan_result.labels_ != -1
return np.expand_dims(points[:, 0][mask], axis=1)
def get_colors(self, image: np.ndarray, coordinates: np.ndarray) -> np.ndarray:
"""
Given an image and a list of coordinates of shape (n_points, 1, 2),
return the RGB colors of those coordinates in the (0...1) range.
Notice that OpenCV uses BGR instead of RGB by default, thus we need to
flip the columns.
"""
x = coordinates.squeeze(1)
return np.flip(image[x[:, 1], x[:, 0]].astype(np.float64) / 255.0, axis=1)
def get_laser_points(
self,
original_image: np.ndarray,
image: np.ndarray,
extreme_points: ExtremePoints,
) -> Tuple[
Optional[np.ndarray],
Optional[np.ndarray],
Optional[np.ndarray],
Optional[np.ndarray],
]:
"""
Given the interesting region of an image, containing the wall and desk planes
and the object, return the laser points in the three separate regions:
one for the wall plane, one for the desk plane, one of the object.
"""
height, width = image.shape[:2]
ymin_wall = extreme_points.wall.top_left.y
ymax_wall = extreme_points.wall.bottom_right.y
ymin_desk = extreme_points.desk.top_left.y
xmin = extreme_points.desk.top_left.x
laser_desk = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, ymin_desk - ymin_wall),
bottom_right=Point(width, height),
),
)
if laser_desk is not None:
laser_wall = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, 0),
bottom_right=Point(width, ymax_wall - ymin_wall),
),
)
if laser_wall is not None:
laser_obj = self.get_laser_points_in_region(
image=image,
region=Rectangle(
top_left=Point(0, ymax_wall - ymin_wall),
bottom_right=Point(width, ymin_desk - ymin_wall),
),
is_obj=True,
)
if laser_obj is not None:
laser_desk = self.offset_points(
| laser_wall = self.offset_points(
points=laser_wall, offset=Point(xmin, ymin_wall)
)
laser_obj = self.remove_obj_outliers(laser_obj)
if laser_obj is not None:
laser_obj = self.offset_points(
points=laser_obj, offset=Point(xmin, ymax_wall)
)
obj_colors = self.get_colors(original_image, laser_obj)
return laser_wall, laser_desk, laser_obj, obj_colors
return None, None, None, None
def save_3d_render(
self, points: List[np.ndarray], colors: List[np.ndarray]
) -> None:
"""
Given points in the 3D world, save the PLY file representing
the point cloud. This function saves both the original file and
a version to which an outlier removal process has been applied.
"""
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np.vstack(points).astype(np.float64))
pcd.colors = o3d.utility.Vector3dVector(np.vstack(colors))
if self.debug:
o3d.visualization.draw_geometries([pcd])
if not self.debug:
o3d.io.write_point_cloud(f"results/{self.filename[:-4]}.ply", pcd)
def read_frame(self, cap) -> Optional[np.ndarray]:
"""
Read a frame from the cap. Return None if there is no frame left.
"""
frame_raw = cap.read()[1]
if frame_raw is None:
cv.destroyAllWindows()
return None
return cv.undistort(frame_raw, self.K, self.dist)
def create_exiting_rays(
self, points: np.ndarray, is_obj: bool = False
) -> List[np.ndarray]:
"""
Given a set of 2D points, get their real world 3D coordinates (direction).
In general, mapping points from the real world [x, y, z, 1]
to the camera's reference world [x, y, 1] we should multiply
the real world coordinates by the 3x4 projection matrix P = K[R|T]
In our case, we want to obtain coordinates in the 3D world starting
from 2D points in the image, i.e. do the opposite.
These points are represented in the camera's reference frame:
this means that R=I and t=[0 0 0]. Only K remains, i.e. the inverse
operation is done by multiplying each point by K^-1.
Notice that points and directions, in such a situation, are really tight
concepts: we can represent a 3D line in space as
line(λ) = P1 + λ(P1 - P2). Since in this case P2 is the camera center,
we have that the line is P1 scaled by a factor λ.
"""
if not is_obj and len(points) > 100:
points = points[np.random.choice(points.shape[0], 100, replace=False,)]
return [self.K_inv @ point for point in points]
def compute_intersections(
self, plane: Plane, directions: List[np.ndarray]
) -> List[np.ndarray]:
"""
Given a plane represented by its origin and a normal
and a list of rays, compute the intersections between
the plane and the rays.
"""
return [
line_plane_intersection(
plane_origin=plane.origin,
plane_normal=plane.normal,
line_direction=direction,
)
for direction in directions
]
def run(self):
cap = cv.VideoCapture(f"videos/{self.filename}")
if not cap.isOpened():
return
first_frame = self.read_frame(cap)
if first_frame is None:
return
first_frame_thresh = threshold_image(first_frame)
desk_corners, wall_corners = self.get_desk_wall_corners(first_frame_thresh)
extreme_points = self.get_extreme_points(wall_corners, desk_corners)
desk_plane = self.get_H_R_t(desk_corners)
wall_plane = self.get_H_R_t(wall_corners)
if self.debug:
first_frame_copy = first_frame.copy()
cv.drawContours(first_frame_copy, [self.contour1], -1, (255, 255, 255), 2)
cv.drawContours(first_frame_copy, [self.contour2], -1, (255, 255, 255), 2)
draw_circles(first_frame_copy, desk_corners, text=True)
draw_circles(first_frame_copy, wall_corners, text=True)
show_image(first_frame_copy)
all_obj_points = []
all_obj_colors = []
while True:
frame = self.read_frame(cap)
if frame is None:
break
frame_copy = frame.copy()
frame_interesting = frame[
extreme_points.wall.top_left.y : extreme_points.desk.bottom_right.y,
extreme_points.wall.top_left.x : extreme_points.wall.bottom_right.x,
]
(laser_wall, laser_desk, laser_obj, obj_colors,) = self.get_laser_points(
first_frame, frame_interesting, extreme_points
)
if laser_wall is not None:
if self.debug:
draw_circles(frame_copy, laser_wall)
draw_circles(frame_copy, laser_desk)
draw_circles(frame_copy, laser_obj)
laser_wall = self.make_homogeneous(laser_wall)
laser_obj = self.make_homogeneous(laser_obj)
laser_desk = self.make_homogeneous(laser_desk)
wall_directions = self.create_exiting_rays(laser_wall, is_obj=False)
desk_directions = self.create_exiting_rays(laser_desk, is_obj=False)
obj_directions = self.create_exiting_rays(laser_obj, is_obj=True)
intersections_wall = self.compute_intersections(
wall_plane, wall_directions
)
intersections_desk = self.compute_intersections(
desk_plane, desk_directions
)
intersections_rects = np.array(intersections_wall + intersections_desk)
laser_plane = fit_plane(intersections_rects)
intersections_objs = self.compute_intersections(
laser_plane, obj_directions
)
all_obj_points.extend(intersections_objs)
all_obj_colors.extend(obj_colors)
if self.debug:
if show_image(frame_copy, continuous=True):
break
else:
if show_image(frame, continuous=True):
break
self.save_3d_render(all_obj_points, all_obj_colors)
cap.release()
cv.destroyAllWindows() | points=laser_desk, offset=Point(xmin, ymin_desk)
)
| random_line_split |
blackjack (1).py | # -*- coding: utf-8 -*-
from card import BJCard, Deck
class BJCards(list):
"""Blackjack Cards Class
Attributes:
possible_sums: all the possible sum of card
hand: -1 if bust
highest sum of possible sums, otherwise
"""
def __init__(self):
list.__init__(self)
self.possible_sums = set([0]) # init possible sums (== 0)
self.hand = 0 # hand of BJCards. -1 if bust
def hit(self, card):
"""Hit a BJcard and append it.
Then, find all possible sums and the current hand.
The current hand is defined as max. of possible sums
The current hand should be -1 if burst"""
self.append(card)
values=[]
values.append(card.value())
if values[0] < 2:
values.append(values[0]+ 10)
new_sums =set([v+s for v in values for s in self.possible_sums if v+s <=21])
new_sums =sorted(new_sums)
if len(new_sums) ==0:
self.hand=-1
else:
self.hand = new_sums[-1]
self.possible_sums = new_sums
def is_blackjack(self):
"""Is current cards the Blackjack?"""
if self.hand == 21 and len(list(self)) ==2:
print '%s = Blackjack'%self
return True
def __lt__(self, other):
if other.is_blackjack():
return -1
else:
return self.hand<other.hand
def __gt__(self, other):
if self.is_blackjack():
return 1
else:
return self.hand>other.hand
def __eq__(self, other):
if not self.is_blackjack() and not other.is_blackjack():
return self.hand == other.hand
def __repr__(self):
return "BJCards(%s) = %s" % (list(self),self.possible_sums)
class Player:
"""Player class
Attributes:
name: player's name
init_budget: initial budget
budget: current budet
game: game joined by the player
cards: BJ Cards given to the player
state: 'active', 'stand', or 'burst'
"""
def __init__(self, name, budget,state =None):
self.name =name
self.budget = budget
self.restart()
def restart(self):
"""Restart another round.
Check the remaining budget and leave the game if budget <= 0.
Create new BJCards"""
self.state ='active'
if self.budget <= 0:
return self.leave()
self.cards =BJCards()
self.bet_amount =0
def join(self, game):
"""join the Blackjack game"""
self.game = game
self.game.join(self)
return self.game
def leave(self):
"""Leave the Blackjack game"""
self.game.leave(self)
return self.game
def bet(self, amount):
"""Bet the amount of money.
Cannot exceed player's budget"""
if amount >self.budget:
print 'you cannot bet because of little money'
else:
self.bet_amount = amount
print 'you bet %s' % (amount)
def hit(self, card):
"""Hit a card and check if bust"""
self.cards.hit(card)
if self.cards.hand ==-1:
self.state ='burst'
def stand(self):
self.state ='stand'
def __repr__(self):
"""Represent in the form as: name, state: repr(BJCards)"""
return 'name:%s ,state :%s,%s' % (self.name, self.cards,self.state)
class Dealer(Player):
"""Dealer is a player competing against players in the game.
Dealer has a card deck and deals players cards
Attributes:
deck: a deck of BJCard
"""
def __init__(self):
Player.__init__(self, name='dealer', budget=1000000)
self.deck=Deck(BJCard)
def __repr__(self):
"""Represent in the form as: name, state: repr(BJCards)
2nd card in BJCards object should be makred as '?' to hide the face
"""
return 'name:%s ,state :%s,%s' % (self.name, self.cards,self.state)
def get_card(self):
"""Get a card from the deck"""
return self.deck.pop()
def join(self, game):
"""join a Blackjack game"""
self.game = game
self.game.dealer_join(self)
return self.game
def leave(self):
"""Leave the Blackjack game"""
self.game.dealer_leave(self)
return self.game
def showdown(self):
"""Face up dealer's hidden card and balance with players in the game"""
print "%s: %s" %(self.name, repr(self.cards)) # open dealer's cards
for player in self.game.players:
win = self.balance(player)
if win > 0:
print player.name, 'wins', win
elif win == 0:
print player.name, 'draws'
elif win <0:
print player.name, 'loses', -(win)
self.budget -= win
player.budget += win
print 'budget of %s : %s'%(player.name,player.budget)
print 'budget of %s : %s'%(self.name,self.budget)
def balance(self, player):
"""Who wins? Caculate pay-back according to player's betting amount.
Returns:
positive amount if player wins
0 if draw
negative amount if player loses
"""
print 'hand of %s: %s'%(player.name,player.cards.hand)
print 'hand of %s: %s'%(self.name,self.cards.hand)
if player.cards.hand == self.cards.hand:
return 0
elif player.cards.hand > self.cards.hand:
return player.bet_amount*2
else:
return -player.bet_amount
def deal(self):
# player's betting first
for player in self.game.players:
amount = self.__ask_bet(player)
player.bet(amount)
# turn down first two cards
for i in range(2):
for player in self.game.players:
player.hit(self.get_card())
print player
self.hit(self.get_card())
print self
# deal next cards
if not self.cards.is_blackjack():
print "players' turn:"
for player in self.game.players:
while player.state == 'active' :
self.deal_player(player)
print player
print "dealer's turn:"
while self.state == 'active':
self.deal_self()
print self
# Who wins?
self.showdown()
def deal_player(self, player):
"""Player can choose hit or stand"""
answer = self.__ask_hit_or_stand(player)
if answer in ('hit'):
player.hit(self.get_card())
elif answer in('stand'):
player.stand()
def deal_self(self):
"""Dealer have no choice. Stand if hand >= 17, otherwise hit"""
self.cards.hit(self.get_card())
if self.cards.hand < 17 and self.cards.hand>=0:
self.state = 'active'
elif self.cards.hand >= 17 and self.cards.hand <= 21:
self.state = 'stand'
elif self.cards.hand==-1:
self.state = 'burst'
def __ask_hit_or_stand(self, player):
while True:
answer = raw_input('> %s, hit or stand? ' % player.name)
if answer in ('h', 'hit'):
return 'hit'
elif answer in ('s', 'stand'):
return 'stand'
def __ask_bet(self, player):
while True:
try:
amount = int(raw_input('> %s, how much want to bet? (%d) ' \
%(player.name, player.budget)))
except Exception as e:
print e
else:
return amount
class BJGame:
"""Blackjack game consist of a dealer, one or more players
"""
Round =0
def __init__(self):
self.players = []
self.dealer = None
def join(self, player):
self.players.append(player)
def leave(self, player):
self.players.remove(player)
def dealer_join(self, dealer):
self.dealer = dealer
def dealer_leave(self, dealer):
self.dealer = None
def start(self):
if not self.players:
print 'No players on the table'
return False
if self.dealer == None:
print 'Dealer lost all the money. No dealer present'
return False
print 'Starting round'
self.dealer.deal()
# Prepare to restart
for player in self.players[:]:
player.restart()
self.dealer.restart()
return True
def repeat(self):
while self.start():
pass
if __name__ == '__main__':
print "==Testing BJCards"
def test_cards(card_list):
cards = BJCards()
for c in card_list:
cards.hit(BJCard(c))
print cards
print cards.hand
return cards
bob_cards = BJCards()
sue_cards = BJCards()
tom_cards = BJCards()
bob_cards = test_cards(['KD', '8S', '2D'])
assert bob_cards.hand == 20
sue_cards = test_cards(['9S', '5S', 'JD', 'TS'])
assert sue_cards.hand == -1 # bust
tom_cards = test_cards(['QC', 'AH'])
assert tom_cards.hand == 21
assert sue_cards < bob_cards < tom_cards
assert tom_cards > test_cards(['9C', '7S', '5C'])
print "==Testing Player"
game = BJGame()
bob = Player('bob', 100)
bob.join(game)
bob.bet(10); bob.hit(BJCard('KD')); bob.hit(BJCard('8S')); bob.hit(BJCard('2D')); bob.stand()
print bob
print "== Testing Dealer"
dealer = Dealer()
dealer.join(game)
while dealer.state == 'active':
|
print dealer
bob.restart()
dealer.restart()
dealer.deal()
print "== Run BJGame"
game = BJGame()
dealer = Dealer()
dealer.join(game)
bob = Player('bob', 100)
bob.join(game)
tom = Player('tom', 200)
tom.join(game)
game.start()
game.start()
# game.repeat()
print '==Run BjGame including me'
game = BJGame()
dealer = Dealer()
dealer.join(game)
MinGeun =Player('MinGeun', 1000)
MinGeun.join(game)
bob = Player('bob', 100)
bob.join(game)
tom = Player('tom', 200)
tom.join(game)
game.repeat()
| dealer.deal_self()
print dealer | conditional_block |
blackjack (1).py | # -*- coding: utf-8 -*-
from card import BJCard, Deck
class BJCards(list):
"""Blackjack Cards Class
Attributes:
possible_sums: all the possible sum of card
hand: -1 if bust
highest sum of possible sums, otherwise
"""
def __init__(self):
list.__init__(self)
self.possible_sums = set([0]) # init possible sums (== 0)
self.hand = 0 # hand of BJCards. -1 if bust
def hit(self, card):
"""Hit a BJcard and append it.
Then, find all possible sums and the current hand.
The current hand is defined as max. of possible sums
The current hand should be -1 if burst"""
self.append(card)
values=[]
values.append(card.value())
if values[0] < 2:
values.append(values[0]+ 10)
new_sums =set([v+s for v in values for s in self.possible_sums if v+s <=21])
new_sums =sorted(new_sums)
if len(new_sums) ==0:
self.hand=-1
else:
self.hand = new_sums[-1]
self.possible_sums = new_sums
def is_blackjack(self):
"""Is current cards the Blackjack?"""
if self.hand == 21 and len(list(self)) ==2:
print '%s = Blackjack'%self
return True
def __lt__(self, other):
if other.is_blackjack():
return -1
else:
return self.hand<other.hand
def __gt__(self, other):
if self.is_blackjack():
return 1
else:
return self.hand>other.hand
def __eq__(self, other):
if not self.is_blackjack() and not other.is_blackjack():
return self.hand == other.hand
def __repr__(self):
return "BJCards(%s) = %s" % (list(self),self.possible_sums)
class Player:
"""Player class
Attributes:
name: player's name
init_budget: initial budget
budget: current budet
game: game joined by the player
cards: BJ Cards given to the player
state: 'active', 'stand', or 'burst'
"""
def __init__(self, name, budget,state =None):
self.name =name
self.budget = budget
self.restart()
def restart(self):
"""Restart another round.
Check the remaining budget and leave the game if budget <= 0.
Create new BJCards"""
self.state ='active'
if self.budget <= 0:
return self.leave()
self.cards =BJCards()
self.bet_amount =0
def join(self, game):
"""join the Blackjack game"""
self.game = game
self.game.join(self)
return self.game
def leave(self):
"""Leave the Blackjack game"""
self.game.leave(self)
return self.game
def bet(self, amount):
"""Bet the amount of money.
Cannot exceed player's budget"""
if amount >self.budget:
print 'you cannot bet because of little money'
else:
self.bet_amount = amount
print 'you bet %s' % (amount)
def hit(self, card):
"""Hit a card and check if bust"""
self.cards.hit(card)
if self.cards.hand ==-1:
self.state ='burst'
def stand(self):
self.state ='stand'
def __repr__(self):
"""Represent in the form as: name, state: repr(BJCards)"""
return 'name:%s ,state :%s,%s' % (self.name, self.cards,self.state)
class Dealer(Player):
"""Dealer is a player competing against players in the game.
Dealer has a card deck and deals players cards
Attributes:
deck: a deck of BJCard
"""
def __init__(self):
Player.__init__(self, name='dealer', budget=1000000)
self.deck=Deck(BJCard)
def __repr__(self):
"""Represent in the form as: name, state: repr(BJCards)
2nd card in BJCards object should be makred as '?' to hide the face
"""
return 'name:%s ,state :%s,%s' % (self.name, self.cards,self.state)
def get_card(self):
"""Get a card from the deck"""
return self.deck.pop()
def join(self, game):
"""join a Blackjack game"""
self.game = game
self.game.dealer_join(self)
return self.game
def leave(self):
"""Leave the Blackjack game"""
self.game.dealer_leave(self)
return self.game
def showdown(self):
"""Face up dealer's hidden card and balance with players in the game"""
print "%s: %s" %(self.name, repr(self.cards)) # open dealer's cards
for player in self.game.players:
win = self.balance(player)
if win > 0:
print player.name, 'wins', win
elif win == 0:
print player.name, 'draws'
elif win <0:
print player.name, 'loses', -(win)
self.budget -= win
player.budget += win
print 'budget of %s : %s'%(player.name,player.budget)
print 'budget of %s : %s'%(self.name,self.budget)
def balance(self, player):
"""Who wins? Caculate pay-back according to player's betting amount.
Returns:
positive amount if player wins
0 if draw
negative amount if player loses
"""
print 'hand of %s: %s'%(player.name,player.cards.hand)
print 'hand of %s: %s'%(self.name,self.cards.hand)
if player.cards.hand == self.cards.hand:
return 0
elif player.cards.hand > self.cards.hand:
return player.bet_amount*2
else:
return -player.bet_amount
def deal(self):
# player's betting first
for player in self.game.players:
amount = self.__ask_bet(player)
player.bet(amount)
# turn down first two cards
for i in range(2):
for player in self.game.players:
player.hit(self.get_card())
print player
self.hit(self.get_card())
print self
# deal next cards
if not self.cards.is_blackjack():
print "players' turn:"
for player in self.game.players:
while player.state == 'active' :
self.deal_player(player)
print player
print "dealer's turn:"
while self.state == 'active':
self.deal_self()
print self
# Who wins?
self.showdown()
def deal_player(self, player):
"""Player can choose hit or stand"""
answer = self.__ask_hit_or_stand(player)
if answer in ('hit'):
player.hit(self.get_card())
elif answer in('stand'):
player.stand()
def deal_self(self):
"""Dealer have no choice. Stand if hand >= 17, otherwise hit"""
self.cards.hit(self.get_card())
if self.cards.hand < 17 and self.cards.hand>=0:
self.state = 'active'
elif self.cards.hand >= 17 and self.cards.hand <= 21:
self.state = 'stand'
elif self.cards.hand==-1:
self.state = 'burst'
def __ask_hit_or_stand(self, player):
while True:
answer = raw_input('> %s, hit or stand? ' % player.name)
if answer in ('h', 'hit'):
return 'hit'
elif answer in ('s', 'stand'):
return 'stand'
def __ask_bet(self, player):
while True:
try:
amount = int(raw_input('> %s, how much want to bet? (%d) ' \
%(player.name, player.budget)))
except Exception as e:
print e
else:
return amount
class BJGame:
"""Blackjack game consist of a dealer, one or more players
"""
Round =0
def | (self):
self.players = []
self.dealer = None
def join(self, player):
self.players.append(player)
def leave(self, player):
self.players.remove(player)
def dealer_join(self, dealer):
self.dealer = dealer
def dealer_leave(self, dealer):
self.dealer = None
def start(self):
if not self.players:
print 'No players on the table'
return False
if self.dealer == None:
print 'Dealer lost all the money. No dealer present'
return False
print 'Starting round'
self.dealer.deal()
# Prepare to restart
for player in self.players[:]:
player.restart()
self.dealer.restart()
return True
def repeat(self):
while self.start():
pass
if __name__ == '__main__':
print "==Testing BJCards"
def test_cards(card_list):
cards = BJCards()
for c in card_list:
cards.hit(BJCard(c))
print cards
print cards.hand
return cards
bob_cards = BJCards()
sue_cards = BJCards()
tom_cards = BJCards()
bob_cards = test_cards(['KD', '8S', '2D'])
assert bob_cards.hand == 20
sue_cards = test_cards(['9S', '5S', 'JD', 'TS'])
assert sue_cards.hand == -1 # bust
tom_cards = test_cards(['QC', 'AH'])
assert tom_cards.hand == 21
assert sue_cards < bob_cards < tom_cards
assert tom_cards > test_cards(['9C', '7S', '5C'])
print "==Testing Player"
game = BJGame()
bob = Player('bob', 100)
bob.join(game)
bob.bet(10); bob.hit(BJCard('KD')); bob.hit(BJCard('8S')); bob.hit(BJCard('2D')); bob.stand()
print bob
print "== Testing Dealer"
dealer = Dealer()
dealer.join(game)
while dealer.state == 'active':
dealer.deal_self()
print dealer
print dealer
bob.restart()
dealer.restart()
dealer.deal()
print "== Run BJGame"
game = BJGame()
dealer = Dealer()
dealer.join(game)
bob = Player('bob', 100)
bob.join(game)
tom = Player('tom', 200)
tom.join(game)
game.start()
game.start()
# game.repeat()
print '==Run BjGame including me'
game = BJGame()
dealer = Dealer()
dealer.join(game)
MinGeun =Player('MinGeun', 1000)
MinGeun.join(game)
bob = Player('bob', 100)
bob.join(game)
tom = Player('tom', 200)
tom.join(game)
game.repeat()
| __init__ | identifier_name |
blackjack (1).py | # -*- coding: utf-8 -*-
from card import BJCard, Deck
class BJCards(list):
"""Blackjack Cards Class
Attributes:
possible_sums: all the possible sum of card
hand: -1 if bust
highest sum of possible sums, otherwise
"""
def __init__(self):
list.__init__(self)
self.possible_sums = set([0]) # init possible sums (== 0)
self.hand = 0 # hand of BJCards. -1 if bust
def hit(self, card):
"""Hit a BJcard and append it.
Then, find all possible sums and the current hand.
The current hand is defined as max. of possible sums
The current hand should be -1 if burst"""
self.append(card)
values=[]
values.append(card.value())
if values[0] < 2:
values.append(values[0]+ 10)
new_sums =set([v+s for v in values for s in self.possible_sums if v+s <=21])
new_sums =sorted(new_sums)
if len(new_sums) ==0:
self.hand=-1
else:
self.hand = new_sums[-1]
self.possible_sums = new_sums
def is_blackjack(self):
"""Is current cards the Blackjack?"""
if self.hand == 21 and len(list(self)) ==2:
print '%s = Blackjack'%self
return True
def __lt__(self, other):
if other.is_blackjack():
return -1
else:
return self.hand<other.hand
def __gt__(self, other):
if self.is_blackjack():
return 1
else:
return self.hand>other.hand
def __eq__(self, other):
if not self.is_blackjack() and not other.is_blackjack():
return self.hand == other.hand
def __repr__(self):
return "BJCards(%s) = %s" % (list(self),self.possible_sums)
class Player:
"""Player class
Attributes:
name: player's name
init_budget: initial budget
budget: current budet
game: game joined by the player
cards: BJ Cards given to the player
state: 'active', 'stand', or 'burst'
"""
def __init__(self, name, budget,state =None):
self.name =name
self.budget = budget
self.restart()
def restart(self):
"""Restart another round.
Check the remaining budget and leave the game if budget <= 0.
Create new BJCards"""
self.state ='active'
if self.budget <= 0:
return self.leave()
self.cards =BJCards()
self.bet_amount =0
def join(self, game):
"""join the Blackjack game"""
self.game = game
self.game.join(self)
return self.game
def leave(self):
"""Leave the Blackjack game"""
self.game.leave(self)
return self.game
def bet(self, amount):
"""Bet the amount of money.
Cannot exceed player's budget"""
if amount >self.budget:
print 'you cannot bet because of little money'
else:
self.bet_amount = amount
print 'you bet %s' % (amount)
def hit(self, card):
"""Hit a card and check if bust"""
self.cards.hit(card)
if self.cards.hand ==-1:
self.state ='burst'
def stand(self):
self.state ='stand'
def __repr__(self):
"""Represent in the form as: name, state: repr(BJCards)"""
return 'name:%s ,state :%s,%s' % (self.name, self.cards,self.state)
class Dealer(Player):
"""Dealer is a player competing against players in the game.
Dealer has a card deck and deals players cards
Attributes:
deck: a deck of BJCard
"""
def __init__(self):
Player.__init__(self, name='dealer', budget=1000000)
self.deck=Deck(BJCard)
def __repr__(self):
"""Represent in the form as: name, state: repr(BJCards)
2nd card in BJCards object should be makred as '?' to hide the face
"""
return 'name:%s ,state :%s,%s' % (self.name, self.cards,self.state)
def get_card(self):
|
def join(self, game):
"""join a Blackjack game"""
self.game = game
self.game.dealer_join(self)
return self.game
def leave(self):
"""Leave the Blackjack game"""
self.game.dealer_leave(self)
return self.game
def showdown(self):
"""Face up dealer's hidden card and balance with players in the game"""
print "%s: %s" %(self.name, repr(self.cards)) # open dealer's cards
for player in self.game.players:
win = self.balance(player)
if win > 0:
print player.name, 'wins', win
elif win == 0:
print player.name, 'draws'
elif win <0:
print player.name, 'loses', -(win)
self.budget -= win
player.budget += win
print 'budget of %s : %s'%(player.name,player.budget)
print 'budget of %s : %s'%(self.name,self.budget)
def balance(self, player):
"""Who wins? Caculate pay-back according to player's betting amount.
Returns:
positive amount if player wins
0 if draw
negative amount if player loses
"""
print 'hand of %s: %s'%(player.name,player.cards.hand)
print 'hand of %s: %s'%(self.name,self.cards.hand)
if player.cards.hand == self.cards.hand:
return 0
elif player.cards.hand > self.cards.hand:
return player.bet_amount*2
else:
return -player.bet_amount
def deal(self):
# player's betting first
for player in self.game.players:
amount = self.__ask_bet(player)
player.bet(amount)
# turn down first two cards
for i in range(2):
for player in self.game.players:
player.hit(self.get_card())
print player
self.hit(self.get_card())
print self
# deal next cards
if not self.cards.is_blackjack():
print "players' turn:"
for player in self.game.players:
while player.state == 'active' :
self.deal_player(player)
print player
print "dealer's turn:"
while self.state == 'active':
self.deal_self()
print self
# Who wins?
self.showdown()
def deal_player(self, player):
"""Player can choose hit or stand"""
answer = self.__ask_hit_or_stand(player)
if answer in ('hit'):
player.hit(self.get_card())
elif answer in('stand'):
player.stand()
def deal_self(self):
"""Dealer have no choice. Stand if hand >= 17, otherwise hit"""
self.cards.hit(self.get_card())
if self.cards.hand < 17 and self.cards.hand>=0:
self.state = 'active'
elif self.cards.hand >= 17 and self.cards.hand <= 21:
self.state = 'stand'
elif self.cards.hand==-1:
self.state = 'burst'
def __ask_hit_or_stand(self, player):
while True:
answer = raw_input('> %s, hit or stand? ' % player.name)
if answer in ('h', 'hit'):
return 'hit'
elif answer in ('s', 'stand'):
return 'stand'
def __ask_bet(self, player):
while True:
try:
amount = int(raw_input('> %s, how much want to bet? (%d) ' \
%(player.name, player.budget)))
except Exception as e:
print e
else:
return amount
class BJGame:
"""Blackjack game consist of a dealer, one or more players
"""
Round =0
def __init__(self):
self.players = []
self.dealer = None
def join(self, player):
self.players.append(player)
def leave(self, player):
self.players.remove(player)
def dealer_join(self, dealer):
self.dealer = dealer
def dealer_leave(self, dealer):
self.dealer = None
def start(self):
if not self.players:
print 'No players on the table'
return False
if self.dealer == None:
print 'Dealer lost all the money. No dealer present'
return False
print 'Starting round'
self.dealer.deal()
# Prepare to restart
for player in self.players[:]:
player.restart()
self.dealer.restart()
return True
def repeat(self):
while self.start():
pass
if __name__ == '__main__':
print "==Testing BJCards"
def test_cards(card_list):
cards = BJCards()
for c in card_list:
cards.hit(BJCard(c))
print cards
print cards.hand
return cards
bob_cards = BJCards()
sue_cards = BJCards()
tom_cards = BJCards()
bob_cards = test_cards(['KD', '8S', '2D'])
assert bob_cards.hand == 20
sue_cards = test_cards(['9S', '5S', 'JD', 'TS'])
assert sue_cards.hand == -1 # bust
tom_cards = test_cards(['QC', 'AH'])
assert tom_cards.hand == 21
assert sue_cards < bob_cards < tom_cards
assert tom_cards > test_cards(['9C', '7S', '5C'])
print "==Testing Player"
game = BJGame()
bob = Player('bob', 100)
bob.join(game)
bob.bet(10); bob.hit(BJCard('KD')); bob.hit(BJCard('8S')); bob.hit(BJCard('2D')); bob.stand()
print bob
print "== Testing Dealer"
dealer = Dealer()
dealer.join(game)
while dealer.state == 'active':
dealer.deal_self()
print dealer
print dealer
bob.restart()
dealer.restart()
dealer.deal()
print "== Run BJGame"
game = BJGame()
dealer = Dealer()
dealer.join(game)
bob = Player('bob', 100)
bob.join(game)
tom = Player('tom', 200)
tom.join(game)
game.start()
game.start()
# game.repeat()
print '==Run BjGame including me'
game = BJGame()
dealer = Dealer()
dealer.join(game)
MinGeun =Player('MinGeun', 1000)
MinGeun.join(game)
bob = Player('bob', 100)
bob.join(game)
tom = Player('tom', 200)
tom.join(game)
game.repeat()
| """Get a card from the deck"""
return self.deck.pop() | identifier_body |
blackjack (1).py | # -*- coding: utf-8 -*-
from card import BJCard, Deck
class BJCards(list):
"""Blackjack Cards Class
Attributes:
possible_sums: all the possible sum of card
hand: -1 if bust
highest sum of possible sums, otherwise
"""
def __init__(self):
list.__init__(self)
self.possible_sums = set([0]) # init possible sums (== 0)
self.hand = 0 # hand of BJCards. -1 if bust
def hit(self, card):
"""Hit a BJcard and append it.
Then, find all possible sums and the current hand.
The current hand is defined as max. of possible sums
The current hand should be -1 if burst"""
self.append(card)
values=[]
values.append(card.value())
if values[0] < 2:
values.append(values[0]+ 10)
new_sums =set([v+s for v in values for s in self.possible_sums if v+s <=21])
new_sums =sorted(new_sums)
if len(new_sums) ==0:
self.hand=-1
else:
self.hand = new_sums[-1]
self.possible_sums = new_sums
def is_blackjack(self):
"""Is current cards the Blackjack?"""
if self.hand == 21 and len(list(self)) ==2:
print '%s = Blackjack'%self
return True
def __lt__(self, other):
if other.is_blackjack():
return -1
else:
return self.hand<other.hand
def __gt__(self, other):
if self.is_blackjack():
return 1
else:
return self.hand>other.hand
def __eq__(self, other):
if not self.is_blackjack() and not other.is_blackjack():
return self.hand == other.hand
def __repr__(self):
return "BJCards(%s) = %s" % (list(self),self.possible_sums)
class Player:
"""Player class
Attributes:
name: player's name
init_budget: initial budget
budget: current budet
game: game joined by the player
cards: BJ Cards given to the player
state: 'active', 'stand', or 'burst'
"""
def __init__(self, name, budget,state =None):
self.name =name
self.budget = budget
self.restart()
def restart(self):
"""Restart another round.
Check the remaining budget and leave the game if budget <= 0.
Create new BJCards"""
self.state ='active'
if self.budget <= 0:
return self.leave()
self.cards =BJCards()
self.bet_amount =0
def join(self, game):
"""join the Blackjack game"""
self.game = game
self.game.join(self)
return self.game
def leave(self):
"""Leave the Blackjack game"""
self.game.leave(self)
return self.game
def bet(self, amount):
"""Bet the amount of money.
Cannot exceed player's budget"""
if amount >self.budget:
print 'you cannot bet because of little money'
else:
self.bet_amount = amount
print 'you bet %s' % (amount)
def hit(self, card):
"""Hit a card and check if bust"""
self.cards.hit(card)
if self.cards.hand ==-1:
self.state ='burst'
def stand(self):
self.state ='stand'
def __repr__(self):
"""Represent in the form as: name, state: repr(BJCards)"""
return 'name:%s ,state :%s,%s' % (self.name, self.cards,self.state)
class Dealer(Player):
"""Dealer is a player competing against players in the game.
Dealer has a card deck and deals players cards
Attributes:
deck: a deck of BJCard
"""
def __init__(self):
Player.__init__(self, name='dealer', budget=1000000)
self.deck=Deck(BJCard)
def __repr__(self):
"""Represent in the form as: name, state: repr(BJCards)
2nd card in BJCards object should be makred as '?' to hide the face
"""
return 'name:%s ,state :%s,%s' % (self.name, self.cards,self.state)
def get_card(self):
"""Get a card from the deck"""
return self.deck.pop()
def join(self, game):
"""join a Blackjack game"""
self.game = game
self.game.dealer_join(self)
return self.game
def leave(self):
"""Leave the Blackjack game"""
self.game.dealer_leave(self)
return self.game
def showdown(self):
"""Face up dealer's hidden card and balance with players in the game"""
print "%s: %s" %(self.name, repr(self.cards)) # open dealer's cards
for player in self.game.players:
win = self.balance(player)
if win > 0:
print player.name, 'wins', win
elif win == 0:
print player.name, 'draws'
elif win <0:
print player.name, 'loses', -(win)
self.budget -= win
player.budget += win
print 'budget of %s : %s'%(player.name,player.budget)
print 'budget of %s : %s'%(self.name,self.budget)
def balance(self, player):
"""Who wins? Caculate pay-back according to player's betting amount.
Returns:
positive amount if player wins
0 if draw
negative amount if player loses
"""
print 'hand of %s: %s'%(player.name,player.cards.hand)
print 'hand of %s: %s'%(self.name,self.cards.hand)
if player.cards.hand == self.cards.hand:
return 0
elif player.cards.hand > self.cards.hand:
return player.bet_amount*2
else:
return -player.bet_amount
def deal(self):
# player's betting first
for player in self.game.players:
amount = self.__ask_bet(player)
player.bet(amount)
# turn down first two cards
for i in range(2):
for player in self.game.players:
player.hit(self.get_card())
print player
self.hit(self.get_card())
print self
# deal next cards
if not self.cards.is_blackjack():
print "players' turn:"
for player in self.game.players:
while player.state == 'active' :
self.deal_player(player)
print player
print "dealer's turn:"
while self.state == 'active':
self.deal_self()
print self
# Who wins?
self.showdown()
def deal_player(self, player):
"""Player can choose hit or stand"""
answer = self.__ask_hit_or_stand(player)
if answer in ('hit'):
player.hit(self.get_card())
elif answer in('stand'):
player.stand()
def deal_self(self):
"""Dealer have no choice. Stand if hand >= 17, otherwise hit"""
self.cards.hit(self.get_card())
if self.cards.hand < 17 and self.cards.hand>=0:
self.state = 'active'
elif self.cards.hand >= 17 and self.cards.hand <= 21:
self.state = 'stand'
elif self.cards.hand==-1:
self.state = 'burst'
def __ask_hit_or_stand(self, player):
while True:
answer = raw_input('> %s, hit or stand? ' % player.name)
if answer in ('h', 'hit'):
return 'hit'
elif answer in ('s', 'stand'):
return 'stand'
def __ask_bet(self, player):
while True:
try:
amount = int(raw_input('> %s, how much want to bet? (%d) ' \
%(player.name, player.budget)))
except Exception as e:
print e
else:
return amount
class BJGame:
"""Blackjack game consist of a dealer, one or more players
"""
Round =0
def __init__(self):
self.players = []
self.dealer = None
def join(self, player):
self.players.append(player)
def leave(self, player):
self.players.remove(player)
def dealer_join(self, dealer):
self.dealer = dealer
def dealer_leave(self, dealer):
self.dealer = None
def start(self):
if not self.players:
print 'No players on the table'
return False
if self.dealer == None:
print 'Dealer lost all the money. No dealer present'
return False
print 'Starting round'
self.dealer.deal()
# Prepare to restart
for player in self.players[:]: | player.restart()
self.dealer.restart()
return True
def repeat(self):
while self.start():
pass
if __name__ == '__main__':
print "==Testing BJCards"
def test_cards(card_list):
cards = BJCards()
for c in card_list:
cards.hit(BJCard(c))
print cards
print cards.hand
return cards
bob_cards = BJCards()
sue_cards = BJCards()
tom_cards = BJCards()
bob_cards = test_cards(['KD', '8S', '2D'])
assert bob_cards.hand == 20
sue_cards = test_cards(['9S', '5S', 'JD', 'TS'])
assert sue_cards.hand == -1 # bust
tom_cards = test_cards(['QC', 'AH'])
assert tom_cards.hand == 21
assert sue_cards < bob_cards < tom_cards
assert tom_cards > test_cards(['9C', '7S', '5C'])
print "==Testing Player"
game = BJGame()
bob = Player('bob', 100)
bob.join(game)
bob.bet(10); bob.hit(BJCard('KD')); bob.hit(BJCard('8S')); bob.hit(BJCard('2D')); bob.stand()
print bob
print "== Testing Dealer"
dealer = Dealer()
dealer.join(game)
while dealer.state == 'active':
dealer.deal_self()
print dealer
print dealer
bob.restart()
dealer.restart()
dealer.deal()
print "== Run BJGame"
game = BJGame()
dealer = Dealer()
dealer.join(game)
bob = Player('bob', 100)
bob.join(game)
tom = Player('tom', 200)
tom.join(game)
game.start()
game.start()
# game.repeat()
print '==Run BjGame including me'
game = BJGame()
dealer = Dealer()
dealer.join(game)
MinGeun =Player('MinGeun', 1000)
MinGeun.join(game)
bob = Player('bob', 100)
bob.join(game)
tom = Player('tom', 200)
tom.join(game)
game.repeat() | random_line_split | |
classes.component.ts | import { Component, OnInit, ViewChild, ViewContainerRef } from '@angular/core';
import { CommonService } from 'app/service/common.service';
import { ClassesConfigService } from 'app/service/general/api/classes-config.service';
import { MatDialog, MatSnackBar } from '@angular/material';
import { ClassFilterView } from '../models/class-filter-view';
import { CustomDialogComponent } from 'app/shared/custom-dialog/custom-dialog.component';
import { AppSettings } from 'app/app.constants';
import { ClassResultViewModel, ClassResultView, ValidationMessageView } from '../models/class-result-view-model';
import { FormGroup, FormControl, Validators } from '@angular/forms';
import { DropDownModel } from 'app/models/drop-down-view';
import * as HttpStatus from 'http-status-codes';
import { CommonComponent } from '../../../../shared/common/common.component';
@Component({
selector: 'app-classes',
templateUrl: './classes.component.html',
styleUrls: ['./classes.component.scss'],
providers: [ClassesConfigService]
})
export class ClassesComponent extends CommonComponent implements OnInit {
@ViewChild('class') myClassForm;
filterViewModel: ClassFilterView;
isdisableBtn = false;
classForm: FormGroup;
classReferences: Array<DropDownModel> = [];
tableSettings: {};
rows: Array<any>;
columns: any[];
pageCnt: number;
lastSelectediId = '';
selectedIdsList: Array<string> = [];
totalRowsCount: number;
rowBasedAction: Array<any> = [];
closeForm: boolean;
validateForm: boolean;
isFormSubmitted = false;
showCreateBtn = true;
preSelectIds: Array<string> = [];
currentComponent = 'ClassesComponent';
constructor(public commonService: CommonService, private classesConfigService: ClassesConfigService, public snackBar: MatSnackBar, public dialogRef: MatDialog,
public viewContainerRef: ViewContainerRef) {
super();
this.initializeFilterView();
this.setColumnHeaders();
this.initializeTableSettings();
}
ngOnInit(): void {
// this.commonService.getTableLSObj(this.tableSettings);
this.initializeForm();
// if (localStorage.getItem('_s')) {
this.classesConfigService.getClassReferences()
.subscribe((data: Array<{ id: string | number, name: string }>) =>
data.forEach(element =>
this.classReferences.push({ label: element.name, value: element.id })
));
const modelTableComponent = this.getModelComponent(this.currentComponent);
if (modelTableComponent) {
this.filterViewModel = modelTableComponent;
}
this.getAllFilteredClasses();
// }
}
initializeForm(): void {
this.classForm = new FormGroup({
id: new FormControl(''),
classReferenceTypeId: new FormControl('', [Validators.required]),
name: new FormControl('', [Validators.required, Validators.maxLength(15)]),
code: new FormControl(null, [Validators.maxLength(6)]),
description: new FormControl(null, [Validators.maxLength(135)]),
});
}
setColumnHeaders(): void {
this.columns = [
{ field: 'name', header: 'Class Name', sort: true },
{ field: 'code', header: 'Code', sort: true },
{ field: 'classReferenceName', header: 'Class Reference', sort: true },
{ field: 'description', header: 'Description', sort: true },
{ field: 'actions', header: 'Actions', sort: false }
];
}
initializeTableSettings(): void {
this.tableSettings = {
rows: [],
columns: this.columns,
tablename: 'Classes',
componentName: this.currentComponent,
model: this.filterViewModel
};
}
initializeFilterView(): void {
this.filterViewModel = {
sortBy: '',
sortOrder: 0,
pageNumber: AppSettings.PAGENUMBER,
pageSize: AppSettings.PAGESIZE,
};
}
getAllFilteredClasses(): void {
this.classesConfigService.getFilteredClasses(this.filterViewModel.classReferenceIds, this.filterViewModel.names,
this.filterViewModel.codes, this.filterViewModel.descriptions, this.filterViewModel.sortOrder, this.filterViewModel.sortBy,
this.preSelectIds, this.filterViewModel.pageNumber, this.filterViewModel.pageSize).subscribe(res => {
this.bindClassResult(res);
}, error => {
this.errorResponse(error);
});
}
tableData(_event: ClassFilterView): void {
this.filterViewModel = _event;
this.getAllFilteredClasses();
}
openForm(): void {
this.initializeForm();
this.isFormSubmitted = false;
this.closeForm = false;
}
onCancel(): void {
this.closeForm = true;
this.isFormSubmitted = false;
this.showCreateBtn = true;
this.getAllFilteredClasses();
this.myClassForm.resetForm();
}
bindClassResult(data: ClassResultView): any {
if (!data.pagedClassViewModels) {
this.rows = [];
this.totalRowsCount = 0;
this.pageCnt = 0;
} else {
this.rows = data.pagedClassViewModels.list;
this.totalRowsCount = data.pagedClassViewModels.totalItems;
this.pageCnt = data.pagedClassViewModels.totalPages;
this.rows.forEach(e => {
e.operations = [
{
name: AppSettings.EDIT_OPERATION,
icon: AppSettings.EDIT,
operationName: AppSettings.EDIT
}, {
name: AppSettings.DELETE_OPERATION,
icon: AppSettings.DELETE,
operationName: AppSettings.DELETE
}
];
});
this.preSelectIds = [];
}
if (data.pagedClassViewModels) {
this.filterViewModel.pageNumber = data.pagedClassViewModels.pageNumber;
}
this.tableSettings = {
model: this.filterViewModel,
rows: this.rows,
columns: this.columns,
totalRowsCount: this.totalRowsCount,
pageCnt: this.pageCnt,
tablename: 'Classes',
componentName: this.currentComponent,
visibleSelectAll: true,
isSelectRowRequired: true,
isPaginationRequired: true,
filtersList: data.filters,
headerOperations: {
infoButton: {
required: true,
text: 'Class Component'
},
addingForm: {
required: true,
btnName: 'Add Class'
}
}
};
}
createOrUpdateClass(form: ClassResultViewModel, onContinue = false): void {
this.isFormSubmitted = true;
if (this.classForm.invalid) {
return;
}
if (!form.id && this.classForm.status === AppSettings.VALID) {
this.isdisableBtn = true;
this.classesConfigService.createClass(form)
.subscribe((res: ValidationMessageView) => {
if (res.statusCode === HttpStatus.OK) {
this.openSnackBar(res.messages.ResultMessage);
this.myClassForm.resetForm();
if (!onContinue){
this.closeForm = true;
this.getAllFilteredClasses();
}
}
else {
this.openSnackBar(res.messages.ResultMessage, true);
this.closeForm = false;
}
}, error => {
// this.isdisableBtn = false;
this.errorResponse(error);
});
this.isdisableBtn = false;
}
else if (this.classForm.valid) {
this.isdisableBtn = true;
this.classesConfigService.updateClass(form).subscribe((res: ValidationMessageView) => {
if (res.statusCode === HttpStatus.OK) {
this.openSnackBar(res.messages.ResultMessage);
this.myClassForm.resetForm();
this.showCreateBtn = true;
this.closeForm = true;
this.onCancel();
}
else {
this.openSnackBar(res.messages.ResultMessage, true);
this.closeForm = false;
}
}, error => {
// this.disbleSubmitBtn = false;
this.errorResponse(error);
});
this.isdisableBtn = false;
}
}
selectedRows(_event: Array<ClassResultViewModel>): void {
this.selectedIdsList = _event.length ? _event.map(x => x.id) : [];
}
actions(operationData: any): void {
if (operationData.operation === AppSettings.EDIT.toLowerCase()) {
this.isFormSubmitted = false;
this.validateForm = false;
this.showCreateBtn = false;
this.closeForm = false;
this.classesConfigService.getClass(operationData.clickedRow.id).subscribe(res => {
if (res.statusCode === HttpStatus.OK) {
this.classForm.patchValue(res.classViewModel);
}
}, error => {
this.errorResponse(error);
});
this.classForm.patchValue(operationData.clickedRow);
}
if (operationData.operation === AppSettings.DELETE.toLowerCase()) {
const dialogRef = this.dialogMethod(AppSettings.WARNING_ON_SINGLE_DELETE, true, AppSettings.NO, AppSettings.YES);
dialogRef.afterClosed().subscribe(action => {
if (action === AppSettings.YES) {
const actionClickedId: Array<string> = [operationData.clickedRow.id];
this.deleteClasses(actionClickedId, false);
}
});
}
}
deleteClasses(selectedIds: Array<string>, isMultiDelete: boolean): void {
this.classesConfigService.deleteAllClass(selectedIds).subscribe(response => {
if (response.statusCode === HttpStatus.OK) {
this.openSnackBar(response.messages.ResultMessage);
this.selectedIdsList = [];
}
else {
this.openSnackBar(response.messages.ResultMessage, true);
if (isMultiDelete) {
this.preSelectIds = response.failedRecords;
}
}
this.getAllFilteredClasses();
}, error => {
if (error.error.failedRecords !== undefined && error.error.failedRecords.length > 0 && isMultiDelete) {
this.preSelectIds = (error.error.failedRecords);
}
this.errorResponse(error);
this.getAllFilteredClasses();
});
}
deleteWarning(): void {
const dialogRef = this.dialogMethod(AppSettings.WARNING_ON_SINGLE_DELETE, true, AppSettings.NO, AppSettings.YES);
dialogRef.afterClosed().subscribe(action => {
if (action === AppSettings.YES) {
this.deleteClasses(this.selectedIdsList, true);
}
});
}
dialogMethod(dialogData: any, disableClose: boolean, button1Text: string, button2Text?: string): any {
return this.dialogRef.open(CustomDialogComponent, {
disableClose: disableClose,
data: { text: this.commonService.getTranslation(dialogData), action: true, btn1Text: button1Text, btn2Text: button2Text },
});
}
trimTextBoxSpaces(key: string): void |
}
| {
this.classForm.controls[key].setValue(this.commonService.trimSpaces(this.classForm.controls[key].value)); // modify value here)
} | identifier_body |
classes.component.ts | import { Component, OnInit, ViewChild, ViewContainerRef } from '@angular/core';
import { CommonService } from 'app/service/common.service';
import { ClassesConfigService } from 'app/service/general/api/classes-config.service';
import { MatDialog, MatSnackBar } from '@angular/material';
import { ClassFilterView } from '../models/class-filter-view';
import { CustomDialogComponent } from 'app/shared/custom-dialog/custom-dialog.component';
import { AppSettings } from 'app/app.constants';
import { ClassResultViewModel, ClassResultView, ValidationMessageView } from '../models/class-result-view-model';
import { FormGroup, FormControl, Validators } from '@angular/forms';
import { DropDownModel } from 'app/models/drop-down-view';
import * as HttpStatus from 'http-status-codes';
import { CommonComponent } from '../../../../shared/common/common.component';
@Component({
selector: 'app-classes',
templateUrl: './classes.component.html',
styleUrls: ['./classes.component.scss'],
providers: [ClassesConfigService]
})
export class ClassesComponent extends CommonComponent implements OnInit {
@ViewChild('class') myClassForm;
filterViewModel: ClassFilterView;
isdisableBtn = false;
classForm: FormGroup;
classReferences: Array<DropDownModel> = [];
tableSettings: {};
rows: Array<any>;
columns: any[];
pageCnt: number;
lastSelectediId = '';
selectedIdsList: Array<string> = [];
totalRowsCount: number;
rowBasedAction: Array<any> = [];
closeForm: boolean;
validateForm: boolean;
isFormSubmitted = false;
showCreateBtn = true;
preSelectIds: Array<string> = [];
currentComponent = 'ClassesComponent';
constructor(public commonService: CommonService, private classesConfigService: ClassesConfigService, public snackBar: MatSnackBar, public dialogRef: MatDialog,
public viewContainerRef: ViewContainerRef) {
super();
this.initializeFilterView();
this.setColumnHeaders();
this.initializeTableSettings();
}
ngOnInit(): void {
// this.commonService.getTableLSObj(this.tableSettings);
this.initializeForm();
// if (localStorage.getItem('_s')) {
this.classesConfigService.getClassReferences()
.subscribe((data: Array<{ id: string | number, name: string }>) =>
data.forEach(element =>
this.classReferences.push({ label: element.name, value: element.id })
));
const modelTableComponent = this.getModelComponent(this.currentComponent);
if (modelTableComponent) {
this.filterViewModel = modelTableComponent;
}
this.getAllFilteredClasses();
// }
}
initializeForm(): void {
this.classForm = new FormGroup({
id: new FormControl(''),
classReferenceTypeId: new FormControl('', [Validators.required]),
name: new FormControl('', [Validators.required, Validators.maxLength(15)]),
code: new FormControl(null, [Validators.maxLength(6)]),
description: new FormControl(null, [Validators.maxLength(135)]),
});
}
setColumnHeaders(): void {
this.columns = [
{ field: 'name', header: 'Class Name', sort: true },
{ field: 'code', header: 'Code', sort: true },
{ field: 'classReferenceName', header: 'Class Reference', sort: true },
{ field: 'description', header: 'Description', sort: true },
{ field: 'actions', header: 'Actions', sort: false }
];
}
initializeTableSettings(): void {
this.tableSettings = {
rows: [],
columns: this.columns,
tablename: 'Classes',
componentName: this.currentComponent,
model: this.filterViewModel
};
}
initializeFilterView(): void {
this.filterViewModel = {
sortBy: '',
sortOrder: 0,
pageNumber: AppSettings.PAGENUMBER,
pageSize: AppSettings.PAGESIZE,
};
}
getAllFilteredClasses(): void {
this.classesConfigService.getFilteredClasses(this.filterViewModel.classReferenceIds, this.filterViewModel.names,
this.filterViewModel.codes, this.filterViewModel.descriptions, this.filterViewModel.sortOrder, this.filterViewModel.sortBy,
this.preSelectIds, this.filterViewModel.pageNumber, this.filterViewModel.pageSize).subscribe(res => {
this.bindClassResult(res);
}, error => {
this.errorResponse(error);
});
}
tableData(_event: ClassFilterView): void {
this.filterViewModel = _event;
this.getAllFilteredClasses();
}
openForm(): void {
this.initializeForm();
this.isFormSubmitted = false;
this.closeForm = false;
}
onCancel(): void {
this.closeForm = true;
this.isFormSubmitted = false;
this.showCreateBtn = true;
this.getAllFilteredClasses();
this.myClassForm.resetForm();
}
bindClassResult(data: ClassResultView): any {
if (!data.pagedClassViewModels) {
this.rows = [];
this.totalRowsCount = 0;
this.pageCnt = 0;
} else {
this.rows = data.pagedClassViewModels.list;
this.totalRowsCount = data.pagedClassViewModels.totalItems;
this.pageCnt = data.pagedClassViewModels.totalPages;
this.rows.forEach(e => {
e.operations = [
{
name: AppSettings.EDIT_OPERATION,
icon: AppSettings.EDIT,
operationName: AppSettings.EDIT
}, {
name: AppSettings.DELETE_OPERATION,
icon: AppSettings.DELETE,
operationName: AppSettings.DELETE
}
];
});
this.preSelectIds = [];
}
if (data.pagedClassViewModels) {
this.filterViewModel.pageNumber = data.pagedClassViewModels.pageNumber;
}
this.tableSettings = {
model: this.filterViewModel,
rows: this.rows,
columns: this.columns,
totalRowsCount: this.totalRowsCount,
pageCnt: this.pageCnt,
tablename: 'Classes',
componentName: this.currentComponent,
visibleSelectAll: true,
isSelectRowRequired: true,
isPaginationRequired: true,
filtersList: data.filters,
headerOperations: {
infoButton: {
required: true,
text: 'Class Component'
},
addingForm: {
required: true,
btnName: 'Add Class'
}
}
};
}
createOrUpdateClass(form: ClassResultViewModel, onContinue = false): void {
this.isFormSubmitted = true;
if (this.classForm.invalid) {
return;
}
if (!form.id && this.classForm.status === AppSettings.VALID) { | if (res.statusCode === HttpStatus.OK) {
this.openSnackBar(res.messages.ResultMessage);
this.myClassForm.resetForm();
if (!onContinue){
this.closeForm = true;
this.getAllFilteredClasses();
}
}
else {
this.openSnackBar(res.messages.ResultMessage, true);
this.closeForm = false;
}
}, error => {
// this.isdisableBtn = false;
this.errorResponse(error);
});
this.isdisableBtn = false;
}
else if (this.classForm.valid) {
this.isdisableBtn = true;
this.classesConfigService.updateClass(form).subscribe((res: ValidationMessageView) => {
if (res.statusCode === HttpStatus.OK) {
this.openSnackBar(res.messages.ResultMessage);
this.myClassForm.resetForm();
this.showCreateBtn = true;
this.closeForm = true;
this.onCancel();
}
else {
this.openSnackBar(res.messages.ResultMessage, true);
this.closeForm = false;
}
}, error => {
// this.disbleSubmitBtn = false;
this.errorResponse(error);
});
this.isdisableBtn = false;
}
}
selectedRows(_event: Array<ClassResultViewModel>): void {
this.selectedIdsList = _event.length ? _event.map(x => x.id) : [];
}
actions(operationData: any): void {
if (operationData.operation === AppSettings.EDIT.toLowerCase()) {
this.isFormSubmitted = false;
this.validateForm = false;
this.showCreateBtn = false;
this.closeForm = false;
this.classesConfigService.getClass(operationData.clickedRow.id).subscribe(res => {
if (res.statusCode === HttpStatus.OK) {
this.classForm.patchValue(res.classViewModel);
}
}, error => {
this.errorResponse(error);
});
this.classForm.patchValue(operationData.clickedRow);
}
if (operationData.operation === AppSettings.DELETE.toLowerCase()) {
const dialogRef = this.dialogMethod(AppSettings.WARNING_ON_SINGLE_DELETE, true, AppSettings.NO, AppSettings.YES);
dialogRef.afterClosed().subscribe(action => {
if (action === AppSettings.YES) {
const actionClickedId: Array<string> = [operationData.clickedRow.id];
this.deleteClasses(actionClickedId, false);
}
});
}
}
deleteClasses(selectedIds: Array<string>, isMultiDelete: boolean): void {
this.classesConfigService.deleteAllClass(selectedIds).subscribe(response => {
if (response.statusCode === HttpStatus.OK) {
this.openSnackBar(response.messages.ResultMessage);
this.selectedIdsList = [];
}
else {
this.openSnackBar(response.messages.ResultMessage, true);
if (isMultiDelete) {
this.preSelectIds = response.failedRecords;
}
}
this.getAllFilteredClasses();
}, error => {
if (error.error.failedRecords !== undefined && error.error.failedRecords.length > 0 && isMultiDelete) {
this.preSelectIds = (error.error.failedRecords);
}
this.errorResponse(error);
this.getAllFilteredClasses();
});
}
deleteWarning(): void {
const dialogRef = this.dialogMethod(AppSettings.WARNING_ON_SINGLE_DELETE, true, AppSettings.NO, AppSettings.YES);
dialogRef.afterClosed().subscribe(action => {
if (action === AppSettings.YES) {
this.deleteClasses(this.selectedIdsList, true);
}
});
}
dialogMethod(dialogData: any, disableClose: boolean, button1Text: string, button2Text?: string): any {
return this.dialogRef.open(CustomDialogComponent, {
disableClose: disableClose,
data: { text: this.commonService.getTranslation(dialogData), action: true, btn1Text: button1Text, btn2Text: button2Text },
});
}
trimTextBoxSpaces(key: string): void {
this.classForm.controls[key].setValue(this.commonService.trimSpaces(this.classForm.controls[key].value)); // modify value here)
}
} | this.isdisableBtn = true;
this.classesConfigService.createClass(form)
.subscribe((res: ValidationMessageView) => { | random_line_split |
classes.component.ts | import { Component, OnInit, ViewChild, ViewContainerRef } from '@angular/core';
import { CommonService } from 'app/service/common.service';
import { ClassesConfigService } from 'app/service/general/api/classes-config.service';
import { MatDialog, MatSnackBar } from '@angular/material';
import { ClassFilterView } from '../models/class-filter-view';
import { CustomDialogComponent } from 'app/shared/custom-dialog/custom-dialog.component';
import { AppSettings } from 'app/app.constants';
import { ClassResultViewModel, ClassResultView, ValidationMessageView } from '../models/class-result-view-model';
import { FormGroup, FormControl, Validators } from '@angular/forms';
import { DropDownModel } from 'app/models/drop-down-view';
import * as HttpStatus from 'http-status-codes';
import { CommonComponent } from '../../../../shared/common/common.component';
@Component({
selector: 'app-classes',
templateUrl: './classes.component.html',
styleUrls: ['./classes.component.scss'],
providers: [ClassesConfigService]
})
export class ClassesComponent extends CommonComponent implements OnInit {
@ViewChild('class') myClassForm;
filterViewModel: ClassFilterView;
isdisableBtn = false;
classForm: FormGroup;
classReferences: Array<DropDownModel> = [];
tableSettings: {};
rows: Array<any>;
columns: any[];
pageCnt: number;
lastSelectediId = '';
selectedIdsList: Array<string> = [];
totalRowsCount: number;
rowBasedAction: Array<any> = [];
closeForm: boolean;
validateForm: boolean;
isFormSubmitted = false;
showCreateBtn = true;
preSelectIds: Array<string> = [];
currentComponent = 'ClassesComponent';
constructor(public commonService: CommonService, private classesConfigService: ClassesConfigService, public snackBar: MatSnackBar, public dialogRef: MatDialog,
public viewContainerRef: ViewContainerRef) {
super();
this.initializeFilterView();
this.setColumnHeaders();
this.initializeTableSettings();
}
ngOnInit(): void {
// this.commonService.getTableLSObj(this.tableSettings);
this.initializeForm();
// if (localStorage.getItem('_s')) {
this.classesConfigService.getClassReferences()
.subscribe((data: Array<{ id: string | number, name: string }>) =>
data.forEach(element =>
this.classReferences.push({ label: element.name, value: element.id })
));
const modelTableComponent = this.getModelComponent(this.currentComponent);
if (modelTableComponent) {
this.filterViewModel = modelTableComponent;
}
this.getAllFilteredClasses();
// }
}
initializeForm(): void {
this.classForm = new FormGroup({
id: new FormControl(''),
classReferenceTypeId: new FormControl('', [Validators.required]),
name: new FormControl('', [Validators.required, Validators.maxLength(15)]),
code: new FormControl(null, [Validators.maxLength(6)]),
description: new FormControl(null, [Validators.maxLength(135)]),
});
}
setColumnHeaders(): void {
this.columns = [
{ field: 'name', header: 'Class Name', sort: true },
{ field: 'code', header: 'Code', sort: true },
{ field: 'classReferenceName', header: 'Class Reference', sort: true },
{ field: 'description', header: 'Description', sort: true },
{ field: 'actions', header: 'Actions', sort: false }
];
}
initializeTableSettings(): void {
this.tableSettings = {
rows: [],
columns: this.columns,
tablename: 'Classes',
componentName: this.currentComponent,
model: this.filterViewModel
};
}
initializeFilterView(): void {
this.filterViewModel = {
sortBy: '',
sortOrder: 0,
pageNumber: AppSettings.PAGENUMBER,
pageSize: AppSettings.PAGESIZE,
};
}
getAllFilteredClasses(): void {
this.classesConfigService.getFilteredClasses(this.filterViewModel.classReferenceIds, this.filterViewModel.names,
this.filterViewModel.codes, this.filterViewModel.descriptions, this.filterViewModel.sortOrder, this.filterViewModel.sortBy,
this.preSelectIds, this.filterViewModel.pageNumber, this.filterViewModel.pageSize).subscribe(res => {
this.bindClassResult(res);
}, error => {
this.errorResponse(error);
});
}
tableData(_event: ClassFilterView): void {
this.filterViewModel = _event;
this.getAllFilteredClasses();
}
openForm(): void {
this.initializeForm();
this.isFormSubmitted = false;
this.closeForm = false;
}
onCancel(): void {
this.closeForm = true;
this.isFormSubmitted = false;
this.showCreateBtn = true;
this.getAllFilteredClasses();
this.myClassForm.resetForm();
}
bindClassResult(data: ClassResultView): any {
if (!data.pagedClassViewModels) {
this.rows = [];
this.totalRowsCount = 0;
this.pageCnt = 0;
} else {
this.rows = data.pagedClassViewModels.list;
this.totalRowsCount = data.pagedClassViewModels.totalItems;
this.pageCnt = data.pagedClassViewModels.totalPages;
this.rows.forEach(e => {
e.operations = [
{
name: AppSettings.EDIT_OPERATION,
icon: AppSettings.EDIT,
operationName: AppSettings.EDIT
}, {
name: AppSettings.DELETE_OPERATION,
icon: AppSettings.DELETE,
operationName: AppSettings.DELETE
}
];
});
this.preSelectIds = [];
}
if (data.pagedClassViewModels) {
this.filterViewModel.pageNumber = data.pagedClassViewModels.pageNumber;
}
this.tableSettings = {
model: this.filterViewModel,
rows: this.rows,
columns: this.columns,
totalRowsCount: this.totalRowsCount,
pageCnt: this.pageCnt,
tablename: 'Classes',
componentName: this.currentComponent,
visibleSelectAll: true,
isSelectRowRequired: true,
isPaginationRequired: true,
filtersList: data.filters,
headerOperations: {
infoButton: {
required: true,
text: 'Class Component'
},
addingForm: {
required: true,
btnName: 'Add Class'
}
}
};
}
createOrUpdateClass(form: ClassResultViewModel, onContinue = false): void {
this.isFormSubmitted = true;
if (this.classForm.invalid) {
return;
}
if (!form.id && this.classForm.status === AppSettings.VALID) {
this.isdisableBtn = true;
this.classesConfigService.createClass(form)
.subscribe((res: ValidationMessageView) => {
if (res.statusCode === HttpStatus.OK) {
this.openSnackBar(res.messages.ResultMessage);
this.myClassForm.resetForm();
if (!onContinue){
this.closeForm = true;
this.getAllFilteredClasses();
}
}
else |
}, error => {
// this.isdisableBtn = false;
this.errorResponse(error);
});
this.isdisableBtn = false;
}
else if (this.classForm.valid) {
this.isdisableBtn = true;
this.classesConfigService.updateClass(form).subscribe((res: ValidationMessageView) => {
if (res.statusCode === HttpStatus.OK) {
this.openSnackBar(res.messages.ResultMessage);
this.myClassForm.resetForm();
this.showCreateBtn = true;
this.closeForm = true;
this.onCancel();
}
else {
this.openSnackBar(res.messages.ResultMessage, true);
this.closeForm = false;
}
}, error => {
// this.disbleSubmitBtn = false;
this.errorResponse(error);
});
this.isdisableBtn = false;
}
}
selectedRows(_event: Array<ClassResultViewModel>): void {
this.selectedIdsList = _event.length ? _event.map(x => x.id) : [];
}
actions(operationData: any): void {
if (operationData.operation === AppSettings.EDIT.toLowerCase()) {
this.isFormSubmitted = false;
this.validateForm = false;
this.showCreateBtn = false;
this.closeForm = false;
this.classesConfigService.getClass(operationData.clickedRow.id).subscribe(res => {
if (res.statusCode === HttpStatus.OK) {
this.classForm.patchValue(res.classViewModel);
}
}, error => {
this.errorResponse(error);
});
this.classForm.patchValue(operationData.clickedRow);
}
if (operationData.operation === AppSettings.DELETE.toLowerCase()) {
const dialogRef = this.dialogMethod(AppSettings.WARNING_ON_SINGLE_DELETE, true, AppSettings.NO, AppSettings.YES);
dialogRef.afterClosed().subscribe(action => {
if (action === AppSettings.YES) {
const actionClickedId: Array<string> = [operationData.clickedRow.id];
this.deleteClasses(actionClickedId, false);
}
});
}
}
deleteClasses(selectedIds: Array<string>, isMultiDelete: boolean): void {
this.classesConfigService.deleteAllClass(selectedIds).subscribe(response => {
if (response.statusCode === HttpStatus.OK) {
this.openSnackBar(response.messages.ResultMessage);
this.selectedIdsList = [];
}
else {
this.openSnackBar(response.messages.ResultMessage, true);
if (isMultiDelete) {
this.preSelectIds = response.failedRecords;
}
}
this.getAllFilteredClasses();
}, error => {
if (error.error.failedRecords !== undefined && error.error.failedRecords.length > 0 && isMultiDelete) {
this.preSelectIds = (error.error.failedRecords);
}
this.errorResponse(error);
this.getAllFilteredClasses();
});
}
deleteWarning(): void {
const dialogRef = this.dialogMethod(AppSettings.WARNING_ON_SINGLE_DELETE, true, AppSettings.NO, AppSettings.YES);
dialogRef.afterClosed().subscribe(action => {
if (action === AppSettings.YES) {
this.deleteClasses(this.selectedIdsList, true);
}
});
}
dialogMethod(dialogData: any, disableClose: boolean, button1Text: string, button2Text?: string): any {
return this.dialogRef.open(CustomDialogComponent, {
disableClose: disableClose,
data: { text: this.commonService.getTranslation(dialogData), action: true, btn1Text: button1Text, btn2Text: button2Text },
});
}
trimTextBoxSpaces(key: string): void {
this.classForm.controls[key].setValue(this.commonService.trimSpaces(this.classForm.controls[key].value)); // modify value here)
}
}
| {
this.openSnackBar(res.messages.ResultMessage, true);
this.closeForm = false;
} | conditional_block |
classes.component.ts | import { Component, OnInit, ViewChild, ViewContainerRef } from '@angular/core';
import { CommonService } from 'app/service/common.service';
import { ClassesConfigService } from 'app/service/general/api/classes-config.service';
import { MatDialog, MatSnackBar } from '@angular/material';
import { ClassFilterView } from '../models/class-filter-view';
import { CustomDialogComponent } from 'app/shared/custom-dialog/custom-dialog.component';
import { AppSettings } from 'app/app.constants';
import { ClassResultViewModel, ClassResultView, ValidationMessageView } from '../models/class-result-view-model';
import { FormGroup, FormControl, Validators } from '@angular/forms';
import { DropDownModel } from 'app/models/drop-down-view';
import * as HttpStatus from 'http-status-codes';
import { CommonComponent } from '../../../../shared/common/common.component';
@Component({
selector: 'app-classes',
templateUrl: './classes.component.html',
styleUrls: ['./classes.component.scss'],
providers: [ClassesConfigService]
})
export class ClassesComponent extends CommonComponent implements OnInit {
@ViewChild('class') myClassForm;
filterViewModel: ClassFilterView;
isdisableBtn = false;
classForm: FormGroup;
classReferences: Array<DropDownModel> = [];
tableSettings: {};
rows: Array<any>;
columns: any[];
pageCnt: number;
lastSelectediId = '';
selectedIdsList: Array<string> = [];
totalRowsCount: number;
rowBasedAction: Array<any> = [];
closeForm: boolean;
validateForm: boolean;
isFormSubmitted = false;
showCreateBtn = true;
preSelectIds: Array<string> = [];
currentComponent = 'ClassesComponent';
constructor(public commonService: CommonService, private classesConfigService: ClassesConfigService, public snackBar: MatSnackBar, public dialogRef: MatDialog,
public viewContainerRef: ViewContainerRef) {
super();
this.initializeFilterView();
this.setColumnHeaders();
this.initializeTableSettings();
}
ngOnInit(): void {
// this.commonService.getTableLSObj(this.tableSettings);
this.initializeForm();
// if (localStorage.getItem('_s')) {
this.classesConfigService.getClassReferences()
.subscribe((data: Array<{ id: string | number, name: string }>) =>
data.forEach(element =>
this.classReferences.push({ label: element.name, value: element.id })
));
const modelTableComponent = this.getModelComponent(this.currentComponent);
if (modelTableComponent) {
this.filterViewModel = modelTableComponent;
}
this.getAllFilteredClasses();
// }
}
initializeForm(): void {
this.classForm = new FormGroup({
id: new FormControl(''),
classReferenceTypeId: new FormControl('', [Validators.required]),
name: new FormControl('', [Validators.required, Validators.maxLength(15)]),
code: new FormControl(null, [Validators.maxLength(6)]),
description: new FormControl(null, [Validators.maxLength(135)]),
});
}
setColumnHeaders(): void {
this.columns = [
{ field: 'name', header: 'Class Name', sort: true },
{ field: 'code', header: 'Code', sort: true },
{ field: 'classReferenceName', header: 'Class Reference', sort: true },
{ field: 'description', header: 'Description', sort: true },
{ field: 'actions', header: 'Actions', sort: false }
];
}
initializeTableSettings(): void {
this.tableSettings = {
rows: [],
columns: this.columns,
tablename: 'Classes',
componentName: this.currentComponent,
model: this.filterViewModel
};
}
initializeFilterView(): void {
this.filterViewModel = {
sortBy: '',
sortOrder: 0,
pageNumber: AppSettings.PAGENUMBER,
pageSize: AppSettings.PAGESIZE,
};
}
getAllFilteredClasses(): void {
this.classesConfigService.getFilteredClasses(this.filterViewModel.classReferenceIds, this.filterViewModel.names,
this.filterViewModel.codes, this.filterViewModel.descriptions, this.filterViewModel.sortOrder, this.filterViewModel.sortBy,
this.preSelectIds, this.filterViewModel.pageNumber, this.filterViewModel.pageSize).subscribe(res => {
this.bindClassResult(res);
}, error => {
this.errorResponse(error);
});
}
tableData(_event: ClassFilterView): void {
this.filterViewModel = _event;
this.getAllFilteredClasses();
}
openForm(): void {
this.initializeForm();
this.isFormSubmitted = false;
this.closeForm = false;
}
onCancel(): void {
this.closeForm = true;
this.isFormSubmitted = false;
this.showCreateBtn = true;
this.getAllFilteredClasses();
this.myClassForm.resetForm();
}
bindClassResult(data: ClassResultView): any {
if (!data.pagedClassViewModels) {
this.rows = [];
this.totalRowsCount = 0;
this.pageCnt = 0;
} else {
this.rows = data.pagedClassViewModels.list;
this.totalRowsCount = data.pagedClassViewModels.totalItems;
this.pageCnt = data.pagedClassViewModels.totalPages;
this.rows.forEach(e => {
e.operations = [
{
name: AppSettings.EDIT_OPERATION,
icon: AppSettings.EDIT,
operationName: AppSettings.EDIT
}, {
name: AppSettings.DELETE_OPERATION,
icon: AppSettings.DELETE,
operationName: AppSettings.DELETE
}
];
});
this.preSelectIds = [];
}
if (data.pagedClassViewModels) {
this.filterViewModel.pageNumber = data.pagedClassViewModels.pageNumber;
}
this.tableSettings = {
model: this.filterViewModel,
rows: this.rows,
columns: this.columns,
totalRowsCount: this.totalRowsCount,
pageCnt: this.pageCnt,
tablename: 'Classes',
componentName: this.currentComponent,
visibleSelectAll: true,
isSelectRowRequired: true,
isPaginationRequired: true,
filtersList: data.filters,
headerOperations: {
infoButton: {
required: true,
text: 'Class Component'
},
addingForm: {
required: true,
btnName: 'Add Class'
}
}
};
}
| (form: ClassResultViewModel, onContinue = false): void {
this.isFormSubmitted = true;
if (this.classForm.invalid) {
return;
}
if (!form.id && this.classForm.status === AppSettings.VALID) {
this.isdisableBtn = true;
this.classesConfigService.createClass(form)
.subscribe((res: ValidationMessageView) => {
if (res.statusCode === HttpStatus.OK) {
this.openSnackBar(res.messages.ResultMessage);
this.myClassForm.resetForm();
if (!onContinue){
this.closeForm = true;
this.getAllFilteredClasses();
}
}
else {
this.openSnackBar(res.messages.ResultMessage, true);
this.closeForm = false;
}
}, error => {
// this.isdisableBtn = false;
this.errorResponse(error);
});
this.isdisableBtn = false;
}
else if (this.classForm.valid) {
this.isdisableBtn = true;
this.classesConfigService.updateClass(form).subscribe((res: ValidationMessageView) => {
if (res.statusCode === HttpStatus.OK) {
this.openSnackBar(res.messages.ResultMessage);
this.myClassForm.resetForm();
this.showCreateBtn = true;
this.closeForm = true;
this.onCancel();
}
else {
this.openSnackBar(res.messages.ResultMessage, true);
this.closeForm = false;
}
}, error => {
// this.disbleSubmitBtn = false;
this.errorResponse(error);
});
this.isdisableBtn = false;
}
}
selectedRows(_event: Array<ClassResultViewModel>): void {
this.selectedIdsList = _event.length ? _event.map(x => x.id) : [];
}
actions(operationData: any): void {
if (operationData.operation === AppSettings.EDIT.toLowerCase()) {
this.isFormSubmitted = false;
this.validateForm = false;
this.showCreateBtn = false;
this.closeForm = false;
this.classesConfigService.getClass(operationData.clickedRow.id).subscribe(res => {
if (res.statusCode === HttpStatus.OK) {
this.classForm.patchValue(res.classViewModel);
}
}, error => {
this.errorResponse(error);
});
this.classForm.patchValue(operationData.clickedRow);
}
if (operationData.operation === AppSettings.DELETE.toLowerCase()) {
const dialogRef = this.dialogMethod(AppSettings.WARNING_ON_SINGLE_DELETE, true, AppSettings.NO, AppSettings.YES);
dialogRef.afterClosed().subscribe(action => {
if (action === AppSettings.YES) {
const actionClickedId: Array<string> = [operationData.clickedRow.id];
this.deleteClasses(actionClickedId, false);
}
});
}
}
deleteClasses(selectedIds: Array<string>, isMultiDelete: boolean): void {
this.classesConfigService.deleteAllClass(selectedIds).subscribe(response => {
if (response.statusCode === HttpStatus.OK) {
this.openSnackBar(response.messages.ResultMessage);
this.selectedIdsList = [];
}
else {
this.openSnackBar(response.messages.ResultMessage, true);
if (isMultiDelete) {
this.preSelectIds = response.failedRecords;
}
}
this.getAllFilteredClasses();
}, error => {
if (error.error.failedRecords !== undefined && error.error.failedRecords.length > 0 && isMultiDelete) {
this.preSelectIds = (error.error.failedRecords);
}
this.errorResponse(error);
this.getAllFilteredClasses();
});
}
deleteWarning(): void {
const dialogRef = this.dialogMethod(AppSettings.WARNING_ON_SINGLE_DELETE, true, AppSettings.NO, AppSettings.YES);
dialogRef.afterClosed().subscribe(action => {
if (action === AppSettings.YES) {
this.deleteClasses(this.selectedIdsList, true);
}
});
}
dialogMethod(dialogData: any, disableClose: boolean, button1Text: string, button2Text?: string): any {
return this.dialogRef.open(CustomDialogComponent, {
disableClose: disableClose,
data: { text: this.commonService.getTranslation(dialogData), action: true, btn1Text: button1Text, btn2Text: button2Text },
});
}
trimTextBoxSpaces(key: string): void {
this.classForm.controls[key].setValue(this.commonService.trimSpaces(this.classForm.controls[key].value)); // modify value here)
}
}
| createOrUpdateClass | identifier_name |
lib.rs | #![allow(clippy::many_single_char_names, non_snake_case)]
pub mod chase;
pub mod mvd;
mod parser;
mod sorted_uvec;
use itertools::Itertools;
use mvd::MVD;
use rand::prelude::Distribution;
use sorted_uvec::SortedUVec;
use std::{borrow::Borrow, mem, ops::Not};
use std::{
collections::HashMap,
fmt::{self, Display, Formatter},
};
pub type Attrs = SortedUVec<u32>;
impl Attrs {
pub fn with_names<'a>(&'a self, register: &'a NameRegister) -> AttrWithNames<'a> {
AttrWithNames {
attrs: self,
register,
}
}
pub fn keys(&self, FDs: &[FD]) -> Vec<Attrs> {
all_subsets_of(self)
.filter(|sub| categorize(sub, self, FDs) == Category::Key)
.collect()
}
}
pub fn attrs<I, J>(iter: I) -> Attrs
where
I: IntoIterator<Item = J>,
J: Borrow<u32>,
{
Attrs::new(iter.into_iter().map(|v| *v.borrow()))
}
#[derive(PartialEq, Eq, Clone, Debug, Default)]
pub struct FD {
pub source: Attrs,
pub target: Attrs,
}
impl FD {
pub fn new(source: Attrs, target: Attrs) -> Self {
let target = &target - &source;
Self { source, target }
}
pub fn is_deformed(&self) -> bool {
self.source.is_empty() || self.target.is_empty()
}
pub fn split(&self) -> impl Iterator<Item = FD> + '_ {
self.target
.iter()
.map(move |&v| FD::new(self.source.clone(), attrs(&[v])))
}
pub fn with_names<'a>(&'a self, register: &'a NameRegister) -> DepWithNames<'a> {
DepWithNames {
arrow: "->",
source: &self.source,
target: &self.target,
register,
}
}
}
pub struct DepWithNames<'a> {
arrow: &'a str,
source: &'a Attrs,
target: &'a Attrs,
register: &'a NameRegister,
}
impl Display for DepWithNames<'_> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let sep_by_comma = |list: &[u32], f: &mut Formatter| -> fmt::Result {
let mut first = true;
for &v in list {
if !first {
write!(f, ", ")?;
}
first = false;
write!(f, "{}", self.register.name(v).unwrap_or("{Unnamed}"))?;
}
Ok(())
};
sep_by_comma(&*self.source, f)?;
write!(f, " {} ", self.arrow)?;
sep_by_comma(&*self.target, f)?;
Ok(())
}
}
pub fn closure_of(attrs: &Attrs, dependencies: &[FD]) -> Attrs {
let mut closure = attrs.clone();
let mut size = closure.len();
loop {
for fd in dependencies {
if fd.source.is_subset(&closure) {
closure.extend(fd.target.iter().copied());
}
}
if closure.len() > size {
size = closure.len();
} else {
break;
}
}
closure
}
#[derive(Debug, PartialEq)]
pub enum Category {
Nonkey,
Key,
Superkey,
}
impl Display for Category {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
<Self as fmt::Debug>::fmt(self, f)
}
}
pub fn categorize(sub: &Attrs, rel: &Attrs, FDs: &[FD]) -> Category {
let closure = closure_of(sub, FDs);
if !closure.is_superset(&rel) {
return Category::Nonkey;
}
let has_subkey = sub
.iter()
.map(|v| {
let mut shirnked = sub.clone();
shirnked.remove(v);
shirnked
})
.any(|attrs| closure_of(&attrs, FDs).is_superset(&rel));
if has_subkey {
Category::Superkey
} else {
Category::Key
}
}
#[derive(Default)]
pub struct NameRegister {
cnt: u32,
name_idx: HashMap<String, u32>,
idx_name: HashMap<u32, String>,
}
impl NameRegister {
pub fn new() -> Self {
Self::default()
}
pub fn resolve(&self, name: &str) -> Option<u32> {
self.name_idx.get(name).copied()
}
pub fn name(&self, idx: u32) -> Option<&str> {
self.idx_name.get(&idx).map(|s| s.as_str())
}
pub fn attrs(&self) -> Attrs {
(0..self.cnt).collect()
}
pub fn categorize(&self, attrs: &Attrs, dependencies: &[FD]) -> Category {
categorize(attrs, &self.attrs(), dependencies)
}
pub fn register(&mut self, name: &str) -> u32 {
self.resolve(name).unwrap_or_else(|| {
let key = self.cnt;
self.cnt += 1;
self.name_idx.insert(name.to_string(), key);
self.idx_name.insert(key, name.to_string());
key
})
}
pub fn parse_fd(&self, input: &str) -> Option<FD> {
let (_, (source, target)) = parser::fd(input).ok()?;
let source: Attrs = source
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
let target: Attrs = target
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
Some(FD::new(source, target))
}
pub fn parse_mvd(&self, input: &str) -> Option<MVD> {
let (_, (source, target)) = parser::mvd(input).ok()?;
let source: Attrs = source
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
let target: Attrs = target
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
Some(MVD::new(source, target))
}
pub fn cnt(&self) -> u32 {
self.cnt
}
}
pub struct AttrWithNames<'a> {
attrs: &'a [u32],
register: &'a NameRegister,
}
impl<'a> Display for AttrWithNames<'a> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let mut is_first = true;
write!(f, "{{ ")?;
for &attr in self.attrs {
if !is_first {
write!(f, ", ")?;
}
is_first = false;
f.write_str(self.register.name(attr).unwrap_or("{Unnamed}"))?;
}
write!(f, " }}")?;
Ok(())
}
}
pub fn parse_FDs(register: &NameRegister, FDs: &[&str]) -> Vec<FD> {
FDs.iter()
.map(|fd| register.parse_fd(fd).unwrap())
.collect()
}
pub fn parse_MVDs(register: &NameRegister, MVDs: &[&str]) -> Vec<MVD> {
MVDs.iter()
.map(|mvd| register.parse_mvd(mvd).unwrap())
.collect()
}
pub fn implies(FDs: &[FD], fd: &FD) -> bool {
closure_of(&fd.source, FDs).is_superset(&fd.target)
}
pub fn all_subsets_of(attrs: &[u32]) -> impl Iterator<Item = Attrs> + '_ {
(0..=attrs.len())
.flat_map(move |k| attrs.iter().copied().combinations(k))
.map(From::from)
}
pub fn project_to(attrs: &Attrs, FDs: &[FD]) -> Vec<FD> {
let FDs: Vec<FD> = all_subsets_of(&*attrs)
.map(|selected| {
let closure = closure_of(&selected, FDs);
FD::new(selected, &closure & attrs)
})
.filter(|fd| !fd.is_deformed())
.collect();
minify(&FDs)
}
pub fn is_minimal_basis(FDs: &[FD]) -> bool {
let mut FDs: Vec<_> = FDs.iter().flat_map(|fd| fd.split()).collect();
!remove_implied(&mut FDs)
}
pub fn minify(FDs: &[FD]) -> Vec<FD> {
let mut FDs: Vec<_> = FDs.iter().flat_map(|fd| fd.split()).collect();
loop {
let refined = remove_implied(&mut FDs);
let shrinked = remove_redundant(&mut FDs);
if !(refined || shrinked) {
break;
}
}
FDs.sort_by(|a, b| a.source.cmp(&b.source));
FDs
}
fn remove_implied(FDs: &mut Vec<FD>) -> bool {
for i in 0..FDs.len() {
let FD = mem::take(&mut FDs[i]);
if implies(FDs, &FD) {
FDs.swap_remove(i);
return true;
}
FDs[i] = FD;
}
false
}
fn remove_redundant(FDs: &mut [FD]) -> bool {
for i in 0..FDs.len() {
let FD = &FDs[i];
for v in &FD.source {
let mut shrinked = FD.clone();
shrinked.source.remove(v);
if implies(FDs, &shrinked) {
FDs[i] = shrinked;
return true;
}
}
}
false
}
pub fn all_violations<'a>(rel: &'a Attrs, FDs: &'a [FD]) -> impl Iterator<Item = &'a FD> + 'a {
FDs.iter()
.filter(move |fd| closure_of(&fd.source, FDs).is_superset(rel).not())
}
fn violation<'a>(rel: &'a Attrs, FDs: &'a [FD]) -> Option<&'a FD> {
all_violations(rel, FDs).next()
}
pub fn is_bcnf_violation(rel: &Attrs, FDs: &[FD]) -> bool {
violation(rel, FDs).is_some()
}
pub fn bcnf_decomposition(rel: &Attrs, FDs: &[FD]) -> Vec<Attrs> {
let rel: Attrs = rel.clone();
let mut candidates: Vec<(Attrs, Vec<FD>)> = vec![(rel, FDs.to_vec())];
let mut bcnf: Vec<Attrs> = vec![];
while let Some((rel, FDs)) = candidates.pop() {
// every 2-attribute relation is in BCNF
if rel.len() <= 2 {
bcnf.push(rel);
continue;
}
if let Some(fd) = violation(&rel, &FDs) {
let rel_0 = closure_of(&fd.source, &FDs);
let FDs_0 = project_to(&rel_0, &FDs);
let rel_1 = &fd.source | &(&rel - &rel_0);
let FDs_1 = project_to(&rel_1, &FDs);
candidates.push((rel_0, FDs_0));
candidates.push((rel_1, FDs_1));
} else |
}
bcnf
}
pub struct NumberOfAttrs(u32);
impl NumberOfAttrs {
pub fn new(n: u32) -> Self {
Self(n)
}
}
impl Distribution<FD> for NumberOfAttrs {
fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> FD {
let n = self.0;
let mut source = vec![];
let mut target = vec![];
for i in 0..n {
if rng.gen_bool(0.5) {
source.push(i);
}
if rng.gen_bool(0.5) {
target.push(i);
}
}
if source.is_empty() {
source.push(rng.gen_range(0..n));
}
if target.is_empty() {
target.push(rng.gen_range(0..n));
}
FD::new(source.into(), target.into())
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn closure_test() {
let mut reg = NameRegister::new();
let A = reg.register("A");
let B = reg.register("B");
let C = reg.register("C");
let D = reg.register("D");
let E = reg.register("E");
let _F = reg.register("F");
let dependencies = parse_FDs(®, &["A, B -> C", "B, C -> A, D", "D -> E", "C, F -> B"]);
assert_eq!(
&*closure_of(&attrs(&[A, B]), &dependencies),
&[A, B, C, D, E]
);
assert_eq!(&*closure_of(&attrs(&[D]), &dependencies), &[D, E]);
}
#[test]
fn format_test() {
let mut reg = NameRegister::new();
reg.register("A");
reg.register("B");
reg.register("C");
reg.register("D");
let fd = reg.parse_fd("B, A -> D, C").unwrap();
assert_eq!(format!("{}", fd.with_names(®)), "A, B -> C, D");
}
#[test]
fn project_test() {
let mut reg = NameRegister::new();
let A = reg.register("A");
let _B = reg.register("B");
let C = reg.register("C");
let D = reg.register("D");
let FDs = parse_FDs(®, &["A -> B", "B -> C", "C -> D"]);
let projection = project_to(&[A, C, D].iter().copied().collect(), &FDs);
assert_eq!(projection.len(), 2);
assert!(projection.iter().all(|fd| fd.target.len() == 1));
assert!(implies(&projection, ®.parse_fd("A -> C, D").unwrap()));
assert!(implies(&projection, ®.parse_fd("C -> D").unwrap()));
}
#[test]
fn violation_test() {
let mut reg = NameRegister::new();
let _title = reg.register("title");
let _year = reg.register("year");
let _studio_name = reg.register("studio_name");
let _president = reg.register("president");
let FDs = parse_FDs(
®,
&["title, year -> studio_name", "studio_name -> president"],
);
assert_eq!(violation(®.attrs(), &FDs), Some(&FDs[1]));
}
#[test]
fn bcnf_test() {
let mut reg = NameRegister::new();
let title = reg.register("title");
let year = reg.register("year");
let studio_name = reg.register("studio_name");
let president = reg.register("president");
let pres_addr = reg.register("pres_addr");
let FDs = parse_FDs(
®,
&[
"title, year -> studio_name",
"studio_name -> president",
"president -> pres_addr",
],
);
let decomposition = bcnf_decomposition(®.attrs(), &FDs);
assert_eq!(decomposition.len(), 3);
assert!(decomposition.contains(&attrs(&[title, year, studio_name])));
assert!(decomposition.contains(&attrs(&[studio_name, president])));
assert!(decomposition.contains(&attrs(&[president, pres_addr])));
}
}
| {
bcnf.push(rel);
} | conditional_block |
lib.rs | #![allow(clippy::many_single_char_names, non_snake_case)]
pub mod chase;
pub mod mvd;
mod parser;
mod sorted_uvec;
use itertools::Itertools;
use mvd::MVD;
use rand::prelude::Distribution;
use sorted_uvec::SortedUVec;
use std::{borrow::Borrow, mem, ops::Not};
use std::{
collections::HashMap,
fmt::{self, Display, Formatter},
};
pub type Attrs = SortedUVec<u32>;
impl Attrs {
pub fn with_names<'a>(&'a self, register: &'a NameRegister) -> AttrWithNames<'a> {
AttrWithNames {
attrs: self,
register,
}
}
pub fn keys(&self, FDs: &[FD]) -> Vec<Attrs> {
all_subsets_of(self)
.filter(|sub| categorize(sub, self, FDs) == Category::Key)
.collect()
}
}
pub fn attrs<I, J>(iter: I) -> Attrs
where
I: IntoIterator<Item = J>,
J: Borrow<u32>,
{
Attrs::new(iter.into_iter().map(|v| *v.borrow()))
}
#[derive(PartialEq, Eq, Clone, Debug, Default)]
pub struct FD {
pub source: Attrs,
pub target: Attrs,
}
impl FD {
pub fn new(source: Attrs, target: Attrs) -> Self {
let target = &target - &source;
Self { source, target }
}
pub fn is_deformed(&self) -> bool {
self.source.is_empty() || self.target.is_empty()
}
pub fn split(&self) -> impl Iterator<Item = FD> + '_ {
self.target
.iter()
.map(move |&v| FD::new(self.source.clone(), attrs(&[v])))
}
pub fn with_names<'a>(&'a self, register: &'a NameRegister) -> DepWithNames<'a> {
DepWithNames {
arrow: "->",
source: &self.source,
target: &self.target,
register,
}
}
}
pub struct DepWithNames<'a> {
arrow: &'a str,
source: &'a Attrs,
target: &'a Attrs,
register: &'a NameRegister,
}
impl Display for DepWithNames<'_> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let sep_by_comma = |list: &[u32], f: &mut Formatter| -> fmt::Result {
let mut first = true;
for &v in list {
if !first {
write!(f, ", ")?;
}
first = false;
write!(f, "{}", self.register.name(v).unwrap_or("{Unnamed}"))?;
}
Ok(())
};
sep_by_comma(&*self.source, f)?;
write!(f, " {} ", self.arrow)?;
sep_by_comma(&*self.target, f)?;
Ok(())
}
}
pub fn closure_of(attrs: &Attrs, dependencies: &[FD]) -> Attrs {
let mut closure = attrs.clone();
let mut size = closure.len();
loop {
for fd in dependencies {
if fd.source.is_subset(&closure) {
closure.extend(fd.target.iter().copied());
}
}
if closure.len() > size {
size = closure.len();
} else {
break;
}
}
closure
}
#[derive(Debug, PartialEq)]
pub enum Category {
Nonkey,
Key,
Superkey,
}
impl Display for Category {
fn | (&self, f: &mut Formatter) -> fmt::Result {
<Self as fmt::Debug>::fmt(self, f)
}
}
pub fn categorize(sub: &Attrs, rel: &Attrs, FDs: &[FD]) -> Category {
let closure = closure_of(sub, FDs);
if !closure.is_superset(&rel) {
return Category::Nonkey;
}
let has_subkey = sub
.iter()
.map(|v| {
let mut shirnked = sub.clone();
shirnked.remove(v);
shirnked
})
.any(|attrs| closure_of(&attrs, FDs).is_superset(&rel));
if has_subkey {
Category::Superkey
} else {
Category::Key
}
}
#[derive(Default)]
pub struct NameRegister {
cnt: u32,
name_idx: HashMap<String, u32>,
idx_name: HashMap<u32, String>,
}
impl NameRegister {
pub fn new() -> Self {
Self::default()
}
pub fn resolve(&self, name: &str) -> Option<u32> {
self.name_idx.get(name).copied()
}
pub fn name(&self, idx: u32) -> Option<&str> {
self.idx_name.get(&idx).map(|s| s.as_str())
}
pub fn attrs(&self) -> Attrs {
(0..self.cnt).collect()
}
pub fn categorize(&self, attrs: &Attrs, dependencies: &[FD]) -> Category {
categorize(attrs, &self.attrs(), dependencies)
}
pub fn register(&mut self, name: &str) -> u32 {
self.resolve(name).unwrap_or_else(|| {
let key = self.cnt;
self.cnt += 1;
self.name_idx.insert(name.to_string(), key);
self.idx_name.insert(key, name.to_string());
key
})
}
pub fn parse_fd(&self, input: &str) -> Option<FD> {
let (_, (source, target)) = parser::fd(input).ok()?;
let source: Attrs = source
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
let target: Attrs = target
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
Some(FD::new(source, target))
}
pub fn parse_mvd(&self, input: &str) -> Option<MVD> {
let (_, (source, target)) = parser::mvd(input).ok()?;
let source: Attrs = source
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
let target: Attrs = target
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
Some(MVD::new(source, target))
}
pub fn cnt(&self) -> u32 {
self.cnt
}
}
pub struct AttrWithNames<'a> {
attrs: &'a [u32],
register: &'a NameRegister,
}
impl<'a> Display for AttrWithNames<'a> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let mut is_first = true;
write!(f, "{{ ")?;
for &attr in self.attrs {
if !is_first {
write!(f, ", ")?;
}
is_first = false;
f.write_str(self.register.name(attr).unwrap_or("{Unnamed}"))?;
}
write!(f, " }}")?;
Ok(())
}
}
pub fn parse_FDs(register: &NameRegister, FDs: &[&str]) -> Vec<FD> {
FDs.iter()
.map(|fd| register.parse_fd(fd).unwrap())
.collect()
}
pub fn parse_MVDs(register: &NameRegister, MVDs: &[&str]) -> Vec<MVD> {
MVDs.iter()
.map(|mvd| register.parse_mvd(mvd).unwrap())
.collect()
}
pub fn implies(FDs: &[FD], fd: &FD) -> bool {
closure_of(&fd.source, FDs).is_superset(&fd.target)
}
pub fn all_subsets_of(attrs: &[u32]) -> impl Iterator<Item = Attrs> + '_ {
(0..=attrs.len())
.flat_map(move |k| attrs.iter().copied().combinations(k))
.map(From::from)
}
pub fn project_to(attrs: &Attrs, FDs: &[FD]) -> Vec<FD> {
let FDs: Vec<FD> = all_subsets_of(&*attrs)
.map(|selected| {
let closure = closure_of(&selected, FDs);
FD::new(selected, &closure & attrs)
})
.filter(|fd| !fd.is_deformed())
.collect();
minify(&FDs)
}
pub fn is_minimal_basis(FDs: &[FD]) -> bool {
let mut FDs: Vec<_> = FDs.iter().flat_map(|fd| fd.split()).collect();
!remove_implied(&mut FDs)
}
pub fn minify(FDs: &[FD]) -> Vec<FD> {
let mut FDs: Vec<_> = FDs.iter().flat_map(|fd| fd.split()).collect();
loop {
let refined = remove_implied(&mut FDs);
let shrinked = remove_redundant(&mut FDs);
if !(refined || shrinked) {
break;
}
}
FDs.sort_by(|a, b| a.source.cmp(&b.source));
FDs
}
fn remove_implied(FDs: &mut Vec<FD>) -> bool {
for i in 0..FDs.len() {
let FD = mem::take(&mut FDs[i]);
if implies(FDs, &FD) {
FDs.swap_remove(i);
return true;
}
FDs[i] = FD;
}
false
}
fn remove_redundant(FDs: &mut [FD]) -> bool {
for i in 0..FDs.len() {
let FD = &FDs[i];
for v in &FD.source {
let mut shrinked = FD.clone();
shrinked.source.remove(v);
if implies(FDs, &shrinked) {
FDs[i] = shrinked;
return true;
}
}
}
false
}
pub fn all_violations<'a>(rel: &'a Attrs, FDs: &'a [FD]) -> impl Iterator<Item = &'a FD> + 'a {
FDs.iter()
.filter(move |fd| closure_of(&fd.source, FDs).is_superset(rel).not())
}
fn violation<'a>(rel: &'a Attrs, FDs: &'a [FD]) -> Option<&'a FD> {
all_violations(rel, FDs).next()
}
pub fn is_bcnf_violation(rel: &Attrs, FDs: &[FD]) -> bool {
violation(rel, FDs).is_some()
}
pub fn bcnf_decomposition(rel: &Attrs, FDs: &[FD]) -> Vec<Attrs> {
let rel: Attrs = rel.clone();
let mut candidates: Vec<(Attrs, Vec<FD>)> = vec![(rel, FDs.to_vec())];
let mut bcnf: Vec<Attrs> = vec![];
while let Some((rel, FDs)) = candidates.pop() {
// every 2-attribute relation is in BCNF
if rel.len() <= 2 {
bcnf.push(rel);
continue;
}
if let Some(fd) = violation(&rel, &FDs) {
let rel_0 = closure_of(&fd.source, &FDs);
let FDs_0 = project_to(&rel_0, &FDs);
let rel_1 = &fd.source | &(&rel - &rel_0);
let FDs_1 = project_to(&rel_1, &FDs);
candidates.push((rel_0, FDs_0));
candidates.push((rel_1, FDs_1));
} else {
bcnf.push(rel);
}
}
bcnf
}
pub struct NumberOfAttrs(u32);
impl NumberOfAttrs {
pub fn new(n: u32) -> Self {
Self(n)
}
}
impl Distribution<FD> for NumberOfAttrs {
fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> FD {
let n = self.0;
let mut source = vec![];
let mut target = vec![];
for i in 0..n {
if rng.gen_bool(0.5) {
source.push(i);
}
if rng.gen_bool(0.5) {
target.push(i);
}
}
if source.is_empty() {
source.push(rng.gen_range(0..n));
}
if target.is_empty() {
target.push(rng.gen_range(0..n));
}
FD::new(source.into(), target.into())
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn closure_test() {
let mut reg = NameRegister::new();
let A = reg.register("A");
let B = reg.register("B");
let C = reg.register("C");
let D = reg.register("D");
let E = reg.register("E");
let _F = reg.register("F");
let dependencies = parse_FDs(®, &["A, B -> C", "B, C -> A, D", "D -> E", "C, F -> B"]);
assert_eq!(
&*closure_of(&attrs(&[A, B]), &dependencies),
&[A, B, C, D, E]
);
assert_eq!(&*closure_of(&attrs(&[D]), &dependencies), &[D, E]);
}
#[test]
fn format_test() {
let mut reg = NameRegister::new();
reg.register("A");
reg.register("B");
reg.register("C");
reg.register("D");
let fd = reg.parse_fd("B, A -> D, C").unwrap();
assert_eq!(format!("{}", fd.with_names(®)), "A, B -> C, D");
}
#[test]
fn project_test() {
let mut reg = NameRegister::new();
let A = reg.register("A");
let _B = reg.register("B");
let C = reg.register("C");
let D = reg.register("D");
let FDs = parse_FDs(®, &["A -> B", "B -> C", "C -> D"]);
let projection = project_to(&[A, C, D].iter().copied().collect(), &FDs);
assert_eq!(projection.len(), 2);
assert!(projection.iter().all(|fd| fd.target.len() == 1));
assert!(implies(&projection, ®.parse_fd("A -> C, D").unwrap()));
assert!(implies(&projection, ®.parse_fd("C -> D").unwrap()));
}
#[test]
fn violation_test() {
let mut reg = NameRegister::new();
let _title = reg.register("title");
let _year = reg.register("year");
let _studio_name = reg.register("studio_name");
let _president = reg.register("president");
let FDs = parse_FDs(
®,
&["title, year -> studio_name", "studio_name -> president"],
);
assert_eq!(violation(®.attrs(), &FDs), Some(&FDs[1]));
}
#[test]
fn bcnf_test() {
let mut reg = NameRegister::new();
let title = reg.register("title");
let year = reg.register("year");
let studio_name = reg.register("studio_name");
let president = reg.register("president");
let pres_addr = reg.register("pres_addr");
let FDs = parse_FDs(
®,
&[
"title, year -> studio_name",
"studio_name -> president",
"president -> pres_addr",
],
);
let decomposition = bcnf_decomposition(®.attrs(), &FDs);
assert_eq!(decomposition.len(), 3);
assert!(decomposition.contains(&attrs(&[title, year, studio_name])));
assert!(decomposition.contains(&attrs(&[studio_name, president])));
assert!(decomposition.contains(&attrs(&[president, pres_addr])));
}
}
| fmt | identifier_name |
lib.rs | #![allow(clippy::many_single_char_names, non_snake_case)]
pub mod chase;
pub mod mvd;
mod parser;
mod sorted_uvec;
use itertools::Itertools;
use mvd::MVD;
use rand::prelude::Distribution;
use sorted_uvec::SortedUVec;
use std::{borrow::Borrow, mem, ops::Not};
use std::{
collections::HashMap,
fmt::{self, Display, Formatter},
};
pub type Attrs = SortedUVec<u32>;
impl Attrs {
pub fn with_names<'a>(&'a self, register: &'a NameRegister) -> AttrWithNames<'a> {
AttrWithNames {
attrs: self,
register,
}
}
pub fn keys(&self, FDs: &[FD]) -> Vec<Attrs> {
all_subsets_of(self)
.filter(|sub| categorize(sub, self, FDs) == Category::Key)
.collect()
}
}
pub fn attrs<I, J>(iter: I) -> Attrs
where
I: IntoIterator<Item = J>,
J: Borrow<u32>,
{
Attrs::new(iter.into_iter().map(|v| *v.borrow()))
}
#[derive(PartialEq, Eq, Clone, Debug, Default)]
pub struct FD {
pub source: Attrs,
pub target: Attrs,
}
impl FD {
pub fn new(source: Attrs, target: Attrs) -> Self {
let target = &target - &source;
Self { source, target }
}
pub fn is_deformed(&self) -> bool {
self.source.is_empty() || self.target.is_empty()
}
pub fn split(&self) -> impl Iterator<Item = FD> + '_ {
self.target
.iter()
.map(move |&v| FD::new(self.source.clone(), attrs(&[v])))
}
pub fn with_names<'a>(&'a self, register: &'a NameRegister) -> DepWithNames<'a> {
DepWithNames {
arrow: "->",
source: &self.source,
target: &self.target,
register,
}
}
}
pub struct DepWithNames<'a> {
arrow: &'a str,
source: &'a Attrs,
target: &'a Attrs,
register: &'a NameRegister,
}
impl Display for DepWithNames<'_> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let sep_by_comma = |list: &[u32], f: &mut Formatter| -> fmt::Result {
let mut first = true;
for &v in list {
if !first {
write!(f, ", ")?;
}
first = false;
write!(f, "{}", self.register.name(v).unwrap_or("{Unnamed}"))?;
}
Ok(())
};
sep_by_comma(&*self.source, f)?;
write!(f, " {} ", self.arrow)?;
sep_by_comma(&*self.target, f)?;
Ok(())
}
}
pub fn closure_of(attrs: &Attrs, dependencies: &[FD]) -> Attrs {
let mut closure = attrs.clone();
let mut size = closure.len();
loop {
for fd in dependencies {
if fd.source.is_subset(&closure) {
closure.extend(fd.target.iter().copied());
}
}
if closure.len() > size {
size = closure.len();
} else {
break;
}
}
closure
}
#[derive(Debug, PartialEq)]
pub enum Category {
Nonkey,
Key,
Superkey,
}
impl Display for Category {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
<Self as fmt::Debug>::fmt(self, f)
}
}
pub fn categorize(sub: &Attrs, rel: &Attrs, FDs: &[FD]) -> Category {
let closure = closure_of(sub, FDs);
if !closure.is_superset(&rel) {
return Category::Nonkey;
}
let has_subkey = sub
.iter()
.map(|v| {
let mut shirnked = sub.clone();
shirnked.remove(v);
shirnked
})
.any(|attrs| closure_of(&attrs, FDs).is_superset(&rel));
if has_subkey {
Category::Superkey
} else {
Category::Key
}
}
#[derive(Default)]
pub struct NameRegister {
cnt: u32,
name_idx: HashMap<String, u32>,
idx_name: HashMap<u32, String>,
}
impl NameRegister {
pub fn new() -> Self {
Self::default()
}
pub fn resolve(&self, name: &str) -> Option<u32> {
self.name_idx.get(name).copied()
}
pub fn name(&self, idx: u32) -> Option<&str> {
self.idx_name.get(&idx).map(|s| s.as_str())
}
pub fn attrs(&self) -> Attrs {
(0..self.cnt).collect()
}
pub fn categorize(&self, attrs: &Attrs, dependencies: &[FD]) -> Category {
categorize(attrs, &self.attrs(), dependencies)
}
pub fn register(&mut self, name: &str) -> u32 {
self.resolve(name).unwrap_or_else(|| {
let key = self.cnt;
self.cnt += 1;
self.name_idx.insert(name.to_string(), key);
self.idx_name.insert(key, name.to_string());
key
})
}
pub fn parse_fd(&self, input: &str) -> Option<FD> {
let (_, (source, target)) = parser::fd(input).ok()?;
let source: Attrs = source
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
let target: Attrs = target
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
Some(FD::new(source, target))
}
pub fn parse_mvd(&self, input: &str) -> Option<MVD> {
let (_, (source, target)) = parser::mvd(input).ok()?;
let source: Attrs = source
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
let target: Attrs = target
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
Some(MVD::new(source, target))
}
pub fn cnt(&self) -> u32 {
self.cnt
}
}
pub struct AttrWithNames<'a> {
attrs: &'a [u32],
register: &'a NameRegister,
}
impl<'a> Display for AttrWithNames<'a> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let mut is_first = true;
write!(f, "{{ ")?;
for &attr in self.attrs {
if !is_first {
write!(f, ", ")?;
}
is_first = false;
f.write_str(self.register.name(attr).unwrap_or("{Unnamed}"))?;
}
write!(f, " }}")?;
Ok(())
}
}
pub fn parse_FDs(register: &NameRegister, FDs: &[&str]) -> Vec<FD> {
FDs.iter()
.map(|fd| register.parse_fd(fd).unwrap())
.collect()
}
pub fn parse_MVDs(register: &NameRegister, MVDs: &[&str]) -> Vec<MVD> {
MVDs.iter()
.map(|mvd| register.parse_mvd(mvd).unwrap())
.collect()
}
pub fn implies(FDs: &[FD], fd: &FD) -> bool {
closure_of(&fd.source, FDs).is_superset(&fd.target)
}
pub fn all_subsets_of(attrs: &[u32]) -> impl Iterator<Item = Attrs> + '_ {
(0..=attrs.len())
.flat_map(move |k| attrs.iter().copied().combinations(k))
.map(From::from)
}
pub fn project_to(attrs: &Attrs, FDs: &[FD]) -> Vec<FD> {
let FDs: Vec<FD> = all_subsets_of(&*attrs)
.map(|selected| {
let closure = closure_of(&selected, FDs);
FD::new(selected, &closure & attrs)
})
.filter(|fd| !fd.is_deformed())
.collect();
minify(&FDs)
}
pub fn is_minimal_basis(FDs: &[FD]) -> bool {
let mut FDs: Vec<_> = FDs.iter().flat_map(|fd| fd.split()).collect();
!remove_implied(&mut FDs)
}
pub fn minify(FDs: &[FD]) -> Vec<FD> {
let mut FDs: Vec<_> = FDs.iter().flat_map(|fd| fd.split()).collect();
loop {
let refined = remove_implied(&mut FDs);
let shrinked = remove_redundant(&mut FDs);
if !(refined || shrinked) {
break;
}
}
FDs.sort_by(|a, b| a.source.cmp(&b.source));
FDs
}
fn remove_implied(FDs: &mut Vec<FD>) -> bool {
for i in 0..FDs.len() {
let FD = mem::take(&mut FDs[i]);
if implies(FDs, &FD) {
FDs.swap_remove(i);
return true;
}
FDs[i] = FD;
}
false
}
fn remove_redundant(FDs: &mut [FD]) -> bool {
for i in 0..FDs.len() {
let FD = &FDs[i];
for v in &FD.source {
let mut shrinked = FD.clone();
shrinked.source.remove(v);
if implies(FDs, &shrinked) {
FDs[i] = shrinked;
return true;
}
}
}
false
}
pub fn all_violations<'a>(rel: &'a Attrs, FDs: &'a [FD]) -> impl Iterator<Item = &'a FD> + 'a {
FDs.iter()
.filter(move |fd| closure_of(&fd.source, FDs).is_superset(rel).not())
}
fn violation<'a>(rel: &'a Attrs, FDs: &'a [FD]) -> Option<&'a FD> {
all_violations(rel, FDs).next()
}
pub fn is_bcnf_violation(rel: &Attrs, FDs: &[FD]) -> bool {
violation(rel, FDs).is_some()
}
|
while let Some((rel, FDs)) = candidates.pop() {
// every 2-attribute relation is in BCNF
if rel.len() <= 2 {
bcnf.push(rel);
continue;
}
if let Some(fd) = violation(&rel, &FDs) {
let rel_0 = closure_of(&fd.source, &FDs);
let FDs_0 = project_to(&rel_0, &FDs);
let rel_1 = &fd.source | &(&rel - &rel_0);
let FDs_1 = project_to(&rel_1, &FDs);
candidates.push((rel_0, FDs_0));
candidates.push((rel_1, FDs_1));
} else {
bcnf.push(rel);
}
}
bcnf
}
pub struct NumberOfAttrs(u32);
impl NumberOfAttrs {
pub fn new(n: u32) -> Self {
Self(n)
}
}
impl Distribution<FD> for NumberOfAttrs {
fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> FD {
let n = self.0;
let mut source = vec![];
let mut target = vec![];
for i in 0..n {
if rng.gen_bool(0.5) {
source.push(i);
}
if rng.gen_bool(0.5) {
target.push(i);
}
}
if source.is_empty() {
source.push(rng.gen_range(0..n));
}
if target.is_empty() {
target.push(rng.gen_range(0..n));
}
FD::new(source.into(), target.into())
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn closure_test() {
let mut reg = NameRegister::new();
let A = reg.register("A");
let B = reg.register("B");
let C = reg.register("C");
let D = reg.register("D");
let E = reg.register("E");
let _F = reg.register("F");
let dependencies = parse_FDs(®, &["A, B -> C", "B, C -> A, D", "D -> E", "C, F -> B"]);
assert_eq!(
&*closure_of(&attrs(&[A, B]), &dependencies),
&[A, B, C, D, E]
);
assert_eq!(&*closure_of(&attrs(&[D]), &dependencies), &[D, E]);
}
#[test]
fn format_test() {
let mut reg = NameRegister::new();
reg.register("A");
reg.register("B");
reg.register("C");
reg.register("D");
let fd = reg.parse_fd("B, A -> D, C").unwrap();
assert_eq!(format!("{}", fd.with_names(®)), "A, B -> C, D");
}
#[test]
fn project_test() {
let mut reg = NameRegister::new();
let A = reg.register("A");
let _B = reg.register("B");
let C = reg.register("C");
let D = reg.register("D");
let FDs = parse_FDs(®, &["A -> B", "B -> C", "C -> D"]);
let projection = project_to(&[A, C, D].iter().copied().collect(), &FDs);
assert_eq!(projection.len(), 2);
assert!(projection.iter().all(|fd| fd.target.len() == 1));
assert!(implies(&projection, ®.parse_fd("A -> C, D").unwrap()));
assert!(implies(&projection, ®.parse_fd("C -> D").unwrap()));
}
#[test]
fn violation_test() {
let mut reg = NameRegister::new();
let _title = reg.register("title");
let _year = reg.register("year");
let _studio_name = reg.register("studio_name");
let _president = reg.register("president");
let FDs = parse_FDs(
®,
&["title, year -> studio_name", "studio_name -> president"],
);
assert_eq!(violation(®.attrs(), &FDs), Some(&FDs[1]));
}
#[test]
fn bcnf_test() {
let mut reg = NameRegister::new();
let title = reg.register("title");
let year = reg.register("year");
let studio_name = reg.register("studio_name");
let president = reg.register("president");
let pres_addr = reg.register("pres_addr");
let FDs = parse_FDs(
®,
&[
"title, year -> studio_name",
"studio_name -> president",
"president -> pres_addr",
],
);
let decomposition = bcnf_decomposition(®.attrs(), &FDs);
assert_eq!(decomposition.len(), 3);
assert!(decomposition.contains(&attrs(&[title, year, studio_name])));
assert!(decomposition.contains(&attrs(&[studio_name, president])));
assert!(decomposition.contains(&attrs(&[president, pres_addr])));
}
} | pub fn bcnf_decomposition(rel: &Attrs, FDs: &[FD]) -> Vec<Attrs> {
let rel: Attrs = rel.clone();
let mut candidates: Vec<(Attrs, Vec<FD>)> = vec![(rel, FDs.to_vec())];
let mut bcnf: Vec<Attrs> = vec![]; | random_line_split |
lib.rs | #![allow(clippy::many_single_char_names, non_snake_case)]
pub mod chase;
pub mod mvd;
mod parser;
mod sorted_uvec;
use itertools::Itertools;
use mvd::MVD;
use rand::prelude::Distribution;
use sorted_uvec::SortedUVec;
use std::{borrow::Borrow, mem, ops::Not};
use std::{
collections::HashMap,
fmt::{self, Display, Formatter},
};
pub type Attrs = SortedUVec<u32>;
impl Attrs {
pub fn with_names<'a>(&'a self, register: &'a NameRegister) -> AttrWithNames<'a> {
AttrWithNames {
attrs: self,
register,
}
}
pub fn keys(&self, FDs: &[FD]) -> Vec<Attrs> {
all_subsets_of(self)
.filter(|sub| categorize(sub, self, FDs) == Category::Key)
.collect()
}
}
pub fn attrs<I, J>(iter: I) -> Attrs
where
I: IntoIterator<Item = J>,
J: Borrow<u32>,
{
Attrs::new(iter.into_iter().map(|v| *v.borrow()))
}
#[derive(PartialEq, Eq, Clone, Debug, Default)]
pub struct FD {
pub source: Attrs,
pub target: Attrs,
}
impl FD {
pub fn new(source: Attrs, target: Attrs) -> Self {
let target = &target - &source;
Self { source, target }
}
pub fn is_deformed(&self) -> bool {
self.source.is_empty() || self.target.is_empty()
}
pub fn split(&self) -> impl Iterator<Item = FD> + '_ |
pub fn with_names<'a>(&'a self, register: &'a NameRegister) -> DepWithNames<'a> {
DepWithNames {
arrow: "->",
source: &self.source,
target: &self.target,
register,
}
}
}
pub struct DepWithNames<'a> {
arrow: &'a str,
source: &'a Attrs,
target: &'a Attrs,
register: &'a NameRegister,
}
impl Display for DepWithNames<'_> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let sep_by_comma = |list: &[u32], f: &mut Formatter| -> fmt::Result {
let mut first = true;
for &v in list {
if !first {
write!(f, ", ")?;
}
first = false;
write!(f, "{}", self.register.name(v).unwrap_or("{Unnamed}"))?;
}
Ok(())
};
sep_by_comma(&*self.source, f)?;
write!(f, " {} ", self.arrow)?;
sep_by_comma(&*self.target, f)?;
Ok(())
}
}
pub fn closure_of(attrs: &Attrs, dependencies: &[FD]) -> Attrs {
let mut closure = attrs.clone();
let mut size = closure.len();
loop {
for fd in dependencies {
if fd.source.is_subset(&closure) {
closure.extend(fd.target.iter().copied());
}
}
if closure.len() > size {
size = closure.len();
} else {
break;
}
}
closure
}
#[derive(Debug, PartialEq)]
pub enum Category {
Nonkey,
Key,
Superkey,
}
impl Display for Category {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
<Self as fmt::Debug>::fmt(self, f)
}
}
pub fn categorize(sub: &Attrs, rel: &Attrs, FDs: &[FD]) -> Category {
let closure = closure_of(sub, FDs);
if !closure.is_superset(&rel) {
return Category::Nonkey;
}
let has_subkey = sub
.iter()
.map(|v| {
let mut shirnked = sub.clone();
shirnked.remove(v);
shirnked
})
.any(|attrs| closure_of(&attrs, FDs).is_superset(&rel));
if has_subkey {
Category::Superkey
} else {
Category::Key
}
}
#[derive(Default)]
pub struct NameRegister {
cnt: u32,
name_idx: HashMap<String, u32>,
idx_name: HashMap<u32, String>,
}
impl NameRegister {
pub fn new() -> Self {
Self::default()
}
pub fn resolve(&self, name: &str) -> Option<u32> {
self.name_idx.get(name).copied()
}
pub fn name(&self, idx: u32) -> Option<&str> {
self.idx_name.get(&idx).map(|s| s.as_str())
}
pub fn attrs(&self) -> Attrs {
(0..self.cnt).collect()
}
pub fn categorize(&self, attrs: &Attrs, dependencies: &[FD]) -> Category {
categorize(attrs, &self.attrs(), dependencies)
}
pub fn register(&mut self, name: &str) -> u32 {
self.resolve(name).unwrap_or_else(|| {
let key = self.cnt;
self.cnt += 1;
self.name_idx.insert(name.to_string(), key);
self.idx_name.insert(key, name.to_string());
key
})
}
pub fn parse_fd(&self, input: &str) -> Option<FD> {
let (_, (source, target)) = parser::fd(input).ok()?;
let source: Attrs = source
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
let target: Attrs = target
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
Some(FD::new(source, target))
}
pub fn parse_mvd(&self, input: &str) -> Option<MVD> {
let (_, (source, target)) = parser::mvd(input).ok()?;
let source: Attrs = source
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
let target: Attrs = target
.iter()
.map(|v| self.resolve(v))
.collect::<Option<_>>()?;
Some(MVD::new(source, target))
}
pub fn cnt(&self) -> u32 {
self.cnt
}
}
pub struct AttrWithNames<'a> {
attrs: &'a [u32],
register: &'a NameRegister,
}
impl<'a> Display for AttrWithNames<'a> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let mut is_first = true;
write!(f, "{{ ")?;
for &attr in self.attrs {
if !is_first {
write!(f, ", ")?;
}
is_first = false;
f.write_str(self.register.name(attr).unwrap_or("{Unnamed}"))?;
}
write!(f, " }}")?;
Ok(())
}
}
pub fn parse_FDs(register: &NameRegister, FDs: &[&str]) -> Vec<FD> {
FDs.iter()
.map(|fd| register.parse_fd(fd).unwrap())
.collect()
}
pub fn parse_MVDs(register: &NameRegister, MVDs: &[&str]) -> Vec<MVD> {
MVDs.iter()
.map(|mvd| register.parse_mvd(mvd).unwrap())
.collect()
}
pub fn implies(FDs: &[FD], fd: &FD) -> bool {
closure_of(&fd.source, FDs).is_superset(&fd.target)
}
pub fn all_subsets_of(attrs: &[u32]) -> impl Iterator<Item = Attrs> + '_ {
(0..=attrs.len())
.flat_map(move |k| attrs.iter().copied().combinations(k))
.map(From::from)
}
pub fn project_to(attrs: &Attrs, FDs: &[FD]) -> Vec<FD> {
let FDs: Vec<FD> = all_subsets_of(&*attrs)
.map(|selected| {
let closure = closure_of(&selected, FDs);
FD::new(selected, &closure & attrs)
})
.filter(|fd| !fd.is_deformed())
.collect();
minify(&FDs)
}
pub fn is_minimal_basis(FDs: &[FD]) -> bool {
let mut FDs: Vec<_> = FDs.iter().flat_map(|fd| fd.split()).collect();
!remove_implied(&mut FDs)
}
pub fn minify(FDs: &[FD]) -> Vec<FD> {
let mut FDs: Vec<_> = FDs.iter().flat_map(|fd| fd.split()).collect();
loop {
let refined = remove_implied(&mut FDs);
let shrinked = remove_redundant(&mut FDs);
if !(refined || shrinked) {
break;
}
}
FDs.sort_by(|a, b| a.source.cmp(&b.source));
FDs
}
fn remove_implied(FDs: &mut Vec<FD>) -> bool {
for i in 0..FDs.len() {
let FD = mem::take(&mut FDs[i]);
if implies(FDs, &FD) {
FDs.swap_remove(i);
return true;
}
FDs[i] = FD;
}
false
}
fn remove_redundant(FDs: &mut [FD]) -> bool {
for i in 0..FDs.len() {
let FD = &FDs[i];
for v in &FD.source {
let mut shrinked = FD.clone();
shrinked.source.remove(v);
if implies(FDs, &shrinked) {
FDs[i] = shrinked;
return true;
}
}
}
false
}
pub fn all_violations<'a>(rel: &'a Attrs, FDs: &'a [FD]) -> impl Iterator<Item = &'a FD> + 'a {
FDs.iter()
.filter(move |fd| closure_of(&fd.source, FDs).is_superset(rel).not())
}
fn violation<'a>(rel: &'a Attrs, FDs: &'a [FD]) -> Option<&'a FD> {
all_violations(rel, FDs).next()
}
pub fn is_bcnf_violation(rel: &Attrs, FDs: &[FD]) -> bool {
violation(rel, FDs).is_some()
}
pub fn bcnf_decomposition(rel: &Attrs, FDs: &[FD]) -> Vec<Attrs> {
let rel: Attrs = rel.clone();
let mut candidates: Vec<(Attrs, Vec<FD>)> = vec![(rel, FDs.to_vec())];
let mut bcnf: Vec<Attrs> = vec![];
while let Some((rel, FDs)) = candidates.pop() {
// every 2-attribute relation is in BCNF
if rel.len() <= 2 {
bcnf.push(rel);
continue;
}
if let Some(fd) = violation(&rel, &FDs) {
let rel_0 = closure_of(&fd.source, &FDs);
let FDs_0 = project_to(&rel_0, &FDs);
let rel_1 = &fd.source | &(&rel - &rel_0);
let FDs_1 = project_to(&rel_1, &FDs);
candidates.push((rel_0, FDs_0));
candidates.push((rel_1, FDs_1));
} else {
bcnf.push(rel);
}
}
bcnf
}
pub struct NumberOfAttrs(u32);
impl NumberOfAttrs {
pub fn new(n: u32) -> Self {
Self(n)
}
}
impl Distribution<FD> for NumberOfAttrs {
fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> FD {
let n = self.0;
let mut source = vec![];
let mut target = vec![];
for i in 0..n {
if rng.gen_bool(0.5) {
source.push(i);
}
if rng.gen_bool(0.5) {
target.push(i);
}
}
if source.is_empty() {
source.push(rng.gen_range(0..n));
}
if target.is_empty() {
target.push(rng.gen_range(0..n));
}
FD::new(source.into(), target.into())
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn closure_test() {
let mut reg = NameRegister::new();
let A = reg.register("A");
let B = reg.register("B");
let C = reg.register("C");
let D = reg.register("D");
let E = reg.register("E");
let _F = reg.register("F");
let dependencies = parse_FDs(®, &["A, B -> C", "B, C -> A, D", "D -> E", "C, F -> B"]);
assert_eq!(
&*closure_of(&attrs(&[A, B]), &dependencies),
&[A, B, C, D, E]
);
assert_eq!(&*closure_of(&attrs(&[D]), &dependencies), &[D, E]);
}
#[test]
fn format_test() {
let mut reg = NameRegister::new();
reg.register("A");
reg.register("B");
reg.register("C");
reg.register("D");
let fd = reg.parse_fd("B, A -> D, C").unwrap();
assert_eq!(format!("{}", fd.with_names(®)), "A, B -> C, D");
}
#[test]
fn project_test() {
let mut reg = NameRegister::new();
let A = reg.register("A");
let _B = reg.register("B");
let C = reg.register("C");
let D = reg.register("D");
let FDs = parse_FDs(®, &["A -> B", "B -> C", "C -> D"]);
let projection = project_to(&[A, C, D].iter().copied().collect(), &FDs);
assert_eq!(projection.len(), 2);
assert!(projection.iter().all(|fd| fd.target.len() == 1));
assert!(implies(&projection, ®.parse_fd("A -> C, D").unwrap()));
assert!(implies(&projection, ®.parse_fd("C -> D").unwrap()));
}
#[test]
fn violation_test() {
let mut reg = NameRegister::new();
let _title = reg.register("title");
let _year = reg.register("year");
let _studio_name = reg.register("studio_name");
let _president = reg.register("president");
let FDs = parse_FDs(
®,
&["title, year -> studio_name", "studio_name -> president"],
);
assert_eq!(violation(®.attrs(), &FDs), Some(&FDs[1]));
}
#[test]
fn bcnf_test() {
let mut reg = NameRegister::new();
let title = reg.register("title");
let year = reg.register("year");
let studio_name = reg.register("studio_name");
let president = reg.register("president");
let pres_addr = reg.register("pres_addr");
let FDs = parse_FDs(
®,
&[
"title, year -> studio_name",
"studio_name -> president",
"president -> pres_addr",
],
);
let decomposition = bcnf_decomposition(®.attrs(), &FDs);
assert_eq!(decomposition.len(), 3);
assert!(decomposition.contains(&attrs(&[title, year, studio_name])));
assert!(decomposition.contains(&attrs(&[studio_name, president])));
assert!(decomposition.contains(&attrs(&[president, pres_addr])));
}
}
| {
self.target
.iter()
.map(move |&v| FD::new(self.source.clone(), attrs(&[v])))
} | identifier_body |
main.py | import tensorflow as tf
import numpy as np
import time
import os
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
from src.model import get_args
from src.funcs import linear
from src.youtubeface import load_ytf_data
from src.lfw import load_lfw_data
from src.facescrub import load_fs_data
from src.wrapper_basicImg import wrapper_basicImg
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
total_iteration = 300000
m = 512
q = 32
lam = 0.01
beta = 1.
margin = 0.5
s = 32
batch_size = 256
class_num = 1595
train_dataset = 'FS'
eval_dataset = "LFW"
args = get_args()
### Get image and label from tfrecord
image, label, iterator = {}, {}, {}
if train_dataset == 'YTF':
image['train'], label['train'], iterator['train'] = load_ytf_data(batch_size, 'train')
elif train_dataset == 'FS':
image['train'], label['train'], iterator['train'] = load_fs_data(batch_size, 'train')
else:
print("Select proper dataset")
### Get evaluation dataset. Wrapper
wrapper = wrapper_basicImg(dataset=eval_dataset)
if eval_dataset == 'YTF':
|
elif eval_dataset == 'LFW':
image['gallery'], label['gallery'], iterator['gallery'] = load_lfw_data(batch_size, 'gallery')
image['test'], label['test'], iterator['test'] = load_lfw_data(batch_size, 'probe')
### Backbone network (Arcface)
embedding_tensor = tf.placeholder(name='img_inputs', shape=[None, 512], dtype=tf.float32)
labels = tf.placeholder(name='label', shape=[None, ], dtype=tf.int32)
### Global step & learning rate
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.003
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, total_iteration, 0.96)
### My implementation (DIom algorithm)
with tf.variable_scope('DIom'):
fc1 = linear(tf.nn.relu(embedding_tensor), 1024, 'fc1')
fc2 = linear(tf.nn.relu(fc1), 1024, 'fc2')
fc3 = linear(tf.nn.relu(fc2), m * q, 'fc3')
h_k = tf.reshape(fc3, [-1, m, q])
h_k = tf.nn.softmax(beta * h_k, axis=2)
index_matrix = tf.range(1, q + 1, dtype=tf.float32)
h = tf.reduce_sum(h_k * index_matrix, axis=2)
h = tf.reshape(h, [-1, m])
h_norm = tf.math.l2_normalize(h, axis=1)
### Loss function
l = tf.one_hot(labels, class_num)
l = tf.matmul(l, tf.transpose(l))
l_float = tf.cast(l, tf.float32)
l = tf.reshape(tf.clip_by_value(l_float, 0., 1.), (-1, 1))
label_int = tf.cast(tf.squeeze(l, 1), tf.int32)
inner_prod = tf.reshape(tf.matmul(h_norm, tf.transpose(h_norm)), (-1, 1))
cos_t = tf.clip_by_value(inner_prod, -1., 1. - 1e-6)
theta = tf.math.acos(cos_t)
sin_t = tf.math.sin(theta)
cos_mt = tf.math.cos(theta + margin)
sin_mt = tf.math.sin(theta + margin)
logit = l * s * (tf.concat([sin_t, cos_mt], 1)) + (1 - l) * s * (tf.concat([sin_mt, cos_t], 1))
l_ij_logit = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=label_int)
c_ij = tf.abs(tf.reduce_mean(h, axis=0) - (q + 1) / 2)
# Baseline pairwise-CE
# label_ce = tf.cast(labels, tf.float32)
# l_ij = l * tf.log(tf.square(inner_prod)) + (1 - l) * tf.log(tf.maximum(1e-6, 1 - tf.square(inner_prod)))
# l_ij = -tf.reduce_mean(l_ij)
# My novel cosine loss
l_ij = tf.reduce_mean(l_ij_logit)
c_ij = tf.reduce_mean(c_ij)
loss = l_ij + lam * c_ij
gradient = tf.gradients(loss, sin_t)
### Optimizer
t_vars = tf.global_variables()
train_vars = [var for var in t_vars if 'DIom' in var.name]
opt_t = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(loss, var_list=train_vars, global_step=global_step)
with tf.Session() as sess:
tf.global_variables_initializer().run()
sess.run(iterator['train'].initializer)
### Training
iteration = sess.run(global_step)
t_opt = [opt_t, loss, l_ij, c_ij]
start_time = time.time()
while iteration != total_iteration:
img, lbl = sess.run([image['train'], label['train']])
train_dict = {
embedding_tensor: img,
labels: lbl
}
_, train_loss, loss_l, loss_c = sess.run(t_opt, feed_dict=train_dict)
iteration += 1
if iteration % 10000 == 0:
### Evaluation after training
### Get gallery hash code
# gallery = []
# gallery_label = []
# sess.run(iterator['gallery'].initializer)
# try:
# while True:
# img, lbl = sess.run([image['gallery'], label['gallery']])
#
# gallery_dict = {
# embedding_tensor: img
# }
#
# hash_code = sess.run(h_norm, feed_dict=gallery_dict)
#
# if gallery == []:
# gallery = hash_code
# gallery_label = lbl
# else:
# gallery = np.concatenate((gallery, hash_code), axis=0)
# gallery_label = np.concatenate((gallery_label, lbl), axis=0)
#
# except tf.errors.OutOfRangeError:
# pass
#
# ### Get probe hash code
# probe = []
# probe_label = []
# code_arr = []
# sess.run(iterator['test'].initializer)
# try:
# while True:
# img, lbl = sess.run([image['test'], label['test']])
#
# gallery_dict = {
# embedding_tensor: img
# }
#
# code, hash_code = sess.run([h, h_norm], feed_dict=gallery_dict)
#
# if probe == []:
# probe = hash_code
# probe_label = lbl
# code_arr = code
# else:
# probe = np.concatenate((probe, hash_code), axis=0)
# probe_label = np.concatenate((probe_label, lbl), axis=0)
# code_arr = np.concatenate((code_arr, code), axis=0)
#
# except tf.errors.OutOfRangeError:
# pass
#
# ### Code frequency
# code_arr = np.around(code_arr)
# count_arr = []
# for i in range(q):
# count_arr.append(np.count_nonzero(code_arr == i + 1))
#
# plt.clf()
# plt.bar(range(1, q+1), count_arr)
# plt.savefig('./plt/code_' + str(iteration) + '.png')
# ### Calculate MAP
# gtp = 40
# k = 50
#
# distance = np.matmul(probe, gallery.T)
# arg_idx = np.argsort(-distance, axis=1)
#
# max_label = gallery_label[arg_idx[:, :k]]
# match_matrix = np.equal(max_label, probe_label[:,np.newaxis])
#
# tp_seen = match_matrix * np.cumsum(match_matrix, axis=1)
# ap = np.sum(tp_seen / np.arange(1, k + 1)[np.newaxis, :], axis=1) / gtp
# MAP = np.mean(ap)
### Calculate EER
dist_list = []
label_list = []
code_list = []
while wrapper.samples_left > 0:
imgs, lbls = wrapper.get_next_batch(100)
imgs = np.reshape(imgs, [-1, 512])
eer_dict = {
embedding_tensor: imgs
}
code, int_code = sess.run([h_norm, h], feed_dict=eer_dict)
code = np.reshape(code, [-1, 2, m])
distance = np.sum(np.prod(code, axis=1), axis=1)
if dist_list == []:
dist_list = distance
label_list = lbls
code_list = int_code
else:
dist_list = np.concatenate((dist_list, distance), axis=0)
label_list = np.concatenate((label_list, lbls), axis=0)
code_list = np.concatenate((code_list, int_code), axis=0)
wrapper.samples_left= np.size(wrapper.labels, axis=0)
wrapper.next_batch_pointer = 0
fpr, tpr, threshold = roc_curve(label_list, dist_list, pos_label=1)
fnr = 1 - tpr
# eer_threshold = threshold(np.nanargmin(np.absolute((fnr - fpr))))
eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
### Code frequency
code_arr = np.around(code_list)
count_arr = []
for i in range(q):
count_arr.append(np.count_nonzero(code_arr == i + 1))
plt.clf()
plt.bar(range(1, q + 1), count_arr)
plt.savefig('./plt/code_' + str(iteration) + '.png')
time_taken = time.time() - start_time
MAP = 0
# print("good")
print("[Iteration %d] Train Loss: %.4f, Loss_l: %.4f, Loss_c: %.4f, MAP: %.4f, EER: %.4f, Taken time: %.4f"
% (iteration, train_loss, loss_l, loss_c, MAP, eer, time_taken))
start_time = time.time()
# np.save('CP.npy', np.concatenate((fpr[np.newaxis, :], tpr[np.newaxis, :]), axis=0))
### Save model.
# save_vars = [var for var in t_vars if 'DIom' in var.name]
# saver = tf.train.Saver(var_list=save_vars)
# saver.save(sess, './model/DIom_layer') | image['gallery'], label['gallery'], iterator['gallery'] = load_ytf_data(batch_size, 'train', eval=True)
image['test'], label['test'], iterator['test'] = load_ytf_data(batch_size, 'test') | conditional_block |
main.py | import tensorflow as tf
import numpy as np
import time
import os
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
from src.model import get_args
from src.funcs import linear
from src.youtubeface import load_ytf_data
from src.lfw import load_lfw_data
from src.facescrub import load_fs_data
from src.wrapper_basicImg import wrapper_basicImg
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
total_iteration = 300000
m = 512
q = 32
lam = 0.01
beta = 1.
margin = 0.5
s = 32
batch_size = 256
class_num = 1595
train_dataset = 'FS'
eval_dataset = "LFW"
args = get_args()
### Get image and label from tfrecord
image, label, iterator = {}, {}, {}
if train_dataset == 'YTF':
image['train'], label['train'], iterator['train'] = load_ytf_data(batch_size, 'train')
elif train_dataset == 'FS':
image['train'], label['train'], iterator['train'] = load_fs_data(batch_size, 'train')
else:
print("Select proper dataset")
### Get evaluation dataset. Wrapper
wrapper = wrapper_basicImg(dataset=eval_dataset)
if eval_dataset == 'YTF':
image['gallery'], label['gallery'], iterator['gallery'] = load_ytf_data(batch_size, 'train', eval=True)
image['test'], label['test'], iterator['test'] = load_ytf_data(batch_size, 'test')
elif eval_dataset == 'LFW':
image['gallery'], label['gallery'], iterator['gallery'] = load_lfw_data(batch_size, 'gallery')
image['test'], label['test'], iterator['test'] = load_lfw_data(batch_size, 'probe')
### Backbone network (Arcface) | embedding_tensor = tf.placeholder(name='img_inputs', shape=[None, 512], dtype=tf.float32)
labels = tf.placeholder(name='label', shape=[None, ], dtype=tf.int32)
### Global step & learning rate
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.003
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, total_iteration, 0.96)
### My implementation (DIom algorithm)
with tf.variable_scope('DIom'):
fc1 = linear(tf.nn.relu(embedding_tensor), 1024, 'fc1')
fc2 = linear(tf.nn.relu(fc1), 1024, 'fc2')
fc3 = linear(tf.nn.relu(fc2), m * q, 'fc3')
h_k = tf.reshape(fc3, [-1, m, q])
h_k = tf.nn.softmax(beta * h_k, axis=2)
index_matrix = tf.range(1, q + 1, dtype=tf.float32)
h = tf.reduce_sum(h_k * index_matrix, axis=2)
h = tf.reshape(h, [-1, m])
h_norm = tf.math.l2_normalize(h, axis=1)
### Loss function
l = tf.one_hot(labels, class_num)
l = tf.matmul(l, tf.transpose(l))
l_float = tf.cast(l, tf.float32)
l = tf.reshape(tf.clip_by_value(l_float, 0., 1.), (-1, 1))
label_int = tf.cast(tf.squeeze(l, 1), tf.int32)
inner_prod = tf.reshape(tf.matmul(h_norm, tf.transpose(h_norm)), (-1, 1))
cos_t = tf.clip_by_value(inner_prod, -1., 1. - 1e-6)
theta = tf.math.acos(cos_t)
sin_t = tf.math.sin(theta)
cos_mt = tf.math.cos(theta + margin)
sin_mt = tf.math.sin(theta + margin)
logit = l * s * (tf.concat([sin_t, cos_mt], 1)) + (1 - l) * s * (tf.concat([sin_mt, cos_t], 1))
l_ij_logit = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=label_int)
c_ij = tf.abs(tf.reduce_mean(h, axis=0) - (q + 1) / 2)
# Baseline pairwise-CE
# label_ce = tf.cast(labels, tf.float32)
# l_ij = l * tf.log(tf.square(inner_prod)) + (1 - l) * tf.log(tf.maximum(1e-6, 1 - tf.square(inner_prod)))
# l_ij = -tf.reduce_mean(l_ij)
# My novel cosine loss
l_ij = tf.reduce_mean(l_ij_logit)
c_ij = tf.reduce_mean(c_ij)
loss = l_ij + lam * c_ij
gradient = tf.gradients(loss, sin_t)
### Optimizer
t_vars = tf.global_variables()
train_vars = [var for var in t_vars if 'DIom' in var.name]
opt_t = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(loss, var_list=train_vars, global_step=global_step)
with tf.Session() as sess:
tf.global_variables_initializer().run()
sess.run(iterator['train'].initializer)
### Training
iteration = sess.run(global_step)
t_opt = [opt_t, loss, l_ij, c_ij]
start_time = time.time()
while iteration != total_iteration:
img, lbl = sess.run([image['train'], label['train']])
train_dict = {
embedding_tensor: img,
labels: lbl
}
_, train_loss, loss_l, loss_c = sess.run(t_opt, feed_dict=train_dict)
iteration += 1
if iteration % 10000 == 0:
### Evaluation after training
### Get gallery hash code
# gallery = []
# gallery_label = []
# sess.run(iterator['gallery'].initializer)
# try:
# while True:
# img, lbl = sess.run([image['gallery'], label['gallery']])
#
# gallery_dict = {
# embedding_tensor: img
# }
#
# hash_code = sess.run(h_norm, feed_dict=gallery_dict)
#
# if gallery == []:
# gallery = hash_code
# gallery_label = lbl
# else:
# gallery = np.concatenate((gallery, hash_code), axis=0)
# gallery_label = np.concatenate((gallery_label, lbl), axis=0)
#
# except tf.errors.OutOfRangeError:
# pass
#
# ### Get probe hash code
# probe = []
# probe_label = []
# code_arr = []
# sess.run(iterator['test'].initializer)
# try:
# while True:
# img, lbl = sess.run([image['test'], label['test']])
#
# gallery_dict = {
# embedding_tensor: img
# }
#
# code, hash_code = sess.run([h, h_norm], feed_dict=gallery_dict)
#
# if probe == []:
# probe = hash_code
# probe_label = lbl
# code_arr = code
# else:
# probe = np.concatenate((probe, hash_code), axis=0)
# probe_label = np.concatenate((probe_label, lbl), axis=0)
# code_arr = np.concatenate((code_arr, code), axis=0)
#
# except tf.errors.OutOfRangeError:
# pass
#
# ### Code frequency
# code_arr = np.around(code_arr)
# count_arr = []
# for i in range(q):
# count_arr.append(np.count_nonzero(code_arr == i + 1))
#
# plt.clf()
# plt.bar(range(1, q+1), count_arr)
# plt.savefig('./plt/code_' + str(iteration) + '.png')
# ### Calculate MAP
# gtp = 40
# k = 50
#
# distance = np.matmul(probe, gallery.T)
# arg_idx = np.argsort(-distance, axis=1)
#
# max_label = gallery_label[arg_idx[:, :k]]
# match_matrix = np.equal(max_label, probe_label[:,np.newaxis])
#
# tp_seen = match_matrix * np.cumsum(match_matrix, axis=1)
# ap = np.sum(tp_seen / np.arange(1, k + 1)[np.newaxis, :], axis=1) / gtp
# MAP = np.mean(ap)
### Calculate EER
dist_list = []
label_list = []
code_list = []
while wrapper.samples_left > 0:
imgs, lbls = wrapper.get_next_batch(100)
imgs = np.reshape(imgs, [-1, 512])
eer_dict = {
embedding_tensor: imgs
}
code, int_code = sess.run([h_norm, h], feed_dict=eer_dict)
code = np.reshape(code, [-1, 2, m])
distance = np.sum(np.prod(code, axis=1), axis=1)
if dist_list == []:
dist_list = distance
label_list = lbls
code_list = int_code
else:
dist_list = np.concatenate((dist_list, distance), axis=0)
label_list = np.concatenate((label_list, lbls), axis=0)
code_list = np.concatenate((code_list, int_code), axis=0)
wrapper.samples_left= np.size(wrapper.labels, axis=0)
wrapper.next_batch_pointer = 0
fpr, tpr, threshold = roc_curve(label_list, dist_list, pos_label=1)
fnr = 1 - tpr
# eer_threshold = threshold(np.nanargmin(np.absolute((fnr - fpr))))
eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
### Code frequency
code_arr = np.around(code_list)
count_arr = []
for i in range(q):
count_arr.append(np.count_nonzero(code_arr == i + 1))
plt.clf()
plt.bar(range(1, q + 1), count_arr)
plt.savefig('./plt/code_' + str(iteration) + '.png')
time_taken = time.time() - start_time
MAP = 0
# print("good")
print("[Iteration %d] Train Loss: %.4f, Loss_l: %.4f, Loss_c: %.4f, MAP: %.4f, EER: %.4f, Taken time: %.4f"
% (iteration, train_loss, loss_l, loss_c, MAP, eer, time_taken))
start_time = time.time()
# np.save('CP.npy', np.concatenate((fpr[np.newaxis, :], tpr[np.newaxis, :]), axis=0))
### Save model.
# save_vars = [var for var in t_vars if 'DIom' in var.name]
# saver = tf.train.Saver(var_list=save_vars)
# saver.save(sess, './model/DIom_layer') | random_line_split | |
sentiment_clustering_speech_classification_v0.py | import nltk
import sklearn_crfsuite
from sklearn_crfsuite import metrics
import pandas as pd
from sklearn.preprocessing import label_binarize
import string
# nltk.download('conll2002')
flatten = lambda l: [item for sublist in l for item in sublist]
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
import os
import sys
from sklearn.preprocessing import LabelEncoder
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
import argparse
import matplotlib.cm as cm
import codecs
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
# nltk.corpus.conll2002.fileids()
from tqdm import tqdm_notebook as tqdm
from tqdm import trange
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import scale
from gensim.models.word2vec import Word2Vec
import gensim
import random
from collections import OrderedDict
from sklearn.model_selection import KFold
# classifier information
from keras.layers import Dropout, Dense
from keras.models import Sequential
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
LabeledSentence = gensim.models.doc2vec.LabeledSentence
import hdbscan
# classifier information
from keras.layers import Input
from keras.models import Model
from keras.layers import Dropout, Dense
from keras.models import Sequential
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import hdbscan
from sklearn.cluster import MiniBatchKMeans
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
def model_ae(X_train,x_test,n=300,encoding_dim=32):
# http://gradientdescending.com/pca-vs-autoencoders-for-dimensionality-reduction/
# r program
# this is our input placeholder
input = Input(shape=(n,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(n, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input, decoded)
# this model maps an input to its encoded representation
encoder = Model(input, encoded)
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(X_train, X_train,
epochs=20,
batch_size=32,
shuffle=True,
validation_data=(x_test, x_test))
return encoder
def call_silhout_(X,df,range_n_clusters):
hyper_parm_turning=OrderedDict()
for n_clusters in range_n_clusters:
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
# clusterer = MiniBatchKMeans(n_clusters=n_clusters,init='k-means++', random_state=10)
from sklearn.mixture import GaussianMixture
# Predict GMM cluster membership
clusterer = GaussianMixture(n_components=n_clusters, random_state=10)
# from sklearn.cluster import AgglomerativeClustering
# clusterer = AgglomerativeClustering(n_clusters=n_clusters)
cluster_labels = clusterer.fit_predict(X)
labels="cluster_labels_{}".format(n_clusters)
if not labels in df.keys():
df[labels]=cluster_labels
sample_dist_std=np.std(df.groupby(labels).size())
sample_dist_avrg=np.median(df.groupby(labels).size())
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
if not 'n_clusters' in hyper_parm_turning.keys():
hyper_parm_turning['n_clusters']=[n_clusters]
else:
hyper_parm_turning['n_clusters'].append(n_clusters)
if not 'silhouette_avg' in hyper_parm_turning.keys():
hyper_parm_turning['silhouette_avg']=[silhouette_avg]
else:
hyper_parm_turning['silhouette_avg'].append(silhouette_avg)
if not 'sample_dist_std' in hyper_parm_turning.keys():
hyper_parm_turning['sample_dist_std']=[sample_dist_std]
else:
hyper_parm_turning['sample_dist_std'].append(sample_dist_std)
if not 'sample_dist_avrg' in hyper_parm_turning.keys():
hyper_parm_turning['sample_dist_avrg']=[sample_dist_avrg]
else:
hyper_parm_turning['sample_dist_avrg'].append(sample_dist_avrg)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
return df,hyper_parm_turning
def | ():
parser = argparse.ArgumentParser(description="")
# Add options
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
# Add arguments
parser.add_argument("input_file", help="The input file to be projected")
# parser.add_argument("speech_feats_file", help="The input file to be projected")
# parser.add_argument("out_path_file", help="The input file to be projected")
args = parser.parse_args()
df_=pd.read_csv(args.input_file)
# print(df_.head())
df_doc2vec=df_.copy()
df_doc2vec=df_doc2vec.drop(['utterance'], axis=1)
# print(df_doc2vec.columns.to_list())
# df_['sentence_label']=sentence_emotion_labeling
df_doc2vec = df_doc2vec[df_doc2vec.columns[:300]]
print('loading the database')
# print(df_doc2vec.head())
print(df_doc2vec.shape)
from sklearn.preprocessing import scale
train_vecs = scale(df_doc2vec)
print('scaling the data')
#using pca as dimension reduction technique
PCA_model = PCA(.90, random_state=42)
X_standard = PCA_model.fit_transform(train_vecs)*(-1)
print(X_standard.shape)
# Single VD
# from numpy import array
# from sklearn.decomposition import TruncatedSVD
# TruncatedSVD_model=TruncatedSVD(n_components=3)
# X_standard = TruncatedSVD_model.fit_transform(train_vecs)
# using T-distributed Stochastic Neighbor Embedding (T-SNE)
# from sklearn.manifold import TSNE
# X_standard = TSNE(n_components=3).fit_transform(train_vecs)
# from sklearn.decomposition import NMF
# NMF_model=NMF(n_components=3)
# X_standard = NMF_model.fit_transform(train_vecs)
# from sklearn import random_projection
# X_standard = random_projection.GaussianRandomProjection(n_components=2).fit_transform(X_standard)
# X_train,x_test,Y_train,y_test=train_test_split(train_vecs, df_['utterance'].to_list(),test_size=0.2)
# encodeing=model_ae(X_train,x_test)
# X_standard=scale(encodeing.predict(train_vecs))
# print(X_standard)
# print(PCA_model.explained_variance_ratio_)
# print(TruncatedSVD_model.explained_variance_ratio_)
# print(NMF_model.explained_variance_ratio_)
# clustering
range_n_clusters =np.arange(20,22,+1)
# # print(df_.shape)
X_labeled,hyper_parm_turning=call_silhout_(X_standard,df_,range_n_clusters)
# print(X_labeled.head())
X_labeled['utterance']=df_.index.to_list()
# # X_labeled['sentence_label']=sentence_emotion_labeling
cluster_='cluster_labels_20'
# cluster_labeling=X_labeled[['utterance','sentence_label',cluster_]].groupby(cluster_).size()
cluster_labeling=X_labeled[['utterance',cluster_]].groupby(cluster_).size()
print(cluster_labeling)
hyper_parm_turning=pd.DataFrame(hyper_parm_turning)
# Sort the rows of dataframe by column 'Name'
hyper_parm_turning = hyper_parm_turning.sort_values(by =['silhouette_avg','sample_dist_std'],ascending=False)
print("Contents of Sorted Dataframe based on a single column 'silhouette_avg' & 'sample_dist_std' : ")
print(hyper_parm_turning)
# print(hyper_parm_turning)
# cluster=''
# outPutData=OrderedDict()
# for idx,group in cluster_labeling:
# if cluster!=group[cluster_].to_list()[0] and group.shape[0]>80 :
# cluster=group[cluster_].to_list()[0]
# print('the shape of the group {} cluster name {}'.format(group.shape,cluster))
# # print(group['utterance'].to_list())
# # with codecs.open('./Doc2Vec/cluster_{}_doc2vec_with_emolex.scp'.format(cluster),'w','utf-8') as cluster:
# # for utt,label in zip(group['utterance'].to_list(),group['sentence_label'].to_list()):
# for utt in group['utterance'].to_list():#,group['sentence_label'].to_list()):
# if not 'utterance' in outPutData.keys():
# outPutData['utterance']=[utt]
# else:
# outPutData['utterance'].append(utt)
# # if not 'emotion_label' in outPutData.keys():
# # outPutData['emotion_label']=[label]
# # else:
# # outPutData['emotion_label'].append(label)
# if not 'cluster' in outPutData.keys():
# outPutData['cluster']=[cluster]
# else:
# outPutData['cluster'].append(cluster)
# final_data=pd.DataFrame(outPutData)
# speech_features=pd.read_csv(args.speech_feats_file)
# # print(speech_features['utterance'].to_list())
# # data_with_feat=speech_features.copy()
# features=speech_features.columns.drop('utterance')
# feat_data = pd.DataFrame(0, index=final_data.index, columns=features)
# for i,row in final_data.iterrows():
# utterance=getattr(row,'utterance')
# feats = speech_features[speech_features.utterance == utterance]
# for feat in list(features):
# feat_data.at[i,feat]=feats[feat]
# final_data = pd.concat([final_data, feat_data], axis=1)
# convert_dict={
# 'cluster':'category',
# }
# # # print(cat_list)
# final_data = final_data.astype(convert_dict)
# final_data['cluster_cat'] = final_data.cluster.cat.codes
# print(final_data.head())
# kf = KFold(n_splits=5,shuffle=True)
# X=final_data[features].values
# Y=final_data['cluster_cat'].values
# X_train,x_test,Y_train,y_test=train_test_split(X,Y,test_size=0.2)
# # build the model
# n_classes=len(set(Y))
# print(n_classes)
# model_DNN=Build_Model_DNN_Text(X_train.shape[1],n_classes)
# print(model_DNN.summary())
# cross_fold_accuracy=[]
# for idx,(train_index, test_index) in enumerate(kf.split(X_train)):
# # print("TRAIN:", train_index, "TEST:", test_index)
# x_train=X_train[train_index]
# x_eval=X_train[test_index]
# y_train=Y_train[train_index]
# y_eval=Y_train[test_index]
# model_DNN.fit(x_train, y_train,validation_data=(x_eval, y_eval),
# epochs=20,
# batch_size=16,
# verbose=2)
# predicted = model_DNN.predict(x_test)
# predicted = np.argmax(predicted, axis=1)
# acc=accuracy_score(y_test,predicted)
# cross_fold_accuracy.append(acc)
# print('fold {} accuracy {}'.format(idx+1,acc*100))
# print('cross folds acc {} (+/-{})'.format(np.mean(cross_fold_accuracy)*100,np.std(cross_fold_accuracy)*100))
# final_data['fold']=np.zeros((final_data.shape[0]))
#
# for idx,(train_index, test_index) in enumerate(kf.split(final_data)):
# #
# final_data.at[test_index,'fold']=idx
# # print(X[test_index])
# outFilename='./Doc2Vec/cluster_{}_doc2vec_with_emolex.csv'.format(os.path.basename(args.input_file))
# final_data.to_csv(outFilename,index=False)
if __name__ == '__main__':
main()
| main | identifier_name |
sentiment_clustering_speech_classification_v0.py | import nltk
import sklearn_crfsuite
from sklearn_crfsuite import metrics
import pandas as pd
from sklearn.preprocessing import label_binarize
import string
# nltk.download('conll2002')
flatten = lambda l: [item for sublist in l for item in sublist]
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
import os
import sys
from sklearn.preprocessing import LabelEncoder
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
import argparse
import matplotlib.cm as cm
import codecs
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
# nltk.corpus.conll2002.fileids()
from tqdm import tqdm_notebook as tqdm
from tqdm import trange
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import scale
from gensim.models.word2vec import Word2Vec
import gensim
import random
from collections import OrderedDict
from sklearn.model_selection import KFold
# classifier information
from keras.layers import Dropout, Dense
from keras.models import Sequential
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
LabeledSentence = gensim.models.doc2vec.LabeledSentence
import hdbscan
# classifier information
from keras.layers import Input
from keras.models import Model
from keras.layers import Dropout, Dense
from keras.models import Sequential
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import hdbscan
from sklearn.cluster import MiniBatchKMeans
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
def model_ae(X_train,x_test,n=300,encoding_dim=32):
# http://gradientdescending.com/pca-vs-autoencoders-for-dimensionality-reduction/
# r program
# this is our input placeholder
input = Input(shape=(n,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(n, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input, decoded)
# this model maps an input to its encoded representation
encoder = Model(input, encoded)
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(X_train, X_train,
epochs=20,
batch_size=32,
shuffle=True,
validation_data=(x_test, x_test))
return encoder
def call_silhout_(X,df,range_n_clusters):
|
def main():
parser = argparse.ArgumentParser(description="")
# Add options
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
# Add arguments
parser.add_argument("input_file", help="The input file to be projected")
# parser.add_argument("speech_feats_file", help="The input file to be projected")
# parser.add_argument("out_path_file", help="The input file to be projected")
args = parser.parse_args()
df_=pd.read_csv(args.input_file)
# print(df_.head())
df_doc2vec=df_.copy()
df_doc2vec=df_doc2vec.drop(['utterance'], axis=1)
# print(df_doc2vec.columns.to_list())
# df_['sentence_label']=sentence_emotion_labeling
df_doc2vec = df_doc2vec[df_doc2vec.columns[:300]]
print('loading the database')
# print(df_doc2vec.head())
print(df_doc2vec.shape)
from sklearn.preprocessing import scale
train_vecs = scale(df_doc2vec)
print('scaling the data')
#using pca as dimension reduction technique
PCA_model = PCA(.90, random_state=42)
X_standard = PCA_model.fit_transform(train_vecs)*(-1)
print(X_standard.shape)
# Single VD
# from numpy import array
# from sklearn.decomposition import TruncatedSVD
# TruncatedSVD_model=TruncatedSVD(n_components=3)
# X_standard = TruncatedSVD_model.fit_transform(train_vecs)
# using T-distributed Stochastic Neighbor Embedding (T-SNE)
# from sklearn.manifold import TSNE
# X_standard = TSNE(n_components=3).fit_transform(train_vecs)
# from sklearn.decomposition import NMF
# NMF_model=NMF(n_components=3)
# X_standard = NMF_model.fit_transform(train_vecs)
# from sklearn import random_projection
# X_standard = random_projection.GaussianRandomProjection(n_components=2).fit_transform(X_standard)
# X_train,x_test,Y_train,y_test=train_test_split(train_vecs, df_['utterance'].to_list(),test_size=0.2)
# encodeing=model_ae(X_train,x_test)
# X_standard=scale(encodeing.predict(train_vecs))
# print(X_standard)
# print(PCA_model.explained_variance_ratio_)
# print(TruncatedSVD_model.explained_variance_ratio_)
# print(NMF_model.explained_variance_ratio_)
# clustering
range_n_clusters =np.arange(20,22,+1)
# # print(df_.shape)
X_labeled,hyper_parm_turning=call_silhout_(X_standard,df_,range_n_clusters)
# print(X_labeled.head())
X_labeled['utterance']=df_.index.to_list()
# # X_labeled['sentence_label']=sentence_emotion_labeling
cluster_='cluster_labels_20'
# cluster_labeling=X_labeled[['utterance','sentence_label',cluster_]].groupby(cluster_).size()
cluster_labeling=X_labeled[['utterance',cluster_]].groupby(cluster_).size()
print(cluster_labeling)
hyper_parm_turning=pd.DataFrame(hyper_parm_turning)
# Sort the rows of dataframe by column 'Name'
hyper_parm_turning = hyper_parm_turning.sort_values(by =['silhouette_avg','sample_dist_std'],ascending=False)
print("Contents of Sorted Dataframe based on a single column 'silhouette_avg' & 'sample_dist_std' : ")
print(hyper_parm_turning)
# print(hyper_parm_turning)
# cluster=''
# outPutData=OrderedDict()
# for idx,group in cluster_labeling:
# if cluster!=group[cluster_].to_list()[0] and group.shape[0]>80 :
# cluster=group[cluster_].to_list()[0]
# print('the shape of the group {} cluster name {}'.format(group.shape,cluster))
# # print(group['utterance'].to_list())
# # with codecs.open('./Doc2Vec/cluster_{}_doc2vec_with_emolex.scp'.format(cluster),'w','utf-8') as cluster:
# # for utt,label in zip(group['utterance'].to_list(),group['sentence_label'].to_list()):
# for utt in group['utterance'].to_list():#,group['sentence_label'].to_list()):
# if not 'utterance' in outPutData.keys():
# outPutData['utterance']=[utt]
# else:
# outPutData['utterance'].append(utt)
# # if not 'emotion_label' in outPutData.keys():
# # outPutData['emotion_label']=[label]
# # else:
# # outPutData['emotion_label'].append(label)
# if not 'cluster' in outPutData.keys():
# outPutData['cluster']=[cluster]
# else:
# outPutData['cluster'].append(cluster)
# final_data=pd.DataFrame(outPutData)
# speech_features=pd.read_csv(args.speech_feats_file)
# # print(speech_features['utterance'].to_list())
# # data_with_feat=speech_features.copy()
# features=speech_features.columns.drop('utterance')
# feat_data = pd.DataFrame(0, index=final_data.index, columns=features)
# for i,row in final_data.iterrows():
# utterance=getattr(row,'utterance')
# feats = speech_features[speech_features.utterance == utterance]
# for feat in list(features):
# feat_data.at[i,feat]=feats[feat]
# final_data = pd.concat([final_data, feat_data], axis=1)
# convert_dict={
# 'cluster':'category',
# }
# # # print(cat_list)
# final_data = final_data.astype(convert_dict)
# final_data['cluster_cat'] = final_data.cluster.cat.codes
# print(final_data.head())
# kf = KFold(n_splits=5,shuffle=True)
# X=final_data[features].values
# Y=final_data['cluster_cat'].values
# X_train,x_test,Y_train,y_test=train_test_split(X,Y,test_size=0.2)
# # build the model
# n_classes=len(set(Y))
# print(n_classes)
# model_DNN=Build_Model_DNN_Text(X_train.shape[1],n_classes)
# print(model_DNN.summary())
# cross_fold_accuracy=[]
# for idx,(train_index, test_index) in enumerate(kf.split(X_train)):
# # print("TRAIN:", train_index, "TEST:", test_index)
# x_train=X_train[train_index]
# x_eval=X_train[test_index]
# y_train=Y_train[train_index]
# y_eval=Y_train[test_index]
# model_DNN.fit(x_train, y_train,validation_data=(x_eval, y_eval),
# epochs=20,
# batch_size=16,
# verbose=2)
# predicted = model_DNN.predict(x_test)
# predicted = np.argmax(predicted, axis=1)
# acc=accuracy_score(y_test,predicted)
# cross_fold_accuracy.append(acc)
# print('fold {} accuracy {}'.format(idx+1,acc*100))
# print('cross folds acc {} (+/-{})'.format(np.mean(cross_fold_accuracy)*100,np.std(cross_fold_accuracy)*100))
# final_data['fold']=np.zeros((final_data.shape[0]))
#
# for idx,(train_index, test_index) in enumerate(kf.split(final_data)):
# #
# final_data.at[test_index,'fold']=idx
# # print(X[test_index])
# outFilename='./Doc2Vec/cluster_{}_doc2vec_with_emolex.csv'.format(os.path.basename(args.input_file))
# final_data.to_csv(outFilename,index=False)
if __name__ == '__main__':
main()
| hyper_parm_turning=OrderedDict()
for n_clusters in range_n_clusters:
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
# clusterer = MiniBatchKMeans(n_clusters=n_clusters,init='k-means++', random_state=10)
from sklearn.mixture import GaussianMixture
# Predict GMM cluster membership
clusterer = GaussianMixture(n_components=n_clusters, random_state=10)
# from sklearn.cluster import AgglomerativeClustering
# clusterer = AgglomerativeClustering(n_clusters=n_clusters)
cluster_labels = clusterer.fit_predict(X)
labels="cluster_labels_{}".format(n_clusters)
if not labels in df.keys():
df[labels]=cluster_labels
sample_dist_std=np.std(df.groupby(labels).size())
sample_dist_avrg=np.median(df.groupby(labels).size())
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
if not 'n_clusters' in hyper_parm_turning.keys():
hyper_parm_turning['n_clusters']=[n_clusters]
else:
hyper_parm_turning['n_clusters'].append(n_clusters)
if not 'silhouette_avg' in hyper_parm_turning.keys():
hyper_parm_turning['silhouette_avg']=[silhouette_avg]
else:
hyper_parm_turning['silhouette_avg'].append(silhouette_avg)
if not 'sample_dist_std' in hyper_parm_turning.keys():
hyper_parm_turning['sample_dist_std']=[sample_dist_std]
else:
hyper_parm_turning['sample_dist_std'].append(sample_dist_std)
if not 'sample_dist_avrg' in hyper_parm_turning.keys():
hyper_parm_turning['sample_dist_avrg']=[sample_dist_avrg]
else:
hyper_parm_turning['sample_dist_avrg'].append(sample_dist_avrg)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
return df,hyper_parm_turning | identifier_body |
sentiment_clustering_speech_classification_v0.py | import nltk
import sklearn_crfsuite
from sklearn_crfsuite import metrics
import pandas as pd
from sklearn.preprocessing import label_binarize
import string
# nltk.download('conll2002')
flatten = lambda l: [item for sublist in l for item in sublist]
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
import os
import sys
from sklearn.preprocessing import LabelEncoder
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
import argparse
import matplotlib.cm as cm
import codecs
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
# nltk.corpus.conll2002.fileids()
from tqdm import tqdm_notebook as tqdm
from tqdm import trange
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import scale
from gensim.models.word2vec import Word2Vec
import gensim
import random
from collections import OrderedDict
from sklearn.model_selection import KFold
# classifier information
from keras.layers import Dropout, Dense
from keras.models import Sequential
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
LabeledSentence = gensim.models.doc2vec.LabeledSentence
import hdbscan
# classifier information
from keras.layers import Input
from keras.models import Model
from keras.layers import Dropout, Dense
from keras.models import Sequential
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import hdbscan
from sklearn.cluster import MiniBatchKMeans
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
def model_ae(X_train,x_test,n=300,encoding_dim=32):
# http://gradientdescending.com/pca-vs-autoencoders-for-dimensionality-reduction/
# r program
# this is our input placeholder
input = Input(shape=(n,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(n, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input, decoded)
# this model maps an input to its encoded representation
encoder = Model(input, encoded)
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(X_train, X_train,
epochs=20,
batch_size=32,
shuffle=True,
validation_data=(x_test, x_test))
return encoder
def call_silhout_(X,df,range_n_clusters):
hyper_parm_turning=OrderedDict()
for n_clusters in range_n_clusters:
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
# clusterer = MiniBatchKMeans(n_clusters=n_clusters,init='k-means++', random_state=10)
from sklearn.mixture import GaussianMixture
# Predict GMM cluster membership
clusterer = GaussianMixture(n_components=n_clusters, random_state=10)
# from sklearn.cluster import AgglomerativeClustering
# clusterer = AgglomerativeClustering(n_clusters=n_clusters)
cluster_labels = clusterer.fit_predict(X)
labels="cluster_labels_{}".format(n_clusters)
if not labels in df.keys():
df[labels]=cluster_labels
sample_dist_std=np.std(df.groupby(labels).size())
sample_dist_avrg=np.median(df.groupby(labels).size())
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
if not 'n_clusters' in hyper_parm_turning.keys():
hyper_parm_turning['n_clusters']=[n_clusters]
else:
hyper_parm_turning['n_clusters'].append(n_clusters)
if not 'silhouette_avg' in hyper_parm_turning.keys():
hyper_parm_turning['silhouette_avg']=[silhouette_avg]
else:
hyper_parm_turning['silhouette_avg'].append(silhouette_avg)
if not 'sample_dist_std' in hyper_parm_turning.keys():
hyper_parm_turning['sample_dist_std']=[sample_dist_std]
else:
hyper_parm_turning['sample_dist_std'].append(sample_dist_std)
if not 'sample_dist_avrg' in hyper_parm_turning.keys():
hyper_parm_turning['sample_dist_avrg']=[sample_dist_avrg]
else:
|
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
return df,hyper_parm_turning
def main():
parser = argparse.ArgumentParser(description="")
# Add options
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
# Add arguments
parser.add_argument("input_file", help="The input file to be projected")
# parser.add_argument("speech_feats_file", help="The input file to be projected")
# parser.add_argument("out_path_file", help="The input file to be projected")
args = parser.parse_args()
df_=pd.read_csv(args.input_file)
# print(df_.head())
df_doc2vec=df_.copy()
df_doc2vec=df_doc2vec.drop(['utterance'], axis=1)
# print(df_doc2vec.columns.to_list())
# df_['sentence_label']=sentence_emotion_labeling
df_doc2vec = df_doc2vec[df_doc2vec.columns[:300]]
print('loading the database')
# print(df_doc2vec.head())
print(df_doc2vec.shape)
from sklearn.preprocessing import scale
train_vecs = scale(df_doc2vec)
print('scaling the data')
#using pca as dimension reduction technique
PCA_model = PCA(.90, random_state=42)
X_standard = PCA_model.fit_transform(train_vecs)*(-1)
print(X_standard.shape)
# Single VD
# from numpy import array
# from sklearn.decomposition import TruncatedSVD
# TruncatedSVD_model=TruncatedSVD(n_components=3)
# X_standard = TruncatedSVD_model.fit_transform(train_vecs)
# using T-distributed Stochastic Neighbor Embedding (T-SNE)
# from sklearn.manifold import TSNE
# X_standard = TSNE(n_components=3).fit_transform(train_vecs)
# from sklearn.decomposition import NMF
# NMF_model=NMF(n_components=3)
# X_standard = NMF_model.fit_transform(train_vecs)
# from sklearn import random_projection
# X_standard = random_projection.GaussianRandomProjection(n_components=2).fit_transform(X_standard)
# X_train,x_test,Y_train,y_test=train_test_split(train_vecs, df_['utterance'].to_list(),test_size=0.2)
# encodeing=model_ae(X_train,x_test)
# X_standard=scale(encodeing.predict(train_vecs))
# print(X_standard)
# print(PCA_model.explained_variance_ratio_)
# print(TruncatedSVD_model.explained_variance_ratio_)
# print(NMF_model.explained_variance_ratio_)
# clustering
range_n_clusters =np.arange(20,22,+1)
# # print(df_.shape)
X_labeled,hyper_parm_turning=call_silhout_(X_standard,df_,range_n_clusters)
# print(X_labeled.head())
X_labeled['utterance']=df_.index.to_list()
# # X_labeled['sentence_label']=sentence_emotion_labeling
cluster_='cluster_labels_20'
# cluster_labeling=X_labeled[['utterance','sentence_label',cluster_]].groupby(cluster_).size()
cluster_labeling=X_labeled[['utterance',cluster_]].groupby(cluster_).size()
print(cluster_labeling)
hyper_parm_turning=pd.DataFrame(hyper_parm_turning)
# Sort the rows of dataframe by column 'Name'
hyper_parm_turning = hyper_parm_turning.sort_values(by =['silhouette_avg','sample_dist_std'],ascending=False)
print("Contents of Sorted Dataframe based on a single column 'silhouette_avg' & 'sample_dist_std' : ")
print(hyper_parm_turning)
# print(hyper_parm_turning)
# cluster=''
# outPutData=OrderedDict()
# for idx,group in cluster_labeling:
# if cluster!=group[cluster_].to_list()[0] and group.shape[0]>80 :
# cluster=group[cluster_].to_list()[0]
# print('the shape of the group {} cluster name {}'.format(group.shape,cluster))
# # print(group['utterance'].to_list())
# # with codecs.open('./Doc2Vec/cluster_{}_doc2vec_with_emolex.scp'.format(cluster),'w','utf-8') as cluster:
# # for utt,label in zip(group['utterance'].to_list(),group['sentence_label'].to_list()):
# for utt in group['utterance'].to_list():#,group['sentence_label'].to_list()):
# if not 'utterance' in outPutData.keys():
# outPutData['utterance']=[utt]
# else:
# outPutData['utterance'].append(utt)
# # if not 'emotion_label' in outPutData.keys():
# # outPutData['emotion_label']=[label]
# # else:
# # outPutData['emotion_label'].append(label)
# if not 'cluster' in outPutData.keys():
# outPutData['cluster']=[cluster]
# else:
# outPutData['cluster'].append(cluster)
# final_data=pd.DataFrame(outPutData)
# speech_features=pd.read_csv(args.speech_feats_file)
# # print(speech_features['utterance'].to_list())
# # data_with_feat=speech_features.copy()
# features=speech_features.columns.drop('utterance')
# feat_data = pd.DataFrame(0, index=final_data.index, columns=features)
# for i,row in final_data.iterrows():
# utterance=getattr(row,'utterance')
# feats = speech_features[speech_features.utterance == utterance]
# for feat in list(features):
# feat_data.at[i,feat]=feats[feat]
# final_data = pd.concat([final_data, feat_data], axis=1)
# convert_dict={
# 'cluster':'category',
# }
# # # print(cat_list)
# final_data = final_data.astype(convert_dict)
# final_data['cluster_cat'] = final_data.cluster.cat.codes
# print(final_data.head())
# kf = KFold(n_splits=5,shuffle=True)
# X=final_data[features].values
# Y=final_data['cluster_cat'].values
# X_train,x_test,Y_train,y_test=train_test_split(X,Y,test_size=0.2)
# # build the model
# n_classes=len(set(Y))
# print(n_classes)
# model_DNN=Build_Model_DNN_Text(X_train.shape[1],n_classes)
# print(model_DNN.summary())
# cross_fold_accuracy=[]
# for idx,(train_index, test_index) in enumerate(kf.split(X_train)):
# # print("TRAIN:", train_index, "TEST:", test_index)
# x_train=X_train[train_index]
# x_eval=X_train[test_index]
# y_train=Y_train[train_index]
# y_eval=Y_train[test_index]
# model_DNN.fit(x_train, y_train,validation_data=(x_eval, y_eval),
# epochs=20,
# batch_size=16,
# verbose=2)
# predicted = model_DNN.predict(x_test)
# predicted = np.argmax(predicted, axis=1)
# acc=accuracy_score(y_test,predicted)
# cross_fold_accuracy.append(acc)
# print('fold {} accuracy {}'.format(idx+1,acc*100))
# print('cross folds acc {} (+/-{})'.format(np.mean(cross_fold_accuracy)*100,np.std(cross_fold_accuracy)*100))
# final_data['fold']=np.zeros((final_data.shape[0]))
#
# for idx,(train_index, test_index) in enumerate(kf.split(final_data)):
# #
# final_data.at[test_index,'fold']=idx
# # print(X[test_index])
# outFilename='./Doc2Vec/cluster_{}_doc2vec_with_emolex.csv'.format(os.path.basename(args.input_file))
# final_data.to_csv(outFilename,index=False)
if __name__ == '__main__':
main()
| hyper_parm_turning['sample_dist_avrg'].append(sample_dist_avrg) | conditional_block |
sentiment_clustering_speech_classification_v0.py | import nltk
import sklearn_crfsuite
from sklearn_crfsuite import metrics
import pandas as pd
from sklearn.preprocessing import label_binarize
import string
# nltk.download('conll2002')
flatten = lambda l: [item for sublist in l for item in sublist]
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
import os
import sys
from sklearn.preprocessing import LabelEncoder
from math import sqrt
from sklearn.metrics import mean_squared_error | from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
import argparse
import matplotlib.cm as cm
import codecs
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
# nltk.corpus.conll2002.fileids()
from tqdm import tqdm_notebook as tqdm
from tqdm import trange
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import scale
from gensim.models.word2vec import Word2Vec
import gensim
import random
from collections import OrderedDict
from sklearn.model_selection import KFold
# classifier information
from keras.layers import Dropout, Dense
from keras.models import Sequential
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
LabeledSentence = gensim.models.doc2vec.LabeledSentence
import hdbscan
# classifier information
from keras.layers import Input
from keras.models import Model
from keras.layers import Dropout, Dense
from keras.models import Sequential
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import hdbscan
from sklearn.cluster import MiniBatchKMeans
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
def model_ae(X_train,x_test,n=300,encoding_dim=32):
# http://gradientdescending.com/pca-vs-autoencoders-for-dimensionality-reduction/
# r program
# this is our input placeholder
input = Input(shape=(n,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(n, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input, decoded)
# this model maps an input to its encoded representation
encoder = Model(input, encoded)
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(X_train, X_train,
epochs=20,
batch_size=32,
shuffle=True,
validation_data=(x_test, x_test))
return encoder
def call_silhout_(X,df,range_n_clusters):
hyper_parm_turning=OrderedDict()
for n_clusters in range_n_clusters:
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
# clusterer = MiniBatchKMeans(n_clusters=n_clusters,init='k-means++', random_state=10)
from sklearn.mixture import GaussianMixture
# Predict GMM cluster membership
clusterer = GaussianMixture(n_components=n_clusters, random_state=10)
# from sklearn.cluster import AgglomerativeClustering
# clusterer = AgglomerativeClustering(n_clusters=n_clusters)
cluster_labels = clusterer.fit_predict(X)
labels="cluster_labels_{}".format(n_clusters)
if not labels in df.keys():
df[labels]=cluster_labels
sample_dist_std=np.std(df.groupby(labels).size())
sample_dist_avrg=np.median(df.groupby(labels).size())
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
if not 'n_clusters' in hyper_parm_turning.keys():
hyper_parm_turning['n_clusters']=[n_clusters]
else:
hyper_parm_turning['n_clusters'].append(n_clusters)
if not 'silhouette_avg' in hyper_parm_turning.keys():
hyper_parm_turning['silhouette_avg']=[silhouette_avg]
else:
hyper_parm_turning['silhouette_avg'].append(silhouette_avg)
if not 'sample_dist_std' in hyper_parm_turning.keys():
hyper_parm_turning['sample_dist_std']=[sample_dist_std]
else:
hyper_parm_turning['sample_dist_std'].append(sample_dist_std)
if not 'sample_dist_avrg' in hyper_parm_turning.keys():
hyper_parm_turning['sample_dist_avrg']=[sample_dist_avrg]
else:
hyper_parm_turning['sample_dist_avrg'].append(sample_dist_avrg)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
return df,hyper_parm_turning
def main():
parser = argparse.ArgumentParser(description="")
# Add options
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
# Add arguments
parser.add_argument("input_file", help="The input file to be projected")
# parser.add_argument("speech_feats_file", help="The input file to be projected")
# parser.add_argument("out_path_file", help="The input file to be projected")
args = parser.parse_args()
df_=pd.read_csv(args.input_file)
# print(df_.head())
df_doc2vec=df_.copy()
df_doc2vec=df_doc2vec.drop(['utterance'], axis=1)
# print(df_doc2vec.columns.to_list())
# df_['sentence_label']=sentence_emotion_labeling
df_doc2vec = df_doc2vec[df_doc2vec.columns[:300]]
print('loading the database')
# print(df_doc2vec.head())
print(df_doc2vec.shape)
from sklearn.preprocessing import scale
train_vecs = scale(df_doc2vec)
print('scaling the data')
#using pca as dimension reduction technique
PCA_model = PCA(.90, random_state=42)
X_standard = PCA_model.fit_transform(train_vecs)*(-1)
print(X_standard.shape)
# Single VD
# from numpy import array
# from sklearn.decomposition import TruncatedSVD
# TruncatedSVD_model=TruncatedSVD(n_components=3)
# X_standard = TruncatedSVD_model.fit_transform(train_vecs)
# using T-distributed Stochastic Neighbor Embedding (T-SNE)
# from sklearn.manifold import TSNE
# X_standard = TSNE(n_components=3).fit_transform(train_vecs)
# from sklearn.decomposition import NMF
# NMF_model=NMF(n_components=3)
# X_standard = NMF_model.fit_transform(train_vecs)
# from sklearn import random_projection
# X_standard = random_projection.GaussianRandomProjection(n_components=2).fit_transform(X_standard)
# X_train,x_test,Y_train,y_test=train_test_split(train_vecs, df_['utterance'].to_list(),test_size=0.2)
# encodeing=model_ae(X_train,x_test)
# X_standard=scale(encodeing.predict(train_vecs))
# print(X_standard)
# print(PCA_model.explained_variance_ratio_)
# print(TruncatedSVD_model.explained_variance_ratio_)
# print(NMF_model.explained_variance_ratio_)
# clustering
range_n_clusters =np.arange(20,22,+1)
# # print(df_.shape)
X_labeled,hyper_parm_turning=call_silhout_(X_standard,df_,range_n_clusters)
# print(X_labeled.head())
X_labeled['utterance']=df_.index.to_list()
# # X_labeled['sentence_label']=sentence_emotion_labeling
cluster_='cluster_labels_20'
# cluster_labeling=X_labeled[['utterance','sentence_label',cluster_]].groupby(cluster_).size()
cluster_labeling=X_labeled[['utterance',cluster_]].groupby(cluster_).size()
print(cluster_labeling)
hyper_parm_turning=pd.DataFrame(hyper_parm_turning)
# Sort the rows of dataframe by column 'Name'
hyper_parm_turning = hyper_parm_turning.sort_values(by =['silhouette_avg','sample_dist_std'],ascending=False)
print("Contents of Sorted Dataframe based on a single column 'silhouette_avg' & 'sample_dist_std' : ")
print(hyper_parm_turning)
# print(hyper_parm_turning)
# cluster=''
# outPutData=OrderedDict()
# for idx,group in cluster_labeling:
# if cluster!=group[cluster_].to_list()[0] and group.shape[0]>80 :
# cluster=group[cluster_].to_list()[0]
# print('the shape of the group {} cluster name {}'.format(group.shape,cluster))
# # print(group['utterance'].to_list())
# # with codecs.open('./Doc2Vec/cluster_{}_doc2vec_with_emolex.scp'.format(cluster),'w','utf-8') as cluster:
# # for utt,label in zip(group['utterance'].to_list(),group['sentence_label'].to_list()):
# for utt in group['utterance'].to_list():#,group['sentence_label'].to_list()):
# if not 'utterance' in outPutData.keys():
# outPutData['utterance']=[utt]
# else:
# outPutData['utterance'].append(utt)
# # if not 'emotion_label' in outPutData.keys():
# # outPutData['emotion_label']=[label]
# # else:
# # outPutData['emotion_label'].append(label)
# if not 'cluster' in outPutData.keys():
# outPutData['cluster']=[cluster]
# else:
# outPutData['cluster'].append(cluster)
# final_data=pd.DataFrame(outPutData)
# speech_features=pd.read_csv(args.speech_feats_file)
# # print(speech_features['utterance'].to_list())
# # data_with_feat=speech_features.copy()
# features=speech_features.columns.drop('utterance')
# feat_data = pd.DataFrame(0, index=final_data.index, columns=features)
# for i,row in final_data.iterrows():
# utterance=getattr(row,'utterance')
# feats = speech_features[speech_features.utterance == utterance]
# for feat in list(features):
# feat_data.at[i,feat]=feats[feat]
# final_data = pd.concat([final_data, feat_data], axis=1)
# convert_dict={
# 'cluster':'category',
# }
# # # print(cat_list)
# final_data = final_data.astype(convert_dict)
# final_data['cluster_cat'] = final_data.cluster.cat.codes
# print(final_data.head())
# kf = KFold(n_splits=5,shuffle=True)
# X=final_data[features].values
# Y=final_data['cluster_cat'].values
# X_train,x_test,Y_train,y_test=train_test_split(X,Y,test_size=0.2)
# # build the model
# n_classes=len(set(Y))
# print(n_classes)
# model_DNN=Build_Model_DNN_Text(X_train.shape[1],n_classes)
# print(model_DNN.summary())
# cross_fold_accuracy=[]
# for idx,(train_index, test_index) in enumerate(kf.split(X_train)):
# # print("TRAIN:", train_index, "TEST:", test_index)
# x_train=X_train[train_index]
# x_eval=X_train[test_index]
# y_train=Y_train[train_index]
# y_eval=Y_train[test_index]
# model_DNN.fit(x_train, y_train,validation_data=(x_eval, y_eval),
# epochs=20,
# batch_size=16,
# verbose=2)
# predicted = model_DNN.predict(x_test)
# predicted = np.argmax(predicted, axis=1)
# acc=accuracy_score(y_test,predicted)
# cross_fold_accuracy.append(acc)
# print('fold {} accuracy {}'.format(idx+1,acc*100))
# print('cross folds acc {} (+/-{})'.format(np.mean(cross_fold_accuracy)*100,np.std(cross_fold_accuracy)*100))
# final_data['fold']=np.zeros((final_data.shape[0]))
#
# for idx,(train_index, test_index) in enumerate(kf.split(final_data)):
# #
# final_data.at[test_index,'fold']=idx
# # print(X[test_index])
# outFilename='./Doc2Vec/cluster_{}_doc2vec_with_emolex.csv'.format(os.path.basename(args.input_file))
# final_data.to_csv(outFilename,index=False)
if __name__ == '__main__':
main() |
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize | random_line_split |
menus.ts | import logger from "../lib/logger";
import options from "../lib/options";
import { stringify } from "../lib/utils";
import * as menuIds from "../menuIds";
import castManager from "./castManager";
const _ = browser.i18n.getMessage;
const URL_PATTERN_HTTP = "http://*/*";
const URL_PATTERN_HTTPS = "https://*/*";
const URL_PATTERN_FILE = "file://*/*";
const URL_PATTERNS_REMOTE = [URL_PATTERN_HTTP, URL_PATTERN_HTTPS];
const URL_PATTERNS_ALL = [...URL_PATTERNS_REMOTE, URL_PATTERN_FILE];
type MenuId = string | number;
let menuIdCast: MenuId;
let menuIdCastMedia: MenuId;
let menuIdWhitelist: MenuId;
let menuIdWhitelistRecommended: MenuId;
/** Match patterns for the whitelist option menus. */
const whitelistChildMenuPatterns = new Map<MenuId, string>();
/** Handles initial menu setup. */
export async function initMenus() {
logger.info("init (menus)");
const opts = await options.getAll();
// Global "Cast..." menu item
menuIdCast = browser.menus.create({
contexts: ["browser_action", "page", "tools_menu"],
title: _("contextCast"),
documentUrlPatterns: ["http://*/*", "https://*/*"],
icons: { "16": "icons/icon.svg" } // browser_action context
});
// <video>/<audio> "Cast..." context menu item
menuIdCastMedia = browser.menus.create({
contexts: ["audio", "video", "image"],
title: _("contextCast"),
visible: opts.mediaEnabled,
targetUrlPatterns: opts.localMediaEnabled
? URL_PATTERNS_ALL
: URL_PATTERNS_REMOTE
});
menuIdWhitelist = browser.menus.create({
contexts: ["browser_action"],
title: _("contextAddToWhitelist"),
enabled: false
});
menuIdWhitelistRecommended = browser.menus.create({
title: _("contextAddToWhitelistRecommended"),
parentId: menuIdWhitelist
});
browser.menus.create({
type: "separator",
parentId: menuIdWhitelist
});
// Popup context menus
const createPopupMenu = (props: browser.menus._CreateCreateProperties) =>
browser.menus.create({
visible: false,
documentUrlPatterns: [`${browser.runtime.getURL("ui/popup")}/*`],
...props
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_PLAY_PAUSE,
title: _("popupMediaPlay")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_MUTE,
type: "checkbox",
title: _("popupMediaMute")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_SKIP_PREVIOUS,
title: _("popupMediaSkipPrevious")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_SKIP_NEXT,
title: _("popupMediaSkipNext")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_CC,
title: _("popupMediaSubtitlesCaptions")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_CC_OFF,
parentId: menuIds.POPUP_MEDIA_CC,
type: "radio",
title: _("popupMediaSubtitlesCaptionsOff")
});
createPopupMenu({ id: menuIds.POPUP_MEDIA_SEPARATOR, type: "separator" });
createPopupMenu({
id: menuIds.POPUP_CAST,
title: _("popupCastButtonTitle"),
icons: { 16: "icons/icon.svg" }
});
createPopupMenu({
id: menuIds.POPUP_STOP,
title: _("popupStopButtonTitle")
});
browser.menus.onShown.addListener(onMenuShown);
browser.menus.onClicked.addListener(onMenuClicked);
options.addEventListener("changed", async ev => {
const alteredOpts = ev.detail;
const newOpts = await options.getAll();
if (menuIdCastMedia && alteredOpts.includes("mediaEnabled")) {
browser.menus.update(menuIdCastMedia, {
visible: newOpts.mediaEnabled
});
}
if (menuIdCastMedia && alteredOpts.includes("localMediaEnabled")) {
browser.menus.update(menuIdCastMedia, {
targetUrlPatterns: newOpts.localMediaEnabled
? URL_PATTERNS_ALL
: URL_PATTERNS_REMOTE
});
}
});
}
/** Handle updating menus when shown. */
async function onMenuShown(info: browser.menus._OnShownInfo) {
const menuIds = info.menuIds as unknown as number[];
// Only rebuild menus if whitelist menu present
if (menuIds.includes(menuIdWhitelist as number)) {
updateWhitelistMenu(info.pageUrl);
return;
}
}
/** Handle menu click events */
async function onMenuClicked(
info: browser.menus.OnClickData,
tab?: browser.tabs.Tab
) {
// Handle whitelist menus
if (info.parentMenuItemId === menuIdWhitelist) {
const pattern = whitelistChildMenuPatterns.get(info.menuItemId);
if (!pattern) {
throw logger.error(
`Whitelist pattern not found for menu item ID ${info.menuItemId}.`
);
}
const whitelist = await options.get("siteWhitelist");
if (!whitelist.find(item => item.pattern === pattern)) {
// Add to whitelist and update options
whitelist.push({ pattern, isEnabled: true });
await options.set("siteWhitelist", whitelist);
}
return;
}
if (tab?.id === undefined) {
logger.error("Menu handler tab ID not found.");
return;
}
switch (info.menuItemId) {
case menuIdCast: {
castManager.triggerCast(tab.id, info.frameId);
break;
}
case menuIdCastMedia:
if (info.srcUrl) {
await browser.tabs.executeScript(tab.id, {
code: stringify`
window.mediaUrl = ${info.srcUrl};
window.targetElementId = ${info.targetElementId};
`,
frameId: info.frameId
});
await browser.tabs.executeScript(tab.id, {
file: "cast/senders/media.js",
frameId: info.frameId
});
}
break;
}
}
/** Handles updating the whitelist menus for a given URL */
async function | (pageUrl?: string) {
/**
* If page URL doesn't exist, we're not on a page and have nothing
* to whitelist, so disable the menu and return.
*/
if (!pageUrl) {
browser.menus.update(menuIdWhitelist, {
enabled: false
});
browser.menus.refresh();
return;
}
const url = new URL(pageUrl);
const urlHasOrigin = url.origin !== "null";
/**
* If the page URL doesn't have an origin, we're not on a
* remote page and have nothing to whitelist, so disable the
* menu and return.
*/
if (!urlHasOrigin) {
browser.menus.update(menuIdWhitelist, {
enabled: false
});
browser.menus.refresh();
return;
}
// Enable the whitelist menu
browser.menus.update(menuIdWhitelist, {
enabled: true
});
for (const [menuId] of whitelistChildMenuPatterns) {
// Clear all page-specific temporary menus
if (menuId !== menuIdWhitelistRecommended) {
browser.menus.remove(menuId);
}
whitelistChildMenuPatterns.delete(menuId);
}
// If there is more than one subdomain, get the base domain
const baseDomain =
(url.hostname.match(/\./g) || []).length > 1
? url.hostname.substring(url.hostname.indexOf(".") + 1)
: url.hostname;
const portlessOrigin = `${url.protocol}//${url.hostname}`;
const patternRecommended = `${portlessOrigin}/*`;
const patternSearch = `${portlessOrigin}${url.pathname}${url.search}`;
const patternWildcardProtocol = `*://${url.hostname}/*`;
const patternWildcardSubdomain = `${url.protocol}//*.${baseDomain}/*`;
const patternWildcardProtocolAndSubdomain = `*://*.${baseDomain}/*`;
// Update recommended menu item
browser.menus.update(menuIdWhitelistRecommended, {
title: _("contextAddToWhitelistRecommended", patternRecommended)
});
whitelistChildMenuPatterns.set(
menuIdWhitelistRecommended,
patternRecommended
);
if (url.search) {
const whitelistSearchMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternSearch),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(whitelistSearchMenuId, patternSearch);
}
/**
* Split URL path into segments and add menu items for each
* partial path as the segments are removed.
*/
{
const pathTrimmed = url.pathname.endsWith("/")
? url.pathname.substring(0, url.pathname.length - 1)
: url.pathname;
const pathSegments = pathTrimmed
.split("/")
.filter(segment => segment)
.reverse();
if (pathSegments.length) {
for (let i = 0; i < pathSegments.length; i++) {
const partialPath = pathSegments.slice(i).reverse().join("/");
const pattern = `${portlessOrigin}/${partialPath}/*`;
const partialPathMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", pattern),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(partialPathMenuId, pattern);
}
}
}
const wildcardProtocolMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternWildcardProtocol),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardProtocolMenuId,
patternWildcardProtocol
);
const wildcardSubdomainMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternWildcardSubdomain),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardSubdomainMenuId,
patternWildcardSubdomain
);
const wildcardProtocolAndSubdomainMenuId = browser.menus.create({
title: _(
"contextAddToWhitelistAdvancedAdd",
patternWildcardProtocolAndSubdomain
),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardProtocolAndSubdomainMenuId,
patternWildcardProtocolAndSubdomain
);
await browser.menus.refresh();
}
| updateWhitelistMenu | identifier_name |
menus.ts | import logger from "../lib/logger";
import options from "../lib/options";
import { stringify } from "../lib/utils";
import * as menuIds from "../menuIds";
import castManager from "./castManager";
const _ = browser.i18n.getMessage;
const URL_PATTERN_HTTP = "http://*/*";
const URL_PATTERN_HTTPS = "https://*/*";
const URL_PATTERN_FILE = "file://*/*";
const URL_PATTERNS_REMOTE = [URL_PATTERN_HTTP, URL_PATTERN_HTTPS];
const URL_PATTERNS_ALL = [...URL_PATTERNS_REMOTE, URL_PATTERN_FILE];
type MenuId = string | number;
let menuIdCast: MenuId;
let menuIdCastMedia: MenuId;
let menuIdWhitelist: MenuId;
let menuIdWhitelistRecommended: MenuId;
/** Match patterns for the whitelist option menus. */
const whitelistChildMenuPatterns = new Map<MenuId, string>();
/** Handles initial menu setup. */
export async function initMenus() {
logger.info("init (menus)");
const opts = await options.getAll();
// Global "Cast..." menu item
menuIdCast = browser.menus.create({
contexts: ["browser_action", "page", "tools_menu"],
title: _("contextCast"),
documentUrlPatterns: ["http://*/*", "https://*/*"],
icons: { "16": "icons/icon.svg" } // browser_action context
});
// <video>/<audio> "Cast..." context menu item
menuIdCastMedia = browser.menus.create({
contexts: ["audio", "video", "image"],
title: _("contextCast"),
visible: opts.mediaEnabled,
targetUrlPatterns: opts.localMediaEnabled
? URL_PATTERNS_ALL
: URL_PATTERNS_REMOTE
});
menuIdWhitelist = browser.menus.create({
contexts: ["browser_action"],
title: _("contextAddToWhitelist"),
enabled: false
});
menuIdWhitelistRecommended = browser.menus.create({
title: _("contextAddToWhitelistRecommended"),
parentId: menuIdWhitelist
});
browser.menus.create({
type: "separator",
parentId: menuIdWhitelist
});
// Popup context menus
const createPopupMenu = (props: browser.menus._CreateCreateProperties) =>
browser.menus.create({
visible: false,
documentUrlPatterns: [`${browser.runtime.getURL("ui/popup")}/*`],
...props
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_PLAY_PAUSE,
title: _("popupMediaPlay")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_MUTE,
type: "checkbox",
title: _("popupMediaMute")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_SKIP_PREVIOUS,
title: _("popupMediaSkipPrevious")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_SKIP_NEXT,
title: _("popupMediaSkipNext")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_CC,
title: _("popupMediaSubtitlesCaptions")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_CC_OFF,
parentId: menuIds.POPUP_MEDIA_CC,
type: "radio",
title: _("popupMediaSubtitlesCaptionsOff")
});
createPopupMenu({ id: menuIds.POPUP_MEDIA_SEPARATOR, type: "separator" });
createPopupMenu({
id: menuIds.POPUP_CAST,
title: _("popupCastButtonTitle"),
icons: { 16: "icons/icon.svg" }
});
createPopupMenu({
id: menuIds.POPUP_STOP,
title: _("popupStopButtonTitle")
});
browser.menus.onShown.addListener(onMenuShown);
browser.menus.onClicked.addListener(onMenuClicked);
options.addEventListener("changed", async ev => {
const alteredOpts = ev.detail;
const newOpts = await options.getAll();
if (menuIdCastMedia && alteredOpts.includes("mediaEnabled")) {
browser.menus.update(menuIdCastMedia, {
visible: newOpts.mediaEnabled
});
}
if (menuIdCastMedia && alteredOpts.includes("localMediaEnabled")) |
});
}
/** Handle updating menus when shown. */
async function onMenuShown(info: browser.menus._OnShownInfo) {
const menuIds = info.menuIds as unknown as number[];
// Only rebuild menus if whitelist menu present
if (menuIds.includes(menuIdWhitelist as number)) {
updateWhitelistMenu(info.pageUrl);
return;
}
}
/** Handle menu click events */
async function onMenuClicked(
info: browser.menus.OnClickData,
tab?: browser.tabs.Tab
) {
// Handle whitelist menus
if (info.parentMenuItemId === menuIdWhitelist) {
const pattern = whitelistChildMenuPatterns.get(info.menuItemId);
if (!pattern) {
throw logger.error(
`Whitelist pattern not found for menu item ID ${info.menuItemId}.`
);
}
const whitelist = await options.get("siteWhitelist");
if (!whitelist.find(item => item.pattern === pattern)) {
// Add to whitelist and update options
whitelist.push({ pattern, isEnabled: true });
await options.set("siteWhitelist", whitelist);
}
return;
}
if (tab?.id === undefined) {
logger.error("Menu handler tab ID not found.");
return;
}
switch (info.menuItemId) {
case menuIdCast: {
castManager.triggerCast(tab.id, info.frameId);
break;
}
case menuIdCastMedia:
if (info.srcUrl) {
await browser.tabs.executeScript(tab.id, {
code: stringify`
window.mediaUrl = ${info.srcUrl};
window.targetElementId = ${info.targetElementId};
`,
frameId: info.frameId
});
await browser.tabs.executeScript(tab.id, {
file: "cast/senders/media.js",
frameId: info.frameId
});
}
break;
}
}
/** Handles updating the whitelist menus for a given URL */
async function updateWhitelistMenu(pageUrl?: string) {
/**
* If page URL doesn't exist, we're not on a page and have nothing
* to whitelist, so disable the menu and return.
*/
if (!pageUrl) {
browser.menus.update(menuIdWhitelist, {
enabled: false
});
browser.menus.refresh();
return;
}
const url = new URL(pageUrl);
const urlHasOrigin = url.origin !== "null";
/**
* If the page URL doesn't have an origin, we're not on a
* remote page and have nothing to whitelist, so disable the
* menu and return.
*/
if (!urlHasOrigin) {
browser.menus.update(menuIdWhitelist, {
enabled: false
});
browser.menus.refresh();
return;
}
// Enable the whitelist menu
browser.menus.update(menuIdWhitelist, {
enabled: true
});
for (const [menuId] of whitelistChildMenuPatterns) {
// Clear all page-specific temporary menus
if (menuId !== menuIdWhitelistRecommended) {
browser.menus.remove(menuId);
}
whitelistChildMenuPatterns.delete(menuId);
}
// If there is more than one subdomain, get the base domain
const baseDomain =
(url.hostname.match(/\./g) || []).length > 1
? url.hostname.substring(url.hostname.indexOf(".") + 1)
: url.hostname;
const portlessOrigin = `${url.protocol}//${url.hostname}`;
const patternRecommended = `${portlessOrigin}/*`;
const patternSearch = `${portlessOrigin}${url.pathname}${url.search}`;
const patternWildcardProtocol = `*://${url.hostname}/*`;
const patternWildcardSubdomain = `${url.protocol}//*.${baseDomain}/*`;
const patternWildcardProtocolAndSubdomain = `*://*.${baseDomain}/*`;
// Update recommended menu item
browser.menus.update(menuIdWhitelistRecommended, {
title: _("contextAddToWhitelistRecommended", patternRecommended)
});
whitelistChildMenuPatterns.set(
menuIdWhitelistRecommended,
patternRecommended
);
if (url.search) {
const whitelistSearchMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternSearch),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(whitelistSearchMenuId, patternSearch);
}
/**
* Split URL path into segments and add menu items for each
* partial path as the segments are removed.
*/
{
const pathTrimmed = url.pathname.endsWith("/")
? url.pathname.substring(0, url.pathname.length - 1)
: url.pathname;
const pathSegments = pathTrimmed
.split("/")
.filter(segment => segment)
.reverse();
if (pathSegments.length) {
for (let i = 0; i < pathSegments.length; i++) {
const partialPath = pathSegments.slice(i).reverse().join("/");
const pattern = `${portlessOrigin}/${partialPath}/*`;
const partialPathMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", pattern),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(partialPathMenuId, pattern);
}
}
}
const wildcardProtocolMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternWildcardProtocol),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardProtocolMenuId,
patternWildcardProtocol
);
const wildcardSubdomainMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternWildcardSubdomain),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardSubdomainMenuId,
patternWildcardSubdomain
);
const wildcardProtocolAndSubdomainMenuId = browser.menus.create({
title: _(
"contextAddToWhitelistAdvancedAdd",
patternWildcardProtocolAndSubdomain
),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardProtocolAndSubdomainMenuId,
patternWildcardProtocolAndSubdomain
);
await browser.menus.refresh();
}
| {
browser.menus.update(menuIdCastMedia, {
targetUrlPatterns: newOpts.localMediaEnabled
? URL_PATTERNS_ALL
: URL_PATTERNS_REMOTE
});
} | conditional_block |
menus.ts | import logger from "../lib/logger";
import options from "../lib/options";
import { stringify } from "../lib/utils";
import * as menuIds from "../menuIds";
import castManager from "./castManager";
const _ = browser.i18n.getMessage;
const URL_PATTERN_HTTP = "http://*/*";
const URL_PATTERN_HTTPS = "https://*/*";
const URL_PATTERN_FILE = "file://*/*";
const URL_PATTERNS_REMOTE = [URL_PATTERN_HTTP, URL_PATTERN_HTTPS];
const URL_PATTERNS_ALL = [...URL_PATTERNS_REMOTE, URL_PATTERN_FILE];
type MenuId = string | number;
let menuIdCast: MenuId;
let menuIdCastMedia: MenuId;
let menuIdWhitelist: MenuId;
let menuIdWhitelistRecommended: MenuId;
/** Match patterns for the whitelist option menus. */
const whitelistChildMenuPatterns = new Map<MenuId, string>();
/** Handles initial menu setup. */
export async function initMenus() {
logger.info("init (menus)");
const opts = await options.getAll();
// Global "Cast..." menu item
menuIdCast = browser.menus.create({
contexts: ["browser_action", "page", "tools_menu"],
title: _("contextCast"),
documentUrlPatterns: ["http://*/*", "https://*/*"],
icons: { "16": "icons/icon.svg" } // browser_action context
});
// <video>/<audio> "Cast..." context menu item
menuIdCastMedia = browser.menus.create({
contexts: ["audio", "video", "image"],
title: _("contextCast"),
visible: opts.mediaEnabled,
targetUrlPatterns: opts.localMediaEnabled
? URL_PATTERNS_ALL
: URL_PATTERNS_REMOTE
});
menuIdWhitelist = browser.menus.create({
contexts: ["browser_action"],
title: _("contextAddToWhitelist"),
enabled: false
});
menuIdWhitelistRecommended = browser.menus.create({
title: _("contextAddToWhitelistRecommended"),
parentId: menuIdWhitelist
});
browser.menus.create({
type: "separator",
parentId: menuIdWhitelist
});
// Popup context menus
const createPopupMenu = (props: browser.menus._CreateCreateProperties) =>
browser.menus.create({
visible: false,
documentUrlPatterns: [`${browser.runtime.getURL("ui/popup")}/*`],
...props
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_PLAY_PAUSE,
title: _("popupMediaPlay")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_MUTE,
type: "checkbox",
title: _("popupMediaMute")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_SKIP_PREVIOUS,
title: _("popupMediaSkipPrevious")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_SKIP_NEXT,
title: _("popupMediaSkipNext")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_CC,
title: _("popupMediaSubtitlesCaptions")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_CC_OFF,
parentId: menuIds.POPUP_MEDIA_CC,
type: "radio",
title: _("popupMediaSubtitlesCaptionsOff")
});
createPopupMenu({ id: menuIds.POPUP_MEDIA_SEPARATOR, type: "separator" });
createPopupMenu({
id: menuIds.POPUP_CAST,
title: _("popupCastButtonTitle"),
icons: { 16: "icons/icon.svg" }
});
createPopupMenu({
id: menuIds.POPUP_STOP,
title: _("popupStopButtonTitle")
});
browser.menus.onShown.addListener(onMenuShown);
browser.menus.onClicked.addListener(onMenuClicked);
options.addEventListener("changed", async ev => {
const alteredOpts = ev.detail;
const newOpts = await options.getAll();
if (menuIdCastMedia && alteredOpts.includes("mediaEnabled")) {
browser.menus.update(menuIdCastMedia, {
visible: newOpts.mediaEnabled
});
}
if (menuIdCastMedia && alteredOpts.includes("localMediaEnabled")) {
browser.menus.update(menuIdCastMedia, {
targetUrlPatterns: newOpts.localMediaEnabled
? URL_PATTERNS_ALL
: URL_PATTERNS_REMOTE
});
}
});
}
/** Handle updating menus when shown. */
async function onMenuShown(info: browser.menus._OnShownInfo) |
/** Handle menu click events */
async function onMenuClicked(
info: browser.menus.OnClickData,
tab?: browser.tabs.Tab
) {
// Handle whitelist menus
if (info.parentMenuItemId === menuIdWhitelist) {
const pattern = whitelistChildMenuPatterns.get(info.menuItemId);
if (!pattern) {
throw logger.error(
`Whitelist pattern not found for menu item ID ${info.menuItemId}.`
);
}
const whitelist = await options.get("siteWhitelist");
if (!whitelist.find(item => item.pattern === pattern)) {
// Add to whitelist and update options
whitelist.push({ pattern, isEnabled: true });
await options.set("siteWhitelist", whitelist);
}
return;
}
if (tab?.id === undefined) {
logger.error("Menu handler tab ID not found.");
return;
}
switch (info.menuItemId) {
case menuIdCast: {
castManager.triggerCast(tab.id, info.frameId);
break;
}
case menuIdCastMedia:
if (info.srcUrl) {
await browser.tabs.executeScript(tab.id, {
code: stringify`
window.mediaUrl = ${info.srcUrl};
window.targetElementId = ${info.targetElementId};
`,
frameId: info.frameId
});
await browser.tabs.executeScript(tab.id, {
file: "cast/senders/media.js",
frameId: info.frameId
});
}
break;
}
}
/** Handles updating the whitelist menus for a given URL */
async function updateWhitelistMenu(pageUrl?: string) {
/**
* If page URL doesn't exist, we're not on a page and have nothing
* to whitelist, so disable the menu and return.
*/
if (!pageUrl) {
browser.menus.update(menuIdWhitelist, {
enabled: false
});
browser.menus.refresh();
return;
}
const url = new URL(pageUrl);
const urlHasOrigin = url.origin !== "null";
/**
* If the page URL doesn't have an origin, we're not on a
* remote page and have nothing to whitelist, so disable the
* menu and return.
*/
if (!urlHasOrigin) {
browser.menus.update(menuIdWhitelist, {
enabled: false
});
browser.menus.refresh();
return;
}
// Enable the whitelist menu
browser.menus.update(menuIdWhitelist, {
enabled: true
});
for (const [menuId] of whitelistChildMenuPatterns) {
// Clear all page-specific temporary menus
if (menuId !== menuIdWhitelistRecommended) {
browser.menus.remove(menuId);
}
whitelistChildMenuPatterns.delete(menuId);
}
// If there is more than one subdomain, get the base domain
const baseDomain =
(url.hostname.match(/\./g) || []).length > 1
? url.hostname.substring(url.hostname.indexOf(".") + 1)
: url.hostname;
const portlessOrigin = `${url.protocol}//${url.hostname}`;
const patternRecommended = `${portlessOrigin}/*`;
const patternSearch = `${portlessOrigin}${url.pathname}${url.search}`;
const patternWildcardProtocol = `*://${url.hostname}/*`;
const patternWildcardSubdomain = `${url.protocol}//*.${baseDomain}/*`;
const patternWildcardProtocolAndSubdomain = `*://*.${baseDomain}/*`;
// Update recommended menu item
browser.menus.update(menuIdWhitelistRecommended, {
title: _("contextAddToWhitelistRecommended", patternRecommended)
});
whitelistChildMenuPatterns.set(
menuIdWhitelistRecommended,
patternRecommended
);
if (url.search) {
const whitelistSearchMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternSearch),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(whitelistSearchMenuId, patternSearch);
}
/**
* Split URL path into segments and add menu items for each
* partial path as the segments are removed.
*/
{
const pathTrimmed = url.pathname.endsWith("/")
? url.pathname.substring(0, url.pathname.length - 1)
: url.pathname;
const pathSegments = pathTrimmed
.split("/")
.filter(segment => segment)
.reverse();
if (pathSegments.length) {
for (let i = 0; i < pathSegments.length; i++) {
const partialPath = pathSegments.slice(i).reverse().join("/");
const pattern = `${portlessOrigin}/${partialPath}/*`;
const partialPathMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", pattern),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(partialPathMenuId, pattern);
}
}
}
const wildcardProtocolMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternWildcardProtocol),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardProtocolMenuId,
patternWildcardProtocol
);
const wildcardSubdomainMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternWildcardSubdomain),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardSubdomainMenuId,
patternWildcardSubdomain
);
const wildcardProtocolAndSubdomainMenuId = browser.menus.create({
title: _(
"contextAddToWhitelistAdvancedAdd",
patternWildcardProtocolAndSubdomain
),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardProtocolAndSubdomainMenuId,
patternWildcardProtocolAndSubdomain
);
await browser.menus.refresh();
}
| {
const menuIds = info.menuIds as unknown as number[];
// Only rebuild menus if whitelist menu present
if (menuIds.includes(menuIdWhitelist as number)) {
updateWhitelistMenu(info.pageUrl);
return;
}
} | identifier_body |
menus.ts | import logger from "../lib/logger";
import options from "../lib/options";
import { stringify } from "../lib/utils";
import * as menuIds from "../menuIds";
import castManager from "./castManager";
const _ = browser.i18n.getMessage;
const URL_PATTERN_HTTP = "http://*/*";
const URL_PATTERN_HTTPS = "https://*/*";
const URL_PATTERN_FILE = "file://*/*";
const URL_PATTERNS_REMOTE = [URL_PATTERN_HTTP, URL_PATTERN_HTTPS];
const URL_PATTERNS_ALL = [...URL_PATTERNS_REMOTE, URL_PATTERN_FILE];
type MenuId = string | number;
let menuIdCast: MenuId;
let menuIdCastMedia: MenuId;
let menuIdWhitelist: MenuId;
let menuIdWhitelistRecommended: MenuId;
/** Match patterns for the whitelist option menus. */
const whitelistChildMenuPatterns = new Map<MenuId, string>();
/** Handles initial menu setup. */
export async function initMenus() {
logger.info("init (menus)");
const opts = await options.getAll();
// Global "Cast..." menu item
menuIdCast = browser.menus.create({
contexts: ["browser_action", "page", "tools_menu"],
title: _("contextCast"),
documentUrlPatterns: ["http://*/*", "https://*/*"],
icons: { "16": "icons/icon.svg" } // browser_action context
});
// <video>/<audio> "Cast..." context menu item
menuIdCastMedia = browser.menus.create({
contexts: ["audio", "video", "image"],
title: _("contextCast"),
visible: opts.mediaEnabled,
targetUrlPatterns: opts.localMediaEnabled
? URL_PATTERNS_ALL
: URL_PATTERNS_REMOTE
});
menuIdWhitelist = browser.menus.create({
contexts: ["browser_action"],
title: _("contextAddToWhitelist"),
enabled: false
});
menuIdWhitelistRecommended = browser.menus.create({
title: _("contextAddToWhitelistRecommended"),
parentId: menuIdWhitelist
});
browser.menus.create({
type: "separator",
parentId: menuIdWhitelist
});
// Popup context menus
const createPopupMenu = (props: browser.menus._CreateCreateProperties) =>
browser.menus.create({
visible: false,
documentUrlPatterns: [`${browser.runtime.getURL("ui/popup")}/*`],
...props
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_PLAY_PAUSE,
title: _("popupMediaPlay")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_MUTE,
type: "checkbox",
title: _("popupMediaMute")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_SKIP_PREVIOUS,
title: _("popupMediaSkipPrevious")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_SKIP_NEXT,
title: _("popupMediaSkipNext")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_CC,
title: _("popupMediaSubtitlesCaptions")
});
createPopupMenu({
id: menuIds.POPUP_MEDIA_CC_OFF,
parentId: menuIds.POPUP_MEDIA_CC,
type: "radio",
title: _("popupMediaSubtitlesCaptionsOff")
});
createPopupMenu({ id: menuIds.POPUP_MEDIA_SEPARATOR, type: "separator" });
createPopupMenu({
id: menuIds.POPUP_CAST,
title: _("popupCastButtonTitle"),
icons: { 16: "icons/icon.svg" }
});
createPopupMenu({
id: menuIds.POPUP_STOP,
title: _("popupStopButtonTitle")
});
browser.menus.onShown.addListener(onMenuShown);
browser.menus.onClicked.addListener(onMenuClicked);
options.addEventListener("changed", async ev => {
const alteredOpts = ev.detail;
const newOpts = await options.getAll();
if (menuIdCastMedia && alteredOpts.includes("mediaEnabled")) {
browser.menus.update(menuIdCastMedia, {
visible: newOpts.mediaEnabled
});
}
if (menuIdCastMedia && alteredOpts.includes("localMediaEnabled")) {
browser.menus.update(menuIdCastMedia, {
targetUrlPatterns: newOpts.localMediaEnabled
? URL_PATTERNS_ALL
: URL_PATTERNS_REMOTE
});
}
});
}
/** Handle updating menus when shown. */
async function onMenuShown(info: browser.menus._OnShownInfo) {
const menuIds = info.menuIds as unknown as number[];
// Only rebuild menus if whitelist menu present
if (menuIds.includes(menuIdWhitelist as number)) {
updateWhitelistMenu(info.pageUrl);
return;
}
}
/** Handle menu click events */
async function onMenuClicked(
info: browser.menus.OnClickData,
tab?: browser.tabs.Tab
) {
// Handle whitelist menus | const pattern = whitelistChildMenuPatterns.get(info.menuItemId);
if (!pattern) {
throw logger.error(
`Whitelist pattern not found for menu item ID ${info.menuItemId}.`
);
}
const whitelist = await options.get("siteWhitelist");
if (!whitelist.find(item => item.pattern === pattern)) {
// Add to whitelist and update options
whitelist.push({ pattern, isEnabled: true });
await options.set("siteWhitelist", whitelist);
}
return;
}
if (tab?.id === undefined) {
logger.error("Menu handler tab ID not found.");
return;
}
switch (info.menuItemId) {
case menuIdCast: {
castManager.triggerCast(tab.id, info.frameId);
break;
}
case menuIdCastMedia:
if (info.srcUrl) {
await browser.tabs.executeScript(tab.id, {
code: stringify`
window.mediaUrl = ${info.srcUrl};
window.targetElementId = ${info.targetElementId};
`,
frameId: info.frameId
});
await browser.tabs.executeScript(tab.id, {
file: "cast/senders/media.js",
frameId: info.frameId
});
}
break;
}
}
/** Handles updating the whitelist menus for a given URL */
async function updateWhitelistMenu(pageUrl?: string) {
/**
* If page URL doesn't exist, we're not on a page and have nothing
* to whitelist, so disable the menu and return.
*/
if (!pageUrl) {
browser.menus.update(menuIdWhitelist, {
enabled: false
});
browser.menus.refresh();
return;
}
const url = new URL(pageUrl);
const urlHasOrigin = url.origin !== "null";
/**
* If the page URL doesn't have an origin, we're not on a
* remote page and have nothing to whitelist, so disable the
* menu and return.
*/
if (!urlHasOrigin) {
browser.menus.update(menuIdWhitelist, {
enabled: false
});
browser.menus.refresh();
return;
}
// Enable the whitelist menu
browser.menus.update(menuIdWhitelist, {
enabled: true
});
for (const [menuId] of whitelistChildMenuPatterns) {
// Clear all page-specific temporary menus
if (menuId !== menuIdWhitelistRecommended) {
browser.menus.remove(menuId);
}
whitelistChildMenuPatterns.delete(menuId);
}
// If there is more than one subdomain, get the base domain
const baseDomain =
(url.hostname.match(/\./g) || []).length > 1
? url.hostname.substring(url.hostname.indexOf(".") + 1)
: url.hostname;
const portlessOrigin = `${url.protocol}//${url.hostname}`;
const patternRecommended = `${portlessOrigin}/*`;
const patternSearch = `${portlessOrigin}${url.pathname}${url.search}`;
const patternWildcardProtocol = `*://${url.hostname}/*`;
const patternWildcardSubdomain = `${url.protocol}//*.${baseDomain}/*`;
const patternWildcardProtocolAndSubdomain = `*://*.${baseDomain}/*`;
// Update recommended menu item
browser.menus.update(menuIdWhitelistRecommended, {
title: _("contextAddToWhitelistRecommended", patternRecommended)
});
whitelistChildMenuPatterns.set(
menuIdWhitelistRecommended,
patternRecommended
);
if (url.search) {
const whitelistSearchMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternSearch),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(whitelistSearchMenuId, patternSearch);
}
/**
* Split URL path into segments and add menu items for each
* partial path as the segments are removed.
*/
{
const pathTrimmed = url.pathname.endsWith("/")
? url.pathname.substring(0, url.pathname.length - 1)
: url.pathname;
const pathSegments = pathTrimmed
.split("/")
.filter(segment => segment)
.reverse();
if (pathSegments.length) {
for (let i = 0; i < pathSegments.length; i++) {
const partialPath = pathSegments.slice(i).reverse().join("/");
const pattern = `${portlessOrigin}/${partialPath}/*`;
const partialPathMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", pattern),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(partialPathMenuId, pattern);
}
}
}
const wildcardProtocolMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternWildcardProtocol),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardProtocolMenuId,
patternWildcardProtocol
);
const wildcardSubdomainMenuId = browser.menus.create({
title: _("contextAddToWhitelistAdvancedAdd", patternWildcardSubdomain),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardSubdomainMenuId,
patternWildcardSubdomain
);
const wildcardProtocolAndSubdomainMenuId = browser.menus.create({
title: _(
"contextAddToWhitelistAdvancedAdd",
patternWildcardProtocolAndSubdomain
),
parentId: menuIdWhitelist
});
whitelistChildMenuPatterns.set(
wildcardProtocolAndSubdomainMenuId,
patternWildcardProtocolAndSubdomain
);
await browser.menus.refresh();
} | if (info.parentMenuItemId === menuIdWhitelist) { | random_line_split |
wordcount.py | """Using word frequencies to create a summary.
"""
import argparse
import json
import string
import random
import pprint
from nltk import pos_tag
from nltk.collocations import BigramAssocMeasures
from nltk.collocations import BigramCollocationFinder
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import words as nltk_words
from nltk.stem import WordNetLemmatizer
from nltk.probability import FreqDist
import constants
###########################
# PART OF SPEECH TAG TRANSLATOR FROM `pos_tag` TAGS to `wordnet` TAGS
###########################
# source for tags: https://pythonprogramming.net/natural-language-toolkit-nltk-part-speech-tagging/
# NB: wordnet has a ADV_SAT tag, but I have no idea what that is
DEFAULT_TAG = wordnet.NOUN
POS_TRANSLATOR = {
'CC': DEFAULT_TAG, # coordinating conjunction
'CD': DEFAULT_TAG, # cardinal digit
'DT': DEFAULT_TAG, # determiner
'EX': DEFAULT_TAG, # existential there (like: "there is" ... think of it like "there exists")
'FW': DEFAULT_TAG, # foreign word
'IN': DEFAULT_TAG, # preposition/subordinating conjunction
'JJ': wordnet.ADJ, # adjective 'big'
'JJR': wordnet.ADJ, # adjective, comparative 'bigger'
'JJS': wordnet.ADJ, # adjective, superlative 'biggest'
'LS': DEFAULT_TAG, # list marker 1)
'MD': wordnet.VERB, # modal could, will
'NN': wordnet.NOUN, # noun, singular 'desk'
'NNS': wordnet.NOUN, # noun plural 'desks'
'NNP': wordnet.NOUN, # proper noun, singular 'Harrison'
'NNPS': wordnet.NOUN, # proper noun, plural 'Americans'
'PDT': wordnet.ADJ, # predeterminer 'all the kids'
'POS': DEFAULT_TAG, # possessive ending parent's
'PRP': DEFAULT_TAG, # personal pronoun I, he, she
'PRP$': DEFAULT_TAG, # possessive pronoun my, his, hers
'RB': wordnet.ADV, # adverb very, silently,
'RBR': wordnet.ADV, # adverb, comparative better
'RBS': wordnet.ADV, # adverb, superlative best
'RP': wordnet.ADV, # particle give up
'TO': DEFAULT_TAG, # to go 'to' the store.
'UH': DEFAULT_TAG, # interjection errrrrrrrm
'VB': wordnet.VERB, # verb, base form take
'VBD': wordnet.VERB, # verb, past tense took
'VBG': wordnet.VERB, # verb, gerund/present participle taking
'VBN': wordnet.VERB, # verb, past participle taken
'VBP': wordnet.VERB, # verb, sing. present, non-3d take
'VBZ': wordnet.VERB, # verb, 3rd person sing. present takes
'WDT': DEFAULT_TAG, # wh-determiner which
'WP': DEFAULT_TAG, # wh-pronoun who, what
'WP$': DEFAULT_TAG, # possessive wh-pronoun whose
'WRB': wordnet.ADV # wh-abverb where, when
}
def parse_arguments():
"""Parses command-line arguments.
Returns:
- args (argparse.Namespace): The parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='The path to the JSON file containing processed text')
parser.add_argument('-w', '--num_words', type=int, help='The number of frequent words to print out', default=20)
parser.add_argument('-c', '--num_collocations', type=int, help='The number of collocations to print out',
default=10)
parser.add_argument('-cw', '--collocation_window', type=int, help='The window for searching for collocations',
default=5)
return parser.parse_args()
# End of parse_arguments()
def load_records(file, preview_records=False):
"""Loads the records from the JSON file. Also filters out empty records.
Params:
- file (str): The path to the JSON file
Returns:
- records (list<dict>): The contents of the JSON file
"""
with open(file, 'r') as json_file:
records = json_file.readlines()
records = [json.loads(record) for record in records]
records = list(filter(lambda record: record[constants.TEXT] != '', records))
if preview_records:
print("=====Random Sample of Records=====")
pprint.pprint(random.choices(records, k=10))
return records
# End of load_records()
def tokenize_records(records):
"""Tokenizes the records into word lists. Filters out any stopwords in the list.
Params:
- records (list<dict>): The non-empty records from the JSON file
Returns:
- tokenized_records (list<list<str>>): The tokenized text content of the records
"""
contents = map(lambda record: record[constants.TEXT], records)
tokenized_records = [word_tokenize(record.lower()) for record in contents]
lemmatized_records = lemmatize_words(tokenized_records)
lemmatized_words = list()
for lemmatized_record in lemmatized_records:
lemmatized_words.extend(lemmatized_record)
return lemmatized_words
# End of tokenize_records()
def lemmatize_words(records):
"""Lemmatizes the words in the tokenized sentences.
Lemmatization works best when the words are tagged with their corresponding part of speech, so the words are first
tagged using nltk's `pos_tag` function.
NB: There is a good chance that this tagging isn't 100% accurate. For that matter, lemmatization isn't always 100%
accurate.
Params:
- records (list<list<str>>): The word-tokenized records
Returns:
- lemmatized_records (list<str>)): The lemmatized words from all the records
"""
print('Length of tagged_records: {:d}'.format(len(records)))
print('Total number of words: {:d}'.format(sum([len(record) for record in records])))
tagged_records = map(lambda record: pos_tag(record), records)
tagged_records = filter_stopwords(tagged_records)
lemmatizer = WordNetLemmatizer()
lemmatized_records = list()
for record in tagged_records:
|
print('Total number of words after filtering: {:d}'.format(len(lemmatized_records)))
return lemmatized_records
# End of lemmatize_words()
def filter_stopwords(tagged_records):
"""Filters stopwords, punctuation, and contractions from the tagged records. This is done after tagging to make
sure that the tagging is as accurate as possible.
Params:
- tagged_records (list<list<tuple<str, str>>>): The records, with each word tagged with its part of speech
Returns:
- filtered_records (list<list<tuple<str, str>>>): The records, with unimportant words filtered out
"""
print('Filtering stopwords')
stop_words = list(stopwords.words('english'))
stop_words.extend(string.punctuation)
stop_words.extend(constants.CONTRACTIONS)
stop_words.extend(constants.MYSQL_STOPWORDS)
dictionary_words = set(nltk_words.words())
def not_dictionary_word(word):
return word[0] not in dictionary_words and word[1] not in ['NNP', 'NNPS']
filtered_records = [filter(lambda word: word[0] not in stop_words, record) for record in tagged_records]
filtered_records = [filter(lambda word: not_dictionary_word, record) for record in filtered_records]
filtered_records = [filter(lambda word: not word[0].replace('.', '', 1).isdigit(), record)
for record in filtered_records] # see https://stackoverflow.com/a/23639915/5760608
filtered_records = [list(filter(lambda word: word[1] in POS_TRANSLATOR.keys(), record))
for record in filtered_records]
return filtered_records
# End of filter_stopwords()
def extract_frequent_words(records, num_words, no_counts=False):
"""Stems the words in the given records, and then counts the words using NLTK FreqDist.
Stemming is done using the English Snowball stemmer as per the recommendation from
http://www.nltk.org/howto/stem.html
NB: There is also a Lancaster stemmer available, but it is apparently very aggressive and can lead to a loss of
potentially useful words (source: https://stackoverflow.com/a/11210358/5760608)
Params:
- records (list<str>): The tokenized records from the JSON file
- num_words (int): The number of words to extract
- no_counts (bool): If True, frequent words will not include the word counts
Returns:
- frequent_words (list<str> or list<tuple<str, int>>): The list of most frequent words
"""
word_counts = FreqDist(records)
frequent_words = word_counts.most_common(num_words)
if no_counts:
frequent_words = [word[0] for word in frequent_words]
print("=====The {:d} Most Frequent Words=====".format(num_words))
print(frequent_words)
return frequent_words
# End of extract_frequent_words()
def extract_collocations(records, num_collocations, collocation_window, compare_collocations = False):
"""Extracts the most common collocations present in the records.
Params:
- records (list<list<str>>): The tokenized and lemmatized records from the JSON file
- num_collocations (int): The number of collocations to show
- collocation_window (int): The text window within which to search for collocations
Returns:
- best_collocations (list<tuple<str>>): The highest scored collocations present in the records
"""
bigram_measures = BigramAssocMeasures()
bigram_finder = BigramCollocationFinder.from_words(records, window_size=collocation_window)
bigram_finder.apply_freq_filter(min_freq=3)
best_collocations = bigram_finder.nbest(bigram_measures.raw_freq, num_collocations)
print("=====The {:d} Most Frequent Collocations=====".format(num_collocations))
pprint.pprint(best_collocations)
if compare_collocations:
print("=====The {:d} Best Collocations (Pointwise Mutual Information)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.pmi, num_collocations))
print("=====The {:d} Best Collocations (Student's t test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.student_t, num_collocations))
print("=====The {:d} Best Collocations (Chi-square test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.chi_sq, num_collocations))
print("=====The {:d} Best Collocations (Mutual Information)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.mi_like, num_collocations))
print("=====The {:d} Best Collocations (Likelihood Ratios)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.likelihood_ratio, num_collocations))
print("=====The {:d} Best Collocations (Poisson Stirling)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.poisson_stirling, num_collocations))
print("=====The {:d} Best Collocations (Jaccard Index)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.jaccard, num_collocations))
print("=====The {:d} Best Collocations (Phi-square test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.phi_sq, num_collocations))
print("=====The {:d} Best Collocations (Fisher's Exact Test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.fisher, num_collocations))
print("=====The {:d} Best Collocations (Dice's Coefficient)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.dice, num_collocations))
return best_collocations
# End of extract_collocations()
if __name__ == "__main__":
args = parse_arguments()
records = load_records(args.file, False)
tokenized_records = tokenize_records(records)
extract_frequent_words(tokenized_records, args.num_words, True)
extract_collocations(tokenized_records, args.num_collocations, args.collocation_window, False)
| try:
lemmatized_record = list(map(lambda word: lemmatizer.lemmatize(word[0], POS_TRANSLATOR[word[1]]), record))
except Exception as err:
print(record)
raise err
lemmatized_records.append(lemmatized_record) | conditional_block |
wordcount.py | """Using word frequencies to create a summary.
"""
import argparse
import json
import string
import random
import pprint
from nltk import pos_tag
from nltk.collocations import BigramAssocMeasures
from nltk.collocations import BigramCollocationFinder
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import words as nltk_words
from nltk.stem import WordNetLemmatizer
from nltk.probability import FreqDist
import constants
###########################
# PART OF SPEECH TAG TRANSLATOR FROM `pos_tag` TAGS to `wordnet` TAGS
###########################
# source for tags: https://pythonprogramming.net/natural-language-toolkit-nltk-part-speech-tagging/
# NB: wordnet has a ADV_SAT tag, but I have no idea what that is
DEFAULT_TAG = wordnet.NOUN
POS_TRANSLATOR = {
'CC': DEFAULT_TAG, # coordinating conjunction
'CD': DEFAULT_TAG, # cardinal digit
'DT': DEFAULT_TAG, # determiner
'EX': DEFAULT_TAG, # existential there (like: "there is" ... think of it like "there exists")
'FW': DEFAULT_TAG, # foreign word
'IN': DEFAULT_TAG, # preposition/subordinating conjunction
'JJ': wordnet.ADJ, # adjective 'big'
'JJR': wordnet.ADJ, # adjective, comparative 'bigger'
'JJS': wordnet.ADJ, # adjective, superlative 'biggest'
'LS': DEFAULT_TAG, # list marker 1)
'MD': wordnet.VERB, # modal could, will
'NN': wordnet.NOUN, # noun, singular 'desk'
'NNS': wordnet.NOUN, # noun plural 'desks'
'NNP': wordnet.NOUN, # proper noun, singular 'Harrison'
'NNPS': wordnet.NOUN, # proper noun, plural 'Americans'
'PDT': wordnet.ADJ, # predeterminer 'all the kids'
'POS': DEFAULT_TAG, # possessive ending parent's
'PRP': DEFAULT_TAG, # personal pronoun I, he, she
'PRP$': DEFAULT_TAG, # possessive pronoun my, his, hers
'RB': wordnet.ADV, # adverb very, silently,
'RBR': wordnet.ADV, # adverb, comparative better
'RBS': wordnet.ADV, # adverb, superlative best
'RP': wordnet.ADV, # particle give up
'TO': DEFAULT_TAG, # to go 'to' the store.
'UH': DEFAULT_TAG, # interjection errrrrrrrm
'VB': wordnet.VERB, # verb, base form take
'VBD': wordnet.VERB, # verb, past tense took
'VBG': wordnet.VERB, # verb, gerund/present participle taking
'VBN': wordnet.VERB, # verb, past participle taken
'VBP': wordnet.VERB, # verb, sing. present, non-3d take
'VBZ': wordnet.VERB, # verb, 3rd person sing. present takes
'WDT': DEFAULT_TAG, # wh-determiner which
'WP': DEFAULT_TAG, # wh-pronoun who, what
'WP$': DEFAULT_TAG, # possessive wh-pronoun whose
'WRB': wordnet.ADV # wh-abverb where, when
}
def parse_arguments():
"""Parses command-line arguments.
Returns:
- args (argparse.Namespace): The parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='The path to the JSON file containing processed text')
parser.add_argument('-w', '--num_words', type=int, help='The number of frequent words to print out', default=20)
parser.add_argument('-c', '--num_collocations', type=int, help='The number of collocations to print out',
default=10)
parser.add_argument('-cw', '--collocation_window', type=int, help='The window for searching for collocations',
default=5)
return parser.parse_args()
# End of parse_arguments()
def load_records(file, preview_records=False):
"""Loads the records from the JSON file. Also filters out empty records.
Params:
- file (str): The path to the JSON file
Returns:
- records (list<dict>): The contents of the JSON file
"""
with open(file, 'r') as json_file:
records = json_file.readlines()
records = [json.loads(record) for record in records]
records = list(filter(lambda record: record[constants.TEXT] != '', records))
if preview_records:
print("=====Random Sample of Records=====")
pprint.pprint(random.choices(records, k=10))
return records
# End of load_records()
def | (records):
"""Tokenizes the records into word lists. Filters out any stopwords in the list.
Params:
- records (list<dict>): The non-empty records from the JSON file
Returns:
- tokenized_records (list<list<str>>): The tokenized text content of the records
"""
contents = map(lambda record: record[constants.TEXT], records)
tokenized_records = [word_tokenize(record.lower()) for record in contents]
lemmatized_records = lemmatize_words(tokenized_records)
lemmatized_words = list()
for lemmatized_record in lemmatized_records:
lemmatized_words.extend(lemmatized_record)
return lemmatized_words
# End of tokenize_records()
def lemmatize_words(records):
"""Lemmatizes the words in the tokenized sentences.
Lemmatization works best when the words are tagged with their corresponding part of speech, so the words are first
tagged using nltk's `pos_tag` function.
NB: There is a good chance that this tagging isn't 100% accurate. For that matter, lemmatization isn't always 100%
accurate.
Params:
- records (list<list<str>>): The word-tokenized records
Returns:
- lemmatized_records (list<str>)): The lemmatized words from all the records
"""
print('Length of tagged_records: {:d}'.format(len(records)))
print('Total number of words: {:d}'.format(sum([len(record) for record in records])))
tagged_records = map(lambda record: pos_tag(record), records)
tagged_records = filter_stopwords(tagged_records)
lemmatizer = WordNetLemmatizer()
lemmatized_records = list()
for record in tagged_records:
try:
lemmatized_record = list(map(lambda word: lemmatizer.lemmatize(word[0], POS_TRANSLATOR[word[1]]), record))
except Exception as err:
print(record)
raise err
lemmatized_records.append(lemmatized_record)
print('Total number of words after filtering: {:d}'.format(len(lemmatized_records)))
return lemmatized_records
# End of lemmatize_words()
def filter_stopwords(tagged_records):
"""Filters stopwords, punctuation, and contractions from the tagged records. This is done after tagging to make
sure that the tagging is as accurate as possible.
Params:
- tagged_records (list<list<tuple<str, str>>>): The records, with each word tagged with its part of speech
Returns:
- filtered_records (list<list<tuple<str, str>>>): The records, with unimportant words filtered out
"""
print('Filtering stopwords')
stop_words = list(stopwords.words('english'))
stop_words.extend(string.punctuation)
stop_words.extend(constants.CONTRACTIONS)
stop_words.extend(constants.MYSQL_STOPWORDS)
dictionary_words = set(nltk_words.words())
def not_dictionary_word(word):
return word[0] not in dictionary_words and word[1] not in ['NNP', 'NNPS']
filtered_records = [filter(lambda word: word[0] not in stop_words, record) for record in tagged_records]
filtered_records = [filter(lambda word: not_dictionary_word, record) for record in filtered_records]
filtered_records = [filter(lambda word: not word[0].replace('.', '', 1).isdigit(), record)
for record in filtered_records] # see https://stackoverflow.com/a/23639915/5760608
filtered_records = [list(filter(lambda word: word[1] in POS_TRANSLATOR.keys(), record))
for record in filtered_records]
return filtered_records
# End of filter_stopwords()
def extract_frequent_words(records, num_words, no_counts=False):
"""Stems the words in the given records, and then counts the words using NLTK FreqDist.
Stemming is done using the English Snowball stemmer as per the recommendation from
http://www.nltk.org/howto/stem.html
NB: There is also a Lancaster stemmer available, but it is apparently very aggressive and can lead to a loss of
potentially useful words (source: https://stackoverflow.com/a/11210358/5760608)
Params:
- records (list<str>): The tokenized records from the JSON file
- num_words (int): The number of words to extract
- no_counts (bool): If True, frequent words will not include the word counts
Returns:
- frequent_words (list<str> or list<tuple<str, int>>): The list of most frequent words
"""
word_counts = FreqDist(records)
frequent_words = word_counts.most_common(num_words)
if no_counts:
frequent_words = [word[0] for word in frequent_words]
print("=====The {:d} Most Frequent Words=====".format(num_words))
print(frequent_words)
return frequent_words
# End of extract_frequent_words()
def extract_collocations(records, num_collocations, collocation_window, compare_collocations = False):
"""Extracts the most common collocations present in the records.
Params:
- records (list<list<str>>): The tokenized and lemmatized records from the JSON file
- num_collocations (int): The number of collocations to show
- collocation_window (int): The text window within which to search for collocations
Returns:
- best_collocations (list<tuple<str>>): The highest scored collocations present in the records
"""
bigram_measures = BigramAssocMeasures()
bigram_finder = BigramCollocationFinder.from_words(records, window_size=collocation_window)
bigram_finder.apply_freq_filter(min_freq=3)
best_collocations = bigram_finder.nbest(bigram_measures.raw_freq, num_collocations)
print("=====The {:d} Most Frequent Collocations=====".format(num_collocations))
pprint.pprint(best_collocations)
if compare_collocations:
print("=====The {:d} Best Collocations (Pointwise Mutual Information)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.pmi, num_collocations))
print("=====The {:d} Best Collocations (Student's t test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.student_t, num_collocations))
print("=====The {:d} Best Collocations (Chi-square test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.chi_sq, num_collocations))
print("=====The {:d} Best Collocations (Mutual Information)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.mi_like, num_collocations))
print("=====The {:d} Best Collocations (Likelihood Ratios)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.likelihood_ratio, num_collocations))
print("=====The {:d} Best Collocations (Poisson Stirling)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.poisson_stirling, num_collocations))
print("=====The {:d} Best Collocations (Jaccard Index)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.jaccard, num_collocations))
print("=====The {:d} Best Collocations (Phi-square test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.phi_sq, num_collocations))
print("=====The {:d} Best Collocations (Fisher's Exact Test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.fisher, num_collocations))
print("=====The {:d} Best Collocations (Dice's Coefficient)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.dice, num_collocations))
return best_collocations
# End of extract_collocations()
if __name__ == "__main__":
args = parse_arguments()
records = load_records(args.file, False)
tokenized_records = tokenize_records(records)
extract_frequent_words(tokenized_records, args.num_words, True)
extract_collocations(tokenized_records, args.num_collocations, args.collocation_window, False)
| tokenize_records | identifier_name |
wordcount.py | """Using word frequencies to create a summary.
"""
import argparse
import json
import string
import random
import pprint
from nltk import pos_tag
from nltk.collocations import BigramAssocMeasures
from nltk.collocations import BigramCollocationFinder
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import words as nltk_words
from nltk.stem import WordNetLemmatizer
from nltk.probability import FreqDist
import constants
###########################
# PART OF SPEECH TAG TRANSLATOR FROM `pos_tag` TAGS to `wordnet` TAGS
###########################
# source for tags: https://pythonprogramming.net/natural-language-toolkit-nltk-part-speech-tagging/
# NB: wordnet has a ADV_SAT tag, but I have no idea what that is
DEFAULT_TAG = wordnet.NOUN
POS_TRANSLATOR = {
'CC': DEFAULT_TAG, # coordinating conjunction
'CD': DEFAULT_TAG, # cardinal digit
'DT': DEFAULT_TAG, # determiner
'EX': DEFAULT_TAG, # existential there (like: "there is" ... think of it like "there exists")
'FW': DEFAULT_TAG, # foreign word
'IN': DEFAULT_TAG, # preposition/subordinating conjunction
'JJ': wordnet.ADJ, # adjective 'big'
'JJR': wordnet.ADJ, # adjective, comparative 'bigger'
'JJS': wordnet.ADJ, # adjective, superlative 'biggest'
'LS': DEFAULT_TAG, # list marker 1)
'MD': wordnet.VERB, # modal could, will
'NN': wordnet.NOUN, # noun, singular 'desk'
'NNS': wordnet.NOUN, # noun plural 'desks'
'NNP': wordnet.NOUN, # proper noun, singular 'Harrison'
'NNPS': wordnet.NOUN, # proper noun, plural 'Americans'
'PDT': wordnet.ADJ, # predeterminer 'all the kids'
'POS': DEFAULT_TAG, # possessive ending parent's
'PRP': DEFAULT_TAG, # personal pronoun I, he, she
'PRP$': DEFAULT_TAG, # possessive pronoun my, his, hers
'RB': wordnet.ADV, # adverb very, silently,
'RBR': wordnet.ADV, # adverb, comparative better
'RBS': wordnet.ADV, # adverb, superlative best
'RP': wordnet.ADV, # particle give up
'TO': DEFAULT_TAG, # to go 'to' the store.
'UH': DEFAULT_TAG, # interjection errrrrrrrm
'VB': wordnet.VERB, # verb, base form take
'VBD': wordnet.VERB, # verb, past tense took
'VBG': wordnet.VERB, # verb, gerund/present participle taking
'VBN': wordnet.VERB, # verb, past participle taken
'VBP': wordnet.VERB, # verb, sing. present, non-3d take
'VBZ': wordnet.VERB, # verb, 3rd person sing. present takes
'WDT': DEFAULT_TAG, # wh-determiner which
'WP': DEFAULT_TAG, # wh-pronoun who, what
'WP$': DEFAULT_TAG, # possessive wh-pronoun whose
'WRB': wordnet.ADV # wh-abverb where, when
}
def parse_arguments():
"""Parses command-line arguments.
Returns:
- args (argparse.Namespace): The parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='The path to the JSON file containing processed text')
parser.add_argument('-w', '--num_words', type=int, help='The number of frequent words to print out', default=20)
parser.add_argument('-c', '--num_collocations', type=int, help='The number of collocations to print out',
default=10)
parser.add_argument('-cw', '--collocation_window', type=int, help='The window for searching for collocations',
default=5)
return parser.parse_args()
# End of parse_arguments()
def load_records(file, preview_records=False):
"""Loads the records from the JSON file. Also filters out empty records.
Params:
- file (str): The path to the JSON file
Returns:
- records (list<dict>): The contents of the JSON file
"""
with open(file, 'r') as json_file:
records = json_file.readlines()
records = [json.loads(record) for record in records]
records = list(filter(lambda record: record[constants.TEXT] != '', records))
if preview_records:
print("=====Random Sample of Records=====")
pprint.pprint(random.choices(records, k=10))
return records
# End of load_records()
def tokenize_records(records):
"""Tokenizes the records into word lists. Filters out any stopwords in the list.
Params:
- records (list<dict>): The non-empty records from the JSON file
Returns:
- tokenized_records (list<list<str>>): The tokenized text content of the records
"""
contents = map(lambda record: record[constants.TEXT], records)
tokenized_records = [word_tokenize(record.lower()) for record in contents]
lemmatized_records = lemmatize_words(tokenized_records)
lemmatized_words = list()
for lemmatized_record in lemmatized_records:
lemmatized_words.extend(lemmatized_record)
return lemmatized_words
# End of tokenize_records()
def lemmatize_words(records):
"""Lemmatizes the words in the tokenized sentences.
Lemmatization works best when the words are tagged with their corresponding part of speech, so the words are first
tagged using nltk's `pos_tag` function.
NB: There is a good chance that this tagging isn't 100% accurate. For that matter, lemmatization isn't always 100%
accurate.
|
Returns:
- lemmatized_records (list<str>)): The lemmatized words from all the records
"""
print('Length of tagged_records: {:d}'.format(len(records)))
print('Total number of words: {:d}'.format(sum([len(record) for record in records])))
tagged_records = map(lambda record: pos_tag(record), records)
tagged_records = filter_stopwords(tagged_records)
lemmatizer = WordNetLemmatizer()
lemmatized_records = list()
for record in tagged_records:
try:
lemmatized_record = list(map(lambda word: lemmatizer.lemmatize(word[0], POS_TRANSLATOR[word[1]]), record))
except Exception as err:
print(record)
raise err
lemmatized_records.append(lemmatized_record)
print('Total number of words after filtering: {:d}'.format(len(lemmatized_records)))
return lemmatized_records
# End of lemmatize_words()
def filter_stopwords(tagged_records):
"""Filters stopwords, punctuation, and contractions from the tagged records. This is done after tagging to make
sure that the tagging is as accurate as possible.
Params:
- tagged_records (list<list<tuple<str, str>>>): The records, with each word tagged with its part of speech
Returns:
- filtered_records (list<list<tuple<str, str>>>): The records, with unimportant words filtered out
"""
print('Filtering stopwords')
stop_words = list(stopwords.words('english'))
stop_words.extend(string.punctuation)
stop_words.extend(constants.CONTRACTIONS)
stop_words.extend(constants.MYSQL_STOPWORDS)
dictionary_words = set(nltk_words.words())
def not_dictionary_word(word):
return word[0] not in dictionary_words and word[1] not in ['NNP', 'NNPS']
filtered_records = [filter(lambda word: word[0] not in stop_words, record) for record in tagged_records]
filtered_records = [filter(lambda word: not_dictionary_word, record) for record in filtered_records]
filtered_records = [filter(lambda word: not word[0].replace('.', '', 1).isdigit(), record)
for record in filtered_records] # see https://stackoverflow.com/a/23639915/5760608
filtered_records = [list(filter(lambda word: word[1] in POS_TRANSLATOR.keys(), record))
for record in filtered_records]
return filtered_records
# End of filter_stopwords()
def extract_frequent_words(records, num_words, no_counts=False):
"""Stems the words in the given records, and then counts the words using NLTK FreqDist.
Stemming is done using the English Snowball stemmer as per the recommendation from
http://www.nltk.org/howto/stem.html
NB: There is also a Lancaster stemmer available, but it is apparently very aggressive and can lead to a loss of
potentially useful words (source: https://stackoverflow.com/a/11210358/5760608)
Params:
- records (list<str>): The tokenized records from the JSON file
- num_words (int): The number of words to extract
- no_counts (bool): If True, frequent words will not include the word counts
Returns:
- frequent_words (list<str> or list<tuple<str, int>>): The list of most frequent words
"""
word_counts = FreqDist(records)
frequent_words = word_counts.most_common(num_words)
if no_counts:
frequent_words = [word[0] for word in frequent_words]
print("=====The {:d} Most Frequent Words=====".format(num_words))
print(frequent_words)
return frequent_words
# End of extract_frequent_words()
def extract_collocations(records, num_collocations, collocation_window, compare_collocations = False):
"""Extracts the most common collocations present in the records.
Params:
- records (list<list<str>>): The tokenized and lemmatized records from the JSON file
- num_collocations (int): The number of collocations to show
- collocation_window (int): The text window within which to search for collocations
Returns:
- best_collocations (list<tuple<str>>): The highest scored collocations present in the records
"""
bigram_measures = BigramAssocMeasures()
bigram_finder = BigramCollocationFinder.from_words(records, window_size=collocation_window)
bigram_finder.apply_freq_filter(min_freq=3)
best_collocations = bigram_finder.nbest(bigram_measures.raw_freq, num_collocations)
print("=====The {:d} Most Frequent Collocations=====".format(num_collocations))
pprint.pprint(best_collocations)
if compare_collocations:
print("=====The {:d} Best Collocations (Pointwise Mutual Information)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.pmi, num_collocations))
print("=====The {:d} Best Collocations (Student's t test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.student_t, num_collocations))
print("=====The {:d} Best Collocations (Chi-square test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.chi_sq, num_collocations))
print("=====The {:d} Best Collocations (Mutual Information)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.mi_like, num_collocations))
print("=====The {:d} Best Collocations (Likelihood Ratios)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.likelihood_ratio, num_collocations))
print("=====The {:d} Best Collocations (Poisson Stirling)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.poisson_stirling, num_collocations))
print("=====The {:d} Best Collocations (Jaccard Index)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.jaccard, num_collocations))
print("=====The {:d} Best Collocations (Phi-square test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.phi_sq, num_collocations))
print("=====The {:d} Best Collocations (Fisher's Exact Test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.fisher, num_collocations))
print("=====The {:d} Best Collocations (Dice's Coefficient)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.dice, num_collocations))
return best_collocations
# End of extract_collocations()
if __name__ == "__main__":
args = parse_arguments()
records = load_records(args.file, False)
tokenized_records = tokenize_records(records)
extract_frequent_words(tokenized_records, args.num_words, True)
extract_collocations(tokenized_records, args.num_collocations, args.collocation_window, False) |
Params:
- records (list<list<str>>): The word-tokenized records
| random_line_split |
wordcount.py | """Using word frequencies to create a summary.
"""
import argparse
import json
import string
import random
import pprint
from nltk import pos_tag
from nltk.collocations import BigramAssocMeasures
from nltk.collocations import BigramCollocationFinder
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import words as nltk_words
from nltk.stem import WordNetLemmatizer
from nltk.probability import FreqDist
import constants
###########################
# PART OF SPEECH TAG TRANSLATOR FROM `pos_tag` TAGS to `wordnet` TAGS
###########################
# source for tags: https://pythonprogramming.net/natural-language-toolkit-nltk-part-speech-tagging/
# NB: wordnet has a ADV_SAT tag, but I have no idea what that is
DEFAULT_TAG = wordnet.NOUN
POS_TRANSLATOR = {
'CC': DEFAULT_TAG, # coordinating conjunction
'CD': DEFAULT_TAG, # cardinal digit
'DT': DEFAULT_TAG, # determiner
'EX': DEFAULT_TAG, # existential there (like: "there is" ... think of it like "there exists")
'FW': DEFAULT_TAG, # foreign word
'IN': DEFAULT_TAG, # preposition/subordinating conjunction
'JJ': wordnet.ADJ, # adjective 'big'
'JJR': wordnet.ADJ, # adjective, comparative 'bigger'
'JJS': wordnet.ADJ, # adjective, superlative 'biggest'
'LS': DEFAULT_TAG, # list marker 1)
'MD': wordnet.VERB, # modal could, will
'NN': wordnet.NOUN, # noun, singular 'desk'
'NNS': wordnet.NOUN, # noun plural 'desks'
'NNP': wordnet.NOUN, # proper noun, singular 'Harrison'
'NNPS': wordnet.NOUN, # proper noun, plural 'Americans'
'PDT': wordnet.ADJ, # predeterminer 'all the kids'
'POS': DEFAULT_TAG, # possessive ending parent's
'PRP': DEFAULT_TAG, # personal pronoun I, he, she
'PRP$': DEFAULT_TAG, # possessive pronoun my, his, hers
'RB': wordnet.ADV, # adverb very, silently,
'RBR': wordnet.ADV, # adverb, comparative better
'RBS': wordnet.ADV, # adverb, superlative best
'RP': wordnet.ADV, # particle give up
'TO': DEFAULT_TAG, # to go 'to' the store.
'UH': DEFAULT_TAG, # interjection errrrrrrrm
'VB': wordnet.VERB, # verb, base form take
'VBD': wordnet.VERB, # verb, past tense took
'VBG': wordnet.VERB, # verb, gerund/present participle taking
'VBN': wordnet.VERB, # verb, past participle taken
'VBP': wordnet.VERB, # verb, sing. present, non-3d take
'VBZ': wordnet.VERB, # verb, 3rd person sing. present takes
'WDT': DEFAULT_TAG, # wh-determiner which
'WP': DEFAULT_TAG, # wh-pronoun who, what
'WP$': DEFAULT_TAG, # possessive wh-pronoun whose
'WRB': wordnet.ADV # wh-abverb where, when
}
def parse_arguments():
"""Parses command-line arguments.
Returns:
- args (argparse.Namespace): The parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='The path to the JSON file containing processed text')
parser.add_argument('-w', '--num_words', type=int, help='The number of frequent words to print out', default=20)
parser.add_argument('-c', '--num_collocations', type=int, help='The number of collocations to print out',
default=10)
parser.add_argument('-cw', '--collocation_window', type=int, help='The window for searching for collocations',
default=5)
return parser.parse_args()
# End of parse_arguments()
def load_records(file, preview_records=False):
"""Loads the records from the JSON file. Also filters out empty records.
Params:
- file (str): The path to the JSON file
Returns:
- records (list<dict>): The contents of the JSON file
"""
with open(file, 'r') as json_file:
records = json_file.readlines()
records = [json.loads(record) for record in records]
records = list(filter(lambda record: record[constants.TEXT] != '', records))
if preview_records:
print("=====Random Sample of Records=====")
pprint.pprint(random.choices(records, k=10))
return records
# End of load_records()
def tokenize_records(records):
"""Tokenizes the records into word lists. Filters out any stopwords in the list.
Params:
- records (list<dict>): The non-empty records from the JSON file
Returns:
- tokenized_records (list<list<str>>): The tokenized text content of the records
"""
contents = map(lambda record: record[constants.TEXT], records)
tokenized_records = [word_tokenize(record.lower()) for record in contents]
lemmatized_records = lemmatize_words(tokenized_records)
lemmatized_words = list()
for lemmatized_record in lemmatized_records:
lemmatized_words.extend(lemmatized_record)
return lemmatized_words
# End of tokenize_records()
def lemmatize_words(records):
"""Lemmatizes the words in the tokenized sentences.
Lemmatization works best when the words are tagged with their corresponding part of speech, so the words are first
tagged using nltk's `pos_tag` function.
NB: There is a good chance that this tagging isn't 100% accurate. For that matter, lemmatization isn't always 100%
accurate.
Params:
- records (list<list<str>>): The word-tokenized records
Returns:
- lemmatized_records (list<str>)): The lemmatized words from all the records
"""
print('Length of tagged_records: {:d}'.format(len(records)))
print('Total number of words: {:d}'.format(sum([len(record) for record in records])))
tagged_records = map(lambda record: pos_tag(record), records)
tagged_records = filter_stopwords(tagged_records)
lemmatizer = WordNetLemmatizer()
lemmatized_records = list()
for record in tagged_records:
try:
lemmatized_record = list(map(lambda word: lemmatizer.lemmatize(word[0], POS_TRANSLATOR[word[1]]), record))
except Exception as err:
print(record)
raise err
lemmatized_records.append(lemmatized_record)
print('Total number of words after filtering: {:d}'.format(len(lemmatized_records)))
return lemmatized_records
# End of lemmatize_words()
def filter_stopwords(tagged_records):
"""Filters stopwords, punctuation, and contractions from the tagged records. This is done after tagging to make
sure that the tagging is as accurate as possible.
Params:
- tagged_records (list<list<tuple<str, str>>>): The records, with each word tagged with its part of speech
Returns:
- filtered_records (list<list<tuple<str, str>>>): The records, with unimportant words filtered out
"""
print('Filtering stopwords')
stop_words = list(stopwords.words('english'))
stop_words.extend(string.punctuation)
stop_words.extend(constants.CONTRACTIONS)
stop_words.extend(constants.MYSQL_STOPWORDS)
dictionary_words = set(nltk_words.words())
def not_dictionary_word(word):
return word[0] not in dictionary_words and word[1] not in ['NNP', 'NNPS']
filtered_records = [filter(lambda word: word[0] not in stop_words, record) for record in tagged_records]
filtered_records = [filter(lambda word: not_dictionary_word, record) for record in filtered_records]
filtered_records = [filter(lambda word: not word[0].replace('.', '', 1).isdigit(), record)
for record in filtered_records] # see https://stackoverflow.com/a/23639915/5760608
filtered_records = [list(filter(lambda word: word[1] in POS_TRANSLATOR.keys(), record))
for record in filtered_records]
return filtered_records
# End of filter_stopwords()
def extract_frequent_words(records, num_words, no_counts=False):
|
# End of extract_frequent_words()
def extract_collocations(records, num_collocations, collocation_window, compare_collocations = False):
"""Extracts the most common collocations present in the records.
Params:
- records (list<list<str>>): The tokenized and lemmatized records from the JSON file
- num_collocations (int): The number of collocations to show
- collocation_window (int): The text window within which to search for collocations
Returns:
- best_collocations (list<tuple<str>>): The highest scored collocations present in the records
"""
bigram_measures = BigramAssocMeasures()
bigram_finder = BigramCollocationFinder.from_words(records, window_size=collocation_window)
bigram_finder.apply_freq_filter(min_freq=3)
best_collocations = bigram_finder.nbest(bigram_measures.raw_freq, num_collocations)
print("=====The {:d} Most Frequent Collocations=====".format(num_collocations))
pprint.pprint(best_collocations)
if compare_collocations:
print("=====The {:d} Best Collocations (Pointwise Mutual Information)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.pmi, num_collocations))
print("=====The {:d} Best Collocations (Student's t test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.student_t, num_collocations))
print("=====The {:d} Best Collocations (Chi-square test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.chi_sq, num_collocations))
print("=====The {:d} Best Collocations (Mutual Information)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.mi_like, num_collocations))
print("=====The {:d} Best Collocations (Likelihood Ratios)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.likelihood_ratio, num_collocations))
print("=====The {:d} Best Collocations (Poisson Stirling)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.poisson_stirling, num_collocations))
print("=====The {:d} Best Collocations (Jaccard Index)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.jaccard, num_collocations))
print("=====The {:d} Best Collocations (Phi-square test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.phi_sq, num_collocations))
print("=====The {:d} Best Collocations (Fisher's Exact Test)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.fisher, num_collocations))
print("=====The {:d} Best Collocations (Dice's Coefficient)=====".format(num_collocations))
pprint.pprint(bigram_finder.nbest(bigram_measures.dice, num_collocations))
return best_collocations
# End of extract_collocations()
if __name__ == "__main__":
args = parse_arguments()
records = load_records(args.file, False)
tokenized_records = tokenize_records(records)
extract_frequent_words(tokenized_records, args.num_words, True)
extract_collocations(tokenized_records, args.num_collocations, args.collocation_window, False)
| """Stems the words in the given records, and then counts the words using NLTK FreqDist.
Stemming is done using the English Snowball stemmer as per the recommendation from
http://www.nltk.org/howto/stem.html
NB: There is also a Lancaster stemmer available, but it is apparently very aggressive and can lead to a loss of
potentially useful words (source: https://stackoverflow.com/a/11210358/5760608)
Params:
- records (list<str>): The tokenized records from the JSON file
- num_words (int): The number of words to extract
- no_counts (bool): If True, frequent words will not include the word counts
Returns:
- frequent_words (list<str> or list<tuple<str, int>>): The list of most frequent words
"""
word_counts = FreqDist(records)
frequent_words = word_counts.most_common(num_words)
if no_counts:
frequent_words = [word[0] for word in frequent_words]
print("=====The {:d} Most Frequent Words=====".format(num_words))
print(frequent_words)
return frequent_words | identifier_body |
client.ts | /**
*
* client
*
*/
import type { ExecutionResult } from 'graphql';
import { RequestParams, Sink } from './common';
import { isObject } from './utils';
/** This file is the entry point for browsers, re-export common elements. */
export * from './common';
/** @category Client */
export interface ClientOptions {
/**
* URL of the GraphQL over HTTP server to connect.
*
* If the option is a function, it will be called on each request.
* Returning a Promise is supported too and the request will stall until it
* resolves.
*
* A good use-case for having a function is when using the URL for authentication,
* where subsequent requests (due to auth) may have a refreshed identity token.
*
* Function receives the request params. Useful for example, to ease up debugging and DevTools
* navigation you might want to use the operation name in the URL's search params (`/graphql?MyQuery`).
*/
url: string | ((request: RequestParams) => Promise<string> | string);
/**
* Indicates whether the user agent should send cookies from the other domain in the case
* of cross-origin requests.
*
* Possible options are:
* - `omit`: Never send or receive cookies.
* - `same-origin`: Send user credentials (cookies, basic http auth, etc..) if the URL is on the same origin as the calling script.
* - `include`: Always send user credentials (cookies, basic http auth, etc..), even for cross-origin calls.
*
* @default same-origin
*/
credentials?: 'omit' | 'same-origin' | 'include';
/**
* A string specifying the referrer of the request. This can be a same-origin URL, about:client, or an empty string.
*
* @default undefined
*/
referrer?: string;
/**
* Specifies the referrer policy to use for the request.
*
* Possible options are:
* - `no-referrer`: Does not send referrer information along with requests to any origin.
* - `no-referrer-when-downgrade`: Sends full referrerURL for requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL.
* - `same-origin`: Sends full referrerURL as referrer information when making same-origin-referrer requests.
* - `origin`: Sends only the ASCII serialization of the request’s referrerURL when making both same-origin-referrer requests and cross-origin-referrer requests.
* - `strict-origin`: Sends the ASCII serialization of the origin of the referrerURL for requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL
* - `origin-when-cross-origin`: Sends full referrerURL when making same-origin-referrer requests, and only the ASCII serialization of the origin of the request’s referrerURL is sent when making cross-origin-referrer requests
* - `strict-origin-when-cross-origin`: Sends full referrerURL when making same-origin-referrer requests, and only the ASCII serialization of the origin of the request’s referrerURL when making cross-origin-referrer requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL.
* - `unsafe-url`: Sends full referrerURL along for both same-origin-referrer requests and cross-origin-referrer requests.
*
* @default undefined
*/
referrerPolicy?:
| 'no-referrer'
| 'no-referrer-when-downgrade'
| 'same-origin'
| 'origin'
| 'strict-origin'
| 'origin-when-cross-origin'
| 'strict-origin-when-cross-origin'
| 'unsafe-url';
/**
* HTTP headers to pass along the request.
*
* If the option is a function, it will be called on each request.
* Returning a Promise is supported too and the request will stall until it
* resolves.
*
* A good use-case for having a function is when using the URL for authentication,
* where subsequent requests (due to auth) may have a refreshed identity token.
*/
headers?:
| Record<string, string>
| (() =>
| Promise<Record<string, string> | null | void>
| Record<string, string>
| null
| void);
/**
* Control whether the network request error should be retried.
*
* Please note that you can **only** control network errors, all other
* errors are considered fatal and will be reported immediately.
*
* You may implement your own waiting strategy by timing the resolution of the returned promise.
*
* Useful for retrying requests that failed because the service is temporarely unavailable.
*
* `retries` argument counts actual retries, so it will begin with
* 0 after the first failed request.
*
* Returning `false` will report the `err` argument; however, throwing a different error from
* the `err` argument, will report it instead.
*
* @default '() => false'
*/
shouldRetry?: (err: NetworkError, retries: number) => Promise<boolean>;
/**
* The Fetch function to use.
*
* For NodeJS environments consider using [`node-fetch`](https://github.com/node-fetch/node-fetch).
*
* @default global.fetch
*/
fetchFn?: unknown;
/**
* The AbortController implementation to use.
*
* For NodeJS environments before v15 consider using [`node-abort-controller`](https://github.com/southpolesteve/node-abort-controller).
*
* @default global.AbortController
*/
abortControllerImpl?: unknown;
}
/** @category Client */
export interface Client {
/**
* Subscribes to receive a response by making an HTTP request.
*
* It uses the `sink` to emit the received data or errors. Returns a _dispose_
* function used for canceling active requests and cleaning up.
*/
subscribe<Data = Record<string, unknown>, Extensions = unknown>(
request: RequestParams,
sink: Sink<ExecutionResult<Data, Extensions>>,
): () => void;
/**
* Dispose of the client, cancel all active requests and clean up resources.
*/
dispose: () => void;
}
/**
* Creates a disposable GraphQL over HTTP client to transmit
* GraphQL operation results.
*
* @category Client
*/
export function createClient(options: ClientOptions): Client {
const {
credentials = 'same-origin',
referrer,
referrerPolicy,
shouldRetry = () => false,
} = options;
const fetchFn = (options.fetchFn || fetch) as typeof fetch;
const AbortControllerImpl = (options.abortControllerImpl ||
AbortController) as typeof AbortController;
// we dont use yet another AbortController here because of
// node's max EventEmitters listeners being only 10
const client = (() => {
let disposed = false;
const listeners: (() => void)[] = [];
return {
get disposed() {
return disposed;
},
onDispose(cb: () => void) {
if (disposed) {
// empty the call stack and then call the cb
setTimeout(() => cb(), 0);
return () => {
// noop
};
}
listeners.push(cb);
return () => {
listeners.splice(listeners.indexOf(cb), 1);
};
},
dispos | if (disposed) return;
disposed = true;
// we copy the listeners so that onDispose unlistens dont "pull the rug under our feet"
for (const listener of [...listeners]) {
listener();
}
},
};
})();
return {
subscribe(request, sink) {
if (client.disposed) throw new Error('Client has been disposed');
const control = new AbortControllerImpl();
const unlisten = client.onDispose(() => {
unlisten();
control.abort();
});
(async () => {
let retryingErr: NetworkError | null = null,
retries = 0;
for (;;) {
if (retryingErr) {
const should = await shouldRetry(retryingErr, retries);
// requst might've been canceled while waiting for retry
if (control.signal.aborted) return;
if (!should) throw retryingErr;
retries++;
}
try {
const url =
typeof options.url === 'function'
? await options.url(request)
: options.url;
if (control.signal.aborted) return;
const headers =
typeof options.headers === 'function'
? await options.headers()
: options.headers ?? {};
if (control.signal.aborted) return;
let res;
try {
res = await fetchFn(url, {
signal: control.signal,
method: 'POST',
headers: {
...headers,
'content-type': 'application/json; charset=utf-8',
accept: 'application/graphql-response+json, application/json',
},
credentials,
referrer,
referrerPolicy,
body: JSON.stringify(request),
});
} catch (err) {
throw new NetworkError(err);
}
if (!res.ok) throw new NetworkError(res);
if (!res.body) throw new Error('Missing response body');
const contentType = res.headers.get('content-type');
if (!contentType) throw new Error('Missing response content-type');
if (
!contentType.includes('application/graphql-response+json') &&
!contentType.includes('application/json')
) {
throw new Error(
`Unsupported response content-type ${contentType}`,
);
}
const result = await res.json();
// eslint-disable-next-line @typescript-eslint/no-explicit-any
sink.next(result as any);
return control.abort();
} catch (err) {
if (control.signal.aborted) return;
// all non-network errors are worth reporting immediately
if (!(err instanceof NetworkError)) throw err;
// try again
retryingErr = err;
}
}
})()
.then(() => sink.complete())
.catch((err) => sink.error(err));
return () => control.abort();
},
dispose() {
client.dispose();
},
};
}
/**
* A network error caused by the client or an unexpected response from the server.
*
* To avoid bundling DOM typings (because the client can run in Node env too),
* you should supply the `Response` generic depending on your Fetch implementation.
*
* @category Client
*/
export class NetworkError<
Response extends ResponseLike = ResponseLike,
> extends Error {
/**
* The underlyig response thats considered an error.
*
* Will be undefined when no response is received,
* instead an unexpected network error.
*/
public response: Response | undefined;
constructor(msgOrErrOrResponse: string | Error | Response) {
let message, response: Response | undefined;
if (isResponseLike(msgOrErrOrResponse)) {
response = msgOrErrOrResponse;
message =
'Server responded with ' +
msgOrErrOrResponse.status +
': ' +
msgOrErrOrResponse.statusText;
} else if (msgOrErrOrResponse instanceof Error)
message = msgOrErrOrResponse.message;
else message = String(msgOrErrOrResponse);
super(message);
this.name = this.constructor.name;
this.response = response;
}
}
/**
* Concrete interface a response needs to implement for the client.
*
* @category Client
*/
export interface ResponseLike {
readonly ok: boolean;
readonly status: number;
readonly statusText: string;
}
function isResponseLike(val: unknown): val is ResponseLike {
return (
isObject(val) &&
typeof val['ok'] === 'boolean' &&
typeof val['status'] === 'number' &&
typeof val['statusText'] === 'string'
);
}
| e() {
| identifier_name |
client.ts | /**
*
* client
*
*/
import type { ExecutionResult } from 'graphql';
import { RequestParams, Sink } from './common';
import { isObject } from './utils';
/** This file is the entry point for browsers, re-export common elements. */
export * from './common';
/** @category Client */
export interface ClientOptions {
/**
* URL of the GraphQL over HTTP server to connect.
*
* If the option is a function, it will be called on each request.
* Returning a Promise is supported too and the request will stall until it
* resolves.
*
* A good use-case for having a function is when using the URL for authentication,
* where subsequent requests (due to auth) may have a refreshed identity token.
*
* Function receives the request params. Useful for example, to ease up debugging and DevTools
* navigation you might want to use the operation name in the URL's search params (`/graphql?MyQuery`).
*/
url: string | ((request: RequestParams) => Promise<string> | string);
/**
* Indicates whether the user agent should send cookies from the other domain in the case
* of cross-origin requests.
*
* Possible options are:
* - `omit`: Never send or receive cookies.
* - `same-origin`: Send user credentials (cookies, basic http auth, etc..) if the URL is on the same origin as the calling script.
* - `include`: Always send user credentials (cookies, basic http auth, etc..), even for cross-origin calls.
*
* @default same-origin
*/
credentials?: 'omit' | 'same-origin' | 'include';
/**
* A string specifying the referrer of the request. This can be a same-origin URL, about:client, or an empty string.
*
* @default undefined
*/
referrer?: string;
/**
* Specifies the referrer policy to use for the request.
*
* Possible options are:
* - `no-referrer`: Does not send referrer information along with requests to any origin.
* - `no-referrer-when-downgrade`: Sends full referrerURL for requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL.
* - `same-origin`: Sends full referrerURL as referrer information when making same-origin-referrer requests.
* - `origin`: Sends only the ASCII serialization of the request’s referrerURL when making both same-origin-referrer requests and cross-origin-referrer requests.
* - `strict-origin`: Sends the ASCII serialization of the origin of the referrerURL for requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL
* - `origin-when-cross-origin`: Sends full referrerURL when making same-origin-referrer requests, and only the ASCII serialization of the origin of the request’s referrerURL is sent when making cross-origin-referrer requests
* - `strict-origin-when-cross-origin`: Sends full referrerURL when making same-origin-referrer requests, and only the ASCII serialization of the origin of the request’s referrerURL when making cross-origin-referrer requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL.
* - `unsafe-url`: Sends full referrerURL along for both same-origin-referrer requests and cross-origin-referrer requests.
*
* @default undefined
*/
referrerPolicy?:
| 'no-referrer'
| 'no-referrer-when-downgrade'
| 'same-origin'
| 'origin'
| 'strict-origin'
| 'origin-when-cross-origin'
| 'strict-origin-when-cross-origin'
| 'unsafe-url';
/**
* HTTP headers to pass along the request.
*
* If the option is a function, it will be called on each request.
* Returning a Promise is supported too and the request will stall until it
* resolves.
*
* A good use-case for having a function is when using the URL for authentication,
* where subsequent requests (due to auth) may have a refreshed identity token.
*/
headers?:
| Record<string, string>
| (() =>
| Promise<Record<string, string> | null | void>
| Record<string, string>
| null
| void);
/**
* Control whether the network request error should be retried.
*
* Please note that you can **only** control network errors, all other
* errors are considered fatal and will be reported immediately.
*
* You may implement your own waiting strategy by timing the resolution of the returned promise.
*
* Useful for retrying requests that failed because the service is temporarely unavailable.
*
* `retries` argument counts actual retries, so it will begin with
* 0 after the first failed request.
*
* Returning `false` will report the `err` argument; however, throwing a different error from
* the `err` argument, will report it instead.
*
* @default '() => false'
*/
shouldRetry?: (err: NetworkError, retries: number) => Promise<boolean>;
/**
* The Fetch function to use.
*
* For NodeJS environments consider using [`node-fetch`](https://github.com/node-fetch/node-fetch).
*
* @default global.fetch
*/
fetchFn?: unknown;
/**
* The AbortController implementation to use.
*
* For NodeJS environments before v15 consider using [`node-abort-controller`](https://github.com/southpolesteve/node-abort-controller).
*
* @default global.AbortController
*/
abortControllerImpl?: unknown;
}
/** @category Client */
export interface Client {
/**
* Subscribes to receive a response by making an HTTP request.
*
* It uses the `sink` to emit the received data or errors. Returns a _dispose_
* function used for canceling active requests and cleaning up.
*/
subscribe<Data = Record<string, unknown>, Extensions = unknown>(
request: RequestParams,
sink: Sink<ExecutionResult<Data, Extensions>>,
): () => void;
/**
* Dispose of the client, cancel all active requests and clean up resources.
*/
dispose: () => void;
}
/**
* Creates a disposable GraphQL over HTTP client to transmit
* GraphQL operation results.
*
* @category Client
*/
export function createClient(options: ClientOptions): Client {
const {
credentials = 'same-origin',
referrer,
referrerPolicy,
shouldRetry = () => false,
} = options;
const fetchFn = (options.fetchFn || fetch) as typeof fetch;
const AbortControllerImpl = (options.abortControllerImpl ||
AbortController) as typeof AbortController;
// we dont use yet another AbortController here because of
// node's max EventEmitters listeners being only 10
const client = (() => {
let disposed = false;
const listeners: (() => void)[] = [];
return {
get disposed() {
return disposed;
},
onDispose(cb: () => void) {
if (disposed) {
// empty the call stack and then call the cb
setTimeout(() => cb(), 0);
return () => {
// noop
};
}
listeners.push(cb);
return () => {
listeners.splice(listeners.indexOf(cb), 1);
};
},
dispose() {
| };
})();
return {
subscribe(request, sink) {
if (client.disposed) throw new Error('Client has been disposed');
const control = new AbortControllerImpl();
const unlisten = client.onDispose(() => {
unlisten();
control.abort();
});
(async () => {
let retryingErr: NetworkError | null = null,
retries = 0;
for (;;) {
if (retryingErr) {
const should = await shouldRetry(retryingErr, retries);
// requst might've been canceled while waiting for retry
if (control.signal.aborted) return;
if (!should) throw retryingErr;
retries++;
}
try {
const url =
typeof options.url === 'function'
? await options.url(request)
: options.url;
if (control.signal.aborted) return;
const headers =
typeof options.headers === 'function'
? await options.headers()
: options.headers ?? {};
if (control.signal.aborted) return;
let res;
try {
res = await fetchFn(url, {
signal: control.signal,
method: 'POST',
headers: {
...headers,
'content-type': 'application/json; charset=utf-8',
accept: 'application/graphql-response+json, application/json',
},
credentials,
referrer,
referrerPolicy,
body: JSON.stringify(request),
});
} catch (err) {
throw new NetworkError(err);
}
if (!res.ok) throw new NetworkError(res);
if (!res.body) throw new Error('Missing response body');
const contentType = res.headers.get('content-type');
if (!contentType) throw new Error('Missing response content-type');
if (
!contentType.includes('application/graphql-response+json') &&
!contentType.includes('application/json')
) {
throw new Error(
`Unsupported response content-type ${contentType}`,
);
}
const result = await res.json();
// eslint-disable-next-line @typescript-eslint/no-explicit-any
sink.next(result as any);
return control.abort();
} catch (err) {
if (control.signal.aborted) return;
// all non-network errors are worth reporting immediately
if (!(err instanceof NetworkError)) throw err;
// try again
retryingErr = err;
}
}
})()
.then(() => sink.complete())
.catch((err) => sink.error(err));
return () => control.abort();
},
dispose() {
client.dispose();
},
};
}
/**
* A network error caused by the client or an unexpected response from the server.
*
* To avoid bundling DOM typings (because the client can run in Node env too),
* you should supply the `Response` generic depending on your Fetch implementation.
*
* @category Client
*/
export class NetworkError<
Response extends ResponseLike = ResponseLike,
> extends Error {
/**
* The underlyig response thats considered an error.
*
* Will be undefined when no response is received,
* instead an unexpected network error.
*/
public response: Response | undefined;
constructor(msgOrErrOrResponse: string | Error | Response) {
let message, response: Response | undefined;
if (isResponseLike(msgOrErrOrResponse)) {
response = msgOrErrOrResponse;
message =
'Server responded with ' +
msgOrErrOrResponse.status +
': ' +
msgOrErrOrResponse.statusText;
} else if (msgOrErrOrResponse instanceof Error)
message = msgOrErrOrResponse.message;
else message = String(msgOrErrOrResponse);
super(message);
this.name = this.constructor.name;
this.response = response;
}
}
/**
* Concrete interface a response needs to implement for the client.
*
* @category Client
*/
export interface ResponseLike {
readonly ok: boolean;
readonly status: number;
readonly statusText: string;
}
function isResponseLike(val: unknown): val is ResponseLike {
return (
isObject(val) &&
typeof val['ok'] === 'boolean' &&
typeof val['status'] === 'number' &&
typeof val['statusText'] === 'string'
);
}
| if (disposed) return;
disposed = true;
// we copy the listeners so that onDispose unlistens dont "pull the rug under our feet"
for (const listener of [...listeners]) {
listener();
}
},
| identifier_body |
client.ts | /**
*
* client
*
*/
import type { ExecutionResult } from 'graphql';
import { RequestParams, Sink } from './common';
import { isObject } from './utils';
/** This file is the entry point for browsers, re-export common elements. */
export * from './common';
/** @category Client */
export interface ClientOptions {
/**
* URL of the GraphQL over HTTP server to connect.
*
* If the option is a function, it will be called on each request.
* Returning a Promise is supported too and the request will stall until it
* resolves.
*
* A good use-case for having a function is when using the URL for authentication,
* where subsequent requests (due to auth) may have a refreshed identity token.
*
* Function receives the request params. Useful for example, to ease up debugging and DevTools
* navigation you might want to use the operation name in the URL's search params (`/graphql?MyQuery`).
*/
url: string | ((request: RequestParams) => Promise<string> | string);
/**
* Indicates whether the user agent should send cookies from the other domain in the case
* of cross-origin requests.
*
* Possible options are:
* - `omit`: Never send or receive cookies.
* - `same-origin`: Send user credentials (cookies, basic http auth, etc..) if the URL is on the same origin as the calling script.
* - `include`: Always send user credentials (cookies, basic http auth, etc..), even for cross-origin calls.
*
* @default same-origin
*/
credentials?: 'omit' | 'same-origin' | 'include';
/**
* A string specifying the referrer of the request. This can be a same-origin URL, about:client, or an empty string.
*
* @default undefined
*/
referrer?: string;
/**
* Specifies the referrer policy to use for the request.
*
* Possible options are:
* - `no-referrer`: Does not send referrer information along with requests to any origin.
* - `no-referrer-when-downgrade`: Sends full referrerURL for requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL.
* - `same-origin`: Sends full referrerURL as referrer information when making same-origin-referrer requests.
* - `origin`: Sends only the ASCII serialization of the request’s referrerURL when making both same-origin-referrer requests and cross-origin-referrer requests.
* - `strict-origin`: Sends the ASCII serialization of the origin of the referrerURL for requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL
* - `origin-when-cross-origin`: Sends full referrerURL when making same-origin-referrer requests, and only the ASCII serialization of the origin of the request’s referrerURL is sent when making cross-origin-referrer requests
* - `strict-origin-when-cross-origin`: Sends full referrerURL when making same-origin-referrer requests, and only the ASCII serialization of the origin of the request’s referrerURL when making cross-origin-referrer requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL.
* - `unsafe-url`: Sends full referrerURL along for both same-origin-referrer requests and cross-origin-referrer requests.
*
* @default undefined
*/
referrerPolicy?:
| 'no-referrer'
| 'no-referrer-when-downgrade'
| 'same-origin'
| 'origin'
| 'strict-origin'
| 'origin-when-cross-origin'
| 'strict-origin-when-cross-origin'
| 'unsafe-url';
/**
* HTTP headers to pass along the request.
*
* If the option is a function, it will be called on each request.
* Returning a Promise is supported too and the request will stall until it
* resolves.
*
* A good use-case for having a function is when using the URL for authentication,
* where subsequent requests (due to auth) may have a refreshed identity token.
*/
headers?:
| Record<string, string>
| (() =>
| Promise<Record<string, string> | null | void>
| Record<string, string>
| null
| void);
/**
* Control whether the network request error should be retried.
*
* Please note that you can **only** control network errors, all other
* errors are considered fatal and will be reported immediately.
*
* You may implement your own waiting strategy by timing the resolution of the returned promise.
*
* Useful for retrying requests that failed because the service is temporarely unavailable.
*
* `retries` argument counts actual retries, so it will begin with
* 0 after the first failed request.
*
* Returning `false` will report the `err` argument; however, throwing a different error from
* the `err` argument, will report it instead.
*
* @default '() => false'
*/
shouldRetry?: (err: NetworkError, retries: number) => Promise<boolean>;
/**
* The Fetch function to use.
*
* For NodeJS environments consider using [`node-fetch`](https://github.com/node-fetch/node-fetch).
*
* @default global.fetch
*/
fetchFn?: unknown;
/**
* The AbortController implementation to use.
*
* For NodeJS environments before v15 consider using [`node-abort-controller`](https://github.com/southpolesteve/node-abort-controller).
*
* @default global.AbortController
*/
abortControllerImpl?: unknown;
}
/** @category Client */
export interface Client {
/**
* Subscribes to receive a response by making an HTTP request.
*
* It uses the `sink` to emit the received data or errors. Returns a _dispose_
* function used for canceling active requests and cleaning up.
*/
subscribe<Data = Record<string, unknown>, Extensions = unknown>(
request: RequestParams,
sink: Sink<ExecutionResult<Data, Extensions>>,
): () => void;
/**
* Dispose of the client, cancel all active requests and clean up resources.
*/
dispose: () => void;
}
/**
* Creates a disposable GraphQL over HTTP client to transmit
* GraphQL operation results.
*
* @category Client
*/
export function createClient(options: ClientOptions): Client {
const {
credentials = 'same-origin',
referrer,
referrerPolicy,
shouldRetry = () => false,
} = options;
const fetchFn = (options.fetchFn || fetch) as typeof fetch;
const AbortControllerImpl = (options.abortControllerImpl ||
AbortController) as typeof AbortController;
// we dont use yet another AbortController here because of
// node's max EventEmitters listeners being only 10
const client = (() => {
let disposed = false;
const listeners: (() => void)[] = [];
return {
get disposed() {
return disposed;
},
onDispose(cb: () => void) {
if (disposed) {
// empty the call stack and then call the cb
setTimeout(() => cb(), 0);
return () => {
// noop
};
}
listeners.push(cb);
return () => {
listeners.splice(listeners.indexOf(cb), 1);
};
},
dispose() {
if (disposed) return;
disposed = true;
// we copy the listeners so that onDispose unlistens dont "pull the rug under our feet"
for (const listener of [...listeners]) {
listener();
}
},
};
})();
return {
subscribe(request, sink) {
if (client.disposed) throw new Error('Client has been disposed');
const control = new AbortControllerImpl();
const unlisten = client.onDispose(() => {
unlisten();
control.abort();
});
(async () => {
let retryingErr: NetworkError | null = null,
retries = 0;
for (;;) {
if (retryingErr) {
| try {
const url =
typeof options.url === 'function'
? await options.url(request)
: options.url;
if (control.signal.aborted) return;
const headers =
typeof options.headers === 'function'
? await options.headers()
: options.headers ?? {};
if (control.signal.aborted) return;
let res;
try {
res = await fetchFn(url, {
signal: control.signal,
method: 'POST',
headers: {
...headers,
'content-type': 'application/json; charset=utf-8',
accept: 'application/graphql-response+json, application/json',
},
credentials,
referrer,
referrerPolicy,
body: JSON.stringify(request),
});
} catch (err) {
throw new NetworkError(err);
}
if (!res.ok) throw new NetworkError(res);
if (!res.body) throw new Error('Missing response body');
const contentType = res.headers.get('content-type');
if (!contentType) throw new Error('Missing response content-type');
if (
!contentType.includes('application/graphql-response+json') &&
!contentType.includes('application/json')
) {
throw new Error(
`Unsupported response content-type ${contentType}`,
);
}
const result = await res.json();
// eslint-disable-next-line @typescript-eslint/no-explicit-any
sink.next(result as any);
return control.abort();
} catch (err) {
if (control.signal.aborted) return;
// all non-network errors are worth reporting immediately
if (!(err instanceof NetworkError)) throw err;
// try again
retryingErr = err;
}
}
})()
.then(() => sink.complete())
.catch((err) => sink.error(err));
return () => control.abort();
},
dispose() {
client.dispose();
},
};
}
/**
* A network error caused by the client or an unexpected response from the server.
*
* To avoid bundling DOM typings (because the client can run in Node env too),
* you should supply the `Response` generic depending on your Fetch implementation.
*
* @category Client
*/
export class NetworkError<
Response extends ResponseLike = ResponseLike,
> extends Error {
/**
* The underlyig response thats considered an error.
*
* Will be undefined when no response is received,
* instead an unexpected network error.
*/
public response: Response | undefined;
constructor(msgOrErrOrResponse: string | Error | Response) {
let message, response: Response | undefined;
if (isResponseLike(msgOrErrOrResponse)) {
response = msgOrErrOrResponse;
message =
'Server responded with ' +
msgOrErrOrResponse.status +
': ' +
msgOrErrOrResponse.statusText;
} else if (msgOrErrOrResponse instanceof Error)
message = msgOrErrOrResponse.message;
else message = String(msgOrErrOrResponse);
super(message);
this.name = this.constructor.name;
this.response = response;
}
}
/**
* Concrete interface a response needs to implement for the client.
*
* @category Client
*/
export interface ResponseLike {
readonly ok: boolean;
readonly status: number;
readonly statusText: string;
}
function isResponseLike(val: unknown): val is ResponseLike {
return (
isObject(val) &&
typeof val['ok'] === 'boolean' &&
typeof val['status'] === 'number' &&
typeof val['statusText'] === 'string'
);
}
| const should = await shouldRetry(retryingErr, retries);
// requst might've been canceled while waiting for retry
if (control.signal.aborted) return;
if (!should) throw retryingErr;
retries++;
}
| conditional_block |
client.ts | /**
*
* client
*
*/
import type { ExecutionResult } from 'graphql';
import { RequestParams, Sink } from './common';
import { isObject } from './utils';
/** This file is the entry point for browsers, re-export common elements. */
export * from './common';
/** @category Client */
export interface ClientOptions {
/**
* URL of the GraphQL over HTTP server to connect.
*
* If the option is a function, it will be called on each request.
* Returning a Promise is supported too and the request will stall until it
* resolves.
*
* A good use-case for having a function is when using the URL for authentication,
* where subsequent requests (due to auth) may have a refreshed identity token.
*
* Function receives the request params. Useful for example, to ease up debugging and DevTools
* navigation you might want to use the operation name in the URL's search params (`/graphql?MyQuery`).
*/
url: string | ((request: RequestParams) => Promise<string> | string);
/**
* Indicates whether the user agent should send cookies from the other domain in the case
* of cross-origin requests.
*
* Possible options are:
* - `omit`: Never send or receive cookies.
* - `same-origin`: Send user credentials (cookies, basic http auth, etc..) if the URL is on the same origin as the calling script.
* - `include`: Always send user credentials (cookies, basic http auth, etc..), even for cross-origin calls.
*
* @default same-origin
*/
credentials?: 'omit' | 'same-origin' | 'include';
/**
* A string specifying the referrer of the request. This can be a same-origin URL, about:client, or an empty string.
*
* @default undefined
*/
referrer?: string;
/**
* Specifies the referrer policy to use for the request.
*
* Possible options are:
* - `no-referrer`: Does not send referrer information along with requests to any origin.
* - `no-referrer-when-downgrade`: Sends full referrerURL for requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL.
* - `same-origin`: Sends full referrerURL as referrer information when making same-origin-referrer requests.
* - `origin`: Sends only the ASCII serialization of the request’s referrerURL when making both same-origin-referrer requests and cross-origin-referrer requests.
* - `strict-origin`: Sends the ASCII serialization of the origin of the referrerURL for requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL
* - `origin-when-cross-origin`: Sends full referrerURL when making same-origin-referrer requests, and only the ASCII serialization of the origin of the request’s referrerURL is sent when making cross-origin-referrer requests
* - `strict-origin-when-cross-origin`: Sends full referrerURL when making same-origin-referrer requests, and only the ASCII serialization of the origin of the request’s referrerURL when making cross-origin-referrer requests: whose referrerURL and current URL are both potentially trustworthy URLs, or whose referrerURL is a non-potentially trustworthy URL.
* - `unsafe-url`: Sends full referrerURL along for both same-origin-referrer requests and cross-origin-referrer requests.
*
* @default undefined
*/
referrerPolicy?:
| 'no-referrer'
| 'no-referrer-when-downgrade'
| 'same-origin'
| 'origin'
| 'strict-origin'
| 'origin-when-cross-origin'
| 'strict-origin-when-cross-origin'
| 'unsafe-url';
/**
* HTTP headers to pass along the request.
*
* If the option is a function, it will be called on each request.
* Returning a Promise is supported too and the request will stall until it
* resolves.
*
* A good use-case for having a function is when using the URL for authentication,
* where subsequent requests (due to auth) may have a refreshed identity token.
*/
headers?:
| Record<string, string>
| (() =>
| Promise<Record<string, string> | null | void>
| Record<string, string>
| null
| void);
/**
* Control whether the network request error should be retried.
*
* Please note that you can **only** control network errors, all other
* errors are considered fatal and will be reported immediately.
*
* You may implement your own waiting strategy by timing the resolution of the returned promise.
*
* Useful for retrying requests that failed because the service is temporarely unavailable.
*
* `retries` argument counts actual retries, so it will begin with
* 0 after the first failed request.
*
* Returning `false` will report the `err` argument; however, throwing a different error from
* the `err` argument, will report it instead.
*
* @default '() => false'
*/
shouldRetry?: (err: NetworkError, retries: number) => Promise<boolean>;
/**
* The Fetch function to use.
*
* For NodeJS environments consider using [`node-fetch`](https://github.com/node-fetch/node-fetch).
*
* @default global.fetch
*/
fetchFn?: unknown;
/**
* The AbortController implementation to use.
*
* For NodeJS environments before v15 consider using [`node-abort-controller`](https://github.com/southpolesteve/node-abort-controller).
*
* @default global.AbortController
*/
abortControllerImpl?: unknown;
}
/** @category Client */
export interface Client {
/**
* Subscribes to receive a response by making an HTTP request.
*
* It uses the `sink` to emit the received data or errors. Returns a _dispose_
* function used for canceling active requests and cleaning up.
*/
subscribe<Data = Record<string, unknown>, Extensions = unknown>(
request: RequestParams,
sink: Sink<ExecutionResult<Data, Extensions>>,
): () => void;
/**
* Dispose of the client, cancel all active requests and clean up resources.
*/
dispose: () => void;
}
/**
* Creates a disposable GraphQL over HTTP client to transmit
* GraphQL operation results.
*
* @category Client
*/
export function createClient(options: ClientOptions): Client {
const {
credentials = 'same-origin',
referrer,
referrerPolicy,
shouldRetry = () => false,
} = options;
const fetchFn = (options.fetchFn || fetch) as typeof fetch;
const AbortControllerImpl = (options.abortControllerImpl ||
AbortController) as typeof AbortController;
// we dont use yet another AbortController here because of
// node's max EventEmitters listeners being only 10
const client = (() => {
let disposed = false;
const listeners: (() => void)[] = [];
return {
get disposed() {
return disposed;
},
onDispose(cb: () => void) {
if (disposed) {
// empty the call stack and then call the cb
setTimeout(() => cb(), 0);
return () => {
// noop
};
}
listeners.push(cb);
return () => {
listeners.splice(listeners.indexOf(cb), 1);
};
},
dispose() {
if (disposed) return;
disposed = true;
// we copy the listeners so that onDispose unlistens dont "pull the rug under our feet"
for (const listener of [...listeners]) {
listener();
}
},
};
})();
return {
subscribe(request, sink) {
if (client.disposed) throw new Error('Client has been disposed');
const control = new AbortControllerImpl();
const unlisten = client.onDispose(() => {
unlisten();
control.abort();
});
(async () => {
let retryingErr: NetworkError | null = null,
retries = 0;
for (;;) {
if (retryingErr) {
const should = await shouldRetry(retryingErr, retries);
// requst might've been canceled while waiting for retry
if (control.signal.aborted) return;
if (!should) throw retryingErr;
retries++;
}
try {
const url =
typeof options.url === 'function'
? await options.url(request)
: options.url;
if (control.signal.aborted) return;
const headers =
typeof options.headers === 'function'
? await options.headers()
: options.headers ?? {};
if (control.signal.aborted) return;
let res;
try {
res = await fetchFn(url, {
signal: control.signal,
method: 'POST',
headers: {
...headers,
'content-type': 'application/json; charset=utf-8',
accept: 'application/graphql-response+json, application/json',
},
credentials,
referrer,
referrerPolicy,
body: JSON.stringify(request),
});
} catch (err) {
throw new NetworkError(err);
}
if (!res.ok) throw new NetworkError(res);
if (!res.body) throw new Error('Missing response body');
const contentType = res.headers.get('content-type');
if (!contentType) throw new Error('Missing response content-type');
if (
!contentType.includes('application/graphql-response+json') &&
!contentType.includes('application/json')
) {
throw new Error(
`Unsupported response content-type ${contentType}`,
);
}
const result = await res.json();
// eslint-disable-next-line @typescript-eslint/no-explicit-any
sink.next(result as any);
return control.abort();
} catch (err) {
if (control.signal.aborted) return;
// all non-network errors are worth reporting immediately
if (!(err instanceof NetworkError)) throw err;
// try again
retryingErr = err;
}
}
})()
.then(() => sink.complete())
.catch((err) => sink.error(err));
return () => control.abort();
},
dispose() {
client.dispose();
},
};
}
/**
* A network error caused by the client or an unexpected response from the server.
*
* To avoid bundling DOM typings (because the client can run in Node env too),
* you should supply the `Response` generic depending on your Fetch implementation.
*
* @category Client
*/
export class NetworkError<
Response extends ResponseLike = ResponseLike,
> extends Error {
/**
* The underlyig response thats considered an error.
*
* Will be undefined when no response is received,
* instead an unexpected network error.
*/
public response: Response | undefined;
constructor(msgOrErrOrResponse: string | Error | Response) {
let message, response: Response | undefined;
if (isResponseLike(msgOrErrOrResponse)) {
response = msgOrErrOrResponse;
message =
'Server responded with ' +
msgOrErrOrResponse.status +
': ' +
msgOrErrOrResponse.statusText;
} else if (msgOrErrOrResponse instanceof Error)
message = msgOrErrOrResponse.message;
else message = String(msgOrErrOrResponse);
super(message);
this.name = this.constructor.name;
this.response = response;
}
}
/**
* Concrete interface a response needs to implement for the client.
*
* @category Client
*/
export interface ResponseLike {
readonly ok: boolean;
readonly status: number;
readonly statusText: string;
}
function isResponseLike(val: unknown): val is ResponseLike {
return ( | isObject(val) &&
typeof val['ok'] === 'boolean' &&
typeof val['status'] === 'number' &&
typeof val['statusText'] === 'string'
);
} | random_line_split | |
test_language.py | import torch
import triton
import triton.language as tl
import copy
import pytest
import ast
import itertools
torch.manual_seed(0)
# convert from string to torch.dtype
# Necessary because doesn't print torch.dtype properly
cvt = {
'bool': torch.bool,
'int8': torch.int8,
'int16': torch.int16,
'int32': torch.int32,
'int64': torch.int64,
'bfloat16': torch.bfloat16,
'float16': torch.float16,
'float32': torch.float32,
'float64': torch.float64,
}
int_dtypes = ['int8', 'int16', 'int32', 'int64']
float_dtypes = ['float16', 'float32', 'float64']
dtypes = int_dtypes + float_dtypes
def patch_kernel(template, to_replace):
kernel = copy.deepcopy(template)
for key, value in to_replace.items():
kernel.src = kernel.src.replace(key, value)
return kernel
# generic test functions
def _test_unary(dtype_x, expr, torch_expr=None, device='cuda'):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, **meta):
off = tl.arange(0, meta['SIZE'])
x = tl.load(X + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
if 'log' in expr: x = torch.abs(x) + 0.01
# reference result
z_ref = eval(expr if torch_expr is None else torch_expr)
# triton result
z_tri = torch.empty_like(z_ref)
kernel[(1, )](z_tri, x, SIZE=SIZE, num_warps=4)
# compare
triton.testing.assert_allclose(z_ref, z_tri)
def _test_binary(dtype_x, dtype_y, expr, device='cuda'):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, Y, **meta):
off = tl.arange(0, meta['SIZE'])
x = tl.load(X + off)
y = tl.load(Y + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
y = triton.testing.random(SIZE, dtype=cvt[dtype_y], device=device)
# reference result
z_ref = eval(expr)
# triton result
z_tri = torch.empty(SIZE, dtype=z_ref.dtype, device=device)
kernel[(1, )](z_tri, x, y, SIZE=SIZE, num_warps=4)
# compare
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test binary ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['+', '-', '*', '/', '%'] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_bin_op(dtype_x, dtype_y, expr, device='cuda'):
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test bitwise ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['&', '|', '^'] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_bitwise_op(dtype_x, dtype_y, expr, device='cuda'):
if 'float' in dtype_x + dtype_y:
with pytest.raises(RuntimeError):
_test_binary(dtype_x, dtype_y, expr, device=device)
else:
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test compare ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['==', '!=', '>', '<', '>=', '<='] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_compare_op(dtype_x, dtype_y, expr, device='cuda'):
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test unary ops
# ---------------
@pytest.mark.parametrize("dtype_x, expr", [
(dtype_x, f' -x') for dtype_x in float_dtypes
] + [\
(dtype_x, f' ~x') for dtype_x in int_dtypes
])
def test_unary_op(dtype_x, expr, device='cuda'):
_test_unary(dtype_x, expr, device=device)
# ----------------
# test math ops
# ----------------
# @pytest.mark.paramterize("expr", [
# 'exp', 'log', 'cos', 'sin'
# ])
@pytest.mark.parametrize("expr", [
'exp', 'log', 'cos', 'sin'
])
def test_math_op(expr, device='cuda'):
_test_unary('float32', f'tl.{expr}(x)', f'torch.{expr}(x) ', device=device)
# ----------------
# test indexing
# ----------------
def make_ptr_str(name, shape):
rank = len(shape)
offsets = []
stride = 1
for i in reversed(range(rank)):
idx = ', '.join([':' if ii == i else 'None' for ii in range(rank)])
offsets += [f'tl.arange(0, {shape[i]})[{idx}]*{stride}']
stride *= shape[i]
return f"{name} + {' + '.join(offsets)}"
@pytest.mark.parametrize("expr", [f'x[{s}]' for s in
['None, :', ':, None',\
'None, :, :', ':, :, None']\
])
def test_index1d(expr, device='cuda'):
dtype = torch.int32
rank_x = expr.count(':')
rank_y = expr.count(',') + 1
shape_x = [32 for _ in range(rank_x)]
shape_z = [32 for _ in range(rank_y)]
# Triton kernel
@triton.jit
def kernel(Z, X, **meta):
SIZE = meta['SIZE']
m = tl.arange(0, SIZE)
n = tl.arange(0, SIZE)
x = tl.load(X_PTR_EXPR)
z = GENERATE_TEST_HERE
tl.store(Z_PTR_EXPR, z)
to_replace = {
'X_PTR_EXPR': make_ptr_str('X', shape_x),
'Z_PTR_EXPR': make_ptr_str('Z', shape_z),
'GENERATE_TEST_HERE': expr,
}
kernel = patch_kernel(kernel, to_replace)
# torch result
x = triton.testing.random(shape_x, dtype=dtype, device=device)
y = torch.zeros(shape_z, dtype=dtype, device=device)
z_ref = eval(expr) + y
# triton result
z_tri = torch.empty_like(z_ref)
kernel[(1, )](z_tri, x, num_warps=1, SIZE=shape_x[0])
# compare
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test tuples
# ---------------
@triton.jit
def fn(a, b):
return a + b, \
a - b, \
a * b
def test_tuples():
device = 'cuda'
@triton.jit
def with_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = fn(x, y)
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
@triton.jit
def without_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = x + y, x - y, x * y
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
x = torch.tensor([1.3], device=device, dtype=torch.float32)
y = torch.tensor([1.9], device=device, dtype=torch.float32)
a_tri = torch.tensor([0], device=device, dtype=torch.float32)
b_tri = torch.tensor([0], device=device, dtype=torch.float32)
c_tri = torch.tensor([0], device=device, dtype=torch.float32)
for kernel in [with_fn, without_fn]:
kernel[(1, )](x, y, a_tri, b_tri, c_tri, num_warps=1)
a_ref, b_ref, c_ref = x + y, x - y, x * y
assert a_tri == a_ref
assert b_tri == b_ref
assert c_tri == c_ref
# ---------------
# test atomics
# ---------------
@pytest.mark.parametrize("op, dtype_x, mode", itertools.chain.from_iterable([
[('add', 'int32', mode), ('add', 'float16', mode), ('add', 'float32', mode), \
('max', 'int32', mode), ('max', 'float32', mode),\
('min', 'int32', mode), ('min', 'float32', mode),\
]
for mode in ['all_neg', 'all_pos', 'min_neg', 'max_pos']]))
def test_atomic_rmw(op, dtype_x, mode, device='cuda'):
dtype_x = cvt[dtype_x]
n_programs = 37
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
pid = tl.program_id(0)
x = tl.load(X + pid)
old = GENERATE_TEST_HERE
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.atomic_{op}(Z, x)'})
torch_op = {'add': torch.sum, 'max': torch.max, 'min': torch.min}[op]
max_neutral = float('-inf') if dtype_x.is_floating_point else torch.iinfo(dtype_x).min
min_neutral = float('inf') if dtype_x.is_floating_point else torch.iinfo(dtype_x).max
neutral = {'add': 0, 'max': max_neutral, 'min': min_neutral}[op]
# triton result
x_tri = triton.testing.random((n_programs, ), dtype=dtype_x, device=device)
if mode == 'all_neg':
x_tri = -torch.abs(x_tri)
if mode == 'all_pos':
|
if mode == 'min_neg':
idx = torch.randint(n_programs, size=(1, )).item()
x_tri[idx] = -torch.max(torch.abs(x_tri)) - 1
if mode == 'max_pos':
idx = torch.randint(n_programs, size=(1, )).item()
x_tri[idx] = torch.max(torch.abs(x_tri)) + 1
z_tri = torch.empty([], dtype=dtype_x, device=device)
z_tri.fill_(neutral)
kernel[(n_programs, )](x_tri, z_tri)
# torch result
z_ref = torch_op(x_tri).to(dtype_x)
# compare
exact = op not in ['add']
if exact:
assert z_ref.item() == z_tri.item()
else:
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test cast
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_z, bitcast", [
(dtype_x, dtype_z, False) \
for dtype_x in dtypes\
for dtype_z in dtypes
] + [
('float32', 'bfloat16', False),
('bfloat16', 'float32', False),
('float32', 'int32', True)
])
def test_cast(dtype_x, dtype_z, bitcast, device='cuda'):
x = torch.tensor([43.5], dtype=cvt[dtype_x], device=device)
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
x = tl.load(X)
z = x.to(Z.dtype.element_ty, bitcast=meta['BITCAST'])
tl.store(Z, z)
# triton result
z_tri = torch.empty((1, ), dtype=cvt[dtype_z], device=device)
kernel[(1, )](x, z_tri, BITCAST=bitcast)
# torch result
if bitcast:
import numpy as np
z_ref = x.detach().cpu().numpy().view(getattr(np, dtype_z))
z_ref = torch.from_numpy(z_ref).to(device)
else:
z_ref = x.to(z_tri.dtype)
assert z_tri == z_ref
# ---------------
# test load
# ---------------
# ---------------
# test store
# ---------------
# ---------------
# test if
# ---------------
# ---------------
# test for
# ---------------
# ---------------
# test while
# ---------------
| x_tri = torch.abs(x_tri) | conditional_block |
test_language.py | import torch
import triton
import triton.language as tl
import copy
import pytest
import ast
import itertools
torch.manual_seed(0)
# convert from string to torch.dtype
# Necessary because doesn't print torch.dtype properly
cvt = {
'bool': torch.bool,
'int8': torch.int8,
'int16': torch.int16,
'int32': torch.int32,
'int64': torch.int64,
'bfloat16': torch.bfloat16,
'float16': torch.float16,
'float32': torch.float32,
'float64': torch.float64,
}
int_dtypes = ['int8', 'int16', 'int32', 'int64']
float_dtypes = ['float16', 'float32', 'float64']
dtypes = int_dtypes + float_dtypes
def | (template, to_replace):
kernel = copy.deepcopy(template)
for key, value in to_replace.items():
kernel.src = kernel.src.replace(key, value)
return kernel
# generic test functions
def _test_unary(dtype_x, expr, torch_expr=None, device='cuda'):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, **meta):
off = tl.arange(0, meta['SIZE'])
x = tl.load(X + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
if 'log' in expr: x = torch.abs(x) + 0.01
# reference result
z_ref = eval(expr if torch_expr is None else torch_expr)
# triton result
z_tri = torch.empty_like(z_ref)
kernel[(1, )](z_tri, x, SIZE=SIZE, num_warps=4)
# compare
triton.testing.assert_allclose(z_ref, z_tri)
def _test_binary(dtype_x, dtype_y, expr, device='cuda'):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, Y, **meta):
off = tl.arange(0, meta['SIZE'])
x = tl.load(X + off)
y = tl.load(Y + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
y = triton.testing.random(SIZE, dtype=cvt[dtype_y], device=device)
# reference result
z_ref = eval(expr)
# triton result
z_tri = torch.empty(SIZE, dtype=z_ref.dtype, device=device)
kernel[(1, )](z_tri, x, y, SIZE=SIZE, num_warps=4)
# compare
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test binary ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['+', '-', '*', '/', '%'] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_bin_op(dtype_x, dtype_y, expr, device='cuda'):
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test bitwise ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['&', '|', '^'] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_bitwise_op(dtype_x, dtype_y, expr, device='cuda'):
if 'float' in dtype_x + dtype_y:
with pytest.raises(RuntimeError):
_test_binary(dtype_x, dtype_y, expr, device=device)
else:
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test compare ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['==', '!=', '>', '<', '>=', '<='] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_compare_op(dtype_x, dtype_y, expr, device='cuda'):
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test unary ops
# ---------------
@pytest.mark.parametrize("dtype_x, expr", [
(dtype_x, f' -x') for dtype_x in float_dtypes
] + [\
(dtype_x, f' ~x') for dtype_x in int_dtypes
])
def test_unary_op(dtype_x, expr, device='cuda'):
_test_unary(dtype_x, expr, device=device)
# ----------------
# test math ops
# ----------------
# @pytest.mark.paramterize("expr", [
# 'exp', 'log', 'cos', 'sin'
# ])
@pytest.mark.parametrize("expr", [
'exp', 'log', 'cos', 'sin'
])
def test_math_op(expr, device='cuda'):
_test_unary('float32', f'tl.{expr}(x)', f'torch.{expr}(x) ', device=device)
# ----------------
# test indexing
# ----------------
def make_ptr_str(name, shape):
rank = len(shape)
offsets = []
stride = 1
for i in reversed(range(rank)):
idx = ', '.join([':' if ii == i else 'None' for ii in range(rank)])
offsets += [f'tl.arange(0, {shape[i]})[{idx}]*{stride}']
stride *= shape[i]
return f"{name} + {' + '.join(offsets)}"
@pytest.mark.parametrize("expr", [f'x[{s}]' for s in
['None, :', ':, None',\
'None, :, :', ':, :, None']\
])
def test_index1d(expr, device='cuda'):
dtype = torch.int32
rank_x = expr.count(':')
rank_y = expr.count(',') + 1
shape_x = [32 for _ in range(rank_x)]
shape_z = [32 for _ in range(rank_y)]
# Triton kernel
@triton.jit
def kernel(Z, X, **meta):
SIZE = meta['SIZE']
m = tl.arange(0, SIZE)
n = tl.arange(0, SIZE)
x = tl.load(X_PTR_EXPR)
z = GENERATE_TEST_HERE
tl.store(Z_PTR_EXPR, z)
to_replace = {
'X_PTR_EXPR': make_ptr_str('X', shape_x),
'Z_PTR_EXPR': make_ptr_str('Z', shape_z),
'GENERATE_TEST_HERE': expr,
}
kernel = patch_kernel(kernel, to_replace)
# torch result
x = triton.testing.random(shape_x, dtype=dtype, device=device)
y = torch.zeros(shape_z, dtype=dtype, device=device)
z_ref = eval(expr) + y
# triton result
z_tri = torch.empty_like(z_ref)
kernel[(1, )](z_tri, x, num_warps=1, SIZE=shape_x[0])
# compare
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test tuples
# ---------------
@triton.jit
def fn(a, b):
return a + b, \
a - b, \
a * b
def test_tuples():
device = 'cuda'
@triton.jit
def with_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = fn(x, y)
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
@triton.jit
def without_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = x + y, x - y, x * y
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
x = torch.tensor([1.3], device=device, dtype=torch.float32)
y = torch.tensor([1.9], device=device, dtype=torch.float32)
a_tri = torch.tensor([0], device=device, dtype=torch.float32)
b_tri = torch.tensor([0], device=device, dtype=torch.float32)
c_tri = torch.tensor([0], device=device, dtype=torch.float32)
for kernel in [with_fn, without_fn]:
kernel[(1, )](x, y, a_tri, b_tri, c_tri, num_warps=1)
a_ref, b_ref, c_ref = x + y, x - y, x * y
assert a_tri == a_ref
assert b_tri == b_ref
assert c_tri == c_ref
# ---------------
# test atomics
# ---------------
@pytest.mark.parametrize("op, dtype_x, mode", itertools.chain.from_iterable([
[('add', 'int32', mode), ('add', 'float16', mode), ('add', 'float32', mode), \
('max', 'int32', mode), ('max', 'float32', mode),\
('min', 'int32', mode), ('min', 'float32', mode),\
]
for mode in ['all_neg', 'all_pos', 'min_neg', 'max_pos']]))
def test_atomic_rmw(op, dtype_x, mode, device='cuda'):
dtype_x = cvt[dtype_x]
n_programs = 37
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
pid = tl.program_id(0)
x = tl.load(X + pid)
old = GENERATE_TEST_HERE
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.atomic_{op}(Z, x)'})
torch_op = {'add': torch.sum, 'max': torch.max, 'min': torch.min}[op]
max_neutral = float('-inf') if dtype_x.is_floating_point else torch.iinfo(dtype_x).min
min_neutral = float('inf') if dtype_x.is_floating_point else torch.iinfo(dtype_x).max
neutral = {'add': 0, 'max': max_neutral, 'min': min_neutral}[op]
# triton result
x_tri = triton.testing.random((n_programs, ), dtype=dtype_x, device=device)
if mode == 'all_neg':
x_tri = -torch.abs(x_tri)
if mode == 'all_pos':
x_tri = torch.abs(x_tri)
if mode == 'min_neg':
idx = torch.randint(n_programs, size=(1, )).item()
x_tri[idx] = -torch.max(torch.abs(x_tri)) - 1
if mode == 'max_pos':
idx = torch.randint(n_programs, size=(1, )).item()
x_tri[idx] = torch.max(torch.abs(x_tri)) + 1
z_tri = torch.empty([], dtype=dtype_x, device=device)
z_tri.fill_(neutral)
kernel[(n_programs, )](x_tri, z_tri)
# torch result
z_ref = torch_op(x_tri).to(dtype_x)
# compare
exact = op not in ['add']
if exact:
assert z_ref.item() == z_tri.item()
else:
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test cast
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_z, bitcast", [
(dtype_x, dtype_z, False) \
for dtype_x in dtypes\
for dtype_z in dtypes
] + [
('float32', 'bfloat16', False),
('bfloat16', 'float32', False),
('float32', 'int32', True)
])
def test_cast(dtype_x, dtype_z, bitcast, device='cuda'):
x = torch.tensor([43.5], dtype=cvt[dtype_x], device=device)
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
x = tl.load(X)
z = x.to(Z.dtype.element_ty, bitcast=meta['BITCAST'])
tl.store(Z, z)
# triton result
z_tri = torch.empty((1, ), dtype=cvt[dtype_z], device=device)
kernel[(1, )](x, z_tri, BITCAST=bitcast)
# torch result
if bitcast:
import numpy as np
z_ref = x.detach().cpu().numpy().view(getattr(np, dtype_z))
z_ref = torch.from_numpy(z_ref).to(device)
else:
z_ref = x.to(z_tri.dtype)
assert z_tri == z_ref
# ---------------
# test load
# ---------------
# ---------------
# test store
# ---------------
# ---------------
# test if
# ---------------
# ---------------
# test for
# ---------------
# ---------------
# test while
# ---------------
| patch_kernel | identifier_name |
test_language.py | import torch
import triton
import triton.language as tl
import copy
import pytest
import ast
import itertools
torch.manual_seed(0)
# convert from string to torch.dtype
# Necessary because doesn't print torch.dtype properly
cvt = {
'bool': torch.bool,
'int8': torch.int8,
'int16': torch.int16,
'int32': torch.int32,
'int64': torch.int64,
'bfloat16': torch.bfloat16,
'float16': torch.float16,
'float32': torch.float32,
'float64': torch.float64,
}
int_dtypes = ['int8', 'int16', 'int32', 'int64']
float_dtypes = ['float16', 'float32', 'float64']
dtypes = int_dtypes + float_dtypes
def patch_kernel(template, to_replace):
kernel = copy.deepcopy(template)
for key, value in to_replace.items():
kernel.src = kernel.src.replace(key, value)
return kernel
# generic test functions
def _test_unary(dtype_x, expr, torch_expr=None, device='cuda'):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, **meta):
off = tl.arange(0, meta['SIZE'])
x = tl.load(X + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
if 'log' in expr: x = torch.abs(x) + 0.01
# reference result
z_ref = eval(expr if torch_expr is None else torch_expr)
# triton result
z_tri = torch.empty_like(z_ref)
kernel[(1, )](z_tri, x, SIZE=SIZE, num_warps=4)
# compare
triton.testing.assert_allclose(z_ref, z_tri)
def _test_binary(dtype_x, dtype_y, expr, device='cuda'):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, Y, **meta):
off = tl.arange(0, meta['SIZE'])
x = tl.load(X + off) |
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
y = triton.testing.random(SIZE, dtype=cvt[dtype_y], device=device)
# reference result
z_ref = eval(expr)
# triton result
z_tri = torch.empty(SIZE, dtype=z_ref.dtype, device=device)
kernel[(1, )](z_tri, x, y, SIZE=SIZE, num_warps=4)
# compare
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test binary ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['+', '-', '*', '/', '%'] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_bin_op(dtype_x, dtype_y, expr, device='cuda'):
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test bitwise ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['&', '|', '^'] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_bitwise_op(dtype_x, dtype_y, expr, device='cuda'):
if 'float' in dtype_x + dtype_y:
with pytest.raises(RuntimeError):
_test_binary(dtype_x, dtype_y, expr, device=device)
else:
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test compare ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['==', '!=', '>', '<', '>=', '<='] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_compare_op(dtype_x, dtype_y, expr, device='cuda'):
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test unary ops
# ---------------
@pytest.mark.parametrize("dtype_x, expr", [
(dtype_x, f' -x') for dtype_x in float_dtypes
] + [\
(dtype_x, f' ~x') for dtype_x in int_dtypes
])
def test_unary_op(dtype_x, expr, device='cuda'):
_test_unary(dtype_x, expr, device=device)
# ----------------
# test math ops
# ----------------
# @pytest.mark.paramterize("expr", [
# 'exp', 'log', 'cos', 'sin'
# ])
@pytest.mark.parametrize("expr", [
'exp', 'log', 'cos', 'sin'
])
def test_math_op(expr, device='cuda'):
_test_unary('float32', f'tl.{expr}(x)', f'torch.{expr}(x) ', device=device)
# ----------------
# test indexing
# ----------------
def make_ptr_str(name, shape):
rank = len(shape)
offsets = []
stride = 1
for i in reversed(range(rank)):
idx = ', '.join([':' if ii == i else 'None' for ii in range(rank)])
offsets += [f'tl.arange(0, {shape[i]})[{idx}]*{stride}']
stride *= shape[i]
return f"{name} + {' + '.join(offsets)}"
@pytest.mark.parametrize("expr", [f'x[{s}]' for s in
['None, :', ':, None',\
'None, :, :', ':, :, None']\
])
def test_index1d(expr, device='cuda'):
dtype = torch.int32
rank_x = expr.count(':')
rank_y = expr.count(',') + 1
shape_x = [32 for _ in range(rank_x)]
shape_z = [32 for _ in range(rank_y)]
# Triton kernel
@triton.jit
def kernel(Z, X, **meta):
SIZE = meta['SIZE']
m = tl.arange(0, SIZE)
n = tl.arange(0, SIZE)
x = tl.load(X_PTR_EXPR)
z = GENERATE_TEST_HERE
tl.store(Z_PTR_EXPR, z)
to_replace = {
'X_PTR_EXPR': make_ptr_str('X', shape_x),
'Z_PTR_EXPR': make_ptr_str('Z', shape_z),
'GENERATE_TEST_HERE': expr,
}
kernel = patch_kernel(kernel, to_replace)
# torch result
x = triton.testing.random(shape_x, dtype=dtype, device=device)
y = torch.zeros(shape_z, dtype=dtype, device=device)
z_ref = eval(expr) + y
# triton result
z_tri = torch.empty_like(z_ref)
kernel[(1, )](z_tri, x, num_warps=1, SIZE=shape_x[0])
# compare
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test tuples
# ---------------
@triton.jit
def fn(a, b):
return a + b, \
a - b, \
a * b
def test_tuples():
device = 'cuda'
@triton.jit
def with_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = fn(x, y)
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
@triton.jit
def without_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = x + y, x - y, x * y
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
x = torch.tensor([1.3], device=device, dtype=torch.float32)
y = torch.tensor([1.9], device=device, dtype=torch.float32)
a_tri = torch.tensor([0], device=device, dtype=torch.float32)
b_tri = torch.tensor([0], device=device, dtype=torch.float32)
c_tri = torch.tensor([0], device=device, dtype=torch.float32)
for kernel in [with_fn, without_fn]:
kernel[(1, )](x, y, a_tri, b_tri, c_tri, num_warps=1)
a_ref, b_ref, c_ref = x + y, x - y, x * y
assert a_tri == a_ref
assert b_tri == b_ref
assert c_tri == c_ref
# ---------------
# test atomics
# ---------------
@pytest.mark.parametrize("op, dtype_x, mode", itertools.chain.from_iterable([
[('add', 'int32', mode), ('add', 'float16', mode), ('add', 'float32', mode), \
('max', 'int32', mode), ('max', 'float32', mode),\
('min', 'int32', mode), ('min', 'float32', mode),\
]
for mode in ['all_neg', 'all_pos', 'min_neg', 'max_pos']]))
def test_atomic_rmw(op, dtype_x, mode, device='cuda'):
dtype_x = cvt[dtype_x]
n_programs = 37
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
pid = tl.program_id(0)
x = tl.load(X + pid)
old = GENERATE_TEST_HERE
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.atomic_{op}(Z, x)'})
torch_op = {'add': torch.sum, 'max': torch.max, 'min': torch.min}[op]
max_neutral = float('-inf') if dtype_x.is_floating_point else torch.iinfo(dtype_x).min
min_neutral = float('inf') if dtype_x.is_floating_point else torch.iinfo(dtype_x).max
neutral = {'add': 0, 'max': max_neutral, 'min': min_neutral}[op]
# triton result
x_tri = triton.testing.random((n_programs, ), dtype=dtype_x, device=device)
if mode == 'all_neg':
x_tri = -torch.abs(x_tri)
if mode == 'all_pos':
x_tri = torch.abs(x_tri)
if mode == 'min_neg':
idx = torch.randint(n_programs, size=(1, )).item()
x_tri[idx] = -torch.max(torch.abs(x_tri)) - 1
if mode == 'max_pos':
idx = torch.randint(n_programs, size=(1, )).item()
x_tri[idx] = torch.max(torch.abs(x_tri)) + 1
z_tri = torch.empty([], dtype=dtype_x, device=device)
z_tri.fill_(neutral)
kernel[(n_programs, )](x_tri, z_tri)
# torch result
z_ref = torch_op(x_tri).to(dtype_x)
# compare
exact = op not in ['add']
if exact:
assert z_ref.item() == z_tri.item()
else:
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test cast
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_z, bitcast", [
(dtype_x, dtype_z, False) \
for dtype_x in dtypes\
for dtype_z in dtypes
] + [
('float32', 'bfloat16', False),
('bfloat16', 'float32', False),
('float32', 'int32', True)
])
def test_cast(dtype_x, dtype_z, bitcast, device='cuda'):
x = torch.tensor([43.5], dtype=cvt[dtype_x], device=device)
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
x = tl.load(X)
z = x.to(Z.dtype.element_ty, bitcast=meta['BITCAST'])
tl.store(Z, z)
# triton result
z_tri = torch.empty((1, ), dtype=cvt[dtype_z], device=device)
kernel[(1, )](x, z_tri, BITCAST=bitcast)
# torch result
if bitcast:
import numpy as np
z_ref = x.detach().cpu().numpy().view(getattr(np, dtype_z))
z_ref = torch.from_numpy(z_ref).to(device)
else:
z_ref = x.to(z_tri.dtype)
assert z_tri == z_ref
# ---------------
# test load
# ---------------
# ---------------
# test store
# ---------------
# ---------------
# test if
# ---------------
# ---------------
# test for
# ---------------
# ---------------
# test while
# --------------- | y = tl.load(Y + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z) | random_line_split |
test_language.py | import torch
import triton
import triton.language as tl
import copy
import pytest
import ast
import itertools
torch.manual_seed(0)
# convert from string to torch.dtype
# Necessary because doesn't print torch.dtype properly
cvt = {
'bool': torch.bool,
'int8': torch.int8,
'int16': torch.int16,
'int32': torch.int32,
'int64': torch.int64,
'bfloat16': torch.bfloat16,
'float16': torch.float16,
'float32': torch.float32,
'float64': torch.float64,
}
int_dtypes = ['int8', 'int16', 'int32', 'int64']
float_dtypes = ['float16', 'float32', 'float64']
dtypes = int_dtypes + float_dtypes
def patch_kernel(template, to_replace):
kernel = copy.deepcopy(template)
for key, value in to_replace.items():
kernel.src = kernel.src.replace(key, value)
return kernel
# generic test functions
def _test_unary(dtype_x, expr, torch_expr=None, device='cuda'):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, **meta):
off = tl.arange(0, meta['SIZE'])
x = tl.load(X + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
if 'log' in expr: x = torch.abs(x) + 0.01
# reference result
z_ref = eval(expr if torch_expr is None else torch_expr)
# triton result
z_tri = torch.empty_like(z_ref)
kernel[(1, )](z_tri, x, SIZE=SIZE, num_warps=4)
# compare
triton.testing.assert_allclose(z_ref, z_tri)
def _test_binary(dtype_x, dtype_y, expr, device='cuda'):
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, Y, **meta):
off = tl.arange(0, meta['SIZE'])
x = tl.load(X + off)
y = tl.load(Y + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = triton.testing.random(SIZE, dtype=cvt[dtype_x], device=device)
y = triton.testing.random(SIZE, dtype=cvt[dtype_y], device=device)
# reference result
z_ref = eval(expr)
# triton result
z_tri = torch.empty(SIZE, dtype=z_ref.dtype, device=device)
kernel[(1, )](z_tri, x, y, SIZE=SIZE, num_warps=4)
# compare
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test binary ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['+', '-', '*', '/', '%'] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_bin_op(dtype_x, dtype_y, expr, device='cuda'):
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test bitwise ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['&', '|', '^'] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_bitwise_op(dtype_x, dtype_y, expr, device='cuda'):
if 'float' in dtype_x + dtype_y:
with pytest.raises(RuntimeError):
_test_binary(dtype_x, dtype_y, expr, device=device)
else:
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test compare ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, expr", [
(dtype_x, dtype_y, f' x {op} y') \
for op in ['==', '!=', '>', '<', '>=', '<='] \
for dtype_x in dtypes \
for dtype_y in dtypes
])
def test_compare_op(dtype_x, dtype_y, expr, device='cuda'):
_test_binary(dtype_x, dtype_y, expr, device=device)
# ---------------
# test unary ops
# ---------------
@pytest.mark.parametrize("dtype_x, expr", [
(dtype_x, f' -x') for dtype_x in float_dtypes
] + [\
(dtype_x, f' ~x') for dtype_x in int_dtypes
])
def test_unary_op(dtype_x, expr, device='cuda'):
_test_unary(dtype_x, expr, device=device)
# ----------------
# test math ops
# ----------------
# @pytest.mark.paramterize("expr", [
# 'exp', 'log', 'cos', 'sin'
# ])
@pytest.mark.parametrize("expr", [
'exp', 'log', 'cos', 'sin'
])
def test_math_op(expr, device='cuda'):
_test_unary('float32', f'tl.{expr}(x)', f'torch.{expr}(x) ', device=device)
# ----------------
# test indexing
# ----------------
def make_ptr_str(name, shape):
rank = len(shape)
offsets = []
stride = 1
for i in reversed(range(rank)):
idx = ', '.join([':' if ii == i else 'None' for ii in range(rank)])
offsets += [f'tl.arange(0, {shape[i]})[{idx}]*{stride}']
stride *= shape[i]
return f"{name} + {' + '.join(offsets)}"
@pytest.mark.parametrize("expr", [f'x[{s}]' for s in
['None, :', ':, None',\
'None, :, :', ':, :, None']\
])
def test_index1d(expr, device='cuda'):
dtype = torch.int32
rank_x = expr.count(':')
rank_y = expr.count(',') + 1
shape_x = [32 for _ in range(rank_x)]
shape_z = [32 for _ in range(rank_y)]
# Triton kernel
@triton.jit
def kernel(Z, X, **meta):
SIZE = meta['SIZE']
m = tl.arange(0, SIZE)
n = tl.arange(0, SIZE)
x = tl.load(X_PTR_EXPR)
z = GENERATE_TEST_HERE
tl.store(Z_PTR_EXPR, z)
to_replace = {
'X_PTR_EXPR': make_ptr_str('X', shape_x),
'Z_PTR_EXPR': make_ptr_str('Z', shape_z),
'GENERATE_TEST_HERE': expr,
}
kernel = patch_kernel(kernel, to_replace)
# torch result
x = triton.testing.random(shape_x, dtype=dtype, device=device)
y = torch.zeros(shape_z, dtype=dtype, device=device)
z_ref = eval(expr) + y
# triton result
z_tri = torch.empty_like(z_ref)
kernel[(1, )](z_tri, x, num_warps=1, SIZE=shape_x[0])
# compare
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test tuples
# ---------------
@triton.jit
def fn(a, b):
return a + b, \
a - b, \
a * b
def test_tuples():
|
# ---------------
# test atomics
# ---------------
@pytest.mark.parametrize("op, dtype_x, mode", itertools.chain.from_iterable([
[('add', 'int32', mode), ('add', 'float16', mode), ('add', 'float32', mode), \
('max', 'int32', mode), ('max', 'float32', mode),\
('min', 'int32', mode), ('min', 'float32', mode),\
]
for mode in ['all_neg', 'all_pos', 'min_neg', 'max_pos']]))
def test_atomic_rmw(op, dtype_x, mode, device='cuda'):
dtype_x = cvt[dtype_x]
n_programs = 37
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
pid = tl.program_id(0)
x = tl.load(X + pid)
old = GENERATE_TEST_HERE
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.atomic_{op}(Z, x)'})
torch_op = {'add': torch.sum, 'max': torch.max, 'min': torch.min}[op]
max_neutral = float('-inf') if dtype_x.is_floating_point else torch.iinfo(dtype_x).min
min_neutral = float('inf') if dtype_x.is_floating_point else torch.iinfo(dtype_x).max
neutral = {'add': 0, 'max': max_neutral, 'min': min_neutral}[op]
# triton result
x_tri = triton.testing.random((n_programs, ), dtype=dtype_x, device=device)
if mode == 'all_neg':
x_tri = -torch.abs(x_tri)
if mode == 'all_pos':
x_tri = torch.abs(x_tri)
if mode == 'min_neg':
idx = torch.randint(n_programs, size=(1, )).item()
x_tri[idx] = -torch.max(torch.abs(x_tri)) - 1
if mode == 'max_pos':
idx = torch.randint(n_programs, size=(1, )).item()
x_tri[idx] = torch.max(torch.abs(x_tri)) + 1
z_tri = torch.empty([], dtype=dtype_x, device=device)
z_tri.fill_(neutral)
kernel[(n_programs, )](x_tri, z_tri)
# torch result
z_ref = torch_op(x_tri).to(dtype_x)
# compare
exact = op not in ['add']
if exact:
assert z_ref.item() == z_tri.item()
else:
triton.testing.assert_allclose(z_ref, z_tri)
# ---------------
# test cast
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_z, bitcast", [
(dtype_x, dtype_z, False) \
for dtype_x in dtypes\
for dtype_z in dtypes
] + [
('float32', 'bfloat16', False),
('bfloat16', 'float32', False),
('float32', 'int32', True)
])
def test_cast(dtype_x, dtype_z, bitcast, device='cuda'):
x = torch.tensor([43.5], dtype=cvt[dtype_x], device=device)
# triton kernel
@triton.jit
def kernel(X, Z, **meta):
x = tl.load(X)
z = x.to(Z.dtype.element_ty, bitcast=meta['BITCAST'])
tl.store(Z, z)
# triton result
z_tri = torch.empty((1, ), dtype=cvt[dtype_z], device=device)
kernel[(1, )](x, z_tri, BITCAST=bitcast)
# torch result
if bitcast:
import numpy as np
z_ref = x.detach().cpu().numpy().view(getattr(np, dtype_z))
z_ref = torch.from_numpy(z_ref).to(device)
else:
z_ref = x.to(z_tri.dtype)
assert z_tri == z_ref
# ---------------
# test load
# ---------------
# ---------------
# test store
# ---------------
# ---------------
# test if
# ---------------
# ---------------
# test for
# ---------------
# ---------------
# test while
# ---------------
| device = 'cuda'
@triton.jit
def with_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = fn(x, y)
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
@triton.jit
def without_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = x + y, x - y, x * y
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
x = torch.tensor([1.3], device=device, dtype=torch.float32)
y = torch.tensor([1.9], device=device, dtype=torch.float32)
a_tri = torch.tensor([0], device=device, dtype=torch.float32)
b_tri = torch.tensor([0], device=device, dtype=torch.float32)
c_tri = torch.tensor([0], device=device, dtype=torch.float32)
for kernel in [with_fn, without_fn]:
kernel[(1, )](x, y, a_tri, b_tri, c_tri, num_warps=1)
a_ref, b_ref, c_ref = x + y, x - y, x * y
assert a_tri == a_ref
assert b_tri == b_ref
assert c_tri == c_ref | identifier_body |
variant.go | package variant
import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/hashicorp/hcl/v2/ext/typeexpr"
"github.com/mattn/go-isatty"
"github.com/spf13/cobra"
"github.com/zclconf/go-cty/cty"
"golang.org/x/xerrors"
"github.com/mumoshu/variant2/pkg/app"
)
var Version string
type Main struct {
// Command is the name of the executable used for this process.
// E.g. `go build -o myapp ./` and `./myapp cmd --flag1` results in Command being "myapp".
Command string
Source []byte
// Path can be a path to the directory or the file containing the definition for the Variant command being run
Path string
Stdout, Stderr io.Writer
Args []string
Getenv func(string) string
Getwd func() (string, error)
Setup app.Setup
}
type Setup func() (*Main, error)
type InitParams struct {
Command string
Setup app.Setup
}
type Option func(*Main)
func FromPath(path string, opts ...Option) Setup {
return func() (*Main, error) {
if path == "" {
var err error
path, err = os.Getwd()
if err != nil {
return nil, xerrors.Errorf("getwd: %w", err)
}
}
info, err := os.Stat(path)
if err != nil {
return nil, xerrors.Errorf("stat %s: %w", path, err)
}
var setup app.Setup
if info.IsDir() {
setup = app.FromDir(path)
} else {
setup = app.FromFile(path)
}
m := &Main{
Setup: setup,
}
if m.Command == "" {
m.Command = filepath.Base(path)
}
for _, o := range opts {
o(m)
}
return m, nil
}
}
func FromSource(cmd, source string) Setup {
return func() (*Main, error) {
if cmd == "" {
return nil, errors.New("command name must be set when loadling from Variant source file")
}
return &Main{
Command: cmd,
Setup: app.FromSources(map[string][]byte{cmd: []byte(source)}),
}, nil
}
}
func Load(setup Setup) (*Runner, error) {
initParams, err := setup()
if err != nil {
return nil, err
}
m := Init(*initParams)
return m.createRunner(m.Command, m.Setup)
}
func MustLoad(setup Setup) *Runner {
r, err := Load(setup)
if err != nil {
panic(err)
}
return r
}
func New() Main {
return Init(Main{})
}
type Env struct {
Args []string
Getenv func(name string) string
Getwd func() (string, error)
}
func GetPathAndArgsFromEnv(env Env) (string, string, []string) {
osArgs := env.Args
var cmd string
var path string
if len(osArgs) > 1 {
file := osArgs[1]
info, err := os.Stat(file)
if err == nil && info != nil && !info.IsDir() {
osArgs = osArgs[2:]
path = file
cmd = filepath.Base(file)
} else {
osArgs = osArgs[1:]
}
} else {
osArgs = []string{}
}
if path == "" {
dirFromEnv := env.Getenv("VARIANT_DIR")
if dirFromEnv != "" {
path = dirFromEnv
} else {
var err error
path, err = env.Getwd()
if err != nil {
panic(err)
}
}
}
return cmd, path, osArgs
}
func Init(m Main) Main {
if m.Stdout == nil |
if m.Stderr == nil {
m.Stderr = os.Stderr
}
if m.Getenv == nil {
m.Getenv = os.Getenv
}
if m.Getwd == nil {
m.Getwd = os.Getwd
}
cmdNameFromEnv := m.Getenv("VARIANT_NAME")
if cmdNameFromEnv != "" {
m.Command = cmdNameFromEnv
}
return m
}
type Config struct {
Parameters func([]string) (map[string]interface{}, error)
Options func() map[string]func() interface{}
}
func valueOnChange(cli *cobra.Command, name string, v interface{}) func() interface{} {
return func() interface{} {
// This avoids setting "" when the flag is actually missing, so that
// we can differentiate between when (1)an empty string is specified vs (2)no flag is provided.
if cli.PersistentFlags().Lookup(name).Changed {
return v
}
return nil
}
}
func createCobraFlagsFromVariantOptions(cli *cobra.Command, opts []app.OptionSpec, interactive bool) (map[string]func() interface{}, error) {
lazyOptionValues := map[string]func() interface{}{}
for i := range opts {
o := opts[i]
var tpe cty.Type
tpe, diags := typeexpr.TypeConstraint(o.Type)
if diags != nil {
return nil, diags
}
var desc string
if o.Description != nil {
desc = *o.Description
}
switch tpe {
case cty.String:
var v string
if o.Short != nil {
cli.PersistentFlags().StringVarP(&v, o.Name, *o.Short, "", desc)
} else {
cli.PersistentFlags().StringVar(&v, o.Name, "", desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.Bool:
var v bool
if o.Short != nil {
cli.PersistentFlags().BoolVarP(&v, o.Name, *o.Short, false, desc)
} else {
cli.PersistentFlags().BoolVar(&v, o.Name, false, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.Number:
var v int
if o.Short != nil {
cli.PersistentFlags().IntVarP(&v, o.Name, *o.Short, 0, desc)
} else {
cli.PersistentFlags().IntVar(&v, o.Name, 0, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.List(cty.String):
v := []string{}
if o.Short != nil {
cli.PersistentFlags().StringSliceVarP(&v, o.Name, *o.Short, []string{}, desc)
} else {
cli.PersistentFlags().StringSliceVar(&v, o.Name, []string{}, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.List(cty.Number):
v := []int{}
if o.Short != nil {
cli.PersistentFlags().IntSliceVarP(&v, o.Name, *o.Short, []int{}, desc)
} else {
cli.PersistentFlags().IntSliceVar(&v, o.Name, []int{}, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
}
if !app.IsExpressionEmpty(o.Default) || interactive {
} else if err := cli.MarkPersistentFlagRequired(o.Name); err != nil {
panic(err)
}
}
return lazyOptionValues, nil
}
func configureCommand(cli *cobra.Command, root app.JobSpec, interactive bool) (*Config, error) {
lazyOptionValues, err := createCobraFlagsFromVariantOptions(cli, root.Options, interactive)
if err != nil {
return nil, err
}
opts := func() map[string]func() interface{} {
m := map[string]func() interface{}{}
for name, f := range lazyOptionValues {
m[name] = f
}
return m
}
var minArgs int
var maxArgs int
lazyParamValues := map[string]func(args []string) (interface{}, error){}
var hasVarArgs bool
for i := range root.Parameters {
maxArgs++
p := root.Parameters[i]
r := p.Default.Range()
if r.Start == r.End {
minArgs++
}
ii := i
ty, err := typeexpr.TypeConstraint(p.Type)
if err != nil {
return nil, err
}
var f func([]string, int) (interface{}, error)
switch ty {
case cty.Bool:
f = func(args []string, i int) (interface{}, error) {
return strconv.ParseBool(args[i])
}
case cty.String:
f = func(args []string, i int) (interface{}, error) {
return args[i], nil
}
case cty.Number:
f = func(args []string, i int) (interface{}, error) {
return strconv.Atoi(args[i])
}
case cty.List(cty.String):
if i != len(root.Parameters)-1 {
return nil, fmt.Errorf("list(string) parameter %q must be positioned at last", p.Name)
}
f = func(args []string, i int) (interface{}, error) {
return args[i:], nil
}
hasVarArgs = true
default:
return nil, fmt.Errorf("invalid parameter %q: type %s is not supported", p.Name, ty.FriendlyName())
}
lazyParamValues[p.Name] = func(args []string) (interface{}, error) {
if len(args) <= ii {
return nil, nil
}
return f(args, ii)
}
}
if hasVarArgs {
cli.Args = cobra.MinimumNArgs(minArgs)
} else {
cli.Args = cobra.RangeArgs(minArgs, maxArgs)
}
params := func(args []string) (map[string]interface{}, error) {
m := map[string]interface{}{}
for name, f := range lazyParamValues {
v, err := f(args)
if err != nil {
return nil, err
}
m[name] = v
}
return m, nil
}
return &Config{Parameters: params, Options: opts}, nil
}
func getMergedParamsAndOpts(
cfgs map[string]*Config, cmdName string, args []string) (map[string]interface{}, map[string]interface{}, error) {
names := strings.Split(cmdName, " ")
optGetters := map[string]func() interface{}{}
for i := range names {
curName := strings.Join(names[:i+1], " ")
if curCfg, ok := cfgs[curName]; ok {
curOpts := curCfg.Options()
for n := range curOpts {
optGetters[n] = curOpts[n]
}
}
}
cfg := cfgs[cmdName]
params, err := cfg.Parameters(args)
if err != nil {
return nil, nil, err
}
opts := map[string]interface{}{}
for n, get := range optGetters {
opts[n] = get()
}
return params, opts, nil
}
func (m *Main) initApp(setup app.Setup) (*app.App, error) {
ap, err := app.New(setup)
if err != nil {
if ap == nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
} else {
ap.PrintError(err)
}
//nolint:wrapcheck
return nil, err
}
ap.Stdout = m.Stdout
ap.Stderr = m.Stderr
return ap, nil
}
func (m Main) createRunner(cmd string, setup app.Setup) (*Runner, error) {
ap, err := m.initApp(setup)
if err != nil {
return nil, err
}
return m.newRunner(ap, cmd), nil
}
func (m Main) newRunner(ap *app.App, cmdName string) *Runner {
m2 := &Runner{
mut: &sync.Mutex{},
ap: ap,
runCmdName: cmdName,
}
m.initRunner(m2)
return m2
}
func (m Main) initRunner(r *Runner) {
siTty := isatty.IsTerminal(os.Stdin.Fd())
soTty := isatty.IsTerminal(os.Stdout.Fd())
// Enable prompts for missing inputs when stdin and stdout are connected to a tty
r.Interactive = siTty && soTty
if r.Interactive {
r.SetOpts = app.DefaultSetOpts
}
r.goJobs = map[string]Job{}
r.jobRunProviders = map[string]func(State) JobRun{}
for jobName := range r.ap.JobByName {
n := jobName
r.jobRunProviders[n] = func(st State) JobRun {
return func(ctx context.Context) error {
if st.Stdout != nil {
defer func() {
if err := st.Stdout.Close(); err != nil {
panic(err)
}
}()
}
if st.Stderr != nil {
defer func() {
if err := st.Stderr.Close(); err != nil {
panic(err)
}
}()
}
r, err := r.ap.Run(n, st.Parameters, st.Options)
if err != nil {
return xerrors.Errorf("running job %q: %w", n, err)
}
if st.Stdout != nil {
if _, err := st.Stdout.Write([]byte(r.Stdout)); err != nil {
return xerrors.Errorf("writing stdout of job %q: %w", n, err)
}
}
if st.Stderr != nil {
if _, err := st.Stderr.Write([]byte(r.Stderr)); err != nil {
return xerrors.Errorf("writing stderr of job %q: %w", n, err)
}
}
return nil
}
}
}
}
type Runner struct {
ap *app.App
runCmdName string
runCmd *cobra.Command
variantCmd *cobra.Command
goJobs map[string]Job
jobRunProviders map[string]func(State) JobRun
Interactive bool
SetOpts app.SetOptsFunc
mut *sync.Mutex
}
func (r *Runner) Cobra() (*cobra.Command, error) {
ap, rootCmdName := r.ap, r.runCmdName
if rootCmdName == "" {
rootCmdName = "run"
}
jobs := map[string]app.JobSpec{}
jobNames := []string{}
for jobName, j := range ap.JobByName {
var name string
if jobName == "" {
name = rootCmdName
} else {
name = fmt.Sprintf("%s %s", rootCmdName, jobName)
}
jobs[name] = j
jobNames = append(jobNames, name)
}
sort.Strings(jobNames)
commands := map[string]*cobra.Command{}
cfgs := map[string]*Config{}
for _, n := range jobNames {
name := n
job := jobs[name]
names := strings.Split(name, " ")
var parent *cobra.Command
cmdName := names[len(names)-1]
switch len(names) {
case 1:
default:
names = names[:len(names)-1]
var ok bool
parent, ok = commands[strings.Join(names, " ")]
if !ok {
for i := range names {
intName := strings.Join(names[:i+1], " ")
cur, ok := commands[intName]
if !ok {
cur = &cobra.Command{
Use: names[i],
}
parent.AddCommand(cur)
commands[intName] = cur
}
parent = cur
}
}
}
var desc string
if job.Description != nil {
desc = *job.Description
}
for _, p := range job.Parameters {
cmdName += fmt.Sprintf(" [%s]", strings.ToUpper(p.Name))
}
cli := &cobra.Command{
Use: cmdName,
Short: strings.Split(desc, "\n")[0],
Long: desc,
}
if job.Private != nil {
cli.Hidden = *job.Private
}
cfg, err := configureCommand(cli, job, r.Interactive)
if err != nil {
return nil, err
}
cfgs[name] = cfg
cli.RunE = func(cmd *cobra.Command, args []string) error {
params, opts, err := getMergedParamsAndOpts(cfgs, name, args)
if err != nil {
return err
}
_, err = ap.Run(job.Name, params, opts, r.SetOpts)
if err != nil && err.Error() != app.NoRunMessage {
cmd.SilenceUsage = true
}
//nolint:wrapcheck
return err
}
commands[name] = cli
if parent != nil {
parent.AddCommand(cli)
}
}
rootCmd := commands[rootCmdName]
return rootCmd, nil
}
type RunOptions struct {
Stdout io.Writer
Stderr io.Writer
SetOpts app.SetOptsFunc
DisableLocking bool
}
// Add adds a job to this runner so that it can later by calling `Job`.
func (r Runner) Add(job Job) {
r.goJobs[job.Name] = job
if job.Name == "" {
panic(fmt.Errorf("invalid job name %q", job.Name))
}
r.jobRunProviders[job.Name] = func(st State) JobRun {
return func(ctx context.Context) error {
return job.Run(ctx, st)
}
}
}
// Job prepares a job to be run.
func (r Runner) Job(job string, opts State) (JobRun, error) {
f, ok := r.jobRunProviders[job]
if !ok {
return nil, fmt.Errorf("job %q not added", job)
}
if opts.Options == nil {
opts.Options = map[string]interface{}{}
}
if opts.Parameters == nil {
opts.Parameters = map[string]interface{}{}
}
jr := f(opts)
return jr, nil
}
func (r *Runner) Run(arguments []string, opt ...RunOptions) error {
var opts RunOptions
if len(opt) > 0 {
opts = opt[0]
}
if !opts.DisableLocking {
r.mut.Lock()
defer r.mut.Unlock()
}
if opts.SetOpts != nil {
r.SetOpts = opts.SetOpts
defer func() {
r.SetOpts = nil
}()
}
if r.runCmd == nil {
var err error
r.runCmd, err = r.Cobra()
if err != nil {
r.ap.PrintError(err)
return err
}
}
var cmd *cobra.Command
if r.runCmdName != "" {
cmd = r.runCmd
} else {
if r.variantCmd == nil {
r.variantCmd = r.createVariantRootCommand()
}
cmd = r.variantCmd
}
var err error
{
cmdStdout := cmd.OutOrStdout()
cmdStderr := cmd.OutOrStderr()
appStdout := r.ap.Stdout
appStderr := r.ap.Stderr
cmd.SetArgs(arguments)
if opts.Stdout != nil {
cmd.SetOut(opts.Stdout)
r.ap.Stdout = opts.Stdout
}
if opts.Stderr != nil {
cmd.SetErr(opts.Stderr)
r.ap.Stderr = opts.Stderr
}
err = cmd.Execute()
cmd.SetOut(cmdStdout)
cmd.SetErr(cmdStderr)
r.ap.Stdout = appStdout
r.ap.Stderr = appStderr
}
//nolint:wrapcheck
return err
}
type Error struct {
Message string
ExitCode int
}
func (e Error) Error() string {
return e.Message
}
func (r *Runner) createVariantRootCommand() *cobra.Command {
const VariantBinName = "variant"
rootCmd := &cobra.Command{
Use: VariantBinName,
Version: Version,
}
testCmd := &cobra.Command{
Use: "test [NAME]",
Short: "Run test(s)",
Args: cobra.MaximumNArgs(1),
RunE: func(c *cobra.Command, args []string) error {
var prefix string
if len(args) > 0 {
prefix = args[0]
}
_, err := r.ap.RunTests(prefix)
if err != nil {
c.SilenceUsage = true
}
//nolint:wrapcheck
return err
},
}
exportCmd := &cobra.Command{
Use: "export SUBCOMMAND SRC_DIR OUTPUT_PATH",
Short: "Export the Variant command defined in SRC_DIR to OUTPUT_PATH",
}
{
shimCmd := &cobra.Command{
Use: "shim SRC_DIR DST_DIR",
Short: "Copy and generate shim for the Variant command defined in the SRC",
Args: cobra.ExactArgs(2),
RunE: func(c *cobra.Command, args []string) error {
err := r.ap.ExportShim(args[0], args[1])
if err != nil {
c.SilenceUsage = true
}
//nolint:wrapcheck
return err
},
}
exportCmd.AddCommand(shimCmd)
exportCmd.AddCommand(newExportGo(r))
exportCmd.AddCommand(newExportBinary(r))
}
generateCmd := &cobra.Command{
Use: "generate RESOURCE DIR",
Short: "Generate RESOURCE for the Variant command defined in DIR",
}
{
generateShimCmd := &cobra.Command{
Use: "shim DIR",
Short: "Generate a shim for the Variant command defined in DIR",
Args: cobra.ExactArgs(1),
RunE: func(c *cobra.Command, args []string) error {
err := app.GenerateShim(VariantBinName, args[0])
if err != nil {
c.SilenceUsage = true
}
return err
},
}
generateCmd.AddCommand(generateShimCmd)
}
startCmd := &cobra.Command{
Use: "start NAME",
Short: "Start the named integration to turn the Variant command to whatever",
}
{
var botName string
startSlackbotCmd := &cobra.Command{
Use: "slackbot",
Short: "Start the slackbot that responds to slash commands by running corresopnding Variant commands",
RunE: func(c *cobra.Command, args []string) error {
err := r.StartSlackbot(botName)
if err != nil {
c.SilenceUsage = true
}
return err
},
}
startSlackbotCmd.Flags().StringVarP(&botName, "name", "n", "", "Name of the slash command without /. For example, \"--name foo\" results in the bot responding to \"/foo <CMD> <ARGS>\"")
if err := startSlackbotCmd.MarkFlagRequired("name"); err != nil {
panic(err)
}
startCmd.AddCommand(startSlackbotCmd)
}
rootCmd.AddCommand(r.runCmd)
rootCmd.AddCommand(testCmd)
rootCmd.AddCommand(exportCmd)
rootCmd.AddCommand(generateCmd)
rootCmd.AddCommand(startCmd)
return rootCmd
}
| {
m.Stdout = os.Stdout
} | conditional_block |
variant.go | package variant
import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/hashicorp/hcl/v2/ext/typeexpr"
"github.com/mattn/go-isatty"
"github.com/spf13/cobra"
"github.com/zclconf/go-cty/cty"
"golang.org/x/xerrors"
"github.com/mumoshu/variant2/pkg/app"
)
var Version string
type Main struct {
// Command is the name of the executable used for this process.
// E.g. `go build -o myapp ./` and `./myapp cmd --flag1` results in Command being "myapp".
Command string
Source []byte
// Path can be a path to the directory or the file containing the definition for the Variant command being run
Path string
Stdout, Stderr io.Writer
Args []string
Getenv func(string) string
Getwd func() (string, error)
Setup app.Setup
}
type Setup func() (*Main, error)
type InitParams struct {
Command string
Setup app.Setup
}
type Option func(*Main)
func FromPath(path string, opts ...Option) Setup {
return func() (*Main, error) {
if path == "" {
var err error
path, err = os.Getwd()
if err != nil {
return nil, xerrors.Errorf("getwd: %w", err)
}
}
info, err := os.Stat(path)
if err != nil {
return nil, xerrors.Errorf("stat %s: %w", path, err)
}
var setup app.Setup
if info.IsDir() {
setup = app.FromDir(path)
} else {
setup = app.FromFile(path)
}
m := &Main{
Setup: setup,
}
if m.Command == "" {
m.Command = filepath.Base(path)
}
for _, o := range opts {
o(m)
}
return m, nil
}
}
func FromSource(cmd, source string) Setup {
return func() (*Main, error) {
if cmd == "" {
return nil, errors.New("command name must be set when loadling from Variant source file")
}
return &Main{
Command: cmd,
Setup: app.FromSources(map[string][]byte{cmd: []byte(source)}),
}, nil
}
}
func Load(setup Setup) (*Runner, error) {
initParams, err := setup()
if err != nil {
return nil, err
}
m := Init(*initParams)
return m.createRunner(m.Command, m.Setup)
}
func MustLoad(setup Setup) *Runner {
r, err := Load(setup)
if err != nil {
panic(err)
}
return r
}
func New() Main {
return Init(Main{})
}
type Env struct {
Args []string
Getenv func(name string) string
Getwd func() (string, error)
}
func GetPathAndArgsFromEnv(env Env) (string, string, []string) {
osArgs := env.Args
var cmd string
var path string
if len(osArgs) > 1 {
file := osArgs[1]
info, err := os.Stat(file)
if err == nil && info != nil && !info.IsDir() {
osArgs = osArgs[2:]
path = file
cmd = filepath.Base(file)
} else {
osArgs = osArgs[1:]
}
} else {
osArgs = []string{}
}
if path == "" {
dirFromEnv := env.Getenv("VARIANT_DIR")
if dirFromEnv != "" {
path = dirFromEnv
} else {
var err error
path, err = env.Getwd()
if err != nil {
panic(err)
}
}
}
return cmd, path, osArgs
}
func Init(m Main) Main {
if m.Stdout == nil {
m.Stdout = os.Stdout
}
if m.Stderr == nil {
m.Stderr = os.Stderr
}
if m.Getenv == nil {
m.Getenv = os.Getenv
}
if m.Getwd == nil {
m.Getwd = os.Getwd
}
cmdNameFromEnv := m.Getenv("VARIANT_NAME")
if cmdNameFromEnv != "" {
m.Command = cmdNameFromEnv
}
return m
}
type Config struct {
Parameters func([]string) (map[string]interface{}, error)
Options func() map[string]func() interface{}
}
func valueOnChange(cli *cobra.Command, name string, v interface{}) func() interface{} {
return func() interface{} {
// This avoids setting "" when the flag is actually missing, so that
// we can differentiate between when (1)an empty string is specified vs (2)no flag is provided.
if cli.PersistentFlags().Lookup(name).Changed {
return v
}
return nil
}
}
func createCobraFlagsFromVariantOptions(cli *cobra.Command, opts []app.OptionSpec, interactive bool) (map[string]func() interface{}, error) {
lazyOptionValues := map[string]func() interface{}{}
for i := range opts {
o := opts[i]
var tpe cty.Type
tpe, diags := typeexpr.TypeConstraint(o.Type)
if diags != nil {
return nil, diags
}
var desc string
if o.Description != nil {
desc = *o.Description
}
switch tpe {
case cty.String:
var v string
if o.Short != nil {
cli.PersistentFlags().StringVarP(&v, o.Name, *o.Short, "", desc)
} else {
cli.PersistentFlags().StringVar(&v, o.Name, "", desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.Bool:
var v bool
if o.Short != nil {
cli.PersistentFlags().BoolVarP(&v, o.Name, *o.Short, false, desc)
} else {
cli.PersistentFlags().BoolVar(&v, o.Name, false, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.Number:
var v int
if o.Short != nil {
cli.PersistentFlags().IntVarP(&v, o.Name, *o.Short, 0, desc)
} else {
cli.PersistentFlags().IntVar(&v, o.Name, 0, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.List(cty.String):
v := []string{}
if o.Short != nil {
cli.PersistentFlags().StringSliceVarP(&v, o.Name, *o.Short, []string{}, desc)
} else {
cli.PersistentFlags().StringSliceVar(&v, o.Name, []string{}, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.List(cty.Number):
v := []int{}
if o.Short != nil {
cli.PersistentFlags().IntSliceVarP(&v, o.Name, *o.Short, []int{}, desc)
} else {
cli.PersistentFlags().IntSliceVar(&v, o.Name, []int{}, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
}
if !app.IsExpressionEmpty(o.Default) || interactive {
} else if err := cli.MarkPersistentFlagRequired(o.Name); err != nil {
panic(err)
}
}
return lazyOptionValues, nil
}
func configureCommand(cli *cobra.Command, root app.JobSpec, interactive bool) (*Config, error) {
lazyOptionValues, err := createCobraFlagsFromVariantOptions(cli, root.Options, interactive)
if err != nil {
return nil, err
}
opts := func() map[string]func() interface{} {
m := map[string]func() interface{}{}
for name, f := range lazyOptionValues {
m[name] = f
}
return m
}
var minArgs int
var maxArgs int
lazyParamValues := map[string]func(args []string) (interface{}, error){}
var hasVarArgs bool
for i := range root.Parameters {
maxArgs++
p := root.Parameters[i]
r := p.Default.Range()
if r.Start == r.End {
minArgs++
}
ii := i
ty, err := typeexpr.TypeConstraint(p.Type)
if err != nil {
return nil, err
}
var f func([]string, int) (interface{}, error)
switch ty {
case cty.Bool:
f = func(args []string, i int) (interface{}, error) {
return strconv.ParseBool(args[i])
}
case cty.String:
f = func(args []string, i int) (interface{}, error) {
return args[i], nil
}
case cty.Number:
f = func(args []string, i int) (interface{}, error) {
return strconv.Atoi(args[i])
}
case cty.List(cty.String):
if i != len(root.Parameters)-1 {
return nil, fmt.Errorf("list(string) parameter %q must be positioned at last", p.Name)
}
f = func(args []string, i int) (interface{}, error) {
return args[i:], nil
}
hasVarArgs = true
default:
return nil, fmt.Errorf("invalid parameter %q: type %s is not supported", p.Name, ty.FriendlyName())
}
lazyParamValues[p.Name] = func(args []string) (interface{}, error) {
if len(args) <= ii {
return nil, nil
}
return f(args, ii)
}
}
if hasVarArgs {
cli.Args = cobra.MinimumNArgs(minArgs)
} else {
cli.Args = cobra.RangeArgs(minArgs, maxArgs)
}
params := func(args []string) (map[string]interface{}, error) {
m := map[string]interface{}{}
for name, f := range lazyParamValues {
v, err := f(args)
if err != nil {
return nil, err
}
m[name] = v
}
return m, nil
}
return &Config{Parameters: params, Options: opts}, nil
}
func getMergedParamsAndOpts(
cfgs map[string]*Config, cmdName string, args []string) (map[string]interface{}, map[string]interface{}, error) {
names := strings.Split(cmdName, " ")
optGetters := map[string]func() interface{}{}
for i := range names {
curName := strings.Join(names[:i+1], " ")
if curCfg, ok := cfgs[curName]; ok {
curOpts := curCfg.Options()
for n := range curOpts {
optGetters[n] = curOpts[n]
}
}
}
cfg := cfgs[cmdName]
params, err := cfg.Parameters(args)
if err != nil {
return nil, nil, err
}
opts := map[string]interface{}{}
for n, get := range optGetters {
opts[n] = get()
}
return params, opts, nil
}
func (m *Main) initApp(setup app.Setup) (*app.App, error) {
ap, err := app.New(setup)
if err != nil {
if ap == nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
} else {
ap.PrintError(err)
}
//nolint:wrapcheck
return nil, err
}
ap.Stdout = m.Stdout
ap.Stderr = m.Stderr
return ap, nil
}
func (m Main) createRunner(cmd string, setup app.Setup) (*Runner, error) {
ap, err := m.initApp(setup)
if err != nil {
return nil, err
}
return m.newRunner(ap, cmd), nil
}
func (m Main) newRunner(ap *app.App, cmdName string) *Runner {
m2 := &Runner{
mut: &sync.Mutex{},
ap: ap,
runCmdName: cmdName,
}
m.initRunner(m2)
return m2
}
func (m Main) initRunner(r *Runner) {
siTty := isatty.IsTerminal(os.Stdin.Fd())
soTty := isatty.IsTerminal(os.Stdout.Fd())
// Enable prompts for missing inputs when stdin and stdout are connected to a tty
r.Interactive = siTty && soTty
if r.Interactive {
r.SetOpts = app.DefaultSetOpts
}
r.goJobs = map[string]Job{}
r.jobRunProviders = map[string]func(State) JobRun{}
for jobName := range r.ap.JobByName {
n := jobName
r.jobRunProviders[n] = func(st State) JobRun {
return func(ctx context.Context) error {
if st.Stdout != nil {
defer func() {
if err := st.Stdout.Close(); err != nil {
panic(err)
}
}()
}
if st.Stderr != nil {
defer func() {
if err := st.Stderr.Close(); err != nil {
panic(err)
}
}()
}
r, err := r.ap.Run(n, st.Parameters, st.Options)
if err != nil {
return xerrors.Errorf("running job %q: %w", n, err)
}
if st.Stdout != nil {
if _, err := st.Stdout.Write([]byte(r.Stdout)); err != nil {
return xerrors.Errorf("writing stdout of job %q: %w", n, err)
}
}
if st.Stderr != nil {
if _, err := st.Stderr.Write([]byte(r.Stderr)); err != nil {
return xerrors.Errorf("writing stderr of job %q: %w", n, err)
}
}
return nil
}
}
}
}
type Runner struct {
ap *app.App
runCmdName string
runCmd *cobra.Command
variantCmd *cobra.Command
goJobs map[string]Job
jobRunProviders map[string]func(State) JobRun
Interactive bool
SetOpts app.SetOptsFunc
mut *sync.Mutex
}
func (r *Runner) Cobra() (*cobra.Command, error) {
ap, rootCmdName := r.ap, r.runCmdName
if rootCmdName == "" {
rootCmdName = "run"
}
jobs := map[string]app.JobSpec{}
jobNames := []string{}
for jobName, j := range ap.JobByName {
var name string
if jobName == "" {
name = rootCmdName
} else {
name = fmt.Sprintf("%s %s", rootCmdName, jobName)
}
jobs[name] = j
jobNames = append(jobNames, name)
}
sort.Strings(jobNames)
commands := map[string]*cobra.Command{}
cfgs := map[string]*Config{}
for _, n := range jobNames {
name := n
job := jobs[name]
names := strings.Split(name, " ")
var parent *cobra.Command
cmdName := names[len(names)-1]
switch len(names) {
case 1:
default:
names = names[:len(names)-1]
var ok bool
parent, ok = commands[strings.Join(names, " ")]
if !ok {
for i := range names {
intName := strings.Join(names[:i+1], " ")
cur, ok := commands[intName]
if !ok {
cur = &cobra.Command{
Use: names[i],
}
parent.AddCommand(cur)
commands[intName] = cur
}
parent = cur
}
}
}
var desc string
if job.Description != nil {
desc = *job.Description
}
for _, p := range job.Parameters {
cmdName += fmt.Sprintf(" [%s]", strings.ToUpper(p.Name))
}
cli := &cobra.Command{
Use: cmdName,
Short: strings.Split(desc, "\n")[0],
Long: desc,
}
if job.Private != nil {
cli.Hidden = *job.Private
}
cfg, err := configureCommand(cli, job, r.Interactive)
if err != nil {
return nil, err
}
cfgs[name] = cfg
cli.RunE = func(cmd *cobra.Command, args []string) error {
params, opts, err := getMergedParamsAndOpts(cfgs, name, args)
if err != nil {
return err
}
_, err = ap.Run(job.Name, params, opts, r.SetOpts)
if err != nil && err.Error() != app.NoRunMessage {
cmd.SilenceUsage = true
}
//nolint:wrapcheck
return err
}
commands[name] = cli
if parent != nil {
parent.AddCommand(cli)
}
}
rootCmd := commands[rootCmdName]
return rootCmd, nil
}
type RunOptions struct {
Stdout io.Writer
Stderr io.Writer
SetOpts app.SetOptsFunc
DisableLocking bool
}
// Add adds a job to this runner so that it can later by calling `Job`.
func (r Runner) Add(job Job) {
r.goJobs[job.Name] = job
if job.Name == "" {
panic(fmt.Errorf("invalid job name %q", job.Name))
}
r.jobRunProviders[job.Name] = func(st State) JobRun {
return func(ctx context.Context) error {
return job.Run(ctx, st)
}
}
}
// Job prepares a job to be run.
func (r Runner) Job(job string, opts State) (JobRun, error) {
f, ok := r.jobRunProviders[job]
if !ok {
return nil, fmt.Errorf("job %q not added", job)
}
if opts.Options == nil {
opts.Options = map[string]interface{}{}
}
if opts.Parameters == nil {
opts.Parameters = map[string]interface{}{}
}
jr := f(opts)
return jr, nil
}
func (r *Runner) Run(arguments []string, opt ...RunOptions) error {
var opts RunOptions
if len(opt) > 0 {
opts = opt[0]
}
if !opts.DisableLocking {
r.mut.Lock()
defer r.mut.Unlock()
}
if opts.SetOpts != nil {
r.SetOpts = opts.SetOpts
defer func() {
r.SetOpts = nil
}()
}
if r.runCmd == nil {
var err error
r.runCmd, err = r.Cobra()
if err != nil {
r.ap.PrintError(err)
return err
}
}
var cmd *cobra.Command
if r.runCmdName != "" {
cmd = r.runCmd
} else {
if r.variantCmd == nil { |
cmd = r.variantCmd
}
var err error
{
cmdStdout := cmd.OutOrStdout()
cmdStderr := cmd.OutOrStderr()
appStdout := r.ap.Stdout
appStderr := r.ap.Stderr
cmd.SetArgs(arguments)
if opts.Stdout != nil {
cmd.SetOut(opts.Stdout)
r.ap.Stdout = opts.Stdout
}
if opts.Stderr != nil {
cmd.SetErr(opts.Stderr)
r.ap.Stderr = opts.Stderr
}
err = cmd.Execute()
cmd.SetOut(cmdStdout)
cmd.SetErr(cmdStderr)
r.ap.Stdout = appStdout
r.ap.Stderr = appStderr
}
//nolint:wrapcheck
return err
}
type Error struct {
Message string
ExitCode int
}
func (e Error) Error() string {
return e.Message
}
func (r *Runner) createVariantRootCommand() *cobra.Command {
const VariantBinName = "variant"
rootCmd := &cobra.Command{
Use: VariantBinName,
Version: Version,
}
testCmd := &cobra.Command{
Use: "test [NAME]",
Short: "Run test(s)",
Args: cobra.MaximumNArgs(1),
RunE: func(c *cobra.Command, args []string) error {
var prefix string
if len(args) > 0 {
prefix = args[0]
}
_, err := r.ap.RunTests(prefix)
if err != nil {
c.SilenceUsage = true
}
//nolint:wrapcheck
return err
},
}
exportCmd := &cobra.Command{
Use: "export SUBCOMMAND SRC_DIR OUTPUT_PATH",
Short: "Export the Variant command defined in SRC_DIR to OUTPUT_PATH",
}
{
shimCmd := &cobra.Command{
Use: "shim SRC_DIR DST_DIR",
Short: "Copy and generate shim for the Variant command defined in the SRC",
Args: cobra.ExactArgs(2),
RunE: func(c *cobra.Command, args []string) error {
err := r.ap.ExportShim(args[0], args[1])
if err != nil {
c.SilenceUsage = true
}
//nolint:wrapcheck
return err
},
}
exportCmd.AddCommand(shimCmd)
exportCmd.AddCommand(newExportGo(r))
exportCmd.AddCommand(newExportBinary(r))
}
generateCmd := &cobra.Command{
Use: "generate RESOURCE DIR",
Short: "Generate RESOURCE for the Variant command defined in DIR",
}
{
generateShimCmd := &cobra.Command{
Use: "shim DIR",
Short: "Generate a shim for the Variant command defined in DIR",
Args: cobra.ExactArgs(1),
RunE: func(c *cobra.Command, args []string) error {
err := app.GenerateShim(VariantBinName, args[0])
if err != nil {
c.SilenceUsage = true
}
return err
},
}
generateCmd.AddCommand(generateShimCmd)
}
startCmd := &cobra.Command{
Use: "start NAME",
Short: "Start the named integration to turn the Variant command to whatever",
}
{
var botName string
startSlackbotCmd := &cobra.Command{
Use: "slackbot",
Short: "Start the slackbot that responds to slash commands by running corresopnding Variant commands",
RunE: func(c *cobra.Command, args []string) error {
err := r.StartSlackbot(botName)
if err != nil {
c.SilenceUsage = true
}
return err
},
}
startSlackbotCmd.Flags().StringVarP(&botName, "name", "n", "", "Name of the slash command without /. For example, \"--name foo\" results in the bot responding to \"/foo <CMD> <ARGS>\"")
if err := startSlackbotCmd.MarkFlagRequired("name"); err != nil {
panic(err)
}
startCmd.AddCommand(startSlackbotCmd)
}
rootCmd.AddCommand(r.runCmd)
rootCmd.AddCommand(testCmd)
rootCmd.AddCommand(exportCmd)
rootCmd.AddCommand(generateCmd)
rootCmd.AddCommand(startCmd)
return rootCmd
} | r.variantCmd = r.createVariantRootCommand()
} | random_line_split |
variant.go | package variant
import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/hashicorp/hcl/v2/ext/typeexpr"
"github.com/mattn/go-isatty"
"github.com/spf13/cobra"
"github.com/zclconf/go-cty/cty"
"golang.org/x/xerrors"
"github.com/mumoshu/variant2/pkg/app"
)
var Version string
type Main struct {
// Command is the name of the executable used for this process.
// E.g. `go build -o myapp ./` and `./myapp cmd --flag1` results in Command being "myapp".
Command string
Source []byte
// Path can be a path to the directory or the file containing the definition for the Variant command being run
Path string
Stdout, Stderr io.Writer
Args []string
Getenv func(string) string
Getwd func() (string, error)
Setup app.Setup
}
type Setup func() (*Main, error)
type InitParams struct {
Command string
Setup app.Setup
}
type Option func(*Main)
func FromPath(path string, opts ...Option) Setup {
return func() (*Main, error) {
if path == "" {
var err error
path, err = os.Getwd()
if err != nil {
return nil, xerrors.Errorf("getwd: %w", err)
}
}
info, err := os.Stat(path)
if err != nil {
return nil, xerrors.Errorf("stat %s: %w", path, err)
}
var setup app.Setup
if info.IsDir() {
setup = app.FromDir(path)
} else {
setup = app.FromFile(path)
}
m := &Main{
Setup: setup,
}
if m.Command == "" {
m.Command = filepath.Base(path)
}
for _, o := range opts {
o(m)
}
return m, nil
}
}
func FromSource(cmd, source string) Setup {
return func() (*Main, error) {
if cmd == "" {
return nil, errors.New("command name must be set when loadling from Variant source file")
}
return &Main{
Command: cmd,
Setup: app.FromSources(map[string][]byte{cmd: []byte(source)}),
}, nil
}
}
func Load(setup Setup) (*Runner, error) {
initParams, err := setup()
if err != nil {
return nil, err
}
m := Init(*initParams)
return m.createRunner(m.Command, m.Setup)
}
func MustLoad(setup Setup) *Runner {
r, err := Load(setup)
if err != nil {
panic(err)
}
return r
}
func New() Main {
return Init(Main{})
}
type Env struct {
Args []string
Getenv func(name string) string
Getwd func() (string, error)
}
func GetPathAndArgsFromEnv(env Env) (string, string, []string) {
osArgs := env.Args
var cmd string
var path string
if len(osArgs) > 1 {
file := osArgs[1]
info, err := os.Stat(file)
if err == nil && info != nil && !info.IsDir() {
osArgs = osArgs[2:]
path = file
cmd = filepath.Base(file)
} else {
osArgs = osArgs[1:]
}
} else {
osArgs = []string{}
}
if path == "" {
dirFromEnv := env.Getenv("VARIANT_DIR")
if dirFromEnv != "" {
path = dirFromEnv
} else {
var err error
path, err = env.Getwd()
if err != nil {
panic(err)
}
}
}
return cmd, path, osArgs
}
func Init(m Main) Main {
if m.Stdout == nil {
m.Stdout = os.Stdout
}
if m.Stderr == nil {
m.Stderr = os.Stderr
}
if m.Getenv == nil {
m.Getenv = os.Getenv
}
if m.Getwd == nil {
m.Getwd = os.Getwd
}
cmdNameFromEnv := m.Getenv("VARIANT_NAME")
if cmdNameFromEnv != "" {
m.Command = cmdNameFromEnv
}
return m
}
type Config struct {
Parameters func([]string) (map[string]interface{}, error)
Options func() map[string]func() interface{}
}
func valueOnChange(cli *cobra.Command, name string, v interface{}) func() interface{} {
return func() interface{} {
// This avoids setting "" when the flag is actually missing, so that
// we can differentiate between when (1)an empty string is specified vs (2)no flag is provided.
if cli.PersistentFlags().Lookup(name).Changed {
return v
}
return nil
}
}
func createCobraFlagsFromVariantOptions(cli *cobra.Command, opts []app.OptionSpec, interactive bool) (map[string]func() interface{}, error) {
lazyOptionValues := map[string]func() interface{}{}
for i := range opts {
o := opts[i]
var tpe cty.Type
tpe, diags := typeexpr.TypeConstraint(o.Type)
if diags != nil {
return nil, diags
}
var desc string
if o.Description != nil {
desc = *o.Description
}
switch tpe {
case cty.String:
var v string
if o.Short != nil {
cli.PersistentFlags().StringVarP(&v, o.Name, *o.Short, "", desc)
} else {
cli.PersistentFlags().StringVar(&v, o.Name, "", desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.Bool:
var v bool
if o.Short != nil {
cli.PersistentFlags().BoolVarP(&v, o.Name, *o.Short, false, desc)
} else {
cli.PersistentFlags().BoolVar(&v, o.Name, false, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.Number:
var v int
if o.Short != nil {
cli.PersistentFlags().IntVarP(&v, o.Name, *o.Short, 0, desc)
} else {
cli.PersistentFlags().IntVar(&v, o.Name, 0, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.List(cty.String):
v := []string{}
if o.Short != nil {
cli.PersistentFlags().StringSliceVarP(&v, o.Name, *o.Short, []string{}, desc)
} else {
cli.PersistentFlags().StringSliceVar(&v, o.Name, []string{}, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.List(cty.Number):
v := []int{}
if o.Short != nil {
cli.PersistentFlags().IntSliceVarP(&v, o.Name, *o.Short, []int{}, desc)
} else {
cli.PersistentFlags().IntSliceVar(&v, o.Name, []int{}, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
}
if !app.IsExpressionEmpty(o.Default) || interactive {
} else if err := cli.MarkPersistentFlagRequired(o.Name); err != nil {
panic(err)
}
}
return lazyOptionValues, nil
}
func configureCommand(cli *cobra.Command, root app.JobSpec, interactive bool) (*Config, error) {
lazyOptionValues, err := createCobraFlagsFromVariantOptions(cli, root.Options, interactive)
if err != nil {
return nil, err
}
opts := func() map[string]func() interface{} {
m := map[string]func() interface{}{}
for name, f := range lazyOptionValues {
m[name] = f
}
return m
}
var minArgs int
var maxArgs int
lazyParamValues := map[string]func(args []string) (interface{}, error){}
var hasVarArgs bool
for i := range root.Parameters {
maxArgs++
p := root.Parameters[i]
r := p.Default.Range()
if r.Start == r.End {
minArgs++
}
ii := i
ty, err := typeexpr.TypeConstraint(p.Type)
if err != nil {
return nil, err
}
var f func([]string, int) (interface{}, error)
switch ty {
case cty.Bool:
f = func(args []string, i int) (interface{}, error) {
return strconv.ParseBool(args[i])
}
case cty.String:
f = func(args []string, i int) (interface{}, error) {
return args[i], nil
}
case cty.Number:
f = func(args []string, i int) (interface{}, error) {
return strconv.Atoi(args[i])
}
case cty.List(cty.String):
if i != len(root.Parameters)-1 {
return nil, fmt.Errorf("list(string) parameter %q must be positioned at last", p.Name)
}
f = func(args []string, i int) (interface{}, error) {
return args[i:], nil
}
hasVarArgs = true
default:
return nil, fmt.Errorf("invalid parameter %q: type %s is not supported", p.Name, ty.FriendlyName())
}
lazyParamValues[p.Name] = func(args []string) (interface{}, error) {
if len(args) <= ii {
return nil, nil
}
return f(args, ii)
}
}
if hasVarArgs {
cli.Args = cobra.MinimumNArgs(minArgs)
} else {
cli.Args = cobra.RangeArgs(minArgs, maxArgs)
}
params := func(args []string) (map[string]interface{}, error) {
m := map[string]interface{}{}
for name, f := range lazyParamValues {
v, err := f(args)
if err != nil {
return nil, err
}
m[name] = v
}
return m, nil
}
return &Config{Parameters: params, Options: opts}, nil
}
func getMergedParamsAndOpts(
cfgs map[string]*Config, cmdName string, args []string) (map[string]interface{}, map[string]interface{}, error) {
names := strings.Split(cmdName, " ")
optGetters := map[string]func() interface{}{}
for i := range names {
curName := strings.Join(names[:i+1], " ")
if curCfg, ok := cfgs[curName]; ok {
curOpts := curCfg.Options()
for n := range curOpts {
optGetters[n] = curOpts[n]
}
}
}
cfg := cfgs[cmdName]
params, err := cfg.Parameters(args)
if err != nil {
return nil, nil, err
}
opts := map[string]interface{}{}
for n, get := range optGetters {
opts[n] = get()
}
return params, opts, nil
}
func (m *Main) initApp(setup app.Setup) (*app.App, error) {
ap, err := app.New(setup)
if err != nil {
if ap == nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
} else {
ap.PrintError(err)
}
//nolint:wrapcheck
return nil, err
}
ap.Stdout = m.Stdout
ap.Stderr = m.Stderr
return ap, nil
}
func (m Main) createRunner(cmd string, setup app.Setup) (*Runner, error) {
ap, err := m.initApp(setup)
if err != nil {
return nil, err
}
return m.newRunner(ap, cmd), nil
}
func (m Main) newRunner(ap *app.App, cmdName string) *Runner {
m2 := &Runner{
mut: &sync.Mutex{},
ap: ap,
runCmdName: cmdName,
}
m.initRunner(m2)
return m2
}
func (m Main) initRunner(r *Runner) {
siTty := isatty.IsTerminal(os.Stdin.Fd())
soTty := isatty.IsTerminal(os.Stdout.Fd())
// Enable prompts for missing inputs when stdin and stdout are connected to a tty
r.Interactive = siTty && soTty
if r.Interactive {
r.SetOpts = app.DefaultSetOpts
}
r.goJobs = map[string]Job{}
r.jobRunProviders = map[string]func(State) JobRun{}
for jobName := range r.ap.JobByName {
n := jobName
r.jobRunProviders[n] = func(st State) JobRun {
return func(ctx context.Context) error {
if st.Stdout != nil {
defer func() {
if err := st.Stdout.Close(); err != nil {
panic(err)
}
}()
}
if st.Stderr != nil {
defer func() {
if err := st.Stderr.Close(); err != nil {
panic(err)
}
}()
}
r, err := r.ap.Run(n, st.Parameters, st.Options)
if err != nil {
return xerrors.Errorf("running job %q: %w", n, err)
}
if st.Stdout != nil {
if _, err := st.Stdout.Write([]byte(r.Stdout)); err != nil {
return xerrors.Errorf("writing stdout of job %q: %w", n, err)
}
}
if st.Stderr != nil {
if _, err := st.Stderr.Write([]byte(r.Stderr)); err != nil {
return xerrors.Errorf("writing stderr of job %q: %w", n, err)
}
}
return nil
}
}
}
}
type Runner struct {
ap *app.App
runCmdName string
runCmd *cobra.Command
variantCmd *cobra.Command
goJobs map[string]Job
jobRunProviders map[string]func(State) JobRun
Interactive bool
SetOpts app.SetOptsFunc
mut *sync.Mutex
}
func (r *Runner) Cobra() (*cobra.Command, error) {
ap, rootCmdName := r.ap, r.runCmdName
if rootCmdName == "" {
rootCmdName = "run"
}
jobs := map[string]app.JobSpec{}
jobNames := []string{}
for jobName, j := range ap.JobByName {
var name string
if jobName == "" {
name = rootCmdName
} else {
name = fmt.Sprintf("%s %s", rootCmdName, jobName)
}
jobs[name] = j
jobNames = append(jobNames, name)
}
sort.Strings(jobNames)
commands := map[string]*cobra.Command{}
cfgs := map[string]*Config{}
for _, n := range jobNames {
name := n
job := jobs[name]
names := strings.Split(name, " ")
var parent *cobra.Command
cmdName := names[len(names)-1]
switch len(names) {
case 1:
default:
names = names[:len(names)-1]
var ok bool
parent, ok = commands[strings.Join(names, " ")]
if !ok {
for i := range names {
intName := strings.Join(names[:i+1], " ")
cur, ok := commands[intName]
if !ok {
cur = &cobra.Command{
Use: names[i],
}
parent.AddCommand(cur)
commands[intName] = cur
}
parent = cur
}
}
}
var desc string
if job.Description != nil {
desc = *job.Description
}
for _, p := range job.Parameters {
cmdName += fmt.Sprintf(" [%s]", strings.ToUpper(p.Name))
}
cli := &cobra.Command{
Use: cmdName,
Short: strings.Split(desc, "\n")[0],
Long: desc,
}
if job.Private != nil {
cli.Hidden = *job.Private
}
cfg, err := configureCommand(cli, job, r.Interactive)
if err != nil {
return nil, err
}
cfgs[name] = cfg
cli.RunE = func(cmd *cobra.Command, args []string) error {
params, opts, err := getMergedParamsAndOpts(cfgs, name, args)
if err != nil {
return err
}
_, err = ap.Run(job.Name, params, opts, r.SetOpts)
if err != nil && err.Error() != app.NoRunMessage {
cmd.SilenceUsage = true
}
//nolint:wrapcheck
return err
}
commands[name] = cli
if parent != nil {
parent.AddCommand(cli)
}
}
rootCmd := commands[rootCmdName]
return rootCmd, nil
}
type RunOptions struct {
Stdout io.Writer
Stderr io.Writer
SetOpts app.SetOptsFunc
DisableLocking bool
}
// Add adds a job to this runner so that it can later by calling `Job`.
func (r Runner) Add(job Job) {
r.goJobs[job.Name] = job
if job.Name == "" {
panic(fmt.Errorf("invalid job name %q", job.Name))
}
r.jobRunProviders[job.Name] = func(st State) JobRun {
return func(ctx context.Context) error {
return job.Run(ctx, st)
}
}
}
// Job prepares a job to be run.
func (r Runner) Job(job string, opts State) (JobRun, error) {
f, ok := r.jobRunProviders[job]
if !ok {
return nil, fmt.Errorf("job %q not added", job)
}
if opts.Options == nil {
opts.Options = map[string]interface{}{}
}
if opts.Parameters == nil {
opts.Parameters = map[string]interface{}{}
}
jr := f(opts)
return jr, nil
}
func (r *Runner) Run(arguments []string, opt ...RunOptions) error {
var opts RunOptions
if len(opt) > 0 {
opts = opt[0]
}
if !opts.DisableLocking {
r.mut.Lock()
defer r.mut.Unlock()
}
if opts.SetOpts != nil {
r.SetOpts = opts.SetOpts
defer func() {
r.SetOpts = nil
}()
}
if r.runCmd == nil {
var err error
r.runCmd, err = r.Cobra()
if err != nil {
r.ap.PrintError(err)
return err
}
}
var cmd *cobra.Command
if r.runCmdName != "" {
cmd = r.runCmd
} else {
if r.variantCmd == nil {
r.variantCmd = r.createVariantRootCommand()
}
cmd = r.variantCmd
}
var err error
{
cmdStdout := cmd.OutOrStdout()
cmdStderr := cmd.OutOrStderr()
appStdout := r.ap.Stdout
appStderr := r.ap.Stderr
cmd.SetArgs(arguments)
if opts.Stdout != nil {
cmd.SetOut(opts.Stdout)
r.ap.Stdout = opts.Stdout
}
if opts.Stderr != nil {
cmd.SetErr(opts.Stderr)
r.ap.Stderr = opts.Stderr
}
err = cmd.Execute()
cmd.SetOut(cmdStdout)
cmd.SetErr(cmdStderr)
r.ap.Stdout = appStdout
r.ap.Stderr = appStderr
}
//nolint:wrapcheck
return err
}
type Error struct {
Message string
ExitCode int
}
func (e Error) Error() string |
func (r *Runner) createVariantRootCommand() *cobra.Command {
const VariantBinName = "variant"
rootCmd := &cobra.Command{
Use: VariantBinName,
Version: Version,
}
testCmd := &cobra.Command{
Use: "test [NAME]",
Short: "Run test(s)",
Args: cobra.MaximumNArgs(1),
RunE: func(c *cobra.Command, args []string) error {
var prefix string
if len(args) > 0 {
prefix = args[0]
}
_, err := r.ap.RunTests(prefix)
if err != nil {
c.SilenceUsage = true
}
//nolint:wrapcheck
return err
},
}
exportCmd := &cobra.Command{
Use: "export SUBCOMMAND SRC_DIR OUTPUT_PATH",
Short: "Export the Variant command defined in SRC_DIR to OUTPUT_PATH",
}
{
shimCmd := &cobra.Command{
Use: "shim SRC_DIR DST_DIR",
Short: "Copy and generate shim for the Variant command defined in the SRC",
Args: cobra.ExactArgs(2),
RunE: func(c *cobra.Command, args []string) error {
err := r.ap.ExportShim(args[0], args[1])
if err != nil {
c.SilenceUsage = true
}
//nolint:wrapcheck
return err
},
}
exportCmd.AddCommand(shimCmd)
exportCmd.AddCommand(newExportGo(r))
exportCmd.AddCommand(newExportBinary(r))
}
generateCmd := &cobra.Command{
Use: "generate RESOURCE DIR",
Short: "Generate RESOURCE for the Variant command defined in DIR",
}
{
generateShimCmd := &cobra.Command{
Use: "shim DIR",
Short: "Generate a shim for the Variant command defined in DIR",
Args: cobra.ExactArgs(1),
RunE: func(c *cobra.Command, args []string) error {
err := app.GenerateShim(VariantBinName, args[0])
if err != nil {
c.SilenceUsage = true
}
return err
},
}
generateCmd.AddCommand(generateShimCmd)
}
startCmd := &cobra.Command{
Use: "start NAME",
Short: "Start the named integration to turn the Variant command to whatever",
}
{
var botName string
startSlackbotCmd := &cobra.Command{
Use: "slackbot",
Short: "Start the slackbot that responds to slash commands by running corresopnding Variant commands",
RunE: func(c *cobra.Command, args []string) error {
err := r.StartSlackbot(botName)
if err != nil {
c.SilenceUsage = true
}
return err
},
}
startSlackbotCmd.Flags().StringVarP(&botName, "name", "n", "", "Name of the slash command without /. For example, \"--name foo\" results in the bot responding to \"/foo <CMD> <ARGS>\"")
if err := startSlackbotCmd.MarkFlagRequired("name"); err != nil {
panic(err)
}
startCmd.AddCommand(startSlackbotCmd)
}
rootCmd.AddCommand(r.runCmd)
rootCmd.AddCommand(testCmd)
rootCmd.AddCommand(exportCmd)
rootCmd.AddCommand(generateCmd)
rootCmd.AddCommand(startCmd)
return rootCmd
}
| {
return e.Message
} | identifier_body |
variant.go | package variant
import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/hashicorp/hcl/v2/ext/typeexpr"
"github.com/mattn/go-isatty"
"github.com/spf13/cobra"
"github.com/zclconf/go-cty/cty"
"golang.org/x/xerrors"
"github.com/mumoshu/variant2/pkg/app"
)
var Version string
type Main struct {
// Command is the name of the executable used for this process.
// E.g. `go build -o myapp ./` and `./myapp cmd --flag1` results in Command being "myapp".
Command string
Source []byte
// Path can be a path to the directory or the file containing the definition for the Variant command being run
Path string
Stdout, Stderr io.Writer
Args []string
Getenv func(string) string
Getwd func() (string, error)
Setup app.Setup
}
type Setup func() (*Main, error)
type InitParams struct {
Command string
Setup app.Setup
}
type Option func(*Main)
func FromPath(path string, opts ...Option) Setup {
return func() (*Main, error) {
if path == "" {
var err error
path, err = os.Getwd()
if err != nil {
return nil, xerrors.Errorf("getwd: %w", err)
}
}
info, err := os.Stat(path)
if err != nil {
return nil, xerrors.Errorf("stat %s: %w", path, err)
}
var setup app.Setup
if info.IsDir() {
setup = app.FromDir(path)
} else {
setup = app.FromFile(path)
}
m := &Main{
Setup: setup,
}
if m.Command == "" {
m.Command = filepath.Base(path)
}
for _, o := range opts {
o(m)
}
return m, nil
}
}
func FromSource(cmd, source string) Setup {
return func() (*Main, error) {
if cmd == "" {
return nil, errors.New("command name must be set when loadling from Variant source file")
}
return &Main{
Command: cmd,
Setup: app.FromSources(map[string][]byte{cmd: []byte(source)}),
}, nil
}
}
func Load(setup Setup) (*Runner, error) {
initParams, err := setup()
if err != nil {
return nil, err
}
m := Init(*initParams)
return m.createRunner(m.Command, m.Setup)
}
func | (setup Setup) *Runner {
r, err := Load(setup)
if err != nil {
panic(err)
}
return r
}
func New() Main {
return Init(Main{})
}
type Env struct {
Args []string
Getenv func(name string) string
Getwd func() (string, error)
}
func GetPathAndArgsFromEnv(env Env) (string, string, []string) {
osArgs := env.Args
var cmd string
var path string
if len(osArgs) > 1 {
file := osArgs[1]
info, err := os.Stat(file)
if err == nil && info != nil && !info.IsDir() {
osArgs = osArgs[2:]
path = file
cmd = filepath.Base(file)
} else {
osArgs = osArgs[1:]
}
} else {
osArgs = []string{}
}
if path == "" {
dirFromEnv := env.Getenv("VARIANT_DIR")
if dirFromEnv != "" {
path = dirFromEnv
} else {
var err error
path, err = env.Getwd()
if err != nil {
panic(err)
}
}
}
return cmd, path, osArgs
}
func Init(m Main) Main {
if m.Stdout == nil {
m.Stdout = os.Stdout
}
if m.Stderr == nil {
m.Stderr = os.Stderr
}
if m.Getenv == nil {
m.Getenv = os.Getenv
}
if m.Getwd == nil {
m.Getwd = os.Getwd
}
cmdNameFromEnv := m.Getenv("VARIANT_NAME")
if cmdNameFromEnv != "" {
m.Command = cmdNameFromEnv
}
return m
}
type Config struct {
Parameters func([]string) (map[string]interface{}, error)
Options func() map[string]func() interface{}
}
func valueOnChange(cli *cobra.Command, name string, v interface{}) func() interface{} {
return func() interface{} {
// This avoids setting "" when the flag is actually missing, so that
// we can differentiate between when (1)an empty string is specified vs (2)no flag is provided.
if cli.PersistentFlags().Lookup(name).Changed {
return v
}
return nil
}
}
func createCobraFlagsFromVariantOptions(cli *cobra.Command, opts []app.OptionSpec, interactive bool) (map[string]func() interface{}, error) {
lazyOptionValues := map[string]func() interface{}{}
for i := range opts {
o := opts[i]
var tpe cty.Type
tpe, diags := typeexpr.TypeConstraint(o.Type)
if diags != nil {
return nil, diags
}
var desc string
if o.Description != nil {
desc = *o.Description
}
switch tpe {
case cty.String:
var v string
if o.Short != nil {
cli.PersistentFlags().StringVarP(&v, o.Name, *o.Short, "", desc)
} else {
cli.PersistentFlags().StringVar(&v, o.Name, "", desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.Bool:
var v bool
if o.Short != nil {
cli.PersistentFlags().BoolVarP(&v, o.Name, *o.Short, false, desc)
} else {
cli.PersistentFlags().BoolVar(&v, o.Name, false, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.Number:
var v int
if o.Short != nil {
cli.PersistentFlags().IntVarP(&v, o.Name, *o.Short, 0, desc)
} else {
cli.PersistentFlags().IntVar(&v, o.Name, 0, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.List(cty.String):
v := []string{}
if o.Short != nil {
cli.PersistentFlags().StringSliceVarP(&v, o.Name, *o.Short, []string{}, desc)
} else {
cli.PersistentFlags().StringSliceVar(&v, o.Name, []string{}, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
case cty.List(cty.Number):
v := []int{}
if o.Short != nil {
cli.PersistentFlags().IntSliceVarP(&v, o.Name, *o.Short, []int{}, desc)
} else {
cli.PersistentFlags().IntSliceVar(&v, o.Name, []int{}, desc)
}
lazyOptionValues[o.Name] = valueOnChange(cli, o.Name, &v)
}
if !app.IsExpressionEmpty(o.Default) || interactive {
} else if err := cli.MarkPersistentFlagRequired(o.Name); err != nil {
panic(err)
}
}
return lazyOptionValues, nil
}
func configureCommand(cli *cobra.Command, root app.JobSpec, interactive bool) (*Config, error) {
lazyOptionValues, err := createCobraFlagsFromVariantOptions(cli, root.Options, interactive)
if err != nil {
return nil, err
}
opts := func() map[string]func() interface{} {
m := map[string]func() interface{}{}
for name, f := range lazyOptionValues {
m[name] = f
}
return m
}
var minArgs int
var maxArgs int
lazyParamValues := map[string]func(args []string) (interface{}, error){}
var hasVarArgs bool
for i := range root.Parameters {
maxArgs++
p := root.Parameters[i]
r := p.Default.Range()
if r.Start == r.End {
minArgs++
}
ii := i
ty, err := typeexpr.TypeConstraint(p.Type)
if err != nil {
return nil, err
}
var f func([]string, int) (interface{}, error)
switch ty {
case cty.Bool:
f = func(args []string, i int) (interface{}, error) {
return strconv.ParseBool(args[i])
}
case cty.String:
f = func(args []string, i int) (interface{}, error) {
return args[i], nil
}
case cty.Number:
f = func(args []string, i int) (interface{}, error) {
return strconv.Atoi(args[i])
}
case cty.List(cty.String):
if i != len(root.Parameters)-1 {
return nil, fmt.Errorf("list(string) parameter %q must be positioned at last", p.Name)
}
f = func(args []string, i int) (interface{}, error) {
return args[i:], nil
}
hasVarArgs = true
default:
return nil, fmt.Errorf("invalid parameter %q: type %s is not supported", p.Name, ty.FriendlyName())
}
lazyParamValues[p.Name] = func(args []string) (interface{}, error) {
if len(args) <= ii {
return nil, nil
}
return f(args, ii)
}
}
if hasVarArgs {
cli.Args = cobra.MinimumNArgs(minArgs)
} else {
cli.Args = cobra.RangeArgs(minArgs, maxArgs)
}
params := func(args []string) (map[string]interface{}, error) {
m := map[string]interface{}{}
for name, f := range lazyParamValues {
v, err := f(args)
if err != nil {
return nil, err
}
m[name] = v
}
return m, nil
}
return &Config{Parameters: params, Options: opts}, nil
}
func getMergedParamsAndOpts(
cfgs map[string]*Config, cmdName string, args []string) (map[string]interface{}, map[string]interface{}, error) {
names := strings.Split(cmdName, " ")
optGetters := map[string]func() interface{}{}
for i := range names {
curName := strings.Join(names[:i+1], " ")
if curCfg, ok := cfgs[curName]; ok {
curOpts := curCfg.Options()
for n := range curOpts {
optGetters[n] = curOpts[n]
}
}
}
cfg := cfgs[cmdName]
params, err := cfg.Parameters(args)
if err != nil {
return nil, nil, err
}
opts := map[string]interface{}{}
for n, get := range optGetters {
opts[n] = get()
}
return params, opts, nil
}
func (m *Main) initApp(setup app.Setup) (*app.App, error) {
ap, err := app.New(setup)
if err != nil {
if ap == nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
} else {
ap.PrintError(err)
}
//nolint:wrapcheck
return nil, err
}
ap.Stdout = m.Stdout
ap.Stderr = m.Stderr
return ap, nil
}
func (m Main) createRunner(cmd string, setup app.Setup) (*Runner, error) {
ap, err := m.initApp(setup)
if err != nil {
return nil, err
}
return m.newRunner(ap, cmd), nil
}
func (m Main) newRunner(ap *app.App, cmdName string) *Runner {
m2 := &Runner{
mut: &sync.Mutex{},
ap: ap,
runCmdName: cmdName,
}
m.initRunner(m2)
return m2
}
func (m Main) initRunner(r *Runner) {
siTty := isatty.IsTerminal(os.Stdin.Fd())
soTty := isatty.IsTerminal(os.Stdout.Fd())
// Enable prompts for missing inputs when stdin and stdout are connected to a tty
r.Interactive = siTty && soTty
if r.Interactive {
r.SetOpts = app.DefaultSetOpts
}
r.goJobs = map[string]Job{}
r.jobRunProviders = map[string]func(State) JobRun{}
for jobName := range r.ap.JobByName {
n := jobName
r.jobRunProviders[n] = func(st State) JobRun {
return func(ctx context.Context) error {
if st.Stdout != nil {
defer func() {
if err := st.Stdout.Close(); err != nil {
panic(err)
}
}()
}
if st.Stderr != nil {
defer func() {
if err := st.Stderr.Close(); err != nil {
panic(err)
}
}()
}
r, err := r.ap.Run(n, st.Parameters, st.Options)
if err != nil {
return xerrors.Errorf("running job %q: %w", n, err)
}
if st.Stdout != nil {
if _, err := st.Stdout.Write([]byte(r.Stdout)); err != nil {
return xerrors.Errorf("writing stdout of job %q: %w", n, err)
}
}
if st.Stderr != nil {
if _, err := st.Stderr.Write([]byte(r.Stderr)); err != nil {
return xerrors.Errorf("writing stderr of job %q: %w", n, err)
}
}
return nil
}
}
}
}
type Runner struct {
ap *app.App
runCmdName string
runCmd *cobra.Command
variantCmd *cobra.Command
goJobs map[string]Job
jobRunProviders map[string]func(State) JobRun
Interactive bool
SetOpts app.SetOptsFunc
mut *sync.Mutex
}
func (r *Runner) Cobra() (*cobra.Command, error) {
ap, rootCmdName := r.ap, r.runCmdName
if rootCmdName == "" {
rootCmdName = "run"
}
jobs := map[string]app.JobSpec{}
jobNames := []string{}
for jobName, j := range ap.JobByName {
var name string
if jobName == "" {
name = rootCmdName
} else {
name = fmt.Sprintf("%s %s", rootCmdName, jobName)
}
jobs[name] = j
jobNames = append(jobNames, name)
}
sort.Strings(jobNames)
commands := map[string]*cobra.Command{}
cfgs := map[string]*Config{}
for _, n := range jobNames {
name := n
job := jobs[name]
names := strings.Split(name, " ")
var parent *cobra.Command
cmdName := names[len(names)-1]
switch len(names) {
case 1:
default:
names = names[:len(names)-1]
var ok bool
parent, ok = commands[strings.Join(names, " ")]
if !ok {
for i := range names {
intName := strings.Join(names[:i+1], " ")
cur, ok := commands[intName]
if !ok {
cur = &cobra.Command{
Use: names[i],
}
parent.AddCommand(cur)
commands[intName] = cur
}
parent = cur
}
}
}
var desc string
if job.Description != nil {
desc = *job.Description
}
for _, p := range job.Parameters {
cmdName += fmt.Sprintf(" [%s]", strings.ToUpper(p.Name))
}
cli := &cobra.Command{
Use: cmdName,
Short: strings.Split(desc, "\n")[0],
Long: desc,
}
if job.Private != nil {
cli.Hidden = *job.Private
}
cfg, err := configureCommand(cli, job, r.Interactive)
if err != nil {
return nil, err
}
cfgs[name] = cfg
cli.RunE = func(cmd *cobra.Command, args []string) error {
params, opts, err := getMergedParamsAndOpts(cfgs, name, args)
if err != nil {
return err
}
_, err = ap.Run(job.Name, params, opts, r.SetOpts)
if err != nil && err.Error() != app.NoRunMessage {
cmd.SilenceUsage = true
}
//nolint:wrapcheck
return err
}
commands[name] = cli
if parent != nil {
parent.AddCommand(cli)
}
}
rootCmd := commands[rootCmdName]
return rootCmd, nil
}
type RunOptions struct {
Stdout io.Writer
Stderr io.Writer
SetOpts app.SetOptsFunc
DisableLocking bool
}
// Add adds a job to this runner so that it can later by calling `Job`.
func (r Runner) Add(job Job) {
r.goJobs[job.Name] = job
if job.Name == "" {
panic(fmt.Errorf("invalid job name %q", job.Name))
}
r.jobRunProviders[job.Name] = func(st State) JobRun {
return func(ctx context.Context) error {
return job.Run(ctx, st)
}
}
}
// Job prepares a job to be run.
func (r Runner) Job(job string, opts State) (JobRun, error) {
f, ok := r.jobRunProviders[job]
if !ok {
return nil, fmt.Errorf("job %q not added", job)
}
if opts.Options == nil {
opts.Options = map[string]interface{}{}
}
if opts.Parameters == nil {
opts.Parameters = map[string]interface{}{}
}
jr := f(opts)
return jr, nil
}
// Run parses the given CLI arguments and executes the corresponding command.
// An optional RunOptions value (only the first is used) can override the
// output streams, install a SetOpts hook for the duration of the call, and
// disable the runner-wide lock.
func (r *Runner) Run(arguments []string, opt ...RunOptions) error {
	var opts RunOptions
	if len(opt) > 0 {
		opts = opt[0]
	}
	// Serialize concurrent Run calls unless the caller opts out.
	if !opts.DisableLocking {
		r.mut.Lock()
		defer r.mut.Unlock()
	}
	// Install the per-run SetOpts hook and clear it on exit so it does not
	// leak into later runs.
	if opts.SetOpts != nil {
		r.SetOpts = opts.SetOpts
		defer func() {
			r.SetOpts = nil
		}()
	}
	// Lazily build the cobra command tree on first use.
	if r.runCmd == nil {
		var err error
		r.runCmd, err = r.Cobra()
		if err != nil {
			r.ap.PrintError(err)
			return err
		}
	}
	// With a configured run-command name, execute the job tree directly;
	// otherwise wrap it inside the full "variant" root command.
	var cmd *cobra.Command
	if r.runCmdName != "" {
		cmd = r.runCmd
	} else {
		if r.variantCmd == nil {
			r.variantCmd = r.createVariantRootCommand()
		}
		cmd = r.variantCmd
	}
	var err error
	{
		// Remember the current output streams so the per-run overrides can
		// be undone after Execute returns.
		cmdStdout := cmd.OutOrStdout()
		cmdStderr := cmd.OutOrStderr()
		appStdout := r.ap.Stdout
		appStderr := r.ap.Stderr
		cmd.SetArgs(arguments)
		if opts.Stdout != nil {
			cmd.SetOut(opts.Stdout)
			r.ap.Stdout = opts.Stdout
		}
		if opts.Stderr != nil {
			cmd.SetErr(opts.Stderr)
			r.ap.Stderr = opts.Stderr
		}
		err = cmd.Execute()
		// Restore the previous streams regardless of the execution result.
		cmd.SetOut(cmdStdout)
		cmd.SetErr(cmdStderr)
		r.ap.Stdout = appStdout
		r.ap.Stderr = appStderr
	}
	//nolint:wrapcheck
	return err
}
// Error is an error that also carries the exit code the process should
// report when the failure propagates to the top level.
type Error struct {
	// Message is the human-readable description returned by Error().
	Message string
	// ExitCode is the exit status to terminate the process with.
	ExitCode int
}

// Error implements the builtin error interface by returning the message
// verbatim.
func (e Error) Error() string {
	return e.Message
}
// createVariantRootCommand builds the top-level "variant" CLI, attaching the
// user's run command plus the built-in test/export/generate/start
// subcommands.
func (r *Runner) createVariantRootCommand() *cobra.Command {
	const VariantBinName = "variant"
	rootCmd := &cobra.Command{
		Use:     VariantBinName,
		Version: Version,
	}
	// "variant test [NAME]" runs the tests whose names start with NAME.
	testCmd := &cobra.Command{
		Use:   "test [NAME]",
		Short: "Run test(s)",
		Args:  cobra.MaximumNArgs(1),
		RunE: func(c *cobra.Command, args []string) error {
			var prefix string
			if len(args) > 0 {
				prefix = args[0]
			}
			_, err := r.ap.RunTests(prefix)
			if err != nil {
				// Failures come from the tests, not CLI misuse, so do not
				// print the usage text.
				c.SilenceUsage = true
			}
			//nolint:wrapcheck
			return err
		},
	}
	// "variant export ..." turns the command defined in a source directory
	// into a distributable artifact (shim, Go source, or binary).
	exportCmd := &cobra.Command{
		Use:   "export SUBCOMMAND SRC_DIR OUTPUT_PATH",
		Short: "Export the Variant command defined in SRC_DIR to OUTPUT_PATH",
	}
	{
		shimCmd := &cobra.Command{
			Use:   "shim SRC_DIR DST_DIR",
			Short: "Copy and generate shim for the Variant command defined in the SRC",
			Args:  cobra.ExactArgs(2),
			RunE: func(c *cobra.Command, args []string) error {
				err := r.ap.ExportShim(args[0], args[1])
				if err != nil {
					c.SilenceUsage = true
				}
				//nolint:wrapcheck
				return err
			},
		}
		exportCmd.AddCommand(shimCmd)
		exportCmd.AddCommand(newExportGo(r))
		exportCmd.AddCommand(newExportBinary(r))
	}
	// "variant generate shim DIR" writes a shim next to the command source.
	generateCmd := &cobra.Command{
		Use:   "generate RESOURCE DIR",
		Short: "Generate RESOURCE for the Variant command defined in DIR",
	}
	{
		generateShimCmd := &cobra.Command{
			Use:   "shim DIR",
			Short: "Generate a shim for the Variant command defined in DIR",
			Args:  cobra.ExactArgs(1),
			RunE: func(c *cobra.Command, args []string) error {
				err := app.GenerateShim(VariantBinName, args[0])
				if err != nil {
					c.SilenceUsage = true
				}
				return err
			},
		}
		generateCmd.AddCommand(generateShimCmd)
	}
	// "variant start slackbot" exposes the command through a Slack slash
	// command integration.
	startCmd := &cobra.Command{
		Use:   "start NAME",
		Short: "Start the named integration to turn the Variant command to whatever",
	}
	{
		var botName string
		startSlackbotCmd := &cobra.Command{
			Use:   "slackbot",
			Short: "Start the slackbot that responds to slash commands by running corresopnding Variant commands",
			RunE: func(c *cobra.Command, args []string) error {
				err := r.StartSlackbot(botName)
				if err != nil {
					c.SilenceUsage = true
				}
				return err
			},
		}
		startSlackbotCmd.Flags().StringVarP(&botName, "name", "n", "", "Name of the slash command without /. For example, \"--name foo\" results in the bot responding to \"/foo <CMD> <ARGS>\"")
		// The bot cannot run without a slash-command name; fail fast at
		// startup if the flag cannot be marked required.
		if err := startSlackbotCmd.MarkFlagRequired("name"); err != nil {
			panic(err)
		}
		startCmd.AddCommand(startSlackbotCmd)
	}
	rootCmd.AddCommand(r.runCmd)
	rootCmd.AddCommand(testCmd)
	rootCmd.AddCommand(exportCmd)
	rootCmd.AddCommand(generateCmd)
	rootCmd.AddCommand(startCmd)
	return rootCmd
}
| MustLoad | identifier_name |
image_build.py | """
Copyright (c) 2017 Cyberhaven
Copyright (c) 2017 Dependable Systems Laboratory, EPFL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import glob
import grp
import logging
import os
import pwd
import re
import socket
import time
from threading import Thread
import psutil
from psutil import NoSuchProcess
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
import sh
from sh import ErrorReturnCode
from s2e_env import CONSTANTS
from s2e_env.command import EnvCommand, CommandError
from s2e_env.utils import repos
from s2e_env.utils.images import ImageDownloader, get_image_templates, get_app_templates, get_all_images, \
translate_image_name
logger = logging.getLogger('image_build')
def _get_user_groups(user_name):
|
def _get_user_name():
"""
Get the current user.
"""
return pwd.getpwuid(os.getuid())[0]
def _user_belongs_to(group_name):
    """Return True when the current user is a member of ``group_name``."""
    return group_name in _get_user_groups(_get_user_name())
def _raise_group_error(group_name):
    """Abort with instructions for joining the missing ``group_name`` group."""
    message = (f'You must belong to the {group_name} group in order to build '
               'images. Please run the following command, then logout '
               'and login:\n\n'
               f'\tsudo usermod -a -G {group_name} $(whoami)')
    raise CommandError(message)
def _check_groups_docker():
    """Ensure the current user is in the ``docker`` group (required to build images)."""
    belongs = _user_belongs_to('docker')
    if not belongs:
        _raise_group_error('docker')
def _check_groups_kvm():
    """Being member of KVM is required only when using KVM to build images"""
    in_kvm_groups = _user_belongs_to('libvirtd') or _user_belongs_to('kvm')
    if not in_kvm_groups:
        _raise_group_error('kvm')
def _check_virtualbox():
    """Fail when a VirtualBox VM is running: VirtualBox and KVM cannot coexist."""
    # Iterate defensively: a process may vanish between listing and inspection
    # (https://github.com/giampaolo/psutil/issues/132#issuecomment-44017679).
    for process in psutil.process_iter():
        try:
            name = process.name()
        except NoSuchProcess:
            continue
        if name == 'VBoxHeadless':
            raise CommandError('S2E uses KVM to build images. VirtualBox '
                               'is currently running, which is not '
                               'compatible with KVM. Please close all '
                               'VirtualBox VMs and try again.')
def _check_vmware():
    """Fail when a VMware VM is running: VMware and KVM cannot coexist."""
    for process in psutil.process_iter():
        try:
            name = process.name()
        except NoSuchProcess:
            # The process exited while we were inspecting it; skip it.
            continue
        if name == 'vmware-vmx':
            raise CommandError('S2E uses KVM to build images. VMware '
                               'is currently running, which is not '
                               'compatible with KVM. Please close all '
                               'VMware VMs and try again.')
def _check_kvm():
    """Verify that /dev/kvm exists; libs2e needs it to communicate with QEMU."""
    kvm_path = os.path.join(os.sep, 'dev', 'kvm')
    if os.path.exists(kvm_path):
        return
    raise CommandError('KVM interface not found - check that /dev/kvm '
                       'exists. Alternatively, you can disable KVM (-n '
                       'option) or download pre-built images (-d option)')
def _check_vmlinux():
    """Ensure kernels under /boot are readable (a guestfish requirement)."""
    kernel_glob = os.path.join(os.sep, 'boot', 'vmlinu*')
    for kernel in glob.glob(kernel_glob):
        try:
            with open(kernel, 'rb'):
                pass
        except IOError:
            raise CommandError('Make sure that the kernels in /boot are readable. '
                               'This is required for guestfish. Please run the '
                               'following command:\n\n'
                               'sudo chmod ugo+r /boot/vmlinu*') from None
# pylint: disable=no-member
def _check_cow(image_dir):
    """
    Check that the file system that stores guest images supports copy-on-write.

    Returns True when ``cp --reflink=always`` succeeds inside ``image_dir``,
    False otherwise (a warning with upgrade instructions is logged).
    """
    try:
        # Probe COW support by reflink-copying a scratch file.
        src = f'{image_dir}/.cowcheck'
        dst = f'{image_dir}/.cowcheck1'
        sh.touch(src)
        sh.cp('--reflink=always', src, dst)
        return True
    except Exception:
        warn_msg = f"""
        Copy-on-write check failed.
        The file system where images are stored ({image_dir}) does not support copy-on-write.
        It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage
        location for S2E images, as this can save up to 60% of disk space. The building process checkpoints
        intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.
        How to upgrade:
        1. Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).
        Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.
        2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)
        3. Delete the "images" folder in your S2E environment
        4. Create in your S2E environment a symbolic link called "images" to the directory you created in step 2
        """
        # Strip the 8-space literal indentation before logging.
        logger.warning(re.sub(r'^ {8}', '', warn_msg, flags=re.MULTILINE))
        return False
    finally:
        # Always remove the scratch files, even when the probe failed.
        sh.rm('-f', src)
        sh.rm('-f', dst)
def _raise_invalid_image(image_name):
    """Report an unknown image name and point at the image listing command."""
    message = (f'Invalid image name: {image_name}. Run ``s2e image_build`` '
               'to list available images')
    raise CommandError(message)
def _get_base_image_and_app(image_name):
x = image_name.split('/')
if len(x) == 1:
return x[0], None
if len(x) == 2:
return x
raise CommandError(f'Invalid image name {image_name}')
def _has_app_image(image_names):
for name in image_names:
if '/' in name:
return True
return False
def _check_product_keys(image_descriptors, image_names):
    """
    Verify that every selected image declaring a ``product_key`` field
    (directly or under ``os``) actually has a non-empty key configured.
    """
    missing_keys = []
    for image_name in image_names:
        descriptor = image_descriptors[image_name]
        if 'product_key' in descriptor and not descriptor['product_key']:
            missing_keys.append(image_name)
        os_section = descriptor.get('os', {})
        if 'product_key' in os_section and not os_section['product_key']:
            missing_keys.append(image_name)
    if not missing_keys:
        return
    logger.error('The following images require a product key:')
    for image in missing_keys:
        logger.error(' * %s', image)
    raise CommandError('Please update images.json and/or apps.json.')
def _check_iso(templates, app_templates, iso_dir, image_names):
    """
    Ensure every selected image that needs an installation ISO can find it.

    An ISO requirement may come from the base image template or the app
    template. Entries that declare a download ``url`` need no local file;
    entries that declare a ``name`` must exist inside ``iso_dir``.

    Raises:
        CommandError: if ``iso_dir`` is missing while required, or the ISO
            file does not exist.
    """
    for image_name in image_names:
        base_image, app_name = _get_base_image_and_app(image_name)
        descriptors = [templates[base_image]]
        if app_name:
            descriptors.append(app_templates[app_name])
        for desc in descriptors:
            iso = desc.get('iso', {})
            # A downloadable ISO does not need to be present locally.
            if iso.get('url', ''):
                continue
            name = iso.get('name', '')
            if not name:
                continue
            if not iso_dir:
                raise CommandError(
                    'Please use the --iso-dir option to specify the path '
                    f'to a folder that contains {name}'
                )
            path = os.path.join(iso_dir, name)
            if not os.path.exists(path):
                raise CommandError(f'The image {image_name} requires {path}, which could not be found')
def _is_port_available(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
return True
except socket.error:
return False
finally:
s.close()
def _start_ftp_server(image_path, port):
    """
    Start an anonymous FTP server rooted at ``image_path`` on localhost.

    The server runs on a daemon thread so it cannot keep the process alive;
    the caller is responsible for shutting it down via ``close_all()``.
    """
    authorizer = DummyAuthorizer()
    # Anonymous clients get full read/write permissions inside image_path.
    authorizer.add_anonymous(image_path, perm='elradfmwMT')
    handler = FTPHandler
    handler.authorizer = authorizer
    # 10.0.2.2 is the host address as seen from QEMU's default slirp network.
    handler.masquerade_address = '10.0.2.2'
    # QEMU slirp won't let the guest reconnect if timeout happens, so we disable it
    handler.timeout = None
    server = FTPServer(("127.0.0.1", port), handler)
    thread = Thread(target=_run_ftp_server, args=[server])
    thread.daemon = True
    thread.start()
    # Give the server a moment to start accepting connections.
    time.sleep(1)
    return server
def _run_ftp_server(server):
    """Serve FTP requests until the server is closed, then release sockets."""
    try:
        server.serve_forever()
    finally:
        logger.info('FTP server terminated')
        server.close_all()
def _get_archive_rules(image_path, rule_names):
    """Map image names to the ``.tar.xz`` archive targets that will be built."""
    if _has_app_image(rule_names):
        raise CommandError('Building archives of app images is not supported yet')
    archive_rules = [os.path.join(image_path, f'{name}.tar.xz') for name in rule_names]
    logger.info('The following archives will be built:')
    for rule in archive_rules:
        logger.info(' * %s', rule)
    return archive_rules
def _download_images(image_path, image_names, templates):
    """
    Download prebuilt base images into ``image_path``.

    Raises:
        CommandError: app images (``base/app``) cannot be downloaded yet.
    """
    if _has_app_image(image_names):
        raise CommandError('Downloading of app images is not supported yet')
    image_downloader = ImageDownloader(templates)
    image_downloader.download_images(image_names, image_path)
    logger.info('Successfully downloaded images: %s', ', '.join(image_names))
class Command(EnvCommand):
    """
    Builds an image.
    """

    help = 'Build an image.'

    def __init__(self):
        super().__init__()
        # Run QEMU without a GUI unless --gui is given.
        self._headless = True
        # Use KVM acceleration unless --no-kvm is given.
        self._use_kvm = True
        # Parallelism for make/QEMU; overwritten from --cores in handle().
        self._num_cores = 1
        # Whether the image directory supports copy-on-write (set in handle()).
        self._has_cow = False

    def add_arguments(self, parser):
        """Register the command-line options understood by ``s2e image_build``."""
        super().add_arguments(parser)
        parser.add_argument('name',
                            help='The name of the image to build. If empty,'
                                 ' shows available images', nargs='*')
        parser.add_argument('-g', '--gui', action='store_true',
                            help='Display QEMU GUI during image build')
        parser.add_argument('-c', '--cores', required=False, default=2,
                            type=int,
                            help='The number of cores used when building the '
                                 'VM image. Defaults to 2')
        parser.add_argument('-x', '--clean', action='store_true',
                            help='Deletes all images and rebuild them from '
                                 'scratch')
        parser.add_argument('-a', '--archive', action='store_true',
                            help='Creates an archive for the specified image')
        parser.add_argument('-p', '--ftp-port', required=False, default=15468, type=int,
                            help='Port for the internal FTP server to receive files from guest VMs during build')
        parser.add_argument('-d', '--download', action='store_true',
                            help='Download image from the repository instead '
                                 'of building it')
        parser.add_argument('-i', '--iso-dir',
                            help='Path to folder that stores ISO files of Windows images')
        parser.add_argument('-n', '--no-kvm', action='store_true',
                            help='Disable KVM during image build')

    def handle(self, *args, **options):
        """Validate the environment, then list, download, or build images."""
        # If DISPLAY is missing, don't use headless mode
        if options['gui']:
            self._headless = False
        # If KVM has been explicitly disabled, don't use it during the build
        if options['no_kvm']:
            self._use_kvm = False
        self._num_cores = options['cores']
        # The path could have been deleted by a previous clean
        if not os.path.exists(self.image_path()):
            os.makedirs(self.image_path())
        img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
        if options['clean']:
            self._invoke_make(img_build_dir, ['clean'])
            return
        image_names = options['name']
        templates = get_image_templates(img_build_dir)
        app_templates = get_app_templates(img_build_dir)
        images, image_groups, image_descriptors = get_all_images(templates, app_templates)
        # With no image name given, just list what can be built and stop.
        if not image_names:
            self._print_image_list(images, image_groups, image_descriptors)
            print('\nRun ``s2e image_build <name>`` to build an image. '
                  'Note that you must run ``s2e build`` **before** building '
                  'an image')
            return
        # Expand group names (e.g. "all") into concrete image names.
        image_names = translate_image_name(images, image_groups, image_names)
        logger.info('The following images will be built:')
        for image in image_names:
            logger.info(' * %s', image)
        if options['download']:
            _download_images(self.image_path(), image_names, templates)
            return
        rule_names = image_names
        if options['archive']:
            rule_names = _get_archive_rules(self.image_path(), image_names)
        iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'] else None
        # Check for optional product keys and iso directories.
        # These may or may not be required, depending on the set of images.
        _check_product_keys(image_descriptors, image_names)
        _check_iso(templates, app_templates, iso_dir, image_names)
        if self._use_kvm:
            _check_kvm()
            _check_groups_kvm()
        _check_groups_docker()
        _check_vmlinux()
        self._has_cow = _check_cow(self.image_path())
        # KVM cannot coexist with other hypervisors that may be running.
        if self._use_kvm:
            _check_virtualbox()
            _check_vmware()
        if not _is_port_available(options['ftp_port']):
            raise CommandError(f'localhost:{options["ftp_port"]} is not available. Check that the port is free or '
                               'specify a port with --ftp-port')
        # Clone kernel if needed.
        # This is necessary if the s2e env has been initialized with -b flag.
        self._clone_kernel()
        # The FTP server lets guest VMs push files back to the host during build.
        server = _start_ftp_server(self.image_path(), options['ftp_port'])
        self._invoke_make(img_build_dir, rule_names, options['ftp_port'], iso_dir)
        logger.success('Built image(s) \'%s\'', ' '.join(image_names))
        server.close_all()

    def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):
        """Invoke the image Makefile with the environment variables it expects."""
        env = os.environ.copy()
        env['S2E_INSTALL_ROOT'] = self.install_path()
        env['S2E_LINUX_KERNELS_ROOT'] = \
            self.source_path(CONSTANTS['repos']['images']['linux'])
        env['OUTDIR'] = self.image_path()
        env['QEMU_FTP_PORT'] = str(ftp_port)
        env['ISODIR'] = iso_dir if iso_dir else ''
        # Keeping intermediate build artifacts is only cheap with COW support.
        env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'
        logger.debug('Invoking makefile with:')
        logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])
        logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env['S2E_LINUX_KERNELS_ROOT'])
        logger.debug('export OUTDIR=%s', env['OUTDIR'])
        logger.debug('export ISODIR=%s', env.get('ISODIR', ''))
        logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get('DEBUG_INTERMEDIATE_RULES', ''))
        if self._headless:
            logger.warning('Image creation will run in headless mode. '
                           'Use --gui to see graphic output for debugging')
        else:
            # An empty GRAPHICS variable tells the Makefile to show the QEMU GUI.
            env['GRAPHICS'] = ''
        if not self._use_kvm:
            env['QEMU_KVM'] = ''
            logger.warning('Image build without KVM. This will be slow')
        try:
            make = sh.Command('make').bake(file=os.path.join(img_build_dir,
                                                             'Makefile'),
                                           directory=self.image_path(),
                                           _env=env, _fg=True)
            make_image = make.bake(j=self._num_cores, r=True, warn_undefined_variables=True)
            make_image(sorted(rule_names))
        except ErrorReturnCode as e:
            raise CommandError(e) from e

    def _clone_kernel(self):
        """Clone the S2E Linux kernels repository unless it already exists."""
        kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])
        if os.path.exists(kernels_root):
            logger.info('Kernel repository already exists in %s', kernels_root)
            return
        logger.info('Cloning kernels repository to %s', kernels_root)
        kernels_repo = CONSTANTS['repos']['images']['linux']
        repos.git_clone_to_source(self.env_path(), kernels_repo)

    def _print_image_list(self, images, image_groups, image_descriptors):
        """Print the available image groups and images, aligned in columns."""
        img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
        templates = get_image_templates(img_build_dir)
        if not templates:
            images_json_path = os.path.join(img_build_dir, 'images.json')
            raise CommandError('No images available to build. Make sure that '
                               f'{images_json_path} exists and is valid')

        def get_max_len(lst):
            # Width of the longest entry, used for column alignment below.
            ret = 0
            for item in lst:
                if len(item) > ret:
                    ret = len(item)
            return ret

        print('Available image groups:')
        max_group_len = get_max_len(image_groups)
        for group in image_groups:
            print(f' * {group:{max_group_len}} - Build {group} images')
        print('\nAvailable images:')
        max_image_len = get_max_len(images)
        for image in sorted(images):
            print(f' * {image:{max_image_len}} - {image_descriptors[image]["name"]}')

    def _print_apps_list(self):
        """Print every buildable base-image/app combination."""
        img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
        app_templates = get_app_templates(img_build_dir)
        if not app_templates:
            apps_json_path = os.path.join(img_build_dir, 'apps.json')
            raise CommandError('No apps available to build. Make sure that '
                               f'{apps_json_path} exists and is valid')
        print('Available applications:')
        for app_template, desc in sorted(app_templates.items()):
            for base_image in desc['base_images']:
                print(f' * {base_image}/{app_template} - {desc["name"]}')
| """
Get a list of groups for the user ``user_name``.
"""
groups = [g.gr_name for g in grp.getgrall() if user_name in g.gr_mem]
gid = pwd.getpwnam(user_name).pw_gid
groups.append(grp.getgrgid(gid).gr_name)
return groups | identifier_body |
image_build.py | """
Copyright (c) 2017 Cyberhaven
Copyright (c) 2017 Dependable Systems Laboratory, EPFL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import glob
import grp
import logging
import os
import pwd
import re
import socket
import time
from threading import Thread
import psutil
from psutil import NoSuchProcess
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
import sh
from sh import ErrorReturnCode
from s2e_env import CONSTANTS
from s2e_env.command import EnvCommand, CommandError
from s2e_env.utils import repos
from s2e_env.utils.images import ImageDownloader, get_image_templates, get_app_templates, get_all_images, \
translate_image_name
logger = logging.getLogger('image_build')
def _get_user_groups(user_name):
"""
Get a list of groups for the user ``user_name``.
"""
groups = [g.gr_name for g in grp.getgrall() if user_name in g.gr_mem]
gid = pwd.getpwnam(user_name).pw_gid
groups.append(grp.getgrgid(gid).gr_name)
return groups
def _get_user_name():
"""
Get the current user.
"""
return pwd.getpwuid(os.getuid())[0]
def _user_belongs_to(group_name):
    """Return True when the current user is a member of ``group_name``."""
    return group_name in _get_user_groups(_get_user_name())
def _raise_group_error(group_name):
    """Abort with instructions for joining the missing ``group_name`` group."""
    message = (f'You must belong to the {group_name} group in order to build '
               'images. Please run the following command, then logout '
               'and login:\n\n'
               f'\tsudo usermod -a -G {group_name} $(whoami)')
    raise CommandError(message)
def _check_groups_docker():
    """Ensure the current user is in the ``docker`` group (required to build images)."""
    belongs = _user_belongs_to('docker')
    if not belongs:
        _raise_group_error('docker')
def _check_groups_kvm():
    """Being member of KVM is required only when using KVM to build images"""
    in_kvm_groups = _user_belongs_to('libvirtd') or _user_belongs_to('kvm')
    if not in_kvm_groups:
        _raise_group_error('kvm')
def _check_virtualbox():
    """Fail when a VirtualBox VM is running: VirtualBox and KVM cannot coexist."""
    # Iterate defensively: a process may vanish between listing and inspection
    # (https://github.com/giampaolo/psutil/issues/132#issuecomment-44017679).
    for process in psutil.process_iter():
        try:
            name = process.name()
        except NoSuchProcess:
            continue
        if name == 'VBoxHeadless':
            raise CommandError('S2E uses KVM to build images. VirtualBox '
                               'is currently running, which is not '
                               'compatible with KVM. Please close all '
                               'VirtualBox VMs and try again.')
def _check_vmware():
    """
    Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must
    *not* be running together with S2E.
    """
    # The function identifier was garbled in this copy ("def | ():", invalid
    # syntax); restored to _check_vmware to match the call site in handle().
    for proc in psutil.process_iter():
        try:
            if proc.name() == 'vmware-vmx':
                raise CommandError('S2E uses KVM to build images. VMware '
                                   'is currently running, which is not '
                                   'compatible with KVM. Please close all '
                                   'VMware VMs and try again.')
        except NoSuchProcess:
            pass
def _check_kvm():
    """Verify that /dev/kvm exists; libs2e needs it to communicate with QEMU."""
    kvm_path = os.path.join(os.sep, 'dev', 'kvm')
    if os.path.exists(kvm_path):
        return
    raise CommandError('KVM interface not found - check that /dev/kvm '
                       'exists. Alternatively, you can disable KVM (-n '
                       'option) or download pre-built images (-d option)')
def _check_vmlinux():
    """Ensure kernels under /boot are readable (a guestfish requirement)."""
    kernel_glob = os.path.join(os.sep, 'boot', 'vmlinu*')
    for kernel in glob.glob(kernel_glob):
        try:
            with open(kernel, 'rb'):
                pass
        except IOError:
            raise CommandError('Make sure that the kernels in /boot are readable. '
                               'This is required for guestfish. Please run the '
                               'following command:\n\n'
                               'sudo chmod ugo+r /boot/vmlinu*') from None
# pylint: disable=no-member
def _check_cow(image_dir):
    """
    Check that the file system that stores guest images supports copy-on-write.

    Returns True when ``cp --reflink=always`` succeeds inside ``image_dir``,
    False otherwise (a warning with upgrade instructions is logged).
    """
    try:
        # Probe COW support by reflink-copying a scratch file.
        src = f'{image_dir}/.cowcheck'
        dst = f'{image_dir}/.cowcheck1'
        sh.touch(src)
        sh.cp('--reflink=always', src, dst)
        return True
    except Exception:
        warn_msg = f"""
        Copy-on-write check failed.
        The file system where images are stored ({image_dir}) does not support copy-on-write.
        It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage
        location for S2E images, as this can save up to 60% of disk space. The building process checkpoints
        intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.
        How to upgrade:
        1. Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).
        Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.
        2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)
        3. Delete the "images" folder in your S2E environment
        4. Create in your S2E environment a symbolic link called "images" to the directory you created in step 2
        """
        # Strip the 8-space literal indentation before logging.
        logger.warning(re.sub(r'^ {8}', '', warn_msg, flags=re.MULTILINE))
        return False
    finally:
        # Always remove the scratch files, even when the probe failed.
        sh.rm('-f', src)
        sh.rm('-f', dst)
def _raise_invalid_image(image_name):
    """Report an unknown image name and point at the image listing command."""
    message = (f'Invalid image name: {image_name}. Run ``s2e image_build`` '
               'to list available images')
    raise CommandError(message)
def _get_base_image_and_app(image_name):
x = image_name.split('/')
if len(x) == 1:
return x[0], None
if len(x) == 2:
return x
raise CommandError(f'Invalid image name {image_name}')
def _has_app_image(image_names):
for name in image_names:
if '/' in name:
return True
return False
def _check_product_keys(image_descriptors, image_names):
    """
    Verify that every selected image declaring a ``product_key`` field
    (directly or under ``os``) actually has a non-empty key configured.
    """
    missing_keys = []
    for image_name in image_names:
        descriptor = image_descriptors[image_name]
        if 'product_key' in descriptor and not descriptor['product_key']:
            missing_keys.append(image_name)
        os_section = descriptor.get('os', {})
        if 'product_key' in os_section and not os_section['product_key']:
            missing_keys.append(image_name)
    if not missing_keys:
        return
    logger.error('The following images require a product key:')
    for image in missing_keys:
        logger.error(' * %s', image)
    raise CommandError('Please update images.json and/or apps.json.')
def _check_iso(templates, app_templates, iso_dir, image_names):
    """
    Ensure every selected image that needs an installation ISO can find it.

    An ISO requirement may come from the base image template or the app
    template. Entries that declare a download ``url`` need no local file;
    entries that declare a ``name`` must exist inside ``iso_dir``.

    Raises:
        CommandError: if ``iso_dir`` is missing while required, or the ISO
            file does not exist.
    """
    for image_name in image_names:
        base_image, app_name = _get_base_image_and_app(image_name)
        descriptors = [templates[base_image]]
        if app_name:
            descriptors.append(app_templates[app_name])
        for desc in descriptors:
            iso = desc.get('iso', {})
            # A downloadable ISO does not need to be present locally.
            if iso.get('url', ''):
                continue
            name = iso.get('name', '')
            if not name:
                continue
            if not iso_dir:
                raise CommandError(
                    'Please use the --iso-dir option to specify the path '
                    f'to a folder that contains {name}'
                )
            path = os.path.join(iso_dir, name)
            if not os.path.exists(path):
                raise CommandError(f'The image {image_name} requires {path}, which could not be found')
def _is_port_available(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
return True
except socket.error:
return False
finally:
s.close()
def _start_ftp_server(image_path, port):
    """
    Start an anonymous FTP server rooted at ``image_path`` on localhost.

    The server runs on a daemon thread so it cannot keep the process alive;
    the caller is responsible for shutting it down via ``close_all()``.
    """
    authorizer = DummyAuthorizer()
    # Anonymous clients get full read/write permissions inside image_path.
    authorizer.add_anonymous(image_path, perm='elradfmwMT')
    handler = FTPHandler
    handler.authorizer = authorizer
    # 10.0.2.2 is the host address as seen from QEMU's default slirp network.
    handler.masquerade_address = '10.0.2.2'
    # QEMU slirp won't let the guest reconnect if timeout happens, so we disable it
    handler.timeout = None
    server = FTPServer(("127.0.0.1", port), handler)
    thread = Thread(target=_run_ftp_server, args=[server])
    thread.daemon = True
    thread.start()
    # Give the server a moment to start accepting connections.
    time.sleep(1)
    return server
def _run_ftp_server(server):
    """Serve FTP requests until the server is closed, then release sockets."""
    try:
        server.serve_forever()
    finally:
        logger.info('FTP server terminated')
        server.close_all()
def _get_archive_rules(image_path, rule_names):
    """Map image names to the ``.tar.xz`` archive targets that will be built."""
    if _has_app_image(rule_names):
        raise CommandError('Building archives of app images is not supported yet')
    archive_rules = [os.path.join(image_path, f'{name}.tar.xz') for name in rule_names]
    logger.info('The following archives will be built:')
    for rule in archive_rules:
        logger.info(' * %s', rule)
    return archive_rules
def _download_images(image_path, image_names, templates):
    """
    Download prebuilt base images into ``image_path``.

    Raises:
        CommandError: app images (``base/app``) cannot be downloaded yet.
    """
    if _has_app_image(image_names):
        raise CommandError('Downloading of app images is not supported yet')
    image_downloader = ImageDownloader(templates)
    image_downloader.download_images(image_names, image_path)
    logger.info('Successfully downloaded images: %s', ', '.join(image_names))
class Command(EnvCommand):
"""
Builds an image.
"""
help = 'Build an image.'
def __init__(self):
super().__init__()
self._headless = True
self._use_kvm = True
self._num_cores = 1
self._has_cow = False
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('name',
help='The name of the image to build. If empty,'
' shows available images', nargs='*')
parser.add_argument('-g', '--gui', action='store_true',
help='Display QEMU GUI during image build')
parser.add_argument('-c', '--cores', required=False, default=2,
type=int,
help='The number of cores used when building the '
'VM image. Defaults to 2')
parser.add_argument('-x', '--clean', action='store_true',
help='Deletes all images and rebuild them from '
'scratch')
parser.add_argument('-a', '--archive', action='store_true',
help='Creates an archive for the specified image')
parser.add_argument('-p', '--ftp-port', required=False, default=15468, type=int,
help='Port for the internal FTP server to receive files from guest VMs during build')
parser.add_argument('-d', '--download', action='store_true',
help='Download image from the repository instead '
'of building it')
parser.add_argument('-i', '--iso-dir',
help='Path to folder that stores ISO files of Windows images')
parser.add_argument('-n', '--no-kvm', action='store_true',
help='Disable KVM during image build')
def handle(self, *args, **options):
# If DISPLAY is missing, don't use headless mode
if options['gui']:
self._headless = False
# If KVM has been explicitly disabled, don't use it during the build
if options['no_kvm']:
self._use_kvm = False
self._num_cores = options['cores']
# The path could have been deleted by a previous clean
if not os.path.exists(self.image_path()):
os.makedirs(self.image_path())
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
if options['clean']:
self._invoke_make(img_build_dir, ['clean'])
return
image_names = options['name']
templates = get_image_templates(img_build_dir)
app_templates = get_app_templates(img_build_dir)
images, image_groups, image_descriptors = get_all_images(templates, app_templates)
if not image_names:
self._print_image_list(images, image_groups, image_descriptors)
print('\nRun ``s2e image_build <name>`` to build an image. '
'Note that you must run ``s2e build`` **before** building '
'an image')
return
image_names = translate_image_name(images, image_groups, image_names)
logger.info('The following images will be built:')
for image in image_names:
logger.info(' * %s', image)
if options['download']:
_download_images(self.image_path(), image_names, templates)
return
rule_names = image_names
if options['archive']:
rule_names = _get_archive_rules(self.image_path(), image_names)
iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'] else None
# Check for optional product keys and iso directories.
# These may or may not be required, depending on the set of images.
_check_product_keys(image_descriptors, image_names)
_check_iso(templates, app_templates, iso_dir, image_names)
if self._use_kvm:
_check_kvm()
_check_groups_kvm()
_check_groups_docker()
_check_vmlinux()
self._has_cow = _check_cow(self.image_path())
if self._use_kvm:
_check_virtualbox()
_check_vmware()
if not _is_port_available(options['ftp_port']):
raise CommandError(f'localhost:{options["ftp_port"]} is not available. Check that the port is free or '
'specify a port with --ftp-port')
# Clone kernel if needed.
# This is necessary if the s2e env has been initialized with -b flag.
self._clone_kernel()
server = _start_ftp_server(self.image_path(), options['ftp_port'])
self._invoke_make(img_build_dir, rule_names, options['ftp_port'], iso_dir)
logger.success('Built image(s) \'%s\'', ' '.join(image_names))
server.close_all()
def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):
env = os.environ.copy()
env['S2E_INSTALL_ROOT'] = self.install_path()
env['S2E_LINUX_KERNELS_ROOT'] = \
self.source_path(CONSTANTS['repos']['images']['linux'])
env['OUTDIR'] = self.image_path()
env['QEMU_FTP_PORT'] = str(ftp_port)
env['ISODIR'] = iso_dir if iso_dir else ''
env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'
logger.debug('Invoking makefile with:')
logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])
logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env['S2E_LINUX_KERNELS_ROOT'])
logger.debug('export OUTDIR=%s', env['OUTDIR'])
logger.debug('export ISODIR=%s', env.get('ISODIR', ''))
logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get('DEBUG_INTERMEDIATE_RULES', ''))
if self._headless:
logger.warning('Image creation will run in headless mode. '
'Use --gui to see graphic output for debugging')
else:
env['GRAPHICS'] = ''
if not self._use_kvm:
env['QEMU_KVM'] = ''
logger.warning('Image build without KVM. This will be slow')
try:
make = sh.Command('make').bake(file=os.path.join(img_build_dir,
'Makefile'),
directory=self.image_path(),
_env=env, _fg=True)
make_image = make.bake(j=self._num_cores, r=True, warn_undefined_variables=True)
make_image(sorted(rule_names))
except ErrorReturnCode as e:
raise CommandError(e) from e
def _clone_kernel(self):
kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])
if os.path.exists(kernels_root):
logger.info('Kernel repository already exists in %s', kernels_root)
return
logger.info('Cloning kernels repository to %s', kernels_root)
kernels_repo = CONSTANTS['repos']['images']['linux']
repos.git_clone_to_source(self.env_path(), kernels_repo)
def _print_image_list(self, images, image_groups, image_descriptors):
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
templates = get_image_templates(img_build_dir)
if not templates:
images_json_path = os.path.join(img_build_dir, 'images.json')
raise CommandError('No images available to build. Make sure that '
f'{images_json_path} exists and is valid')
def get_max_len(lst):
ret = 0
for item in lst:
if len(item) > ret:
ret = len(item)
return ret
print('Available image groups:')
max_group_len = get_max_len(image_groups)
for group in image_groups:
print(f' * {group:{max_group_len}} - Build {group} images')
print('\nAvailable images:')
max_image_len = get_max_len(images)
for image in sorted(images):
print(f' * {image:{max_image_len}} - {image_descriptors[image]["name"]}')
def _print_apps_list(self):
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
app_templates = get_app_templates(img_build_dir)
if not app_templates:
apps_json_path = os.path.join(img_build_dir, 'apps.json')
raise CommandError('No apps available to build. Make sure that '
f'{apps_json_path} exists and is valid')
print('Available applications:')
for app_template, desc in sorted(app_templates.items()):
for base_image in desc['base_images']:
print(f' * {base_image}/{app_template} - {desc["name"]}')
| _check_vmware | identifier_name |
image_build.py | """
Copyright (c) 2017 Cyberhaven
Copyright (c) 2017 Dependable Systems Laboratory, EPFL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import glob
import grp
import logging
import os
import pwd
import re
import socket
import time
from threading import Thread
import psutil
from psutil import NoSuchProcess
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
import sh
from sh import ErrorReturnCode
from s2e_env import CONSTANTS
from s2e_env.command import EnvCommand, CommandError
from s2e_env.utils import repos
from s2e_env.utils.images import ImageDownloader, get_image_templates, get_app_templates, get_all_images, \
translate_image_name
logger = logging.getLogger('image_build')
def _get_user_groups(user_name):
"""
Get a list of groups for the user ``user_name``.
"""
groups = [g.gr_name for g in grp.getgrall() if user_name in g.gr_mem]
gid = pwd.getpwnam(user_name).pw_gid
groups.append(grp.getgrgid(gid).gr_name)
return groups
def _get_user_name():
"""
Get the current user.
"""
return pwd.getpwuid(os.getuid())[0]
def _user_belongs_to(group_name):
"""
Check that the current user belongs to the ``group_name`` group.
"""
user_name = _get_user_name()
groups = _get_user_groups(user_name)
return group_name in groups
def _raise_group_error(group_name):
raise CommandError(f'You must belong to the {group_name} group in order to build '
'images. Please run the following command, then logout '
'and login:\n\n'
f'\tsudo usermod -a -G {group_name} $(whoami)')
def _check_groups_docker():
"""
Check that the current user belongs to the required groups to both run S2E and build S2E images.
"""
if not _user_belongs_to('docker'):
_raise_group_error('docker')
def _check_groups_kvm():
"""Being member of KVM is required only when using KVM to build images"""
if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):
_raise_group_error('kvm')
def _check_virtualbox():
"""
Check if VirtualBox is running. VirtualBox conflicts with S2E's requirement for KVM, so VirtualBox must
*not* be running together with S2E.
"""
# Adapted from https://github.com/giampaolo/psutil/issues/132#issuecomment-44017679
# to avoid race conditions
for proc in psutil.process_iter():
try:
if proc.name() == 'VBoxHeadless':
raise CommandError('S2E uses KVM to build images. VirtualBox '
'is currently running, which is not '
'compatible with KVM. Please close all '
'VirtualBox VMs and try again.')
except NoSuchProcess:
pass
def _check_vmware():
"""
Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must
*not* be running together with S2E.
"""
for proc in psutil.process_iter():
try:
if proc.name() == 'vmware-vmx':
raise CommandError('S2E uses KVM to build images. VMware '
'is currently running, which is not '
'compatible with KVM. Please close all '
'VMware VMs and try again.')
except NoSuchProcess:
pass
def _check_kvm():
"""
Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.
"""
if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):
raise CommandError('KVM interface not found - check that /dev/kvm '
'exists. Alternatively, you can disable KVM (-n '
'option) or download pre-built images (-d option)')
def _check_vmlinux():
"""
Check that /boot/vmlinux* files are readable. This is important for guestfish.
"""
try:
for f in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):
with open(f, 'rb'):
pass
except IOError:
raise CommandError('Make sure that the kernels in /boot are readable. '
'This is required for guestfish. Please run the '
'following command:\n\n'
'sudo chmod ugo+r /boot/vmlinu*') from None
# pylint: disable=no-member
def _check_cow(image_dir):
"""
Check that the file system that stores guest images supports copy-on-write.
"""
try:
src = f'{image_dir}/.cowcheck'
dst = f'{image_dir}/.cowcheck1'
sh.touch(src)
sh.cp('--reflink=always', src, dst)
return True
except Exception:
warn_msg = f"""
Copy-on-write check failed.
The file system where images are stored ({image_dir}) does not support copy-on-write.
It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage
location for S2E images, as this can save up to 60% of disk space. The building process checkpoints
intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.
How to upgrade:
1. Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).
Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.
2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)
3. Delete the "images" folder in your S2E environment
4. Create in your S2E environment a symbolic link called "images" to the directory you created in step 2
"""
logger.warning(re.sub(r'^ {8}', '', warn_msg, flags=re.MULTILINE))
return False
finally:
sh.rm('-f', src)
sh.rm('-f', dst)
def _raise_invalid_image(image_name):
raise CommandError(f'Invalid image name: {image_name}. Run ``s2e image_build`` '
'to list available images')
def _get_base_image_and_app(image_name):
x = image_name.split('/')
if len(x) == 1:
return x[0], None
if len(x) == 2:
return x
raise CommandError(f'Invalid image name {image_name}')
def _has_app_image(image_names):
for name in image_names:
if '/' in name:
return True
return False
def _check_product_keys(image_descriptors, image_names): | missing_keys = []
for image_name in image_names:
image = image_descriptors[image_name]
if 'product_key' in image:
if not image['product_key']:
missing_keys.append(image_name)
ios = image_descriptors[image_name].get('os', {})
if 'product_key' in ios:
if not ios['product_key']:
missing_keys.append(image_name)
if missing_keys:
logger.error('The following images require a product key:')
for image in missing_keys:
logger.error(' * %s', image)
raise CommandError('Please update images.json and/or apps.json.')
def _check_iso(templates, app_templates, iso_dir, image_names):
for image_name in image_names:
base_image, app_name = _get_base_image_and_app(image_name)
descriptors = [templates[base_image]]
if app_name:
descriptors.append(app_templates[app_name])
for desc in descriptors:
iso = desc.get('iso', {})
if iso.get('url', ''):
continue
name = iso.get('name', '')
if not name:
continue
if not iso_dir:
raise CommandError(
'Please use the --iso-dir option to specify the path '
f'to a folder that contains {name}'
)
path = os.path.join(iso_dir, name)
if not os.path.exists(path):
raise CommandError(f'The image {image_name} requires {path}, which could not be found')
def _is_port_available(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
return True
except socket.error:
return False
finally:
s.close()
def _start_ftp_server(image_path, port):
authorizer = DummyAuthorizer()
authorizer.add_anonymous(image_path, perm='elradfmwMT')
handler = FTPHandler
handler.authorizer = authorizer
handler.masquerade_address = '10.0.2.2'
# QEMU slirp won't let the guest reconnect if timeout happens, so we disable it
handler.timeout = None
server = FTPServer(("127.0.0.1", port), handler)
thread = Thread(target=_run_ftp_server, args=[server])
thread.daemon = True
thread.start()
time.sleep(1)
return server
def _run_ftp_server(server):
try:
server.serve_forever()
finally:
logger.info('FTP server terminated')
server.close_all()
def _get_archive_rules(image_path, rule_names):
if _has_app_image(rule_names):
raise CommandError('Building archives of app images is not supported yet')
archive_rules = []
for r in rule_names:
archive_rules.append(os.path.join(image_path, f'{r}.tar.xz'))
logger.info('The following archives will be built:')
for a in archive_rules:
logger.info(' * %s', a)
return archive_rules
def _download_images(image_path, image_names, templates):
if _has_app_image(image_names):
raise CommandError('Downloading of app images is not supported yet')
image_downloader = ImageDownloader(templates)
image_downloader.download_images(image_names, image_path)
logger.info('Successfully downloaded images: %s', ', '.join(image_names))
class Command(EnvCommand):
"""
Builds an image.
"""
help = 'Build an image.'
def __init__(self):
super().__init__()
self._headless = True
self._use_kvm = True
self._num_cores = 1
self._has_cow = False
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('name',
help='The name of the image to build. If empty,'
' shows available images', nargs='*')
parser.add_argument('-g', '--gui', action='store_true',
help='Display QEMU GUI during image build')
parser.add_argument('-c', '--cores', required=False, default=2,
type=int,
help='The number of cores used when building the '
'VM image. Defaults to 2')
parser.add_argument('-x', '--clean', action='store_true',
help='Deletes all images and rebuild them from '
'scratch')
parser.add_argument('-a', '--archive', action='store_true',
help='Creates an archive for the specified image')
parser.add_argument('-p', '--ftp-port', required=False, default=15468, type=int,
help='Port for the internal FTP server to receive files from guest VMs during build')
parser.add_argument('-d', '--download', action='store_true',
help='Download image from the repository instead '
'of building it')
parser.add_argument('-i', '--iso-dir',
help='Path to folder that stores ISO files of Windows images')
parser.add_argument('-n', '--no-kvm', action='store_true',
help='Disable KVM during image build')
def handle(self, *args, **options):
# If DISPLAY is missing, don't use headless mode
if options['gui']:
self._headless = False
# If KVM has been explicitly disabled, don't use it during the build
if options['no_kvm']:
self._use_kvm = False
self._num_cores = options['cores']
# The path could have been deleted by a previous clean
if not os.path.exists(self.image_path()):
os.makedirs(self.image_path())
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
if options['clean']:
self._invoke_make(img_build_dir, ['clean'])
return
image_names = options['name']
templates = get_image_templates(img_build_dir)
app_templates = get_app_templates(img_build_dir)
images, image_groups, image_descriptors = get_all_images(templates, app_templates)
if not image_names:
self._print_image_list(images, image_groups, image_descriptors)
print('\nRun ``s2e image_build <name>`` to build an image. '
'Note that you must run ``s2e build`` **before** building '
'an image')
return
image_names = translate_image_name(images, image_groups, image_names)
logger.info('The following images will be built:')
for image in image_names:
logger.info(' * %s', image)
if options['download']:
_download_images(self.image_path(), image_names, templates)
return
rule_names = image_names
if options['archive']:
rule_names = _get_archive_rules(self.image_path(), image_names)
iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'] else None
# Check for optional product keys and iso directories.
# These may or may not be required, depending on the set of images.
_check_product_keys(image_descriptors, image_names)
_check_iso(templates, app_templates, iso_dir, image_names)
if self._use_kvm:
_check_kvm()
_check_groups_kvm()
_check_groups_docker()
_check_vmlinux()
self._has_cow = _check_cow(self.image_path())
if self._use_kvm:
_check_virtualbox()
_check_vmware()
if not _is_port_available(options['ftp_port']):
raise CommandError(f'localhost:{options["ftp_port"]} is not available. Check that the port is free or '
'specify a port with --ftp-port')
# Clone kernel if needed.
# This is necessary if the s2e env has been initialized with -b flag.
self._clone_kernel()
server = _start_ftp_server(self.image_path(), options['ftp_port'])
self._invoke_make(img_build_dir, rule_names, options['ftp_port'], iso_dir)
logger.success('Built image(s) \'%s\'', ' '.join(image_names))
server.close_all()
def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):
env = os.environ.copy()
env['S2E_INSTALL_ROOT'] = self.install_path()
env['S2E_LINUX_KERNELS_ROOT'] = \
self.source_path(CONSTANTS['repos']['images']['linux'])
env['OUTDIR'] = self.image_path()
env['QEMU_FTP_PORT'] = str(ftp_port)
env['ISODIR'] = iso_dir if iso_dir else ''
env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'
logger.debug('Invoking makefile with:')
logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])
logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env['S2E_LINUX_KERNELS_ROOT'])
logger.debug('export OUTDIR=%s', env['OUTDIR'])
logger.debug('export ISODIR=%s', env.get('ISODIR', ''))
logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get('DEBUG_INTERMEDIATE_RULES', ''))
if self._headless:
logger.warning('Image creation will run in headless mode. '
'Use --gui to see graphic output for debugging')
else:
env['GRAPHICS'] = ''
if not self._use_kvm:
env['QEMU_KVM'] = ''
logger.warning('Image build without KVM. This will be slow')
try:
make = sh.Command('make').bake(file=os.path.join(img_build_dir,
'Makefile'),
directory=self.image_path(),
_env=env, _fg=True)
make_image = make.bake(j=self._num_cores, r=True, warn_undefined_variables=True)
make_image(sorted(rule_names))
except ErrorReturnCode as e:
raise CommandError(e) from e
def _clone_kernel(self):
kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])
if os.path.exists(kernels_root):
logger.info('Kernel repository already exists in %s', kernels_root)
return
logger.info('Cloning kernels repository to %s', kernels_root)
kernels_repo = CONSTANTS['repos']['images']['linux']
repos.git_clone_to_source(self.env_path(), kernels_repo)
def _print_image_list(self, images, image_groups, image_descriptors):
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
templates = get_image_templates(img_build_dir)
if not templates:
images_json_path = os.path.join(img_build_dir, 'images.json')
raise CommandError('No images available to build. Make sure that '
f'{images_json_path} exists and is valid')
def get_max_len(lst):
ret = 0
for item in lst:
if len(item) > ret:
ret = len(item)
return ret
print('Available image groups:')
max_group_len = get_max_len(image_groups)
for group in image_groups:
print(f' * {group:{max_group_len}} - Build {group} images')
print('\nAvailable images:')
max_image_len = get_max_len(images)
for image in sorted(images):
print(f' * {image:{max_image_len}} - {image_descriptors[image]["name"]}')
def _print_apps_list(self):
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
app_templates = get_app_templates(img_build_dir)
if not app_templates:
apps_json_path = os.path.join(img_build_dir, 'apps.json')
raise CommandError('No apps available to build. Make sure that '
f'{apps_json_path} exists and is valid')
print('Available applications:')
for app_template, desc in sorted(app_templates.items()):
for base_image in desc['base_images']:
print(f' * {base_image}/{app_template} - {desc["name"]}') | random_line_split | |
image_build.py | """
Copyright (c) 2017 Cyberhaven
Copyright (c) 2017 Dependable Systems Laboratory, EPFL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import glob
import grp
import logging
import os
import pwd
import re
import socket
import time
from threading import Thread
import psutil
from psutil import NoSuchProcess
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
import sh
from sh import ErrorReturnCode
from s2e_env import CONSTANTS
from s2e_env.command import EnvCommand, CommandError
from s2e_env.utils import repos
from s2e_env.utils.images import ImageDownloader, get_image_templates, get_app_templates, get_all_images, \
translate_image_name
logger = logging.getLogger('image_build')
def _get_user_groups(user_name):
"""
Get a list of groups for the user ``user_name``.
"""
groups = [g.gr_name for g in grp.getgrall() if user_name in g.gr_mem]
gid = pwd.getpwnam(user_name).pw_gid
groups.append(grp.getgrgid(gid).gr_name)
return groups
def _get_user_name():
"""
Get the current user.
"""
return pwd.getpwuid(os.getuid())[0]
def _user_belongs_to(group_name):
"""
Check that the current user belongs to the ``group_name`` group.
"""
user_name = _get_user_name()
groups = _get_user_groups(user_name)
return group_name in groups
def _raise_group_error(group_name):
raise CommandError(f'You must belong to the {group_name} group in order to build '
'images. Please run the following command, then logout '
'and login:\n\n'
f'\tsudo usermod -a -G {group_name} $(whoami)')
def _check_groups_docker():
"""
Check that the current user belongs to the required groups to both run S2E and build S2E images.
"""
if not _user_belongs_to('docker'):
_raise_group_error('docker')
def _check_groups_kvm():
"""Being member of KVM is required only when using KVM to build images"""
if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):
_raise_group_error('kvm')
def _check_virtualbox():
"""
Check if VirtualBox is running. VirtualBox conflicts with S2E's requirement for KVM, so VirtualBox must
*not* be running together with S2E.
"""
# Adapted from https://github.com/giampaolo/psutil/issues/132#issuecomment-44017679
# to avoid race conditions
for proc in psutil.process_iter():
try:
if proc.name() == 'VBoxHeadless':
raise CommandError('S2E uses KVM to build images. VirtualBox '
'is currently running, which is not '
'compatible with KVM. Please close all '
'VirtualBox VMs and try again.')
except NoSuchProcess:
pass
def _check_vmware():
"""
Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must
*not* be running together with S2E.
"""
for proc in psutil.process_iter():
try:
if proc.name() == 'vmware-vmx':
raise CommandError('S2E uses KVM to build images. VMware '
'is currently running, which is not '
'compatible with KVM. Please close all '
'VMware VMs and try again.')
except NoSuchProcess:
pass
def _check_kvm():
"""
Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.
"""
if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):
raise CommandError('KVM interface not found - check that /dev/kvm '
'exists. Alternatively, you can disable KVM (-n '
'option) or download pre-built images (-d option)')
def _check_vmlinux():
"""
Check that /boot/vmlinux* files are readable. This is important for guestfish.
"""
try:
for f in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):
with open(f, 'rb'):
pass
except IOError:
raise CommandError('Make sure that the kernels in /boot are readable. '
'This is required for guestfish. Please run the '
'following command:\n\n'
'sudo chmod ugo+r /boot/vmlinu*') from None
# pylint: disable=no-member
def _check_cow(image_dir):
"""
Check that the file system that stores guest images supports copy-on-write.
"""
try:
src = f'{image_dir}/.cowcheck'
dst = f'{image_dir}/.cowcheck1'
sh.touch(src)
sh.cp('--reflink=always', src, dst)
return True
except Exception:
warn_msg = f"""
Copy-on-write check failed.
The file system where images are stored ({image_dir}) does not support copy-on-write.
It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage
location for S2E images, as this can save up to 60% of disk space. The building process checkpoints
intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.
How to upgrade:
1. Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).
Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.
2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)
3. Delete the "images" folder in your S2E environment
4. Create in your S2E environment a symbolic link called "images" to the directory you created in step 2
"""
logger.warning(re.sub(r'^ {8}', '', warn_msg, flags=re.MULTILINE))
return False
finally:
sh.rm('-f', src)
sh.rm('-f', dst)
def _raise_invalid_image(image_name):
raise CommandError(f'Invalid image name: {image_name}. Run ``s2e image_build`` '
'to list available images')
def _get_base_image_and_app(image_name):
x = image_name.split('/')
if len(x) == 1:
return x[0], None
if len(x) == 2:
return x
raise CommandError(f'Invalid image name {image_name}')
def _has_app_image(image_names):
for name in image_names:
if '/' in name:
return True
return False
def _check_product_keys(image_descriptors, image_names):
missing_keys = []
for image_name in image_names:
image = image_descriptors[image_name]
if 'product_key' in image:
if not image['product_key']:
missing_keys.append(image_name)
ios = image_descriptors[image_name].get('os', {})
if 'product_key' in ios:
if not ios['product_key']:
missing_keys.append(image_name)
if missing_keys:
logger.error('The following images require a product key:')
for image in missing_keys:
logger.error(' * %s', image)
raise CommandError('Please update images.json and/or apps.json.')
def _check_iso(templates, app_templates, iso_dir, image_names):
for image_name in image_names:
base_image, app_name = _get_base_image_and_app(image_name)
descriptors = [templates[base_image]]
if app_name:
descriptors.append(app_templates[app_name])
for desc in descriptors:
iso = desc.get('iso', {})
if iso.get('url', ''):
continue
name = iso.get('name', '')
if not name:
continue
if not iso_dir:
raise CommandError(
'Please use the --iso-dir option to specify the path '
f'to a folder that contains {name}'
)
path = os.path.join(iso_dir, name)
if not os.path.exists(path):
raise CommandError(f'The image {image_name} requires {path}, which could not be found')
def _is_port_available(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
return True
except socket.error:
return False
finally:
s.close()
def _start_ftp_server(image_path, port):
authorizer = DummyAuthorizer()
authorizer.add_anonymous(image_path, perm='elradfmwMT')
handler = FTPHandler
handler.authorizer = authorizer
handler.masquerade_address = '10.0.2.2'
# QEMU slirp won't let the guest reconnect if timeout happens, so we disable it
handler.timeout = None
server = FTPServer(("127.0.0.1", port), handler)
thread = Thread(target=_run_ftp_server, args=[server])
thread.daemon = True
thread.start()
time.sleep(1)
return server
def _run_ftp_server(server):
try:
server.serve_forever()
finally:
logger.info('FTP server terminated')
server.close_all()
def _get_archive_rules(image_path, rule_names):
if _has_app_image(rule_names):
raise CommandError('Building archives of app images is not supported yet')
archive_rules = []
for r in rule_names:
archive_rules.append(os.path.join(image_path, f'{r}.tar.xz'))
logger.info('The following archives will be built:')
for a in archive_rules:
logger.info(' * %s', a)
return archive_rules
def _download_images(image_path, image_names, templates):
if _has_app_image(image_names):
raise CommandError('Downloading of app images is not supported yet')
image_downloader = ImageDownloader(templates)
image_downloader.download_images(image_names, image_path)
logger.info('Successfully downloaded images: %s', ', '.join(image_names))
class Command(EnvCommand):
"""
Builds an image.
"""
help = 'Build an image.'
def __init__(self):
super().__init__()
self._headless = True
self._use_kvm = True
self._num_cores = 1
self._has_cow = False
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('name',
help='The name of the image to build. If empty,'
' shows available images', nargs='*')
parser.add_argument('-g', '--gui', action='store_true',
help='Display QEMU GUI during image build')
parser.add_argument('-c', '--cores', required=False, default=2,
type=int,
help='The number of cores used when building the '
'VM image. Defaults to 2')
parser.add_argument('-x', '--clean', action='store_true',
help='Deletes all images and rebuild them from '
'scratch')
parser.add_argument('-a', '--archive', action='store_true',
help='Creates an archive for the specified image')
parser.add_argument('-p', '--ftp-port', required=False, default=15468, type=int,
help='Port for the internal FTP server to receive files from guest VMs during build')
parser.add_argument('-d', '--download', action='store_true',
help='Download image from the repository instead '
'of building it')
parser.add_argument('-i', '--iso-dir',
help='Path to folder that stores ISO files of Windows images')
parser.add_argument('-n', '--no-kvm', action='store_true',
help='Disable KVM during image build')
def handle(self, *args, **options):
# If DISPLAY is missing, don't use headless mode
if options['gui']:
self._headless = False
# If KVM has been explicitly disabled, don't use it during the build
if options['no_kvm']:
self._use_kvm = False
self._num_cores = options['cores']
# The path could have been deleted by a previous clean
if not os.path.exists(self.image_path()):
os.makedirs(self.image_path())
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
if options['clean']:
self._invoke_make(img_build_dir, ['clean'])
return
image_names = options['name']
templates = get_image_templates(img_build_dir)
app_templates = get_app_templates(img_build_dir)
images, image_groups, image_descriptors = get_all_images(templates, app_templates)
if not image_names:
self._print_image_list(images, image_groups, image_descriptors)
print('\nRun ``s2e image_build <name>`` to build an image. '
'Note that you must run ``s2e build`` **before** building '
'an image')
return
image_names = translate_image_name(images, image_groups, image_names)
logger.info('The following images will be built:')
for image in image_names:
logger.info(' * %s', image)
if options['download']:
_download_images(self.image_path(), image_names, templates)
return
rule_names = image_names
if options['archive']:
|
iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'] else None
# Check for optional product keys and iso directories.
# These may or may not be required, depending on the set of images.
_check_product_keys(image_descriptors, image_names)
_check_iso(templates, app_templates, iso_dir, image_names)
if self._use_kvm:
_check_kvm()
_check_groups_kvm()
_check_groups_docker()
_check_vmlinux()
self._has_cow = _check_cow(self.image_path())
if self._use_kvm:
_check_virtualbox()
_check_vmware()
if not _is_port_available(options['ftp_port']):
raise CommandError(f'localhost:{options["ftp_port"]} is not available. Check that the port is free or '
'specify a port with --ftp-port')
# Clone kernel if needed.
# This is necessary if the s2e env has been initialized with -b flag.
self._clone_kernel()
server = _start_ftp_server(self.image_path(), options['ftp_port'])
self._invoke_make(img_build_dir, rule_names, options['ftp_port'], iso_dir)
logger.success('Built image(s) \'%s\'', ' '.join(image_names))
server.close_all()
def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):
env = os.environ.copy()
env['S2E_INSTALL_ROOT'] = self.install_path()
env['S2E_LINUX_KERNELS_ROOT'] = \
self.source_path(CONSTANTS['repos']['images']['linux'])
env['OUTDIR'] = self.image_path()
env['QEMU_FTP_PORT'] = str(ftp_port)
env['ISODIR'] = iso_dir if iso_dir else ''
env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'
logger.debug('Invoking makefile with:')
logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])
logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env['S2E_LINUX_KERNELS_ROOT'])
logger.debug('export OUTDIR=%s', env['OUTDIR'])
logger.debug('export ISODIR=%s', env.get('ISODIR', ''))
logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get('DEBUG_INTERMEDIATE_RULES', ''))
if self._headless:
logger.warning('Image creation will run in headless mode. '
'Use --gui to see graphic output for debugging')
else:
env['GRAPHICS'] = ''
if not self._use_kvm:
env['QEMU_KVM'] = ''
logger.warning('Image build without KVM. This will be slow')
try:
make = sh.Command('make').bake(file=os.path.join(img_build_dir,
'Makefile'),
directory=self.image_path(),
_env=env, _fg=True)
make_image = make.bake(j=self._num_cores, r=True, warn_undefined_variables=True)
make_image(sorted(rule_names))
except ErrorReturnCode as e:
raise CommandError(e) from e
def _clone_kernel(self):
kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])
if os.path.exists(kernels_root):
logger.info('Kernel repository already exists in %s', kernels_root)
return
logger.info('Cloning kernels repository to %s', kernels_root)
kernels_repo = CONSTANTS['repos']['images']['linux']
repos.git_clone_to_source(self.env_path(), kernels_repo)
def _print_image_list(self, images, image_groups, image_descriptors):
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
templates = get_image_templates(img_build_dir)
if not templates:
images_json_path = os.path.join(img_build_dir, 'images.json')
raise CommandError('No images available to build. Make sure that '
f'{images_json_path} exists and is valid')
def get_max_len(lst):
ret = 0
for item in lst:
if len(item) > ret:
ret = len(item)
return ret
print('Available image groups:')
max_group_len = get_max_len(image_groups)
for group in image_groups:
print(f' * {group:{max_group_len}} - Build {group} images')
print('\nAvailable images:')
max_image_len = get_max_len(images)
for image in sorted(images):
print(f' * {image:{max_image_len}} - {image_descriptors[image]["name"]}')
def _print_apps_list(self):
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
app_templates = get_app_templates(img_build_dir)
if not app_templates:
apps_json_path = os.path.join(img_build_dir, 'apps.json')
raise CommandError('No apps available to build. Make sure that '
f'{apps_json_path} exists and is valid')
print('Available applications:')
for app_template, desc in sorted(app_templates.items()):
for base_image in desc['base_images']:
print(f' * {base_image}/{app_template} - {desc["name"]}')
| rule_names = _get_archive_rules(self.image_path(), image_names) | conditional_block |
ChangeSwimlanesJira.user.js | // ==UserScript==
// @name ChangeSwimlanesJira
// @namespace http://tampermonkey.net/
// @version 3.3.3
// @description I'm sad that I've to say goodbye!
// @author WLAD
// @updateSite https://github.com/tepesware/TepesColors/raw/master/ChangeSwimlanesJira.user.js
// @downloadURL https://github.com/tepesware/TepesColors/raw/master/ChangeSwimlanesJira.user.js
// @require https://raw.githubusercontent.com/dchester/jsonpath/master/jsonpath.js
// @require https://raw.githubusercontent.com/moderntribe/tampermonkey-scripts/master/waitForKeyElements.js
// @include /https:\/\/trackspace.lhsystems.com\/secure\/RapidBoard.jspa\?rapidView=2839.*/
// The CSS file, use file:/// for local CSS files.
// @resource customCSS https://github.com/tepesware/TepesColors/raw/master/ChangeJiraSwim.css
// @grant GM_getResourceText
// @grant GM_addStyle
// ==/UserScript==
var done = false;
(function () {
'use strict';
console.debug('start: add CSS');
var cssTxt = GM_getResourceText("customCSS");
GM_addStyle(cssTxt);
console.debug('done: add CSS');
var allData;
var observer = new MutationObserver(function (mutations) {
// For the sake of...observation...let's output the mutation to console to see how this all works
mutations.forEach(function (mutation) {
var message = mutation.type;
//debugger;
if (mutation.addedNodes.length > 0) {
observer.disconnect();
registerContentObserver();
updateTheBoard();
}
});
});
function registerContentObserver() {
var observer = new MutationObserver(function (mutations) {
mutations.forEach(function (mutation) {
var message = mutation.type;
//debugger;
if (mutation.addedNodes.length > 0) {
updateTheBoard();
}
});
});
var targetNode = document.getElementById("ghx-pool");
observer.observe(targetNode, observerConfig);
}
// Notify me of everything!
var observerConfig = {
attributes: false,
childList: true,
characterData: false
};
// Node, config
// In this case we'll listen to all changes to body and child nodes
var targetNode = document.getElementById("ghx-work");
observer.observe(targetNode, observerConfig);
function fillIssues(issues) {
for (var i = 0; i < issues.length; i++) {
// debugger;
// console.log(issues[i]);
fillIssue(issues[i]);
}
}
function getAllData() {
const url = "https://trackspace.lhsystems.com/rest/greenhopper/1.0/xboard/work/allData.json?rapidViewId=2839"
var result;
$.ajax({
type: "GET",
contentType: "application/json; charset=utf-8",
url: url,
data: "{}",
dataType: "json",
success: function (data) {
console.log("all data " + data);
allData = (data);
},
error: function (result) {
console.log("Error " + result);
//alert("Error");
},
async: false
});
}
function updateTheBoard() {
getAllData();
var swimlanes = document.getElementsByClassName("ghx-info");
if (swimlanes.length > 0) {
for (var i = 0; i < swimlanes.length; i++) {
if (swimlanes[i].getElementsByTagName("span")[0].textContent == "To Do") {
$(document.getElementsByClassName("ghx-info")[i].parentElement.parentElement).css('background-color', "#dfe1e5");
}
}
var rows = $("div[swimlane-id]");
var issues = rows.children(".ghx-swimlane-header");
fillIssues(issues);
addGeneralInfo();
}
}
function addGeneralInfo(){
var parrent = $(".subnav-container");
const info = "";
const imgInfo = "<img class='emoticon' src='https://trackspace.lhsystems.com/images/icons/emoticons/warning.png' height='16' width='16' align='absmiddle' alt='' border='0' >";
let htmlInfo = "<span class='generalInfo'>";
htmlInfo = htmlInfo.concat(imgInfo);
htmlInfo = htmlInfo.concat(info);
htmlInfo = htmlInfo.concat(imgInfo);
htmlInfo = htmlInfo.concat("</span>");
parrent.append(htmlInfo);
}
function removeOldStatuses(ussueID) {
var rows = $(".statusesTepes." + ussueID);
rows.remove();
}
function addTestExecutionSummary(issue, stat) |
function addIssueAdditionalInfo(issue, ussueID, data, parrsedSubtasks) {
var temp = $(issue).children("div.ghx-heading");
var html = "<span class='issueInfo ";
html = html.concat(ussueID);
html = html.concat("'>");
var subtaskHtml = addSubtaskRectangles(ussueID, data.fields.assignee.avatarUrls, parrsedSubtasks);
console.log(subtaskHtml);
html = html.concat(addPR(data.fields.customfield_25700));
html = html.concat(subtaskHtml);
html = html.concat("</span>");
temp.append(html);
}
function fillIssue(issue) {
if (issue.hasAttribute("data-issue-key")) {
var ussueID = issue.getAttribute("data-issue-key");
console.log("updateuje issue " + ussueID);
$.ajax({
type: "GET",
contentType: "application/json; charset=utf-8",
url: "https://trackspace.lhsystems.com/rest/api/latest/issue/" + ussueID,
data: "{}",
dataType: "json",
success: function (data) {
var subtasks = data.fields.subtasks;
var parrsedSubtasks = [];
for (var i = 0; i < subtasks.length; i++) {
var avatarForSubtask = getAssigneAvatarForIssue(subtasks[i].key);
var subtask = new Subtask(subtasks[i].fields.status.name, subtasks[i].fields.summary.substring(0, 6), subtasks[i].key, avatarForSubtask);
parrsedSubtasks.push(subtask);
}
removeOldStatuses(ussueID);
addAssigneField(data.fields.assignee.avatarUrls, issue);
addPoints(data.fields.customfield_10233, issue);
addIssueAdditionalInfo(issue, ussueID, data, parrsedSubtasks);
if (data.fields.issuetype.id === "10202") {
addTestExecutionSummary(issue, data.fields.customfield_17918);
}
},
error: function (result) {
console.log("Error " + result);
//alert("Error");
}
});
} else {
}
}
function addAssigneField(avatarsArray, issue) {
var text = "";
var temp = $(issue).children("div.ghx-heading");
var html = "<img class='ghx-avatarTepes-img' src='";
var avatarUrl = avatarsArray['32x32'];
html = html.concat(avatarUrl);
html = html.concat("'>");
//debugger;
temp.prepend(html);
}
function isABot(avatarUrl) {
return ("" + avatarUrl).contains("ermbot");
}
function addSubtaskRectangles(ussueID, avatarsArray, parrseedSubtasks) {
var text = "";
var html = "<span class='statusesTepes ";
html = html.concat(ussueID);
html = html.concat("'>");
var order = ["Done", "In Progress", "To Do"];
var orderClass = ["statusboxDone", "statusboxInProgress", "statusboxTodo"];
for (var j = 0; j < order.length; j++) {
for (var i = 0; i < parrseedSubtasks.length; i++) {
if (parrseedSubtasks[i].status === order[j]) {
html = html.concat(" <span class='" + orderClass[j] + "'>" + parrseedSubtasks[i].name);
var avatarUrl = parrseedSubtasks[i].avatarForSubtask;
var size = "";
if (order[j] === "In Progress") {
size = "-Big";
}
html = html.concat("<span class='tepes-avatar-inner" + size + "'>");
if (order[j] != "In Progress" || isABot(avatarUrl)) {
avatarUrl = "data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs%3D";
console.log("PUSTY " + order[j]);
} else {
html = html.concat("<img src='" + avatarUrl + "'></img>");
}
// if(j ===1 ){
// }
html = html.concat("</span></span>");
}
}
}
html = html.concat("</span>");
return html;
}
function addPoints(storyPoints, issue) {
var text = "";
var temp = $(issue).find("div.ghx-heading > span.ghx-info")
var html = "<span class='storyPoints ";
html = html.concat("'>" + storyPoints);
html = html.concat("</span>");
temp.prepend(html);
}
function addPR(prStatus, htmlParrent) {
var status = prStatus.match("PullRequest.*state='(.*?)'")[1];
var html = "<span class='aui-lozenge aui-lozenge-overflow aui-lozenge-subtle aui-lozenge-success pullrequest-state'>";
if (status === "MERGED") {
html = html.concat(status);
html = html.concat("</span>");
return html;
} else if (status === "OPEN") {
var stateCount = prStatus.match("PullRequest.*{stateCount=(.*?),")[1];
if (stateCount > 0) {
html = html.concat(status);
html = html.concat("</span>");
return html;
}
}
return "";
}
function getAssigneAvatarForIssue(key) {
var result;
// debugger;
result = jsonpath.query(allData, "$.issuesData.issues[?(@.key=='" + key + "')].avatarUrl");
return result;
}
class Subtask {
constructor(status, name, key, avatarForSubtask) {
this.status = status;
this.name = name;
this.key = key;
this.avatarForSubtask = avatarForSubtask;
}
}
})();
| {
//debugger
var temp = $(issue).children("div.ghx-heading");
var html = "<div class=\"testexec-status-block\">";
var statuses = stat.statuses;
for (var i = 0; i < statuses.length; i++) {
if (statuses[i].statusCount > 0) {
html = html.concat("<span style=\"color:");
html = html.concat(statuses[i].color).concat("\" class=\"testexec-status-count\">");
html = html.concat(statuses[i].statusCount).concat("</span><span class=\"testexec-status-name\">");
html = html.concat(statuses[i].name).concat("</span>");
}
}
html = html.concat("</div>");
temp.append(html);
} | identifier_body |
ChangeSwimlanesJira.user.js | // ==UserScript==
// @name ChangeSwimlanesJira
// @namespace http://tampermonkey.net/
// @version 3.3.3
// @description I'm sad that I've to say goodbye!
// @author WLAD
// @updateSite https://github.com/tepesware/TepesColors/raw/master/ChangeSwimlanesJira.user.js
// @downloadURL https://github.com/tepesware/TepesColors/raw/master/ChangeSwimlanesJira.user.js
// @require https://raw.githubusercontent.com/dchester/jsonpath/master/jsonpath.js
// @require https://raw.githubusercontent.com/moderntribe/tampermonkey-scripts/master/waitForKeyElements.js
// @include /https:\/\/trackspace.lhsystems.com\/secure\/RapidBoard.jspa\?rapidView=2839.*/
// The CSS file, use file:/// for local CSS files.
// @resource customCSS https://github.com/tepesware/TepesColors/raw/master/ChangeJiraSwim.css
// @grant GM_getResourceText
// @grant GM_addStyle
// ==/UserScript==
var done = false;
(function () {
'use strict';
console.debug('start: add CSS');
var cssTxt = GM_getResourceText("customCSS");
GM_addStyle(cssTxt);
console.debug('done: add CSS');
var allData;
var observer = new MutationObserver(function (mutations) {
// For the sake of...observation...let's output the mutation to console to see how this all works
mutations.forEach(function (mutation) {
var message = mutation.type;
//debugger;
if (mutation.addedNodes.length > 0) {
observer.disconnect();
registerContentObserver();
updateTheBoard();
}
});
});
function registerContentObserver() {
var observer = new MutationObserver(function (mutations) {
mutations.forEach(function (mutation) {
var message = mutation.type;
//debugger;
if (mutation.addedNodes.length > 0) {
updateTheBoard();
}
});
});
var targetNode = document.getElementById("ghx-pool");
observer.observe(targetNode, observerConfig);
}
// Notify me of everything!
var observerConfig = {
attributes: false,
childList: true,
characterData: false
};
// Node, config
// In this case we'll listen to all changes to body and child nodes
var targetNode = document.getElementById("ghx-work");
observer.observe(targetNode, observerConfig);
function fillIssues(issues) {
for (var i = 0; i < issues.length; i++) {
// debugger;
// console.log(issues[i]);
fillIssue(issues[i]);
}
}
function getAllData() {
const url = "https://trackspace.lhsystems.com/rest/greenhopper/1.0/xboard/work/allData.json?rapidViewId=2839"
var result;
$.ajax({
type: "GET",
contentType: "application/json; charset=utf-8",
url: url,
data: "{}",
dataType: "json",
success: function (data) {
console.log("all data " + data);
allData = (data);
},
error: function (result) {
console.log("Error " + result);
//alert("Error");
},
async: false
});
}
function updateTheBoard() {
getAllData();
var swimlanes = document.getElementsByClassName("ghx-info");
if (swimlanes.length > 0) {
for (var i = 0; i < swimlanes.length; i++) {
if (swimlanes[i].getElementsByTagName("span")[0].textContent == "To Do") {
$(document.getElementsByClassName("ghx-info")[i].parentElement.parentElement).css('background-color', "#dfe1e5");
}
}
var rows = $("div[swimlane-id]");
var issues = rows.children(".ghx-swimlane-header");
fillIssues(issues);
addGeneralInfo();
}
}
function addGeneralInfo(){
var parrent = $(".subnav-container");
const info = "";
const imgInfo = "<img class='emoticon' src='https://trackspace.lhsystems.com/images/icons/emoticons/warning.png' height='16' width='16' align='absmiddle' alt='' border='0' >";
let htmlInfo = "<span class='generalInfo'>";
htmlInfo = htmlInfo.concat(imgInfo);
htmlInfo = htmlInfo.concat(info);
htmlInfo = htmlInfo.concat(imgInfo);
htmlInfo = htmlInfo.concat("</span>");
parrent.append(htmlInfo);
}
function removeOldStatuses(ussueID) {
var rows = $(".statusesTepes." + ussueID);
rows.remove();
}
function addTestExecutionSummary(issue, stat) {
//debugger
var temp = $(issue).children("div.ghx-heading");
var html = "<div class=\"testexec-status-block\">";
var statuses = stat.statuses;
for (var i = 0; i < statuses.length; i++) {
if (statuses[i].statusCount > 0) {
html = html.concat("<span style=\"color:");
html = html.concat(statuses[i].color).concat("\" class=\"testexec-status-count\">");
html = html.concat(statuses[i].statusCount).concat("</span><span class=\"testexec-status-name\">");
html = html.concat(statuses[i].name).concat("</span>");
}
}
html = html.concat("</div>");
temp.append(html);
}
function addIssueAdditionalInfo(issue, ussueID, data, parrsedSubtasks) {
var temp = $(issue).children("div.ghx-heading");
var html = "<span class='issueInfo ";
html = html.concat(ussueID);
html = html.concat("'>");
var subtaskHtml = addSubtaskRectangles(ussueID, data.fields.assignee.avatarUrls, parrsedSubtasks);
console.log(subtaskHtml);
html = html.concat(addPR(data.fields.customfield_25700));
html = html.concat(subtaskHtml);
html = html.concat("</span>");
temp.append(html);
}
function fillIssue(issue) {
if (issue.hasAttribute("data-issue-key")) {
var ussueID = issue.getAttribute("data-issue-key");
console.log("updateuje issue " + ussueID);
$.ajax({
type: "GET",
contentType: "application/json; charset=utf-8",
url: "https://trackspace.lhsystems.com/rest/api/latest/issue/" + ussueID,
data: "{}",
dataType: "json",
success: function (data) {
var subtasks = data.fields.subtasks;
var parrsedSubtasks = [];
for (var i = 0; i < subtasks.length; i++) {
var avatarForSubtask = getAssigneAvatarForIssue(subtasks[i].key);
var subtask = new Subtask(subtasks[i].fields.status.name, subtasks[i].fields.summary.substring(0, 6), subtasks[i].key, avatarForSubtask);
parrsedSubtasks.push(subtask);
}
removeOldStatuses(ussueID);
addAssigneField(data.fields.assignee.avatarUrls, issue);
addPoints(data.fields.customfield_10233, issue);
addIssueAdditionalInfo(issue, ussueID, data, parrsedSubtasks);
if (data.fields.issuetype.id === "10202") {
addTestExecutionSummary(issue, data.fields.customfield_17918);
}
},
error: function (result) {
console.log("Error " + result);
//alert("Error");
}
});
} else {
}
}
function addAssigneField(avatarsArray, issue) {
var text = "";
var temp = $(issue).children("div.ghx-heading");
var html = "<img class='ghx-avatarTepes-img' src='";
var avatarUrl = avatarsArray['32x32'];
html = html.concat(avatarUrl);
html = html.concat("'>");
//debugger;
temp.prepend(html);
}
function isABot(avatarUrl) {
return ("" + avatarUrl).contains("ermbot");
}
function addSubtaskRectangles(ussueID, avatarsArray, parrseedSubtasks) {
var text = "";
var html = "<span class='statusesTepes ";
html = html.concat(ussueID);
html = html.concat("'>");
var order = ["Done", "In Progress", "To Do"];
var orderClass = ["statusboxDone", "statusboxInProgress", "statusboxTodo"];
for (var j = 0; j < order.length; j++) {
for (var i = 0; i < parrseedSubtasks.length; i++) {
if (parrseedSubtasks[i].status === order[j]) {
html = html.concat(" <span class='" + orderClass[j] + "'>" + parrseedSubtasks[i].name);
var avatarUrl = parrseedSubtasks[i].avatarForSubtask;
var size = "";
if (order[j] === "In Progress") {
size = "-Big";
}
html = html.concat("<span class='tepes-avatar-inner" + size + "'>");
if (order[j] != "In Progress" || isABot(avatarUrl)) {
avatarUrl = "data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs%3D";
console.log("PUSTY " + order[j]);
} else {
html = html.concat("<img src='" + avatarUrl + "'></img>");
}
// if(j ===1 ){
// }
html = html.concat("</span></span>");
}
}
}
html = html.concat("</span>");
return html;
}
function addPoints(storyPoints, issue) {
var text = "";
var temp = $(issue).find("div.ghx-heading > span.ghx-info")
var html = "<span class='storyPoints ";
html = html.concat("'>" + storyPoints);
html = html.concat("</span>");
temp.prepend(html);
}
function addPR(prStatus, htmlParrent) {
var status = prStatus.match("PullRequest.*state='(.*?)'")[1];
var html = "<span class='aui-lozenge aui-lozenge-overflow aui-lozenge-subtle aui-lozenge-success pullrequest-state'>";
if (status === "MERGED") {
html = html.concat(status);
html = html.concat("</span>");
return html;
} else if (status === "OPEN") {
var stateCount = prStatus.match("PullRequest.*{stateCount=(.*?),")[1];
if (stateCount > 0) {
html = html.concat(status); | html = html.concat("</span>");
return html;
}
}
return "";
}
function getAssigneAvatarForIssue(key) {
var result;
// debugger;
result = jsonpath.query(allData, "$.issuesData.issues[?(@.key=='" + key + "')].avatarUrl");
return result;
}
class Subtask {
constructor(status, name, key, avatarForSubtask) {
this.status = status;
this.name = name;
this.key = key;
this.avatarForSubtask = avatarForSubtask;
}
}
})(); | random_line_split | |
ChangeSwimlanesJira.user.js | // ==UserScript==
// @name ChangeSwimlanesJira
// @namespace http://tampermonkey.net/
// @version 3.3.3
// @description I'm sad that I've to say goodbye!
// @author WLAD
// @updateSite https://github.com/tepesware/TepesColors/raw/master/ChangeSwimlanesJira.user.js
// @downloadURL https://github.com/tepesware/TepesColors/raw/master/ChangeSwimlanesJira.user.js
// @require https://raw.githubusercontent.com/dchester/jsonpath/master/jsonpath.js
// @require https://raw.githubusercontent.com/moderntribe/tampermonkey-scripts/master/waitForKeyElements.js
// @include /https:\/\/trackspace.lhsystems.com\/secure\/RapidBoard.jspa\?rapidView=2839.*/
// The CSS file, use file:/// for local CSS files.
// @resource customCSS https://github.com/tepesware/TepesColors/raw/master/ChangeJiraSwim.css
// @grant GM_getResourceText
// @grant GM_addStyle
// ==/UserScript==
var done = false;
(function () {
'use strict';
console.debug('start: add CSS');
var cssTxt = GM_getResourceText("customCSS");
GM_addStyle(cssTxt);
console.debug('done: add CSS');
var allData;
var observer = new MutationObserver(function (mutations) {
// For the sake of...observation...let's output the mutation to console to see how this all works
mutations.forEach(function (mutation) {
var message = mutation.type;
//debugger;
if (mutation.addedNodes.length > 0) {
observer.disconnect();
registerContentObserver();
updateTheBoard();
}
});
});
function registerContentObserver() {
var observer = new MutationObserver(function (mutations) {
mutations.forEach(function (mutation) {
var message = mutation.type;
//debugger;
if (mutation.addedNodes.length > 0) {
updateTheBoard();
}
});
});
var targetNode = document.getElementById("ghx-pool");
observer.observe(targetNode, observerConfig);
}
// Notify me of everything!
var observerConfig = {
attributes: false,
childList: true,
characterData: false
};
// Node, config
// In this case we'll listen to all changes to body and child nodes
var targetNode = document.getElementById("ghx-work");
observer.observe(targetNode, observerConfig);
function fillIssues(issues) {
for (var i = 0; i < issues.length; i++) {
// debugger;
// console.log(issues[i]);
fillIssue(issues[i]);
}
}
function getAllData() {
const url = "https://trackspace.lhsystems.com/rest/greenhopper/1.0/xboard/work/allData.json?rapidViewId=2839"
var result;
$.ajax({
type: "GET",
contentType: "application/json; charset=utf-8",
url: url,
data: "{}",
dataType: "json",
success: function (data) {
console.log("all data " + data);
allData = (data);
},
error: function (result) {
console.log("Error " + result);
//alert("Error");
},
async: false
});
}
function | () {
getAllData();
var swimlanes = document.getElementsByClassName("ghx-info");
if (swimlanes.length > 0) {
for (var i = 0; i < swimlanes.length; i++) {
if (swimlanes[i].getElementsByTagName("span")[0].textContent == "To Do") {
$(document.getElementsByClassName("ghx-info")[i].parentElement.parentElement).css('background-color', "#dfe1e5");
}
}
var rows = $("div[swimlane-id]");
var issues = rows.children(".ghx-swimlane-header");
fillIssues(issues);
addGeneralInfo();
}
}
function addGeneralInfo(){
var parrent = $(".subnav-container");
const info = "";
const imgInfo = "<img class='emoticon' src='https://trackspace.lhsystems.com/images/icons/emoticons/warning.png' height='16' width='16' align='absmiddle' alt='' border='0' >";
let htmlInfo = "<span class='generalInfo'>";
htmlInfo = htmlInfo.concat(imgInfo);
htmlInfo = htmlInfo.concat(info);
htmlInfo = htmlInfo.concat(imgInfo);
htmlInfo = htmlInfo.concat("</span>");
parrent.append(htmlInfo);
}
function removeOldStatuses(ussueID) {
var rows = $(".statusesTepes." + ussueID);
rows.remove();
}
function addTestExecutionSummary(issue, stat) {
//debugger
var temp = $(issue).children("div.ghx-heading");
var html = "<div class=\"testexec-status-block\">";
var statuses = stat.statuses;
for (var i = 0; i < statuses.length; i++) {
if (statuses[i].statusCount > 0) {
html = html.concat("<span style=\"color:");
html = html.concat(statuses[i].color).concat("\" class=\"testexec-status-count\">");
html = html.concat(statuses[i].statusCount).concat("</span><span class=\"testexec-status-name\">");
html = html.concat(statuses[i].name).concat("</span>");
}
}
html = html.concat("</div>");
temp.append(html);
}
function addIssueAdditionalInfo(issue, ussueID, data, parrsedSubtasks) {
var temp = $(issue).children("div.ghx-heading");
var html = "<span class='issueInfo ";
html = html.concat(ussueID);
html = html.concat("'>");
var subtaskHtml = addSubtaskRectangles(ussueID, data.fields.assignee.avatarUrls, parrsedSubtasks);
console.log(subtaskHtml);
html = html.concat(addPR(data.fields.customfield_25700));
html = html.concat(subtaskHtml);
html = html.concat("</span>");
temp.append(html);
}
function fillIssue(issue) {
if (issue.hasAttribute("data-issue-key")) {
var ussueID = issue.getAttribute("data-issue-key");
console.log("updateuje issue " + ussueID);
$.ajax({
type: "GET",
contentType: "application/json; charset=utf-8",
url: "https://trackspace.lhsystems.com/rest/api/latest/issue/" + ussueID,
data: "{}",
dataType: "json",
success: function (data) {
var subtasks = data.fields.subtasks;
var parrsedSubtasks = [];
for (var i = 0; i < subtasks.length; i++) {
var avatarForSubtask = getAssigneAvatarForIssue(subtasks[i].key);
var subtask = new Subtask(subtasks[i].fields.status.name, subtasks[i].fields.summary.substring(0, 6), subtasks[i].key, avatarForSubtask);
parrsedSubtasks.push(subtask);
}
removeOldStatuses(ussueID);
addAssigneField(data.fields.assignee.avatarUrls, issue);
addPoints(data.fields.customfield_10233, issue);
addIssueAdditionalInfo(issue, ussueID, data, parrsedSubtasks);
if (data.fields.issuetype.id === "10202") {
addTestExecutionSummary(issue, data.fields.customfield_17918);
}
},
error: function (result) {
console.log("Error " + result);
//alert("Error");
}
});
} else {
}
}
function addAssigneField(avatarsArray, issue) {
var text = "";
var temp = $(issue).children("div.ghx-heading");
var html = "<img class='ghx-avatarTepes-img' src='";
var avatarUrl = avatarsArray['32x32'];
html = html.concat(avatarUrl);
html = html.concat("'>");
//debugger;
temp.prepend(html);
}
function isABot(avatarUrl) {
return ("" + avatarUrl).contains("ermbot");
}
function addSubtaskRectangles(ussueID, avatarsArray, parrseedSubtasks) {
var text = "";
var html = "<span class='statusesTepes ";
html = html.concat(ussueID);
html = html.concat("'>");
var order = ["Done", "In Progress", "To Do"];
var orderClass = ["statusboxDone", "statusboxInProgress", "statusboxTodo"];
for (var j = 0; j < order.length; j++) {
for (var i = 0; i < parrseedSubtasks.length; i++) {
if (parrseedSubtasks[i].status === order[j]) {
html = html.concat(" <span class='" + orderClass[j] + "'>" + parrseedSubtasks[i].name);
var avatarUrl = parrseedSubtasks[i].avatarForSubtask;
var size = "";
if (order[j] === "In Progress") {
size = "-Big";
}
html = html.concat("<span class='tepes-avatar-inner" + size + "'>");
if (order[j] != "In Progress" || isABot(avatarUrl)) {
avatarUrl = "data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs%3D";
console.log("PUSTY " + order[j]);
} else {
html = html.concat("<img src='" + avatarUrl + "'></img>");
}
// if(j ===1 ){
// }
html = html.concat("</span></span>");
}
}
}
html = html.concat("</span>");
return html;
}
function addPoints(storyPoints, issue) {
var text = "";
var temp = $(issue).find("div.ghx-heading > span.ghx-info")
var html = "<span class='storyPoints ";
html = html.concat("'>" + storyPoints);
html = html.concat("</span>");
temp.prepend(html);
}
function addPR(prStatus, htmlParrent) {
var status = prStatus.match("PullRequest.*state='(.*?)'")[1];
var html = "<span class='aui-lozenge aui-lozenge-overflow aui-lozenge-subtle aui-lozenge-success pullrequest-state'>";
if (status === "MERGED") {
html = html.concat(status);
html = html.concat("</span>");
return html;
} else if (status === "OPEN") {
var stateCount = prStatus.match("PullRequest.*{stateCount=(.*?),")[1];
if (stateCount > 0) {
html = html.concat(status);
html = html.concat("</span>");
return html;
}
}
return "";
}
function getAssigneAvatarForIssue(key) {
var result;
// debugger;
result = jsonpath.query(allData, "$.issuesData.issues[?(@.key=='" + key + "')].avatarUrl");
return result;
}
class Subtask {
constructor(status, name, key, avatarForSubtask) {
this.status = status;
this.name = name;
this.key = key;
this.avatarForSubtask = avatarForSubtask;
}
}
})();
| updateTheBoard | identifier_name |
ChangeSwimlanesJira.user.js | // ==UserScript==
// @name ChangeSwimlanesJira
// @namespace http://tampermonkey.net/
// @version 3.3.3
// @description I'm sad that I've to say goodbye!
// @author WLAD
// @updateSite https://github.com/tepesware/TepesColors/raw/master/ChangeSwimlanesJira.user.js
// @downloadURL https://github.com/tepesware/TepesColors/raw/master/ChangeSwimlanesJira.user.js
// @require https://raw.githubusercontent.com/dchester/jsonpath/master/jsonpath.js
// @require https://raw.githubusercontent.com/moderntribe/tampermonkey-scripts/master/waitForKeyElements.js
// @include /https:\/\/trackspace.lhsystems.com\/secure\/RapidBoard.jspa\?rapidView=2839.*/
// The CSS file, use file:/// for local CSS files.
// @resource customCSS https://github.com/tepesware/TepesColors/raw/master/ChangeJiraSwim.css
// @grant GM_getResourceText
// @grant GM_addStyle
// ==/UserScript==
var done = false;
(function () {
'use strict';
console.debug('start: add CSS');
var cssTxt = GM_getResourceText("customCSS");
GM_addStyle(cssTxt);
console.debug('done: add CSS');
var allData;
var observer = new MutationObserver(function (mutations) {
// For the sake of...observation...let's output the mutation to console to see how this all works
mutations.forEach(function (mutation) {
var message = mutation.type;
//debugger;
if (mutation.addedNodes.length > 0) {
observer.disconnect();
registerContentObserver();
updateTheBoard();
}
});
});
function registerContentObserver() {
var observer = new MutationObserver(function (mutations) {
mutations.forEach(function (mutation) {
var message = mutation.type;
//debugger;
if (mutation.addedNodes.length > 0) {
updateTheBoard();
}
});
});
var targetNode = document.getElementById("ghx-pool");
observer.observe(targetNode, observerConfig);
}
// Notify me of everything!
var observerConfig = {
attributes: false,
childList: true,
characterData: false
};
// Node, config
// In this case we'll listen to all changes to body and child nodes
var targetNode = document.getElementById("ghx-work");
observer.observe(targetNode, observerConfig);
function fillIssues(issues) {
for (var i = 0; i < issues.length; i++) {
// debugger;
// console.log(issues[i]);
fillIssue(issues[i]);
}
}
function getAllData() {
const url = "https://trackspace.lhsystems.com/rest/greenhopper/1.0/xboard/work/allData.json?rapidViewId=2839"
var result;
$.ajax({
type: "GET",
contentType: "application/json; charset=utf-8",
url: url,
data: "{}",
dataType: "json",
success: function (data) {
console.log("all data " + data);
allData = (data);
},
error: function (result) {
console.log("Error " + result);
//alert("Error");
},
async: false
});
}
function updateTheBoard() {
getAllData();
var swimlanes = document.getElementsByClassName("ghx-info");
if (swimlanes.length > 0) {
for (var i = 0; i < swimlanes.length; i++) {
if (swimlanes[i].getElementsByTagName("span")[0].textContent == "To Do") {
$(document.getElementsByClassName("ghx-info")[i].parentElement.parentElement).css('background-color', "#dfe1e5");
}
}
var rows = $("div[swimlane-id]");
var issues = rows.children(".ghx-swimlane-header");
fillIssues(issues);
addGeneralInfo();
}
}
function addGeneralInfo(){
var parrent = $(".subnav-container");
const info = "";
const imgInfo = "<img class='emoticon' src='https://trackspace.lhsystems.com/images/icons/emoticons/warning.png' height='16' width='16' align='absmiddle' alt='' border='0' >";
let htmlInfo = "<span class='generalInfo'>";
htmlInfo = htmlInfo.concat(imgInfo);
htmlInfo = htmlInfo.concat(info);
htmlInfo = htmlInfo.concat(imgInfo);
htmlInfo = htmlInfo.concat("</span>");
parrent.append(htmlInfo);
}
function removeOldStatuses(ussueID) {
var rows = $(".statusesTepes." + ussueID);
rows.remove();
}
function addTestExecutionSummary(issue, stat) {
//debugger
var temp = $(issue).children("div.ghx-heading");
var html = "<div class=\"testexec-status-block\">";
var statuses = stat.statuses;
for (var i = 0; i < statuses.length; i++) {
if (statuses[i].statusCount > 0) {
html = html.concat("<span style=\"color:");
html = html.concat(statuses[i].color).concat("\" class=\"testexec-status-count\">");
html = html.concat(statuses[i].statusCount).concat("</span><span class=\"testexec-status-name\">");
html = html.concat(statuses[i].name).concat("</span>");
}
}
html = html.concat("</div>");
temp.append(html);
}
function addIssueAdditionalInfo(issue, ussueID, data, parrsedSubtasks) {
var temp = $(issue).children("div.ghx-heading");
var html = "<span class='issueInfo ";
html = html.concat(ussueID);
html = html.concat("'>");
var subtaskHtml = addSubtaskRectangles(ussueID, data.fields.assignee.avatarUrls, parrsedSubtasks);
console.log(subtaskHtml);
html = html.concat(addPR(data.fields.customfield_25700));
html = html.concat(subtaskHtml);
html = html.concat("</span>");
temp.append(html);
}
function fillIssue(issue) {
if (issue.hasAttribute("data-issue-key")) {
var ussueID = issue.getAttribute("data-issue-key");
console.log("updateuje issue " + ussueID);
$.ajax({
type: "GET",
contentType: "application/json; charset=utf-8",
url: "https://trackspace.lhsystems.com/rest/api/latest/issue/" + ussueID,
data: "{}",
dataType: "json",
success: function (data) {
var subtasks = data.fields.subtasks;
var parrsedSubtasks = [];
for (var i = 0; i < subtasks.length; i++) {
var avatarForSubtask = getAssigneAvatarForIssue(subtasks[i].key);
var subtask = new Subtask(subtasks[i].fields.status.name, subtasks[i].fields.summary.substring(0, 6), subtasks[i].key, avatarForSubtask);
parrsedSubtasks.push(subtask);
}
removeOldStatuses(ussueID);
addAssigneField(data.fields.assignee.avatarUrls, issue);
addPoints(data.fields.customfield_10233, issue);
addIssueAdditionalInfo(issue, ussueID, data, parrsedSubtasks);
if (data.fields.issuetype.id === "10202") {
addTestExecutionSummary(issue, data.fields.customfield_17918);
}
},
error: function (result) {
console.log("Error " + result);
//alert("Error");
}
});
} else {
}
}
function addAssigneField(avatarsArray, issue) {
var text = "";
var temp = $(issue).children("div.ghx-heading");
var html = "<img class='ghx-avatarTepes-img' src='";
var avatarUrl = avatarsArray['32x32'];
html = html.concat(avatarUrl);
html = html.concat("'>");
//debugger;
temp.prepend(html);
}
function isABot(avatarUrl) {
return ("" + avatarUrl).contains("ermbot");
}
function addSubtaskRectangles(ussueID, avatarsArray, parrseedSubtasks) {
var text = "";
var html = "<span class='statusesTepes ";
html = html.concat(ussueID);
html = html.concat("'>");
var order = ["Done", "In Progress", "To Do"];
var orderClass = ["statusboxDone", "statusboxInProgress", "statusboxTodo"];
for (var j = 0; j < order.length; j++) {
for (var i = 0; i < parrseedSubtasks.length; i++) |
}
html = html.concat("</span>");
return html;
}
function addPoints(storyPoints, issue) {
var text = "";
var temp = $(issue).find("div.ghx-heading > span.ghx-info")
var html = "<span class='storyPoints ";
html = html.concat("'>" + storyPoints);
html = html.concat("</span>");
temp.prepend(html);
}
function addPR(prStatus, htmlParrent) {
var status = prStatus.match("PullRequest.*state='(.*?)'")[1];
var html = "<span class='aui-lozenge aui-lozenge-overflow aui-lozenge-subtle aui-lozenge-success pullrequest-state'>";
if (status === "MERGED") {
html = html.concat(status);
html = html.concat("</span>");
return html;
} else if (status === "OPEN") {
var stateCount = prStatus.match("PullRequest.*{stateCount=(.*?),")[1];
if (stateCount > 0) {
html = html.concat(status);
html = html.concat("</span>");
return html;
}
}
return "";
}
function getAssigneAvatarForIssue(key) {
var result;
// debugger;
result = jsonpath.query(allData, "$.issuesData.issues[?(@.key=='" + key + "')].avatarUrl");
return result;
}
class Subtask {
constructor(status, name, key, avatarForSubtask) {
this.status = status;
this.name = name;
this.key = key;
this.avatarForSubtask = avatarForSubtask;
}
}
})();
| {
if (parrseedSubtasks[i].status === order[j]) {
html = html.concat(" <span class='" + orderClass[j] + "'>" + parrseedSubtasks[i].name);
var avatarUrl = parrseedSubtasks[i].avatarForSubtask;
var size = "";
if (order[j] === "In Progress") {
size = "-Big";
}
html = html.concat("<span class='tepes-avatar-inner" + size + "'>");
if (order[j] != "In Progress" || isABot(avatarUrl)) {
avatarUrl = "data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs%3D";
console.log("PUSTY " + order[j]);
} else {
html = html.concat("<img src='" + avatarUrl + "'></img>");
}
// if(j ===1 ){
// }
html = html.concat("</span></span>");
}
} | conditional_block |
avi.go | // A detailed description of the format is at
// https://msdn.microsoft.com/en-us/library/ms779636.aspx
package avi
import (
"bytes"
"errors"
"fmt"
"io"
"os"
)
var (
errMissingKeywordHeader = errors.New("avi: missing keyword")
errMissingRIFFChunkHeader = errors.New("avi: missing RIFF chunk header")
errMissingAVIChunkHeader = errors.New("avi: missing AVI chunk header")
errMissingLIST = errors.New("avi: missing LIST keyword")
errListSubchunkTooLong = errors.New("avi: list subchunk too long")
errShortData = errors.New("avi: short data")
fccRIFF = FOURCC{'R', 'I', 'F', 'F'} // RIFF is super class of avi file
fccAVI = FOURCC{'A', 'V', 'I', ' '} // AVI is identifier of avi file
fccLIST = FOURCC{'L', 'I', 'S', 'T'} // LIST is identifier of LIST type
fcchdrl = FOURCC{'h', 'd', 'r', 'l'} // hdrl is header list
fccavih = FOURCC{'a', 'v', 'i', 'h'} // avih is AVI header
fccstrf = FOURCC{'s', 't', 'r', 'f'} // strf is stream format
fccstrl = FOURCC{'s', 't', 'r', 'l'} // strl is stream list
fccstrh = FOURCC{'s', 't', 'r', 'h'} // strh is stream header
fccstrn = FOURCC{'s', 't', 'r', 'n'} // strn is stream name
fccvids = FOURCC{'v', 'i', 'd', 's'} // vids is fccType of stream
fccmovi = FOURCC{'m', 'o', 'v', 'i'} // movi
fccdb = FOURCC{'\x30', '\x30', 'd', 'b'} // db is uncompressed video frame
fccrec = FOURCC{'r', 'e', 'c', ' '} // rec
fccindx = FOURCC{'i', 'n', 'd', 'x'} // indx is optional elememt in List
fccnnix = FOURCC{'n', 'n', 'i', 'x'} // nnix is optional element in List
fccidx1 = FOURCC{'i', 'd', 'x', '1'} // idx1 is indexer of image files
fccJUNK = FOURCC{'J', 'U', 'N', 'K'} // JUNK is data unused.
fccodml = FOURCC{'o', 'd', 'm', 'l'} // odml is OpenDML
fccdmlh = FOURCC{'d', 'm', 'l', 'h'} // dmlh is OpenDML header
)
// FourCC is a four character code.
type FOURCC [4]byte
// 'RIFF' fileSize 'AVI ' data
// fileSize includes size of 'AVI '(FOURCC), data(io.Reader)
// actual size is fileSize + 8
type AVI struct {
file *os.File
Size uint32
lists []*List
r io.Reader
}
// 'LIST' listSize listType listData
// listSize includes size of listType(FOURCC), listdata(io.Reader)
// actual size is fileSize + 8
type List struct {
Size uint32
Type FOURCC
JunkSize uint32 // JUNK is only in
lists []*List
chunks []*Chunk
imagechunks []*ImageChunk
imageNum int
}
// ckID ckSize ckData
// ckSize includes size of ckData.
// actual size is ckSize + 8
// The data is always padded to nearest WORD boundary.
type Chunk struct {
ID FOURCC
Size uint32
Data map[string]uint32
}
type ImageChunk struct {
ID FOURCC
Size uint32
Image []byte
ImageID int
}
// u32 decodes the first four bytes of b as a little-endian integer.
func decodeU32(b []byte) uint32 {
switch len(b) {
case 4:
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
case 2:
return uint32(b[0]) | uint32(b[1])<<8
case 1:
return uint32(b[0])
}
panic("length must be 4, 2, or 1")
}
func decode(s string) FOURCC {
return FOURCC{s[0], s[1], s[2], s[3]}
}
func | (u uint32) *FOURCC {
return &FOURCC{byte(u >> 0), byte(u >> 8), byte(u >> 16), byte(u >> 24)}
}
func (fcc *FOURCC) String() string {
return string([]byte{fcc[0], fcc[1], fcc[2], fcc[3]})
}
func equal(a, b FOURCC) bool {
if a[0] != b[0] || a[1] != b[1] || a[2] != b[2] || a[3] != b[3] {
return false
}
return true
}
func (avi *AVI) GetMoviList() []*ImageChunk {
return avi.lists[1].imagechunks
}
func (avi *AVI) AVIPrint() {
fmt.Printf("AVI (%d)\n", avi.Size)
for _, l := range avi.lists {
l.ListPrint("\t")
}
}
func (l *List) ListPrint(indent string) {
fmt.Printf("%sList (%d) %s\n", indent, l.Size, l.Type.String())
for _, e := range l.chunks {
e.ChunkPrint(indent + "\t")
}
for _, e := range l.lists {
e.ListPrint(indent + "\t")
}
if l.JunkSize != 0 {
fmt.Printf("\t%sJUNK (%d)\n", indent, l.JunkSize)
}
for _, e := range l.imagechunks {
e.ImageChunkPrint(indent + "\t")
}
}
func (c *Chunk) ChunkPrint(indent string) {
fmt.Printf("%s%s(%d)\n", indent, c.ID, c.Size)
for k, v := range c.Data {
if k == "fccType" || k == "fccHandler" || k == "dwChunkId" {
fmt.Printf("%s\t%s: %s\n", indent, k, encodeU32(v))
} else {
fmt.Printf("%s\t%s: %d\n", indent, k, v)
}
}
}
func (ick *ImageChunk) ImageChunkPrint(indent string) {
fmt.Printf("%s%s ID: %d\n", indent, ick.ID, ick.ImageID)
}
func (avi *AVI) readData(size uint32) ([]byte, error) {
data := make([]byte, size)
if _, err := avi.file.Read(data); err != nil {
return nil, err
}
avi.r = bytes.NewReader(data)
buf := make([]byte, size)
if n, err := io.ReadFull(avi.r, buf); err != nil {
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = errShortData
}
fmt.Println(n, " out of ", size)
return nil, err
}
return buf, nil
}
// NewReader returns the RIFF stream's form type, such as "AVI " or "WAVE", and
// its chunks as a *Reader.
func HeadReader(f *os.File) (*AVI, error) {
avi := &AVI{file: f}
buf, err := avi.readData(12)
if err != nil {
return nil, err
}
// Make sure the first FOURCC lieral is 'RIFF'
if !equal([4]byte{buf[0], buf[1], buf[2], buf[3]}, fccRIFF) {
return nil, errMissingRIFFChunkHeader
}
// Read size of AVI file
avi.Size = decodeU32(buf[4:8])
// Make sure the 9th to 11th bytes is 'AVI '
if !equal([4]byte{buf[8], buf[9], buf[10], buf[11]}, fccAVI) {
return nil, errMissingAVIChunkHeader
}
// Read hdrl list
list, err := avi.ListReader()
if err != nil {
return nil, err
}
avi.lists = append(avi.lists, list)
return avi, nil
}
// ListReader returns List type
func (avi *AVI) ListReader() (*List, error) {
var l List
buf, err := avi.readData(12)
if err != nil {
return nil, err
}
// Make sure that first 4th letters are "LIST"
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccLIST) {
return nil, errMissingLIST
}
// Read size of the list
l.Size = decodeU32(buf[4:8])
// Read type of the list
copy(l.Type[:], buf[8:12])
switch l.Type {
case fcchdrl:
// Read avih chunk ... 8 + 56 bytes
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read strl List ... 12 + 56
l2, err := avi.ListReader()
if err != nil {
return nil, err
}
l.lists = append(l.lists, l2)
// Read odml List ... 12 + 40
l3, err := avi.ListReader()
if err != nil {
return nil, err
}
l.lists = append(l.lists, l3)
// Read JUNK ... 12 + 64496
if err := avi.JUNKReader(&l); err != nil {
return nil, err
}
case fccstrl:
// Read strh 8 + 56
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read strf 8 + 1064
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read indx 8 + 40
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
case fccodml:
// Read dmlr 8 + 4
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
}
return &l, nil
}
// MOVIReader reads frames.
// input:
// num is an argument that MOVIReader is going to read.
//
// TODO: there should be a max limit for the input
// TODO: this func should return err. if necessary
func (avi *AVI) MOVIReader(num int) {
var l List
buf, err := avi.readData(12)
if err != nil {
return
}
// Make sure that first 4 letters are "LIST"
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccLIST) {
return
}
// Read size of MOVI List
l.Size = decodeU32(buf[4:8])
// Make sure that third 4 letters are "movi"
if !equal(FOURCC{buf[8], buf[9], buf[10], buf[11]}, fccmovi) {
return
}
l.Type = fccmovi
for i := 0; i < num; i++ {
err := avi.ImageChunkReader(&l)
if err != nil {
return
}
}
// Put MOVI list as a part of lists in AVI struct
avi.lists = append(avi.lists, &l)
}
func (avi *AVI) ImageChunkReader(l *List) error {
c := ImageChunk{}
// Increment imageNum. imageNum counts the number of frames that
// the function has read the file.
l.imageNum += 1
c.ImageID = l.imageNum
buf, err := avi.readData(8)
if err != nil {
return err
}
c.ID = FOURCC{buf[0], buf[1], buf[2], buf[3]}
// buf[4:8] is size of the image frame.
// this code reads one frame of the avi data.
c.Image, err = avi.readData(decodeU32(buf[4:8]))
if err != nil {
return err
}
l.imagechunks = append(l.imagechunks, &c)
return nil
}
func (avi *AVI) ChunkReader(l *List) error {
buf, err := avi.readData(8)
if err != nil {
return err
}
ck := Chunk{}
copy(ck.ID[:], buf[:4])
ck.Size = decodeU32(buf[4:])
switch ck.ID {
case fccavih:
ck.Data, err = avi.AVIHeaderReader(ck.Size)
case fccstrh:
ck.Data, err = avi.StreamHeaderReader(ck.Size)
case fccstrf:
ck.Data, err = avi.StreamFormatReader(ck.Size)
case fccindx:
ck.Data, err = avi.MetaIndexReader(ck.Size)
case fccdmlh:
ck.Data, err = avi.ExtendedAVIHeaderReader(ck.Size)
}
if err != nil {
return err
}
l.chunks = append(l.chunks, &ck) // add chunk object ck to l.chunks
return nil
}
func (avi *AVI) JUNKReader(l *List) error {
buf, err := avi.readData(8)
if err != nil {
return err
}
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccJUNK) {
return errMissingKeywordHeader
}
l.JunkSize = decodeU32(buf[4:8])
buf, err = avi.readData(l.JunkSize)
return nil
}
func (avi *AVI) AVIHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["dwMicroSecPerFrame"] = decodeU32(buf[:4])
m["dwMaxBytesPerSec"] = decodeU32(buf[4:8])
m["dwPaddingGranularity"] = decodeU32(buf[8:12])
m["dwFlags"] = decodeU32(buf[12:16])
m["dwTotalFrames"] = decodeU32(buf[16:20])
m["dwInitialFrames"] = decodeU32(buf[20:24])
m["dwStreams"] = decodeU32(buf[24:28])
m["dwSuggestedBufferSize"] = decodeU32(buf[28:32])
m["dwWidth"] = decodeU32(buf[32:36])
m["dwHeight"] = decodeU32(buf[36:40])
m["dwReserved"] = decodeU32(buf[40:44])
return m, nil
}
func (avi *AVI) StreamHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["fccType"] = decodeU32(buf[:4])
m["fccHandler"] = decodeU32(buf[4:8])
m["dwFlags"] = decodeU32(buf[8:12])
m["wPriority"] = decodeU32(buf[12:16])
m["wLanguage"] = decodeU32(buf[16:20])
m["dwInitialFrames"] = decodeU32(buf[20:24])
m["dwScale"] = decodeU32(buf[24:28])
m["dwRate"] = decodeU32(buf[28:32])
m["dwStart"] = decodeU32(buf[32:36])
m["dwLength"] = decodeU32(buf[36:40])
m["dwSuggestedBufferSize"] = decodeU32(buf[40:44])
m["dwQuality"] = decodeU32(buf[44:48])
m["dwSampleSize"] = decodeU32(buf[48:52])
m["rcFrame1"] = uint32(buf[48])
m["rcFrame2"] = uint32(buf[49])
m["rcFrame3"] = uint32(buf[50])
m["rcFrame4"] = uint32(buf[51])
return m, nil
}
func (avi *AVI) StreamFormatReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["biSize"] = decodeU32(buf[:4])
m["biWidth"] = decodeU32(buf[4:8])
m["biHeight"] = decodeU32(buf[8:12])
m["biPlanes"] = decodeU32(buf[12:16])
m["biBitCount"] = decodeU32(buf[16:20])
m["biCompression"] = decodeU32(buf[20:24])
m["biSizeImage"] = decodeU32(buf[24:28])
m["biXPelsPerMeter"] = decodeU32(buf[28:32])
m["biYPelsPerMeter"] = decodeU32(buf[32:36])
m["biClrUsed"] = decodeU32(buf[36:40])
m["biClrImportant"] = decodeU32(buf[40:44])
return m, nil
}
func (avi *AVI) MetaIndexReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["wLongsPerEntry"] = decodeU32(buf[:2])
m["bIndexSubType"] = decodeU32(buf[2:3])
m["bIndexType"] = decodeU32(buf[3:4])
m["nEntriesInUse"] = decodeU32(buf[4:8])
m["dwChunkId"] = decodeU32(buf[8:12])
m["dwReserved1"] = decodeU32(buf[12:16])
m["dwReserved2"] = decodeU32(buf[16:20])
m["dwReserved3"] = decodeU32(buf[20:24])
// aIndex[] part
switch m["bIndexType"] {
case 0x0:
m["qwOffset1"] = decodeU32(buf[24:28])
m["qwOffset2"] = decodeU32(buf[28:32])
m["dwSize"] = decodeU32(buf[32:36])
m["dwDuration"] = decodeU32(buf[36:40])
}
// TODO: aIndex[] might store multiple items.
return m, nil
}
func (avi *AVI) ExtendedAVIHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["dwTotalFrames"] = decodeU32(buf[:4])
return m, nil
}
| encodeU32 | identifier_name |
avi.go | // A detailed description of the format is at
// https://msdn.microsoft.com/en-us/library/ms779636.aspx
package avi
import (
"bytes"
"errors"
"fmt"
"io"
"os"
)
var (
errMissingKeywordHeader = errors.New("avi: missing keyword")
errMissingRIFFChunkHeader = errors.New("avi: missing RIFF chunk header")
errMissingAVIChunkHeader = errors.New("avi: missing AVI chunk header")
errMissingLIST = errors.New("avi: missing LIST keyword")
errListSubchunkTooLong = errors.New("avi: list subchunk too long")
errShortData = errors.New("avi: short data")
fccRIFF = FOURCC{'R', 'I', 'F', 'F'} // RIFF is super class of avi file
fccAVI = FOURCC{'A', 'V', 'I', ' '} // AVI is identifier of avi file
fccLIST = FOURCC{'L', 'I', 'S', 'T'} // LIST is identifier of LIST type
fcchdrl = FOURCC{'h', 'd', 'r', 'l'} // hdrl is header list
fccavih = FOURCC{'a', 'v', 'i', 'h'} // avih is AVI header
fccstrf = FOURCC{'s', 't', 'r', 'f'} // strf is stream format
fccstrl = FOURCC{'s', 't', 'r', 'l'} // strl is stream list
fccstrh = FOURCC{'s', 't', 'r', 'h'} // strh is stream header
fccstrn = FOURCC{'s', 't', 'r', 'n'} // strn is stream name
fccvids = FOURCC{'v', 'i', 'd', 's'} // vids is fccType of stream
fccmovi = FOURCC{'m', 'o', 'v', 'i'} // movi
fccdb = FOURCC{'\x30', '\x30', 'd', 'b'} // db is uncompressed video frame
fccrec = FOURCC{'r', 'e', 'c', ' '} // rec
fccindx = FOURCC{'i', 'n', 'd', 'x'} // indx is optional elememt in List
fccnnix = FOURCC{'n', 'n', 'i', 'x'} // nnix is optional element in List
fccidx1 = FOURCC{'i', 'd', 'x', '1'} // idx1 is indexer of image files
fccJUNK = FOURCC{'J', 'U', 'N', 'K'} // JUNK is data unused.
fccodml = FOURCC{'o', 'd', 'm', 'l'} // odml is OpenDML
fccdmlh = FOURCC{'d', 'm', 'l', 'h'} // dmlh is OpenDML header
)
// FourCC is a four character code.
type FOURCC [4]byte
// 'RIFF' fileSize 'AVI ' data
// fileSize includes size of 'AVI '(FOURCC), data(io.Reader)
// actual size is fileSize + 8
type AVI struct {
file *os.File
Size uint32
lists []*List
r io.Reader
}
// 'LIST' listSize listType listData
// listSize includes size of listType(FOURCC), listdata(io.Reader)
// actual size is fileSize + 8
type List struct {
Size uint32
Type FOURCC | lists []*List
chunks []*Chunk
imagechunks []*ImageChunk
imageNum int
}
// ckID ckSize ckData
// ckSize includes size of ckData.
// actual size is ckSize + 8
// The data is always padded to nearest WORD boundary.
type Chunk struct {
ID FOURCC
Size uint32
Data map[string]uint32
}
type ImageChunk struct {
ID FOURCC
Size uint32
Image []byte
ImageID int
}
// u32 decodes the first four bytes of b as a little-endian integer.
func decodeU32(b []byte) uint32 {
switch len(b) {
case 4:
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
case 2:
return uint32(b[0]) | uint32(b[1])<<8
case 1:
return uint32(b[0])
}
panic("length must be 4, 2, or 1")
}
func decode(s string) FOURCC {
return FOURCC{s[0], s[1], s[2], s[3]}
}
func encodeU32(u uint32) *FOURCC {
return &FOURCC{byte(u >> 0), byte(u >> 8), byte(u >> 16), byte(u >> 24)}
}
func (fcc *FOURCC) String() string {
return string([]byte{fcc[0], fcc[1], fcc[2], fcc[3]})
}
func equal(a, b FOURCC) bool {
if a[0] != b[0] || a[1] != b[1] || a[2] != b[2] || a[3] != b[3] {
return false
}
return true
}
func (avi *AVI) GetMoviList() []*ImageChunk {
return avi.lists[1].imagechunks
}
func (avi *AVI) AVIPrint() {
fmt.Printf("AVI (%d)\n", avi.Size)
for _, l := range avi.lists {
l.ListPrint("\t")
}
}
func (l *List) ListPrint(indent string) {
fmt.Printf("%sList (%d) %s\n", indent, l.Size, l.Type.String())
for _, e := range l.chunks {
e.ChunkPrint(indent + "\t")
}
for _, e := range l.lists {
e.ListPrint(indent + "\t")
}
if l.JunkSize != 0 {
fmt.Printf("\t%sJUNK (%d)\n", indent, l.JunkSize)
}
for _, e := range l.imagechunks {
e.ImageChunkPrint(indent + "\t")
}
}
func (c *Chunk) ChunkPrint(indent string) {
fmt.Printf("%s%s(%d)\n", indent, c.ID, c.Size)
for k, v := range c.Data {
if k == "fccType" || k == "fccHandler" || k == "dwChunkId" {
fmt.Printf("%s\t%s: %s\n", indent, k, encodeU32(v))
} else {
fmt.Printf("%s\t%s: %d\n", indent, k, v)
}
}
}
func (ick *ImageChunk) ImageChunkPrint(indent string) {
fmt.Printf("%s%s ID: %d\n", indent, ick.ID, ick.ImageID)
}
func (avi *AVI) readData(size uint32) ([]byte, error) {
data := make([]byte, size)
if _, err := avi.file.Read(data); err != nil {
return nil, err
}
avi.r = bytes.NewReader(data)
buf := make([]byte, size)
if n, err := io.ReadFull(avi.r, buf); err != nil {
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = errShortData
}
fmt.Println(n, " out of ", size)
return nil, err
}
return buf, nil
}
// NewReader returns the RIFF stream's form type, such as "AVI " or "WAVE", and
// its chunks as a *Reader.
func HeadReader(f *os.File) (*AVI, error) {
avi := &AVI{file: f}
buf, err := avi.readData(12)
if err != nil {
return nil, err
}
// Make sure the first FOURCC lieral is 'RIFF'
if !equal([4]byte{buf[0], buf[1], buf[2], buf[3]}, fccRIFF) {
return nil, errMissingRIFFChunkHeader
}
// Read size of AVI file
avi.Size = decodeU32(buf[4:8])
// Make sure the 9th to 11th bytes is 'AVI '
if !equal([4]byte{buf[8], buf[9], buf[10], buf[11]}, fccAVI) {
return nil, errMissingAVIChunkHeader
}
// Read hdrl list
list, err := avi.ListReader()
if err != nil {
return nil, err
}
avi.lists = append(avi.lists, list)
return avi, nil
}
// ListReader returns List type
func (avi *AVI) ListReader() (*List, error) {
var l List
buf, err := avi.readData(12)
if err != nil {
return nil, err
}
// Make sure that first 4th letters are "LIST"
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccLIST) {
return nil, errMissingLIST
}
// Read size of the list
l.Size = decodeU32(buf[4:8])
// Read type of the list
copy(l.Type[:], buf[8:12])
switch l.Type {
case fcchdrl:
// Read avih chunk ... 8 + 56 bytes
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read strl List ... 12 + 56
l2, err := avi.ListReader()
if err != nil {
return nil, err
}
l.lists = append(l.lists, l2)
// Read odml List ... 12 + 40
l3, err := avi.ListReader()
if err != nil {
return nil, err
}
l.lists = append(l.lists, l3)
// Read JUNK ... 12 + 64496
if err := avi.JUNKReader(&l); err != nil {
return nil, err
}
case fccstrl:
// Read strh 8 + 56
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read strf 8 + 1064
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read indx 8 + 40
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
case fccodml:
// Read dmlr 8 + 4
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
}
return &l, nil
}
// MOVIReader reads frames.
// input:
// num is an argument that MOVIReader is going to read.
//
// TODO: there should be a max limit for the input
// TODO: this func should return err. if necessary
func (avi *AVI) MOVIReader(num int) {
var l List
buf, err := avi.readData(12)
if err != nil {
return
}
// Make sure that first 4 letters are "LIST"
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccLIST) {
return
}
// Read size of MOVI List
l.Size = decodeU32(buf[4:8])
// Make sure that third 4 letters are "movi"
if !equal(FOURCC{buf[8], buf[9], buf[10], buf[11]}, fccmovi) {
return
}
l.Type = fccmovi
for i := 0; i < num; i++ {
err := avi.ImageChunkReader(&l)
if err != nil {
return
}
}
// Put MOVI list as a part of lists in AVI struct
avi.lists = append(avi.lists, &l)
}
func (avi *AVI) ImageChunkReader(l *List) error {
c := ImageChunk{}
// Increment imageNum. imageNum counts the number of frames that
// the function has read the file.
l.imageNum += 1
c.ImageID = l.imageNum
buf, err := avi.readData(8)
if err != nil {
return err
}
c.ID = FOURCC{buf[0], buf[1], buf[2], buf[3]}
// buf[4:8] is size of the image frame.
// this code reads one frame of the avi data.
c.Image, err = avi.readData(decodeU32(buf[4:8]))
if err != nil {
return err
}
l.imagechunks = append(l.imagechunks, &c)
return nil
}
func (avi *AVI) ChunkReader(l *List) error {
buf, err := avi.readData(8)
if err != nil {
return err
}
ck := Chunk{}
copy(ck.ID[:], buf[:4])
ck.Size = decodeU32(buf[4:])
switch ck.ID {
case fccavih:
ck.Data, err = avi.AVIHeaderReader(ck.Size)
case fccstrh:
ck.Data, err = avi.StreamHeaderReader(ck.Size)
case fccstrf:
ck.Data, err = avi.StreamFormatReader(ck.Size)
case fccindx:
ck.Data, err = avi.MetaIndexReader(ck.Size)
case fccdmlh:
ck.Data, err = avi.ExtendedAVIHeaderReader(ck.Size)
}
if err != nil {
return err
}
l.chunks = append(l.chunks, &ck) // add chunk object ck to l.chunks
return nil
}
func (avi *AVI) JUNKReader(l *List) error {
buf, err := avi.readData(8)
if err != nil {
return err
}
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccJUNK) {
return errMissingKeywordHeader
}
l.JunkSize = decodeU32(buf[4:8])
buf, err = avi.readData(l.JunkSize)
return nil
}
func (avi *AVI) AVIHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["dwMicroSecPerFrame"] = decodeU32(buf[:4])
m["dwMaxBytesPerSec"] = decodeU32(buf[4:8])
m["dwPaddingGranularity"] = decodeU32(buf[8:12])
m["dwFlags"] = decodeU32(buf[12:16])
m["dwTotalFrames"] = decodeU32(buf[16:20])
m["dwInitialFrames"] = decodeU32(buf[20:24])
m["dwStreams"] = decodeU32(buf[24:28])
m["dwSuggestedBufferSize"] = decodeU32(buf[28:32])
m["dwWidth"] = decodeU32(buf[32:36])
m["dwHeight"] = decodeU32(buf[36:40])
m["dwReserved"] = decodeU32(buf[40:44])
return m, nil
}
func (avi *AVI) StreamHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["fccType"] = decodeU32(buf[:4])
m["fccHandler"] = decodeU32(buf[4:8])
m["dwFlags"] = decodeU32(buf[8:12])
m["wPriority"] = decodeU32(buf[12:16])
m["wLanguage"] = decodeU32(buf[16:20])
m["dwInitialFrames"] = decodeU32(buf[20:24])
m["dwScale"] = decodeU32(buf[24:28])
m["dwRate"] = decodeU32(buf[28:32])
m["dwStart"] = decodeU32(buf[32:36])
m["dwLength"] = decodeU32(buf[36:40])
m["dwSuggestedBufferSize"] = decodeU32(buf[40:44])
m["dwQuality"] = decodeU32(buf[44:48])
m["dwSampleSize"] = decodeU32(buf[48:52])
m["rcFrame1"] = uint32(buf[48])
m["rcFrame2"] = uint32(buf[49])
m["rcFrame3"] = uint32(buf[50])
m["rcFrame4"] = uint32(buf[51])
return m, nil
}
func (avi *AVI) StreamFormatReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["biSize"] = decodeU32(buf[:4])
m["biWidth"] = decodeU32(buf[4:8])
m["biHeight"] = decodeU32(buf[8:12])
m["biPlanes"] = decodeU32(buf[12:16])
m["biBitCount"] = decodeU32(buf[16:20])
m["biCompression"] = decodeU32(buf[20:24])
m["biSizeImage"] = decodeU32(buf[24:28])
m["biXPelsPerMeter"] = decodeU32(buf[28:32])
m["biYPelsPerMeter"] = decodeU32(buf[32:36])
m["biClrUsed"] = decodeU32(buf[36:40])
m["biClrImportant"] = decodeU32(buf[40:44])
return m, nil
}
func (avi *AVI) MetaIndexReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["wLongsPerEntry"] = decodeU32(buf[:2])
m["bIndexSubType"] = decodeU32(buf[2:3])
m["bIndexType"] = decodeU32(buf[3:4])
m["nEntriesInUse"] = decodeU32(buf[4:8])
m["dwChunkId"] = decodeU32(buf[8:12])
m["dwReserved1"] = decodeU32(buf[12:16])
m["dwReserved2"] = decodeU32(buf[16:20])
m["dwReserved3"] = decodeU32(buf[20:24])
// aIndex[] part
switch m["bIndexType"] {
case 0x0:
m["qwOffset1"] = decodeU32(buf[24:28])
m["qwOffset2"] = decodeU32(buf[28:32])
m["dwSize"] = decodeU32(buf[32:36])
m["dwDuration"] = decodeU32(buf[36:40])
}
// TODO: aIndex[] might store multiple items.
return m, nil
}
func (avi *AVI) ExtendedAVIHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["dwTotalFrames"] = decodeU32(buf[:4])
return m, nil
} | JunkSize uint32 // JUNK is only in
| random_line_split |
avi.go | // A detailed description of the format is at
// https://msdn.microsoft.com/en-us/library/ms779636.aspx
package avi
import (
"bytes"
"errors"
"fmt"
"io"
"os"
)
var (
errMissingKeywordHeader = errors.New("avi: missing keyword")
errMissingRIFFChunkHeader = errors.New("avi: missing RIFF chunk header")
errMissingAVIChunkHeader = errors.New("avi: missing AVI chunk header")
errMissingLIST = errors.New("avi: missing LIST keyword")
errListSubchunkTooLong = errors.New("avi: list subchunk too long")
errShortData = errors.New("avi: short data")
fccRIFF = FOURCC{'R', 'I', 'F', 'F'} // RIFF is super class of avi file
fccAVI = FOURCC{'A', 'V', 'I', ' '} // AVI is identifier of avi file
fccLIST = FOURCC{'L', 'I', 'S', 'T'} // LIST is identifier of LIST type
fcchdrl = FOURCC{'h', 'd', 'r', 'l'} // hdrl is header list
fccavih = FOURCC{'a', 'v', 'i', 'h'} // avih is AVI header
fccstrf = FOURCC{'s', 't', 'r', 'f'} // strf is stream format
fccstrl = FOURCC{'s', 't', 'r', 'l'} // strl is stream list
fccstrh = FOURCC{'s', 't', 'r', 'h'} // strh is stream header
fccstrn = FOURCC{'s', 't', 'r', 'n'} // strn is stream name
fccvids = FOURCC{'v', 'i', 'd', 's'} // vids is fccType of stream
fccmovi = FOURCC{'m', 'o', 'v', 'i'} // movi
fccdb = FOURCC{'\x30', '\x30', 'd', 'b'} // db is uncompressed video frame
fccrec = FOURCC{'r', 'e', 'c', ' '} // rec
fccindx = FOURCC{'i', 'n', 'd', 'x'} // indx is optional elememt in List
fccnnix = FOURCC{'n', 'n', 'i', 'x'} // nnix is optional element in List
fccidx1 = FOURCC{'i', 'd', 'x', '1'} // idx1 is indexer of image files
fccJUNK = FOURCC{'J', 'U', 'N', 'K'} // JUNK is data unused.
fccodml = FOURCC{'o', 'd', 'm', 'l'} // odml is OpenDML
fccdmlh = FOURCC{'d', 'm', 'l', 'h'} // dmlh is OpenDML header
)
// FourCC is a four character code.
type FOURCC [4]byte
// 'RIFF' fileSize 'AVI ' data
// fileSize includes size of 'AVI '(FOURCC), data(io.Reader)
// actual size is fileSize + 8
type AVI struct {
file *os.File
Size uint32
lists []*List
r io.Reader
}
// 'LIST' listSize listType listData
// listSize includes size of listType(FOURCC), listdata(io.Reader)
// actual size is fileSize + 8
type List struct {
Size uint32
Type FOURCC
JunkSize uint32 // JUNK is only in
lists []*List
chunks []*Chunk
imagechunks []*ImageChunk
imageNum int
}
// ckID ckSize ckData
// ckSize includes size of ckData.
// actual size is ckSize + 8
// The data is always padded to nearest WORD boundary.
type Chunk struct {
ID FOURCC
Size uint32
Data map[string]uint32
}
type ImageChunk struct {
ID FOURCC
Size uint32
Image []byte
ImageID int
}
// u32 decodes the first four bytes of b as a little-endian integer.
func decodeU32(b []byte) uint32 {
switch len(b) {
case 4:
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
case 2:
return uint32(b[0]) | uint32(b[1])<<8
case 1:
return uint32(b[0])
}
panic("length must be 4, 2, or 1")
}
func decode(s string) FOURCC {
return FOURCC{s[0], s[1], s[2], s[3]}
}
func encodeU32(u uint32) *FOURCC {
return &FOURCC{byte(u >> 0), byte(u >> 8), byte(u >> 16), byte(u >> 24)}
}
func (fcc *FOURCC) String() string {
return string([]byte{fcc[0], fcc[1], fcc[2], fcc[3]})
}
func equal(a, b FOURCC) bool {
if a[0] != b[0] || a[1] != b[1] || a[2] != b[2] || a[3] != b[3] {
return false
}
return true
}
func (avi *AVI) GetMoviList() []*ImageChunk {
return avi.lists[1].imagechunks
}
func (avi *AVI) AVIPrint() {
fmt.Printf("AVI (%d)\n", avi.Size)
for _, l := range avi.lists {
l.ListPrint("\t")
}
}
func (l *List) ListPrint(indent string) {
fmt.Printf("%sList (%d) %s\n", indent, l.Size, l.Type.String())
for _, e := range l.chunks {
e.ChunkPrint(indent + "\t")
}
for _, e := range l.lists {
e.ListPrint(indent + "\t")
}
if l.JunkSize != 0 {
fmt.Printf("\t%sJUNK (%d)\n", indent, l.JunkSize)
}
for _, e := range l.imagechunks {
e.ImageChunkPrint(indent + "\t")
}
}
func (c *Chunk) ChunkPrint(indent string) {
fmt.Printf("%s%s(%d)\n", indent, c.ID, c.Size)
for k, v := range c.Data {
if k == "fccType" || k == "fccHandler" || k == "dwChunkId" {
fmt.Printf("%s\t%s: %s\n", indent, k, encodeU32(v))
} else {
fmt.Printf("%s\t%s: %d\n", indent, k, v)
}
}
}
func (ick *ImageChunk) ImageChunkPrint(indent string) {
fmt.Printf("%s%s ID: %d\n", indent, ick.ID, ick.ImageID)
}
func (avi *AVI) readData(size uint32) ([]byte, error) {
data := make([]byte, size)
if _, err := avi.file.Read(data); err != nil |
avi.r = bytes.NewReader(data)
buf := make([]byte, size)
if n, err := io.ReadFull(avi.r, buf); err != nil {
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = errShortData
}
fmt.Println(n, " out of ", size)
return nil, err
}
return buf, nil
}
// NewReader returns the RIFF stream's form type, such as "AVI " or "WAVE", and
// its chunks as a *Reader.
func HeadReader(f *os.File) (*AVI, error) {
avi := &AVI{file: f}
buf, err := avi.readData(12)
if err != nil {
return nil, err
}
// Make sure the first FOURCC lieral is 'RIFF'
if !equal([4]byte{buf[0], buf[1], buf[2], buf[3]}, fccRIFF) {
return nil, errMissingRIFFChunkHeader
}
// Read size of AVI file
avi.Size = decodeU32(buf[4:8])
// Make sure the 9th to 11th bytes is 'AVI '
if !equal([4]byte{buf[8], buf[9], buf[10], buf[11]}, fccAVI) {
return nil, errMissingAVIChunkHeader
}
// Read hdrl list
list, err := avi.ListReader()
if err != nil {
return nil, err
}
avi.lists = append(avi.lists, list)
return avi, nil
}
// ListReader returns List type
func (avi *AVI) ListReader() (*List, error) {
var l List
buf, err := avi.readData(12)
if err != nil {
return nil, err
}
// Make sure that first 4th letters are "LIST"
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccLIST) {
return nil, errMissingLIST
}
// Read size of the list
l.Size = decodeU32(buf[4:8])
// Read type of the list
copy(l.Type[:], buf[8:12])
switch l.Type {
case fcchdrl:
// Read avih chunk ... 8 + 56 bytes
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read strl List ... 12 + 56
l2, err := avi.ListReader()
if err != nil {
return nil, err
}
l.lists = append(l.lists, l2)
// Read odml List ... 12 + 40
l3, err := avi.ListReader()
if err != nil {
return nil, err
}
l.lists = append(l.lists, l3)
// Read JUNK ... 12 + 64496
if err := avi.JUNKReader(&l); err != nil {
return nil, err
}
case fccstrl:
// Read strh 8 + 56
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read strf 8 + 1064
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read indx 8 + 40
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
case fccodml:
// Read dmlr 8 + 4
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
}
return &l, nil
}
// MOVIReader reads frames.
// input:
// num is an argument that MOVIReader is going to read.
//
// TODO: there should be a max limit for the input
// TODO: this func should return err. if necessary
func (avi *AVI) MOVIReader(num int) {
var l List
buf, err := avi.readData(12)
if err != nil {
return
}
// Make sure that first 4 letters are "LIST"
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccLIST) {
return
}
// Read size of MOVI List
l.Size = decodeU32(buf[4:8])
// Make sure that third 4 letters are "movi"
if !equal(FOURCC{buf[8], buf[9], buf[10], buf[11]}, fccmovi) {
return
}
l.Type = fccmovi
for i := 0; i < num; i++ {
err := avi.ImageChunkReader(&l)
if err != nil {
return
}
}
// Put MOVI list as a part of lists in AVI struct
avi.lists = append(avi.lists, &l)
}
func (avi *AVI) ImageChunkReader(l *List) error {
c := ImageChunk{}
// Increment imageNum. imageNum counts the number of frames that
// the function has read the file.
l.imageNum += 1
c.ImageID = l.imageNum
buf, err := avi.readData(8)
if err != nil {
return err
}
c.ID = FOURCC{buf[0], buf[1], buf[2], buf[3]}
// buf[4:8] is size of the image frame.
// this code reads one frame of the avi data.
c.Image, err = avi.readData(decodeU32(buf[4:8]))
if err != nil {
return err
}
l.imagechunks = append(l.imagechunks, &c)
return nil
}
func (avi *AVI) ChunkReader(l *List) error {
buf, err := avi.readData(8)
if err != nil {
return err
}
ck := Chunk{}
copy(ck.ID[:], buf[:4])
ck.Size = decodeU32(buf[4:])
switch ck.ID {
case fccavih:
ck.Data, err = avi.AVIHeaderReader(ck.Size)
case fccstrh:
ck.Data, err = avi.StreamHeaderReader(ck.Size)
case fccstrf:
ck.Data, err = avi.StreamFormatReader(ck.Size)
case fccindx:
ck.Data, err = avi.MetaIndexReader(ck.Size)
case fccdmlh:
ck.Data, err = avi.ExtendedAVIHeaderReader(ck.Size)
}
if err != nil {
return err
}
l.chunks = append(l.chunks, &ck) // add chunk object ck to l.chunks
return nil
}
func (avi *AVI) JUNKReader(l *List) error {
buf, err := avi.readData(8)
if err != nil {
return err
}
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccJUNK) {
return errMissingKeywordHeader
}
l.JunkSize = decodeU32(buf[4:8])
buf, err = avi.readData(l.JunkSize)
return nil
}
func (avi *AVI) AVIHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["dwMicroSecPerFrame"] = decodeU32(buf[:4])
m["dwMaxBytesPerSec"] = decodeU32(buf[4:8])
m["dwPaddingGranularity"] = decodeU32(buf[8:12])
m["dwFlags"] = decodeU32(buf[12:16])
m["dwTotalFrames"] = decodeU32(buf[16:20])
m["dwInitialFrames"] = decodeU32(buf[20:24])
m["dwStreams"] = decodeU32(buf[24:28])
m["dwSuggestedBufferSize"] = decodeU32(buf[28:32])
m["dwWidth"] = decodeU32(buf[32:36])
m["dwHeight"] = decodeU32(buf[36:40])
m["dwReserved"] = decodeU32(buf[40:44])
return m, nil
}
func (avi *AVI) StreamHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["fccType"] = decodeU32(buf[:4])
m["fccHandler"] = decodeU32(buf[4:8])
m["dwFlags"] = decodeU32(buf[8:12])
m["wPriority"] = decodeU32(buf[12:16])
m["wLanguage"] = decodeU32(buf[16:20])
m["dwInitialFrames"] = decodeU32(buf[20:24])
m["dwScale"] = decodeU32(buf[24:28])
m["dwRate"] = decodeU32(buf[28:32])
m["dwStart"] = decodeU32(buf[32:36])
m["dwLength"] = decodeU32(buf[36:40])
m["dwSuggestedBufferSize"] = decodeU32(buf[40:44])
m["dwQuality"] = decodeU32(buf[44:48])
m["dwSampleSize"] = decodeU32(buf[48:52])
m["rcFrame1"] = uint32(buf[48])
m["rcFrame2"] = uint32(buf[49])
m["rcFrame3"] = uint32(buf[50])
m["rcFrame4"] = uint32(buf[51])
return m, nil
}
func (avi *AVI) StreamFormatReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["biSize"] = decodeU32(buf[:4])
m["biWidth"] = decodeU32(buf[4:8])
m["biHeight"] = decodeU32(buf[8:12])
m["biPlanes"] = decodeU32(buf[12:16])
m["biBitCount"] = decodeU32(buf[16:20])
m["biCompression"] = decodeU32(buf[20:24])
m["biSizeImage"] = decodeU32(buf[24:28])
m["biXPelsPerMeter"] = decodeU32(buf[28:32])
m["biYPelsPerMeter"] = decodeU32(buf[32:36])
m["biClrUsed"] = decodeU32(buf[36:40])
m["biClrImportant"] = decodeU32(buf[40:44])
return m, nil
}
func (avi *AVI) MetaIndexReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["wLongsPerEntry"] = decodeU32(buf[:2])
m["bIndexSubType"] = decodeU32(buf[2:3])
m["bIndexType"] = decodeU32(buf[3:4])
m["nEntriesInUse"] = decodeU32(buf[4:8])
m["dwChunkId"] = decodeU32(buf[8:12])
m["dwReserved1"] = decodeU32(buf[12:16])
m["dwReserved2"] = decodeU32(buf[16:20])
m["dwReserved3"] = decodeU32(buf[20:24])
// aIndex[] part
switch m["bIndexType"] {
case 0x0:
m["qwOffset1"] = decodeU32(buf[24:28])
m["qwOffset2"] = decodeU32(buf[28:32])
m["dwSize"] = decodeU32(buf[32:36])
m["dwDuration"] = decodeU32(buf[36:40])
}
// TODO: aIndex[] might store multiple items.
return m, nil
}
func (avi *AVI) ExtendedAVIHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["dwTotalFrames"] = decodeU32(buf[:4])
return m, nil
}
| {
return nil, err
} | conditional_block |
avi.go | // A detailed description of the format is at
// https://msdn.microsoft.com/en-us/library/ms779636.aspx
package avi
import (
"bytes"
"errors"
"fmt"
"io"
"os"
)
var (
errMissingKeywordHeader = errors.New("avi: missing keyword")
errMissingRIFFChunkHeader = errors.New("avi: missing RIFF chunk header")
errMissingAVIChunkHeader = errors.New("avi: missing AVI chunk header")
errMissingLIST = errors.New("avi: missing LIST keyword")
errListSubchunkTooLong = errors.New("avi: list subchunk too long")
errShortData = errors.New("avi: short data")
fccRIFF = FOURCC{'R', 'I', 'F', 'F'} // RIFF is super class of avi file
fccAVI = FOURCC{'A', 'V', 'I', ' '} // AVI is identifier of avi file
fccLIST = FOURCC{'L', 'I', 'S', 'T'} // LIST is identifier of LIST type
fcchdrl = FOURCC{'h', 'd', 'r', 'l'} // hdrl is header list
fccavih = FOURCC{'a', 'v', 'i', 'h'} // avih is AVI header
fccstrf = FOURCC{'s', 't', 'r', 'f'} // strf is stream format
fccstrl = FOURCC{'s', 't', 'r', 'l'} // strl is stream list
fccstrh = FOURCC{'s', 't', 'r', 'h'} // strh is stream header
fccstrn = FOURCC{'s', 't', 'r', 'n'} // strn is stream name
fccvids = FOURCC{'v', 'i', 'd', 's'} // vids is fccType of stream
fccmovi = FOURCC{'m', 'o', 'v', 'i'} // movi
fccdb = FOURCC{'\x30', '\x30', 'd', 'b'} // db is uncompressed video frame
fccrec = FOURCC{'r', 'e', 'c', ' '} // rec
fccindx = FOURCC{'i', 'n', 'd', 'x'} // indx is optional elememt in List
fccnnix = FOURCC{'n', 'n', 'i', 'x'} // nnix is optional element in List
fccidx1 = FOURCC{'i', 'd', 'x', '1'} // idx1 is indexer of image files
fccJUNK = FOURCC{'J', 'U', 'N', 'K'} // JUNK is data unused.
fccodml = FOURCC{'o', 'd', 'm', 'l'} // odml is OpenDML
fccdmlh = FOURCC{'d', 'm', 'l', 'h'} // dmlh is OpenDML header
)
// FourCC is a four character code.
type FOURCC [4]byte
// 'RIFF' fileSize 'AVI ' data
// fileSize includes size of 'AVI '(FOURCC), data(io.Reader)
// actual size is fileSize + 8
type AVI struct {
file *os.File
Size uint32
lists []*List
r io.Reader
}
// 'LIST' listSize listType listData
// listSize includes size of listType(FOURCC), listdata(io.Reader)
// actual size is fileSize + 8
type List struct {
Size uint32
Type FOURCC
JunkSize uint32 // JUNK is only in
lists []*List
chunks []*Chunk
imagechunks []*ImageChunk
imageNum int
}
// ckID ckSize ckData
// ckSize includes size of ckData.
// actual size is ckSize + 8
// The data is always padded to nearest WORD boundary.
type Chunk struct {
ID FOURCC
Size uint32
Data map[string]uint32
}
type ImageChunk struct {
ID FOURCC
Size uint32
Image []byte
ImageID int
}
// u32 decodes the first four bytes of b as a little-endian integer.
func decodeU32(b []byte) uint32 {
switch len(b) {
case 4:
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
case 2:
return uint32(b[0]) | uint32(b[1])<<8
case 1:
return uint32(b[0])
}
panic("length must be 4, 2, or 1")
}
func decode(s string) FOURCC {
return FOURCC{s[0], s[1], s[2], s[3]}
}
func encodeU32(u uint32) *FOURCC {
return &FOURCC{byte(u >> 0), byte(u >> 8), byte(u >> 16), byte(u >> 24)}
}
func (fcc *FOURCC) String() string {
return string([]byte{fcc[0], fcc[1], fcc[2], fcc[3]})
}
func equal(a, b FOURCC) bool {
if a[0] != b[0] || a[1] != b[1] || a[2] != b[2] || a[3] != b[3] {
return false
}
return true
}
func (avi *AVI) GetMoviList() []*ImageChunk {
return avi.lists[1].imagechunks
}
func (avi *AVI) AVIPrint() {
fmt.Printf("AVI (%d)\n", avi.Size)
for _, l := range avi.lists {
l.ListPrint("\t")
}
}
func (l *List) ListPrint(indent string) {
fmt.Printf("%sList (%d) %s\n", indent, l.Size, l.Type.String())
for _, e := range l.chunks {
e.ChunkPrint(indent + "\t")
}
for _, e := range l.lists {
e.ListPrint(indent + "\t")
}
if l.JunkSize != 0 {
fmt.Printf("\t%sJUNK (%d)\n", indent, l.JunkSize)
}
for _, e := range l.imagechunks {
e.ImageChunkPrint(indent + "\t")
}
}
func (c *Chunk) ChunkPrint(indent string) {
fmt.Printf("%s%s(%d)\n", indent, c.ID, c.Size)
for k, v := range c.Data {
if k == "fccType" || k == "fccHandler" || k == "dwChunkId" {
fmt.Printf("%s\t%s: %s\n", indent, k, encodeU32(v))
} else {
fmt.Printf("%s\t%s: %d\n", indent, k, v)
}
}
}
func (ick *ImageChunk) ImageChunkPrint(indent string) {
fmt.Printf("%s%s ID: %d\n", indent, ick.ID, ick.ImageID)
}
func (avi *AVI) readData(size uint32) ([]byte, error) {
data := make([]byte, size)
if _, err := avi.file.Read(data); err != nil {
return nil, err
}
avi.r = bytes.NewReader(data)
buf := make([]byte, size)
if n, err := io.ReadFull(avi.r, buf); err != nil {
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = errShortData
}
fmt.Println(n, " out of ", size)
return nil, err
}
return buf, nil
}
// NewReader returns the RIFF stream's form type, such as "AVI " or "WAVE", and
// its chunks as a *Reader.
func HeadReader(f *os.File) (*AVI, error) {
avi := &AVI{file: f}
buf, err := avi.readData(12)
if err != nil {
return nil, err
}
// Make sure the first FOURCC lieral is 'RIFF'
if !equal([4]byte{buf[0], buf[1], buf[2], buf[3]}, fccRIFF) {
return nil, errMissingRIFFChunkHeader
}
// Read size of AVI file
avi.Size = decodeU32(buf[4:8])
// Make sure the 9th to 11th bytes is 'AVI '
if !equal([4]byte{buf[8], buf[9], buf[10], buf[11]}, fccAVI) {
return nil, errMissingAVIChunkHeader
}
// Read hdrl list
list, err := avi.ListReader()
if err != nil {
return nil, err
}
avi.lists = append(avi.lists, list)
return avi, nil
}
// ListReader returns List type
func (avi *AVI) ListReader() (*List, error) {
var l List
buf, err := avi.readData(12)
if err != nil {
return nil, err
}
// Make sure that first 4th letters are "LIST"
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccLIST) {
return nil, errMissingLIST
}
// Read size of the list
l.Size = decodeU32(buf[4:8])
// Read type of the list
copy(l.Type[:], buf[8:12])
switch l.Type {
case fcchdrl:
// Read avih chunk ... 8 + 56 bytes
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read strl List ... 12 + 56
l2, err := avi.ListReader()
if err != nil {
return nil, err
}
l.lists = append(l.lists, l2)
// Read odml List ... 12 + 40
l3, err := avi.ListReader()
if err != nil {
return nil, err
}
l.lists = append(l.lists, l3)
// Read JUNK ... 12 + 64496
if err := avi.JUNKReader(&l); err != nil {
return nil, err
}
case fccstrl:
// Read strh 8 + 56
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read strf 8 + 1064
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
// Read indx 8 + 40
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
case fccodml:
// Read dmlr 8 + 4
if err := avi.ChunkReader(&l); err != nil {
return nil, err
}
}
return &l, nil
}
// MOVIReader reads frames.
// input:
// num is an argument that MOVIReader is going to read.
//
// TODO: there should be a max limit for the input
// TODO: this func should return err. if necessary
func (avi *AVI) MOVIReader(num int) {
var l List
buf, err := avi.readData(12)
if err != nil {
return
}
// Make sure that first 4 letters are "LIST"
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccLIST) {
return
}
// Read size of MOVI List
l.Size = decodeU32(buf[4:8])
// Make sure that third 4 letters are "movi"
if !equal(FOURCC{buf[8], buf[9], buf[10], buf[11]}, fccmovi) {
return
}
l.Type = fccmovi
for i := 0; i < num; i++ {
err := avi.ImageChunkReader(&l)
if err != nil {
return
}
}
// Put MOVI list as a part of lists in AVI struct
avi.lists = append(avi.lists, &l)
}
func (avi *AVI) ImageChunkReader(l *List) error {
c := ImageChunk{}
// Increment imageNum. imageNum counts the number of frames that
// the function has read the file.
l.imageNum += 1
c.ImageID = l.imageNum
buf, err := avi.readData(8)
if err != nil {
return err
}
c.ID = FOURCC{buf[0], buf[1], buf[2], buf[3]}
// buf[4:8] is size of the image frame.
// this code reads one frame of the avi data.
c.Image, err = avi.readData(decodeU32(buf[4:8]))
if err != nil {
return err
}
l.imagechunks = append(l.imagechunks, &c)
return nil
}
func (avi *AVI) ChunkReader(l *List) error {
buf, err := avi.readData(8)
if err != nil {
return err
}
ck := Chunk{}
copy(ck.ID[:], buf[:4])
ck.Size = decodeU32(buf[4:])
switch ck.ID {
case fccavih:
ck.Data, err = avi.AVIHeaderReader(ck.Size)
case fccstrh:
ck.Data, err = avi.StreamHeaderReader(ck.Size)
case fccstrf:
ck.Data, err = avi.StreamFormatReader(ck.Size)
case fccindx:
ck.Data, err = avi.MetaIndexReader(ck.Size)
case fccdmlh:
ck.Data, err = avi.ExtendedAVIHeaderReader(ck.Size)
}
if err != nil {
return err
}
l.chunks = append(l.chunks, &ck) // add chunk object ck to l.chunks
return nil
}
func (avi *AVI) JUNKReader(l *List) error {
buf, err := avi.readData(8)
if err != nil {
return err
}
if !equal(FOURCC{buf[0], buf[1], buf[2], buf[3]}, fccJUNK) {
return errMissingKeywordHeader
}
l.JunkSize = decodeU32(buf[4:8])
buf, err = avi.readData(l.JunkSize)
return nil
}
func (avi *AVI) AVIHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["dwMicroSecPerFrame"] = decodeU32(buf[:4])
m["dwMaxBytesPerSec"] = decodeU32(buf[4:8])
m["dwPaddingGranularity"] = decodeU32(buf[8:12])
m["dwFlags"] = decodeU32(buf[12:16])
m["dwTotalFrames"] = decodeU32(buf[16:20])
m["dwInitialFrames"] = decodeU32(buf[20:24])
m["dwStreams"] = decodeU32(buf[24:28])
m["dwSuggestedBufferSize"] = decodeU32(buf[28:32])
m["dwWidth"] = decodeU32(buf[32:36])
m["dwHeight"] = decodeU32(buf[36:40])
m["dwReserved"] = decodeU32(buf[40:44])
return m, nil
}
func (avi *AVI) StreamHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["fccType"] = decodeU32(buf[:4])
m["fccHandler"] = decodeU32(buf[4:8])
m["dwFlags"] = decodeU32(buf[8:12])
m["wPriority"] = decodeU32(buf[12:16])
m["wLanguage"] = decodeU32(buf[16:20])
m["dwInitialFrames"] = decodeU32(buf[20:24])
m["dwScale"] = decodeU32(buf[24:28])
m["dwRate"] = decodeU32(buf[28:32])
m["dwStart"] = decodeU32(buf[32:36])
m["dwLength"] = decodeU32(buf[36:40])
m["dwSuggestedBufferSize"] = decodeU32(buf[40:44])
m["dwQuality"] = decodeU32(buf[44:48])
m["dwSampleSize"] = decodeU32(buf[48:52])
m["rcFrame1"] = uint32(buf[48])
m["rcFrame2"] = uint32(buf[49])
m["rcFrame3"] = uint32(buf[50])
m["rcFrame4"] = uint32(buf[51])
return m, nil
}
func (avi *AVI) StreamFormatReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["biSize"] = decodeU32(buf[:4])
m["biWidth"] = decodeU32(buf[4:8])
m["biHeight"] = decodeU32(buf[8:12])
m["biPlanes"] = decodeU32(buf[12:16])
m["biBitCount"] = decodeU32(buf[16:20])
m["biCompression"] = decodeU32(buf[20:24])
m["biSizeImage"] = decodeU32(buf[24:28])
m["biXPelsPerMeter"] = decodeU32(buf[28:32])
m["biYPelsPerMeter"] = decodeU32(buf[32:36])
m["biClrUsed"] = decodeU32(buf[36:40])
m["biClrImportant"] = decodeU32(buf[40:44])
return m, nil
}
func (avi *AVI) MetaIndexReader(size uint32) (map[string]uint32, error) |
func (avi *AVI) ExtendedAVIHeaderReader(size uint32) (map[string]uint32, error) {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["dwTotalFrames"] = decodeU32(buf[:4])
return m, nil
}
| {
buf, err := avi.readData(size)
if err != nil {
return nil, err
}
m := make(map[string]uint32)
m["wLongsPerEntry"] = decodeU32(buf[:2])
m["bIndexSubType"] = decodeU32(buf[2:3])
m["bIndexType"] = decodeU32(buf[3:4])
m["nEntriesInUse"] = decodeU32(buf[4:8])
m["dwChunkId"] = decodeU32(buf[8:12])
m["dwReserved1"] = decodeU32(buf[12:16])
m["dwReserved2"] = decodeU32(buf[16:20])
m["dwReserved3"] = decodeU32(buf[20:24])
// aIndex[] part
switch m["bIndexType"] {
case 0x0:
m["qwOffset1"] = decodeU32(buf[24:28])
m["qwOffset2"] = decodeU32(buf[28:32])
m["dwSize"] = decodeU32(buf[32:36])
m["dwDuration"] = decodeU32(buf[36:40])
}
// TODO: aIndex[] might store multiple items.
return m, nil
} | identifier_body |
parser.py | # ------------------------------------------------------------------------------
#
# Project: pycql <https://github.com/geopython/pycql>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
# ------------------------------------------------------------------------------
# Copyright (C) 2019 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
import logging
from ply import yacc
from .lexer import CQLLexer
from . import ast
from . import values
LOGGER = logging.getLogger(__name__)
class CQLParser:
def __init__(self, geometry_factory=values.Geometry, bbox_factory=values.BBox,
time_factory=values.Time, duration_factory=values.Duration):
self.lexer = CQLLexer(
# lextab='ecql.lextab',
# outputdir="ecql"
geometry_factory,
bbox_factory,
time_factory,
duration_factory,
optimize=True,
)
self.lexer.build()
self.tokens = self.lexer.tokens
self.parser = yacc.yacc(
module=self,
# start='condition_or_empty',
# debug=True,
optimize=True,
# tabmodule='ecql.yacctab',
# outputdir="ecql"
errorlog=yacc.NullLogger(),
)
def parse(self, text):
self.__query = text
return self.parser.parse(
input=text,
lexer=self.lexer
)
def restart(self, *args, **kwargs):
return self.parser.restart(*args, **kwargs)
precedence = (
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
)
#
# grammar
#
start = 'condition_or_empty'
def p_condition_or_empty(self, p):
""" condition_or_empty : condition
| empty
"""
p[0] = p[1]
def p_condition(self, p):
""" condition : predicate
| condition AND condition
| condition OR condition
| NOT condition
| LPAREN condition RPAREN
| LBRACKET condition RBRACKET
"""
if len(p) == 2:
p[0] = p[1]
elif p[2] in ("AND", "OR"):
p[0] = ast.CombinationConditionNode(p[1], p[3], p[2])
elif p[1] == "NOT":
p[0] = ast.NotConditionNode(p[2])
elif p[1] in ("(", "["):
p[0] = p[2]
def p_predicate(self, p):
""" predicate : expression EQ expression
| expression NE expression
| expression LT expression
| expression LE expression
| expression GT expression
| expression GE expression
| expression NOT BETWEEN expression AND expression
| expression BETWEEN expression AND expression
| expression NOT LIKE QUOTED
| expression LIKE QUOTED
| expression NOT ILIKE QUOTED
| expression ILIKE QUOTED
| expression NOT IN LPAREN expression_list RPAREN
| expression IN LPAREN expression_list RPAREN
| expression IS NOT NULL
| expression IS NULL
| temporal_predicate
| spatial_predicate
"""
if len(p) == 2: # hand over temporal and spatial predicates
|
elif p[2] in ("=", "<>", "<", "<=", ">", ">="):
p[0] = ast.ComparisonPredicateNode(p[1], p[3], p[2])
else:
not_ = False
op = p[2]
if op == 'NOT':
not_ = True
op = p[3]
if op == "BETWEEN":
p[0] = ast.BetweenPredicateNode(
p[1], p[4 if not_ else 3], p[6 if not_ else 5], not_
)
elif op in ("LIKE", "ILIKE"):
p[0] = ast.LikePredicateNode(
p[1], ast.LiteralExpression(p[4 if not_ else 3]),
op == "LIKE", not_
)
elif op == "IN":
p[0] = ast.InPredicateNode(p[1], p[5 if not_ else 4], not_)
elif op == "IS":
p[0] = ast.NullPredicateNode(p[1], p[3] == "NOT")
def p_temporal_predicate(self, p):
""" temporal_predicate : expression BEFORE TIME
| expression BEFORE OR DURING time_period
| expression DURING time_period
| expression DURING OR AFTER time_period
| expression AFTER TIME
"""
if len(p) > 4:
op = " ".join(p[2:-1])
else:
op = p[2]
p[0] = ast.TemporalPredicateNode(p[1], p[3 if len(p) == 4 else 5], op)
def p_time_period(self, p):
""" time_period : TIME DIVIDE TIME
| TIME DIVIDE DURATION
| DURATION DIVIDE TIME
"""
p[0] = (p[1], p[3])
def p_spatial_predicate(self, p):
""" spatial_predicate : INTERSECTS LPAREN expression COMMA expression RPAREN
| DISJOINT LPAREN expression COMMA expression RPAREN
| CONTAINS LPAREN expression COMMA expression RPAREN
| WITHIN LPAREN expression COMMA expression RPAREN
| TOUCHES LPAREN expression COMMA expression RPAREN
| CROSSES LPAREN expression COMMA expression RPAREN
| OVERLAPS LPAREN expression COMMA expression RPAREN
| EQUALS LPAREN expression COMMA expression RPAREN
| RELATE LPAREN expression COMMA expression COMMA QUOTED RPAREN
| DWITHIN LPAREN expression COMMA expression COMMA number COMMA UNITS RPAREN
| BEYOND LPAREN expression COMMA expression COMMA number COMMA UNITS RPAREN
| BBOX LPAREN expression COMMA number COMMA number COMMA number COMMA number RPAREN
| BBOX LPAREN expression COMMA number COMMA number COMMA number COMMA number COMMA QUOTED RPAREN
"""
op = p[1]
lhs = p[3]
rhs = p[5]
if op == "RELATE":
p[0] = ast.SpatialPredicateNode(lhs, rhs, op, pattern=p[7])
elif op in ("DWITHIN", "BEYOND"):
p[0] = ast.SpatialPredicateNode(
lhs, rhs, op, distance=p[7], units=p[9]
)
elif op == "BBOX":
p[0] = ast.BBoxPredicateNode(lhs, *p[5::2])
else:
p[0] = ast.SpatialPredicateNode(lhs, rhs, op)
def p_expression_list(self, p):
""" expression_list : expression_list COMMA expression
| expression
"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[1].append(p[3])
p[0] = p[1]
def p_expression(self, p):
""" expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression
| LPAREN expression RPAREN
| LBRACKET expression RBRACKET
| GEOMETRY
| ENVELOPE
| attribute
| QUOTED
| INTEGER
| FLOAT
"""
if len(p) == 2:
if isinstance(p[1], ast.Node):
p[0] = p[1]
else:
p[0] = ast.LiteralExpression(p[1])
else:
if p[1] in ("(", "["):
p[0] = p[2]
else:
op = p[2]
lhs = p[1]
rhs = p[3]
p[0] = ast.ArithmeticExpressionNode(lhs, rhs, op)
def p_number(self, p):
""" number : INTEGER
| FLOAT
"""
p[0] = ast.LiteralExpression(p[1])
def p_attribute(self, p):
""" attribute : ATTRIBUTE
"""
p[0] = ast.AttributeExpression(p[1])
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
if p:
LOGGER.debug(dir(p))
LOGGER.debug(f"Syntax error at token {p.type}, {p.value}, {p.lexpos}, {p.lineno}")
LOGGER.debug(self.__query.split('\n'))
line = self.__query.split('\n')[p.lineno - 1]
LOGGER.debug(line)
LOGGER.debug((' ' * p.lexpos) + '^')
# Just discard the token and tell the parser it's okay.
#p.parser.errok()
else:
LOGGER.debug("Syntax error at EOF")
def parse(cql, geometry_factory=values.Geometry, bbox_factory=values.BBox,
time_factory=values.Time, duration_factory=values.Duration):
""" Parses the passed CQL to its AST interpretation.
:param cql: the CQL expression string to parse
:type cql: str
:param geometry_factory: the geometry parsing function: it shall parse
the given WKT geometry string the relevant type
:param bbox_factory: the bbox parsing function: it shall parse
the given BBox tuple the relevant type.
:param time_factory: the timestamp parsing function: it shall parse
the given ISO8601 timestamp string tuple the relevant
type.
:param duration_factory: the duration parsing function: it shall parse
the given ISO8601 furation string tuple the relevant
type.
:return: the parsed CQL expression as an AST
:rtype: ~pycql.ast.Node
"""
parser = CQLParser(
geometry_factory,
bbox_factory,
time_factory,
duration_factory
)
return parser.parse(cql)
| p[0] = p[1] | conditional_block |
parser.py | # ------------------------------------------------------------------------------
#
# Project: pycql <https://github.com/geopython/pycql>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
# ------------------------------------------------------------------------------
# Copyright (C) 2019 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
import logging
from ply import yacc
from .lexer import CQLLexer
from . import ast
from . import values
LOGGER = logging.getLogger(__name__)
class | :
def __init__(self, geometry_factory=values.Geometry, bbox_factory=values.BBox,
time_factory=values.Time, duration_factory=values.Duration):
self.lexer = CQLLexer(
# lextab='ecql.lextab',
# outputdir="ecql"
geometry_factory,
bbox_factory,
time_factory,
duration_factory,
optimize=True,
)
self.lexer.build()
self.tokens = self.lexer.tokens
self.parser = yacc.yacc(
module=self,
# start='condition_or_empty',
# debug=True,
optimize=True,
# tabmodule='ecql.yacctab',
# outputdir="ecql"
errorlog=yacc.NullLogger(),
)
def parse(self, text):
self.__query = text
return self.parser.parse(
input=text,
lexer=self.lexer
)
def restart(self, *args, **kwargs):
return self.parser.restart(*args, **kwargs)
precedence = (
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
)
#
# grammar
#
start = 'condition_or_empty'
def p_condition_or_empty(self, p):
""" condition_or_empty : condition
| empty
"""
p[0] = p[1]
def p_condition(self, p):
""" condition : predicate
| condition AND condition
| condition OR condition
| NOT condition
| LPAREN condition RPAREN
| LBRACKET condition RBRACKET
"""
if len(p) == 2:
p[0] = p[1]
elif p[2] in ("AND", "OR"):
p[0] = ast.CombinationConditionNode(p[1], p[3], p[2])
elif p[1] == "NOT":
p[0] = ast.NotConditionNode(p[2])
elif p[1] in ("(", "["):
p[0] = p[2]
def p_predicate(self, p):
""" predicate : expression EQ expression
| expression NE expression
| expression LT expression
| expression LE expression
| expression GT expression
| expression GE expression
| expression NOT BETWEEN expression AND expression
| expression BETWEEN expression AND expression
| expression NOT LIKE QUOTED
| expression LIKE QUOTED
| expression NOT ILIKE QUOTED
| expression ILIKE QUOTED
| expression NOT IN LPAREN expression_list RPAREN
| expression IN LPAREN expression_list RPAREN
| expression IS NOT NULL
| expression IS NULL
| temporal_predicate
| spatial_predicate
"""
if len(p) == 2: # hand over temporal and spatial predicates
p[0] = p[1]
elif p[2] in ("=", "<>", "<", "<=", ">", ">="):
p[0] = ast.ComparisonPredicateNode(p[1], p[3], p[2])
else:
not_ = False
op = p[2]
if op == 'NOT':
not_ = True
op = p[3]
if op == "BETWEEN":
p[0] = ast.BetweenPredicateNode(
p[1], p[4 if not_ else 3], p[6 if not_ else 5], not_
)
elif op in ("LIKE", "ILIKE"):
p[0] = ast.LikePredicateNode(
p[1], ast.LiteralExpression(p[4 if not_ else 3]),
op == "LIKE", not_
)
elif op == "IN":
p[0] = ast.InPredicateNode(p[1], p[5 if not_ else 4], not_)
elif op == "IS":
p[0] = ast.NullPredicateNode(p[1], p[3] == "NOT")
def p_temporal_predicate(self, p):
""" temporal_predicate : expression BEFORE TIME
| expression BEFORE OR DURING time_period
| expression DURING time_period
| expression DURING OR AFTER time_period
| expression AFTER TIME
"""
if len(p) > 4:
op = " ".join(p[2:-1])
else:
op = p[2]
p[0] = ast.TemporalPredicateNode(p[1], p[3 if len(p) == 4 else 5], op)
def p_time_period(self, p):
""" time_period : TIME DIVIDE TIME
| TIME DIVIDE DURATION
| DURATION DIVIDE TIME
"""
p[0] = (p[1], p[3])
def p_spatial_predicate(self, p):
""" spatial_predicate : INTERSECTS LPAREN expression COMMA expression RPAREN
| DISJOINT LPAREN expression COMMA expression RPAREN
| CONTAINS LPAREN expression COMMA expression RPAREN
| WITHIN LPAREN expression COMMA expression RPAREN
| TOUCHES LPAREN expression COMMA expression RPAREN
| CROSSES LPAREN expression COMMA expression RPAREN
| OVERLAPS LPAREN expression COMMA expression RPAREN
| EQUALS LPAREN expression COMMA expression RPAREN
| RELATE LPAREN expression COMMA expression COMMA QUOTED RPAREN
| DWITHIN LPAREN expression COMMA expression COMMA number COMMA UNITS RPAREN
| BEYOND LPAREN expression COMMA expression COMMA number COMMA UNITS RPAREN
| BBOX LPAREN expression COMMA number COMMA number COMMA number COMMA number RPAREN
| BBOX LPAREN expression COMMA number COMMA number COMMA number COMMA number COMMA QUOTED RPAREN
"""
op = p[1]
lhs = p[3]
rhs = p[5]
if op == "RELATE":
p[0] = ast.SpatialPredicateNode(lhs, rhs, op, pattern=p[7])
elif op in ("DWITHIN", "BEYOND"):
p[0] = ast.SpatialPredicateNode(
lhs, rhs, op, distance=p[7], units=p[9]
)
elif op == "BBOX":
p[0] = ast.BBoxPredicateNode(lhs, *p[5::2])
else:
p[0] = ast.SpatialPredicateNode(lhs, rhs, op)
def p_expression_list(self, p):
""" expression_list : expression_list COMMA expression
| expression
"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[1].append(p[3])
p[0] = p[1]
def p_expression(self, p):
""" expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression
| LPAREN expression RPAREN
| LBRACKET expression RBRACKET
| GEOMETRY
| ENVELOPE
| attribute
| QUOTED
| INTEGER
| FLOAT
"""
if len(p) == 2:
if isinstance(p[1], ast.Node):
p[0] = p[1]
else:
p[0] = ast.LiteralExpression(p[1])
else:
if p[1] in ("(", "["):
p[0] = p[2]
else:
op = p[2]
lhs = p[1]
rhs = p[3]
p[0] = ast.ArithmeticExpressionNode(lhs, rhs, op)
def p_number(self, p):
""" number : INTEGER
| FLOAT
"""
p[0] = ast.LiteralExpression(p[1])
def p_attribute(self, p):
""" attribute : ATTRIBUTE
"""
p[0] = ast.AttributeExpression(p[1])
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
if p:
LOGGER.debug(dir(p))
LOGGER.debug(f"Syntax error at token {p.type}, {p.value}, {p.lexpos}, {p.lineno}")
LOGGER.debug(self.__query.split('\n'))
line = self.__query.split('\n')[p.lineno - 1]
LOGGER.debug(line)
LOGGER.debug((' ' * p.lexpos) + '^')
# Just discard the token and tell the parser it's okay.
#p.parser.errok()
else:
LOGGER.debug("Syntax error at EOF")
def parse(cql, geometry_factory=values.Geometry, bbox_factory=values.BBox,
time_factory=values.Time, duration_factory=values.Duration):
""" Parses the passed CQL to its AST interpretation.
:param cql: the CQL expression string to parse
:type cql: str
:param geometry_factory: the geometry parsing function: it shall parse
the given WKT geometry string the relevant type
:param bbox_factory: the bbox parsing function: it shall parse
the given BBox tuple the relevant type.
:param time_factory: the timestamp parsing function: it shall parse
the given ISO8601 timestamp string tuple the relevant
type.
:param duration_factory: the duration parsing function: it shall parse
the given ISO8601 furation string tuple the relevant
type.
:return: the parsed CQL expression as an AST
:rtype: ~pycql.ast.Node
"""
parser = CQLParser(
geometry_factory,
bbox_factory,
time_factory,
duration_factory
)
return parser.parse(cql)
| CQLParser | identifier_name |
parser.py | # ------------------------------------------------------------------------------
#
# Project: pycql <https://github.com/geopython/pycql>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
# ------------------------------------------------------------------------------
# Copyright (C) 2019 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
import logging
from ply import yacc
from .lexer import CQLLexer
from . import ast
from . import values
LOGGER = logging.getLogger(__name__)
class CQLParser:
def __init__(self, geometry_factory=values.Geometry, bbox_factory=values.BBox,
time_factory=values.Time, duration_factory=values.Duration):
self.lexer = CQLLexer(
# lextab='ecql.lextab',
# outputdir="ecql"
geometry_factory,
bbox_factory,
time_factory,
duration_factory,
optimize=True,
)
self.lexer.build()
self.tokens = self.lexer.tokens
self.parser = yacc.yacc(
module=self,
# start='condition_or_empty',
# debug=True,
optimize=True,
# tabmodule='ecql.yacctab',
# outputdir="ecql"
errorlog=yacc.NullLogger(),
)
def parse(self, text):
self.__query = text
return self.parser.parse(
input=text,
lexer=self.lexer
)
def restart(self, *args, **kwargs):
return self.parser.restart(*args, **kwargs)
precedence = (
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
)
#
# grammar
#
start = 'condition_or_empty'
def p_condition_or_empty(self, p):
""" condition_or_empty : condition
| empty
"""
p[0] = p[1]
def p_condition(self, p):
""" condition : predicate
| condition AND condition
| condition OR condition
| NOT condition
| LPAREN condition RPAREN
| LBRACKET condition RBRACKET
"""
if len(p) == 2:
p[0] = p[1]
elif p[2] in ("AND", "OR"):
p[0] = ast.CombinationConditionNode(p[1], p[3], p[2])
elif p[1] == "NOT":
p[0] = ast.NotConditionNode(p[2])
elif p[1] in ("(", "["):
p[0] = p[2]
def p_predicate(self, p):
""" predicate : expression EQ expression
| expression NE expression
| expression LT expression
| expression LE expression
| expression GT expression
| expression GE expression
| expression NOT BETWEEN expression AND expression
| expression BETWEEN expression AND expression
| expression NOT LIKE QUOTED
| expression LIKE QUOTED
| expression NOT ILIKE QUOTED
| expression ILIKE QUOTED
| expression NOT IN LPAREN expression_list RPAREN
| expression IN LPAREN expression_list RPAREN
| expression IS NOT NULL
| expression IS NULL
| temporal_predicate
| spatial_predicate
"""
if len(p) == 2: # hand over temporal and spatial predicates
p[0] = p[1]
elif p[2] in ("=", "<>", "<", "<=", ">", ">="):
p[0] = ast.ComparisonPredicateNode(p[1], p[3], p[2])
else:
not_ = False
op = p[2]
if op == 'NOT':
not_ = True
op = p[3]
if op == "BETWEEN":
p[0] = ast.BetweenPredicateNode(
p[1], p[4 if not_ else 3], p[6 if not_ else 5], not_
)
elif op in ("LIKE", "ILIKE"):
p[0] = ast.LikePredicateNode(
p[1], ast.LiteralExpression(p[4 if not_ else 3]),
op == "LIKE", not_
)
elif op == "IN":
p[0] = ast.InPredicateNode(p[1], p[5 if not_ else 4], not_)
elif op == "IS":
p[0] = ast.NullPredicateNode(p[1], p[3] == "NOT")
def p_temporal_predicate(self, p):
""" temporal_predicate : expression BEFORE TIME
| expression BEFORE OR DURING time_period
| expression DURING time_period
| expression DURING OR AFTER time_period
| expression AFTER TIME
"""
if len(p) > 4:
op = " ".join(p[2:-1])
else:
op = p[2]
p[0] = ast.TemporalPredicateNode(p[1], p[3 if len(p) == 4 else 5], op)
def p_time_period(self, p):
""" time_period : TIME DIVIDE TIME
| TIME DIVIDE DURATION
| DURATION DIVIDE TIME
"""
p[0] = (p[1], p[3])
def p_spatial_predicate(self, p):
""" spatial_predicate : INTERSECTS LPAREN expression COMMA expression RPAREN
| DISJOINT LPAREN expression COMMA expression RPAREN
| CONTAINS LPAREN expression COMMA expression RPAREN
| WITHIN LPAREN expression COMMA expression RPAREN
| TOUCHES LPAREN expression COMMA expression RPAREN
| CROSSES LPAREN expression COMMA expression RPAREN
| OVERLAPS LPAREN expression COMMA expression RPAREN | | BBOX LPAREN expression COMMA number COMMA number COMMA number COMMA number COMMA QUOTED RPAREN
"""
op = p[1]
lhs = p[3]
rhs = p[5]
if op == "RELATE":
p[0] = ast.SpatialPredicateNode(lhs, rhs, op, pattern=p[7])
elif op in ("DWITHIN", "BEYOND"):
p[0] = ast.SpatialPredicateNode(
lhs, rhs, op, distance=p[7], units=p[9]
)
elif op == "BBOX":
p[0] = ast.BBoxPredicateNode(lhs, *p[5::2])
else:
p[0] = ast.SpatialPredicateNode(lhs, rhs, op)
def p_expression_list(self, p):
""" expression_list : expression_list COMMA expression
| expression
"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[1].append(p[3])
p[0] = p[1]
def p_expression(self, p):
""" expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression
| LPAREN expression RPAREN
| LBRACKET expression RBRACKET
| GEOMETRY
| ENVELOPE
| attribute
| QUOTED
| INTEGER
| FLOAT
"""
if len(p) == 2:
if isinstance(p[1], ast.Node):
p[0] = p[1]
else:
p[0] = ast.LiteralExpression(p[1])
else:
if p[1] in ("(", "["):
p[0] = p[2]
else:
op = p[2]
lhs = p[1]
rhs = p[3]
p[0] = ast.ArithmeticExpressionNode(lhs, rhs, op)
def p_number(self, p):
""" number : INTEGER
| FLOAT
"""
p[0] = ast.LiteralExpression(p[1])
def p_attribute(self, p):
""" attribute : ATTRIBUTE
"""
p[0] = ast.AttributeExpression(p[1])
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
if p:
LOGGER.debug(dir(p))
LOGGER.debug(f"Syntax error at token {p.type}, {p.value}, {p.lexpos}, {p.lineno}")
LOGGER.debug(self.__query.split('\n'))
line = self.__query.split('\n')[p.lineno - 1]
LOGGER.debug(line)
LOGGER.debug((' ' * p.lexpos) + '^')
# Just discard the token and tell the parser it's okay.
#p.parser.errok()
else:
LOGGER.debug("Syntax error at EOF")
def parse(cql, geometry_factory=values.Geometry, bbox_factory=values.BBox,
time_factory=values.Time, duration_factory=values.Duration):
""" Parses the passed CQL to its AST interpretation.
:param cql: the CQL expression string to parse
:type cql: str
:param geometry_factory: the geometry parsing function: it shall parse
the given WKT geometry string the relevant type
:param bbox_factory: the bbox parsing function: it shall parse
the given BBox tuple the relevant type.
:param time_factory: the timestamp parsing function: it shall parse
the given ISO8601 timestamp string tuple the relevant
type.
:param duration_factory: the duration parsing function: it shall parse
the given ISO8601 furation string tuple the relevant
type.
:return: the parsed CQL expression as an AST
:rtype: ~pycql.ast.Node
"""
parser = CQLParser(
geometry_factory,
bbox_factory,
time_factory,
duration_factory
)
return parser.parse(cql) | | EQUALS LPAREN expression COMMA expression RPAREN
| RELATE LPAREN expression COMMA expression COMMA QUOTED RPAREN
| DWITHIN LPAREN expression COMMA expression COMMA number COMMA UNITS RPAREN
| BEYOND LPAREN expression COMMA expression COMMA number COMMA UNITS RPAREN
| BBOX LPAREN expression COMMA number COMMA number COMMA number COMMA number RPAREN | random_line_split |
parser.py | # ------------------------------------------------------------------------------
#
# Project: pycql <https://github.com/geopython/pycql>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
# ------------------------------------------------------------------------------
# Copyright (C) 2019 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
import logging
from ply import yacc
from .lexer import CQLLexer
from . import ast
from . import values
LOGGER = logging.getLogger(__name__)
class CQLParser:
def __init__(self, geometry_factory=values.Geometry, bbox_factory=values.BBox,
time_factory=values.Time, duration_factory=values.Duration):
self.lexer = CQLLexer(
# lextab='ecql.lextab',
# outputdir="ecql"
geometry_factory,
bbox_factory,
time_factory,
duration_factory,
optimize=True,
)
self.lexer.build()
self.tokens = self.lexer.tokens
self.parser = yacc.yacc(
module=self,
# start='condition_or_empty',
# debug=True,
optimize=True,
# tabmodule='ecql.yacctab',
# outputdir="ecql"
errorlog=yacc.NullLogger(),
)
def parse(self, text):
self.__query = text
return self.parser.parse(
input=text,
lexer=self.lexer
)
def restart(self, *args, **kwargs):
return self.parser.restart(*args, **kwargs)
precedence = (
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
)
#
# grammar
#
start = 'condition_or_empty'
def p_condition_or_empty(self, p):
|
def p_condition(self, p):
""" condition : predicate
| condition AND condition
| condition OR condition
| NOT condition
| LPAREN condition RPAREN
| LBRACKET condition RBRACKET
"""
if len(p) == 2:
p[0] = p[1]
elif p[2] in ("AND", "OR"):
p[0] = ast.CombinationConditionNode(p[1], p[3], p[2])
elif p[1] == "NOT":
p[0] = ast.NotConditionNode(p[2])
elif p[1] in ("(", "["):
p[0] = p[2]
def p_predicate(self, p):
""" predicate : expression EQ expression
| expression NE expression
| expression LT expression
| expression LE expression
| expression GT expression
| expression GE expression
| expression NOT BETWEEN expression AND expression
| expression BETWEEN expression AND expression
| expression NOT LIKE QUOTED
| expression LIKE QUOTED
| expression NOT ILIKE QUOTED
| expression ILIKE QUOTED
| expression NOT IN LPAREN expression_list RPAREN
| expression IN LPAREN expression_list RPAREN
| expression IS NOT NULL
| expression IS NULL
| temporal_predicate
| spatial_predicate
"""
if len(p) == 2: # hand over temporal and spatial predicates
p[0] = p[1]
elif p[2] in ("=", "<>", "<", "<=", ">", ">="):
p[0] = ast.ComparisonPredicateNode(p[1], p[3], p[2])
else:
not_ = False
op = p[2]
if op == 'NOT':
not_ = True
op = p[3]
if op == "BETWEEN":
p[0] = ast.BetweenPredicateNode(
p[1], p[4 if not_ else 3], p[6 if not_ else 5], not_
)
elif op in ("LIKE", "ILIKE"):
p[0] = ast.LikePredicateNode(
p[1], ast.LiteralExpression(p[4 if not_ else 3]),
op == "LIKE", not_
)
elif op == "IN":
p[0] = ast.InPredicateNode(p[1], p[5 if not_ else 4], not_)
elif op == "IS":
p[0] = ast.NullPredicateNode(p[1], p[3] == "NOT")
def p_temporal_predicate(self, p):
""" temporal_predicate : expression BEFORE TIME
| expression BEFORE OR DURING time_period
| expression DURING time_period
| expression DURING OR AFTER time_period
| expression AFTER TIME
"""
if len(p) > 4:
op = " ".join(p[2:-1])
else:
op = p[2]
p[0] = ast.TemporalPredicateNode(p[1], p[3 if len(p) == 4 else 5], op)
def p_time_period(self, p):
""" time_period : TIME DIVIDE TIME
| TIME DIVIDE DURATION
| DURATION DIVIDE TIME
"""
p[0] = (p[1], p[3])
def p_spatial_predicate(self, p):
""" spatial_predicate : INTERSECTS LPAREN expression COMMA expression RPAREN
| DISJOINT LPAREN expression COMMA expression RPAREN
| CONTAINS LPAREN expression COMMA expression RPAREN
| WITHIN LPAREN expression COMMA expression RPAREN
| TOUCHES LPAREN expression COMMA expression RPAREN
| CROSSES LPAREN expression COMMA expression RPAREN
| OVERLAPS LPAREN expression COMMA expression RPAREN
| EQUALS LPAREN expression COMMA expression RPAREN
| RELATE LPAREN expression COMMA expression COMMA QUOTED RPAREN
| DWITHIN LPAREN expression COMMA expression COMMA number COMMA UNITS RPAREN
| BEYOND LPAREN expression COMMA expression COMMA number COMMA UNITS RPAREN
| BBOX LPAREN expression COMMA number COMMA number COMMA number COMMA number RPAREN
| BBOX LPAREN expression COMMA number COMMA number COMMA number COMMA number COMMA QUOTED RPAREN
"""
op = p[1]
lhs = p[3]
rhs = p[5]
if op == "RELATE":
p[0] = ast.SpatialPredicateNode(lhs, rhs, op, pattern=p[7])
elif op in ("DWITHIN", "BEYOND"):
p[0] = ast.SpatialPredicateNode(
lhs, rhs, op, distance=p[7], units=p[9]
)
elif op == "BBOX":
p[0] = ast.BBoxPredicateNode(lhs, *p[5::2])
else:
p[0] = ast.SpatialPredicateNode(lhs, rhs, op)
def p_expression_list(self, p):
""" expression_list : expression_list COMMA expression
| expression
"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[1].append(p[3])
p[0] = p[1]
def p_expression(self, p):
""" expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression
| LPAREN expression RPAREN
| LBRACKET expression RBRACKET
| GEOMETRY
| ENVELOPE
| attribute
| QUOTED
| INTEGER
| FLOAT
"""
if len(p) == 2:
if isinstance(p[1], ast.Node):
p[0] = p[1]
else:
p[0] = ast.LiteralExpression(p[1])
else:
if p[1] in ("(", "["):
p[0] = p[2]
else:
op = p[2]
lhs = p[1]
rhs = p[3]
p[0] = ast.ArithmeticExpressionNode(lhs, rhs, op)
def p_number(self, p):
""" number : INTEGER
| FLOAT
"""
p[0] = ast.LiteralExpression(p[1])
def p_attribute(self, p):
""" attribute : ATTRIBUTE
"""
p[0] = ast.AttributeExpression(p[1])
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
if p:
LOGGER.debug(dir(p))
LOGGER.debug(f"Syntax error at token {p.type}, {p.value}, {p.lexpos}, {p.lineno}")
LOGGER.debug(self.__query.split('\n'))
line = self.__query.split('\n')[p.lineno - 1]
LOGGER.debug(line)
LOGGER.debug((' ' * p.lexpos) + '^')
# Just discard the token and tell the parser it's okay.
#p.parser.errok()
else:
LOGGER.debug("Syntax error at EOF")
def parse(cql, geometry_factory=values.Geometry, bbox_factory=values.BBox,
time_factory=values.Time, duration_factory=values.Duration):
""" Parses the passed CQL to its AST interpretation.
:param cql: the CQL expression string to parse
:type cql: str
:param geometry_factory: the geometry parsing function: it shall parse
the given WKT geometry string the relevant type
:param bbox_factory: the bbox parsing function: it shall parse
the given BBox tuple the relevant type.
:param time_factory: the timestamp parsing function: it shall parse
the given ISO8601 timestamp string tuple the relevant
type.
:param duration_factory: the duration parsing function: it shall parse
the given ISO8601 furation string tuple the relevant
type.
:return: the parsed CQL expression as an AST
:rtype: ~pycql.ast.Node
"""
parser = CQLParser(
geometry_factory,
bbox_factory,
time_factory,
duration_factory
)
return parser.parse(cql)
| """ condition_or_empty : condition
| empty
"""
p[0] = p[1] | identifier_body |
driver.go | package ipfs
import (
"bytes"
"fmt"
"io"
"io/ioutil"
_path "path"
"runtime"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/base"
"github.com/docker/distribution/registry/storage/driver/factory"
shell "github.com/whyrusleeping/ipfs-shell"
)
const driverName = "ipfs"
const defaultAddr = "localhost:5001"
const defaultRoot = "/ipns/local/docker-registry"
func debugTime() func() {
before := time.Now()
pc, _, _, ok := runtime.Caller(1)
if !ok {
panic("this is not okay")
}
f := runtime.FuncForPC(pc)
fmt.Printf("starting %s\n", f.Name())
return func() {
fmt.Printf("%s took %s\n", f.Name(), time.Now().Sub(before))
}
}
func init() {
factory.Register(driverName, &ipfsDriverFactory{})
}
// ipfsDriverFactory implements the factory.StorageDriverFactory interface
type ipfsDriverFactory struct{}
func (factory *ipfsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
return FromParameters(parameters), nil
}
type driver struct {
root string
shell *shell.Shell
roothash string
rootlock sync.Mutex
publish chan<- string
}
func (d *driver) publishHash(hash string) {
log.Error("PUBLISH: ", hash)
d.publish <- hash
}
func (d *driver) runPublisher(ipnskey string) chan<- string {
out := make(chan string, 32)
go func() {
var topub string
var long <-chan time.Time
var short <-chan time.Time
for {
select {
case k := <-out:
if topub == "" {
long = time.After(time.Second * 5)
short = time.After(time.Second * 1)
} else {
short = time.After(time.Second * 1)
}
topub = k
case <-long:
k := topub
topub = ""
long = nil
short = nil
err := d.publishChild(ipnskey, "docker-registry", k)
if err != nil {
log.Error("failed to publish: ", err)
}
case <-short:
k := topub
topub = ""
long = nil
short = nil
err := d.publishChild(ipnskey, "docker-registry", k)
if err != nil {
log.Error("failed to publish: ", err)
}
}
}
}()
return out
}
func (d *driver) publishChild(ipnskey, dirname, hash string) error {
val, err := d.shell.Resolve(ipnskey)
if err != nil {
return err
}
newIpnsRoot, err := d.shell.PatchLink(val, dirname, hash, true)
if err != nil {
return err
}
err = d.shell.Publish(ipnskey, "/ipfs/"+newIpnsRoot)
if err != nil {
log.Error("failed to publish: ", err)
}
return nil
}
type baseEmbed struct {
base.Base
}
// Driver is a storagedriver.StorageDriver implementation backed by a local
// IPFS daemon.
type Driver struct {
baseEmbed
}
// FromParameters constructs a new Driver with a given parameters map
// Optional Parameters:
// - addr
// - root
func FromParameters(parameters map[string]interface{}) *Driver {
var addr = defaultAddr
var root = defaultRoot
if parameters != nil {
addrInterface, ok := parameters["addr"]
if ok {
addr = fmt.Sprint(addrInterface)
}
rootInterface, ok := parameters["root"]
if ok {
root = fmt.Sprint(rootInterface)
}
}
return New(addr, root)
}
// New constructs a new Driver with a given addr (address) and root (IPNS root)
func New(addr string, root string) *Driver {
defer debugTime()()
shell := shell.NewShell(addr)
info, err := shell.ID()
if err != nil {
log.Error("error constructing node: ", err)
return nil
}
if strings.HasPrefix(root, "/ipns/local/") {
root = strings.Replace(root, "local", info.ID, 1)
}
if !strings.HasPrefix(root, "/ipns/") {
log.Error("tried to use non-ipns root")
return nil
}
ipnsroot, err := shell.Resolve(info.ID)
if err != nil {
log.Error("failed to resolve ipns root: ", err)
return nil
}
log.Error("ID: ", info.ID)
log.Error("IPNSROOT: ", ipnsroot)
hash, err := shell.ResolvePath(ipnsroot + "/docker-registry")
if err != nil {
if !strings.Contains(err.Error(), "no link named") {
log.Error("failed to resolve docker-registry dir: ", err)
return nil
}
h, err := shell.NewObject("unixfs-dir")
if err != nil {
log.Error("failed to get new empty dir: ", err)
return nil
}
hash = h
}
d := &driver{
shell: shell,
root: root,
roothash: hash,
}
d.publish = d.runPublisher(info.ID)
return &Driver{
baseEmbed: baseEmbed{
Base: base.Base{
StorageDriver: d,
},
},
}
}
// Implement the storagedriver.StorageDriver interface
func (d *driver) Name() string {
return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
defer debugTime()()
reader, err := d.shell.Cat(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
content, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
log.Debugf("Got content %s: %s", path, content)
return content, nil
}
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
defer debugTime()()
contentHash, err := d.shell.Add(bytes.NewReader(contents))
if err != nil {
return err
}
// strip off leading slash
path = path[1:]
d.rootlock.Lock()
defer d.rootlock.Unlock()
nroot, err := d.shell.PatchLink(d.roothash, path, contentHash, true)
if err != nil {
return err
}
d.roothash = nroot
d.publishHash(nroot)
return nil
}
// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
defer debugTime()()
reader, err := d.shell.Cat(d.fullPath(path))
if err != nil {
return nil, err
}
_, err = io.CopyN(ioutil.Discard, reader, offset)
if err != nil {
return nil, err
}
return ioutil.NopCloser(reader), nil
}
// WriteStream stores the contents of the provided io.Reader at a location
// designated by the given path.
func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) {
defer debugTime()()
fullPath := d.fullPath(path)
if offset > 0 {
oldReader, err := d.shell.Cat(fullPath)
if err == nil {
var buf bytes.Buffer
nn, err = io.CopyN(&buf, oldReader, offset)
if err != nil {
return 0, err
}
_, err := io.Copy(&buf, reader)
if err != nil {
return 0, err
}
reader = &buf
} else {
if strings.HasPrefix(err.Error(), "no link named") {
nn = 0
} else {
return 0, err
}
}
}
cr := &countReader{r: reader}
contentHash, err := d.shell.Add(cr)
if err != nil {
return 0, err
}
log.Errorf("Wrote content (after %d) %s: %s", nn, path, contentHash)
// strip off leading slash
path = path[1:]
d.rootlock.Lock()
defer d.rootlock.Unlock()
k, err := d.shell.PatchLink(d.roothash, path, contentHash, true)
if err != nil {
return 0, err
}
d.roothash = k
d.publishHash(k)
return nn + cr.n, nil
}
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) |
// List returns a list of the objects that are direct descendants of the given
// path.
func (d *driver) List(ctx context.Context, path string) ([]string, error) {
defer debugTime()()
output, err := d.shell.FileList(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
keys := make([]string, 0, len(output.Links))
for _, link := range output.Links {
keys = append(keys, _path.Join(path, link.Name))
}
return keys, nil
}
// Move moves an object stored at source to dest, removing the
// original object.
func (d *driver) Move(ctx context.Context, source string, dest string) error {
defer debugTime()()
sourceobj := d.fullPath(source)
srchash, err := d.shell.ResolvePath(sourceobj)
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return storagedriver.PathNotFoundError{Path: source}
}
return err
}
d.rootlock.Lock()
defer d.rootlock.Unlock()
newroot, err := d.shell.Patch(d.roothash, "rm-link", source[1:])
if err != nil {
if err.Error() == "merkledag: not found" {
return storagedriver.PathNotFoundError{Path: source}
} else {
return err
}
}
// remove leading slash
dest = dest[1:]
newroot, err = d.shell.PatchLink(newroot, dest, srchash, true)
if err != nil {
return err
}
d.roothash = newroot
fmt.Println("HASH AFTER MOVE: ", newroot)
d.publishHash(newroot)
return nil
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(ctx context.Context, path string) error {
defer debugTime()()
d.rootlock.Lock()
defer d.rootlock.Unlock()
log.Error("roothash: ", d.roothash)
newParentHash, err := d.shell.Patch(d.roothash, "rm-link", path[1:])
if err != nil {
log.Error("delete err: ", err)
if err.Error() == "merkledag: not found" {
fmt.Println("PATHNOTFOUND HAPPY HAPPY JOY JOY")
return storagedriver.PathNotFoundError{Path: path}
} else {
fmt.Println("GOT A BAD ERROR: ", err)
return err
}
}
d.roothash = newParentHash
d.publishHash(newParentHash)
return nil
}
// URLFor returns a URL which may be used to retrieve the content
// stored at the given path. It may return an UnsupportedMethodErr in
// certain StorageDriver implementations.
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
return "", storagedriver.ErrUnsupportedMethod
}
// fullPath returns the absolute path of a key within the Driver's
// storage.
func (d *driver) fullPath(path string) string {
return _path.Join("/ipfs", d.roothash, path)
}
| {
defer debugTime()()
d.rootlock.Lock()
defer d.rootlock.Unlock()
output, err := d.shell.FileList(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
fi := storagedriver.FileInfoFields{
Path: path,
IsDir: output.Type == "Directory",
ModTime: time.Time{},
}
if !fi.IsDir {
fi.Size = int64(output.Size)
}
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
} | identifier_body |
driver.go | package ipfs
import (
"bytes"
"fmt"
"io"
"io/ioutil"
_path "path"
"runtime"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/base"
"github.com/docker/distribution/registry/storage/driver/factory"
shell "github.com/whyrusleeping/ipfs-shell"
)
const driverName = "ipfs"
const defaultAddr = "localhost:5001"
const defaultRoot = "/ipns/local/docker-registry"
func debugTime() func() {
before := time.Now()
pc, _, _, ok := runtime.Caller(1)
if !ok {
panic("this is not okay")
}
f := runtime.FuncForPC(pc)
fmt.Printf("starting %s\n", f.Name())
return func() {
fmt.Printf("%s took %s\n", f.Name(), time.Now().Sub(before))
}
}
func init() {
factory.Register(driverName, &ipfsDriverFactory{})
}
// ipfsDriverFactory implements the factory.StorageDriverFactory interface
type ipfsDriverFactory struct{}
func (factory *ipfsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
return FromParameters(parameters), nil
}
type driver struct {
root string
shell *shell.Shell
roothash string
rootlock sync.Mutex
publish chan<- string
}
func (d *driver) publishHash(hash string) {
log.Error("PUBLISH: ", hash)
d.publish <- hash
}
func (d *driver) runPublisher(ipnskey string) chan<- string {
out := make(chan string, 32)
go func() {
var topub string
var long <-chan time.Time
var short <-chan time.Time
for {
select {
case k := <-out:
if topub == "" {
long = time.After(time.Second * 5)
short = time.After(time.Second * 1)
} else {
short = time.After(time.Second * 1)
}
topub = k
case <-long:
k := topub
topub = ""
long = nil
short = nil
err := d.publishChild(ipnskey, "docker-registry", k)
if err != nil {
log.Error("failed to publish: ", err)
}
case <-short:
k := topub
topub = ""
long = nil
short = nil
err := d.publishChild(ipnskey, "docker-registry", k)
if err != nil {
log.Error("failed to publish: ", err)
}
}
}
}()
return out
}
func (d *driver) publishChild(ipnskey, dirname, hash string) error {
val, err := d.shell.Resolve(ipnskey)
if err != nil {
return err
}
newIpnsRoot, err := d.shell.PatchLink(val, dirname, hash, true)
if err != nil {
return err
}
err = d.shell.Publish(ipnskey, "/ipfs/"+newIpnsRoot)
if err != nil {
log.Error("failed to publish: ", err)
}
return nil
}
type baseEmbed struct {
base.Base
}
// Driver is a storagedriver.StorageDriver implementation backed by a local
// IPFS daemon.
type Driver struct {
baseEmbed
}
// FromParameters constructs a new Driver with a given parameters map
// Optional Parameters:
// - addr
// - root
func FromParameters(parameters map[string]interface{}) *Driver {
var addr = defaultAddr
var root = defaultRoot
if parameters != nil {
addrInterface, ok := parameters["addr"]
if ok {
addr = fmt.Sprint(addrInterface)
}
rootInterface, ok := parameters["root"]
if ok {
root = fmt.Sprint(rootInterface)
}
}
return New(addr, root)
}
// New constructs a new Driver with a given addr (address) and root (IPNS root)
func New(addr string, root string) *Driver {
defer debugTime()()
shell := shell.NewShell(addr)
info, err := shell.ID()
if err != nil {
log.Error("error constructing node: ", err)
return nil
}
if strings.HasPrefix(root, "/ipns/local/") {
root = strings.Replace(root, "local", info.ID, 1)
}
if !strings.HasPrefix(root, "/ipns/") {
log.Error("tried to use non-ipns root")
return nil
}
ipnsroot, err := shell.Resolve(info.ID)
if err != nil {
log.Error("failed to resolve ipns root: ", err)
return nil
}
log.Error("ID: ", info.ID)
log.Error("IPNSROOT: ", ipnsroot)
hash, err := shell.ResolvePath(ipnsroot + "/docker-registry")
if err != nil {
if !strings.Contains(err.Error(), "no link named") {
log.Error("failed to resolve docker-registry dir: ", err)
return nil
}
h, err := shell.NewObject("unixfs-dir")
if err != nil {
log.Error("failed to get new empty dir: ", err)
return nil
}
hash = h
}
d := &driver{
shell: shell,
root: root,
roothash: hash,
}
d.publish = d.runPublisher(info.ID)
return &Driver{
baseEmbed: baseEmbed{
Base: base.Base{
StorageDriver: d,
},
},
}
}
// Implement the storagedriver.StorageDriver interface
func (d *driver) | () string {
return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
defer debugTime()()
reader, err := d.shell.Cat(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
content, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
log.Debugf("Got content %s: %s", path, content)
return content, nil
}
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
defer debugTime()()
contentHash, err := d.shell.Add(bytes.NewReader(contents))
if err != nil {
return err
}
// strip off leading slash
path = path[1:]
d.rootlock.Lock()
defer d.rootlock.Unlock()
nroot, err := d.shell.PatchLink(d.roothash, path, contentHash, true)
if err != nil {
return err
}
d.roothash = nroot
d.publishHash(nroot)
return nil
}
// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
defer debugTime()()
reader, err := d.shell.Cat(d.fullPath(path))
if err != nil {
return nil, err
}
_, err = io.CopyN(ioutil.Discard, reader, offset)
if err != nil {
return nil, err
}
return ioutil.NopCloser(reader), nil
}
// WriteStream stores the contents of the provided io.Reader at a location
// designated by the given path.
func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) {
defer debugTime()()
fullPath := d.fullPath(path)
if offset > 0 {
oldReader, err := d.shell.Cat(fullPath)
if err == nil {
var buf bytes.Buffer
nn, err = io.CopyN(&buf, oldReader, offset)
if err != nil {
return 0, err
}
_, err := io.Copy(&buf, reader)
if err != nil {
return 0, err
}
reader = &buf
} else {
if strings.HasPrefix(err.Error(), "no link named") {
nn = 0
} else {
return 0, err
}
}
}
cr := &countReader{r: reader}
contentHash, err := d.shell.Add(cr)
if err != nil {
return 0, err
}
log.Errorf("Wrote content (after %d) %s: %s", nn, path, contentHash)
// strip off leading slash
path = path[1:]
d.rootlock.Lock()
defer d.rootlock.Unlock()
k, err := d.shell.PatchLink(d.roothash, path, contentHash, true)
if err != nil {
return 0, err
}
d.roothash = k
d.publishHash(k)
return nn + cr.n, nil
}
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
defer debugTime()()
d.rootlock.Lock()
defer d.rootlock.Unlock()
output, err := d.shell.FileList(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
fi := storagedriver.FileInfoFields{
Path: path,
IsDir: output.Type == "Directory",
ModTime: time.Time{},
}
if !fi.IsDir {
fi.Size = int64(output.Size)
}
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
// List returns a list of the objects that are direct descendants of the given
// path.
func (d *driver) List(ctx context.Context, path string) ([]string, error) {
defer debugTime()()
output, err := d.shell.FileList(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
keys := make([]string, 0, len(output.Links))
for _, link := range output.Links {
keys = append(keys, _path.Join(path, link.Name))
}
return keys, nil
}
// Move moves an object stored at source to dest, removing the
// original object.
func (d *driver) Move(ctx context.Context, source string, dest string) error {
defer debugTime()()
sourceobj := d.fullPath(source)
srchash, err := d.shell.ResolvePath(sourceobj)
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return storagedriver.PathNotFoundError{Path: source}
}
return err
}
d.rootlock.Lock()
defer d.rootlock.Unlock()
newroot, err := d.shell.Patch(d.roothash, "rm-link", source[1:])
if err != nil {
if err.Error() == "merkledag: not found" {
return storagedriver.PathNotFoundError{Path: source}
} else {
return err
}
}
// remove leading slash
dest = dest[1:]
newroot, err = d.shell.PatchLink(newroot, dest, srchash, true)
if err != nil {
return err
}
d.roothash = newroot
fmt.Println("HASH AFTER MOVE: ", newroot)
d.publishHash(newroot)
return nil
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(ctx context.Context, path string) error {
defer debugTime()()
d.rootlock.Lock()
defer d.rootlock.Unlock()
log.Error("roothash: ", d.roothash)
newParentHash, err := d.shell.Patch(d.roothash, "rm-link", path[1:])
if err != nil {
log.Error("delete err: ", err)
if err.Error() == "merkledag: not found" {
fmt.Println("PATHNOTFOUND HAPPY HAPPY JOY JOY")
return storagedriver.PathNotFoundError{Path: path}
} else {
fmt.Println("GOT A BAD ERROR: ", err)
return err
}
}
d.roothash = newParentHash
d.publishHash(newParentHash)
return nil
}
// URLFor returns a URL which may be used to retrieve the content
// stored at the given path. It may return an UnsupportedMethodErr in
// certain StorageDriver implementations.
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
return "", storagedriver.ErrUnsupportedMethod
}
// fullPath returns the absolute path of a key within the Driver's
// storage.
func (d *driver) fullPath(path string) string {
return _path.Join("/ipfs", d.roothash, path)
}
| Name | identifier_name |
driver.go | package ipfs
import (
"bytes"
"fmt"
"io"
"io/ioutil"
_path "path"
"runtime"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/base"
"github.com/docker/distribution/registry/storage/driver/factory"
shell "github.com/whyrusleeping/ipfs-shell"
)
const driverName = "ipfs"
const defaultAddr = "localhost:5001"
const defaultRoot = "/ipns/local/docker-registry"
func debugTime() func() {
before := time.Now()
pc, _, _, ok := runtime.Caller(1)
if !ok {
panic("this is not okay")
}
f := runtime.FuncForPC(pc)
fmt.Printf("starting %s\n", f.Name())
return func() {
fmt.Printf("%s took %s\n", f.Name(), time.Now().Sub(before))
}
}
func init() {
factory.Register(driverName, &ipfsDriverFactory{})
}
// ipfsDriverFactory implements the factory.StorageDriverFactory interface
type ipfsDriverFactory struct{}
func (factory *ipfsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
return FromParameters(parameters), nil
}
type driver struct {
root string
shell *shell.Shell
roothash string
rootlock sync.Mutex
publish chan<- string
}
func (d *driver) publishHash(hash string) {
log.Error("PUBLISH: ", hash)
d.publish <- hash
}
func (d *driver) runPublisher(ipnskey string) chan<- string {
out := make(chan string, 32)
go func() {
var topub string
var long <-chan time.Time
var short <-chan time.Time
for {
select {
case k := <-out:
if topub == "" {
long = time.After(time.Second * 5)
short = time.After(time.Second * 1)
} else {
short = time.After(time.Second * 1)
}
topub = k
case <-long:
k := topub
topub = ""
long = nil
short = nil
err := d.publishChild(ipnskey, "docker-registry", k)
if err != nil {
log.Error("failed to publish: ", err)
}
case <-short:
k := topub
topub = ""
long = nil
short = nil
err := d.publishChild(ipnskey, "docker-registry", k)
if err != nil {
log.Error("failed to publish: ", err)
}
}
}
}()
return out
}
func (d *driver) publishChild(ipnskey, dirname, hash string) error {
val, err := d.shell.Resolve(ipnskey)
if err != nil {
return err
}
newIpnsRoot, err := d.shell.PatchLink(val, dirname, hash, true)
if err != nil {
return err
}
err = d.shell.Publish(ipnskey, "/ipfs/"+newIpnsRoot)
if err != nil {
log.Error("failed to publish: ", err)
}
return nil
}
type baseEmbed struct {
base.Base
}
// Driver is a storagedriver.StorageDriver implementation backed by a local
// IPFS daemon.
type Driver struct {
baseEmbed
}
// FromParameters constructs a new Driver with a given parameters map
// Optional Parameters:
// - addr
// - root
func FromParameters(parameters map[string]interface{}) *Driver {
var addr = defaultAddr
var root = defaultRoot
if parameters != nil {
addrInterface, ok := parameters["addr"]
if ok {
addr = fmt.Sprint(addrInterface)
}
rootInterface, ok := parameters["root"]
if ok {
root = fmt.Sprint(rootInterface)
}
}
return New(addr, root)
}
// New constructs a new Driver with a given addr (address) and root (IPNS root)
func New(addr string, root string) *Driver {
defer debugTime()()
shell := shell.NewShell(addr)
info, err := shell.ID()
if err != nil {
log.Error("error constructing node: ", err)
return nil
}
if strings.HasPrefix(root, "/ipns/local/") {
root = strings.Replace(root, "local", info.ID, 1)
}
if !strings.HasPrefix(root, "/ipns/") {
log.Error("tried to use non-ipns root")
return nil
}
ipnsroot, err := shell.Resolve(info.ID)
if err != nil {
log.Error("failed to resolve ipns root: ", err)
return nil
}
log.Error("ID: ", info.ID)
log.Error("IPNSROOT: ", ipnsroot)
hash, err := shell.ResolvePath(ipnsroot + "/docker-registry")
if err != nil {
if !strings.Contains(err.Error(), "no link named") { | if err != nil {
log.Error("failed to get new empty dir: ", err)
return nil
}
hash = h
}
d := &driver{
shell: shell,
root: root,
roothash: hash,
}
d.publish = d.runPublisher(info.ID)
return &Driver{
baseEmbed: baseEmbed{
Base: base.Base{
StorageDriver: d,
},
},
}
}
// Implement the storagedriver.StorageDriver interface
func (d *driver) Name() string {
return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
defer debugTime()()
reader, err := d.shell.Cat(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
content, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
log.Debugf("Got content %s: %s", path, content)
return content, nil
}
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
defer debugTime()()
contentHash, err := d.shell.Add(bytes.NewReader(contents))
if err != nil {
return err
}
// strip off leading slash
path = path[1:]
d.rootlock.Lock()
defer d.rootlock.Unlock()
nroot, err := d.shell.PatchLink(d.roothash, path, contentHash, true)
if err != nil {
return err
}
d.roothash = nroot
d.publishHash(nroot)
return nil
}
// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
defer debugTime()()
reader, err := d.shell.Cat(d.fullPath(path))
if err != nil {
return nil, err
}
_, err = io.CopyN(ioutil.Discard, reader, offset)
if err != nil {
return nil, err
}
return ioutil.NopCloser(reader), nil
}
// WriteStream stores the contents of the provided io.Reader at a location
// designated by the given path.
func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) {
defer debugTime()()
fullPath := d.fullPath(path)
if offset > 0 {
oldReader, err := d.shell.Cat(fullPath)
if err == nil {
var buf bytes.Buffer
nn, err = io.CopyN(&buf, oldReader, offset)
if err != nil {
return 0, err
}
_, err := io.Copy(&buf, reader)
if err != nil {
return 0, err
}
reader = &buf
} else {
if strings.HasPrefix(err.Error(), "no link named") {
nn = 0
} else {
return 0, err
}
}
}
cr := &countReader{r: reader}
contentHash, err := d.shell.Add(cr)
if err != nil {
return 0, err
}
log.Errorf("Wrote content (after %d) %s: %s", nn, path, contentHash)
// strip off leading slash
path = path[1:]
d.rootlock.Lock()
defer d.rootlock.Unlock()
k, err := d.shell.PatchLink(d.roothash, path, contentHash, true)
if err != nil {
return 0, err
}
d.roothash = k
d.publishHash(k)
return nn + cr.n, nil
}
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
defer debugTime()()
d.rootlock.Lock()
defer d.rootlock.Unlock()
output, err := d.shell.FileList(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
fi := storagedriver.FileInfoFields{
Path: path,
IsDir: output.Type == "Directory",
ModTime: time.Time{},
}
if !fi.IsDir {
fi.Size = int64(output.Size)
}
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
// List returns a list of the objects that are direct descendants of the given
// path.
func (d *driver) List(ctx context.Context, path string) ([]string, error) {
defer debugTime()()
output, err := d.shell.FileList(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
keys := make([]string, 0, len(output.Links))
for _, link := range output.Links {
keys = append(keys, _path.Join(path, link.Name))
}
return keys, nil
}
// Move moves an object stored at source to dest, removing the
// original object.
func (d *driver) Move(ctx context.Context, source string, dest string) error {
defer debugTime()()
sourceobj := d.fullPath(source)
srchash, err := d.shell.ResolvePath(sourceobj)
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return storagedriver.PathNotFoundError{Path: source}
}
return err
}
d.rootlock.Lock()
defer d.rootlock.Unlock()
newroot, err := d.shell.Patch(d.roothash, "rm-link", source[1:])
if err != nil {
if err.Error() == "merkledag: not found" {
return storagedriver.PathNotFoundError{Path: source}
} else {
return err
}
}
// remove leading slash
dest = dest[1:]
newroot, err = d.shell.PatchLink(newroot, dest, srchash, true)
if err != nil {
return err
}
d.roothash = newroot
fmt.Println("HASH AFTER MOVE: ", newroot)
d.publishHash(newroot)
return nil
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(ctx context.Context, path string) error {
defer debugTime()()
d.rootlock.Lock()
defer d.rootlock.Unlock()
log.Error("roothash: ", d.roothash)
newParentHash, err := d.shell.Patch(d.roothash, "rm-link", path[1:])
if err != nil {
log.Error("delete err: ", err)
if err.Error() == "merkledag: not found" {
fmt.Println("PATHNOTFOUND HAPPY HAPPY JOY JOY")
return storagedriver.PathNotFoundError{Path: path}
} else {
fmt.Println("GOT A BAD ERROR: ", err)
return err
}
}
d.roothash = newParentHash
d.publishHash(newParentHash)
return nil
}
// URLFor returns a URL which may be used to retrieve the content
// stored at the given path. It may return an UnsupportedMethodErr in
// certain StorageDriver implementations.
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
return "", storagedriver.ErrUnsupportedMethod
}
// fullPath returns the absolute path of a key within the Driver's
// storage.
func (d *driver) fullPath(path string) string {
return _path.Join("/ipfs", d.roothash, path)
} | log.Error("failed to resolve docker-registry dir: ", err)
return nil
}
h, err := shell.NewObject("unixfs-dir") | random_line_split |
driver.go | package ipfs
import (
"bytes"
"fmt"
"io"
"io/ioutil"
_path "path"
"runtime"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/base"
"github.com/docker/distribution/registry/storage/driver/factory"
shell "github.com/whyrusleeping/ipfs-shell"
)
const driverName = "ipfs"
const defaultAddr = "localhost:5001"
const defaultRoot = "/ipns/local/docker-registry"
func debugTime() func() {
before := time.Now()
pc, _, _, ok := runtime.Caller(1)
if !ok {
panic("this is not okay")
}
f := runtime.FuncForPC(pc)
fmt.Printf("starting %s\n", f.Name())
return func() {
fmt.Printf("%s took %s\n", f.Name(), time.Now().Sub(before))
}
}
func init() {
factory.Register(driverName, &ipfsDriverFactory{})
}
// ipfsDriverFactory implements the factory.StorageDriverFactory interface
type ipfsDriverFactory struct{}
func (factory *ipfsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
return FromParameters(parameters), nil
}
type driver struct {
root string
shell *shell.Shell
roothash string
rootlock sync.Mutex
publish chan<- string
}
func (d *driver) publishHash(hash string) {
log.Error("PUBLISH: ", hash)
d.publish <- hash
}
func (d *driver) runPublisher(ipnskey string) chan<- string {
out := make(chan string, 32)
go func() {
var topub string
var long <-chan time.Time
var short <-chan time.Time
for |
}()
return out
}
func (d *driver) publishChild(ipnskey, dirname, hash string) error {
val, err := d.shell.Resolve(ipnskey)
if err != nil {
return err
}
newIpnsRoot, err := d.shell.PatchLink(val, dirname, hash, true)
if err != nil {
return err
}
err = d.shell.Publish(ipnskey, "/ipfs/"+newIpnsRoot)
if err != nil {
log.Error("failed to publish: ", err)
}
return nil
}
type baseEmbed struct {
base.Base
}
// Driver is a storagedriver.StorageDriver implementation backed by a local
// IPFS daemon.
type Driver struct {
baseEmbed
}
// FromParameters constructs a new Driver with a given parameters map
// Optional Parameters:
// - addr
// - root
func FromParameters(parameters map[string]interface{}) *Driver {
var addr = defaultAddr
var root = defaultRoot
if parameters != nil {
addrInterface, ok := parameters["addr"]
if ok {
addr = fmt.Sprint(addrInterface)
}
rootInterface, ok := parameters["root"]
if ok {
root = fmt.Sprint(rootInterface)
}
}
return New(addr, root)
}
// New constructs a new Driver with a given addr (address) and root (IPNS root)
func New(addr string, root string) *Driver {
defer debugTime()()
shell := shell.NewShell(addr)
info, err := shell.ID()
if err != nil {
log.Error("error constructing node: ", err)
return nil
}
if strings.HasPrefix(root, "/ipns/local/") {
root = strings.Replace(root, "local", info.ID, 1)
}
if !strings.HasPrefix(root, "/ipns/") {
log.Error("tried to use non-ipns root")
return nil
}
ipnsroot, err := shell.Resolve(info.ID)
if err != nil {
log.Error("failed to resolve ipns root: ", err)
return nil
}
log.Error("ID: ", info.ID)
log.Error("IPNSROOT: ", ipnsroot)
hash, err := shell.ResolvePath(ipnsroot + "/docker-registry")
if err != nil {
if !strings.Contains(err.Error(), "no link named") {
log.Error("failed to resolve docker-registry dir: ", err)
return nil
}
h, err := shell.NewObject("unixfs-dir")
if err != nil {
log.Error("failed to get new empty dir: ", err)
return nil
}
hash = h
}
d := &driver{
shell: shell,
root: root,
roothash: hash,
}
d.publish = d.runPublisher(info.ID)
return &Driver{
baseEmbed: baseEmbed{
Base: base.Base{
StorageDriver: d,
},
},
}
}
// Implement the storagedriver.StorageDriver interface
func (d *driver) Name() string {
return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
defer debugTime()()
reader, err := d.shell.Cat(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
content, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
log.Debugf("Got content %s: %s", path, content)
return content, nil
}
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
defer debugTime()()
contentHash, err := d.shell.Add(bytes.NewReader(contents))
if err != nil {
return err
}
// strip off leading slash
path = path[1:]
d.rootlock.Lock()
defer d.rootlock.Unlock()
nroot, err := d.shell.PatchLink(d.roothash, path, contentHash, true)
if err != nil {
return err
}
d.roothash = nroot
d.publishHash(nroot)
return nil
}
// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
defer debugTime()()
reader, err := d.shell.Cat(d.fullPath(path))
if err != nil {
return nil, err
}
_, err = io.CopyN(ioutil.Discard, reader, offset)
if err != nil {
return nil, err
}
return ioutil.NopCloser(reader), nil
}
// WriteStream stores the contents of the provided io.Reader at a location
// designated by the given path.
func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) {
defer debugTime()()
fullPath := d.fullPath(path)
if offset > 0 {
oldReader, err := d.shell.Cat(fullPath)
if err == nil {
var buf bytes.Buffer
nn, err = io.CopyN(&buf, oldReader, offset)
if err != nil {
return 0, err
}
_, err := io.Copy(&buf, reader)
if err != nil {
return 0, err
}
reader = &buf
} else {
if strings.HasPrefix(err.Error(), "no link named") {
nn = 0
} else {
return 0, err
}
}
}
cr := &countReader{r: reader}
contentHash, err := d.shell.Add(cr)
if err != nil {
return 0, err
}
log.Errorf("Wrote content (after %d) %s: %s", nn, path, contentHash)
// strip off leading slash
path = path[1:]
d.rootlock.Lock()
defer d.rootlock.Unlock()
k, err := d.shell.PatchLink(d.roothash, path, contentHash, true)
if err != nil {
return 0, err
}
d.roothash = k
d.publishHash(k)
return nn + cr.n, nil
}
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
defer debugTime()()
d.rootlock.Lock()
defer d.rootlock.Unlock()
output, err := d.shell.FileList(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
fi := storagedriver.FileInfoFields{
Path: path,
IsDir: output.Type == "Directory",
ModTime: time.Time{},
}
if !fi.IsDir {
fi.Size = int64(output.Size)
}
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
// List returns a list of the objects that are direct descendants of the given
// path.
func (d *driver) List(ctx context.Context, path string) ([]string, error) {
defer debugTime()()
output, err := d.shell.FileList(d.fullPath(path))
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return nil, err
}
keys := make([]string, 0, len(output.Links))
for _, link := range output.Links {
keys = append(keys, _path.Join(path, link.Name))
}
return keys, nil
}
// Move moves an object stored at source to dest, removing the
// original object.
func (d *driver) Move(ctx context.Context, source string, dest string) error {
defer debugTime()()
sourceobj := d.fullPath(source)
srchash, err := d.shell.ResolvePath(sourceobj)
if err != nil {
if strings.HasPrefix(err.Error(), "no link named") {
return storagedriver.PathNotFoundError{Path: source}
}
return err
}
d.rootlock.Lock()
defer d.rootlock.Unlock()
newroot, err := d.shell.Patch(d.roothash, "rm-link", source[1:])
if err != nil {
if err.Error() == "merkledag: not found" {
return storagedriver.PathNotFoundError{Path: source}
} else {
return err
}
}
// remove leading slash
dest = dest[1:]
newroot, err = d.shell.PatchLink(newroot, dest, srchash, true)
if err != nil {
return err
}
d.roothash = newroot
fmt.Println("HASH AFTER MOVE: ", newroot)
d.publishHash(newroot)
return nil
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(ctx context.Context, path string) error {
defer debugTime()()
d.rootlock.Lock()
defer d.rootlock.Unlock()
log.Error("roothash: ", d.roothash)
newParentHash, err := d.shell.Patch(d.roothash, "rm-link", path[1:])
if err != nil {
log.Error("delete err: ", err)
if err.Error() == "merkledag: not found" {
fmt.Println("PATHNOTFOUND HAPPY HAPPY JOY JOY")
return storagedriver.PathNotFoundError{Path: path}
} else {
fmt.Println("GOT A BAD ERROR: ", err)
return err
}
}
d.roothash = newParentHash
d.publishHash(newParentHash)
return nil
}
// URLFor returns a URL which may be used to retrieve the content
// stored at the given path. It may return an UnsupportedMethodErr in
// certain StorageDriver implementations.
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
return "", storagedriver.ErrUnsupportedMethod
}
// fullPath returns the absolute path of a key within the Driver's
// storage.
func (d *driver) fullPath(path string) string {
return _path.Join("/ipfs", d.roothash, path)
}
| {
select {
case k := <-out:
if topub == "" {
long = time.After(time.Second * 5)
short = time.After(time.Second * 1)
} else {
short = time.After(time.Second * 1)
}
topub = k
case <-long:
k := topub
topub = ""
long = nil
short = nil
err := d.publishChild(ipnskey, "docker-registry", k)
if err != nil {
log.Error("failed to publish: ", err)
}
case <-short:
k := topub
topub = ""
long = nil
short = nil
err := d.publishChild(ipnskey, "docker-registry", k)
if err != nil {
log.Error("failed to publish: ", err)
}
}
} | conditional_block |
tracker.py | import random
import socket
import string
import struct
import asyncio
import logging
import time
import ipaddress
from collections import namedtuple
from functools import reduce
from typing import Optional
from lbry.dht.node import get_kademlia_peers_from_hosts
from lbry.utils import resolve_host, async_timed_cache, cache_concurrent
from lbry.wallet.stream import StreamController
from lbry import version
log = logging.getLogger(__name__)
CONNECTION_EXPIRES_AFTER_SECONDS = 50
PREFIX = 'LB' # todo: PR BEP20 to add ourselves
DEFAULT_TIMEOUT_SECONDS = 10.0
DEFAULT_CONCURRENCY_LIMIT = 100
# see: http://bittorrent.org/beps/bep_0015.html and http://xbtt.sourceforge.net/udp_tracker_protocol.html
ConnectRequest = namedtuple("ConnectRequest", ["connection_id", "action", "transaction_id"])
ConnectResponse = namedtuple("ConnectResponse", ["action", "transaction_id", "connection_id"])
AnnounceRequest = namedtuple("AnnounceRequest",
["connection_id", "action", "transaction_id", "info_hash", "peer_id", "downloaded", "left",
"uploaded", "event", "ip_addr", "key", "num_want", "port"])
AnnounceResponse = namedtuple("AnnounceResponse",
["action", "transaction_id", "interval", "leechers", "seeders", "peers"])
CompactIPv4Peer = namedtuple("CompactPeer", ["address", "port"])
ScrapeRequest = namedtuple("ScrapeRequest", ["connection_id", "action", "transaction_id", "infohashes"])
ScrapeResponse = namedtuple("ScrapeResponse", ["action", "transaction_id", "items"])
ScrapeResponseItem = namedtuple("ScrapeResponseItem", ["seeders", "completed", "leechers"])
ErrorResponse = namedtuple("ErrorResponse", ["action", "transaction_id", "message"])
structs = {
ConnectRequest: struct.Struct(">QII"),
ConnectResponse: struct.Struct(">IIQ"),
AnnounceRequest: struct.Struct(">QII20s20sQQQIIIiH"),
AnnounceResponse: struct.Struct(">IIIII"),
CompactIPv4Peer: struct.Struct(">IH"),
ScrapeRequest: struct.Struct(">QII"),
ScrapeResponse: struct.Struct(">II"),
ScrapeResponseItem: struct.Struct(">III"),
ErrorResponse: struct.Struct(">II")
}
def decode(cls, data, offset=0):
decoder = structs[cls]
if cls is AnnounceResponse:
return AnnounceResponse(*decoder.unpack_from(data, offset),
peers=[decode(CompactIPv4Peer, data, index) for index in range(20, len(data), 6)])
elif cls is ScrapeResponse:
return ScrapeResponse(*decoder.unpack_from(data, offset),
items=[decode(ScrapeResponseItem, data, index) for index in range(8, len(data), 12)])
elif cls is ErrorResponse:
return ErrorResponse(*decoder.unpack_from(data, offset), data[decoder.size:])
return cls(*decoder.unpack_from(data, offset))
def encode(obj):
if isinstance(obj, ScrapeRequest):
return structs[ScrapeRequest].pack(*obj[:-1]) + b''.join(obj.infohashes)
elif isinstance(obj, ErrorResponse):
return structs[ErrorResponse].pack(*obj[:-1]) + obj.message
elif isinstance(obj, AnnounceResponse):
return structs[AnnounceResponse].pack(*obj[:-1]) + b''.join([encode(peer) for peer in obj.peers])
return structs[type(obj)].pack(*obj)
def make_peer_id(random_part: Optional[str] = None) -> bytes:
# see https://wiki.theory.org/BitTorrentSpecification#peer_id and https://www.bittorrent.org/beps/bep_0020.html
# not to confuse with node id; peer id identifies uniquely the software, version and instance
random_part = random_part or ''.join(random.choice(string.ascii_letters) for _ in range(20))
return f"{PREFIX}-{'-'.join(map(str, version))}-{random_part}"[:20].encode()
class UDPTrackerClientProtocol(asyncio.DatagramProtocol):
def __init__(self, timeout: float = DEFAULT_TIMEOUT_SECONDS):
self.transport = None
self.data_queue = {}
self.timeout = timeout
self.semaphore = asyncio.Semaphore(DEFAULT_CONCURRENCY_LIMIT)
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
async def request(self, obj, tracker_ip, tracker_port):
self.data_queue[obj.transaction_id] = asyncio.get_running_loop().create_future()
try:
async with self.semaphore:
self.transport.sendto(encode(obj), (tracker_ip, tracker_port))
return await asyncio.wait_for(self.data_queue[obj.transaction_id], self.timeout)
finally:
self.data_queue.pop(obj.transaction_id, None)
async def connect(self, tracker_ip, tracker_port):
transaction_id = random.getrandbits(32)
return decode(ConnectResponse,
await self.request(ConnectRequest(0x41727101980, 0, transaction_id), tracker_ip, tracker_port))
@cache_concurrent
@async_timed_cache(CONNECTION_EXPIRES_AFTER_SECONDS)
async def ensure_connection_id(self, peer_id, tracker_ip, tracker_port):
# peer_id is just to ensure cache coherency
return (await self.connect(tracker_ip, tracker_port)).connection_id
async def announce(self, info_hash, peer_id, port, tracker_ip, tracker_port, stopped=False):
connection_id = await self.ensure_connection_id(peer_id, tracker_ip, tracker_port)
# this should make the key deterministic but unique per info hash + peer id
key = int.from_bytes(info_hash[:4], "big") ^ int.from_bytes(peer_id[:4], "big") ^ port
transaction_id = random.getrandbits(32)
req = AnnounceRequest(
connection_id, 1, transaction_id, info_hash, peer_id, 0, 0, 0, 3 if stopped else 1, 0, key, -1, port)
return decode(AnnounceResponse, await self.request(req, tracker_ip, tracker_port))
async def scrape(self, infohashes, tracker_ip, tracker_port, connection_id=None):
connection_id = await self.ensure_connection_id(None, tracker_ip, tracker_port)
transaction_id = random.getrandbits(32)
reply = await self.request(
ScrapeRequest(connection_id, 2, transaction_id, infohashes), tracker_ip, tracker_port)
return decode(ScrapeResponse, reply), connection_id
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 8:
return
transaction_id = int.from_bytes(data[4:8], byteorder="big", signed=False)
if transaction_id in self.data_queue:
if not self.data_queue[transaction_id].done():
if data[3] == 3:
return self.data_queue[transaction_id].set_exception(Exception(decode(ErrorResponse, data).message))
return self.data_queue[transaction_id].set_result(data)
log.debug("unexpected packet (can be a response for a previously timed out request): %s", data.hex())
def connection_lost(self, exc: Exception = None) -> None:
self.transport = None
class TrackerClient:
event_controller = StreamController()
def __init__(self, node_id, announce_port, get_servers, timeout=10.0):
self.client = UDPTrackerClientProtocol(timeout=timeout)
self.transport = None
self.peer_id = make_peer_id(node_id.hex() if node_id else None)
self.announce_port = announce_port
self._get_servers = get_servers
self.results = {} # we can't probe the server before the interval, so we keep the result here until it expires
self.tasks = {}
async def start(self):
self.transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
lambda: self.client, local_addr=("0.0.0.0", 0))
self.event_controller.stream.listen(
lambda request: self.on_hash(request[1], request[2]) if request[0] == 'search' else None)
def stop(self):
while self.tasks:
self.tasks.popitem()[1].cancel()
if self.transport is not None:
self.transport.close()
self.client = None
self.transport = None
self.event_controller.close()
def on_hash(self, info_hash, on_announcement=None):
if info_hash not in self.tasks:
task = asyncio.create_task(self.get_peer_list(info_hash, on_announcement=on_announcement))
task.add_done_callback(lambda *_: self.tasks.pop(info_hash, None))
self.tasks[info_hash] = task
async def announce_many(self, *info_hashes, stopped=False):
await asyncio.gather(
*[self._announce_many(server, info_hashes, stopped=stopped) for server in self._get_servers()],
return_exceptions=True)
async def _announce_many(self, server, info_hashes, stopped=False):
tracker_ip = await resolve_host(*server, 'udp')
still_good_info_hashes = {
info_hash for (info_hash, (next_announcement, _)) in self.results.get(tracker_ip, {}).items()
if time.time() < next_announcement
}
results = await asyncio.gather(
*[self._probe_server(info_hash, tracker_ip, server[1], stopped=stopped)
for info_hash in info_hashes if info_hash not in still_good_info_hashes],
return_exceptions=True)
if results:
errors = sum([1 for result in results if result is None or isinstance(result, Exception)])
log.info("Tracker: finished announcing %d files to %s:%d, %d errors", len(results), *server, errors)
async def get_peer_list(self, info_hash, stopped=False, on_announcement=None, no_port=False):
found = []
probes = [self._probe_server(info_hash, *server, stopped, no_port) for server in self._get_servers()]
for done in asyncio.as_completed(probes):
result = await done
if result is not None:
await asyncio.gather(*filter(asyncio.iscoroutine, [on_announcement(result)] if on_announcement else []))
found.append(result)
return found
async def get_kademlia_peer_list(self, info_hash):
responses = await self.get_peer_list(info_hash, no_port=True)
return await announcement_to_kademlia_peers(*responses)
async def _probe_server(self, info_hash, tracker_host, tracker_port, stopped=False, no_port=False):
result = None
try:
tracker_host = await resolve_host(tracker_host, tracker_port, 'udp')
except socket.error:
log.warning("DNS failure while resolving tracker host: %s, skipping.", tracker_host)
return
self.results.setdefault(tracker_host, {})
if info_hash in self.results[tracker_host]:
next_announcement, result = self.results[tracker_host][info_hash]
if time.time() < next_announcement:
return result
try:
result = await self.client.announce(
info_hash, self.peer_id, 0 if no_port else self.announce_port, tracker_host, tracker_port, stopped)
self.results[tracker_host][info_hash] = (time.time() + result.interval, result)
except asyncio.TimeoutError: # todo: this is UDP, timeout is common, we need a better metric for failures
self.results[tracker_host][info_hash] = (time.time() + 60.0, result)
log.debug("Tracker timed out: %s:%d", tracker_host, tracker_port)
return None
log.debug("Announced: %s found %d peers for %s", tracker_host, len(result.peers), info_hash.hex()[:8])
return result
def enqueue_tracker_search(info_hash: bytes, peer_q: asyncio.Queue):
async def on_announcement(announcement: AnnounceResponse):
peers = await announcement_to_kademlia_peers(announcement)
log.info("Found %d peers from tracker for %s", len(peers), info_hash.hex()[:8])
peer_q.put_nowait(peers)
TrackerClient.event_controller.add(('search', info_hash, on_announcement))
def announcement_to_kademlia_peers(*announcements: AnnounceResponse):
peers = [
(str(ipaddress.ip_address(peer.address)), peer.port)
for announcement in announcements for peer in announcement.peers if peer.port > 1024 # no privileged or 0
]
return get_kademlia_peers_from_hosts(peers)
class UDPTrackerServerProtocol(asyncio.DatagramProtocol): # for testing. Not suitable for production
def __init__(self):
self.transport = None
self.known_conns = set()
self.peers = {}
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
def add_peer(self, info_hash, ip_address: str, port: int):
self.peers.setdefault(info_hash, [])
self.peers[info_hash].append(encode_peer(ip_address, port))
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 16:
return
action = int.from_bytes(data[8:12], "big", signed=False)
if action == 0:
req = decode(ConnectRequest, data)
connection_id = random.getrandbits(32)
self.known_conns.add(connection_id)
return self.transport.sendto(encode(ConnectResponse(0, req.transaction_id, connection_id)), addr)
elif action == 1:
|
def encode_peer(ip_address: str, port: int):
compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), ip_address.split('.'), bytearray())
return compact_ip + port.to_bytes(2, "big", signed=False)
| req = decode(AnnounceRequest, data)
if req.connection_id not in self.known_conns:
resp = encode(ErrorResponse(3, req.transaction_id, b'Connection ID missmatch.\x00'))
else:
compact_address = encode_peer(addr[0], req.port)
if req.event != 3:
self.add_peer(req.info_hash, addr[0], req.port)
elif compact_address in self.peers.get(req.info_hash, []):
self.peers[req.info_hash].remove(compact_address)
peers = [decode(CompactIPv4Peer, peer) for peer in self.peers[req.info_hash]]
resp = encode(AnnounceResponse(1, req.transaction_id, 1700, 0, len(peers), peers))
return self.transport.sendto(resp, addr) | conditional_block |
tracker.py | import random
import socket
import string
import struct
import asyncio
import logging
import time
import ipaddress
from collections import namedtuple
from functools import reduce
from typing import Optional
from lbry.dht.node import get_kademlia_peers_from_hosts
from lbry.utils import resolve_host, async_timed_cache, cache_concurrent
from lbry.wallet.stream import StreamController
from lbry import version
log = logging.getLogger(__name__)
CONNECTION_EXPIRES_AFTER_SECONDS = 50
PREFIX = 'LB' # todo: PR BEP20 to add ourselves
DEFAULT_TIMEOUT_SECONDS = 10.0
DEFAULT_CONCURRENCY_LIMIT = 100
# see: http://bittorrent.org/beps/bep_0015.html and http://xbtt.sourceforge.net/udp_tracker_protocol.html
ConnectRequest = namedtuple("ConnectRequest", ["connection_id", "action", "transaction_id"])
ConnectResponse = namedtuple("ConnectResponse", ["action", "transaction_id", "connection_id"])
AnnounceRequest = namedtuple("AnnounceRequest",
["connection_id", "action", "transaction_id", "info_hash", "peer_id", "downloaded", "left",
"uploaded", "event", "ip_addr", "key", "num_want", "port"])
AnnounceResponse = namedtuple("AnnounceResponse",
["action", "transaction_id", "interval", "leechers", "seeders", "peers"])
CompactIPv4Peer = namedtuple("CompactPeer", ["address", "port"])
ScrapeRequest = namedtuple("ScrapeRequest", ["connection_id", "action", "transaction_id", "infohashes"])
ScrapeResponse = namedtuple("ScrapeResponse", ["action", "transaction_id", "items"])
ScrapeResponseItem = namedtuple("ScrapeResponseItem", ["seeders", "completed", "leechers"])
ErrorResponse = namedtuple("ErrorResponse", ["action", "transaction_id", "message"])
structs = {
ConnectRequest: struct.Struct(">QII"),
ConnectResponse: struct.Struct(">IIQ"),
AnnounceRequest: struct.Struct(">QII20s20sQQQIIIiH"),
AnnounceResponse: struct.Struct(">IIIII"),
CompactIPv4Peer: struct.Struct(">IH"),
ScrapeRequest: struct.Struct(">QII"),
ScrapeResponse: struct.Struct(">II"),
ScrapeResponseItem: struct.Struct(">III"),
ErrorResponse: struct.Struct(">II")
}
def decode(cls, data, offset=0):
decoder = structs[cls]
if cls is AnnounceResponse:
return AnnounceResponse(*decoder.unpack_from(data, offset),
peers=[decode(CompactIPv4Peer, data, index) for index in range(20, len(data), 6)])
elif cls is ScrapeResponse:
return ScrapeResponse(*decoder.unpack_from(data, offset),
items=[decode(ScrapeResponseItem, data, index) for index in range(8, len(data), 12)])
elif cls is ErrorResponse:
return ErrorResponse(*decoder.unpack_from(data, offset), data[decoder.size:])
return cls(*decoder.unpack_from(data, offset))
def encode(obj):
if isinstance(obj, ScrapeRequest):
return structs[ScrapeRequest].pack(*obj[:-1]) + b''.join(obj.infohashes)
elif isinstance(obj, ErrorResponse):
return structs[ErrorResponse].pack(*obj[:-1]) + obj.message
elif isinstance(obj, AnnounceResponse):
return structs[AnnounceResponse].pack(*obj[:-1]) + b''.join([encode(peer) for peer in obj.peers])
return structs[type(obj)].pack(*obj)
def make_peer_id(random_part: Optional[str] = None) -> bytes:
# see https://wiki.theory.org/BitTorrentSpecification#peer_id and https://www.bittorrent.org/beps/bep_0020.html
# not to confuse with node id; peer id identifies uniquely the software, version and instance
random_part = random_part or ''.join(random.choice(string.ascii_letters) for _ in range(20))
return f"{PREFIX}-{'-'.join(map(str, version))}-{random_part}"[:20].encode()
class UDPTrackerClientProtocol(asyncio.DatagramProtocol):
def __init__(self, timeout: float = DEFAULT_TIMEOUT_SECONDS):
self.transport = None
self.data_queue = {}
self.timeout = timeout
self.semaphore = asyncio.Semaphore(DEFAULT_CONCURRENCY_LIMIT)
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
async def request(self, obj, tracker_ip, tracker_port):
self.data_queue[obj.transaction_id] = asyncio.get_running_loop().create_future()
try:
async with self.semaphore:
self.transport.sendto(encode(obj), (tracker_ip, tracker_port))
return await asyncio.wait_for(self.data_queue[obj.transaction_id], self.timeout)
finally:
self.data_queue.pop(obj.transaction_id, None)
async def connect(self, tracker_ip, tracker_port):
transaction_id = random.getrandbits(32)
return decode(ConnectResponse,
await self.request(ConnectRequest(0x41727101980, 0, transaction_id), tracker_ip, tracker_port))
@cache_concurrent
@async_timed_cache(CONNECTION_EXPIRES_AFTER_SECONDS)
async def ensure_connection_id(self, peer_id, tracker_ip, tracker_port):
# peer_id is just to ensure cache coherency
return (await self.connect(tracker_ip, tracker_port)).connection_id
async def announce(self, info_hash, peer_id, port, tracker_ip, tracker_port, stopped=False):
connection_id = await self.ensure_connection_id(peer_id, tracker_ip, tracker_port)
# this should make the key deterministic but unique per info hash + peer id
key = int.from_bytes(info_hash[:4], "big") ^ int.from_bytes(peer_id[:4], "big") ^ port
transaction_id = random.getrandbits(32)
req = AnnounceRequest(
connection_id, 1, transaction_id, info_hash, peer_id, 0, 0, 0, 3 if stopped else 1, 0, key, -1, port)
return decode(AnnounceResponse, await self.request(req, tracker_ip, tracker_port))
async def scrape(self, infohashes, tracker_ip, tracker_port, connection_id=None):
connection_id = await self.ensure_connection_id(None, tracker_ip, tracker_port)
transaction_id = random.getrandbits(32)
reply = await self.request(
ScrapeRequest(connection_id, 2, transaction_id, infohashes), tracker_ip, tracker_port)
return decode(ScrapeResponse, reply), connection_id
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 8:
return
transaction_id = int.from_bytes(data[4:8], byteorder="big", signed=False)
if transaction_id in self.data_queue:
if not self.data_queue[transaction_id].done():
if data[3] == 3:
return self.data_queue[transaction_id].set_exception(Exception(decode(ErrorResponse, data).message))
return self.data_queue[transaction_id].set_result(data)
log.debug("unexpected packet (can be a response for a previously timed out request): %s", data.hex())
def connection_lost(self, exc: Exception = None) -> None:
self.transport = None
class TrackerClient:
event_controller = StreamController()
def __init__(self, node_id, announce_port, get_servers, timeout=10.0):
self.client = UDPTrackerClientProtocol(timeout=timeout)
self.transport = None
self.peer_id = make_peer_id(node_id.hex() if node_id else None)
self.announce_port = announce_port
self._get_servers = get_servers
self.results = {} # we can't probe the server before the interval, so we keep the result here until it expires
self.tasks = {}
async def start(self):
self.transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
lambda: self.client, local_addr=("0.0.0.0", 0))
self.event_controller.stream.listen(
lambda request: self.on_hash(request[1], request[2]) if request[0] == 'search' else None)
def stop(self):
while self.tasks:
self.tasks.popitem()[1].cancel()
if self.transport is not None:
self.transport.close()
self.client = None
self.transport = None
self.event_controller.close()
def on_hash(self, info_hash, on_announcement=None):
if info_hash not in self.tasks:
task = asyncio.create_task(self.get_peer_list(info_hash, on_announcement=on_announcement))
task.add_done_callback(lambda *_: self.tasks.pop(info_hash, None))
self.tasks[info_hash] = task
async def announce_many(self, *info_hashes, stopped=False):
await asyncio.gather(
*[self._announce_many(server, info_hashes, stopped=stopped) for server in self._get_servers()],
return_exceptions=True)
async def _announce_many(self, server, info_hashes, stopped=False):
tracker_ip = await resolve_host(*server, 'udp')
still_good_info_hashes = {
info_hash for (info_hash, (next_announcement, _)) in self.results.get(tracker_ip, {}).items()
if time.time() < next_announcement
}
results = await asyncio.gather(
*[self._probe_server(info_hash, tracker_ip, server[1], stopped=stopped)
for info_hash in info_hashes if info_hash not in still_good_info_hashes],
return_exceptions=True)
if results:
errors = sum([1 for result in results if result is None or isinstance(result, Exception)])
log.info("Tracker: finished announcing %d files to %s:%d, %d errors", len(results), *server, errors)
async def get_peer_list(self, info_hash, stopped=False, on_announcement=None, no_port=False):
found = []
probes = [self._probe_server(info_hash, *server, stopped, no_port) for server in self._get_servers()]
for done in asyncio.as_completed(probes):
result = await done
if result is not None:
await asyncio.gather(*filter(asyncio.iscoroutine, [on_announcement(result)] if on_announcement else []))
found.append(result)
return found
async def get_kademlia_peer_list(self, info_hash):
responses = await self.get_peer_list(info_hash, no_port=True)
return await announcement_to_kademlia_peers(*responses)
async def _probe_server(self, info_hash, tracker_host, tracker_port, stopped=False, no_port=False):
result = None
try:
tracker_host = await resolve_host(tracker_host, tracker_port, 'udp')
except socket.error:
log.warning("DNS failure while resolving tracker host: %s, skipping.", tracker_host)
return
self.results.setdefault(tracker_host, {})
if info_hash in self.results[tracker_host]:
next_announcement, result = self.results[tracker_host][info_hash]
if time.time() < next_announcement:
return result
try:
result = await self.client.announce(
info_hash, self.peer_id, 0 if no_port else self.announce_port, tracker_host, tracker_port, stopped)
self.results[tracker_host][info_hash] = (time.time() + result.interval, result)
except asyncio.TimeoutError: # todo: this is UDP, timeout is common, we need a better metric for failures
self.results[tracker_host][info_hash] = (time.time() + 60.0, result)
log.debug("Tracker timed out: %s:%d", tracker_host, tracker_port)
return None
log.debug("Announced: %s found %d peers for %s", tracker_host, len(result.peers), info_hash.hex()[:8])
return result
def enqueue_tracker_search(info_hash: bytes, peer_q: asyncio.Queue):
async def on_announcement(announcement: AnnounceResponse):
peers = await announcement_to_kademlia_peers(announcement)
log.info("Found %d peers from tracker for %s", len(peers), info_hash.hex()[:8])
peer_q.put_nowait(peers)
TrackerClient.event_controller.add(('search', info_hash, on_announcement))
def announcement_to_kademlia_peers(*announcements: AnnounceResponse):
|
class UDPTrackerServerProtocol(asyncio.DatagramProtocol): # for testing. Not suitable for production
def __init__(self):
self.transport = None
self.known_conns = set()
self.peers = {}
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
def add_peer(self, info_hash, ip_address: str, port: int):
self.peers.setdefault(info_hash, [])
self.peers[info_hash].append(encode_peer(ip_address, port))
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 16:
return
action = int.from_bytes(data[8:12], "big", signed=False)
if action == 0:
req = decode(ConnectRequest, data)
connection_id = random.getrandbits(32)
self.known_conns.add(connection_id)
return self.transport.sendto(encode(ConnectResponse(0, req.transaction_id, connection_id)), addr)
elif action == 1:
req = decode(AnnounceRequest, data)
if req.connection_id not in self.known_conns:
resp = encode(ErrorResponse(3, req.transaction_id, b'Connection ID missmatch.\x00'))
else:
compact_address = encode_peer(addr[0], req.port)
if req.event != 3:
self.add_peer(req.info_hash, addr[0], req.port)
elif compact_address in self.peers.get(req.info_hash, []):
self.peers[req.info_hash].remove(compact_address)
peers = [decode(CompactIPv4Peer, peer) for peer in self.peers[req.info_hash]]
resp = encode(AnnounceResponse(1, req.transaction_id, 1700, 0, len(peers), peers))
return self.transport.sendto(resp, addr)
def encode_peer(ip_address: str, port: int):
compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), ip_address.split('.'), bytearray())
return compact_ip + port.to_bytes(2, "big", signed=False)
| peers = [
(str(ipaddress.ip_address(peer.address)), peer.port)
for announcement in announcements for peer in announcement.peers if peer.port > 1024 # no privileged or 0
]
return get_kademlia_peers_from_hosts(peers) | identifier_body |
tracker.py | import random
import socket
import string
import struct
import asyncio
import logging
import time
import ipaddress
from collections import namedtuple
from functools import reduce
from typing import Optional
from lbry.dht.node import get_kademlia_peers_from_hosts
from lbry.utils import resolve_host, async_timed_cache, cache_concurrent
from lbry.wallet.stream import StreamController
from lbry import version
log = logging.getLogger(__name__)
CONNECTION_EXPIRES_AFTER_SECONDS = 50
PREFIX = 'LB' # todo: PR BEP20 to add ourselves
DEFAULT_TIMEOUT_SECONDS = 10.0
DEFAULT_CONCURRENCY_LIMIT = 100
# see: http://bittorrent.org/beps/bep_0015.html and http://xbtt.sourceforge.net/udp_tracker_protocol.html
ConnectRequest = namedtuple("ConnectRequest", ["connection_id", "action", "transaction_id"])
ConnectResponse = namedtuple("ConnectResponse", ["action", "transaction_id", "connection_id"])
AnnounceRequest = namedtuple("AnnounceRequest",
["connection_id", "action", "transaction_id", "info_hash", "peer_id", "downloaded", "left",
"uploaded", "event", "ip_addr", "key", "num_want", "port"])
AnnounceResponse = namedtuple("AnnounceResponse",
["action", "transaction_id", "interval", "leechers", "seeders", "peers"])
CompactIPv4Peer = namedtuple("CompactPeer", ["address", "port"])
ScrapeRequest = namedtuple("ScrapeRequest", ["connection_id", "action", "transaction_id", "infohashes"])
ScrapeResponse = namedtuple("ScrapeResponse", ["action", "transaction_id", "items"])
ScrapeResponseItem = namedtuple("ScrapeResponseItem", ["seeders", "completed", "leechers"])
ErrorResponse = namedtuple("ErrorResponse", ["action", "transaction_id", "message"])
structs = {
ConnectRequest: struct.Struct(">QII"),
ConnectResponse: struct.Struct(">IIQ"),
AnnounceRequest: struct.Struct(">QII20s20sQQQIIIiH"),
AnnounceResponse: struct.Struct(">IIIII"),
CompactIPv4Peer: struct.Struct(">IH"),
ScrapeRequest: struct.Struct(">QII"),
ScrapeResponse: struct.Struct(">II"),
ScrapeResponseItem: struct.Struct(">III"),
ErrorResponse: struct.Struct(">II")
}
def decode(cls, data, offset=0):
decoder = structs[cls]
if cls is AnnounceResponse:
return AnnounceResponse(*decoder.unpack_from(data, offset),
peers=[decode(CompactIPv4Peer, data, index) for index in range(20, len(data), 6)])
elif cls is ScrapeResponse:
return ScrapeResponse(*decoder.unpack_from(data, offset),
items=[decode(ScrapeResponseItem, data, index) for index in range(8, len(data), 12)])
elif cls is ErrorResponse:
return ErrorResponse(*decoder.unpack_from(data, offset), data[decoder.size:])
return cls(*decoder.unpack_from(data, offset))
def encode(obj):
if isinstance(obj, ScrapeRequest):
return structs[ScrapeRequest].pack(*obj[:-1]) + b''.join(obj.infohashes)
elif isinstance(obj, ErrorResponse):
return structs[ErrorResponse].pack(*obj[:-1]) + obj.message
elif isinstance(obj, AnnounceResponse):
return structs[AnnounceResponse].pack(*obj[:-1]) + b''.join([encode(peer) for peer in obj.peers])
return structs[type(obj)].pack(*obj)
def make_peer_id(random_part: Optional[str] = None) -> bytes:
# see https://wiki.theory.org/BitTorrentSpecification#peer_id and https://www.bittorrent.org/beps/bep_0020.html
# not to confuse with node id; peer id identifies uniquely the software, version and instance
random_part = random_part or ''.join(random.choice(string.ascii_letters) for _ in range(20))
return f"{PREFIX}-{'-'.join(map(str, version))}-{random_part}"[:20].encode()
class UDPTrackerClientProtocol(asyncio.DatagramProtocol):
def __init__(self, timeout: float = DEFAULT_TIMEOUT_SECONDS):
self.transport = None
self.data_queue = {}
self.timeout = timeout
self.semaphore = asyncio.Semaphore(DEFAULT_CONCURRENCY_LIMIT)
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
async def request(self, obj, tracker_ip, tracker_port):
self.data_queue[obj.transaction_id] = asyncio.get_running_loop().create_future()
try:
async with self.semaphore:
self.transport.sendto(encode(obj), (tracker_ip, tracker_port))
return await asyncio.wait_for(self.data_queue[obj.transaction_id], self.timeout)
finally:
self.data_queue.pop(obj.transaction_id, None)
async def connect(self, tracker_ip, tracker_port):
transaction_id = random.getrandbits(32)
return decode(ConnectResponse,
await self.request(ConnectRequest(0x41727101980, 0, transaction_id), tracker_ip, tracker_port))
@cache_concurrent
@async_timed_cache(CONNECTION_EXPIRES_AFTER_SECONDS)
async def ensure_connection_id(self, peer_id, tracker_ip, tracker_port):
# peer_id is just to ensure cache coherency
return (await self.connect(tracker_ip, tracker_port)).connection_id
async def announce(self, info_hash, peer_id, port, tracker_ip, tracker_port, stopped=False):
connection_id = await self.ensure_connection_id(peer_id, tracker_ip, tracker_port)
# this should make the key deterministic but unique per info hash + peer id
key = int.from_bytes(info_hash[:4], "big") ^ int.from_bytes(peer_id[:4], "big") ^ port
transaction_id = random.getrandbits(32)
req = AnnounceRequest(
connection_id, 1, transaction_id, info_hash, peer_id, 0, 0, 0, 3 if stopped else 1, 0, key, -1, port)
return decode(AnnounceResponse, await self.request(req, tracker_ip, tracker_port))
async def scrape(self, infohashes, tracker_ip, tracker_port, connection_id=None):
connection_id = await self.ensure_connection_id(None, tracker_ip, tracker_port)
transaction_id = random.getrandbits(32)
reply = await self.request(
ScrapeRequest(connection_id, 2, transaction_id, infohashes), tracker_ip, tracker_port)
return decode(ScrapeResponse, reply), connection_id
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 8:
return
transaction_id = int.from_bytes(data[4:8], byteorder="big", signed=False)
if transaction_id in self.data_queue:
if not self.data_queue[transaction_id].done():
if data[3] == 3:
return self.data_queue[transaction_id].set_exception(Exception(decode(ErrorResponse, data).message))
return self.data_queue[transaction_id].set_result(data)
log.debug("unexpected packet (can be a response for a previously timed out request): %s", data.hex())
def connection_lost(self, exc: Exception = None) -> None:
self.transport = None
class TrackerClient:
event_controller = StreamController()
def __init__(self, node_id, announce_port, get_servers, timeout=10.0):
self.client = UDPTrackerClientProtocol(timeout=timeout)
self.transport = None
self.peer_id = make_peer_id(node_id.hex() if node_id else None)
self.announce_port = announce_port
self._get_servers = get_servers
self.results = {} # we can't probe the server before the interval, so we keep the result here until it expires
self.tasks = {}
async def start(self):
self.transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
lambda: self.client, local_addr=("0.0.0.0", 0))
self.event_controller.stream.listen(
lambda request: self.on_hash(request[1], request[2]) if request[0] == 'search' else None)
def stop(self):
while self.tasks:
self.tasks.popitem()[1].cancel()
if self.transport is not None:
self.transport.close()
self.client = None
self.transport = None
self.event_controller.close()
def on_hash(self, info_hash, on_announcement=None):
if info_hash not in self.tasks:
task = asyncio.create_task(self.get_peer_list(info_hash, on_announcement=on_announcement))
task.add_done_callback(lambda *_: self.tasks.pop(info_hash, None))
self.tasks[info_hash] = task
async def announce_many(self, *info_hashes, stopped=False):
await asyncio.gather(
*[self._announce_many(server, info_hashes, stopped=stopped) for server in self._get_servers()],
return_exceptions=True) | info_hash for (info_hash, (next_announcement, _)) in self.results.get(tracker_ip, {}).items()
if time.time() < next_announcement
}
results = await asyncio.gather(
*[self._probe_server(info_hash, tracker_ip, server[1], stopped=stopped)
for info_hash in info_hashes if info_hash not in still_good_info_hashes],
return_exceptions=True)
if results:
errors = sum([1 for result in results if result is None or isinstance(result, Exception)])
log.info("Tracker: finished announcing %d files to %s:%d, %d errors", len(results), *server, errors)
async def get_peer_list(self, info_hash, stopped=False, on_announcement=None, no_port=False):
found = []
probes = [self._probe_server(info_hash, *server, stopped, no_port) for server in self._get_servers()]
for done in asyncio.as_completed(probes):
result = await done
if result is not None:
await asyncio.gather(*filter(asyncio.iscoroutine, [on_announcement(result)] if on_announcement else []))
found.append(result)
return found
async def get_kademlia_peer_list(self, info_hash):
responses = await self.get_peer_list(info_hash, no_port=True)
return await announcement_to_kademlia_peers(*responses)
async def _probe_server(self, info_hash, tracker_host, tracker_port, stopped=False, no_port=False):
result = None
try:
tracker_host = await resolve_host(tracker_host, tracker_port, 'udp')
except socket.error:
log.warning("DNS failure while resolving tracker host: %s, skipping.", tracker_host)
return
self.results.setdefault(tracker_host, {})
if info_hash in self.results[tracker_host]:
next_announcement, result = self.results[tracker_host][info_hash]
if time.time() < next_announcement:
return result
try:
result = await self.client.announce(
info_hash, self.peer_id, 0 if no_port else self.announce_port, tracker_host, tracker_port, stopped)
self.results[tracker_host][info_hash] = (time.time() + result.interval, result)
except asyncio.TimeoutError: # todo: this is UDP, timeout is common, we need a better metric for failures
self.results[tracker_host][info_hash] = (time.time() + 60.0, result)
log.debug("Tracker timed out: %s:%d", tracker_host, tracker_port)
return None
log.debug("Announced: %s found %d peers for %s", tracker_host, len(result.peers), info_hash.hex()[:8])
return result
def enqueue_tracker_search(info_hash: bytes, peer_q: asyncio.Queue):
async def on_announcement(announcement: AnnounceResponse):
peers = await announcement_to_kademlia_peers(announcement)
log.info("Found %d peers from tracker for %s", len(peers), info_hash.hex()[:8])
peer_q.put_nowait(peers)
TrackerClient.event_controller.add(('search', info_hash, on_announcement))
def announcement_to_kademlia_peers(*announcements: AnnounceResponse):
peers = [
(str(ipaddress.ip_address(peer.address)), peer.port)
for announcement in announcements for peer in announcement.peers if peer.port > 1024 # no privileged or 0
]
return get_kademlia_peers_from_hosts(peers)
class UDPTrackerServerProtocol(asyncio.DatagramProtocol): # for testing. Not suitable for production
def __init__(self):
self.transport = None
self.known_conns = set()
self.peers = {}
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
def add_peer(self, info_hash, ip_address: str, port: int):
self.peers.setdefault(info_hash, [])
self.peers[info_hash].append(encode_peer(ip_address, port))
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 16:
return
action = int.from_bytes(data[8:12], "big", signed=False)
if action == 0:
req = decode(ConnectRequest, data)
connection_id = random.getrandbits(32)
self.known_conns.add(connection_id)
return self.transport.sendto(encode(ConnectResponse(0, req.transaction_id, connection_id)), addr)
elif action == 1:
req = decode(AnnounceRequest, data)
if req.connection_id not in self.known_conns:
resp = encode(ErrorResponse(3, req.transaction_id, b'Connection ID missmatch.\x00'))
else:
compact_address = encode_peer(addr[0], req.port)
if req.event != 3:
self.add_peer(req.info_hash, addr[0], req.port)
elif compact_address in self.peers.get(req.info_hash, []):
self.peers[req.info_hash].remove(compact_address)
peers = [decode(CompactIPv4Peer, peer) for peer in self.peers[req.info_hash]]
resp = encode(AnnounceResponse(1, req.transaction_id, 1700, 0, len(peers), peers))
return self.transport.sendto(resp, addr)
def encode_peer(ip_address: str, port: int):
compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), ip_address.split('.'), bytearray())
return compact_ip + port.to_bytes(2, "big", signed=False) |
async def _announce_many(self, server, info_hashes, stopped=False):
tracker_ip = await resolve_host(*server, 'udp')
still_good_info_hashes = { | random_line_split |
tracker.py | import random
import socket
import string
import struct
import asyncio
import logging
import time
import ipaddress
from collections import namedtuple
from functools import reduce
from typing import Optional
from lbry.dht.node import get_kademlia_peers_from_hosts
from lbry.utils import resolve_host, async_timed_cache, cache_concurrent
from lbry.wallet.stream import StreamController
from lbry import version
log = logging.getLogger(__name__)
CONNECTION_EXPIRES_AFTER_SECONDS = 50
PREFIX = 'LB' # todo: PR BEP20 to add ourselves
DEFAULT_TIMEOUT_SECONDS = 10.0
DEFAULT_CONCURRENCY_LIMIT = 100
# see: http://bittorrent.org/beps/bep_0015.html and http://xbtt.sourceforge.net/udp_tracker_protocol.html
ConnectRequest = namedtuple("ConnectRequest", ["connection_id", "action", "transaction_id"])
ConnectResponse = namedtuple("ConnectResponse", ["action", "transaction_id", "connection_id"])
AnnounceRequest = namedtuple("AnnounceRequest",
["connection_id", "action", "transaction_id", "info_hash", "peer_id", "downloaded", "left",
"uploaded", "event", "ip_addr", "key", "num_want", "port"])
AnnounceResponse = namedtuple("AnnounceResponse",
["action", "transaction_id", "interval", "leechers", "seeders", "peers"])
CompactIPv4Peer = namedtuple("CompactPeer", ["address", "port"])
ScrapeRequest = namedtuple("ScrapeRequest", ["connection_id", "action", "transaction_id", "infohashes"])
ScrapeResponse = namedtuple("ScrapeResponse", ["action", "transaction_id", "items"])
ScrapeResponseItem = namedtuple("ScrapeResponseItem", ["seeders", "completed", "leechers"])
ErrorResponse = namedtuple("ErrorResponse", ["action", "transaction_id", "message"])
structs = {
ConnectRequest: struct.Struct(">QII"),
ConnectResponse: struct.Struct(">IIQ"),
AnnounceRequest: struct.Struct(">QII20s20sQQQIIIiH"),
AnnounceResponse: struct.Struct(">IIIII"),
CompactIPv4Peer: struct.Struct(">IH"),
ScrapeRequest: struct.Struct(">QII"),
ScrapeResponse: struct.Struct(">II"),
ScrapeResponseItem: struct.Struct(">III"),
ErrorResponse: struct.Struct(">II")
}
def decode(cls, data, offset=0):
decoder = structs[cls]
if cls is AnnounceResponse:
return AnnounceResponse(*decoder.unpack_from(data, offset),
peers=[decode(CompactIPv4Peer, data, index) for index in range(20, len(data), 6)])
elif cls is ScrapeResponse:
return ScrapeResponse(*decoder.unpack_from(data, offset),
items=[decode(ScrapeResponseItem, data, index) for index in range(8, len(data), 12)])
elif cls is ErrorResponse:
return ErrorResponse(*decoder.unpack_from(data, offset), data[decoder.size:])
return cls(*decoder.unpack_from(data, offset))
def encode(obj):
if isinstance(obj, ScrapeRequest):
return structs[ScrapeRequest].pack(*obj[:-1]) + b''.join(obj.infohashes)
elif isinstance(obj, ErrorResponse):
return structs[ErrorResponse].pack(*obj[:-1]) + obj.message
elif isinstance(obj, AnnounceResponse):
return structs[AnnounceResponse].pack(*obj[:-1]) + b''.join([encode(peer) for peer in obj.peers])
return structs[type(obj)].pack(*obj)
def make_peer_id(random_part: Optional[str] = None) -> bytes:
# see https://wiki.theory.org/BitTorrentSpecification#peer_id and https://www.bittorrent.org/beps/bep_0020.html
# not to confuse with node id; peer id identifies uniquely the software, version and instance
random_part = random_part or ''.join(random.choice(string.ascii_letters) for _ in range(20))
return f"{PREFIX}-{'-'.join(map(str, version))}-{random_part}"[:20].encode()
class UDPTrackerClientProtocol(asyncio.DatagramProtocol):
def __init__(self, timeout: float = DEFAULT_TIMEOUT_SECONDS):
self.transport = None
self.data_queue = {}
self.timeout = timeout
self.semaphore = asyncio.Semaphore(DEFAULT_CONCURRENCY_LIMIT)
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
async def request(self, obj, tracker_ip, tracker_port):
self.data_queue[obj.transaction_id] = asyncio.get_running_loop().create_future()
try:
async with self.semaphore:
self.transport.sendto(encode(obj), (tracker_ip, tracker_port))
return await asyncio.wait_for(self.data_queue[obj.transaction_id], self.timeout)
finally:
self.data_queue.pop(obj.transaction_id, None)
async def connect(self, tracker_ip, tracker_port):
transaction_id = random.getrandbits(32)
return decode(ConnectResponse,
await self.request(ConnectRequest(0x41727101980, 0, transaction_id), tracker_ip, tracker_port))
@cache_concurrent
@async_timed_cache(CONNECTION_EXPIRES_AFTER_SECONDS)
async def ensure_connection_id(self, peer_id, tracker_ip, tracker_port):
# peer_id is just to ensure cache coherency
return (await self.connect(tracker_ip, tracker_port)).connection_id
async def announce(self, info_hash, peer_id, port, tracker_ip, tracker_port, stopped=False):
connection_id = await self.ensure_connection_id(peer_id, tracker_ip, tracker_port)
# this should make the key deterministic but unique per info hash + peer id
key = int.from_bytes(info_hash[:4], "big") ^ int.from_bytes(peer_id[:4], "big") ^ port
transaction_id = random.getrandbits(32)
req = AnnounceRequest(
connection_id, 1, transaction_id, info_hash, peer_id, 0, 0, 0, 3 if stopped else 1, 0, key, -1, port)
return decode(AnnounceResponse, await self.request(req, tracker_ip, tracker_port))
async def scrape(self, infohashes, tracker_ip, tracker_port, connection_id=None):
connection_id = await self.ensure_connection_id(None, tracker_ip, tracker_port)
transaction_id = random.getrandbits(32)
reply = await self.request(
ScrapeRequest(connection_id, 2, transaction_id, infohashes), tracker_ip, tracker_port)
return decode(ScrapeResponse, reply), connection_id
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 8:
return
transaction_id = int.from_bytes(data[4:8], byteorder="big", signed=False)
if transaction_id in self.data_queue:
if not self.data_queue[transaction_id].done():
if data[3] == 3:
return self.data_queue[transaction_id].set_exception(Exception(decode(ErrorResponse, data).message))
return self.data_queue[transaction_id].set_result(data)
log.debug("unexpected packet (can be a response for a previously timed out request): %s", data.hex())
def connection_lost(self, exc: Exception = None) -> None:
self.transport = None
class TrackerClient:
event_controller = StreamController()
def __init__(self, node_id, announce_port, get_servers, timeout=10.0):
self.client = UDPTrackerClientProtocol(timeout=timeout)
self.transport = None
self.peer_id = make_peer_id(node_id.hex() if node_id else None)
self.announce_port = announce_port
self._get_servers = get_servers
self.results = {} # we can't probe the server before the interval, so we keep the result here until it expires
self.tasks = {}
async def start(self):
self.transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
lambda: self.client, local_addr=("0.0.0.0", 0))
self.event_controller.stream.listen(
lambda request: self.on_hash(request[1], request[2]) if request[0] == 'search' else None)
def stop(self):
while self.tasks:
self.tasks.popitem()[1].cancel()
if self.transport is not None:
self.transport.close()
self.client = None
self.transport = None
self.event_controller.close()
def on_hash(self, info_hash, on_announcement=None):
if info_hash not in self.tasks:
task = asyncio.create_task(self.get_peer_list(info_hash, on_announcement=on_announcement))
task.add_done_callback(lambda *_: self.tasks.pop(info_hash, None))
self.tasks[info_hash] = task
async def announce_many(self, *info_hashes, stopped=False):
await asyncio.gather(
*[self._announce_many(server, info_hashes, stopped=stopped) for server in self._get_servers()],
return_exceptions=True)
async def _announce_many(self, server, info_hashes, stopped=False):
tracker_ip = await resolve_host(*server, 'udp')
still_good_info_hashes = {
info_hash for (info_hash, (next_announcement, _)) in self.results.get(tracker_ip, {}).items()
if time.time() < next_announcement
}
results = await asyncio.gather(
*[self._probe_server(info_hash, tracker_ip, server[1], stopped=stopped)
for info_hash in info_hashes if info_hash not in still_good_info_hashes],
return_exceptions=True)
if results:
errors = sum([1 for result in results if result is None or isinstance(result, Exception)])
log.info("Tracker: finished announcing %d files to %s:%d, %d errors", len(results), *server, errors)
async def get_peer_list(self, info_hash, stopped=False, on_announcement=None, no_port=False):
found = []
probes = [self._probe_server(info_hash, *server, stopped, no_port) for server in self._get_servers()]
for done in asyncio.as_completed(probes):
result = await done
if result is not None:
await asyncio.gather(*filter(asyncio.iscoroutine, [on_announcement(result)] if on_announcement else []))
found.append(result)
return found
async def get_kademlia_peer_list(self, info_hash):
responses = await self.get_peer_list(info_hash, no_port=True)
return await announcement_to_kademlia_peers(*responses)
async def _probe_server(self, info_hash, tracker_host, tracker_port, stopped=False, no_port=False):
result = None
try:
tracker_host = await resolve_host(tracker_host, tracker_port, 'udp')
except socket.error:
log.warning("DNS failure while resolving tracker host: %s, skipping.", tracker_host)
return
self.results.setdefault(tracker_host, {})
if info_hash in self.results[tracker_host]:
next_announcement, result = self.results[tracker_host][info_hash]
if time.time() < next_announcement:
return result
try:
result = await self.client.announce(
info_hash, self.peer_id, 0 if no_port else self.announce_port, tracker_host, tracker_port, stopped)
self.results[tracker_host][info_hash] = (time.time() + result.interval, result)
except asyncio.TimeoutError: # todo: this is UDP, timeout is common, we need a better metric for failures
self.results[tracker_host][info_hash] = (time.time() + 60.0, result)
log.debug("Tracker timed out: %s:%d", tracker_host, tracker_port)
return None
log.debug("Announced: %s found %d peers for %s", tracker_host, len(result.peers), info_hash.hex()[:8])
return result
def enqueue_tracker_search(info_hash: bytes, peer_q: asyncio.Queue):
async def on_announcement(announcement: AnnounceResponse):
peers = await announcement_to_kademlia_peers(announcement)
log.info("Found %d peers from tracker for %s", len(peers), info_hash.hex()[:8])
peer_q.put_nowait(peers)
TrackerClient.event_controller.add(('search', info_hash, on_announcement))
def | (*announcements: AnnounceResponse):
peers = [
(str(ipaddress.ip_address(peer.address)), peer.port)
for announcement in announcements for peer in announcement.peers if peer.port > 1024 # no privileged or 0
]
return get_kademlia_peers_from_hosts(peers)
class UDPTrackerServerProtocol(asyncio.DatagramProtocol): # for testing. Not suitable for production
def __init__(self):
self.transport = None
self.known_conns = set()
self.peers = {}
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
def add_peer(self, info_hash, ip_address: str, port: int):
self.peers.setdefault(info_hash, [])
self.peers[info_hash].append(encode_peer(ip_address, port))
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 16:
return
action = int.from_bytes(data[8:12], "big", signed=False)
if action == 0:
req = decode(ConnectRequest, data)
connection_id = random.getrandbits(32)
self.known_conns.add(connection_id)
return self.transport.sendto(encode(ConnectResponse(0, req.transaction_id, connection_id)), addr)
elif action == 1:
req = decode(AnnounceRequest, data)
if req.connection_id not in self.known_conns:
resp = encode(ErrorResponse(3, req.transaction_id, b'Connection ID missmatch.\x00'))
else:
compact_address = encode_peer(addr[0], req.port)
if req.event != 3:
self.add_peer(req.info_hash, addr[0], req.port)
elif compact_address in self.peers.get(req.info_hash, []):
self.peers[req.info_hash].remove(compact_address)
peers = [decode(CompactIPv4Peer, peer) for peer in self.peers[req.info_hash]]
resp = encode(AnnounceResponse(1, req.transaction_id, 1700, 0, len(peers), peers))
return self.transport.sendto(resp, addr)
def encode_peer(ip_address: str, port: int):
compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), ip_address.split('.'), bytearray())
return compact_ip + port.to_bytes(2, "big", signed=False)
| announcement_to_kademlia_peers | identifier_name |
endpoints_calculator.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package syncers
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/ingress-gce/pkg/neg/metrics"
"k8s.io/ingress-gce/pkg/neg/metrics/metricscollector"
"k8s.io/ingress-gce/pkg/neg/types"
negtypes "k8s.io/ingress-gce/pkg/neg/types"
"k8s.io/ingress-gce/pkg/network"
"k8s.io/ingress-gce/pkg/utils"
"k8s.io/klog/v2"
)
// LocalL4ILBEndpointGetter implements the NetworkEndpointsCalculator interface.
// It exposes methods to calculate Network endpoints for GCE_VM_IP NEGs when the service
// uses "ExternalTrafficPolicy: Local" mode.
// In this mode, the endpoints of the NEG are calculated by listing the nodes that host the service endpoints(pods)
// for the given service. These candidate nodes picked as is, if the count is less than the subset size limit(250).
// Otherwise, a subset of nodes is selected.
// In a cluster with nodes node1... node 50. If nodes node10 to node 45 run the pods for a given ILB service, all these
// nodes - node10, node 11 ... node45 will be part of the subset.
type LocalL4ILBEndpointsCalculator struct {
nodeLister listers.NodeLister
zoneGetter types.ZoneGetter
subsetSizeLimit int
svcId string
logger klog.Logger
networkInfo *network.NetworkInfo
}
func NewLocalL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string, logger klog.Logger, networkInfo *network.NetworkInfo) *LocalL4ILBEndpointsCalculator {
return &LocalL4ILBEndpointsCalculator{
nodeLister: nodeLister,
zoneGetter: zoneGetter,
subsetSizeLimit: maxSubsetSizeLocal,
svcId: svcId,
logger: logger.WithName("LocalL4ILBEndpointsCalculator"),
networkInfo: networkInfo,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *LocalL4ILBEndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L4LocalMode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *LocalL4ILBEndpointsCalculator) CalculateEndpoints(eds []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
// List all nodes where the service endpoints are running. Get a subset of the desired count.
zoneNodeMap := make(map[string][]*v1.Node)
processedNodes := sets.String{}
numEndpoints := 0
candidateNodeCheck := utils.CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes
for _, ed := range eds {
for _, addr := range ed.Addresses {
if addr.NodeName == nil {
l.logger.V(2).Info("Address inside Endpoints does not have an associated node. Skipping", "address", addr.Addresses, "endpoints", klog.KRef(ed.Meta.Namespace, ed.Meta.Name))
continue
}
if addr.TargetRef == nil {
l.logger.V(2).Info("Address inside Endpoints does not have an associated pod. Skipping", "address", addr.Addresses, "endpoints", klog.KRef(ed.Meta.Namespace, ed.Meta.Name))
continue
}
numEndpoints++
if processedNodes.Has(*addr.NodeName) {
continue
}
processedNodes.Insert(*addr.NodeName)
node, err := l.nodeLister.Get(*addr.NodeName)
if err != nil {
l.logger.Error(err, "failed to retrieve node object", "nodeName", *addr.NodeName)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
if ok := candidateNodeCheck(node); !ok {
l.logger.Info("Dropping Node from subset since it is not a valid LB candidate", "nodeName", node.Name)
continue
}
if !l.networkInfo.IsNodeConnected(node) {
l.logger.Info("Node not connected to service network", "nodeName", node.Name, "network", l.networkInfo.K8sNetwork)
continue
}
zone, err := l.zoneGetter.GetZoneForNode(node.Name)
if err != nil {
l.logger.Error(err, "Unable to find zone for node, skipping", "nodeName", node.Name)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
zoneNodeMap[zone] = append(zoneNodeMap[zone], node)
}
}
if numEndpoints == 0 {
// Not having backends will cause clients to see connection timeout instead of an "ICMP ConnectionRefused".
return nil, nil, 0, nil
}
// Compute the networkEndpoints, with total endpoints count <= l.subsetSizeLimit
klog.V(2).Infof("Got zoneNodeMap as input for service", "zoneNodeMap", nodeMapToString(zoneNodeMap), "serviceID", l.svcId)
subsetMap, err := getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap, l.logger, l.networkInfo)
return subsetMap, nil, 0, err
}
func (l *LocalL4ILBEndpointsCalculator) CalculateEndpointsDegradedMode(_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) {
// this should be the same as CalculateEndpoints for L4 ec
subsetMap, _, _, err := l.CalculateEndpoints(nil, currentMap)
return subsetMap, nil, err
}
func (l *LocalL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
// this should be a no-op for now
return nil
}
// ClusterL4ILBEndpointGetter implements the NetworkEndpointsCalculator interface.
// It exposes methods to calculate Network endpoints for GCE_VM_IP NEGs when the service
// uses "ExternalTrafficPolicy: Cluster" mode This is the default mode.
// In this mode, the endpoints of the NEG are calculated by selecting nodes at random. Up to 25(subset size limit in this
// mode) are selected.
type ClusterL4ILBEndpointsCalculator struct {
// nodeLister is used for listing all the nodes in the cluster when calculating the subset.
nodeLister listers.NodeLister
// zoneGetter looks up the zone for a given node when calculating subsets.
zoneGetter types.ZoneGetter
// subsetSizeLimit is the max value of the subset size in this mode.
subsetSizeLimit int
// svcId is the unique identifier for the service, that is used as a salt when hashing nodenames.
svcId string
networkInfo *network.NetworkInfo
logger klog.Logger
}
func NewClusterL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string, logger klog.Logger, networkInfo *network.NetworkInfo) *ClusterL4ILBEndpointsCalculator {
return &ClusterL4ILBEndpointsCalculator{
nodeLister: nodeLister,
zoneGetter: zoneGetter,
subsetSizeLimit: maxSubsetSizeDefault,
svcId: svcId,
logger: logger.WithName("ClusterL4ILBEndpointsCalculator"),
networkInfo: networkInfo,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *ClusterL4ILBEndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L4ClusterMode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *ClusterL4ILBEndpointsCalculator) CalculateEndpoints(_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
// In this mode, any of the cluster nodes can be part of the subset, whether or not a matching pod runs on it.
nodes, _ := utils.ListWithPredicate(l.nodeLister, utils.CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes)
zoneNodeMap := make(map[string][]*v1.Node)
for _, node := range nodes {
if !l.networkInfo.IsNodeConnected(node) {
l.logger.Info("Node not connected to service network", "nodeName", node.Name, "network", l.networkInfo.K8sNetwork)
continue
}
zone, err := l.zoneGetter.GetZoneForNode(node.Name)
if err != nil {
l.logger.Error(err, "Unable to find zone for node skipping", "nodeName", node.Name)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
zoneNodeMap[zone] = append(zoneNodeMap[zone], node)
}
klog.V(2).Infof("Got zoneNodeMap as input for service", "zoneNodeMap", nodeMapToString(zoneNodeMap), "serviceID", l.svcId)
// Compute the networkEndpoints, with total endpoints <= l.subsetSizeLimit.
subsetMap, err := getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap, l.logger, l.networkInfo)
return subsetMap, nil, 0, err
}
func (l *ClusterL4ILBEndpointsCalculator) | (_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) {
// this should be the same as CalculateEndpoints for L4 ec
subsetMap, _, _, err := l.CalculateEndpoints(nil, currentMap)
return subsetMap, nil, err
}
func (l *ClusterL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
// this should be a no-op for now
return nil
}
// L7EndpointsCalculator implements methods to calculate Network endpoints for VM_IP_PORT NEGs
type L7EndpointsCalculator struct {
zoneGetter types.ZoneGetter
servicePortName string
podLister cache.Indexer
nodeLister cache.Indexer
serviceLister cache.Indexer
syncerKey types.NegSyncerKey
networkEndpointType types.NetworkEndpointType
enableDualStackNEG bool
logger klog.Logger
syncMetricsCollector *metricscollector.SyncerMetrics
}
func NewL7EndpointsCalculator(zoneGetter types.ZoneGetter, podLister, nodeLister, serviceLister cache.Indexer, syncerKey types.NegSyncerKey, logger klog.Logger, enableDualStackNEG bool, syncMetricsCollector *metricscollector.SyncerMetrics) *L7EndpointsCalculator {
return &L7EndpointsCalculator{
zoneGetter: zoneGetter,
servicePortName: syncerKey.PortTuple.Name,
podLister: podLister,
nodeLister: nodeLister,
serviceLister: serviceLister,
syncerKey: syncerKey,
networkEndpointType: syncerKey.NegType,
enableDualStackNEG: enableDualStackNEG,
logger: logger.WithName("L7EndpointsCalculator"),
syncMetricsCollector: syncMetricsCollector,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *L7EndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L7Mode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *L7EndpointsCalculator) CalculateEndpoints(eds []types.EndpointsData, _ map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
result, err := toZoneNetworkEndpointMap(eds, l.zoneGetter, l.podLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG)
if err == nil { // If current calculation ends up in error, we trigger and emit metrics in degraded mode.
l.syncMetricsCollector.UpdateSyncerEPMetrics(l.syncerKey, result.EPCount, result.EPSCount)
}
return result.NetworkEndpointSet, result.EndpointPodMap, result.EPCount[negtypes.Duplicate], err
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *L7EndpointsCalculator) CalculateEndpointsDegradedMode(eds []types.EndpointsData, _ map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) {
result := toZoneNetworkEndpointMapDegradedMode(eds, l.zoneGetter, l.podLister, l.nodeLister, l.serviceLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG)
l.syncMetricsCollector.UpdateSyncerEPMetrics(l.syncerKey, result.EPCount, result.EPSCount)
return result.NetworkEndpointSet, result.EndpointPodMap, nil
}
func nodeMapToString(nodeMap map[string][]*v1.Node) string {
var str []string
for zone, nodeList := range nodeMap {
str = append(str, fmt.Sprintf("Zone %s: %d nodes", zone, len(nodeList)))
}
return strings.Join(str, ",")
}
// ValidateEndpoints checks if endpoint information is correct.
//
// For L7 Endpoint Calculator, it returns error if one of the two checks fails:
// 1. The endpoint count from endpointData doesn't equal to the one from endpointPodMap:
// endpiontPodMap removes the duplicated endpoints, and dupCount stores the number of duplicated it removed
// and we compare the endpoint counts with duplicates
// 2. The endpoint count from endpointData or the one from endpointPodMap is 0
func (l *L7EndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
// Endpoint count from EndpointPodMap
countFromPodMap := len(endpointPodMap) + dupCount
if countFromPodMap == 0 {
l.logger.Info("Detected endpoint count from endpointPodMap going to zero", "endpointPodMap", endpointPodMap)
return fmt.Errorf("%w: Detect endpoint count goes to zero", types.ErrEPCalculationCountZero)
}
// Endpoint count from EndpointData
countFromEndpointData := 0
for _, ed := range endpointData {
countFromEndpointData += len(ed.Addresses)
}
if countFromEndpointData == 0 {
l.logger.Info("Detected endpoint count from endpointData going to zero", "endpointData", endpointData)
return fmt.Errorf("%w: Detect endpoint count goes to zero", types.ErrEPSEndpointCountZero)
}
if countFromEndpointData != countFromPodMap {
l.logger.Info("Detected error when comparing endpoint counts", "countFromEndpointData", countFromEndpointData, "countFromPodMap", countFromPodMap, "endpointData", endpointData, "endpointPodMap", endpointPodMap, "dupCount", dupCount)
return fmt.Errorf("%w: Detect endpoint mismatch, count from endpoint slice=%d, count after calculation=%d", types.ErrEPCountsDiffer, countFromEndpointData, countFromPodMap)
}
return nil
}
| CalculateEndpointsDegradedMode | identifier_name |
endpoints_calculator.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package syncers
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/ingress-gce/pkg/neg/metrics"
"k8s.io/ingress-gce/pkg/neg/metrics/metricscollector"
"k8s.io/ingress-gce/pkg/neg/types"
negtypes "k8s.io/ingress-gce/pkg/neg/types"
"k8s.io/ingress-gce/pkg/network"
"k8s.io/ingress-gce/pkg/utils"
"k8s.io/klog/v2"
)
// LocalL4ILBEndpointGetter implements the NetworkEndpointsCalculator interface.
// It exposes methods to calculate Network endpoints for GCE_VM_IP NEGs when the service
// uses "ExternalTrafficPolicy: Local" mode.
// In this mode, the endpoints of the NEG are calculated by listing the nodes that host the service endpoints(pods)
// for the given service. These candidate nodes picked as is, if the count is less than the subset size limit(250).
// Otherwise, a subset of nodes is selected.
// In a cluster with nodes node1... node 50. If nodes node10 to node 45 run the pods for a given ILB service, all these
// nodes - node10, node 11 ... node45 will be part of the subset.
type LocalL4ILBEndpointsCalculator struct {
nodeLister listers.NodeLister
zoneGetter types.ZoneGetter
subsetSizeLimit int
svcId string
logger klog.Logger
networkInfo *network.NetworkInfo
}
func NewLocalL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string, logger klog.Logger, networkInfo *network.NetworkInfo) *LocalL4ILBEndpointsCalculator {
return &LocalL4ILBEndpointsCalculator{
nodeLister: nodeLister,
zoneGetter: zoneGetter,
subsetSizeLimit: maxSubsetSizeLocal,
svcId: svcId,
logger: logger.WithName("LocalL4ILBEndpointsCalculator"),
networkInfo: networkInfo,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *LocalL4ILBEndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L4LocalMode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *LocalL4ILBEndpointsCalculator) CalculateEndpoints(eds []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
// List all nodes where the service endpoints are running. Get a subset of the desired count.
zoneNodeMap := make(map[string][]*v1.Node)
processedNodes := sets.String{}
numEndpoints := 0
candidateNodeCheck := utils.CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes
for _, ed := range eds {
for _, addr := range ed.Addresses {
if addr.NodeName == nil {
l.logger.V(2).Info("Address inside Endpoints does not have an associated node. Skipping", "address", addr.Addresses, "endpoints", klog.KRef(ed.Meta.Namespace, ed.Meta.Name))
continue
}
if addr.TargetRef == nil {
l.logger.V(2).Info("Address inside Endpoints does not have an associated pod. Skipping", "address", addr.Addresses, "endpoints", klog.KRef(ed.Meta.Namespace, ed.Meta.Name))
continue
}
numEndpoints++
if processedNodes.Has(*addr.NodeName) {
continue
}
processedNodes.Insert(*addr.NodeName)
node, err := l.nodeLister.Get(*addr.NodeName)
if err != nil {
l.logger.Error(err, "failed to retrieve node object", "nodeName", *addr.NodeName)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
if ok := candidateNodeCheck(node); !ok {
l.logger.Info("Dropping Node from subset since it is not a valid LB candidate", "nodeName", node.Name)
continue
}
if !l.networkInfo.IsNodeConnected(node) {
l.logger.Info("Node not connected to service network", "nodeName", node.Name, "network", l.networkInfo.K8sNetwork)
continue
}
zone, err := l.zoneGetter.GetZoneForNode(node.Name)
if err != nil {
l.logger.Error(err, "Unable to find zone for node, skipping", "nodeName", node.Name)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
zoneNodeMap[zone] = append(zoneNodeMap[zone], node)
}
}
if numEndpoints == 0 {
// Not having backends will cause clients to see connection timeout instead of an "ICMP ConnectionRefused".
return nil, nil, 0, nil
}
// Compute the networkEndpoints, with total endpoints count <= l.subsetSizeLimit
klog.V(2).Infof("Got zoneNodeMap as input for service", "zoneNodeMap", nodeMapToString(zoneNodeMap), "serviceID", l.svcId)
subsetMap, err := getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap, l.logger, l.networkInfo)
return subsetMap, nil, 0, err
}
func (l *LocalL4ILBEndpointsCalculator) CalculateEndpointsDegradedMode(_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) {
// this should be the same as CalculateEndpoints for L4 ec
subsetMap, _, _, err := l.CalculateEndpoints(nil, currentMap)
return subsetMap, nil, err
}
func (l *LocalL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
// this should be a no-op for now
return nil
}
// ClusterL4ILBEndpointGetter implements the NetworkEndpointsCalculator interface.
// It exposes methods to calculate Network endpoints for GCE_VM_IP NEGs when the service
// uses "ExternalTrafficPolicy: Cluster" mode This is the default mode.
// In this mode, the endpoints of the NEG are calculated by selecting nodes at random. Up to 25(subset size limit in this
// mode) are selected.
type ClusterL4ILBEndpointsCalculator struct {
// nodeLister is used for listing all the nodes in the cluster when calculating the subset.
nodeLister listers.NodeLister
// zoneGetter looks up the zone for a given node when calculating subsets.
zoneGetter types.ZoneGetter
// subsetSizeLimit is the max value of the subset size in this mode.
subsetSizeLimit int
// svcId is the unique identifier for the service, that is used as a salt when hashing nodenames.
svcId string
networkInfo *network.NetworkInfo
logger klog.Logger
}
func NewClusterL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string, logger klog.Logger, networkInfo *network.NetworkInfo) *ClusterL4ILBEndpointsCalculator {
return &ClusterL4ILBEndpointsCalculator{
nodeLister: nodeLister,
zoneGetter: zoneGetter,
subsetSizeLimit: maxSubsetSizeDefault,
svcId: svcId,
logger: logger.WithName("ClusterL4ILBEndpointsCalculator"),
networkInfo: networkInfo,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *ClusterL4ILBEndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L4ClusterMode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *ClusterL4ILBEndpointsCalculator) CalculateEndpoints(_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
// In this mode, any of the cluster nodes can be part of the subset, whether or not a matching pod runs on it.
nodes, _ := utils.ListWithPredicate(l.nodeLister, utils.CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes)
zoneNodeMap := make(map[string][]*v1.Node)
for _, node := range nodes {
if !l.networkInfo.IsNodeConnected(node) {
l.logger.Info("Node not connected to service network", "nodeName", node.Name, "network", l.networkInfo.K8sNetwork)
continue
}
zone, err := l.zoneGetter.GetZoneForNode(node.Name)
if err != nil {
l.logger.Error(err, "Unable to find zone for node skipping", "nodeName", node.Name)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
zoneNodeMap[zone] = append(zoneNodeMap[zone], node)
}
klog.V(2).Infof("Got zoneNodeMap as input for service", "zoneNodeMap", nodeMapToString(zoneNodeMap), "serviceID", l.svcId)
// Compute the networkEndpoints, with total endpoints <= l.subsetSizeLimit.
subsetMap, err := getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap, l.logger, l.networkInfo)
return subsetMap, nil, 0, err
}
func (l *ClusterL4ILBEndpointsCalculator) CalculateEndpointsDegradedMode(_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) {
// this should be the same as CalculateEndpoints for L4 ec
subsetMap, _, _, err := l.CalculateEndpoints(nil, currentMap)
return subsetMap, nil, err
}
func (l *ClusterL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
// this should be a no-op for now
return nil
}
// L7EndpointsCalculator implements methods to calculate Network endpoints for VM_IP_PORT NEGs
type L7EndpointsCalculator struct {
zoneGetter types.ZoneGetter
servicePortName string
podLister cache.Indexer
nodeLister cache.Indexer
serviceLister cache.Indexer
syncerKey types.NegSyncerKey
networkEndpointType types.NetworkEndpointType
enableDualStackNEG bool
logger klog.Logger
syncMetricsCollector *metricscollector.SyncerMetrics
}
func NewL7EndpointsCalculator(zoneGetter types.ZoneGetter, podLister, nodeLister, serviceLister cache.Indexer, syncerKey types.NegSyncerKey, logger klog.Logger, enableDualStackNEG bool, syncMetricsCollector *metricscollector.SyncerMetrics) *L7EndpointsCalculator {
return &L7EndpointsCalculator{
zoneGetter: zoneGetter,
servicePortName: syncerKey.PortTuple.Name,
podLister: podLister,
nodeLister: nodeLister,
serviceLister: serviceLister,
syncerKey: syncerKey,
networkEndpointType: syncerKey.NegType,
enableDualStackNEG: enableDualStackNEG,
logger: logger.WithName("L7EndpointsCalculator"),
syncMetricsCollector: syncMetricsCollector,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *L7EndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L7Mode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *L7EndpointsCalculator) CalculateEndpoints(eds []types.EndpointsData, _ map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
result, err := toZoneNetworkEndpointMap(eds, l.zoneGetter, l.podLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG)
if err == nil { // If current calculation ends up in error, we trigger and emit metrics in degraded mode.
l.syncMetricsCollector.UpdateSyncerEPMetrics(l.syncerKey, result.EPCount, result.EPSCount)
}
return result.NetworkEndpointSet, result.EndpointPodMap, result.EPCount[negtypes.Duplicate], err
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *L7EndpointsCalculator) CalculateEndpointsDegradedMode(eds []types.EndpointsData, _ map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) {
result := toZoneNetworkEndpointMapDegradedMode(eds, l.zoneGetter, l.podLister, l.nodeLister, l.serviceLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG)
l.syncMetricsCollector.UpdateSyncerEPMetrics(l.syncerKey, result.EPCount, result.EPSCount)
return result.NetworkEndpointSet, result.EndpointPodMap, nil
}
func nodeMapToString(nodeMap map[string][]*v1.Node) string {
var str []string
for zone, nodeList := range nodeMap {
str = append(str, fmt.Sprintf("Zone %s: %d nodes", zone, len(nodeList)))
}
return strings.Join(str, ",")
}
// ValidateEndpoints checks if endpoint information is correct.
//
// For L7 Endpoint Calculator, it returns error if one of the two checks fails:
// 1. The endpoint count from endpointData doesn't equal to the one from endpointPodMap:
// endpiontPodMap removes the duplicated endpoints, and dupCount stores the number of duplicated it removed
// and we compare the endpoint counts with duplicates
// 2. The endpoint count from endpointData or the one from endpointPodMap is 0
func (l *L7EndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
// Endpoint count from EndpointPodMap
countFromPodMap := len(endpointPodMap) + dupCount
if countFromPodMap == 0 {
l.logger.Info("Detected endpoint count from endpointPodMap going to zero", "endpointPodMap", endpointPodMap)
return fmt.Errorf("%w: Detect endpoint count goes to zero", types.ErrEPCalculationCountZero)
}
// Endpoint count from EndpointData
countFromEndpointData := 0
for _, ed := range endpointData {
countFromEndpointData += len(ed.Addresses)
} | }
if countFromEndpointData != countFromPodMap {
l.logger.Info("Detected error when comparing endpoint counts", "countFromEndpointData", countFromEndpointData, "countFromPodMap", countFromPodMap, "endpointData", endpointData, "endpointPodMap", endpointPodMap, "dupCount", dupCount)
return fmt.Errorf("%w: Detect endpoint mismatch, count from endpoint slice=%d, count after calculation=%d", types.ErrEPCountsDiffer, countFromEndpointData, countFromPodMap)
}
return nil
} | if countFromEndpointData == 0 {
l.logger.Info("Detected endpoint count from endpointData going to zero", "endpointData", endpointData)
return fmt.Errorf("%w: Detect endpoint count goes to zero", types.ErrEPSEndpointCountZero) | random_line_split |
endpoints_calculator.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package syncers
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/ingress-gce/pkg/neg/metrics"
"k8s.io/ingress-gce/pkg/neg/metrics/metricscollector"
"k8s.io/ingress-gce/pkg/neg/types"
negtypes "k8s.io/ingress-gce/pkg/neg/types"
"k8s.io/ingress-gce/pkg/network"
"k8s.io/ingress-gce/pkg/utils"
"k8s.io/klog/v2"
)
// LocalL4ILBEndpointGetter implements the NetworkEndpointsCalculator interface.
// It exposes methods to calculate Network endpoints for GCE_VM_IP NEGs when the service
// uses "ExternalTrafficPolicy: Local" mode.
// In this mode, the endpoints of the NEG are calculated by listing the nodes that host the service endpoints(pods)
// for the given service. These candidate nodes picked as is, if the count is less than the subset size limit(250).
// Otherwise, a subset of nodes is selected.
// In a cluster with nodes node1... node 50. If nodes node10 to node 45 run the pods for a given ILB service, all these
// nodes - node10, node 11 ... node45 will be part of the subset.
type LocalL4ILBEndpointsCalculator struct {
nodeLister listers.NodeLister
zoneGetter types.ZoneGetter
subsetSizeLimit int
svcId string
logger klog.Logger
networkInfo *network.NetworkInfo
}
func NewLocalL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string, logger klog.Logger, networkInfo *network.NetworkInfo) *LocalL4ILBEndpointsCalculator {
return &LocalL4ILBEndpointsCalculator{
nodeLister: nodeLister,
zoneGetter: zoneGetter,
subsetSizeLimit: maxSubsetSizeLocal,
svcId: svcId,
logger: logger.WithName("LocalL4ILBEndpointsCalculator"),
networkInfo: networkInfo,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *LocalL4ILBEndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L4LocalMode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *LocalL4ILBEndpointsCalculator) CalculateEndpoints(eds []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
// List all nodes where the service endpoints are running. Get a subset of the desired count.
zoneNodeMap := make(map[string][]*v1.Node)
processedNodes := sets.String{}
numEndpoints := 0
candidateNodeCheck := utils.CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes
for _, ed := range eds {
for _, addr := range ed.Addresses {
if addr.NodeName == nil {
l.logger.V(2).Info("Address inside Endpoints does not have an associated node. Skipping", "address", addr.Addresses, "endpoints", klog.KRef(ed.Meta.Namespace, ed.Meta.Name))
continue
}
if addr.TargetRef == nil {
l.logger.V(2).Info("Address inside Endpoints does not have an associated pod. Skipping", "address", addr.Addresses, "endpoints", klog.KRef(ed.Meta.Namespace, ed.Meta.Name))
continue
}
numEndpoints++
if processedNodes.Has(*addr.NodeName) {
continue
}
processedNodes.Insert(*addr.NodeName)
node, err := l.nodeLister.Get(*addr.NodeName)
if err != nil {
l.logger.Error(err, "failed to retrieve node object", "nodeName", *addr.NodeName)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
if ok := candidateNodeCheck(node); !ok {
l.logger.Info("Dropping Node from subset since it is not a valid LB candidate", "nodeName", node.Name)
continue
}
if !l.networkInfo.IsNodeConnected(node) {
l.logger.Info("Node not connected to service network", "nodeName", node.Name, "network", l.networkInfo.K8sNetwork)
continue
}
zone, err := l.zoneGetter.GetZoneForNode(node.Name)
if err != nil {
l.logger.Error(err, "Unable to find zone for node, skipping", "nodeName", node.Name)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
zoneNodeMap[zone] = append(zoneNodeMap[zone], node)
}
}
if numEndpoints == 0 {
// Not having backends will cause clients to see connection timeout instead of an "ICMP ConnectionRefused".
return nil, nil, 0, nil
}
// Compute the networkEndpoints, with total endpoints count <= l.subsetSizeLimit
klog.V(2).Infof("Got zoneNodeMap as input for service", "zoneNodeMap", nodeMapToString(zoneNodeMap), "serviceID", l.svcId)
subsetMap, err := getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap, l.logger, l.networkInfo)
return subsetMap, nil, 0, err
}
func (l *LocalL4ILBEndpointsCalculator) CalculateEndpointsDegradedMode(_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) |
func (l *LocalL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
// this should be a no-op for now
return nil
}
// ClusterL4ILBEndpointGetter implements the NetworkEndpointsCalculator interface.
// It exposes methods to calculate Network endpoints for GCE_VM_IP NEGs when the service
// uses "ExternalTrafficPolicy: Cluster" mode This is the default mode.
// In this mode, the endpoints of the NEG are calculated by selecting nodes at random. Up to 25(subset size limit in this
// mode) are selected.
type ClusterL4ILBEndpointsCalculator struct {
// nodeLister is used for listing all the nodes in the cluster when calculating the subset.
nodeLister listers.NodeLister
// zoneGetter looks up the zone for a given node when calculating subsets.
zoneGetter types.ZoneGetter
// subsetSizeLimit is the max value of the subset size in this mode.
subsetSizeLimit int
// svcId is the unique identifier for the service, that is used as a salt when hashing nodenames.
svcId string
networkInfo *network.NetworkInfo
logger klog.Logger
}
func NewClusterL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string, logger klog.Logger, networkInfo *network.NetworkInfo) *ClusterL4ILBEndpointsCalculator {
return &ClusterL4ILBEndpointsCalculator{
nodeLister: nodeLister,
zoneGetter: zoneGetter,
subsetSizeLimit: maxSubsetSizeDefault,
svcId: svcId,
logger: logger.WithName("ClusterL4ILBEndpointsCalculator"),
networkInfo: networkInfo,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *ClusterL4ILBEndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L4ClusterMode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *ClusterL4ILBEndpointsCalculator) CalculateEndpoints(_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
// In this mode, any of the cluster nodes can be part of the subset, whether or not a matching pod runs on it.
nodes, _ := utils.ListWithPredicate(l.nodeLister, utils.CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes)
zoneNodeMap := make(map[string][]*v1.Node)
for _, node := range nodes {
if !l.networkInfo.IsNodeConnected(node) {
l.logger.Info("Node not connected to service network", "nodeName", node.Name, "network", l.networkInfo.K8sNetwork)
continue
}
zone, err := l.zoneGetter.GetZoneForNode(node.Name)
if err != nil {
l.logger.Error(err, "Unable to find zone for node skipping", "nodeName", node.Name)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
zoneNodeMap[zone] = append(zoneNodeMap[zone], node)
}
klog.V(2).Infof("Got zoneNodeMap as input for service", "zoneNodeMap", nodeMapToString(zoneNodeMap), "serviceID", l.svcId)
// Compute the networkEndpoints, with total endpoints <= l.subsetSizeLimit.
subsetMap, err := getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap, l.logger, l.networkInfo)
return subsetMap, nil, 0, err
}
func (l *ClusterL4ILBEndpointsCalculator) CalculateEndpointsDegradedMode(_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) {
// this should be the same as CalculateEndpoints for L4 ec
subsetMap, _, _, err := l.CalculateEndpoints(nil, currentMap)
return subsetMap, nil, err
}
func (l *ClusterL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
// this should be a no-op for now
return nil
}
// L7EndpointsCalculator implements methods to calculate Network endpoints for VM_IP_PORT NEGs
type L7EndpointsCalculator struct {
zoneGetter types.ZoneGetter
servicePortName string
podLister cache.Indexer
nodeLister cache.Indexer
serviceLister cache.Indexer
syncerKey types.NegSyncerKey
networkEndpointType types.NetworkEndpointType
enableDualStackNEG bool
logger klog.Logger
syncMetricsCollector *metricscollector.SyncerMetrics
}
func NewL7EndpointsCalculator(zoneGetter types.ZoneGetter, podLister, nodeLister, serviceLister cache.Indexer, syncerKey types.NegSyncerKey, logger klog.Logger, enableDualStackNEG bool, syncMetricsCollector *metricscollector.SyncerMetrics) *L7EndpointsCalculator {
return &L7EndpointsCalculator{
zoneGetter: zoneGetter,
servicePortName: syncerKey.PortTuple.Name,
podLister: podLister,
nodeLister: nodeLister,
serviceLister: serviceLister,
syncerKey: syncerKey,
networkEndpointType: syncerKey.NegType,
enableDualStackNEG: enableDualStackNEG,
logger: logger.WithName("L7EndpointsCalculator"),
syncMetricsCollector: syncMetricsCollector,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *L7EndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L7Mode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *L7EndpointsCalculator) CalculateEndpoints(eds []types.EndpointsData, _ map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
result, err := toZoneNetworkEndpointMap(eds, l.zoneGetter, l.podLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG)
if err == nil { // If current calculation ends up in error, we trigger and emit metrics in degraded mode.
l.syncMetricsCollector.UpdateSyncerEPMetrics(l.syncerKey, result.EPCount, result.EPSCount)
}
return result.NetworkEndpointSet, result.EndpointPodMap, result.EPCount[negtypes.Duplicate], err
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *L7EndpointsCalculator) CalculateEndpointsDegradedMode(eds []types.EndpointsData, _ map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) {
result := toZoneNetworkEndpointMapDegradedMode(eds, l.zoneGetter, l.podLister, l.nodeLister, l.serviceLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG)
l.syncMetricsCollector.UpdateSyncerEPMetrics(l.syncerKey, result.EPCount, result.EPSCount)
return result.NetworkEndpointSet, result.EndpointPodMap, nil
}
func nodeMapToString(nodeMap map[string][]*v1.Node) string {
var str []string
for zone, nodeList := range nodeMap {
str = append(str, fmt.Sprintf("Zone %s: %d nodes", zone, len(nodeList)))
}
return strings.Join(str, ",")
}
// ValidateEndpoints checks if endpoint information is correct.
//
// For L7 Endpoint Calculator, it returns error if one of the two checks fails:
// 1. The endpoint count from endpointData doesn't equal to the one from endpointPodMap:
// endpiontPodMap removes the duplicated endpoints, and dupCount stores the number of duplicated it removed
// and we compare the endpoint counts with duplicates
// 2. The endpoint count from endpointData or the one from endpointPodMap is 0
func (l *L7EndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
// Endpoint count from EndpointPodMap
countFromPodMap := len(endpointPodMap) + dupCount
if countFromPodMap == 0 {
l.logger.Info("Detected endpoint count from endpointPodMap going to zero", "endpointPodMap", endpointPodMap)
return fmt.Errorf("%w: Detect endpoint count goes to zero", types.ErrEPCalculationCountZero)
}
// Endpoint count from EndpointData
countFromEndpointData := 0
for _, ed := range endpointData {
countFromEndpointData += len(ed.Addresses)
}
if countFromEndpointData == 0 {
l.logger.Info("Detected endpoint count from endpointData going to zero", "endpointData", endpointData)
return fmt.Errorf("%w: Detect endpoint count goes to zero", types.ErrEPSEndpointCountZero)
}
if countFromEndpointData != countFromPodMap {
l.logger.Info("Detected error when comparing endpoint counts", "countFromEndpointData", countFromEndpointData, "countFromPodMap", countFromPodMap, "endpointData", endpointData, "endpointPodMap", endpointPodMap, "dupCount", dupCount)
return fmt.Errorf("%w: Detect endpoint mismatch, count from endpoint slice=%d, count after calculation=%d", types.ErrEPCountsDiffer, countFromEndpointData, countFromPodMap)
}
return nil
}
| {
// this should be the same as CalculateEndpoints for L4 ec
subsetMap, _, _, err := l.CalculateEndpoints(nil, currentMap)
return subsetMap, nil, err
} | identifier_body |
endpoints_calculator.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package syncers
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/ingress-gce/pkg/neg/metrics"
"k8s.io/ingress-gce/pkg/neg/metrics/metricscollector"
"k8s.io/ingress-gce/pkg/neg/types"
negtypes "k8s.io/ingress-gce/pkg/neg/types"
"k8s.io/ingress-gce/pkg/network"
"k8s.io/ingress-gce/pkg/utils"
"k8s.io/klog/v2"
)
// LocalL4ILBEndpointGetter implements the NetworkEndpointsCalculator interface.
// It exposes methods to calculate Network endpoints for GCE_VM_IP NEGs when the service
// uses "ExternalTrafficPolicy: Local" mode.
// In this mode, the endpoints of the NEG are calculated by listing the nodes that host the service endpoints(pods)
// for the given service. These candidate nodes picked as is, if the count is less than the subset size limit(250).
// Otherwise, a subset of nodes is selected.
// In a cluster with nodes node1... node 50. If nodes node10 to node 45 run the pods for a given ILB service, all these
// nodes - node10, node 11 ... node45 will be part of the subset.
type LocalL4ILBEndpointsCalculator struct {
nodeLister listers.NodeLister
zoneGetter types.ZoneGetter
subsetSizeLimit int
svcId string
logger klog.Logger
networkInfo *network.NetworkInfo
}
func NewLocalL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string, logger klog.Logger, networkInfo *network.NetworkInfo) *LocalL4ILBEndpointsCalculator {
return &LocalL4ILBEndpointsCalculator{
nodeLister: nodeLister,
zoneGetter: zoneGetter,
subsetSizeLimit: maxSubsetSizeLocal,
svcId: svcId,
logger: logger.WithName("LocalL4ILBEndpointsCalculator"),
networkInfo: networkInfo,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *LocalL4ILBEndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L4LocalMode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *LocalL4ILBEndpointsCalculator) CalculateEndpoints(eds []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
// List all nodes where the service endpoints are running. Get a subset of the desired count.
zoneNodeMap := make(map[string][]*v1.Node)
processedNodes := sets.String{}
numEndpoints := 0
candidateNodeCheck := utils.CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes
for _, ed := range eds {
for _, addr := range ed.Addresses |
}
if numEndpoints == 0 {
// Not having backends will cause clients to see connection timeout instead of an "ICMP ConnectionRefused".
return nil, nil, 0, nil
}
// Compute the networkEndpoints, with total endpoints count <= l.subsetSizeLimit
klog.V(2).Infof("Got zoneNodeMap as input for service", "zoneNodeMap", nodeMapToString(zoneNodeMap), "serviceID", l.svcId)
subsetMap, err := getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap, l.logger, l.networkInfo)
return subsetMap, nil, 0, err
}
func (l *LocalL4ILBEndpointsCalculator) CalculateEndpointsDegradedMode(_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) {
// this should be the same as CalculateEndpoints for L4 ec
subsetMap, _, _, err := l.CalculateEndpoints(nil, currentMap)
return subsetMap, nil, err
}
func (l *LocalL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
// this should be a no-op for now
return nil
}
// ClusterL4ILBEndpointGetter implements the NetworkEndpointsCalculator interface.
// It exposes methods to calculate Network endpoints for GCE_VM_IP NEGs when the service
// uses "ExternalTrafficPolicy: Cluster" mode This is the default mode.
// In this mode, the endpoints of the NEG are calculated by selecting nodes at random. Up to 25(subset size limit in this
// mode) are selected.
type ClusterL4ILBEndpointsCalculator struct {
// nodeLister is used for listing all the nodes in the cluster when calculating the subset.
nodeLister listers.NodeLister
// zoneGetter looks up the zone for a given node when calculating subsets.
zoneGetter types.ZoneGetter
// subsetSizeLimit is the max value of the subset size in this mode.
subsetSizeLimit int
// svcId is the unique identifier for the service, that is used as a salt when hashing nodenames.
svcId string
networkInfo *network.NetworkInfo
logger klog.Logger
}
func NewClusterL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string, logger klog.Logger, networkInfo *network.NetworkInfo) *ClusterL4ILBEndpointsCalculator {
return &ClusterL4ILBEndpointsCalculator{
nodeLister: nodeLister,
zoneGetter: zoneGetter,
subsetSizeLimit: maxSubsetSizeDefault,
svcId: svcId,
logger: logger.WithName("ClusterL4ILBEndpointsCalculator"),
networkInfo: networkInfo,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *ClusterL4ILBEndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L4ClusterMode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
func (l *ClusterL4ILBEndpointsCalculator) CalculateEndpoints(_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
// In this mode, any of the cluster nodes can be part of the subset, whether or not a matching pod runs on it.
nodes, _ := utils.ListWithPredicate(l.nodeLister, utils.CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes)
zoneNodeMap := make(map[string][]*v1.Node)
for _, node := range nodes {
if !l.networkInfo.IsNodeConnected(node) {
l.logger.Info("Node not connected to service network", "nodeName", node.Name, "network", l.networkInfo.K8sNetwork)
continue
}
zone, err := l.zoneGetter.GetZoneForNode(node.Name)
if err != nil {
l.logger.Error(err, "Unable to find zone for node skipping", "nodeName", node.Name)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
zoneNodeMap[zone] = append(zoneNodeMap[zone], node)
}
klog.V(2).Infof("Got zoneNodeMap as input for service", "zoneNodeMap", nodeMapToString(zoneNodeMap), "serviceID", l.svcId)
// Compute the networkEndpoints, with total endpoints <= l.subsetSizeLimit.
subsetMap, err := getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap, l.logger, l.networkInfo)
return subsetMap, nil, 0, err
}
func (l *ClusterL4ILBEndpointsCalculator) CalculateEndpointsDegradedMode(_ []types.EndpointsData, currentMap map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) {
// this should be the same as CalculateEndpoints for L4 ec
subsetMap, _, _, err := l.CalculateEndpoints(nil, currentMap)
return subsetMap, nil, err
}
func (l *ClusterL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
// this should be a no-op for now
return nil
}
// L7EndpointsCalculator implements methods to calculate Network endpoints for VM_IP_PORT NEGs
type L7EndpointsCalculator struct {
zoneGetter types.ZoneGetter
servicePortName string
podLister cache.Indexer
nodeLister cache.Indexer
serviceLister cache.Indexer
syncerKey types.NegSyncerKey
networkEndpointType types.NetworkEndpointType
enableDualStackNEG bool
logger klog.Logger
syncMetricsCollector *metricscollector.SyncerMetrics
}
func NewL7EndpointsCalculator(zoneGetter types.ZoneGetter, podLister, nodeLister, serviceLister cache.Indexer, syncerKey types.NegSyncerKey, logger klog.Logger, enableDualStackNEG bool, syncMetricsCollector *metricscollector.SyncerMetrics) *L7EndpointsCalculator {
return &L7EndpointsCalculator{
zoneGetter: zoneGetter,
servicePortName: syncerKey.PortTuple.Name,
podLister: podLister,
nodeLister: nodeLister,
serviceLister: serviceLister,
syncerKey: syncerKey,
networkEndpointType: syncerKey.NegType,
enableDualStackNEG: enableDualStackNEG,
logger: logger.WithName("L7EndpointsCalculator"),
syncMetricsCollector: syncMetricsCollector,
}
}
// Mode indicates the mode that the EndpointsCalculator is operating in.
func (l *L7EndpointsCalculator) Mode() types.EndpointsCalculatorMode {
return types.L7Mode
}
// CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs.
// The third return value is the number of duplicate endpoints that were
// dropped during calculation.
func (l *L7EndpointsCalculator) CalculateEndpoints(eds []types.EndpointsData, _ map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) {
	result, err := toZoneNetworkEndpointMap(eds, l.zoneGetter, l.podLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG)
	if err == nil { // If current calculation ends up in error, we trigger and emit metrics in degraded mode.
		l.syncMetricsCollector.UpdateSyncerEPMetrics(l.syncerKey, result.EPCount, result.EPSCount)
	}
	// NOTE(review): `negtypes` here vs `types` everywhere else — presumably
	// both alias the same package; confirm against the import list.
	return result.NetworkEndpointSet, result.EndpointPodMap, result.EPCount[negtypes.Duplicate], err
}
// CalculateEndpointsDegradedMode determines the endpoints in the NEGs using
// the degraded-mode zone mapping; this path never returns an error and always
// records sync metrics. (The previous comment was a copy-paste of
// CalculateEndpoints's.)
func (l *L7EndpointsCalculator) CalculateEndpointsDegradedMode(eds []types.EndpointsData, _ map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) {
	result := toZoneNetworkEndpointMapDegradedMode(eds, l.zoneGetter, l.podLister, l.nodeLister, l.serviceLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG)
	l.syncMetricsCollector.UpdateSyncerEPMetrics(l.syncerKey, result.EPCount, result.EPSCount)
	return result.NetworkEndpointSet, result.EndpointPodMap, nil
}
// nodeMapToString renders a zone->nodes map as a comma-separated summary of
// how many nodes each zone contains, for logging purposes.
func nodeMapToString(nodeMap map[string][]*v1.Node) string {
	parts := make([]string, 0, len(nodeMap))
	for zone, nodes := range nodeMap {
		parts = append(parts, fmt.Sprintf("Zone %s: %d nodes", zone, len(nodes)))
	}
	return strings.Join(parts, ",")
}
// ValidateEndpoints checks if endpoint information is correct.
//
// For L7 Endpoint Calculator, it returns error if one of the two checks fails:
//  1. The endpoint count from endpointData doesn't equal the one from
//     endpointPodMap: endpointPodMap removes duplicated endpoints, and
//     dupCount stores the number of duplicates it removed, so the counts are
//     compared with duplicates added back in.
//  2. The endpoint count from endpointData or the one from endpointPodMap is 0
func (l *L7EndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error {
	// Endpoint count from EndpointPodMap, with the deduplicated endpoints
	// added back so the totals are comparable.
	countFromPodMap := len(endpointPodMap) + dupCount
	if countFromPodMap == 0 {
		l.logger.Info("Detected endpoint count from endpointPodMap going to zero", "endpointPodMap", endpointPodMap)
		return fmt.Errorf("%w: Detect endpoint count goes to zero", types.ErrEPCalculationCountZero)
	}
	// Endpoint count from EndpointData
	countFromEndpointData := 0
	for _, ed := range endpointData {
		countFromEndpointData += len(ed.Addresses)
	}
	if countFromEndpointData == 0 {
		l.logger.Info("Detected endpoint count from endpointData going to zero", "endpointData", endpointData)
		return fmt.Errorf("%w: Detect endpoint count goes to zero", types.ErrEPSEndpointCountZero)
	}
	if countFromEndpointData != countFromPodMap {
		l.logger.Info("Detected error when comparing endpoint counts", "countFromEndpointData", countFromEndpointData, "countFromPodMap", countFromPodMap, "endpointData", endpointData, "endpointPodMap", endpointPodMap, "dupCount", dupCount)
		return fmt.Errorf("%w: Detect endpoint mismatch, count from endpoint slice=%d, count after calculation=%d", types.ErrEPCountsDiffer, countFromEndpointData, countFromPodMap)
	}
	return nil
}
| {
if addr.NodeName == nil {
l.logger.V(2).Info("Address inside Endpoints does not have an associated node. Skipping", "address", addr.Addresses, "endpoints", klog.KRef(ed.Meta.Namespace, ed.Meta.Name))
continue
}
if addr.TargetRef == nil {
l.logger.V(2).Info("Address inside Endpoints does not have an associated pod. Skipping", "address", addr.Addresses, "endpoints", klog.KRef(ed.Meta.Namespace, ed.Meta.Name))
continue
}
numEndpoints++
if processedNodes.Has(*addr.NodeName) {
continue
}
processedNodes.Insert(*addr.NodeName)
node, err := l.nodeLister.Get(*addr.NodeName)
if err != nil {
l.logger.Error(err, "failed to retrieve node object", "nodeName", *addr.NodeName)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
if ok := candidateNodeCheck(node); !ok {
l.logger.Info("Dropping Node from subset since it is not a valid LB candidate", "nodeName", node.Name)
continue
}
if !l.networkInfo.IsNodeConnected(node) {
l.logger.Info("Node not connected to service network", "nodeName", node.Name, "network", l.networkInfo.K8sNetwork)
continue
}
zone, err := l.zoneGetter.GetZoneForNode(node.Name)
if err != nil {
l.logger.Error(err, "Unable to find zone for node, skipping", "nodeName", node.Name)
metrics.PublishNegControllerErrorCountMetrics(err, true)
continue
}
zoneNodeMap[zone] = append(zoneNodeMap[zone], node)
} | conditional_block |
api.go | package php
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"sync/atomic"
"time"
radio "github.com/R-a-dio/valkyrie"
"github.com/R-a-dio/valkyrie/config"
"github.com/R-a-dio/valkyrie/errors"
"github.com/R-a-dio/valkyrie/search"
"github.com/R-a-dio/valkyrie/website/middleware"
"github.com/go-chi/chi"
chiware "github.com/go-chi/chi/middleware"
)
// NewAPI constructs the legacy (v0) PHP-compatible API. It eagerly builds the
// cached status endpoint (which also starts its background updater) and opens
// the configured search backend; either failure aborts construction.
func NewAPI(ctx context.Context, cfg config.Config, storage radio.StorageService,
	streamer radio.StreamerService, manager radio.ManagerService) (*API, error) {
	status, err := newV0Status(ctx, storage, streamer, manager)
	if err != nil {
		return nil, err
	}
	searcher, err := search.Open(cfg)
	if err != nil {
		return nil, err
	}
	api := API{
		Config: cfg,
		storage: storage,
		streamer: streamer,
		manager: manager,
		status: status,
		search: searcher,
	}
	return &api, nil
}
// API bundles the services needed by the legacy PHP-compatible endpoints.
// config.Config is embedded so handlers can call a.Conf() directly.
type API struct {
	config.Config
	// search answers /search/{query} requests.
	search radio.SearchService
	// storage provides submissions, requests, news and user lookups.
	storage radio.StorageService
	// streamer accepts song requests.
	streamer radio.StreamerService
	// manager reports overall stream status.
	manager radio.ManagerService
	// status serves the cached root (/) status document.
	status *v0Status
}
// Router builds the chi router for the legacy API. Every response defaults to
// JSON via the SetHeader middleware; individual handlers may override it.
func (a *API) Router() chi.Router {
	r := chi.NewRouter()
	r.Use(chiware.SetHeader("Content-Type", "application/json"))
	r.Method("GET", "/", a.status)
	r.Get("/ping", func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte(`{"ping":true}`))
	})
	r.Get("/user-cooldown", a.getUserCooldown)
	r.Get("/news", a.getNews)
	r.Get("/search/{query}", a.getSearch)
	r.Get("/can-request", a.getCanRequest)
	// should be static-images only
	r.With(middleware.UserByDJIDCtx(a.storage)).
		Get("/dj-image/{DJID}-*", a.getDJImage)
	r.With(middleware.UserByDJIDCtx(a.storage)).
		Get("/dj-image/{DJID:[0-9]+}", a.getDJImage)
	// these are deprecated
	r.Get("/song", a.getSong)
	r.Get("/metadata", a.getMetadata)
	return r
}
// getSong handles the deprecated /song endpoint; it always answers
// 410 Gone. Uses the named status constant instead of the magic number.
func (a *API) getSong(w http.ResponseWriter, r *http.Request) {
	http.Error(w, http.StatusText(http.StatusGone), http.StatusGone)
}
// getMetadata handles the deprecated /metadata endpoint; it always answers
// 410 Gone. Uses the named status constant instead of the magic number.
func (a *API) getMetadata(w http.ResponseWriter, r *http.Request) {
	http.Error(w, http.StatusText(http.StatusGone), http.StatusGone)
}
func (a *API) getUserCooldown(w http.ResponseWriter, r *http.Request) {
identifier := r.RemoteAddr | submissionTime, err := a.storage.Submissions(r.Context()).LastSubmissionTime(identifier)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
_, ok := radio.CalculateCooldown(
time.Duration(a.Conf().UserUploadDelay),
submissionTime,
)
response := userCooldownResponse{
Cooldown: submissionTime.Unix(),
Now: time.Now().Unix(),
Delay: int64(time.Duration(a.Conf().UserUploadDelay) / time.Second),
}
if ok {
response.Message = "You can upload a song!"
} else {
response.Message = fmt.Sprintf(
"You cannot upload another song just yet. You can upload %s",
submissionTime.
Add(time.Duration(a.Conf().UserUploadDelay)).
Format(timeagoFormat),
)
}
err = json.NewEncoder(w).Encode(response)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
}
// userCooldownResponse is the JSON body returned by /user-cooldown.
type userCooldownResponse struct {
	// time of last upload (unix seconds)
	Cooldown int64 `json:"cooldown"`
	// current server time (unix seconds)
	Now int64 `json:"now"`
	// configured cooldown in seconds
	Delay int64 `json:"delay"`
	// human-readable message to the user
	Message string `json:"message"`
}
// getNews returns the three most recent public news posts, reduced to the
// sanitized title/header/body shape of the legacy API.
func (a *API) getNews(w http.ResponseWriter, r *http.Request) {
	result, err := a.storage.News(r.Context()).ListPublic(3, 0)
	if err != nil {
		// TODO: look at error handling
		log.Println(err)
		return
	}
	// copy the entries into the sanitized output shape
	response := make([]newsResponse, 0, len(result.Entries))
	for _, entry := range result.Entries {
		response = append(response, newsResponse{
			Title:  entry.Title,
			Header: entry.Header,
			Body:   entry.Body,
		})
	}
	if err = json.NewEncoder(w).Encode(response); err != nil {
		// TODO: look at error handling
		log.Println(err)
	}
}
// newsResponse is the sanitized JSON shape of a single news entry as returned
// by /news; note the body is serialized under the legacy key "text".
type newsResponse struct {
	Title string `json:"title"`
	Header string `json:"header"`
	Body string `json:"text"`
}
// getSearch implements /search/{query}. It honors `limit` (1-20, default 20)
// and `page` (default 1) query parameters and returns a paginated, sanitized
// result set.
//
// Fixes: the previous guard accepted limit values <= 0 (only `< 20` was
// checked), which made `result.TotalHits/limit` panic with a division by
// zero; negative pages likewise leaked into the response. Errors from
// fromSong are now logged instead of silently dropped.
func (a *API) getSearch(w http.ResponseWriter, r *http.Request) {
	// parse the query string for page and limit settings
	values, err := url.ParseQuery(r.URL.RawQuery)
	if err != nil {
		// TODO: look at error handling
		log.Println(err)
		return
	}
	var limit = 20
	{
		rawLimit := values.Get("limit")
		parsedLimit, err := strconv.Atoi(rawLimit)
		// only use the value if it's a number in (0, 20); zero or negative
		// would divide by zero when computing LastPage below
		if err == nil && parsedLimit > 0 && parsedLimit < 20 {
			limit = parsedLimit
		}
	}
	var page = 1
	{
		rawPage := values.Get("page")
		parsedPage, err := strconv.Atoi(rawPage)
		// only use the value if it's a valid, positive number
		if err == nil && parsedPage > 0 {
			page = parsedPage
		}
	}
	var offset = (page - 1) * limit
	if offset < 0 {
		offset = 0
	}
	ctx := r.Context()
	// key from the url router, query is part of the url
	query := chi.URLParamFromCtx(ctx, "query")
	result, err := a.search.Search(ctx, query, limit, offset)
	if err != nil {
		// TODO: look at error handling
		log.Println(err)
		return
	}
	songs := result.Songs
	// create pagination information for the result
	var response = searchResponse{
		Total:       result.TotalHits,
		PerPage:     limit,
		CurrentPage: page,
		LastPage:    result.TotalHits/limit + 1,
		From:        offset + 1,
		To:          offset + len(songs),
	}
	// move over the results to sanitized output structs
	response.Results = make([]searchResponseItem, len(songs))
	for i := range songs {
		// a failure here means a song without a track; the item stays zeroed
		if err := response.Results[i].fromSong(songs[i]); err != nil {
			log.Println(err)
		}
	}
	if err = json.NewEncoder(w).Encode(response); err != nil {
		// TODO: look at error handling
		log.Println(err)
	}
}
// searchResponse is the paginated JSON envelope returned by /search/{query};
// the field names mirror the legacy PHP paginator output.
type searchResponse struct {
	Total int `json:"total"`
	PerPage int `json:"per_page"`
	CurrentPage int `json:"current_page"`
	LastPage int `json:"last_page"`
	From int `json:"from"`
	To int `json:"to"`
	Results []searchResponseItem `json:"data"`
}
// searchResponseItem is a single sanitized song entry inside a searchResponse;
// timestamps are unix seconds, 0 when never played/requested.
type searchResponseItem struct {
	Artist string `json:"artist"`
	Title string `json:"title"`
	TrackID radio.TrackID `json:"id"`
	LastPlayed int64 `json:"lastplayed"`
	LastRequested int64 `json:"lastrequested"`
	Requestable bool `json:"requestable"`
}
// fromSong copies the relevant fields of s into the response item. Songs
// without an attached database track are rejected with an error and leave
// the item unmodified.
func (sri *searchResponseItem) fromSong(s radio.Song) error {
	if !s.HasTrack() {
		// TODO: look at error handling
		return errors.New("Song without track found in search API")
	}
	// zero time means "never"; encode it as 0 rather than a huge negative unix stamp
	toUnix := func(t time.Time) int64 {
		if t.IsZero() {
			return 0
		}
		return t.Unix()
	}
	sri.Artist = s.Artist
	sri.Title = s.Title
	sri.TrackID = s.TrackID
	sri.LastPlayed = toUnix(s.LastPlayed)
	sri.LastRequested = toUnix(s.LastRequested)
	sri.Requestable = s.Requestable()
	return nil
}
// getCanRequest reports whether the caller (keyed by remote address) is
// currently allowed to request a song, as {"Main":{"requests":bool}}.
//
// NOTE(review): the deferred writer checks the captured `err` variable, so any
// early return after err is assigned responds with a 501 instead of a JSON
// body; a Status() failure before the defer writes nothing at all. Confirm
// this matches the legacy behavior callers expect.
func (a *API) getCanRequest(w http.ResponseWriter, r *http.Request) {
	status, err := a.manager.Status(r.Context())
	if err != nil {
		return
	}
	response := canRequestResponse{}
	// send our response when we return
	defer func() {
		// but not if an error occured
		if err != nil {
			// TODO: handle error
			http.Error(w, http.StatusText(501), 501)
			return
		}
		// this err is deliberately a new variable; it must not trip the check above
		err := json.NewEncoder(w).Encode(response)
		if err != nil {
			log.Println(err)
		}
	}()
	// all requests are disabled
	if !status.RequestsEnabled {
		return
	}
	identifier := r.RemoteAddr
	userLastRequest, err := a.storage.Request(r.Context()).LastRequest(identifier)
	if err != nil {
		return
	}
	// ok is true when the per-user request cooldown has elapsed
	_, ok := radio.CalculateCooldown(
		time.Duration(a.Conf().UserRequestDelay),
		userLastRequest,
	)
	if !ok {
		return
	}
	response.Main.Requests = true
	return
}
// canRequestResponse is the JSON body of /can-request; Requests is true when
// the caller may request a song right now.
type canRequestResponse struct {
	Main struct {
		Requests bool `json:"requests"`
	}
}
// getDJImage serves the image file of the DJ identified by the DJID url
// parameter; the user is resolved by the UserByDJIDCtx middleware.
//
// Fixes: removed the unreachable `return` after panic and the redundant
// Header().Del immediately before Set of the same key.
func (a *API) getDJImage(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// override the router-level JSON default; Set replaces any existing value
	w.Header().Set("Content-Type", "image/png")
	user, ok := ctx.Value(middleware.UserKey).(radio.User)
	if !ok {
		// a missing middleware is a programmer error, not a runtime condition
		panic("missing UserByDJIDCtx middleware")
	}
	sid := chi.URLParamFromCtx(ctx, "DJID")
	filename := filepath.Join(a.Conf().Website.DJImagePath, sid)
	f, err := os.Open(filename)
	if err != nil {
		log.Println(err)
		return
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		log.Println(err)
		return
	}
	// ServeContent handles range requests and caching headers for us
	http.ServeContent(w, r, user.DJ.Image, fi.ModTime(), f)
}
// RequestRoute is the router setup for handling requests; TrackCtx resolves
// the requested track into the request context before postRequest runs.
func (a *API) RequestRoute(r chi.Router) {
	r.Use(middleware.TrackCtx(a.storage))
	r.Post("/", a.postRequest)
}
// postRequest handles /request in legacy PHP format: it takes the track that
// TrackCtx stored in the request context and submits a request on behalf of
// the caller's remote address, answering with a {"success"|"error": msg} map.
func (a *API) postRequest(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	response := map[string]string{}
	// the response map is encoded whatever path we take below
	defer func() {
		err := json.NewEncoder(w).Encode(response)
		if err != nil {
			log.Println(err)
		}
	}()
	song, ok := ctx.Value(middleware.TrackKey).(radio.Song)
	if !ok {
		response["error"] = "invalid parameter"
		return
	}
	err := a.streamer.RequestSong(ctx, song, r.RemoteAddr)
	if err == nil {
		response["success"] = "Thank you for making your request!"
		return
	}
	// map the known rejection reasons onto user-facing messages
	switch {
	case errors.Is(errors.SongCooldown, err):
		response["error"] = "That song is still on cooldown, You'll have to wait longer to request it."
	case errors.Is(errors.UserCooldown, err):
		response["error"] = "You recently requested a song. You have to wait longer until you can request again."
	case errors.Is(errors.StreamerNoRequests, err):
		response["error"] = "Requests are disabled currently."
	default:
		log.Println(err)
		response["error"] = "something broke, report to IRC."
	}
}
type requestResponse map[string]string
// newV0Status constructs the handler for the API root (/). It seeds the
// atomic cache, starts the periodic background updater, and performs one
// synchronous update so the first request already sees real data.
func newV0Status(ctx context.Context, storage radio.SongStorageService,
	streamer radio.StreamerService, manager radio.ManagerService) (*v0Status, error) {
	s := v0Status{
		songs: storage,
		streamer: streamer,
		manager: manager,
		updatePeriod: time.Second * 2,
		longUpdatePeriod: time.Second * 10,
	}
	// initialize the atomic.Value
	s.storeCache(v0StatusJSON{})
	// run a periodic updater
	go s.runUpdate(ctx)
	// but also call update to get an initial value before we return
	return &s, s.updateStatusJSON(ctx)
}
// v0Status implements the root of the /api endpoint
type v0Status struct {
	// song storage to get last played songs
	songs radio.SongStorageService
	// streamer for queue contents
	streamer radio.StreamerService
	// manager for overall stream status
	manager radio.ManagerService
	// updatePeriod is how often the cache is rebuilt; longUpdatePeriod is how
	// often the (more expensive) queue/lastplayed lists are refreshed.
	updatePeriod time.Duration
	longUpdatePeriod time.Duration
	// cache contains a v0StatusJSON
	cache atomic.Value
}
// v0StatusJSON is the cached JSON document served at the API root.
type v0StatusJSON struct {
	Main v0StatusMain `json:"main"`
	// field to determine when we created the contents of LastPlayed and Queue
	ListCreatedOn time.Time `json:"-"`
}
// v0StatusMain mirrors the legacy "main" status object; json tags preserve
// the old PHP field names exactly.
type v0StatusMain struct {
	NowPlaying string `json:"np"`
	Listeners int `json:"listeners"`
	BitRate int `json:"bitrate"`
	IsAFKStream bool `json:"isafkstream"`
	IsStreamDesk bool `json:"isstreamdesk"`
	// CurrentTime is stamped per-request in ServeHTTP, not cached
	CurrentTime int64 `json:"current"`
	StartTime int64 `json:"start_time"`
	EndTime int64 `json:"end_time"`
	LastSet string `json:"lastset"`
	TrackID int `json:"trackid"`
	Thread string `json:"thread"`
	Requesting bool `json:"requesting"`
	DJName string `json:"djname"`
	DJ v0StatusDJ `json:"dj"`
	Queue []v0StatusListEntry `json:"queue"`
	LastPlayed []v0StatusListEntry `json:"lp"`
}
// v0StatusDJ is the legacy DJ object embedded in the status document; the db
// tags match the old database column names.
type v0StatusDJ struct {
	ID int `json:"id" db:"djid"`
	Name string `json:"djname" db:"djname"`
	Description string `json:"djtext" db:"djtext"`
	Image string `json:"djimage" db:"djimage"`
	Color string `json:"djcolor" db:"djcolor"`
	Visible bool `json:"visible" db:"visible"`
	Priority int `json:"priority" db:"priority"`
	ThemeCSS string `json:"css" db:"css"`
	ThemeID int `json:"theme_id" db:"theme_id"`
	Role string `json:"role" db:"role"`
}
// v0StatusListEntry is one queue or last-played row; Time holds an HTML
// timeago fragment and Timestamp the raw unix seconds. Type is 1 for a
// user-requested queue entry, 0 otherwise.
type v0StatusListEntry struct {
	Metadata string `json:"meta" db:"meta"`
	Time string `json:"time"`
	Type int `json:"type" db:"type"`
	Timestamp int64 `json:"timestamp" db:"time"`
}
// ServeHTTP writes the cached legacy status JSON, refreshing only the
// "current" timestamp for each request.
func (s *v0Status) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	status := s.loadCache()
	status.Main.CurrentTime = time.Now().Unix()

	header := rw.Header()
	header.Set("Content-Type", "application/json")
	header.Set("Access-Control-Allow-Origin", "*")

	// the legacy API serves raw HTML fragments in its strings, so HTML
	// escaping must stay off
	enc := json.NewEncoder(rw)
	enc.SetEscapeHTML(false)
	if err := enc.Encode(status); err != nil {
		log.Printf("json encoding error: %s", err)
	}
}
// loadCache returns the currently cached status document; safe for
// concurrent use with storeCache via atomic.Value.
func (s *v0Status) loadCache() v0StatusJSON {
	return s.cache.Load().(v0StatusJSON)
}
// storeCache atomically replaces the cached status document.
func (s *v0Status) storeCache(ss v0StatusJSON) {
	s.cache.Store(ss)
}
const timeagoFormat = `<time class="timeago" datetime="2006-01-02T15:04:05-0700">15:04:05</time>`
// createStatusJSON creates a new populated v0StatusJSON, if an error occurs it returns
// the previous v0StatusJSON that was stored in the cache
//
// Additionally, the Queue and LastPlayed fields are only updated if a period of length
// LongUpdatePeriod has passed, otherwise uses the contents of the previous status
//
// Fixes: the staleness check was inverted (it refreshed the lists while they
// were *fresh*, i.e. every call), and ListCreatedOn was not carried forward
// when the refresh was skipped, so the throttle could never engage.
func (s *v0Status) createStatusJSON(ctx context.Context) (v0StatusJSON, error) {
	var now = time.Now()
	var status v0StatusJSON
	last := s.loadCache()
	queue := last.Main.Queue
	lastplayed := last.Main.LastPlayed
	// keep the previous creation time unless we refresh below
	status.ListCreatedOn = last.ListCreatedOn
	// see if we need to update the queue and lastplayed values
	if last.ListCreatedOn.IsZero() ||
		now.Sub(last.ListCreatedOn) >= s.longUpdatePeriod {
		q, err := s.streamer.Queue(ctx)
		if err != nil {
			return last, err
		}
		// the legacy API only ever exposed the next five entries
		if len(q) > 5 {
			q = q[:5]
		}
		queue = make([]v0StatusListEntry, len(q))
		for i, entry := range q {
			queue[i].Metadata = entry.Song.Metadata
			queue[i].Time = entry.ExpectedStartTime.Format(timeagoFormat)
			queue[i].Timestamp = entry.ExpectedStartTime.Unix()
			if entry.IsUserRequest {
				queue[i].Type = 1
			}
		}
		lp, err := s.songs.Song(ctx).LastPlayed(0, 5)
		if err != nil {
			return last, err
		}
		lastplayed = make([]v0StatusListEntry, len(lp))
		for i, song := range lp {
			lastplayed[i].Metadata = song.Metadata
			lastplayed[i].Time = song.LastPlayed.Format(timeagoFormat)
			lastplayed[i].Timestamp = song.LastPlayed.Unix()
		}
		// record when we created these values, so we know when to refresh again
		status.ListCreatedOn = now
	}
	ms, err := s.manager.Status(ctx)
	if err != nil {
		return last, err
	}
	// End might be the zero time, in which case calling Unix
	// returns a large negative number that we don't want
	var endTime int64
	if !ms.SongInfo.End.IsZero() {
		endTime = ms.SongInfo.End.Unix()
	}
	// Song might not have a track associated with it, so we
	// have to check for that first, before reading the TrackID
	var trackID int
	if ms.Song.HasTrack() {
		trackID = int(ms.Song.TrackID)
	}
	// Thread seems to be a literal "none" if no thread is supposed to be shown in
	// the old API
	thread := ms.Thread
	if ms.Thread == "" {
		thread = "none"
	}
	dj := ms.User.DJ
	status.Main = v0StatusMain{
		NowPlaying:  ms.Song.Metadata,
		Listeners:   ms.Listeners,
		IsAFKStream: ms.User.Username == "AFK",
		StartTime:   ms.SongInfo.Start.Unix(),
		EndTime:     endTime,
		LastSet:     now.Format("2006-01-02 15:04:05"),
		TrackID:     trackID,
		Thread:      thread,
		// TODO(wessie): use RequestsEnabled again when it is implemented properly,
		// right now nothing sets it and the streamer ignores the value too, only
		// reading the configuration file instead
		Requesting: ms.User.Username == "AFK",
		// Requesting: ms.RequestsEnabled,
		DJName: dj.Name,
		DJ: v0StatusDJ{
			ID:          int(dj.ID),
			Name:        dj.Name,
			Description: dj.Text,
			Image:       dj.Image,
			Color:       dj.Color,
			Visible:     dj.Visible,
			Priority:    dj.Priority,
			ThemeCSS:    dj.CSS,
			ThemeID:     int(dj.Theme.ID),
			Role:        dj.Role,
		},
		Queue:      queue,
		LastPlayed: lastplayed,
	}
	return status, nil
}
// updateStatusJSON regenerates the cached status document and stores it;
// on failure the cache is left untouched and the error is returned.
func (s *v0Status) updateStatusJSON(ctx context.Context) error {
	status, err := s.createStatusJSON(ctx)
	if err != nil {
		return err
	}
	s.storeCache(status)
	return nil
}
func (s *v0Status) runUpdate(ctx context.Context) {
ticker := time.NewTicker(s.updatePeriod)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
}
err := s.updateStatusJSON(ctx)
if err != nil {
log.Printf("status: update error: %s", err)
}
}
} | random_line_split | |
api.go | package php
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"sync/atomic"
"time"
radio "github.com/R-a-dio/valkyrie"
"github.com/R-a-dio/valkyrie/config"
"github.com/R-a-dio/valkyrie/errors"
"github.com/R-a-dio/valkyrie/search"
"github.com/R-a-dio/valkyrie/website/middleware"
"github.com/go-chi/chi"
chiware "github.com/go-chi/chi/middleware"
)
func NewAPI(ctx context.Context, cfg config.Config, storage radio.StorageService,
streamer radio.StreamerService, manager radio.ManagerService) (*API, error) {
status, err := newV0Status(ctx, storage, streamer, manager)
if err != nil {
return nil, err
}
searcher, err := search.Open(cfg)
if err != nil {
return nil, err
}
api := API{
Config: cfg,
storage: storage,
streamer: streamer,
manager: manager,
status: status,
search: searcher,
}
return &api, nil
}
type API struct {
config.Config
search radio.SearchService
storage radio.StorageService
streamer radio.StreamerService
manager radio.ManagerService
status *v0Status
}
func (a *API) Router() chi.Router |
// getSong handles the deprecated /song endpoint; it always answers
// 410 Gone. Uses the named status constant instead of the magic number.
func (a *API) getSong(w http.ResponseWriter, r *http.Request) {
	http.Error(w, http.StatusText(http.StatusGone), http.StatusGone)
}
// getMetadata handles the deprecated /metadata endpoint; it always answers
// 410 Gone. Uses the named status constant instead of the magic number.
func (a *API) getMetadata(w http.ResponseWriter, r *http.Request) {
	http.Error(w, http.StatusText(http.StatusGone), http.StatusGone)
}
// getUserCooldown reports whether the calling address may upload another song
// and, if not, when the upload cooldown expires.
func (a *API) getUserCooldown(w http.ResponseWriter, r *http.Request) {
	// uploads are rate-limited per remote address
	identifier := r.RemoteAddr
	submissionTime, err := a.storage.Submissions(r.Context()).LastSubmissionTime(identifier)
	if err != nil {
		// TODO: look at error handling
		log.Println(err)
		return
	}
	// ok is true when the cooldown has elapsed and uploading is allowed
	_, ok := radio.CalculateCooldown(
		time.Duration(a.Conf().UserUploadDelay),
		submissionTime,
	)
	response := userCooldownResponse{
		Cooldown: submissionTime.Unix(),
		Now: time.Now().Unix(),
		Delay: int64(time.Duration(a.Conf().UserUploadDelay) / time.Second),
	}
	if ok {
		response.Message = "You can upload a song!"
	} else {
		response.Message = fmt.Sprintf(
			"You cannot upload another song just yet. You can upload %s",
			submissionTime.
				Add(time.Duration(a.Conf().UserUploadDelay)).
				Format(timeagoFormat),
		)
	}
	err = json.NewEncoder(w).Encode(response)
	if err != nil {
		// TODO: look at error handling
		log.Println(err)
		return
	}
}
type userCooldownResponse struct {
// time of last upload
Cooldown int64 `json:"cooldown"`
// current time
Now int64 `json:"now"`
// configured cooldown in seconds
Delay int64 `json:"delay"`
// message to the user
Message string `json:"message"`
}
func (a *API) getNews(w http.ResponseWriter, r *http.Request) {
result, err := a.storage.News(r.Context()).ListPublic(3, 0)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
// copy the entries to sanitized output struct
entries := result.Entries
var response = make([]newsResponse, len(entries))
for i := range response {
response[i].Title = entries[i].Title
response[i].Header = entries[i].Header
response[i].Body = entries[i].Body
}
err = json.NewEncoder(w).Encode(response)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
}
type newsResponse struct {
Title string `json:"title"`
Header string `json:"header"`
Body string `json:"text"`
}
// getSearch implements /search/{query}. It honors `limit` (1-20, default 20)
// and `page` (default 1) query parameters and returns a paginated, sanitized
// result set.
//
// Fixes: the previous guard accepted limit values <= 0 (only `< 20` was
// checked), which made `result.TotalHits/limit` panic with a division by
// zero; negative pages likewise leaked into the response. Errors from
// fromSong are now logged instead of silently dropped.
func (a *API) getSearch(w http.ResponseWriter, r *http.Request) {
	// parse the query string for page and limit settings
	values, err := url.ParseQuery(r.URL.RawQuery)
	if err != nil {
		// TODO: look at error handling
		log.Println(err)
		return
	}
	var limit = 20
	{
		rawLimit := values.Get("limit")
		parsedLimit, err := strconv.Atoi(rawLimit)
		// only use the value if it's a number in (0, 20); zero or negative
		// would divide by zero when computing LastPage below
		if err == nil && parsedLimit > 0 && parsedLimit < 20 {
			limit = parsedLimit
		}
	}
	var page = 1
	{
		rawPage := values.Get("page")
		parsedPage, err := strconv.Atoi(rawPage)
		// only use the value if it's a valid, positive number
		if err == nil && parsedPage > 0 {
			page = parsedPage
		}
	}
	var offset = (page - 1) * limit
	if offset < 0 {
		offset = 0
	}
	ctx := r.Context()
	// key from the url router, query is part of the url
	query := chi.URLParamFromCtx(ctx, "query")
	result, err := a.search.Search(ctx, query, limit, offset)
	if err != nil {
		// TODO: look at error handling
		log.Println(err)
		return
	}
	songs := result.Songs
	// create pagination information for the result
	var response = searchResponse{
		Total:       result.TotalHits,
		PerPage:     limit,
		CurrentPage: page,
		LastPage:    result.TotalHits/limit + 1,
		From:        offset + 1,
		To:          offset + len(songs),
	}
	// move over the results to sanitized output structs
	response.Results = make([]searchResponseItem, len(songs))
	for i := range songs {
		// a failure here means a song without a track; the item stays zeroed
		if err := response.Results[i].fromSong(songs[i]); err != nil {
			log.Println(err)
		}
	}
	if err = json.NewEncoder(w).Encode(response); err != nil {
		// TODO: look at error handling
		log.Println(err)
	}
}
type searchResponse struct {
Total int `json:"total"`
PerPage int `json:"per_page"`
CurrentPage int `json:"current_page"`
LastPage int `json:"last_page"`
From int `json:"from"`
To int `json:"to"`
Results []searchResponseItem `json:"data"`
}
type searchResponseItem struct {
Artist string `json:"artist"`
Title string `json:"title"`
TrackID radio.TrackID `json:"id"`
LastPlayed int64 `json:"lastplayed"`
LastRequested int64 `json:"lastrequested"`
Requestable bool `json:"requestable"`
}
// fromSong copies relevant fields from the song given to the response item
func (sri *searchResponseItem) fromSong(s radio.Song) error {
if !s.HasTrack() {
// TODO: look at error handling
return errors.New("Song without track found in search API")
}
sri.Artist = s.Artist
sri.Title = s.Title
sri.TrackID = s.TrackID
if s.LastPlayed.IsZero() {
sri.LastPlayed = 0
} else {
sri.LastPlayed = s.LastPlayed.Unix()
}
if s.LastRequested.IsZero() {
sri.LastRequested = 0
} else {
sri.LastRequested = s.LastRequested.Unix()
}
sri.Requestable = s.Requestable()
return nil
}
func (a *API) getCanRequest(w http.ResponseWriter, r *http.Request) {
status, err := a.manager.Status(r.Context())
if err != nil {
return
}
response := canRequestResponse{}
// send our response when we return
defer func() {
// but not if an error occured
if err != nil {
// TODO: handle error
http.Error(w, http.StatusText(501), 501)
return
}
err := json.NewEncoder(w).Encode(response)
if err != nil {
log.Println(err)
}
}()
// all requests are disabled
if !status.RequestsEnabled {
return
}
identifier := r.RemoteAddr
userLastRequest, err := a.storage.Request(r.Context()).LastRequest(identifier)
if err != nil {
return
}
_, ok := radio.CalculateCooldown(
time.Duration(a.Conf().UserRequestDelay),
userLastRequest,
)
if !ok {
return
}
response.Main.Requests = true
return
}
type canRequestResponse struct {
Main struct {
Requests bool `json:"requests"`
}
}
// getDJImage serves the image file of the DJ identified by the DJID url
// parameter; the user is resolved by the UserByDJIDCtx middleware.
//
// Fixes: removed the unreachable `return` after panic and the redundant
// Header().Del immediately before Set of the same key.
func (a *API) getDJImage(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// override the router-level JSON default; Set replaces any existing value
	w.Header().Set("Content-Type", "image/png")
	user, ok := ctx.Value(middleware.UserKey).(radio.User)
	if !ok {
		// a missing middleware is a programmer error, not a runtime condition
		panic("missing UserByDJIDCtx middleware")
	}
	sid := chi.URLParamFromCtx(ctx, "DJID")
	filename := filepath.Join(a.Conf().Website.DJImagePath, sid)
	f, err := os.Open(filename)
	if err != nil {
		log.Println(err)
		return
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		log.Println(err)
		return
	}
	// ServeContent handles range requests and caching headers for us
	http.ServeContent(w, r, user.DJ.Image, fi.ModTime(), f)
}
// RequestRoute is the router setup for handling requests
func (a *API) RequestRoute(r chi.Router) {
r.Use(middleware.TrackCtx(a.storage))
r.Post("/", a.postRequest)
}
// postRequest handles /request in legacy PHP format
func (a *API) postRequest(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
response := map[string]string{}
defer func() {
err := json.NewEncoder(w).Encode(response)
if err != nil {
log.Println(err)
}
}()
song, ok := ctx.Value(middleware.TrackKey).(radio.Song)
if !ok {
response["error"] = "invalid parameter"
return
}
err := a.streamer.RequestSong(ctx, song, r.RemoteAddr)
if err == nil {
response["success"] = "Thank you for making your request!"
return
}
switch {
case errors.Is(errors.SongCooldown, err):
response["error"] = "That song is still on cooldown, You'll have to wait longer to request it."
case errors.Is(errors.UserCooldown, err):
response["error"] = "You recently requested a song. You have to wait longer until you can request again."
case errors.Is(errors.StreamerNoRequests, err):
response["error"] = "Requests are disabled currently."
default:
log.Println(err)
response["error"] = "something broke, report to IRC."
}
}
type requestResponse map[string]string
func newV0Status(ctx context.Context, storage radio.SongStorageService,
streamer radio.StreamerService, manager radio.ManagerService) (*v0Status, error) {
s := v0Status{
songs: storage,
streamer: streamer,
manager: manager,
updatePeriod: time.Second * 2,
longUpdatePeriod: time.Second * 10,
}
// initialize the atomic.Value
s.storeCache(v0StatusJSON{})
// run a periodic updater
go s.runUpdate(ctx)
// but also call update to get an initial value before we return
return &s, s.updateStatusJSON(ctx)
}
// v0Status implements the root of the /api endpoint
type v0Status struct {
// song storage to get last played songs
songs radio.SongStorageService
// streamer for queue contents
streamer radio.StreamerService
// manager for overall stream status
manager radio.ManagerService
updatePeriod time.Duration
longUpdatePeriod time.Duration
// cache contains a v0StatusJSON
cache atomic.Value
}
type v0StatusJSON struct {
Main v0StatusMain `json:"main"`
// field to determine when we created the contents of LastPlayed and Queue
ListCreatedOn time.Time `json:"-"`
}
type v0StatusMain struct {
NowPlaying string `json:"np"`
Listeners int `json:"listeners"`
BitRate int `json:"bitrate"`
IsAFKStream bool `json:"isafkstream"`
IsStreamDesk bool `json:"isstreamdesk"`
CurrentTime int64 `json:"current"`
StartTime int64 `json:"start_time"`
EndTime int64 `json:"end_time"`
LastSet string `json:"lastset"`
TrackID int `json:"trackid"`
Thread string `json:"thread"`
Requesting bool `json:"requesting"`
DJName string `json:"djname"`
DJ v0StatusDJ `json:"dj"`
Queue []v0StatusListEntry `json:"queue"`
LastPlayed []v0StatusListEntry `json:"lp"`
}
type v0StatusDJ struct {
ID int `json:"id" db:"djid"`
Name string `json:"djname" db:"djname"`
Description string `json:"djtext" db:"djtext"`
Image string `json:"djimage" db:"djimage"`
Color string `json:"djcolor" db:"djcolor"`
Visible bool `json:"visible" db:"visible"`
Priority int `json:"priority" db:"priority"`
ThemeCSS string `json:"css" db:"css"`
ThemeID int `json:"theme_id" db:"theme_id"`
Role string `json:"role" db:"role"`
}
type v0StatusListEntry struct {
Metadata string `json:"meta" db:"meta"`
Time string `json:"time"`
Type int `json:"type" db:"type"`
Timestamp int64 `json:"timestamp" db:"time"`
}
func (s *v0Status) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
status := s.loadCache()
status.Main.CurrentTime = time.Now().Unix()
h := rw.Header()
h.Set("Content-Type", "application/json")
h.Set("Access-Control-Allow-Origin", "*")
e := json.NewEncoder(rw)
e.SetEscapeHTML(false)
err := e.Encode(status)
if err != nil {
log.Printf("json encoding error: %s", err)
}
}
func (s *v0Status) loadCache() v0StatusJSON {
return s.cache.Load().(v0StatusJSON)
}
func (s *v0Status) storeCache(ss v0StatusJSON) {
s.cache.Store(ss)
}
const timeagoFormat = `<time class="timeago" datetime="2006-01-02T15:04:05-0700">15:04:05</time>`
// createStatusJSON creates a new populated v0StatusJSON, if an error occurs it returns
// the previous v0StatusJSON that was stored in the cache
//
// Additionally, the Queue and LastPlayed fields are only updated if a period of length
// LongUpdatePeriod has passed, otherwise uses the contents of the previous status
func (s *v0Status) createStatusJSON(ctx context.Context) (v0StatusJSON, error) {
var now = time.Now()
var status v0StatusJSON
last := s.loadCache()
queue := last.Main.Queue
lastplayed := last.Main.LastPlayed
// see if we need to update the queue and lastplayed values
if last.ListCreatedOn.IsZero() ||
now.Sub(last.ListCreatedOn) < s.longUpdatePeriod {
q, err := s.streamer.Queue(ctx)
if err != nil {
return last, err
}
if len(q) > 5 {
q = q[:5]
}
queue = make([]v0StatusListEntry, len(q))
for i, entry := range q {
queue[i].Metadata = entry.Song.Metadata
queue[i].Time = entry.ExpectedStartTime.Format(timeagoFormat)
queue[i].Timestamp = entry.ExpectedStartTime.Unix()
if entry.IsUserRequest {
queue[i].Type = 1
}
}
lp, err := s.songs.Song(ctx).LastPlayed(0, 5)
if err != nil {
return last, err
}
lastplayed = make([]v0StatusListEntry, len(lp))
for i, song := range lp {
lastplayed[i].Metadata = song.Metadata
lastplayed[i].Time = song.LastPlayed.Format(timeagoFormat)
lastplayed[i].Timestamp = song.LastPlayed.Unix()
}
// record when we created these values, so we know when to refresh again
status.ListCreatedOn = now
}
ms, err := s.manager.Status(ctx)
if err != nil {
return last, err
}
// End might be the zero time, in which case calling Unix
// returns a large negative number that we don't want
var endTime int64
if !ms.SongInfo.End.IsZero() {
endTime = ms.SongInfo.End.Unix()
}
// Song might not have a track associated with it, so we
// have to check for that first, before reading the TrackID
var trackID int
if ms.Song.HasTrack() {
trackID = int(ms.Song.TrackID)
}
// Thread seems to be a literal "none" if no thread is supposed to be shown in
// the old API
thread := ms.Thread
if ms.Thread == "" {
thread = "none"
}
dj := ms.User.DJ
status.Main = v0StatusMain{
NowPlaying: ms.Song.Metadata,
Listeners: ms.Listeners,
IsAFKStream: ms.User.Username == "AFK",
StartTime: ms.SongInfo.Start.Unix(),
EndTime: endTime,
LastSet: now.Format("2006-01-02 15:04:05"),
TrackID: trackID,
Thread: thread,
// TODO(wessie): use RequestsEnabled again when it is implemented properly,
// right now nothing sets it and the streamer ignores the value too, only
// reading the configuration file instead
Requesting: ms.User.Username == "AFK",
// Requesting: ms.RequestsEnabled,
DJName: dj.Name,
DJ: v0StatusDJ{
ID: int(dj.ID),
Name: dj.Name,
Description: dj.Text,
Image: dj.Image,
Color: dj.Color,
Visible: dj.Visible,
Priority: dj.Priority,
ThemeCSS: dj.CSS,
ThemeID: int(dj.Theme.ID),
Role: dj.Role,
},
Queue: queue,
LastPlayed: lastplayed,
}
return status, nil
}
func (s *v0Status) updateStatusJSON(ctx context.Context) error {
ss, err := s.createStatusJSON(ctx)
if err != nil {
return err
}
s.storeCache(ss)
return nil
}
func (s *v0Status) runUpdate(ctx context.Context) {
ticker := time.NewTicker(s.updatePeriod)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
}
err := s.updateStatusJSON(ctx)
if err != nil {
log.Printf("status: update error: %s", err)
}
}
}
| {
r := chi.NewRouter()
r.Use(chiware.SetHeader("Content-Type", "application/json"))
r.Method("GET", "/", a.status)
r.Get("/ping", func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte(`{"ping":true}`))
})
r.Get("/user-cooldown", a.getUserCooldown)
r.Get("/news", a.getNews)
r.Get("/search/{query}", a.getSearch)
r.Get("/can-request", a.getCanRequest)
// should be static-images only
r.With(middleware.UserByDJIDCtx(a.storage)).
Get("/dj-image/{DJID}-*", a.getDJImage)
r.With(middleware.UserByDJIDCtx(a.storage)).
Get("/dj-image/{DJID:[0-9]+}", a.getDJImage)
// these are deprecated
r.Get("/song", a.getSong)
r.Get("/metadata", a.getMetadata)
return r
} | identifier_body |
api.go | package php
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"sync/atomic"
"time"
radio "github.com/R-a-dio/valkyrie"
"github.com/R-a-dio/valkyrie/config"
"github.com/R-a-dio/valkyrie/errors"
"github.com/R-a-dio/valkyrie/search"
"github.com/R-a-dio/valkyrie/website/middleware"
"github.com/go-chi/chi"
chiware "github.com/go-chi/chi/middleware"
)
func NewAPI(ctx context.Context, cfg config.Config, storage radio.StorageService,
streamer radio.StreamerService, manager radio.ManagerService) (*API, error) {
status, err := newV0Status(ctx, storage, streamer, manager)
if err != nil {
return nil, err
}
searcher, err := search.Open(cfg)
if err != nil {
return nil, err
}
api := API{
Config: cfg,
storage: storage,
streamer: streamer,
manager: manager,
status: status,
search: searcher,
}
return &api, nil
}
type API struct {
config.Config
search radio.SearchService
storage radio.StorageService
streamer radio.StreamerService
manager radio.ManagerService
status *v0Status
}
func (a *API) Router() chi.Router {
r := chi.NewRouter()
r.Use(chiware.SetHeader("Content-Type", "application/json"))
r.Method("GET", "/", a.status)
r.Get("/ping", func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte(`{"ping":true}`))
})
r.Get("/user-cooldown", a.getUserCooldown)
r.Get("/news", a.getNews)
r.Get("/search/{query}", a.getSearch)
r.Get("/can-request", a.getCanRequest)
// should be static-images only
r.With(middleware.UserByDJIDCtx(a.storage)).
Get("/dj-image/{DJID}-*", a.getDJImage)
r.With(middleware.UserByDJIDCtx(a.storage)).
Get("/dj-image/{DJID:[0-9]+}", a.getDJImage)
// these are deprecated
r.Get("/song", a.getSong)
r.Get("/metadata", a.getMetadata)
return r
}
func (a *API) getSong(w http.ResponseWriter, r *http.Request) {
http.Error(w, http.StatusText(410), 410)
}
func (a *API) getMetadata(w http.ResponseWriter, r *http.Request) {
http.Error(w, http.StatusText(410), 410)
}
func (a *API) getUserCooldown(w http.ResponseWriter, r *http.Request) {
identifier := r.RemoteAddr
submissionTime, err := a.storage.Submissions(r.Context()).LastSubmissionTime(identifier)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
_, ok := radio.CalculateCooldown(
time.Duration(a.Conf().UserUploadDelay),
submissionTime,
)
response := userCooldownResponse{
Cooldown: submissionTime.Unix(),
Now: time.Now().Unix(),
Delay: int64(time.Duration(a.Conf().UserUploadDelay) / time.Second),
}
if ok {
response.Message = "You can upload a song!"
} else {
response.Message = fmt.Sprintf(
"You cannot upload another song just yet. You can upload %s",
submissionTime.
Add(time.Duration(a.Conf().UserUploadDelay)).
Format(timeagoFormat),
)
}
err = json.NewEncoder(w).Encode(response)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
}
type userCooldownResponse struct {
// time of last upload
Cooldown int64 `json:"cooldown"`
// current time
Now int64 `json:"now"`
// configured cooldown in seconds
Delay int64 `json:"delay"`
// message to the user
Message string `json:"message"`
}
func (a *API) getNews(w http.ResponseWriter, r *http.Request) {
result, err := a.storage.News(r.Context()).ListPublic(3, 0)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
// copy the entries to sanitized output struct
entries := result.Entries
var response = make([]newsResponse, len(entries))
for i := range response {
response[i].Title = entries[i].Title
response[i].Header = entries[i].Header
response[i].Body = entries[i].Body
}
err = json.NewEncoder(w).Encode(response)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
}
type newsResponse struct {
Title string `json:"title"`
Header string `json:"header"`
Body string `json:"text"`
}
func (a *API) getSearch(w http.ResponseWriter, r *http.Request) {
// parse the query string for page and limit settings
values, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
var limit = 20
{
rawLimit := values.Get("limit")
parsedLimit, err := strconv.Atoi(rawLimit)
if err == nil && parsedLimit < 20 {
// TODO: check if we just want to throw a fit if NaN
// only use the value if it's a number and it's
// not above the allowed limit
limit = parsedLimit
}
}
var page = 1
{
rawPage := values.Get("page")
parsedPage, err := strconv.Atoi(rawPage)
if err == nil {
// TODO: check if we just want to throw a fit if NaN
// only use the value if it's a valid number
page = parsedPage
}
}
var offset = (page - 1) * limit
if offset < 0 {
offset = 0
}
ctx := r.Context()
// key from the url router, query is part of the url
query := chi.URLParamFromCtx(ctx, "query")
result, err := a.search.Search(ctx, query, limit, offset)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
songs := result.Songs
// create pagination information for the result
var response = searchResponse{
Total: result.TotalHits,
PerPage: limit,
CurrentPage: page,
LastPage: result.TotalHits/limit + 1,
From: offset + 1,
To: offset + len(songs),
}
// move over the results to sanitized output structs
response.Results = make([]searchResponseItem, len(songs))
for i := range songs {
response.Results[i].fromSong(songs[i])
}
err = json.NewEncoder(w).Encode(response)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
}
type searchResponse struct {
Total int `json:"total"`
PerPage int `json:"per_page"`
CurrentPage int `json:"current_page"`
LastPage int `json:"last_page"`
From int `json:"from"`
To int `json:"to"`
Results []searchResponseItem `json:"data"`
}
type searchResponseItem struct {
Artist string `json:"artist"`
Title string `json:"title"`
TrackID radio.TrackID `json:"id"`
LastPlayed int64 `json:"lastplayed"`
LastRequested int64 `json:"lastrequested"`
Requestable bool `json:"requestable"`
}
// fromSong copies relevant fields from the song given to the response item
func (sri *searchResponseItem) fromSong(s radio.Song) error {
if !s.HasTrack() {
// TODO: look at error handling
return errors.New("Song without track found in search API")
}
sri.Artist = s.Artist
sri.Title = s.Title
sri.TrackID = s.TrackID
if s.LastPlayed.IsZero() {
sri.LastPlayed = 0
} else {
sri.LastPlayed = s.LastPlayed.Unix()
}
if s.LastRequested.IsZero() {
sri.LastRequested = 0
} else {
sri.LastRequested = s.LastRequested.Unix()
}
sri.Requestable = s.Requestable()
return nil
}
func (a *API) | (w http.ResponseWriter, r *http.Request) {
status, err := a.manager.Status(r.Context())
if err != nil {
return
}
response := canRequestResponse{}
// send our response when we return
defer func() {
// but not if an error occured
if err != nil {
// TODO: handle error
http.Error(w, http.StatusText(501), 501)
return
}
err := json.NewEncoder(w).Encode(response)
if err != nil {
log.Println(err)
}
}()
// all requests are disabled
if !status.RequestsEnabled {
return
}
identifier := r.RemoteAddr
userLastRequest, err := a.storage.Request(r.Context()).LastRequest(identifier)
if err != nil {
return
}
_, ok := radio.CalculateCooldown(
time.Duration(a.Conf().UserRequestDelay),
userLastRequest,
)
if !ok {
return
}
response.Main.Requests = true
return
}
type canRequestResponse struct {
Main struct {
Requests bool `json:"requests"`
}
}
func (a *API) getDJImage(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
w.Header().Del("Content-Type")
w.Header().Set("Content-Type", "image/png")
user, ok := ctx.Value(middleware.UserKey).(radio.User)
if !ok {
panic("missing UserByDJIDCtx middleware")
return
}
sid := chi.URLParamFromCtx(ctx, "DJID")
filename := filepath.Join(a.Conf().Website.DJImagePath, sid)
f, err := os.Open(filename)
if err != nil {
log.Println(err)
return
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
log.Println(err)
return
}
http.ServeContent(w, r, user.DJ.Image, fi.ModTime(), f)
}
// RequestRoute is the router setup for handling requests
func (a *API) RequestRoute(r chi.Router) {
r.Use(middleware.TrackCtx(a.storage))
r.Post("/", a.postRequest)
}
// postRequest handles /request in legacy PHP format
func (a *API) postRequest(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
response := map[string]string{}
defer func() {
err := json.NewEncoder(w).Encode(response)
if err != nil {
log.Println(err)
}
}()
song, ok := ctx.Value(middleware.TrackKey).(radio.Song)
if !ok {
response["error"] = "invalid parameter"
return
}
err := a.streamer.RequestSong(ctx, song, r.RemoteAddr)
if err == nil {
response["success"] = "Thank you for making your request!"
return
}
switch {
case errors.Is(errors.SongCooldown, err):
response["error"] = "That song is still on cooldown, You'll have to wait longer to request it."
case errors.Is(errors.UserCooldown, err):
response["error"] = "You recently requested a song. You have to wait longer until you can request again."
case errors.Is(errors.StreamerNoRequests, err):
response["error"] = "Requests are disabled currently."
default:
log.Println(err)
response["error"] = "something broke, report to IRC."
}
}
type requestResponse map[string]string
func newV0Status(ctx context.Context, storage radio.SongStorageService,
streamer radio.StreamerService, manager radio.ManagerService) (*v0Status, error) {
s := v0Status{
songs: storage,
streamer: streamer,
manager: manager,
updatePeriod: time.Second * 2,
longUpdatePeriod: time.Second * 10,
}
// initialize the atomic.Value
s.storeCache(v0StatusJSON{})
// run a periodic updater
go s.runUpdate(ctx)
// but also call update to get an initial value before we return
return &s, s.updateStatusJSON(ctx)
}
// v0Status implements the root of the /api endpoint
type v0Status struct {
// song storage to get last played songs
songs radio.SongStorageService
// streamer for queue contents
streamer radio.StreamerService
// manager for overall stream status
manager radio.ManagerService
updatePeriod time.Duration
longUpdatePeriod time.Duration
// cache contains a v0StatusJSON
cache atomic.Value
}
type v0StatusJSON struct {
Main v0StatusMain `json:"main"`
// field to determine when we created the contents of LastPlayed and Queue
ListCreatedOn time.Time `json:"-"`
}
type v0StatusMain struct {
NowPlaying string `json:"np"`
Listeners int `json:"listeners"`
BitRate int `json:"bitrate"`
IsAFKStream bool `json:"isafkstream"`
IsStreamDesk bool `json:"isstreamdesk"`
CurrentTime int64 `json:"current"`
StartTime int64 `json:"start_time"`
EndTime int64 `json:"end_time"`
LastSet string `json:"lastset"`
TrackID int `json:"trackid"`
Thread string `json:"thread"`
Requesting bool `json:"requesting"`
DJName string `json:"djname"`
DJ v0StatusDJ `json:"dj"`
Queue []v0StatusListEntry `json:"queue"`
LastPlayed []v0StatusListEntry `json:"lp"`
}
type v0StatusDJ struct {
ID int `json:"id" db:"djid"`
Name string `json:"djname" db:"djname"`
Description string `json:"djtext" db:"djtext"`
Image string `json:"djimage" db:"djimage"`
Color string `json:"djcolor" db:"djcolor"`
Visible bool `json:"visible" db:"visible"`
Priority int `json:"priority" db:"priority"`
ThemeCSS string `json:"css" db:"css"`
ThemeID int `json:"theme_id" db:"theme_id"`
Role string `json:"role" db:"role"`
}
type v0StatusListEntry struct {
Metadata string `json:"meta" db:"meta"`
Time string `json:"time"`
Type int `json:"type" db:"type"`
Timestamp int64 `json:"timestamp" db:"time"`
}
func (s *v0Status) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
status := s.loadCache()
status.Main.CurrentTime = time.Now().Unix()
h := rw.Header()
h.Set("Content-Type", "application/json")
h.Set("Access-Control-Allow-Origin", "*")
e := json.NewEncoder(rw)
e.SetEscapeHTML(false)
err := e.Encode(status)
if err != nil {
log.Printf("json encoding error: %s", err)
}
}
func (s *v0Status) loadCache() v0StatusJSON {
return s.cache.Load().(v0StatusJSON)
}
func (s *v0Status) storeCache(ss v0StatusJSON) {
s.cache.Store(ss)
}
const timeagoFormat = `<time class="timeago" datetime="2006-01-02T15:04:05-0700">15:04:05</time>`
// createStatusJSON creates a new populated v0StatusJSON, if an error occurs it returns
// the previous v0StatusJSON that was stored in the cache
//
// Additionally, the Queue and LastPlayed fields are only updated if a period of length
// LongUpdatePeriod has passed, otherwise uses the contents of the previous status
func (s *v0Status) createStatusJSON(ctx context.Context) (v0StatusJSON, error) {
var now = time.Now()
var status v0StatusJSON
last := s.loadCache()
queue := last.Main.Queue
lastplayed := last.Main.LastPlayed
// see if we need to update the queue and lastplayed values
if last.ListCreatedOn.IsZero() ||
now.Sub(last.ListCreatedOn) < s.longUpdatePeriod {
q, err := s.streamer.Queue(ctx)
if err != nil {
return last, err
}
if len(q) > 5 {
q = q[:5]
}
queue = make([]v0StatusListEntry, len(q))
for i, entry := range q {
queue[i].Metadata = entry.Song.Metadata
queue[i].Time = entry.ExpectedStartTime.Format(timeagoFormat)
queue[i].Timestamp = entry.ExpectedStartTime.Unix()
if entry.IsUserRequest {
queue[i].Type = 1
}
}
lp, err := s.songs.Song(ctx).LastPlayed(0, 5)
if err != nil {
return last, err
}
lastplayed = make([]v0StatusListEntry, len(lp))
for i, song := range lp {
lastplayed[i].Metadata = song.Metadata
lastplayed[i].Time = song.LastPlayed.Format(timeagoFormat)
lastplayed[i].Timestamp = song.LastPlayed.Unix()
}
// record when we created these values, so we know when to refresh again
status.ListCreatedOn = now
}
ms, err := s.manager.Status(ctx)
if err != nil {
return last, err
}
// End might be the zero time, in which case calling Unix
// returns a large negative number that we don't want
var endTime int64
if !ms.SongInfo.End.IsZero() {
endTime = ms.SongInfo.End.Unix()
}
// Song might not have a track associated with it, so we
// have to check for that first, before reading the TrackID
var trackID int
if ms.Song.HasTrack() {
trackID = int(ms.Song.TrackID)
}
// Thread seems to be a literal "none" if no thread is supposed to be shown in
// the old API
thread := ms.Thread
if ms.Thread == "" {
thread = "none"
}
dj := ms.User.DJ
status.Main = v0StatusMain{
NowPlaying: ms.Song.Metadata,
Listeners: ms.Listeners,
IsAFKStream: ms.User.Username == "AFK",
StartTime: ms.SongInfo.Start.Unix(),
EndTime: endTime,
LastSet: now.Format("2006-01-02 15:04:05"),
TrackID: trackID,
Thread: thread,
// TODO(wessie): use RequestsEnabled again when it is implemented properly,
// right now nothing sets it and the streamer ignores the value too, only
// reading the configuration file instead
Requesting: ms.User.Username == "AFK",
// Requesting: ms.RequestsEnabled,
DJName: dj.Name,
DJ: v0StatusDJ{
ID: int(dj.ID),
Name: dj.Name,
Description: dj.Text,
Image: dj.Image,
Color: dj.Color,
Visible: dj.Visible,
Priority: dj.Priority,
ThemeCSS: dj.CSS,
ThemeID: int(dj.Theme.ID),
Role: dj.Role,
},
Queue: queue,
LastPlayed: lastplayed,
}
return status, nil
}
func (s *v0Status) updateStatusJSON(ctx context.Context) error {
ss, err := s.createStatusJSON(ctx)
if err != nil {
return err
}
s.storeCache(ss)
return nil
}
func (s *v0Status) runUpdate(ctx context.Context) {
ticker := time.NewTicker(s.updatePeriod)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
}
err := s.updateStatusJSON(ctx)
if err != nil {
log.Printf("status: update error: %s", err)
}
}
}
| getCanRequest | identifier_name |
api.go | package php
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"sync/atomic"
"time"
radio "github.com/R-a-dio/valkyrie"
"github.com/R-a-dio/valkyrie/config"
"github.com/R-a-dio/valkyrie/errors"
"github.com/R-a-dio/valkyrie/search"
"github.com/R-a-dio/valkyrie/website/middleware"
"github.com/go-chi/chi"
chiware "github.com/go-chi/chi/middleware"
)
func NewAPI(ctx context.Context, cfg config.Config, storage radio.StorageService,
streamer radio.StreamerService, manager radio.ManagerService) (*API, error) {
status, err := newV0Status(ctx, storage, streamer, manager)
if err != nil {
return nil, err
}
searcher, err := search.Open(cfg)
if err != nil {
return nil, err
}
api := API{
Config: cfg,
storage: storage,
streamer: streamer,
manager: manager,
status: status,
search: searcher,
}
return &api, nil
}
type API struct {
config.Config
search radio.SearchService
storage radio.StorageService
streamer radio.StreamerService
manager radio.ManagerService
status *v0Status
}
func (a *API) Router() chi.Router {
r := chi.NewRouter()
r.Use(chiware.SetHeader("Content-Type", "application/json"))
r.Method("GET", "/", a.status)
r.Get("/ping", func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte(`{"ping":true}`))
})
r.Get("/user-cooldown", a.getUserCooldown)
r.Get("/news", a.getNews)
r.Get("/search/{query}", a.getSearch)
r.Get("/can-request", a.getCanRequest)
// should be static-images only
r.With(middleware.UserByDJIDCtx(a.storage)).
Get("/dj-image/{DJID}-*", a.getDJImage)
r.With(middleware.UserByDJIDCtx(a.storage)).
Get("/dj-image/{DJID:[0-9]+}", a.getDJImage)
// these are deprecated
r.Get("/song", a.getSong)
r.Get("/metadata", a.getMetadata)
return r
}
func (a *API) getSong(w http.ResponseWriter, r *http.Request) {
http.Error(w, http.StatusText(410), 410)
}
func (a *API) getMetadata(w http.ResponseWriter, r *http.Request) {
http.Error(w, http.StatusText(410), 410)
}
func (a *API) getUserCooldown(w http.ResponseWriter, r *http.Request) {
identifier := r.RemoteAddr
submissionTime, err := a.storage.Submissions(r.Context()).LastSubmissionTime(identifier)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
_, ok := radio.CalculateCooldown(
time.Duration(a.Conf().UserUploadDelay),
submissionTime,
)
response := userCooldownResponse{
Cooldown: submissionTime.Unix(),
Now: time.Now().Unix(),
Delay: int64(time.Duration(a.Conf().UserUploadDelay) / time.Second),
}
if ok {
response.Message = "You can upload a song!"
} else {
response.Message = fmt.Sprintf(
"You cannot upload another song just yet. You can upload %s",
submissionTime.
Add(time.Duration(a.Conf().UserUploadDelay)).
Format(timeagoFormat),
)
}
err = json.NewEncoder(w).Encode(response)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
}
type userCooldownResponse struct {
// time of last upload
Cooldown int64 `json:"cooldown"`
// current time
Now int64 `json:"now"`
// configured cooldown in seconds
Delay int64 `json:"delay"`
// message to the user
Message string `json:"message"`
}
func (a *API) getNews(w http.ResponseWriter, r *http.Request) {
result, err := a.storage.News(r.Context()).ListPublic(3, 0)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
// copy the entries to sanitized output struct
entries := result.Entries
var response = make([]newsResponse, len(entries))
for i := range response {
response[i].Title = entries[i].Title
response[i].Header = entries[i].Header
response[i].Body = entries[i].Body
}
err = json.NewEncoder(w).Encode(response)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
}
type newsResponse struct {
Title string `json:"title"`
Header string `json:"header"`
Body string `json:"text"`
}
func (a *API) getSearch(w http.ResponseWriter, r *http.Request) {
// parse the query string for page and limit settings
values, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
var limit = 20
{
rawLimit := values.Get("limit")
parsedLimit, err := strconv.Atoi(rawLimit)
if err == nil && parsedLimit < 20 {
// TODO: check if we just want to throw a fit if NaN
// only use the value if it's a number and it's
// not above the allowed limit
limit = parsedLimit
}
}
var page = 1
{
rawPage := values.Get("page")
parsedPage, err := strconv.Atoi(rawPage)
if err == nil {
// TODO: check if we just want to throw a fit if NaN
// only use the value if it's a valid number
page = parsedPage
}
}
var offset = (page - 1) * limit
if offset < 0 {
offset = 0
}
ctx := r.Context()
// key from the url router, query is part of the url
query := chi.URLParamFromCtx(ctx, "query")
result, err := a.search.Search(ctx, query, limit, offset)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
songs := result.Songs
// create pagination information for the result
var response = searchResponse{
Total: result.TotalHits,
PerPage: limit,
CurrentPage: page,
LastPage: result.TotalHits/limit + 1,
From: offset + 1,
To: offset + len(songs),
}
// move over the results to sanitized output structs
response.Results = make([]searchResponseItem, len(songs))
for i := range songs {
response.Results[i].fromSong(songs[i])
}
err = json.NewEncoder(w).Encode(response)
if err != nil {
// TODO: look at error handling
log.Println(err)
return
}
}
type searchResponse struct {
Total int `json:"total"`
PerPage int `json:"per_page"`
CurrentPage int `json:"current_page"`
LastPage int `json:"last_page"`
From int `json:"from"`
To int `json:"to"`
Results []searchResponseItem `json:"data"`
}
type searchResponseItem struct {
Artist string `json:"artist"`
Title string `json:"title"`
TrackID radio.TrackID `json:"id"`
LastPlayed int64 `json:"lastplayed"`
LastRequested int64 `json:"lastrequested"`
Requestable bool `json:"requestable"`
}
// fromSong copies relevant fields from the song given to the response item
func (sri *searchResponseItem) fromSong(s radio.Song) error {
if !s.HasTrack() {
// TODO: look at error handling
return errors.New("Song without track found in search API")
}
sri.Artist = s.Artist
sri.Title = s.Title
sri.TrackID = s.TrackID
if s.LastPlayed.IsZero() {
sri.LastPlayed = 0
} else {
sri.LastPlayed = s.LastPlayed.Unix()
}
if s.LastRequested.IsZero() {
sri.LastRequested = 0
} else {
sri.LastRequested = s.LastRequested.Unix()
}
sri.Requestable = s.Requestable()
return nil
}
func (a *API) getCanRequest(w http.ResponseWriter, r *http.Request) {
status, err := a.manager.Status(r.Context())
if err != nil {
return
}
response := canRequestResponse{}
// send our response when we return
defer func() {
// but not if an error occured
if err != nil {
// TODO: handle error
http.Error(w, http.StatusText(501), 501)
return
}
err := json.NewEncoder(w).Encode(response)
if err != nil {
log.Println(err)
}
}()
// all requests are disabled
if !status.RequestsEnabled {
return
}
identifier := r.RemoteAddr
userLastRequest, err := a.storage.Request(r.Context()).LastRequest(identifier)
if err != nil {
return
}
_, ok := radio.CalculateCooldown(
time.Duration(a.Conf().UserRequestDelay),
userLastRequest,
)
if !ok |
response.Main.Requests = true
return
}
type canRequestResponse struct {
Main struct {
Requests bool `json:"requests"`
}
}
func (a *API) getDJImage(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
w.Header().Del("Content-Type")
w.Header().Set("Content-Type", "image/png")
user, ok := ctx.Value(middleware.UserKey).(radio.User)
if !ok {
panic("missing UserByDJIDCtx middleware")
return
}
sid := chi.URLParamFromCtx(ctx, "DJID")
filename := filepath.Join(a.Conf().Website.DJImagePath, sid)
f, err := os.Open(filename)
if err != nil {
log.Println(err)
return
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
log.Println(err)
return
}
http.ServeContent(w, r, user.DJ.Image, fi.ModTime(), f)
}
// RequestRoute is the router setup for handling requests
func (a *API) RequestRoute(r chi.Router) {
r.Use(middleware.TrackCtx(a.storage))
r.Post("/", a.postRequest)
}
// postRequest handles /request in legacy PHP format
func (a *API) postRequest(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
response := map[string]string{}
defer func() {
err := json.NewEncoder(w).Encode(response)
if err != nil {
log.Println(err)
}
}()
song, ok := ctx.Value(middleware.TrackKey).(radio.Song)
if !ok {
response["error"] = "invalid parameter"
return
}
err := a.streamer.RequestSong(ctx, song, r.RemoteAddr)
if err == nil {
response["success"] = "Thank you for making your request!"
return
}
switch {
case errors.Is(errors.SongCooldown, err):
response["error"] = "That song is still on cooldown, You'll have to wait longer to request it."
case errors.Is(errors.UserCooldown, err):
response["error"] = "You recently requested a song. You have to wait longer until you can request again."
case errors.Is(errors.StreamerNoRequests, err):
response["error"] = "Requests are disabled currently."
default:
log.Println(err)
response["error"] = "something broke, report to IRC."
}
}
type requestResponse map[string]string
func newV0Status(ctx context.Context, storage radio.SongStorageService,
streamer radio.StreamerService, manager radio.ManagerService) (*v0Status, error) {
s := v0Status{
songs: storage,
streamer: streamer,
manager: manager,
updatePeriod: time.Second * 2,
longUpdatePeriod: time.Second * 10,
}
// initialize the atomic.Value
s.storeCache(v0StatusJSON{})
// run a periodic updater
go s.runUpdate(ctx)
// but also call update to get an initial value before we return
return &s, s.updateStatusJSON(ctx)
}
// v0Status implements the root of the /api endpoint
type v0Status struct {
// song storage to get last played songs
songs radio.SongStorageService
// streamer for queue contents
streamer radio.StreamerService
// manager for overall stream status
manager radio.ManagerService
updatePeriod time.Duration
longUpdatePeriod time.Duration
// cache contains a v0StatusJSON
cache atomic.Value
}
type v0StatusJSON struct {
Main v0StatusMain `json:"main"`
// field to determine when we created the contents of LastPlayed and Queue
ListCreatedOn time.Time `json:"-"`
}
type v0StatusMain struct {
NowPlaying string `json:"np"`
Listeners int `json:"listeners"`
BitRate int `json:"bitrate"`
IsAFKStream bool `json:"isafkstream"`
IsStreamDesk bool `json:"isstreamdesk"`
CurrentTime int64 `json:"current"`
StartTime int64 `json:"start_time"`
EndTime int64 `json:"end_time"`
LastSet string `json:"lastset"`
TrackID int `json:"trackid"`
Thread string `json:"thread"`
Requesting bool `json:"requesting"`
DJName string `json:"djname"`
DJ v0StatusDJ `json:"dj"`
Queue []v0StatusListEntry `json:"queue"`
LastPlayed []v0StatusListEntry `json:"lp"`
}
type v0StatusDJ struct {
ID int `json:"id" db:"djid"`
Name string `json:"djname" db:"djname"`
Description string `json:"djtext" db:"djtext"`
Image string `json:"djimage" db:"djimage"`
Color string `json:"djcolor" db:"djcolor"`
Visible bool `json:"visible" db:"visible"`
Priority int `json:"priority" db:"priority"`
ThemeCSS string `json:"css" db:"css"`
ThemeID int `json:"theme_id" db:"theme_id"`
Role string `json:"role" db:"role"`
}
type v0StatusListEntry struct {
Metadata string `json:"meta" db:"meta"`
Time string `json:"time"`
Type int `json:"type" db:"type"`
Timestamp int64 `json:"timestamp" db:"time"`
}
func (s *v0Status) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
status := s.loadCache()
status.Main.CurrentTime = time.Now().Unix()
h := rw.Header()
h.Set("Content-Type", "application/json")
h.Set("Access-Control-Allow-Origin", "*")
e := json.NewEncoder(rw)
e.SetEscapeHTML(false)
err := e.Encode(status)
if err != nil {
log.Printf("json encoding error: %s", err)
}
}
func (s *v0Status) loadCache() v0StatusJSON {
return s.cache.Load().(v0StatusJSON)
}
func (s *v0Status) storeCache(ss v0StatusJSON) {
s.cache.Store(ss)
}
const timeagoFormat = `<time class="timeago" datetime="2006-01-02T15:04:05-0700">15:04:05</time>`
// createStatusJSON creates a new populated v0StatusJSON, if an error occurs it returns
// the previous v0StatusJSON that was stored in the cache
//
// Additionally, the Queue and LastPlayed fields are only updated if a period of length
// LongUpdatePeriod has passed, otherwise uses the contents of the previous status
func (s *v0Status) createStatusJSON(ctx context.Context) (v0StatusJSON, error) {
var now = time.Now()
var status v0StatusJSON
last := s.loadCache()
queue := last.Main.Queue
lastplayed := last.Main.LastPlayed
// see if we need to update the queue and lastplayed values
if last.ListCreatedOn.IsZero() ||
now.Sub(last.ListCreatedOn) < s.longUpdatePeriod {
q, err := s.streamer.Queue(ctx)
if err != nil {
return last, err
}
if len(q) > 5 {
q = q[:5]
}
queue = make([]v0StatusListEntry, len(q))
for i, entry := range q {
queue[i].Metadata = entry.Song.Metadata
queue[i].Time = entry.ExpectedStartTime.Format(timeagoFormat)
queue[i].Timestamp = entry.ExpectedStartTime.Unix()
if entry.IsUserRequest {
queue[i].Type = 1
}
}
lp, err := s.songs.Song(ctx).LastPlayed(0, 5)
if err != nil {
return last, err
}
lastplayed = make([]v0StatusListEntry, len(lp))
for i, song := range lp {
lastplayed[i].Metadata = song.Metadata
lastplayed[i].Time = song.LastPlayed.Format(timeagoFormat)
lastplayed[i].Timestamp = song.LastPlayed.Unix()
}
// record when we created these values, so we know when to refresh again
status.ListCreatedOn = now
}
ms, err := s.manager.Status(ctx)
if err != nil {
return last, err
}
// End might be the zero time, in which case calling Unix
// returns a large negative number that we don't want
var endTime int64
if !ms.SongInfo.End.IsZero() {
endTime = ms.SongInfo.End.Unix()
}
// Song might not have a track associated with it, so we
// have to check for that first, before reading the TrackID
var trackID int
if ms.Song.HasTrack() {
trackID = int(ms.Song.TrackID)
}
// Thread seems to be a literal "none" if no thread is supposed to be shown in
// the old API
thread := ms.Thread
if ms.Thread == "" {
thread = "none"
}
dj := ms.User.DJ
status.Main = v0StatusMain{
NowPlaying: ms.Song.Metadata,
Listeners: ms.Listeners,
IsAFKStream: ms.User.Username == "AFK",
StartTime: ms.SongInfo.Start.Unix(),
EndTime: endTime,
LastSet: now.Format("2006-01-02 15:04:05"),
TrackID: trackID,
Thread: thread,
// TODO(wessie): use RequestsEnabled again when it is implemented properly,
// right now nothing sets it and the streamer ignores the value too, only
// reading the configuration file instead
Requesting: ms.User.Username == "AFK",
// Requesting: ms.RequestsEnabled,
DJName: dj.Name,
DJ: v0StatusDJ{
ID: int(dj.ID),
Name: dj.Name,
Description: dj.Text,
Image: dj.Image,
Color: dj.Color,
Visible: dj.Visible,
Priority: dj.Priority,
ThemeCSS: dj.CSS,
ThemeID: int(dj.Theme.ID),
Role: dj.Role,
},
Queue: queue,
LastPlayed: lastplayed,
}
return status, nil
}
func (s *v0Status) updateStatusJSON(ctx context.Context) error {
ss, err := s.createStatusJSON(ctx)
if err != nil {
return err
}
s.storeCache(ss)
return nil
}
func (s *v0Status) runUpdate(ctx context.Context) {
ticker := time.NewTicker(s.updatePeriod)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
}
err := s.updateStatusJSON(ctx)
if err != nil {
log.Printf("status: update error: %s", err)
}
}
}
| {
return
} | conditional_block |
assets.responsive.js | /*************************************************************************
Namespaced method to use in conjunction with responsive methods.
**************************************************************************/
var A11yResp = {
Core: function() {
//Set responsive indicator in body
var indicator = document.createElement('div');
indicator.id = 'screen-indicator';
$('body').prepend(indicator);
//add browser compatibility
if ($('meta[http-equiv]').length === 0) {
$('title').before('<meta http-equiv="X-UA-Compatible" content="IE=edge">');
}
//add responsive meta tag to the head
if ($('meta[name=viewport]').length === 0) {
$('title').before('<meta name="viewport" content="width=device-width">');
}
| var context = this,
args = arguments;
var later = function() {
timeout = null;
if (!immediate) func.apply(context, args);
};
var callNow = immediate && !timeout;
clearTimeout(timeout);
timeout = setTimeout(later, wait);
if (callNow) func.apply(context, args);
};
},
getScreenWidth: function() {
var index;
//requires media query css reference to #screen-indicator in order to work.
if (window.getComputedStyle) {
index = parseInt(window.getComputedStyle(document.getElementById('screen-indicator')).getPropertyValue('z-index'), 10);
} else {
// Use .getCompStyle instead of .getComputedStyle
window.getCompStyle = function(el, pseudo) {
this.el = el;
this.getPropertyValue = function(prop) {
var re = /(\-([a-z]){1})/g;
if (prop == 'float') prop = 'styleFloat';
if (re.test(prop)) {
prop = prop.replace(re, function() {
return arguments[2].toUpperCase();
});
}
return el.currentStyle[prop] ? el.currentStyle[prop] : null;
};
return this;
};
index = parseInt(window.getCompStyle(document.getElementById('screen-indicator')).getPropertyValue("z-index"), 10);
}
var states = {
2: 'screen-lg-min',
3: 'screen-md-min',
4: 'screen-sm-min',
5: 'screen-xs-min',
6: 'screen-xs-max',
7: 'screen-sm-max',
8: 'screen-md-max'
};
return states[index] || 'desktop';
},
accordionsToTabs: function() {
$('.accordions-tabs.ui-accordion').each(function() {
var $this = $(this);
var t = 0;
$this.prepend('<ul></ul>');
$(this).find("> .ui-accordion-header").each(function() {
t++;
$this.find('ul').append('<li><a href="#tabs-' + t + '">' + $(this).text() + "</a></li>");
});
$(this).find("> .ui-accordion-header").remove();
$(this).accordion("destroy");
$(this).tabs();
});
},
tabsToAccordions: function() {
$('.accordions-tabs.ui-tabs').each(function() {
var $this = $(this);
var n = 0;
$this.find('> ul > li').each(function() {
$('<h3>' + $(this).text() + '</h3>').insertBefore($this.find('> .ui-tabs-panel').eq(n));
n++;
});
$this.find('> ul').remove();
$(this).tabs('destroy');
});
},
// Adding Touch Event on default Android browsers <3.
// Currently browser does not support overflow: auto or overflow: scroll
// to implement call touchScroll("divID"); on container div
isTouchDevice: function() {
try {
document.createEvent("TouchEvent");
return true;
} catch (e) {
return false;
}
},
touchScroll: function(id) {
if (this.isTouchDevice()) { //if touch events exist...
var el = document.getElementById(id);
var scrollStartPosY = 0;
var scrollStartPosX = 0;
document.getElementById(id).addEventListener("touchstart", function(event) {
scrollStartPosY = this.scrollTop + event.touches[0].pageY;
scrollStartPosX = this.scrollLeft + event.touches[0].pageX;
}, false);
document.getElementById(id).addEventListener("touchmove", function(event) {
this.scrollTop = scrollStartPosY - event.touches[0].pageY;
this.scrollLeft = scrollStartPosX - event.touches[0].pageX;
}, false);
}
}
//end of A11y Responsive namespace functions
};
/*******************************************
Extension Methods for jQuery widgets
*******************************************/
//Extends jQuery JPanel library
A11yjPanel = function() {
// If navlist doesn't exist - use left-navs
var jPMmenu = document.getElementById('left-navs') === null ? '.nav-main > ul' : '#left-navs > ul';
var jPMmenuIdentify = document.getElementById('left-navs') === null ? 'jpanel-topnav' : 'jpanel-leftnav';
var jPM;
//check if jPanel dependency is loaded.
if (typeof $.jPanelMenu === 'function') {
//var jPMmenu = this;
jPM = $.jPanelMenu({
menu: jPMmenu, //default '#menu',
trigger: 'button.navbar-toggle-main', //default .menu-trigger
openPosition: '250px',
keyboardShortcuts: 'false',
closeOnContentClick: false,
afterOn: function() {
$('#jPanelMenu-menu').insertBefore('.jPanelMenu-panel');
// Remove all classes and and panel-group and nav class for collapse functionality
$('#jPanelMenu-menu').removeClass().addClass('nav panel-group ' + jPMmenuIdentify);
// Add class to direct children for collapse functionality
$('#jPanelMenu-menu > li').addClass('side-menu');
// Only add the following if and only if the menu contains submenu
if ($(jPMmenu).find('> li > ul').length > 0) {
// Remove jquery ui stuff
$('#jPanelMenu-menu li').removeClass('ui-menu-item');
$('#jPanelMenu-menu li a').removeAttr('id aria-haspopup').removeClass('ui-corner-all');
$('#jPanelMenu-menu .submenu-separator-container, #jPanelMenu-menu .ui-menu-icon').remove();
$('#jPanelMenu-menu li ul').removeAttr('style').removeClass('ui-menu ui-widget ui-widget-content ui-corner-all');
}
// Make the links expand collapse if the parent menu contains more than 1 link
if ($(jPMmenu).find('> li ul > li').length > 1) {
$('#jPanelMenu-menu > li > a')
.wrapInner('<span>')
.attr('href', 'javascript:void(0)')
.append(function() {
return '<em class="glyphicon glyphicon-chevron-down"><span class="sr-only">Click to expand ' + $(this).text() + ' menu</span></em>';
});
// Add collapsed class for toggling bg of the anchor tag
$('#jPanelMenu-menu > li > a').addClass('collapsed');
// On upper level link click
$('#jPanelMenu-menu > li > a').on('click', function() {
// Collapse all open dropdowns
$('#jPanelMenu-menu > li > ul.in').collapse('hide');
// Toggle the one that is directly under the anchor that is being clicked
$(this).next().collapse('toggle');
});
// Catch collapse events
$('#jPanelMenu-menu > li > ul').on({
'show.bs.collapse': function() {
// Remove class collapsed from the anchor if the dropdown is shown
$(this).prev().removeClass('collapsed');
},
'hide.bs.collapse': function() {
// Add class collapsed from the anchor if the dropdown is hidden
$(this).prev().addClass('collapsed');
}
});
// Add class to dropdown uls for collapse functionality
$('#jPanelMenu-menu > li > ul').addClass('panel-collapse collapse sub-menu');
} else {
// Add class to dropdown uls for collapse functionality
$('#jPanelMenu-menu > li > ul').addClass('panel-collapse sub-menu');
}
},
afterOpen: function() {
$('#liveText-polite').text('Menu has opened');
setTimeout(function() {
if ($('#jPanelMenu-menu').find(':focusable').length > 0) {
$('#jPanelMenu-menu').find(':focusable')[0].focus();
}
}, 500);
// Focus
$('#jPanelMenu-menu').on('keydown', function(e) {
// On tab out, focus to the trigger
if(e.keyCode == 9) {
var skipToggle = false;
// For links containing submenu
if($('#jPanelMenu-menu > li > ul').length > 0 && ($('#jPanelMenu-menu > li:last-child > a.collapsed').is($(e.target)) || $('#jPanelMenu-menu > li:last-child > ul > li:last-child > a').is($(e.target)))) skipToggle = true;
if($('#jPanelMenu-menu > li > ul').length == 0 && $('#jPanelMenu-menu > li:last-child > a').is($(e.target))) skipToggle = true;
if(skipToggle) {
e.preventDefault();
$('#liveText-polite').text('Menu has closed');
jPM.close();
}
}
});
},
afterClose: function() {
$('button.navbar-toggle-main').focus();
}
});
} else {
console.log('Missing jPanel library');
}
return jPM;
};
//end of extension methods | },
debounce: function(func, wait, immediate) {
var timeout;
return function() {
| random_line_split |
assets.responsive.js | /*************************************************************************
Namespaced method to use in conjunction with responsive methods.
**************************************************************************/
var A11yResp = {
Core: function() {
//Set responsive indicator in body
var indicator = document.createElement('div');
indicator.id = 'screen-indicator';
$('body').prepend(indicator);
//add browser compatibility
if ($('meta[http-equiv]').length === 0) {
$('title').before('<meta http-equiv="X-UA-Compatible" content="IE=edge">');
}
//add responsive meta tag to the head
if ($('meta[name=viewport]').length === 0) {
$('title').before('<meta name="viewport" content="width=device-width">');
}
},
debounce: function(func, wait, immediate) {
var timeout;
return function() {
var context = this,
args = arguments;
var later = function() {
timeout = null;
if (!immediate) func.apply(context, args);
};
var callNow = immediate && !timeout;
clearTimeout(timeout);
timeout = setTimeout(later, wait);
if (callNow) func.apply(context, args);
};
},
getScreenWidth: function() {
var index;
//requires media query css reference to #screen-indicator in order to work.
if (window.getComputedStyle) {
index = parseInt(window.getComputedStyle(document.getElementById('screen-indicator')).getPropertyValue('z-index'), 10);
} else {
// Use .getCompStyle instead of .getComputedStyle
window.getCompStyle = function(el, pseudo) {
this.el = el;
this.getPropertyValue = function(prop) {
var re = /(\-([a-z]){1})/g;
if (prop == 'float') prop = 'styleFloat';
if (re.test(prop)) {
prop = prop.replace(re, function() {
return arguments[2].toUpperCase();
});
}
return el.currentStyle[prop] ? el.currentStyle[prop] : null;
};
return this;
};
index = parseInt(window.getCompStyle(document.getElementById('screen-indicator')).getPropertyValue("z-index"), 10);
}
var states = {
2: 'screen-lg-min',
3: 'screen-md-min',
4: 'screen-sm-min',
5: 'screen-xs-min',
6: 'screen-xs-max',
7: 'screen-sm-max',
8: 'screen-md-max'
};
return states[index] || 'desktop';
},
accordionsToTabs: function() {
$('.accordions-tabs.ui-accordion').each(function() {
var $this = $(this);
var t = 0;
$this.prepend('<ul></ul>');
$(this).find("> .ui-accordion-header").each(function() {
t++;
$this.find('ul').append('<li><a href="#tabs-' + t + '">' + $(this).text() + "</a></li>");
});
$(this).find("> .ui-accordion-header").remove();
$(this).accordion("destroy");
$(this).tabs();
});
},
tabsToAccordions: function() {
$('.accordions-tabs.ui-tabs').each(function() {
var $this = $(this);
var n = 0;
$this.find('> ul > li').each(function() {
$('<h3>' + $(this).text() + '</h3>').insertBefore($this.find('> .ui-tabs-panel').eq(n));
n++;
});
$this.find('> ul').remove();
$(this).tabs('destroy');
});
},
// Adding Touch Event on default Android browsers <3.
// Currently browser does not support overflow: auto or overflow: scroll
// to implement call touchScroll("divID"); on container div
isTouchDevice: function() {
try {
document.createEvent("TouchEvent");
return true;
} catch (e) {
return false;
}
},
touchScroll: function(id) {
if (this.isTouchDevice()) { //if touch events exist...
var el = document.getElementById(id);
var scrollStartPosY = 0;
var scrollStartPosX = 0;
document.getElementById(id).addEventListener("touchstart", function(event) {
scrollStartPosY = this.scrollTop + event.touches[0].pageY;
scrollStartPosX = this.scrollLeft + event.touches[0].pageX;
}, false);
document.getElementById(id).addEventListener("touchmove", function(event) {
this.scrollTop = scrollStartPosY - event.touches[0].pageY;
this.scrollLeft = scrollStartPosX - event.touches[0].pageX;
}, false);
}
}
//end of A11y Responsive namespace functions
};
/*******************************************
Extension Methods for jQuery widgets
*******************************************/
//Extends jQuery JPanel library
A11yjPanel = function() {
// If navlist doesn't exist - use left-navs
var jPMmenu = document.getElementById('left-navs') === null ? '.nav-main > ul' : '#left-navs > ul';
var jPMmenuIdentify = document.getElementById('left-navs') === null ? 'jpanel-topnav' : 'jpanel-leftnav';
var jPM;
//check if jPanel dependency is loaded.
if (typeof $.jPanelMenu === 'function') {
//var jPMmenu = this;
jPM = $.jPanelMenu({
menu: jPMmenu, //default '#menu',
trigger: 'button.navbar-toggle-main', //default .menu-trigger
openPosition: '250px',
keyboardShortcuts: 'false',
closeOnContentClick: false,
afterOn: function() {
$('#jPanelMenu-menu').insertBefore('.jPanelMenu-panel');
// Remove all classes and and panel-group and nav class for collapse functionality
$('#jPanelMenu-menu').removeClass().addClass('nav panel-group ' + jPMmenuIdentify);
// Add class to direct children for collapse functionality
$('#jPanelMenu-menu > li').addClass('side-menu');
// Only add the following if and only if the menu contains submenu
if ($(jPMmenu).find('> li > ul').length > 0) |
// Make the links expand collapse if the parent menu contains more than 1 link
if ($(jPMmenu).find('> li ul > li').length > 1) {
$('#jPanelMenu-menu > li > a')
.wrapInner('<span>')
.attr('href', 'javascript:void(0)')
.append(function() {
return '<em class="glyphicon glyphicon-chevron-down"><span class="sr-only">Click to expand ' + $(this).text() + ' menu</span></em>';
});
// Add collapsed class for toggling bg of the anchor tag
$('#jPanelMenu-menu > li > a').addClass('collapsed');
// On upper level link click
$('#jPanelMenu-menu > li > a').on('click', function() {
// Collapse all open dropdowns
$('#jPanelMenu-menu > li > ul.in').collapse('hide');
// Toggle the one that is directly under the anchor that is being clicked
$(this).next().collapse('toggle');
});
// Catch collapse events
$('#jPanelMenu-menu > li > ul').on({
'show.bs.collapse': function() {
// Remove class collapsed from the anchor if the dropdown is shown
$(this).prev().removeClass('collapsed');
},
'hide.bs.collapse': function() {
// Add class collapsed from the anchor if the dropdown is hidden
$(this).prev().addClass('collapsed');
}
});
// Add class to dropdown uls for collapse functionality
$('#jPanelMenu-menu > li > ul').addClass('panel-collapse collapse sub-menu');
} else {
// Add class to dropdown uls for collapse functionality
$('#jPanelMenu-menu > li > ul').addClass('panel-collapse sub-menu');
}
},
afterOpen: function() {
$('#liveText-polite').text('Menu has opened');
setTimeout(function() {
if ($('#jPanelMenu-menu').find(':focusable').length > 0) {
$('#jPanelMenu-menu').find(':focusable')[0].focus();
}
}, 500);
// Focus
$('#jPanelMenu-menu').on('keydown', function(e) {
// On tab out, focus to the trigger
if(e.keyCode == 9) {
var skipToggle = false;
// For links containing submenu
if($('#jPanelMenu-menu > li > ul').length > 0 && ($('#jPanelMenu-menu > li:last-child > a.collapsed').is($(e.target)) || $('#jPanelMenu-menu > li:last-child > ul > li:last-child > a').is($(e.target)))) skipToggle = true;
if($('#jPanelMenu-menu > li > ul').length == 0 && $('#jPanelMenu-menu > li:last-child > a').is($(e.target))) skipToggle = true;
if(skipToggle) {
e.preventDefault();
$('#liveText-polite').text('Menu has closed');
jPM.close();
}
}
});
},
afterClose: function() {
$('button.navbar-toggle-main').focus();
}
});
} else {
console.log('Missing jPanel library');
}
return jPM;
};
//end of extension methods | {
// Remove jquery ui stuff
$('#jPanelMenu-menu li').removeClass('ui-menu-item');
$('#jPanelMenu-menu li a').removeAttr('id aria-haspopup').removeClass('ui-corner-all');
$('#jPanelMenu-menu .submenu-separator-container, #jPanelMenu-menu .ui-menu-icon').remove();
$('#jPanelMenu-menu li ul').removeAttr('style').removeClass('ui-menu ui-widget ui-widget-content ui-corner-all');
} | conditional_block |
table_1.py | from datetime import date
import numpy as np
from os import mkdir
import pandas as pd
import xarray as xr
import xlsxwriter
from db_queries import (get_covariate_estimates, get_location_metadata,
get_population)
from fbd_core import argparse, YearRange
from fbd_core.etl import expand_dimensions
from fbd_core.file_interface import FBDPath, open_xr
ALL_AGE_ID = 22
BOTH_SEX_ID = 3
SCENARIOS = [-1, 0, 1, 2, 3]
YEARS = YearRange(1990, 2018, 2100)
CELL_HT = {
"title": 1,
"location": 1,
"stage": 0,
"data_cols": 2
}
COL_RANGE = {
"pop":2,
"tfr":1
}
INDENT_MAP = {
0: "",
1: " ",
2: " ",
3: " "
}
SCENARIO_MAP = {
0:"ref",
-1:"worse",
1:"better",
2:"fastest",
3:"sdg"
}
COL_NAME_MAP = {
"lancet_label":"Location",
"peak_pop_value": "Peak Population (year)",
"value_2017_pop_ref": "2017",
"value_2100_pop_ref": "2100 Reference Scenario",
"value_2100_pop_sdg": "2100 SDG Scenario",
"value_2017_tfr_ref": "2017",
"value_2100_tfr_ref": "2100 Reference Scenario",
"value_2100_tfr_sdg": "2100 SDG Scenario",
}
REVIEW_COL_NAME_MAP = {
"lancet_label": "Location name",
"mean_2017_pop_ref": "Population mean 2017",
"lower_2017_pop_ref": "Population lower 2017",
"upper_2017_pop_ref": "Population upper 2017",
"mean_2100_pop_ref": "Reference population mean 2100",
"lower_2100_pop_ref": "Reference population lower 2100",
"upper_2100_pop_ref": "Reference population upper 2100",
"mean_2100_pop_sdg": "SDG population mean 2100",
"lower_2100_pop_sdg": "SDG population lower 2100",
"upper_2100_pop_sdg": "SDG population upper 2100",
"peak_pop": "Peak population",
"peak_year": "Peak population year",
"mean_2017_tfr_ref": "TFR mean 2017",
"lower_2017_tfr_ref": "TFR lower 2017",
"upper_2017_tfr_ref": "TFR upper 2017",
"mean_2100_tfr_ref": "Reference TFR mean 2100",
"lower_2100_tfr_ref": "Reference TFR lower 2100",
"upper_2100_tfr_ref": "Reference TFR upper 2100",
"mean_2100_tfr_sdg": "SDG TFR mean 2100",
"lower_2100_tfr_sdg": "SDG TFR lower 2100",
"upper_2100_tfr_sdg": "SDG TFR upper 2100"
}
def melt_to_xarray(df):
"""Melts GBD data with 'mean', 'lower', and 'upper' columns to a single
'quantile' column; converts to xarray dataarray; and adds a scenario
dimension.
Args:
df (pandas dataframe):
Dataframe with 'year_id', 'location_id', 'mean', 'lower', and
'upper' columns.
Returns:
da_with_scenario (xarray dataarray):
Dataarray with 'year_id', 'quantile', 'location_id', and 'scenario'
dimensions.
"""
df_long = pd.melt(df,
id_vars=["year_id", "location_id"],
value_vars=["mean", "lower", "upper"],
var_name="quantile")
da = df_long.set_index(
["year_id", "quantile", "location_id"]).to_xarray()["value"]
da_with_scenario = expand_dimensions(da, scenario=[0])
return da_with_scenario
def combine_mean_ui(df, df_type="pop"):
"""Takes a dataframe with 'mean', 'lower', and 'upper' columns,
and returns a dataframe with a 'value' column that has the mean, lower,
and upper all together. If df_type == "pop", values are converted to
millions.
Args:
df (pandas dataframe):
Dataframe with 'mean', 'lower', and 'upper' columns.
Returns:
df (pandas dataframe):
Dataframe with 'mean', 'lower', and 'upper' columns and added
'value' column.
"""
for col in ["mean", "lower", "upper"]:
if df_type == "pop":
df[col] = df[col] / 1000000
df[col] = df[col].apply(lambda x: round(x, 2))
df["value"] = (df["mean"].astype(str) + " (" + df["lower"].astype(str) +
" - " + df["upper"].astype(str) + ")")
return df
def pivot_scenarios(df, prefix, scen_map, df_type="pop"):
"""Takes a dataframe with 'mean', 'lower', 'upper', 'value', and 'scenario'
columns, and returns a dataframe with wide scenarios. Scenario column names
are given by:
prefix + "_" + df_type + "_" + df["scenario"].map(scen_map)
Args:
df (pandas dataframe):
Dataframe with 'mean', 'lower', 'upper', 'value', and 'scenario'
columns.
Returns:
df (pandas dataframe):
Dataframe with 'mean', 'lower', 'upper', 'value' columns wide by
scenario.
"""
df["scenario"] = prefix + "_" + df_type + "_" +\
df["scenario"].map(scen_map)
df = df.pivot_table(values=["lower", "mean", "upper" ,"value"],
index="location_id",
columns="scenario",
aggfunc="first").reset_index()
# This flattens the column levels
df.columns = ['_'.join(col) for col in df.columns.values if col]
df.rename(columns={"location_id_":"location_id"}, inplace=True)
return df
def get_max_pop_year(group):
"""Takes a dataframe (or GroupBy object) with 'mean' and 'year_id' columns,
and returns a dataframe with the max value in 'mean' and the 'year_id'
of the max value.
Args:
df (pandas dataframe):
Dataframe with 'year_id' and 'mean' columns.
Returns:
df (pandas dataframe):
Dataframe with 'year_id' and 'mean' columns.
"""
max_year_val = group.loc[group["mean"].idxmax()][["year_id", "mean"]]
return max_year_val
def pull_reshape_pop(gbd_round_id, pop_version, location_ids):
"""Pulls year 2017 GBD round 5 populations, converts it an xarray dataarray,
pulls forecast population, and concatenates the dataarrays. The new array is
then converted to a pandas dataframe. Peak population and peak population
year are pulled for each location in the dataframe. All required data are
then reshaped and merged for downstream table production.
Args:
gbd_round_id (int):
GBD round.
pop_version (str):
Forecast populations version.
location_ids (list):
List of location IDs to pull from both past and future data.
Returns:
pop_final_df (pandas dataframe):
Dataframe with all required population data, reshaped for downstream
table production.
"""
p_end = YEARS.past_end
f_end = YEARS.forecast_end
# Get 2017 GBD pops
pop_2017 = get_population(gbd_round_id=gbd_round_id, age_group_id=22,
sex_id=3, location_id=location_ids,
status="best", year_id=p_end, with_ui=True)[[
"year_id", "location_id", "population", "lower", "upper"
]].rename(columns={"population": "mean"})
pop_2017_da = melt_to_xarray(pop_2017)
# Get future pops
pop_fut = open_xr(f"{gbd_round_id}/future/population/"
f"{pop_version}/population_combined.nc").data
pop_fut_sel = pop_fut.sel(location_id=location_ids, scenario=SCENARIOS,
age_group_id=ALL_AGE_ID, sex_id=BOTH_SEX_ID)
# Concat and make quantile wide
pop_da = xr.concat([pop_2017_da, pop_fut_sel], dim="year_id")
pop_df = pop_da.rename("value").to_dataframe().reset_index()
pop_df = pop_df.pivot_table(values="value",
index=["location_id", "year_id", "age_group_id",
"sex_id", "scenario"],
columns="quantile").reset_index()
# Combine value and UI into one column
pop_with_ui = combine_mean_ui(pop_df)
# Find peak pops and year of peak
peak_pop_df = pop_with_ui.query("scenario == 0").groupby(
"location_id").apply(
get_max_pop_year).reset_index().rename(
columns={"mean":"peak_pop","year_id":"peak_year"})
peak_pop_df["peak_pop"] = peak_pop_df["peak_pop"].apply(
lambda x: round(x, 2))
peak_pop_df["peak_pop_value"] = (peak_pop_df["peak_pop"].astype(str) +
" (" +
peak_pop_df["peak_year"].astype(
int).astype(str) +
")")
# Get 2017 and 2100 values
pop_2017_only = pop_with_ui.query(f"year_id == {p_end} and scenario == 0")
pop_2100_only = pop_with_ui.query(f"year_id == {f_end}")
pop_2017_wide = pivot_scenarios(pop_2017_only, f"{p_end}", SCENARIO_MAP)
pop_2100_wide = pivot_scenarios(pop_2100_only, f"{f_end}", SCENARIO_MAP)
# Merge
pop_final_df = pop_2017_wide.merge(peak_pop_df).merge(pop_2100_wide)
return pop_final_df
def pull_reshape_tfr(gbd_round_id, tfr_version, location_ids):
"""Pulls year 2017 GBD round 5 TFR, converts it an xarray dataarray,
pulls forecast TFR, and concatenates the dataarrays. The new array is
then converted to a pandas dataframe. All required data are then reshaped
and merged for downstream table production.
Args:
gbd_round_id (int):
GBD round.
tfr_version (str):
Forecast TFR version.
location_ids (list):
List of location IDs to pull from both past and future data.
Returns:
tfr_final_df (pandas dataframe):
Dataframe with all required TFR data, reshaped for downstream table
production.
"""
p_end = YEARS.past_end
f_end = YEARS.forecast_end
# Get 2017 GBD TFR
tfr_2017 = get_covariate_estimates(covariate_id=149,
gbd_round_id=gbd_round_id,
location_id=location_ids, year_id=p_end,
status="best")[[
"year_id", "location_id","mean_value", "lower_value", "upper_value"
]].rename(columns={"mean_value":"mean", "lower_value":"lower",
"upper_value":"upper"})
tfr_2017_da = melt_to_xarray(tfr_2017)
# Get future TFR
tfr_fut = open_xr(f"{gbd_round_id}/future/tfr/"
f"{tfr_version}/tfr_combined.nc").data
tfr_fut_sel = tfr_fut.sel(location_id=location_ids, scenario=SCENARIOS,
year_id=YEARS.forecast_years)
# Concat and make quantile wide
tfr_da = xr.concat([tfr_2017_da, tfr_fut_sel], dim="year_id")
tfr_df = tfr_da.to_dataframe().reset_index()
tfr_df = tfr_df.pivot_table(values="value",
index=["location_id", "year_id", "scenario"],
columns="quantile").reset_index()
# Combine value and UI into one column
tfr_df = combine_mean_ui(tfr_df, df_type="tfr")
# Get 2017 and 2100 values
tfr2017 = tfr_df.query(f"year_id == {p_end} and scenario==0")
tfr2100 = tfr_df.query(f"year_id == {f_end}")
tfr2017 = pivot_scenarios(tfr2017, f"{p_end}", SCENARIO_MAP, df_type="tfr")
tfr2100 = pivot_scenarios(tfr2100, f"{f_end}", SCENARIO_MAP, df_type="tfr")
# Merge
tfr_final_df = tfr2017.merge(tfr2100)
return tfr_final_df
def convert_to_floating(string):
"""
Takes a string with a decimal point and converts the decimal point to a
floating decimal point for lancet style formatting.
Args:
string (str):
A number string with a decimal point.
Returns:
str:
The original string with floating decimal.
"""
return "".join(["\u00b7" if char=="." else char for char in string])
def get_format_obj(workbook, font_name="Times New Roman", font_size=8,
bg_color="#FFFFFF", align=True, bold=False):
"""Utility function to dynamically create cell formatting options.
Args:
workbook (xlsxwriter Workbook):
Parent workbook of the worksheet to which the data is written.
font_name(str):
Font of the content.
font_size(int):
Font size of the content.
bg_color(str):
String representing the HEX code of cell color.
align(bool):
If cell content needs to be vertically and horizontally aligned.
bold (bool):
If cell content needs to be boldened.
Returns:
format_obj (xlsxwriter workbook format object):
Has specified format properties.
"""
format_obj = workbook.add_format(
{
"font_name": font_name,
"font_size": font_size
}
)
format_obj.set_border()
format_obj.set_text_wrap()
format_obj.set_bg_color(bg_color)
if bold:
format_obj.set_bold()
if align:
format_obj.set_align("center")
format_obj.set_align("vcenter")
return format_obj
def write_header(worksheet, curr_row, cols, data_cols, header_format, stages):
"""Utility function to write the header for each page.
Args: | Worksheet to which the data is written.
curr_row (int):
Starting row number for the header.
cols (list):
List of characters representing the columns.
data_cols (pandas series):
Columns to be written.
header_format(xlsxwriter Format object):
Cell format options for headers.
stages (list):
"tfr", "pop" etc.
Returns:
int: An integer specifying the row number following the header.
"""
### Merge range function takes the locations of the cells to merge, the data
### to write and the cell format. A sample input would look like:
### worksheet.merge_range("A0:B1", "Location", cell_format_obj)
### The above call will merge 4 cells: A0, A1, B0, B1 and fill it with the
### value "Location".
end_row = curr_row + CELL_HT["location"]
row_range = cols[0] + str(curr_row) + ":" + cols[0] + str(end_row)
worksheet.merge_range(row_range, "Location", header_format)
num_pop_cols = sum(map(lambda i: "pop" in i, data_cols)) - 1
num_tfr_cols = sum(map(lambda i: "tfr" in i, data_cols)) - 1
col_end = 0
for i, stage in enumerate(stages):
if stage == "pop":
unit_txt = " (in millions)"
stage_txt = "Population"
col_range = num_pop_cols
else:
unit_txt = ""
stage_txt = "Total Fertility Rate"
col_range = num_tfr_cols
col_st = col_end + 1
col_end = col_st + col_range
curr_row_copy = curr_row
end_row = curr_row_copy + CELL_HT["stage"]
row_range = (
cols[col_st] + str(curr_row_copy) + ":" +
cols[col_end] + str(end_row)
)
col_txt = stage_txt + unit_txt
worksheet.merge_range(row_range, col_txt, header_format)
curr_row_copy = end_row + 1
end_row = curr_row_copy + CELL_HT["stage"]
col_st_copy = col_st
for column in data_cols:
if stage in column:
row_range = cols[col_st_copy] + str(curr_row_copy)
worksheet.write(row_range, COL_NAME_MAP[column], header_format)
col_st_copy += 1
return end_row + 1
def write_table(final_df, outfile, stages):
"""Writes the data to an xlsx table.
Args:
final_df (pandas dataframe):
Dataframe with formatted data.
outfile (FBDPath object):
Path to store the table.
stages (list):
"tfr", "pop" etc.
"""
workbook = xlsxwriter.Workbook(
str(outfile), {"constant_memory": False}
)
worksheet = workbook.add_worksheet("Table 1")
header_color = "#F2DCDB"
white = "#000000"
black = "#FFFFFF"
loc_cell_width = 20
data_cell_width = 15
column_start = 65
header_format = get_format_obj(
workbook, bg_color=header_color, font_size=12, bold=True
)
title_format = get_format_obj(
workbook, bg_color=white, font_size=13, align=False, bold=True
)
title_format.set_font_color(black)
# Column length is basically all columns in the dataframe except 'level'
col_len = final_df.shape[1]-1
data_cols = final_df.drop(["level", "lancet_label"], axis=1).columns.values
cols = list(map(chr, range(column_start, column_start+col_len)))
worksheet.set_column(cols[0]+":"+cols[0], loc_cell_width)
worksheet.set_column(cols[1]+":"+cols[-1], data_cell_width)
# place-holder to manually adjust title as needed
title = (
"Title goes here."
)
curr_row = 1
end_row = curr_row + CELL_HT["title"]
row_range = cols[0] + str(curr_row) + ":" + cols[-1] + str(end_row)
worksheet.merge_range(row_range, title, title_format)
curr_row = end_row+1
page_row_count = 1
page_breaks = []
for _, row in final_df.iterrows():
page_row_count += 1
### Insert page break after 20 rows.
if row["level"] == 0 or (page_row_count != 0 and
page_row_count % 20 == 0):
page_row_count = 0
page_breaks.append(curr_row - 1)
curr_row = write_header(
worksheet, curr_row, cols, data_cols,
header_format, stages
)
end_row = curr_row + CELL_HT["data_cols"]
col_idx = 0
if row["level"] < 3:
loc_fmt_obj = get_format_obj(
workbook, font_size=11,
bg_color=header_color, bold=True,
align=False
)
data_fmt_obj = get_format_obj(
workbook, font_size=11,
bg_color=header_color, bold=True
)
else:
loc_fmt_obj = get_format_obj(
workbook, font_size=11, align=False
)
data_fmt_obj = get_format_obj(
workbook, font_size=11
)
for col in final_df:
if col == "level":
continue
row_range = (
cols[col_idx] + str(curr_row) + ":" +
cols[col_idx] + str(end_row)
)
if col == "lancet_label":
loc_name = INDENT_MAP[row["level"]] + row[col]
worksheet.merge_range(row_range, loc_name, loc_fmt_obj)
else:
worksheet.merge_range(row_range, row[col], data_fmt_obj)
col_idx += 1
curr_row = end_row+1
worksheet.set_h_pagebreaks(page_breaks[1:])
worksheet.fit_to_pages(1, 0)
workbook.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--pop-version", type=str, required=True,
help="The version of population to use."
)
parser.add_argument("--tfr-version", type=str, required=True,
help="The version of fertility to use."
)
parser.add_argument(
"--gbd-round-id", type=int, required=True,
help="The GBD round associated with the data.")
parser.add_argument(
"--output-review-table", action="store_true",
help="Outputs table for reviewers along with Lancet-style table."
)
args = parser.parse_args()
# Define stages (population and TFR)
stages = ["pop", "tfr"]
# Get location metadata
gbd_loc_df = get_location_metadata(gbd_round_id=args.gbd_round_id,
location_set_id=35)
loc_meta = gbd_loc_df.query("level < 4")[
["location_id", "lancet_label", "level","sort_order"]]
location_ids = loc_meta.location_id.tolist()
# Make pop and TFR dataframes
pop_final_df = pull_reshape_pop(
args.gbd_round_id, args.pop_version, location_ids
)
tfr_final_df = pull_reshape_tfr(
args.gbd_round_id, args.tfr_version, location_ids
)
# merge dataframes
merged_df = loc_meta.merge(
pop_final_df).merge(
tfr_final_df,
on="location_id", how="left").sort_values(by="sort_order")
# Convert to floating decimal
data_cols = ["value_2017_pop_ref", "value_2100_pop_ref",
"value_2100_pop_sdg", "peak_pop_value",
"value_2017_tfr_ref", "value_2100_tfr_ref",
"value_2100_tfr_sdg"]
merged_df.loc[:, data_cols] = merged_df.loc[:, data_cols].applymap(
lambda x: convert_to_floating(x)
)
# Order final dataframe
final_df = merged_df[["level", "lancet_label"] + data_cols]
# Write table
plot_dir = (f"/ihme/forecasting/plot/{args.gbd_round_id}"
"/future/population/table_1/")
fname = date.today().strftime("%Y%m%d") + "_table_1.xlsx"
filepath = plot_dir + fname
try:
mkdir(plot_dir)
print(f"{plot_dir} created.")
except FileExistsError:
print(f"{plot_dir} already exists.")
write_table(final_df, filepath, stages)
if args.output_review_table:
review_fname = date.today().strftime("%Y%m%d") + "_table_1_review.csv"
review_cols = list(REVIEW_COL_NAME_MAP.keys())
review_df = merged_df[review_cols].drop_duplicates()
review_df.rename(columns=REVIEW_COL_NAME_MAP, inplace=True)
review_df.to_csv(plot_dir + review_fname, index=False) | worksheet (Worksheet object): | random_line_split |
table_1.py | from datetime import date
import numpy as np
from os import mkdir
import pandas as pd
import xarray as xr
import xlsxwriter
from db_queries import (get_covariate_estimates, get_location_metadata,
get_population)
from fbd_core import argparse, YearRange
from fbd_core.etl import expand_dimensions
from fbd_core.file_interface import FBDPath, open_xr
ALL_AGE_ID = 22
BOTH_SEX_ID = 3
SCENARIOS = [-1, 0, 1, 2, 3]
YEARS = YearRange(1990, 2018, 2100)
CELL_HT = {
"title": 1,
"location": 1,
"stage": 0,
"data_cols": 2
}
COL_RANGE = {
"pop":2,
"tfr":1
}
INDENT_MAP = {
0: "",
1: " ",
2: " ",
3: " "
}
SCENARIO_MAP = {
0:"ref",
-1:"worse",
1:"better",
2:"fastest",
3:"sdg"
}
COL_NAME_MAP = {
"lancet_label":"Location",
"peak_pop_value": "Peak Population (year)",
"value_2017_pop_ref": "2017",
"value_2100_pop_ref": "2100 Reference Scenario",
"value_2100_pop_sdg": "2100 SDG Scenario",
"value_2017_tfr_ref": "2017",
"value_2100_tfr_ref": "2100 Reference Scenario",
"value_2100_tfr_sdg": "2100 SDG Scenario",
}
REVIEW_COL_NAME_MAP = {
"lancet_label": "Location name",
"mean_2017_pop_ref": "Population mean 2017",
"lower_2017_pop_ref": "Population lower 2017",
"upper_2017_pop_ref": "Population upper 2017",
"mean_2100_pop_ref": "Reference population mean 2100",
"lower_2100_pop_ref": "Reference population lower 2100",
"upper_2100_pop_ref": "Reference population upper 2100",
"mean_2100_pop_sdg": "SDG population mean 2100",
"lower_2100_pop_sdg": "SDG population lower 2100",
"upper_2100_pop_sdg": "SDG population upper 2100",
"peak_pop": "Peak population",
"peak_year": "Peak population year",
"mean_2017_tfr_ref": "TFR mean 2017",
"lower_2017_tfr_ref": "TFR lower 2017",
"upper_2017_tfr_ref": "TFR upper 2017",
"mean_2100_tfr_ref": "Reference TFR mean 2100",
"lower_2100_tfr_ref": "Reference TFR lower 2100",
"upper_2100_tfr_ref": "Reference TFR upper 2100",
"mean_2100_tfr_sdg": "SDG TFR mean 2100",
"lower_2100_tfr_sdg": "SDG TFR lower 2100",
"upper_2100_tfr_sdg": "SDG TFR upper 2100"
}
def | (df):
"""Melts GBD data with 'mean', 'lower', and 'upper' columns to a single
'quantile' column; converts to xarray dataarray; and adds a scenario
dimension.
Args:
df (pandas dataframe):
Dataframe with 'year_id', 'location_id', 'mean', 'lower', and
'upper' columns.
Returns:
da_with_scenario (xarray dataarray):
Dataarray with 'year_id', 'quantile', 'location_id', and 'scenario'
dimensions.
"""
df_long = pd.melt(df,
id_vars=["year_id", "location_id"],
value_vars=["mean", "lower", "upper"],
var_name="quantile")
da = df_long.set_index(
["year_id", "quantile", "location_id"]).to_xarray()["value"]
da_with_scenario = expand_dimensions(da, scenario=[0])
return da_with_scenario
def combine_mean_ui(df, df_type="pop"):
"""Takes a dataframe with 'mean', 'lower', and 'upper' columns,
and returns a dataframe with a 'value' column that has the mean, lower,
and upper all together. If df_type == "pop", values are converted to
millions.
Args:
df (pandas dataframe):
Dataframe with 'mean', 'lower', and 'upper' columns.
Returns:
df (pandas dataframe):
Dataframe with 'mean', 'lower', and 'upper' columns and added
'value' column.
"""
for col in ["mean", "lower", "upper"]:
if df_type == "pop":
df[col] = df[col] / 1000000
df[col] = df[col].apply(lambda x: round(x, 2))
df["value"] = (df["mean"].astype(str) + " (" + df["lower"].astype(str) +
" - " + df["upper"].astype(str) + ")")
return df
def pivot_scenarios(df, prefix, scen_map, df_type="pop"):
"""Takes a dataframe with 'mean', 'lower', 'upper', 'value', and 'scenario'
columns, and returns a dataframe with wide scenarios. Scenario column names
are given by:
prefix + "_" + df_type + "_" + df["scenario"].map(scen_map)
Args:
df (pandas dataframe):
Dataframe with 'mean', 'lower', 'upper', 'value', and 'scenario'
columns.
Returns:
df (pandas dataframe):
Dataframe with 'mean', 'lower', 'upper', 'value' columns wide by
scenario.
"""
df["scenario"] = prefix + "_" + df_type + "_" +\
df["scenario"].map(scen_map)
df = df.pivot_table(values=["lower", "mean", "upper" ,"value"],
index="location_id",
columns="scenario",
aggfunc="first").reset_index()
# This flattens the column levels
df.columns = ['_'.join(col) for col in df.columns.values if col]
df.rename(columns={"location_id_":"location_id"}, inplace=True)
return df
def get_max_pop_year(group):
"""Takes a dataframe (or GroupBy object) with 'mean' and 'year_id' columns,
and returns a dataframe with the max value in 'mean' and the 'year_id'
of the max value.
Args:
df (pandas dataframe):
Dataframe with 'year_id' and 'mean' columns.
Returns:
df (pandas dataframe):
Dataframe with 'year_id' and 'mean' columns.
"""
max_year_val = group.loc[group["mean"].idxmax()][["year_id", "mean"]]
return max_year_val
def pull_reshape_pop(gbd_round_id, pop_version, location_ids):
"""Pulls year 2017 GBD round 5 populations, converts it an xarray dataarray,
pulls forecast population, and concatenates the dataarrays. The new array is
then converted to a pandas dataframe. Peak population and peak population
year are pulled for each location in the dataframe. All required data are
then reshaped and merged for downstream table production.
Args:
gbd_round_id (int):
GBD round.
pop_version (str):
Forecast populations version.
location_ids (list):
List of location IDs to pull from both past and future data.
Returns:
pop_final_df (pandas dataframe):
Dataframe with all required population data, reshaped for downstream
table production.
"""
p_end = YEARS.past_end
f_end = YEARS.forecast_end
# Get 2017 GBD pops
pop_2017 = get_population(gbd_round_id=gbd_round_id, age_group_id=22,
sex_id=3, location_id=location_ids,
status="best", year_id=p_end, with_ui=True)[[
"year_id", "location_id", "population", "lower", "upper"
]].rename(columns={"population": "mean"})
pop_2017_da = melt_to_xarray(pop_2017)
# Get future pops
pop_fut = open_xr(f"{gbd_round_id}/future/population/"
f"{pop_version}/population_combined.nc").data
pop_fut_sel = pop_fut.sel(location_id=location_ids, scenario=SCENARIOS,
age_group_id=ALL_AGE_ID, sex_id=BOTH_SEX_ID)
# Concat and make quantile wide
pop_da = xr.concat([pop_2017_da, pop_fut_sel], dim="year_id")
pop_df = pop_da.rename("value").to_dataframe().reset_index()
pop_df = pop_df.pivot_table(values="value",
index=["location_id", "year_id", "age_group_id",
"sex_id", "scenario"],
columns="quantile").reset_index()
# Combine value and UI into one column
pop_with_ui = combine_mean_ui(pop_df)
# Find peak pops and year of peak
peak_pop_df = pop_with_ui.query("scenario == 0").groupby(
"location_id").apply(
get_max_pop_year).reset_index().rename(
columns={"mean":"peak_pop","year_id":"peak_year"})
peak_pop_df["peak_pop"] = peak_pop_df["peak_pop"].apply(
lambda x: round(x, 2))
peak_pop_df["peak_pop_value"] = (peak_pop_df["peak_pop"].astype(str) +
" (" +
peak_pop_df["peak_year"].astype(
int).astype(str) +
")")
# Get 2017 and 2100 values
pop_2017_only = pop_with_ui.query(f"year_id == {p_end} and scenario == 0")
pop_2100_only = pop_with_ui.query(f"year_id == {f_end}")
pop_2017_wide = pivot_scenarios(pop_2017_only, f"{p_end}", SCENARIO_MAP)
pop_2100_wide = pivot_scenarios(pop_2100_only, f"{f_end}", SCENARIO_MAP)
# Merge
pop_final_df = pop_2017_wide.merge(peak_pop_df).merge(pop_2100_wide)
return pop_final_df
def pull_reshape_tfr(gbd_round_id, tfr_version, location_ids):
"""Pulls year 2017 GBD round 5 TFR, converts it an xarray dataarray,
pulls forecast TFR, and concatenates the dataarrays. The new array is
then converted to a pandas dataframe. All required data are then reshaped
and merged for downstream table production.
Args:
gbd_round_id (int):
GBD round.
tfr_version (str):
Forecast TFR version.
location_ids (list):
List of location IDs to pull from both past and future data.
Returns:
tfr_final_df (pandas dataframe):
Dataframe with all required TFR data, reshaped for downstream table
production.
"""
p_end = YEARS.past_end
f_end = YEARS.forecast_end
# Get 2017 GBD TFR
tfr_2017 = get_covariate_estimates(covariate_id=149,
gbd_round_id=gbd_round_id,
location_id=location_ids, year_id=p_end,
status="best")[[
"year_id", "location_id","mean_value", "lower_value", "upper_value"
]].rename(columns={"mean_value":"mean", "lower_value":"lower",
"upper_value":"upper"})
tfr_2017_da = melt_to_xarray(tfr_2017)
# Get future TFR
tfr_fut = open_xr(f"{gbd_round_id}/future/tfr/"
f"{tfr_version}/tfr_combined.nc").data
tfr_fut_sel = tfr_fut.sel(location_id=location_ids, scenario=SCENARIOS,
year_id=YEARS.forecast_years)
# Concat and make quantile wide
tfr_da = xr.concat([tfr_2017_da, tfr_fut_sel], dim="year_id")
tfr_df = tfr_da.to_dataframe().reset_index()
tfr_df = tfr_df.pivot_table(values="value",
index=["location_id", "year_id", "scenario"],
columns="quantile").reset_index()
# Combine value and UI into one column
tfr_df = combine_mean_ui(tfr_df, df_type="tfr")
# Get 2017 and 2100 values
tfr2017 = tfr_df.query(f"year_id == {p_end} and scenario==0")
tfr2100 = tfr_df.query(f"year_id == {f_end}")
tfr2017 = pivot_scenarios(tfr2017, f"{p_end}", SCENARIO_MAP, df_type="tfr")
tfr2100 = pivot_scenarios(tfr2100, f"{f_end}", SCENARIO_MAP, df_type="tfr")
# Merge
tfr_final_df = tfr2017.merge(tfr2100)
return tfr_final_df
def convert_to_floating(string):
"""
Takes a string with a decimal point and converts the decimal point to a
floating decimal point for lancet style formatting.
Args:
string (str):
A number string with a decimal point.
Returns:
str:
The original string with floating decimal.
"""
return "".join(["\u00b7" if char=="." else char for char in string])
def get_format_obj(workbook, font_name="Times New Roman", font_size=8,
bg_color="#FFFFFF", align=True, bold=False):
"""Utility function to dynamically create cell formatting options.
Args:
workbook (xlsxwriter Workbook):
Parent workbook of the worksheet to which the data is written.
font_name(str):
Font of the content.
font_size(int):
Font size of the content.
bg_color(str):
String representing the HEX code of cell color.
align(bool):
If cell content needs to be vertically and horizontally aligned.
bold (bool):
If cell content needs to be boldened.
Returns:
format_obj (xlsxwriter workbook format object):
Has specified format properties.
"""
format_obj = workbook.add_format(
{
"font_name": font_name,
"font_size": font_size
}
)
format_obj.set_border()
format_obj.set_text_wrap()
format_obj.set_bg_color(bg_color)
if bold:
format_obj.set_bold()
if align:
format_obj.set_align("center")
format_obj.set_align("vcenter")
return format_obj
def write_header(worksheet, curr_row, cols, data_cols, header_format, stages):
"""Utility function to write the header for each page.
Args:
worksheet (Worksheet object):
Worksheet to which the data is written.
curr_row (int):
Starting row number for the header.
cols (list):
List of characters representing the columns.
data_cols (pandas series):
Columns to be written.
header_format(xlsxwriter Format object):
Cell format options for headers.
stages (list):
"tfr", "pop" etc.
Returns:
int: An integer specifying the row number following the header.
"""
### Merge range function takes the locations of the cells to merge, the data
### to write and the cell format. A sample input would look like:
### worksheet.merge_range("A0:B1", "Location", cell_format_obj)
### The above call will merge 4 cells: A0, A1, B0, B1 and fill it with the
### value "Location".
end_row = curr_row + CELL_HT["location"]
row_range = cols[0] + str(curr_row) + ":" + cols[0] + str(end_row)
worksheet.merge_range(row_range, "Location", header_format)
num_pop_cols = sum(map(lambda i: "pop" in i, data_cols)) - 1
num_tfr_cols = sum(map(lambda i: "tfr" in i, data_cols)) - 1
col_end = 0
for i, stage in enumerate(stages):
if stage == "pop":
unit_txt = " (in millions)"
stage_txt = "Population"
col_range = num_pop_cols
else:
unit_txt = ""
stage_txt = "Total Fertility Rate"
col_range = num_tfr_cols
col_st = col_end + 1
col_end = col_st + col_range
curr_row_copy = curr_row
end_row = curr_row_copy + CELL_HT["stage"]
row_range = (
cols[col_st] + str(curr_row_copy) + ":" +
cols[col_end] + str(end_row)
)
col_txt = stage_txt + unit_txt
worksheet.merge_range(row_range, col_txt, header_format)
curr_row_copy = end_row + 1
end_row = curr_row_copy + CELL_HT["stage"]
col_st_copy = col_st
for column in data_cols:
if stage in column:
row_range = cols[col_st_copy] + str(curr_row_copy)
worksheet.write(row_range, COL_NAME_MAP[column], header_format)
col_st_copy += 1
return end_row + 1
def write_table(final_df, outfile, stages):
"""Writes the data to an xlsx table.
Args:
final_df (pandas dataframe):
Dataframe with formatted data.
outfile (FBDPath object):
Path to store the table.
stages (list):
"tfr", "pop" etc.
"""
workbook = xlsxwriter.Workbook(
str(outfile), {"constant_memory": False}
)
worksheet = workbook.add_worksheet("Table 1")
header_color = "#F2DCDB"
white = "#000000"
black = "#FFFFFF"
loc_cell_width = 20
data_cell_width = 15
column_start = 65
header_format = get_format_obj(
workbook, bg_color=header_color, font_size=12, bold=True
)
title_format = get_format_obj(
workbook, bg_color=white, font_size=13, align=False, bold=True
)
title_format.set_font_color(black)
# Column length is basically all columns in the dataframe except 'level'
col_len = final_df.shape[1]-1
data_cols = final_df.drop(["level", "lancet_label"], axis=1).columns.values
cols = list(map(chr, range(column_start, column_start+col_len)))
worksheet.set_column(cols[0]+":"+cols[0], loc_cell_width)
worksheet.set_column(cols[1]+":"+cols[-1], data_cell_width)
# place-holder to manually adjust title as needed
title = (
"Title goes here."
)
curr_row = 1
end_row = curr_row + CELL_HT["title"]
row_range = cols[0] + str(curr_row) + ":" + cols[-1] + str(end_row)
worksheet.merge_range(row_range, title, title_format)
curr_row = end_row+1
page_row_count = 1
page_breaks = []
for _, row in final_df.iterrows():
page_row_count += 1
### Insert page break after 20 rows.
if row["level"] == 0 or (page_row_count != 0 and
page_row_count % 20 == 0):
page_row_count = 0
page_breaks.append(curr_row - 1)
curr_row = write_header(
worksheet, curr_row, cols, data_cols,
header_format, stages
)
end_row = curr_row + CELL_HT["data_cols"]
col_idx = 0
if row["level"] < 3:
loc_fmt_obj = get_format_obj(
workbook, font_size=11,
bg_color=header_color, bold=True,
align=False
)
data_fmt_obj = get_format_obj(
workbook, font_size=11,
bg_color=header_color, bold=True
)
else:
loc_fmt_obj = get_format_obj(
workbook, font_size=11, align=False
)
data_fmt_obj = get_format_obj(
workbook, font_size=11
)
for col in final_df:
if col == "level":
continue
row_range = (
cols[col_idx] + str(curr_row) + ":" +
cols[col_idx] + str(end_row)
)
if col == "lancet_label":
loc_name = INDENT_MAP[row["level"]] + row[col]
worksheet.merge_range(row_range, loc_name, loc_fmt_obj)
else:
worksheet.merge_range(row_range, row[col], data_fmt_obj)
col_idx += 1
curr_row = end_row+1
worksheet.set_h_pagebreaks(page_breaks[1:])
worksheet.fit_to_pages(1, 0)
workbook.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--pop-version", type=str, required=True,
help="The version of population to use."
)
parser.add_argument("--tfr-version", type=str, required=True,
help="The version of fertility to use."
)
parser.add_argument(
"--gbd-round-id", type=int, required=True,
help="The GBD round associated with the data.")
parser.add_argument(
"--output-review-table", action="store_true",
help="Outputs table for reviewers along with Lancet-style table."
)
args = parser.parse_args()
# Define stages (population and TFR)
stages = ["pop", "tfr"]
# Get location metadata
gbd_loc_df = get_location_metadata(gbd_round_id=args.gbd_round_id,
location_set_id=35)
loc_meta = gbd_loc_df.query("level < 4")[
["location_id", "lancet_label", "level","sort_order"]]
location_ids = loc_meta.location_id.tolist()
# Make pop and TFR dataframes
pop_final_df = pull_reshape_pop(
args.gbd_round_id, args.pop_version, location_ids
)
tfr_final_df = pull_reshape_tfr(
args.gbd_round_id, args.tfr_version, location_ids
)
# merge dataframes
merged_df = loc_meta.merge(
pop_final_df).merge(
tfr_final_df,
on="location_id", how="left").sort_values(by="sort_order")
# Convert to floating decimal
data_cols = ["value_2017_pop_ref", "value_2100_pop_ref",
"value_2100_pop_sdg", "peak_pop_value",
"value_2017_tfr_ref", "value_2100_tfr_ref",
"value_2100_tfr_sdg"]
merged_df.loc[:, data_cols] = merged_df.loc[:, data_cols].applymap(
lambda x: convert_to_floating(x)
)
# Order final dataframe
final_df = merged_df[["level", "lancet_label"] + data_cols]
# Write table
plot_dir = (f"/ihme/forecasting/plot/{args.gbd_round_id}"
"/future/population/table_1/")
fname = date.today().strftime("%Y%m%d") + "_table_1.xlsx"
filepath = plot_dir + fname
try:
mkdir(plot_dir)
print(f"{plot_dir} created.")
except FileExistsError:
print(f"{plot_dir} already exists.")
write_table(final_df, filepath, stages)
if args.output_review_table:
review_fname = date.today().strftime("%Y%m%d") + "_table_1_review.csv"
review_cols = list(REVIEW_COL_NAME_MAP.keys())
review_df = merged_df[review_cols].drop_duplicates()
review_df.rename(columns=REVIEW_COL_NAME_MAP, inplace=True)
review_df.to_csv(plot_dir + review_fname, index=False) | melt_to_xarray | identifier_name |
table_1.py | from datetime import date
import numpy as np
from os import mkdir
import pandas as pd
import xarray as xr
import xlsxwriter
from db_queries import (get_covariate_estimates, get_location_metadata,
get_population)
from fbd_core import argparse, YearRange
from fbd_core.etl import expand_dimensions
from fbd_core.file_interface import FBDPath, open_xr
ALL_AGE_ID = 22
BOTH_SEX_ID = 3
SCENARIOS = [-1, 0, 1, 2, 3]
YEARS = YearRange(1990, 2018, 2100)
CELL_HT = {
"title": 1,
"location": 1,
"stage": 0,
"data_cols": 2
}
COL_RANGE = {
"pop":2,
"tfr":1
}
INDENT_MAP = {
0: "",
1: " ",
2: " ",
3: " "
}
SCENARIO_MAP = {
0:"ref",
-1:"worse",
1:"better",
2:"fastest",
3:"sdg"
}
COL_NAME_MAP = {
"lancet_label":"Location",
"peak_pop_value": "Peak Population (year)",
"value_2017_pop_ref": "2017",
"value_2100_pop_ref": "2100 Reference Scenario",
"value_2100_pop_sdg": "2100 SDG Scenario",
"value_2017_tfr_ref": "2017",
"value_2100_tfr_ref": "2100 Reference Scenario",
"value_2100_tfr_sdg": "2100 SDG Scenario",
}
REVIEW_COL_NAME_MAP = {
"lancet_label": "Location name",
"mean_2017_pop_ref": "Population mean 2017",
"lower_2017_pop_ref": "Population lower 2017",
"upper_2017_pop_ref": "Population upper 2017",
"mean_2100_pop_ref": "Reference population mean 2100",
"lower_2100_pop_ref": "Reference population lower 2100",
"upper_2100_pop_ref": "Reference population upper 2100",
"mean_2100_pop_sdg": "SDG population mean 2100",
"lower_2100_pop_sdg": "SDG population lower 2100",
"upper_2100_pop_sdg": "SDG population upper 2100",
"peak_pop": "Peak population",
"peak_year": "Peak population year",
"mean_2017_tfr_ref": "TFR mean 2017",
"lower_2017_tfr_ref": "TFR lower 2017",
"upper_2017_tfr_ref": "TFR upper 2017",
"mean_2100_tfr_ref": "Reference TFR mean 2100",
"lower_2100_tfr_ref": "Reference TFR lower 2100",
"upper_2100_tfr_ref": "Reference TFR upper 2100",
"mean_2100_tfr_sdg": "SDG TFR mean 2100",
"lower_2100_tfr_sdg": "SDG TFR lower 2100",
"upper_2100_tfr_sdg": "SDG TFR upper 2100"
}
def melt_to_xarray(df):
"""Melts GBD data with 'mean', 'lower', and 'upper' columns to a single
'quantile' column; converts to xarray dataarray; and adds a scenario
dimension.
Args:
df (pandas dataframe):
Dataframe with 'year_id', 'location_id', 'mean', 'lower', and
'upper' columns.
Returns:
da_with_scenario (xarray dataarray):
Dataarray with 'year_id', 'quantile', 'location_id', and 'scenario'
dimensions.
"""
df_long = pd.melt(df,
id_vars=["year_id", "location_id"],
value_vars=["mean", "lower", "upper"],
var_name="quantile")
da = df_long.set_index(
["year_id", "quantile", "location_id"]).to_xarray()["value"]
da_with_scenario = expand_dimensions(da, scenario=[0])
return da_with_scenario
def combine_mean_ui(df, df_type="pop"):
"""Takes a dataframe with 'mean', 'lower', and 'upper' columns,
and returns a dataframe with a 'value' column that has the mean, lower,
and upper all together. If df_type == "pop", values are converted to
millions.
Args:
df (pandas dataframe):
Dataframe with 'mean', 'lower', and 'upper' columns.
Returns:
df (pandas dataframe):
Dataframe with 'mean', 'lower', and 'upper' columns and added
'value' column.
"""
for col in ["mean", "lower", "upper"]:
if df_type == "pop":
df[col] = df[col] / 1000000
df[col] = df[col].apply(lambda x: round(x, 2))
df["value"] = (df["mean"].astype(str) + " (" + df["lower"].astype(str) +
" - " + df["upper"].astype(str) + ")")
return df
def pivot_scenarios(df, prefix, scen_map, df_type="pop"):
"""Takes a dataframe with 'mean', 'lower', 'upper', 'value', and 'scenario'
columns, and returns a dataframe with wide scenarios. Scenario column names
are given by:
prefix + "_" + df_type + "_" + df["scenario"].map(scen_map)
Args:
df (pandas dataframe):
Dataframe with 'mean', 'lower', 'upper', 'value', and 'scenario'
columns.
Returns:
df (pandas dataframe):
Dataframe with 'mean', 'lower', 'upper', 'value' columns wide by
scenario.
"""
df["scenario"] = prefix + "_" + df_type + "_" +\
df["scenario"].map(scen_map)
df = df.pivot_table(values=["lower", "mean", "upper" ,"value"],
index="location_id",
columns="scenario",
aggfunc="first").reset_index()
# This flattens the column levels
df.columns = ['_'.join(col) for col in df.columns.values if col]
df.rename(columns={"location_id_":"location_id"}, inplace=True)
return df
def get_max_pop_year(group):
"""Takes a dataframe (or GroupBy object) with 'mean' and 'year_id' columns,
and returns a dataframe with the max value in 'mean' and the 'year_id'
of the max value.
Args:
df (pandas dataframe):
Dataframe with 'year_id' and 'mean' columns.
Returns:
df (pandas dataframe):
Dataframe with 'year_id' and 'mean' columns.
"""
max_year_val = group.loc[group["mean"].idxmax()][["year_id", "mean"]]
return max_year_val
def pull_reshape_pop(gbd_round_id, pop_version, location_ids):
|
def pull_reshape_tfr(gbd_round_id, tfr_version, location_ids):
"""Pulls year 2017 GBD round 5 TFR, converts it an xarray dataarray,
pulls forecast TFR, and concatenates the dataarrays. The new array is
then converted to a pandas dataframe. All required data are then reshaped
and merged for downstream table production.
Args:
gbd_round_id (int):
GBD round.
tfr_version (str):
Forecast TFR version.
location_ids (list):
List of location IDs to pull from both past and future data.
Returns:
tfr_final_df (pandas dataframe):
Dataframe with all required TFR data, reshaped for downstream table
production.
"""
p_end = YEARS.past_end
f_end = YEARS.forecast_end
# Get 2017 GBD TFR
tfr_2017 = get_covariate_estimates(covariate_id=149,
gbd_round_id=gbd_round_id,
location_id=location_ids, year_id=p_end,
status="best")[[
"year_id", "location_id","mean_value", "lower_value", "upper_value"
]].rename(columns={"mean_value":"mean", "lower_value":"lower",
"upper_value":"upper"})
tfr_2017_da = melt_to_xarray(tfr_2017)
# Get future TFR
tfr_fut = open_xr(f"{gbd_round_id}/future/tfr/"
f"{tfr_version}/tfr_combined.nc").data
tfr_fut_sel = tfr_fut.sel(location_id=location_ids, scenario=SCENARIOS,
year_id=YEARS.forecast_years)
# Concat and make quantile wide
tfr_da = xr.concat([tfr_2017_da, tfr_fut_sel], dim="year_id")
tfr_df = tfr_da.to_dataframe().reset_index()
tfr_df = tfr_df.pivot_table(values="value",
index=["location_id", "year_id", "scenario"],
columns="quantile").reset_index()
# Combine value and UI into one column
tfr_df = combine_mean_ui(tfr_df, df_type="tfr")
# Get 2017 and 2100 values
tfr2017 = tfr_df.query(f"year_id == {p_end} and scenario==0")
tfr2100 = tfr_df.query(f"year_id == {f_end}")
tfr2017 = pivot_scenarios(tfr2017, f"{p_end}", SCENARIO_MAP, df_type="tfr")
tfr2100 = pivot_scenarios(tfr2100, f"{f_end}", SCENARIO_MAP, df_type="tfr")
# Merge
tfr_final_df = tfr2017.merge(tfr2100)
return tfr_final_df
def convert_to_floating(string):
"""
Takes a string with a decimal point and converts the decimal point to a
floating decimal point for lancet style formatting.
Args:
string (str):
A number string with a decimal point.
Returns:
str:
The original string with floating decimal.
"""
return "".join(["\u00b7" if char=="." else char for char in string])
def get_format_obj(workbook, font_name="Times New Roman", font_size=8,
bg_color="#FFFFFF", align=True, bold=False):
"""Utility function to dynamically create cell formatting options.
Args:
workbook (xlsxwriter Workbook):
Parent workbook of the worksheet to which the data is written.
font_name(str):
Font of the content.
font_size(int):
Font size of the content.
bg_color(str):
String representing the HEX code of cell color.
align(bool):
If cell content needs to be vertically and horizontally aligned.
bold (bool):
If cell content needs to be boldened.
Returns:
format_obj (xlsxwriter workbook format object):
Has specified format properties.
"""
format_obj = workbook.add_format(
{
"font_name": font_name,
"font_size": font_size
}
)
format_obj.set_border()
format_obj.set_text_wrap()
format_obj.set_bg_color(bg_color)
if bold:
format_obj.set_bold()
if align:
format_obj.set_align("center")
format_obj.set_align("vcenter")
return format_obj
def write_header(worksheet, curr_row, cols, data_cols, header_format, stages):
"""Utility function to write the header for each page.
Args:
worksheet (Worksheet object):
Worksheet to which the data is written.
curr_row (int):
Starting row number for the header.
cols (list):
List of characters representing the columns.
data_cols (pandas series):
Columns to be written.
header_format(xlsxwriter Format object):
Cell format options for headers.
stages (list):
"tfr", "pop" etc.
Returns:
int: An integer specifying the row number following the header.
"""
### Merge range function takes the locations of the cells to merge, the data
### to write and the cell format. A sample input would look like:
### worksheet.merge_range("A0:B1", "Location", cell_format_obj)
### The above call will merge 4 cells: A0, A1, B0, B1 and fill it with the
### value "Location".
end_row = curr_row + CELL_HT["location"]
row_range = cols[0] + str(curr_row) + ":" + cols[0] + str(end_row)
worksheet.merge_range(row_range, "Location", header_format)
num_pop_cols = sum(map(lambda i: "pop" in i, data_cols)) - 1
num_tfr_cols = sum(map(lambda i: "tfr" in i, data_cols)) - 1
col_end = 0
for i, stage in enumerate(stages):
if stage == "pop":
unit_txt = " (in millions)"
stage_txt = "Population"
col_range = num_pop_cols
else:
unit_txt = ""
stage_txt = "Total Fertility Rate"
col_range = num_tfr_cols
col_st = col_end + 1
col_end = col_st + col_range
curr_row_copy = curr_row
end_row = curr_row_copy + CELL_HT["stage"]
row_range = (
cols[col_st] + str(curr_row_copy) + ":" +
cols[col_end] + str(end_row)
)
col_txt = stage_txt + unit_txt
worksheet.merge_range(row_range, col_txt, header_format)
curr_row_copy = end_row + 1
end_row = curr_row_copy + CELL_HT["stage"]
col_st_copy = col_st
for column in data_cols:
if stage in column:
row_range = cols[col_st_copy] + str(curr_row_copy)
worksheet.write(row_range, COL_NAME_MAP[column], header_format)
col_st_copy += 1
return end_row + 1
def write_table(final_df, outfile, stages):
"""Writes the data to an xlsx table.
Args:
final_df (pandas dataframe):
Dataframe with formatted data.
outfile (FBDPath object):
Path to store the table.
stages (list):
"tfr", "pop" etc.
"""
workbook = xlsxwriter.Workbook(
str(outfile), {"constant_memory": False}
)
worksheet = workbook.add_worksheet("Table 1")
header_color = "#F2DCDB"
white = "#000000"
black = "#FFFFFF"
loc_cell_width = 20
data_cell_width = 15
column_start = 65
header_format = get_format_obj(
workbook, bg_color=header_color, font_size=12, bold=True
)
title_format = get_format_obj(
workbook, bg_color=white, font_size=13, align=False, bold=True
)
title_format.set_font_color(black)
# Column length is basically all columns in the dataframe except 'level'
col_len = final_df.shape[1]-1
data_cols = final_df.drop(["level", "lancet_label"], axis=1).columns.values
cols = list(map(chr, range(column_start, column_start+col_len)))
worksheet.set_column(cols[0]+":"+cols[0], loc_cell_width)
worksheet.set_column(cols[1]+":"+cols[-1], data_cell_width)
# place-holder to manually adjust title as needed
title = (
"Title goes here."
)
curr_row = 1
end_row = curr_row + CELL_HT["title"]
row_range = cols[0] + str(curr_row) + ":" + cols[-1] + str(end_row)
worksheet.merge_range(row_range, title, title_format)
curr_row = end_row+1
page_row_count = 1
page_breaks = []
for _, row in final_df.iterrows():
page_row_count += 1
### Insert page break after 20 rows.
if row["level"] == 0 or (page_row_count != 0 and
page_row_count % 20 == 0):
page_row_count = 0
page_breaks.append(curr_row - 1)
curr_row = write_header(
worksheet, curr_row, cols, data_cols,
header_format, stages
)
end_row = curr_row + CELL_HT["data_cols"]
col_idx = 0
if row["level"] < 3:
loc_fmt_obj = get_format_obj(
workbook, font_size=11,
bg_color=header_color, bold=True,
align=False
)
data_fmt_obj = get_format_obj(
workbook, font_size=11,
bg_color=header_color, bold=True
)
else:
loc_fmt_obj = get_format_obj(
workbook, font_size=11, align=False
)
data_fmt_obj = get_format_obj(
workbook, font_size=11
)
for col in final_df:
if col == "level":
continue
row_range = (
cols[col_idx] + str(curr_row) + ":" +
cols[col_idx] + str(end_row)
)
if col == "lancet_label":
loc_name = INDENT_MAP[row["level"]] + row[col]
worksheet.merge_range(row_range, loc_name, loc_fmt_obj)
else:
worksheet.merge_range(row_range, row[col], data_fmt_obj)
col_idx += 1
curr_row = end_row+1
worksheet.set_h_pagebreaks(page_breaks[1:])
worksheet.fit_to_pages(1, 0)
workbook.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--pop-version", type=str, required=True,
help="The version of population to use."
)
parser.add_argument("--tfr-version", type=str, required=True,
help="The version of fertility to use."
)
parser.add_argument(
"--gbd-round-id", type=int, required=True,
help="The GBD round associated with the data.")
parser.add_argument(
"--output-review-table", action="store_true",
help="Outputs table for reviewers along with Lancet-style table."
)
args = parser.parse_args()
# Define stages (population and TFR)
stages = ["pop", "tfr"]
# Get location metadata
gbd_loc_df = get_location_metadata(gbd_round_id=args.gbd_round_id,
location_set_id=35)
loc_meta = gbd_loc_df.query("level < 4")[
["location_id", "lancet_label", "level","sort_order"]]
location_ids = loc_meta.location_id.tolist()
# Make pop and TFR dataframes
pop_final_df = pull_reshape_pop(
args.gbd_round_id, args.pop_version, location_ids
)
tfr_final_df = pull_reshape_tfr(
args.gbd_round_id, args.tfr_version, location_ids
)
# merge dataframes
merged_df = loc_meta.merge(
pop_final_df).merge(
tfr_final_df,
on="location_id", how="left").sort_values(by="sort_order")
# Convert to floating decimal
data_cols = ["value_2017_pop_ref", "value_2100_pop_ref",
"value_2100_pop_sdg", "peak_pop_value",
"value_2017_tfr_ref", "value_2100_tfr_ref",
"value_2100_tfr_sdg"]
merged_df.loc[:, data_cols] = merged_df.loc[:, data_cols].applymap(
lambda x: convert_to_floating(x)
)
# Order final dataframe
final_df = merged_df[["level", "lancet_label"] + data_cols]
# Write table
plot_dir = (f"/ihme/forecasting/plot/{args.gbd_round_id}"
"/future/population/table_1/")
fname = date.today().strftime("%Y%m%d") + "_table_1.xlsx"
filepath = plot_dir + fname
try:
mkdir(plot_dir)
print(f"{plot_dir} created.")
except FileExistsError:
print(f"{plot_dir} already exists.")
write_table(final_df, filepath, stages)
if args.output_review_table:
review_fname = date.today().strftime("%Y%m%d") + "_table_1_review.csv"
review_cols = list(REVIEW_COL_NAME_MAP.keys())
review_df = merged_df[review_cols].drop_duplicates()
review_df.rename(columns=REVIEW_COL_NAME_MAP, inplace=True)
review_df.to_csv(plot_dir + review_fname, index=False) | """Pulls year 2017 GBD round 5 populations, converts it an xarray dataarray,
pulls forecast population, and concatenates the dataarrays. The new array is
then converted to a pandas dataframe. Peak population and peak population
year are pulled for each location in the dataframe. All required data are
then reshaped and merged for downstream table production.
Args:
gbd_round_id (int):
GBD round.
pop_version (str):
Forecast populations version.
location_ids (list):
List of location IDs to pull from both past and future data.
Returns:
pop_final_df (pandas dataframe):
Dataframe with all required population data, reshaped for downstream
table production.
"""
p_end = YEARS.past_end
f_end = YEARS.forecast_end
# Get 2017 GBD pops
pop_2017 = get_population(gbd_round_id=gbd_round_id, age_group_id=22,
sex_id=3, location_id=location_ids,
status="best", year_id=p_end, with_ui=True)[[
"year_id", "location_id", "population", "lower", "upper"
]].rename(columns={"population": "mean"})
pop_2017_da = melt_to_xarray(pop_2017)
# Get future pops
pop_fut = open_xr(f"{gbd_round_id}/future/population/"
f"{pop_version}/population_combined.nc").data
pop_fut_sel = pop_fut.sel(location_id=location_ids, scenario=SCENARIOS,
age_group_id=ALL_AGE_ID, sex_id=BOTH_SEX_ID)
# Concat and make quantile wide
pop_da = xr.concat([pop_2017_da, pop_fut_sel], dim="year_id")
pop_df = pop_da.rename("value").to_dataframe().reset_index()
pop_df = pop_df.pivot_table(values="value",
index=["location_id", "year_id", "age_group_id",
"sex_id", "scenario"],
columns="quantile").reset_index()
# Combine value and UI into one column
pop_with_ui = combine_mean_ui(pop_df)
# Find peak pops and year of peak
peak_pop_df = pop_with_ui.query("scenario == 0").groupby(
"location_id").apply(
get_max_pop_year).reset_index().rename(
columns={"mean":"peak_pop","year_id":"peak_year"})
peak_pop_df["peak_pop"] = peak_pop_df["peak_pop"].apply(
lambda x: round(x, 2))
peak_pop_df["peak_pop_value"] = (peak_pop_df["peak_pop"].astype(str) +
" (" +
peak_pop_df["peak_year"].astype(
int).astype(str) +
")")
# Get 2017 and 2100 values
pop_2017_only = pop_with_ui.query(f"year_id == {p_end} and scenario == 0")
pop_2100_only = pop_with_ui.query(f"year_id == {f_end}")
pop_2017_wide = pivot_scenarios(pop_2017_only, f"{p_end}", SCENARIO_MAP)
pop_2100_wide = pivot_scenarios(pop_2100_only, f"{f_end}", SCENARIO_MAP)
# Merge
pop_final_df = pop_2017_wide.merge(peak_pop_df).merge(pop_2100_wide)
return pop_final_df | identifier_body |
table_1.py | from datetime import date
import numpy as np
from os import mkdir
import pandas as pd
import xarray as xr
import xlsxwriter
from db_queries import (get_covariate_estimates, get_location_metadata,
get_population)
from fbd_core import argparse, YearRange
from fbd_core.etl import expand_dimensions
from fbd_core.file_interface import FBDPath, open_xr
ALL_AGE_ID = 22
BOTH_SEX_ID = 3
SCENARIOS = [-1, 0, 1, 2, 3]
YEARS = YearRange(1990, 2018, 2100)
CELL_HT = {
"title": 1,
"location": 1,
"stage": 0,
"data_cols": 2
}
COL_RANGE = {
"pop":2,
"tfr":1
}
INDENT_MAP = {
0: "",
1: " ",
2: " ",
3: " "
}
SCENARIO_MAP = {
0:"ref",
-1:"worse",
1:"better",
2:"fastest",
3:"sdg"
}
COL_NAME_MAP = {
"lancet_label":"Location",
"peak_pop_value": "Peak Population (year)",
"value_2017_pop_ref": "2017",
"value_2100_pop_ref": "2100 Reference Scenario",
"value_2100_pop_sdg": "2100 SDG Scenario",
"value_2017_tfr_ref": "2017",
"value_2100_tfr_ref": "2100 Reference Scenario",
"value_2100_tfr_sdg": "2100 SDG Scenario",
}
REVIEW_COL_NAME_MAP = {
"lancet_label": "Location name",
"mean_2017_pop_ref": "Population mean 2017",
"lower_2017_pop_ref": "Population lower 2017",
"upper_2017_pop_ref": "Population upper 2017",
"mean_2100_pop_ref": "Reference population mean 2100",
"lower_2100_pop_ref": "Reference population lower 2100",
"upper_2100_pop_ref": "Reference population upper 2100",
"mean_2100_pop_sdg": "SDG population mean 2100",
"lower_2100_pop_sdg": "SDG population lower 2100",
"upper_2100_pop_sdg": "SDG population upper 2100",
"peak_pop": "Peak population",
"peak_year": "Peak population year",
"mean_2017_tfr_ref": "TFR mean 2017",
"lower_2017_tfr_ref": "TFR lower 2017",
"upper_2017_tfr_ref": "TFR upper 2017",
"mean_2100_tfr_ref": "Reference TFR mean 2100",
"lower_2100_tfr_ref": "Reference TFR lower 2100",
"upper_2100_tfr_ref": "Reference TFR upper 2100",
"mean_2100_tfr_sdg": "SDG TFR mean 2100",
"lower_2100_tfr_sdg": "SDG TFR lower 2100",
"upper_2100_tfr_sdg": "SDG TFR upper 2100"
}
def melt_to_xarray(df):
"""Melts GBD data with 'mean', 'lower', and 'upper' columns to a single
'quantile' column; converts to xarray dataarray; and adds a scenario
dimension.
Args:
df (pandas dataframe):
Dataframe with 'year_id', 'location_id', 'mean', 'lower', and
'upper' columns.
Returns:
da_with_scenario (xarray dataarray):
Dataarray with 'year_id', 'quantile', 'location_id', and 'scenario'
dimensions.
"""
df_long = pd.melt(df,
id_vars=["year_id", "location_id"],
value_vars=["mean", "lower", "upper"],
var_name="quantile")
da = df_long.set_index(
["year_id", "quantile", "location_id"]).to_xarray()["value"]
da_with_scenario = expand_dimensions(da, scenario=[0])
return da_with_scenario
def combine_mean_ui(df, df_type="pop"):
"""Takes a dataframe with 'mean', 'lower', and 'upper' columns,
and returns a dataframe with a 'value' column that has the mean, lower,
and upper all together. If df_type == "pop", values are converted to
millions.
Args:
df (pandas dataframe):
Dataframe with 'mean', 'lower', and 'upper' columns.
Returns:
df (pandas dataframe):
Dataframe with 'mean', 'lower', and 'upper' columns and added
'value' column.
"""
for col in ["mean", "lower", "upper"]:
if df_type == "pop":
df[col] = df[col] / 1000000
df[col] = df[col].apply(lambda x: round(x, 2))
df["value"] = (df["mean"].astype(str) + " (" + df["lower"].astype(str) +
" - " + df["upper"].astype(str) + ")")
return df
def pivot_scenarios(df, prefix, scen_map, df_type="pop"):
"""Takes a dataframe with 'mean', 'lower', 'upper', 'value', and 'scenario'
columns, and returns a dataframe with wide scenarios. Scenario column names
are given by:
prefix + "_" + df_type + "_" + df["scenario"].map(scen_map)
Args:
df (pandas dataframe):
Dataframe with 'mean', 'lower', 'upper', 'value', and 'scenario'
columns.
Returns:
df (pandas dataframe):
Dataframe with 'mean', 'lower', 'upper', 'value' columns wide by
scenario.
"""
df["scenario"] = prefix + "_" + df_type + "_" +\
df["scenario"].map(scen_map)
df = df.pivot_table(values=["lower", "mean", "upper" ,"value"],
index="location_id",
columns="scenario",
aggfunc="first").reset_index()
# This flattens the column levels
df.columns = ['_'.join(col) for col in df.columns.values if col]
df.rename(columns={"location_id_":"location_id"}, inplace=True)
return df
def get_max_pop_year(group):
"""Takes a dataframe (or GroupBy object) with 'mean' and 'year_id' columns,
and returns a dataframe with the max value in 'mean' and the 'year_id'
of the max value.
Args:
df (pandas dataframe):
Dataframe with 'year_id' and 'mean' columns.
Returns:
df (pandas dataframe):
Dataframe with 'year_id' and 'mean' columns.
"""
max_year_val = group.loc[group["mean"].idxmax()][["year_id", "mean"]]
return max_year_val
def pull_reshape_pop(gbd_round_id, pop_version, location_ids):
"""Pulls year 2017 GBD round 5 populations, converts it an xarray dataarray,
pulls forecast population, and concatenates the dataarrays. The new array is
then converted to a pandas dataframe. Peak population and peak population
year are pulled for each location in the dataframe. All required data are
then reshaped and merged for downstream table production.
Args:
gbd_round_id (int):
GBD round.
pop_version (str):
Forecast populations version.
location_ids (list):
List of location IDs to pull from both past and future data.
Returns:
pop_final_df (pandas dataframe):
Dataframe with all required population data, reshaped for downstream
table production.
"""
p_end = YEARS.past_end
f_end = YEARS.forecast_end
# Get 2017 GBD pops
pop_2017 = get_population(gbd_round_id=gbd_round_id, age_group_id=22,
sex_id=3, location_id=location_ids,
status="best", year_id=p_end, with_ui=True)[[
"year_id", "location_id", "population", "lower", "upper"
]].rename(columns={"population": "mean"})
pop_2017_da = melt_to_xarray(pop_2017)
# Get future pops
pop_fut = open_xr(f"{gbd_round_id}/future/population/"
f"{pop_version}/population_combined.nc").data
pop_fut_sel = pop_fut.sel(location_id=location_ids, scenario=SCENARIOS,
age_group_id=ALL_AGE_ID, sex_id=BOTH_SEX_ID)
# Concat and make quantile wide
pop_da = xr.concat([pop_2017_da, pop_fut_sel], dim="year_id")
pop_df = pop_da.rename("value").to_dataframe().reset_index()
pop_df = pop_df.pivot_table(values="value",
index=["location_id", "year_id", "age_group_id",
"sex_id", "scenario"],
columns="quantile").reset_index()
# Combine value and UI into one column
pop_with_ui = combine_mean_ui(pop_df)
# Find peak pops and year of peak
peak_pop_df = pop_with_ui.query("scenario == 0").groupby(
"location_id").apply(
get_max_pop_year).reset_index().rename(
columns={"mean":"peak_pop","year_id":"peak_year"})
peak_pop_df["peak_pop"] = peak_pop_df["peak_pop"].apply(
lambda x: round(x, 2))
peak_pop_df["peak_pop_value"] = (peak_pop_df["peak_pop"].astype(str) +
" (" +
peak_pop_df["peak_year"].astype(
int).astype(str) +
")")
# Get 2017 and 2100 values
pop_2017_only = pop_with_ui.query(f"year_id == {p_end} and scenario == 0")
pop_2100_only = pop_with_ui.query(f"year_id == {f_end}")
pop_2017_wide = pivot_scenarios(pop_2017_only, f"{p_end}", SCENARIO_MAP)
pop_2100_wide = pivot_scenarios(pop_2100_only, f"{f_end}", SCENARIO_MAP)
# Merge
pop_final_df = pop_2017_wide.merge(peak_pop_df).merge(pop_2100_wide)
return pop_final_df
def pull_reshape_tfr(gbd_round_id, tfr_version, location_ids):
"""Pulls year 2017 GBD round 5 TFR, converts it an xarray dataarray,
pulls forecast TFR, and concatenates the dataarrays. The new array is
then converted to a pandas dataframe. All required data are then reshaped
and merged for downstream table production.
Args:
gbd_round_id (int):
GBD round.
tfr_version (str):
Forecast TFR version.
location_ids (list):
List of location IDs to pull from both past and future data.
Returns:
tfr_final_df (pandas dataframe):
Dataframe with all required TFR data, reshaped for downstream table
production.
"""
p_end = YEARS.past_end
f_end = YEARS.forecast_end
# Get 2017 GBD TFR
tfr_2017 = get_covariate_estimates(covariate_id=149,
gbd_round_id=gbd_round_id,
location_id=location_ids, year_id=p_end,
status="best")[[
"year_id", "location_id","mean_value", "lower_value", "upper_value"
]].rename(columns={"mean_value":"mean", "lower_value":"lower",
"upper_value":"upper"})
tfr_2017_da = melt_to_xarray(tfr_2017)
# Get future TFR
tfr_fut = open_xr(f"{gbd_round_id}/future/tfr/"
f"{tfr_version}/tfr_combined.nc").data
tfr_fut_sel = tfr_fut.sel(location_id=location_ids, scenario=SCENARIOS,
year_id=YEARS.forecast_years)
# Concat and make quantile wide
tfr_da = xr.concat([tfr_2017_da, tfr_fut_sel], dim="year_id")
tfr_df = tfr_da.to_dataframe().reset_index()
tfr_df = tfr_df.pivot_table(values="value",
index=["location_id", "year_id", "scenario"],
columns="quantile").reset_index()
# Combine value and UI into one column
tfr_df = combine_mean_ui(tfr_df, df_type="tfr")
# Get 2017 and 2100 values
tfr2017 = tfr_df.query(f"year_id == {p_end} and scenario==0")
tfr2100 = tfr_df.query(f"year_id == {f_end}")
tfr2017 = pivot_scenarios(tfr2017, f"{p_end}", SCENARIO_MAP, df_type="tfr")
tfr2100 = pivot_scenarios(tfr2100, f"{f_end}", SCENARIO_MAP, df_type="tfr")
# Merge
tfr_final_df = tfr2017.merge(tfr2100)
return tfr_final_df
def convert_to_floating(string):
"""
Takes a string with a decimal point and converts the decimal point to a
floating decimal point for lancet style formatting.
Args:
string (str):
A number string with a decimal point.
Returns:
str:
The original string with floating decimal.
"""
return "".join(["\u00b7" if char=="." else char for char in string])
def get_format_obj(workbook, font_name="Times New Roman", font_size=8,
bg_color="#FFFFFF", align=True, bold=False):
"""Utility function to dynamically create cell formatting options.
Args:
workbook (xlsxwriter Workbook):
Parent workbook of the worksheet to which the data is written.
font_name(str):
Font of the content.
font_size(int):
Font size of the content.
bg_color(str):
String representing the HEX code of cell color.
align(bool):
If cell content needs to be vertically and horizontally aligned.
bold (bool):
If cell content needs to be boldened.
Returns:
format_obj (xlsxwriter workbook format object):
Has specified format properties.
"""
format_obj = workbook.add_format(
{
"font_name": font_name,
"font_size": font_size
}
)
format_obj.set_border()
format_obj.set_text_wrap()
format_obj.set_bg_color(bg_color)
if bold:
format_obj.set_bold()
if align:
format_obj.set_align("center")
format_obj.set_align("vcenter")
return format_obj
def write_header(worksheet, curr_row, cols, data_cols, header_format, stages):
"""Utility function to write the header for each page.
Args:
worksheet (Worksheet object):
Worksheet to which the data is written.
curr_row (int):
Starting row number for the header.
cols (list):
List of characters representing the columns.
data_cols (pandas series):
Columns to be written.
header_format(xlsxwriter Format object):
Cell format options for headers.
stages (list):
"tfr", "pop" etc.
Returns:
int: An integer specifying the row number following the header.
"""
### Merge range function takes the locations of the cells to merge, the data
### to write and the cell format. A sample input would look like:
### worksheet.merge_range("A0:B1", "Location", cell_format_obj)
### The above call will merge 4 cells: A0, A1, B0, B1 and fill it with the
### value "Location".
end_row = curr_row + CELL_HT["location"]
row_range = cols[0] + str(curr_row) + ":" + cols[0] + str(end_row)
worksheet.merge_range(row_range, "Location", header_format)
num_pop_cols = sum(map(lambda i: "pop" in i, data_cols)) - 1
num_tfr_cols = sum(map(lambda i: "tfr" in i, data_cols)) - 1
col_end = 0
for i, stage in enumerate(stages):
if stage == "pop":
unit_txt = " (in millions)"
stage_txt = "Population"
col_range = num_pop_cols
else:
unit_txt = ""
stage_txt = "Total Fertility Rate"
col_range = num_tfr_cols
col_st = col_end + 1
col_end = col_st + col_range
curr_row_copy = curr_row
end_row = curr_row_copy + CELL_HT["stage"]
row_range = (
cols[col_st] + str(curr_row_copy) + ":" +
cols[col_end] + str(end_row)
)
col_txt = stage_txt + unit_txt
worksheet.merge_range(row_range, col_txt, header_format)
curr_row_copy = end_row + 1
end_row = curr_row_copy + CELL_HT["stage"]
col_st_copy = col_st
for column in data_cols:
if stage in column:
row_range = cols[col_st_copy] + str(curr_row_copy)
worksheet.write(row_range, COL_NAME_MAP[column], header_format)
col_st_copy += 1
return end_row + 1
def write_table(final_df, outfile, stages):
"""Writes the data to an xlsx table.
Args:
final_df (pandas dataframe):
Dataframe with formatted data.
outfile (FBDPath object):
Path to store the table.
stages (list):
"tfr", "pop" etc.
"""
workbook = xlsxwriter.Workbook(
str(outfile), {"constant_memory": False}
)
worksheet = workbook.add_worksheet("Table 1")
header_color = "#F2DCDB"
white = "#000000"
black = "#FFFFFF"
loc_cell_width = 20
data_cell_width = 15
column_start = 65
header_format = get_format_obj(
workbook, bg_color=header_color, font_size=12, bold=True
)
title_format = get_format_obj(
workbook, bg_color=white, font_size=13, align=False, bold=True
)
title_format.set_font_color(black)
# Column length is basically all columns in the dataframe except 'level'
col_len = final_df.shape[1]-1
data_cols = final_df.drop(["level", "lancet_label"], axis=1).columns.values
cols = list(map(chr, range(column_start, column_start+col_len)))
worksheet.set_column(cols[0]+":"+cols[0], loc_cell_width)
worksheet.set_column(cols[1]+":"+cols[-1], data_cell_width)
# place-holder to manually adjust title as needed
title = (
"Title goes here."
)
curr_row = 1
end_row = curr_row + CELL_HT["title"]
row_range = cols[0] + str(curr_row) + ":" + cols[-1] + str(end_row)
worksheet.merge_range(row_range, title, title_format)
curr_row = end_row+1
page_row_count = 1
page_breaks = []
for _, row in final_df.iterrows():
page_row_count += 1
### Insert page break after 20 rows.
if row["level"] == 0 or (page_row_count != 0 and
page_row_count % 20 == 0):
page_row_count = 0
page_breaks.append(curr_row - 1)
curr_row = write_header(
worksheet, curr_row, cols, data_cols,
header_format, stages
)
end_row = curr_row + CELL_HT["data_cols"]
col_idx = 0
if row["level"] < 3:
loc_fmt_obj = get_format_obj(
workbook, font_size=11,
bg_color=header_color, bold=True,
align=False
)
data_fmt_obj = get_format_obj(
workbook, font_size=11,
bg_color=header_color, bold=True
)
else:
loc_fmt_obj = get_format_obj(
workbook, font_size=11, align=False
)
data_fmt_obj = get_format_obj(
workbook, font_size=11
)
for col in final_df:
if col == "level":
continue
row_range = (
cols[col_idx] + str(curr_row) + ":" +
cols[col_idx] + str(end_row)
)
if col == "lancet_label":
|
else:
worksheet.merge_range(row_range, row[col], data_fmt_obj)
col_idx += 1
curr_row = end_row+1
worksheet.set_h_pagebreaks(page_breaks[1:])
worksheet.fit_to_pages(1, 0)
workbook.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--pop-version", type=str, required=True,
help="The version of population to use."
)
parser.add_argument("--tfr-version", type=str, required=True,
help="The version of fertility to use."
)
parser.add_argument(
"--gbd-round-id", type=int, required=True,
help="The GBD round associated with the data.")
parser.add_argument(
"--output-review-table", action="store_true",
help="Outputs table for reviewers along with Lancet-style table."
)
args = parser.parse_args()
# Define stages (population and TFR)
stages = ["pop", "tfr"]
# Get location metadata
gbd_loc_df = get_location_metadata(gbd_round_id=args.gbd_round_id,
location_set_id=35)
loc_meta = gbd_loc_df.query("level < 4")[
["location_id", "lancet_label", "level","sort_order"]]
location_ids = loc_meta.location_id.tolist()
# Make pop and TFR dataframes
pop_final_df = pull_reshape_pop(
args.gbd_round_id, args.pop_version, location_ids
)
tfr_final_df = pull_reshape_tfr(
args.gbd_round_id, args.tfr_version, location_ids
)
# merge dataframes
merged_df = loc_meta.merge(
pop_final_df).merge(
tfr_final_df,
on="location_id", how="left").sort_values(by="sort_order")
# Convert to floating decimal
data_cols = ["value_2017_pop_ref", "value_2100_pop_ref",
"value_2100_pop_sdg", "peak_pop_value",
"value_2017_tfr_ref", "value_2100_tfr_ref",
"value_2100_tfr_sdg"]
merged_df.loc[:, data_cols] = merged_df.loc[:, data_cols].applymap(
lambda x: convert_to_floating(x)
)
# Order final dataframe
final_df = merged_df[["level", "lancet_label"] + data_cols]
# Write table
plot_dir = (f"/ihme/forecasting/plot/{args.gbd_round_id}"
"/future/population/table_1/")
fname = date.today().strftime("%Y%m%d") + "_table_1.xlsx"
filepath = plot_dir + fname
try:
mkdir(plot_dir)
print(f"{plot_dir} created.")
except FileExistsError:
print(f"{plot_dir} already exists.")
write_table(final_df, filepath, stages)
if args.output_review_table:
review_fname = date.today().strftime("%Y%m%d") + "_table_1_review.csv"
review_cols = list(REVIEW_COL_NAME_MAP.keys())
review_df = merged_df[review_cols].drop_duplicates()
review_df.rename(columns=REVIEW_COL_NAME_MAP, inplace=True)
review_df.to_csv(plot_dir + review_fname, index=False) | loc_name = INDENT_MAP[row["level"]] + row[col]
worksheet.merge_range(row_range, loc_name, loc_fmt_obj) | conditional_block |
response.rs | // rust imports
use std::io::Read;
use std::ffi::OsStr;
use std::path::{PathBuf, Path};
use std::fs::{self, File};
use std::collections::HashMap;
// 3rd-party imports
use rusqlite::Connection;
use rusqlite::types::ToSql;
use hyper::http::h1::HttpReader;
use hyper::buffer::BufReader;
use hyper::net::NetworkStream;
use hyper::header::{Headers, ContentType};
use hyper::mime::{Mime, TopLevel, SubLevel};
use multipart::server::{Multipart, Entries, SaveResult};
use url::percent_encoding::percent_decode;
use mime_types;
use csv;
use chrono::naive::date::NaiveDate;
use chrono::Datelike;
use serde::ser::Serialize;
use serde_json;
// local imports
use route::{Route, HumanError, APIError};
use database::Database;
// statics
lazy_static! {
static ref MIME_TYPES: mime_types::Types = mime_types::Types::new().unwrap();
}
// enums
pub enum Component {
Home,
NotFound,
}
#[derive(Serialize, Debug)]
pub struct JSONResponse {
pub error: Option<String>,
pub payload: Option<serde_json::Value>,
}
impl JSONResponse {
fn error(reason: Option<String>) -> Self {
JSONResponse {
error: reason,
payload: None,
}
}
fn payload<T: Serialize>(value: T) -> Self {
use serde_json::to_value;
JSONResponse {
error: None,
payload: Some(to_value(value).unwrap()),
}
}
}
pub enum AppResponse {
Component(Component),
Asset(ContentType, Vec<u8> /* content */),
MethodNotAllowed,
NotFound,
BadRequest,
InternalServerError,
JSONResponse(JSONResponse),
}
impl AppResponse {
pub fn process(db_conn: Database,
route: Route,
headers: Headers,
http_reader: HttpReader<&mut BufReader<&mut NetworkStream>>)
-> Self {
match route {
Route::Home => AppResponse::Component(Component::Home),
Route::FileUpload => handle_file_upload(db_conn, headers, http_reader),
Route::Asset(path_to_asset) => handle_asset(path_to_asset),
Route::HumanError(human_error) => {
match human_error {
HumanError::NotFound => AppResponse::Component(Component::NotFound),
}
}
Route::APIError(api_error) => {
match api_error {
APIError::MethodNotAllowed => AppResponse::MethodNotAllowed,
APIError::NotFound => AppResponse::NotFound,
}
}
}
}
}
fn handle_asset(path_to_asset: String) -> AppResponse {
#[inline]
fn decode_percents(string: &OsStr) -> String {
let string = format!("{}", string.to_string_lossy());
format!("{}", percent_decode(string.as_bytes()).decode_utf8_lossy())
}
// TODO: inlined resources here
// URL decode
let decoded_req_path = Path::new(&path_to_asset).iter().map(decode_percents);
let starts_with = match Path::new("./assets/").to_path_buf().canonicalize() {
Err(_) => {
return AppResponse::Component(Component::NotFound);
}
Ok(x) => x,
};
let mut req_path = starts_with.clone();
req_path.extend(decoded_req_path);
let req_path: PathBuf = req_path;
// TODO: this is a security bottle-neck
let req_path = match req_path.canonicalize() {
Err(_) => {
return AppResponse::Component(Component::NotFound);
}
Ok(req_path) => {
if !req_path.starts_with(starts_with.as_path()) {
return AppResponse::Component(Component::NotFound);
}
req_path
}
};
match fs::metadata(&req_path) {
Ok(metadata) => {
if !metadata.is_file() {
return AppResponse::Component(Component::NotFound);
}
// TODO: better way?
let path_str = format!("{}", &req_path.to_string_lossy());
// Set the content type based on the file extension
let mime_str = MIME_TYPES.mime_for_path(req_path.as_path());
let mut content_type = ContentType(Mime(TopLevel::Application, SubLevel::Json, vec![]));
let _ = mime_str.parse().map(|mime: Mime| {
content_type = ContentType(mime);
});
let mut file = File::open(req_path)
.ok()
.expect(&format!("No such file: {:?}", path_str));
let mut content = Vec::new();
file.read_to_end(&mut content).unwrap();
return AppResponse::Asset(content_type, content);
}
Err(_err) => {
return AppResponse::Component(Component::NotFound);
}
}
}
fn handle_file_upload(db_conn: Database,
headers: Headers,
http_reader: HttpReader<&mut BufReader<&mut NetworkStream>>)
-> AppResponse {
match process_multipart(headers, http_reader) {
None => AppResponse::BadRequest,
Some(mut multipart) => {
match multipart.save().temp() {
SaveResult::Full(entries) => process_entries(db_conn, entries),
SaveResult::Partial(_entries, error) => {
println!("Errors saving multipart:\n{:?}", error);
// TODO: fix
// process_entries(entries.into())
AppResponse::BadRequest
}
SaveResult::Error(error) => {
println!("Errors saving multipart:\n{:?}", error);
// Err(error)
AppResponse::BadRequest
}
}
}
}
}
fn process_entries(db_conn: Database, entries: Entries) -> AppResponse {
let files = match entries.files.get("uploads[]") {
Some(files) => {
if files.len() <= 0 {
return AppResponse::BadRequest;
}
files
}
None => {
return AppResponse::BadRequest;
}
};
let mut expense_tracker = ExpenseTracker::new();
let mut records = vec![];
for file in files {
let mut reader = match csv::Reader::from_file(file.path.clone()) {
Ok(reader) => reader.has_headers(true),
Err(error) => {
// TODO: error
println!("error: {}", error);
return AppResponse::InternalServerError;
}
};
for record in reader.decode() {
let (date,
category,
employee_name,
employee_address,
expense_description,
pre_tax_amount,
tax_name,
tax_amount): (String,
String,
String,
String,
String,
String,
String,
String) = match record {
Ok(x) => x,
Err(_) => {
return AppResponse::BadRequest;
}
};
let pre_tax_amount: f64 = {
let pre_tax_amount = pre_tax_amount.trim().replace(",", "");
match pre_tax_amount.parse::<f64>() {
Ok(x) => x,
Err(_) => {
return AppResponse::BadRequest;
}
}
};
let tax_amount: f64 = {
let tax_amount = tax_amount.trim().replace(",", "");
match tax_amount.parse::<f64>() {
Ok(x) => x,
Err(_) => {
return AppResponse::BadRequest;
}
}
};
let new_date = match NaiveDate::parse_from_str(&date, "%_m/%e/%Y") {
Ok(x) => x,
Err(_) => {
return AppResponse::BadRequest;
}
};
let record = Record(date,
category,
employee_name,
employee_address,
expense_description,
pre_tax_amount,
tax_name,
tax_amount);
records.push(record);
expense_tracker.add(new_date, pre_tax_amount + tax_amount);
}
}
add_to_database(db_conn, records);
return AppResponse::JSONResponse(JSONResponse::payload(expense_tracker));
}
fn add_to_database(db_connnection: Database, records: Vec<Record>) {
for record in records {
let Record(date,
category,
employee_name,
employee_address,
expense_description,
pre_tax_amount,
tax_name,
tax_amount) = record;
let query = format!("
INSERT INTO ExpenseHistory(date, category, \
employee_name, employee_address, expense_description, \
pre_tax_amount, tax_name, tax_amount)
VALUES (:date, \
:category, :employee_name, :employee_address, :expense_description, \
:pre_tax_amount, :tax_name, :tax_amount);
");
let params: &[(&str, &ToSql)] = &[(":date", &date),
(":category", &category),
(":employee_name", &employee_name),
(":employee_address", &employee_address),
(":expense_description", &expense_description),
(":pre_tax_amount", &pre_tax_amount),
(":tax_name", &tax_name),
(":tax_amount", &tax_amount)];
db_write_lock!(db_conn; db_connnection.clone());
let db_conn: &Connection = db_conn;
match db_conn.execute_named(&query, params) {
Err(sqlite_error) => {
panic!("{:?}", sqlite_error);
}
_ => {
/* query sucessfully executed */
}
}
}
}
fn process_multipart<R: Read>(headers: Headers, http_reader: R) -> Option<Multipart<R>> {
let boundary = headers.get::<ContentType>().and_then(|ct| {
use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};
let ContentType(ref mime) = *ct;
let params = match *mime {
Mime(TopLevel::Multipart, SubLevel::FormData, ref params) => params,
_ => return None,
};
params.iter()
.find(|&&(ref name, _)| match *name {
Attr::Boundary => true,
_ => false,
})
.and_then(|&(_, ref val)| match *val {
Value::Ext(ref val) => Some(&**val),
_ => None,
})
});
match boundary.map(String::from) {
Some(boundary) => Some(Multipart::with_body(http_reader, boundary)),
None => None,
}
}
#[derive(Eq, PartialEq, Hash, Serialize)]
enum Month {
January,
February,
March,
April,
May,
June,
July,
August,
September,
October,
November,
December,
}
#[derive(Serialize)]
struct ExpenseTracker(HashMap<Month, f64>);
impl ExpenseTracker {
fn | () -> Self {
ExpenseTracker(HashMap::new())
}
fn add(&mut self, date: NaiveDate, expenses: f64) {
let month = match date.month() {
1 => Month::January,
2 => Month::February,
3 => Month::March,
4 => Month::April,
5 => Month::May,
6 => Month::June,
7 => Month::July,
8 => Month::August,
9 => Month::September,
10 => Month::October,
11 => Month::November,
12 => Month::December,
_ => unreachable!(),
};
if self.0.contains_key(&month) {
let entry = self.0.get_mut(&month).unwrap();
*entry = *entry + expenses;
return;
}
self.0.insert(month, expenses);
}
}
struct Record(String, String, String, String, String, f64, String, f64);
| new | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.