text stringlengths 1 1.05M |
|---|
<filename>src/other/orm-config.service.ts<gh_stars>0
import { Injectable } from "@nestjs/common";
import * as path from 'path';
import 'dotenv/config';
@Injectable()
export class OrmConfigService {
getOrmConfig(): object {
return {
type: process.env.DB_TYPE,
host: process.env.DB_HOST,
port: process.env.DB_PORT,
username: process.env.DB_USER,
password: <PASSWORD>,
database: process.env.DB,
synchronize: false,
logging: false,
entities: [
`${path.dirname(require.main.filename)}/**/*.entity{.ts,.js}`,
],
};
}
}
|
// NOTE(review): webpack-generated bundle chunk (see sourceMappingURL below).
// Do not edit by hand — regenerate from src/components/icon/assets instead.
(window["webpackJsonp"] = window["webpackJsonp"] || []).push([[91],{
/***/ "../../src/components/icon/assets/editor_undo.js":
/*!**********************************************************************************!*\
  !*** /Users/chanderprall/projects/eui/src/components/icon/assets/editor_undo.js ***!
  \**********************************************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.icon = void 0;
__webpack_require__(/*! core-js/modules/es6.object.assign */ "../../node_modules/core-js/modules/es6.object.assign.js");
var _react = _interopRequireDefault(__webpack_require__(/*! react */ "../../node_modules/react/index.js"));
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); }
// Renders the 16x16 "editor undo" SVG icon, spreading any extra props
// onto the root <svg> element.
var EuiIconEditorUndo = function EuiIconEditorUndo(props) {
  return _react.default.createElement("svg", _extends({
    width: 16,
    height: 16,
    viewBox: "0 0 16 16",
    xmlns: "http://www.w3.org/2000/svg"
  }, props), _react.default.createElement("path", {
    d: "M5.5 6h5c1.943 0 3 1.057 3 3s-1.057 3-3 3H9v-1h1.5c1.39 0 2-.61 2-2 0-1.39-.61-2-2-2h-5v3L2 6.5 5.5 3v3z"
  }));
};
var icon = EuiIconEditorUndo;
exports.icon = icon;
EuiIconEditorUndo.__docgenInfo = {
  "description": "",
  "methods": [],
  "displayName": "EuiIconEditorUndo"
};
/***/ })
}]);
//# sourceMappingURL=91.bundle.js.map |
<filename>plugins/com.ibm.socialcrm.notesintegration.ui/src/com/ibm/socialcrm/notesintegration/ui/custom/SugarEntrySelectionComposite.java
package com.ibm.socialcrm.notesintegration.ui.custom;
/****************************************************************
* IBM OpenSource
*
* (C) Copyright IBM Corp. 2012
*
* Licensed under the Apache License v2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
***************************************************************/
import java.util.List;
import org.eclipse.jface.layout.GridDataFactory;
import org.eclipse.jface.layout.GridLayoutFactory;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.ControlAdapter;
import org.eclipse.swt.events.ControlEvent;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Shell;
import com.ibm.socialcrm.notesintegration.core.BaseSugarEntry;
/**
* Composite that lets the user choose from multiple items when our live text matches yield multiple results.
*/
public class SugarEntrySelectionComposite extends Composite {
    // Candidate entries the user must choose between (assumed non-empty:
    // entries.get(0) is dereferenced in createComposite — TODO confirm callers).
    private List<BaseSugarEntry> entries;
    // Guards against resizing the shell more than once from the resize listener.
    private boolean sizeSet = false;
    public SugarEntrySelectionComposite(Shell shell, List<BaseSugarEntry> entries) {
        super(shell, SWT.NONE);
        this.entries = entries;
        createComposite();
    }
    /**
     * Builds the item list, sets the shell title from the first entry's
     * type, and caps the shell height at roughly 7 visible items.
     */
    protected void createComposite() {
        setLayout(GridLayoutFactory.fillDefaults().margins(0, 0).spacing(0, 0).create());
        setLayoutData(GridDataFactory.fillDefaults().grab(true, true).create());
        final SugarItemList itemList = new SugarItemList(this, SWT.NONE);
        getShell().setText(entries.get(0).getSugarType().getMultiSelectionTitle());
        final SugarItem[] items = new SugarItem[entries.size()];
        int ctr = 0;
        for (BaseSugarEntry entry : entries) {
            items[ctr] = itemList.addItem(entry);
            ctr++;
        }
        // The first item's resize event tells us the per-item height, which is
        // only known once the control has been laid out.
        items[0].addControlListener(new ControlAdapter() {
            @Override
            public void controlResized(ControlEvent arg0) {
                if (!sizeSet) {
                    sizeSet = true;
                    int y = items[0].getSize().y;
                    // We don't want to show more than 7 items by default (The 8 roughly accounts for the size of the shell header).
                    if (entries.size() > 7) {
                        getShell().setSize(getShell().getSize().x, 8 * y);
                    }
                }
            }
        });
        itemList.setLayoutData(GridDataFactory.fillDefaults().grab(true, true).hint(300, SWT.DEFAULT).create());
        itemList.setFocus();
    }
}
|
<filename>client/monitor/auth.go
package monitor
import (
"encoding/json"
"net"
"os"
"ssprobe-common/model"
"ssprobe-common/util"
"strings"
)
// Default connection settings; each can be overridden on the command
// line via --name/--server/--port/--token (see parseParam).
var (
	name   string = "onezol.com" // node display name reported to the server
	server string = "127.0.0.1"  // monitor server host
	port   string = "3384"       // monitor server port
	// BUG FIX: the token must not default to a committed placeholder
	// secret; an empty default also makes parseParam's emptiness check
	// actually enforce that --token is supplied.
	token string = ""
)
// AuthResult carries the outcome of an authentication attempt.
type AuthResult struct {
	Ok   bool      // true when the server accepted the token
	Conn *net.Conn // established connection; only set on success
	Name string    // client display name to report to the server
}

// logger is the shared logging helper for this package.
var logger util.Logger
// parseParam parse user parameters.
// parseParam overrides the default connection settings with values taken
// from command-line arguments of the form --key=value, then exits if the
// server or token is empty.
func parseParam() {
	for _, arg := range os.Args {
		// BUG FIX: match the flag as a prefix instead of anywhere in the
		// argument — strings.Contains plus a fixed-offset slice mangles
		// values that happen to embed another flag's text.
		switch {
		case strings.HasPrefix(arg, "--name="):
			name = strings.TrimPrefix(arg, "--name=")
		case strings.HasPrefix(arg, "--server="):
			server = strings.TrimPrefix(arg, "--server=")
		case strings.HasPrefix(arg, "--port="):
			port = strings.TrimPrefix(arg, "--port=")
		case strings.HasPrefix(arg, "--token="):
			token = strings.TrimPrefix(arg, "--token=")
		}
	}
	if len(server) == 0 || len(token) == 0 {
		logger.LogWithExit("The argument you provided does not match [--server,--token].")
	}
}
// RequestAuth authenticate the client and return the connection.
func RequestAuth() *AuthResult {
parseParam()
conn, err := net.Dial("tcp", server+":"+port)
if err != nil {
logger.LogWithError(err, "Failed to connect to server!")
return &AuthResult{Ok: false}
}
// Authentication.
bytes, _ := json.Marshal(token)
_, err = conn.Write(bytes)
if err != nil {
logger.OnlyLog("Failed to send authentication request.")
return &AuthResult{Ok: false}
}
var buf = make([]byte, 1024)
n, err := conn.Read(buf)
if err != nil {
logger.LogWithError(err, "Authentication failed.")
return &AuthResult{Ok: false}
}
var resModel model.AuthResponse
_ = json.Unmarshal(buf[:n], &resModel)
// The token is incorrect.
if resModel.Code == -1 {
logger.LogWithExit("Authentication failed, incorrect token.")
}
logger.OnlyLog("Server connection successful!")
return &AuthResult{
Ok: true,
Conn: &conn,
Name: name,
}
}
|
class DatasetFMRI:
    """Skeleton pipeline for an fMRI dataset.

    All processing methods are unimplemented stubs; each stores its result
    on the corresponding attribute once implemented.
    """
    def __init__(self):
        self.data = None  # Attribute to store the loaded fMRI data
        self.preprocessed_data = None  # Attribute to store the preprocessed fMRI data
        self.analysis_result = None  # Attribute to store the result of data analysis
    def load_data(self, file_path):
        """Load fMRI data from ``file_path`` into ``self.data`` (stub)."""
        # Implement the logic to load fMRI data from the specified file
        # Example: self.data = fMRI_loader.load(file_path)
        pass
    def preprocess_data(self):
        """Preprocess ``self.data`` into ``self.preprocessed_data`` (stub)."""
        # Implement the logic to preprocess the loaded fMRI data
        # Example: self.preprocessed_data = fMRI_preprocessor.preprocess(self.data)
        pass
    def analyze_data(self):
        """Analyze ``self.preprocessed_data`` into ``self.analysis_result`` (stub)."""
        # Implement the logic to analyze the preprocessed fMRI data
        # Example: self.analysis_result = fMRI_analyzer.analyze(self.preprocessed_data)
        pass
<reponame>Xinkai/YoutubePlayrate<filename>content.js
// ==UserScript==
// @name Youtube Playback Rate
// @namespace http://cuoan.net/
// @version 0.1
// @description Easily control YouTube's playback speed
// @author Xinkai
// @match http://*/*
// @match https://*/*
// @grant none
// ==/UserScript==
(() => {
'use strict';
class Utils {
    // Walks from `node` up to (but not including) `document` and returns
    // the first node matching `selector`, or null if none matches.
    static getAncestorNode(node, selector) {
        let currentNode = node;
        while (currentNode !== document) {
            if (currentNode.matches(selector)) {
                return currentNode;
            }
            currentNode = currentNode.parentNode;
        }
        return null;
    }
    // Formats a duration in seconds as "h:mm:ss" (hours > 0) or "m:ss".
    static formatTime(seconds) {
        const h = (seconds / 3600) | 0;
        const m = (seconds / 60) % 60 | 0;
        const s = (seconds % 60) | 0;
        const m_str = (h > 0 && m < 10) ? `0${m}` : `${m}`;
        const s_str = s < 10 ? `0${s}` : `${s}`;
        return h !== 0 ? `${h}:${m_str}:${s_str}` : `${m_str}:${s_str}`;
    }
    // Parses "h:m:s", "m:s" or "s" back into a number of seconds.
    static parseTime(str) {
        const splits = str.split(":");
        const len = splits.length;
        let accum = +splits[len - 1];
        accum += (+splits[len - 2]) * 60 || 0;
        // BUG FIX: the hours component is worth 3600 seconds, not 60 —
        // the original made parseTime the non-inverse of formatTime.
        accum += (+splits[len - 3]) * 3600 || 0;
        return accum;
    }
    static log(...args) {
        return console.log("PlayRate", ...args);
    }
}
// Shared playback-rate UI: a "warped time" indicator in the player toolbar
// (elapsed/total time divided by the current rate) plus a transient overlay
// showing the rate as a percentage. Site-specific subclasses supply the
// toolbar lookup and styling via the abstract methods below.
class Base {
    constructor({ player, container }) {
        this.player = player;
        this.container = container;
        this.rate = this.player.playbackRate;
        this.warpedTimeIndicator = null;
        this.statusOverlay = null;
        this.overlayTimer = null;
        this.toolbar = null;
    }
    // Builds and wires the UI exactly once per player element, guarded by
    // the "playrate-loaded" attribute on the media element.
    initialize = () => {
        if (this.player.getAttribute("playrate-loaded")) {
            return;
        }
        this.player.setAttribute("playrate-loaded", true);
        this.toolbar = this.getToolbar();
        this.styleToolbar();
        this.warpedTimeIndicator = this.createWarpedTimeIndicator();
        this.styleWarpedTimeIndicator();
        this.placeWarpedTimeIndicator(this.toolbar);
        this.statusOverlay = this.createStatusOverlay();
        this.placeStatusOverlay();
        this.updateDuration();
        this.player.addEventListener("durationchange", this.updateDuration);
        this.setupUserInputListeners();
    }
    // Shows the rate overlay, restarting the 1.5 s auto-hide timer.
    showStatusOverlay = () => {
        if (this.overlayTimer) {
            clearTimeout(this.overlayTimer);
        }
        this.statusOverlay.overlay.style.visibility = "visible";
        this.overlayTimer = setTimeout(() => {
            this.statusOverlay.overlay.style.visibility = "hidden";
            this.overlayTimer = null;
        }, 1500);
    }
    // "Warped" time = wall-clock time at the current rate.
    updateCurrent = () => {
        this.warpedTimeIndicator.current.innerText = Utils.formatTime(this.player.currentTime / this.rate);
    }
    updateDuration = () => {
        this.warpedTimeIndicator.duration.innerText = Utils.formatTime(this.player.duration / this.rate);
    }
    updateStatusOverlay = () => {
        this.statusOverlay.status.innerText = `${this.player.playbackRate * 100 | 0}%`;
    }
    // Mouse wheel over the indicator and Numpad +/- adjust the rate in
    // 0.1 steps; key handling is skipped inside inputs/editable content.
    setupUserInputListeners = () => {
        this.warpedTimeIndicator.root.addEventListener("wheel", event => {
            if (event.deltaY < 0) {
                this.changeRate(event, 0.1);
            } else {
                this.changeRate(event, -0.1);
            }
        });
        document.addEventListener("keydown", event => {
            if (event.target.tagName !== "INPUT" &&
                event.target.contentEditable === "inherit") {
                if (event.code === "NumpadAdd") {
                    this.changeRate(event, 0.1);
                } else if (event.code === "NumpadSubtract") {
                    this.changeRate(event, -0.1);
                }
            }
        });
        this.player.addEventListener("ratechange", this.onRateReset);
        this.player.addEventListener("loadedmetadata", this.onRateReset);
        this.player.addEventListener("timeupdate", this.updateCurrent);
    }
    // Applies a rate delta, clamped to roughly (0.2, 4) by refusing steps
    // past the bounds, and flashes the status overlay.
    changeRate = (event, delta) => {
        if ((delta > 0 && this.player.playbackRate < 4) ||
            (delta < 0 && this.player.playbackRate > 0.2)) {
            this.showStatusOverlay();
            event.stopPropagation();
            event.preventDefault();
            this.player.playbackRate += delta;
        }
    }
    // Resolves to the page's first <video>/<audio>, waiting for a
    // capture-phase "loadedmetadata" if none exists yet.
    static getPlayer = async() => {
        const player = document.querySelector("video,audio");
        if (player) {
            return player;
        }
        return new Promise(resolve => {
            const onMetaDataloaded = event => {
                const tagName = event.target.tagName;
                if (tagName === "VIDEO" || tagName === "AUDIO") {
                    document.removeEventListener("loadedmetadata", onMetaDataloaded, true);
                    resolve(event.target);
                }
            };
            document.body.addEventListener("loadedmetadata", onMetaDataloaded, true);
        });
    }
    // Detects which supported site hosts the player (by a site-specific
    // container selector among the player's ancestors) and instantiates
    // the matching provider subclass.
    static launch = async() => {
        const player = await this.getPlayer();
        const siteDetectors = {
            ".html5-video-player": Youtube,
            ".bilibili-player-video-wrap": Bilibili,
        };
        for (const [containerSelector, Provider] of Object.entries(siteDetectors)) {
            const container = Utils.getAncestorNode(player, containerSelector);
            if (container) {
                new Provider({ player, container });
                break;
            }
        }
    }
    // Builds the "<current> / <duration>" DOM fragment (unstyled).
    createWarpedTimeIndicator = () => {
        const root = document.createElement("div");
        const current = document.createElement("span");
        root.appendChild(current);
        const separator = document.createElement("span");
        separator.innerText = " / ";
        root.appendChild(separator);
        const duration = document.createElement("span");
        root.appendChild(duration);
        return {
            root,
            current,
            separator,
            duration,
        };
    }
    // Builds the centered, initially-hidden rate overlay.
    createStatusOverlay = () => {
        const status = document.createElement("div");
        status.style.textAlign = "center";
        status.style.margin = "auto";
        status.style.fontSize = "11em";
        status.style.color = "rgba(255,255,255, 0.5)";
        const overlay = document.createElement("div");
        overlay.style.backgroundColor = "rgba(0, 0, 0, 0.5)";
        overlay.style.zIndex = 10;
        overlay.style.width = "50%";
        overlay.style.height = "50%";
        overlay.style.position = "absolute";
        overlay.style.display = "flex";
        overlay.style.visibility = "hidden";
        overlay.style.top = "25%";
        overlay.style.left = "25%";
        overlay.style.pointerEvents = "none";
        overlay.style.transition = "all 0.5s linear";
        overlay.appendChild(status);
        return {
            status,
            overlay,
        };
    }
    // ---- Abstract hooks implemented by site-specific subclasses ----
    getToolbar = () => {
        throw new Error("Abstract");
    }
    styleToolbar = () => {
        throw new Error("Abstract");
    }
    styleWarpedTimeIndicator = () => {
        throw new Error("Abstract");
    }
    placeWarpedTimeIndicator = (toolbar) => {
        throw new Error("Abstract");
    }
    placeStatusOverlay = () => {
        throw new Error("Abstract");
    }
    // Re-reads the player's rate (e.g. after the site resets it) and
    // refreshes every rate-dependent display.
    onRateReset = () => {
        this.rate = this.player.playbackRate;
        this.updateStatusOverlay();
        this.updateCurrent();
        this.updateDuration();
    }
}
// YouTube provider: hooks the indicator into the player's right-hand
// control cluster, reusing YouTube's own time-display CSS classes.
class Youtube extends Base {
    constructor({ player, container }) {
        super({ player, container });
        this.initialize();
    }
    getToolbar = () => {
        return this.container.querySelector(".ytp-right-controls");
    }
    styleToolbar = () => {
        // fix style: floating .ytp-right-controls may overflow
        this.toolbar.parentNode.style.position = "relative";
        this.toolbar.style.position = "absolute";
        this.toolbar.style.right = "0";
        this.toolbar.style.top = "0";
        this.toolbar.style.float = "none";
    }
    styleWarpedTimeIndicator = () => {
        const {
            root,
            current,
            separator,
            duration,
        } = this.warpedTimeIndicator;
        // Native YouTube classes make the indicator match the built-in look.
        root.classList.add("ytp-time-display", "playrate-ext", "notranslate");
        current.classList.add("ytp-time-current");
        separator.classList.add("ytp-time-separator");
        duration.classList.add("ytp-time-duration");
    }
    placeWarpedTimeIndicator = (toolbar) => {
        toolbar.insertBefore(this.warpedTimeIndicator.root, toolbar.firstChild);
    }
    placeStatusOverlay = () => {
        this.container.appendChild(this.statusOverlay.overlay);
    }
}
// Bilibili provider: places the indicator next to the player's native
// speed button (which it hides), polling until that button exists.
class Bilibili extends Base {
    constructor({ player, container }) {
        super({ player, container });
        this.initialize();
    }
    getToolbar = () => {
        return this.container.querySelector(".bilibili-player-video-control");
    }
    styleToolbar = () => {
    }
    styleWarpedTimeIndicator = () => {
        const {
            root,
            current,
            separator,
            duration,
        } = this.warpedTimeIndicator;
        root.classList.add("bilibili-player-video-time");
        root.style.color = "white";
        root.style.width = "60px";
        current.classList.add("bilibili-player-video-time-now");
        separator.classList.add("bilibili-player-video-divider");
        duration.classList.add("bilibili-player-video-time-total");
    }
    placeWarpedTimeIndicator = (toolbar) => {
        const bottom = this.toolbar.querySelector(".bilibili-player-video-control-bottom-right");
        // The speed button is created asynchronously by the site, so poll
        // every 100 ms until it appears, then swap our indicator in.
        const intervalTimer = setInterval(() => {
            const originSpeed = bottom.querySelector(".bilibili-player-video-btn-speed");
            if (!originSpeed) {
                return;
            }
            bottom.insertBefore(this.warpedTimeIndicator.root, originSpeed);
            originSpeed.style.display = "none";
            clearInterval(intervalTimer);
        }, 100);
    }
    placeStatusOverlay = () => {
        this.container.appendChild(this.statusOverlay.overlay);
    }
}
// Launch once the DOM is ready. BUG FIX: userscripts are frequently
// injected after DOMContentLoaded has already fired, in which case the
// event never arrives — check readyState and launch immediately then.
function onload() {
    document.removeEventListener("DOMContentLoaded", onload);
    Base.launch();
}
if (document.readyState === "loading") {
    document.addEventListener("DOMContentLoaded", onload);
} else {
    onload();
}
})();
|
#!/bin/bash
# If then number of granules folder in directory equals the number in datastrip we consider it complete
# Decides whether an L0U product is complete.
#   $1 : path to the datastrip MTD_L0U_DS xml file.
# Prints "Yes"/"No" and returns 0/1. Complete means: the number of granule
# folders on disk equals the granuleId count in the metadata, AND all 156
# PDI_ATF image files are present.
product_complete() {
    GR_COUNT_MTD=$(grep granuleId "$1" | wc -l)
    DS_IMG=$(dirname "$1")"/IMG_DATA/DB1"
    # BUG FIX: count lines, not words — 'wc -w' miscounts any path that
    # contains whitespace.
    IMG_COUNT=$(find ${DS_IMG} -name "PDI_ATF*bin" | wc -l)
    PROD=$(dirname $(dirname $(dirname "$1")))
    GR_COUNT_FOLDER=$(find "${PROD}/GR/DB1" -name "*MSI_L0U_GR*" -type d | wc -l)
    if [ ${GR_COUNT_FOLDER} -ne ${GR_COUNT_MTD} ] ;then
        echo "No"
        return 1
    elif [ ${IMG_COUNT} -ne 156 ] ; then
        echo "No"
        return 1
    fi
    echo "Yes"
    return 0
}
# Prints the number of lines mentioning granuleId in the metadata file $1.
granule_count_mtd(){
    grep -c granuleId "$1"
}
# Classifies a product's sensing date against an inclusive [BEGIN, END] window.
#   $1 : product path — the date is extracted from the basename, taking the
#        digits between "T..._S" and the following "T" (YYYYMMDD expected).
#   $2 : BEGIN date, $3 : END date (same YYYYMMDD form for numeric -lt/-gt).
# Prints BEFORE/AFTER/OK and returns 1/2/0 respectively.
compare_date() {
    local PROD=$1
    local BEGIN=$2
    local END=$3
    # Strip everything up to "T*_S", then everything from the next "T" on.
    prod_date=$(echo $(basename $PROD) | sed 's+.*T.*_S++' | sed 's+T.*++')
    if [ ${prod_date} -lt $BEGIN ]; then
        echo "BEFORE"
        return 1
    fi
    if [ ${prod_date} -gt $END ]; then
        echo "AFTER"
        return 2
    fi
    echo "OK"
    return 0
}
# Main driver: scans PRODUCTS_DIR for L0U datastrips, filters by optional
# date window, checks completeness, and resolves GRI folder, origin date
# and session id for each candidate product.
if [ $# -lt 1 ]; then
    echo "AnalyzeL0u.sh PRODUCTS_DIR [DATE_BEGIN] [DATE_END]"
    exit 1
fi
if [ $# -eq 3 ]; then
    DATE_BEGIN=${2}
    DATE_END=${3}
    echo "Date comparison requested "${DATE_BEGIN}" / "${DATE_END}
fi
set -o pipefail
PRODUCT_DIR=$1
GRI=/mnt/gri-dem
#export IDPORCH_DEBUG=1
#
# Script surrounding the orchestrator launch to prepare a context based in folders:
CUR_DIR="$( cd "$(dirname "$0")" ; pwd -P )"
# Shuffle the DS folders so concurrent instances spread over the products.
for f in $(shuf -e $(find ${PRODUCT_DIR} -maxdepth 3 -type d -name "DS")); do
    PROD_L0U=$(find ${f} -maxdepth 1 -type d -name "*MSI_L0U*")
    MTD=$(find ${f} -maxdepth 2 -type f -name "*MTD_L0U_DS*xml")
    if [ -z $MTD ]; then
        echo "No MTD found under "$f
        continue
    fi
    echo "L0u product : "$PROD_L0U
    echo "L0u MTD : "$MTD
    PROD=$(dirname $(dirname $PROD_L0U))
    # compare the date if one was given
    if [ ! -z ${DATE_BEGIN} ]; then
        echo "Comparing product date between "${DATE_BEGIN}" / "${DATE_END}
        COMPARE=$(compare_date $f ${DATE_BEGIN} ${DATE_END})
        echo "Product date is "$COMPARE
        if [ $COMPARE != "OK" ]; then
            echo "Product not between requested dates, skip"
            continue
        fi
    fi
    COMPLETE=$(product_complete $MTD)
    GR_COUNT=$(granule_count_mtd $MTD)
    echo "Number of granules in MTD :"$GR_COUNT
    # NOTE(review): this only warns — the product is still processed;
    # confirm whether a 'continue' is intended here.
    if [ $GR_COUNT -lt 40 ];then
        echo "Product is too small to be treated !!!!"
    fi
    echo "Is product complete : "$COMPLETE
    if [ $COMPLETE != "Yes" ];then
        echo "Product $PROD is not complete"
        continue
    else
        echo "Product complete"
    fi
    # NOTE(review): the two checks below only log; they do not skip the
    # product — confirm whether that is intentional.
    if [ -f "$PROD/.treated" ]; then
        echo "Product already treated"
    fi
    if [ -f "$PROD/.lock" ]; then
        echo "Product $PROD is locked by another instance"
    fi
    #ORIGIN_DATE=$(grep $(basename $(dirname $f)) /mnt/shared/logs/*eisp*log | grep Successfully | sed 's+.*OriginDate\\":\\"++' | sed 's+Z\\",.*+.000000+')
    #if [ -z "${ORIGIN_DATE}" ] ; then
    #    echo "OriginDate not available, skip"
    #fi
    #echo "Origin Date : "${ORIGIN_DATE}
    # Orbit for GRI
    ORBIT=$(printf %03d $(grep SENSING_ORBIT_NUMBER $f | sed 's+[ \t]*<SENSING_ORBIT_NUMBER>++' | sed 's+</SENSING_ORBIT_NUMBER>++'))
    echo "Orbit : "${ORBIT}
    # Find the corresponding gri folder
    GRI_FOLDER=$(find $GRI -maxdepth 1 -type d -name "S2__OPER_AUX_GRI${ORBIT}*")
    if [[ ! -d ${GRI_FOLDER} ]];then
        # BUG FIX: the old message claimed the folder "can't be created" —
        # nothing here creates it; report the actual lookup failure.
        echo "No GRI folder found for orbit ${ORBIT} under ${GRI}"
        exit 1
    fi
    echo "GRI_FOLDER : "${GRI_FOLDER}
    # Find the Origin date in log
    # Test if originDate.txt exists
    if [ -f "$f/originDate.txt" ]; then
        echo "originDate.txt found"
        ORIGIN_DATE=$(cat "$f/originDate.txt")
    else
        ORIGIN_DATE=$(grep $(basename $(dirname $f)) /mnt/shared/logs/*eisp*log | grep Successfully | sed 's+.*OriginDate\\":\\"++' | sed 's+Z\\",.*+.000000+')
        if [ -z "${ORIGIN_DATE}" ]; then
            echo "OriginDate not available, skip"
            exit 1
        else
            # BUG FIX: ORIGIN_DATE holds the value itself, not a file name —
            # 'cat $ORIGIN_DATE' tried to open it as a file and failed.
            echo "${ORIGIN_DATE}" > "$f/originDate.txt"
        fi
    fi
    echo "Origin Date : "${ORIGIN_DATE}
    # Find the session Id
    # Test if sessionId.txt exists
    if [ -f "$f/sessionId.txt" ]; then
        echo "sessionId.txt found"
        SESSION_ID=$(cat "$f/sessionId.txt")
    else
        SESSION_NAME=$(grep $(basename $(dirname $f)) /mnt/shared/logs/*eisp*log | grep Successfully | sed 's+.*name\":\"++' | sed 's+\",.*++')
        if [ -z "${SESSION_NAME}" ]; then
            echo "SessionName not available"
            exit 1
        else
            SESSION_ID=$(grep ${SESSION_NAME} /mnt/shared/logs/*eisp*log | grep Producing | sed "s+.*localPath=++" | sed 's+, .*++')
            if [ -z "${SESSION_ID}" ]; then
                echo "SessionId not available"
                exit 1
            else
                # BUG FIX: same as above — write the value, don't 'cat' it.
                echo "${SESSION_ID}" > "$f/sessionId.txt"
            fi
        fi
    fi
    echo "SessionId : "${SESSION_ID}
    echo "Datatake_type: "$(grep 'DATATAKE_TYPE' $MTD)
done
|
// Doxygen-generated navigation data for the "MMU Functions" group page.
// Regenerated by the documentation build — do not edit by hand.
var group__MMU__functions =
[
    [ "MMU Defines and Structs", "group__MMU__defs__gr.html", "group__MMU__defs__gr" ],
    [ "MMU_APPage", "group__MMU__functions.html#gac7c88d4d613350059b4d77814ea2c7a0", null ],
    [ "MMU_APSection", "group__MMU__functions.html#ga946866c84a72690c385ee07545bf8145", null ],
    [ "MMU_Disable", "group__MMU__functions.html#ga2a2badd06531e04f559b97fdb2aea154", null ],
    [ "MMU_DomainPage", "group__MMU__functions.html#ga45f5389cb1351bb2806a38ac8c32d416", null ],
    [ "MMU_DomainSection", "group__MMU__functions.html#gabd88f4c41b74365c38209692785287d0", null ],
    [ "MMU_Enable", "group__MMU__functions.html#ga63334cbd77d310d078eb226c7542b96b", null ],
    [ "MMU_GetPageDescriptor", "group__MMU__functions.html#gaa2fcfb63c7019665b8a352d54f55d740", null ],
    [ "MMU_GetSectionDescriptor", "group__MMU__functions.html#ga4f21eee79309cf8cde694d0d7e1205bd", null ],
    [ "MMU_GlobalPage", "group__MMU__functions.html#ga14dfeaf8983de57521aaa66c19dd43c9", null ],
    [ "MMU_GlobalSection", "group__MMU__functions.html#ga3ca22117a7f2d3c4d1cd1bf832cc4d2f", null ],
    [ "MMU_InvalidateTLB", "group__MMU__functions.html#ga9de65bea1cabf73dc4302e0e727cc8c3", null ],
    [ "MMU_MemoryPage", "group__MMU__functions.html#ga9a2946f7c93bcb05cdd20be691a54b8c", null ],
    [ "MMU_MemorySection", "group__MMU__functions.html#ga353d3d794bcd1b35b3b5aeb73d6feb08", null ],
    [ "MMU_PPage", "group__MMU__functions.html#gab15289c416609cd56dde816b39a4cea4", null ],
    [ "MMU_PSection", "group__MMU__functions.html#ga3577aec23189228c9f95abba50c3716d", null ],
    [ "MMU_SecurePage", "group__MMU__functions.html#ga2c1887ed6aaff0a51e3effc3db595c94", null ],
    [ "MMU_SecureSection", "group__MMU__functions.html#ga84a5a15ee353d70a9b904e3814bd94d8", null ],
    [ "MMU_SharedPage", "group__MMU__functions.html#gaaa19560532778e4fdc667e56fd2dd378", null ],
    [ "MMU_SharedSection", "group__MMU__functions.html#ga29ea426394746cdd6a4b4c14164ec6b9", null ],
    [ "MMU_TTPage4k", "group__MMU__functions.html#ga823cca9649a28bab8a90f8bd9bb92d83", null ],
    [ "MMU_TTPage64k", "group__MMU__functions.html#ga48c509501f94a3f7316e79f8ccd34184", null ],
    [ "MMU_TTSection", "group__MMU__functions.html#gaaff28ea191391cbbd389d74327961753", null ],
    [ "MMU_XNPage", "group__MMU__functions.html#gab0e0fed40d998757147beb8fcf05a890", null ],
    [ "MMU_XNSection", "group__MMU__functions.html#ga9132cbfe3b2367de3db27daf4cc82ad7", null ]
];
/// Raw view of the USR_DATA6 register field (bits 0:31).
struct USR_DATA6_R {
    value: u32,
}

impl USR_DATA6_R {
    /// Wraps a raw 32-bit field value.
    fn new(value: u32) -> Self {
        USR_DATA6_R { value }
    }
}
/// Read proxy over the whole 32-bit register value.
struct R {
    bits: u32,
}

impl R {
    #[doc = "Bits 0:31"]
    #[inline(always)]
    /// Extracts the USR_DATA6 field. The 0xffff_ffff mask keeps every
    /// bit of the u32, so this returns the full register value.
    fn usr_data6(&self) -> USR_DATA6_R {
        USR_DATA6_R::new(self.bits & 0xffff_ffff)
    }
}
fn main() {
    let r = R { bits: 0xABCD1234 };
    let usr_data = r.usr_data6();
    // The 0xffff_ffff mask preserves all 32 bits, so the full value is
    // printed (the old comment wrongly claimed 0xCD1234).
    println!("User Data: 0x{:X}", usr_data.value); // Output: User Data: 0xABCD1234
}
/* istanbul ignore file: no unit tests on patchs */
import { IDatabase } from 'pg-promise';
import ModuleContextFilter from '../../../shared/modules/ContextFilter/ModuleContextFilter';
import ContextFilterVO from '../../../shared/modules/ContextFilter/vos/ContextFilterVO';
import ContextQueryVO from '../../../shared/modules/ContextFilter/vos/ContextQueryVO';
import ModuleDAO from '../../../shared/modules/DAO/ModuleDAO';
import DashboardBuilderController from '../../../shared/modules/DashboardBuilder/DashboardBuilderController';
import TranslatableTextVO from '../../../shared/modules/Translation/vos/TranslatableTextVO';
import IGeneratorWorker from '../../IGeneratorWorker';
/**
 * One-shot generator patch: deletes the translatable-text rows whose code
 * starts with one of the dashboard-builder prefixes, so they can be
 * regenerated with the new (post-2022-02-22) code scheme.
 */
export default class Patch20220222MigrationCodesTradsDB implements IGeneratorWorker {
    /** Lazily creates and returns the process-wide singleton. */
    public static getInstance(): Patch20220222MigrationCodesTradsDB {
        if (!Patch20220222MigrationCodesTradsDB.instance) {
            Patch20220222MigrationCodesTradsDB.instance = new Patch20220222MigrationCodesTradsDB();
        }
        return Patch20220222MigrationCodesTradsDB.instance;
    }
    private static instance: Patch20220222MigrationCodesTradsDB = null;
    // Identifier the generator framework uses to record that this patch ran.
    get uid(): string {
        return 'Patch20220222MigrationCodesTradsDB';
    }
    private constructor() { }
    /**
     * Selects every TranslatableTextVO whose code_text starts with the
     * TableColumnDesc or VOFieldRef prefix and deletes them.
     * NOTE(review): the `db` handle is unused — the work goes through the
     * module APIs instead; kept for the IGeneratorWorker contract.
     */
    public async work(db: IDatabase<any>) {
        let filter = new ContextFilterVO();
        filter.field_id = 'code_text';
        filter.filter_type = ContextFilterVO.TYPE_TEXT_STARTSWITH_ANY;
        filter.vo_type = TranslatableTextVO.API_TYPE_ID;
        filter.param_textarray = [
            DashboardBuilderController.TableColumnDesc_NAME_CODE_PREFIX,
            DashboardBuilderController.VOFIELDREF_NAME_CODE_PREFIX
        ];
        let query: ContextQueryVO = new ContextQueryVO();
        query.base_api_type_id = TranslatableTextVO.API_TYPE_ID;
        query.active_api_type_ids = [TranslatableTextVO.API_TYPE_ID];
        query.filters = [filter];
        let page_widget_trads: TranslatableTextVO[] = await ModuleContextFilter.getInstance().select_vos(query);
        await ModuleDAO.getInstance().deleteVOs(page_widget_trads);
        // let lang = await ModuleTranslation.getInstance().getLang('fr-fr');
        // let page_widgets: DashboardPageWidgetVO[] = await ModuleDAO.getInstance().getVos<DashboardPageWidgetVO>(DashboardPageWidgetVO.API_TYPE_ID);
        // for (let i in page_widgets) {
        //     let page_widget = page_widgets[i];
        //     json_options
        //     let options =
        //     if (!!this.page_widget.json_options) {
        //         options = JSON.parse(this.page_widget.json_options) as BulkOpsWidgetOptions;
        //         options = options ? new BulkOpsWidgetOptions(options.page_widget_id, options.api_type_id, options.limit) : null;
        //     }
        // }
    }
}
<reponame>sori9088/mealplan-client<gh_stars>0
import React from 'react'
import { Link } from 'react-router-dom';
// Landing-page masthead. Shows the tagline always, and a "Check Out Our
// Dishes" signup link only for anonymous visitors (props.user unset).
export default function Head(props) {
    return (
        <header className="masthead">
            <div className="container h-100">
                <div className="row h-100 align-items-center">
                    <div className="col-12 text-center">
                        <span>Healthy and Tasty</span>
                        <p className="lead">We provide simple, tasty and refreshing health food based on well-being trends.</p>
                        <section id="intro">
                            <div id="intro-content" className="center-content">
                                <div className="center-content-inner">
                                    <div className="content-section content-section-margin">
                                        <div className="content-section-grid clearfix">
                                            {/* Logged-in users get no call-to-action button. */}
                                            {props.user
                                                ?
                                                <> </>
                                                :
                                                <Link to="/signup" className="button nav-link">
                                                    <div className="bottom"></div>
                                                    <div className="top">
                                                        <div className="label">Check Out Our Dishes</div>
                                                        <div className="button-border button-border-left"></div>
                                                        <div className="button-border button-border-top"></div>
                                                        <div className="button-border button-border-right"></div>
                                                        <div className="button-border button-border-bottom"></div>
                                                    </div>
                                                </Link>
                                            }
                                        </div>
                                    </div>
                                </div>
                            </div>
                        </section>
                    </div>
                </div>
            </div>
        </header>
    )
}
|
/**
* Evelyn is a simple, file-based event-sourcing storage system.
* It is currently in an early, experimental stage and not available as a standalone project.
*/
package be.kwakeroni.evelyn; |
// Returns a new array in which every string element has had its leading
// and trailing whitespace removed.
function trimSpacesInEachElement(arr) {
    var trimmed = [];
    for (var i = 0; i < arr.length; ++i) {
        trimmed.push(arr[i].trim());
    }
    return trimmed;
}
// Returns a copy of the array with falsy and zero-length string elements
// dropped.
function removeEmptyElements(arr) {
    var kept = [];
    for (var i = 0; i < arr.length; ++i) {
        var el = arr[i];
        if (el && el.length > 0) {
            kept.push(el);
        }
    }
    return kept;
}
// True when the string starts with '(' and ends with ')', e.g. "(x)".
function isEnclosedInParens(str) {
    return str.charAt(0) === '(' && str.charAt(str.length - 1) === ')';
}
// Case-sensitive substring test.
function contains(str, substr) {
    return str.includes(substr);
}
// True when at least one of the substrings in the list occurs in `str`
// (case sensitive).
function containsAnyOf(str, substrList) {
    for (var i = 0; i < substrList.length; ++i) {
        if (str.indexOf(substrList[i]) >= 0) {
            return true;
        }
    }
    return false;
}
// Splits an user agent string logically into an array of tokens, e.g.
// 'Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5 Build/MOB30M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36'
// -> ['Mozilla/5.0', '(Linux; Android 6.0.1; Nexus 5 Build/MOB30M)', 'AppleWebKit/537.36 (KHTML, like Gecko)', 'Chrome/51.0.2704.81', 'Mobile Safari/537.36']
function splitUserAgent(str) {
str = str.trim();
var uaList = [];
var tokens = '';
// Split by spaces, while keeping top level parentheses intact, so
// "Mozilla/5.0 (Linux; Android 6.0.1) Mobile Safari/537.36" becomes
// ['Mozilla/5.0', '(Linux; Android 6.0.1)', 'Mobile', 'Safari/537.36']
var parensNesting = 0;
for(var i = 0; i < str.length; ++i) {
if (str[i] == ' ' && parensNesting == 0) {
if (tokens.trim().length != 0) uaList.push(tokens.trim());
tokens = '';
} else if (str[i] == '(') ++parensNesting;
else if (str[i] == ')') --parensNesting;
tokens = tokens + str[i];
}
if (tokens.trim().length > 0) uaList.push(tokens.trim());
// What follows is a number of heuristic adaptations to account for UA strings met in the wild:
// Fuse ['a/ver', '(someinfo)'] together. For example:
// 'Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5 Build/MOB30M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36'
// -> fuse 'AppleWebKit/537.36' and '(KHTML, like Gecko)' together
for(var i = 1; i < uaList.length; ++i) {
var l = uaList[i];
if (isEnclosedInParens(l) && !contains(l, ';') && i > 1) {
uaList[i-1] = uaList[i-1] + ' ' + l;
uaList[i] = '';
}
}
uaList = removeEmptyElements(uaList);
// Fuse ['foo', 'bar/ver'] together, if 'foo' has only ascii chars. For example:
// 'Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5 Build/MOB30M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36'
// -> fuse ['Mobile', 'Safari/537.36'] together
for(var i = 0; i < uaList.length-1; ++i) {
var l = uaList[i];
var next = uaList[i+1];
if (/^[a-zA-Z]+$/.test(l) && contains(next, '/')) {
uaList[i+1] = l + ' ' + next;
uaList[i] = '';
}
}
uaList = removeEmptyElements(uaList);
return uaList;
}
// Finds the special token in the user agent token list that corresponds to the platform info.
// This is the first element contained in parentheses that has semicolon delimited elements.
// Returns the platform info as an array split by the semicolons.
function splitPlatformInfo(uaList) {
for(var i = 0; i < uaList.length; ++i) {
var item = uaList[i];
if (isEnclosedInParens(item)) {
return removeEmptyElements(trimSpacesInEachElement(item.substr(1, item.length-2).split(';')));
}
}
}
// Deduces the operating system from the user agent platform info token list.
function findOS(uaPlatformInfo) {
var oses = ['Android', 'BSD', 'Linux', 'Windows', 'iPhone OS', 'Mac OS', 'BSD', 'CrOS', 'Darwin', 'Dragonfly', 'Fedora', 'Gentoo', 'Ubuntu', 'debian', 'HP-UX', 'IRIX', 'SunOS', 'Macintosh', 'Win 9x', 'Win98', 'Win95', 'WinNT'];
for(var os in oses) {
for(var i in uaPlatformInfo) {
var item = uaPlatformInfo[i];
if (contains(item, oses[os])) return item;
}
}
return 'Other';
}
// Filters the product components (items of format 'foo/version') from the user agent token list.
function parseProductComponents(uaList) {
uaList = uaList.filter(function(x) { return contains(x, '/') && !isEnclosedInParens(x); });
var productComponents = {};
for(var i in uaList) {
var x = uaList[i];
if (contains(x, '/')) {
x = x.split('/');
if (x.length != 2) throw uaList[i];
productComponents[x[0].trim()] = x[1].trim();
} else {
productComponents[x] = true;
}
}
return productComponents;
}
// Maps Windows NT version to human-readable Windows Product version
// Translates a Windows NT kernel version ("6.1") into the marketing
// product name ("7"); unknown versions fall back to "NT <version>".
function windowsDistributionName(winNTVersion) {
    var productNames = {
        '5.0': '2000',
        '5.1': 'XP',
        '5.2': 'XP',
        '6.0': 'Vista',
        '6.1': '7',
        '6.2': '8',
        '6.3': '8.1',
        '10.0': '10'
    };
    var known = productNames[winNTVersion];
    return known ? known : 'NT ' + winNTVersion;
}
// The full function to decompose a given user agent to the interesting logical info bits.
//
// Heuristically parses `userAgent` (defaults to navigator.userAgent) and returns an
// object carrying the raw string plus deduced fields: productComponents, platformInfo,
// arch, bitness, platform, os, osVersion, browserVendor/browserProduct/browserVersion
// and formFactor. Parsing failures are never thrown; they are reported through the
// `internalError` field on the (partial) result.
export default function deduceUserAgent(userAgent) {
  userAgent = userAgent || navigator.userAgent;
  var ua = {
    userAgent: userAgent,
    productComponents: {},
    platformInfo: []
  };
  try {
    var uaList = splitUserAgent(userAgent);
    var uaPlatformInfo = splitPlatformInfo(uaList);
    var productComponents = parseProductComponents(uaList);
    ua.productComponents = productComponents;
    ua.platformInfo = uaPlatformInfo;
    var ual = userAgent.toLowerCase();
    // Deduce arch and bitness
    var b32On64 = ['wow64']; // NOTE(review): unused — the 'wow64' check below is inlined.
    if (contains(ual, 'wow64')) {
      // 32-bit browser running on 64-bit Windows.
      ua.bitness = '32-on-64';
      ua.arch = 'x86_64';
    } else if (containsAnyOf(ual, ['x86_64', 'amd64', 'ia64', 'win64', 'x64'])) {
      ua.bitness = 64;
      ua.arch = 'x86_64';
    } else if (contains(ual, 'ppc64')) {
      ua.bitness = 64;
      ua.arch = 'PPC';
    } else if (contains(ual, 'sparc64')) {
      ua.bitness = 64;
      ua.arch = 'SPARC';
    } else if (containsAnyOf(ual, ['i386', 'i486', 'i586', 'i686', 'x86'])) {
      ua.bitness = 32;
      ua.arch = 'x86';
    } else if (contains(ual, 'arm7') || contains(ual, 'android') || contains(ual, 'mobile')) {
      ua.bitness = 32;
      ua.arch = 'ARM';
    // Heuristic: Assume all OS X are 64-bit, although this is not certain. On OS X, 64-bit browsers
    // don't advertise being 64-bit.
    } else if (contains(ual, 'intel mac os')) {
      ua.bitness = 64;
      ua.arch = 'x86_64';
    } else {
      // Nothing more specific matched; default to 32-bit.
      ua.bitness = 32;
    }
    // Deduce operating system
    var os = findOS(uaPlatformInfo);
    // e.g. "Intel Mac OS X 10_11_6" -> arch prefix + underscore-separated version.
    var m = os.match('(.*)\\s+Mac OS X\\s+(.*)');
    if (m) {
      ua.platform = 'Mac';
      ua.arch = m[1];
      ua.os = 'Mac OS';
      ua.osVersion = m[2].replace(/_/g, '.');
    }
    if (!m) {
      m = os.match('Android\\s+(.*)');
      if (m) {
        ua.platform = 'Android';
        ua.os = 'Android';
        ua.osVersion = m[1];
      }
    }
    if (!m) {
      m = os.match('Windows NT\\s+(.*)');
      if (m) {
        ua.platform = 'PC';
        ua.os = 'Windows';
        // Translate the NT kernel version to the marketing name (e.g. 6.1 -> 7).
        ua.osVersion = windowsDistributionName(m[1]);
        if (!ua.arch) ua.arch = 'x86';
      }
    }
    if (!m) {
      // iOS devices, e.g. "CPU iPhone OS 9_3 like Mac OS X".
      if (contains(uaPlatformInfo[0], 'iPhone') || contains(uaPlatformInfo[0], 'iPad') || contains(uaPlatformInfo[0], 'iPod') || contains(os, 'iPhone') || os.indexOf('CPU OS') == 0) {
        m = os.match('.*OS (.*) like Mac OS X');
        if (m) {
          ua.platform = uaPlatformInfo[0];
          ua.os = 'iOS';
          ua.osVersion = m[1].replace(/_/g, '.');
          // iOS 7 was the first 64-bit iOS release.
          ua.bitness = parseInt(ua.osVersion) >= 7 ? 64 : 32;
        }
      }
    }
    if (!m) {
      m = contains(os, 'BSD') || contains(os, 'Linux');
      if (m) {
        ua.platform = 'PC';
        // First word of the platform string, e.g. "Linux" from "Linux x86_64".
        ua.os = os.split(' ')[0];
        if (!ua.arch) ua.arch = 'x86';
      }
    }
    if (!m) {
      // Nothing matched: surface the raw platform string as the OS.
      ua.os = os;
    }
    // Returns the key in `productComponents` whose value equals `product`, or -1.
    function findProduct(productComponents, product) {
      for(var i in productComponents) {
        if (productComponents[i] == product) return i;
      }
      return -1;
    }
    // Deduce human-readable browser vendor, product and version names.
    // Order matters: e.g. Chrome UAs also advertise "Safari", so Chrome is checked first.
    var browsers = [['SamsungBrowser', 'Samsung'], ['Edge', 'Microsoft'], ['OPR', 'Opera'], ['Chrome', 'Google'], ['Safari', 'Apple'], ['Firefox', 'Mozilla']];
    for(var i in browsers) {
      var b = browsers[i][0];
      if (productComponents[b]) {
        ua.browserVendor = browsers[i][1];
        ua.browserProduct = browsers[i][0];
        if (ua.browserProduct == 'OPR') ua.browserProduct = 'Opera';
        if (ua.browserProduct == 'Trident') ua.browserProduct = 'Internet Explorer';
        ua.browserVersion = productComponents[b];
        break;
      }
    }
    // Detect IEs
    if (!ua.browserProduct) {
      var matchIE = userAgent.match(/MSIE\s([\d.]+)/);
      if (matchIE) {
        ua.browserVendor = 'Microsoft';
        ua.browserProduct = 'Internet Explorer';
        ua.browserVersion = matchIE[1];
      } else if (contains(uaPlatformInfo, 'Trident/7.0')) {
        // IE 11 dropped the MSIE token; the version lives in the "rv:" field.
        ua.browserVendor = 'Microsoft';
        ua.browserProduct = 'Internet Explorer';
        ua.browserVersion = userAgent.match(/rv:([\d.]+)/)[1];
      }
    }
    // Deduce mobile platform, if present
    for(var i = 0; i < uaPlatformInfo.length; ++i) {
      var item = uaPlatformInfo[i];
      var iteml = item.toLowerCase();
      if (contains(iteml, 'nexus') || contains(iteml, 'samsung')) {
        ua.platform = item;
        ua.arch = 'ARM';
        break;
      }
    }
    // Deduce form factor
    if (contains(ual, 'tablet') || contains(ual, 'ipad')) ua.formFactor = 'Tablet';
    else if (contains(ual, 'mobile') || contains(ual, 'iphone') || contains(ual, 'ipod')) ua.formFactor = 'Mobile';
    else if (contains(ual, 'smart tv') || contains(ual, 'smart-tv')) ua.formFactor = 'TV';
    else ua.formFactor = 'Desktop';
  } catch(e) {
    // Never throw from UA parsing; report the failure on the result instead.
    ua.internalError = 'Failed to parse user agent string: ' + e.toString();
  }
  return ua;
}
|
#!/bin/bash -e
# Pre-flight validation for a service-broker deployment: checks required
# environment variables, the org/spaces/apps, admin rights, and that the
# service broker is not already registered. The first failed check aborts.
#
# NOTE: error branches use `{ ...; exit 1; }` brace groups instead of the
# previous `( ...; exit 1 )` subshells — `exit` inside a subshell only
# terminates the subshell, so the old form only worked because of `-e`.

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Check for required env vars
"$DIR/../resources/check-env-vars.sh" || {
  echo "ERROR: Missing required environmental variables"
  exit 1
}

# Check the org exists
cf orgs | grep "$ORG" > /dev/null || {
  echo "ERROR: Org '$ORG' does not exist"
  exit 1
}
echo "Org '$ORG' exists"

# Check the service broker app is deployed
cf target -o "$ORG" -s "$SERVICE_BROKER_SPACE" > /dev/null
cf apps | grep "$SERVICE_BROKER_APP_NAME" > /dev/null || {
  echo "ERROR: The '$SERVICE_BROKER_APP_NAME' app does not exist in org '$ORG', space '$SERVICE_BROKER_SPACE'"
  exit 1
}
echo "The '$SERVICE_BROKER_APP_NAME' app is running in org '$ORG', space '$SERVICE_BROKER_SPACE'"

# Check no service broker already exists (grep succeeding means one exists)
if cf service-brokers | grep "$SERVICE_BROKER_NAME" > /dev/null; then
  echo "ERROR: The '$SERVICE_BROKER_NAME' service broker already exists"
  exit 1
fi
echo "The '$SERVICE_BROKER_NAME' service broker doesn't exist"

# Check we are admin
cf target | grep -i "User:" | grep "admin" > /dev/null || {
  echo "ERROR: You are not a CF admin"
  exit 1
}
echo "You are a CF admin"

# Check the source space exists
cf target -o "$ORG" > /dev/null
cf spaces | grep "$SOURCE_SPACE" > /dev/null || {
  echo "ERROR: Space '$SOURCE_SPACE' in org '$ORG' does not exist"
  exit 1
}
echo "Space '$SOURCE_SPACE' in org '$ORG' exists"

# Check the target space exists
cf target -o "$ORG" > /dev/null
cf spaces | grep "$TARGET_SPACE" > /dev/null || {
  echo "ERROR: Space '$TARGET_SPACE' in org '$ORG' does not exist"
  exit 1
}
echo "Space '$TARGET_SPACE' in org '$ORG' exists"

# Check the source app exists
cf target -o "$ORG" -s "$SOURCE_SPACE" > /dev/null
cf apps | grep "$SOURCE_APP" > /dev/null || {
  echo "ERROR: Missing app '$SOURCE_APP' in org '$ORG', space '$SOURCE_SPACE'"
  exit 1
}
echo "App '$SOURCE_APP' exists in org '$ORG', space '$SOURCE_SPACE'"

# Check the target app exists
cf target -o "$ORG" -s "$TARGET_SPACE" > /dev/null
cf apps | grep "$TARGET_APP" > /dev/null || {
  echo "ERROR: Missing app '$TARGET_APP' in org '$ORG', space '$TARGET_SPACE'"
  exit 1
}
echo "App '$TARGET_APP' exists in org '$ORG', space '$TARGET_SPACE'"

# Check no service instances exist in the source space
echo "Ready!"
|
<reponame>Bolados/b-interpolate-teller<filename>src/app/domains/models/teller.param.model.ts
import {Point} from "./point.model";
/**
 * Interpolation parameters for the Teller method: a base point, an alpha
 * coefficient and a list of beta coefficients.
 */
export class TellerParam {
  public point: Point;
  public alpha: number = 1;
  public beta: number[] = [];

  constructor(point: Point = null,
              alpha: number = 1,
              beta: number[] = []
  ) {
    this.point = point;
    this.alpha = alpha;
    this.beta = beta;
  }

  /**
   * Populates this instance from form values; `beta` becomes `maxBeta`
   * copies of `initBeta`. Returns `this` for chaining.
   */
  from(param: TellerFormParam) {
    this.point = param.point;
    this.alpha = param.alpha;
    this.beta = new Array<number>();
    for (let i = 0; i < param.maxBeta; i++) {
      this.beta.push(param.initBeta);
    }
    return this;
  }

  /** Converts this parameter set into its form representation. */
  toForm() {
    // BUG FIX: the previous code declared `let param: TellerFormParam;`
    // without initializing it and then assigned its properties, which throws
    // a TypeError at runtime. Construct a real instance instead (its deltaX
    // and step keep their constructor defaults).
    const param = new TellerFormParam();
    param.point = this.point;
    param.alpha = this.alpha;
    param.maxBeta = this.beta.length;
    param.initBeta = 0;
    return param;
  }
}
/**
 * Form-facing representation of Teller interpolation parameters, carrying
 * the scalar inputs an edit form needs (counts and initial values) instead
 * of the expanded beta list.
 */
export class TellerFormParam {
  constructor(
    public point: Point = null,
    public alpha: number = 0,
    public maxBeta: number = 0,
    public initBeta: number = 0,
    public deltaX: number = 0.5,
    public step: number = 0.1,
  ) {
  }

  /** Copies the relevant fields of a TellerParam into this form model. */
  from(param: TellerParam): TellerFormParam {
    this.point = param.point;
    this.alpha = param.alpha;
    this.maxBeta = param.beta.length;
    this.initBeta = 0;
    return this;
  }

  /** Builds a TellerParam whose betas are `maxBeta` copies of `initBeta`. */
  toParam(): TellerParam {
    const result: TellerParam = new TellerParam(new Point(0, 0));
    result.point = this.point;
    result.alpha = this.alpha;
    result.beta = [];
    for (let index = 0; index < this.maxBeta; index++) {
      result.beta.push(this.initBeta);
    }
    return result;
  }
}
|
# Preserve path at the time this file was sourced
# This prevents using of user-defined mocks/stubs that modify the PATH
_BATSLIB_PATH="$PATH"

# Load the library's components relative to this file's own location, so the
# loader works no matter where it is sourced from.
source "$(dirname "${BASH_SOURCE[0]}")/src/output.bash"
source "$(dirname "${BASH_SOURCE[0]}")/src/error.bash"
source "$(dirname "${BASH_SOURCE[0]}")/src/lang.bash"
|
// UMD wrapper: exposes the icon descriptor via CommonJS (module.exports),
// AMD (define), or a browser global (LogoStumbleupon32), whichever the host
// environment supports.
(function (global, factory) {
  typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
  typeof define === 'function' && define.amd ? define(factory) :
  (global.LogoStumbleupon32 = factory());
}(this, (function () { 'use strict';

  // Declarative description of the 32x32 StumbleUpon logo SVG; consumers
  // render this structure into an actual <svg> element.
  var _32 = {
    elem: 'svg',
    attrs: {
      xmlns: 'http://www.w3.org/2000/svg',
      viewBox: '0 0 32 32',
      width: 32,
      height: 32,
    },
    content: [
      {
        elem: 'path',
        attrs: {
          d:
          'M16 2a14 14 0 1 0 14 14A14 14 0 0 0 16 2zm-.09 10.45a.84.84 0 0 0-.84.84v5.14a3.55 3.55 0 0 1-7.1 0v-2.34h2.71v2.24a.84.84 0 0 0 1.68 0v-5a3.55 3.55 0 0 1 7.09 0v1l-1.58.51-1.12-.51v-1a.85.85 0 0 0-.84-.88zm7.93 6a3.55 3.55 0 0 1-7.09 0v-2.31l1.12.51 1.58-.51v2.29a.84.84 0 0 0 1.68 0v-2.24h2.71z',
        },
      },
    ],
    name: 'logo--stumbleupon',
    size: 32,
  };

  return _32;
})));
|
from starlette.responses import JSONResponse
from starlette.requests import Request
from starlette.responses import Response
class WebIndex:
    async def apiDiscordRoleNotFound(cls: "WebIndex", WebRequest: Request, **kwargs: dict) -> Response:
        """Return a 404 JSON payload describing a Discord role that could not be found.

        Recognized kwargs: ``msg`` (error text, defaults to "Role not found"),
        ``role_id``, ``role_name`` and ``guild_id`` (all default to "").
        """
        payload: dict = {
            "error": kwargs.get("msg", "Role not found"),
            "role_id": kwargs.get("role_id", ""),
            "role_name": kwargs.get("role_name", ""),
            "guild_id": kwargs.get("guild_id", ""),
        }
        return JSONResponse(content=payload, status_code=404)
<reponame>seawindnick/javaFamily
package com.java.study.zuo.vedio.advanced.chapter6;
/**
 * Maximum product of a contiguous subarray.
 *
 * Given a double array whose elements may be positive, negative or zero,
 * returns the largest product obtainable from any contiguous subarray.
 * E.g. for arr=[-2.5,4,0,3,0.5,8,-1] the subarray [3,0.5,8] yields the
 * maximum product 12, so 12 is returned.
 *
 * @author hushiye
 * @since 2020-09-17 17:45
 */
public class SubArrayMaxProduct {

    /**
     * Computes the maximum product over all contiguous subarrays.
     *
     * Tracks both the largest and the smallest product of a subarray ending
     * at each index, because multiplying by a negative value swaps the two
     * extremes.
     *
     * @param arr input values (may be positive, negative or zero)
     * @return the maximum subarray product, or 0 for null/empty input
     */
    public static double subArrayMaxProduct(double[] arr) {
        if (arr == null || arr.length == 0) {
            return 0;
        }
        double bestSoFar = arr[0];
        double maxEndingHere = arr[0];
        double minEndingHere = arr[0];
        for (int i = 1; i < arr.length; i++) {
            double timesMax = maxEndingHere * arr[i];
            double timesMin = minEndingHere * arr[i];
            // Either extend the best/worst run or start fresh at arr[i].
            maxEndingHere = Math.max(arr[i], Math.max(timesMax, timesMin));
            minEndingHere = Math.min(arr[i], Math.min(timesMax, timesMin));
            bestSoFar = Math.max(bestSoFar, maxEndingHere);
        }
        return bestSoFar;
    }

    public static void main(String[] args) {
        double[] arr = {-2.5, 4, 1, 3, 0.5, 8};
        System.out.println(subArrayMaxProduct(arr));
    }
}
|
import express from 'express';

// Wire up the routes; async handlers forward rejected promises to Express
// error handling via next().
const app = express();
app.get('/', (req, res, next) => index(req, res).catch(next));
app.get('/hello', (req, res, next) => hello(req, res).catch(next));
app.listen(8080);
/**
 * Root route handler: responds with the service identifier.
 * @param {[type]} req incoming request (unused)
 * @param {[type]} res Express response used to send the payload
 * @return {[type]} promise that resolves once the response is sent
 */
async function index(req, res) {
  res.send('service1');
}
/**
 * Greeting route handler: responds with "hello <name>", where `name` comes
 * from the query string and defaults to "world" (also for an empty value —
 * hence `||`, not `??`).
 * @param {[type]} req incoming request; `req.query.name` is read
 * @param {[type]} res Express response used to send the greeting
 * @return {[type]} promise that resolves once the response is sent
 */
async function hello(req, res) {
  const visitor = req.query.name || 'world';
  res.send('hello ' + visitor);
}
|
package com.jinjunhang.contract.service;
/**
* Created by lzn on 16/3/23.
*/
/**
 * Central registry of backend endpoint URLs.
 *
 * When {@code isUseConfig} is true (the current setting) the scheme, host
 * and port are taken from the mutable LOCATOR_* fields, which are expected
 * to be populated at runtime; otherwise the hard-coded defaults are used.
 */
public class ServiceConfiguration {
    //public final static String serverName = "192.168.1.50";
    private final static boolean isUseConfig = true;

    // Populated at runtime by the locator/discovery mechanism.
    public static String LOCATOR_HTTP = "";
    public static String LOCATOR_SERVERNAME = "";
    public static int LOCATOR_PORT = 0;

    // Hard-coded fallbacks, used only when isUseConfig is false.
    public final static String serverName1 = "jjhtest.hengdianworld.com";
    public final static int port1 = 80;
    public final static String serverName2 = "192.168.1.57";
    public final static int port2 = 3000;

    /** Returns the URL scheme to use. */
    public static String httpMethod() {
        if (isUseConfig) {
            return LOCATOR_HTTP;
        }
        return "http";
    }

    /** Returns the backend host name to use. */
    public static String serverName() {
        if (isUseConfig) {
            return LOCATOR_SERVERNAME;
        }
        return serverName1;
    }

    /** Returns the backend port to use. */
    public static int port() {
        if (isUseConfig) {
            return LOCATOR_PORT;
        }
        return port1;
    }

    /**
     * Builds the "scheme://host:port" prefix shared by every endpoint URL.
     * Extracted to remove the 17 hand-rolled copies of this expression.
     */
    private static String baseUrl() {
        return httpMethod() + "://" + serverName() + ":" + port();
    }

    // NOTE: several method names below carry historical typos (SeachOrderUrl,
    // GetOrderPurcaseInfoUrl, RegsiterDevieUrl, searchProducstUrl); they are
    // kept unchanged for backward compatibility with existing callers.

    public static String SeachOrderUrl() {
        return baseUrl() + "/order/search.json";
    }

    public static String GetOrderPurcaseInfoUrl() {
        return baseUrl() + "/order/getPurchaseInfo.json";
    }

    public static String GetBasicInfoUrl() {
        return baseUrl() + "/order/getBasicInfo.json";
    }

    public static String GetOrderChuyunInfoUrl() {
        return baseUrl() + "/order/getChuyunInfo.json";
    }

    public static String GetOrderFukuangInfoUrl() {
        return baseUrl() + "/order/getFukuangInfo.json";
    }

    public static String GetOrderShouhuiInfoUrl() {
        return baseUrl() + "/order/getShouhuiInfo.json";
    }

    public static String SearchApprovalUrl() {
        return baseUrl() + "/approval/search.json";
    }

    public static String AuditApprovalUrl() {
        return baseUrl() + "/approval/audit.json";
    }

    public static String LoginUrl() {
        return baseUrl() + "/login/login.json";
    }

    public static String RegsiterDevieUrl() {
        return baseUrl() + "/login/registerdevice.json";
    }

    public static String ResetBadgeUrl() {
        return baseUrl() + "/login/resetbadge.json";
    }

    public static String GetProductUrl() {
        return baseUrl() + "/product/search.json";
    }

    public static String GetProductImageUrl() {
        return baseUrl() + "/product/getImage.json";
    }

    public static String searchPriceReportUrl() {
        return baseUrl() + "/price_report/search.json";
    }

    public static String getPriceReportUrl() {
        return baseUrl() + "/price_report/getPriceReport.json";
    }

    public static String searchProducstUrl() {
        return baseUrl() + "/price_report/searchProducts.json";
    }

    public static String submitReportUrl() {
        return baseUrl() + "/price_report/submit.json";
    }
}
|
#!/bin/bash
: '
Для каждого файла, из заданного списка, сценарий должен определить тип архиватора,
которым был создан тот или иной файл (с помощью утилиты file).
Затем сценарий должен выполнить соответствующую команду разархивации
(gunzip, bunzip2, unzip, uncompress или что-то иное).
Если файл не является архивом, то сценарий должен оповестить пользователя
об этом и ничего не делать с этим файлом.
'
# BUG FIX: the previous loop body only echoed an unrelated "size > 20 MB"
# message; it never detected the archive type nor decompressed anything,
# contrary to the script's own description above.

# Require exactly one argument: the file containing the list of paths.
if [[ $(($#)) -ne 1 ]] ; then
  echo "Ошибка: Необходимо передать параметр" >&2
  exit 1
fi

FILE_LIST=$1

# For each listed file, detect the archive type via file(1) and run the
# matching decompressor; non-archives are reported and left untouched.
while read -r CURRENT_FILE
do
  case "$(file -b "$CURRENT_FILE")" in
    gzip*)     gunzip "$CURRENT_FILE" ;;
    bzip2*)    bunzip2 "$CURRENT_FILE" ;;
    Zip*)      unzip "$CURRENT_FILE" ;;
    compress*) uncompress "$CURRENT_FILE" ;;
    *)         echo "Файл $CURRENT_FILE не является архивом" ;;
  esac
done < "$FILE_LIST"

echo "Скрипт закончил работу"
# frozen_string_literal: true
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
module Google
  module Ads
    module GoogleAds
      module V7
        module Errors
          # Container for enum describing possible multiplier errors.
          class MultiplierErrorEnum
            # Protobuf message plumbing mixed into every generated wrapper type.
            include ::Google::Protobuf::MessageExts
            extend ::Google::Protobuf::MessageExts::ClassMethods

            # Enum describing possible multiplier errors.
            module MultiplierError
              # Enum unspecified.
              UNSPECIFIED = 0

              # The received error code is not known in this version.
              UNKNOWN = 1

              # Multiplier value is too high
              MULTIPLIER_TOO_HIGH = 2

              # Multiplier value is too low
              MULTIPLIER_TOO_LOW = 3

              # Too many fractional digits
              TOO_MANY_FRACTIONAL_DIGITS = 4

              # A multiplier cannot be set for this bidding strategy
              MULTIPLIER_NOT_ALLOWED_FOR_BIDDING_STRATEGY = 5

              # A multiplier cannot be set when there is no base bid (e.g., content max
              # cpc)
              MULTIPLIER_NOT_ALLOWED_WHEN_BASE_BID_IS_MISSING = 6

              # A bid multiplier must be specified
              NO_MULTIPLIER_SPECIFIED = 7

              # Multiplier causes bid to exceed daily budget
              MULTIPLIER_CAUSES_BID_TO_EXCEED_DAILY_BUDGET = 8

              # Multiplier causes bid to exceed monthly budget
              MULTIPLIER_CAUSES_BID_TO_EXCEED_MONTHLY_BUDGET = 9

              # Multiplier causes bid to exceed custom budget
              MULTIPLIER_CAUSES_BID_TO_EXCEED_CUSTOM_BUDGET = 10

              # Multiplier causes bid to exceed maximum allowed bid
              MULTIPLIER_CAUSES_BID_TO_EXCEED_MAX_ALLOWED_BID = 11

              # Multiplier causes bid to become less than the minimum bid allowed
              BID_LESS_THAN_MIN_ALLOWED_BID_WITH_MULTIPLIER = 12

              # Multiplier type (cpc vs. cpm) needs to match campaign's bidding strategy
              MULTIPLIER_AND_BIDDING_STRATEGY_TYPE_MISMATCH = 13
            end
          end
        end
      end
    end
  end
end
|
import mcpi.minecraft as minecraft
import time

# Connect to the running Minecraft server.
mc = minecraft.Minecraft.create()

# Give the player one minute to hit blocks (right-click with a sword).
time.sleep(60)

# Score each recorded hit by the numeric id of the block that was struck.
points = 0
for hit in mc.events.pollBlockHits():
    points += mc.getBlock(hit.pos.x, hit.pos.y, hit.pos.z)

mc.postToChat("You got " + str(points) + " points.")
|
<reponame>tOverney/ADA-Project<gh_stars>0
from django.contrib import admin
from .models import Capacity, Path
class CapacityAdmin(admin.ModelAdmin):
    # Use raw-id widgets for these relations to avoid rendering huge dropdowns.
    raw_id_fields = ('stop_time', 'trip', 'service_date')
    list_display = ('trip_id2', 'stop_time_id2', 'service_date_id2', 'capacity1st', 'capacity2nd')

    # Computed list columns: surface identifiers of the related objects.
    def trip_id2(self, instance):
        return instance.trip.pk

    def stop_time_id2(self, instance):
        return instance.stop_time.pk

    def service_date_id2(self, instance):
        return instance.service_date.date

admin.site.register(Capacity, CapacityAdmin)
class PathAdmin(admin.ModelAdmin):
    # Use raw-id widgets for these relations to avoid rendering huge dropdowns.
    raw_id_fields = ('trip', 'stop')
    list_display = ('trip_id2', 'stop_id2', 'path')

    # Computed list columns: surface identifiers of the related objects.
    def trip_id2(self, instance):
        return instance.trip.id

    def stop_id2(self, instance):
        return instance.stop.id

admin.site.register(Path, PathAdmin)
<gh_stars>1-10
/**
* There are four possible parameter locations specified by the in field:
*
* - __path:__ Used together with Path Templating, where the parameter value
* is actually part of the operation's URL. This does not include the host
* or base path of the API. For example, in `/items/{itemId}`, the path
* parameter is `itemId`.
*
* - __query:__ Parameters that are appended to the URL. For example,
* in `/items?id=###`, the query parameter is `id`.
*
* - __header:__ Custom headers that are expected as part of the request.
* Note that [RFC7230](https://tools.ietf.org/html/rfc7230#page-22) states
* header names are case insensitive.
*
* - __cookie:__ Used to pass a specific cookie value to the API.
*
* @see https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameter-locations
*/
export type ParameterLocation =
  | 'query'
  | 'header'
  | 'path'
  | 'cookie';
/**
* In order to support common ways of serializing simple parameters,
* a set of style values are defined.
*
* - __matrix:__ Path style parameters defined by [RFC6570](https://tools.ietf.org/html/rfc6570#section-3.2.7)
*
* - __label:__ Label style parameters defined by [RFC6570](https://tools.ietf.org/html/rfc6570#section-3.2.5)
*
* - __form:__ Form style parameters defined by [RFC6570](https://tools.ietf.org/html/rfc6570#section-3.2.8).
* This option replaces `collectionFormat` with a `csv` (when `explode` is false)
* or `multi` (when `explode` is true) value from OpenAPI 2.0.
*
* - __simple:__ Simple style parameters defined by [RFC6570](https://tools.ietf.org/html/rfc6570#section-3.2.2).
* This option replaces `collectionFormat` with a `csv` value from OpenAPI 2.0.
*
* - __spaceDelimited:__ Space separated array values.
* This option replaces `collectionFormat` equal to `ssv` from OpenAPI 2.0.
*
* - __pipeDelimited:__ Pipe separated array values.
* This option replaces `collectionFormat` equal to `pipes` from OpenAPI 2.0.
*
* - __deepObject:__ Provides a simple way of rendering nested objects using form parameters.
*
* @see https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#style-values
*/
export type SerializationStyle =
  | 'matrix'
  | 'label'
  | 'form'
  | 'simple'
  | 'spaceDelimited'
  | 'pipeDelimited'
  | 'deepObject';
/**
* Primitive data types in the OAS are based on the types supported by the
* [JSON Schema Specification Wright Draft 00](https://tools.ietf.org/html/draft-wright-json-schema-00#section-4.2).
*
* Note that `integer` as a type is also supported and is defined as a
* JSON number without a fraction or exponent part.
*
* `null` is not supported as a type (see [nullable](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#schemaNullable) for an alternative solution).
*
* @see https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#data-types
*/
export type DataType =
  | 'boolean'
  | 'object'
  | 'array'
  | 'number'
  | 'integer'
  | 'string';
/**
* OAS uses several known formats to define in fine detail the data type being used.
*
* However, to support documentation needs, the format property is an open
* `string`-valued property, and can have any value. Formats such as
* `"email", "uuid"`, and so on, __MAY__ be used even though undefined by
* this specification.
*
* Types that are not accompanied by a format property follow the type
* definition in the JSON Schema.
*
* Tools that do not recognize a specific `format` __MAY__ default back to the
* `type` alone, as if the `format` is not specified.
*
* @see https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#dataTypeFormat
*/
export type DataFormat =
  | 'date'
  | 'date-time'
  | 'email'
  | 'hostname'
  | 'ipv4'
  | 'ipv6'
  | 'uri'
  | 'uriref'
  | 'int32'
  | 'int64'
  | 'float'
  | 'double'
  | 'byte'
  | 'binary'
  | 'password';
/**
* Security scheme types that can be used by the operations.
*
* Supported schemes are HTTP authentication, an API key (either as a header or
* as a query parameter), OAuth2's common flows (implicit, password, application
* and access code) as defined in [RFC6749](https://tools.ietf.org/html/rfc6749),
* and [OpenID Connect Discovery](https://tools.ietf.org/html/draft-ietf-oauth-discovery-06).
*
* @see https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#security-scheme-object
*/
export type SecuritySchemeType =
  | 'apiKey'
  | 'http'
  | 'oauth2'
  | 'openIdConnect';
/**
* Valid locations for `apiKey`'s.
*
* __REQUIRED__ only when the `SecuritySchemeType` is set to `apiKey`.
*
* @see https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#security-scheme-object
*/
export type SecuritySchemeIn =
  | 'query'
  | 'header'
  | 'cookie';
|
interface Post {
  title: string,
  author: string,
  published_at: Date,
  updated_at: string
}

/**
 * Serializes a post into a schema.org NewsArticle JSON-LD <script> tag.
 *
 * SECURITY: every '<' in the JSON is escaped as \u003c (still valid JSON),
 * so user-supplied content — e.g. a title containing "</script>" — cannot
 * terminate the script element and inject markup into the page.
 *
 * NOTE(review): the author name is hard-coded to "tradingstrategy.ai" even
 * though Post carries an `author` field — confirm whether that is intended.
 */
export function serializePost(postData: Post) {
  const metadata = {
    "@context": "http://schema.org",
    "@type": "NewsArticle",
    "headline": postData.title,
    "author": {
      "@type": "Person",
      "name": "tradingstrategy.ai"
    },
    "datePublished": postData.published_at,
    "dateModified": postData.updated_at,
  }
  // Escape '<' to prevent premature </script> termination.
  const json = JSON.stringify(metadata, null, 2).replace(/</g, '\\u003c');
  return `<script type="application/ld+json">${json}</script>`
}
|
"""
Load a dataset, split into training and testing set, and ultimately make predictions using a decision tree model.
"""
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
# Load the data set
data = pd.read_csv('data.csv')
# Split into training and testing sets
X = data.drop('target', axis=1)
y = data['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Create a decision tree model
model = DecisionTreeClassifier(max_depth=5)
model.fit(X_train, y_train)
# Make predictions
preds = model.predict(X_test)
# Print results
print(preds) |
<reponame>famod/qson
package io.quarkus.qson.deployment;
import io.quarkus.arc.deployment.AdditionalBeanBuildItem;
import io.quarkus.arc.deployment.BeanContainerBuildItem;
import io.quarkus.builder.BuildException;
import io.quarkus.deployment.annotations.BuildProducer;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.annotations.Record;
import io.quarkus.deployment.builditem.CombinedIndexBuildItem;
import io.quarkus.deployment.builditem.GeneratedClassBuildItem;
import io.quarkus.deployment.recording.RecorderContext;
import io.quarkus.qson.Qson;
import io.quarkus.qson.QsonCustomWriter;
import io.quarkus.qson.QsonIgnore;
import io.quarkus.qson.QsonProperty;
import io.quarkus.qson.QsonTransformer;
import io.quarkus.qson.generator.ParserGenerator;
import io.quarkus.qson.generator.WriterGenerator;
import io.quarkus.qson.runtime.QuarkusQsonGenerator;
import io.quarkus.qson.runtime.QuarkusQsonInitializer;
import io.quarkus.qson.runtime.QuarkusQsonMapper;
import io.quarkus.qson.runtime.QuarkusQsonRegistry;
import io.quarkus.qson.util.Types;
import org.jboss.jandex.AnnotationInstance;
import org.jboss.jandex.AnnotationTarget;
import org.jboss.jandex.AnnotationValue;
import org.jboss.jandex.ClassInfo;
import org.jboss.jandex.DotName;
import org.jboss.jandex.MethodInfo;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.lang.reflect.Type;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static io.quarkus.deployment.annotations.ExecutionTime.STATIC_INIT;
public class QsonBuildStep {
public static final DotName QSON = DotName.createSimple(Qson.class.getName());
public static final DotName QSON_TRANSFORMER = DotName.createSimple(QsonTransformer.class.getName());
public static final DotName QSON_CUSTOM_WRITER = DotName.createSimple(QsonCustomWriter.class.getName());
public static final DotName QUARKUS_QSON_INITIALIZER = DotName.createSimple(QuarkusQsonInitializer.class.getName());
public static final DotName QSON_PROPERTY = DotName.createSimple(QsonProperty.class.getName());
public static final DotName QSON_IGNORE = DotName.createSimple(QsonIgnore.class.getName());
@BuildStep
AdditionalBeanBuildItem additionalBeans() {
return AdditionalBeanBuildItem.builder()
.setUnremovable()
.addBeanClass(QuarkusQsonMapper.class)
.build();
}
@BuildStep
QsonGeneratorBuildItem publishGenerator(QsonBuildTimeConfig config) {
QuarkusQsonGeneratorImpl generator = new QuarkusQsonGeneratorImpl();
if (config.dateFormat.isPresent()) {
generator.dateFormat(config.dateFormat.get());
}
return new QsonGeneratorBuildItem(generator);
}
@BuildStep
void scan(BuildProducer<QsonBuildItem> qson,
QsonGeneratorBuildItem genItem,
CombinedIndexBuildItem combinedIndex) throws Exception {
Collection<AnnotationInstance> annotations = combinedIndex.getIndex().getAnnotations(QSON);
Set<String> classes = new HashSet<>();
registerQson(genItem.getGenerator(), annotations, classes);
annotations = combinedIndex.getIndex().getAnnotations(QSON_PROPERTY);
register(genItem.getGenerator(), annotations, classes);
annotations = combinedIndex.getIndex().getAnnotations(QSON_IGNORE);
register(genItem.getGenerator(), annotations, classes);
registerTransformer(genItem.getGenerator(), combinedIndex);
registerCustomWriter(genItem.getGenerator(), combinedIndex);
qson.produce(new QsonBuildItem());
}
private void registerTransformer(QuarkusQsonGeneratorImpl generator, CombinedIndexBuildItem combinedIndex) throws Exception {
Collection<AnnotationInstance> annotations = combinedIndex.getIndex().getAnnotations(QSON_TRANSFORMER);
for (AnnotationInstance ai : annotations) {
MethodInfo mi = ai.target().asMethod();
ClassInfo dec = mi.declaringClass();
Class declaring = Thread.currentThread().getContextClassLoader().loadClass(dec.name().toString());
Method method = declaring.getMethod(mi.name());
generator.overrideMappingFor(method.getReturnType()).transformer(declaring);
generator.register(method.getReturnType(), true, false);
}
}
private void registerCustomWriter(QuarkusQsonGeneratorImpl generator, CombinedIndexBuildItem combinedIndex) throws Exception {
Collection<AnnotationInstance> annotations = combinedIndex.getIndex().getAnnotations(QSON_CUSTOM_WRITER);
for (AnnotationInstance ai : annotations) {
ClassInfo dec = ai.target().asClass();
Class declaring = Thread.currentThread().getContextClassLoader().loadClass(dec.name().toString());
QsonCustomWriter ann = (QsonCustomWriter)declaring.getAnnotation(QsonCustomWriter.class);
generator.overrideMappingFor(ann.value()).customWriter(declaring);
generator.register(ann.value(), false, true);
}
}
private void registerQson(QuarkusQsonGeneratorImpl generator, Collection<AnnotationInstance> annotations, Set<String> classes) throws BuildException, ClassNotFoundException {
for (AnnotationInstance ai : annotations) {
ClassInfo ci = ai.target().asClass();
String className = ci.name().toString();
if (!Modifier.isPublic(ci.flags()) || Modifier.isInterface(ci.flags())) {
throw new BuildException("@Qson annnotation can only be placed on public classes: " + className, Collections.emptyList());
}
if (Modifier.isAbstract(ci.flags())) {
throw new BuildException("@Qson annnotation cannot be placed on an abstract class: " + className, Collections.emptyList());
}
AnnotationValue generateParser = ai.value("generateParser");
AnnotationValue generateWriter = ai.value("generateWriter");
boolean parser = generateParser == null || generateParser.asBoolean();
boolean writer = generateWriter == null || generateWriter.asBoolean();
if (!classes.contains(className)) {
Class clz = Thread.currentThread().getContextClassLoader().loadClass(className);
generator.register(clz, parser, writer);
classes.add(className);
}
}
}
private void register(QuarkusQsonGeneratorImpl generator, Collection<AnnotationInstance> annotations, Set<String> classes) throws BuildException, ClassNotFoundException {
for (AnnotationInstance ai : annotations) {
ClassInfo ci = null;
if (ai.target().kind() == AnnotationTarget.Kind.CLASS) {
ci = ai.target().asClass();
} else if (ai.target().kind() == AnnotationTarget.Kind.METHOD) {
ci = ai.target().asMethod().declaringClass();
} else if (ai.target().kind() == AnnotationTarget.Kind.FIELD) {
ci = ai.target().asField().declaringClass();
} else {
return; // do not know what to do.
}
String className = ci.name().toString();
if (!Modifier.isPublic(ci.flags()) || Modifier.isInterface(ci.flags())) {
throw new BuildException("Qson mapped classes must be public classes: " + className, Collections.emptyList());
}
if (Modifier.isAbstract(ci.flags())) {
throw new BuildException("Qson mapped classes cannot be abstract: " + className, Collections.emptyList());
}
if (!classes.contains(className)) {
Class clz = Thread.currentThread().getContextClassLoader().loadClass(className);
generator.register(clz, true, true);
classes.add(className);
}
}
}
@BuildStep
public GeneratedQsonClassesBuildItem generate(BuildProducer<GeneratedClassBuildItem> toGenerate,
QsonGeneratorBuildItem genItem,
List<QsonBuildItem> ignore,
CombinedIndexBuildItem combinedIndex) throws Exception {
QuarkusQsonGeneratorImpl generator = genItem.getGenerator();
Collection<AnnotationInstance> annotations = combinedIndex.getIndex().getAnnotations(QUARKUS_QSON_INITIALIZER);
for (AnnotationInstance ai : annotations) {
MethodInfo method = ai.target().asMethod();
ClassInfo declaring = method.declaringClass();
if (!Modifier.isPublic(method.flags())
|| !Modifier.isStatic(method.flags())
|| method.returnType().kind() != org.jboss.jandex.Type.Kind.VOID
|| method.parameters().size() != 1
|| method.parameters().get(0).kind() != org.jboss.jandex.Type.Kind.CLASS
) {
throw new BuildException("Bad signature for @QuarkusQsonInitializer annotated method: " + method.toString(), Collections.emptyList());
}
Class initClass = Thread.currentThread().getContextClassLoader().loadClass(declaring.name().toString());
Method m = initClass.getMethod(method.name(), QuarkusQsonGenerator.class);
m.invoke(null, generator);
}
Map<String, String> generatedParsers = new HashMap<>();
Map<String, String> generatedWriters = new HashMap<>();
GeneratedClassGizmoAdaptor adaptor = new GeneratedClassGizmoAdaptor(toGenerate);
generateParsers(generator, generator.getParsers(), generatedParsers, adaptor);
generateWriters(generator, generator.getWriters(), generatedWriters, adaptor);
return new GeneratedQsonClassesBuildItem(generatedParsers, generatedWriters);
}
public void generateParsers(QuarkusQsonGeneratorImpl generator, Set<Type> parsers, Map<String, String> generatedParsers, GeneratedClassGizmoAdaptor adaptor) {
for (Type entry : parsers) {
String key = Types.typename(entry);
if (generatedParsers.containsKey(key)) continue;
ParserGenerator.Builder builder = generator.parser(entry);
builder.output(adaptor).generate();
generatedParsers.put(key, builder.className());
generateParsers(generator, builder.referenced(), generatedParsers, adaptor);
}
}
/**
 * Emit a writer class for every type in {@code writers}, recursing into types
 * each generated writer references. Types already present in
 * {@code generatedWriters} are skipped, which also terminates the recursion.
 */
public void generateWriters(QuarkusQsonGeneratorImpl generator, Set<Type> writers, Map<String, String> generatedWriters, GeneratedClassGizmoAdaptor adaptor) {
    for (Type type : writers) {
        String typeName = Types.typename(type);
        if (!generatedWriters.containsKey(typeName)) {
            WriterGenerator.Builder writerBuilder = generator.writer(type);
            writerBuilder.output(adaptor).generate();
            generatedWriters.put(typeName, writerBuilder.className());
            // Depth-first: also generate writers for everything this one references.
            generateWriters(generator, writerBuilder.referenced(), generatedWriters, adaptor);
        }
    }
}
/**
 * Static-init recorder step: registers every generated parser/writer class
 * with the runtime Qson registry. Recorded at STATIC_INIT, so the
 * context.newInstance() calls are replayed when the application initializes.
 */
@BuildStep()
@Record(STATIC_INIT)
public QsonCompletedBuildItem staticInit(QuarkusQsonRegistry registry,
                                         RecorderContext context,
                                         BeanContainerBuildItem beanContainer, // dependency
                                         GeneratedQsonClassesBuildItem generated) {
    registry.clear(); // not sure if we need this for redeploy?
    // Nothing was generated; still signal completion so dependents can proceed.
    if (generated == null) return new QsonCompletedBuildItem();
    for (Map.Entry<String, String> entry : generated.getGeneratedParsers().entrySet()) {
        registry.registerParser(entry.getKey(), context.newInstance(entry.getValue()));
    }
    for (Map.Entry<String, String> entry : generated.getGeneratedWriters().entrySet()) {
        registry.registerWriter(entry.getKey(), context.newInstance(entry.getValue()));
    }
    return new QsonCompletedBuildItem();
}
}
|
/**
* Job.js
*
* @description :: TODO: You might write a short summary of how this model works and what it represents here.
* @docs :: http://sailsjs.org/documentation/concepts/models-and-orm/models
*/
module.exports = {
  attributes: {
    // Explicit auto-incrementing integer primary key.
    id: {
      type: 'integer',
      autoIncrement: true,
      unique: true,
      primaryKey: true
    },
    name: {
      type: 'string'
    },
    description: {
      type: 'string'
    },
    // When the job is due, and when it was closed (both optional).
    dueAt: {
      type: 'datetime'
    },
    closedAt: {
      type: 'datetime'
    },
    // Who submitted the job; the only required attribute.
    submitter: {
      type: 'string',
      required: true
    },
    // One-to-many association: Input records point back via their jobId.
    inputs: {
      collection: 'input',
      via: 'jobId'
    },
    // Identifier of this job in an external system — TODO confirm which system.
    externalId: {
      type: 'string'
    }
  },
};
|
"use strict";
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.objectGroup = void 0;
// Icon definition: a single SVG <path> on a 2048x1792 viewBox.
// NOTE(review): this looks machine-generated from an icon set — prefer editing
// the source SVG over hand-editing the path data below.
var objectGroup = {
  "viewBox": "0 0 2048 1792",
  "children": [{
    "name": "path",
    "attribs": {
      "d": "M2048 384h-128v1024h128v384h-384v-128h-1280v128h-384v-384h128v-1024h-128v-384h384v128h1280v-128h384v384zM1792 128v128h128v-128h-128zM128 128v128h128v-128h-128zM256 1664v-128h-128v128h128zM1664 1536v-128h128v-1024h-128v-128h-1280v128h-128v1024h128v128h1280zM1920 1664v-128h-128v128h128zM1280 640h384v768h-896v-256h-384v-768h896v256zM512 1024h640v-512h-640v512zM1536 1280v-512h-256v384h-384v128h640z"
    }
  }]
};
exports.objectGroup = objectGroup;
#!/bin/bash
# Run a single dieharder randomness test with a fixed seed for reproducibility.
# -d 209 selects the test, -g 46 selects the generator, -S sets the seed —
# TODO confirm the exact test/generator names against `dieharder -l` / `-g -1`.
dieharder -d 209 -g 46 -S 2878504267
|
#samtools
#!/bin/bash
# Submit one SGE job per FASTQ file under $BASEPATH/seq that gzips it (fast, -1).
# Usage:
#   chmod u+x gzip.sh
#   ./gzip.sh /home/fb416/projects/omer/
# Fix: all variable expansions are now quoted so paths containing spaces or
# glob characters do not word-split or expand.
BASEPATH="${1}"
for filepath in "$BASEPATH"/seq/*.fq
do
filename=$(basename "$filepath" .fq)
# NOTE(review): the job-name prefix "samtools_" is misleading — the job runs
# gzip. Kept as-is in case downstream tooling matches on the name; confirm.
qsub -o "$BASEPATH/log/gzip_$filename.out" -e "$BASEPATH/log/gzip_$filename.err" -N "samtools_$filename" -pe slots 8 -b y gzip -1 "$filepath"
done
|
<filename>gatsby-config.js
require("dotenv").config({
path: `.env.development`,
})
module.exports = {
siteMetadata: {
title: `Manineta Graviranje - izdelava lesenih umetnin.`,
titleTemplate: `%s | Manineta Graviranje`,
description: `Veselje do ustvarjanja,domišljija ter sprostitev ob tem nas je pripeljalo do laserskega graviranja. Graviranje, rezanje ter izdelava različnih nekovinskih izdelkov. Izdelki za darila ob raznih priložnostih kot so rojstni dnevi, krst, poroke, in še in še..`,
author: `@NikVogrinec`,
image: "/images/logo_noText.svg",
facebookUsername: `manineta.graviranje.3`,
siteUrl: `https://www.manineta.netlify.app`,
},
plugins: [
`gatsby-plugin-react-helmet`,
{
resolve: `gatsby-source-filesystem`,
options: {
name: `images`,
path: `${__dirname}/src/images`,
},
},
{
resolve: `gatsby-source-contentful`,
options: {
spaceId: process.env.CONTENTFUL_SPACE_ID,
accessToken: process.env.CONTENTFUL_ACCESS_TOKEN,
},
},
{
resolve: `gatsby-plugin-manifest`,
options: {
start_url: "/",
theme_color: "#E89D38",
icon: `src/images/logo_noText.svg`,
},
},
`gatsby-transformer-sharp`,
`gatsby-plugin-sharp`,
`gatsby-plugin-sass`,
`gatsby-background-image`,
`gatsby-plugin-sitemap`,
`gatsby-plugin-offline`,
`gatsby-plugin-react-helmet`,
`gatsby-plugin-transition-link`,
// this (optional) plugin enables Progressive Web App + Offline functionality
// To learn more, visit: https://gatsby.dev/offline
// `gatsby-plugin-offline`,
],
}
|
<reponame>getmetamapper/metamapper<filename>app/authentication/mutations.py
# -*- coding: utf-8 -*-
import graphene
import graphene.relay as relay
import graphql_jwt
import graphql_jwt.shortcuts as jwt
import app.authentication.emails as emails
import app.authentication.models as models
import app.authentication.schema as schema
import app.authentication.tasks as tasks
import app.authentication.serializers as serializers
import utils.mixins.mutations as mixins
import utils.shortcuts as shortcuts
from django.core import exceptions
from django.contrib.auth.tokens import default_token_generator
from utils.errors import PasswordResetTokenExpired
from utils.graphql.types import ErrorType
from app.authorization.permissions import (
AllowAny,
AllowAuthenticated,
WorkspaceOwnersOnly,
)
class Register(mixins.CreateMutationMixin, relay.ClientIDMutation):
    """Mutation to register a user.
    """
    permission_classes = (AllowAny,)

    class Meta:
        serializer_class = serializers.UserSerializer

    class Input:
        # All four fields are mandatory for registration.
        fname = graphene.String(required=True)
        lname = graphene.String(required=True)
        email = graphene.String(required=True)
        password = graphene.String(required=True)

    user = graphene.Field(schema.UserType)
    jwt = graphene.Field(graphene.String)

    @classmethod
    def prepare_response(cls, instance, errors, **data):
        """Build the mutation payload; issue a JWT only when the user
        was actually persisted (has a primary key).
        """
        return_kwargs = {
            cls.model_name: instance,
            'errors': errors,
        }
        if instance and instance.pk is not None:
            return_kwargs['jwt'] = jwt.get_token(instance)
        return cls(**return_kwargs)
class ResetPassword(graphene.Mutation):
    """Mutation for requesting a password reset email.
    """
    class Input:
        email = graphene.String(required=True)

    ok = graphene.Boolean()
    errors = graphene.List(ErrorType)

    def mutate(self, info, email):
        # Look up the account (404s when the address is unknown), mint a
        # one-time token for it, then send the reset email.
        account = shortcuts.get_object_or_404(models.User, email=email)
        reset_token = default_token_generator.make_token(account)
        emails.reset_password(account.email, account.id, reset_token)
        return ResetPassword(ok=True, errors=None)
class ResetPasswordConfirm(graphene.Mutation):
    """Mutation for changing the user password.
    """
    class Input:
        uid = graphene.Int(required=True)
        token = graphene.String(required=True)
        password = graphene.String(required=True)

    ok = graphene.Boolean()
    errors = graphene.List(ErrorType)

    @classmethod
    def perform_mutation(cls, user, token, password):
        """Validate the reset token and apply the new password.

        Raises PasswordResetTokenExpired for an invalid/stale token; may raise
        ValidationError from change_password if the new password is rejected.
        """
        if not default_token_generator.check_token(user, token):
            raise PasswordResetTokenExpired('Password reset token is no longer valid.')
        user.change_password(password)
        return user

    def mutate(self, info, uid, token, password):
        errors = None
        user = shortcuts.get_object_or_404(models.User, pk=uid)
        try:
            ResetPasswordConfirm.perform_mutation(user, token, password)
        except PasswordResetTokenExpired:
            # Stale or tampered token.
            errors = [
                ErrorType(resource='User', field='password_reset_token', code='invalid')
            ]
        except exceptions.ValidationError:
            # New password failed validation.
            errors = [
                ErrorType(resource='User', field='password', code='invalid')
            ]
        return ResetPasswordConfirm(ok=(errors is None), errors=errors)
class LoginWithSSOToken(graphene.Mutation):
    """Mutation for logging in with a one-time SSO token.
    """
    class Input:
        uid = graphene.Int(required=True)
        token = graphene.String(required=True)

    jwt = graphene.String()

    def mutate(self, info, uid, token):
        """Return a JWT if the access token is valid.
        """
        # jwt stays None (and the caller gets no token) when the user does not
        # exist or the SSO token fails validation.
        jwt_token = None
        user = models.User.objects.filter(id=uid).first()
        if user:
            if user.is_sso_access_token_valid(token):
                jwt_token = jwt.get_token(user)
                # Token is single-use: consume it once it has been exchanged.
                user.clear_sso_access_token(True)
        return LoginWithSSOToken(jwt=jwt_token)
class UpdateCurrentUser(mixins.UpdateMutationMixin, relay.ClientIDMutation):
    """Mutation to update metadata about the current user.
    """
    permission_classes = (AllowAuthenticated,)

    class Meta:
        serializer_class = serializers.CurrentUserSerializer

    class Input:
        # All optional: callers send only the fields they want to change.
        current_password = graphene.String(required=False)
        fname = graphene.String(required=False)
        lname = graphene.String(required=False)
        email = graphene.String(required=False)
        password = graphene.String(required=False)

    user = graphene.Field(schema.UserType)
    jwt = graphene.Field(graphene.String)

    @classmethod
    def get_serializer_kwargs(cls, root, info, **data):
        return {
            'instance': info.context.user,
            # Drop falsy values so the partial update only touches fields the
            # caller actually provided.
            'data': {k: v for k, v in data.items() if v},
            'partial': True,
            'context': {
                'request': info.context,
            },
        }

    @classmethod
    def prepare_response(cls, instance, errors, **data):
        # A fresh JWT is always issued here — NOTE(review): presumably because
        # identity fields (e.g. email/password) may have changed; confirm.
        return_kwargs = {
            cls.model_name: instance,
            'jwt': jwt.get_token(instance),
            'errors': errors,
        }
        return cls(**return_kwargs)
class CreateWorkspace(mixins.CreateMutationMixin, relay.ClientIDMutation):
    """Mutation for creating a new workspace.
    """
    permission_classes = (AllowAuthenticated,)

    class Meta:
        serializer_class = serializers.WorkspaceSerializer

    class Input:
        name = graphene.String(required=True)
        slug = graphene.String(required=True)
        beacon_consent = graphene.Boolean(required=False, default_value=False)

    workspace = graphene.Field(schema.WorkspaceType)

    @classmethod
    def perform_save(cls, serializer, info):
        # The requesting user becomes the workspace creator.
        return serializer.save(creator=info.context.user)
class UpdateWorkspace(mixins.UpdateMutationMixin, relay.ClientIDMutation):
    """Mutation for updating an existing workspace.
    """
    # Only workspace owners may modify workspace settings.
    permission_classes = (WorkspaceOwnersOnly,)

    class Meta:
        serializer_class = serializers.WorkspaceSerializer

    class Input:
        id = graphene.ID(required=True)
        name = graphene.String(required=False)
        slug = graphene.String(required=False)
        beacon_consent = graphene.Boolean(required=False)

    workspace = graphene.Field(schema.WorkspaceType)
class DeleteWorkspace(mixins.DeleteMutationMixin, relay.ClientIDMutation):
    """Permanently remove an existing workspace.
    """
    permission_classes = (WorkspaceOwnersOnly,)

    class Meta:
        serializer_class = serializers.WorkspaceSerializer

    @classmethod
    def tasks_on_success(cls, instance, info):
        """We should queue this datastore to be hard-deleted.
        """
        # Hard deletion happens asynchronously via a background task.
        return [
            {
                "function": tasks.hard_delete_workspace.delay,
                "arguments": {
                    "workspace_id": instance.id,
                },
            }
        ]
class AccountSetup(mixins.CreateMutationMixin, relay.ClientIDMutation):
    """Mutation for creating a new user and workspace at the same time.
    """
    permission_classes = (AllowAny,)

    class Meta:
        serializer_class = serializers.AccountSetupSerializer

    class Input:
        fname = graphene.String(required=True)
        lname = graphene.String(required=True)
        email = graphene.String(required=True)
        # Fix: this field's type was corrupted in the source ("<PASSWORD>(...)").
        # It must be a required String, matching Register.Input.password.
        password = graphene.String(required=True)
        workspace_name = graphene.String(required=True)
        workspace_slug = graphene.String(required=True)
        beacon_consent = graphene.Boolean(required=False, default_value=False)

    jwt = graphene.Field(graphene.String)
    workspace_slug = graphene.Field(graphene.String)

    @classmethod
    def prepare_response(cls, instance, errors, **data):
        """We return the JWT or errors as necessary.
        """
        return_kwargs = {
            'jwt': None,
            'workspace_slug': None,
            'errors': errors,
        }
        # Only hand back a token + slug when the user was actually persisted.
        if instance and instance.pk is not None:
            return_kwargs['workspace_slug'] = data['workspace_slug']
            return_kwargs['jwt'] = jwt.get_token(instance)
        return cls(**return_kwargs)
class Mutation(graphene.ObjectType):
    """Root mutation type for authentication and workspace management."""
    # JWT Authentication
    token_auth = graphql_jwt.ObtainJSONWebToken.Field()
    verify_token = graphql_jwt.Verify.Field()
    refresh_token = graphql_jwt.Refresh.Field()
    login_with_sso_token = LoginWithSSOToken.Field(name='loginWithSSOToken')

    # Current User Management
    register_user = Register.Field()
    update_current_user = UpdateCurrentUser.Field()
    reset_password = ResetPassword.Field()
    # Fix: the class reference was corrupted in the source
    # ("<PASSWORD>PasswordConfirm"); it must point at ResetPasswordConfirm,
    # the mutation class defined above in this module.
    reset_password_confirm = ResetPasswordConfirm.Field()

    # Workspace Management
    create_workspace = CreateWorkspace.Field()
    update_workspace = UpdateWorkspace.Field()
    delete_workspace = DeleteWorkspace.Field()
    account_setup = AccountSetup.Field()
|
#!/bin/sh
# Copyright (c) 2014-2015 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C

if [ -z "$OSSLSIGNCODE" ]; then
OSSLSIGNCODE=osslsigncode
fi

# Fix: use the idiomatic -z test instead of '! -n'.
if [ -z "$1" ]; then
echo "usage: $0 <osslcodesign args>"
echo "example: $0 -key codesign.key"
exit 1
fi

OUT=signature-win.tar.gz
SRCDIR=unsigned
WORKDIR=./.tmp
OUTDIR="${WORKDIR}/out"
OUTSUBDIR="${OUTDIR}/win"
TIMESERVER=http://timestamp.comodoca.com
CERTFILE="win-codesign.cert"

mkdir -p "${OUTSUBDIR}"
# Fix: iterate the glob directly instead of parsing `ls` output through
# `basename -a ... | while read`, which breaks on filenames with whitespace.
for UNSIGNEDPATH in "${SRCDIR}"/*-unsigned.exe; do
# Skip the literal pattern when no file matches (sh has no nullglob).
[ -e "${UNSIGNEDPATH}" ] || continue
UNSIGNED="$(basename "${UNSIGNEDPATH}")"
echo Signing "${UNSIGNED}"
"${OSSLSIGNCODE}" sign -certs "${CERTFILE}" -t "${TIMESERVER}" -in "${SRCDIR}/${UNSIGNED}" -out "${WORKDIR}/${UNSIGNED}" "$@"
"${OSSLSIGNCODE}" extract-signature -pem -in "${WORKDIR}/${UNSIGNED}" -out "${OUTSUBDIR}/${UNSIGNED}.pem" && rm "${WORKDIR}/${UNSIGNED}"
done

rm -f "${OUT}"
tar -C "${OUTDIR}" -czf "${OUT}" .
rm -rf "${WORKDIR}"
echo "Created ${OUT}"
|
#!/bin/bash
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Bash functions used by buildbot annotator scripts for the android
# build of chromium. Executing this script should not perform actions
# other than setting variables and defining of functions.
# Number of jobs on the compile line; e.g. make -j"${JOBS}".
# Overridable from the caller's environment; defaults to 4.
JOBS="${JOBS:-4}"
# Parse named arguments passed into the annotator script
# and assign them global variable names
# (FACTORY_PROPERTIES / BUILD_PROPERTIES / SLAVE_PROPERTIES / BUILDTYPE).
function bb_parse_args {
  while [[ $1 ]]; do
    case "$1" in
      --factory-properties=*)
        # Strip everything up to the first '=' to get the JSON payload.
        FACTORY_PROPERTIES="$(echo "$1" | sed 's/^[^=]*=//')"
        BUILDTYPE=$(bb_get_json_prop "$FACTORY_PROPERTIES" target)
        ;;
      --build-properties=*)
        BUILD_PROPERTIES="$(echo "$1" | sed 's/^[^=]*=//')"
        ;;
      --slave-properties=*)
        SLAVE_PROPERTIES="$(echo "$1" | sed 's/^[^=]*=//')"
        ;;
      *)
        # Unknown flags only warn; they never abort the build.
        echo "@@@STEP_WARNINGS@@@"
        echo "Warning, unparsed input argument: '$1'"
        ;;
    esac
    shift
  done
}
# Basic setup for all bots to run after a source tree checkout.
# Sets up gyp/goma environment variables and clobbers the output directory
# when requested or when a landmine has been triggered.
# Args:
#   $1: source root.
#   $2 and beyond: key value pairs which are parsed by bb_parse_args.
function bb_baseline_setup {
  SRC_ROOT="$1"
  # Remove SRC_ROOT param
  shift
  cd $SRC_ROOT
  echo "@@@BUILD_STEP Environment setup@@@"
  bb_parse_args "$@"
  export GYP_GENERATORS=ninja
  export GOMA_DIR=/b/build/goma
  . build/android/envsetup.sh
  # Merge extra gyp defines from both factory and slave properties.
  local extra_gyp_defines="$(bb_get_json_prop "$FACTORY_PROPERTIES" \
    extra_gyp_defines) $(bb_get_json_prop "$SLAVE_PROPERTIES" \
    extra_gyp_defines)"
  export GYP_DEFINES+=" fastbuild=1 $extra_gyp_defines"
  # clang/asan builds pick their own compiler; drop any preset target CXX.
  if echo $extra_gyp_defines | grep -qE 'clang|asan'; then
    unset CXX_target
  fi
  local build_path="${SRC_ROOT}/out/${BUILDTYPE}"
  local landmines_triggered_path="$build_path/.landmines_triggered"
  python "$SRC_ROOT/build/landmines.py"
  # Clobber when explicitly requested or when a landmine fired.
  if [[ $BUILDBOT_CLOBBER || -f "$landmines_triggered_path" ]]; then
    echo "@@@BUILD_STEP Clobber@@@"
    if [[ -z $BUILDBOT_CLOBBER ]]; then
      echo "Clobbering due to triggered landmines: "
      cat "$landmines_triggered_path"
    else
      # Also remove all the files under out/ on an explicit clobber
      find "${SRC_ROOT}/out" -maxdepth 1 -type f -exec rm -f {} +
    fi
    # Sdk key expires, delete android folder.
    # crbug.com/145860
    rm -rf ~/.android
    rm -rf "$build_path"
    if [[ -e $build_path ]] ; then
      echo "Clobber appeared to fail? $build_path still exists."
      echo "@@@STEP_WARNINGS@@@"
    fi
  fi
}
function bb_asan_tests_setup {
  # Download or build the ASan runtime library.
  # Uses the SRC_ROOT exported by bb_baseline_setup.
  ${SRC_ROOT}/tools/clang/scripts/update.sh
}
# Setup goma. Used internally to buildbot_functions.sh.
function bb_setup_goma_internal {
  echo "Killing old goma processes"
  # '|| true' keeps callers running under 'set -e' alive when nothing was up.
  ${GOMA_DIR}/goma_ctl.sh stop || true
  killall -9 compiler_proxy || true
  echo "Starting goma"
  export GOMA_API_KEY_FILE=${GOMA_DIR}/goma.key
  ${GOMA_DIR}/goma_ctl.sh start
  # Ensure goma is shut down even if the build is interrupted.
  trap bb_stop_goma_internal SIGHUP SIGINT SIGTERM
}
# Stop goma. Counterpart to bb_setup_goma_internal; also installed as a
# signal-trap handler there.
function bb_stop_goma_internal {
  echo "Stopping goma"
  ${GOMA_DIR}/goma_ctl.sh stop
}
# Build using ninja.
# $1: ninja target to build. Uses the BUILDTYPE set by bb_parse_args.
function bb_goma_ninja {
  echo "Using ninja to build."
  local TARGET=$1
  bb_setup_goma_internal
  # High -j relies on goma's distributed compiles; -l20 caps local load.
  ninja -C out/$BUILDTYPE -j120 -l20 $TARGET
  bb_stop_goma_internal
}
# Compile step
function bb_compile {
  # This must be named 'compile' for CQ.
  echo "@@@BUILD_STEP compile@@@"
  # Regenerate build files before compiling everything via goma+ninja.
  gclient runhooks
  bb_goma_ninja All
}
# Experimental compile step; does not turn the tree red if it fails.
function bb_compile_experimental {
  # Linking DumpRenderTree appears to hang forever?
  EXPERIMENTAL_TARGETS="android_experimental"
  for target in ${EXPERIMENTAL_TARGETS} ; do
    echo "@@@BUILD_STEP Experimental Compile $target @@@"
    # Temporarily disable exit-on-error so a failed target only warns.
    set +e
    bb_goma_ninja "${target}"
    if [ $? -ne 0 ] ; then
      echo "@@@STEP_WARNINGS@@@"
    fi
    set -e
  done
}
# Run findbugs.
# Adds --release-build when BUILDTYPE is Release; BUILDFLAG stays unset (and
# expands to nothing) otherwise.
function bb_run_findbugs {
  echo "@@@BUILD_STEP findbugs@@@"
  if [[ $BUILDTYPE = Release ]]; then
    local BUILDFLAG="--release-build"
  fi
  bb_run_step build/android/findbugs_diff.py $BUILDFLAG
  bb_run_step tools/android/findbugs_plugin/test/run_findbugs_plugin_tests.py \
    $BUILDFLAG
}
# Run a buildbot step and handle failure (failure will not halt build).
# Runs in a subshell so 'set +e' does not leak into the caller; emits the
# annotator failure marker when the wrapped command exits non-zero.
function bb_run_step {
  (
    set +e
    if ! "$@"; then
      echo "@@@STEP_FAILURE@@@"
    fi
  )
}
# Zip and archive a build.
# Delegates to the slave-side zip_build.py with the properties captured by
# bb_parse_args; excludes intermediates that are never needed downstream.
function bb_zip_build {
  echo "@@@BUILD_STEP Zip build@@@"
  python ../../../../scripts/slave/zip_build.py \
    --src-dir "$SRC_ROOT" \
    --build-dir "out" \
    --exclude-files "lib.target,gen,android_webview,jingle_unittests" \
    --factory-properties "$FACTORY_PROPERTIES" \
    --build-properties "$BUILD_PROPERTIES"
}
# Download and extract a build.
# Returns 1 when the required properties are missing or the download fails;
# exit codes > 1 from extract_build.py are demoted to a warning.
function bb_extract_build {
  echo "@@@BUILD_STEP Download and extract build@@@"
  if [[ -z $FACTORY_PROPERTIES || -z $BUILD_PROPERTIES ]]; then
    return 1
  fi
  # When extract_build.py downloads an unversioned build it
  # issues a warning by exiting with large numbered return code
  # When it fails to download it build, it exits with return
  # code 1. We disable halt on error mode and return normally
  # unless the python tool returns 1.
  (
    set +e
    python ../../../../scripts/slave/extract_build.py \
      --build-dir "$SRC_ROOT/build" \
      --build-output-dir "../out" \
      --factory-properties "$FACTORY_PROPERTIES" \
      --build-properties "$BUILD_PROPERTIES"
    local extract_exit_code=$?
    if (( $extract_exit_code > 1 )); then
      echo "@@@STEP_WARNINGS@@@"
      return
    fi
    return $extract_exit_code
  )
}
# Runs the license checker for the WebView build.
# License checker may return error code 1 meaning that
# there are non-fatal problems (warnings). Everything
# above 1 is considered to be a show-stopper.
function bb_check_webview_licenses {
  echo "@@@BUILD_STEP Check licenses for WebView@@@"
  # Subshell keeps 'set +e' and the cd local; the step itself always
  # returns 0 — failures are reported through annotator markers only.
  (
    set +e
    cd "${SRC_ROOT}"
    python android_webview/tools/webview_licenses.py scan
    local licenses_exit_code=$?
    if [[ $licenses_exit_code -eq 1 ]]; then
      echo "@@@STEP_WARNINGS@@@"
    elif [[ $licenses_exit_code -gt 1 ]]; then
      echo "@@@STEP_FAILURE@@@"
    fi
    return 0
  )
}
# Retrieve a packed json property using python.
# $1: JSON string; $2: property name. Prints the value, or '' when missing.
# Fix: the JSON is passed via argv instead of being interpolated into the
# python program text inside single quotes, where any quote/backslash in the
# JSON would break (or inject into) the snippet. The print(...) call form also
# works under both python 2 and python 3, unlike the old print statement.
function bb_get_json_prop {
  local JSON="$1"
  local PROP="$2"
  python -c "import json, sys; print(json.loads(sys.argv[1]).get(sys.argv[2], ''))" "$JSON" "$PROP"
}
|
<reponame>reno-xjb/v-mapboxgl<filename>packages/@v-mapboxgl/plugin-menu-control/src/index.ts
import Vue, { VueConstructor } from 'vue';
import VMenuControl from './controls/VMenuControl.vue';
export { VMenuControl };
/**
 * Vue plugin installer: registers the VMenuControl component globally
 * under the name 'VMenuControl'.
 */
function install(vue: VueConstructor<Vue>) {
  vue.component('VMenuControl', VMenuControl);
}
export default {
  install,
};

// Auto-install when Vue is present as a browser global (script-tag usage).
// Fix: the previous guard evaluated `window !== undefined`, which throws a
// ReferenceError wherever `window` is not declared (Node/SSR builds); `typeof`
// is safe on undeclared identifiers. hasOwnProperty is invoked via the
// prototype to avoid relying on the window object's own method.
if (typeof window !== 'undefined' && Object.prototype.hasOwnProperty.call(window, 'Vue')) {
  install((window as any).Vue);
}
|
# Replace "kafka-consumer-groups" by "kafka-consumer-groups.sh" or
# "kafka-consumer-groups.bat" based on your system (or bin/kafka-consumer-groups.sh
# or bin\windows\kafka-consumer-groups.bat if you didn't setup PATH / Environment variables)
# show built-in documentation for the command
kafka-consumer-groups
# list consumer groups
kafka-consumer-groups --bootstrap-server localhost:9092 --list
# describe one specific group
kafka-consumer-groups --bootstrap-server localhost:9092 --describe --group my-second-application
# describe another group
kafka-consumer-groups --bootstrap-server localhost:9092 --describe --group my-first-application
# start a consumer (lag for the group changes while it runs)
kafka-console-consumer --bootstrap-server 127.0.0.1:9092 --topic first_topic --group my-first-application
# describe the group now
kafka-consumer-groups --bootstrap-server localhost:9092 --describe --group my-first-application
# describe a console consumer group (change the end number to match yours)
kafka-consumer-groups --bootstrap-server localhost:9092 --describe --group console-consumer-10592
# start a console consumer
kafka-console-consumer --bootstrap-server 127.0.0.1:9092 --topic first_topic --group my-first-application
# describe the group again
kafka-consumer-groups --bootstrap-server localhost:9092 --describe --group my-first-application
#!/bin/bash
#
# Generate icons from the SVG. Ideally, this should be done in the ant task,
# but this really only happens rarely, whenever there is tweaking of the icon.
#
# Generated icons are checked in, so no need to run this script every time.
##
BASE_DRAWABLE=res/drawable
# Android density buckets mapped to launcher-icon pixel sizes.
declare -A resolutions=( ["xhdpi"]="96" ["hdpi"]="72" \
    ["mdpi"]="48" ["ldpi"]="36")
for res in "${!resolutions[@]}"; do
    SIZE="${resolutions[$res]}"
    # Fix: quote all path/size expansions so the script stays correct even if
    # the base path ever contains spaces or glob characters.
    FILE="${BASE_DRAWABLE}-${res}/ic_launcher_pp.png"
    # Render the SVG at the target size, then crush the PNG into place.
    # NOTE(review): --export-png is the Inkscape 0.x flag; 1.x renamed it to
    # --export-filename — confirm the Inkscape version in use.
    inkscape --export-png="${FILE}.tmp" \
        --export-width="$SIZE" --export-height="$SIZE" precise-pitch.svg
    pngcrush -e "" "${FILE}.tmp" >/dev/null 2>&1
    rm -f "${FILE}.tmp"
done
|
def remove_elements(arr, match):
    """Return a new list containing every element of ``arr`` that does not
    equal ``match``. The input list is left unmodified.
    """
    return list(filter(lambda element: element != match, arr))
# Call the function
# Demo: strip every occurrence of 2 from a sample list.
arr = [1, 2, 3, 4, 2, 5, 2]
match = 2
result = remove_elements(arr, match)
print(result)  # -> [1, 3, 4, 5]
#!/usr/bin/env bats
load test_helper
#
# Test
#
# Simulates rsync failing on every attempt (via the scripted fake 'command')
# and verifies kepler.sh reports the transfer failure in WORKFLOW_FAILED_TXT
# and WORKFLOW_STATUS after exhausting its retries.
@test "Test case rsync fails on file copy" {
  # verify $KEPLER_SH is in path if not skip this test
  skipIfKeplerNotInPath

  # Script the fake command's responses: du succeeds, then every rsync errors.
  echo "0,/foo,," > "$THE_TMP/bin/command.tasks"
  echo "0,26794 /foo,error," >> "$THE_TMP/bin/command.tasks"
  echo "1,,," >> "$THE_TMP/bin/command.tasks"
  echo "1,,," >> "$THE_TMP/bin/command.tasks"
  echo "1,,rsync error," >> "$THE_TMP/bin/command.tasks"
  echo "1,,rsync error2," >> "$THE_TMP/bin/command.tasks"

  # Run kepler.sh
  run $KEPLER_SH -runwf -redirectgui $THE_TMP -CWS_jobname jname -CWS_user joe -CWS_jobid 123 -remotePath /foo -CWS_outputdir $THE_TMP -maxRetry 2 -sleepCmd /bin/true -sshCmd "$THE_TMP/bin/command" -duCmd "$THE_TMP/bin/command" -remoteHost war.crbs.ucsd.edu -rsyncCmd "$THE_TMP/bin/command" $WF

  # Check exit code
  [ "$status" -eq 0 ]

  # Check output from kepler.sh
  [[ "${lines[0]}" == "The base dir is"* ]]
  echoArray "${lines[@]}"

  # Verify we got a workflow failed txt file
  [ -s "$THE_TMP/$WORKFLOW_FAILED_TXT" ]
  cat "$THE_TMP/$README_TXT"
  cat "$THE_TMP/$WORKFLOW_FAILED_TXT"

  # Verify we ran all the commands (no scripted tasks left unconsumed)
  run wc -l "$THE_TMP/bin/command.tasks"
  [ "$status" -eq 0 ]
  echo "${lines[0]}"
  [[ "${lines[0]}" == "0 "* ]]

  # Verify we got a README.txt
  [ -s "$THE_TMP/$README_TXT" ]

  # Failure file must carry both the simple and detailed error messages.
  run cat "$THE_TMP/$WORKFLOW_FAILED_TXT"
  [ "$status" -eq 0 ]
  [ "${lines[0]}" == "simple.error.message=Unable to copy data" ]
  [ "${lines[1]}" == "detailed.error.message=Non zero exit code from $THE_TMP/bin/command" ]
  [ -s "$THE_TMP/$WORKFLOW_STATUS" ]

  # Status file should show the job stuck in the retry phase with the
  # expected phase list and disk-space bookkeeping.
  run egrep "^phase=" "$THE_TMP/$WORKFLOW_STATUS"
  [ "$status" -eq 0 ]
  [ "${lines[0]}" == "phase=Transferring data retry" ]
  run egrep "^phase.help=" "$THE_TMP/$WORKFLOW_STATUS"
  [ "$status" -eq 0 ]
  echo "${lines[0]}"
  [ "${lines[0]}" == "phase.help=In this phase the workflow copies the data (26794 bytes) from (war.crbs.ucsd.edu:/foo) to the output of this job" ]
  run egrep "^phase.list=" "$THE_TMP/$WORKFLOW_STATUS"
  [ "$status" -eq 0 ]
  [ "${lines[0]}" == "phase.list=Examining data to transfer,Transferring data,Transferring data retry,Done" ]
  run egrep "^estimated.total.diskspace=" "$THE_TMP/$WORKFLOW_STATUS"
  [ "$status" -eq 0 ]
  [ "${lines[0]}" == "estimated.total.diskspace=26794" ]
  run egrep "^estimated.total.diskspace.help=" "$THE_TMP/$WORKFLOW_STATUS"
  [ "$status" -eq 0 ]
  [ "${lines[0]}" == "estimated.total.diskspace.help=Number of bytes that will be copied to job from war.crbs.ucsd.edu" ]
  run egrep "^disk.space.consumed=" "$THE_TMP/$WORKFLOW_STATUS"
  [ "$status" -eq 0 ]
  [ "${lines[0]}" == "disk.space.consumed=unknown" ]
  run egrep "^disk.space.consumed.help=" "$THE_TMP/$WORKFLOW_STATUS"
  [ "$status" -eq 0 ]
  [ "${lines[0]}" == "disk.space.consumed.help=Disk space in bytes" ]
}
|
#include <catch2/catch.hpp>
// Convert a distance in miles to kilometers.
// The international mile is defined as exactly 1.609344 km.
double mileToKilometer(double m)
{
    const double kKilometersPerMile = 1.609344;
    return kKilometersPerMile * m;
}
// Test cases for mileToKilometer function
// Catch2 test cases for mileToKilometer; Approx handles floating-point
// comparison tolerance.
TEST_CASE("Miles to Kilometers Conversion", "[mileToKilometer]")
{
    // Test case 1: Convert 1 mile to kilometers
    SECTION("1 mile to kilometers")
    {
        REQUIRE(mileToKilometer(1) == Approx(1.609344));
    }
    // Test case 2: Convert 5 miles to kilometers
    SECTION("5 miles to kilometers")
    {
        REQUIRE(mileToKilometer(5) == Approx(8.04672));
    }
    // Test case 3: Convert 10 miles to kilometers
    SECTION("10 miles to kilometers")
    {
        REQUIRE(mileToKilometer(10) == Approx(16.09344));
    }
    // Additional test cases can be added to cover various input values and
    // edge cases (e.g. zero and negative distances).
}
#!/usr/bin/env bash
# Publish versioned asciidoc docs to the gh-pages branch.
# $1: release tag; docs land under /<tag>/ on gh-pages.
# Requires TRAVIS_* and GITHUB_TOKEN in the environment (Travis CI deploy job).
set -e
TAGNAME=$1
GH_REF=github.com/aerokube/ggr-ui.git
git config user.name "${TRAVIS_REPO_SLUG}"
git config user.email "aerokube@aerokube.github.com"
# Token-authenticated remote so the final push works non-interactively.
git remote add upstream "https://${GITHUB_TOKEN}@${GH_REF}"
git fetch upstream
git branch -r
echo "Deleting old output"
rm -rf ${TRAVIS_BUILD_DIR}/docs/output
mkdir ${TRAVIS_BUILD_DIR}/docs/output
# Drop any stale worktree registration before re-adding docs/output below.
git worktree prune
rm -rf ${TRAVIS_BUILD_DIR}/.git/worktrees/docs/output/
echo "Checking out gh-pages branch into docs/output"
git worktree add -B gh-pages ${TRAVIS_BUILD_DIR}/docs/output upstream/gh-pages
echo "Removing existing files"
mkdir -p ${TRAVIS_BUILD_DIR}/docs/output/${TAGNAME}
rm -rf ${TRAVIS_BUILD_DIR}/docs/output/${TAGNAME}/*
#echo "Copying images"
#cp -R ${TRAVIS_BUILD_DIR}/docs/img ${TRAVIS_BUILD_DIR}/docs/output/${TAGNAME}/img
echo "Generating docs"
docker run -v ${TRAVIS_BUILD_DIR}/docs/:/documents/ --name asciidoc-to-html asciidoctor/docker-asciidoctor asciidoctor -a revnumber=${TAGNAME} -D /documents/output/${TAGNAME} index.adoc
echo "Updating gh-pages branch"
cd ${TRAVIS_BUILD_DIR}/docs/output && git add --all && git commit -m "Publishing to gh-pages"
git push upstream HEAD:gh-pages
<reponame>muthukumaravel7/armnn
// Doxygen-generated navigation data ([label, target-anchor, children] tuples)
// for the ICounterMappings class page — regenerate with doxygen, do not edit.
var classarmnn_1_1profiling_1_1_i_counter_mappings =
[
    [ "~ICounterMappings", "classarmnn_1_1profiling_1_1_i_counter_mappings.xhtml#aff478703ba6af2360a04c1f0e00911b4", null ],
    [ "GetBackendId", "classarmnn_1_1profiling_1_1_i_counter_mappings.xhtml#a8c03a58ac20b34e541bad78df16859e8", null ],
    [ "GetGlobalId", "classarmnn_1_1profiling_1_1_i_counter_mappings.xhtml#a83e6db8d13e21158334ca6722b20fa67", null ]
];
<gh_stars>10-100
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.md in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { ApiManagementModels } from "@azure/arm-apimanagement";
import { AzureParentTreeItem, AzureTreeItem, ISubscriptionContext } from "vscode-azureextensionui";
import { nonNullProp } from "../utils/nonNull";
import { treeUtils } from "../utils/treeUtils";
import { IProductTreeRoot } from "./IProductTreeRoot";
import { IServiceTreeRoot } from "./IServiceTreeRoot";
import { ProductApisTreeItem } from "./ProductApisTreeItem";
import { ProductPolicyTreeItem } from "./ProductPolicyTreeItem";
import { ProductsTreeItem } from "./ProductsTreeItem";
/**
 * Tree node representing a single API Management product. Its two fixed
 * children are the product's APIs node and its policy node.
 */
export class ProductTreeItem extends AzureParentTreeItem<IProductTreeRoot> {
    public static contextValue: string = 'azureApiManagementProductTreeItem';
    public contextValue: string = ProductTreeItem.contextValue;
    public readonly commandId: string = 'azureApiManagement.showArmProduct';
    public readonly policyTreeItem: ProductPolicyTreeItem;
    public readonly productApisTreeItem: ProductApisTreeItem;

    // Display name and per-product root, both derived once at construction.
    private readonly _displayName: string;
    private readonly _productRoot: IProductTreeRoot;

    constructor(
        parent: ProductsTreeItem,
        public readonly productContract: ApiManagementModels.ProductContract) {
        super(parent);
        this._displayName = nonNullProp(this.productContract, 'displayName');
        this._productRoot = this.createRoot(parent.root);
        this.productApisTreeItem = new ProductApisTreeItem(this);
        this.policyTreeItem = new ProductPolicyTreeItem(this);
    }

    public get label(): string {
        return this._displayName;
    }

    public get root(): IProductTreeRoot {
        return this._productRoot;
    }

    public get iconPath(): { light: string, dark: string } {
        return treeUtils.getThemedIconPath('product');
    }

    public hasMoreChildrenImpl(): boolean {
        // Children are fixed; there is never another page to load.
        return false;
    }

    public async loadMoreChildrenImpl(): Promise<AzureTreeItem<IProductTreeRoot>[]> {
        return [this.productApisTreeItem, this.policyTreeItem];
    }

    private createRoot(subRoot: ISubscriptionContext): IProductTreeRoot {
        // Extend the service-level tree root with this product's name.
        return { ...(subRoot as IServiceTreeRoot), productName: nonNullProp(this.productContract, 'name') };
    }
}
|
<reponame>Antloup/workflow-js
import Provider from './Provider';
import ActionFunction from '../Elements/Actions/ActionFunction';
/**
 * Provider specialized for ActionFunction elements.
 */
export default class ActionProvider extends Provider<ActionFunction> {
  // Discriminator string identifying this provider kind to the framework.
  getProviderType() {
    return 'action';
  }
}
|
#!/bin/bash
# Pack both DvBCrud.MongoDb projects at the given version and push the
# resulting packages to a NuGet feed.
# Usage: ./publish.sh <version-tag> <nuget-api-key> <nuget-source>
set -euo pipefail
IFS=$'\n\t'
tag=$1
key=$2
src=$3
# Fix: quote all expansions so tags, keys, and source URLs containing special
# characters are never word-split or glob-expanded.
dotnet pack -c Release -p:PackageVersion="${tag}" DvBCrud.MongoDb/DvBCrud.MongoDb.csproj
dotnet pack -c Release -p:PackageVersion="${tag}" DvBCrud.MongoDb.Api/DvBCrud.MongoDb.Api.csproj
dotnet nuget push "DvBCrud.MongoDb/bin/Release/DvBCrud.MongoDb.${tag}.nupkg" --api-key "${key}" --source "${src}"
dotnet nuget push "DvBCrud.MongoDb.Api/bin/Release/DvBCrud.MongoDb.Api.${tag}.nupkg" --api-key "${key}" --source "${src}"
|
require "spec_helper"
RSpec.describe "Converter" do
describe ".in_english" do
it 'converts 0 to "zero"' do
expect(Converter::EnglishNumber.new(0).in_english).to eq("zero")
expect(Converter::EnglishNumber.new(0.5).in_english).to eq("zero")
expect(Converter::EnglishNumber.new(-0.5).in_english).to eq("zero")
end
it 'converts a one- and two-digit positive integer into English words' do
expect(Converter::EnglishNumber.new(6).in_english).to eq("six")
expect(Converter::EnglishNumber.new(11).in_english).to eq("eleven")
expect(Converter::EnglishNumber.new(22).in_english).to eq("twenty-two")
expect(Converter::EnglishNumber.new(30).in_english).to eq("thirty")
end
# NOTE(review): the decimal specs below assert that the fractional part is
# simply dropped (6.5 -> "six"); confirm truncation is the intended contract.
it 'converts a one- and two-digit positive decimal into English words' do
  expect(Converter::EnglishNumber.new(6.5).in_english).to eq("six")
  expect(Converter::EnglishNumber.new(24.5).in_english).to eq("twenty-four")
end

it 'converts a one- and two-digit negative integer into English words' do
  expect(Converter::EnglishNumber.new(-6).in_english).to eq("negative six")
  expect(Converter::EnglishNumber.new(-11).in_english).to eq("negative eleven")
  expect(Converter::EnglishNumber.new(-22).in_english).to eq("negative twenty-two")
  expect(Converter::EnglishNumber.new(-30).in_english).to eq("negative thirty")
end

it 'converts a one- and two-digit negative decimal into English words' do
  expect(Converter::EnglishNumber.new(-6.5).in_english).to eq("negative six")
  expect(Converter::EnglishNumber.new(-24.5).in_english).to eq("negative twenty-four")
end

# Retained from an earlier range-limited version of the converter.
# it 'returns nil for numbers that are greater/less than 99/-99' do
# expect(Converter::EnglishNumber.new(101).in_english).to eq(nil)
# expect(Converter::EnglishNumber.new(-101).in_english).to eq(nil)
# end

it 'converts a three-digit positive integer into English words' do
  expect(Converter::EnglishNumber.new(100).in_english).to eq("one hundred")
  expect(Converter::EnglishNumber.new(101).in_english).to eq("one hundred one")
  expect(Converter::EnglishNumber.new(111).in_english).to eq("one hundred eleven")
  expect(Converter::EnglishNumber.new(123).in_english).to eq("one hundred twenty-three")
  expect(Converter::EnglishNumber.new(687).in_english).to eq("six hundred eighty-seven")
  expect(Converter::EnglishNumber.new(687.78).in_english).to eq("six hundred eighty-seven")
end

it 'converts a three-digit negative integer into English words' do
  expect(Converter::EnglishNumber.new(-100).in_english).to eq("negative one hundred")
  expect(Converter::EnglishNumber.new(-101).in_english).to eq("negative one hundred one")
  expect(Converter::EnglishNumber.new(-111).in_english).to eq("negative one hundred eleven")
  expect(Converter::EnglishNumber.new(-123).in_english).to eq("negative one hundred twenty-three")
  expect(Converter::EnglishNumber.new(-687).in_english).to eq("negative six hundred eighty-seven")
  expect(Converter::EnglishNumber.new(-687.78).in_english).to eq("negative six hundred eighty-seven")
end

it 'converts positive thousands integer into English words' do
  expect(Converter::EnglishNumber.new(1000).in_english).to eq("one thousand")
  expect(Converter::EnglishNumber.new(1001).in_english).to eq("one thousand one")
  expect(Converter::EnglishNumber.new(1011).in_english).to eq("one thousand eleven")
  expect(Converter::EnglishNumber.new(1023).in_english).to eq("one thousand twenty-three")
  expect(Converter::EnglishNumber.new(1687).in_english).to eq("one thousand six hundred eighty-seven")
  expect(Converter::EnglishNumber.new(1687.78).in_english).to eq("one thousand six hundred eighty-seven")
end

it 'converts negative thousands into English words' do
  expect(Converter::EnglishNumber.new(-1000).in_english).to eq("negative one thousand")
  expect(Converter::EnglishNumber.new(-1001).in_english).to eq("negative one thousand one")
  expect(Converter::EnglishNumber.new(-1011).in_english).to eq("negative one thousand eleven")
  expect(Converter::EnglishNumber.new(-1023).in_english).to eq("negative one thousand twenty-three")
  expect(Converter::EnglishNumber.new(-1687).in_english).to eq("negative one thousand six hundred eighty-seven")
  expect(Converter::EnglishNumber.new(-1687.78).in_english).to eq("negative one thousand six hundred eighty-seven")
end

it 'converts positive millions into English words' do
  expect(Converter::EnglishNumber.new(1000000).in_english).to eq("one million")
  expect(Converter::EnglishNumber.new(1000001).in_english).to eq("one million one")
  expect(Converter::EnglishNumber.new(1000011).in_english).to eq("one million eleven")
  expect(Converter::EnglishNumber.new(1000023).in_english).to eq("one million twenty-three")
  expect(Converter::EnglishNumber.new(1369687).in_english).to eq("one million three hundred sixty-nine thousand six hundred eighty-seven")
  expect(Converter::EnglishNumber.new(1369687.78).in_english).to eq("one million three hundred sixty-nine thousand six hundred eighty-seven")
  # NOTE(review): "fourty" is a nonstandard spelling of "forty"; presumably it
  # mirrors the implementation's wordlist — confirm and fix both together.
  expect(Converter::EnglishNumber.new(961349687).in_english).to eq("nine hundred sixty-one million three hundred fourty-nine thousand six hundred eighty-seven")
end

it 'converts negative millions into English words' do
  expect(Converter::EnglishNumber.new(-1000000).in_english).to eq("negative one million")
  expect(Converter::EnglishNumber.new(-1000001).in_english).to eq("negative one million one")
  expect(Converter::EnglishNumber.new(-1000011).in_english).to eq("negative one million eleven")
  expect(Converter::EnglishNumber.new(-1000023).in_english).to eq("negative one million twenty-three")
  expect(Converter::EnglishNumber.new(-1369687).in_english).to eq("negative one million three hundred sixty-nine thousand six hundred eighty-seven")
  expect(Converter::EnglishNumber.new(-1369687.78).in_english).to eq("negative one million three hundred sixty-nine thousand six hundred eighty-seven")
  # NOTE(review): same "fourty" spelling as above — see note there.
  expect(Converter::EnglishNumber.new(-961349687).in_english).to eq("negative nine hundred sixty-one million three hundred fourty-nine thousand six hundred eighty-seven")
end

it 'converts positive billions into English words' do
  expect(Converter::EnglishNumber.new(1000000000).in_english).to eq("one billion")
  expect(Converter::EnglishNumber.new(1000000001).in_english).to eq("one billion one")
  expect(Converter::EnglishNumber.new(1000000011).in_english).to eq("one billion eleven")
  expect(Converter::EnglishNumber.new(1000000023).in_english).to eq("one billion twenty-three")
  expect(Converter::EnglishNumber.new(1369687257).in_english).to eq("one billion three hundred sixty-nine million six hundred eighty-seven thousand two hundred fifty-seven")
  expect(Converter::EnglishNumber.new(1369687257.345).in_english).to eq("one billion three hundred sixty-nine million six hundred eighty-seven thousand two hundred fifty-seven")
  expect(Converter::EnglishNumber.new(415369687257).in_english).to eq("four hundred fifteen billion three hundred sixty-nine million six hundred eighty-seven thousand two hundred fifty-seven")
  expect(Converter::EnglishNumber.new(415369687257.99).in_english).to eq("four hundred fifteen billion three hundred sixty-nine million six hundred eighty-seven thousand two hundred fifty-seven")
end
end
end
|
<filename>weiBoDemo/weiBoDemo/Main/BaseTabBarVC.h
//
// BaseTabBarVC.h
// weiBoDemo
//
// Created by wjm on 16/8/19.
// Copyright © 2016年 XH. All rights reserved.
//
#import <UIKit/UIKit.h>
/// Base tab bar controller for the app's main interface.
/// Subclasses UITabBarController; child controllers are presumably
/// configured in the implementation file — confirm there.
@interface BaseTabBarVC : UITabBarController

@end
|
import React from 'react';
class Main extends React.Component {
constructor(props) {
super(props);
this.state = {
text: 'Hello World',
};
}
render() {
return (
<h3>{this.state.text}</h3>
)
}
}
export default Main; |
<reponame>enigeer1/show12
# --coding--:utf-8 --
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Generated-style (pyuic-like) UI layout for the pose/expression demo.

    Builds a window with a video display area, camera/file source radio
    buttons, Open/Close buttons, and four button+text-edit pairs for showing
    joint angles (elbows and armpits) plus a face-expression button.
    Behavior is wired elsewhere; this class only constructs widgets.
    """

    def setupUi(self, MainWindow):
        """Create and position all widgets on ``MainWindow``.

        Coordinates are fixed pixel geometries, as produced by Qt Designer.
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(853, 734)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Input-source selection: camera vs. local file.
        self.radioButtonCam = QtWidgets.QRadioButton(self.centralwidget)
        self.radioButtonCam.setGeometry(QtCore.QRect(140, 540, 121, 31))
        self.radioButtonCam.setObjectName("radioButtonCam")
        self.radioButtonFile = QtWidgets.QRadioButton(self.centralwidget)
        self.radioButtonFile.setGeometry(QtCore.QRect(140, 580, 121, 31))
        self.radioButtonFile.setObjectName("radioButtonFile")
        self.Open = QtWidgets.QPushButton(self.centralwidget)
        self.Open.setGeometry(QtCore.QRect(350, 560, 121, 41))
        self.Open.setObjectName("Open")
        self.Close = QtWidgets.QPushButton(self.centralwidget)
        self.Close.setGeometry(QtCore.QRect(550, 560, 111, 41))
        self.Close.setObjectName("Close")
        # Video display label. NOTE: "Dispaly" is a typo but is part of the
        # public attribute name; renaming would break external wiring.
        self.DispalyLabel = QtWidgets.QLabel(self.centralwidget)
        # self.DispalyLabel.setGeometry(QtCore.QRect(71, 44, 711, 411))
        self.DispalyLabel.setGeometry(QtCore.QRect(71, 44, 711, 411))
        self.DispalyLabel.setMouseTracking(False)
        self.DispalyLabel.setText("")
        self.DispalyLabel.setObjectName("DispalyLabel")
        # Button for computing the right elbow angle.
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(840, 390, 141, 23))
        self.pushButton.setObjectName("pushButton")
        self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
        self.textEdit.setGeometry(QtCore.QRect(830, 440, 151, 171))
        self.textEdit.setObjectName("textEdit")
        # Button for computing the left elbow angle.
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(1020, 390, 151, 23))
        self.pushButton_2.setObjectName("pushButton_2")
        self.textEdit_2 = QtWidgets.QTextEdit(self.centralwidget)
        self.textEdit_2.setGeometry(QtCore.QRect(1020, 440, 151, 171))
        self.textEdit_2.setObjectName("textEdit_2")
        # Right armpit angle.
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setGeometry(QtCore.QRect(830, 170, 151, 23))
        self.pushButton_3.setObjectName("pushButton_3")
        self.textEdit_3 = QtWidgets.QTextEdit(self.centralwidget)
        self.textEdit_3.setGeometry(QtCore.QRect(830, 210, 151, 161))
        self.textEdit_3.setObjectName("textEdit_3")
        # Left armpit angle.
        self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_4.setGeometry(QtCore.QRect(1020, 170, 151, 23))
        self.pushButton_4.setObjectName("pushButton_4")
        self.textEdit_4 = QtWidgets.QTextEdit(self.centralwidget)
        self.textEdit_4.setGeometry(QtCore.QRect(1020, 210, 151, 161))
        self.textEdit_4.setObjectName("textEdit_4")
        # Facial-expression recognition button.
        self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_5.setGeometry(QtCore.QRect(830, 80, 75, 23))
        self.pushButton_5.setObjectName("pushButton_5")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 853, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Install all user-visible (translatable) widget texts."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.radioButtonCam.setText(_translate("MainWindow", "camera"))
        self.radioButtonFile.setText(_translate("MainWindow", "local file"))
        self.Open.setText(_translate("MainWindow", "Open"))
        self.Close.setText(_translate("MainWindow", "Close"))
        # Labels for the angle-computation buttons.
        self.pushButton.setText(_translate("MainWindow", "show_right_eblow_angel"))
        self.pushButton_2.setText(_translate("MainWindow", "show_left_eblow_angel"))
        self.pushButton_3.setText(_translate("MainWindow", "show_right_Armpit_angel "))
        self.pushButton_4.setText(_translate("MainWindow", "show_left_Armpit_angel "))
        # Facial-expression recognition widget label.
        self.pushButton_5.setText(_translate("MainWindow", "show_face"))
|
// Code generated by go-swagger; DO NOT EDIT.
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// RequestMessage RequestMessage
//
// Both fields are optional; zero values are omitted from the JSON encoding.
//
// swagger:model RequestMessage
type RequestMessage struct {

	// message date
	// Format: date-time
	MessageDate *strfmt.DateTime `json:"messageDate,omitempty"`

	// message value
	MessageValue string `json:"messageValue,omitempty"`
}
// Validate validates this request message.
// Only the messageDate format is currently checked; any collected errors
// are wrapped together in a single CompositeValidationError.
func (m *RequestMessage) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateMessageDate(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// validateMessageDate reports an error when MessageDate is set but is not a
// valid "date-time" value; a nil/zero MessageDate is accepted (the field is
// optional in the swagger model).
func (m *RequestMessage) validateMessageDate(formats strfmt.Registry) error {

	if swag.IsZero(m.MessageDate) { // not required
		return nil
	}

	if err := validate.FormatOf("messageDate", "body", "date-time", m.MessageDate.String(), formats); err != nil {
		return err
	}

	return nil
}
// MarshalBinary interface implementation.
// Serializes the model as JSON; a nil receiver yields a nil byte slice.
func (m *RequestMessage) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation.
// Decodes into a temporary value first so the receiver is left untouched
// when decoding fails.
func (m *RequestMessage) UnmarshalBinary(b []byte) error {
	var res RequestMessage
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
|
/**
* file : LKAudioEncoder.h
* author : Rex
* create : 2017-02-17 16:59
* func :
* history:
*/
/*
 * Include guard. Renamed from __LKAUDIOENCODER_H_: identifiers beginning
 * with a double underscore are reserved for the implementation by the C
 * standard, so user headers must not define them.
 */
#ifndef LKAUDIOENCODER_H_
#define LKAUDIOENCODER_H_

/* Header currently empty; declarations belong between the guard lines. */

#endif /* LKAUDIOENCODER_H_ */
|
# Clean artifacts from a previous run.
rm -rf application.zip
# NOTE(review): this removes lower-case "zipper.zip" but the final package
# below is written as "Zipper.zip" — on a case-sensitive filesystem a stale
# Zipper.zip is never cleaned; confirm which name is intended.
rm -rf zipper.zip
# Build the web app into dist/.
npm run build
# Ship icons and the app manifest alongside the built assets.
cp zipper_64.png zipper_128.png manifest.webapp dist
cd dist
# Package the app payload (quiet), written one directory up.
zip -q ../application.zip css/* index.html js/* zipper_64.png zipper_128.png manifest.webapp
cd ..
# Wrap the payload plus store metadata into the distributable archive.
zip -q Zipper.zip application.zip metadata.json update.webapp
# The inner payload is no longer needed once wrapped.
rm -rf application.zip
<filename>test/TestOrganisation/projectFail2/public/js/bottom1.js
this is invalid javascript
alert('hello'); |
import React from "react";
import { StyleSheet, View, Text } from "react-native";
import { IconButton } from "react-native-paper";
import colors from "../constants/colors";
export default (props) => {
return (
<View style={styles.brand}>
<IconButton icon="bullseye" style={{ margin: 0 }} color={colors.secondaryLight} />
<Text style={{ fontSize: 16, color: colors.secondaryLight }}>Simplist</Text>
</View>
);
}
// Layout for the brand row: icon and label on one line, vertically centered.
const styles = StyleSheet.create({
  brand: {
    flexDirection: "row",
    alignItems: "center",
    // Negative offset presumably compensates for IconButton's built-in
    // padding so the icon sits flush left — confirm visually.
    marginLeft: -5,
    marginTop: 20
  }
});
<filename>bank.go
// Package ingaugo provides a screenscraping interface to ING Australia Bank
package ingaugo
import "log"
// Bank is the handle for a screenscraping session against ING Australia.
type Bank struct {
	// wsURL, when non-empty, is the DevTools websocket endpoint of an
	// existing browser; empty means a local Chrome will be launched.
	wsURL string
}

// customLog gates log output behind the package-level debug flag.
type customLog struct {
	debugLog bool
}

// clog is the package-wide logger, toggled via SetDebug.
var clog customLog

// tokenResponse models a JSON body carrying an auth token.
type tokenResponse struct {
	Token string
}
// NewBank is used to initialize and return a Bank that works by launching
// a local browser instance. It depends on the 'google-chrome' executable
// being in $PATH.
func NewBank() Bank {
	return Bank{}
}
// NewBankWithWS initialises and returns a Bank that will attempt to
// connect to an already-running browser via a websocket URL of the form
// ws://<hostname>:<port>.
func NewBankWithWS(websocketURL string) Bank {
	return Bank{wsURL: websocketURL}
}
// SetDebug turns on/off verbose logging to stderr for the whole package.
func SetDebug(state bool) {
	clog.debugLog = state
}
// Printf logs a formatted message via the standard logger, but only when
// debug logging has been enabled through SetDebug.
func (l customLog) Printf(format string, v ...interface{}) {
	if l.debugLog {
		// Fix: spread the variadic args with v... — passing plain v handed
		// the whole slice to log.Printf as a single operand, producing
		// output like "[a b]" (plus a vet warning) instead of formatting
		// each argument against its verb.
		log.Printf(format, v...)
	}
}
// Println logs a single message (with trailing newline) through Printf,
// so it is likewise suppressed unless debug logging is enabled.
func (l customLog) Println(msg string) {
	l.Printf("%v\n", msg)
}
|
<filename>webapp/controller/v1.controller.js
sap.ui.define([
    "sap/ui/core/mvc/Controller"
], function(Controller) {
    "use strict";

    /**
     * Controller for the v1 view: seeds demo workbook data, provides a
     * status formatter, and handles button press / search events.
     */
    return Controller.extend("com.lbrandsHelloLBrands.controller.v1", {

        /**
         * Lifecycle hook: builds a hard-coded workbook list and exposes it
         * to bindings under the named model "local".
         */
        onInit: function() {
            var listData = {
                data: [{
                    "workbookname": "Romantic Bras", // fix: was misspelled "Romatic Bras"
                    "templatename": "Spring 2018",
                    "category": "Bras",
                    "emotionalspace": "Playful Sexy"
                }, {
                    "workbookname": "Performance Sports Bras",
                    "templatename": "Fall 2018",
                    "category": "Bras",
                    "emotionalspace": "Sport"
                }]
            };
            var jModel = new sap.ui.model.json.JSONModel();
            jModel.setData(listData);
            this.getView().setModel(jModel, "local");
        },

        /**
         * Formatter: maps an employee count to a display status.
         * @param {number} sValue bound value
         * @returns {string} 'Active' when greater than 5, otherwise 'Not Active'
         */
        formatActiveEmployeeText: function(sValue) {
            if (sValue > 5) {
                return 'Active';
            } else {
                return 'Not Active';
            }
        },

        // Placeholder press handler — real logic still to be implemented.
        onBtnClick: function() {
            //Here comes logic
            alert(1);
        },

        /**
         * Search handler: filters the table's items binding down to rows
         * whose FirstName contains the query string.
         */
        onSearch: function(oEvent) {
            var searchString = oEvent.getParameter("query");
            var oTable = this.getView().byId("__table0");
            var oFilter = new sap.ui.model.Filter({
                path: "FirstName",
                operator: "Contains",
                value1: searchString
            });
            oTable.getBinding("items").filter([oFilter]);
        }
    });
});
#! /bin/bash
# SLURM batch job: single-rank, 28-thread REXI scalability run on mpp2.
#SBATCH -o /home/martin/workspace/sweet/benchmarks/rexi_tests_lrz_freq_waves/2015_12_27_scalability_rexi_fd/run_rexi_par_m000128_t028_n0128_r0001_a1.txt
###SBATCH -e /home/martin/workspace/sweet/benchmarks/rexi_tests_lrz_freq_waves/2015_12_27_scalability_rexi_fd/run_rexi_par_m000128_t028_n0128_r0001_a1.err
#SBATCH -J rexi_par_m000128_t028_n0128_r0001_a1
#SBATCH --get-user-env
#SBATCH --clusters=mpp2
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=28
#SBATCH --exclusive
#SBATCH --export=NONE
#SBATCH --time=08:00:00

#declare -x NUMA_BLOCK_ALLOC_VERBOSITY=1

# Pin one OpenMP thread per hardware thread, compact placement.
declare -x KMP_AFFINITY="granularity=thread,compact,1,0"
declare -x OMP_NUM_THREADS=28

echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
echo

# Load the toolchain expected by the SWEET build (Intel 16 + Intel MPI 5.1).
. /etc/profile.d/modules.sh

module unload gcc
module unload fftw

module unload python
module load python/2.7_anaconda_nompi

module unload intel
module load intel/16.0
module unload mpi.intel
module load mpi.intel/5.1
module load gcc/5

cd /home/martin/workspace/sweet/benchmarks/rexi_tests_lrz_freq_waves/2015_12_27_scalability_rexi_fd
cd ../../../

. local_software/env_vars.sh

# force to use FFTW WISDOM data
declare -x SWEET_FFTW_LOAD_WISDOM_FROM_FILE="FFTW_WISDOM_nofreq_T0"

# NOTE(review): -C appears twice on this command line (0.3 and -5.0); the
# generator presumably intends the later value to win — confirm.
time -p mpiexec.hydra -genv OMP_NUM_THREADS 28 -envall -ppn 1 -n 1 ./build/rexi_par_m_tno_a1 --initial-freq-x-mul=2.0 --initial-freq-y-mul=1.0 -f 1 -g 1 -H 1 -X 1 -Y 1 --compute-error 1 -t 50 -R 4 -C 0.3 -N 128 -U 0 -S 0 --use-specdiff-for-complex-array 1 --rexi-h 0.2 --timestepping-mode 1 --staggering 0 --rexi-m=128 -C -5.0
|
#!/bin/bash
# npps: builds the plain (no-wine-prefix) Notepad++ AppImage.
# Downloads the upstream installer, unpacks it with 7z, arranges the
# AppDir layout, and packs it with appimagetool.
npps () {
# Convert and copy icon which is needed for desktop integration into place:
wget -q https://github.com/mmtrt/notepad-plus-plus/raw/master/snap/local/src/notepad-plus-plus.png -O notepad-plus-plus.png
for width in 8 16 22 24 32 36 42 48 64 72 96 128 192 256; do
  dir=icons/hicolor/${width}x${width}/apps
  mkdir -p $dir
  convert notepad-plus-plus.png -resize ${width}x${width} $dir/notepad-plus-plus.png
done
# Fetch appimagetool and self-extract it (squashfs-root/AppRun used below).
wget -q "https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage"
chmod +x ./appimagetool-x86_64.AppImage
./appimagetool-x86_64.AppImage --appimage-extract &>/dev/null
# Scrape the latest Notepad++ version string from the GitHub releases page.
# NOTE(review): ver is intentionally not declared local — nppswp reads it.
ver=$(wget https://api.github.com/repos/notepad-plus-plus/notepad-plus-plus/releases -qO - 2>&1 | grep "Notepad++ " | sed s'|"| |g' | awk '{print $5}' | head -n1)
wget -q https://github.com/$(wget -qO- https://github.com/notepad-plus-plus/notepad-plus-plus/releases | grep download/ | cut -d '"' -f2 | sed -n 5p)
# Unpack the Windows installer, excluding docs/config files we do not ship.
7z x -aos "npp.$ver.Installer.exe" -x'!change.log' -x'!doLocalConf.xml' -x'!LICENSE' -x'!NppShell_06.dll' -x'!readme.txt' -x'!userDefinedLang-markdown.default.modern.xml' -o"npp-stable/usr/share/notepad-plus-plus" &>/dev/null
# winedata
appdir="npp-stable/usr/share/notepad-plus-plus"
mkdir -p "npp-stable/winedata/Application Data/Notepad++" && mkdir -p "npp-stable/usr/share/notepad-plus-plus/plugins/Config"
mv $appdir/notepad++.exe $appdir/notepad-plus-plus.exe
# Relocate installer payload dirs ($_14_/$_15_/$_17_) into their wine/app slots.
cp -R $appdir/'$_14_'/* "npp-stable/winedata/Application Data/Notepad++";cp -R $appdir/'$_15_'/* "npp-stable/usr/share/notepad-plus-plus/plugins";cp -R $appdir/'$_17_'/* "npp-stable/usr/share/notepad-plus-plus/plugins/Config"
find $appdir/'$PLUGINSDIR' -type f -name '*.xml' -print0 | while read -d $'\0' file; do cp -v "$file" $appdir/localization/ &>/dev/null; done
rm -R $appdir/'$_14_';rm -R $appdir/'$_15_';rm -R $appdir/'$_17_';rm -R $appdir/'$PLUGINSDIR';
find "npp-stable/usr" -type d -execdir chmod 755 {} + && find "npp-stable/winedata" -type d -execdir chmod 755 "{}" +
rm ./*.exe
# Assemble the AppDir (desktop file, AppRun with version patched in, icons).
mkdir -p npp-stable/usr/bin ; cp notepad-plus-plus.desktop npp-stable ; cp AppRun npp-stable ; sed -i -e 's|progVer=|progVer='"$ver"'|g' npp-stable/AppRun
cp -r icons npp-stable/usr/share ; cp notepad-plus-plus.png npp-stable
# Pack the AppImage with zsync update info embedded.
export ARCH=x86_64; squashfs-root/AppRun -v ./npp-stable -u "gh-releases-zsync|mmtrt|notepad-plus-plus_AppImage|stable|notepad*.AppImage.zsync" notepad-plus-plus_"${ver}"-${ARCH}.AppImage &>/dev/null
}
# nppswp: builds the "with wine prefix" variant — runs npps first, then
# bundles a pre-created 32-bit WINEPREFIX inside the AppDir.
nppswp () {
export WINEDLLOVERRIDES="mscoree,mshtml="
export WINEARCH="win32"
export WINEPREFIX="/home/runner/.wine"
export WINEDEBUG="-all"
# Build the AppDir via npps (also sets $ver), discard its AppImage output.
npps ; rm ./*AppImage*
# Scrape the current wine-stable version from the WineHQ package index.
WINE_VER="$(wget -qO- https://dl.winehq.org/wine-builds/ubuntu/dists/focal/main/binary-i386/ | grep wine-stable | sed 's|_| |g;s|~| |g' | awk '{print $5}' | tail -n1)"
wget -q https://github.com/mmtrt/WINE_AppImage/releases/download/continuous-stable/wine-stable_${WINE_VER}-x86_64.AppImage
chmod +x *.AppImage ; mv wine-stable_${WINE_VER}-x86_64.AppImage wine-stable.AppImage
# Create WINEPREFIX
./wine-stable.AppImage wineboot ; sleep 5
# Removing any existing user data
( cd "$WINEPREFIX/drive_c/" ; rm -rf users ) || true
# Copy the prepared prefix into the AppDir and apply the wrapper patch.
cp -Rp $WINEPREFIX npp-stable/ ; rm -rf $WINEPREFIX ; rm ./*.AppImage
( cd npp-stable ; wget -qO- 'https://gist.github.com/mmtrt/df659de58e36ee091e203ab3c1460619/raw/3da5e9ba5b376a5e5836c5452a8d58e84e0fa9f2/nppswp.patch' | patch -p1 )
# Pack the _WP AppImage with its own zsync update channel.
export ARCH=x86_64; squashfs-root/AppRun -v ./npp-stable -n -u "gh-releases-zsync|mmtrt|notepad-plus-plus_AppImage|stable-wp|notepad*.AppImage.zsync" notepad-plus-plus_"${ver}"_WP-${ARCH}.AppImage &>/dev/null
}
# Dispatch on the first CLI argument: "stable" builds the plain AppImage,
# "stablewp" the wine-prefix variant; results land in dist/.
if [ "$1" == "stable" ]; then
npps
( mkdir -p dist ; mv notepad-plus-plus*.AppImage* dist/. ; cd dist || exit ; chmod +x ./*.AppImage )
elif [ "$1" == "stablewp" ]; then
nppswp
( mkdir -p dist ; mv notepad-plus-plus*.AppImage* dist/. ; cd dist || exit ; chmod +x ./*.AppImage )
fi
|
def draw_histogram(data):
    """Render a horizontal text histogram on stdout.

    Each value in ``data`` yields one line made of that many ``#``
    characters (a value of 0 prints an empty line).
    """
    for count in data:
        row = "#" * count
        print(row)
draw_histogram([1, 2, 3, 4, 5]) |
<reponame>xdvio/native-bip39
/*!
* bip39.
* bip39.cpp
*
* \date 2019
* \author <NAME> (<EMAIL>)
* \link https://github.com/edwardstock
*/
#include <minter/bip39/Bip39Mnemonic.h>
#include <cstdlib>
#include "bip39.h"
// Returns a newly allocated array of NUL-terminated copies of every
// supported wordlist language name; *num_written receives the count.
// Ownership passes to the caller — release with minter_free_string_array().
char **minter_get_languages(size_t *num_written) {
    std::vector<std::string> tmp = minter::Bip39Mnemonic::getLanguages();
    *num_written = tmp.size();
    char **out = new char *[tmp.size()];
    for (size_t i = 0; i < tmp.size(); i++) {
        const size_t sz = tmp[i].size();
        out[i] = new char[sz+1];
        tmp.at(i).copy(out[i], sz);
        out[i][sz] = '\0'; // std::string::copy does not NUL-terminate
    }
    return out;
}
// Releases an array of C strings previously handed out by this library
// (e.g. minter_get_languages). Each entry is zeroed before deletion;
// a null array is ignored. Relies on <cstring> for memset/strlen.
void minter_free_string_array(char **arr, size_t len) {
    if (arr == nullptr) {
        // nothing to free
        return;
    }
    for (size_t i = 0; i < len; i++) {
        char *item = arr[i];
        if (item != nullptr) {
            memset(item, 0, sizeof(char) * strlen(item));
            delete[] item;
        }
    }
    delete[] arr;
}
// Returns a newly allocated array of NUL-terminated copies of every word
// in the given language's wordlist; *num_written receives the count.
// Ownership passes to the caller — release with minter_free_string_array().
char **minter_get_words_from_language(const char *lang, size_t *num_written) {
    std::vector<std::string> tmp = minter::Bip39Mnemonic::getWordsFromLanguage(lang);
    *num_written = tmp.size();
    char **out = new char *[tmp.size()];
    for (size_t i = 0; i < tmp.size(); i++) {
        const size_t sz = tmp[i].size();
        out[i] = new char[sz+1];
        tmp.at(i).copy(out[i], sz);
        out[i][sz] = '\0'; // std::string::copy does not NUL-terminate
    }
    return out;
}
// Deep-copies a C++ MnemonicResult into a heap-allocated C-ABI struct:
// every word and the raw phrase become independently new[]-ed,
// NUL-terminated strings, and the status enum is mapped one-to-one.
// The returned struct (and all its buffers) is owned by the caller.
minter_mnemonic_result *copy_mnemonic(minter::Bip39Mnemonic::MnemonicResult &&res) {
    auto *out = new minter_mnemonic_result();
    out->len = res.len;
    out->words = new char *[res.len];
    for (size_t i = 0; i < res.len; i++) {
        out->words[i] = new char[res.words[i].size()+1];
        res.words[i].copy(out->words[i], res.words[i].size());
        out->words[i][res.words[i].size()] = '\0'; // copy() does not terminate
    }
    // Translate the C++ status enum to the C enum exposed by the ABI.
    switch (res.status) {
    case minter::Bip39Mnemonic::MnemonicStatus::Ok:out->status = minter_mnemonic_status::Ok;
        break;
    case minter::Bip39Mnemonic::MnemonicStatus::UnknownError:out->status = minter_mnemonic_status::UnknownError;
        break;
    case minter::Bip39Mnemonic::MnemonicStatus::UnsupportedEntropy:
        out->status = minter_mnemonic_status::UnsupportedEntropy;
        break;
    }
    out->raw = new char[res.raw.size()+1];
    res.raw.copy(out->raw, res.raw.size());
    out->raw[res.raw.size()] = '\0';
    return out;
}
// Generates a fresh mnemonic for the given language and entropy size.
// Caller owns the result; release with minter_free_mnemonic().
minter_mnemonic_result *minter_generate_mnemonic(const char *lang, size_t entropy) {
    return copy_mnemonic(minter::Bip39Mnemonic::generate(lang, entropy));
}
// Releases a mnemonic result previously returned by this library.
// Frees each word, the word array, the raw phrase, and the struct itself.
// Safe to call with nullptr.
void minter_free_mnemonic(minter_mnemonic_result *mnemonic) {
    if (!mnemonic) return;
    for (size_t i = 0; i < mnemonic->len; i++) {
        delete[] mnemonic->words[i];
    }
    delete[] mnemonic->words;
    delete[] mnemonic->raw;
    // Fix: the result struct is itself heap-allocated with `new` in
    // copy_mnemonic, so it must be deleted here too — previously every
    // generate/encode call leaked sizeof(minter_mnemonic_result).
    delete mnemonic;
}
// Encodes the given entropy bytes as a mnemonic using the `lang` wordlist.
// Caller owns the result; release with minter_free_mnemonic().
minter_mnemonic_result *minter_encode_bytes(const uint8_t *src, const char *lang, size_t entropy) {
    return copy_mnemonic(minter::Bip39Mnemonic::encodeBytes(src, lang, entropy));
}

// Derives the binary seed for a mnemonic phrase into out64, storing the
// number of bytes written in *num_written. The name suggests a 64-byte
// output buffer is required — confirm against Bip39Mnemonic::wordsToSeed.
void minter_words_to_seed(const char *words, uint8_t *out64, size_t *num_written) {
    minter::Bip39Mnemonic::wordsToSeed(words, out64, num_written);
}

// Validates a mnemonic phrase against the `lang` wordlist; validation
// details are delegated entirely to the C++ implementation.
bool minter_validate_words(const char *lang, const char *mnemonic) {
    return minter::Bip39Mnemonic::validateWords(lang, mnemonic);
}
|
#!/bin/bash
# Install puppet via apt if it is not already on PATH.
# `command -v` replaces the non-portable external `which`.
if command -v puppet > /dev/null; then
    echo "Found puppet installation"
else
    # Fix: the original message ("puppet installation") looked like a
    # copy-paste remnant and did not say an install was starting.
    echo "puppet not found, installing"
    apt-get install -y -q puppet
fi
|
//
// LSDCommonResult.h
// LSBluetooth-Library
//
// Created by lifesense on 16/1/17.
// Copyright © 2016年 Lifesense. All rights reserved.
//
#import "LSDBaseModel.h"
/// Generic command result model; presumably parsed from a device/Bluetooth
/// response — confirm against LSDBaseModel's deserialization.
@interface LSDCommonResult : LSDBaseModel

/// Identifier of the command this result responds to.
@property (nonatomic, assign) int cmd;
//@property (nonatomic, assign) int packageIndex;
/// Whether the command completed successfully.
@property (nonatomic, assign) BOOL isSuccess;

@end
|
import * as ace from 'brace';
import { ComponentFixture, TestBed } from '@angular/core/testing';
import { JhiLanguageHelper } from 'app/core/language/language.helper';
import * as chai from 'chai';
import * as sinonChai from 'sinon-chai';
import { SinonStub, stub } from 'sinon';
import { ArtemisTestModule } from '../../test.module';
import { MockActivatedRouteWithSubjects } from '../../helpers/mocks/activated-route/mock-activated-route-with-subjects';
import { MockSyncStorage } from '../../helpers/mocks/service/mock-sync-storage.service';
import { MockComponent } from 'ng-mocks';
import { ArtemisSharedModule } from 'app/shared/shared.module';
import { MockAlertService } from '../../helpers/mocks/service/mock-alert.service';
import { AlertService } from 'app/core/alert/alert.service';
import { ActivatedRoute, Router, RouterModule } from '@angular/router';
import { MockRouter } from '../../helpers/mocks/mock-router';
import { of, throwError } from 'rxjs';
import { HttpErrorResponse } from '@angular/common/http';
import { SidePanelComponent } from 'app/shared/side-panel/side-panel.component';
import { CollapsableAssessmentInstructionsComponent } from 'app/assessment/assessment-instructions/collapsable-assessment-instructions/collapsable-assessment-instructions.component';
import { AssessmentInstructionsComponent } from 'app/assessment/assessment-instructions/assessment-instructions/assessment-instructions.component';
import { TutorParticipationGraphComponent } from 'app/shared/dashboards/tutor-participation-graph/tutor-participation-graph.component';
import { TutorLeaderboardComponent } from 'app/shared/dashboards/tutor-leaderboard/tutor-leaderboard.component';
import { TranslateModule } from '@ngx-translate/core';
import { ArtemisSharedComponentModule } from 'app/shared/components/shared-component.module';
import { ArtemisProgrammingAssessmentModule } from 'app/exercises/programming/assess/programming-assessment.module';
import { LocalStorageService, SessionStorageService } from 'ngx-webstorage';
import { ArtemisAssessmentSharedModule } from 'app/assessment/assessment-shared.module';
import { GuidedTourMapping } from 'app/guided-tour/guided-tour-setting.model';
import { GuidedTourService } from 'app/guided-tour/guided-tour.service';
import { DeviceDetectorService } from 'ngx-device-detector';
import { ModelingSubmission } from 'app/entities/modeling-submission.model';
import { ArtemisProgrammingExerciseInstructionsRenderModule } from 'app/exercises/programming/shared/instructions-render/programming-exercise-instructions-render.module';
import { ModelingExercise } from 'app/entities/modeling-exercise.model';
import { HeaderExercisePageWithDetailsComponent } from 'app/exercises/shared/exercise-headers/header-exercise-page-with-details.component';
import { TutorExerciseDashboardComponent } from 'app/exercises/shared/dashboards/tutor/tutor-exercise-dashboard.component';
import { ExerciseType } from 'app/entities/exercise.model';
import { ModelingEditorComponent } from 'app/exercises/modeling/shared/modeling-editor.component';
import { ModelingSubmissionService } from 'app/exercises/modeling/participate/modeling-submission.service';
import { TutorParticipationStatus } from 'app/entities/participation/tutor-participation.model';
import { ExerciseService } from 'app/exercises/shared/exercise/exercise.service';
import { ArtemisResultModule } from 'app/exercises/shared/result/result.module';
import { HeaderParticipationPageComponent } from 'app/exercises/shared/exercise-headers/header-participation-page.component';
import { StructuredGradingInstructionsAssessmentLayoutComponent } from 'app/assessment/structured-grading-instructions-assessment-layout/structured-grading-instructions-assessment-layout.component';
import { StatsForDashboard } from 'app/course/dashboards/instructor-course-dashboard/stats-for-dashboard.model';
chai.use(sinonChai);
const expect = chai.expect;
// Specs for TutorExerciseDashboardComponent: submission-lock handling and
// assessment-percentage computation, with ExerciseService stubbed inline.
describe('TutorExerciseDashboardComponent', () => {
    // needed to make sure ace is defined
    ace.acequire('ace/ext/modelist.js');

    let comp: TutorExerciseDashboardComponent;
    let fixture: ComponentFixture<TutorExerciseDashboardComponent>;
    let modelingSubmissionService: ModelingSubmissionService;
    let modelingSubmissionStub: SinonStub;
    let guidedTourService: GuidedTourService;

    // Shared fixtures: a trained-tutor modeling exercise, one submission,
    // and dashboard stats (12+5 submissions, 9+1 assessments).
    const exercise = { id: 20, type: ExerciseType.MODELING, tutorParticipations: [{ status: TutorParticipationStatus.TRAINED }] } as ModelingExercise;
    const submission = { id: 30 } as ModelingSubmission;
    const stats = {
        numberOfSubmissions: { inTime: 12, late: 5 },
        numberOfAssessments: { inTime: 9, late: 1 },
    } as StatsForDashboard;
    const lockLimitErrorResponse = new HttpErrorResponse({ error: { errorKey: 'lockedSubmissionsLimitReached' } });

    beforeEach(async () => {
        return TestBed.configureTestingModule({
            imports: [
                ArtemisTestModule,
                ArtemisSharedModule,
                ArtemisSharedComponentModule,
                ArtemisProgrammingAssessmentModule,
                ArtemisProgrammingExerciseInstructionsRenderModule,
                ArtemisResultModule,
                RouterModule,
                TranslateModule.forRoot(),
                ArtemisAssessmentSharedModule,
            ],
            declarations: [
                TutorExerciseDashboardComponent,
                MockComponent(TutorLeaderboardComponent),
                MockComponent(TutorParticipationGraphComponent),
                MockComponent(HeaderExercisePageWithDetailsComponent),
                MockComponent(HeaderParticipationPageComponent),
                MockComponent(SidePanelComponent),
                MockComponent(ModelingEditorComponent),
                MockComponent(CollapsableAssessmentInstructionsComponent),
                MockComponent(AssessmentInstructionsComponent),
                MockComponent(StructuredGradingInstructionsAssessmentLayoutComponent),
            ],
            providers: [
                JhiLanguageHelper,
                DeviceDetectorService,
                { provide: AlertService, useClass: MockAlertService },
                { provide: ActivatedRoute, useClass: MockActivatedRouteWithSubjects },
                { provide: Router, useClass: MockRouter },
                { provide: LocalStorageService, useClass: MockSyncStorage },
                { provide: SessionStorageService, useClass: MockSyncStorage },
                {
                    // Inline stub: delivers the fixture exercise and stats
                    // synchronously through subscribe-shaped objects.
                    provide: ExerciseService,
                    useValue: {
                        getForTutors() {
                            return {
                                subscribe: (fn: (value: any) => void) =>
                                    fn({
                                        body: exercise,
                                    }),
                            };
                        },
                        getStatsForTutors() {
                            return {
                                subscribe: (fn: (value: any) => void) =>
                                    fn({
                                        body: stats,
                                    }),
                            };
                        },
                    },
                },
            ],
        })
            .overrideModule(ArtemisTestModule, { set: { declarations: [], exports: [] } })
            .compileComponents()
            .then(() => {
                fixture = TestBed.createComponent(TutorExerciseDashboardComponent);
                comp = fixture.componentInstance;
                modelingSubmissionService = TestBed.inject(ModelingSubmissionService);
                guidedTourService = TestBed.inject(GuidedTourService);
                comp.exerciseId = exercise.id!;
                modelingSubmissionStub = stub(modelingSubmissionService, 'getModelingSubmissionForExerciseWithoutAssessment');
            });
    });

    afterEach(() => {
        modelingSubmissionStub.restore();
    });

    it('should set unassessedSubmission if lock limit is not reached', () => {
        const guidedTourMapping = {} as GuidedTourMapping;
        spyOn<any>(guidedTourService, 'checkTourState').and.returnValue(true);
        guidedTourService.guidedTourMapping = guidedTourMapping;
        modelingSubmissionStub.returns(of(submission));
        comp.loadAll();
        expect(modelingSubmissionStub).to.have.been.calledOnceWithExactly(exercise.id);
        expect(comp.unassessedSubmission).to.equal(submission);
        expect(comp.submissionLockLimitReached).to.be.false;
    });

    it('should not set unassessedSubmission if lock limit is reached', () => {
        modelingSubmissionStub.returns(throwError(lockLimitErrorResponse));
        comp.loadAll();
        expect(modelingSubmissionStub).to.have.been.calledOnceWithExactly(exercise.id);
        expect(comp.unassessedSubmission).to.be.undefined;
        expect(comp.submissionLockLimitReached).to.be.true;
    });

    it('should have correct percentages calculated', () => {
        modelingSubmissionStub.returns(of(submission));
        comp.loadAll();
        expect(modelingSubmissionStub).to.have.been.calledOnceWithExactly(exercise.id);
        // Presumably 9/12 in-time assessments -> 75% and 1/5 late -> 20%;
        // confirm against the component's percentage computation.
        expect(comp.totalAssessmentPercentage.inTime).to.equal(75);
        expect(comp.totalAssessmentPercentage.late).to.equal(20);
    });
});
|
const {hcloud} = require('./head')
const fs = require('fs')
const path = require('path')
// Integration specs for the wxacode (WeChat mini-program QR code) API.
// Each case fetches binary image data over the network and writes it next
// to this spec file, so these tests require valid credentials in ./head.
describe("小程序码", function () {
    it("[wxacode.get]获取小程序码", function (done) {
        let targetPath = path.join(__dirname,'qrcode.jpg')
        hcloud.wxacode().get({
            scene: 1,
            path: '/pages/10why/index/index',
            width: 280,
        }).then(bufferStr => {
            // Persist the returned binary payload for manual inspection.
            fs.writeFileSync(targetPath, bufferStr, 'binary')
            done()
        }).catch(err => {
            console.error(err)
            done.fail(err)
        })
    });

    it("[wxacode.createQRCode]获取小程序二维码", function (done) {
        let targetPath = path.join(__dirname,'qrcode2.jpg')
        hcloud.wxacode().createQRCode({
            path: '/pages/10why/index/index',
            width: 280,
        }).then(bufferStr => {
            fs.writeFileSync(targetPath, bufferStr, 'binary')
            done()
        }).catch(err => {
            console.error(err)
            done.fail(err)
        })
    });

    it("[wxacode.getUnlimited]获取小程序二维码", function (done) {
        let targetPath = path.join(__dirname,'qrcode3.jpg')
        hcloud.wxacode().getUnlimited({
            scene: 1,
            path: '/pages/10why/index/index',
            width: 280,
        }).then(bufferStr => {
            fs.writeFileSync(targetPath, bufferStr, 'binary')
            done()
        }).catch(err => {
            console.error(err)
            done.fail(err)
        })
    });
});
<filename>src/main/java/app/habitzl/elasticsearch/status/monitor/tool/client/data/cluster/ClusterInfo.java
package app.habitzl.elasticsearch.status.monitor.tool.client.data.cluster;
import javax.annotation.concurrent.Immutable;
import java.io.Serializable;
import java.util.Objects;
import java.util.StringJoiner;
/**
 * Immutable, serializable snapshot of cluster-level status information
 * (name, health, node counts, shard counts, and the current master node id).
 */
@Immutable
public final class ClusterInfo implements Serializable {
    private static final long serialVersionUID = 1L;

    private final String clusterName;
    private final ClusterHealthStatus healthStatus;
    private final int numberOfNodes;
    private final int numberOfDataNodes;
    private final int numberOfActiveShards;
    private final int numberOfPrimaryShards;
    private final int numberOfInitializingShards;
    private final int numberOfUnassignedShards;
    private final String masterNodeId;

    public ClusterInfo(
            final String clusterName,
            final ClusterHealthStatus healthStatus,
            final int numberOfNodes,
            final int numberOfDataNodes,
            final int numberOfActiveShards,
            final int numberOfPrimaryShards,
            final int numberOfInitializingShards,
            final int numberOfUnassignedShards,
            final String masterNodeId) {
        this.clusterName = clusterName;
        this.healthStatus = healthStatus;
        this.numberOfNodes = numberOfNodes;
        this.numberOfDataNodes = numberOfDataNodes;
        this.numberOfActiveShards = numberOfActiveShards;
        this.numberOfPrimaryShards = numberOfPrimaryShards;
        this.numberOfInitializingShards = numberOfInitializingShards;
        this.numberOfUnassignedShards = numberOfUnassignedShards;
        this.masterNodeId = masterNodeId;
    }

    public String getClusterName() {
        return clusterName;
    }

    public ClusterHealthStatus getHealthStatus() {
        return healthStatus;
    }

    public int getNumberOfNodes() {
        return numberOfNodes;
    }

    public int getNumberOfDataNodes() {
        return numberOfDataNodes;
    }

    public int getNumberOfActiveShards() {
        return numberOfActiveShards;
    }

    public int getNumberOfPrimaryShards() {
        return numberOfPrimaryShards;
    }

    public int getNumberOfInitializingShards() {
        return numberOfInitializingShards;
    }

    public int getNumberOfUnassignedShards() {
        return numberOfUnassignedShards;
    }

    public String getMasterNodeId() {
        return masterNodeId;
    }

    @Override
    @SuppressWarnings("CyclomaticComplexity")
    public boolean equals(final Object o) {
        boolean isEqual;
        if (this == o) {
            isEqual = true;
        } else if (o == null || getClass() != o.getClass()) {
            isEqual = false;
        } else {
            ClusterInfo that = (ClusterInfo) o;
            // Compare primitives with == (Objects.equals on ints would
            // autobox both operands on every comparison for no benefit).
            isEqual = numberOfNodes == that.numberOfNodes
                    && numberOfDataNodes == that.numberOfDataNodes
                    && numberOfActiveShards == that.numberOfActiveShards
                    && numberOfPrimaryShards == that.numberOfPrimaryShards
                    && numberOfInitializingShards == that.numberOfInitializingShards
                    && numberOfUnassignedShards == that.numberOfUnassignedShards
                    && Objects.equals(clusterName, that.clusterName)
                    && Objects.equals(healthStatus, that.healthStatus)
                    && Objects.equals(masterNodeId, that.masterNodeId);
        }
        return isEqual;
    }

    @Override
    public int hashCode() {
        return Objects.hash(clusterName, healthStatus, numberOfNodes, numberOfDataNodes, numberOfActiveShards, numberOfPrimaryShards, numberOfInitializingShards, numberOfUnassignedShards, masterNodeId);
    }

    @Override
    public String toString() {
        return new StringJoiner(", ", ClusterInfo.class.getSimpleName() + "[", "]")
                .add("clusterName='" + clusterName + "'")
                .add("healthStatus=" + healthStatus)
                .add("numberOfNodes=" + numberOfNodes)
                .add("numberOfDataNodes=" + numberOfDataNodes)
                .add("numberOfActiveShards=" + numberOfActiveShards)
                .add("numberOfPrimaryShards=" + numberOfPrimaryShards)
                .add("numberOfInitializingShards=" + numberOfInitializingShards)
                .add("numberOfUnassignedShards=" + numberOfUnassignedShards)
                .add("masterNodeId='" + masterNodeId + "'")
                .toString();
    }
}
|
<filename>utils/file_test.go
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package utils
import (
"bytes"
"crypto/rand"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestCopyDir verifies that CopyDir recursively copies a directory tree:
// file contents and modes are preserved, subdirectories are recreated, and
// copying onto an already-existing destination fails.
func TestCopyDir(t *testing.T) {
	srcDir, err := ioutil.TempDir("", "src")
	require.NoError(t, err)
	defer os.RemoveAll(srcDir)
	dstParentDir, err := ioutil.TempDir("", "dstparent")
	require.NoError(t, err)
	defer os.RemoveAll(dstParentDir)
	// dst must not exist yet; CopyDir is expected to create it.
	dstDir := filepath.Join(dstParentDir, "dst")
	tempFile := "temp.txt"
	err = ioutil.WriteFile(filepath.Join(srcDir, tempFile), []byte("test file"), 0655)
	require.NoError(t, err)
	childDir := "child"
	err = os.Mkdir(filepath.Join(srcDir, childDir), 0777)
	require.NoError(t, err)
	childTempFile := "childtemp.txt"
	err = ioutil.WriteFile(filepath.Join(srcDir, childDir, childTempFile), []byte("test file"), 0755)
	require.NoError(t, err)
	err = CopyDir(srcDir, dstDir)
	assert.NoError(t, err)
	// Top-level file: mode and contents must match the source.
	stat, err := os.Stat(filepath.Join(dstDir, tempFile))
	assert.NoError(t, err)
	assert.Equal(t, uint32(0655), uint32(stat.Mode()))
	assert.False(t, stat.IsDir())
	data, err := ioutil.ReadFile(filepath.Join(dstDir, tempFile))
	assert.NoError(t, err)
	assert.Equal(t, "test file", string(data))
	// Nested directory and its file must be copied as well.
	stat, err = os.Stat(filepath.Join(dstDir, childDir))
	assert.NoError(t, err)
	assert.True(t, stat.IsDir())
	stat, err = os.Stat(filepath.Join(dstDir, childDir, childTempFile))
	assert.NoError(t, err)
	assert.Equal(t, uint32(0755), uint32(stat.Mode()))
	assert.False(t, stat.IsDir())
	data, err = ioutil.ReadFile(filepath.Join(dstDir, childDir, childTempFile))
	assert.NoError(t, err)
	assert.Equal(t, "test file", string(data))
	// Second copy onto the now-existing destination must error.
	err = CopyDir(srcDir, dstDir)
	assert.Error(t, err)
}
// TestLimitedReaderWithError checks the limited reader's boundary behavior:
// reads strictly below or exactly at the limit succeed, while any read that
// would exceed the limit fails with SizeLimitExceeded.
func TestLimitedReaderWithError(t *testing.T) {
	// newRandom returns size bytes of random data, failing the test on error.
	newRandom := func(t *testing.T, size int) []byte {
		data := make([]byte, size)
		count, err := rand.Read(data)
		require.NoError(t, err)
		require.Equal(t, size, count)
		return data
	}

	t.Run("read less than max size", func(t *testing.T) {
		const limit = 10
		source := newRandom(t, limit)
		reader := NewLimitedReaderWithError(bytes.NewReader(source), int64(limit))
		dest := make([]byte, limit-3)
		_, err := io.ReadFull(reader, dest)
		require.NoError(t, err)
	})

	t.Run("read equal to max size", func(t *testing.T) {
		const limit = 10
		source := newRandom(t, limit)
		reader := NewLimitedReaderWithError(bytes.NewReader(source), int64(limit))
		dest := make([]byte, limit)
		_, err := io.ReadFull(reader, dest)
		// Hitting the boundary exactly may surface io.EOF; both are fine.
		require.Truef(t, err == nil || err == io.EOF, "err must be nil or %v, got %v", io.EOF, err)
	})

	t.Run("single read, larger than max size", func(t *testing.T) {
		const limit = 5
		oversize := limit + 10
		source := newRandom(t, oversize)
		reader := NewLimitedReaderWithError(bytes.NewReader(source), int64(limit))
		dest := make([]byte, oversize)
		_, err := io.ReadFull(reader, dest)
		require.Error(t, err)
		require.Equal(t, SizeLimitExceeded, err)
	})

	t.Run("multiple small reads, total larger than max size", func(t *testing.T) {
		const limit = 10
		source := newRandom(t, limit*2)
		reader := NewLimitedReaderWithError(bytes.NewReader(source), int64(limit))
		dest := make([]byte, limit-4)
		// First read stays under the limit.
		_, err := io.ReadFull(reader, dest)
		require.NoError(t, err)
		// Second identical read pushes the cumulative total past the limit.
		_, err = io.ReadFull(reader, dest)
		require.Error(t, err)
		require.Equal(t, SizeLimitExceeded, err)
	})
}
|
import multiprocessing
import os
import re
import threading
import traceback

from .common import * #pylint: disable=wildcard-import
# ---------------------------------------------------------------------------
# Output file names for each host-filtering stage. The chained prefixes
# record the sequence of tools a read has survived (star -> priceseqfilter
# -> fq2fa -> cdhitdup -> lzw -> bowtie2 -> gsnap).
# ---------------------------------------------------------------------------
STAR_OUT1 = 'unmapped.star.1.fq'
STAR_OUT2 = 'unmapped.star.2.fq'
STAR_COUNTS_OUT = 'reads_per_gene.star.tab'
PRICESEQFILTER_OUT1 = 'priceseqfilter.unmapped.star.1.fq'
PRICESEQFILTER_OUT2 = 'priceseqfilter.unmapped.star.2.fq'
FQ2FA_OUT1 = 'priceseqfilter.unmapped.star.1.fasta'
FQ2FA_OUT2 = 'priceseqfilter.unmapped.star.2.fasta'
CDHITDUP_OUT1 = 'cdhitdup.priceseqfilter.unmapped.star.1.fasta'
CDHITDUP_OUT2 = 'cdhitdup.priceseqfilter.unmapped.star.2.fasta'
LZW_OUT1 = 'lzw.cdhitdup.priceseqfilter.unmapped.star.1.fasta'
LZW_OUT2 = 'lzw.cdhitdup.priceseqfilter.unmapped.star.2.fasta'
BOWTIE2_OUT = 'bowtie2.lzw.cdhitdup.priceseqfilter.unmapped.star.sam'
EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT1 = 'unmapped.bowtie2.lzw.cdhitdup.priceseqfilter.unmapped.star.1.fasta'
EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT2 = 'unmapped.bowtie2.lzw.cdhitdup.priceseqfilter.unmapped.star.2.fasta'
EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT3 = 'unmapped.bowtie2.lzw.cdhitdup.priceseqfilter.unmapped.star.merged.fasta'
GSNAP_FILTER_SAM = 'gsnap_filter.sam'
EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT1 = 'unmapped.gsnap_filter.bowtie2.lzw.cdhitdup.priceseqfilter.unmapped.star.1.fasta'
EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT2 = 'unmapped.gsnap_filter.bowtie2.lzw.cdhitdup.priceseqfilter.unmapped.star.2.fasta'
EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT3 = 'unmapped.gsnap_filter.bowtie2.lzw.cdhitdup.priceseqfilter.unmapped.star.merged.fasta'
LOGS_OUT_BASENAME = 'log'
STATS_IN = 'total_reads.json'
STATS_OUT = 'stats.json'
VERSION_OUT = 'versions.json'
PIPELINE_VERSION_OUT = 'pipeline_version.txt'
# Hard caps on input size; inputs beyond these are truncated (see
# INPUT_TRUNCATED_FILE marker and max_input_lines()).
MAX_INPUT_READS = 75 * 1000 * 1000
# NOTE(review): "GNSAP" looks like a typo for "GSNAP", but renaming would
# break any external references -- left as-is.
MAX_GNSAP_FILTER_READS = 10 * 1000 * 1000
INPUT_TRUNCATED_FILE = 'input_truncated.txt'
# ---------------------------------------------------------------------------
# Arguments from environment variables.
# ---------------------------------------------------------------------------
INPUT_BUCKET = os.environ.get('INPUT_BUCKET')
FILE_TYPE = os.environ.get('FILE_TYPE')
# NOTE(review): if OUTPUT_BUCKET is unset this raises AttributeError
# (None.rstrip); os.environ['OUTPUT_BUCKET'] would fail more clearly.
OUTPUT_BUCKET = os.environ.get('OUTPUT_BUCKET').rstrip('/')
STAR_GENOME = os.environ.get(
    'STAR_GENOME',
    's3://czbiohub-infectious-disease/references/human/STAR_genome.tar')
BOWTIE2_GENOME = os.environ.get(
    'BOWTIE2_GENOME',
    's3://czbiohub-infectious-disease/references/human/bowtie2_genome.tar')
# Default GSNAP index lives next to the STAR index unless overridden.
GSNAP_GENOME = os.environ.get('GSNAP_GENOME',
                              os.path.join(
                                  os.path.dirname(STAR_GENOME),
                                  'hg38_pantro5_k16.tar'))
STAR_BOWTIE_VERSION_FILE_S3 = os.environ.get(
    'STAR_BOWTIE_VERSION_FILE_S3', get_host_index_version_file(STAR_GENOME))
DB_SAMPLE_ID = os.environ['DB_SAMPLE_ID']
AWS_BATCH_JOB_ID = os.environ.get('AWS_BATCH_JOB_ID', 'local')
# Version-scoped S3 output prefix, e.g. "/3" for pipeline version 3.x.
# PIPELINE_VERSION is expected from the wildcard common import -- TODO confirm.
SAMPLE_S3_OUTPUT_POSTFIX = "/%s" % major_version(
    PIPELINE_VERSION) if PIPELINE_VERSION else ""
SAMPLE_S3_INPUT_PATH = INPUT_BUCKET.rstrip('/')
SAMPLE_S3_OUTPUT_PATH = OUTPUT_BUCKET + SAMPLE_S3_OUTPUT_POSTFIX
# Derive a filesystem-safe sample name from the S3 path ("s3://" stripped).
sample_name = SAMPLE_S3_INPUT_PATH[5:].rstrip('/').replace('/', '-')
SAMPLE_DIR = DEST_DIR + '/' + sample_name
FASTQ_DIR = SAMPLE_DIR + '/fastqs'
RESULT_DIR = SAMPLE_DIR + '/results'
SCRATCH_DIR = SAMPLE_DIR + '/scratch'
DEFAULT_LOG_PARAMS = {"sample_s3_output_path": SAMPLE_S3_OUTPUT_PATH}
# versioning
# ---------------------------------------------------------------------------
# Target outputs by task: which result files each stage must produce, for
# single-end vs paired-end runs.
# ---------------------------------------------------------------------------
TARGET_OUTPUTS_SINGLE = {
    "run_star": [os.path.join(RESULT_DIR, STAR_OUT1)],
    "run_priceseqfilter": [os.path.join(RESULT_DIR, PRICESEQFILTER_OUT1)],
    "run_fq2fa": [os.path.join(RESULT_DIR, FQ2FA_OUT1)],
    "run_cdhitdup": [os.path.join(RESULT_DIR, CDHITDUP_OUT1)],
    "run_lzw": [os.path.join(RESULT_DIR, LZW_OUT1)],
    "run_bowtie2":
    [os.path.join(RESULT_DIR, EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT1)],
    "run_gsnap_filter":
    [os.path.join(RESULT_DIR, EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT1)]
}
TARGET_OUTPUTS_PAIRED = {
    "run_star":
    [os.path.join(RESULT_DIR, STAR_OUT1),
     os.path.join(RESULT_DIR, STAR_OUT2)],
    "run_priceseqfilter": [
        os.path.join(RESULT_DIR, PRICESEQFILTER_OUT1),
        os.path.join(RESULT_DIR, PRICESEQFILTER_OUT2)
    ],
    "run_fq2fa": [
        os.path.join(RESULT_DIR, FQ2FA_OUT1),
        os.path.join(RESULT_DIR, FQ2FA_OUT2)
    ],
    "run_cdhitdup": [
        os.path.join(RESULT_DIR, CDHITDUP_OUT1),
        os.path.join(RESULT_DIR, CDHITDUP_OUT2)
    ],
    "run_lzw":
    [os.path.join(RESULT_DIR, LZW_OUT1),
     os.path.join(RESULT_DIR, LZW_OUT2)],
    "run_bowtie2": [
        os.path.join(RESULT_DIR, EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT1),
        os.path.join(RESULT_DIR, EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT2),
        os.path.join(RESULT_DIR, EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT3)
    ],
    "run_gsnap_filter": [
        os.path.join(RESULT_DIR, EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT1),
        os.path.join(RESULT_DIR, EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT2),
        os.path.join(RESULT_DIR, EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT3)
    ]
}
# Software package executable names.
STAR = "STAR"
PRICESEQ_FILTER = "PriceSeqFilter"
CDHITDUP = "cd-hit-dup"
BOWTIE2 = "bowtie2"
GSNAPL = "gsnapl"
# Pipeline configuration: LZW cutoffs tried in order (stricter first); see
# generate_lzw_filtered_* for the retry semantics.
LZW_FRACTION_CUTOFFS = [0.45, 0.42]
# Convenience functions
def fq2fa(input_fastq, output_fasta):
    """Convert FASTQ to FASTA with sed: turn '@' headers into '>' and keep
    only header + sequence lines (drop '+' separators and quality lines)."""
    sed_script = "sed -n '1~4s/^@/>/p;2~4p' <%s >%s"
    execute_command(sed_script % (input_fastq, output_fasta))
def result_path(basename):
    """Return the absolute path of ``basename`` inside the results directory."""
    full_path = os.path.join(RESULT_DIR, basename)
    return full_path
def lzw_fraction(sequence):
    """Ratio of LZW-emitted codes to sequence length, a rough measure of
    compressibility: low values indicate highly repetitive reads.

    Case-insensitive (the sequence is upper-cased first). Returns 0.0 for
    the empty string.
    """
    if sequence == "":
        return 0.0
    sequence = sequence.upper()
    # Seed the table with single characters. Note: the code counter is
    # bumped once per character *occurrence*, not per distinct character --
    # preserved exactly as the cutoffs were tuned against this behavior.
    table = {}
    next_code = 0
    for ch in sequence:
        next_code += 1
        table[ch] = next_code
    emitted = []
    current = ""
    for ch in sequence:
        candidate = current + ch
        if table.get(candidate):
            current = candidate
        else:
            emitted.append(table[current])
            next_code += 1
            table[candidate] = next_code
            current = ch
    if current != "":
        emitted.append(table[current])
    return float(len(emitted)) / len(sequence)
def generate_lzw_filtered_single(fasta_file, out_prefix, cutoff_fractions):
    """Write reads whose LZW fraction exceeds a cutoff to <out_prefix>.1.fasta.

    Cutoffs in ``cutoff_fractions`` are tried in order; if a cutoff filters
    out *every* read, the next (more permissive) cutoff is tried instead.
    Assumes a 2-line-per-record FASTA (header + sequence).
    """
    out_read_1 = open(out_prefix + '.1.fasta', 'wb')
    for cutoff_frac in cutoff_fractions:
        read_1 = open(fasta_file, 'rb')
        count = 0
        filtered = 0
        while True:
            line_r1_header = read_1.readline()
            line_r1_sequence = read_1.readline()
            if line_r1_header and line_r1_sequence:
                fraction_1 = lzw_fraction(line_r1_sequence.rstrip())
                count += 1
                if fraction_1 > cutoff_frac:
                    out_read_1.write(line_r1_header)
                    out_read_1.write(line_r1_sequence)
                else:
                    filtered += 1
            else:
                break
        # Close the input handle (it was previously leaked on each cutoff).
        read_1.close()
        # Guard against an empty input file: count == 0 used to raise
        # ZeroDivisionError when computing the kept ratio.
        kept_ratio = 1 - float(filtered) / count if count else 0.0
        msg = "LZW filter: cutoff_frac: %f, total reads: %d, filtered reads: %d, " \
              "kept ratio: %f" % (cutoff_frac, count, filtered, kept_ratio)
        write_to_log(msg)
        if count != filtered:
            break
    out_read_1.close()
def generate_lzw_filtered_paired(fasta_file_1, fasta_file_2, out_prefix,
                                 cutoff_fractions):
    """Write read pairs where *both* mates exceed the LZW cutoff to
    <out_prefix>.1.fasta / .2.fasta.

    Cutoffs in ``cutoff_fractions`` are tried in order; if a cutoff filters
    out *every* pair, the next (more permissive) cutoff is tried instead.
    Assumes 2-line-per-record FASTAs (header + sequence), pairs in sync.
    """
    out_read_1 = open(out_prefix + '.1.fasta', 'wb')
    out_read_2 = open(out_prefix + '.2.fasta', 'wb')
    for cutoff_frac in cutoff_fractions:
        read_1 = open(fasta_file_1, 'rb')
        read_2 = open(fasta_file_2, 'rb')
        count = 0
        filtered = 0
        while True:
            line_r1_header = read_1.readline()
            line_r1_seq = read_1.readline()
            line_r2_header = read_2.readline()
            line_r2_seq = read_2.readline()
            if line_r1_header and line_r1_seq and line_r2_header and line_r2_seq:
                fraction_1 = lzw_fraction(line_r1_seq.rstrip())
                fraction_2 = lzw_fraction(line_r2_seq.rstrip())
                count += 1
                if fraction_1 > cutoff_frac and fraction_2 > cutoff_frac:
                    out_read_1.write(line_r1_header)
                    out_read_1.write(line_r1_seq)
                    out_read_2.write(line_r2_header)
                    out_read_2.write(line_r2_seq)
                else:
                    filtered += 1
            else:
                break
        # Close input handles (they were previously leaked on each cutoff).
        read_1.close()
        read_2.close()
        # Guard against empty input files: count == 0 used to raise
        # ZeroDivisionError when computing the kept ratio.
        kept_ratio = 1 - float(filtered) / count if count else 0.0
        msg = "LZW filter: cutoff_frac: %f, total reads: %d, filtered reads: %d, " \
              "kept ratio: %f" % (cutoff_frac, count, filtered, kept_ratio)
        write_to_log(msg)
        if count != filtered:
            break
    out_read_1.close()
    out_read_2.close()
def generate_unmapped_singles_from_sam(sam_file, output_prefix):
    """Output a single file containing every unmapped read after bowtie2.

    Writes <output_prefix>.1.fasta with one FASTA record per unmapped read.

    SAM file alignments:
    - See: https://en.wikipedia.org/wiki/SAM_(file_format)
    - https://broadinstitute.github.io/picard/explain-flags.html
    - part[0] = query template name
    - part[1] = bitwise flag
    - part[9] = segment sequence
    """
    # NOTE(review): files are opened in binary mode but compared/split with
    # str literals -- this assumes Python 2 semantics; verify before porting.
    with open(output_prefix + '.1.fasta', 'wb') as output_read:
        with open(sam_file, 'rb') as sam_f:
            # Skip headers
            read = sam_f.readline()
            while read and read[0] == '@':
                read = sam_f.readline()
            while read:
                part = read.split("\t")
                # Read unmapped (flag 4 = segment unmapped, single-end)
                if part[1] == "4":
                    # Do NOT append /1 to read id
                    output_read.write(">%s\n%s\n" % (part[0], part[9]))
                read = sam_f.readline()
def generate_unmapped_pairs_from_sam_work(out_read_1, out_read_2,
                                          out_merged_read, sam_f):
    """Extract read pairs where both mates are unmapped from an open SAM
    stream, writing mate 1 to out_read_1, mate 2 to out_read_2, and both
    (with /1 and /2 id suffixes) to out_merged_read.

    SAM file alignments:
    - See: https://en.wikipedia.org/wiki/SAM_(file_format)
    - https://broadinstitute.github.io/picard/explain-flags.html
    - part[0] = query template name
    - part[1] = bitwise flag
    - part[9] = segment sequence
    """
    # Skip headers
    read1 = sam_f.readline()
    while read1 and read1[0] == '@':
        read1 = sam_f.readline()
    # Consecutive SAM lines are assumed to be the two mates of one pair.
    read2 = sam_f.readline()
    while read1 and read2:
        part1 = read1.split("\t")
        part2 = read2.split("\t")
        # Flag 77 = paired, both unmapped, first-in-pair; 141 = same but
        # second-in-pair.
        if part1[1] == "77" and part2[1] == "141":  # Both parts unmapped
            out_read_1.write(">%s\n%s\n" % (part1[0], part1[9]))
            out_read_2.write(">%s\n%s\n" % (part2[0], part2[9]))
            # Append /1 to read id
            out_merged_read.write(">%s/1\n%s\n" % (part1[0], part1[9]))
            # Append /2 to read id
            out_merged_read.write(">%s/2\n%s\n" % (part2[0], part2[9]))
        read1 = sam_f.readline()
        read2 = sam_f.readline()
def generate_unmapped_pairs_from_sam(sam_file, out_prefix):
    """Output 1.fasta and 2.fasta containing the unmapped pairs from bowtie2.
    Also output .merged.fasta and multiplex read ids by appending /1 and /2.
    """
    # Single flattened with-statement instead of four nested blocks.
    with open(out_prefix + '.1.fasta', 'wb') as out_read_1, \
            open(out_prefix + '.2.fasta', 'wb') as out_read_2, \
            open(out_prefix + '.merged.fasta', 'wb') as out_merged_read, \
            open(sam_file, 'rb') as sam_f:
        generate_unmapped_pairs_from_sam_work(
            out_read_1, out_read_2, out_merged_read, sam_f)
def max_input_lines(input_file):
    """Number of lines corresponding to MAX_INPUT_READS for this file type:
    FASTA records take 2 lines, FASTQ records take 4."""
    lines_per_read = 2 if "fasta" in input_file else 4
    return MAX_INPUT_READS * lines_per_read
# Job functions
def run_star_part(output_dir, genome_dir, fastq_files, count_genes=False):
    """Run one STAR alignment pass against a single genome partition.

    output_dir: scratch directory for this part's outputs.
    genome_dir: directory holding the STAR index for this partition.
    fastq_files: one (single-end) or two (paired) inputs; may be gzipped.
    count_genes: also emit per-gene counts (--quantMode GeneCounts) when the
        index carries a GTF-derived splice-junction list.
    """
    execute_command("mkdir -p %s" % output_dir)
    star_command_params = [
        'cd', output_dir, ';', STAR, '--outFilterMultimapNmax', '99999',
        '--outFilterScoreMinOverLread', '0.5', '--outFilterMatchNminOverLread',
        '0.5', '--outReadsUnmapped', 'Fastx', '--outFilterMismatchNmax', '999',
        '--outSAMmode', 'None', '--clip3pNbases', '0', '--runThreadN',
        str(multiprocessing.cpu_count()), '--genomeDir', genome_dir,
        '--readFilesIn', " ".join(fastq_files)
    ]
    if fastq_files[0][-3:] == '.gz':
        # Create a custom decompressor which does "zcat $input_file | head -
        # ${max_lines}" so oversized inputs are truncated on the fly.
        cmd = "echo 'zcat ${2} | head -${1}' > %s/gzhead; " % genome_dir
        execute_command(cmd)
        max_lines = max_input_lines(fastq_files[0])
        star_command_params += [
            '--readFilesCommand',
            '"sh %s/gzhead %d"' % (genome_dir, max_lines)
        ]
    path = "%s/sjdbList.fromGTF.out.tab" % genome_dir
    if count_genes and os.path.isfile(path):
        star_command_params += ['--quantMode', 'GeneCounts']
    # BUG FIX: previously `cmd = " ".join(...), os.path.join(...)` built a
    # *tuple* and passed it to execute_command, which cannot run a tuple.
    # Pass the progress-log path as a separate second argument instead
    # (assumes execute_command(cmd, progress_file) -- TODO confirm against
    # common.execute_command's signature).
    execute_command(" ".join(star_command_params),
                    os.path.join(output_dir, "Log.progress.out"))
def uncompressed(s3genome):
    """Map a possibly-compressed archive name to its uncompressed name:
    'x.gz' -> 'x', 'x.tgz' -> 'x.tar'; anything else is returned unchanged."""
    # The two suffix checks are mutually exclusive ('.tgz' does not end in
    # the literal suffix '.gz'), so their order does not matter.
    if s3genome.endswith(".tgz"):
        return s3genome[:-3] + "tar"
    if s3genome.endswith(".gz"):
        return s3genome[:-3]
    return s3genome
def fetch_genome_work(s3genome, strict):
    """Download and unpack a genome archive from S3 into REF_DIR.

    Tries, in order: the uncompressed tarball via s3mi, the compressed
    archive via s3mi, then `aws s3 cp`. Returns the local genome directory,
    or None when every attempt fails and ``strict`` is False; re-raises the
    last error when ``strict`` is True.
    """
    # BUG FIX: the previous `.rstrip(".gz").rstrip(".tar")` stripped a
    # *character set* from the end of the name, mangling any basename ending
    # in t/a/r/g/z (e.g. "data.tar" -> "d"). Remove the suffixes properly.
    genome_name = os.path.basename(s3genome)
    for suffix in (".gz", ".tar"):
        if genome_name.endswith(suffix):
            genome_name = genome_name[:-len(suffix)]
    if genome_name not in ("STAR_genome", "bowtie2_genome",
                           "hg38_pantro5_k16"):
        write_to_log("Oh hello interesting new genome {}".format(genome_name))
    genome_dir = os.path.join(REF_DIR, genome_name)
    if not os.path.exists(genome_dir):
        # Can consider merging with fetch_reference: idseq-pipeline/issues/223
        try:
            install_s3mi()
            tarfile = uncompressed(s3genome)
            try:
                write_to_log("Trying to download compressed genome...")
                cmd = "s3mi cat {tarfile} | tar xvf - -C {refdir}".format(
                    tarfile=tarfile, refdir=REF_DIR)
                execute_command(cmd)
                assert os.path.isdir(genome_dir)
            except:
                if tarfile != s3genome:
                    print("Uncompressed version doesn't exist. Downloading "
                          "compressed version...")
                    # The uncompressed version doesn't exist. This is much
                    # slower, but no choice.
                    execute_command("rm -rf {}".format(genome_dir))
                    cmd = "s3mi cat {s3genome} | tar xvfz - -C {refdir}".format(
                        s3genome=s3genome, refdir=REF_DIR)
                    execute_command(cmd)
                    assert os.path.isdir(genome_dir)
                else:
                    # Okay, may be s3mi is broken. We'll try aws cp next.
                    write_to_log("Error in downloading with s3mi. Trying aws cp...")
                    raise
        except:
            try:
                execute_command("rm -rf {}".format(genome_dir))
                cmd = "aws s3 cp --quiet {s3genome} - | tar xvf - -C {refdir}".format(
                    s3genome=s3genome, refdir=REF_DIR)
                execute_command(cmd)
                assert os.path.isdir(genome_dir)
            except:
                msg = "Failed to download index {}, it might not exist.".format(
                    s3genome)
                write_to_log(msg)
                if strict:
                    raise
                genome_dir = None
                # Note we do not reraise the exception here, just print it.
                traceback.print_exc()
    if genome_dir:
        write_to_log("successfully downloaded index {}".format(s3genome))
    return genome_dir
def fetch_genome(s3genome, strict=True, mutex=threading.RLock(), mutexes={}): #pylint: disable=dangerous-default-value
    """Fetch and expand genome archive from s3 into local dir. Return that local
    dir. If already downloaded, return right away. If a fetch of the same genome
    is already in progress on another thread, wait for it to complete or fail;
    and if it failed, try again. If all tries fail, raise an exception (strict)
    or return None (not strict).
    Typical use:
    fruitfly_dir = fetch_genome("s3://fruitfly_genome.tar")
    # Prefetching optimization: While doing compute intensive work on fruit flies,
    # start fetching the butterfly genome.
    threading.Thread(target=fetch_genome, args=["s3://butterfly_genome.tar"]).start()
    ... do some compute intensive work on fruit flies ...
    butterfly_dir = fetch_genome("s3://butterfly_genome.tar")
    threading.Thread(target=fetch_genome, args=["s3://firefly_genome.tar"]).start()
    ... do some compute intensive work on butterflies ...
    firefly_dir = fetch_genome("s3://firefly_genome.tar")
    ...
    Without the pre-fetching thread, the compute intensive work on butterflies
    would have to wait for the entire butterfly genome to be downloaded. With
    pre-fetching like this, the download of the butterfly genome proceeds in
    parallel with the fruit fly computation, and by the time the butterfly
    genome is needed, it may already have been fully downloaded, so the
    butterfly computation won't have to wait for it. Similarly, the download
    of the firefly genome proceeds in parallel with the butterfly computation,
    and the firefly genome will be ready by the time it's needed. The program
    would still work correctly if we comment out all the pre-fetching threads,
    but would take much longer to execute.
    It may be tempting to initiate all fetching parallel at the start of the
    program, but that's undesirable for two reasons:
    1) Fetching data with s3mi fully utilizes the I/O bandwidth of moderately
    sized instances, so fetching multiple streams in parallel will just slow them
    down and delay the moment we can begin computing.
    2) If the different computation stages support result caching, so that
    typically the first N stages would be cached from a previous run, and the
    computation would resume from stage N+1, this pattern beautifully avoids
    any unnecessary fetching of data that won't be needed for the cached
    stages, while still fetching the data needed for stage N+1. If, instead,
    we were to initiate pre-fetching at the beginning of the program, we would
    have to carefully ensure we only prefetch data that will in fact be
    needed, by replicating some of the caching logic.
    """
    # The mutable defaults are intentional process-lifetime state (hence the
    # pylint disable above): `mutex` guards the registry, and `mutexes` maps
    # each genome to its own lock so concurrent fetches of *different*
    # genomes proceed in parallel while duplicate fetches serialize.
    with mutex:
        if s3genome not in mutexes:
            mutexes[s3genome] = threading.RLock()
        mx = mutexes[s3genome]
    # Holding the per-genome lock, do (or wait for) the actual download.
    with mx:
        return fetch_genome_work(s3genome, strict)
def get_read(f):
    """Read one 4-line FASTQ record from file object ``f``.

    Returns (lines, read_id): ``lines`` is the list of the record's four raw
    lines and ``read_id`` is the header up to the first tab, stripped.
    Returns ([], None) at end of file.
    """
    record = []
    header = f.readline()
    if not header:
        return record, None
    # The FASTQ format specifies that each read consists of 4 lines,
    # the first of which begins with @ followed by read ID.
    assert header[0] == "@"
    read_id = header.split("\t", 1)[0].strip()
    record.append(header)
    for _ in range(3):
        record.append(f.readline())
    return record, read_id
def write_lines(of, lines):
    """Write every element of ``lines`` to the open file object ``of``."""
    for line in lines:
        of.write(line)
def handle_outstanding_read(r0, r0id, outstanding_r0, outstanding_r1, of0, of1,
                            mem, max_mem):
    """Try to pair read ``r0`` against the opposite stream's parked reads.

    If r0 completes a read waiting in ``outstanding_r1``, emit the pair
    (r0 -> of0, its mate -> of1). Otherwise park r0 in ``outstanding_r0``
    so a future mate can complete it. Returns the updated (mem, max_mem)
    counts of currently/maximally parked reads.
    """
    if not r0id:
        # EOF sentinel from get_read -- nothing to pair or park.
        return mem, max_mem
    mate = outstanding_r1.pop(r0id, None)
    if mate is not None:
        write_lines(of0, r0)
        write_lines(of1, mate)
        mem -= 1
    else:
        outstanding_r0[r0id] = r0
        mem += 1
        max_mem = max(max_mem, mem)
    return mem, max_mem
def sync_pairs_work(of0, of1, if0, if1):
    """Stream two FASTQ inputs and emit them with read pairs in matching order.

    of0/of1: output file objects; if0/if1: input file objects.
    Reads that arrive out of order are buffered in the `outstanding_*` dicts
    until their mate shows up. Returns (outstanding_r0, outstanding_r1,
    max_mem): reads left unpaired at EOF, plus the peak number of buffered
    reads (0 means the inputs were already perfectly synchronized).
    """
    # TODO: Use this as a template for merging fasta?
    outstanding_r0 = {}
    outstanding_r1 = {}
    mem = 0
    max_mem = 0
    while True:
        r0, r0id = get_read(if0)
        r1, r1id = get_read(if1)
        if not r0 and not r1:
            break
        if r0id == r1id:
            # If the input pairs are already synchronized, we take this branch
            # on every iteration.
            write_lines(of0, r0)
            write_lines(of1, r1)
        else:
            # Out of sync: try to complete each read against the opposite
            # stream's buffer, otherwise park it for a future mate.
            mem, max_mem = handle_outstanding_read(r0, r0id, outstanding_r0,
                                                   outstanding_r1, of0, of1,
                                                   mem, max_mem)
            mem, max_mem = handle_outstanding_read(r1, r1id, outstanding_r1,
                                                   outstanding_r0, of1, of0,
                                                   mem, max_mem)
    return outstanding_r0, outstanding_r1, max_mem
def sync_pairs(fastq_files, max_discrepancies=0):
    """The given fastq_files contain the same read IDs but in different order.
    Output the same data in synchronized order. Omit up to max_discrepancies
    if necessary. If more must be suppressed, raise assertion.

    Returns the input list unchanged unless exactly two files are given;
    otherwise returns the paths of the two synchronized output files.
    """
    if len(fastq_files) != 2:
        # Single-end (or unexpected count): nothing to synchronize.
        return fastq_files
    output_fnames = [ifn + ".synchronized_pairs.fq" for ifn in fastq_files]
    with open(fastq_files[0], "rb") as if_0:
        with open(fastq_files[1], "rb") as if_1:
            with open(output_fnames[0], "wb") as of_0:
                with open(output_fnames[1], "wb") as of_1:
                    outstanding_r0, outstanding_r1, max_mem = sync_pairs_work(
                        of_0, of_1, if_0, if_1)
    if max_mem:
        # This will be printed if some pairs were out of order.
        warning_message = "WARNING: Pair order out of sync in {fqf}. " \
                          "Synchronized using RAM for {max_mem} pairs.".format(
                              fqf=fastq_files, max_mem=max_mem)
        write_to_log(warning_message)
    # Reads still unpaired at EOF are broken pairs; tolerate at most
    # max_discrepancies of them (they are simply omitted from the output).
    discrepancies_count = len(outstanding_r0) + len(outstanding_r1)
    if discrepancies_count:
        warning_message = "WARNING: Found {dc} broken pairs in {fqf}, e.g., " \
                          "{example}.".format(
                              dc=discrepancies_count,
                              fqf=fastq_files,
                              example=(outstanding_r0 or outstanding_r1).popitem()[0])
        write_to_log(warning_message)
        assert discrepancies_count <= max_discrepancies, warning_message
    return output_fnames
def extract_total_counts_from_star_output(result_dir, num_fastqs,
                                          total_counts_from_star):
    """Grab the total reads from the Log.final.out file.

    Mutates ``total_counts_from_star`` in place: sets 'total_reads' (scaled
    by the number of input FASTQs) and flags 'truncated' when the count hits
    the MAX_INPUT_READS cap exactly.
    """
    log_file = os.path.join(result_dir, "Log.final.out")
    cmd = "grep 'Number of input reads' %s" % log_file
    total_reads = execute_command_with_output(cmd).split("\t")[1]
    total_reads = int(total_reads)
    # If it's exactly the same, it must have been truncated.
    if total_reads == MAX_INPUT_READS:
        total_counts_from_star['truncated'] = 1
    total_counts_from_star['total_reads'] = total_reads * num_fastqs
def run_star(fastq_files, uploader_start, total_counts_from_star):
    """Run STAR to filter out host reads.

    fastq_files: 1 (single-end) or 2 (paired) input files.
    uploader_start: callback(path, s3_dest) that begins an async upload.
    total_counts_from_star: dict mutated with total/truncated read counts.
    Iterates over the partitioned STAR index (parts.txt), re-aligning the
    unmapped output of each part against the next.
    """
    star_outputs = [STAR_COUNTS_OUT, STAR_OUT1, STAR_OUT2]
    num_fastqs = len(fastq_files)
    gene_count_output = None
    # Per-part STAR writes Unmapped.out.mate1 / .mate2 into its scratch dir.
    def unmapped_files_in(some_dir):
        return [
            "%s/Unmapped.out.mate%d" % (some_dir, i + 1)
            for i in range(num_fastqs)
        ]
    genome_dir = fetch_genome(STAR_GENOME)
    assert genome_dir is not None
    # If we are here, we are also going to need a bowtie genome later; start
    # fetching it now. This is the absolute PERFECT PLACE for this fetch. If
    # we are computing from scratch, the download has plenty of time to
    # complete before bowtie needs it. If we are doing a lazy rerun,
    # this function gets skipped, and we avoid a huge unnecessary download.
    threading.Thread(target=fetch_genome, args=[BOWTIE2_GENOME]).start()
    # Check if parts.txt file exists. If so, use the new version of partitioned
    # indices. Otherwise, stay put.
    parts_file = os.path.join(genome_dir, "parts.txt")
    assert os.path.isfile(parts_file)
    with open(parts_file, 'rb') as parts_f:
        num_parts = int(parts_f.read())
    unmapped = fastq_files
    for part_idx in range(num_parts):
        tmp_result_dir = "%s/star-part-%d" % (SCRATCH_DIR, part_idx)
        genome_part = "%s/part-%d" % (genome_dir, part_idx)
        run_star_part(tmp_result_dir, genome_part, unmapped, part_idx == 0)
        # Re-synchronize pair order before feeding the next part.
        unmapped = sync_pairs(unmapped_files_in(tmp_result_dir))
        # Run part 0 in gene-counting mode:
        # (a) ERCCs are doped into part 0 and we want their counts
        # (b) if there is only 1 part (e.g. human), the host gene counts also
        # make sense
        # (c) at part 0, we can also extract out total input reads and if the
        # total_counts is exactly the same as MAX_INPUT_READS then we know the
        # input file is truncated.
        if part_idx == 0:
            gene_count_file = os.path.join(tmp_result_dir,
                                           "ReadsPerGene.out.tab")
            extract_total_counts_from_star_output(tmp_result_dir, num_fastqs,
                                                  total_counts_from_star)
            if os.path.isfile(gene_count_file):
                gene_count_output = gene_count_file
    # Move final outputs into the result dir and kick off uploads.
    result_files = [gene_count_output] + unmapped
    for i, f in enumerate(result_files):
        if f is not None:
            output_i = result_path(star_outputs[i])
            execute_command("mv %s %s;" % (f, output_i))
            uploader_start(output_i, SAMPLE_S3_OUTPUT_PATH + "/")
    # Cleanup
    execute_command("cd %s; rm -rf *" % SCRATCH_DIR)
    write_to_log("Finished running STAR.")
def run_priceseqfilter(input_fqs, uploader_start):
    """PriceSeqFilter is used to filter input data based on quality. Two FASTQ
    inputs means paired reads.
    See: http://derisilab.ucsf.edu/software/price/
    """
    # PriceSeqFilter determines input type based on extension.
    # It will throw an exception if output extension doesn't match input.
    # e.g. FILE_TYPE "fastq.gz" -> extension "fastq".
    correct_file_extension = os.path.splitext(FILE_TYPE)[0]
    input_files = ["%s.%s" % (fq, correct_file_extension) for fq in input_fqs]
    output_files = [
        "%s_priceseqfilter_output.%s" % (f, correct_file_extension)
        for f in input_files
    ]
    # Hard-link inputs under the extension PriceSeqFilter expects.
    for fq, f in zip(input_fqs, input_files):
        execute_command("ln %s %s" % (fq, f))
    priceseq_params = [PRICESEQ_FILTER, '-a', '12', '-rnf', '90', '-log', 'c']
    if len(input_fqs) == 2:
        priceseq_params.extend([
            '-fp', input_files[0], input_files[1], '-op', output_files[0],
            output_files[1]
        ])
    else:
        priceseq_params.extend(['-f', input_files[0], '-o', output_files[0]])
    # Quality filtering only applies to FASTQ input (FASTA has no qualities).
    if "fastq" in FILE_TYPE or "fq" in FILE_TYPE:
        priceseq_params.extend(['-rqf', '85', '0.98'])
    execute_command(" ".join(priceseq_params))
    write_to_log("Finished running PriceSeqFilter.")
    # Move outputs to the result dir and begin async uploads.
    out_path = result_path(PRICESEQFILTER_OUT1)
    execute_command("mv %s %s" % (output_files[0], out_path))
    s3_dst = SAMPLE_S3_OUTPUT_PATH + "/"
    uploader_start(out_path, s3_dst)
    if len(input_fqs) == 2:
        out_path = result_path(PRICESEQFILTER_OUT2)
        execute_command("mv %s %s" % (output_files[1], out_path))
        uploader_start(out_path, s3_dst)
def run_fq2fa(input_fqs, uploader_start):
    """Convert the FASTQ input(s) to FASTA and upload the results."""
    paired = len(input_fqs) == 2
    fq2fa(input_fqs[0], result_path(FQ2FA_OUT1))
    if paired:
        fq2fa(input_fqs[1], result_path(FQ2FA_OUT2))
    write_to_log("Finished FASTQ to FASTA conversion.")
    s3_dst = SAMPLE_S3_OUTPUT_PATH + "/"
    uploader_start(result_path(FQ2FA_OUT1), s3_dst)
    if paired:
        uploader_start(result_path(FQ2FA_OUT2), s3_dst)
def run_cdhitdup(input_fas, uploader_start):
    """CD-HIT-DUP is used to identify duplicates from single or paired reads.
    Two FASTA inputs means paired reads.
    See: http://weizhongli-lab.org/cd-hit/
    """
    paired = len(input_fas) == 2
    cdhitdup_params = [
        CDHITDUP, '-i', input_fas[0], '-o', result_path(CDHITDUP_OUT1),
        '-e', '0.05', '-u', '70'
    ]
    if paired:
        cdhitdup_params += ['-i2', input_fas[1],
                            '-o2', result_path(CDHITDUP_OUT2)]
    execute_command(" ".join(cdhitdup_params))
    # Begin async uploads of the deduplicated outputs.
    s3_dst = SAMPLE_S3_OUTPUT_PATH + "/"
    uploader_start(result_path(CDHITDUP_OUT1), s3_dst)
    if paired:
        uploader_start(result_path(CDHITDUP_OUT2), s3_dst)
    print("Finished CD-HIT-DUP.")
def run_lzw(input_fas, uploader_start):
    """Filter low-complexity reads using the LZW-fraction heuristic."""
    # LZW_OUT1 minus its trailing '.1.fasta' (8 chars) is the shared prefix.
    output_prefix = result_path(LZW_OUT1[:-8])
    paired = len(input_fas) == 2
    if paired:
        generate_lzw_filtered_paired(input_fas[0], input_fas[1], output_prefix,
                                     LZW_FRACTION_CUTOFFS)
    else:
        generate_lzw_filtered_single(input_fas[0], output_prefix,
                                     LZW_FRACTION_CUTOFFS)
    # Copy back to aws
    s3_dst = SAMPLE_S3_OUTPUT_PATH + "/"
    uploader_start(result_path(LZW_OUT1), s3_dst)
    if paired:
        uploader_start(result_path(LZW_OUT2), s3_dst)
    print("Finished running LZW.")
def run_bowtie2(input_fas, uploader_start):
    """Bowtie2 is an aligner we use for filtering out reads that map to the
    host genome. Two input FASTAs means paired reads.
    http://bowtie-bio.sourceforge.net/index.shtml
    """
    # Check if genome downloaded already
    genome_dir = fetch_genome(BOWTIE2_GENOME)
    # If we are here, we are also going to need a gsnap genome later; start
    # fetching it now. This is actually THE PERFECT PLACE to initiate this
    # fetch. When we are running from scratch, there is plenty of time to
    # download the gsnap genome while bowtie is running. When we are doing a
    # lazy rerun, this function gets skipped, and the fetching of gsnap genome
    # is not initiated. That's brilliant -- we don't fetch the gsnap genome
    # if we won't be needing it, and lazy reruns are very quick.
    threading.Thread(target=fetch_genome, args=[GSNAP_GENOME]).start()
    # The file structure looks like
    # "bowtie2_genome/GRCh38.primary_assembly.genome.3.bt2"
    # The code below will handle up to "bowtie2_genome/GRCh38.primary_assembly.
    # genome.99.bt2" but not 100.
    cmd = "ls {genome_dir}/*.bt2*".format(genome_dir=genome_dir)
    local_genome_dir_ls = execute_command_with_output(cmd)
    # Drop the ".N.bt2" index suffix (6 chars, then a possible extra dot for
    # two-digit N) to recover the genome basename bowtie2 expects for -x.
    genome_basename = local_genome_dir_ls.split("\n")[0][:-6]
    if genome_basename[-1] == '.':
        genome_basename = genome_basename[:-1]
    bowtie2_params = [
        BOWTIE2, '-q', '-p',
        str(multiprocessing.cpu_count()), '-x', genome_basename, '-f',
        '--very-sensitive-local', '-S',
        result_path(BOWTIE2_OUT)
    ]
    if len(input_fas) == 2:
        bowtie2_params.extend(['-1', input_fas[0], '-2', input_fas[1]])
    else:
        bowtie2_params.extend(['-U', input_fas[0]])
    execute_command(" ".join(bowtie2_params))
    write_to_log("Finished Bowtie alignment.")
    # Extract out unmapped files from sam
    output_prefix = result_path(EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT1[:-8])
    if len(input_fas) == 2:
        generate_unmapped_pairs_from_sam(
            result_path(BOWTIE2_OUT), output_prefix)
    else:
        generate_unmapped_singles_from_sam(
            result_path(BOWTIE2_OUT), output_prefix)
    dst = SAMPLE_S3_OUTPUT_PATH + "/"

    def upload_to_dst(src):
        # Convenience wrapper: upload a result file to the sample S3 prefix.
        uploader_start(result_path(src), dst)

    upload_to_dst(BOWTIE2_OUT)
    upload_to_dst(EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT1)
    if len(input_fas) == 2:
        upload_to_dst(EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT2)
        upload_to_dst(EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT3)
    # Use write_to_log (not print) for consistency with the other steps.
    write_to_log("Extracted unmapped fragments from Bowtie SAM file.")
# Can remove this once the todo below, issue #173, is addressed.
class SkipGsnap(Exception):
    """Raised (and caught locally) to bypass the optional GSNAP filter step."""
def run_gsnap_filter(input_fas, uploader_start):
    """GSNAP is an aligner we use to filter out reads mapping to the host
    genome. We add this additional step after STAR/Bowtie-based filtering to
    increase our sensitivity to host filtering. Currently only runs on the
    human host with a chimp reference genome to increase sensitivity to human
    matches. Two input FASTAs means paired reads.
    http://research-pub.gene.com/gmap/
    """
    # Unpack the gsnap genome
    genome_dir = fetch_genome(GSNAP_GENOME, strict=False)
    if genome_dir is None:
        # Apparently if the GSNAP_GENOME file doesn't exist, we are supposed
        # to skip this step.
        # TODO (yunfang): An independent way to specify whether this step
        # should be executed, so that operational errors don't just silently
        # cause the step to be skipped. See #173.
        raise SkipGsnap()
    # gsnapl takes the genome as a directory (-D) plus an index name (-d).
    gsnap_base_dir = os.path.dirname(genome_dir)
    gsnap_index_name = os.path.basename(genome_dir)
    # Run Gsnap
    gsnap_params = [
        GSNAPL, '-A sam', '--batch=0', '--use-shared-memory=0',
        '--gmap-mode=all', '--npaths=1', '--ordered', '-t 32',
        '--max-mismatches=40', '-D', gsnap_base_dir, '-d', gsnap_index_name,
        '-o',
        result_path(GSNAP_FILTER_SAM)
    ]
    gsnap_params += input_fas
    execute_command(" ".join(gsnap_params))
    write_to_log("Finished GSNAP alignment.")
    # Extract out unmapped files from sam
    # [:-8] drops the 8-char filename suffix to form the shared output
    # prefix -- presumably '.1.fasta'; confirm against the OUT constants.
    output_prefix = result_path(EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT1[:-8])
    if len(input_fas) == 2:
        generate_unmapped_pairs_from_sam(
            result_path(GSNAP_FILTER_SAM), output_prefix)
    else:
        generate_unmapped_singles_from_sam(
            result_path(GSNAP_FILTER_SAM), output_prefix)
    dst = SAMPLE_S3_OUTPUT_PATH + "/"

    def upload_to_dst(src):
        # Convenience wrapper: upload a result file to the sample S3 prefix.
        uploader_start(result_path(src), dst)

    upload_to_dst(GSNAP_FILTER_SAM)
    upload_to_dst(EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT1)
    if len(input_fas) == 2:
        upload_to_dst(EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT2)
        upload_to_dst(EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT3)
    write_to_log(
        "Extracted unmapped fragments from SAM file for GSNAP output.")
@retry
def upload_with_retries(from_f, to_f):
    """Copy a local file to S3 quietly, retrying failures via @retry."""
    cmd = "aws s3 cp --quiet {from_f} {to_f}".format(from_f=from_f, to_f=to_f)
    execute_command(cmd)
def upload(from_f, to_f, status, status_lock=threading.RLock()):
    """Upload a file to S3 and record the outcome in the shared `status` dict.

    Note: the mutable default `status_lock` is intentional -- it is a single
    lock shared by every call, guarding concurrent writes to `status`.
    On failure the error is recorded and then re-raised.
    """
    try:
        with iostream_uploads:  # Limit concurrent uploads so as not to stall the pipeline.
            with iostream:  # Still counts toward the general semaphore.
                upload_with_retries(from_f, to_f)
        with status_lock:
            status[from_f] = "success"
    except BaseException:
        # Record the failure (even on KeyboardInterrupt/SystemExit, which a
        # bare `except:` also caught) before propagating it.
        with status_lock:
            status[from_f] = "error"
        raise
def unzip_to_file(from_f, to_f):
    """Decompress gzip archive `from_f` into `to_f`, keeping the source."""
    cmd = "gzip -dc {from_f} > {to_f}".format(from_f=from_f, to_f=to_f)
    execute_command(cmd)
def run_host_filtering(fastq_files, initial_file_type_for_log, lazy_run, stats,
                       prefiltered):
    """Run the host-filtering chain over the input files.

    Steps (unless `prefiltered`): STAR -> PriceSeqFilter -> FASTQ-to-FASTA
    (only for FASTQ inputs) -> CD-HIT-DUP -> LZW filter -> Bowtie2; then GSNAP
    in all cases (it may be skipped via SkipGsnap). Step outputs are uploaded
    to S3 on background threads; per-step read counts are recorded in `stats`.

    Args:
        fastq_files: one (single) or two (paired) local input paths.
        initial_file_type_for_log: "fastq"/"fasta" (+"_paired") label used
            when counting reads.
        lazy_run: passed to run_and_log_s3; presumably skips steps whose
            outputs already exist -- confirm in run_and_log_s3.
        stats: StatsFile accumulating per-step read counts.
        prefiltered: True when inputs are already host-filtered FASTA; they
            are then moved/unzipped in place of the Bowtie2 outputs.
    """
    number_of_input_files = len(fastq_files)
    # Paired inputs have their own set of expected step outputs.
    target_outputs = TARGET_OUTPUTS_SINGLE
    if number_of_input_files == 2:
        target_outputs = TARGET_OUTPUTS_PAIRED
    uploader_status = {}
    uploader_threads = []

    def uploader_start(from_f, to_f):
        # Fire-and-forget upload; completion is verified at the end by
        # uploader_check_wait_all().
        t = threading.Thread(
            target=upload, args=[from_f, to_f, uploader_status])
        t.start()
        uploader_threads.append(t)

    def uploader_check_wait_all():
        # Join every upload thread, then fail loudly if any upload errored.
        for t in uploader_threads:
            t.join()
        for filename, status in uploader_status.iteritems():
            msg = "Bad upload status {} for file {}".format(status, filename)
            assert status == "success", msg

    if prefiltered:
        # Move input in place of bowtie output (as it represents bowtie output
        # from another run).
        btos = [
            EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT1,
            EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT2
        ]
        unzip_threads = []
        for i, fname in enumerate(fastq_files):
            msg = "Prefiltered input is not a fasta file: {fname}".format(
                fname=fname)
            assert fname.endswith(".fasta") or fname.endswith(".fasta.gz"), msg
            if fname.endswith(".fasta"):
                cmd = "mv {fname} {bto}".format(
                    fname=fname, bto=result_path(btos[i]))
                execute_command(cmd)
            else:
                # Gzipped inputs are decompressed concurrently.
                t = MyThread(
                    target=unzip_to_file, args=[fname,
                                                result_path(btos[i])])
                t.start()
                unzip_threads.append(t)
        for t in unzip_threads:
            t.join()
            assert t.completed and not t.exception
    else:
        # If not pre-filtered, run STAR
        # Getting total_reads and file truncation status from STAR
        total_counts_from_star = {}
        log_params = return_merged_dict(
            DEFAULT_LOG_PARAMS, {
                "title": "STAR",
                "version_file_s3": STAR_BOWTIE_VERSION_FILE_S3,
                "output_version_file": result_path(VERSION_OUT)
            })
        run_and_log_s3(log_params, target_outputs["run_star"], lazy_run,
                       SAMPLE_S3_OUTPUT_PATH, run_star, fastq_files,
                       uploader_start, total_counts_from_star)
        if not total_counts_from_star.get('total_reads'):
            # Total reads not set. Most likely it's lazy run. Will have to
            # actually count the reads.
            #
            # TODO: Remove this when we also lazy load the stats.json file.
            max_reads = MAX_INPUT_READS * len(fastq_files)
            total_reads = count_reads(fastq_files[0],
                                      initial_file_type_for_log, max_reads)
            if total_reads >= max_reads:
                # Hitting the cap means the input was (or would be) truncated.
                total_reads = max_reads
                total_counts_from_star['truncated'] = 1
            total_counts_from_star['total_reads'] = total_reads
        stats.data.append(total_counts_from_star)
        stats.count_reads(
            "run_star",
            before_filename=fastq_files[0],
            before_filetype=initial_file_type_for_log,
            after_filename=result_path(STAR_OUT1),
            after_filetype=initial_file_type_for_log)
        if total_counts_from_star.get('truncated'):
            # Upload the truncation file to notify web that the input files are
            # truncated.
            cmd = "echo %d | aws s3 cp - %s/%s" % (
                total_counts_from_star['total_reads'], SAMPLE_S3_OUTPUT_PATH,
                INPUT_TRUNCATED_FILE)
            execute_command(cmd)
        # Run PriceSeqFilter
        log_params = return_merged_dict(DEFAULT_LOG_PARAMS,
                                        {"title": "PriceSeqFilter"})
        input_files = [result_path(STAR_OUT1)]
        if number_of_input_files == 2:
            input_files.append(result_path(STAR_OUT2))
        run_and_log_s3(log_params, target_outputs["run_priceseqfilter"],
                       lazy_run, SAMPLE_S3_OUTPUT_PATH, run_priceseqfilter,
                       input_files, uploader_start)
        stats.count_reads(
            "run_priceseqfilter",
            before_filename=result_path(STAR_OUT1),
            before_filetype=initial_file_type_for_log,
            after_filename=result_path(PRICESEQFILTER_OUT1),
            after_filetype=initial_file_type_for_log)
        # Run FASTQ to FASTA
        if "fastq" in FILE_TYPE or "fq" in FILE_TYPE:
            log_params = return_merged_dict(DEFAULT_LOG_PARAMS,
                                            {"title": "FASTQ to FASTA"})
            input_files = [result_path(PRICESEQFILTER_OUT1)]
            next_inputs = [result_path(FQ2FA_OUT1)]
            if number_of_input_files == 2:
                input_files.append(result_path(PRICESEQFILTER_OUT2))
                next_inputs.append(result_path(FQ2FA_OUT2))
            run_and_log_s3(log_params, target_outputs["run_fq2fa"], lazy_run,
                           SAMPLE_S3_OUTPUT_PATH, run_fq2fa, input_files,
                           uploader_start)
        else:
            # Inputs were already FASTA; skip the conversion step.
            next_inputs = [result_path(PRICESEQFILTER_OUT1)]
            if number_of_input_files == 2:
                next_inputs.append(result_path(PRICESEQFILTER_OUT2))
        # All subsequent steps operate on FASTA.
        file_type_for_log = "fasta"
        if number_of_input_files == 2:
            file_type_for_log = "fasta_paired"
        # Run CD-HIT-DUP
        log_params = return_merged_dict(DEFAULT_LOG_PARAMS,
                                        {"title": "CD-HIT-DUP"})
        run_and_log_s3(log_params, target_outputs["run_cdhitdup"], lazy_run,
                       SAMPLE_S3_OUTPUT_PATH, run_cdhitdup, next_inputs,
                       uploader_start)
        stats.count_reads(
            "run_cdhitdup",
            before_filename=next_inputs[0],
            before_filetype=file_type_for_log,
            after_filename=result_path(CDHITDUP_OUT1),
            after_filetype=file_type_for_log)
        # Run LZW filter
        log_params = return_merged_dict(DEFAULT_LOG_PARAMS,
                                        {"title": "LZW filter"})
        input_files = [result_path(CDHITDUP_OUT1)]
        if number_of_input_files == 2:
            input_files.append(result_path(CDHITDUP_OUT2))
        run_and_log_s3(log_params, target_outputs["run_lzw"], lazy_run,
                       SAMPLE_S3_OUTPUT_PATH, run_lzw, input_files,
                       uploader_start)
        stats.count_reads(
            "run_lzw",
            before_filename=result_path(CDHITDUP_OUT1),
            before_filetype=file_type_for_log,
            after_filename=result_path(LZW_OUT1),
            after_filetype=file_type_for_log)
        # Run Bowtie
        log_params = return_merged_dict(DEFAULT_LOG_PARAMS,
                                        {"title": "bowtie2"})
        input_files = [result_path(LZW_OUT1)]
        if number_of_input_files == 2:
            input_files.append(result_path(LZW_OUT2))
        run_and_log_s3(log_params, target_outputs["run_bowtie2"], lazy_run,
                       SAMPLE_S3_OUTPUT_PATH, run_bowtie2, input_files,
                       uploader_start)
        stats.count_reads(
            "run_bowtie2",
            before_filename=result_path(LZW_OUT1),
            before_filetype=file_type_for_log,
            after_filename=result_path(EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT1),
            after_filetype=file_type_for_log)
    # Run GSNAP against host genomes (only available for Human as of 5/1/2018)
    # GSNAP may run again even for pre-filtered inputs
    # NOTE(review): `file_type_for_log` and `stats.data[-1]['reads_after']`
    # are only set on the non-prefiltered path above; for prefiltered input
    # this section appears to rely on an exception being raised before they
    # are used -- confirm the intended prefiltered behavior.
    try:
        input_files = [result_path(EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT1)]
        if number_of_input_files == 2:
            input_files.append(
                result_path(EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT2))
        # Skip GSNAP if the number of reads is too big
        # TODO: move gsnap filter to after subsampling
        if stats.data[-1]['reads_after'] > MAX_GNSAP_FILTER_READS:
            raise SkipGsnap()
        log_params = return_merged_dict(DEFAULT_LOG_PARAMS,
                                        {"title": "run_gsnap_filter"})
        run_and_log_s3(log_params, target_outputs["run_gsnap_filter"],
                       lazy_run, SAMPLE_S3_OUTPUT_PATH, run_gsnap_filter,
                       input_files, uploader_start)
        stats.count_reads(
            "run_gsnap_filter",
            before_filename=result_path(EXTRACT_UNMAPPED_FROM_BOWTIE_SAM_OUT1),
            before_filetype=file_type_for_log,
            after_filename=result_path(EXTRACT_UNMAPPED_FROM_GSNAP_SAM_OUT1),
            after_filetype=file_type_for_log)
    except SkipGsnap:
        write_to_log("Skipping gsnap for prefilterd input or too many reads")
        pass
    # Finalize the remaining reads
    stats.set_remaining_reads()
    uploader_check_wait_all()
def upload_pipeline_version_file():
    """Record the current pipeline version locally, then publish it to S3."""
    write_cmd = "echo %s > %s" % (PIPELINE_VERSION, PIPELINE_VERSION_OUT)
    execute_command(write_cmd)
    publish_cmd = "aws s3 cp %s %s/" % (PIPELINE_VERSION_OUT, OUTPUT_BUCKET)
    execute_command(publish_cmd)
def run_stage1(lazy_run=True):
    """Stage 1: fetch raw inputs from S3, then run host filtering.

    Creates the working directories, downloads every input matching
    FILE_TYPE from SAMPLE_S3_INPUT_PATH (in parallel), and hands the local
    files to run_host_filtering.  Supports exactly 1 or 2 input files.
    """
    # `global` must appear before any use of FILE_TYPE in this function
    # (the fallback branch below may rebind it); declaring it mid-function
    # after a read is a SyntaxError on Python 3.
    global FILE_TYPE
    execute_command("mkdir -p %s %s %s %s" % (SAMPLE_DIR, FASTQ_DIR,
                                              RESULT_DIR, SCRATCH_DIR))
    execute_command("mkdir -p %s " % REF_DIR)
    # configure logger
    log_file = "%s/%s.%s.txt" % (RESULT_DIR, LOGS_OUT_BASENAME,
                                 AWS_BATCH_JOB_ID)
    configure_logger(log_file)
    write_to_log("Starting stage...")
    # Get list of input files to fetch
    command = "aws s3 ls %s/ | grep '\\.%s$'" % (SAMPLE_S3_INPUT_PATH,
                                                 FILE_TYPE)
    try:
        output = execute_command_with_output(command).rstrip().split("\n")
    except:
        # TODO: Duct-tape for now to resolve fq.gz vs fastq.gz issue.
        # NOTE(review): if FILE_TYPE is not fastq.gz here, `output` stays
        # unbound and the loop below raises NameError -- confirm intent.
        if "fastq.gz" in FILE_TYPE:
            FILE_TYPE = "fq.gz"
            command = "aws s3 ls %s/ | grep '\\.%s$'" % (SAMPLE_S3_INPUT_PATH, FILE_TYPE)
            output = execute_command_with_output(command).rstrip().split("\n")
    input_fetch_threads = []

    def fetch_input(input_basename):
        # Download one input object into FASTQ_DIR without unzipping.
        fetch_from_s3(
            os.path.join(SAMPLE_S3_INPUT_PATH, input_basename),
            FASTQ_DIR,
            allow_s3mi=True,
            auto_unzip=False)

    # Fetch input files with multiple threads
    for line in output:
        # Capture the basename ending in ".<FILE_TYPE>".  The dot before the
        # extension is escaped so that e.g. "xfastq.gz" is not mistaken for
        # a ".fastq.gz" file (the original pattern used an unescaped ".").
        m = re.match(".*?([^ ]*\\." + re.escape(FILE_TYPE) + ")", line)
        if m:
            t = MyThread(target=fetch_input, args=[m.group(1)])
            t.start()
            input_fetch_threads.append(t)
        else:
            write_to_log("%s doesn't match %s" % (line, FILE_TYPE))
    for t in input_fetch_threads:
        # Check thread completion
        t.join()
        assert t.completed and not t.exception
    # Check FASTQ files
    cmd = "ls %s/*.%s" % (FASTQ_DIR, FILE_TYPE)
    fastq_files = execute_command_with_output(cmd).rstrip().split("\n")
    if len(fastq_files) not in [1, 2]:
        msg = "Number of input files was neither 1 nor 2. Aborting computation."
        write_to_log(msg)
        return  # only support either 1 file or 2 (paired) files
    initial_file_type_for_log = "fasta"
    if "fastq" in FILE_TYPE or "fq" in FILE_TYPE:
        initial_file_type_for_log = "fastq"
    if len(fastq_files) == 2:
        initial_file_type_for_log += "_paired"
    # Instantiate a stats instance
    stats = StatsFile(STATS_OUT, RESULT_DIR, None, SAMPLE_S3_OUTPUT_PATH)
    # Download total_reads.json input, if present. This is only provided with
    # post-filtered inputs, where we don't have the reads prior to host
    # filtering. Record total number of input reads.
    try:
        stats_in = StatsFile(STATS_IN, RESULT_DIR, SAMPLE_S3_INPUT_PATH,
                             SAMPLE_S3_OUTPUT_PATH)
        stats_in.load_from_s3()
        total_reads = stats_in.get_total_reads()
        assert total_reads == int(total_reads)
        msg = "Post-filtered input with {total_reads} original total reads.".format(
            total_reads=total_reads)
        write_to_log(msg)
    except:
        # No stats file -> unfiltered input; fall through to full filtering.
        total_reads = None
        stats_in = None
        write_to_log("Unfiltered input. Need host filtering")
    if total_reads is not None:  # set total reads if available
        stats.data.append({'total_reads': total_reads})
    # Run host filtering
    run_host_filtering(fastq_files, initial_file_type_for_log, lazy_run, stats,
                       stats_in is not None)
    stats.save_to_s3()
    write_to_log("Host filtering complete")
    upload_log_file(SAMPLE_S3_OUTPUT_PATH)
|
<gh_stars>0
package models;
/**
 * Immutable row/column pair.
 *
 * The original constructor silently discarded both arguments, leaving the
 * object with no state; the values are now stored and exposed via getters
 * (a backward-compatible addition).
 */
public class Coordinate {
    private final int row;
    private final int column;

    public Coordinate(int row, int column) {
        this.row = row;
        this.column = column;
    }

    /** @return the row index supplied at construction */
    public int getRow() {
        return row;
    }

    /** @return the column index supplied at construction */
    public int getColumn() {
        return column;
    }
}
# models.py
from django.db import models
class Maintainer(models.Model):
    """Contact record for a package maintainer."""
    # Display name of the maintainer.
    name = models.CharField(max_length=100)
    # Contact address; validated as an e-mail address by EmailField.
    email = models.EmailField()
# views.py
from rest_framework import viewsets
from maintainer.models import Maintainer
from maintainer.serializers import MaintainerSerializer
class MaintainerAPIView(viewsets.ModelViewSet):
    """Full CRUD endpoint for Maintainer records."""
    queryset = Maintainer.objects.all()
    serializer_class = MaintainerSerializer
class MaintainerAutocompleteView(viewsets.ModelViewSet):
    """Autocomplete endpoint; currently identical to MaintainerAPIView
    (no search/filtering is applied here yet)."""
    queryset = Maintainer.objects.all()
    serializer_class = MaintainerSerializer
# serializers.py
from rest_framework import serializers
from maintainer.models import Maintainer
class MaintainerSerializer(serializers.ModelSerializer):
    """Serializes Maintainer records, exposing id, name and email."""
    class Meta:
        model = Maintainer
        fields = ['id', 'name', 'email']
# urls.py
from django.conf.urls import url
from django.urls import path
from rest_framework import routers
from maintainer import views
router = routers.DefaultRouter()
router.register("autocomplete/maintainer", views.MaintainerAutocompleteView, basename="maintainer_autocomplete")
router.register('maintainer', views.MaintainerAPIView, basename='maintainer')
# NOTE(review): the explicit paths below duplicate the routes (and the
# 'maintainer-list'/'maintainer-detail' names) that the router already
# generates for the 'maintainer' registration -- confirm both are needed.
urlpatterns = [
    path('maintainer/', views.MaintainerAPIView.as_view({'get': 'list', 'post': 'create'}), name='maintainer-list'),
    path('maintainer/<int:pk>/', views.MaintainerAPIView.as_view({'get': 'retrieve', 'put': 'update', 'delete': 'destroy'}), name='maintainer-detail'),
]
urlpatterns += router.urls
<filename>db/migrate/20200709220838_add_date_to_measurement.rb<gh_stars>1-10
# Adds a nullable `date` datetime column to the measurements table.
# `change` is reversible: rolling back drops the column.
class AddDateToMeasurement < ActiveRecord::Migration[6.0]
  def change
    add_column :measurements, :date, :datetime
  end
end
|
<reponame>Rbenjani/competitive<gh_stars>0
/**
 * Version 2
 *
 * Reads one value and prints YES when it is at least 4 and even,
 * NO otherwise (relies on JS numeric coercion of the raw input string).
 */
var w = readline();
var answer = (w < 4 || w % 2) ? 'NO' : 'YES';
print(answer);
|
from django.urls import include, path
from .views import classroom, classroom_list, classroom_detail, add_classroom, add_student
urlpatterns = [
    path('classrooms/', classroom_list, name='classroom-list'),
    path('classrooms/<int:classroom_id>/', classroom_detail, name='classroom-detail'),
    path('classrooms/add/', add_classroom, name='add-classroom'),
    path('classrooms/<int:classroom_id>/add_student/', add_student, name='add-student'),
]
# Include the classroom URL configuration in the main project's URL configuration
# NOTE(review): this second assignment REPLACES the list above within this
# module, discarding the four classroom routes. It reads like it belongs in
# the project's root urls.py (two files merged?) -- confirm and split.
urlpatterns = [
    path('api/', include('classroom.urls')),
]
<gh_stars>10-100
package net.johnewart.gearman.common.packets.response;
import net.johnewart.gearman.constants.PacketType;
/**
 * Gearman NO_JOB response: tells a worker that no job is currently
 * available. The packet is header-only and carries no payload.
 */
public class NoJob extends ResponsePacket {
    public NoJob()
    {
        this.type = PacketType.NO_JOB;
    }

    /** Serializes to the bare packet header; there is no body to append. */
    @Override
    public byte[] toByteArray()
    {
        return getHeader();
    }

    /** @return 0 -- NO_JOB has an empty payload */
    @Override
    public int getPayloadSize()
    {
        return 0;
    }
}
|
<filename>ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.parse;
import java.io.IOException;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;
/**
* ColumnStatsSemanticAnalyzer.
* Handles semantic analysis and rewrite for gathering column statistics both at the level of a
* partition and a table. Note that table statistics are implemented in SemanticAnalyzer.
*
*/
public class ColumnStatsSemanticAnalyzer extends SemanticAnalyzer {
  private static final Log LOG = LogFactory
      .getLog(ColumnStatsSemanticAnalyzer.class);

  // The AST as received; kept so non-rewritten statements analyze unchanged.
  private ASTNode originalTree;
  // AST of the generated "select compute_stats(...)" query, when rewritten.
  private ASTNode rewrittenTree;
  private String rewrittenQuery;
  private Context ctx;
  // True when this statement was rewritten into an aggregation query.
  private boolean isRewritten;
  // True for table-level stats; false when a partition spec was given.
  private boolean isTableLevel;
  private List<String> colNames;
  private List<String> colType;
  private Table tbl;

  public ColumnStatsSemanticAnalyzer(HiveConf conf) throws SemanticException {
    super(conf);
  }

  /**
   * Returns true for "ANALYZE TABLE t ... FOR COLUMNS"-shaped trees, i.e.
   * a TOK_TAB/TOK_TABNAME first child followed by the COLUMNS keyword.
   */
  private boolean shouldRewrite(ASTNode tree) {
    boolean rwt = false;
    if (tree.getChildCount() > 1) {
      ASTNode child0 = (ASTNode) tree.getChild(0);
      ASTNode child1;
      if (child0.getToken().getType() == HiveParser.TOK_TAB) {
        child0 = (ASTNode) child0.getChild(0);
        if (child0.getToken().getType() == HiveParser.TOK_TABNAME) {
          child1 = (ASTNode) tree.getChild(1);
          if (child1.getToken().getType() == HiveParser.KW_COLUMNS) {
            rwt = true;
          }
        }
      }
    }
    return rwt;
  }

  /** Returns true when the statement names a partition (TOK_PARTSPEC). */
  private boolean isPartitionLevelStats(ASTNode tree) {
    boolean isPartitioned = false;
    ASTNode child = (ASTNode) tree.getChild(0);
    if (child.getChildCount() > 1) {
      child = (ASTNode) child.getChild(1);
      if (child.getToken().getType() == HiveParser.TOK_PARTSPEC) {
        isPartitioned = true;
      }
    }
    return isPartitioned;
  }

  /** Resolves the analyzed table from the metastore, or fails semantically. */
  private Table getTable(ASTNode tree) throws SemanticException {
    String tableName = getUnescapedName((ASTNode) tree.getChild(0).getChild(0));
    try {
      return db.getTable(tableName);
    } catch (HiveException e) {
      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
    }
  }

  /**
   * Extracts the partition spec as a key->value map. Keys are lowercased;
   * a key given without a value (dynamic part) maps to null. Surrounding
   * single quotes are stripped from values.
   */
  private Map<String,String> getPartKeyValuePairsFromAST(ASTNode tree) {
    ASTNode child = ((ASTNode) tree.getChild(0).getChild(1));
    Map<String,String> partSpec = new HashMap<String, String>();
    String partKey;
    String partValue;
    for (int i = 0; i < child.getChildCount(); i++) {
      partKey = new String(getUnescapedName((ASTNode) child.getChild(i).getChild(0))).toLowerCase();
      if (child.getChild(i).getChildCount() > 1) {
        partValue = new String(getUnescapedName((ASTNode) child.getChild(i).getChild(1)));
        partValue = partValue.replaceAll("'", "");
      } else {
        partValue = null;
      }
      partSpec.put(partKey, partValue);
    }
    return partSpec;
  }

  /**
   * Returns the column names to gather stats for: all table columns when no
   * explicit list was given (2 children), otherwise the listed columns
   * (3 children, list is child 2).
   */
  private List<String> getColumnName(ASTNode tree) throws SemanticException{

    switch (tree.getChildCount()) {
      case 2:
        return Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
      case 3:
        int numCols = tree.getChild(2).getChildCount();
        List<String> colName = new LinkedList<String>();
        for (int i = 0; i < numCols; i++) {
          colName.add(i, new String(getUnescapedName((ASTNode) tree.getChild(2).getChild(i))));
        }
        return colName;
      default:
        throw new SemanticException("Internal error. Expected number of children of ASTNode to be"
            + " either 2 or 3. Found : " + tree.getChildCount());
    }
  }

  /**
   * Validates and normalizes a (possibly partial) partition spec: verifies a
   * fully-specified partition exists, fills missing partition keys with null
   * values, and rejects keys that are not partition columns of the table.
   */
  private void handlePartialPartitionSpec(Map<String,String> partSpec) throws
    SemanticException {

    // If user has fully specified partition, validate that partition exists
    int partValsSpecified = 0;
    for (String partKey : partSpec.keySet()) {
      partValsSpecified += partSpec.get(partKey) == null ? 0 : 1;
    }
    try {
      if ((partValsSpecified == tbl.getPartitionKeys().size()) && (db.getPartition(tbl, partSpec, false, null, false) == null)) {
        throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg() + " : " + partSpec);
      }
    } catch (HiveException he) {
      throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg() + " : " + partSpec);
    }

    // User might have only specified partial list of partition keys, in which case add other partition keys in partSpec
    List<String> partKeys = Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys());
    for (String partKey : partKeys){
      if(!partSpec.containsKey(partKey)) {
        partSpec.put(partKey, null);
      }
    }

    // Check if user have erroneously specified non-existent partitioning columns
    for (String partKey : partSpec.keySet()) {
      if(!partKeys.contains(partKey)){
        throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PART_KEY.getMsg() + " : " + partKey);
      }
    }
  }

  /**
   * Builds the WHERE clause (from partition keys with values) and the
   * GROUP BY clause (over all partition keys) appended to the rewritten
   * query for partition-level statistics. String-typed values are quoted.
   */
  private StringBuilder genPartitionClause(Map<String,String> partSpec) throws SemanticException {
    StringBuilder whereClause = new StringBuilder(" where ");
    boolean predPresent = false;
    StringBuilder groupByClause = new StringBuilder(" group by ");
    boolean aggPresent = false;

    for (String partKey : partSpec.keySet()) {
      String value;
      if ((value = partSpec.get(partKey)) != null) {
        if (!predPresent) {
          predPresent = true;
        } else {
          whereClause.append(" and ");
        }
        whereClause.append(partKey);
        whereClause.append(" = ");
        if (getColTypeOf(partKey).equalsIgnoreCase("string")) {
          whereClause.append("'");
        }
        whereClause.append(value);
        if (getColTypeOf(partKey).equalsIgnoreCase("string")) {
          whereClause.append("'");
        }
      }
    }

    for (FieldSchema fs : tbl.getPartitionKeys()) {
      if (!aggPresent) {
        aggPresent = true;
      } else {
        groupByClause.append(",");
      }
      groupByClause.append(fs.getName());
    }

    // attach the predicate and group by to the return clause
    return predPresent ? whereClause.append(groupByClause) : groupByClause;
  }

  /** Looks up the declared type of a partition key (case-insensitive). */
  private String getColTypeOf (String partKey) throws SemanticException{

    for (FieldSchema fs : tbl.getPartitionKeys()) {
      if (partKey.equalsIgnoreCase(fs.getName())) {
        return fs.getType();
      }
    }
    throw new SemanticException ("Unknown partition key : " + partKey);
  }

  /**
   * Maps the configured NDV error tolerance (hive.stats.ndv.error) to the
   * number of bit vectors used by compute_stats; smaller error needs more
   * vectors. 2.4% is the lowest achievable error (1024 vectors).
   */
  private int getNumBitVectorsForNDVEstimation(HiveConf conf) throws SemanticException {
    int numBitVectors;
    float percentageError = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_STATS_NDV_ERROR);

    if (percentageError < 0.0) {
      throw new SemanticException("hive.stats.ndv.error can't be negative");
    } else if (percentageError <= 2.4) {
      numBitVectors = 1024;
      LOG.info("Lowest error achievable is 2.4% but error requested is " + percentageError + "%");
      LOG.info("Choosing 1024 bit vectors..");
    } else if (percentageError <= 3.4 ) {
      numBitVectors = 1024;
      LOG.info("Error requested is " + percentageError + "%");
      LOG.info("Choosing 1024 bit vectors..");
    } else if (percentageError <= 4.8) {
      numBitVectors = 512;
      LOG.info("Error requested is " + percentageError + "%");
      LOG.info("Choosing 512 bit vectors..");
    } else if (percentageError <= 6.8) {
      numBitVectors = 256;
      LOG.info("Error requested is " + percentageError + "%");
      LOG.info("Choosing 256 bit vectors..");
    } else if (percentageError <= 9.7) {
      numBitVectors = 128;
      LOG.info("Error requested is " + percentageError + "%");
      LOG.info("Choosing 128 bit vectors..");
    } else if (percentageError <= 13.8) {
      numBitVectors = 64;
      LOG.info("Error requested is " + percentageError + "%");
      LOG.info("Choosing 64 bit vectors..");
    } else if (percentageError <= 19.6) {
      numBitVectors = 32;
      LOG.info("Error requested is " + percentageError + "%");
      LOG.info("Choosing 32 bit vectors..");
    } else if (percentageError <= 28.2) {
      numBitVectors = 16;
      LOG.info("Error requested is " + percentageError + "%");
      LOG.info("Choosing 16 bit vectors..");
    } else if (percentageError <= 40.9) {
      numBitVectors = 8;
      LOG.info("Error requested is " + percentageError + "%");
      LOG.info("Choosing 8 bit vectors..");
    } else if (percentageError <= 61.0) {
      numBitVectors = 4;
      LOG.info("Error requested is " + percentageError + "%");
      LOG.info("Choosing 4 bit vectors..");
    } else {
      numBitVectors = 2;
      LOG.info("Error requested is " + percentageError + "%");
      LOG.info("Choosing 2 bit vectors..");
    }
    return numBitVectors;
  }

  /**
   * Resolves each requested column name (case-insensitive) to its declared
   * type. Note: names not found in the table simply produce no entry here;
   * validateSpecifiedColumnNames() is what rejects unknown columns.
   */
  private List<String> getColumnTypes(List<String> colNames)
      throws SemanticException{
    List<String> colTypes = new LinkedList<String>();
    List<FieldSchema> cols = tbl.getCols();

    for (String colName : colNames) {
      for (FieldSchema col: cols) {
        if (colName.equalsIgnoreCase(col.getName())) {
          colTypes.add(new String(col.getType()));
        }
      }
    }
    return colTypes;
  }

  /**
   * Generates the internal "select compute_stats(col, numBitVectors), ...
   * from table [where ... group by ...]" query that actually gathers the
   * statistics, applying hive variable substitution to the result.
   */
  private String genRewrittenQuery(List<String> colNames, int numBitVectors, Map<String,String> partSpec,
    boolean isPartitionStats) throws SemanticException{
    StringBuilder rewrittenQueryBuilder = new StringBuilder("select ");
    String rewrittenQuery;

    for (int i = 0; i < colNames.size(); i++) {
      if (i > 0) {
        rewrittenQueryBuilder.append(" , ");
      }
      rewrittenQueryBuilder.append("compute_stats(");
      rewrittenQueryBuilder.append(colNames.get(i));
      rewrittenQueryBuilder.append(" , ");
      rewrittenQueryBuilder.append(numBitVectors);
      rewrittenQueryBuilder.append(" )");
    }

    if (isPartitionStats) {
      // Partition keys are selected too so results can be grouped per partition.
      for (FieldSchema fs : tbl.getPartCols()) {
        rewrittenQueryBuilder.append(" , " + fs.getName());
      }
    }
    rewrittenQueryBuilder.append(" from ");
    rewrittenQueryBuilder.append(tbl.getTableName());
    isRewritten = true;

    // If partition level statistics is requested, add predicate and group by as needed to rewritten
    // query
    if (isPartitionStats) {
      rewrittenQueryBuilder.append(genPartitionClause(partSpec));
    }

    rewrittenQuery = rewrittenQueryBuilder.toString();
    rewrittenQuery = new VariableSubstitution().substitute(conf, rewrittenQuery);
    return rewrittenQuery;
  }

  /** Parses the rewritten query text back into an AST for analysis. */
  private ASTNode genRewrittenTree(String rewrittenQuery) throws SemanticException {
    ASTNode rewrittenTree;
    // Parse the rewritten query string
    try {
      ctx = new Context(conf);
    } catch (IOException e) {
      throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_IO_ERROR.getMsg());
    }
    ctx.setCmd(rewrittenQuery);
    ParseDriver pd = new ParseDriver();

    try {
      rewrittenTree = pd.parse(rewrittenQuery, ctx);
    } catch (ParseException e) {
      throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_PARSE_ERROR.getMsg());
    }
    rewrittenTree = ParseUtils.findRootNonNullToken(rewrittenTree);
    return rewrittenTree;
  }

  /**
   * Main entry point: when the tree is ANALYZE ... FOR COLUMNS, validate the
   * request (table, columns, partition spec) and rewrite it into an internal
   * aggregation query; otherwise pass the tree through untouched.
   */
  public ColumnStatsSemanticAnalyzer(HiveConf conf, ASTNode tree) throws SemanticException {
    super(conf);
    // check if it is no scan. grammar prevents coexit noscan/columns
    super.processNoScanCommand(tree);
    // check if it is partial scan. grammar prevents coexit partialscan/columns
    super.processPartialScanCommand(tree);
    /* Rewrite only analyze table <> column <> compute statistics; Don't rewrite analyze table
     * command - table stats are collected by the table scan operator and is not rewritten to
     * an aggregation.
     */
    if (shouldRewrite(tree)) {
      tbl = getTable(tree);
      colNames = getColumnName(tree);
      // Save away the original AST
      originalTree = tree;
      boolean isPartitionStats = isPartitionLevelStats(tree);
      Map<String,String> partSpec = null;
      checkIfTemporaryTable();
      checkForPartitionColumns(colNames, Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys()));
      validateSpecifiedColumnNames(colNames);

      if (isPartitionStats) {
        isTableLevel = false;
        partSpec = getPartKeyValuePairsFromAST(tree);
        handlePartialPartitionSpec(partSpec);
      } else {
        isTableLevel = true;
      }

      colType = getColumnTypes(colNames);
      int numBitVectors = getNumBitVectorsForNDVEstimation(conf);
      rewrittenQuery = genRewrittenQuery(colNames, numBitVectors, partSpec, isPartitionStats);
      rewrittenTree = genRewrittenTree(rewrittenQuery);
    } else {
      // Not an analyze table column compute statistics statement - don't do any rewrites
      originalTree = rewrittenTree = tree;
      rewrittenQuery = null;
      isRewritten = false;
    }
  }

  // fail early if the columns specified for column statistics are not valid
  private void validateSpecifiedColumnNames(List<String> specifiedCols)
      throws SemanticException {
    List<String> tableCols = Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
    for(String sc : specifiedCols) {
      if (!tableCols.contains(sc.toLowerCase())) {
        String msg = "'" + sc + "' (possible columns are " + tableCols.toString() + ")";
        throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(msg));
      }
    }
  }

  /** Rejects stats requests that name a partition column. */
  private void checkForPartitionColumns(List<String> specifiedCols, List<String> partCols)
      throws SemanticException {
    // Raise error if user has specified partition column for stats
    for (String pc : partCols) {
      for (String sc : specifiedCols) {
        if (pc.equalsIgnoreCase(sc)) {
          throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_COLUMN.getMsg()
              + " [Try removing column '" + sc + "' from column list]");
        }
      }
    }
  }

  /** Column statistics are not supported on temporary tables. */
  private void checkIfTemporaryTable() throws SemanticException {
    if (tbl.isTemporary()) {
      throw new SemanticException(tbl.getTableName()
          + " is a temporary table. Column statistics are not supported on temporary tables.");
    }
  }

  /**
   * Analyzes either the rewritten aggregation query (seeding the QB with the
   * analyze-rewrite metadata) or, for non-rewritten statements, the original
   * tree under the caller-provided context.
   */
  @Override
  public void analyze(ASTNode ast, Context origCtx) throws SemanticException {
    QB qb;
    QBParseInfo qbp;

    // initialize QB
    init();

    // Setup the necessary metadata if originating from analyze rewrite
    if (isRewritten) {
      qb = getQB();
      qb.setAnalyzeRewrite(true);
      qbp = qb.getParseInfo();
      qbp.setTableName(tbl.getTableName());
      qbp.setTblLvl(isTableLevel);
      qbp.setColName(colNames);
      qbp.setColType(colType);
      initCtx(ctx);
      LOG.info("Invoking analyze on rewritten query");
      analyzeInternal(rewrittenTree);
    } else {
      initCtx(origCtx);
      LOG.info("Invoking analyze on original query");
      analyzeInternal(originalTree);
    }
  }
}
|
// tslint:disable no-magic-numbers
import { as, Pitch, Scalar } from '@musical-patterns/utilities'

// Initial pitch scalars for the "not true" (sub)superparticular variants:
// 2/1 for the superparticular case, its reciprocal 1/2 for the
// subsuperparticular case.
export const NOT_TRUE_SUPERPARTICULAR_INITIAL_PITCH: Scalar<Pitch> = as.Scalar<Pitch>(2 / 1)
export const NOT_TRUE_SUBSUPERPARTICULAR_INITIAL_PITCH: Scalar<Pitch> = as.Scalar<Pitch>(1 / 2)
|
##
## Add user Hadoop
# useradd -m -d /home/hadoop -s /bin/bash hadoop || true
# su -c "mkdir -p ~/.ssh || true" vagrant
# Append the shared public key to vagrant's authorized_keys so nodes can
# SSH in as vagrant without a password.
su -c "cat /vagrant/id_rsa.pub >> ~/.ssh/authorized_keys" vagrant
|
<reponame>brain-net-cog/fMRIDenoise<gh_stars>10-100
def profiler_callback(node, status):
    """Forward finished-node events to nipype's resource-profiler log.

    Acts as a nipype plugin status callback: ignores everything except
    'end' events for nodes with a single runtime record, and delegates
    those to ``log_nodes_cb``.

    :param node: nipype node whose status changed
    :param status: lifecycle status string; only 'end' is logged
    :return: result of ``log_nodes_cb`` for logged events, else ``None``
    """
    # Only completed nodes carry usable runtime information.
    if status != 'end':
        return None
    # MapNode-style results hold a list of runtimes; those are skipped.
    if isinstance(node.result.runtime, list):
        return None
    # Import lazily so non-'end' events never pay the nipype import cost
    # (the original imported unconditionally, even on early returns).
    from nipype.utils.profiler import log_nodes_cb
    return log_nodes_cb(node, status)
# Banner, run the production Angular build, then report completion.
echo ------------------------------------------------------------------------------
echo ng build --prod
echo ------------------------------------------------------------------------------
ng build --prod
echo ------------------------------------------------------------------------------
echo "Done."
echo ------------------------------------------------------------------------------
|
#!/bin/bash
#SBATCH -p scavenger
#SBATCH --job-name="Rstats"
#SBATCH -a 1-50
#SBATCH --mem=10G
#SBATCH -o "globalancestry_whole-autosome.out"
module load R/4.0.0
# Each array task c (1..50) handles a block of ten simulation seeds:
# seeds (10c - 9) .. (10c), computed by appending "0" and subtracting 0..9.
c=$SLURM_ARRAY_TASK_ID
declare -i c10="${c}0"
for i in {0..9};
do
    c_array[$i]=$((c10 - i))
done
# Run the R stats script on the first matching ancestry-proportions CSV
# for each seed, writing per-seed output files.
# NOTE(review): '~/home/...' expands to $HOME/home/... — confirm the
# script path is intended (looks like it may want ~/CV_DuffySelection/...).
for i in "${c_array[@]}"
do
    filename=$(ls /work/ih49/simulations/globalancestry/whole_autosome/*_seed-${i}_ancestryproportions.csv | head -n 1)
    ~/home/CV_DuffySelection/globalancestry_sims/globalancestry_whole-autosome.R $filename >\
    /work/ih49/Rstats/globalancestry/whole_autosome/Rstats_${i}.out
done
|
<gh_stars>0
// GraphQL query selecting every markdown item under /items/; the result
// feeds the Algolia index definition below.
const itemQuery = `{
  items: allMarkdownRemark(
    filter: { fileAbsolutePath: { regex: "/items/" } }
  ) {
    edges {
      node {
        fields{
          slug
        }
        frontmatter {
          price
          tagLine
          categories
          tag
        }
      }
    }
  }
}`
// Flatten Gatsby query edges: hoist each node's frontmatter and fields
// to the top level alongside the node's remaining properties.
const flatten = (arr) =>
  arr.map((edge) => {
    const { frontmatter, fields, ...rest } = edge.node
    return { ...frontmatter, ...fields, ...rest }
  })
// Algolia index settings: snippet the excerpt field at ~20 words.
const settings = { attributesToSnippet: [`excerpt:20`] }

// One index definition: run the query, flatten its edges, push to "Items".
const queries = [
  {
    query: itemQuery,
    transformer: ({ data }) => flatten(data.items.edges),
    indexName: `Items`,
    settings,
  },
]

module.exports = queries
|
#!/usr/bin/env python3
'''Psychomotor Vigilance Task'''
#########################################################
# STAP constants and stdio
import json,sys
# Python 2 compatibility: alias raw_input to input when it exists.
if 'raw_input' in vars(__builtins__): input = raw_input #Fix for Python 2.x raw_input
# send/recv speak line-delimited JSON over stdio (the STAP protocol);
# send flushes so the client sees each message immediately.
def send(d): print(json.dumps(d)); sys.stdout.flush()
def recv(): return json.loads(input())
# Sentinel meaning "clear this container" in the STAP protocol.
CLEAR = None

def obj(id=None, content=NotImplemented, **options):
    """Build a STAP display-object dict.

    ``id`` maps to key 'id' when given; ``content`` maps to key 'v'
    unless left as the NotImplemented sentinel (so None is a valid
    content value). Remaining keyword options pass through unchanged.
    """
    spec = dict(options)
    if id is not None:
        spec['id'] = id
    if content is not NotImplemented:
        spec['v'] = content
    return spec
#########################################################
import random,statistics
# Number of reaction-time trials in a session.
TRIALS = 10
INSTRUCTIONS = 'Click a button when one appears here'
# The stimulus button; onin={'v': CLEAR} tells the client software to
# clear the button as soon as user input is detected.
BUTTON = obj('Click Me',False,onin={'v':CLEAR})
def main():
    """Run the Psychomotor Vigilance Task over STAP stdio.

    For each trial, shows a button after a random 2-10 s delay, records
    the participant's click time, and reports per-trial and mean
    response times. All I/O is JSON lines via send()/recv().
    """
    log=[]      # per-trial response times (ms)
    ums=0       # last user-event timestamp reported by the client (ms)
    #announce required options
    send({'require':{'options':['U','onin']},'template':'[type="bin"][level="1"]{height:200px}'})
    #display Trial and instructions containers; let user software know that any buttons inside the instructions container should be deleted once user-input (i.e. click) is detected
    send([ obj('Trial',1,max=TRIALS),
           obj(INSTRUCTIONS,[]) ])
    #do trials
    for trial in range(1,TRIALS+1):
        #set random time for button appearance
        buttonAppearanceTime=ums+random.randrange(2000,10000)
        #update trial time, wait till buttonAppearanceTime, then add the 'Click me' button
        send([ obj('Trial',trial),
               obj(INSTRUCTIONS, [BUTTON], U=buttonAppearanceTime) ])
        #get participant action; first element is the event timestamp
        ums=recv()[0]
        log.append(ums-buttonAppearanceTime)
        send([ obj('Your response time is',log[-1],unit='ms') ])
    #display goodbye message in popup
    send([ CLEAR,
           obj('Your mean response time is',statistics.mean(log)),
           'Thank you for your participation.' ])

if __name__=='__main__': main()
#!/bin/bash
# Apply database migrations
echo "Apply database migrations"
python manage.py migrate

# Start server
# NOTE(review): runserver is Django's development server; confirm this
# container/entrypoint is not intended for production use.
echo "Starting server"
python manage.py runserver 0.0.0.0:8000
-- Monthly total spending per customer over the trailing 12 months.
-- NOTE(review): CURDATE() and "- INTERVAL 1 YEAR" are MySQL-flavored,
-- and grouping by the select aliases YEAR/MONTH also relies on MySQL
-- semantics — confirm the target dialect.
SELECT C.customer_name, C.customer_id, EXTRACT(YEAR FROM T.transaction_date) AS YEAR, EXTRACT(MONTH FROM T.transaction_date) AS "MONTH", SUM(T.amount) AS "TOTAL_SPENDING"
FROM Customer C
JOIN Transaction T
ON C.customer_id = T.customer_id
WHERE T.transaction_date >= CURDATE() - INTERVAL 1 YEAR
GROUP BY C.customer_name, C.customer_id, YEAR, MONTH;
<gh_stars>0
import React, { useEffect, useState } from 'react';
import { Switch, BrowserRouter, Route } from "react-router-dom";
import { connect, getCurrentPathname, iAmReady } from './libs/nexo/helpers';
import nexo from './nexoClient';
import NexoSyncRoute from './NexoSyncRoute';
import Main from './pages/Main';
import Subpage from './pages/Subpage';
import ProductList from './pages/ProductList';
/**
 * Root component of the embedded (nexo) app.
 *
 * On mount it connects to the host platform, syncs the host's current
 * pathname with the iframe's location (hard-redirecting if they differ),
 * then signals readiness. Renders nothing until the connection succeeds.
 */
function App() {
  const [ready, setReady] = useState(false);

  useEffect(() => {
    connect(nexo).then(async () => {
      const pathname = await getCurrentPathname(nexo);
      // If the host thinks we are on another route, reload there instead
      // of rendering; the new page will run this effect again.
      if (pathname && pathname !== window.location.pathname) {
        window.location.replace(window.location.origin + pathname);
        return;
      }
      setReady(true);
      // Tell the host the app finished booting.
      iAmReady(nexo);
    });
  }, [])

  // Avoid flashing UI before the host connection is established.
  if (!ready) return null;

  return (<BrowserRouter>
    <NexoSyncRoute>
      <Switch>
        <Route path="/subpage" exact>
          <Subpage />
        </Route>
        <Route path="/products" exact>
          <ProductList />
        </Route>
        <Route path="/">
          <Main />
        </Route>
      </Switch>
    </NexoSyncRoute>
  </BrowserRouter>);
}
export default App;
|
// Receber uma quantidade de valores pra avaliar
// função exibe se cada valor é par ou impar
exibirTipo(10);
function exibirTipo(limite) {
for (let i = 0; i < limite; i++) {
if ( i % 2 == 0)
console.log(i,'PAR');
else
console.log(i,'IMPAR');
};
} |
#!/bin/bash
# Shared mocha flags: espower + babel loaders, recursive test discovery,
# spec reporter. Left unquoted below on purpose so the flags word-split.
mocha_option="--require ./test/src/espower-loader.js\
 --require ./node_modules/babel/register\
 --recursive ./test/src -R spec"

# On Travis: run with coverage and upload lcov to Coveralls.
if [ "$TRAVIS" == "1" ]
then
    ./node_modules/.bin/istanbul cover ./node_modules/mocha/bin/_mocha --report lcovonly -- $mocha_option && cat ./coverage/lcov.info | ./node_modules/coveralls/bin/coveralls.js
# Local coverage run: "./run.sh coverage".
elif [ "$1" == "coverage" ]
then
    ./node_modules/.bin/istanbul cover ./node_modules/mocha/bin/_mocha -- $mocha_option
# Default: plain test run.
else
    ./node_modules/.bin/mocha $mocha_option
fi
|
<gh_stars>0
package com.hongbog.util;
import android.content.Context;
import android.content.SharedPreferences;
import static com.hongbog.util.LabelSharedPreference.PreferenceConstant.DEF_INT_VALUE;
import static com.hongbog.util.LabelSharedPreference.PreferenceConstant.PREF_KEY;
import static com.hongbog.util.LabelSharedPreference.PreferenceConstant.PREF_NAME;
/**
 * Thin wrapper around {@link SharedPreferences} that persists the
 * enrolled-label integer in the "LABEL_PREF" preference file.
 *
 * Created by taein on 2018-08-29.
 */
public class LabelSharedPreference {

    private Context context;
    private SharedPreferences preferences;
    private SharedPreferences.Editor editor;

    /** Preference keys and sentinel values shared with callers. */
    public interface PreferenceConstant {
        String PREF_NAME = "LABEL_PREF";
        String PREF_KEY = "enrolledLabel";
        // Returned by getInt when the key is absent.
        int DEF_INT_VALUE = -999;
        int GARBAGE_VALUE = -1;
    }

    public LabelSharedPreference(Context context) {
        this.context = context;
        // MODE_PRIVATE is a static constant; reference it via the class
        // (the original accessed it through the instance, a Java anti-pattern).
        preferences = context.getSharedPreferences(PREF_NAME, Context.MODE_PRIVATE);
        editor = preferences.edit();
    }

    /** Returns the int stored under keyName, or DEF_INT_VALUE if absent. */
    public int getInt(String keyName) {
        return preferences.getInt(keyName, DEF_INT_VALUE);
    }

    /** Returns the int stored under the default PREF_KEY, or DEF_INT_VALUE. */
    public int getInt() {
        return preferences.getInt(PREF_KEY, DEF_INT_VALUE);
    }

    public boolean contains(String keyName) {
        return preferences.contains(keyName);
    }

    public boolean contains() {
        return preferences.contains(PREF_KEY);
    }

    /** Stores value under PREF_KEY; apply() writes asynchronously. */
    public void putInt(int value) {
        editor.putInt(PREF_KEY, value);
        editor.apply();
    }

    public void removeKey(String keyName) {
        editor.remove(keyName);
        editor.apply();
    }

    /** Clears every entry in this preference file. */
    public void clear() {
        editor.clear();
        editor.apply();
    }
}
|
def find_largest_prime_factor(number):
    """Return the largest prime factor of a positive integer.

    Repeatedly divides out the smallest factor i; once i*i exceeds the
    remaining value, that remainder is prime (or 1) and is the answer.
    For number <= 1 the input is returned unchanged.
    """
    i = 2
    # The original looped while i < number // 2, which fails when the
    # remainder is a small prime power (e.g. 4 returned 4 and 8 returned
    # 4 instead of 2). i * i <= number is the correct bound, and i need
    # not be reset after a division since smaller factors are exhausted.
    while i * i <= number:
        if number % i == 0:
            number //= i
        else:
            i += 1
    return number

print(find_largest_prime_factor(300))
import '@testing-library/jest-dom/extend-expect';
import { render } from '@testing-library/svelte';
import { enGB, fr, ru } from 'date-fns/locale';
import { defaultMaxDate, defaultMinDate } from '../../utils/date-default-ranges';
import { dateFnsUtils } from '../../utils/date-fns-adapter';
import DatePickerHeader from './DatePickerHeader.svelte';
describe('DatePickerHeader', () => {
  // Reset to English so locale-specific tests don't leak into each other.
  beforeEach(() => {
    dateFnsUtils.locale = enGB;
  })

  // Fixed month so snapshots and text assertions are deterministic.
  const currentMonth = new Date('2021-01-04');
  const datePickerHeaderProps = {
    dateAdapter: dateFnsUtils,
    currentMonth,
    minDate: defaultMinDate,
    maxDate: defaultMaxDate,
    selectPreviousMonth: jest.fn(),
    selectNextMonth: jest.fn(),
    toggleYearPicker: jest.fn(),
  }

  it('should render component', () => {
    const { container } = render(DatePickerHeader, {
      props: datePickerHeaderProps,
    });
    expect(container).toMatchSnapshot();
  });

  it('should show selected year and month', () => {
    const { getByTestId } = render(DatePickerHeader, {
      props: datePickerHeaderProps
    });
    const selectedYear = getByTestId('selected-year');
    const selectedMonth = getByTestId('selected-month');
    expect(selectedYear).toHaveTextContent('2021');
    expect(selectedMonth).toHaveTextContent('January');
  });

  it('should show selected month w/ french locale', () => {
    dateFnsUtils.locale = fr;
    const { getByTestId } = render(DatePickerHeader, {
      props: datePickerHeaderProps,
    });
    const selectedMonth = getByTestId('selected-month');
    expect(selectedMonth).toHaveTextContent('janvier');
  });

  it('should show selected month w/ russian locale', () => {
    dateFnsUtils.locale = ru;
    const { getByTestId } = render(DatePickerHeader, {
      props: datePickerHeaderProps,
    });
    const selectedMonth = getByTestId('selected-month');
    expect(selectedMonth).toHaveTextContent('январь');
  });

  it('should show selected month w/ english locale', () => {
    const { getByTestId } = render(DatePickerHeader, {
      props: datePickerHeaderProps
    });
    const selectedMonth = getByTestId('selected-month');
    expect(selectedMonth).toHaveTextContent('January');
  });
});
|
#!/usr/bin/env bash
# Pull environment name and database host/port out of the TypeScript
# config by evaluating it with ts-node.
X_ENV=$(node -r ts-node/register -e 'console.log(require("./src/config.ts").default.env)')
X_DB_HOST=$(node -r ts-node/register -e 'console.log(require("./src/config.ts").default.database.host)')
X_DB_PORT=$(node -r ts-node/register -e 'console.log(require("./src/config.ts").default.database.port)')

# Block until the database accepts connections, then run the given command.
./wait-for-it.sh $X_DB_HOST:$X_DB_PORT
# [[ "$X_ENV" == "development" ]] && npm run typeorm schema:sync
# npm run typeorm migration:run && \
eval "$@"
|
// Return the first element of `array` strictly equal to `string`, or null.
const searchString = (string, array) => {
  // `find` short-circuits at the first match; the original filtered the
  // entire array and then took element 0.
  const match = array.find((item) => item === string);
  return match !== undefined ? match : null;
}

// create an array to test
const testArray = ['foo', 'bar', 'baz'];

// test the function — expect 'bar' to be found and logged
const searchResult = searchString('bar', testArray);
console.log(searchResult);
package com.hewentian.hadoop.zookeeper.rmi.ha;
import java.io.IOException;
import java.net.MalformedURLException;
import java.rmi.Naming;
import java.rmi.Remote;
import java.rmi.RemoteException;
import java.rmi.registry.LocateRegistry;
import java.util.concurrent.CountDownLatch;
import org.apache.log4j.Logger;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
/**
 * <p>
 * <b>ServiceProvider</b> is the service provider: it publishes an RMI
 * service and registers its URL in ZooKeeper for discovery.
 * </p>
 *
 * @author <a href="mailto:<EMAIL>">hewentian</a>
 * @date 2019-02-21 15:20:48
 * @since JDK 1.8
 */
public class ServiceProvider {
    private static final Logger log = Logger.getLogger(ServiceProvider.class);

    // Released by the ZooKeeper watcher once the session is connected.
    private CountDownLatch latch = new CountDownLatch(1);

    /**
     * Publishes the RMI service and, on success, registers its URL as an
     * ephemeral sequential node under the ZooKeeper registry path.
     */
    public void publish(Remote remote, String host, int port) {
        String url = publishService(remote, host, port);
        if (url != null) {
            ZooKeeper zk = connectServer();
            if (zk != null) {
                checkParentPath(zk);
                createNode(zk, url);
            }
        }
    }

    /**
     * Creates an RMI registry on the given port and binds the remote object.
     *
     * @return the rmi:// URL on success, or null if binding failed
     */
    private String publishService(Remote remote, String host, int port) {
        String url = null;
        try {
            url = String.format("rmi://%s:%d/%s", host, port, remote.getClass().getName());
            LocateRegistry.createRegistry(port);
            Naming.rebind(url, remote);
            log.debug(String.format("publish rmi service (url: %s)", url));
        } catch (RemoteException | MalformedURLException e) {
            log.error(e.getMessage(), e);
        }
        return url;
    }

    /**
     * Opens a ZooKeeper session and blocks until the connection is
     * established (signalled by the SyncConnected watcher event).
     */
    private ZooKeeper connectServer() {
        ZooKeeper zk = null;
        try {
            zk = new ZooKeeper(Constant.ZK_CONNECTION_STRING, Constant.ZK_SESSION_TIMEOUT, new Watcher() {
                @Override
                public void process(WatchedEvent event) {
                    if (event.getState() == Event.KeeperState.SyncConnected) {
                        latch.countDown(); // wake the thread blocked in await() below
                    }
                }
            });
            latch.await(); // block the calling thread until connected
        } catch (IOException | InterruptedException e) {
            log.error(e.getMessage(), e);
        }
        return zk;
    }

    /**
     * Checks whether the registry parent path exists and creates it if not.
     * It can also be created manually with the ZooKeeper CLI:
     * create /registry null
     *
     * @param zk connected ZooKeeper session
     */
    public void checkParentPath(ZooKeeper zk) {
        try {
            if (zk.exists(Constant.ZK_REGISTRY_PATH, false) == null) {
                zk.create(Constant.ZK_REGISTRY_PATH, null, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
                log.debug(String.format("create zookeeper node: %s", Constant.ZK_REGISTRY_PATH));
            }
        } catch (KeeperException | InterruptedException e) {
            log.error(e.getMessage(), e);
        }
    }

    /**
     * Registers the service URL as an EPHEMERAL_SEQUENTIAL node, so the
     * entry disappears automatically when this provider's session ends.
     */
    private void createNode(ZooKeeper zk, String url) {
        try {
            byte[] data = url.getBytes();
            String path = zk.create(Constant.ZK_PROVIDER_PATH, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
            log.debug(String.format("create zookeeper node (%s => %s)", path, url));
        } catch (KeeperException | InterruptedException e) {
            log.error(e.getMessage(), e);
        }
    }
}
|
/** Utility that simulates a fair six-sided die roll. */
public class RandomNumber {

    /**
     * Returns a pseudo-random integer in the inclusive range [1, 6].
     */
    public static int generateRandomNumber() {
        // Fully qualified: the original used Random without importing
        // java.util.Random, so the file did not compile as written.
        java.util.Random rand = new java.util.Random();
        return rand.nextInt(6) + 1;
    }

    public static void main(String[] args) {
        System.out.println(generateRandomNumber()); // generate a random number between 1 and 6
    }
}
#!/bin/bash
# Publish herbwebdavd: build, then copy the binary, system files,
# resources, and a config skeleton into a freshly created target folder.
# All uses of "$1" and "$path" are quoted so paths with spaces work
# (the original left them unquoted, breaking on such paths).

#settings
binname="herbwebdavd"
buildername="build-linux.sh"

# Require a target path argument.
if [ -z "$1" ]
then
    basename=$(basename "$0")
    echo "Usage $basename [targetpath]"
    exit 0
fi

# Refuse to overwrite an existing target.
if [ -e "$1" ]
then
    echo "Target path $1 exists."
    exit 0
fi

# Work relative to this script's directory.
path=$(dirname "$0")
cd "$path"

echo "Publish to $1."
echo "Building"
bash ./"$buildername"

echo "Creating folder $1."
mkdir "$1"
mkdir "$1/appdata"
cp ../../appdata/readme.md "$1/appdata/readme.md"

echo "Copying bin file."
mkdir "$1/bin"
cp -rpf "../../bin/$binname" "$1/bin/$binname"

echo "Copying system files."
cp -rpf ../../system "$1/system"

echo "Copying resources files."
cp -rpf ../../resources "$1/resources"

echo "Copying config skeleton files."
cp -rpf ../../system/configskeleton "$1/config"
|
<gh_stars>0
/**
* Copyright 2016 <NAME> <<EMAIL>>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iaik.privlog.encoders;
import java.io.IOException;
import java.io.OutputStream;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.util.CloseUtil;
import iaik.asn1.ObjectID;
import iaik.asn1.structures.AlgorithmID;
import iaik.cms.ContentInfoOutputStream;
import iaik.cms.EnvelopedDataOutputStream;
import iaik.cms.KeyTransRecipientInfo;
import iaik.cms.RecipientInfo;
import iaik.utils.Util;
/**
* @author <NAME> <<EMAIL>>
*/
/**
 * Logback encoder wrapper that routes the delegate encoder's output
 * through a CMS (PKCS#7) EnvelopedData stream, so all log bytes are
 * encrypted for the configured recipients before reaching the sink.
 */
public class CmsWrappingEncryptionEncoder extends WrappingEncoderBase<ILoggingEvent> {

    // Encrypting stream the wrapped encoder writes into; rebuilt per init().
    protected EnvelopedDataOutputStream envelopedDataOutputStream;
    // Content-encryption algorithm name, resolved via AlgorithmID.getAlgorithmID.
    protected String algorithm;
    // JVM shutdown hook that closes the CMS stream so its trailer is written.
    protected Thread shutdownHook;
    protected ArrayList<RecipientInfo> recipients = new ArrayList<>();

    @Override
    public void init(OutputStream os)
        throws IOException
    {
        try {
            // Wrap the target stream in a CMS ContentInfo of type envelopedData.
            ContentInfoOutputStream contentInfoStream = new ContentInfoOutputStream(ObjectID.cms_envelopedData, os);
            // NOTE(review): pass-through close is disabled here — confirm the
            // intended close propagation to the underlying stream.
            contentInfoStream.setPassThroughClose(false);
            AlgorithmID contentEncAlg = (AlgorithmID) AlgorithmID.getAlgorithmID(algorithm).clone();
            envelopedDataOutputStream = new EnvelopedDataOutputStream(contentInfoStream, contentEncAlg);
            for (RecipientInfo recipientInfo : recipients) {
                envelopedDataOutputStream.addRecipientInfo(recipientInfo);
            }
            // Point the wrapped encoder at the encrypting stream.
            baseEncoder.init(envelopedDataOutputStream);
            // Close on JVM shutdown so the CMS structure is finalized even
            // when the appender is never closed explicitly.
            shutdownHook = new Thread(new Runnable() {
                @Override
                public void run() {
                    try {
                        close();
                    } catch (Exception e) {
                        // We tried at least.
                    }
                }
            });
            Runtime.getRuntime().addShutdownHook(shutdownHook);
        } catch (Exception cause) {
            addError("Failed to initialize CMS output stream", cause);
        }
    }

    @Override
    public void close()
        throws IOException
    {
        try {
            // Flush the delegate first, then the CMS layer.
            baseEncoder.close();
            envelopedDataOutputStream.flush();
        } finally {
            CloseUtil.closeQuietly(envelopedDataOutputStream);
            envelopedDataOutputStream = null;
            // NOTE(review): removeShutdownHook throws IllegalStateException if
            // close() runs during shutdown (i.e. from the hook itself); the
            // hook's catch swallows it — confirm that is intended.
            Runtime.getRuntime().removeShutdownHook(shutdownHook);
            shutdownHook = null;
        }
    }

    public void addRecipientInfo(RecipientInfo recipientInfo) {
        recipients.add(recipientInfo);
    }

    /** Adds a recipient using a default key-transport algorithm for its key type. */
    public void addRecipient(X509Certificate encyptionCertificate) {
        addRecipient(encyptionCertificate, getKeyTransEncAlgorithm(encyptionCertificate));
    }

    public void addRecipient(X509Certificate encyptionCertificate, AlgorithmID algorithmID) {
        try {
            RecipientInfo recipientInfo = new KeyTransRecipientInfo(Util.convertCertificate(encyptionCertificate),
                algorithmID);
            addRecipientInfo(recipientInfo);
        } catch (Exception cause) {
            throw cause instanceof RuntimeException ? (RuntimeException) cause
                : new RuntimeException("Failed to add recipient via encryption certificate", cause);
        }
    }

    /** Maps the certificate's public-key type to a key-transport algorithm (RSA only). */
    protected AlgorithmID getKeyTransEncAlgorithm(X509Certificate encyptionCertificate) {
        if ("RSA".equals(encyptionCertificate.getPublicKey().getAlgorithm())) {
            return (AlgorithmID) AlgorithmID.rsaesOAEP.clone();
        } else {
            throw new IllegalArgumentException("Currently only RSA encryption certificates are supported");
        }
    }

    public void setAlgorithm(String algorithm) {
        this.algorithm = algorithm;
    }
}
|
def total_cost(items):
    """Sum quantity * unit price over a list of line items.

    Each item is an indexable record with the quantity at index 1 and
    the unit price at index 2. Returns 0 for an empty list.
    """
    return sum(entry[1] * entry[2] for entry in items)
import os
import random
import userdb
import vegetables
import util
from ka import keep_alive
import cryptg
from telethon import TelegramClient, events, types, custom, utils, errors
import telethon
import logging
# Log to log.txt, truncating the file on every start.
logging.basicConfig(filename='log.txt', filemode='w')

# Telegram credentials are supplied via environment variables.
BOT_TOKEN = os.getenv('tok')
API_ID = int(os.getenv('api_id'))
API_HASH = os.getenv('api_hash')
bot = TelegramClient('seller', API_ID, API_HASH)

# Order policy constants (amounts in KES).
DELIVERY_CHARGE = 50
MIN_ORDER_AMT = 300
# From https://stackoverflow.com/questions/61154990/how-to-get-button-callbackquery-in-conversations-of-telethon-library
def press_event(user_id):
    # Event filter: inline-button presses from this specific user only.
    return events.CallbackQuery(func=lambda e: e.sender_id == user_id)

def message_event(user_id):
    # Event filter: new messages from this specific user only.
    return events.NewMessage(func=lambda e: e.sender_id == user_id)
@bot.on(events.NewMessage(pattern='/start'))
async def start_handler(event):
    """Greet the user, ensure a user-DB record exists, then show categories."""
    await event.reply('Hello, here\'s what we sell')
    # Ensure this sender has a record in the user DB.
    # NOTE(review): the returned db handle is unused — confirm intended.
    db = userdb.instantiate(event.sender_id)
    await categories_handler(event)
@bot.on(events.NewMessage(pattern='/categories'))
async def categories_handler(event):
    """Reply with the distinct product categories as /category_X links."""
    # Column 2 of the sheet holds each product's category; set() dedupes.
    # NOTE(review): set iteration order is arbitrary, so numbering varies
    # between calls — confirm that is acceptable.
    values_list = set(vegetables.sheet.col_values(2))
    message = "These are the categories that we offer: \n"
    for i, item in enumerate(values_list):
        message = f'{message}\n{i+1}. {item}\n/category_{item}'
    await event.reply(message)
@bot.on(events.NewMessage(pattern=r'/category_\w+'))
async def category_handler(event):
    """List the items of one category with name, price, and /item_ links."""
    # Debug prints left from development; they go to stdout, not the log file.
    print('In category handler')
    # Command format is /category_<name>; take the part after the underscore.
    category = event.message.text.split('_')[1]
    print(category)
    message = f'Here are the items we have in {category}:\n'
    veg_list = vegetables.veg_from_category(category)
    print(veg_list)
    for i, item in enumerate(veg_list):
        message = f'{message}\n{i+1}. **{item.name}**\n**Price**: KES{item.price}\n/item_{item.name}\n'
    await event.reply(message)
@bot.on(events.NewMessage(pattern=r'/item_\w+'))
async def item_handler(event):
    """Show one item's name, price, and photo with an add-to-cart link."""
    item = event.message.text.split('_')[1]
    veg = vegetables.Vegetable(item)
    message = f'**{veg.name}**\nPrice: KES{veg.price}\n\n/add_to_cart_{veg.name}'
    # Attach the item's image to the reply.
    await event.reply(message, file=veg.image)
@bot.on(events.NewMessage(pattern=r'/add_to_cart_\w+'))
async def cart_handler(event):
    """Ask for a quantity (1-10) via inline buttons and add the item to the cart."""
    # Item name is everything after the last underscore of the command.
    item = event.message.text.split('_')[-1]
    SENDER = event.sender_id
    async with bot.conversation(SENDER) as conv:
        await conv.send_message(f'Enter the number of items of {item} to be added')
        # Lay out buttons 1-10 in two rows of five.
        buttons = [[],[]]
        for i in range(1, 11):
            if i<6:
                ind=0
            else:
                ind=1
            buttons[ind].append(telethon.custom.Button.inline(str(i), str(i)))
        await conv.send_message('Choose', buttons=buttons)
        # Wait for this user's button press; its data is the quantity.
        press = await conv.wait_event(press_event(SENDER))
        quantity = int(press.data)
        price = vegetables.Vegetable(item).price
        logging.info(f'Add to cart: Item-{item}, Price-{price}, Quantity-{quantity}')
        userdb.add_to_cart(SENDER, item, int(price), int(quantity))
        await conv.send_message(f'{quantity} {item}(s) added to your cart\n/view_cart')
@bot.on(events.NewMessage(pattern=r'/view_cart'))
async def view_handler(event):
    """Render the user's cart with per-line remove links and totals.

    Returns (cart, total_price) so /place_order can reuse the data.
    NOTE(review): 'Total number of items' reports len(cart), i.e. distinct
    cart lines rather than summed quantities — confirm intended.
    """
    cart = userdb.get_cart(event.sender_id)
    message = f'Here is your cart: \n\n**Item**\t**Quantity**\t**Price**'
    price = 0
    for item in cart:
        message = f'{message}\n{item["item"]}\t{item["quantity"]}\t{item["price"]}\n/remove_{item["item"]}'
        price += int(item["price"])
    message = f'{message}\n\n**Total number of items**: {len(cart)}\n**Total price:** __KES__{price}\n**Add items:** /categories\n**Place Order**: /place_order'
    await event.reply(message)
    return (cart, price)
@bot.on(events.NewMessage(pattern=r'/remove_\w+'))
async def remove_handler(event):
    """Remove a single item (by name) from the sender's cart."""
    sender = event.sender_id
    item = event.message.text.split('_')[1]
    logging.info(f'{sender} removed {item}')
    userdb.remove_from_cart(sender, item)
    await event.reply(f'{item} removed from cart\n/view_cart')
@bot.on(events.NewMessage(pattern=r'/place_order'))
async def order_handler(event):
    """Walk the user through checkout: address, contact, name, payment.

    Enforces the minimum order amount, collects delivery details in a
    conversation, then either takes payment immediately or runs a simple
    arithmetic captcha for pay-on-delivery before recording the order.
    """
    cart, price = await view_handler(event)
    if int(price)<MIN_ORDER_AMT:
        await event.reply(f'The minimum order amount for delivery is KES{MIN_ORDER_AMT}. Please add more items to your cart and then order')
    else:
        SENDER = event.sender_id
        async with bot.conversation(SENDER) as conv:
            # Delivery address arrives as a shared location; store "(long, lat)".
            await conv.send_message(f'Enter the address where delivery is to be made: ', buttons=telethon.custom.Button.request_location('Please share your location', single_use=True))
            address = await conv.wait_event(message_event(SENDER))
            geo_point = address.message.media.geo
            address = str((geo_point.long, geo_point.lat))
            print(address)
            await conv.send_message(f'Enter your mobile number: ' )
            contact = await conv.wait_event(message_event(SENDER))
            contact = contact.message.text
            print(contact)
            await conv.send_message(f'Enter the name of the recepient: ' )
            name = await conv.wait_event(message_event(SENDER))
            name = name.message.text
            total = price+DELIVERY_CHARGE
            await conv.send_message(f'**Order amount:** KES{price}\n**Delivery Charges:** KES{DELIVERY_CHARGE}\n**Total:** {total}')
            await conv.send_message('How would you like to pay?', buttons=[
                telethon.custom.Button.inline('Pay Now', '1'),
                telethon.custom.Button.inline('Pay on Delivery', '2')]
            )
            pay_option = await conv.wait_event(press_event(SENDER))
            pay_option = int(pay_option.data)
            if pay_option==1:
                # Immediate payment through the util payment gateway.
                paid, payment_id = util.take_payment(SENDER, total)
                if paid:
                    await conv.send_message('Your order is successful. Please wait for the delivery')
                    userdb.place_order(SENDER, cart, address, total, contact, name, payment_id)
                else:
                    await conv.send_message('Your payment was unsuccessful :(\nPlease try again')
            elif pay_option==2:
                # Simple addition captcha before accepting pay-on-delivery.
                # NOTE(review): the inline-button data here is passed as int
                # (ans, ans-6, ans+6) while telethon expects bytes/str —
                # confirm this works at runtime.
                await conv.send_message('Prove that you are not a robot')
                num1 = random.randint(1,10)
                num2 = random.randint(1,10)
                ans = num1+num2
                opt1 = telethon.custom.Button.inline(str(ans), ans)
                opt2 = telethon.custom.Button.inline(str(ans-6), ans-6)
                opt3 = telethon.custom.Button.inline(str(ans+6), ans+6)
                options = [opt1, opt2, opt3]
                random.shuffle(options)
                await conv.send_message(f'What is {num1}+{num2}?', buttons=options)
                choice = await conv.wait_event(press_event(SENDER))
                if int(choice.data)!=ans:
                    await conv.send_message('Wrong answer. Please try again.')
                else:
                    await conv.send_message('Order placed. Please wait for your delivery')
                    userdb.place_order(SENDER, cart, address, total, contact, name, 'cod')
# Start the keep-alive web server (for hosts that sleep idle apps),
# then authenticate the bot and block until the connection drops.
keep_alive()
bot.start(bot_token=BOT_TOKEN)
print('Started running')
bot.run_until_disconnected()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.