text
stringlengths 1
1.05M
|
|---|
-- Basic student biometrics table.
CREATE TABLE student_records (
id int AUTO_INCREMENT PRIMARY KEY, -- surrogate key
name varchar(255) NOT NULL,
age int NOT NULL,
height double NOT NULL, -- NOTE(review): units unspecified in source -- confirm (cm vs m)
weight double NOT NULL -- NOTE(review): units unspecified in source -- confirm (kg vs lb)
);
|
#!/usr/bin/env bash
# Stop this project's Docker stack (named after package.json "name"), then
# block until all of its containers and its overlay network have been removed.
set -e

# Derive the project name without a JSON parser: take the 4th
# double-quote-delimited field of the first '"name":' line.
# (Replaces the old cat|grep|awk pipeline and deprecated backticks.)
project="$(awk -F '"' '/"name":/ {print $4; exit}' package.json)"

# Best-effort teardown: ignore "no such container / no such stack" errors.
docker container stop "${project}_builder" 2> /dev/null || true
docker stack rm "$project" 2> /dev/null || true

echo -n "Waiting for the $project stack to shutdown."
# wait until there are no more containers in this stack
while [[ -n "$(docker container ls --quiet --filter "label=com.docker.stack.namespace=$project")" ]]
do echo -n '.' && sleep 3
done
# wait until the stack's network has been removed
while [[ -n "$(docker network ls --quiet --filter "label=com.docker.stack.namespace=$project")" ]]
do echo -n '.' && sleep 3
done
echo ' Goodnight!'
|
import React from "react";
export const ErrorMessage = ({ error }) => (
<div className="alert alert-danger" role="alert">
<span>{error.message}</span>
</div>
);
export const SuccessMessage = ({ formVerb = "updated", formType = "Data" }) => (
<div className="alert alert-success" role="alert">
<span>
{formType} successfully {`${formVerb}`}!
</span>
</div>
);
|
<filename>src/types/index.d.ts
// Ambient module declaration: any imported .vue file resolves to a Vue
// component-options object (typed loosely as ComponentOptions).
declare module "*.vue" {
import { ComponentOptions } from "vue";
let component: ComponentOptions;
export default component;
}
// Markdown files are given the same shape — presumably the build tooling
// compiles .md files into Vue components; confirm against the webpack/vite config.
declare module "*.md" {
import { ComponentOptions } from "vue";
let component: ComponentOptions;
export default component;
}
|
<filename>apkanalyser/src/andreflect/sign/ApkSign.java
/*
* Copyright (C) 2012 Sony Mobile Communications AB
*
* This file is part of ApkAnalyser.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package andreflect.sign;
import java.io.File;
import com.android.signapk.SignApk;
public class ApkSign {
    // SignApk.jar is a tool included with the Android platform source bundle.
    // testkey.pk8 is the private key that is compatible with the recovery image included in this zip file
    // testkey.x509.pem is the corresponding certificate/public key
    //
    // Usage:
    // java -jar signapk.jar testkey.x509.pem testkey.pk8 update.zip update_signed.zip

    /**
     * Signs the given archive with SignApk and returns the output file path:
     * "foo.ap_" -> "foo.apk", "foo.apk" -> "foo_sign.apk", anything else is
     * signed in place under its original name.
     */
    public String sign(File file) {
        final String path = file.getAbsolutePath();
        final String lower = path.toLowerCase();
        String outName;
        if (lower.endsWith(".ap_")) {
            outName = path.substring(0, path.length() - 4) + ".apk";
        } else if (lower.endsWith(".apk")) {
            outName = path.substring(0, path.length() - 4) + "_sign.apk";
        } else {
            outName = path;
        }
        // First two slots are the cert/key arguments; passed as null here,
        // matching the original call into SignApk.main.
        SignApk.main(new String[] { null, null, path, outName });
        return outName;
    }
}
|
<filename>src/peersafe/app/storage/impl/TableStorageItem.cpp
//------------------------------------------------------------------------------
/*
This file is part of chainsqld: https://github.com/chainsql/chainsqld
Copyright (c) 2016-2018 Peersafe Technology Co., Ltd.
chainsqld is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
chainsqld is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with chainsqld. If not, see <http://www.gnu.org/licenses/>.
*/
//==============================================================================
#include <ripple/app/main/Application.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/TransactionMaster.h>
#include <ripple/json/json_reader.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/app/misc/Transaction.h>
#include <peersafe/app/table/TableSync.h>
#include <peersafe/app/table/TableStatusDBMySQL.h>
#include <peersafe/app/table/TableStatusDBSQLite.h>
#include <peersafe/protocol/TableDefines.h>
#include <peersafe/protocol/STEntry.h>
#include <peersafe/app/storage/TableStorageItem.h>
#include <peersafe/app/storage/TableStorage.h>
#include <peersafe/app/tx/ChainSqlTx.h>
#include <peersafe/app/util/TableSyncUtil.h>
#include <ripple/ledger/impl/Tuning.h>
namespace ripple {
// Construct a storage item bound to the application, config and journal.
// Flags start cleared: not yet registered in the sync table, not dropped,
// and no last-transaction close time recorded.
TableStorageItem::TableStorageItem(Application& app, Config& cfg, beast::Journal journal)
: app_(app)
, journal_(journal)
, cfg_(cfg)
{
bExistInSyncTable_ = false;
bDropped_ = false;
lastTxTm_ = 0;
}
// Destructor: members (smart pointers to conn/trans/store) clean up themselves.
TableStorageItem::~TableStorageItem()
{
}
// Bind this item to a table (owner account, DB-side table name, user-facing
// name) and eagerly open the DB transaction used to batch this table's writes.
void TableStorageItem::InitItem(AccountID account, std::string nameInDB, std::string tableName)
{
accountID_ = account;
sTableNameInDB_ = nameInDB;
sTableName_ = tableName;
getTxStoreTrans();
}
// Record the transaction/ledger coordinates this item tracks: the last table
// transaction (hash + ledger seq) and the ledger checkpoint (hash + seq).
void TableStorageItem::SetItemParam(LedgerIndex txnLedgerSeq, uint256 txnHash, LedgerIndex LedgerSeq, uint256 ledgerHash)
{
txnHash_ = txnHash;
txnLedgerSeq_ = txnLedgerSeq;
ledgerHash_ = ledgerHash;
LedgerSeq_ = LedgerSeq;
}
// Track a pending transaction in txList_, deduplicated by hash. The entry's
// expiry ledger is taken from sfLastLedgerSequence when present, otherwise it
// defaults to the current validated ledger plus a fixed grace window.
void TableStorageItem::Put(STTx const& tx, uint256 txhash)
{
auto iter = std::find_if(txList_.begin(), txList_.end(),
[txhash](txInfo& info)
{
return info.uTxHash == txhash;
}
);
// Already tracked: nothing to do.
if (iter != txList_.end())
return;
txInfo txInfo_;
txInfo_.accountID = accountID_;
txInfo_.uTxHash = txhash;
txInfo_.bCommit = false;
if (tx.isFieldPresent(sfLastLedgerSequence))
txInfo_.uTxLedgerVersion = tx.getFieldU32(sfLastLedgerSequence); //uTxLedgerVersion
// NOTE(review): when sfLastLedgerSequence is absent, uTxLedgerVersion is read
// here before being assigned in this function — confirm txInfo zero-initializes it.
if (txInfo_.uTxLedgerVersion <= 0)
{
txInfo_.uTxLedgerVersion = app_.getLedgerMaster().getValidLedgerIndex() + MAX_GAP_LEDGERNUM_TXN_APPEARIN;
}
txList_.push_back(txInfo_);
}
// Called before disposing the first tx of this table: stops the table's sync
// task (presumably so storage and sync don't write the table concurrently —
// confirm against TableSync::StopOneTable). The flag tells the sync side
// whether this tx is the table's creation.
void TableStorageItem::prehandleTx(STTx const& tx)
{
if (txList_.size() <= 0)
{
app_.getTableSync().StopOneTable(accountID_, sTableNameInDB_, tx.getFieldU16(sfOpType) == T_CREATE);
}
}
// Apply one table transaction to the batched storage:
//   1. dispose the tx into the per-table DB transaction (unless its op type
//      needs no disposal),
//   2. mirror DROP/RENAME into the sync-status DB,
//   3. on success, ensure the table is registered in the sync DB and record
//      the tx as pending in txList_.
// Returns tesSUCCESS, a dispose-specific TER, or a storage error code.
TER TableStorageItem::PutElem(ChainSqlTx& transactor, STTx const& tx, uint256 txhash)
{
std::pair<bool, std::string> ret = { true, "success" };
auto result = tefTABLE_STORAGEERROR;
// No usable DB connection: bail out with the "normal" storage error.
if (getTxStoreDBConn().GetDBConn() == NULL)
{
return tefTABLE_STORAGENORMALERROR;
}
prehandleTx(tx);
auto op_type = tx.getFieldU16(sfOpType);
if (!isNotNeedDisposeType((TableOpType)op_type))
{
auto resultPair = transactor.dispose(getTxStore(),tx);
if (resultPair.first == tesSUCCESS)
{
JLOG(journal_.trace()) << "Dispose success";
}
else
{
ret = { false,"Dispose error" };
// Keep the generic storage error for tefTABLE_TXDISPOSEERROR; otherwise
// propagate the specific dispose result.
if (resultPair.first != tefTABLE_TXDISPOSEERROR)
{
result = resultPair.first;
}
transactor.setExtraMsg(resultPair.second);
}
}
// DROP/RENAME also update the sync-status DB immediately.
if (tx.getFieldU16(sfOpType) == T_DROP)
{
bDropped_ = true;
getTableStatusDB().UpdateSyncDB(to_string(accountID_), sTableNameInDB_, true, "");
}
else if (T_RENAME == op_type)
{
auto tables = tx.getFieldArray(sfTables);
if (tables.size() > 0)
{
auto newTableName = strCopy(tables[0].getFieldVL(sfTableNewName));
getTableStatusDB().RenameRecord(accountID_, sTableNameInDB_, newTableName);
}
}
if (ret.first)
{
JLOG(journal_.trace()) << "Dispose success";
// First successful tx for this table: register it in the sync DB once.
if (!bExistInSyncTable_)
{
if (!getTableStatusDB().IsExist(accountID_, sTableNameInDB_))
{
auto chainId = TableSyncUtil::GetChainId(&transactor.view());
getTableStatusDB().InsertSnycDB(sTableName_, sTableNameInDB_, to_string(accountID_), LedgerSeq_, ledgerHash_, true, "",chainId);
}
bExistInSyncTable_ = true;
}
Put(tx, txhash);
result = tesSUCCESS;
}
return result;
}
// Return false when any uncommitted pending tx can no longer appear on-chain:
// its LastLedgerSequence has already passed, or it equals the current ledger
// but the tx is absent from that ledger's tx map. True means keep waiting.
bool TableStorageItem::CheckLastLedgerSeq(LedgerIndex CurLedgerVersion)
{
auto ledger = app_.getLedgerMaster().getLedgerBySeq(CurLedgerVersion);
// Ledger not available locally: treat as failure (caller will roll back).
if (!ledger) return false;
auto iter = txList_.begin();
for (; iter != txList_.end(); iter++)
{
if (iter->bCommit) continue;
if (iter->uTxLedgerVersion < CurLedgerVersion)
{
// Expiry ledger already passed without the tx committing.
return false;
}
else if(iter->uTxLedgerVersion == CurLedgerVersion)
{
if (!ledger->txMap().hasItem(iter->uTxHash))
{
// Last chance ledger doesn't contain the tx: it can never commit.
return false;
}
}
}
return true;
}
// True when a transaction with the given id is already tracked in txList_.
bool TableStorageItem::isHaveTx(uint256 txid)
{
    auto match = [txid](txInfo& entry) { return entry.uTxHash == txid; };
    return std::find_if(txList_.begin(), txList_.end(), match) != txList_.end();
}
// Walk validated ledgers after our checkpoint and decide the batch's fate:
//   STORAGE_ROLLBACK — a ledger contains a table tx we never tracked (our
//     batch diverged from the chain);
//   STORAGE_COMMIT   — every tracked tx is confirmed on-chain (or the table's
//     SLE disappeared after a drop);
//   STORAGE_NONE     — nothing decisive yet, keep waiting.
// Side effects: advances LedgerSeq_/ledgerHash_ checkpoint, updates
// txnHash_/txnLedgerSeq_ from the table's SLE entry, sets lastTxTm_ on commit.
TableStorageItem::TableStorageDBFlag TableStorageItem::CheckSuccess(LedgerIndex validatedIndex)
{
for (int index = LedgerSeq_ + 1; index <= validatedIndex; index++)
{
auto ledger = app_.getLedgerMaster().getLedgerBySeq(index);
if (!ledger) continue;
LedgerSeq_ = index;
ledgerHash_ = app_.getLedgerMaster().getHashBySeq(index);
auto const sleAccepted = ledger->read(keylet::table(accountID_));
if (sleAccepted == NULL) continue;
const STEntry * pEntry = NULL;
auto aTableEntries = sleAccepted->getFieldArray(sfTableEntries);
auto retPair = TableSyncUtil::IsTableSLEChanged(aTableEntries, txnLedgerSeq_, sTableNameInDB_,true);
if (retPair.second == NULL)
{
if (retPair.first)
continue;
else if(bDropped_) //deleted;bug:RR-559
return STORAGE_COMMIT;
}
// NOTE(review): if retPair.second is NULL here (not changed, not dropped),
// pEntry stays NULL and the pEntry-> dereferences below would crash when
// this ledger contains table txs — confirm IsTableSLEChanged's contract.
pEntry = retPair.second;
// Collect every tx in this ledger that touches our table.
std::vector <uint256> aTx;
for (auto const& item : ledger->txMap())
{
auto blob = SerialIter{ item.data(), item.size() }.getVL();
STTx stTx(SerialIter{ blob.data(), blob.size() });
auto str = stTx.getFullText();
auto vecTxs = app_.getMasterTransaction().getTxs(stTx, sTableNameInDB_);
if (vecTxs.size() > 0)
{
aTx.push_back(stTx.getTransactionID());
}
}
int iCount = 0;
if (aTx.size() > 0) {
txnHash_ = pEntry->getFieldH256(sfTxnLedgerHash);
txnLedgerSeq_ = pEntry->getFieldU32(sfTxnLgrSeq);
}
for (auto tx : aTx)
{
iCount++;
auto iter = std::find_if(txList_.begin(), txList_.end(),
[tx](txInfo &item) {
return item.uTxHash == tx;
});
if (iter == txList_.end())
{
// On-chain tx for our table that we never stored: batch is invalid.
return STORAGE_ROLLBACK;
}
else
{
iter->bCommit = true;
auto initIter = std::find_if(txList_.begin(), txList_.end(),
[tx](txInfo &item) {
return !item.bCommit;
});
if (initIter == txList_.end()) //mean that each tx had set flag to commit
{
// NOTE(review): signed/unsigned comparison (int vs size_t) below.
if (iCount < aTx.size())
{
// Committed mid-ledger: checkpoint the previous ledger and remember
// the boundary tx so the remainder is picked up on restart.
LedgerSeq_ = index -1;
ledgerHash_ = app_.getLedgerMaster().getHashBySeq(LedgerSeq_);
txUpdateHash_ = tx;
}
else
{
txnHash_ = pEntry->getFieldH256(sfTxnLedgerHash);
txnLedgerSeq_ = pEntry->getFieldU32(sfTxnLgrSeq);
}
lastTxTm_ = ledger->info().closeTime.time_since_epoch().count();
return STORAGE_COMMIT;
}
}
}
}
return STORAGE_NONE;
}
// Abandon the batched DB writes for this table, then hand the table back to
// the sync subsystem (not dropped, not committed).
bool TableStorageItem::rollBack()
{
{
// Inner scope: hold the session lock only for the DB rollback itself.
LockedSociSession sql_session = getTxStoreDBConn().GetDBConn()->checkoutDb();
TxStoreTransaction &stTran = getTxStoreTrans();
stTran.rollback();
JLOG(journal_.warn()) << " TableStorageItem::rollBack " << sTableName_;
}
app_.getTableSync().ReStartOneTable(accountID_, sTableNameInDB_, sTableName_, false, false);
return true;
}
// Commit the batched DB writes: persist the sync checkpoint (unless the table
// was dropped), commit the SQL transaction, restart syncing, and publish a
// db_success result for every committed tx to subscribers.
bool TableStorageItem::commit()
{
{
// Inner scope: hold the session lock only for the DB commit itself.
LockedSociSession sql_session = getTxStoreDBConn().GetDBConn()->checkoutDb();
TxStoreTransaction &stTran = getTxStoreTrans();
if(!bDropped_)
getTableStatusDB().UpdateSyncDB(to_string(accountID_), sTableNameInDB_, to_string(txnHash_), to_string(txnLedgerSeq_), to_string(ledgerHash_), to_string(LedgerSeq_), txUpdateHash_.isNonZero()?to_string(txUpdateHash_) : "", to_string(lastTxTm_),"");
stTran.commit();
}
app_.getTableSync().ReStartOneTable(accountID_, sTableNameInDB_, sTableName_, bDropped_, true);
auto result = std::make_pair("db_success", "");
for (auto& info : txList_)
{
auto txn = app_.getMasterTransaction().fetch(info.uTxHash, true);
if (txn) {
app_.getOPs().pubTableTxs(accountID_, sTableName_, *txn->getSTransaction(), result, false);
}
}
return true;
}
// Thin forwarder to the status DB's UpdateSyncDB (delete-flag overload).
bool TableStorageItem::DoUpdateSyncDB(const std::string &Owner, const std::string &TableNameInDB, bool bDel,
const std::string &PreviousCommit)
{
return getTableStatusDB().UpdateSyncDB(Owner, TableNameInDB, bDel, PreviousCommit);
}
// Lazily create this item's dedicated DB connection. Logs (but still returns
// the wrapper) when the underlying connection could not be established;
// callers check GetDBConn() for NULL.
TxStoreDBConn& TableStorageItem::getTxStoreDBConn()
{
if (conn_ == NULL)
{
conn_ = std::make_unique<TxStoreDBConn>(cfg_);
if (conn_->GetDBConn() == NULL)
{
JLOG(journal_.error()) << "TableStorageItem::getTxStoreDBConn() return null";
}
}
return *conn_;
}
// Lazily open the SQL transaction that batches this table's writes until
// commit()/rollBack() decides its fate.
TxStoreTransaction& TableStorageItem::getTxStoreTrans()
{
if (uTxStoreTrans_ == NULL)
{
uTxStoreTrans_ = std::make_unique<TxStoreTransaction>(&getTxStoreDBConn());
}
return *uTxStoreTrans_;
}
// Lazily create the TxStore bound to this item's own DB connection.
TxStore& TableStorageItem::getTxStore()
{
if (pObjTxStore_ == NULL)
{
auto& conn = getTxStoreDBConn();
pObjTxStore_ = std::make_unique<TxStore>(conn.GetDBConn(), cfg_, journal_);
}
return *pObjTxStore_;
}
// Lazily create the sync-status DB accessor, choosing the SQLite or MySQL
// backend from the configured sync_db "type" (anything but "sqlite" → MySQL).
TableStatusDB& TableStorageItem::getTableStatusDB()
{
if (pObjTableStatusDB_ == NULL)
{
DatabaseCon::Setup setup = ripple::setup_SyncDatabaseCon(cfg_);
std::pair<std::string, bool> result = setup.sync_db.find("type");
if (result.first.compare("sqlite") == 0)
pObjTableStatusDB_ = std::make_unique<TableStatusDBSQLite>(getTxStoreDBConn().GetDBConn(), &app_, journal_);
else
pObjTableStatusDB_ = std::make_unique<TableStatusDBMySQL>(getTxStoreDBConn().GetDBConn(), &app_, journal_);
}
return *pObjTableStatusDB_;
}
// Drive this item once per validated-ledger tick. Returns true when the item
// has reached a terminal state (rolled back or committed) and can be removed;
// false means keep it alive and check again later.
bool TableStorageItem::doJob(LedgerIndex CurLedgerVersion)
{
    // Nothing pending, or some pending tx can no longer appear on-chain:
    // abandon the batch. (Short-circuit preserves the original order —
    // CheckLastLedgerSeq only runs when the list is non-empty.)
    if (txList_.size() <= 0 || !CheckLastLedgerSeq(CurLedgerVersion))
    {
        rollBack();
        return true;
    }

    switch (CheckSuccess(CurLedgerVersion))
    {
    case STORAGE_ROLLBACK:
        rollBack();
        return true;
    case STORAGE_COMMIT:
        commit();
        return true;
    default:
        // STORAGE_NONE: no decision yet.
        return false;
    }
}
}
|
// Babel loose-mode helper: shallow-copy `source` minus the keys listed in
// `excluded`. Returns an empty object when source is null/undefined.
function _objectWithoutPropertiesLoose(source, excluded) {
  if (source == null) return {};
  var result = {};
  var names = Object.keys(source);
  for (var idx = 0; idx < names.length; idx += 1) {
    var name = names[idx];
    if (excluded.indexOf(name) >= 0) continue;
    result[name] = source[name];
  }
  return result;
}
// Babel helper: guard against `this` being used before super() has run.
// Throws for undefined; returns the instance unchanged otherwise.
function _assertThisInitialized(self) {
  if (self === void 0) {
    throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
  }
  return self;
}
// Babel loose-mode inheritance: give subClass a fresh prototype chained to
// superClass.prototype, restore its constructor, and chain statics via __proto__.
function _inheritsLoose(subClass, superClass) {
  var proto = Object.create(superClass.prototype);
  proto.constructor = subClass;
  subClass.prototype = proto;
  subClass.__proto__ = superClass; // static (own-property) inheritance
}
// Babel helper: assign `key` on `obj`. Uses Object.defineProperty when the key
// already exists (re-asserting enumerable/configurable/writable), plain
// assignment otherwise. Returns obj for chaining.
function _defineProperty(obj, key, value) {
  if (!(key in obj)) {
    obj[key] = value;
    return obj;
  }
  Object.defineProperty(obj, key, {
    value: value,
    enumerable: true,
    configurable: true,
    writable: true
  });
  return obj;
}
function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); }
import React, { Component } from 'react';
import { compose } from 'recompose';
import { withTheme } from 'styled-components';
import { defaultProps } from '../../default-props';
import { Box } from '../Box';
import { Button } from '../Button';
import { Heading } from '../Heading';
import { Keyboard } from '../Keyboard';
import { StyledCalendar, StyledDay, StyledDayContainer, StyledWeek, StyledWeeks, StyledWeeksContainer } from './StyledCalendar';
import { addDays, addMonths, betweenDates, daysApart, endOfMonth, startOfMonth, subtractDays, subtractMonths, withinDates, updateDateRange } from './utils';
// Maps the calendar's `size` prop to the heading's horizontal pad value.
var headingPadMap = {
small: 'xsmall',
medium: 'small',
large: 'medium'
};
// Compute the window of day cells to render around `reference`: from the
// beginning of the week containing the 1st of the month, spanning six weeks.
var buildStartEnd = function buildStartEnd(reference, firstDayOfWeek) {
  var start = new Date(reference);
  start.setDate(1); // first of month
  if (start.getDay() === 0 && firstDayOfWeek === 1) {
    // In case Sunday is the first day of the month, and the user asked for
    // Monday to be the first day of the week, then we need to include Sunday
    // and six days prior.
    start = subtractDays(start, 6);
  } else {
    // beginning of week
    start = subtractDays(start, start.getDay() - firstDayOfWeek);
  }
  var end = addDays(start, 7 * 5 + 7); // 5 weeks to end of week
  return {
    start: start,
    end: end
  };
};
// Derive initial calendar state (start/end window plus the normalized
// reference Date) from props. Preference order for the reference date:
// props.reference, props.date, first entry of props.dates, today.
var buildState = function buildState(props) {
  var firstDayOfWeek = props.firstDayOfWeek;
  var seed;
  if (props.reference) {
    seed = new Date(props.reference);
  } else if (props.date) {
    seed = new Date(props.date);
  } else if (props.dates && props.dates.length > 0) {
    var first = props.dates[0];
    if (typeof first === 'string') {
      seed = new Date(first);
    } else if (Array.isArray(first)) {
      // a range: use its starting date
      seed = new Date(first[0]);
    } else {
      seed = new Date();
    }
  } else {
    seed = new Date();
  }
  return _extends({}, buildStartEnd(seed, firstDayOfWeek), {
    reference: seed
  });
};
var Calendar =
/*#__PURE__*/
function (_Component) {
_inheritsLoose(Calendar, _Component);
// Transpiled (Babel ES5) constructor for the Calendar component. All instance
// handlers are attached here as own properties via _defineProperty.
function Calendar() {
var _this;
// Forward every constructor argument to the React.Component base class.
for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
args[_key] = arguments[_key];
}
_this = _Component.call.apply(_Component, [this].concat(args)) || this;
_defineProperty(_assertThisInitialized(_this), "state", {});
// dayRefs maps day ISO strings to React refs; rebuilt on every render.
_defineProperty(_assertThisInitialized(_this), "dayRefs", {});
// After a slide animation, collapse the rendered window to the target
// start/end and clear the slide state. Debounced via this.timer.
_defineProperty(_assertThisInitialized(_this), "clearSlideStateLater", function () {
clearTimeout(_this.timer);
_this.timer = setTimeout(function () {
var targetStartEnd = _this.state.targetStartEnd;
if (targetStartEnd) {
_this.setState({
start: targetStartEnd.start,
end: targetStartEnd.end,
targetStartEnd: undefined,
slide: undefined
});
} // Wait for animation to finish before cleaning up. Empirically determined.
}, 800);
});
// Move the calendar to a new reference date (within bounds), sliding the
// week rows up or down when animation is enabled, then notify onReference.
_defineProperty(_assertThisInitialized(_this), "setReference", function (reference) {
var _this$props = _this.props,
animate = _this$props.animate,
bounds = _this$props.bounds,
firstDayOfWeek = _this$props.firstDayOfWeek,
onReference = _this$props.onReference;
var _this$state = _this.state,
start = _this$state.start,
end = _this$state.end,
targetStartEnd = _this$state.targetStartEnd;
if (betweenDates(reference, bounds)) {
var nextStartEnd = buildStartEnd(reference, firstDayOfWeek);
var nextState = {
reference: reference
}; // if we're changing too fast, bypass animation
if (!animate || targetStartEnd) {
nextState.targetStartEnd = nextStartEnd;
nextState.start = nextStartEnd.start;
nextState.end = nextStartEnd.end;
nextState.targetStartEnd = undefined;
nextState.slide = undefined;
} else {
nextState.targetStartEnd = nextStartEnd;
// Extend the window in the direction of travel and record the slide
// distance in weeks for the CSS animation.
if (nextStartEnd.start.getTime() < start.getTime()) {
nextState.start = nextStartEnd.start;
nextState.slide = {
direction: 'down',
weeks: daysApart(start, nextStartEnd.start) / 7
};
} else if (nextStartEnd.end.getTime() > end.getTime()) {
nextState.end = nextStartEnd.end;
nextState.slide = {
direction: 'up',
weeks: daysApart(nextStartEnd.end, end) / 7
};
}
}
_this.clearSlideStateLater();
_this.setState(nextState, function () {
if (onReference) {
onReference(reference.toISOString());
}
});
}
});
// Returns a focus handler for a day cell: track the focused day and, when
// focus crosses into another month, re-anchor the calendar on that day.
_defineProperty(_assertThisInitialized(_this), "onFocus", function (day) {
return function () {
var bounds = _this.props.bounds;
var reference = _this.state.reference;
if (betweenDates(day, bounds)) {
_this.setState({
focused: day
}, function () {
if (day.getMonth() !== reference.getMonth()) {
_this.setReference(day);
}
});
}
};
});
// Returns a click handler for a day cell: in range mode update the selected
// range state before notifying onSelect; otherwise pass the date through.
_defineProperty(_assertThisInitialized(_this), "onClickDay", function (dateString) {
return function () {
var _this$props2 = _this.props,
onSelect = _this$props2.onSelect,
range = _this$props2.range;
if (range) {
var nextState = updateDateRange(dateString, _this.state);
_this.setState(nextState);
if (onSelect) {
onSelect(nextState.dates || nextState.date || undefined);
}
} else if (onSelect) {
onSelect(dateString);
}
};
});
// Imperatively focus the DOM node for a given day, if it was rendered.
_defineProperty(_assertThisInitialized(_this), "setFocus", function (day) {
var ref = _this.dayRefs[day.toISOString()];
if (ref && ref.current) {
ref.current.focus();
}
});
// Default header: month/year heading plus previous/next month buttons,
// disabled when the adjacent month falls outside bounds.
_defineProperty(_assertThisInitialized(_this), "renderCalendarHeader", function (previousMonth, nextMonth) {
var _this$props3 = _this.props,
bounds = _this$props3.bounds,
locale = _this$props3.locale,
size = _this$props3.size,
theme = _this$props3.theme;
var reference = _this.state.reference;
var PreviousIcon = size === 'small' ? theme.calendar.icons.small.previous : theme.calendar.icons.previous;
var NextIcon = size === 'small' ? theme.calendar.icons.small.next : theme.calendar.icons.next;
return React.createElement(Box, {
direction: "row",
justify: "between",
align: "center"
}, React.createElement(Box, {
flex: true,
pad: {
horizontal: headingPadMap[size] || 'small'
}
}, React.createElement(Heading, {
level: size === 'small' ? 4 : 3,
size: size,
margin: "none"
}, reference.toLocaleDateString(locale, {
month: 'long',
year: 'numeric'
}))), React.createElement(Box, {
flex: false,
direction: "row",
align: "center"
}, React.createElement(Button, {
a11yTitle: previousMonth.toLocaleDateString(locale, {
month: 'long',
year: 'numeric'
}),
icon: React.createElement(PreviousIcon, {
size: size !== 'small' ? size : undefined
}),
disabled: !betweenDates(previousMonth, bounds),
onClick: function onClick() {
return _this.setReference(previousMonth);
}
}), React.createElement(Button, {
a11yTitle: nextMonth.toLocaleDateString(locale, {
month: 'long',
year: 'numeric'
}),
icon: React.createElement(NextIcon, {
size: size !== 'small' ? size : undefined
}),
disabled: !betweenDates(nextMonth, bounds),
onClick: function onClick() {
return _this.setReference(nextMonth);
}
})));
});
// Optional row of narrow weekday names starting from the window's start day.
_defineProperty(_assertThisInitialized(_this), "renderDaysOfWeek", function (locale, size, start) {
var day = new Date(start);
var days = [];
while (days.length < 7) {
days.push(React.createElement(StyledDayContainer, {
key: days.length,
sizeProp: size
}, React.createElement(StyledDay, {
otherMonth: true,
sizeProp: size
}, day.toLocaleDateString(locale, {
weekday: 'narrow'
}))));
day = addDays(day, 1);
}
return React.createElement(StyledWeek, null, days);
});
return _this;
}
// Sync controlled props into state: mirror date/dates whenever the caller
// supplies them, and (re)build the start/end window on first mount or when a
// new reference prop arrives.
Calendar.getDerivedStateFromProps = function getDerivedStateFromProps(nextProps, prevState) {
var reference = nextProps.reference;
var prevReference = prevState.reference;
if (Object.prototype.hasOwnProperty.call(nextProps, 'date') || Object.prototype.hasOwnProperty.call(nextProps, 'dates') || !prevReference || reference) {
var state = {};
if (Object.prototype.hasOwnProperty.call(nextProps, 'date') || Object.prototype.hasOwnProperty.call(nextProps, 'dates')) {
state.date = nextProps.date;
state.dates = nextProps.dates;
}
if (!prevReference || reference) {
state = _extends({}, state, {}, buildState(nextProps));
}
return state;
}
return null;
};
var _proto = Calendar.prototype;
// After updates, move DOM focus to the focused day's cell if it isn't
// already the active element (keeps keyboard navigation anchored).
_proto.componentDidUpdate = function componentDidUpdate() {
var focused = this.state.focused;
if (focused) {
var ref = this.dayRefs[focused.toISOString()];
if (ref && ref.current && ref.current !== document.activeElement) {
ref.current.focus();
}
}
};
// Cancel the pending clearSlideStateLater timeout to avoid setState after unmount.
_proto.componentWillUnmount = function componentWillUnmount() {
clearTimeout(this.timer);
};
// Render the calendar: header (custom or default), optional weekday row, and
// the grid of week rows built day-by-day across the start..end window.
_proto.render = function render() {
var _this2 = this;
var _this$props4 = this.props,
bounds = _this$props4.bounds,
dateProp = _this$props4.date,
datesProp = _this$props4.dates,
disabled = _this$props4.disabled,
daysOfWeek = _this$props4.daysOfWeek,
firstDayOfWeek = _this$props4.firstDayOfWeek,
header = _this$props4.header,
locale = _this$props4.locale,
onReference = _this$props4.onReference,
onSelect = _this$props4.onSelect,
range = _this$props4.range,
showAdjacentDays = _this$props4.showAdjacentDays,
size = _this$props4.size,
theme = _this$props4.theme,
rest = _objectWithoutPropertiesLoose(_this$props4, ["bounds", "date", "dates", "disabled", "daysOfWeek", "firstDayOfWeek", "header", "locale", "onReference", "onSelect", "range", "showAdjacentDays", "size", "theme"]);
var _this$state2 = this.state,
date = _this$state2.date,
dates = _this$state2.dates,
focused = _this$state2.focused,
start = _this$state2.start,
reference = _this$state2.reference,
end = _this$state2.end,
slide = _this$state2.slide; // We have to deal with reference being the end of a month with more
// days than the month we are changing to. So, we always set reference
// to the first of the month before changing the month.
var previousMonth = endOfMonth(subtractMonths(startOfMonth(reference), 1));
var nextMonth = startOfMonth(addMonths(startOfMonth(reference), 1));
var weeks = [];
var day = new Date(start);
var days;
// Rebuild the day-ref map from scratch each render.
this.dayRefs = {};
while (day.getTime() < end.getTime()) {
// Start a new week row each time we hit the configured first weekday.
if (day.getDay() === firstDayOfWeek) {
if (days) {
weeks.push(React.createElement(StyledWeek, {
key: day.getTime()
}, days));
}
days = [];
}
var otherMonth = day.getMonth() !== reference.getMonth();
if (!showAdjacentDays && otherMonth) {
// Placeholder cell: keeps the grid aligned without showing the date.
days.push(React.createElement(StyledDayContainer, {
key: day.getTime(),
sizeProp: size
}, React.createElement(StyledDay, {
sizeProp: size
})));
} else {
var dateString = day.toISOString();
this.dayRefs[dateString] = React.createRef();
var selected = false;
var inRange = false;
// withinDates: 2 = exact selection, 1 = inside a selected range.
var selectedState = withinDates(day, date || dates);
if (selectedState === 2) {
selected = true;
} else if (selectedState === 1) {
inRange = true;
}
var dayDisabled = withinDates(day, disabled) || bounds && !betweenDates(day, bounds);
days.push(React.createElement(StyledDayContainer, {
key: day.getTime(),
sizeProp: size
}, React.createElement(Button, {
ref: this.dayRefs[dateString],
a11yTitle: day.toDateString(),
plain: true,
hoverIndicator: !dayDisabled,
disabled: dayDisabled,
onClick: this.onClickDay(dateString),
onFocus: this.onFocus(day),
onBlur: function onBlur() {
return _this2.setState({
focused: false
});
}
}, React.createElement(StyledDay, {
inRange: inRange,
otherMonth: day.getMonth() !== reference.getMonth(),
isSelected: selected,
sizeProp: size
}, day.getDate()))));
}
day = addDays(day, 1);
}
// Flush the final (partial) week row.
weeks.push(React.createElement(StyledWeek, {
key: day.getTime()
}, days));
// Keyboard wraps the grid for arrow-key day navigation.
return React.createElement(StyledCalendar, _extends({
sizeProp: size
}, rest), React.createElement(Keyboard, {
onUp: function onUp(event) {
event.preventDefault();
_this2.setFocus(addDays(focused, -7));
},
onDown: function onDown(event) {
event.preventDefault();
_this2.setFocus(addDays(focused, 7));
},
onLeft: function onLeft() {
return focused && _this2.setFocus(addDays(focused, -1));
},
onRight: function onRight() {
return focused && _this2.setFocus(addDays(focused, 1));
}
}, React.createElement(Box, null, header ? header({
date: reference,
locale: locale,
onPreviousMonth: function onPreviousMonth() {
return _this2.setReference(previousMonth);
},
onNextMonth: function onNextMonth() {
return _this2.setReference(nextMonth);
},
previousInBound: betweenDates(previousMonth, bounds),
nextInBound: betweenDates(nextMonth, bounds)
}) : this.renderCalendarHeader(previousMonth, nextMonth), daysOfWeek && this.renderDaysOfWeek(locale, size, start), React.createElement(StyledWeeksContainer, {
sizeProp: size
}, React.createElement(StyledWeeks, {
slide: slide,
sizeProp: size
}, weeks)))));
};
return Calendar;
}(Component);
// Static defaults; prototype-linked to the shared defaultProps object so
// theme-level defaults show through without being copied.
_defineProperty(Calendar, "defaultProps", {
animate: true,
firstDayOfWeek: 0,
size: 'medium',
locale: 'en-US',
showAdjacentDays: true
});
Object.setPrototypeOf(Calendar.defaultProps, defaultProps);
// Outside production builds, wrap the component with its generated docs.
var CalendarDoc;
if (process.env.NODE_ENV !== 'production') {
CalendarDoc = require('./doc').doc(Calendar); // eslint-disable-line global-require
}
// Inject the styled-components theme and export under the public name.
var CalendarWrapper = compose(withTheme)(CalendarDoc || Calendar);
export { CalendarWrapper as Calendar };
|
#!/bin/bash
#
# Thesis Ch4: When does UC matter Grid HEADER
# ERCOT 2007, min 200MW gen, Year as 52 weeks
# Full Ops w/ Maintenance, 80MW min UC integer, non-parallel
# No B&B priority, No cheat, No cap limit helper
#
# To actually submit the job use:
# qsub SCRIPT_NAME
#+++++++++++++++++ TEMPLATE GAMS-CPLEX Header +++++++++++++++++
# No printf parameters
# Version History
# Ver Date Time Who What
# --- ---------- ----- -------------- ---------------------------------
# 1 2012-08-24 00:20 bpalmintier TEMPLATE based on whenUC_t12 (whenUC_base v4)
# 2 2012-08-25 09:25 bpalmintier Correct renew_to_rps to match StaticCapPlan v82. It was & still is disabled.
# Simple BASH script to run and time a series of GAMS jobs to compare the run
# time of binary vs clustered unit commitment both with and without capacity
# expansion decisions
#
#========= Setup Job Queue Parameters ==========
# IMPORTANT: The lines beginning #PBS set various queuing parameters, they are not simple comments
#
# name of submitted job, also name of output file unless specified
# The default job name is the name of this script, so here we suppress the job naming so
# we get unique names for all of our jobs
# (the double hash below leaves this directive disabled)
##PBS -N matlab_pbs
#
# Ask for all 1 node with 8 processors. this may or may not give
# exclusive access to a machine, but typically the queueing system will
# assign the 8 core machines first
#
# By requiring 20GB we ensure we get one of the machines with 24GB (or maybe a 12 core unit)
#PBS -l nodes=1:ppn=12,mem=40gb
#
# This option merges any error messages into output file
#PBS -j oe
#
# Select the queue based on maximum run times. OPT are:
# short 2hr
# medium 8hr
# long 24hr
# xlong 48hr, extendable to 168hr using -l walltime= option below
#PBS -q xlong
# And up the run time to the maximum of a full week (168 hrs)
# NOTE(review): the comment above says 168 hrs but the walltime below is
# 62:00:00 -- confirm the intended limit.
#PBS -l walltime=62:00:00
echo "Node list:"
cat $PBS_NODEFILE
echo "Disk usage:"
df -h
#Set things up to load modules
source /etc/profile.d/modules.sh
#Load recent version of GAMS
module load gams/23.6.3
#Set path to gams in environment variable so MATLAB can read it
GAMS=`which gams`
export GAMS
#And load CPLEX
module load cplex
#Establish a working directory in scratch
#Will give error if it already exists, but script continues anyway
mkdir /scratch/b_p
#Clean anything out of our scratch folder (Assumes exclusive machine usage)
rm -r /scratch/b_p/*
#Make a new subfolder for this job
SCRATCH="/scratch/b_p/${PBS_JOBID}"
mkdir $SCRATCH
#Establish our model directory
MODEL_DIR="${HOME}/projects/advpower/models/capplan/"
#----------------------------
# Setup gams OPT
#----------------------------
#Stamp this run set with the date/time and SVN revision for traceability
DATE_TIME=`date +%y%m%d-%H%M`
ADVPOWER_REPO_VER=`svnversion ~/projects/advpower`
echo "Date & Time:" ${DATE_TIME}
echo "SVN Repository Version:" ${ADVPOWER_REPO_VER}
GAMS_MODEL="StaticCapPlan"
#======= Shared Setup =======
OUT_DIR="${HOME}/projects/advpower/results/gams/whenUC_grid/"
#Make sure output directory exists
# Fix: use -p so an already-existing directory is not an error (and missing
# parents are created), matching the stated intent above.
mkdir -p "${OUT_DIR}"
# Default GAMS OPT to:
# errmsg: enable in-line description of errors in list file
# lf & lo: store the solver log (normally printed to screen) in $OUT_DIR
# o: rename the list file and store in $OUT_DIR
# inputdir: Look for $include and $batinclude files in $WORK_DIR
# And Advanced Power Model OPT to:
# out_dir: specify directory for CSV output files
# out_prefix: add a unique run_id to all output files
# memo: encode some helpful run information in the summary file
#
# Plus additional user supplied OPT pasted into template
# Options shared by all runs across all files
COMMON_IO_OPT=" -errmsg=1 -lo=2 -inputdir=${MODEL_DIR} --out_dir=${OUT_DIR} "
ALL_RUN_OPT=" --sys=thesis_sys.inc --min_gen_size=0.2 --plan_margin=on --skip_cap_limit=1"
# Options common to the runs in this file
THIS_FILE_OPT="${ALL_RUN_OPT} --maint=1 --demand=ercot2007_dem_yr_as_52wk.inc --retire=0.5 "
# Note: 210000sec=58hrs
LONG_OPT=" "
PAR_OPT=" --par_threads=3 --lp_method=6 --par_mode=-1 --probe=2 "
#+++++++++++++++++ END HEADER +++++++++++++++++
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_simp_cmx141_r20"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --co2cap=141 --rps=0.2 --maint=1 --derate=1 --plan_margin=on "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
# Launched in the background so the next run can start in parallel
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
# Return to the model directory before setting up the next run
cd ${MODEL_DIR}
pwd
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_edRsv_cmx141_r20_sep"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --uc_ignore_unit_min=Inf --co2cap=141 --rps=0.2 --maint=1 --rsrv=separate --plan_margin=0.05 "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
# Launched in the background so the next run can start in parallel
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
# Return to the model directory before setting up the next run
cd ${MODEL_DIR}
pwd
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_uclp_cmx141_r20_sep"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --uc_int_unit_min=Inf --co2cap=141 --rps=0.2 --maint=1 --rsrv=separate --ramp=1 --unit_commit=1 --startup=1 --min_up_down=1 --plan_margin=0.05 "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
cd ${MODEL_DIR}
pwd
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_full_int60_cmx141_r20_sep"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --uc_int_unit_min=0.06 --co2cap=141 --rps=0.2 --maint=1 --rsrv=separate --ramp=1 --unit_commit=1 --startup=1 --min_up_down=1 --plan_margin=0.05 "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
cd ${MODEL_DIR}
pwd
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_simp_cmx141_r40"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --co2cap=141 --rps=0.4 --maint=1 --derate=1 --plan_margin=on "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
cd ${MODEL_DIR}
pwd
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_edRsv_cmx141_r40_sep"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --uc_ignore_unit_min=Inf --co2cap=141 --rps=0.4 --maint=1 --rsrv=separate --plan_margin=0.05 "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
cd ${MODEL_DIR}
pwd
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_uclp_cmx141_r40_sep"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --uc_int_unit_min=Inf --co2cap=141 --rps=0.4 --maint=1 --rsrv=separate --ramp=1 --unit_commit=1 --startup=1 --min_up_down=1 --plan_margin=0.05 "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
cd ${MODEL_DIR}
pwd
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_full_int60_cmx141_r40_sep"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --uc_int_unit_min=0.06 --co2cap=141 --rps=0.4 --maint=1 --rsrv=separate --ramp=1 --unit_commit=1 --startup=1 --min_up_down=1 --plan_margin=0.05 "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
cd ${MODEL_DIR}
pwd
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_simp_cmx141_r60"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --co2cap=141 --rps=0.6 --maint=1 --derate=1 --plan_margin=on "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
cd ${MODEL_DIR}
pwd
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_edRsv_cmx141_r60_sep"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --uc_ignore_unit_min=Inf --co2cap=141 --rps=0.6 --maint=1 --rsrv=separate --plan_margin=0.05 "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
cd ${MODEL_DIR}
pwd
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_uclp_cmx141_r60_sep"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --uc_int_unit_min=Inf --co2cap=141 --rps=0.6 --maint=1 --rsrv=separate --ramp=1 --unit_commit=1 --startup=1 --min_up_down=1 --plan_margin=0.05 "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
cd ${MODEL_DIR}
pwd
#+++++++++++++++++ Repeated GAMS running Template +++++++++++++++++
# Template requires 2 (printf style) substitutions:
# string run_id
# string gams_options
#
# See header template for version information
RUN_CODE="whenUC_full_int60_cmx141_r60_sep"
#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd
# Setup run specific OPT
# Note reduced planning margin b/c computing reserves directly
RUN_OPT=" --max_solve_time=216000 --uc_int_unit_min=0.06 --co2cap=141 --rps=0.6 --maint=1 --rsrv=separate --ramp=1 --unit_commit=1 --startup=1 --min_up_down=1 --plan_margin=0.05 "
SHARE_OPT=" ${LONG_OPT} ${THIS_FILE_OPT} "
IO_OPT=" ${COMMON_IO_OPT} -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} "
#Now run GAMS-CPLEX
echo "--- GAMS Run code ${RUN_CODE} ---"
echo " GAMS Model ${GAMS_MODEL}"
echo " IO OPT ${IO_OPT}"
echo " Shared OPT: ${SHARE_OPT}"
echo " Run OPT: ${RUN_OPT}"
echo .
gams ${GAMS_MODEL} ${IO_OPT} ${SHARE_OPT} ${RUN_OPT} &
cd ${MODEL_DIR}
pwd
#=== Footer Template ====
# No printf parameters
#
# See header template for version information
#Wait until all background jobs are complete (each run was launched with &)
wait
echo "GAMS Done (All runs)"
echo .
#See how much disk space we used
df -h
#Clean-up scratch space
# NOTE(review): the path below is hard-coded rather than derived from
# ${SCRATCH}; presumably SCRATCH expands to /scratch/b_p on the target
# cluster -- confirm before reusing this script under another account.
echo "Cleaning up our Scratch Space"
cd
rm -r /scratch/b_p/*
df -h
echo "Script Complete ${PBS_JOBID}"
|
<reponame>EchoofthePast/Dub<filename>handlers/edithandler.go
package handlers
import(
"os"
"fmt"
"html"
"strings"
"net/http"
"html/template"
"github.com/Creator/Dub/static/goget"
)
// EditHandler serves the edit page.  It logs the request method, runs an
// optional "searchbox" lookup via goget.Gopher, copies POSTed form fields
// into a fixed-slot array, persists them with goget.CreateFile, and finally
// renders templates/edit.html.
//
// NOTE(review): `data` is assigned and rendered below but is not declared in
// this function or file -- presumably a package-level variable elsewhere in
// package handlers; verify.
func EditHandler(w http.ResponseWriter, r *http.Request) {
	// Parse the query string and (for POST) the body into r.Form.
	r.ParseForm()
	if r.Method == "GET" {
		fmt.Fprintf(os.Stdout, "GET, %q", html.EscapeString(r.URL.Path))
	} else if r.Method == "POST" {
		fmt.Fprintf(os.Stdout, "POST, %q", html.EscapeString(r.URL.Path))
	} else {
		// Other methods get a 405, but note the handler still falls
		// through and renders the template at the bottom.
		http.Error(w, "Invalid request method.", 405)
	}
	method := r.Method
	fmt.Println(" Method:", method)
	search := r.Form["searchbox"] // zero or more submitted search values
	for index := range search {
		if search[index] != "" {
			input := search[index]
			//Addition user error prevention required
			//data = gopher(input) //
			// Look up the search term; the result is rendered into the
			// template at the end of the handler.
			data = goget.Gopher(input)
			// Per-character scan of the input intended to report word
			// lengths.  NOTE(review): `letters` and all counters are
			// re-declared on every iteration, so no state survives between
			// characters -- the word-splitting logic below likely does not
			// behave as intended; confirm before relying on its output.
			for index := range input {
				var letters [255]string
				letters[index] = string(input[index])
				var i int
				var length1 int
				var length2 int
				var words string
				if letters[index] == " " {
					fmt.Println("Search Word ", index, letters[index])
					i++
					length1 = index
					store := length1
					//fmt.Println("The Length of the Word is ", length1)
					// store was just set from index, so index == store always
					// holds; but the inner letters[index] != " " test
					// contradicts the enclosing == " " branch, making the
					// inner body unreachable (dead code).
					if index == store{
						if letters[index] != " " {
							for index = range letters{
								words = letters[index]
							}
							fmt.Println("Search Word ", index, words)
							i++
							length2 = index
							//fmt.Println("The Length of the Word is ", length2)
						}
					}
					len1 := length1
					len2 := length2
					memory := len(letters)
					remaining := memory -(len1+len2)
					// total length of the search
					fmt.Println("First Word:", len1)
					fmt.Println("Second Word:", len2)
					fmt.Println("Memory Remaining:", remaining)
				}
			}
			//fmt.Println("Name", data)
		}
	}
	// Fixed-slot staging array for form values; slots 0-18 are mapped to the
	// named variables below.
	var output [255]string
	var key string
	var value string
	var sname string
	var fname string
	var mname string
	var id string
	var num string
	var age string
	var tel string
	var cell string
	var address string
	var suburb string
	var city string
	var province string
	var country string
	var information string
	var medication string
	//var add0 string
	//var add1 string
	//var add2 string
	// NOTE(review): add3/add4 are never assigned and print as empty strings
	// in the summary Println below.
	var add3 string
	var add4 string
	var doc0 string
	var doc1 string
	var doc2 string
	//get the input from the button arrays
	if r.Method == "POST" {
		// Map each known form key to its slot in output.  Multi-valued
		// fields are joined with no separator.
		for k, v := range r.Form {
			key = k
			value = strings.Join(v, "")
			if key == "firstname" {
				output[0] = value
			} else if key == "surname" {
				output[1] = value
			} else if key == "middlename" {
				output[2] = value
			} else if key == "id" {
				output[3] = value
			} else if key == "number" {
				output[4] = value
			} else if key == "age" {
				output[5] = value
			} else if key == "tel" {
				output[6] = value
			} else if key == "cell" {
				output[7] = value
			} else if key == "address" {
				output[8] = value
			} else if key == "suburb" {
				output[9] = value
			} else if key == "city" {
				output[10] = value
			} else if key == "province" {
				output[11] = value
			} else if key == "country" {
				output[12] = value
			} else if key == "information" {
				output[13] = value
			} else if key == "medication" {
				output[14] = value
			} else if key == "doc0" {
				output[15] = value
			} else if key == "doc1" {
				output[16] = value
			} else if key == "doc2" {
				output[17] = value
			} else if key == "doc3" {
				output[18] = value
			}
			fmt.Println("key:", key)
			fmt.Println("value", value)
			// Re-copy every slot into the named variables on each loop pass
			// (only the final pass matters for the summary print below).
			fname = output[0]
			sname = output[1]
			mname = output[2]
			id = output[3]
			num = output[4]
			age = output[5]
			tel = output[6]
			cell = output[7]
			address = output[8]
			suburb = output[9]
			city = output[10]
			province = output[11]
			country = output[12]
			information = output[13]
			medication = output[14]
			doc0 = output[15]
			doc1 = output[16]
			doc2 = output[17]
		}
		fmt.Println(fname, sname, mname,id, num, age,tel, cell,address,suburb, city, province, country ,information, medication,add3, add4, doc0, doc1,doc2)
		// Persist the collected form data (project helper; writes a file).
		goget.CreateFile(output)
	} //end of if statement
	// Render the edit page.  NOTE(review): ParseFiles panics via
	// template.Must on a missing template, and Execute's error is ignored.
	tmpl := template.Must(template.ParseFiles("templates/edit.html"))
	tmpl.Execute(w, data)
}//EditHandler Function End
|
#include "tests.h"
#include <chrono>
#include <gtest/gtest.h>
#include <memory>
#include <thread>
// Fixture: captures the certificate directory (CERTIFICATES_PATH is a
// build-time macro) at construction.
CryptoFixture::CryptoFixture() : _certPath(CERTIFICATES_PATH) {}
CryptoFixture::~CryptoFixture() {}
// Per-test setup: build the RSA helper from peer1's own key pair and the
// remote peer's public key stored under the certificate directory.
void CryptoFixture::SetUp() {
    std::string keyPairPath = _certPath + "peer1/mykeypair.pem";
    std::string peerPublicKeyPath = _certPath + "peer1/peer.pem";
    _crypto = std::make_shared<crypto::RSA>(keyPairPath, peerPublicKeyPath);
}
// No explicit cleanup needed: _crypto is released by shared_ptr.
void CryptoFixture::TearDown() {}
// Round-trip check: a nonce signed with the private key must decrypt back
// to the original value using the matching public key.
TEST_F(CryptoFixture, TestSigningAndDecryptingNonce) {
    const auto nonce = _crypto->generateNonce();
    const std::string signature = _crypto->signString(nonce);
    const std::string recovered =
        _crypto->decryptWithPublicKey(signature, _crypto->publicKey());
    ASSERT_EQ(nonce, recovered);
}
// Known-answer test: SHA-1 of peer1's public key must match the recorded
// hex digest (pins both the key material and the hash implementation).
TEST_F(CryptoFixture, TestHashingSHA1) {
    const std::string publicKey = _crypto->publicKey();
    const std::string digest = _crypto->sha1(publicKey, publicKey.length());
    ASSERT_EQ("78ed774f7871ab4e631031374fec8211e0cfb006", digest);
}
|
-------------------------------------------------------------------------------
--
-- Script: containing_chunk.sql
-- Purpose: to find the X$KSMSP chunk that contains a particular address
--
-- Copyright: (c) Ixora Pty Ltd
-- Author: <NAME>
--
-------------------------------------------------------------------------------
@save_sqlplus_settings
set verify off
-- Scan the shared pool chunk list (X$KSMSP) for the chunk whose address
-- range [ksmchptr, ksmchptr + ksmchsiz) contains the supplied address &Addr,
-- and report its contents tag, class, size and the offset of &Addr inside it.
-- NOTE: rawtonum is not a built-in; presumably a helper function installed by
-- the accompanying Ixora scripts that converts the RAW pointer to a number --
-- confirm it exists before running.
select
  x.ksmchcom  chunk_contents,
  x.ksmchcls  chunk_class,
  x.ksmchsiz  chunk_size,
  &Addr - rawtonum(x.ksmchptr)  offset_to_addr
from
  sys.x_$ksmsp  x
where
  -- restrict to the current instance (relevant under RAC / parallel server)
  x.inst_id = userenv('Instance') and
  &Addr >= rawtonum(x.ksmchptr) and
  &Addr < rawtonum(x.ksmchptr) + x.ksmchsiz
/
@restore_sqlplus_settings
|
/* mbed Microcontroller Library
* Copyright (c) 2016 u-blox
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MBED_OBJECTS_H
#define MBED_OBJECTS_H
#include "cmsis.h"
#include "PortNames.h"
#include "PeripheralNames.h"
#include "PinNames.h"
#include "stdbool.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Tri-state IRQ configuration: IRQ_NOT_SET distinguishes "never configured"
 * from an interrupt that was explicitly enabled (IRQ_ON) or disabled
 * (IRQ_OFF). */
typedef enum {
    IRQ_NOT_SET,
    IRQ_ON,
    IRQ_OFF
} irq_setting_t;
/* GPIO port object: cached pointers into the port's memory-mapped registers
 * plus the pin mask this instance operates on.  Register roles are inferred
 * from the field names (dir = direction, out = output, val = value/input,
 * drv = drive) -- TODO confirm against the target's register map. */
struct port_s {
    __IO uint32_t *reg_dir;
    __IO uint32_t *reg_out;
    __IO uint32_t *reg_val;
    __IO uint32_t *reg_drv;
    PortName port;   /* which port this object controls */
    uint32_t mask;   /* bit mask selecting the pins in use on this port */
};
/* GPIO interrupt object. */
struct gpio_irq_s {
    /* Don't bother with having a port number here as there's only one */
    uint32_t ch; /* Corresponds to the interrupt pin */
};
/* Serial (UART) port object: configuration plus cached hardware handle. */
struct serial_s {
    SerialConfig config;            /* which UART configuration is selected */
    PinName rx_pin;                 /* receive pin */
    PinName tx_pin;                 /* transmit pin */
    volatile uart_ctrl_t *reg_base; /* base of the UART control registers */
    uint8_t index;                  /* UART instance number */
    uint32_t baud_rate;             /* current baud rate */
    bool format_set; /* If true then the struct that follows is populated */
    struct {
        uint8_t stop_bits;
        uint8_t data_bits;
        uint8_t parity;
    } format;
    irq_setting_t irq_rx_setting;   /* RX interrupt state (see irq_setting_t) */
    irq_setting_t irq_tx_setting;   /* TX interrupt state (see irq_setting_t) */
};
#include "gpio_object.h"
#ifdef __cplusplus
}
#endif
#endif
|
#!/bin/bash
#####################################################################
#
# Linux on Hyper-V and Azure Test Code, ver. 1.0.0
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
# ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR
# PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
#
#####################################################################
# Description:
# This script checks that a legacy and a synthetic network adapter work together, without causing network issues to the VM.
# If there are more than one synthetic/legacy interfaces, it is enough for just one (of each type) to successfully ping the remote server.
# If the IP_IGNORE Parameter is given, the interface which owns that given address will not be able to take part in the test and will only be used to communicate with LIS
#
# Steps:
# 1. Get legacy and synthetic network interfaces
# 2. Try to get DHCP addresses for each of them
# 2a. If no DHCP, try to set static IP
# 3. Try to ping REMOTE_SERVER from each interface
#
#
# Parameters required:
# REMOTE_SERVER
#
# Optional parameters:
# TC_COVERED
# SYNTH_STATIC_IP
# LEGACY_STATIC_IP
# SYNTH_NETMASK
# LEGACY_NETMASK
# IP_IGNORE
# LO_IGNORE
# GATEWAY
#
# Parameter explanation:
# REMOTE_SERVER is the IP address of the remote server, pinged in the last step of the script
# SYNTH_STATIC_IP is an optional IP address assigned to the synthetic netadapter interface in case none was received via DHCP
# LEGACY_STATIC_IP is an optional IP address assigned to the legacy netadapter interface in case none was received via DHCP
# SYNTH_NETMASK is an optional netmask used in case no address was assigned to the synthetic netadapter via DHCP
# LEGACY_NETMASK is an optional netmask used in case no address was assigned to the legacy netadapter via DHCP
# IP_IGNORE is the IP Address of an interface that is not touched during this test (no dhcp or static ip assigned to it)
# - it can be used to specify the connection used to communicate with the VM, which needs to remain unchanged
# LO_IGNORE is an optional argument used to indicate that the loopback interface lo is not to be used during the test (it is usually detected as a legacy interface)
# GATEWAY is the IP Address of the default gateway
# TC_COVERED is the testcase number
#
#
############################################################################
# Convert eol (the file may arrive with Windows line endings from the host)
dos2unix Utils.sh
# Source Utils.sh -- provides LogMsg, UpdateSummary, SetTestState*, CheckIP,
# GetSynthNetInterfaces, GetLegacyNetInterfaces, etc.
. Utils.sh || {
	echo "Error: unable to source Utils.sh!"
	echo "TestAborted" > state.txt
	exit 2
}
# Source constants file and initialize most common variables
UtilsInit
# In case of error: $? still holds UtilsInit's status here (only comments
# separate the call from the case).  Codes: 0 ok, 1 cd failure,
# 2 unusable state file, 3 missing constants file.
case $? in
	0)
		# do nothing
		;;
	1)
		LogMsg "Unable to cd to $LIS_HOME. Aborting..."
		UpdateSummary "Unable to cd to $LIS_HOME. Aborting..."
		SetTestStateAborted
		exit 3
		;;
	2)
		LogMsg "Unable to use test state file. Aborting..."
		UpdateSummary "Unable to use test state file. Aborting..."
		# need to wait for test timeout to kick in
		# hailmary try to update teststate
		echo "TestAborted" > state.txt
		exit 4
		;;
	3)
		LogMsg "Error: unable to source constants file. Aborting..."
		UpdateSummary "Error: unable to source constants file"
		SetTestStateAborted
		exit 5
		;;
	*)
		# should not happen
		LogMsg "UtilsInit returned an unknown error. Aborting..."
		UpdateSummary "UtilsInit returned an unknown error. Aborting..."
		SetTestStateAborted
		exit 6
		;;
esac
# Parameter provided in constants file.  SYNTH_STATIC_IP is optional: used as
# a fallback address for the synthetic adapter if DHCP fails later on.
if [ "${SYNTH_STATIC_IP:-UNDEFINED}" = "UNDEFINED" ]; then
	msg="The test parameter SYNTH_STATIC_IP is not defined in constants file"
	LogMsg "$msg"
else
	# Validate that $SYNTH_STATIC_IP is the correct format
	CheckIP "$SYNTH_STATIC_IP"
	if [ 0 -ne $? ]; then
		msg="Variable SYNTH_STATIC_IP: $SYNTH_STATIC_IP does not contain a valid IPv4 address "
		LogMsg "$msg"
		UpdateSummary "$msg"
		SetTestStateAborted
		exit 30
	fi
fi
# Parameter provided in constants file.  LEGACY_STATIC_IP is the same
# optional fallback for the legacy adapter.
if [ "${LEGACY_STATIC_IP:-UNDEFINED}" = "UNDEFINED" ]; then
	msg="The test parameter LEGACY_STATIC_IP is not defined in constants file"
	LogMsg "$msg"
else
	# Validate that $LEGACY_STATIC_IP is the correct format
	CheckIP "$LEGACY_STATIC_IP"
	if [ 0 -ne $? ]; then
		msg="Variable LEGACY_STATIC_IP: $LEGACY_STATIC_IP does not contain a valid IPv4 address "
		LogMsg "$msg"
		UpdateSummary "$msg"
		SetTestStateAborted
		exit 30
	fi
fi
# Parameter provided in constants file; netmasks default to /24 when unset.
if [ "${SYNTH_NETMASK:-UNDEFINED}" = "UNDEFINED" ]; then
	msg="The test parameter SYNTH_NETMASK is not defined in constants file . Defaulting to 255.255.255.0"
	LogMsg "$msg"
	SYNTH_NETMASK=255.255.255.0
fi
# Parameter provided in constants file
if [ "${LEGACY_NETMASK:-UNDEFINED}" = "UNDEFINED" ]; then
	msg="The test parameter LEGACY_NETMASK is not defined in constants file . Defaulting to 255.255.255.0"
	LogMsg "$msg"
	LEGACY_NETMASK=255.255.255.0
fi
# Parameter provided in constants file.  REMOTE_SERVER is mandatory: it is
# the host pinged in the final step, so without it the test cannot proceed.
if [ "${REMOTE_SERVER:-UNDEFINED}" = "UNDEFINED" ]; then
	msg="The mandatory test parameter REMOTE_SERVER is not defined in constants file! Aborting..."
	LogMsg "$msg"
	UpdateSummary "$msg"
	SetTestStateAborted
	# Bug fix: previously the script set the Aborted state but fell through
	# and kept running without a remote server; exit like every other
	# validation block in this file.
	exit 10
fi
# set gateway parameter.  GATEWAY is optional: when unset, no default gateway
# is configured on any interface; when set, it must be a valid IPv4 address.
if [ "${GATEWAY:-UNDEFINED}" = "UNDEFINED" ]; then
	msg="The test parameter GATEWAY is not defined in constants file . No default gateway will be set for any interface."
	LogMsg "$msg"
	GATEWAY=''
else
	CheckIP "$GATEWAY"
	if [ 0 -ne $? ]; then
		# Bug fix: msg was the empty string here, so the log and summary
		# recorded blank entries on an invalid gateway.  Use the same wording
		# as the SYNTH/LEGACY_STATIC_IP validation blocks above.
		msg="Variable GATEWAY: $GATEWAY does not contain a valid IPv4 address "
		LogMsg "$msg"
		UpdateSummary "$msg"
		SetTestStateAborted
		exit 10
	fi
fi
# __iface_ignore: name of the interface carrying the LIS control connection;
# it must not be reconfigured during the test.
declare __iface_ignore
# Parameter provided in constants file.  ipv4 is the address of the LIS
# control connection and is mandatory.
if [ "${ipv4:-UNDEFINED}" = "UNDEFINED" ]; then
	msg="The test parameter ipv4 is not defined in constants file! Make sure you are using the latest LIS code."
	LogMsg "$msg"
	UpdateSummary "$msg"
	SetTestStateFailed
	exit 10
else
	CheckIP "$ipv4"
	if [ 0 -ne $? ]; then
		msg="Test parameter ipv4 = $ipv4 is not a valid IP Address"
		LogMsg "$msg"
		UpdateSummary "$msg"
		SetTestStateFailed
		exit 10
	fi
	# Get the interface associated with the given IP_IGNORE
	# (second field of `ip -o addr show` is the interface name)
	__iface_ignore=$(ip -o addr show| grep "$ipv4" | cut -d ' ' -f2)
fi
# Optionally disable NetworkManager so it cannot fight the manual
# dhclient/static configuration performed later in the test.
if [ "${DISABLE_NM:-UNDEFINED}" = "UNDEFINED" ]; then
	msg="The test parameter DISABLE_NM is not defined in constants file. If the NetworkManager is running it could interfere with the test."
	LogMsg "$msg"
else
	if [[ "$DISABLE_NM" =~ [Yy][Ee][Ss] ]]; then
		# work-around for suse where the network gets restarted in order to shutdown networkmanager.
		declare __orig_netmask
		GetDistro
		case "$DISTRO" in
			suse*)
				# remember the control interface's prefix length so its
				# address can be restored after the restart
				__orig_netmask=$(ip -o addr show | grep "$ipv4" | cut -d '/' -f2 | cut -d ' ' -f1)
				;;
		esac
		DisableNetworkManager
		case "$DISTRO" in
			suse*)
				# re-apply the saved address to the control interface
				# (order matters: down, flush, add, up)
				ip link set "$__iface_ignore" down
				ip addr flush dev "$__iface_ignore"
				ip addr add "$ipv4"/"$__orig_netmask" dev "$__iface_ignore"
				ip link set "$__iface_ignore" up
				;;
		esac
	fi
fi
# __lo_ignore: set to "lo" when the loopback interface must be excluded from
# the legacy-interface list (lo is usually detected as a legacy interface).
declare __lo_ignore
if [ "${LO_IGNORE:-UNDEFINED}" = "UNDEFINED" ]; then
	msg="The test parameter LO_IGNORE is not defined in constants file! The loopback interface may be used during the test."
	LogMsg "$msg"
	__lo_ignore=''
else
	ip link show lo >/dev/null 2>&1
	if [ 0 -ne $? ]; then
		msg="The loopback interface is not working"
		LogMsg "$msg"
	else
		__lo_ignore=lo
	fi
fi
# Retrieve synthetic network interfaces (populates SYNTH_NET_INTERFACES)
GetSynthNetInterfaces
if [ 0 -ne $? ]; then
	msg="No synthetic network interfaces found"
	LogMsg "$msg"
	UpdateSummary "$msg"
	SetTestStateFailed
	exit 10
fi
# Remove the LIS control interface if present (pattern substitution deletes
# the matching element's text; empty elements then vanish on re-expansion)
SYNTH_NET_INTERFACES=(${SYNTH_NET_INTERFACES[@]/$__iface_ignore/})
if [ ${#SYNTH_NET_INTERFACES[@]} -eq 0 ]; then
	msg="The only synthetic interface is the one which LIS uses to send files/commands to the VM."
	LogMsg "$msg"
	UpdateSummary "$msg"
	SetTestStateAborted
	exit 10
fi
LogMsg "Found ${#SYNTH_NET_INTERFACES[@]} synthetic interface(s): ${SYNTH_NET_INTERFACES[*]} in VM"
# Probe each remaining synthetic interface; collect the array positions of
# any interface `ip link show` cannot see.
declare -i __synth_iterator
declare -ai __invalid_positions
for __synth_iterator in "${!SYNTH_NET_INTERFACES[@]}"; do
	ip link show "${SYNTH_NET_INTERFACES[$__synth_iterator]}" >/dev/null 2>&1
	if [ 0 -ne $? ]; then
		__invalid_positions=("${__invalid_positions[@]}" "$__synth_iterator")
		LogMsg "Warning synthetic interface ${SYNTH_NET_INTERFACES[$__synth_iterator]} is unusable"
	fi
done
if [ ${#SYNTH_NET_INTERFACES[@]} -eq ${#__invalid_positions[@]} ]; then
	msg="No usable synthetic interface remains. "
	LogMsg "$msg"
	UpdateSummary "$msg"
	SetTestStateFailed
	exit 10
fi
# reset iterator and remove invalid positions from array
__synth_iterator=0
while [ $__synth_iterator -lt ${#__invalid_positions[@]} ]; do
	# eliminate from SYNTH_NET_INTERFACES array the interface located on position ${__invalid_positions[$__synth_iterator]}
	# (splice: elements before the position + elements after it)
	SYNTH_NET_INTERFACES=("${SYNTH_NET_INTERFACES[@]:0:${__invalid_positions[$__synth_iterator]}}" "${SYNTH_NET_INTERFACES[@]:$((${__invalid_positions[$__synth_iterator]}+1))}")
	: $((__synth_iterator++))
done
# delete array so the legacy pass below starts with a clean list
unset __invalid_positions
if [ 0 -eq ${#SYNTH_NET_INTERFACES[@]} ]; then
	# array is empty... but we checked for this case above
	msg="This should not have happened. Probable internal error above line $LINENO"
	LogMsg "$msg"
	UpdateSummary "$msg"
	SetTestStateFailed
	exit 100
fi
# Get the legacy netadapter interface (populates LEGACY_NET_INTERFACES)
GetLegacyNetInterfaces
if [ 0 -ne $? ]; then
	msg="No legacy network interfaces found"
	LogMsg "$msg"
	UpdateSummary "$msg"
	SetTestStateFailed
	exit 10
fi
# Remove loopback interface if LO_IGNORE is set (__lo_ignore is '' otherwise,
# in which case the substitution is a no-op)
LEGACY_NET_INTERFACES=(${LEGACY_NET_INTERFACES[@]/$__lo_ignore/})
if [ ${#LEGACY_NET_INTERFACES[@]} -eq 0 ]; then
	msg="The only legacy interface is the loopback interface lo, which was set to be ignored."
	LogMsg "$msg"
	UpdateSummary "$msg"
	SetTestStateAborted
	exit 10
fi
# Remove the LIS control interface if present
LEGACY_NET_INTERFACES=(${LEGACY_NET_INTERFACES[@]/$__iface_ignore/})
if [ ${#LEGACY_NET_INTERFACES[@]} -eq 0 ]; then
	msg="The only legacy interface is the one which LIS uses to send files/commands to the VM."
	LogMsg "$msg"
	UpdateSummary "$msg"
	SetTestStateAborted
	exit 10
fi
LogMsg "Found ${#LEGACY_NET_INTERFACES[@]} legacy interface(s): ${LEGACY_NET_INTERFACES[*]} in VM"
# Probe each legacy interface, same scheme as the synthetic pass above.
declare -i __legacy_iterator
declare -ai __invalid_positions
for __legacy_iterator in "${!LEGACY_NET_INTERFACES[@]}"; do
	ip link show "${LEGACY_NET_INTERFACES[$__legacy_iterator]}" >/dev/null 2>&1
	if [ 0 -ne $? ]; then
		# add current position to __invalid_positions array
		__invalid_positions=("${__invalid_positions[@]}" "$__legacy_iterator")
		LogMsg "Warning legacy interface ${LEGACY_NET_INTERFACES[$__legacy_iterator]} is unusable"
	fi
done
if [ ${#LEGACY_NET_INTERFACES[@]} -eq ${#__invalid_positions[@]} ]; then
	msg="No usable legacy interface remains"
	LogMsg "$msg"
	UpdateSummary "$msg"
	SetTestStateFailed
	exit 10
fi
# reset iterator and remove invalid positions from array
__legacy_iterator=0
while [ $__legacy_iterator -lt ${#__invalid_positions[@]} ]; do
	LEGACY_NET_INTERFACES=("${LEGACY_NET_INTERFACES[@]:0:${__invalid_positions[$__legacy_iterator]}}" "${LEGACY_NET_INTERFACES[@]:$((${__invalid_positions[$__legacy_iterator]}+1))}")
	: $((__legacy_iterator++))
done
# delete array
unset __invalid_positions
if [ 0 -eq ${#LEGACY_NET_INTERFACES[@]} ]; then
	# array is empty... but we checked for this case above
	msg="This should not have happened. Probable internal error above line $LINENO"
	LogMsg "$msg"
	UpdateSummary "$msg"
	SetTestStateFailed
	exit 100
fi
__synth_iterator=0
# Try to get DHCP address for synthetic adaptor and ping if configured.
# On the first interface that both gets a lease and pings REMOTE_SERVER we
# `break`, leaving __synth_iterator < count (the static fallback below keys
# off __synth_iterator reaching the array length).
while [ $__synth_iterator -lt ${#SYNTH_NET_INTERFACES[@]} ]; do
    LogMsg "Trying to get an IP Address via DHCP on synthetic interface ${SYNTH_NET_INTERFACES[$__synth_iterator]}"
    SetIPfromDHCP "${SYNTH_NET_INTERFACES[$__synth_iterator]}"

    if [ 0 -eq $? ]; then
        if [ -n "$GATEWAY" ]; then
            LogMsg "Setting $GATEWAY as default gateway on dev ${SYNTH_NET_INTERFACES[$__synth_iterator]}"
            CreateDefaultGateway "$GATEWAY" "${SYNTH_NET_INTERFACES[$__synth_iterator]}"
            if [ 0 -ne $? ]; then
                LogMsg "Warning! Failed to set default gateway!"
            fi
        fi
        LogMsg "Trying to ping $REMOTE_SERVER from synthetic interface ${SYNTH_NET_INTERFACES[$__synth_iterator]}"
        UpdateSummary "Trying to ping $REMOTE_SERVER from synthetic interface ${SYNTH_NET_INTERFACES[$__synth_iterator]}"
        # ping the remote host using an easily distinguishable pattern 0xcafed00d`null`syn`null`dhcp`null`
        ping -I "${SYNTH_NET_INTERFACES[$__synth_iterator]}" -c 10 -p "cafed00d0073796e006468637000" "$REMOTE_SERVER" >/dev/null 2>&1
        if [ 0 -eq $? ]; then
            # ping worked! Do not test any other interface
            LogMsg "Successfully pinged $REMOTE_SERVER through synthetic ${SYNTH_NET_INTERFACES[$__synth_iterator]} (dhcp)."
            UpdateSummary "Successfully pinged $REMOTE_SERVER through synthetic ${SYNTH_NET_INTERFACES[$__synth_iterator]} (dhcp)."
            break
        else
            LogMsg "Unable to ping $REMOTE_SERVER through synthetic ${SYNTH_NET_INTERFACES[$__synth_iterator]}"
            UpdateSummary "Unable to ping $REMOTE_SERVER through synthetic ${SYNTH_NET_INTERFACES[$__synth_iterator]}"
        fi
    fi
    # shut interface down before moving on to the next candidate
    ip link set ${SYNTH_NET_INTERFACES[$__synth_iterator]} down
    LogMsg "Unable to get address from dhcp server on synthetic interface ${SYNTH_NET_INTERFACES[$__synth_iterator]}"
    : $((__synth_iterator++))
done
# If all dhcp requests or ping failed, try to set static ip.
if [ ${#SYNTH_NET_INTERFACES[@]} -eq $__synth_iterator ]; then
    if [ -z "$SYNTH_STATIC_IP" ]; then
        msg="No static IP Address provided for synthetic interfaces. DHCP failed. Unable to continue..."
        LogMsg "$msg"
        UpdateSummary "$msg"
        SetTestStateFailed
        exit 10
    else
        # reset iterator and retry every synthetic interface with the static address
        __synth_iterator=0
        while [ $__synth_iterator -lt ${#SYNTH_NET_INTERFACES[@]} ]; do
            SetIPstatic "$SYNTH_STATIC_IP" "${SYNTH_NET_INTERFACES[$__synth_iterator]}" "$SYNTH_NETMASK"
            LogMsg "$(ip -o addr show ${SYNTH_NET_INTERFACES[$__synth_iterator]} | grep -vi inet6)"

            if [ -n "$GATEWAY" ]; then
                LogMsg "Setting $GATEWAY as default gateway on dev ${SYNTH_NET_INTERFACES[$__synth_iterator]}"
                CreateDefaultGateway "$GATEWAY" "${SYNTH_NET_INTERFACES[$__synth_iterator]}"
                if [ 0 -ne $? ]; then
                    LogMsg "Warning! Failed to set default gateway!"
                fi
            fi

            LogMsg "Trying to ping $REMOTE_SERVER"
            UpdateSummary "Trying to ping $REMOTE_SERVER"
            # ping the remote host using an easily distinguishable pattern 0xcafed00d`null`syn`null`static`null`
            ping -I "${SYNTH_NET_INTERFACES[$__synth_iterator]}" -c 10 -p "cafed00d0073796e0073746174696300" "$REMOTE_SERVER" >/dev/null 2>&1
            if [ 0 -eq $? ]; then
                # ping worked! Stop testing further interfaces
                LogMsg "Successfully pinged $REMOTE_SERVER through synthetic ${SYNTH_NET_INTERFACES[$__synth_iterator]} (static)."
                UpdateSummary "Successfully pinged $REMOTE_SERVER through synthetic ${SYNTH_NET_INTERFACES[$__synth_iterator]} (static)."
                break
            else
                LogMsg "Unable to ping $REMOTE_SERVER through synthetic ${SYNTH_NET_INTERFACES[$__synth_iterator]}"
                UpdateSummary "Unable to ping $REMOTE_SERVER through synthetic ${SYNTH_NET_INTERFACES[$__synth_iterator]}"
            fi
            : $((__synth_iterator++))
        done

        if [ ${#SYNTH_NET_INTERFACES[@]} -eq $__synth_iterator ]; then
            # Bugfix: the original called `LogMsg "msg"`, logging the literal
            # word "msg" instead of the message; also join the array with [*]
            # so the assignment forms a single, predictable string.
            msg="Unable to set neither static address for synthetic interface(s) ${SYNTH_NET_INTERFACES[*]}"
            LogMsg "$msg"
            UpdateSummary "$msg"
            SetTestStateFailed
            exit 10
        fi
    fi
fi
# Try to get DHCP address for legacy adaptor; mirrors the synthetic loop
# above, but with the legacy-specific ping payload pattern.
__legacy_iterator=0
while [ $__legacy_iterator -lt ${#LEGACY_NET_INTERFACES[@]} ]; do
    LogMsg "Trying to get an IP Address via DHCP on legacy interface ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
    SetIPfromDHCP "${LEGACY_NET_INTERFACES[$__legacy_iterator]}"

    if [ 0 -eq $? ]; then
        if [ -n "$GATEWAY" ]; then
            LogMsg "Setting $GATEWAY as default gateway on dev ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
            CreateDefaultGateway "$GATEWAY" "${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
            if [ 0 -ne $? ]; then
                LogMsg "Warning! Failed to set default gateway!"
            fi
        fi
        LogMsg "Trying to ping $REMOTE_SERVER from legacy interface ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
        UpdateSummary "Trying to ping $REMOTE_SERVER from legacy interface ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
        # ping the remote host using an easily distinguishable pattern 0xcafed00d`null`leg`null`dhcp`null`
        ping -I "${LEGACY_NET_INTERFACES[$__legacy_iterator]}" -c 10 -p "cafed00d006c6567006468637000" "$REMOTE_SERVER" >/dev/null 2>&1
        if [ 0 -eq $? ]; then
            # ping worked! Stop testing further interfaces
            LogMsg "Successfully pinged $REMOTE_SERVER through legacy ${LEGACY_NET_INTERFACES[$__legacy_iterator]} (dhcp)."
            UpdateSummary "Successfully pinged $REMOTE_SERVER through legacy ${LEGACY_NET_INTERFACES[$__legacy_iterator]} (dhcp)."
            break
        else
            LogMsg "Unable to ping $REMOTE_SERVER through legacy ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
            UpdateSummary "Unable to ping $REMOTE_SERVER through legacy ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
        fi
    fi
    # shut interface down before moving on to the next candidate
    ip link set ${LEGACY_NET_INTERFACES[$__legacy_iterator]} down
    LogMsg "Unable to get address from dhcp server on legacy interface ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
    : $((__legacy_iterator++))
done
# If dhcp failed, try to set static ip
if [ ${#LEGACY_NET_INTERFACES[@]} -eq $__legacy_iterator ]; then
    # join with [*] so the assignment forms a single, predictable string
    msg="Unable to get address for legacy interface(s) ${LEGACY_NET_INTERFACES[*]} through DHCP"
    LogMsg "$msg"

    if [ -z "$LEGACY_STATIC_IP" ]; then
        msg="No static IP Address provided for legacy interfaces. DHCP failed. Unable to continue..."
        LogMsg "$msg"
        UpdateSummary "$msg"
        SetTestStateFailed
        exit 10
    else
        # reset iterator and retry every legacy interface with the static address
        __legacy_iterator=0
        while [ $__legacy_iterator -lt ${#LEGACY_NET_INTERFACES[@]} ]; do
            SetIPstatic "$LEGACY_STATIC_IP" "${LEGACY_NET_INTERFACES[$__legacy_iterator]}" "$LEGACY_NETMASK"
            LogMsg "$(ip -o addr show ${LEGACY_NET_INTERFACES[$__legacy_iterator]} | grep -vi inet6)"

            if [ -n "$GATEWAY" ]; then
                LogMsg "Setting $GATEWAY as default gateway on dev ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
                CreateDefaultGateway "$GATEWAY" "${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
                if [ 0 -ne $? ]; then
                    LogMsg "Warning! Failed to set default gateway!"
                fi
            fi

            LogMsg "Trying to ping $REMOTE_SERVER through legacy ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
            UpdateSummary "Trying to ping $REMOTE_SERVER through legacy ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
            # ping the remote host using an easily distinguishable pattern 0xcafed00d`null`leg`null`static`null`
            ping -I "${LEGACY_NET_INTERFACES[$__legacy_iterator]}" -c 10 -p "cafed00d006c65670073746174696300" "$REMOTE_SERVER" >/dev/null 2>&1
            if [ 0 -eq $? ]; then
                # ping worked! Stop testing further interfaces
                LogMsg "Successfully pinged $REMOTE_SERVER through legacy ${LEGACY_NET_INTERFACES[$__legacy_iterator]} (static)."
                UpdateSummary "Successfully pinged $REMOTE_SERVER through legacy ${LEGACY_NET_INTERFACES[$__legacy_iterator]} (static)."
                break
            else
                LogMsg "Unable to ping $REMOTE_SERVER through legacy ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
                UpdateSummary "Unable to ping $REMOTE_SERVER through legacy ${LEGACY_NET_INTERFACES[$__legacy_iterator]}"
            fi
            : $((__legacy_iterator++))
        done

        if [ ${#LEGACY_NET_INTERFACES[@]} -eq $__legacy_iterator ]; then
            # Bugfix: the original called `LogMsg "msg"`, logging the literal
            # word "msg" instead of the message.
            msg="Unable to set neither static address for legacy interface(s) ${LEGACY_NET_INTERFACES[*]}"
            LogMsg "$msg"
            UpdateSummary "$msg"
            SetTestStateFailed
            exit 10
        fi
    fi
fi
# Both synthetic and legacy interfaces came up and pinged the remote host:
# report success back to the LIS framework and exit cleanly.
UpdateSummary "Test successful"
LogMsg "Updating test case state to completed"
SetTestStateCompleted
exit 0
|
public class BubbleSort {
// Bubble sort by switching adjacent elements
public static void bubbleSort(int arr[]) {
int n = arr.length;
for (int i = 0; i < n-1; i++) {
// Traverse through all array elements
for (int j = 0; j < n-i-1; j++) {
// Swap adjacent elements if they are in wrong order
if (arr[j] > arr[j+1]) {
// swap arr[j] and arr[j+1]
int temp = arr[j];
arr[j] = arr[j+1];
arr[j+1] = temp;
}
}
}
}
public static void main(String[] args) {
int[] arr = {5, 3, 2, 6, 1};
bubbleSort(arr);
System.out.println("Sorted array: ");
for (int i = 0; i < arr.length; i++) {
System.out.print(arr[i] + " ");
}
}
}
|
# Build the local rtsp-samsung-tv image, then publish it to Docker Hub
# under both the pinned version tag and `latest`.
IMAGE="vassio/rtsp-samsung-tv"
VERSION="1.1.18"

docker build -t rtsp-samsung-tv .
for tag in "$VERSION" latest; do
    docker tag rtsp-samsung-tv "$IMAGE:$tag"
    docker push "$IMAGE:$tag"
done
|
import nltk
# Bugfix: `nltk.wordnet` is not a valid attribute path; the WordNet corpus
# reader lives at nltk.corpus.wordnet, so the original raised AttributeError.
from nltk.corpus import wordnet


def synonyms_words(text):
    """Replace each word in ``text`` with the first lemma of its first
    WordNet synset, leaving words with no synsets unchanged.

    :param text: sentence to rewrite (requires the ``punkt`` and ``wordnet``
        NLTK data to be downloaded)
    :return: space-joined sentence of replacement words
    """
    text_tokens = nltk.word_tokenize(text)
    replacement_words = []
    for token in text_tokens:
        synonyms = wordnet.synsets(token)
        if synonyms:
            # Synset names look like "example.n.01"; keep only the lemma part.
            replacement_words.append(synonyms[0].name().split('.')[0])
        else:
            replacement_words.append(token)
    return ' '.join(replacement_words)


text = "this is an example sentence"
print(synonyms_words(text))
|
const {
initAccount,
getAccountInfo,
getTotalPageList,
} = require('../../src/telegraph/telegraph');
const {publishImgs} = require('../../src/telegraph/publish');
const path = require('path');
const fs = require('fs');
// Smoke-test script for the telegraph helpers: load/initialize the account
// from the YAML config, dump the account info and full page list, then
// publish a run of numbered JPEG pages as a single Telegraph article.
(async () => {
  const ret = await initAccount('cfg/telegraph.yaml');
  console.log(JSON.stringify(ret.telegraph));

  const ai = await getAccountInfo(ret.telegraph);
  console.log(JSON.stringify(ai));

  const pl = await getTotalPageList(ret.telegraph);
  console.log(JSON.stringify(pl));

  const page = await publishImgs(
      ret.telegraph,
      '金田一R 第十四卷',
      // 'test',
      (i) => {
        // Map page number i to an image path; returning undefined when the
        // file does not exist signals publishImgs to stop at that page.
        const fn = path.join('../jarviscrawlercore/comic/715/14', i + '.jpg');
        if (fs.existsSync(fn)) {
          return fn;
        }

        return;
      },
      1,
      999,
  );
  console.log(JSON.stringify(page));

  // NOTE(review): exits with -1 even on the success path — presumably to
  // force termination despite open handles; confirm this is intentional.
  process.exit(-1);
})().catch((err) => {
  console.log('catch a err ', err);
  process.exit(-1);
});
|
#!/bin/bash -e
# NOTE: the 5.5 and 5.6 versions do not have SSL enabled
versions="5.5 5.6 5.7 8.0"
# launch VERSION
# Starts a disposable MySQL container named zgrab_mysql-VERSION with an
# empty root password, unless one with that name is already running.
function launch() {
    VERSION=$1
    CONTAINER_NAME="zgrab_mysql-$VERSION"
    # Skip if a container with this exact name is already up.
    if docker ps --filter "name=$CONTAINER_NAME" | grep -q $CONTAINER_NAME; then
        echo "mysql/setup: Container $CONTAINER_NAME already running -- skipping launch..."
        return
    fi
    docker run -td --rm --name $CONTAINER_NAME -e MYSQL_ALLOW_EMPTY_PASSWORD=true -e MYSQL_LOG_CONSOLE=true mysql:$VERSION
}
# waitFor VERSION
# Blocks until the zgrab_mysql-VERSION container logs that it is ready,
# then pads with a few extra seconds.
# NOTE(review): MySQL images log "ready for connections." once during the
# temporary init server and again for the real one, so the extra 5-second
# wait below presumably papers over that race — confirm before removing it.
function waitFor() {
    VERSION=$1
    CONTAINER_NAME=zgrab_mysql-$VERSION
    echo "mysql/setup: Waiting for $CONTAINER_NAME to become ready..."
    while ! (docker logs --tail all $CONTAINER_NAME | grep -q "ready for connections."); do
        echo -n "."
        sleep 1
    done
    # Grace period after the log line appears.
    for i in `seq 1 5`; do
        echo -n "*"
        sleep 1
    done
    echo "...ok."
}
# Launch every version first, then wait on each, so the containers
# initialize in parallel instead of serially.
echo "mysql/setup: Launching docker containers..."
for version in $versions; do
    launch $version
done
for version in $versions; do
    waitFor $version
done
|
import Ux from "ux";
import * as U from 'underscore';
import Cmd from './Op.Command';
/**
 * Builds the toolbar map for a component from its Hoc "toolbar" config.
 * Resolves the configured command keys against the component's commands
 * and returns a plain object keyed by command name.
 *
 * @param reference React component holding the Hoc assets
 * @returns object mapping command -> display text
 */
const initToolbar = (reference) => {
    const toolbar = Ux.fromHoc(reference, "toolbar");
    let resolved = [];
    if (U.isArray(toolbar)) {
        const commands = Cmd.initCommand(reference);
        resolved = Cmd.initCommands(commands, toolbar);
    }
    /** Group by command: command -> text **/
    const $toolbars = {};
    for (const item of resolved) {
        $toolbars[item.command] = item.text;
    }
    return $toolbars;
};
/**
 * Resolves the command list bound to an event key from the component's
 * Hoc "event" config. Returns an empty array when no key is given or the
 * key is not configured.
 *
 * @param reference React component holding the Hoc assets
 * @param key event name to look up
 * @returns array of resolved command descriptors
 */
const initEvent = (reference, key) => {
    if (!key) {
        return [];
    }
    const event = Ux.fromHoc(reference, "event");
    const commands = Cmd.initCommand(reference);
    return event.hasOwnProperty(key)
        ? Cmd.initCommands(commands, event[key])
        : [];
};
/** Reads the Hoc "detail" config for the given component as-is. */
const initDetail = (reference) => Ux.fromHoc(reference, "detail");
export default {
initToolbar,
initEvent,
initDetail
};
|
/**
 * Returns the given value, optionally trimmed.
 *
 * @param {string} value raw input string
 * @param {boolean} trim when truthy, strip leading/trailing whitespace
 * @returns {string} the (possibly trimmed) value
 */
function processInput(value, trim) {
    return trim ? value.trim() : value;
}
|
import java.security.*;

// SECURITY NOTE(review): this is an exploit-style gadget class. Merely
// constructing it runs `run()` under doPrivileged, which removes the JVM
// SecurityManager. Retain only for security research / test fixtures.
public class MyPayload
    implements PrivilegedExceptionAction
{

    public MyPayload()
    {
        try
        {
            // Execute run() with this class's own (full) privileges.
            AccessController.doPrivileged(this);
        }
        catch(PrivilegedActionException e)
        {
            // Swallowed intentionally: failure to escalate is silent.
            //e.printStackTrace();
        }
    }

    public Object run()
        throws Exception
    {
        // Disables all SecurityManager checks for the whole JVM.
        System.setSecurityManager(null);
        return null;
    }

    public static void r()
        throws Exception
    {
        //System.out.println("hello!");
    }
}
|
<reponame>hnjolles1/CasperLabs
package io.casperlabs.models.bytesrepr
import cats.arrow.FunctionK
import cats.data.StateT
import cats.free.Free
import cats.implicits._
import io.casperlabs.catscontrib.RangeOps.FoldM
import java.nio.charset.StandardCharsets
import scala.util.{Failure, Success, Try}
import scala.util.Try
import simulacrum.typeclass
/** Type class for values that can be parsed from a binary representation.
  * `fromBytes` consumes a prefix of the view and returns the parsed value
  * together with the remaining, unconsumed bytes (or a parse `Error`).
  */
@typeclass trait FromBytes[T] {
  def fromBytes(
      bytes: BytesView
  ): Either[FromBytes.Error, (T, BytesView)]
}
/** Combinator library for binary deserialization, built as a free monad
  * over a small instruction algebra (for stack safety) with `BytesView`
  * threaded through `StateT` as the remaining input.
  */
object FromBytes {
  // Algebra defining operations used in de-serialization
  sealed trait Algebra[A]
  // Do nothing
  case object Id extends Algebra[Unit]
  // Take the first byte from the view
  case class Pop(bytes: BytesView) extends Algebra[(BytesView, Byte)]
  // create a new view from the first `n` bytes
  case class Take(n: Int, bytes: BytesView) extends Algebra[(BytesView, BytesView)]
  // raise an error
  case class Raise[A](error: Error) extends Algebra[A]

  // The context for de-serialization is the free monad over our algebra (gives
  // stack safety). The state for the computation is `BytesView`. The return value
  // is of type `A`.
  type Deserializer[A] = StateT[Free[Algebra, *], BytesView, A]

  // Given a deserializer and some bytes, attempt to parse a value of type A.
  // Parsing must consume the input exactly; trailing bytes are an error.
  def deserialize[A](des: Deserializer[A], bytes: Array[Byte]): Either[Error, A] =
    des.run(BytesView(bytes)).foldMap(interpreter).flatMap {
      case (rem, _) if rem.nonEmpty => Left(Error.LeftOverBytes)
      case (_, a)                   => Right(a)
    }

  // Lift basic operations of the algebra as Deserializers
  val id: Deserializer[Unit]                  = StateT.liftF(Free.liftF(Id))
  val byte: Deserializer[Byte]                = StateT(bytes => Free.liftF(Pop(bytes)))
  def take(n: Int): Deserializer[BytesView]   = StateT(bytes => Free.liftF(Take(n, bytes)))
  def raise[A](error: Error): Deserializer[A] = StateT.liftF(Free.liftF(Raise[A](error)))

  // Convenience function for defining Deserializers outside this package without importing StateT
  def pure[A](a: A): Deserializer[A] = StateT.pure(a)

  // get the current state of the bytes stream (without modifying it)
  val getState: Deserializer[BytesView] = StateT.get

  // Catch exceptions in the Monad of our Deserializer
  private def attempt[A](block: => A): Free[Algebra, A] = Try(block) match {
    case Success(a)   => Free.pure(a)
    case Failure(err) => Free.liftF(Raise(Error.FormatException(err.getMessage)))
  }

  // One tag byte: TRUE_TAG / FALSE_TAG; anything else is a format error.
  val bool: Deserializer[Boolean] =
    byte.flatMap {
      case tag if tag == Constants.Boolean.TRUE_TAG  => pure(true)
      case tag if tag == Constants.Boolean.FALSE_TAG => pure(false)
      case other =>
        raise(Error.FormatException(s"Byte $other could not be interpreted as a boolean"))
    }

  // Fixed-width numeric values read straight from the underlying buffer.
  val int: Deserializer[Int]   = take(4).flatMapF(view => attempt(view.toByteBuffer.getInt))
  val long: Deserializer[Long] = take(8).flatMapF(view => attempt(view.toByteBuffer.getLong))

  // Length-prefixed, little-endian magnitude (one byte length prefix).
  val bigInt: Deserializer[BigInt] =
    byte.flatMap {
      case numBytes if numBytes < 0 =>
        raise(Error.FormatException("Negative number of BigInt Bytes"))
      case 0 => pure(BigInt(0))
      case numBytes =>
        take(numBytes.toInt).flatMapF { littleEndian =>
          val bigEndian = littleEndian.toArray.reverse
          // we prepend a 0 to indicate that the value is positive
          // (otherwise you can end up with weird things like 255 will deserialize to -1)
          attempt(BigInt(0.toByte +: bigEndian))
        }
    }

  val unit: Deserializer[Unit] = id

  // 4-byte length prefix followed by that many raw bytes.
  val bytes: Deserializer[Array[Byte]] =
    for {
      size        <- int
      stateLength <- getState.map(_.length)
      view <- if (size < 0) raise(Error.FormatException("Negative length of ByteArray"))
             else if (size > stateLength)
               raise(
                 Error.FormatException(
                   s"Size of ByteArray $size is greater than the number of remaining bytes $stateLength"
                 )
               )
             else take(size)
    } yield view.toArray

  // Length-prefixed UTF-8 string.
  val string: Deserializer[String] =
    bytes.flatMapF(chars => attempt(new String(chars.toArray, StandardCharsets.UTF_8)))

  // Pass in `desA` lazily for stack safety when recursively chaining Deserializers.
  // It is also more efficient in the None case because we do not evaluate `desA`.
  def option[A](desA: => Deserializer[A]): Deserializer[Option[A]] =
    byte.flatMap {
      case tag if tag == Constants.Option.NONE_TAG =>
        pure(none[A])
      case tag if tag == Constants.Option.SOME_TAG =>
        desA.map(_.some)
      case other =>
        raise(Error.InvalidVariantTag(other, "Option"))
    }

  // Parse exactly `size` consecutive values (no length prefix).
  def fixedSeq[A](desA: => Deserializer[A], size: Int): Deserializer[Seq[A]] =
    (0 until size)
      .foldLeft(pure(IndexedSeq.empty[A])) {
        case (acc, _) =>
          acc.flatMap { xs =>
            desA.map(a => xs :+ a)
          }
      }
      .map(_.toSeq)

  // 4-byte element count followed by that many values.
  def seq[A](desA: => Deserializer[A]): Deserializer[Seq[A]] =
    for {
      size        <- int
      stateLength <- getState.map(_.length)
      xs <- if (size < 0) raise(Error.FormatException("Negative length of Sequence"))
           else if (size > stateLength)
             raise(
               Error.FormatException(
                 s"Size of Sequence $size is greater than the number of remaining bytes $stateLength"
               )
             )
           else fixedSeq(desA, size)
    } yield xs

  // One tag byte selecting the Left or Right branch.
  def either[A, B](desA: => Deserializer[A], desB: => Deserializer[B]): Deserializer[Either[A, B]] =
    byte.flatMap {
      case tag if tag == Constants.Either.LEFT_TAG =>
        desA.map(a => Left(a).rightCast[B])
      case tag if tag == Constants.Either.RIGHT_TAG =>
        desB.map(b => Right(b).leftCast[A])
      case other =>
        raise(Error.InvalidVariantTag(other, "Either"))
    }

  // A map is serialized as a sequence of key/value pairs.
  def map[A, B](desA: => Deserializer[A], desB: => Deserializer[B]): Deserializer[Map[A, B]] =
    seq(tuple2(desA, desB)).map(_.toMap)

  def tuple1[A](desA: => Deserializer[A]): Deserializer[Tuple1[A]] =
    for {
      _ <- id // put a flatMap before `desA` so that it will not be immediately evaluated
      a <- desA
    } yield Tuple1(a)

  def tuple2[A, B](desA: => Deserializer[A], desB: => Deserializer[B]): Deserializer[(A, B)] =
    for {
      _ <- id
      a <- desA
      b <- desB
    } yield (a, b)

  def tuple3[A, B, C](
      desA: => Deserializer[A],
      desB: => Deserializer[B],
      desC: Deserializer[C]
  ): Deserializer[(A, B, C)] =
    for {
      _ <- id
      a <- desA
      b <- desB
      c <- desC
    } yield (a, b, c)

  // interpreter for our algebra converting each operation into a result
  val interpreter: FunctionK[Algebra, Either[Error, *]] =
    new FunctionK[Algebra, Either[Error, *]] {
      def apply[A](instruction: Algebra[A]): Either[Error, A] = instruction match {
        case Pop(bytes) =>
          bytes.pop match {
            case None         => Left(Error.NotEnoughBytes)
            case Some(result) => Right(result)
          }
        case Take(n, bytes) =>
          bytes.safeTake(n) match {
            case None         => Left(Error.NotEnoughBytes)
            case Some(result) => Right(result)
          }
        case Raise(err) => Left(err)
        case Id         => Right(())
      }
    }

  // Possible de-serialization failures.
  sealed trait Error
  object Error {
    case object LeftOverBytes                                 extends Error
    case object NotEnoughBytes                                extends Error
    case class InvalidVariantTag(tag: Byte, typeName: String) extends Error
    case class FormatException(message: String)               extends Error
  }
}
|
import java.util.Arrays;
public class Main
{
public static void main(String[] args)
{
// Get the list of numbers
int[] nums = {2, 1, 5, 4, 3};
// Sort the list
Arrays.sort(nums);
// Print the result
System.out.println("Sorted list in ascending order:");
for (int num: nums)
System.out.print(num + " ");
// Sort the list in descending order
Arrays.sort(nums, Collections.reverseOrder());
// Print the result
System.out.println("\nSorted list in descending order:");
for (int num: nums)
System.out.print(num + " ");
}
}
|
#!/bin/bash
set -euo pipefail

echo "Copy generated JavaDoc API from projects into /docs - must been built earlier with 'mvn install'..."

# For each module: drop the previously published API docs, then move the
# freshly generated apidocs into place. With `set -e`, a missing build
# (no target/apidocs) aborts the script at the failing `mv`.
rm -rf ../../docs/api/schema2template
mv ../../generator/schema2template/target/apidocs ../../docs/api/schema2template
rm -rf ../../docs/api/odfdom
mv ../../odfdom/target/apidocs ../../docs/api/odfdom
rm -rf ../../docs/api/taglets
mv ../../taglets/target/apidocs ../../docs/api/taglets
|
<gh_stars>1-10
# Read a HH:MM time from stdin and echo it back in the same format.
hour, minute = input().split(':')
print(hour, minute, sep=':')
|
var LinkedList = require('./output-linked-list');
LinkedList = LinkedList.LinkedList;
// Jest suite for the doubly-ended LinkedList (push/pop at the tail,
// shift/unshift at the head, plus count and delete-by-value).
// Fix: two test descriptions were misleading — 'pop single item' actually
// exercises shift, and 'deletes the last element' deletes the middle one.
describe('LinkedList', () => {
  it('shift single item', () => {
    const list = new LinkedList();
    list.push(10);
    expect(list.shift()).toBe(10);
  });

  it('push/pop', () => {
    const list = new LinkedList();
    list.push(10);
    list.push(20);
    expect(list.pop()).toBe(20);
    expect(list.pop()).toBe(10);
  });

  it('push/shift', () => {
    const list = new LinkedList();
    list.push(10);
    list.push(20);
    expect(list.shift()).toBe(10);
    expect(list.shift()).toBe(20);
  });

  it('unshift/shift', () => {
    const list = new LinkedList();
    list.unshift(10);
    list.unshift(20);
    expect(list.shift()).toBe(20);
    expect(list.shift()).toBe(10);
  });

  it('unshift/pop', () => {
    const list = new LinkedList();
    list.unshift(10);
    list.unshift(20);
    expect(list.pop()).toBe(10);
    expect(list.pop()).toBe(20);
  });

  it('example', () => {
    const list = new LinkedList();
    list.push(10);
    list.push(20);
    expect(list.pop()).toBe(20);
    list.push(30);
    expect(list.shift()).toBe(10);
    list.unshift(40);
    list.push(50);
    expect(list.shift()).toBe(40);
    expect(list.pop()).toBe(50);
    expect(list.shift()).toBe(30);
  });

  it('can count its elements', () => {
    const list = new LinkedList();
    expect(list.count()).toBe(0);
    list.push(10);
    expect(list.count()).toBe(1);
    list.push(20);
    expect(list.count()).toBe(2);
  });

  it('deletes an element from the middle of the list', () => {
    const list = new LinkedList();
    list.push(10);
    list.push(20);
    list.push(30);
    list.delete(20);
    expect(list.count()).toBe(2);
    expect(list.pop()).toBe(30);
    expect(list.shift()).toBe(10);
  });

  it('deletes the only element', () => {
    const list = new LinkedList();
    list.push(10);
    list.delete(10);
    expect(list.count()).toBe(0);
  });
});
|
<gh_stars>0
package main
import (
"bufio"
"bytes"
"io"
"strconv"
"strings"
)
// DdProgress is a struct containing progress of the dd operation.
type DdProgress struct {
	// Bytes copied so far, parsed from dd's "status=progress" output lines.
	Bytes int
	// Error is non-nil when the underlying dd process exited with a failure.
	Error error
}
// CopyConvert is a wrapper around the `dd` Unix utility. It copies `iff`
// to `of` with elevated privileges (1 MiB blocks, fdatasync on completion)
// and streams progress parsed from dd's "status=progress" output on the
// returned channel. When dd exits with an error, a final DdProgress with a
// non-nil Error is sent; the channel is then closed.
//
// Bugfix: the original had one goroutine sending dd's exit error on
// `channel` and then closing it while a second goroutine could still be
// sending progress updates — a "send on closed channel" panic. Now the
// scanner goroutine is the only sender and the only closer.
func CopyConvert(iff string, of string) (chan DdProgress, error) {
	channel := make(chan DdProgress)

	cmd, err := ElevatedCommand("dd", "if="+iff, "of="+of, "status=progress", "bs=1M", "conv=fdatasync")
	if err != nil {
		return nil, err
	}

	output, input := io.Pipe()
	cmd.Stderr = input
	cmd.Stdout = input

	err = cmd.Start()
	if err != nil {
		return nil, err
	}

	// Hand the exit status to the reader goroutine via a buffered channel.
	waitErr := make(chan error, 1)

	// Wait for the command to exit, then close the pipe so the scanner
	// below sees EOF and terminates.
	go (func() {
		waitErr <- cmd.Wait()
		input.Close()
	})()

	// Read the output line by line; this goroutine owns all sends on
	// `channel` and closes it when done.
	go (func() {
		defer close(channel)
		scanner := bufio.NewScanner(output)
		scanner.Split(ScanCrLines)
		for scanner.Scan() {
			text := scanner.Text()
			println(text)
			firstSpace := strings.Index(text, " ")
			if firstSpace != -1 && strings.HasPrefix(text[firstSpace+1:], "bytes (") {
				// TODO: Probably handle error, but we can't tell full dd behavior without seeing the code.
				parse, _ := strconv.Atoi(text[:firstSpace])
				channel <- DdProgress{
					Bytes: parse,
				}
			}
		}
		// Report a failed exit after all progress has been delivered.
		if err := <-waitErr; err != nil {
			channel <- DdProgress{
				Error: err,
			}
		}
	})()

	return channel, nil
}
// GetDevices returns the list of USB devices available to read/write from.
// NOTE(review): currently a stub returning harmless placeholder device
// paths; real enumeration is still to be implemented.
func GetDevices() []string {
	// TODO: Complete GetDevices properly.
	return []string{"/dev/null", "/dev/urandom"}
}
// dropCR drops a terminal \r from the data, returning the input unchanged
// when it is empty or does not end with a carriage return.
func dropCR(data []byte) []byte {
	n := len(data)
	if n == 0 || data[n-1] != '\r' {
		return data
	}
	return data[:n-1]
}
// ScanCrLines is a split function for a Scanner that returns each line of
// text, stripped of any trailing end-of-line marker. The returned line may
// be empty. The end-of-line marker is one carriage return or one mandatory
// newline. In regular expression notation, it is `\r|\n`. The last
// non-empty line of input will be returned even if it has no newline.
// (Splitting on bare \r matters here because dd's status=progress output
// rewrites its progress line with carriage returns.)
func ScanCrLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := bytes.IndexByte(data, '\n'); i >= 0 {
		// We have a full newline-terminated line.
		return i + 1, dropCR(data[0:i]), nil
	} else if i := bytes.IndexByte(data, '\r'); i >= 0 {
		// We have a full carriage return-terminated line.
		return i + 1, dropCR(data[0:i]), nil
	}
	// If we're at EOF, we have a final, non-terminated line. Return it.
	if atEOF {
		return len(data), dropCR(data), nil
	}
	// Request more data.
	return 0, nil, nil
}
|
<reponame>s4id/swagger-codegen
/// <reference path="api.d.ts" />
// Generated model (swagger-codegen); uses the legacy `module` keyword for
// an internal namespace by design of the generator.
module API.Client {
    'use strict';

    /** Pet-store API model: an id/name tag attached to a pet. */
    export class Tag {
        /** Unique identifier of the tag. */
        id: number;
        /** Human-readable tag name. */
        name: string;
    }
}
|
export { default } from './components/User';
|
# Read "name,score" lines from input.txt, average the scores per student,
# and write "name: average" lines to output.txt.
with open("input.txt", "r") as src:
    records = src.readlines()

# Accumulate every score under its student's name.
scores_by_student = {}
for record in records:
    name, raw_score = record.strip().split(',')
    scores_by_student.setdefault(name, []).append(int(raw_score))

# Emit one averaged line per student.
with open("output.txt", "w") as dst:
    for name, scores in scores_by_student.items():
        average_score = sum(scores) / len(scores)
        dst.write(f"{name}: {average_score}\n")
|
import React from 'react';
import { Card } from '@material-ui/core';
import {
HashLoader,
BarLoader,
BeatLoader,
BounceLoader,
CircleLoader,
ClimbingBoxLoader,
ClipLoader,
ClockLoader,
DotLoader,
FadeLoader,
GridLoader,
MoonLoader,
PacmanLoader,
PropagateLoader,
PulseLoader,
RingLoader,
RiseLoader,
RotateLoader,
ScaleLoader,
SkewLoader,
SquareLoader,
SyncLoader
} from 'react-spinners';
export default function LivePreviewExample() {
return (
<>
<div className="d-flex flex-row text-center flex-wrap justify-content-center">
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<BarLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">BarLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<HashLoader color={'#3c44b1'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">HashLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<BeatLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">BeatLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<BounceLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">BounceLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<CircleLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">CircleLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<ClimbingBoxLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">
ClimbingBoxLoader
</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<ClipLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">ClipLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<ClockLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">ClockLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<DotLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">DotLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<FadeLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">FadeLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<GridLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">GridLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<MoonLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">MoonLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<PacmanLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">PacmanLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<PropagateLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">PropagateLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<PulseLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">PulseLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<RingLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">RingLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<RiseLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">RiseLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<RotateLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">RotateLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<ScaleLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">ScaleLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<SkewLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">SkewLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<SquareLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">SquareLoader</p>
</Card>
<Card className="rounded-sm card-box shadow-none p-3 m-3">
<div
className="d-flex align-items-center justify-content-center"
style={{ width: '150px', height: '80px' }}>
<SyncLoader color={'var(--primary)'} loading={true} />
</div>
<p className="mb-0 pt-3 text-black-50 text-center">SyncLoader</p>
</Card>
</div>
</>
);
}
|
<script>
  // Toggles #myDiv between hidden and shown.
  // Bugfix: the original checked the *inline* style (myDiv.style.display),
  // which is "" on first click when visibility comes from a stylesheet, so
  // a div hidden via CSS could not be shown. Use the computed style instead.
  function toggleDivVisibility() {
    let myDiv = document.getElementById("myDiv");
    let visible = window.getComputedStyle(myDiv).display !== "none";
    myDiv.style.display = visible ? "none" : "block";
  }
</script>
|
/**
* Setting parameters as default config.
* object's members names will be used for
* @param {*} default_params object with default_parameters
* @param {*} params
*/
function params_setter(default_params, params){
for(let param_name in default_params){
try{
if(typeof(params[param_name]) == typeof(default_params[param_name])){
this[param_name] = params[param_name];
} else {
this[param_name] = default_params[param_name];
}
}catch(e){
console.log(`CHECK ${param_name} PARAMETER`);
this[param_name] = default_params[param_name];
}
}
}
/**
 * Returns a pseudo-random float in the half-open range [min, max).
 * @param {number} min lower bound (inclusive)
 * @param {number} max upper bound (exclusive)
 */
function getRandomArbitrary(min, max) {
    return min + Math.random() * (max - min);
}
/**
 * Returns a pseudo-random integer in the half-open range [min, max):
 * inclusive of min (rounded up), exclusive of max (rounded down).
 * @param {number} min lower bound (inclusive)
 * @param {number} max upper bound (exclusive)
 */
function getRandomInt(min, max) {
    const lo = Math.ceil(min);
    const hi = Math.floor(max);
    return lo + Math.floor(Math.random() * (hi - lo));
}
/**
*
* @param {*} model
*/
function getWeightsFromModelToWorkerTransfer(model){
let ret = {};
if(model){
for(let layer in model.layers){
ret[layer] = {};
for(let wg of model.layers[layer].getWeights()){
ret[layer][wg.name] = wg.arraySync();
}
}
}
return ret;
}
/**
 * Restores weights onto a model from a plain object produced by
 * getWeightsFromModelToWorkerTransfer ({ layerIndex: { weightName: nestedArray } }).
 * Weights present in the object replace the layer's current tensors; weights
 * absent from the object are kept as-is.
 * @param {tf.model} model model whose layers receive the weights
 * @param {object} weights_obj serialized weights keyed by layer index and weight name
 * @returns the same model instance
 */
function setWeightsToModelByObject(model, weights_obj){
    if(model){
        for(let layer in model.layers){
            const current = model.layers[layer].getWeights();
            // Replace each weight tensor that has serialized data; keep the
            // layer's existing tensor otherwise.
            const updated = current.map((wg) =>
                weights_obj[layer][wg.name] ? tf.tensor(weights_obj[layer][wg.name]) : wg
            );
            if (current.length > 0) {
                // BUG FIX: the previous code assigned the new tensor to a plain
                // property (model.layers[layer][wg.name] = ...), which does not
                // change the layer's actual weights. setWeights() is the
                // supported way to install new weight tensors on a layer.
                model.layers[layer].setWeights(updated);
            }
        }
    }
    return model;
}
/**
*
* @param {tf.model} model Tensorflow.js LayersModel
*
* let layerData = [
{
"name": "dense_Dense3",
"layers": {
"dense_Dense3/bias": {
shape: [10],
layer_data: []
},
"dense_Dense3/kernel": {
shape: [10],
layer_data: []
}
}
},
{
"name": "dense_Dense4",
"layers": {
"dense_Dense4/bias": {
shape: [10],
layer_data: []
},
"dense_Dense4/kernel": {
shape: [10],
layer_data: []
}
}
}
];
*/
function get_serialized_layers_data(model){
if(model){
let layersData = [];
for(let layer of model.layers){
let layer_config = layer.getConfig();
let layer_name = layer.name;
let layer_shape = null;
let layer_activation = null;
if (layer_name.substring(0, 5) == "input"){
layer_shape = layer.inputSpec[0].shape;
if (layer_shape.length > 2){
layer_shape = layer_shape.slice(1, layer_shape.length);
}
} else {
layer_shape = layer.units;
layer_activation = layer_config.activation;
}
let layer_weights = [];
for (let ld of layer.getWeights()){
let weight = ld.arraySync();
layer_weights.push(weight);
}
let layerDataItem = {
"name": layer_name,
"shape": layer_shape,
"layer_weights": layer_weights,
"activation": layer_activation
}
layersData.push(layerDataItem);
}
return layersData;
}
throw Error("Model must be specified.")
}
/**
 * Rebuilds a tf.LayersModel from the descriptors produced by
 * get_serialized_layers_data: the first entry describes the input layer, the
 * remaining entries become dense layers, and serialized weights (nested
 * arrays) are restored onto the corresponding layers.
 * @param {Array} model_weight_data serialized per-layer descriptors
 * @returns {tf.LayersModel} the reconstructed model
 * @throws {Error} when model_weight_data is not provided
 */
function create_model_by_serialized_data(model_weight_data){
    if(model_weight_data){
        // First descriptor is the input layer; the rest are dense layers.
        let inputLayer = tf.input({shape: model_weight_data[0].shape});
        let cur_layer = inputLayer;
        for(let layer of model_weight_data.slice(1)){
            cur_layer = tf.layers.dense({units: layer.shape, activation: layer.activation}).apply(cur_layer);
        }
        let model = tf.model({inputs: inputLayer, outputs: cur_layer});
        // Restore serialized weights as tensors, layer by layer. Mapping into a
        // fresh array avoids mutating the caller's serialized data in place.
        for (let layer_number in model_weight_data){
            let layer_weights = model_weight_data[layer_number].layer_weights;
            if (layer_weights && layer_weights.length > 0){
                model.layers[layer_number].setWeights(layer_weights.map((w) => tf.tensor(w)));
            }
        }
        return model;
    }
    throw Error("Model must be specified.")
}
|
def recommend_content(data):
    """Rank content items by engagement.

    Each item's relevance score is the sum of its positive 'likes',
    'views' and 'shares' counts (non-positive counts are ignored).
    Returns the item ids ordered from most to least relevant; ties keep
    their original insertion order.
    """
    relevance_scores = {}
    for item in data:
        score = sum(
            value
            for value in (item['likes'], item['views'], item['shares'])
            if value > 0
        )
        relevance_scores[item['id']] = score
    # sorted() is stable, so equal scores preserve insertion order.
    ranked = sorted(relevance_scores.items(), key=lambda pair: pair[1], reverse=True)
    return [item_id for item_id, _ in ranked]
|
/*******************************************************************************
* Copyright 2015 InfinitiesSoft Solutions Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*******************************************************************************/
package com.infinities.skyport.model.configuration.platform;
import java.io.Serializable;
import com.infinities.skyport.model.FunctionConfiguration;
/**
 * Function-level configuration for the monitoring operations of a platform.
 * Each field holds the {@link FunctionConfiguration} of one monitoring
 * function; all fields default to a fresh configuration instance.
 */
public class MonitoringConfiguration implements Serializable, Cloneable {

	private static final long serialVersionUID = 1L;

	private FunctionConfiguration listMetrics = new FunctionConfiguration();
	private FunctionConfiguration listAlarms = new FunctionConfiguration();
	private FunctionConfiguration updateAlarm = new FunctionConfiguration();
	private FunctionConfiguration removeAlarms = new FunctionConfiguration();
	private FunctionConfiguration enableAlarmActions = new FunctionConfiguration();
	private FunctionConfiguration disableAlarmActions = new FunctionConfiguration();

	// Null-safe field comparison used by equals().
	private static boolean sameConfig(FunctionConfiguration a, FunctionConfiguration b) {
		return a == null ? b == null : a.equals(b);
	}

	// Null-safe field hash used by hashCode().
	private static int configHash(FunctionConfiguration c) {
		return c == null ? 0 : c.hashCode();
	}

	public FunctionConfiguration getListMetrics() {
		return listMetrics;
	}

	public void setListMetrics(FunctionConfiguration listMetrics) {
		this.listMetrics = listMetrics;
	}

	public FunctionConfiguration getListAlarms() {
		return listAlarms;
	}

	public void setListAlarms(FunctionConfiguration listAlarms) {
		this.listAlarms = listAlarms;
	}

	public FunctionConfiguration getUpdateAlarm() {
		return updateAlarm;
	}

	public void setUpdateAlarm(FunctionConfiguration updateAlarm) {
		this.updateAlarm = updateAlarm;
	}

	public FunctionConfiguration getRemoveAlarms() {
		return removeAlarms;
	}

	public void setRemoveAlarms(FunctionConfiguration removeAlarms) {
		this.removeAlarms = removeAlarms;
	}

	public FunctionConfiguration getEnableAlarmActions() {
		return enableAlarmActions;
	}

	public void setEnableAlarmActions(FunctionConfiguration enableAlarmActions) {
		this.enableAlarmActions = enableAlarmActions;
	}

	public FunctionConfiguration getDisableAlarmActions() {
		return disableAlarmActions;
	}

	public void setDisableAlarmActions(FunctionConfiguration disableAlarmActions) {
		this.disableAlarmActions = disableAlarmActions;
	}

	/**
	 * Deep copy: every {@link FunctionConfiguration} field is cloned.
	 */
	@Override
	public MonitoringConfiguration clone() {
		MonitoringConfiguration copy = new MonitoringConfiguration();
		copy.listMetrics = listMetrics.clone();
		copy.listAlarms = listAlarms.clone();
		copy.updateAlarm = updateAlarm.clone();
		copy.removeAlarms = removeAlarms.clone();
		copy.enableAlarmActions = enableAlarmActions.clone();
		copy.disableAlarmActions = disableAlarmActions.clone();
		return copy;
	}

	@Override
	public int hashCode() {
		final int prime = 31;
		int result = 1;
		result = prime * result + configHash(disableAlarmActions);
		result = prime * result + configHash(enableAlarmActions);
		result = prime * result + configHash(listAlarms);
		result = prime * result + configHash(listMetrics);
		result = prime * result + configHash(removeAlarms);
		result = prime * result + configHash(updateAlarm);
		return result;
	}

	@Override
	public boolean equals(Object obj) {
		if (this == obj) {
			return true;
		}
		if (obj == null || getClass() != obj.getClass()) {
			return false;
		}
		MonitoringConfiguration other = (MonitoringConfiguration) obj;
		return sameConfig(disableAlarmActions, other.disableAlarmActions)
				&& sameConfig(enableAlarmActions, other.enableAlarmActions)
				&& sameConfig(listAlarms, other.listAlarms)
				&& sameConfig(listMetrics, other.listMetrics)
				&& sameConfig(removeAlarms, other.removeAlarms)
				&& sameConfig(updateAlarm, other.updateAlarm);
	}
}
|
# Adds videos.start_offset_ms (integer) with a default of 10_000 (10 seconds).
class AddStartOffsetMsToVideos < ActiveRecord::Migration[6.0]
  def change
    # The column is added without a default and the default is set afterwards,
    # so existing rows keep NULL and only newly created rows get 10_000.
    # NOTE(review): presumably this split also avoids backfilling/rewriting
    # existing rows — confirm against the target database's behavior.
    add_column :videos, :start_offset_ms, :integer
    change_column_default :videos, :start_offset_ms, from: nil, to: 10_000
  end
end
|
def calculate_total_images(dataset, imgs_per_gpu, workers_per_gpu, cfg):
    """Return dataset * (number of test GPUs) * imgs_per_gpu * workers_per_gpu.

    NOTE(review): `dataset` is used as a plain numeric multiplier here, so
    callers presumably pass a count (e.g. ``len(dataset)``) — confirm at call
    sites.
    """
    num_test_gpus = len(cfg['gpus']['test'])
    return dataset * num_test_gpus * imgs_per_gpu * workers_per_gpu
|
#!/bin/bash
# https://github.com/koalaman/shellcheck/wiki/SC2034
# shellcheck disable=2034
# Shared configuration for the beacon-chain simulation scripts: resolves
# repository paths and defines the node counts, file locations and base ports
# used by the other sim scripts. Variables look "unused" here (SC2034) because
# this file is meant to be sourced by those scripts.
true
PWD_CMD="pwd"
# get native Windows paths on Mingw
uname | grep -qi mingw && PWD_CMD="pwd -W"
cd "$(dirname "$0")"
SIM_ROOT="$($PWD_CMD)"
# Set a default value for the env vars usually supplied by a Makefile
cd "$(git rev-parse --show-toplevel)"
: ${GIT_ROOT:="$($PWD_CMD)"}
cd - &>/dev/null
# When changing these, also update the readme section on running simulation
# so that the run_node example is correct!
NUM_VALIDATORS=${VALIDATORS:-128}
TOTAL_NODES=${NODES:-4}
TOTAL_USER_NODES=${USER_NODES:-0}
TOTAL_SYSTEM_NODES=$(( TOTAL_NODES - TOTAL_USER_NODES ))
# The last node (zero-based index) doubles as the bootstrap node.
BOOTSTRAP_NODE=$(( TOTAL_NODES - 1 ))
USE_BN_VC_VALIDATOR_SPLIT=${BN_VC_VALIDATOR_SPLIT:-yes}
# Directory layout for simulation state, metrics and validator key material.
SIMULATION_DIR="${SIM_ROOT}/data"
METRICS_DIR="${SIM_ROOT}/prometheus"
VALIDATORS_DIR="${SIM_ROOT}/validators"
SECRETS_DIR="${SIM_ROOT}/secrets"
SNAPSHOT_FILE="${SIMULATION_DIR}/state_snapshot.ssz"
NETWORK_BOOTSTRAP_FILE="${SIMULATION_DIR}/bootstrap_nodes.txt"
# Binaries are expected in the repository's build/ directory.
BEACON_NODE_BIN="${GIT_ROOT}/build/beacon_node"
VALIDATOR_CLIENT_BIN="${GIT_ROOT}/build/validator_client"
DEPOSIT_CONTRACT_BIN="${GIT_ROOT}/build/deposit_contract"
BOOTSTRAP_ENR_FILE="${SIMULATION_DIR}/node-${BOOTSTRAP_NODE}/beacon_node.enr"
NETWORK_METADATA_FILE="${SIMULATION_DIR}/network.json"
DEPOSITS_FILE="${SIMULATION_DIR}/deposits.json"
# Only point at a local Ganache eth1 node when explicitly requested.
if [[ "$USE_GANACHE" == "yes" ]]; then
  WEB3_ARG="--web3-url=ws://localhost:8545"
else
  WEB3_ARG=""
fi
# Base ports; per-node ports are presumably offset from these by node index
# in the run scripts — confirm there before changing.
BASE_P2P_PORT=30000
BASE_RPC_PORT=7000
BASE_METRICS_PORT=8008
|
<reponame>vharsh/cattle2<filename>modules/model/src/main/java/io/cattle/platform/core/dao/impl/DataDaoImpl.java
package io.cattle.platform.core.dao.impl;
import io.cattle.platform.core.dao.DataDao;
import io.cattle.platform.core.model.Data;
import io.cattle.platform.core.model.tables.records.DataRecord;
import io.cattle.platform.db.jooq.dao.impl.AbstractJooqDao;
import io.cattle.platform.lock.LockManager;
import io.cattle.platform.object.ObjectManager;
import io.cattle.platform.util.exception.ExceptionUtils;
import io.github.ibuildthecloud.gdapi.util.TransactionDelegate;
import org.jooq.Configuration;
import org.jooq.DSLContext;
import org.jooq.impl.DefaultDSLContext;
import java.util.concurrent.Callable;
import static io.cattle.platform.core.model.tables.DataTable.*;
/**
 * jOOQ-backed {@link DataDao} implementation: a simple name/value store on the
 * DATA table with a lock-protected get-or-create operation.
 */
public class DataDaoImpl extends AbstractJooqDao implements DataDao {

    // Serializes concurrent generation of the same key (see getOrCreate).
    LockManager lockManager;
    ObjectManager objectManager;
    // Separate jOOQ configuration and transaction delegate used for reads in
    // create() and for inserting generated values.
    // NOTE(review): presumably a distinct datasource/transaction scope so that
    // generated values commit independently of any surrounding transaction —
    // confirm with the wiring that supplies these.
    Configuration newConfiguration;
    TransactionDelegate newTransaction;

    public DataDaoImpl(Configuration configuration, LockManager lockManager, ObjectManager objectManager, Configuration newConfiguration, TransactionDelegate newTransaction) {
        super(configuration);
        this.lockManager = lockManager;
        this.objectManager = objectManager;
        this.newConfiguration = newConfiguration;
        this.newTransaction = newTransaction;
    }

    // All queries issued by this DAO run against newConfiguration.
    @Override
    protected DSLContext create() {
        return new DefaultDSLContext(newConfiguration);
    }

    /**
     * Returns the value stored under {@code key}, generating and persisting it
     * via {@code generator} when absent. Follows a double-checked pattern:
     * an unlocked read first, then a re-check under the per-key lock before
     * generating. An existing record whose visibility differs is updated to
     * the requested visibility and its value returned unchanged.
     */
    @Override
    public String getOrCreate(final String key, final boolean visible, final Callable<String> generator) {
        // Fast path: value already present with the requested visibility.
        Data data = create().selectFrom(DATA)
                .where(DATA.NAME.eq(key))
                .fetchAny();
        if ( data != null && data.getVisible() != null && data.getVisible() == visible ) {
            return data.getValue();
        }
        return lockManager.lock(new DataChangeLock(key), () -> {
            // Re-check under the lock: another caller may have created or
            // updated the record since the unlocked read above.
            DataRecord data1 = create().selectFrom(DATA)
                    .where(DATA.NAME.eq(key))
                    .fetchAny();
            if ( data1 != null && data1.getVisible() != null && data1.getVisible() == visible ) {
                return data1.getValue();
            } else if ( data1 != null ) {
                // Record exists but with different visibility: flip it, keep the value.
                data1.setVisible(visible);
                data1.update();
                return data1.getValue();
            }
            try {
                String value = generator.call();
                if ( value == null ) {
                    // A null generation result is returned as-is and not persisted.
                    return value;
                }
                // Persist the generated value in its own transaction.
                newTransaction.doInTransaction(() -> {
                    DataRecord record = new DataRecord();
                    record.attach(newConfiguration);
                    record.setName(key);
                    record.setVisible(visible);
                    record.setValue(value);
                    record.insert();
                });
                return value;
            } catch (Exception e) {
                // Rethrows unchecked exceptions directly; wraps checked ones.
                ExceptionUtils.rethrowRuntime(e);
                throw new RuntimeException("Failed to generate value for [" + key + "]", e);
            }
        });
    }

    /** Returns the stored value for {@code key}, or null when absent. */
    @Override
    public String get(String key) {
        Data data = objectManager.findAny(Data.class,
                DATA.NAME, key);
        return data == null ? null : data.getValue();
    }

    /**
     * Updates the value for {@code key}; when no row was updated, inserts a
     * new record with the given visibility. Note: an update does not change
     * the existing record's visibility.
     */
    @Override
    public void save(String key, boolean visible, String value) {
        int count = create().update(DATA)
                .set(DATA.VALUE, value)
                .where(DATA.NAME.eq(key))
                .execute();
        if (count == 0) {
            objectManager.create(Data.class,
                    DATA.NAME, key,
                    DATA.VISIBLE, visible,
                    DATA.VALUE, value);
        }
    }
}
|
#include <fmt/core.h>
// Minimal {fmt} smoke test: prints a greeting and exits successfully.
int main() {
    fmt::print("Hello world\n");
    return 0;
}
|
<reponame>b0rgbart3/myGoogleBooks
import React,{ useEffect } from "react";
import { useBookContext } from "../utils/GlobalState";
import { Redirect } from "react-router-dom";
import API from "../utils/API";
import { GET_ALL_BOOKS, DELETE_BOOK } from "../utils/actions";
// Inline style overrides for this page. Currently empty and unused; the
// commented-out 'nav' entry documents the intended shape.
const Styles = {
    // nav: {
    //     fontWeight: 900,
    //     fontSize: "26px"
    // }
}
function Saved() {
const [state, dispatch] = useBookContext();
useEffect(() => {
// Get all the books from our Mongo DB
API.getBooks().then((response) => {
console.log("Back from DB: " + JSON.stringify(response.data ));
dispatch( { type: GET_ALL_BOOKS, savedBooks: response.data } );
});
}, []);
function removeBook(id) {
API.deleteBook(id).then( dispatch({ type: DELETE_BOOK, id})).catch( err => console.log(err) );
}
return (
<div className="container" style={{backgroundColor:"#ffffff", padding:"10px"}}>
{
state.savedBooks ?
state.savedBooks.map(book => {
return (
<div className="collection-item" key={book._id}>
<p>{book.title}</p>
<p>{book.description}</p>
{ (book.image!="") && (book.image != undefined) ?
<img src={book.image} /> : <p>No image stored.</p> }
<button onClick={()=>removeBook( book._id )}>
Remove Book</button>
</div>)
})
:
<div></div>
}
</div>
);
}
export default Saved;
|
# NOTE(review): the parameter shadows the builtin 'list'; rename to 'items'
# once call sites are audited for keyword usage.
def find_frequent(list):
    """Return {most_frequent_element: count} for the given sequence.

    Ties are broken by first-insertion order (matching dict iteration
    order). Returns an empty dict for empty input instead of raising
    ValueError as the previous implementation did.
    """
    from collections import Counter
    counts = Counter(list)
    if not counts:
        return {}
    frequency = max(counts.values())
    # Counter preserves insertion order, so the first element reaching the
    # maximum count wins ties (same behavior as the original dict scan).
    for element, count in counts.items():
        if count == frequency:
            return {element: frequency}
print(find_frequent([1, 1, 2, 3, 3, 3, 4, 4, 4, 4]))
|
<gh_stars>0
package org.yarnandtail.andhow.compile;
/**
 * Utilities for the AndHow AnnotationProcessor.
 */
public class CompileUtil {

	/**
	 * Returns the fully qualified name of the 'Generated' annotation that matches
	 * the Java runtime executing this code (determined from the
	 * {@code java.version} system property via {@link #getMajorJavaVersion(String)}).
	 *
	 * @return the fully qualified class name of the Generated annotation
	 */
	public static String getGeneratedAnnotationClassName() {
		return getGeneratedAnnotationClassName(getMajorJavaVersion(System.getProperty("java.version")));
	}

	/**
	 * Returns the fully qualified 'Generated' annotation name for a Java major
	 * version. Java 8 uses {@code javax.annotation.Generated}; Java 9 and later
	 * use {@code javax.annotation.processing.Generated}.
	 *
	 * Both annotations have SOURCE retention, so the choice depends only on the
	 * compiling JDK, not on -source/-target settings.
	 *
	 * @param javaMajorVersion the Java version as an integer (use 8 for 1.8)
	 * @return the fully qualified class name of the Generated annotation
	 */
	public static String getGeneratedAnnotationClassName(int javaMajorVersion) {
		return javaMajorVersion < 9 ? "javax.annotation.Generated" : "javax.annotation.processing.Generated";
	}

	/**
	 * Parses the major Java version from a version string, e.g. "1.8.0_171" → 8,
	 * "11.0.2" → 11. (Java 10+ offers Runtime.version(), but this code must stay
	 * Java 8 compatible.)
	 *
	 * @param versionString as returned from System.getProperty("java.version")
	 * @return the major version as an integer
	 */
	public static int getMajorJavaVersion(String versionString) {
		String[] parts = versionString.split("[\\.\\-_]", 3);
		try {
			// Pre-Java-9 strings look like "1.8.0_171": the major version is the
			// second component. Later runtimes ("9", "11.0.2") put it first.
			int majorIndex = "1".equals(parts[0]) ? 1 : 0;
			return Integer.parseInt(parts[majorIndex]);
		} catch (NumberFormatException e) {
			throw new RuntimeException(
					"AndHow couldn't parse '" + versionString + "' as a 'java.version' string in System.properties. " +
					"Is this a non-standard JDK? ", e
			);
		}
	}
}
|
package com.oven.netty.exception;
/**
 * Thrown when a request carries missing or malformed parameters.
 * (Original comment: "参数错误异常" — "parameter error exception".)
 */
public class ErrorParamsException extends RuntimeException {
    private static final long serialVersionUID = -623198335011996153L;
    /** Creates the exception without a detail message. */
    public ErrorParamsException() {
        super();
    }
    /** Creates the exception with a human-readable detail message. */
    public ErrorParamsException(String message) {
        super(message);
    }
}
|
/*
This file is part of the JitCat library.
Copyright (C) <NAME> 2019
Distributed under the MIT License (license terms are at http://opensource.org/licenses/MIT).
*/
#include "jitcat/CatIfStatement.h"
#include "jitcat/ASTHelper.h"
#include "jitcat/CatLog.h"
#include "jitcat/CatRuntimeContext.h"
#include "jitcat/CatScopeBlock.h"
#include "jitcat/CatTypedExpression.h"
#include "jitcat/ExpressionErrorManager.h"
using namespace jitcat;
using namespace jitcat::AST;
// Constructs an if-statement from its parsed parts. Takes ownership of the
// condition, if-body and optional else statement; an 'else if' chain passes
// the nested if-statement as elseNode.
CatIfStatement::CatIfStatement(CatTypedExpression* condition, CatScopeBlock* ifBody, const Tokenizer::Lexeme& lexeme, CatStatement* elseNode):
	CatStatement(lexeme),
	condition(condition),
	ifBody(ifBody),
	elseNode(elseNode)
{
}
// Deep-copy constructor: condition and if-body are always cloned via copy();
// the else branch is cloned only when present.
jitcat::AST::CatIfStatement::CatIfStatement(const CatIfStatement& other):
	CatStatement(other),
	condition(static_cast<CatTypedExpression*>(other.condition->copy())),
	ifBody(static_cast<CatStatement*>(other.ifBody->copy())),
	elseNode(other.elseNode != nullptr ? static_cast<CatStatement*>(other.elseNode->copy()) : nullptr)
{
}
// Polymorphic clone; delegates to the deep-copy constructor.
CatASTNode* jitcat::AST::CatIfStatement::copy() const
{
	return new CatIfStatement(*this);
}
// Pretty-prints the statement as "if (<condition>)" followed by the body and
// an optional else branch. A newline is emitted after "else" only when the
// else body is a scope block, so an 'else if' stays on the same line.
void CatIfStatement::print() const
{
	Tools::CatLog::log("if (");
	condition->print();
	Tools::CatLog::log(")\n");
	ifBody->print();
	if (elseNode != nullptr)
	{
		Tools::CatLog::log("else");
		if (elseNode->getNodeType() == CatASTNodeType::ScopeBlock)
		{
			Tools::CatLog::log("\n");
		}
		elseNode->print();
	}
}
// Identifies this node as an if-statement within the AST.
CatASTNodeType CatIfStatement::getNodeType() const
{
	return CatASTNodeType::IfStatement;
}
// Type-checks the condition, the if-body and (when present) the else branch,
// then verifies that the condition expression yields a boolean. All sub-checks
// run unconditionally so every error is reported in a single pass.
bool jitcat::AST::CatIfStatement::typeCheck(CatRuntimeContext* compiletimeContext, ExpressionErrorManager* errorManager, void* errorContext)
{
	const bool conditionChecked = condition->typeCheck(compiletimeContext, errorManager, errorContext);
	const bool bodyChecked = ifBody->typeCheck(compiletimeContext, errorManager, errorContext);
	bool elseChecked = true;
	if (elseNode != nullptr)
	{
		elseChecked = static_cast<CatStatement*>(elseNode.get())->typeCheck(compiletimeContext, errorManager, errorContext);
	}
	const bool conditionYieldsBool = condition->getType().isBoolType();
	if (!conditionYieldsBool)
	{
		errorManager->compiledWithError("Condition expression does not evaluate to a boolean.", errorContext, compiletimeContext->getContextName(), condition->getLexeme());
	}
	return conditionChecked && bodyChecked && elseChecked && conditionYieldsBool;
}
// Constant-folds this if-statement. When the condition is a compile-time
// constant, the whole statement collapses to the taken branch (ownership of
// that branch is released to the caller), or to a new empty scope block when
// the condition is false and there is no else branch. Otherwise both branches
// are collapsed recursively and the statement itself is returned.
CatStatement* CatIfStatement::constCollapse(CatRuntimeContext* compiletimeContext, ExpressionErrorManager* errorManager, void* errorContext)
{
	ASTHelper::updatePointerIfChanged(condition, condition->constCollapse(compiletimeContext, errorManager, errorContext));
	if (condition->isConst())
	{
		// Constant condition: evaluate it now and replace the statement with
		// the branch that would execute. release() hands ownership to the caller.
		if (std::any_cast<bool>(condition->execute(nullptr)))
		{
			return ifBody.release();
		}
		else if (elseNode != nullptr)
		{
			return elseNode.release();
		}
		else
		{
			// 'if (false)' with no else: replace with an empty scope block.
			return new CatScopeBlock({}, getLexeme());
		}
	}
	else
	{
		ASTHelper::updatePointerIfChanged(ifBody, ifBody->constCollapse(compiletimeContext, errorManager, errorContext));
		if (elseNode != nullptr)
		{
			ASTHelper::updatePointerIfChanged(elseNode, elseNode->constCollapse(compiletimeContext, errorManager, errorContext));
		}
	}
	return this;
}
// Evaluates the condition and executes the matching branch, returning that
// branch's result. Returns an empty std::any when the condition is false and
// there is no else branch.
std::any jitcat::AST::CatIfStatement::execute(CatRuntimeContext* runtimeContext)
{
	const bool conditionHolds = std::any_cast<bool>(condition->execute(runtimeContext));
	if (conditionHolds)
	{
		return ifBody->execute(runtimeContext);
	}
	if (elseNode != nullptr)
	{
		return static_cast<CatStatement*>(elseNode.get())->execute(runtimeContext);
	}
	return std::any();
}
// Determines whether every control path through this statement returns, storing
// the result in the allControlPathsReturn member. Without an else branch, all
// paths can only return when the condition is a compile-time constant 'true'
// and the if-body itself always returns.
std::optional<bool> jitcat::AST::CatIfStatement::checkControlFlow(CatRuntimeContext* compiletimeContext, ExpressionErrorManager* errorManager, void* errorContext, bool& unreachableCodeDetected)
{
	auto ifBodyReturns = ifBody->checkControlFlow(compiletimeContext, errorManager, errorContext, unreachableCodeDetected);
	if (elseNode == nullptr)
	{
		if (condition->isConst() && std::any_cast<bool>(condition->execute(compiletimeContext)))
		{
			// 'if (true)' without else behaves like the body alone.
			allControlPathsReturn = ifBodyReturns;
		}
		else
		{
			// The fall-through path (condition false) does not return.
			allControlPathsReturn = false;
		}
	}
	else
	{
		auto elseBodyReturns = elseNode->checkControlFlow(compiletimeContext, errorManager, errorContext, unreachableCodeDetected);
		assert(ifBodyReturns.has_value() && elseBodyReturns.has_value());
		// Both branches must return for the whole statement to always return.
		allControlPathsReturn = (*ifBodyReturns) && (*elseBodyReturns);
	}
	return allControlPathsReturn;
}
// Read-only accessors for the statement's parts (used by introspection/codegen).
const CatTypedExpression* jitcat::AST::CatIfStatement::getConditionExpression() const
{
	return condition.get();
}
const CatStatement* jitcat::AST::CatIfStatement::getIfBody() const
{
	return ifBody.get();
}
const CatStatement* jitcat::AST::CatIfStatement::getElseBody() const
{
	return elseNode.get();
}
|
#!/bin/bash
# Run with: go run core.go udp-client.go [HOST:PORT]
# BUG FIX: the argument was passed as an unquoted $1, which is subject to word
# splitting/globbing and silently dropped any additional arguments. "$@"
# forwards every caller-supplied argument intact (and nothing when none given).
go run core.go udp-client.go "$@"
|
let num = [5, 6, 7, 8, 9, 10, 11];
// BUG FIX: the default sort() comparator compares elements as strings, which
// ordered 10 and 11 before 5. A numeric comparator sorts the values correctly.
num.sort((a, b) => a - b);
// for..of iterates the values directly (for..in yields index strings).
for (let value of num) {
    console.log(value);
}
let pos = num.indexOf(20);
if (pos == -1) {
    console.log(`Valor não encontrado!`);
} else {
    console.log(`Valor esta na posição ${pos}`);
}
|
class Security(dict):
    """Base class for security-scheme definitions (behaves as a plain dict)."""
    pass


class BasicAuth(Security):
    """HTTP Basic authentication security scheme."""

    # Display name of the scheme.
    name = 'Basic'

    def __init__(self):
        super().__init__({'type': 'http', 'scheme': 'basic'})
|
#!/bin/sh
# Prepares a delegated ZFS test dataset (<root-pool>/jmanager_test) for the
# given user on FreeBSD: creates the dataset when missing, delegates the
# required ZFS permissions, and remounts it as the unprivileged user.
# On non-FreeBSD systems (or when no root pool is found) it exits successfully
# without doing anything.
USER=${1:?A user is needed}
# ZFS permissions delegated to the user (and to child datasets via 'allow -c').
PERMISSIONS="create,clone,destroy,hold,mount,release,rename,snapshot,canmount,mountpoint"
if ! [ "$(uname -o)" = "FreeBSD" ] ; then
    exit 0
fi
# Pool whose dataset is mounted at '/' — i.e. the root pool.
ZPOOL_NAME=$(zfs list -o name,mountpoint | grep -E '/$' | awk -F'/' '{print $1;}')
if ! [ "${ZPOOL_NAME}" ] ; then
    exit 0
fi
CREATE_TEST_DATASET=false
DELEGATE_TEST_DATESET=false
# Create the dataset when absent; (re)delegate when no 'zfs allow' rules exist.
if ! zfs list -o name -H | grep "${ZPOOL_NAME}/jmanager_test" ; then
    CREATE_TEST_DATASET=true
    DELEGATE_TEST_DATESET=true
elif [ -z "$(zfs allow "${ZPOOL_NAME}/jmanager_test")" ] ; then
    DELEGATE_TEST_DATESET=true
fi
if ${CREATE_TEST_DATASET} ; then
    # Dataset creation requires root.
    if [ "$(id -u)" -ne 0 ] ; then
        echo "root permissions are needed to create the dataset" >&2
        exit 1
    fi
    zfs create "${ZPOOL_NAME}/jmanager_test"
    DELEGATE_TEST_DATESET=true
fi
if ${DELEGATE_TEST_DATESET} ; then
    # Delegation also requires root.
    if [ "$(id -u)" -ne 0 ] ; then
        echo "root permissions are needed to delegate permissions to the dataset" >&2
        exit 1
    fi
    # Unmount, grant the user the permissions (and make them inheritable with
    # -c), hand over ownership, allow user mounts, then remount as the user.
    zfs umount "${ZPOOL_NAME}/jmanager_test"
    zfs allow -u "${USER}" "${PERMISSIONS}" "${ZPOOL_NAME}/jmanager_test"
    zfs allow -c "${PERMISSIONS}" "${ZPOOL_NAME}/jmanager_test"
    chown -R "${USER}" "/${ZPOOL_NAME}/jmanager_test"
    sysctl vfs.usermount=1
    su - "${USER}" -c "/sbin/zfs mount ${ZPOOL_NAME}/jmanager_test"
    # Re-run chown after the user mount so the mountpoint contents are owned
    # by the user as well.
    chown -R "${USER}" "/${ZPOOL_NAME}/jmanager_test"
fi
|
# Keep only the first three tab-separated fields of each input line (stdin -> stdout).
cut -f 1-3
|
package com.java.study.algorithm.zuo.emiddle.class08;
/**
 * Placeholder for exercise 06 of class 08.
 * NOTE(review): no implementation yet — presumably intended to reconstruct a
 * BST from its post-order traversal array, judging by the class name; confirm
 * before implementing.
 */
public class Code06_PosArrayToBST{
}
|
# Resolves this script's directory, switches into ../daas-common when not
# already inside it, prints the resulting directory, then switches back.
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
echo "$parent_path"
# BUG FIX: 'grep -p' is not a valid option (the command always failed, so the
# directory change happened unconditionally). Use a quiet match instead and
# only change directory when the path does not contain "daas-common".
if ! echo "$parent_path" | grep -q "daas-common"; then
  cd ../daas-common
fi
current_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
echo "$current_path"
echo "now swithching back"
cd "$parent_path"
|
#!/bin/bash
# Switches the Pavlov dedicated server build: stops the service, updates the
# server files via SteamCMD as the 'steam' user (app 622970), then restarts
# the service.
# NOTE(review): the message says "Stable" but the SteamCMD command selects the
# 'shack' beta branch — confirm that 'shack' is the intended stable channel
# for this title.
echo "Switching to Stable..."
systemctl stop pavlov
runuser -l steam -c '~/Steam/steamcmd.sh +login anonymous +force_install_dir /home/steam/pavlovserver +app_update 622970 -beta shack +exit'
systemctl start pavlov
|
<form action="/survey" method="post">
<div>
<label for="name">Name:</label>
        <input type="text" id="name" name="name">
</div>
<div>
<label for="email">Email:</label>
        <input type="email" id="email" name="email">
</div>
<div>
<label for="age">Age:</label>
        <input type="number" id="age" name="age">
</div>
<div>
<label for="gender">Gender:</label>
        <select id="gender" name="gender">
<option value="male">Male</option>
<option value="female">Female</option>
<option value="other">Other</option>
</select>
</div>
<div>
<label for="occupation">Occupation:</label>
        <input type="text" id="occupation" name="occupation">
</div>
<div>
<input type="submit" value="Submit">
</div>
</form>
|
require 'test_helper'
# Validation tests for the Period model: presence of timetable_id and name,
# well-formed start/end times, and ordering of start before end.
class PeriodTest < ActiveSupport::TestCase
  test "the Period requires a timetable_id" do
    assert Period.new(name: "test1", start_time: Time.now, end_time: Time.now).invalid?
  end
  test "the Period's name is not allowed to be blank" do
    assert Period.new(name: "", start_time: Time.now, end_time: Time.now, timetable_id: 1).invalid?
  end
  test "the Period has a valid start_time" do
    # The trailing integers are assertion messages identifying which case failed.
    assert Period.new(name: "test1", start_time: "", end_time: Time.now, timetable_id: 1).invalid?, 1
    assert Period.new(name: "test1", end_time: Time.now, timetable_id: 1).invalid? , 2
    assert Period.new(name: "test1", start_time: "bob", end_time: Time.now, timetable_id: 1).invalid? , 3
  end
  test "the Period has a valid end_time" do
    assert Period.new(name: "test1", start_time: Time.now, end_time: "", timetable_id: 1).invalid?
    assert Period.new(name: "test1", start_time: Time.now, timetable_id: 1).invalid?
    assert Period.new(name: "test1", start_time: Time.now, end_time: "bob", timetable_id: 1).invalid?
  end
  test "the Period is valid only when it's start_time is before it's end_time" do
    assert Period.new(name: "test1", start_time: 1.minute.ago, end_time: Time.now, timetable_id: 1).valid?, 1
    assert Period.new(name: "test1", start_time: Time.now, end_time: 1.minute.ago, timetable_id: 1).invalid?, 2
    # Identical start and end times must also be rejected.
    t = Time.now
    assert Period.new(name: "test1", start_time: t, end_time: t, timetable_id: 1).invalid?, 3
  end
end
|
# Node Version Manager
# Implemented as a POSIX-compliant function
# Should work on sh, dash, bash, ksh, zsh
# To use source this file from your bash profile
#
# Implemented by Tim Caswell <tim@creationix.com>
# with much bash help from Matthew Ranney
# "local" warning, quote expansion warning, sed warning, `local` warning
# shellcheck disable=SC2039,SC2016,SC2001,SC3043
{ # this ensures the entire script is downloaded #
# shellcheck disable=SC3028
NVM_SCRIPT_SOURCE="$_"
# True when the current shell is zsh (only zsh sets ZSH_VERSION).
nvm_is_zsh() {
  [ -n "${ZSH_VERSION-}" ]
}
# True when stdout is attached to a terminal.
nvm_stdout_is_terminal() {
  [ -t 1 ]
}
# Print all arguments followed by a newline; %s does not interpret escapes.
# 'command printf' bypasses any function/alias named printf.
nvm_echo() {
  command printf %s\\n "$*" 2>/dev/null
}
# Like nvm_echo, but %b makes printf interpret backslash escapes (color codes).
nvm_echo_with_colors() {
  command printf %b\\n "$*" 2>/dev/null
}
# cd wrapper; the leading backslash bypasses any 'cd' alias.
nvm_cd() {
  \cd "$@"
}
# Print to stderr.
nvm_err() {
  >&2 nvm_echo "$@"
}
# Print to stderr, interpreting escapes (colors).
nvm_err_with_colors() {
  >&2 nvm_echo_with_colors "$@"
}
# grep wrapper that neutralizes the deprecated GREP_OPTIONS env var.
nvm_grep() {
  GREP_OPTIONS='' command grep "$@"
}
# True when the named command/function/builtin/alias exists.
nvm_has() {
  type "${1-}" >/dev/null 2>&1
}
# True when the name exists and is not merely a shell alias.
nvm_has_non_aliased() {
  nvm_has "${1-}" && ! nvm_is_alias "${1-}"
}
# True when the name is defined as a shell alias.
nvm_is_alias() {
  # this is intentionally not "command alias" so it works in zsh.
  \alias "${1-}" >/dev/null 2>&1
}
# Print a human-readable description of what a command resolves to
# (hashed path, alias expansion, or plain 'type' output), normalizing the
# differing 'type' output formats of bash and zsh.
nvm_command_info() {
  local COMMAND
  local INFO
  COMMAND="${1}"
  if type "${COMMAND}" | nvm_grep -q hashed; then
    # bash: "X is hashed (/path/to/X)" — extract the path between parentheses.
    INFO="$(type "${COMMAND}" | command sed -E 's/\(|\)//g' | command awk '{print $4}')"
  elif type "${COMMAND}" | nvm_grep -q aliased; then
    # bash alias: show resolved path plus the alias expansion.
    # shellcheck disable=SC2230
    INFO="$(which "${COMMAND}") ($(type "${COMMAND}" | command awk '{ $1=$2=$3=$4="" ;print }' | command sed -e 's/^\ *//g' -Ee "s/\`|'//g"))"
  elif type "${COMMAND}" | nvm_grep -q "^${COMMAND} is an alias for"; then
    # zsh alias wording differs from bash's.
    # shellcheck disable=SC2230
    INFO="$(which "${COMMAND}") ($(type "${COMMAND}" | command awk '{ $1=$2=$3=$4=$5="" ;print }' | command sed 's/^\ *//g'))"
  elif type "${COMMAND}" | nvm_grep -q "^${COMMAND} is \\/"; then
    # Plain binary: "X is /path/to/X" — keep only the path.
    INFO="$(type "${COMMAND}" | command awk '{print $3}')"
  else
    # Fallback: raw 'type' output (functions, builtins, ...).
    INFO="$(type "${COMMAND}")"
  fi
  nvm_echo "${INFO}"
}
# True when the terminal supports at least 8 colors (queried via tput when
# available; defaults to "no" otherwise).
nvm_has_colors() {
  local NVM_NUM_COLORS
  if nvm_has tput; then
    NVM_NUM_COLORS="$(tput -T "${TERM:-vt100}" colors)"
  fi
  [ "${NVM_NUM_COLORS:--1}" -ge 8 ]
}
# True when the installed curl was built with libz (zlib) support.
nvm_curl_libz_support() {
  curl -V 2>/dev/null | nvm_grep "^Features:" | nvm_grep -q "libz"
}
# True when curl both supports compression and is recent enough (>= 7.21.0)
# for the --compressed flag to be used safely.
nvm_curl_use_compression() {
  nvm_curl_libz_support && nvm_version_greater_than_or_equal_to "$(nvm_curl_version)" 7.21.0
}
# Resolve the latest nvm release tag by following the redirect from
# latest.nvm.sh and printing the final URL's trailing path component.
# Fails (1) when neither curl nor wget is available, (2) when no redirect
# target could be determined.
nvm_get_latest() {
  local NVM_LATEST_URL
  local CURL_COMPRESSED_FLAG
  if nvm_has "curl"; then
    if nvm_curl_use_compression; then
      CURL_COMPRESSED_FLAG="--compressed"
    fi
    # -w '%{url_effective}' prints the final URL after all redirects.
    NVM_LATEST_URL="$(curl ${CURL_COMPRESSED_FLAG:-} -q -w "%{url_effective}\\n" -L -s -S http://latest.nvm.sh -o /dev/null)"
  elif nvm_has "wget"; then
    # With wget, scrape the last 'Location:' header from the server response.
    NVM_LATEST_URL="$(wget -q http://latest.nvm.sh --server-response -O /dev/null 2>&1 | command awk '/^  Location: /{DEST=$2} END{ print DEST }')"
  else
    nvm_err 'nvm needs curl or wget to proceed.'
    return 1
  fi
  if [ -z "${NVM_LATEST_URL}" ]; then
    nvm_err "http://latest.nvm.sh did not redirect to the latest release on GitHub"
    return 2
  fi
  # Strip everything up to the last '/' to keep only the tag name.
  nvm_echo "${NVM_LATEST_URL##*/}"
}
# Download a URL using curl when available, otherwise emulate the curl
# command line with wget. All arguments are passed through curl-style; for
# wget they are rewritten flag-by-flag with sed.
nvm_download() {
  local CURL_COMPRESSED_FLAG
  if nvm_has "curl"; then
    if nvm_curl_use_compression; then
      CURL_COMPRESSED_FLAG="--compressed"
    fi
    curl --fail ${CURL_COMPRESSED_FLAG:-} -q "$@"
  elif nvm_has "wget"; then
    # Emulate curl with wget
    ARGS=$(nvm_echo "$@" | command sed -e 's/--progress-bar /--progress=bar /' \
                            -e 's/--compressed //' \
                            -e 's/--fail //' \
                            -e 's/-L //' \
                            -e 's/-I /--server-response /' \
                            -e 's/-s /-q /' \
                            -e 's/-sS /-nv /' \
                            -e 's/-o /-O /' \
                            -e 's/-C - /-c /')
    # shellcheck disable=SC2086
    eval wget $ARGS
  fi
}
# True when a 'node' binary exists outside of nvm's control (i.e. still
# resolvable after deactivating nvm in a subshell).
nvm_has_system_node() {
  [ "$(nvm deactivate >/dev/null 2>&1 && command -v node)" != '' ]
}
# Same check for a system-wide 'iojs' binary.
nvm_has_system_iojs() {
  [ "$(nvm deactivate >/dev/null 2>&1 && command -v iojs)" != '' ]
}
# True when the given version has an executable node binary installed under
# nvm's versions directory ('node.exe' on Windows).
nvm_is_version_installed() {
  if [ -z "${1-}" ]; then
    return 1
  fi
  local NVM_NODE_BINARY
  NVM_NODE_BINARY='node'
  if [ "_$(nvm_get_os)" = '_win' ]; then
    NVM_NODE_BINARY='node.exe'
  fi
  if [ -x "$(nvm_version_path "$1" 2>/dev/null)/bin/${NVM_NODE_BINARY}" ]; then
    return 0
  fi
  return 1
}
# Print the active npm version in parentheses, when npm is available.
nvm_print_npm_version() {
  if nvm_has "npm"; then
    command printf " (npm v$(npm --version 2>/dev/null))"
  fi
}
nvm_install_latest_npm() {
  # Upgrade npm for the currently-active node to the newest npm release
  # known to work on that node line, stepping through the required
  # intermediate npm versions where direct upgrades are unsupported.
  # Returns 1 when no node version can be determined, 2 when npm is
  # unavailable.
  nvm_echo 'Attempting to upgrade to the latest working version of npm...'
  local NODE_VERSION
  NODE_VERSION="$(nvm_strip_iojs_prefix "$(nvm_ls_current)")"
  # Resolve npm's version up front: the 'none' diagnostic below reports it,
  # so it must be populated before that message prints (previously it was
  # assigned only afterwards and the message always showed an empty value).
  local NPM_VERSION
  NPM_VERSION="$(npm --version 2>/dev/null)"
  if [ "${NODE_VERSION}" = 'system' ]; then
    NODE_VERSION="$(node --version)"
  elif [ "${NODE_VERSION}" = 'none' ]; then
    nvm_echo "Detected node version ${NODE_VERSION}, npm version v${NPM_VERSION}"
    NODE_VERSION=''
  fi
  if [ -z "${NODE_VERSION}" ]; then
    nvm_err 'Unable to obtain node version.'
    return 1
  fi
  if [ -z "${NPM_VERSION}" ]; then
    nvm_err 'Unable to obtain npm version.'
    return 2
  fi
  local NVM_NPM_CMD
  NVM_NPM_CMD='npm'
  if [ "${NVM_DEBUG-}" = 1 ]; then
    # in debug mode, echo the commands instead of running them
    nvm_echo "Detected node version ${NODE_VERSION}, npm version v${NPM_VERSION}"
    NVM_NPM_CMD='nvm_echo npm'
  fi
  # node 0.6.x and 0.9.x need special-casing below
  local NVM_IS_0_6
  NVM_IS_0_6=0
  if nvm_version_greater_than_or_equal_to "${NODE_VERSION}" 0.6.0 && nvm_version_greater 0.7.0 "${NODE_VERSION}"; then
    NVM_IS_0_6=1
  fi
  local NVM_IS_0_9
  NVM_IS_0_9=0
  if nvm_version_greater_than_or_equal_to "${NODE_VERSION}" 0.9.0 && nvm_version_greater 0.10.0 "${NODE_VERSION}"; then
    NVM_IS_0_9=1
  fi
  if [ $NVM_IS_0_6 -eq 1 ]; then
    nvm_echo '* `node` v0.6.x can only upgrade to `npm` v1.3.x'
    $NVM_NPM_CMD install -g npm@1.3
  elif [ $NVM_IS_0_9 -eq 0 ]; then
    # node 0.9 breaks here, for some reason
    if nvm_version_greater_than_or_equal_to "${NPM_VERSION}" 1.0.0 && nvm_version_greater 2.0.0 "${NPM_VERSION}"; then
      nvm_echo '* `npm` v1.x needs to first jump to `npm` v1.4.28 to be able to upgrade further'
      $NVM_NPM_CMD install -g npm@1.4.28
    elif nvm_version_greater_than_or_equal_to "${NPM_VERSION}" 2.0.0 && nvm_version_greater 3.0.0 "${NPM_VERSION}"; then
      nvm_echo '* `npm` v2.x needs to first jump to the latest v2 to be able to upgrade further'
      $NVM_NPM_CMD install -g npm@2
    fi
  fi
  if [ $NVM_IS_0_9 -eq 1 ] || [ $NVM_IS_0_6 -eq 1 ]; then
    nvm_echo '* node v0.6 and v0.9 are unable to upgrade further'
  elif nvm_version_greater 1.1.0 "${NODE_VERSION}"; then
    nvm_echo '* `npm` v4.5.x is the last version that works on `node` versions < v1.1.0'
    $NVM_NPM_CMD install -g npm@4.5
  elif nvm_version_greater 4.0.0 "${NODE_VERSION}"; then
    nvm_echo '* `npm` v5 and higher do not work on `node` versions below v4.0.0'
    $NVM_NPM_CMD install -g npm@4
  elif [ $NVM_IS_0_9 -eq 0 ] && [ $NVM_IS_0_6 -eq 0 ]; then
    # classify the node version into the bands that determine the
    # maximum supported npm major/minor
    local NVM_IS_4_4_OR_BELOW
    NVM_IS_4_4_OR_BELOW=0
    if nvm_version_greater 4.5.0 "${NODE_VERSION}"; then
      NVM_IS_4_4_OR_BELOW=1
    fi
    local NVM_IS_5_OR_ABOVE
    NVM_IS_5_OR_ABOVE=0
    if [ $NVM_IS_4_4_OR_BELOW -eq 0 ] && nvm_version_greater_than_or_equal_to "${NODE_VERSION}" 5.0.0; then
      NVM_IS_5_OR_ABOVE=1
    fi
    local NVM_IS_6_OR_ABOVE
    NVM_IS_6_OR_ABOVE=0
    local NVM_IS_6_2_OR_ABOVE
    NVM_IS_6_2_OR_ABOVE=0
    if [ $NVM_IS_5_OR_ABOVE -eq 1 ] && nvm_version_greater_than_or_equal_to "${NODE_VERSION}" 6.0.0; then
      NVM_IS_6_OR_ABOVE=1
      if nvm_version_greater_than_or_equal_to "${NODE_VERSION}" 6.2.0; then
        NVM_IS_6_2_OR_ABOVE=1
      fi
    fi
    local NVM_IS_9_OR_ABOVE
    NVM_IS_9_OR_ABOVE=0
    local NVM_IS_9_3_OR_ABOVE
    NVM_IS_9_3_OR_ABOVE=0
    if [ $NVM_IS_6_2_OR_ABOVE -eq 1 ] && nvm_version_greater_than_or_equal_to "${NODE_VERSION}" 9.0.0; then
      NVM_IS_9_OR_ABOVE=1
      if nvm_version_greater_than_or_equal_to "${NODE_VERSION}" 9.3.0; then
        NVM_IS_9_3_OR_ABOVE=1
      fi
    fi
    local NVM_IS_10_OR_ABOVE
    NVM_IS_10_OR_ABOVE=0
    if [ $NVM_IS_9_3_OR_ABOVE -eq 1 ] && nvm_version_greater_than_or_equal_to "${NODE_VERSION}" 10.0.0; then
      NVM_IS_10_OR_ABOVE=1
    fi
    if [ $NVM_IS_4_4_OR_BELOW -eq 1 ] || {
      [ $NVM_IS_5_OR_ABOVE -eq 1 ] && nvm_version_greater 5.10.0 "${NODE_VERSION}"; \
    }; then
      nvm_echo '* `npm` `v5.3.x` is the last version that works on `node` 4.x versions below v4.4, or 5.x versions below v5.10, due to `Buffer.alloc`'
      $NVM_NPM_CMD install -g npm@5.3
    elif [ $NVM_IS_4_4_OR_BELOW -eq 0 ] && nvm_version_greater 4.7.0 "${NODE_VERSION}"; then
      nvm_echo '* `npm` `v5.4.1` is the last version that works on `node` `v4.5` and `v4.6`'
      $NVM_NPM_CMD install -g npm@5.4.1
    elif [ $NVM_IS_6_OR_ABOVE -eq 0 ]; then
      nvm_echo '* `npm` `v5.x` is the last version that works on `node` below `v6.0.0`'
      $NVM_NPM_CMD install -g npm@5
    elif \
      { [ $NVM_IS_6_OR_ABOVE -eq 1 ] && [ $NVM_IS_6_2_OR_ABOVE -eq 0 ]; } \
      || { [ $NVM_IS_9_OR_ABOVE -eq 1 ] && [ $NVM_IS_9_3_OR_ABOVE -eq 0 ]; } \
    ; then
      nvm_echo '* `npm` `v6.9` is the last version that works on `node` `v6.0.x`, `v6.1.x`, `v9.0.x`, `v9.1.x`, or `v9.2.x`'
      $NVM_NPM_CMD install -g npm@6.9
    elif [ $NVM_IS_10_OR_ABOVE -eq 0 ]; then
      nvm_echo '* `npm` `v6.x` is the last version that works on `node` below `v10.0.0`'
      $NVM_NPM_CMD install -g npm@6
    else
      nvm_echo '* Installing latest `npm`; if this does not work on your node version, please report a bug!'
      $NVM_NPM_CMD install -g npm
    fi
  fi
  nvm_echo "* npm upgraded to: v$(npm --version 2>/dev/null)"
}
# Make zsh glob matching behave same as bash
# This fixes the "zsh: no matches found" errors
if [ -z "${NVM_CD_FLAGS-}" ]; then
  export NVM_CD_FLAGS=''
fi
if nvm_is_zsh; then
  # -q makes zsh's cd quiet (no chpwd hooks / output)
  NVM_CD_FLAGS="-q"
fi
# Auto detect the NVM_DIR when not set
if [ -z "${NVM_DIR-}" ]; then
  # shellcheck disable=SC2128
  if [ -n "${BASH_SOURCE-}" ]; then
    # shellcheck disable=SC2169,SC3054
    NVM_SCRIPT_SOURCE="${BASH_SOURCE[0]}"
  fi
  # Resolve the directory containing this script, falling back to $0
  # for shells without BASH_SOURCE (e.g. plain sh, zsh).
  NVM_DIR="$(nvm_cd ${NVM_CD_FLAGS} "$(dirname "${NVM_SCRIPT_SOURCE:-$0}")" >/dev/null && \pwd)"
  export NVM_DIR
else
  # https://unix.stackexchange.com/a/198289
  case $NVM_DIR in
    *[!/]*/)
      # Strip trailing slashes so later path comparisons and joins work.
      NVM_DIR="${NVM_DIR%"${NVM_DIR##*[!/]}"}"
      export NVM_DIR
      nvm_err "Warning: \$NVM_DIR should not have trailing slashes"
    ;;
  esac
fi
unset NVM_SCRIPT_SOURCE 2>/dev/null
nvm_tree_contains_path() {
  # Succeed iff node_path ($2) lives somewhere underneath tree ($1).
  # Walks parent directories of node_path until it hits the tree root,
  # the filesystem root, or stops making progress.
  local tree
  tree="${1-}"
  local node_path
  node_path="${2-}"
  if [ -z "${tree}" ] || [ -z "${node_path}" ]; then
    nvm_err "both the tree and the node path are required"
    return 2
  fi
  local child
  child="${node_path}"
  local parent
  parent=$(dirname "${child}")
  until [ -z "${parent}" ] || [ "${parent}" = '.' ] || [ "${parent}" = '/' ] ||
        [ "${parent}" = "${tree}" ] || [ "${parent}" = "${child}" ]; do
    child="${parent}"
    parent=$(dirname "${child}")
  done
  [ "${parent}" = "${tree}" ]
}
nvm_find_project_dir() {
  # Walk upward from $PWD to the nearest directory that looks like a
  # project root (contains package.json or node_modules); prints the
  # empty string when none is found before the filesystem root.
  local dir
  dir="${PWD}"
  while [ -n "${dir}" ] && [ ! -f "${dir}/package.json" ] && [ ! -d "${dir}/node_modules" ]; do
    dir="${dir%/*}"
  done
  nvm_echo "${dir}"
}
# Traverse up in directory tree to find containing folder
nvm_find_up() {
  # Prints the closest ancestor of $PWD (inclusive) containing file $1,
  # or the empty string when the search reaches the filesystem root.
  local dir
  dir="${PWD}"
  while [ -n "${dir}" ] && [ ! -f "${dir}/${1-}" ]; do
    dir="${dir%/*}"
  done
  nvm_echo "${dir}"
}
nvm_find_nvmrc() {
  # Print the full path of the nearest .nvmrc above $PWD; print nothing
  # when no .nvmrc exists.
  local NVMRC_DIR
  NVMRC_DIR="$(nvm_find_up '.nvmrc')"
  if [ -e "${NVMRC_DIR}/.nvmrc" ]; then
    nvm_echo "${NVMRC_DIR}/.nvmrc"
  fi
}
# Obtain nvm version from rc file
# Sets NVM_RC_VERSION (exported) from the first line of the nearest
# .nvmrc. Returns 1 when no .nvmrc exists, 2 when it is empty.
# NVM_SILENT=1 suppresses all diagnostics.
nvm_rc_version() {
  export NVM_RC_VERSION=''
  local NVMRC_PATH
  NVMRC_PATH="$(nvm_find_nvmrc)"
  if [ ! -e "${NVMRC_PATH}" ]; then
    if [ "${NVM_SILENT:-0}" -ne 1 ]; then
      nvm_err "No .nvmrc file found"
    fi
    return 1
  fi
  # first line only; strip CR so Windows-edited files work
  NVM_RC_VERSION="$(command head -n 1 "${NVMRC_PATH}" | command tr -d '\r')" || command printf ''
  if [ -z "${NVM_RC_VERSION}" ]; then
    if [ "${NVM_SILENT:-0}" -ne 1 ]; then
      nvm_err "Warning: empty .nvmrc file found at \"${NVMRC_PATH}\""
    fi
    return 2
  fi
  if [ "${NVM_SILENT:-0}" -ne 1 ]; then
    nvm_echo "Found '${NVMRC_PATH}' with version <${NVM_RC_VERSION}>"
  fi
}
# Print the local clang version number, handling both "clang version X"
# and "Apple clang version X" banner layouts; any "-suffix" is stripped.
nvm_clang_version() {
  clang --version | command awk '{ if ($2 == "version") print $3; else if ($3 == "version") print $4 }' | command sed 's/-.*$//g'
}
# Print the local curl version number ("curl X.Y.Z ..." banner),
# stripping any "-suffix".
nvm_curl_version() {
  curl -V | command awk '{ if ($1 == "curl") print $2 }' | command sed 's/-.*$//g'
}
# Succeed iff version $1 is strictly greater than version $2, comparing
# up to three dot-separated numeric groups ('v' prefixes ignored).
# In the awk program, exit(0) means "greater" (shell success) and any
# nonzero exit means "not greater"; a non-numeric group in $2 counts
# as smaller, while one in $1 counts as not-greater.
nvm_version_greater() {
  command awk 'BEGIN {
  if (ARGV[1] == "" || ARGV[2] == "") exit(1)
  split(ARGV[1], a, /\./);
  split(ARGV[2], b, /\./);
  for (i=1; i<=3; i++) {
    if (a[i] && a[i] !~ /^[0-9]+$/) exit(2);
    if (b[i] && b[i] !~ /^[0-9]+$/) { exit(0); }
    if (a[i] < b[i]) exit(3);
    else if (a[i] > b[i]) exit(0);
  }
  exit(4)
}' "${1#v}" "${2#v}"
}
# Succeed iff version $1 is greater than or equal to version $2,
# comparing up to three dot-separated numeric groups ('v' prefixes
# ignored). awk exit 0 => true; nonzero => false.
nvm_version_greater_than_or_equal_to() {
  command awk 'BEGIN {
  if (ARGV[1] == "" || ARGV[2] == "") exit(1)
  split(ARGV[1], a, /\./);
  split(ARGV[2], b, /\./);
  for (i=1; i<=3; i++) {
    if (a[i] && a[i] !~ /^[0-9]+$/) exit(2);
    if (a[i] < b[i]) exit(3);
    else if (a[i] > b[i]) exit(0);
  }
  exit(0)
}' "${1#v}" "${2#v}"
}
nvm_version_dir() {
  # Print the installation root for a given layout generation:
  # '' or 'new' -> current node layout, 'iojs' -> io.js layout,
  # 'old' -> the pre-0.12 flat layout directly under $NVM_DIR.
  local WHICH_DIR
  WHICH_DIR="${1-}"
  case "${WHICH_DIR}" in
    '' | new)
      nvm_echo "${NVM_DIR}/versions/node"
      ;;
    iojs)
      nvm_echo "${NVM_DIR}/versions/io.js"
      ;;
    old)
      nvm_echo "${NVM_DIR}"
      ;;
    *)
      nvm_err 'unknown version dir'
      return 3
      ;;
  esac
}
nvm_alias_path() {
  # Aliases live under the old-layout root: $NVM_DIR/alias.
  local NVM_OLD_DIR
  NVM_OLD_DIR="$(nvm_version_dir old)"
  nvm_echo "${NVM_OLD_DIR}/alias"
}
nvm_version_path() {
  # Print the install directory for a version string, picking the
  # io.js tree, the pre-0.12 flat tree, or the current tree.
  local TARGET_VERSION
  TARGET_VERSION="${1-}"
  if [ -z "${TARGET_VERSION}" ]; then
    nvm_err 'version is required'
    return 3
  fi
  if nvm_is_iojs_version "${TARGET_VERSION}"; then
    nvm_echo "$(nvm_version_dir iojs)/$(nvm_strip_iojs_prefix "${TARGET_VERSION}")"
  elif nvm_version_greater 0.12.0 "${TARGET_VERSION}"; then
    # versions below 0.12 predate the versions/ layout
    nvm_echo "$(nvm_version_dir old)/${TARGET_VERSION}"
  else
    nvm_echo "$(nvm_version_dir new)/${TARGET_VERSION}"
  fi
}
nvm_ensure_version_installed() {
  # Verify that the requested version (or 'system') is installed;
  # on failure, print install instructions to stderr and return 1.
  local PROVIDED_VERSION
  PROVIDED_VERSION="${1-}"
  if [ "${PROVIDED_VERSION}" = 'system' ]; then
    if nvm_has_system_iojs || nvm_has_system_node; then
      return 0
    fi
    nvm_err "N/A: no system version of node/io.js is installed."
    return 1
  fi
  local LOCAL_VERSION
  local EXIT_CODE
  LOCAL_VERSION="$(nvm_version "${PROVIDED_VERSION}")"
  EXIT_CODE="$?"
  local NVM_VERSION_DIR
  if [ "${EXIT_CODE}" = "0" ] && nvm_is_version_installed "${LOCAL_VERSION}"; then
    return 0
  fi
  # not installed: report with the resolved alias target when available
  if VERSION="$(nvm_resolve_alias "${PROVIDED_VERSION}")"; then
    nvm_err "N/A: version \"${PROVIDED_VERSION} -> ${VERSION}\" is not yet installed."
  else
    local PREFIXED_VERSION
    PREFIXED_VERSION="$(nvm_ensure_version_prefix "${PROVIDED_VERSION}")"
    nvm_err "N/A: version \"${PREFIXED_VERSION:-$PROVIDED_VERSION}\" is not yet installed."
  fi
  nvm_err ""
  nvm_err "You need to run \"nvm install ${PROVIDED_VERSION}\" to install it before using it."
  return 1
}
# Expand a version using the version cache
nvm_version() {
  # Resolve a version pattern to the highest matching installed version.
  # Empty pattern means "current"; prints 'N/A' and returns 3 when
  # nothing matches.
  local PATTERN
  PATTERN="${1:-current}"
  if [ "${PATTERN}" = 'current' ]; then
    nvm_ls_current
    return $?
  fi
  local NVM_NODE_PREFIX
  NVM_NODE_PREFIX="$(nvm_node_prefix)"
  # a bare flavor name ("node"/"node-") means the latest stable release
  case "_${PATTERN}" in
    "_${NVM_NODE_PREFIX}" | "_${NVM_NODE_PREFIX}-")
      PATTERN="stable"
      ;;
  esac
  local VERSION
  VERSION="$(nvm_ls "${PATTERN}" | command tail -1)"
  if [ -z "${VERSION}" ] || [ "_${VERSION}" = "_N/A" ]; then
    nvm_echo "N/A"
    return 3
  fi
  nvm_echo "${VERSION}"
}
# Resolve a remote pattern (or implicit alias) to the newest matching
# remote version. Prints the match; returns 3 when the result is 'N/A'.
# With NVM_VERSION_ONLY set, only the version field (first word) is
# printed, dropping any LTS annotation.
nvm_remote_version() {
  local PATTERN
  PATTERN="${1-}"
  local VERSION
  if nvm_validate_implicit_alias "${PATTERN}" 2>/dev/null; then
    case "${PATTERN}" in
      "$(nvm_iojs_prefix)")
        # `&&:` keeps the assignment's failure status from aborting under set -e
        VERSION="$(NVM_LTS="${NVM_LTS-}" nvm_ls_remote_iojs | command tail -1)" &&:
        ;;
      *)
        VERSION="$(NVM_LTS="${NVM_LTS-}" nvm_ls_remote "${PATTERN}")" &&:
        ;;
    esac
  else
    VERSION="$(NVM_LTS="${NVM_LTS-}" nvm_remote_versions "${PATTERN}" | command tail -1)"
  fi
  if [ -n "${NVM_VERSION_ONLY-}" ]; then
    # print only the first whitespace-separated field of the result
    command awk 'BEGIN {
      n = split(ARGV[1], a);
      print a[1]
    }' "${VERSION}"
  else
    nvm_echo "${VERSION}"
  fi
  if [ "${VERSION}" = 'N/A' ]; then
    return 3
  fi
}
nvm_remote_versions() {
  # List all remote node and io.js versions matching PATTERN, with
  # io.js releases spliced in before node v4.0.0 (the merge point).
  # Explicit flavor patterns ("node", "iojs") restrict the listing.
  # Returns 3 when nothing matches, or the first nonzero exit code
  # from the underlying remote listings.
  local NVM_IOJS_PREFIX
  NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
  local NVM_NODE_PREFIX
  NVM_NODE_PREFIX="$(nvm_node_prefix)"
  local PATTERN
  PATTERN="${1-}"
  local NVM_FLAVOR
  if [ -n "${NVM_LTS-}" ]; then
    # LTS lines only exist for node
    NVM_FLAVOR="${NVM_NODE_PREFIX}"
  fi
  case "${PATTERN}" in
    "${NVM_IOJS_PREFIX}" | "io.js")
      NVM_FLAVOR="${NVM_IOJS_PREFIX}"
      unset PATTERN
      ;;
    "${NVM_NODE_PREFIX}")
      NVM_FLAVOR="${NVM_NODE_PREFIX}"
      unset PATTERN
      ;;
  esac
  if nvm_validate_implicit_alias "${PATTERN-}" 2>/dev/null; then
    nvm_err 'Implicit aliases are not supported in nvm_remote_versions.'
    return 1
  fi
  local NVM_LS_REMOTE_EXIT_CODE
  NVM_LS_REMOTE_EXIT_CODE=0
  local NVM_LS_REMOTE_PRE_MERGED_OUTPUT
  NVM_LS_REMOTE_PRE_MERGED_OUTPUT=''
  local NVM_LS_REMOTE_POST_MERGED_OUTPUT
  NVM_LS_REMOTE_POST_MERGED_OUTPUT=''
  if [ -z "${NVM_FLAVOR-}" ] || [ "${NVM_FLAVOR-}" = "${NVM_NODE_PREFIX}" ]; then
    local NVM_LS_REMOTE_OUTPUT
    # extra space is needed here to avoid weird behavior when `nvm_ls_remote` ends in a `*`
    NVM_LS_REMOTE_OUTPUT="$(NVM_LTS="${NVM_LTS-}" nvm_ls_remote "${PATTERN-}") " &&:
    NVM_LS_REMOTE_EXIT_CODE=$?
    # split output into two: before and from v4.0.0 (io.js merged there)
    NVM_LS_REMOTE_PRE_MERGED_OUTPUT="${NVM_LS_REMOTE_OUTPUT%%v4\.0\.0*}"
    NVM_LS_REMOTE_POST_MERGED_OUTPUT="${NVM_LS_REMOTE_OUTPUT#$NVM_LS_REMOTE_PRE_MERGED_OUTPUT}"
  fi
  local NVM_LS_REMOTE_IOJS_EXIT_CODE
  NVM_LS_REMOTE_IOJS_EXIT_CODE=0
  local NVM_LS_REMOTE_IOJS_OUTPUT
  NVM_LS_REMOTE_IOJS_OUTPUT=''
  if [ -z "${NVM_LTS-}" ] && {
    [ -z "${NVM_FLAVOR-}" ] || [ "${NVM_FLAVOR-}" = "${NVM_IOJS_PREFIX}" ];
  }; then
    NVM_LS_REMOTE_IOJS_OUTPUT=$(nvm_ls_remote_iojs "${PATTERN-}") &&:
    NVM_LS_REMOTE_IOJS_EXIT_CODE=$?
  fi
  # the `sed` removes both blank lines, and only-whitespace lines (see "weird behavior" ~19 lines up)
  VERSIONS="$(nvm_echo "${NVM_LS_REMOTE_PRE_MERGED_OUTPUT}
${NVM_LS_REMOTE_IOJS_OUTPUT}
${NVM_LS_REMOTE_POST_MERGED_OUTPUT}" | nvm_grep -v "N/A" | command sed '/^ *$/d')"
  if [ -z "${VERSIONS}" ]; then
    nvm_echo 'N/A'
    return 3
  fi
  # the `sed` is to remove trailing whitespaces (see "weird behavior" ~25 lines up)
  nvm_echo "${VERSIONS}" | command sed 's/ *$//g'
  # Propagate the first nonzero listing failure. The previous
  # `return $A || $B` always returned $A — the `|| $B` arm was dead
  # code, silently discarding io.js listing failures.
  if [ "${NVM_LS_REMOTE_EXIT_CODE}" -ne 0 ]; then
    return "${NVM_LS_REMOTE_EXIT_CODE}"
  fi
  return "${NVM_LS_REMOTE_IOJS_EXIT_CODE}"
}
nvm_is_valid_version() {
  # A value is valid when it is an implicit alias, a bare flavor name
  # ("iojs"/"node"), or parses as a version number (>= 0).
  if nvm_validate_implicit_alias "${1-}" 2>/dev/null; then
    return 0
  fi
  if [ "${1-}" = "$(nvm_iojs_prefix)" ] || [ "${1-}" = "$(nvm_node_prefix)" ]; then
    return 0
  fi
  local VERSION
  VERSION="$(nvm_strip_iojs_prefix "${1-}")"
  nvm_version_greater_than_or_equal_to "${VERSION}" 0
}
# Convert a version like v1.22.3 into a single sortable integer:
# the major group followed by the minor and patch groups, each
# zero-padded to 6 digits (1.22.3 -> 1000022000003).
nvm_normalize_version() {
  command awk 'BEGIN {
    split(ARGV[1], a, /\./);
    printf "%d%06d%06d\n", a[1], a[2], a[3];
    exit;
  }' "${1#v}"
}
nvm_ensure_version_prefix() {
  # Guarantee a leading 'v' on the numeric portion of a version,
  # re-attaching the iojs- prefix when the input carried one.
  local PREFIXED_VERSION
  PREFIXED_VERSION="$(nvm_strip_iojs_prefix "${1-}" | command sed -e 's/^\([0-9]\)/v\1/g')"
  if ! nvm_is_iojs_version "${1-}"; then
    nvm_echo "${PREFIXED_VERSION}"
  else
    nvm_add_iojs_prefix "${PREFIXED_VERSION}"
  fi
}
nvm_format_version() {
  # Normalize a version string to exactly three dot-separated groups
  # (vX.Y.Z), padding missing groups with ".0" and truncating extras.
  local FORMATTED
  FORMATTED="$(nvm_ensure_version_prefix "${1-}")"
  while [ "$(nvm_num_version_groups "${FORMATTED}")" -lt 3 ]; do
    FORMATTED="${FORMATTED%.}.0"
  done
  nvm_echo "${FORMATTED}" | command cut -f1-3 -d.
}
nvm_num_version_groups() {
  # Count the dot-separated groups of a version string:
  # '' -> 0, 'v1' -> 1, '1.2' -> 2, 'v1.2.3' -> 3 (trailing dot ignored).
  local VERSION
  VERSION="${1-}"
  VERSION="${VERSION#v}"
  VERSION="${VERSION%.}"
  if [ -z "${VERSION}" ]; then
    nvm_echo "0"
    return
  fi
  # keep only the dots, then prepend one more: N dots => N+1 groups
  local DOTS_ONLY
  DOTS_ONLY=$(nvm_echo "${VERSION}" | command sed -e 's/[^\.]//g')
  local GROUP_MARKERS
  GROUP_MARKERS=".${DOTS_ONLY}"
  nvm_echo "${#GROUP_MARKERS}"
}
# Remove every nvm-managed directory whose name ends with suffix $2
# from the PATH-like string $1, covering both the old flat layout
# ($NVM_DIR/<v>) and the new layout ($NVM_DIR/versions/<flavor>/<v>),
# in leading, middle, and trailing positions.
nvm_strip_path() {
  if [ -z "${NVM_DIR-}" ]; then
    nvm_err '${NVM_DIR} not set!'
    return 1
  fi
  nvm_echo "${1-}" | command sed \
    -e "s#${NVM_DIR}/[^/]*${2-}[^:]*:##g" \
    -e "s#:${NVM_DIR}/[^/]*${2-}[^:]*##g" \
    -e "s#${NVM_DIR}/[^/]*${2-}[^:]*##g" \
    -e "s#${NVM_DIR}/versions/[^/]*/[^/]*${2-}[^:]*:##g" \
    -e "s#:${NVM_DIR}/versions/[^/]*/[^/]*${2-}[^:]*##g" \
    -e "s#${NVM_DIR}/versions/[^/]*/[^/]*${2-}[^:]*##g"
}
# Produce a new PATH-like string: $1 is the current path, $2 the bin
# suffix (e.g. "/bin"), $3 the new nvm version root to activate.
nvm_change_path() {
  # if there’s no initial path, just return the supplementary path
  if [ -z "${1-}" ]; then
    nvm_echo "${3-}${2-}"
  # if the initial path doesn’t contain an nvm path, prepend the supplementary
  # path
  elif ! nvm_echo "${1-}" | nvm_grep -q "${NVM_DIR}/[^/]*${2-}" \
    && ! nvm_echo "${1-}" | nvm_grep -q "${NVM_DIR}/versions/[^/]*/[^/]*${2-}"; then
    nvm_echo "${3-}${2-}:${1-}"
  # if the initial path contains BOTH an nvm path (checked for above) and
  # that nvm path is preceded by a system binary path, just prepend the
  # supplementary path instead of replacing it.
  # https://github.com/nvm-sh/nvm/issues/1652#issuecomment-342571223
  elif nvm_echo "${1-}" | nvm_grep -Eq "(^|:)(/usr(/local)?)?${2-}:.*${NVM_DIR}/[^/]*${2-}" \
    || nvm_echo "${1-}" | nvm_grep -Eq "(^|:)(/usr(/local)?)?${2-}:.*${NVM_DIR}/versions/[^/]*/[^/]*${2-}"; then
    nvm_echo "${3-}${2-}:${1-}"
  # use sed to replace the existing nvm path with the supplementary path. This
  # preserves the order of the path.
  else
    nvm_echo "${1-}" | command sed \
      -e "s#${NVM_DIR}/[^/]*${2-}[^:]*#${3-}${2-}#" \
      -e "s#${NVM_DIR}/versions/[^/]*/[^/]*${2-}[^:]*#${3-}${2-}#"
  fi
}
nvm_binary_available() {
  # binaries started with node 0.8.6
  local BARE_VERSION
  BARE_VERSION="$(nvm_strip_iojs_prefix "${1-}")"
  nvm_version_greater_than_or_equal_to "${BARE_VERSION}" v0.8.6
}
nvm_set_colors() {
  # Validate and persist a 5-character color scheme into NVM_COLORS
  # (installed, LTS/system, current, not-installed, default).
  # Returns 17 when the input is not exactly 5 recognized color codes.
  if [ "${#1}" -ne 5 ] || ! nvm_echo "$1" | nvm_grep -E "^[rRgGbBcCyYmMkKeW]{1,}$" 1>/dev/null; then
    return 17
  fi
  local INSTALLED_COLOR
  local LTS_AND_SYSTEM_COLOR
  local CURRENT_COLOR
  local NOT_INSTALLED_COLOR
  local DEFAULT_COLOR
  INSTALLED_COLOR="$(echo "$1" | awk '{ print substr($0, 1, 1); }')"
  LTS_AND_SYSTEM_COLOR="$(echo "$1" | awk '{ print substr($0, 2, 1); }')"
  CURRENT_COLOR="$(echo "$1" | awk '{ print substr($0, 3, 1); }')"
  NOT_INSTALLED_COLOR="$(echo "$1" | awk '{ print substr($0, 4, 1); }')"
  DEFAULT_COLOR="$(echo "$1" | awk '{ print substr($0, 5, 1); }')"
  if nvm_has_colors; then
    # show the scheme rendered in its own colors
    nvm_echo_with_colors "Setting colors to: \033[$(nvm_print_color_code "${INSTALLED_COLOR}") ${INSTALLED_COLOR}\033[$(nvm_print_color_code "${LTS_AND_SYSTEM_COLOR}") ${LTS_AND_SYSTEM_COLOR}\033[$(nvm_print_color_code "${CURRENT_COLOR}") ${CURRENT_COLOR}\033[$(nvm_print_color_code "${NOT_INSTALLED_COLOR}") ${NOT_INSTALLED_COLOR}\033[$(nvm_print_color_code "${DEFAULT_COLOR}") ${DEFAULT_COLOR}\033[0m"
  else
    nvm_echo "Setting colors to: ${INSTALLED_COLOR} ${LTS_AND_SYSTEM_COLOR} ${CURRENT_COLOR} ${NOT_INSTALLED_COLOR} ${DEFAULT_COLOR}"
    nvm_echo "WARNING: Colors may not display because they are not supported in this shell."
  fi
  export NVM_COLORS="$1"
}
# Print the ANSI color code for slot $1 of the configured scheme:
# 1 installed, 2 LTS/system, 3 current, 4 not installed, 5 default,
# 6 the bold variant of the LTS/system color. Falls back to built-in
# defaults when NVM_COLORS is unset.
nvm_get_colors() {
  local COLOR
  local SYS_COLOR
  if [ -n "${NVM_COLORS-}" ]; then
    case $1 in
      1) COLOR=$(nvm_print_color_code "$(echo "$NVM_COLORS" | awk '{ print substr($0, 1, 1); }')");;
      2) COLOR=$(nvm_print_color_code "$(echo "$NVM_COLORS" | awk '{ print substr($0, 2, 1); }')");;
      3) COLOR=$(nvm_print_color_code "$(echo "$NVM_COLORS" | awk '{ print substr($0, 3, 1); }')");;
      4) COLOR=$(nvm_print_color_code "$(echo "$NVM_COLORS" | awk '{ print substr($0, 4, 1); }')");;
      5) COLOR=$(nvm_print_color_code "$(echo "$NVM_COLORS" | awk '{ print substr($0, 5, 1); }')");;
      6)
        # bold variant of slot 2: swap the "0;" intensity prefix for "1;"
        SYS_COLOR=$(nvm_print_color_code "$(echo "$NVM_COLORS" | awk '{ print substr($0, 2, 1); }')")
        COLOR=$(nvm_echo "$SYS_COLOR" | command tr '0;' '1;')
        ;;
      *)
        nvm_err "Invalid color index, ${1-}"
        return 1
        ;;
    esac
  else
    # built-in defaults: blue, yellow, green, red, white, bold yellow
    case $1 in
      1) COLOR='0;34m';;
      2) COLOR='0;33m';;
      3) COLOR='0;32m';;
      4) COLOR='0;31m';;
      5) COLOR='0;37m';;
      6) COLOR='1;33m';;
      *)
        nvm_err "Invalid color index, ${1-}"
        return 1
        ;;
    esac
  fi
  echo "$COLOR"
}
# Map a single color-scheme letter to its ANSI SGR code fragment
# (lowercase = normal intensity, uppercase = bold). Returns 1 on an
# unrecognized letter.
nvm_print_color_code() {
  case "${1-}" in
    'r') nvm_echo '0;31m';;
    'R') nvm_echo '1;31m';;
    'g') nvm_echo '0;32m';;
    'G') nvm_echo '1;32m';;
    'b') nvm_echo '0;34m';;
    'B') nvm_echo '1;34m';;
    'c') nvm_echo '0;36m';;
    'C') nvm_echo '1;36m';;
    'm') nvm_echo '0;35m';;
    'M') nvm_echo '1;35m';;
    'y') nvm_echo '0;33m';;
    'Y') nvm_echo '1;33m';;
    'k') nvm_echo '0;30m';;
    'K') nvm_echo '1;30m';;
    'e') nvm_echo '0;37m';;
    'W') nvm_echo '1;37m';;
    *) nvm_err 'Invalid color code';
      return 1
    ;;
  esac
}
nvm_print_formatted_alias() {
  # Print "ALIAS -> DEST" (plus the resolved VERSION when it differs),
  # colorized according to install state. Reads DEFAULT ('true' marks
  # the default alias), NVM_CURRENT, NVM_LTS, and NVM_NO_COLORS from
  # the environment — all are expanded with unset-guards (${VAR-}) for
  # consistency with the rest of this file and safety under `set -u`.
  local ALIAS
  ALIAS="${1-}"
  local DEST
  DEST="${2-}"
  local VERSION
  VERSION="${3-}"
  if [ -z "${VERSION}" ]; then
    VERSION="$(nvm_version "${DEST}")" ||:
  fi
  local VERSION_FORMAT
  local ALIAS_FORMAT
  local DEST_FORMAT
  local INSTALLED_COLOR
  local SYSTEM_COLOR
  local CURRENT_COLOR
  local NOT_INSTALLED_COLOR
  local DEFAULT_COLOR
  local LTS_COLOR
  INSTALLED_COLOR=$(nvm_get_colors 1)
  SYSTEM_COLOR=$(nvm_get_colors 2)
  CURRENT_COLOR=$(nvm_get_colors 3)
  NOT_INSTALLED_COLOR=$(nvm_get_colors 4)
  DEFAULT_COLOR=$(nvm_get_colors 5)
  LTS_COLOR=$(nvm_get_colors 6)
  ALIAS_FORMAT='%s'
  DEST_FORMAT='%s'
  VERSION_FORMAT='%s'
  local NEWLINE
  NEWLINE='\n'
  if [ "_${DEFAULT-}" = '_true' ]; then
    NEWLINE=' (default)\n'
  fi
  local ARROW
  ARROW='->'
  if [ -z "${NVM_NO_COLORS-}" ] && nvm_has_colors; then
    ARROW='\033[0;90m->\033[0m'
    if [ "_${DEFAULT-}" = '_true' ]; then
      NEWLINE=" \033[${DEFAULT_COLOR}(default)\033[0m\n"
    fi
    # pick one color for all three fields based on install state
    if [ "_${VERSION}" = "_${NVM_CURRENT-}" ]; then
      ALIAS_FORMAT="\033[${CURRENT_COLOR}%s\033[0m"
      DEST_FORMAT="\033[${CURRENT_COLOR}%s\033[0m"
      VERSION_FORMAT="\033[${CURRENT_COLOR}%s\033[0m"
    elif nvm_is_version_installed "${VERSION}"; then
      ALIAS_FORMAT="\033[${INSTALLED_COLOR}%s\033[0m"
      DEST_FORMAT="\033[${INSTALLED_COLOR}%s\033[0m"
      VERSION_FORMAT="\033[${INSTALLED_COLOR}%s\033[0m"
    elif [ "${VERSION}" = '∞' ] || [ "${VERSION}" = 'N/A' ]; then
      ALIAS_FORMAT="\033[${NOT_INSTALLED_COLOR}%s\033[0m"
      DEST_FORMAT="\033[${NOT_INSTALLED_COLOR}%s\033[0m"
      VERSION_FORMAT="\033[${NOT_INSTALLED_COLOR}%s\033[0m"
    fi
    if [ "_${NVM_LTS-}" = '_true' ]; then
      ALIAS_FORMAT="\033[${LTS_COLOR}%s\033[0m"
    fi
    if [ "_${DEST%/*}" = "_lts" ]; then
      DEST_FORMAT="\033[${LTS_COLOR}%s\033[0m"
    fi
  elif [ "_${VERSION}" != '_∞' ] && [ "_${VERSION}" != '_N/A' ]; then
    # no colors available: mark resolvable versions with a star
    VERSION_FORMAT='%s *'
  fi
  if [ "${DEST}" = "${VERSION}" ]; then
    command printf -- "${ALIAS_FORMAT} ${ARROW} ${VERSION_FORMAT}${NEWLINE}" "${ALIAS}" "${DEST}"
  else
    command printf -- "${ALIAS_FORMAT} ${ARROW} ${DEST_FORMAT} (${ARROW} ${VERSION_FORMAT})${NEWLINE}" "${ALIAS}" "${DEST}" "${VERSION}"
  fi
}
nvm_print_alias_path() {
  # Given the alias directory ($1) and a full alias file path ($2),
  # print the alias name with its resolved destination; prints nothing
  # when the alias cannot be resolved.
  local NVM_ALIAS_DIR
  NVM_ALIAS_DIR="${1-}"
  if [ -z "${NVM_ALIAS_DIR}" ]; then
    nvm_err 'An alias dir is required.'
    return 1
  fi
  local ALIAS_PATH
  ALIAS_PATH="${2-}"
  if [ -z "${ALIAS_PATH}" ]; then
    nvm_err 'An alias path is required.'
    return 2
  fi
  local ALIAS
  ALIAS="${ALIAS_PATH##${NVM_ALIAS_DIR}\/}"
  local DEST
  DEST="$(nvm_alias "${ALIAS}" 2>/dev/null)" ||:
  if [ -z "${DEST}" ]; then
    return 0
  fi
  NVM_NO_COLORS="${NVM_NO_COLORS-}" NVM_LTS="${NVM_LTS-}" DEFAULT=false nvm_print_formatted_alias "${ALIAS}" "${DEST}"
}
nvm_print_default_alias() {
  # Print a built-in (implicit) alias with its locally-resolved target;
  # prints nothing when the alias does not resolve.
  local ALIAS
  ALIAS="${1-}"
  if [ -z "${ALIAS}" ]; then
    nvm_err 'A default alias is required.'
    return 1
  fi
  local DEST
  DEST="$(nvm_print_implicit_alias local "${ALIAS}")"
  if [ -z "${DEST}" ]; then
    return 0
  fi
  NVM_NO_COLORS="${NVM_NO_COLORS-}" DEFAULT=true nvm_print_formatted_alias "${ALIAS}" "${DEST}"
}
nvm_make_alias() {
  # Persist VERSION ($2) into the alias file named ALIAS ($1).
  local ALIAS
  ALIAS="${1-}"
  local VERSION
  VERSION="${2-}"
  if [ -z "${ALIAS}" ]; then
    nvm_err "an alias name is required"
    return 1
  fi
  if [ -z "${VERSION}" ]; then
    nvm_err "an alias target version is required"
    return 2
  fi
  nvm_echo "${VERSION}" | tee "$(nvm_alias_path)/${ALIAS}" >/dev/null
}
# Print all aliases (optionally filtered by prefix $1) in three sorted
# sections: user aliases, implicit defaults (node/stable/unstable/iojs),
# and LTS aliases. Each section resolves its entries in parallel
# background jobs, then sorts the collected output.
nvm_list_aliases() {
  local ALIAS
  ALIAS="${1-}"
  local NVM_CURRENT
  NVM_CURRENT="$(nvm_ls_current)"
  local NVM_ALIAS_DIR
  NVM_ALIAS_DIR="$(nvm_alias_path)"
  command mkdir -p "${NVM_ALIAS_DIR}/lts"
  # stop zsh from aborting when the globs below match nothing
  nvm_is_zsh && unsetopt local_options nomatch
  (
    local ALIAS_PATH
    for ALIAS_PATH in "${NVM_ALIAS_DIR}/${ALIAS}"*; do
      NVM_NO_COLORS="${NVM_NO_COLORS-}" NVM_CURRENT="${NVM_CURRENT}" nvm_print_alias_path "${NVM_ALIAS_DIR}" "${ALIAS_PATH}" &
    done
    wait
  ) | sort
  (
    local ALIAS_NAME
    for ALIAS_NAME in "$(nvm_node_prefix)" "stable" "unstable"; do
      {
        # shellcheck disable=SC2030,SC2031 # (https://github.com/koalaman/shellcheck/issues/2217)
        # only show an implicit alias when no user alias shadows it
        if [ ! -f "${NVM_ALIAS_DIR}/${ALIAS_NAME}" ] && { [ -z "${ALIAS}" ] || [ "${ALIAS_NAME}" = "${ALIAS}" ]; }; then
          NVM_NO_COLORS="${NVM_NO_COLORS-}" NVM_CURRENT="${NVM_CURRENT}" nvm_print_default_alias "${ALIAS_NAME}"
        fi
      } &
    done
    wait
    ALIAS_NAME="$(nvm_iojs_prefix)"
    # shellcheck disable=SC2030,SC2031 # (https://github.com/koalaman/shellcheck/issues/2217)
    if [ ! -f "${NVM_ALIAS_DIR}/${ALIAS_NAME}" ] && { [ -z "${ALIAS}" ] || [ "${ALIAS_NAME}" = "${ALIAS}" ]; }; then
      NVM_NO_COLORS="${NVM_NO_COLORS-}" NVM_CURRENT="${NVM_CURRENT}" nvm_print_default_alias "${ALIAS_NAME}"
    fi
  ) | sort
  (
    local LTS_ALIAS
    # shellcheck disable=SC2030,SC2031 # (https://github.com/koalaman/shellcheck/issues/2217)
    for ALIAS_PATH in "${NVM_ALIAS_DIR}/lts/${ALIAS}"*; do
      {
        LTS_ALIAS="$(NVM_NO_COLORS="${NVM_NO_COLORS-}" NVM_LTS=true nvm_print_alias_path "${NVM_ALIAS_DIR}" "${ALIAS_PATH}")"
        if [ -n "${LTS_ALIAS}" ]; then
          nvm_echo "${LTS_ALIAS}"
        fi
      } &
    done
    wait
  ) | sort
  return
}
# Print the target stored for alias $1. Supports negative LTS indexing:
# 'lts/-1' is the newest LTS alias on disk, 'lts/-2' the one before, etc.
nvm_alias() {
  local ALIAS
  ALIAS="${1-}"
  if [ -z "${ALIAS}" ]; then
    nvm_err 'An alias is required.'
    return 1
  fi
  local NVM_ALIAS_DIR
  NVM_ALIAS_DIR="$(nvm_alias_path)"
  # 'lts/-N': pick the Nth-from-last LTS alias file (ls sorts by name)
  if [ "$(expr "${ALIAS}" : '^lts/-[1-9][0-9]*$')" -gt 0 ]; then
    local N
    N="$(echo "${ALIAS}" | cut -d '-' -f 2)"
    N=$((N+1))
    local RESULT
    RESULT="$(command ls "${NVM_ALIAS_DIR}/lts" | command tail -n "${N}" | command head -n 1)"
    # NOTE(review): the literal '*' check appears to guard against an
    # unexpanded-glob/empty result — confirm which case produces it
    if [ "${RESULT}" != '*' ]; then
      nvm_alias "lts/${RESULT}"
      return $?
    else
      nvm_err 'That many LTS releases do not exist yet.'
      return 2
    fi
  fi
  local NVM_ALIAS_PATH
  NVM_ALIAS_PATH="${NVM_ALIAS_DIR}/${ALIAS}"
  if [ ! -f "${NVM_ALIAS_PATH}" ]; then
    nvm_err 'Alias does not exist.'
    return 2
  fi
  command cat "${NVM_ALIAS_PATH}"
}
nvm_ls_current() {
  # Report the active version: 'none' when no node is on PATH, an
  # iojs-prefixed version for nvm-managed io.js, the node version for
  # nvm-managed node, and 'system' for any other node on PATH.
  local NVM_LS_CURRENT_NODE_PATH
  if ! NVM_LS_CURRENT_NODE_PATH="$(command which node 2>/dev/null)"; then
    nvm_echo 'none'
    return
  fi
  if nvm_tree_contains_path "$(nvm_version_dir iojs)" "${NVM_LS_CURRENT_NODE_PATH}"; then
    nvm_add_iojs_prefix "$(iojs --version 2>/dev/null)"
  elif nvm_tree_contains_path "${NVM_DIR}" "${NVM_LS_CURRENT_NODE_PATH}"; then
    local VERSION
    VERSION="$(node --version 2>/dev/null)"
    # node v0.6.21 mistakenly reported itself as v0.6.21-pre
    case "${VERSION}" in
      v0.6.21-pre) nvm_echo 'v0.6.21' ;;
      *) nvm_echo "${VERSION}" ;;
    esac
  else
    nvm_echo 'system'
  fi
}
# Follow a chain of aliases to its final target. Cycles resolve to '∞'.
# Falls back to implicit aliases when the pattern is not a stored alias.
# Returns 2 when nothing resolved to a different value.
nvm_resolve_alias() {
  if [ -z "${1-}" ]; then
    return 1
  fi
  local PATTERN
  PATTERN="${1-}"
  local ALIAS
  ALIAS="${PATTERN}"
  local ALIAS_TEMP
  local SEEN_ALIASES
  SEEN_ALIASES="${ALIAS}"
  while true; do
    ALIAS_TEMP="$(nvm_alias "${ALIAS}" 2>/dev/null || nvm_echo)"
    if [ -z "${ALIAS_TEMP}" ]; then
      break
    fi
    # SEEN_ALIASES holds \n-separated names; printf expands those
    # separators so grep can match whole lines — a repeat means a cycle
    if command printf "${SEEN_ALIASES}" | nvm_grep -q -e "^${ALIAS_TEMP}$"; then
      ALIAS="∞"
      break
    fi
    SEEN_ALIASES="${SEEN_ALIASES}\\n${ALIAS_TEMP}"
    ALIAS="${ALIAS_TEMP}"
  done
  if [ -n "${ALIAS}" ] && [ "_${ALIAS}" != "_${PATTERN}" ]; then
    local NVM_IOJS_PREFIX
    NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
    local NVM_NODE_PREFIX
    NVM_NODE_PREFIX="$(nvm_node_prefix)"
    # flavor names and the cycle marker pass through unmodified
    case "${ALIAS}" in
      '∞' | \
      "${NVM_IOJS_PREFIX}" | "${NVM_IOJS_PREFIX}-" | \
      "${NVM_NODE_PREFIX}")
        nvm_echo "${ALIAS}"
        ;;
      *)
        nvm_ensure_version_prefix "${ALIAS}"
        ;;
    esac
    return 0
  fi
  # no stored alias applied; try the implicit aliases (stable/unstable/…)
  if nvm_validate_implicit_alias "${PATTERN}" 2>/dev/null; then
    local IMPLICIT
    IMPLICIT="$(nvm_print_implicit_alias local "${PATTERN}" 2>/dev/null)"
    if [ -n "${IMPLICIT}" ]; then
      nvm_ensure_version_prefix "${IMPLICIT}"
    fi
  fi
  return 2
}
nvm_resolve_local_alias() {
  # Resolve an alias and expand the result to an installed version;
  # the cycle marker '∞' is passed through untouched.
  if [ -z "${1-}" ]; then
    return 1
  fi
  local RESOLVED
  local RESOLVE_STATUS
  RESOLVED="$(nvm_resolve_alias "${1-}")"
  RESOLVE_STATUS=$?
  if [ -z "${RESOLVED}" ]; then
    return $RESOLVE_STATUS
  fi
  case "_${RESOLVED}" in
    '_∞') nvm_echo "${RESOLVED}" ;;
    *) nvm_version "${RESOLVED}" ;;
  esac
}
# The directory/alias prefix used for io.js versions throughout nvm.
nvm_iojs_prefix() {
  nvm_echo 'iojs'
}
# The directory/alias prefix used for node versions throughout nvm.
nvm_node_prefix() {
  nvm_echo 'node'
}
nvm_is_iojs_version() {
  # True when the version string carries the "iojs-" prefix.
  case "${1-}" in
    iojs-*)
      return 0
      ;;
    *)
      return 1
      ;;
  esac
}
# Normalize any version-ish input to canonical io.js form: "iojs-vX.Y.Z".
nvm_add_iojs_prefix() {
  nvm_echo "$(nvm_iojs_prefix)-$(nvm_ensure_version_prefix "$(nvm_strip_iojs_prefix "${1-}")")"
}
nvm_strip_iojs_prefix() {
  # Drop a leading "iojs-" from the input; a bare "iojs" becomes empty.
  local NVM_IOJS_PREFIX
  NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
  case "${1-}" in
    "${NVM_IOJS_PREFIX}")
      nvm_echo
      ;;
    *)
      nvm_echo "${1#${NVM_IOJS_PREFIX}-}"
      ;;
  esac
}
# List installed versions matching PATTERN ($1), one per line, sorted
# by semver, with io.js versions carrying their "iojs-" prefix and
# 'system' appended when a system node/iojs exists. Prints 'N/A' and
# returns 3 when nothing matches.
nvm_ls() {
  local PATTERN
  PATTERN="${1-}"
  local VERSIONS
  VERSIONS=''
  if [ "${PATTERN}" = 'current' ]; then
    nvm_ls_current
    return
  fi
  local NVM_IOJS_PREFIX
  NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
  local NVM_NODE_PREFIX
  NVM_NODE_PREFIX="$(nvm_node_prefix)"
  local NVM_VERSION_DIR_IOJS
  NVM_VERSION_DIR_IOJS="$(nvm_version_dir "${NVM_IOJS_PREFIX}")"
  local NVM_VERSION_DIR_NEW
  NVM_VERSION_DIR_NEW="$(nvm_version_dir new)"
  local NVM_VERSION_DIR_OLD
  NVM_VERSION_DIR_OLD="$(nvm_version_dir old)"
  case "${PATTERN}" in
    "${NVM_IOJS_PREFIX}" | "${NVM_NODE_PREFIX}")
      PATTERN="${PATTERN}-"
      ;;
    *)
      # a pattern that resolves as a local alias short-circuits listing
      if nvm_resolve_local_alias "${PATTERN}"; then
        return
      fi
      PATTERN="$(nvm_ensure_version_prefix "${PATTERN}")"
      ;;
  esac
  if [ "${PATTERN}" = 'N/A' ]; then
    return
  fi
  # If it looks like an explicit version, don't do anything funny
  local NVM_PATTERN_STARTS_WITH_V
  case $PATTERN in
    v*) NVM_PATTERN_STARTS_WITH_V=true ;;
    *) NVM_PATTERN_STARTS_WITH_V=false ;;
  esac
  if [ $NVM_PATTERN_STARTS_WITH_V = true ] && [ "_$(nvm_num_version_groups "${PATTERN}")" = "_3" ]; then
    # full vX.Y.Z: just check node then io.js installation directly
    if nvm_is_version_installed "${PATTERN}"; then
      VERSIONS="${PATTERN}"
    elif nvm_is_version_installed "$(nvm_add_iojs_prefix "${PATTERN}")"; then
      VERSIONS="$(nvm_add_iojs_prefix "${PATTERN}")"
    fi
  else
    case "${PATTERN}" in
      "${NVM_IOJS_PREFIX}-" | "${NVM_NODE_PREFIX}-" | "system") ;;
      *)
        # partial versions match on a trailing dot (v1. matches v1.x.y)
        local NUM_VERSION_GROUPS
        NUM_VERSION_GROUPS="$(nvm_num_version_groups "${PATTERN}")"
        if [ "${NUM_VERSION_GROUPS}" = "2" ] || [ "${NUM_VERSION_GROUPS}" = "1" ]; then
          PATTERN="${PATTERN%.}."
        fi
        ;;
    esac
    nvm_is_zsh && setopt local_options shwordsplit
    nvm_is_zsh && unsetopt local_options markdirs
    # choose which layout directories to search based on the flavor
    local NVM_DIRS_TO_SEARCH1
    NVM_DIRS_TO_SEARCH1=''
    local NVM_DIRS_TO_SEARCH2
    NVM_DIRS_TO_SEARCH2=''
    local NVM_DIRS_TO_SEARCH3
    NVM_DIRS_TO_SEARCH3=''
    local NVM_ADD_SYSTEM
    NVM_ADD_SYSTEM=false
    if nvm_is_iojs_version "${PATTERN}"; then
      NVM_DIRS_TO_SEARCH1="${NVM_VERSION_DIR_IOJS}"
      PATTERN="$(nvm_strip_iojs_prefix "${PATTERN}")"
      if nvm_has_system_iojs; then
        NVM_ADD_SYSTEM=true
      fi
    elif [ "${PATTERN}" = "${NVM_NODE_PREFIX}-" ]; then
      NVM_DIRS_TO_SEARCH1="${NVM_VERSION_DIR_OLD}"
      NVM_DIRS_TO_SEARCH2="${NVM_VERSION_DIR_NEW}"
      PATTERN=''
      if nvm_has_system_node; then
        NVM_ADD_SYSTEM=true
      fi
    else
      NVM_DIRS_TO_SEARCH1="${NVM_VERSION_DIR_OLD}"
      NVM_DIRS_TO_SEARCH2="${NVM_VERSION_DIR_NEW}"
      NVM_DIRS_TO_SEARCH3="${NVM_VERSION_DIR_IOJS}"
      if nvm_has_system_iojs || nvm_has_system_node; then
        NVM_ADD_SYSTEM=true
      fi
    fi
    # empty/missing dirs are aliased to an earlier dir so find gets
    # three valid (possibly duplicate) arguments
    if ! [ -d "${NVM_DIRS_TO_SEARCH1}" ] || ! (command ls -1qA "${NVM_DIRS_TO_SEARCH1}" | nvm_grep -q .); then
      NVM_DIRS_TO_SEARCH1=''
    fi
    if ! [ -d "${NVM_DIRS_TO_SEARCH2}" ] || ! (command ls -1qA "${NVM_DIRS_TO_SEARCH2}" | nvm_grep -q .); then
      NVM_DIRS_TO_SEARCH2="${NVM_DIRS_TO_SEARCH1}"
    fi
    if ! [ -d "${NVM_DIRS_TO_SEARCH3}" ] || ! (command ls -1qA "${NVM_DIRS_TO_SEARCH3}" | nvm_grep -q .); then
      NVM_DIRS_TO_SEARCH3="${NVM_DIRS_TO_SEARCH2}"
    fi
    local SEARCH_PATTERN
    if [ -z "${PATTERN}" ]; then
      PATTERN='v'
      SEARCH_PATTERN='.*'
    else
      SEARCH_PATTERN="$(nvm_echo "${PATTERN}" | command sed 's#\.#\\\.#g;')"
    fi
    # find matching version dirs, tag each with its flavor, sort by
    # semver (numeric keys on the dot-separated groups), then restore
    # the "iojs-"/plain naming
    if [ -n "${NVM_DIRS_TO_SEARCH1}${NVM_DIRS_TO_SEARCH2}${NVM_DIRS_TO_SEARCH3}" ]; then
      VERSIONS="$(command find "${NVM_DIRS_TO_SEARCH1}"/* "${NVM_DIRS_TO_SEARCH2}"/* "${NVM_DIRS_TO_SEARCH3}"/* -name . -o -type d -prune -o -path "${PATTERN}*" \
        | command sed -e "
            s#${NVM_VERSION_DIR_IOJS}/#versions/${NVM_IOJS_PREFIX}/#;
            s#^${NVM_DIR}/##;
            \\#^[^v]# d;
            \\#^versions\$# d;
            s#^versions/##;
            s#^v#${NVM_NODE_PREFIX}/v#;
            \\#${SEARCH_PATTERN}# !d;
          " \
          -e 's#^\([^/]\{1,\}\)/\(.*\)$#\2.\1#;' \
        | command sort -t. -u -k 1.2,1n -k 2,2n -k 3,3n \
        | command sed -e 's#\(.*\)\.\([^\.]\{1,\}\)$#\2-\1#;' \
          -e "s#^${NVM_NODE_PREFIX}-##;" \
      )"
    fi
  fi
  if [ "${NVM_ADD_SYSTEM-}" = true ]; then
    if [ -z "${PATTERN}" ] || [ "${PATTERN}" = 'v' ]; then
      VERSIONS="${VERSIONS}$(command printf '\n%s' 'system')"
    elif [ "${PATTERN}" = 'system' ]; then
      VERSIONS="$(command printf '%s' 'system')"
    fi
  fi
  if [ -z "${VERSIONS}" ]; then
    nvm_echo 'N/A'
    return 3
  fi
  nvm_echo "${VERSIONS}"
}
# List remote node versions matching PATTERN ($1). Implicit aliases
# (stable/unstable/...) are first resolved to a concrete version; an
# empty pattern lists everything.
nvm_ls_remote() {
  local PATTERN
  PATTERN="${1-}"
  if nvm_validate_implicit_alias "${PATTERN}" 2>/dev/null ; then
    local IMPLICIT
    IMPLICIT="$(nvm_print_implicit_alias remote "${PATTERN}")"
    if [ -z "${IMPLICIT-}" ] || [ "${IMPLICIT}" = 'N/A' ]; then
      nvm_echo "N/A"
      return 3
    fi
    # keep only the version field of the newest match (drops LTS label)
    PATTERN="$(NVM_LTS="${NVM_LTS-}" nvm_ls_remote "${IMPLICIT}" | command tail -1 | command awk '{ print $1 }')"
  elif [ -n "${PATTERN}" ]; then
    PATTERN="$(nvm_ensure_version_prefix "${PATTERN}")"
  else
    PATTERN=".*"
  fi
  NVM_LTS="${NVM_LTS-}" nvm_ls_remote_index_tab node std "${PATTERN}"
}
# List remote io.js versions matching PATTERN ($1).
nvm_ls_remote_iojs() {
  NVM_LTS="${NVM_LTS-}" nvm_ls_remote_index_tab iojs std "${1-}"
}
# args flavor, type, version
# Download a mirror's index.tab and print the versions matching the
# pattern ($3), annotated with their LTS line name and a '*' on the
# newest release of each LTS line. As a side effect, (re)writes the
# lts/* alias files. Honors NVM_LTS to filter by LTS line.
nvm_ls_remote_index_tab() {
  local LTS
  LTS="${NVM_LTS-}"
  if [ "$#" -lt 3 ]; then
    nvm_err 'not enough arguments'
    return 5
  fi
  local FLAVOR
  FLAVOR="${1-}"
  local TYPE
  TYPE="${2-}"
  local MIRROR
  MIRROR="$(nvm_get_mirror "${FLAVOR}" "${TYPE}")"
  if [ -z "${MIRROR}" ]; then
    return 3
  fi
  local PREFIX
  PREFIX=''
  case "${FLAVOR}-${TYPE}" in
    iojs-std) PREFIX="$(nvm_iojs_prefix)-" ;;
    node-std) PREFIX='' ;;
    iojs-*)
      nvm_err 'unknown type of io.js release'
      return 4
      ;;
    *)
      nvm_err 'unknown type of node.js release'
      return 4
      ;;
  esac
  # node needs a semver-aware sort; io.js versions sort lexically fine
  local SORT_COMMAND
  SORT_COMMAND='command sort'
  case "${FLAVOR}" in
    node) SORT_COMMAND='command sort -t. -u -k 1.2,1n -k 2,2n -k 3,3n' ;;
  esac
  local PATTERN
  PATTERN="${3-}"
  if [ "${PATTERN#"${PATTERN%?}"}" = '.' ]; then
    PATTERN="${PATTERN%.}"
  fi
  local VERSIONS
  if [ -n "${PATTERN}" ] && [ "${PATTERN}" != '*' ]; then
    if [ "${FLAVOR}" = 'iojs' ]; then
      PATTERN="$(nvm_ensure_version_prefix "$(nvm_strip_iojs_prefix "${PATTERN}")")"
    else
      PATTERN="$(nvm_ensure_version_prefix "${PATTERN}")"
    fi
  else
    unset PATTERN
  fi
  nvm_is_zsh && setopt local_options shwordsplit
  # index.tab: first line is a header (stripped); column 1 is the
  # version, column 10 the LTS codename ('-' when not LTS)
  local VERSION_LIST
  VERSION_LIST="$(nvm_download -L -s "${MIRROR}/index.tab" -o - \
    | command sed "
        1d;
        s/^/${PREFIX}/;
      " \
  )"
  local LTS_ALIAS
  local LTS_VERSION
  command mkdir -p "$(nvm_alias_path)/lts"
  # first pass: derive "lts/<name> <newest version>" pairs (plus the
  # rolling lts/* alias) and write them as alias files
  { command awk '{
        if ($10 ~ /^\-?$/) { next }
        if ($10 && !a[tolower($10)]++) {
          if (alias) { print alias, version }
          alias_name = "lts/" tolower($10)
          if (!alias) { print "lts/*", alias_name }
          alias = alias_name
          version = $1
        }
      }
      END {
        if (alias) {
          print alias, version
        }
      }' \
    | while read -r LTS_ALIAS_LINE; do
      LTS_ALIAS="${LTS_ALIAS_LINE%% *}"
      LTS_VERSION="${LTS_ALIAS_LINE#* }"
      nvm_make_alias "${LTS_ALIAS}" "${LTS_VERSION}" >/dev/null 2>&1
    done; } << EOF
$VERSION_LIST
EOF
  # second pass: emit "version [lts-name [*]]" lines, filtered by the
  # requested LTS line and pattern, in flavor-appropriate order
  VERSIONS="$({ command awk -v lts="${LTS-}" '{
        if (!$1) { next }
        if (lts && $10 ~ /^\-?$/) { next }
        if (lts && lts != "*" && tolower($10) !~ tolower(lts)) { next }
        if ($10 !~ /^\-?$/) {
          if ($10 && $10 != prev) {
            print $1, $10, "*"
          } else {
            print $1, $10
          }
        } else {
          print $1
        }
        prev=$10;
      }' \
    | nvm_grep -w "${PATTERN:-.*}" \
    | $SORT_COMMAND; } << EOF
$VERSION_LIST
EOF
)"
  if [ -z "${VERSIONS}" ]; then
    nvm_echo 'N/A'
    return 3
  fi
  nvm_echo "${VERSIONS}"
}
nvm_get_checksum_binary() {
  # Print the name of the first available (unaliased) checksum tool,
  # preferring sha-256 producers over the sha-1 fallbacks.
  local NVM_CHECKSUM_BINARY
  for NVM_CHECKSUM_BINARY in sha256sum shasum sha256 gsha256sum openssl bssl sha1sum sha1; do
    if nvm_has_non_aliased "${NVM_CHECKSUM_BINARY}"; then
      nvm_echo "${NVM_CHECKSUM_BINARY}"
      return
    fi
  done
  nvm_err 'Unaliased sha256sum, shasum, sha256, gsha256sum, openssl, or bssl not found.'
  nvm_err 'Unaliased sha1sum or sha1 not found.'
  return 1
}
nvm_get_checksum_alg() {
  # Report which hash algorithm the detected checksum binary produces.
  local NVM_BIN
  NVM_BIN="$(nvm_get_checksum_binary 2>/dev/null)"
  case "${NVM_BIN-}" in
    sha1sum | sha1)
      nvm_echo 'sha-1'
    ;;
    sha256sum | shasum | sha256 | gsha256sum | openssl | bssl)
      nvm_echo 'sha-256'
    ;;
    *)
      # nothing detected: rerun without silencing stderr so the caller
      # sees the "not found" diagnostics, and propagate its failure code
      nvm_get_checksum_binary
      return $?
    ;;
  esac
}
nvm_compute_checksum() {
  # Print the hex digest of FILE ($1) on stdout using the best available
  # tool; the chosen tool is announced on stderr. Prefers sha-256 tools,
  # falling back to sha-1. NOTE(review): when no tool exists this prints
  # nothing and returns 0 — nvm_compare_checksum treats an empty sum as
  # "skip verification"; confirm that is the intended contract.
  local FILE
  FILE="${1-}"
  if [ -z "${FILE}" ]; then
    nvm_err 'Provided file to checksum is empty.'
    return 2
  elif ! [ -f "${FILE}" ]; then
    nvm_err 'Provided file to checksum does not exist.'
    return 1
  fi
  if nvm_has_non_aliased "sha256sum"; then
    nvm_err 'Computing checksum with sha256sum'
    command sha256sum "${FILE}" | command awk '{print $1}'
  elif nvm_has_non_aliased "shasum"; then
    nvm_err 'Computing checksum with shasum -a 256'
    command shasum -a 256 "${FILE}" | command awk '{print $1}'
  elif nvm_has_non_aliased "sha256"; then
    nvm_err 'Computing checksum with sha256 -q'
    command sha256 -q "${FILE}" | command awk '{print $1}'
  elif nvm_has_non_aliased "gsha256sum"; then
    nvm_err 'Computing checksum with gsha256sum'
    command gsha256sum "${FILE}" | command awk '{print $1}'
  elif nvm_has_non_aliased "openssl"; then
    nvm_err 'Computing checksum with openssl dgst -sha256'
    # openssl prints "SHA256(file)= <hash>", so take the last field
    command openssl dgst -sha256 "${FILE}" | command awk '{print $NF}'
  elif nvm_has_non_aliased "bssl"; then
    nvm_err 'Computing checksum with bssl sha256sum'
    command bssl sha256sum "${FILE}" | command awk '{print $1}'
  elif nvm_has_non_aliased "sha1sum"; then
    nvm_err 'Computing checksum with sha1sum'
    command sha1sum "${FILE}" | command awk '{print $1}'
  elif nvm_has_non_aliased "sha1"; then
    nvm_err 'Computing checksum with sha1 -q'
    # sha1 -q already prints only the digest, so no awk needed
    command sha1 -q "${FILE}"
  fi
}
nvm_compare_checksum() {
  # Verify that FILE ($1) matches the expected CHECKSUM ($2).
  # Returns 0 on match — and also when no local checksum could be computed
  # (deliberate best-effort; see the warning below). Non-zero on mismatch
  # or bad arguments. All human-readable output goes to stderr.
  local FILE
  FILE="${1-}"
  if [ -z "${FILE}" ]; then
    nvm_err 'Provided file to checksum is empty.'
    return 4
  elif ! [ -f "${FILE}" ]; then
    nvm_err 'Provided file to checksum does not exist.'
    return 3
  fi
  local COMPUTED_SUM
  COMPUTED_SUM="$(nvm_compute_checksum "${FILE}")"
  local CHECKSUM
  CHECKSUM="${2-}"
  if [ -z "${CHECKSUM}" ]; then
    nvm_err 'Provided checksum to compare to is empty.'
    return 2
  fi
  if [ -z "${COMPUTED_SUM}" ]; then
    nvm_err "Computed checksum of '${FILE}' is empty." # missing in raspberry pi binary
    nvm_err 'WARNING: Continuing *without checksum verification*'
    return
  elif [ "${COMPUTED_SUM}" != "${CHECKSUM}" ]; then
    nvm_err "Checksums do not match: '${COMPUTED_SUM}' found, '${CHECKSUM}' expected."
    return 1
  fi
  nvm_err 'Checksums matched!'
}
# args: flavor, type, version, slug, compression
# Fetch the expected checksum for a release artifact from the mirror's
# SHASUMS256.txt (or SHASUMS.txt when only a sha-1 tool is available
# locally). Prints the bare hash, or nothing when no line matches.
nvm_get_checksum() {
  local FLAVOR
  case "${1-}" in
    node | iojs) FLAVOR="${1}" ;;
    *)
      nvm_err 'supported flavors: node, iojs'
      return 2
    ;;
  esac
  local MIRROR
  MIRROR="$(nvm_get_mirror "${FLAVOR}" "${2-}")"
  if [ -z "${MIRROR}" ]; then
    return 1
  fi
  local SHASUMS_URL
  if [ "$(nvm_get_checksum_alg)" = 'sha-256' ]; then
    SHASUMS_URL="${MIRROR}/${3}/SHASUMS256.txt"
  else
    SHASUMS_URL="${MIRROR}/${3}/SHASUMS.txt"
  fi
  # print the first column (hash) of the line whose second column matches
  # the artifact filename "<slug>.<compression>"
  nvm_download -L -s "${SHASUMS_URL}" -o - | command awk "{ if (\"${4}.${5}\" == \$2) print \$1}"
}
nvm_print_versions() {
  # Pretty-print a version list ($1, as produced by nvm_ls_remote_index_tab
  # et al.), marking the current version, system, installed versions, and
  # LTS releases, with colors when enabled.
  local VERSION
  local LTS
  local FORMAT
  local NVM_CURRENT
  local NVM_LATEST_LTS_COLOR
  local NVM_OLD_LTS_COLOR
  local INSTALLED_COLOR
  local SYSTEM_COLOR
  local CURRENT_COLOR
  local NOT_INSTALLED_COLOR
  local DEFAULT_COLOR
  local LTS_COLOR
  INSTALLED_COLOR=$(nvm_get_colors 1)
  SYSTEM_COLOR=$(nvm_get_colors 2)
  CURRENT_COLOR=$(nvm_get_colors 3)
  NOT_INSTALLED_COLOR=$(nvm_get_colors 4)
  DEFAULT_COLOR=$(nvm_get_colors 5)
  LTS_COLOR=$(nvm_get_colors 6)
  NVM_CURRENT=$(nvm_ls_current)
  # latest-LTS lines use the bold ("1;") variant of the current color
  NVM_LATEST_LTS_COLOR=$(nvm_echo "${CURRENT_COLOR}" | command tr '0;' '1;')
  NVM_OLD_LTS_COLOR="${DEFAULT_COLOR}"
  local NVM_HAS_COLORS
  if [ -z "${NVM_NO_COLORS-}" ] && nvm_has_colors; then
    NVM_HAS_COLORS=1
  fi
  local LTS_LENGTH
  local LTS_FORMAT
  # sed '1!G;h;$!d' reverses line order (tac idiom); the awk rewrites lines
  # whose third field is "*" to "(Latest LTS: name)" and other LTS lines to
  # "(LTS: name)", then the second sed restores the original order
  nvm_echo "${1-}" \
    | command sed '1!G;h;$!d' \
    | command awk '{ if ($2 && $3 && $3 == "*") { print $1, "(Latest LTS: " $2 ")" } else if ($2) { print $1, "(LTS: " $2 ")" } else { print $1 } }' \
    | command sed '1!G;h;$!d' \
    | while read -r VERSION_LINE; do
      VERSION="${VERSION_LINE%% *}"
      LTS="${VERSION_LINE#* }"
      # default format: right-aligned, uncolored, not installed
      FORMAT='%15s'
      if [ "_${VERSION}" = "_${NVM_CURRENT}" ]; then
        if [ "${NVM_HAS_COLORS-}" = '1' ]; then
          FORMAT="\033[${CURRENT_COLOR}-> %12s\033[0m"
        else
          FORMAT='-> %12s *'
        fi
      elif [ "${VERSION}" = "system" ]; then
        if [ "${NVM_HAS_COLORS-}" = '1' ]; then
          FORMAT="\033[${SYSTEM_COLOR}%15s\033[0m"
        else
          FORMAT='%15s *'
        fi
      elif nvm_is_version_installed "${VERSION}"; then
        if [ "${NVM_HAS_COLORS-}" = '1' ]; then
          FORMAT="\033[${INSTALLED_COLOR}%15s\033[0m"
        else
          FORMAT='%15s *'
        fi
      fi
      # LTS == VERSION means the line had no LTS annotation at all
      if [ "${LTS}" != "${VERSION}" ]; then
        case "${LTS}" in
          *Latest*)
            LTS="${LTS##Latest }"
            LTS_LENGTH="${#LTS}"
            if [ "${NVM_HAS_COLORS-}" = '1' ]; then
              LTS_FORMAT="  \\033[${NVM_LATEST_LTS_COLOR}%${LTS_LENGTH}s\\033[0m"
            else
              LTS_FORMAT="  %${LTS_LENGTH}s"
            fi
          ;;
          *)
            LTS_LENGTH="${#LTS}"
            if [ "${NVM_HAS_COLORS-}" = '1' ]; then
              LTS_FORMAT="  \\033[${NVM_OLD_LTS_COLOR}%${LTS_LENGTH}s\\033[0m"
            else
              LTS_FORMAT="  %${LTS_LENGTH}s"
            fi
          ;;
        esac
        command printf -- "${FORMAT}${LTS_FORMAT}\\n" "${VERSION}" " ${LTS}"
      else
        command printf -- "${FORMAT}\\n" "${VERSION}"
      fi
    done
}
nvm_validate_implicit_alias() {
  # Succeed only for the four supported implicit aliases:
  # 'stable', 'unstable', the io.js prefix, and the node prefix.
  local NVM_IOJS_PREFIX
  NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
  local NVM_NODE_PREFIX
  NVM_NODE_PREFIX="$(nvm_node_prefix)"
  if [ "$1" = 'stable' ] || [ "$1" = 'unstable' ] \
    || [ "$1" = "${NVM_IOJS_PREFIX}" ] || [ "$1" = "${NVM_NODE_PREFIX}" ]; then
    return 0
  fi
  nvm_err "Only implicit aliases 'stable', 'unstable', '${NVM_IOJS_PREFIX}', and '${NVM_NODE_PREFIX}' are supported."
  return 1
}
nvm_print_implicit_alias() {
  # Resolve an implicit alias ($2: 'stable', 'unstable', node/iojs prefix)
  # to a MAJOR.MINOR version, using local ($1 = 'local') or remote
  # ($1 = 'remote') version lists. Prints 'N/A' when nothing matches.
  if [ "_$1" != "_local" ] && [ "_$1" != "_remote" ]; then
    nvm_err "nvm_print_implicit_alias must be specified with local or remote as the first argument."
    return 1
  fi
  local NVM_IMPLICIT
  NVM_IMPLICIT="$2"
  if ! nvm_validate_implicit_alias "${NVM_IMPLICIT}"; then
    return 2
  fi
  local NVM_IOJS_PREFIX
  NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
  local NVM_NODE_PREFIX
  NVM_NODE_PREFIX="$(nvm_node_prefix)"
  local NVM_COMMAND
  local NVM_ADD_PREFIX_COMMAND
  local LAST_TWO
  case "${NVM_IMPLICIT}" in
    "${NVM_IOJS_PREFIX}")
      NVM_COMMAND="nvm_ls_remote_iojs"
      NVM_ADD_PREFIX_COMMAND="nvm_add_iojs_prefix"
      if [ "_$1" = "_local" ]; then
        NVM_COMMAND="nvm_ls ${NVM_IMPLICIT}"
      fi
      # zsh needs shwordsplit so unquoted ${NVM_COMMAND} expands into words
      nvm_is_zsh && setopt local_options shwordsplit
      local NVM_IOJS_VERSION
      local EXIT_CODE
      # "&&:" keeps a failing command from tripping `set -e`; $? must be
      # read on the very next line, before anything else runs
      NVM_IOJS_VERSION="$(${NVM_COMMAND})" &&:
      EXIT_CODE="$?"
      if [ "_${EXIT_CODE}" = "_0" ]; then
        # keep only the highest MAJOR.MINOR among the listed io.js versions
        NVM_IOJS_VERSION="$(nvm_echo "${NVM_IOJS_VERSION}" | command sed "s/^${NVM_IMPLICIT}-//" | nvm_grep -e '^v' | command cut -c2- | command cut -d . -f 1,2 | uniq | command tail -1)"
      fi
      if [ "_$NVM_IOJS_VERSION" = "_N/A" ]; then
        nvm_echo 'N/A'
      else
        ${NVM_ADD_PREFIX_COMMAND} "${NVM_IOJS_VERSION}"
      fi
      return $EXIT_CODE
    ;;
    "${NVM_NODE_PREFIX}")
      # 'node' is itself an alias for 'stable'
      nvm_echo 'stable'
      return
    ;;
    *)
      NVM_COMMAND="nvm_ls_remote"
      if [ "_$1" = "_local" ]; then
        NVM_COMMAND="nvm_ls node"
      fi
      nvm_is_zsh && setopt local_options shwordsplit
      # unique MAJOR.MINOR versions, in list order
      LAST_TWO=$($NVM_COMMAND | nvm_grep -e '^v' | command cut -c2- | command cut -d . -f 1,2 | uniq)
    ;;
  esac
  local MINOR
  local STABLE
  local UNSTABLE
  local MOD
  local NORMALIZED_VERSION
  nvm_is_zsh && setopt local_options shwordsplit
  for MINOR in $LAST_TWO; do
    NORMALIZED_VERSION="$(nvm_normalize_version "$MINOR")"
    # if overwriting the first digit with 0 changes the number, this is
    # presumably a post-0.x version — always treated as stable
    if [ "_0${NORMALIZED_VERSION#?}" != "_$NORMALIZED_VERSION" ]; then
      STABLE="$MINOR"
    else
      # 0.x scheme: even minor = stable, odd minor = unstable
      MOD="$(awk 'BEGIN { print int(ARGV[1] / 1000000) % 2 ; exit(0) }' "${NORMALIZED_VERSION}")"
      if [ "${MOD}" -eq 0 ]; then
        STABLE="${MINOR}"
      elif [ "${MOD}" -eq 1 ]; then
        UNSTABLE="${MINOR}"
      fi
    fi
  done
  if [ "_$2" = '_stable' ]; then
    nvm_echo "${STABLE}"
  elif [ "_$2" = '_unstable' ]; then
    nvm_echo "${UNSTABLE:-"N/A"}"
  fi
}
nvm_get_os() {
  # Map `uname -a` output onto nvm's short OS identifier; prints the empty
  # string when the platform is not recognized.
  local NVM_OS
  case "$(command uname -a)" in
    Linux\ *) NVM_OS=linux ;;
    Darwin\ *) NVM_OS=darwin ;;
    SunOS\ *) NVM_OS=sunos ;;
    FreeBSD\ *) NVM_OS=freebsd ;;
    OpenBSD\ *) NVM_OS=openbsd ;;
    AIX\ *) NVM_OS=aix ;;
    CYGWIN* | MSYS* | MINGW*) NVM_OS=win ;;
  esac
  nvm_echo "${NVM_OS-}"
}
nvm_get_arch() {
  # Detect the host CPU architecture, normalized to node's release naming
  # (x64, x86, arm64, armv7l, ...).
  local HOST_ARCH
  local NVM_OS
  local EXIT_CODE
  NVM_OS="$(nvm_get_os)"
  # If the OS is SunOS, first try to use pkgsrc to guess
  # the most appropriate arch. If it's not available, use
  # isainfo to get the instruction set supported by the
  # kernel.
  if [ "_${NVM_OS}" = "_sunos" ]; then
    if HOST_ARCH=$(pkg_info -Q MACHINE_ARCH pkg_install); then
      HOST_ARCH=$(nvm_echo "${HOST_ARCH}" | command tail -1)
    else
      HOST_ARCH=$(isainfo -n)
    fi
  elif [ "_${NVM_OS}" = "_aix" ]; then
    HOST_ARCH=ppc64
  else
    HOST_ARCH="$(command uname -m)"
  fi
  local NVM_ARCH
  case "${HOST_ARCH}" in
    x86_64 | amd64) NVM_ARCH="x64" ;;
    i*86) NVM_ARCH="x86" ;;
    aarch64) NVM_ARCH="arm64" ;;
    *) NVM_ARCH="${HOST_ARCH}" ;;
  esac
  # If running a 64bit ARM kernel but a 32bit ARM userland, change ARCH to 32bit ARM (armv7l)
  # NOTE(review): L is not declared local — it leaks globally; confirm.
  L=$(ls -dl /sbin/init 2>/dev/null) # if /sbin/init is 32bit executable
  # ${L#*-> } strips through "-> " so a symlinked init resolves to its
  # target; od reads the ELF EI_CLASS byte (offset 4): " 01" means 32-bit
  if [ "$(uname)" = "Linux" ] && [ "${NVM_ARCH}" = arm64 ] && [ "$(od -An -t x1 -j 4 -N 1 "${L#*-> }")" = ' 01' ]; then
    NVM_ARCH=armv7l
    HOST_ARCH=armv7l
  fi
  nvm_echo "${NVM_ARCH}"
}
nvm_get_minor_version() {
  # Print the MAJOR.MINOR portion of a version string
  # (e.g. "v10.1.2" -> "10.1").
  local VERSION
  VERSION="$1"
  if [ -z "${VERSION}" ]; then
    nvm_err 'a version is required'
    return 1
  fi
  # reject anything that is not a plain, optionally v-prefixed, dotted
  # numeric version (bare "v", leading/doubled dots, stray characters)
  case "${VERSION}" in
    v | .* | *..* | v*[!.0123456789]* | [!v]*[!.0123456789]* | [!v0123456789]* | v[!0123456789]*)
      nvm_err 'invalid version number'
      return 2
    ;;
  esac
  local PREFIXED_VERSION
  PREFIXED_VERSION="$(nvm_format_version "${VERSION}")"
  local MINOR
  MINOR="$(nvm_echo "${PREFIXED_VERSION}" | nvm_grep -e '^v' | command cut -c2- | command cut -d . -f 1,2)"
  if [ -z "${MINOR}" ]; then
    nvm_err 'invalid version number! (please report this)'
    return 3
  fi
  nvm_echo "${MINOR}"
}
nvm_ensure_default_set() {
  # Ensure a "default" alias exists; when absent, create one pointing at
  # VERSION ($1) and report the result. Returns the exit code of
  # `nvm alias` when a new default is created.
  local VERSION
  VERSION="$1"
  if [ -z "${VERSION}" ]; then
    nvm_err 'nvm_ensure_default_set: a version is required'
    return 1
  elif nvm_alias default >/dev/null 2>&1; then
    # default already set
    return 0
  fi
  local OUTPUT
  local EXIT_CODE
  # declare locals BEFORE the command substitution: `local` is itself a
  # command that resets $?, so EXIT_CODE must be captured on the line
  # immediately following the command whose status we want
  OUTPUT="$(nvm alias default "${VERSION}")"
  EXIT_CODE="$?"
  nvm_echo "Creating default alias: ${OUTPUT}"
  return $EXIT_CODE
}
nvm_is_merged_node_version() {
  # "merged" = node >= v4.0.0 (the io.js / node.js project convergence)
  nvm_version_greater_than_or_equal_to "$1" v4.0.0
}
nvm_get_mirror() {
  # Print the download mirror URL for flavor ($1) and type ($2).
  # Overridable via NVM_NODEJS_ORG_MIRROR / NVM_IOJS_ORG_MIRROR.
  if [ "${1}-${2}" = 'node-std' ]; then
    nvm_echo "${NVM_NODEJS_ORG_MIRROR:-https://nodejs.org/dist}"
  elif [ "${1}-${2}" = 'iojs-std' ]; then
    nvm_echo "${NVM_IOJS_ORG_MIRROR:-https://iojs.org/dist}"
  else
    nvm_err 'unknown type of node.js or io.js release'
    return 1
  fi
}
# args: os, prefixed version, version, tarball, extract directory
# Unpack a downloaded binary artifact into the version's install
# directory, then remove the temporary extract directory.
nvm_install_binary_extract() {
  if [ "$#" -ne 5 ]; then
    nvm_err 'nvm_install_binary_extract needs 5 parameters'
    return 1
  fi
  local NVM_OS
  local PREFIXED_VERSION
  local VERSION
  local TARBALL
  local TMPDIR
  NVM_OS="${1}"
  PREFIXED_VERSION="${2}"
  VERSION="${3}"
  TARBALL="${4}"
  TMPDIR="${5}"
  local VERSION_PATH
  [ -n "${TMPDIR-}" ] && \
    command mkdir -p "${TMPDIR}" && \
    VERSION_PATH="$(nvm_version_path "${PREFIXED_VERSION}")" || return 1
  # For Windows system (GitBash with MSYS, Cygwin)
  if [ "${NVM_OS}" = 'win' ]; then
    VERSION_PATH="${VERSION_PATH}/bin"
    command unzip -q "${TARBALL}" -d "${TMPDIR}" || return 1
  # For non Windows system (including WSL running on Windows)
  else
    # xz tarballs need tar's -J flag; older releases only ship .tar.gz (-z)
    local tar_compression_flag
    tar_compression_flag='z'
    if nvm_supports_xz "${VERSION}"; then
      tar_compression_flag='J'
    fi
    # AIX uses GNU tar (gtar) — presumably native tar lacks the needed flags
    local tar
    if [ "${NVM_OS}" = 'aix' ]; then
      tar='gtar'
    else
      tar='tar'
    fi
    command "${tar}" -x${tar_compression_flag}f "${TARBALL}" -C "${TMPDIR}" --strip-components 1 || return 1
  fi
  command mkdir -p "${VERSION_PATH}" || return 1
  if [ "${NVM_OS}" = 'win' ]; then
    command mv "${TMPDIR}/"*/* "${VERSION_PATH}" || return 1
    command chmod +x "${VERSION_PATH}"/node.exe || return 1
    command chmod +x "${VERSION_PATH}"/npm || return 1
    # npx may not exist in older releases, hence the ignored failure
    command chmod +x "${VERSION_PATH}"/npx 2>/dev/null
  else
    command mv "${TMPDIR}/"* "${VERSION_PATH}" || return 1
  fi
  command rm -rf "${TMPDIR}"
  return 0
}
# args: flavor, type, version, reinstall
# Download and install a prebuilt node/io.js binary.
# Returns 0 on success (or when $4 = 1 suppresses the source fallback),
# 1 to signal "fall back to source install", higher codes for bad args.
nvm_install_binary() {
  local FLAVOR
  case "${1-}" in
    node | iojs) FLAVOR="${1}" ;;
    *)
      nvm_err 'supported flavors: node, iojs'
      return 4
    ;;
  esac
  local TYPE
  TYPE="${2-}"
  local PREFIXED_VERSION
  PREFIXED_VERSION="${3-}"
  if [ -z "${PREFIXED_VERSION}" ]; then
    nvm_err 'A version number is required.'
    return 3
  fi
  local nosource
  nosource="${4-}"
  local VERSION
  VERSION="$(nvm_strip_iojs_prefix "${PREFIXED_VERSION}")"
  local NVM_OS
  NVM_OS="$(nvm_get_os)"
  if [ -z "${NVM_OS}" ]; then
    return 2
  fi
  local TARBALL
  local TMPDIR
  local PROGRESS_BAR
  local NODE_OR_IOJS
  if [ "${FLAVOR}" = 'node' ]; then
    NODE_OR_IOJS="${FLAVOR}"
  elif [ "${FLAVOR}" = 'iojs' ]; then
    NODE_OR_IOJS="io.js"
  fi
  if [ "${NVM_NO_PROGRESS-}" = "1" ]; then
    # --silent, --show-error, use short option as @samrocketman mentions the compatibility issue.
    PROGRESS_BAR="-sS"
  else
    PROGRESS_BAR="--progress-bar"
  fi
  nvm_echo "Downloading and installing ${NODE_OR_IOJS-} ${VERSION}..."
  # nvm_download_artifact prints the cached tarball path as its last line
  TARBALL="$(PROGRESS_BAR="${PROGRESS_BAR}" nvm_download_artifact "${FLAVOR}" binary "${TYPE-}" "${VERSION}" | command tail -1)"
  if [ -f "${TARBALL}" ]; then
    TMPDIR="$(dirname "${TARBALL}")/files"
  fi
  if nvm_install_binary_extract "${NVM_OS}" "${PREFIXED_VERSION}" "${VERSION}" "${TARBALL}" "${TMPDIR}"; then
    # NOTE(review): ALIAS and provided_version are not parameters or locals
    # here — they leak in from the calling `nvm install` scope via dynamic
    # scoping; confirm before reusing this function elsewhere.
    if [ -n "${ALIAS-}" ]; then
      nvm alias "${ALIAS}" "${provided_version}"
    fi
    return 0
  fi
  # Read nosource from arguments
  if [ "${nosource-}" = '1' ]; then
    nvm_err 'Binary download failed. Download from source aborted.'
    return 0
  fi
  nvm_err 'Binary download failed, trying source.'
  if [ -n "${TMPDIR-}" ]; then
    command rm -rf "${TMPDIR}"
  fi
  return 1
}
# args: flavor, kind, version
nvm_get_download_slug() {
  # Build the artifact basename: "node-v10.0.0-linux-x64" for binaries,
  # "node-v10.0.0" for source tarballs.
  local FLAVOR
  case "${1-}" in
    node | iojs) FLAVOR="${1}" ;;
    *)
      nvm_err 'supported flavors: node, iojs'
      return 1
    ;;
  esac
  local KIND
  case "${2-}" in
    binary | source) KIND="${2}" ;;
    *)
      nvm_err 'supported kinds: binary, source'
      return 2
    ;;
  esac
  local VERSION
  VERSION="${3-}"
  local NVM_OS
  NVM_OS="$(nvm_get_os)"
  local NVM_ARCH
  NVM_ARCH="$(nvm_get_arch)"
  # pre-merge (< v4) releases published 32-bit ARM builds as "arm-pi"
  if ! nvm_is_merged_node_version "${VERSION}"; then
    case "${NVM_ARCH}" in
      armv6l | armv7l) NVM_ARCH='arm-pi' ;;
    esac
  fi
  case "${KIND}" in
    binary) nvm_echo "${FLAVOR}-${VERSION}-${NVM_OS}-${NVM_ARCH}" ;;
    source) nvm_echo "${FLAVOR}-${VERSION}" ;;
  esac
}
nvm_get_artifact_compression() {
  # Pick the artifact extension for version $1: zip on Windows, tar.xz
  # where the release supports xz, tar.gz otherwise.
  local NVM_OS
  NVM_OS="$(nvm_get_os)"
  if [ "_${NVM_OS}" = '_win' ]; then
    nvm_echo 'zip'
  elif nvm_supports_xz "${1-}"; then
    nvm_echo 'tar.xz'
  else
    nvm_echo 'tar.gz'
  fi
}
# args: flavor, kind, type, version
# Download a release artifact (binary or source tarball) into the cache,
# verifying its checksum; prints the tarball path on stdout (last line).
# A previously-cached tarball is reused when its checksum still matches.
nvm_download_artifact() {
  local FLAVOR
  case "${1-}" in
    node | iojs) FLAVOR="${1}" ;;
    *)
      nvm_err 'supported flavors: node, iojs'
      return 1
    ;;
  esac
  local KIND
  case "${2-}" in
    binary | source) KIND="${2}" ;;
    *)
      nvm_err 'supported kinds: binary, source'
      return 1
    ;;
  esac
  local TYPE
  TYPE="${3-}"
  local MIRROR
  MIRROR="$(nvm_get_mirror "${FLAVOR}" "${TYPE}")"
  if [ -z "${MIRROR}" ]; then
    return 2
  fi
  local VERSION
  VERSION="${4}"
  if [ -z "${VERSION}" ]; then
    nvm_err 'A version number is required.'
    return 3
  fi
  if [ "${KIND}" = 'binary' ] && ! nvm_binary_available "${VERSION}"; then
    nvm_err "No precompiled binary available for ${VERSION}."
    return
  fi
  local SLUG
  SLUG="$(nvm_get_download_slug "${FLAVOR}" "${KIND}" "${VERSION}")"
  local COMPRESSION
  COMPRESSION="$(nvm_get_artifact_compression "${VERSION}")"
  local CHECKSUM
  CHECKSUM="$(nvm_get_checksum "${FLAVOR}" "${TYPE}" "${VERSION}" "${SLUG}" "${COMPRESSION}")"
  local tmpdir
  if [ "${KIND}" = 'binary' ]; then
    tmpdir="$(nvm_cache_dir)/bin/${SLUG}"
  else
    tmpdir="$(nvm_cache_dir)/src/${SLUG}"
  fi
  # NOTE(review): the `|| ( ...; return N )` groups below run in subshells,
  # so their `return` only exits the subshell and the function continues;
  # confirm whether aborting the function here was intended.
  command mkdir -p "${tmpdir}/files" || (
    nvm_err "creating directory ${tmpdir}/files failed"
    return 3
  )
  local TARBALL
  TARBALL="${tmpdir}/${SLUG}.${COMPRESSION}"
  local TARBALL_URL
  if nvm_version_greater_than_or_equal_to "${VERSION}" 0.1.14; then
    TARBALL_URL="${MIRROR}/${VERSION}/${SLUG}.${COMPRESSION}"
  else
    # node <= 0.1.13 does not have a directory
    TARBALL_URL="${MIRROR}/${SLUG}.${COMPRESSION}"
  fi
  if [ -r "${TARBALL}" ]; then
    nvm_err "Local cache found: $(nvm_sanitize_path "${TARBALL}")"
    if nvm_compare_checksum "${TARBALL}" "${CHECKSUM}" >/dev/null 2>&1; then
      nvm_err "Checksums match! Using existing downloaded archive $(nvm_sanitize_path "${TARBALL}")"
      nvm_echo "${TARBALL}"
      return 0
    fi
    # re-run without silencing so the mismatch details reach the user
    nvm_compare_checksum "${TARBALL}" "${CHECKSUM}"
    nvm_err "Checksum check failed!"
    nvm_err "Removing the broken local cache..."
    command rm -rf "${TARBALL}"
  fi
  nvm_err "Downloading ${TARBALL_URL}..."
  # PROGRESS_BAR is supplied by the caller via an environment prefix
  nvm_download -L -C - "${PROGRESS_BAR}" "${TARBALL_URL}" -o "${TARBALL}" || (
    command rm -rf "${TARBALL}" "${tmpdir}"
    nvm_err "Binary download from ${TARBALL_URL} failed, trying source."
    return 4
  )
  # guard against a '404 Not Found' body saved to disk — presumably from
  # mirrors that answer with an error page instead of an HTTP error
  if nvm_grep '404 Not Found' "${TARBALL}" >/dev/null; then
    command rm -rf "${TARBALL}" "${tmpdir}"
    nvm_err "HTTP 404 at URL ${TARBALL_URL}"
    return 5
  fi
  nvm_compare_checksum "${TARBALL}" "${CHECKSUM}" || (
    command rm -rf "${tmpdir}/files"
    return 6
  )
  nvm_echo "${TARBALL}"
}
nvm_get_make_jobs() {
  # Set the NVM_MAKE_JOBS variable (consumed by nvm_install_source):
  # either the explicit natural number in $1, or (CPU cores - 1) when
  # more than 2 cores are detected, otherwise 1.
  if nvm_is_natural_num "${1-}"; then
    NVM_MAKE_JOBS="$1"
    nvm_echo "number of \`make\` jobs: ${NVM_MAKE_JOBS}"
    return
  elif [ -n "${1-}" ]; then
    unset NVM_MAKE_JOBS
    nvm_err "$1 is invalid for number of \`make\` jobs, must be a natural number"
  fi
  local NVM_OS
  NVM_OS="$(nvm_get_os)"
  # per-OS CPU core count detection
  local NVM_CPU_CORES
  case "_${NVM_OS}" in
    "_linux")
      NVM_CPU_CORES="$(nvm_grep -c -E '^processor.+: [0-9]+' /proc/cpuinfo)"
    ;;
    "_freebsd" | "_darwin" | "_openbsd")
      NVM_CPU_CORES="$(sysctl -n hw.ncpu)"
    ;;
    "_sunos")
      NVM_CPU_CORES="$(psrinfo | wc -l)"
    ;;
    "_aix")
      NVM_CPU_CORES="$(pmcycles -m | wc -l)"
    ;;
  esac
  if ! nvm_is_natural_num "${NVM_CPU_CORES}"; then
    nvm_err 'Can not determine how many core(s) are available, running in single-threaded mode.'
    nvm_err 'Please report an issue on GitHub to help us make nvm run faster on your computer!'
    NVM_MAKE_JOBS=1
  else
    nvm_echo "Detected that you have ${NVM_CPU_CORES} CPU core(s)"
    if [ "${NVM_CPU_CORES}" -gt 2 ]; then
      NVM_MAKE_JOBS=$((NVM_CPU_CORES - 1))
      nvm_echo "Running with ${NVM_MAKE_JOBS} threads to speed up the build"
    else
      NVM_MAKE_JOBS=1
      nvm_echo 'Number of CPU core(s) less than or equal to 2, running in single-threaded mode'
    fi
  fi
}
# args: flavor, type, version, make jobs, additional
# Download, configure, compile, and install a node/io.js release from
# source. $4 is the make -j parallelism; $5 is extra ./configure flags.
nvm_install_source() {
  local FLAVOR
  case "${1-}" in
    node | iojs) FLAVOR="${1}" ;;
    *)
      nvm_err 'supported flavors: node, iojs'
      return 4
    ;;
  esac
  local TYPE
  TYPE="${2-}"
  local PREFIXED_VERSION
  PREFIXED_VERSION="${3-}"
  if [ -z "${PREFIXED_VERSION}" ]; then
    nvm_err 'A version number is required.'
    return 3
  fi
  local VERSION
  VERSION="$(nvm_strip_iojs_prefix "${PREFIXED_VERSION}")"
  local NVM_MAKE_JOBS
  NVM_MAKE_JOBS="${4-}"
  local ADDITIONAL_PARAMETERS
  ADDITIONAL_PARAMETERS="${5-}"
  local NVM_ARCH
  NVM_ARCH="$(nvm_get_arch)"
  # snapshots are disabled on 32-bit ARM builds
  if [ "${NVM_ARCH}" = 'armv6l' ] || [ "${NVM_ARCH}" = 'armv7l' ]; then
    if [ -n "${ADDITIONAL_PARAMETERS}" ]; then
      ADDITIONAL_PARAMETERS="--without-snapshot ${ADDITIONAL_PARAMETERS}"
    else
      ADDITIONAL_PARAMETERS='--without-snapshot'
    fi
  fi
  if [ -n "${ADDITIONAL_PARAMETERS}" ]; then
    nvm_echo "Additional options while compiling: ${ADDITIONAL_PARAMETERS}"
  fi
  local NVM_OS
  NVM_OS="$(nvm_get_os)"
  # FreeBSD/AIX build with GNU make (gmake); freebsd/darwin pin CC/CXX
  local make
  make='make'
  local MAKE_CXX
  case "${NVM_OS}" in
    'freebsd')
      make='gmake'
      MAKE_CXX="CC=${CC:-cc} CXX=${CXX:-c++}"
    ;;
    'darwin')
      MAKE_CXX="CC=${CC:-cc} CXX=${CXX:-c++}"
    ;;
    'aix')
      make='gmake'
    ;;
  esac
  if nvm_has "clang++" && nvm_has "clang" && nvm_version_greater_than_or_equal_to "$(nvm_clang_version)" 3.5; then
    if [ -z "${CC-}" ] || [ -z "${CXX-}" ]; then
      nvm_echo "Clang v3.5+ detected! CC or CXX not specified, will use Clang as C/C++ compiler!"
      MAKE_CXX="CC=${CC:-cc} CXX=${CXX:-c++}"
    fi
  fi
  # xz source tarballs need tar's -J flag; otherwise gzip (-z)
  local tar_compression_flag
  tar_compression_flag='z'
  if nvm_supports_xz "${VERSION}"; then
    tar_compression_flag='J'
  fi
  local tar
  tar='tar'
  if [ "${NVM_OS}" = 'aix' ]; then
    tar='gtar'
  fi
  local TARBALL
  local TMPDIR
  local VERSION_PATH
  if [ "${NVM_NO_PROGRESS-}" = "1" ]; then
    # --silent, --show-error, use short option as @samrocketman mentions the compatibility issue.
    PROGRESS_BAR="-sS"
  else
    PROGRESS_BAR="--progress-bar"
  fi
  # zsh needs shwordsplit so unquoted $make / $ADDITIONAL_PARAMETERS split
  nvm_is_zsh && setopt local_options shwordsplit
  # download, extract, configure, build, install — any failure cleans up
  TARBALL="$(PROGRESS_BAR="${PROGRESS_BAR}" nvm_download_artifact "${FLAVOR}" source "${TYPE}" "${VERSION}" | command tail -1)" && \
    [ -f "${TARBALL}" ] && \
    TMPDIR="$(dirname "${TARBALL}")/files" && \
    if ! (
      # shellcheck disable=SC2086
      command mkdir -p "${TMPDIR}" && \
      command "${tar}" -x${tar_compression_flag}f "${TARBALL}" -C "${TMPDIR}" --strip-components 1 && \
      VERSION_PATH="$(nvm_version_path "${PREFIXED_VERSION}")" && \
      nvm_cd "${TMPDIR}" && \
      nvm_echo '$>'./configure --prefix="${VERSION_PATH}" $ADDITIONAL_PARAMETERS'<' && \
      ./configure --prefix="${VERSION_PATH}" $ADDITIONAL_PARAMETERS && \
      $make -j "${NVM_MAKE_JOBS}" ${MAKE_CXX-} && \
      command rm -f "${VERSION_PATH}" 2>/dev/null && \
      $make -j "${NVM_MAKE_JOBS}" ${MAKE_CXX-} install
    ); then
    nvm_err "nvm: install ${VERSION} failed!"
    command rm -rf "${TMPDIR-}"
    return 1
  fi
}
nvm_use_if_needed() {
  # Activate the requested version only when it is not already current.
  if [ "_${1-}" != "_$(nvm_ls_current)" ]; then
    nvm use "$@"
  fi
}
nvm_install_npm_if_needed() {
  # Bootstrap npm via its install script when the active node lacks it.
  # node < 0.2.3 cannot run npm; 0.2.x nodes get the pinned 0.2.19 installer.
  local VERSION
  VERSION="$(nvm_ls_current)"
  if ! nvm_has "npm"; then
    nvm_echo 'Installing npm...'
    if nvm_version_greater 0.2.0 "${VERSION}"; then
      nvm_err 'npm requires node v0.2.3 or higher'
    elif nvm_version_greater_than_or_equal_to "${VERSION}" 0.2.0; then
      if nvm_version_greater 0.2.3 "${VERSION}"; then
        nvm_err 'npm requires node v0.2.3 or higher'
      else
        nvm_download -L https://npmjs.org/install.sh -o - | clean=yes npm_install=0.2.19 sh
      fi
    else
      # node >= 0.3: latest installer
      nvm_download -L https://npmjs.org/install.sh -o - | clean=yes sh
    fi
  fi
  return $?
}
nvm_match_version() {
  # Resolve the user-provided version string ($1) to a concrete version,
  # special-casing the io.js spellings and the literal 'system'.
  local NVM_IOJS_PREFIX
  NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
  local PROVIDED_VERSION
  PROVIDED_VERSION="$1"
  if [ "${PROVIDED_VERSION}" = "${NVM_IOJS_PREFIX}" ] || [ "${PROVIDED_VERSION}" = 'io.js' ]; then
    nvm_version "${NVM_IOJS_PREFIX}"
  elif [ "${PROVIDED_VERSION}" = 'system' ]; then
    nvm_echo 'system'
  else
    nvm_version "${PROVIDED_VERSION}"
  fi
}
nvm_npm_global_modules() {
  # Print "<installed packages> //// <linked packages>" for the global
  # npm modules of version $1 (npm itself excluded from the install list).
  local NPMLIST
  local VERSION
  VERSION="$1"
  # header row dropped by `sed 1,1d`; unmet peer deps filtered out
  NPMLIST=$(nvm use "${VERSION}" >/dev/null && npm list -g --depth=0 2>/dev/null | command sed 1,1d | nvm_grep -v 'UNMET PEER DEPENDENCY')
  local INSTALLS
  # keep "name@version" tokens; drop linked (" -> ") entries, "(empty)",
  # and the npm package itself; xargs joins them on one line
  INSTALLS=$(nvm_echo "${NPMLIST}" | command sed -e '/ -> / d' -e '/\(empty\)/ d' -e 's/^.* \(.*@[^ ]*\).*/\1/' -e '/^npm@[^ ]*.*$/ d' | command xargs)
  local LINKS
  # npm-linked modules: extract the symlink target paths after " -> "
  LINKS="$(nvm_echo "${NPMLIST}" | command sed -n 's/.* -> \(.*\)/\1/ p')"
  nvm_echo "${INSTALLS} //// ${LINKS}"
}
nvm_npmrc_bad_news_bears() {
  # Succeed (return 0) when the given npmrc file exists and sets `prefix`
  # or `globalconfig` — settings that are incompatible with nvm.
  local NVM_NPMRC
  NVM_NPMRC="${1-}"
  if [ -z "${NVM_NPMRC}" ] || [ ! -f "${NVM_NPMRC}" ]; then
    return 1
  fi
  if nvm_grep -Ee '^(prefix|globalconfig) *=' <"${NVM_NPMRC}" >/dev/null; then
    return 0
  fi
  return 1
}
nvm_die_on_prefix() {
  # Guard against npm "prefix"/"globalconfig" settings (env vars or any
  # npmrc file) that would redirect global installs away from nvm's
  # per-version directories. $1 = 1 deletes the offending settings,
  # $1 = 0 errors out; $2 is the remediation command shown to the user;
  # $3 is the active version's install directory.
  local NVM_DELETE_PREFIX
  NVM_DELETE_PREFIX="${1-}"
  case "${NVM_DELETE_PREFIX}" in
    0 | 1) ;;
    *)
      nvm_err 'First argument "delete the prefix" must be zero or one'
      return 1
    ;;
  esac
  local NVM_COMMAND
  NVM_COMMAND="${2-}"
  local NVM_VERSION_DIR
  NVM_VERSION_DIR="${3-}"
  if [ -z "${NVM_COMMAND}" ] || [ -z "${NVM_VERSION_DIR}" ]; then
    nvm_err 'Second argument "nvm command", and third argument "nvm version dir", must both be nonempty'
    return 2
  fi
  # npm first looks at $PREFIX (case-sensitive)
  # we do not bother to test the value here; if this env var is set, unset it to continue.
  # however, `npm exec` in npm v7.2+ sets $PREFIX; if set, inherit it
  if [ -n "${PREFIX-}" ] && [ "$(nvm_version_path "$(node -v)")" != "${PREFIX}" ]; then
    nvm deactivate >/dev/null 2>&1
    nvm_err "nvm is not compatible with the \"PREFIX\" environment variable: currently set to \"${PREFIX}\""
    nvm_err 'Run `unset PREFIX` to unset it.'
    return 3
  fi
  local NVM_OS
  NVM_OS="$(nvm_get_os)"
  # npm normalizes NPM_CONFIG_-prefixed env vars
  # https://github.com/npm/npmconf/blob/22827e4038d6eebaafeb5c13ed2b92cf97b8fb82/npmconf.js#L331-L348
  # https://github.com/npm/npm/blob/5e426a78ca02d0044f8dd26e0c5f881217081cbd/lib/config/core.js#L343-L359
  #
  # here, we avoid trying to replicate "which one wins" or testing the value; if any are defined, it errors
  # until none are left.
  local NVM_NPM_CONFIG_x_PREFIX_ENV
  if [ -n "${BASH_SOURCE-}" ]; then
    # bash: `set` lists shell variables too, so also catch non-exported ones
    NVM_NPM_CONFIG_x_PREFIX_ENV="$(command set | command awk -F '=' '! /^[0-9A-Z_a-z]+=/ {exit} {print $1}' | nvm_grep -i NPM_CONFIG_PREFIX | command tail -1)"
  else
    NVM_NPM_CONFIG_x_PREFIX_ENV="$(command env | nvm_grep -i NPM_CONFIG_PREFIX | command tail -1 | command awk -F '=' '{print $1}')"
  fi
  if [ -n "${NVM_NPM_CONFIG_x_PREFIX_ENV-}" ]; then
    local NVM_CONFIG_VALUE
    # indirect expansion: read the value of the discovered variable name
    eval "NVM_CONFIG_VALUE=\"\$${NVM_NPM_CONFIG_x_PREFIX_ENV}\""
    if [ -n "${NVM_CONFIG_VALUE-}" ] && [ "_${NVM_OS}" = "_win" ]; then
      # on Windows, canonicalize the path before the containment check
      NVM_CONFIG_VALUE="$(cd "$NVM_CONFIG_VALUE" 2>/dev/null && pwd)"
    fi
    if [ -n "${NVM_CONFIG_VALUE-}" ] && ! nvm_tree_contains_path "${NVM_DIR}" "${NVM_CONFIG_VALUE}"; then
      nvm deactivate >/dev/null 2>&1
      nvm_err "nvm is not compatible with the \"${NVM_NPM_CONFIG_x_PREFIX_ENV}\" environment variable: currently set to \"${NVM_CONFIG_VALUE}\""
      nvm_err "Run \`unset ${NVM_NPM_CONFIG_x_PREFIX_ENV}\` to unset it."
      return 4
    fi
  fi
  # here, npm config checks npmrc files.
  # the stack is: cli, env, project, user, global, builtin, defaults
  # cli does not apply; env is covered above, defaults don't exist for prefix
  # there are 4 npmrc locations to check: project, global, user, and builtin
  # project: find the closest node_modules or package.json-containing dir, `.npmrc`
  # global: default prefix + `/etc/npmrc`
  # user: $HOME/.npmrc
  # builtin: npm install location, `npmrc`
  #
  # if any of them have a `prefix`, fail.
  # if any have `globalconfig`, fail also, just in case, to avoid spidering configs.
  local NVM_NPM_BUILTIN_NPMRC
  NVM_NPM_BUILTIN_NPMRC="${NVM_VERSION_DIR}/lib/node_modules/npm/npmrc"
  if nvm_npmrc_bad_news_bears "${NVM_NPM_BUILTIN_NPMRC}"; then
    if [ "_${NVM_DELETE_PREFIX}" = "_1" ]; then
      npm config --loglevel=warn delete prefix --userconfig="${NVM_NPM_BUILTIN_NPMRC}"
      npm config --loglevel=warn delete globalconfig --userconfig="${NVM_NPM_BUILTIN_NPMRC}"
    else
      nvm_err "Your builtin npmrc file ($(nvm_sanitize_path "${NVM_NPM_BUILTIN_NPMRC}"))"
      nvm_err 'has a `globalconfig` and/or a `prefix` setting, which are incompatible with nvm.'
      nvm_err "Run \`${NVM_COMMAND}\` to unset it."
      return 10
    fi
  fi
  local NVM_NPM_GLOBAL_NPMRC
  NVM_NPM_GLOBAL_NPMRC="${NVM_VERSION_DIR}/etc/npmrc"
  if nvm_npmrc_bad_news_bears "${NVM_NPM_GLOBAL_NPMRC}"; then
    if [ "_${NVM_DELETE_PREFIX}" = "_1" ]; then
      npm config --global --loglevel=warn delete prefix
      npm config --global --loglevel=warn delete globalconfig
    else
      nvm_err "Your global npmrc file ($(nvm_sanitize_path "${NVM_NPM_GLOBAL_NPMRC}"))"
      nvm_err 'has a `globalconfig` and/or a `prefix` setting, which are incompatible with nvm.'
      nvm_err "Run \`${NVM_COMMAND}\` to unset it."
      return 10
    fi
  fi
  local NVM_NPM_USER_NPMRC
  NVM_NPM_USER_NPMRC="${HOME}/.npmrc"
  if nvm_npmrc_bad_news_bears "${NVM_NPM_USER_NPMRC}"; then
    if [ "_${NVM_DELETE_PREFIX}" = "_1" ]; then
      npm config --loglevel=warn delete prefix --userconfig="${NVM_NPM_USER_NPMRC}"
      npm config --loglevel=warn delete globalconfig --userconfig="${NVM_NPM_USER_NPMRC}"
    else
      nvm_err "Your user’s .npmrc file ($(nvm_sanitize_path "${NVM_NPM_USER_NPMRC}"))"
      nvm_err 'has a `globalconfig` and/or a `prefix` setting, which are incompatible with nvm.'
      nvm_err "Run \`${NVM_COMMAND}\` to unset it."
      return 10
    fi
  fi
  local NVM_NPM_PROJECT_NPMRC
  NVM_NPM_PROJECT_NPMRC="$(nvm_find_project_dir)/.npmrc"
  if nvm_npmrc_bad_news_bears "${NVM_NPM_PROJECT_NPMRC}"; then
    if [ "_${NVM_DELETE_PREFIX}" = "_1" ]; then
      npm config --loglevel=warn delete prefix
      npm config --loglevel=warn delete globalconfig
    else
      nvm_err "Your project npmrc file ($(nvm_sanitize_path "${NVM_NPM_PROJECT_NPMRC}"))"
      nvm_err 'has a `globalconfig` and/or a `prefix` setting, which are incompatible with nvm.'
      nvm_err "Run \`${NVM_COMMAND}\` to unset it."
      return 10
    fi
  fi
}
# Succeeds if $IOJS_VERSION represents an io.js version that has a
# Solaris binary, fails otherwise.
# Currently, only io.js 3.3.1 has a Solaris binary available, and it's the
# latest io.js version available. The expectation is that any potential io.js
# version later than v3.3.1 will also have Solaris binaries.
nvm_iojs_version_has_solaris_binary() {
  local IOJS_VERSION
  IOJS_VERSION="$1"
  local STRIPPED_IOJS_VERSION
  STRIPPED_IOJS_VERSION="$(nvm_strip_iojs_prefix "${IOJS_VERSION}")"
  # If stripping changed nothing, this was not an io.js-prefixed version.
  # Both sides carry the "_" sentinel (matching the sibling
  # nvm_node_version_has_solaris_binary); the original compared
  # "_${STRIPPED}" against the bare version, so the guard never fired.
  if [ "_${STRIPPED_IOJS_VERSION}" = "_${IOJS_VERSION}" ]; then
    return 1
  fi
  # io.js started shipping Solaris binaries with io.js v3.3.1
  nvm_version_greater_than_or_equal_to "${STRIPPED_IOJS_VERSION}" v3.3.1
}
# Succeeds if $1 represents a node version that has a Solaris binary,
# fails otherwise.
# Unmerged node releases from v0.8.6 up to (but excluding) v1.0.0
# shipped Solaris binaries.
nvm_node_version_has_solaris_binary() {
  local NODE_VERSION
  NODE_VERSION="$1"
  # io.js versions are out of scope here: bail when stripping the io.js
  # prefix changes the string
  if [ "_$(nvm_strip_iojs_prefix "${NODE_VERSION}")" != "_${NODE_VERSION}" ]; then
    return 1
  fi
  # node (unmerged) started shipping Solaris binaries with v0.8.6, and
  # v1.0.0+ is not a valid "unmerged" node version
  nvm_version_greater_than_or_equal_to "${NODE_VERSION}" v0.8.6 \
    && ! nvm_version_greater_than_or_equal_to "${NODE_VERSION}" v1.0.0
}
# Succeeds if $VERSION represents a version (node, io.js or merged) that
# has a Solaris binary, fails otherwise.
nvm_has_solaris_binary() {
  local VERSION=$1
  if nvm_is_merged_node_version "${VERSION}"; then
    # every merged (>= v4) node release ships a Solaris binary
    return 0
  fi
  if nvm_is_iojs_version "${VERSION}"; then
    nvm_iojs_version_has_solaris_binary "${VERSION}"
    return $?
  fi
  nvm_node_version_has_solaris_binary "${VERSION}"
}
nvm_sanitize_path() {
  # Replace literal occurrences of $NVM_DIR and $HOME in a path with the
  # variable names themselves, for friendlier log/error output. The exact
  # values are skipped so "$NVM_DIR" itself prints unchanged.
  local SANITIZED_PATH
  SANITIZED_PATH="${1-}"
  if [ "_${SANITIZED_PATH}" != "_${NVM_DIR}" ]; then
    SANITIZED_PATH="$(nvm_echo "${SANITIZED_PATH}" | command sed -e "s#${NVM_DIR}#\${NVM_DIR}#g")"
  fi
  if [ "_${SANITIZED_PATH}" != "_${HOME}" ]; then
    SANITIZED_PATH="$(nvm_echo "${SANITIZED_PATH}" | command sed -e "s#${HOME}#\${HOME}#g")"
  fi
  nvm_echo "${SANITIZED_PATH}"
}
nvm_is_natural_num() {
  # Succeed only for positive integers (>= 1).
  # Return codes: 1 = zero, 2 = non-numeric, 3 = negative, 4 = empty.
  if [ -z "$1" ]; then
    return 4
  fi
  case "$1" in
    0) return 1 ;;
    -*) return 3 ;; # some BSDs return false positives for double-negated args
    *)
      [ "$1" -eq "$1" ] 2>/dev/null # returns 2 if it doesn't match
    ;;
  esac
}
# Check version dir permissions
# Recursively verify every file under $1 is writable or owned by the
# current user; returns non-zero on the first offending file.
nvm_check_file_permissions() {
  # zsh: nonomatch lets unmatched globs pass through as literal strings
  nvm_is_zsh && setopt local_options nonomatch
  # the three globs cover regular entries, dotfiles, and ..?* names
  # NOTE(review): FILE is not declared local — it leaks globally; confirm
  # whether that is intentional before changing it.
  for FILE in "$1"/* "$1"/.[!.]* "$1"/..?* ; do
    if [ -d "$FILE" ]; then
      if [ -n "${NVM_DEBUG-}" ]; then
        nvm_err "${FILE}"
      fi
      if ! nvm_check_file_permissions "${FILE}"; then
        return 2
      fi
    elif [ -e "$FILE" ] && [ ! -w "$FILE" ] && [ ! -O "$FILE" ]; then
      nvm_err "file is not writable or self-owned: $(nvm_sanitize_path "$FILE")"
      return 1
    fi
  done
  return 0
}
nvm_cache_dir() {
  # Root directory for downloaded artifacts (bin/ and src/ tarball caches).
  nvm_echo "${NVM_DIR}/.cache"
}
nvm() {
if [ "$#" -lt 1 ]; then
nvm --help
return
fi
local DEFAULT_IFS
DEFAULT_IFS=" $(nvm_echo t | command tr t \\t)
"
if [ "${-#*e}" != "$-" ]; then
set +e
local EXIT_CODE
IFS="${DEFAULT_IFS}" nvm "$@"
EXIT_CODE="$?"
set -e
return "$EXIT_CODE"
elif [ "${IFS}" != "${DEFAULT_IFS}" ]; then
IFS="${DEFAULT_IFS}" nvm "$@"
return "$?"
fi
local i
for i in "$@"
do
case $i in
--) break ;;
'-h'|'help'|'--help')
NVM_NO_COLORS=""
for j in "$@"; do
if [ "${j}" = '--no-colors' ]; then
NVM_NO_COLORS="${j}"
break
fi
done
local INITIAL_COLOR_INFO
local RED_INFO
local GREEN_INFO
local BLUE_INFO
local CYAN_INFO
local MAGENTA_INFO
local YELLOW_INFO
local BLACK_INFO
local GREY_WHITE_INFO
if [ -z "${NVM_NO_COLORS-}" ] && nvm_has_colors; then
INITIAL_COLOR_INFO='\033[0;32m g\033[0m \033[0;34m b\033[0m \033[0;33m y\033[0m \033[0;31m r\033[0m \033[0;37m e\033[0m'
RED_INFO='\033[0;31m r\033[0m/\033[1;31mR\033[0m = \033[0;31mred\033[0m / \033[1;31mbold red\033[0m'
GREEN_INFO='\033[0;32m g\033[0m/\033[1;32mG\033[0m = \033[0;32mgreen\033[0m / \033[1;32mbold green\033[0m'
BLUE_INFO='\033[0;34m b\033[0m/\033[1;34mB\033[0m = \033[0;34mblue\033[0m / \033[1;34mbold blue\033[0m'
CYAN_INFO='\033[0;36m c\033[0m/\033[1;36mC\033[0m = \033[0;36mcyan\033[0m / \033[1;36mbold cyan\033[0m'
MAGENTA_INFO='\033[0;35m m\033[0m/\033[1;35mM\033[0m = \033[0;35mmagenta\033[0m / \033[1;35mbold magenta\033[0m'
YELLOW_INFO='\033[0;33m y\033[0m/\033[1;33mY\033[0m = \033[0;33myellow\033[0m / \033[1;33mbold yellow\033[0m'
BLACK_INFO='\033[0;30m k\033[0m/\033[1;30mK\033[0m = \033[0;30mblack\033[0m / \033[1;30mbold black\033[0m'
GREY_WHITE_INFO='\033[0;37m e\033[0m/\033[1;37mW\033[0m = \033[0;37mlight grey\033[0m / \033[1;37mwhite\033[0m'
else
INITIAL_COLOR_INFO='gbYre'
RED_INFO='r/R = red / bold red'
GREEN_INFO='g/G = green / bold green'
BLUE_INFO='b/B = blue / bold blue'
CYAN_INFO='c/C = cyan / bold cyan'
MAGENTA_INFO='m/M = magenta / bold magenta'
YELLOW_INFO='y/Y = yellow / bold yellow'
BLACK_INFO='k/K = black / bold black'
GREY_WHITE_INFO='e/W = light grey / white'
fi
local NVM_IOJS_PREFIX
NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
local NVM_NODE_PREFIX
NVM_NODE_PREFIX="$(nvm_node_prefix)"
NVM_VERSION="$(nvm --version)"
nvm_echo
nvm_echo "Node Version Manager (v${NVM_VERSION})"
nvm_echo
nvm_echo 'Note: <version> refers to any version-like string nvm understands. This includes:'
nvm_echo ' - full or partial version numbers, starting with an optional "v" (0.10, v0.1.2, v1)'
nvm_echo " - default (built-in) aliases: ${NVM_NODE_PREFIX}, stable, unstable, ${NVM_IOJS_PREFIX}, system"
nvm_echo ' - custom aliases you define with `nvm alias foo`'
nvm_echo
nvm_echo ' Any options that produce colorized output should respect the `--no-colors` option.'
nvm_echo
nvm_echo 'Usage:'
nvm_echo ' nvm --help Show this message'
nvm_echo ' --no-colors Suppress colored output'
nvm_echo ' nvm --version Print out the installed version of nvm'
nvm_echo ' nvm install [<version>] Download and install a <version>. Uses .nvmrc if available and version is omitted.'
nvm_echo ' The following optional arguments, if provided, must appear directly after `nvm install`:'
nvm_echo ' -s Skip binary download, install from source only.'
nvm_echo ' -b Skip source download, install from binary only.'
nvm_echo ' --reinstall-packages-from=<version> When installing, reinstall packages installed in <node|iojs|node version number>'
nvm_echo ' --lts When installing, only select from LTS (long-term support) versions'
nvm_echo ' --lts=<LTS name> When installing, only select from versions for a specific LTS line'
nvm_echo ' --skip-default-packages When installing, skip the default-packages file if it exists'
nvm_echo ' --latest-npm After installing, attempt to upgrade to the latest working npm on the given node version'
nvm_echo ' --no-progress Disable the progress bar on any downloads'
nvm_echo ' --alias=<name> After installing, set the alias specified to the version specified. (same as: nvm alias <name> <version>)'
nvm_echo ' --default After installing, set default alias to the version specified. (same as: nvm alias default <version>)'
nvm_echo ' nvm uninstall <version> Uninstall a version'
nvm_echo ' nvm uninstall --lts Uninstall using automatic LTS (long-term support) alias `lts/*`, if available.'
nvm_echo ' nvm uninstall --lts=<LTS name> Uninstall using automatic alias for provided LTS line, if available.'
nvm_echo ' nvm use [<version>] Modify PATH to use <version>. Uses .nvmrc if available and version is omitted.'
nvm_echo ' The following optional arguments, if provided, must appear directly after `nvm use`:'
nvm_echo ' --silent Silences stdout/stderr output'
nvm_echo ' --lts Uses automatic LTS (long-term support) alias `lts/*`, if available.'
nvm_echo ' --lts=<LTS name> Uses automatic alias for provided LTS line, if available.'
nvm_echo ' nvm exec [<version>] [<command>] Run <command> on <version>. Uses .nvmrc if available and version is omitted.'
nvm_echo ' The following optional arguments, if provided, must appear directly after `nvm exec`:'
nvm_echo ' --silent Silences stdout/stderr output'
nvm_echo ' --lts Uses automatic LTS (long-term support) alias `lts/*`, if available.'
nvm_echo ' --lts=<LTS name> Uses automatic alias for provided LTS line, if available.'
nvm_echo ' nvm run [<version>] [<args>] Run `node` on <version> with <args> as arguments. Uses .nvmrc if available and version is omitted.'
nvm_echo ' The following optional arguments, if provided, must appear directly after `nvm run`:'
nvm_echo ' --silent Silences stdout/stderr output'
nvm_echo ' --lts Uses automatic LTS (long-term support) alias `lts/*`, if available.'
nvm_echo ' --lts=<LTS name> Uses automatic alias for provided LTS line, if available.'
nvm_echo ' nvm current Display currently activated version of Node'
nvm_echo ' nvm ls [<version>] List installed versions, matching a given <version> if provided'
nvm_echo ' --no-colors Suppress colored output'
nvm_echo ' --no-alias Suppress `nvm alias` output'
nvm_echo ' nvm ls-remote [<version>] List remote versions available for install, matching a given <version> if provided'
nvm_echo ' --lts When listing, only show LTS (long-term support) versions'
nvm_echo ' --lts=<LTS name> When listing, only show versions for a specific LTS line'
nvm_echo ' --no-colors Suppress colored output'
nvm_echo ' nvm version <version> Resolve the given description to a single local version'
nvm_echo ' nvm version-remote <version> Resolve the given description to a single remote version'
nvm_echo ' --lts When listing, only select from LTS (long-term support) versions'
nvm_echo ' --lts=<LTS name> When listing, only select from versions for a specific LTS line'
nvm_echo ' nvm deactivate Undo effects of `nvm` on current shell'
nvm_echo ' --silent Silences stdout/stderr output'
nvm_echo ' nvm alias [<pattern>] Show all aliases beginning with <pattern>'
nvm_echo ' --no-colors Suppress colored output'
nvm_echo ' nvm alias <name> <version> Set an alias named <name> pointing to <version>'
nvm_echo ' nvm unalias <name> Deletes the alias named <name>'
nvm_echo ' nvm install-latest-npm Attempt to upgrade to the latest working `npm` on the current node version'
nvm_echo ' nvm reinstall-packages <version> Reinstall global `npm` packages contained in <version> to current version'
nvm_echo ' nvm unload Unload `nvm` from shell'
nvm_echo ' nvm which [current | <version>] Display path to installed node version. Uses .nvmrc if available and version is omitted.'
nvm_echo ' --silent Silences stdout/stderr output when a version is omitted'
nvm_echo ' nvm cache dir Display path to the cache directory for nvm'
nvm_echo ' nvm cache clear Empty cache directory for nvm'
nvm_echo ' nvm set-colors [<color codes>] Set five text colors using format "yMeBg". Available when supported.'
nvm_echo ' Initial colors are:'
nvm_echo_with_colors " ${INITIAL_COLOR_INFO}"
nvm_echo ' Color codes:'
nvm_echo_with_colors " ${RED_INFO}"
nvm_echo_with_colors " ${GREEN_INFO}"
nvm_echo_with_colors " ${BLUE_INFO}"
nvm_echo_with_colors " ${CYAN_INFO}"
nvm_echo_with_colors " ${MAGENTA_INFO}"
nvm_echo_with_colors " ${YELLOW_INFO}"
nvm_echo_with_colors " ${BLACK_INFO}"
nvm_echo_with_colors " ${GREY_WHITE_INFO}"
nvm_echo
nvm_echo 'Example:'
nvm_echo ' nvm install 8.0.0 Install a specific version number'
nvm_echo ' nvm use 8.0 Use the latest available 8.0.x release'
nvm_echo ' nvm run 6.10.3 app.js Run app.js using node 6.10.3'
nvm_echo ' nvm exec 4.8.3 node app.js Run `node app.js` with the PATH pointing to node 4.8.3'
nvm_echo ' nvm alias default 8.1.0 Set default node version on a shell'
nvm_echo ' nvm alias default node Always default to the latest available node version on a shell'
nvm_echo
nvm_echo ' nvm install node Install the latest available version'
nvm_echo ' nvm use node Use the latest version'
nvm_echo ' nvm install --lts Install the latest LTS version'
nvm_echo ' nvm use --lts Use the latest LTS version'
nvm_echo
nvm_echo ' nvm set-colors cgYmW Set text colors to cyan, green, bold yellow, magenta, and white'
nvm_echo
nvm_echo 'Note:'
nvm_echo ' to remove, delete, or uninstall nvm - just remove the `$NVM_DIR` folder (usually `~/.nvm`)'
nvm_echo
return 0;
;;
esac
done
local COMMAND
COMMAND="${1-}"
shift
# initialize local variables
local VERSION
local ADDITIONAL_PARAMETERS
case $COMMAND in
"cache")
case "${1-}" in
dir) nvm_cache_dir ;;
clear)
local DIR
DIR="$(nvm_cache_dir)"
if command rm -rf "${DIR}" && command mkdir -p "${DIR}"; then
nvm_echo 'nvm cache cleared.'
else
nvm_err "Unable to clear nvm cache: ${DIR}"
return 1
fi
;;
*)
>&2 nvm --help
return 127
;;
esac
;;
"debug")
local OS_VERSION
nvm_is_zsh && setopt local_options shwordsplit
nvm_err "nvm --version: v$(nvm --version)"
if [ -n "${TERM_PROGRAM-}" ]; then
nvm_err "\$TERM_PROGRAM: ${TERM_PROGRAM}"
fi
nvm_err "\$SHELL: ${SHELL}"
# shellcheck disable=SC2169,SC3028
nvm_err "\$SHLVL: ${SHLVL-}"
nvm_err "whoami: '$(whoami)'"
nvm_err "\${HOME}: ${HOME}"
nvm_err "\${NVM_DIR}: '$(nvm_sanitize_path "${NVM_DIR}")'"
nvm_err "\${PATH}: $(nvm_sanitize_path "${PATH}")"
nvm_err "\$PREFIX: '$(nvm_sanitize_path "${PREFIX}")'"
nvm_err "\${NPM_CONFIG_PREFIX}: '$(nvm_sanitize_path "${NPM_CONFIG_PREFIX}")'"
nvm_err "\$NVM_NODEJS_ORG_MIRROR: '${NVM_NODEJS_ORG_MIRROR}'"
nvm_err "\$NVM_IOJS_ORG_MIRROR: '${NVM_IOJS_ORG_MIRROR}'"
nvm_err "shell version: '$(${SHELL} --version | command head -n 1)'"
nvm_err "uname -a: '$(command uname -a | command awk '{$2=""; print}' | command xargs)'"
nvm_err "checksum binary: '$(nvm_get_checksum_binary 2>/dev/null)'"
if [ "$(nvm_get_os)" = "darwin" ] && nvm_has sw_vers; then
OS_VERSION="$(sw_vers | command awk '{print $2}' | command xargs)"
elif [ -r "/etc/issue" ]; then
OS_VERSION="$(command head -n 1 /etc/issue | command sed 's/\\.//g')"
if [ -z "${OS_VERSION}" ] && [ -r "/etc/os-release" ]; then
# shellcheck disable=SC1091
OS_VERSION="$(. /etc/os-release && echo "${NAME}" "${VERSION}")"
fi
fi
if [ -n "${OS_VERSION}" ]; then
nvm_err "OS version: ${OS_VERSION}"
fi
if nvm_has "curl"; then
nvm_err "curl: $(nvm_command_info curl), $(command curl -V | command head -n 1)"
else
nvm_err "curl: not found"
fi
if nvm_has "wget"; then
nvm_err "wget: $(nvm_command_info wget), $(command wget -V | command head -n 1)"
else
nvm_err "wget: not found"
fi
local TEST_TOOLS ADD_TEST_TOOLS
TEST_TOOLS="git grep awk"
ADD_TEST_TOOLS="sed cut basename rm mkdir xargs"
if [ "darwin" != "$(nvm_get_os)" ] && [ "freebsd" != "$(nvm_get_os)" ]; then
TEST_TOOLS="${TEST_TOOLS} ${ADD_TEST_TOOLS}"
else
for tool in ${ADD_TEST_TOOLS} ; do
if nvm_has "${tool}"; then
nvm_err "${tool}: $(nvm_command_info "${tool}")"
else
nvm_err "${tool}: not found"
fi
done
fi
for tool in ${TEST_TOOLS} ; do
local NVM_TOOL_VERSION
if nvm_has "${tool}"; then
if command ls -l "$(nvm_command_info "${tool}" | command awk '{print $1}')" | command grep -q busybox; then
NVM_TOOL_VERSION="$(command "${tool}" --help 2>&1 | command head -n 1)"
else
NVM_TOOL_VERSION="$(command "${tool}" --version 2>&1 | command head -n 1)"
fi
nvm_err "${tool}: $(nvm_command_info "${tool}"), ${NVM_TOOL_VERSION}"
else
nvm_err "${tool}: not found"
fi
unset NVM_TOOL_VERSION
done
unset TEST_TOOLS ADD_TEST_TOOLS
local NVM_DEBUG_OUTPUT
for NVM_DEBUG_COMMAND in 'nvm current' 'which node' 'which iojs' 'which npm' 'npm config get prefix' 'npm root -g'; do
NVM_DEBUG_OUTPUT="$(${NVM_DEBUG_COMMAND} 2>&1)"
nvm_err "${NVM_DEBUG_COMMAND}: $(nvm_sanitize_path "${NVM_DEBUG_OUTPUT}")"
done
return 42
;;
"install" | "i")
local version_not_provided
version_not_provided=0
local NVM_OS
NVM_OS="$(nvm_get_os)"
if ! nvm_has "curl" && ! nvm_has "wget"; then
nvm_err 'nvm needs curl or wget to proceed.'
return 1
fi
if [ $# -lt 1 ]; then
version_not_provided=1
fi
local nobinary
local nosource
local noprogress
nobinary=0
noprogress=0
nosource=0
local LTS
local ALIAS
local NVM_UPGRADE_NPM
NVM_UPGRADE_NPM=0
local PROVIDED_REINSTALL_PACKAGES_FROM
local REINSTALL_PACKAGES_FROM
local SKIP_DEFAULT_PACKAGES
local DEFAULT_PACKAGES
while [ $# -ne 0 ]; do
case "$1" in
---*)
nvm_err 'arguments with `---` are not supported - this is likely a typo'
return 55;
;;
-s)
shift # consume "-s"
nobinary=1
if [ $nosource -eq 1 ]; then
nvm err '-s and -b cannot be set together since they would skip install from both binary and source'
return 6
fi
;;
-b)
shift # consume "-b"
nosource=1
if [ $nobinary -eq 1 ]; then
nvm err '-s and -b cannot be set together since they would skip install from both binary and source'
return 6
fi
;;
-j)
shift # consume "-j"
nvm_get_make_jobs "$1"
shift # consume job count
;;
--no-progress)
noprogress=1
shift
;;
--lts)
LTS='*'
shift
;;
--lts=*)
LTS="${1##--lts=}"
shift
;;
--latest-npm)
NVM_UPGRADE_NPM=1
shift
;;
--default)
if [ -n "${ALIAS-}" ]; then
nvm_err '--default and --alias are mutually exclusive, and may not be provided more than once'
return 6
fi
ALIAS='default'
shift
;;
--alias=*)
if [ -n "${ALIAS-}" ]; then
nvm_err '--default and --alias are mutually exclusive, and may not be provided more than once'
return 6
fi
ALIAS="${1##--alias=}"
shift
;;
--reinstall-packages-from=*)
if [ -n "${PROVIDED_REINSTALL_PACKAGES_FROM-}" ]; then
nvm_err '--reinstall-packages-from may not be provided more than once'
return 6
fi
PROVIDED_REINSTALL_PACKAGES_FROM="$(nvm_echo "$1" | command cut -c 27-)"
if [ -z "${PROVIDED_REINSTALL_PACKAGES_FROM}" ]; then
nvm_err 'If --reinstall-packages-from is provided, it must point to an installed version of node.'
return 6
fi
REINSTALL_PACKAGES_FROM="$(nvm_version "${PROVIDED_REINSTALL_PACKAGES_FROM}")" ||:
shift
;;
--copy-packages-from=*)
if [ -n "${PROVIDED_REINSTALL_PACKAGES_FROM-}" ]; then
nvm_err '--reinstall-packages-from may not be provided more than once, or combined with `--copy-packages-from`'
return 6
fi
PROVIDED_REINSTALL_PACKAGES_FROM="$(nvm_echo "$1" | command cut -c 22-)"
if [ -z "${PROVIDED_REINSTALL_PACKAGES_FROM}" ]; then
nvm_err 'If --copy-packages-from is provided, it must point to an installed version of node.'
return 6
fi
REINSTALL_PACKAGES_FROM="$(nvm_version "${PROVIDED_REINSTALL_PACKAGES_FROM}")" ||:
shift
;;
--reinstall-packages-from | --copy-packages-from)
nvm_err "If ${1} is provided, it must point to an installed version of node using \`=\`."
return 6
;;
--skip-default-packages)
SKIP_DEFAULT_PACKAGES=true
shift
;;
*)
break # stop parsing args
;;
esac
done
local provided_version
provided_version="${1-}"
if [ -z "${provided_version}" ]; then
if [ "_${LTS-}" = '_*' ]; then
nvm_echo 'Installing latest LTS version.'
if [ $# -gt 0 ]; then
shift
fi
elif [ "_${LTS-}" != '_' ]; then
nvm_echo "Installing with latest version of LTS line: ${LTS}"
if [ $# -gt 0 ]; then
shift
fi
else
nvm_rc_version
if [ $version_not_provided -eq 1 ] && [ -z "${NVM_RC_VERSION}" ]; then
unset NVM_RC_VERSION
>&2 nvm --help
return 127
fi
provided_version="${NVM_RC_VERSION}"
unset NVM_RC_VERSION
fi
elif [ $# -gt 0 ]; then
shift
fi
case "${provided_version}" in
'lts/*')
LTS='*'
provided_version=''
;;
lts/*)
LTS="${provided_version##lts/}"
provided_version=''
;;
esac
VERSION="$(NVM_VERSION_ONLY=true NVM_LTS="${LTS-}" nvm_remote_version "${provided_version}")"
if [ "${VERSION}" = 'N/A' ]; then
local LTS_MSG
local REMOTE_CMD
if [ "${LTS-}" = '*' ]; then
LTS_MSG='(with LTS filter) '
REMOTE_CMD='nvm ls-remote --lts'
elif [ -n "${LTS-}" ]; then
LTS_MSG="(with LTS filter '${LTS}') "
REMOTE_CMD="nvm ls-remote --lts=${LTS}"
else
REMOTE_CMD='nvm ls-remote'
fi
nvm_err "Version '${provided_version}' ${LTS_MSG-}not found - try \`${REMOTE_CMD}\` to browse available versions."
return 3
fi
ADDITIONAL_PARAMETERS=''
while [ $# -ne 0 ]; do
case "$1" in
--reinstall-packages-from=*)
if [ -n "${PROVIDED_REINSTALL_PACKAGES_FROM-}" ]; then
nvm_err '--reinstall-packages-from may not be provided more than once'
return 6
fi
PROVIDED_REINSTALL_PACKAGES_FROM="$(nvm_echo "$1" | command cut -c 27-)"
if [ -z "${PROVIDED_REINSTALL_PACKAGES_FROM}" ]; then
nvm_err 'If --reinstall-packages-from is provided, it must point to an installed version of node.'
return 6
fi
REINSTALL_PACKAGES_FROM="$(nvm_version "${PROVIDED_REINSTALL_PACKAGES_FROM}")" ||:
;;
--copy-packages-from=*)
if [ -n "${PROVIDED_REINSTALL_PACKAGES_FROM-}" ]; then
nvm_err '--reinstall-packages-from may not be provided more than once, or combined with `--copy-packages-from`'
return 6
fi
PROVIDED_REINSTALL_PACKAGES_FROM="$(nvm_echo "$1" | command cut -c 22-)"
if [ -z "${PROVIDED_REINSTALL_PACKAGES_FROM}" ]; then
nvm_err 'If --copy-packages-from is provided, it must point to an installed version of node.'
return 6
fi
REINSTALL_PACKAGES_FROM="$(nvm_version "${PROVIDED_REINSTALL_PACKAGES_FROM}")" ||:
;;
--reinstall-packages-from | --copy-packages-from)
nvm_err "If ${1} is provided, it must point to an installed version of node using \`=\`."
return 6
;;
--skip-default-packages)
SKIP_DEFAULT_PACKAGES=true
;;
*)
ADDITIONAL_PARAMETERS="${ADDITIONAL_PARAMETERS} $1"
;;
esac
shift
done
if [ -z "${SKIP_DEFAULT_PACKAGES-}" ]; then
DEFAULT_PACKAGES="$(nvm_get_default_packages)"
EXIT_CODE=$?
if [ $EXIT_CODE -ne 0 ]; then
return $EXIT_CODE
fi
fi
if [ -n "${PROVIDED_REINSTALL_PACKAGES_FROM-}" ] && [ "$(nvm_ensure_version_prefix "${PROVIDED_REINSTALL_PACKAGES_FROM}")" = "${VERSION}" ]; then
nvm_err "You can't reinstall global packages from the same version of node you're installing."
return 4
elif [ "${REINSTALL_PACKAGES_FROM-}" = 'N/A' ]; then
nvm_err "If --reinstall-packages-from is provided, it must point to an installed version of node."
return 5
fi
local FLAVOR
if nvm_is_iojs_version "${VERSION}"; then
FLAVOR="$(nvm_iojs_prefix)"
else
FLAVOR="$(nvm_node_prefix)"
fi
if nvm_is_version_installed "${VERSION}"; then
nvm_err "${VERSION} is already installed."
if nvm use "${VERSION}"; then
if [ "${NVM_UPGRADE_NPM}" = 1 ]; then
nvm install-latest-npm
fi
if [ -z "${SKIP_DEFAULT_PACKAGES-}" ] && [ -n "${DEFAULT_PACKAGES-}" ]; then
nvm_install_default_packages "${DEFAULT_PACKAGES}"
fi
if [ -n "${REINSTALL_PACKAGES_FROM-}" ] && [ "_${REINSTALL_PACKAGES_FROM}" != "_N/A" ]; then
nvm reinstall-packages "${REINSTALL_PACKAGES_FROM}"
fi
fi
if [ -n "${LTS-}" ]; then
LTS="$(echo "${LTS}" | tr '[:upper:]' '[:lower:]')"
nvm_ensure_default_set "lts/${LTS}"
else
nvm_ensure_default_set "${provided_version}"
fi
if [ -n "${ALIAS-}" ]; then
nvm alias "${ALIAS}" "${provided_version}"
fi
return $?
fi
local EXIT_CODE
EXIT_CODE=-1
if [ -n "${NVM_INSTALL_THIRD_PARTY_HOOK-}" ]; then
nvm_err '** $NVM_INSTALL_THIRD_PARTY_HOOK env var set; dispatching to third-party installation method **'
local NVM_METHOD_PREFERENCE
NVM_METHOD_PREFERENCE='binary'
if [ $nobinary -eq 1 ]; then
NVM_METHOD_PREFERENCE='source'
fi
local VERSION_PATH
VERSION_PATH="$(nvm_version_path "${VERSION}")"
"${NVM_INSTALL_THIRD_PARTY_HOOK}" "${VERSION}" "${FLAVOR}" std "${NVM_METHOD_PREFERENCE}" "${VERSION_PATH}" || {
EXIT_CODE=$?
nvm_err '*** Third-party $NVM_INSTALL_THIRD_PARTY_HOOK env var failed to install! ***'
return $EXIT_CODE
}
if ! nvm_is_version_installed "${VERSION}"; then
nvm_err '*** Third-party $NVM_INSTALL_THIRD_PARTY_HOOK env var claimed to succeed, but failed to install! ***'
return 33
fi
EXIT_CODE=0
else
if [ "_${NVM_OS}" = "_freebsd" ]; then
# node.js and io.js do not have a FreeBSD binary
nobinary=1
nvm_err "Currently, there is no binary for FreeBSD"
elif [ "_${NVM_OS}" = "_sunos" ]; then
# Not all node/io.js versions have a Solaris binary
if ! nvm_has_solaris_binary "${VERSION}"; then
nobinary=1
nvm_err "Currently, there is no binary of version ${VERSION} for SunOS"
fi
fi
# skip binary install if "nobinary" option specified.
if [ $nobinary -ne 1 ] && nvm_binary_available "${VERSION}"; then
NVM_NO_PROGRESS="${NVM_NO_PROGRESS:-${noprogress}}" nvm_install_binary "${FLAVOR}" std "${VERSION}" "${nosource}"
EXIT_CODE=$?
fi
if [ $EXIT_CODE -ne 0 ]; then
if [ -z "${NVM_MAKE_JOBS-}" ]; then
nvm_get_make_jobs
fi
if [ "_${NVM_OS}" = "_win" ]; then
nvm_err 'Installing from source on non-WSL Windows is not supported'
EXIT_CODE=87
else
NVM_NO_PROGRESS="${NVM_NO_PROGRESS:-${noprogress}}" nvm_install_source "${FLAVOR}" std "${VERSION}" "${NVM_MAKE_JOBS}" "${ADDITIONAL_PARAMETERS}"
EXIT_CODE=$?
fi
fi
fi
if [ $EXIT_CODE -eq 0 ] && nvm_use_if_needed "${VERSION}" && nvm_install_npm_if_needed "${VERSION}"; then
if [ -n "${LTS-}" ]; then
nvm_ensure_default_set "lts/${LTS}"
else
nvm_ensure_default_set "${provided_version}"
fi
if [ "${NVM_UPGRADE_NPM}" = 1 ]; then
nvm install-latest-npm
EXIT_CODE=$?
fi
if [ -z "${SKIP_DEFAULT_PACKAGES-}" ] && [ -n "${DEFAULT_PACKAGES-}" ]; then
nvm_install_default_packages "${DEFAULT_PACKAGES}"
fi
if [ -n "${REINSTALL_PACKAGES_FROM-}" ] && [ "_${REINSTALL_PACKAGES_FROM}" != "_N/A" ]; then
nvm reinstall-packages "${REINSTALL_PACKAGES_FROM}"
EXIT_CODE=$?
fi
else
EXIT_CODE=$?
fi
return $EXIT_CODE
;;
"uninstall")
if [ $# -ne 1 ]; then
>&2 nvm --help
return 127
fi
local PATTERN
PATTERN="${1-}"
case "${PATTERN-}" in
--) ;;
--lts | 'lts/*')
VERSION="$(nvm_match_version "lts/*")"
;;
lts/*)
VERSION="$(nvm_match_version "lts/${PATTERN##lts/}")"
;;
--lts=*)
VERSION="$(nvm_match_version "lts/${PATTERN##--lts=}")"
;;
*)
VERSION="$(nvm_version "${PATTERN}")"
;;
esac
if [ "_${VERSION}" = "_$(nvm_ls_current)" ]; then
if nvm_is_iojs_version "${VERSION}"; then
nvm_err "nvm: Cannot uninstall currently-active io.js version, ${VERSION} (inferred from ${PATTERN})."
else
nvm_err "nvm: Cannot uninstall currently-active node version, ${VERSION} (inferred from ${PATTERN})."
fi
return 1
fi
if ! nvm_is_version_installed "${VERSION}"; then
nvm_err "${VERSION} version is not installed..."
return
fi
local SLUG_BINARY
local SLUG_SOURCE
if nvm_is_iojs_version "${VERSION}"; then
SLUG_BINARY="$(nvm_get_download_slug iojs binary std "${VERSION}")"
SLUG_SOURCE="$(nvm_get_download_slug iojs source std "${VERSION}")"
else
SLUG_BINARY="$(nvm_get_download_slug node binary std "${VERSION}")"
SLUG_SOURCE="$(nvm_get_download_slug node source std "${VERSION}")"
fi
local NVM_SUCCESS_MSG
if nvm_is_iojs_version "${VERSION}"; then
NVM_SUCCESS_MSG="Uninstalled io.js $(nvm_strip_iojs_prefix "${VERSION}")"
else
NVM_SUCCESS_MSG="Uninstalled node ${VERSION}"
fi
local VERSION_PATH
VERSION_PATH="$(nvm_version_path "${VERSION}")"
if ! nvm_check_file_permissions "${VERSION_PATH}"; then
nvm_err 'Cannot uninstall, incorrect permissions on installation folder.'
nvm_err 'This is usually caused by running `npm install -g` as root. Run the following commands as root to fix the permissions and then try again.'
nvm_err
nvm_err " chown -R $(whoami) \"$(nvm_sanitize_path "${VERSION_PATH}")\""
nvm_err " chmod -R u+w \"$(nvm_sanitize_path "${VERSION_PATH}")\""
return 1
fi
# Delete all files related to target version.
local CACHE_DIR
CACHE_DIR="$(nvm_cache_dir)"
command rm -rf \
"${CACHE_DIR}/bin/${SLUG_BINARY}/files" \
"${CACHE_DIR}/src/${SLUG_SOURCE}/files" \
"${VERSION_PATH}" 2>/dev/null
nvm_echo "${NVM_SUCCESS_MSG}"
# rm any aliases that point to uninstalled version.
for ALIAS in $(nvm_grep -l "${VERSION}" "$(nvm_alias_path)/*" 2>/dev/null); do
nvm unalias "$(command basename "${ALIAS}")"
done
;;
"deactivate")
local NVM_SILENT
while [ $# -ne 0 ]; do
case "${1}" in
--silent) NVM_SILENT=1 ;;
--) ;;
esac
shift
done
local NEWPATH
NEWPATH="$(nvm_strip_path "${PATH}" "/bin")"
if [ "_${PATH}" = "_${NEWPATH}" ]; then
if [ "${NVM_SILENT:-0}" -ne 1 ]; then
nvm_err "Could not find ${NVM_DIR}/*/bin in \${PATH}"
fi
else
export PATH="${NEWPATH}"
hash -r
if [ "${NVM_SILENT:-0}" -ne 1 ]; then
nvm_echo "${NVM_DIR}/*/bin removed from \${PATH}"
fi
fi
if [ -n "${MANPATH-}" ]; then
NEWPATH="$(nvm_strip_path "${MANPATH}" "/share/man")"
if [ "_${MANPATH}" = "_${NEWPATH}" ]; then
if [ "${NVM_SILENT:-0}" -ne 1 ]; then
nvm_err "Could not find ${NVM_DIR}/*/share/man in \${MANPATH}"
fi
else
export MANPATH="${NEWPATH}"
if [ "${NVM_SILENT:-0}" -ne 1 ]; then
nvm_echo "${NVM_DIR}/*/share/man removed from \${MANPATH}"
fi
fi
fi
if [ -n "${NODE_PATH-}" ]; then
NEWPATH="$(nvm_strip_path "${NODE_PATH}" "/lib/node_modules")"
if [ "_${NODE_PATH}" != "_${NEWPATH}" ]; then
export NODE_PATH="${NEWPATH}"
if [ "${NVM_SILENT:-0}" -ne 1 ]; then
nvm_echo "${NVM_DIR}/*/lib/node_modules removed from \${NODE_PATH}"
fi
fi
fi
unset NVM_BIN
unset NVM_INC
;;
"use")
local PROVIDED_VERSION
local NVM_SILENT
local NVM_SILENT_ARG
local NVM_DELETE_PREFIX
NVM_DELETE_PREFIX=0
local NVM_LTS
while [ $# -ne 0 ]; do
case "$1" in
--silent)
NVM_SILENT=1
NVM_SILENT_ARG='--silent'
;;
--delete-prefix) NVM_DELETE_PREFIX=1 ;;
--) ;;
--lts) NVM_LTS='*' ;;
--lts=*) NVM_LTS="${1##--lts=}" ;;
--*) ;;
*)
if [ -n "${1-}" ]; then
PROVIDED_VERSION="$1"
fi
;;
esac
shift
done
if [ -n "${NVM_LTS-}" ]; then
VERSION="$(nvm_match_version "lts/${NVM_LTS:-*}")"
elif [ -z "${PROVIDED_VERSION-}" ]; then
NVM_SILENT="${NVM_SILENT:-0}" nvm_rc_version
if [ -n "${NVM_RC_VERSION-}" ]; then
PROVIDED_VERSION="${NVM_RC_VERSION}"
VERSION="$(nvm_version "${PROVIDED_VERSION}")"
fi
unset NVM_RC_VERSION
if [ -z "${VERSION}" ]; then
nvm_err 'Please see `nvm --help` or https://github.com/nvm-sh/nvm#nvmrc for more information.'
return 127
fi
else
VERSION="$(nvm_match_version "${PROVIDED_VERSION}")"
fi
if [ -z "${VERSION}" ]; then
>&2 nvm --help
return 127
fi
if [ "_${VERSION}" = '_system' ]; then
if nvm_has_system_node && nvm deactivate "${NVM_SILENT_ARG-}" >/dev/null 2>&1; then
if [ "${NVM_SILENT:-0}" -ne 1 ]; then
nvm_echo "Now using system version of node: $(node -v 2>/dev/null)$(nvm_print_npm_version)"
fi
return
elif nvm_has_system_iojs && nvm deactivate "${NVM_SILENT_ARG-}" >/dev/null 2>&1; then
if [ "${NVM_SILENT:-0}" -ne 1 ]; then
nvm_echo "Now using system version of io.js: $(iojs --version 2>/dev/null)$(nvm_print_npm_version)"
fi
return
elif [ "${NVM_SILENT:-0}" -ne 1 ]; then
nvm_err 'System version of node not found.'
fi
return 127
elif [ "_${VERSION}" = "_∞" ]; then
if [ "${NVM_SILENT:-0}" -ne 1 ]; then
nvm_err "The alias \"${PROVIDED_VERSION}\" leads to an infinite loop. Aborting."
fi
return 8
fi
if [ "${VERSION}" = 'N/A' ]; then
if [ "${NVM_SILENT:-0}" -ne 1 ]; then
nvm_err "N/A: version \"${PROVIDED_VERSION} -> ${VERSION}\" is not yet installed."
nvm_err ""
nvm_err "You need to run \"nvm install ${PROVIDED_VERSION}\" to install it before using it."
fi
return 3
# This nvm_ensure_version_installed call can be a performance bottleneck
# on shell startup. Perhaps we can optimize it away or make it faster.
elif ! nvm_ensure_version_installed "${VERSION}"; then
return $?
fi
local NVM_VERSION_DIR
NVM_VERSION_DIR="$(nvm_version_path "${VERSION}")"
# Change current version
PATH="$(nvm_change_path "${PATH}" "/bin" "${NVM_VERSION_DIR}")"
if nvm_has manpath; then
if [ -z "${MANPATH-}" ]; then
local MANPATH
MANPATH=$(manpath)
fi
# Change current version
MANPATH="$(nvm_change_path "${MANPATH}" "/share/man" "${NVM_VERSION_DIR}")"
export MANPATH
fi
export PATH
hash -r
export NVM_BIN="${NVM_VERSION_DIR}/bin"
export NVM_INC="${NVM_VERSION_DIR}/include/node"
if [ "${NVM_SYMLINK_CURRENT-}" = true ]; then
command rm -f "${NVM_DIR}/current" && ln -s "${NVM_VERSION_DIR}" "${NVM_DIR}/current"
fi
local NVM_USE_OUTPUT
NVM_USE_OUTPUT=''
if [ "${NVM_SILENT:-0}" -ne 1 ]; then
if nvm_is_iojs_version "${VERSION}"; then
NVM_USE_OUTPUT="Now using io.js $(nvm_strip_iojs_prefix "${VERSION}")$(nvm_print_npm_version)"
else
NVM_USE_OUTPUT="Now using node ${VERSION}$(nvm_print_npm_version)"
fi
fi
if [ "_${VERSION}" != "_system" ]; then
local NVM_USE_CMD
NVM_USE_CMD="nvm use --delete-prefix"
if [ -n "${PROVIDED_VERSION}" ]; then
NVM_USE_CMD="${NVM_USE_CMD} ${VERSION}"
fi
if [ "${NVM_SILENT:-0}" -eq 1 ]; then
NVM_USE_CMD="${NVM_USE_CMD} --silent"
fi
if ! nvm_die_on_prefix "${NVM_DELETE_PREFIX}" "${NVM_USE_CMD}" "${NVM_VERSION_DIR}"; then
return 11
fi
fi
if [ -n "${NVM_USE_OUTPUT-}" ] && [ "${NVM_SILENT:-0}" -ne 1 ]; then
nvm_echo "${NVM_USE_OUTPUT}"
fi
;;
"run")
local provided_version
local has_checked_nvmrc
has_checked_nvmrc=0
# run given version of node
local NVM_SILENT
local NVM_SILENT_ARG
local NVM_LTS
while [ $# -gt 0 ]; do
case "$1" in
--silent)
NVM_SILENT=1
NVM_SILENT_ARG='--silent'
shift
;;
--lts) NVM_LTS='*' ; shift ;;
--lts=*) NVM_LTS="${1##--lts=}" ; shift ;;
*)
if [ -n "$1" ]; then
break
else
shift
fi
;; # stop processing arguments
esac
done
if [ $# -lt 1 ] && [ -z "${NVM_LTS-}" ]; then
NVM_SILENT="${NVM_SILENT:-0}" nvm_rc_version && has_checked_nvmrc=1
if [ -n "${NVM_RC_VERSION-}" ]; then
VERSION="$(nvm_version "${NVM_RC_VERSION-}")" ||:
fi
unset NVM_RC_VERSION
if [ "${VERSION:-N/A}" = 'N/A' ]; then
>&2 nvm --help
return 127
fi
fi
if [ -z "${NVM_LTS-}" ]; then
provided_version="$1"
if [ -n "${provided_version}" ]; then
VERSION="$(nvm_version "${provided_version}")" ||:
if [ "_${VERSION:-N/A}" = '_N/A' ] && ! nvm_is_valid_version "${provided_version}"; then
provided_version=''
if [ $has_checked_nvmrc -ne 1 ]; then
NVM_SILENT="${NVM_SILENT:-0}" nvm_rc_version && has_checked_nvmrc=1
fi
VERSION="$(nvm_version "${NVM_RC_VERSION}")" ||:
unset NVM_RC_VERSION
else
shift
fi
fi
fi
local NVM_IOJS
if nvm_is_iojs_version "${VERSION}"; then
NVM_IOJS=true
fi
local EXIT_CODE
nvm_is_zsh && setopt local_options shwordsplit
local LTS_ARG
if [ -n "${NVM_LTS-}" ]; then
LTS_ARG="--lts=${NVM_LTS-}"
VERSION=''
fi
if [ "_${VERSION}" = "_N/A" ]; then
nvm_ensure_version_installed "${provided_version}"
elif [ "${NVM_IOJS}" = true ]; then
nvm exec "${NVM_SILENT_ARG-}" "${LTS_ARG-}" "${VERSION}" iojs "$@"
else
nvm exec "${NVM_SILENT_ARG-}" "${LTS_ARG-}" "${VERSION}" node "$@"
fi
EXIT_CODE="$?"
return $EXIT_CODE
;;
"exec")
local NVM_SILENT
local NVM_LTS
while [ $# -gt 0 ]; do
case "$1" in
--silent) NVM_SILENT=1 ; shift ;;
--lts) NVM_LTS='*' ; shift ;;
--lts=*) NVM_LTS="${1##--lts=}" ; shift ;;
--) break ;;
--*)
nvm_err "Unsupported option \"$1\"."
return 55
;;
*)
if [ -n "$1" ]; then
break
else
shift
fi
;; # stop processing arguments
esac
done
local provided_version
provided_version="$1"
if [ "${NVM_LTS-}" != '' ]; then
provided_version="lts/${NVM_LTS:-*}"
VERSION="${provided_version}"
elif [ -n "${provided_version}" ]; then
VERSION="$(nvm_version "${provided_version}")" ||:
if [ "_${VERSION}" = '_N/A' ] && ! nvm_is_valid_version "${provided_version}"; then
NVM_SILENT="${NVM_SILENT:-0}" nvm_rc_version && has_checked_nvmrc=1
provided_version="${NVM_RC_VERSION}"
unset NVM_RC_VERSION
VERSION="$(nvm_version "${provided_version}")" ||:
else
shift
fi
fi
nvm_ensure_version_installed "${provided_version}"
EXIT_CODE=$?
if [ "${EXIT_CODE}" != "0" ]; then
return $EXIT_CODE
fi
if [ "${NVM_SILENT:-0}" -ne 1 ]; then
if [ "${NVM_LTS-}" = '*' ]; then
nvm_echo "Running node latest LTS -> $(nvm_version "${VERSION}")$(nvm use --silent "${VERSION}" && nvm_print_npm_version)"
elif [ -n "${NVM_LTS-}" ]; then
nvm_echo "Running node LTS \"${NVM_LTS-}\" -> $(nvm_version "${VERSION}")$(nvm use --silent "${VERSION}" && nvm_print_npm_version)"
elif nvm_is_iojs_version "${VERSION}"; then
nvm_echo "Running io.js $(nvm_strip_iojs_prefix "${VERSION}")$(nvm use --silent "${VERSION}" && nvm_print_npm_version)"
else
nvm_echo "Running node ${VERSION}$(nvm use --silent "${VERSION}" && nvm_print_npm_version)"
fi
fi
NODE_VERSION="${VERSION}" "${NVM_DIR}/nvm-exec" "$@"
;;
"ls" | "list")
local PATTERN
local NVM_NO_COLORS
local NVM_NO_ALIAS
while [ $# -gt 0 ]; do
case "${1}" in
--) ;;
--no-colors) NVM_NO_COLORS="${1}" ;;
--no-alias) NVM_NO_ALIAS="${1}" ;;
--*)
nvm_err "Unsupported option \"${1}\"."
return 55
;;
*)
PATTERN="${PATTERN:-$1}"
;;
esac
shift
done
if [ -n "${PATTERN-}" ] && [ -n "${NVM_NO_ALIAS-}" ]; then
nvm_err '`--no-alias` is not supported when a pattern is provided.'
return 55
fi
local NVM_LS_OUTPUT
local NVM_LS_EXIT_CODE
NVM_LS_OUTPUT=$(nvm_ls "${PATTERN-}")
NVM_LS_EXIT_CODE=$?
NVM_NO_COLORS="${NVM_NO_COLORS-}" nvm_print_versions "${NVM_LS_OUTPUT}"
if [ -z "${NVM_NO_ALIAS-}" ] && [ -z "${PATTERN-}" ]; then
if [ -n "${NVM_NO_COLORS-}" ]; then
nvm alias --no-colors
else
nvm alias
fi
fi
return $NVM_LS_EXIT_CODE
;;
"ls-remote" | "list-remote")
local NVM_LTS
local PATTERN
local NVM_NO_COLORS
while [ $# -gt 0 ]; do
case "${1-}" in
--) ;;
--lts)
NVM_LTS='*'
;;
--lts=*)
NVM_LTS="${1##--lts=}"
;;
--no-colors) NVM_NO_COLORS="${1}" ;;
--*)
nvm_err "Unsupported option \"${1}\"."
return 55
;;
*)
if [ -z "${PATTERN-}" ]; then
PATTERN="${1-}"
if [ -z "${NVM_LTS-}" ]; then
case "${PATTERN}" in
'lts/*') NVM_LTS='*' ;;
lts/*) NVM_LTS="${PATTERN##lts/}" ;;
esac
fi
fi
;;
esac
shift
done
local NVM_OUTPUT
local EXIT_CODE
NVM_OUTPUT="$(NVM_LTS="${NVM_LTS-}" nvm_remote_versions "${PATTERN}" &&:)"
EXIT_CODE=$?
if [ -n "${NVM_OUTPUT}" ]; then
NVM_NO_COLORS="${NVM_NO_COLORS-}" nvm_print_versions "${NVM_OUTPUT}"
return $EXIT_CODE
fi
NVM_NO_COLORS="${NVM_NO_COLORS-}" nvm_print_versions "N/A"
return 3
;;
"current")
nvm_version current
;;
"which")
local NVM_SILENT
local provided_version
while [ $# -ne 0 ]; do
case "${1}" in
--silent) NVM_SILENT=1 ;;
--) ;;
*) provided_version="${1-}" ;;
esac
shift
done
if [ -z "${provided_version-}" ]; then
NVM_SILENT="${NVM_SILENT:-0}" nvm_rc_version
if [ -n "${NVM_RC_VERSION}" ]; then
provided_version="${NVM_RC_VERSION}"
VERSION=$(nvm_version "${NVM_RC_VERSION}") ||:
fi
unset NVM_RC_VERSION
elif [ "${provided_version}" != 'system' ]; then
VERSION="$(nvm_version "${provided_version}")" ||:
else
VERSION="${provided_version-}"
fi
if [ -z "${VERSION}" ]; then
>&2 nvm --help
return 127
fi
if [ "_${VERSION}" = '_system' ]; then
if nvm_has_system_iojs >/dev/null 2>&1 || nvm_has_system_node >/dev/null 2>&1; then
local NVM_BIN
NVM_BIN="$(nvm use system >/dev/null 2>&1 && command which node)"
if [ -n "${NVM_BIN}" ]; then
nvm_echo "${NVM_BIN}"
return
fi
return 1
fi
nvm_err 'System version of node not found.'
return 127
elif [ "${VERSION}" = '∞' ]; then
nvm_err "The alias \"${2}\" leads to an infinite loop. Aborting."
return 8
fi
nvm_ensure_version_installed "${provided_version}"
EXIT_CODE=$?
if [ "${EXIT_CODE}" != "0" ]; then
return $EXIT_CODE
fi
local NVM_VERSION_DIR
NVM_VERSION_DIR="$(nvm_version_path "${VERSION}")"
nvm_echo "${NVM_VERSION_DIR}/bin/node"
;;
"alias")
local NVM_ALIAS_DIR
NVM_ALIAS_DIR="$(nvm_alias_path)"
local NVM_CURRENT
NVM_CURRENT="$(nvm_ls_current)"
command mkdir -p "${NVM_ALIAS_DIR}/lts"
local ALIAS
local TARGET
local NVM_NO_COLORS
ALIAS='--'
TARGET='--'
while [ $# -gt 0 ]; do
case "${1-}" in
--) ;;
--no-colors) NVM_NO_COLORS="${1}" ;;
--*)
nvm_err "Unsupported option \"${1}\"."
return 55
;;
*)
if [ "${ALIAS}" = '--' ]; then
ALIAS="${1-}"
elif [ "${TARGET}" = '--' ]; then
TARGET="${1-}"
fi
;;
esac
shift
done
if [ -z "${TARGET}" ]; then
# for some reason the empty string was explicitly passed as the target
# so, unalias it.
nvm unalias "${ALIAS}"
return $?
elif [ "${TARGET}" != '--' ]; then
# a target was passed: create an alias
if [ "${ALIAS#*\/}" != "${ALIAS}" ]; then
nvm_err 'Aliases in subdirectories are not supported.'
return 1
fi
VERSION="$(nvm_version "${TARGET}")" ||:
if [ "${VERSION}" = 'N/A' ]; then
nvm_err "! WARNING: Version '${TARGET}' does not exist."
fi
nvm_make_alias "${ALIAS}" "${TARGET}"
NVM_NO_COLORS="${NVM_NO_COLORS-}" NVM_CURRENT="${NVM_CURRENT-}" DEFAULT=false nvm_print_formatted_alias "${ALIAS}" "${TARGET}" "${VERSION}"
else
if [ "${ALIAS-}" = '--' ]; then
unset ALIAS
fi
nvm_list_aliases "${ALIAS-}"
fi
;;
"unalias")
local NVM_ALIAS_DIR
NVM_ALIAS_DIR="$(nvm_alias_path)"
command mkdir -p "${NVM_ALIAS_DIR}"
if [ $# -ne 1 ]; then
>&2 nvm --help
return 127
fi
if [ "${1#*\/}" != "${1-}" ]; then
nvm_err 'Aliases in subdirectories are not supported.'
return 1
fi
local NVM_IOJS_PREFIX
local NVM_NODE_PREFIX
NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
NVM_NODE_PREFIX="$(nvm_node_prefix)"
local NVM_ALIAS_EXISTS
NVM_ALIAS_EXISTS=0
if [ -f "${NVM_ALIAS_DIR}/${1-}" ]; then
NVM_ALIAS_EXISTS=1
fi
if [ $NVM_ALIAS_EXISTS -eq 0 ]; then
case "$1" in
"stable" | "unstable" | "${NVM_IOJS_PREFIX}" | "${NVM_NODE_PREFIX}" | "system")
nvm_err "${1-} is a default (built-in) alias and cannot be deleted."
return 1
;;
esac
nvm_err "Alias ${1-} doesn't exist!"
return
fi
local NVM_ALIAS_ORIGINAL
NVM_ALIAS_ORIGINAL="$(nvm_alias "${1}")"
command rm -f "${NVM_ALIAS_DIR}/${1}"
nvm_echo "Deleted alias ${1} - restore it with \`nvm alias \"${1}\" \"${NVM_ALIAS_ORIGINAL}\"\`"
;;
"install-latest-npm")
if [ $# -ne 0 ]; then
>&2 nvm --help
return 127
fi
nvm_install_latest_npm
;;
"reinstall-packages" | "copy-packages")
if [ $# -ne 1 ]; then
>&2 nvm --help
return 127
fi
local PROVIDED_VERSION
PROVIDED_VERSION="${1-}"
if [ "${PROVIDED_VERSION}" = "$(nvm_ls_current)" ] || [ "$(nvm_version "${PROVIDED_VERSION}" ||:)" = "$(nvm_ls_current)" ]; then
nvm_err 'Can not reinstall packages from the current version of node.'
return 2
fi
local VERSION
if [ "_${PROVIDED_VERSION}" = "_system" ]; then
if ! nvm_has_system_node && ! nvm_has_system_iojs; then
nvm_err 'No system version of node or io.js detected.'
return 3
fi
VERSION="system"
else
VERSION="$(nvm_version "${PROVIDED_VERSION}")" ||:
fi
local NPMLIST
NPMLIST="$(nvm_npm_global_modules "${VERSION}")"
local INSTALLS
local LINKS
INSTALLS="${NPMLIST%% //// *}"
LINKS="${NPMLIST##* //// }"
nvm_echo "Reinstalling global packages from ${VERSION}..."
if [ -n "${INSTALLS}" ]; then
nvm_echo "${INSTALLS}" | command xargs npm install -g --quiet
else
nvm_echo "No installed global packages found..."
fi
nvm_echo "Linking global packages from ${VERSION}..."
if [ -n "${LINKS}" ]; then
(
set -f; IFS='
' # necessary to turn off variable expansion except for newlines
for LINK in ${LINKS}; do
set +f; unset IFS # restore variable expansion
if [ -n "${LINK}" ]; then
case "${LINK}" in
'/'*) (nvm_cd "${LINK}" && npm link) ;;
*) (nvm_cd "$(npm root -g)/../${LINK}" && npm link)
esac
fi
done
)
else
nvm_echo "No linked global packages found..."
fi
;;
"clear-cache")
command rm -f "${NVM_DIR}/v*" "$(nvm_version_dir)" 2>/dev/null
nvm_echo 'nvm cache cleared.'
;;
"version")
nvm_version "${1}"
;;
"version-remote")
local NVM_LTS
local PATTERN
while [ $# -gt 0 ]; do
case "${1-}" in
--) ;;
--lts)
NVM_LTS='*'
;;
--lts=*)
NVM_LTS="${1##--lts=}"
;;
--*)
nvm_err "Unsupported option \"${1}\"."
return 55
;;
*)
PATTERN="${PATTERN:-${1}}"
;;
esac
shift
done
case "${PATTERN-}" in
'lts/*')
NVM_LTS='*'
unset PATTERN
;;
lts/*)
NVM_LTS="${PATTERN##lts/}"
unset PATTERN
;;
esac
NVM_VERSION_ONLY=true NVM_LTS="${NVM_LTS-}" nvm_remote_version "${PATTERN:-node}"
;;
"--version" | "-v")
nvm_echo '0.38.0'
;;
"unload")
nvm deactivate >/dev/null 2>&1
unset -f nvm \
nvm_iojs_prefix nvm_node_prefix \
nvm_add_iojs_prefix nvm_strip_iojs_prefix \
nvm_is_iojs_version nvm_is_alias nvm_has_non_aliased \
nvm_ls_remote nvm_ls_remote_iojs nvm_ls_remote_index_tab \
nvm_ls nvm_remote_version nvm_remote_versions \
nvm_install_binary nvm_install_source nvm_clang_version \
nvm_get_mirror nvm_get_download_slug nvm_download_artifact \
nvm_install_npm_if_needed nvm_use_if_needed nvm_check_file_permissions \
nvm_print_versions nvm_compute_checksum \
nvm_get_checksum_binary \
nvm_get_checksum_alg nvm_get_checksum nvm_compare_checksum \
nvm_version nvm_rc_version nvm_match_version \
nvm_ensure_default_set nvm_get_arch nvm_get_os \
nvm_print_implicit_alias nvm_validate_implicit_alias \
nvm_resolve_alias nvm_ls_current nvm_alias \
nvm_binary_available nvm_change_path nvm_strip_path \
nvm_num_version_groups nvm_format_version nvm_ensure_version_prefix \
nvm_normalize_version nvm_is_valid_version \
nvm_ensure_version_installed nvm_cache_dir \
nvm_version_path nvm_alias_path nvm_version_dir \
nvm_find_nvmrc nvm_find_up nvm_find_project_dir nvm_tree_contains_path \
nvm_version_greater nvm_version_greater_than_or_equal_to \
nvm_print_npm_version nvm_install_latest_npm nvm_npm_global_modules \
nvm_has_system_node nvm_has_system_iojs \
nvm_download nvm_get_latest nvm_has nvm_install_default_packages nvm_get_default_packages \
nvm_curl_use_compression nvm_curl_version \
nvm_auto nvm_supports_xz \
nvm_echo nvm_err nvm_grep nvm_cd \
nvm_die_on_prefix nvm_get_make_jobs nvm_get_minor_version \
nvm_has_solaris_binary nvm_is_merged_node_version \
nvm_is_natural_num nvm_is_version_installed \
nvm_list_aliases nvm_make_alias nvm_print_alias_path \
nvm_print_default_alias nvm_print_formatted_alias nvm_resolve_local_alias \
nvm_sanitize_path nvm_has_colors nvm_process_parameters \
nvm_node_version_has_solaris_binary nvm_iojs_version_has_solaris_binary \
nvm_curl_libz_support nvm_command_info nvm_is_zsh nvm_stdout_is_terminal \
nvm_npmrc_bad_news_bears \
nvm_get_colors nvm_set_colors nvm_print_color_code nvm_format_help_message_colors \
nvm_echo_with_colors nvm_err_with_colors \
nvm_get_artifact_compression nvm_install_binary_extract \
>/dev/null 2>&1
unset NVM_RC_VERSION NVM_NODEJS_ORG_MIRROR NVM_IOJS_ORG_MIRROR NVM_DIR \
NVM_CD_FLAGS NVM_BIN NVM_INC NVM_MAKE_JOBS \
NVM_COLORS INSTALLED_COLOR SYSTEM_COLOR \
CURRENT_COLOR NOT_INSTALLED_COLOR DEFAULT_COLOR LTS_COLOR \
>/dev/null 2>&1
;;
"set-colors")
local EXIT_CODE
nvm_set_colors "${1-}"
EXIT_CODE=$?
if [ "$EXIT_CODE" -eq 17 ]; then
>&2 nvm --help
nvm_echo
nvm_err_with_colors "\033[1;37mPlease pass in five \033[1;31mvalid color codes\033[1;37m. Choose from: rRgGbBcCyYmMkKeW\033[0m"
fi
;;
*)
>&2 nvm --help
return 127
;;
esac
}
nvm_get_default_packages() {
  # Print the space-separated list of packages from $NVM_DIR/default-packages,
  # skipping blank lines and `#` comment lines. Prints nothing when the file
  # is absent. Returns 1 if any line contains more than one word.
  local NVM_DEFAULT_PACKAGE_FILE="${NVM_DIR}/default-packages"
  if [ -f "${NVM_DEFAULT_PACKAGE_FILE}" ]; then
    local DEFAULT_PACKAGES
    DEFAULT_PACKAGES=''

    # Read lines from $NVM_DIR/default-packages
    local line
    # Stage a copy with a guaranteed trailing newline so `read` sees the
    # final line even when the file lacks one.
    local WORK
    # nvm.sh is sourced into the user's shell, so `exit` here would
    # terminate that shell; propagate the failure with `return` instead.
    WORK=$(mktemp -d) || return $?
    # shellcheck disable=SC2064
    trap "command rm -rf '$WORK'" EXIT
    # shellcheck disable=SC1003
    sed -e '$a\' "${NVM_DEFAULT_PACKAGE_FILE}" > "${WORK}/default-packages"
    while IFS=' ' read -r line; do
      # Skip empty lines.
      [ -n "${line-}" ] || continue

      # Skip comment lines that begin with `#`.
      [ "$(nvm_echo "${line}" | command cut -c1)" != "#" ] || continue

      # Fail on lines that have multiple space-separated words
      case $line in
        *\ *)
          nvm_err "Only one package per line is allowed in the ${NVM_DIR}/default-packages file. Please remove any lines with multiple space-separated values."
          return 1
        ;;
      esac
      DEFAULT_PACKAGES="${DEFAULT_PACKAGES}${line} "
    done < "${WORK}/default-packages"
    echo "${DEFAULT_PACKAGES}" | command xargs
  fi
}
nvm_install_default_packages() {
  # Globally install the space-separated package list in $1 (as produced by
  # nvm_get_default_packages), echoing the command first for visibility.
  local PACKAGES
  PACKAGES="$1"
  nvm_echo "Installing default global packages from ${NVM_DIR}/default-packages..."
  nvm_echo "npm install -g --quiet ${PACKAGES}"

  # Success path: the pipeline's status becomes the function's return value.
  nvm_echo "${PACKAGES}" | command xargs npm install -g --quiet && return

  nvm_err "Failed installing default packages. Please check if your default-packages file or a package in it has problems!"
  return 1
}
nvm_supports_xz() {
  # Return 0 when an xz-compressed tarball both exists for the given
  # node/io.js version AND can be extracted on this OS; 1 otherwise.
  # $1 - resolved version string
  if [ -z "${1-}" ]; then
    return 1
  fi

  local NVM_OS
  NVM_OS="$(nvm_get_os)"

  # First gate: does this OS's tar support xz at all?
  if [ "_${NVM_OS}" = '_darwin' ]; then
    local MACOS_VERSION
    MACOS_VERSION="$(sw_vers -productVersion)"

    if nvm_version_greater "10.9.0" "${MACOS_VERSION}"; then
      # macOS 10.8 and earlier doesn't support extracting xz-compressed tarballs with tar
      return 1
    fi
  elif [ "_${NVM_OS}" = '_freebsd' ]; then
    if ! [ -e '/usr/lib/liblzma.so' ]; then
      # FreeBSD without /usr/lib/liblzma.so doesn't support extracting xz-compressed tarballs with tar
      return 1
    fi
  else
    if ! command which xz >/dev/null 2>&1; then
      # Most OSes without xz on the PATH don't support extracting xz-compressed tarballs with tar
      # (Should correctly handle Linux, SmartOS, maybe more)
      return 1
    fi
  fi

  # Second gate: was an xz tarball published for this version?
  # all node versions v4.0.0 and later have xz
  if nvm_is_merged_node_version "${1}"; then
    return 0
  fi

  # 0.12x: node v0.12.10 and later have xz
  if nvm_version_greater_than_or_equal_to "${1}" "0.12.10" && nvm_version_greater "0.13.0" "${1}"; then
    return 0
  fi

  # 0.10x: node v0.10.42 and later have xz
  if nvm_version_greater_than_or_equal_to "${1}" "0.10.42" && nvm_version_greater "0.11.0" "${1}"; then
    return 0
  fi

  # Remaining versions are io.js releases.
  case "${NVM_OS}" in
    darwin)
      # darwin only has xz for io.js v2.3.2 and later
      nvm_version_greater_than_or_equal_to "${1}" "2.3.2"
    ;;
    *)
      nvm_version_greater_than_or_equal_to "${1}" "1.0.0"
    ;;
  esac
  return $?
}
nvm_auto() {
  # Run the startup action selected by nvm_process_parameters:
  #   'install' - install the default alias (or the .nvmrc version) if any
  #   'use'     - activate the default alias / .nvmrc version
  #   'none'    - do nothing
  # Any other mode is rejected with an error.
  local NVM_MODE
  NVM_MODE="${1-}"
  local VERSION
  local NVM_CURRENT

  if [ "_${NVM_MODE}" = '_install' ]; then
    VERSION="$(nvm_alias default 2>/dev/null || nvm_echo)"
    if [ -n "${VERSION}" ]; then
      nvm install "${VERSION}" >/dev/null
    elif nvm_rc_version >/dev/null 2>&1; then
      nvm install >/dev/null
    fi
  elif [ "_$NVM_MODE" = '_use' ]; then
    NVM_CURRENT="$(nvm_ls_current)"
    # Only auto-activate when nothing (or the system node) is active.
    if [ "_${NVM_CURRENT}" = '_none' ] || [ "_${NVM_CURRENT}" = '_system' ]; then
      VERSION="$(nvm_resolve_local_alias default 2>/dev/null || nvm_echo)"
      if [ -n "${VERSION}" ]; then
        nvm use --silent "${VERSION}" >/dev/null
      elif nvm_rc_version >/dev/null 2>&1; then
        nvm use --silent >/dev/null
      fi
    else
      # A version is already active: re-activate it to refresh PATH etc.
      nvm use --silent "${NVM_CURRENT}" >/dev/null
    fi
  elif [ "_${NVM_MODE}" != '_none' ]; then
    nvm_err 'Invalid auto mode supplied.'
    return 1
  fi
}
nvm_process_parameters() {
  # Decide which auto action runs when nvm.sh is sourced. The default is
  # 'use'; '--install' switches to install mode; '--no-use' disables the
  # auto action entirely. When several flags are given, the last one wins.
  local NVM_AUTO_MODE='use'
  local NVM_PARAM
  for NVM_PARAM in "$@"; do
    case "${NVM_PARAM}" in
      --install) NVM_AUTO_MODE='install' ;;
      --no-use) NVM_AUTO_MODE='none' ;;
    esac
  done
  nvm_auto "${NVM_AUTO_MODE}"
}
nvm_process_parameters "$@"
} # this ensures the entire script is downloaded #
|
<reponame>Ks89/javascript-on-things
const five = require('johnny-five');

const board = new five.Board();

// Once the board reports ready, attach an HMC5883L magnetometer and log
// every bearing change to the console.
board.on('ready', function onBoardReady() {
  const compass = new five.Compass({ controller: 'HMC5883L' });

  compass.on('change', function onBearingChange() {
    console.log(compass.bearing);
  });
});
|
#!/usr/bin/env bash
set -e

# Generate a throwaway x509 server cert, a test CA, and a CA-signed client
# cert bundle, plus base64 copies of each for use in environment variables.
MSG="[GEN_TEST_CERTS]"

KEY="test_renderer.key.pem"
KEY_B64="${KEY}.b64"
CERT="test_renderer.cert.pem"
CERT_B64="${CERT}.b64"
CA_KEY="test_ca.key.pem"
CA_CERT="test_ca.cert.pem"
CA_CERT_B64="${CA_CERT}.b64"
CLIENT_KEY="test_client.key.pem"
CLIENT_CSR="test_client.csr"
CLIENT_CERT="test_client.cert.pem"
CLIENT_CERTS="test_client_certs.pem"
CLIENT_CERTS_B64="${CLIENT_CERTS}.b64"

echo "$MSG generating test x509 server certs (DO NOT USE IN PRODUCTION)"
openssl req -new -x509 -config openssl.cnf -nodes \
  -days 365 -newkey rsa:4096 \
  -keyout "$KEY" -out "$CERT" > /dev/null 2>&1
# Fix: use the *_B64 variables declared above (previously unused — the
# literal "${KEY}.b64" was repeated) and let base64 read the file directly
# instead of piping through cat.
base64 < "$KEY" > "$KEY_B64"
base64 < "$CERT" > "$CERT_B64"

echo "$MSG generating test x509 CA (DO NOT USE IN PRODUCTION)"
openssl req -new -x509 -config openssl_ca.cnf -nodes \
  -days 365 -newkey rsa:4096 \
  -keyout "$CA_KEY" -out "$CA_CERT" > /dev/null 2>&1
base64 < "$CA_CERT" > "$CA_CERT_B64"

echo "$MSG generating client key (DO NOT USE IN PRODUCTION)"
openssl genrsa -out "$CLIENT_KEY" 4096 > /dev/null 2>&1

echo "$MSG generating client key CSR (DO NOT USE IN PRODUCTION)"
openssl req -new -key "$CLIENT_KEY" -config openssl_client.cnf -out "$CLIENT_CSR" > /dev/null 2>&1

echo "$MSG signing client certificate (DO NOT USE IN PRODUCTION)"
openssl x509 -req -in "$CLIENT_CSR" -CA "$CA_CERT" -CAkey "$CA_KEY" -CAcreateserial \
  -out "$CLIENT_CERT" -days 365 -sha256 -extfile openssl_client_sign.ext > /dev/null 2>&1

# Bundle private key + signed cert into one PEM for client-side mTLS use.
cat "$CLIENT_KEY" > "$CLIENT_CERTS"
cat "$CLIENT_CERT" >> "$CLIENT_CERTS"
base64 < "$CLIENT_CERTS" > "$CLIENT_CERTS_B64"

#echo "$MSG cleaning up files"
#rm "$KEY" "$CERT" "$CA_KEY" "$CA_CERT" "$CLIENT_KEY" "$CLIENT_CSR" "$CLIENT_CERT" "$CLIENT_CERTS" "test_ca.srl"

echo "$MSG your certs in b64 to be used for testing via environment variables:"
echo "$MSG - NGINX_X509_PRIVATE_KEY_B64: contents of '${KEY_B64}'"
echo "$MSG - NGINX_X509_PUBLIC_CERT_B64: contents of '${CERT_B64}'"
echo "$MSG - CLIENT_CERT_CA: contents of '${CA_CERT_B64}'"
echo "$MSG"
echo "$MSG ** You should base64 decode the content of the '${CLIENT_CERTS_B64}' file and use it in your client requests"
echo "$MSG"
echo "$MSG DONE"
|
import * as core from '@actions/core'
export const checkUser = (): boolean => {
  // Optional `user` input; when empty or missing, fall back to the
  // Dependabot bot account name.
  const configured = core.getInput('user', {
    required: false
  })
  const expected = configured?.length > 0 ? configured : 'dependabot[bot]'

  // True when the workflow's triggering actor matches the expected user.
  return process.env.GITHUB_ACTOR === expected
}
|
#!/bin/bash
# Cross-check every address in an all-balances.json dump against a live
# chain: the OVM_ETH ERC20 balance must match the native balance, and the
# native balance must match the value recorded in the JSON file.
# Usage: $0 <rpc-url> <path/to/all-balances.json>
if [[ -z $1 ]]; then
  echo "Must pass URL as first arg"
  exit 1
fi
if [[ ! -f $2 ]]; then
  echo "Must pass path to all-balances.json as second arg"
  exit 1
fi

# seth reads the RPC endpoint from this environment variable.
export ETH_RPC_URL=$1
all_balances=$2

# 0x42..0006 is the OVM_ETH predeploy (WETH-style ERC20 mirror of ETH).
supply=$(seth call 0x4200000000000000000000000000000000000006 'totalSupply()(uint256)')
echo "total supply: $(seth --from-wei $supply ETH)"

# Iterate over the JSON's top-level keys (one address per line).
cat $all_balances | jq -r 'keys | .[]' | while read addr; do
  ovm_eth=$(seth call 0x4200000000000000000000000000000000000006 \
    'balanceOf(address)(uint256)' $addr)
  balance=$(seth balance $addr)
  if [[ $ovm_eth != $balance ]]; then
    echo "OVM_ETH and balance mismatch for $addr"
  fi

  # Recorded value for this address; `printf '%d'` normalizes hex to decimal.
  expect=$(cat $all_balances \
    | jq -r --arg key $addr '.[$key]' \
    | xargs printf '%d')
  if [[ $balance != $expect ]]; then
    echo "$addr balance mismatch"
  else
    echo "Balance correct: $addr has $(seth --from-wei $balance eth) ETH"
  fi
done
|
<reponame>tignear/bot<filename>packages/presentation/web/src/components/layout.tsx
import Header from "./header";
import styled from "styled-components";
import tw from "tailwind.macro";
// Outer page shell: plain wrapper around the header and content area.
const Layout = styled.div``;

// Content area: horizontally centered with a top margin, styles supplied
// through the tailwind.macro helper.
const Inner = styled.div`
  ${tw`mx-auto mt-4`}
`;

type Props = {
  children: React.ReactNode;
};

// Page layout component: site header above a `.container`-classed content
// region that renders the children.
// NOTE(review): `React` is used as a type namespace but no
// `import React from "react"` is visible in this chunk — presumably the
// build provides it (jsx runtime / global types); confirm.
const Component: React.FC<Props> = (props) => (
  <Layout>
    <Header />
    <Inner className="container">{props.children}</Inner>
  </Layout>
);

export default Component;
|
#!/bin/bash
# Conda post-link hook: download the benchmarkfdrData2019 Bioconductor data
# package from one of several mirrors, verify its md5, and R CMD INSTALL it.
FN="benchmarkfdrData2019_1.6.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.13/data/experiment/src/contrib/benchmarkfdrData2019_1.6.0.tar.gz"
  "https://bioarchive.galaxyproject.org/benchmarkfdrData2019_1.6.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-benchmarkfdrdata2019/bioconductor-benchmarkfdrdata2019_1.6.0_src_all.tar.gz"
)
MD5="17a045441d2311736928ece335450b36"

# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
# (Quoted throughout: $PREFIX may contain spaces.)
STAGING="$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM"
mkdir -p "$STAGING"
TARBALL="$STAGING/$FN"

SUCCESS=0
for URL in "${URLS[@]}"; do
  curl "$URL" > "$TARBALL"
  [[ $? == 0 ]] || continue

  # Platform-specific md5sum checks.
  # (Rewritten from `else if ... fi fi` to the idiomatic `elif`.)
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5 $TARBALL"; then
      SUCCESS=1
      break
    fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done

if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi

# Install and clean up
R CMD INSTALL --library="$PREFIX/lib/R/library" "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
<filename>codejam/2019-qualification/a.cc
// https://codingcompetitions.withgoogle.com/codejam/round/0000000000051705/0000000000088231
#include<bits/stdc++.h>
using namespace std;
using vi=vector<int>;
// Codejam 2019 Qualification A "Foregone Solution": split N into A + B so
// that neither addend contains the digit 4 — every 4 in N contributes a 3
// to A and a 1 to B; all other digits go to A unchanged.
int main(){
  ios::sync_with_stdio(0);
  cin.tie(0);
  int t;
  cin>>t;
  for(int T=1;T<=t;T++){
    string s;
    cin>>s;
    int n=s.size();
    // a and b hold digits least-significant-first while being filled.
    vi a(n),b(n);
    for(int i=0;i<n;i++){
      a[i]=s[n-i-1]-'0';
      if(a[i]==4){
        a[i]--;
        b[i]=1;
      }
    }
    // Drop B's leading zeros (stored at the back in this orientation).
    // NOTE(review): relies on the problem guarantee that N contains at
    // least one 4; otherwise b would be emptied and b.back() would be UB.
    while(!b.back())b.pop_back();
    reverse(a.begin(),a.end());
    reverse(b.begin(),b.end());
    cout<<"Case #"<<T<<": ";
    for(int x:a)cout<<x;
    cout<<" ";
    for(int x:b)cout<<x;
    cout<<"\n";
  }
}
|
def reverseString(s):
    """Return a copy of *s* with its characters in reverse order."""
    return "".join(reversed(s))


s = "Hello"
print(reverseString(s))
|
#!/bin/bash
#
#SBATCH --job-name=iwslt_grid_0021_05
#SBATCH --partition=1080ti-long
#SBATCH --gres=gpu:1
#SBATCH --ntasks-per-node=24
#SBATCH --mem=47GB
#SBATCH -d singleton
#SBATCH --open-mode append
#SBATCH -o /mnt/nfs/work1/miyyer/simengsun/synst/experiments/iwslt_grid_0021_05/output_train.txt
#SBATCH --mail-type=ALL
#SBATCH --mail-user=simengsun@cs.umass.edu

# Paths for this experiment's code and checkpoints.
BASE_PATH=/mnt/nfs/work1/miyyer
PROJECT_PATH=$BASE_PATH/simengsun/synst
EXPERIMENT_PATH=$PROJECT_PATH/experiments/iwslt_grid_0021_05

# BUG FIX: the next line was bare text, not a command — the shell would try
# to execute `left` and fail with "command not found". It looks like an
# accidentally pasted attention-position list, so it is commented out.
# left left left right left center left left

# Load in python3 and source the venv
module load python3/3.6.6-1810
source /mnt/nfs/work1/miyyer/wyou/py36/bin/activate

# Need to include the venv path upfront...
# otherwise it defaults to the loaded slurm module which breaks for pynvml with python3
#PYTHONPATH=$BASE_PATH/simengsun/synst/bin/lib/python3.6/site-packages/:$PYTHONPATH
PYTHONPATH=/mnt/nfs/work1/miyyer/wyou/py36/lib/python3.6/site-packages:$PYTHONPATH

# Launch training with comet.ml credentials exported from ~/.comet.ml.
env $(cat ~/.comet.ml | xargs) python main.py \
  --track -b 6000 --dataset iwslt_en_de --span 1 \
  --model new_transformer \
  --attn-param 1.4 \
  --attn-type normal \
  --attn-position left left left right left right left right left right \
  --attn-displacement 1 \
  --dec-attn-param 1.4 \
  --dec-attn-type normal \
  --dec-attn-position left center left center left center left center left left \
  --dec-attn-displacement 1 \
  --embedding-size 286 --hidden-dim 507 --num-heads 2 --num-layers 5 \
  -d /mnt/nfs/work1/miyyer/wyou/iwslt -p /mnt/nfs/work1/miyyer/wyou/iwslt -v train \
  --checkpoint-interval 600 --accumulate 1 --learning-rate 3e-4 --checkpoint-directory $EXPERIMENT_PATH \
  --label-smoothing 0.0 --learning-rate-scheduler linear
|
/**
* <a href="http://www.openolat.org">
* OpenOLAT - Online Learning and Training</a><br>
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); <br>
* you may not use this file except in compliance with the License.<br>
* You may obtain a copy of the License at the
* <a href="http://www.apache.org/licenses/LICENSE-2.0">Apache homepage</a>
* <p>
* Unless required by applicable law or agreed to in writing,<br>
* software distributed under the License is distributed on an "AS IS" BASIS, <br>
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <br>
* See the License for the specific language governing permissions and <br>
* limitations under the License.
* <p>
* Initial code contributed and copyrighted by<br>
* frentix GmbH, http://www.frentix.com
* <p>
*/
package org.olat.course.assessment;
import org.olat.core.gui.control.Event;
/**
*
* Initial date: 12 Aug 2021<br>
* @author uhensler, <EMAIL>, http://www.frentix.com
*
*/
/**
 * Holder for assessment-related UI events shared by course assessment
 * controllers. Not instantiable.
 */
public class AssessmentEvents {

	/**
	 * Many course node run controllers send an Event.CHANGED_EVENT. In
	 * response, the RunMainController rebuilds the menu tree, progress bar
	 * and navigation AND recreates the current controller. This event is
	 * meant to trigger the rebuild of the menu tree etc. without recreating
	 * the current controller.
	 */
	public static final Event CHANGED_EVENT = new Event("assessment-changed");

	// Constants-only class: prevent instantiation.
	private AssessmentEvents() {
		//
	}
}
|
<filename>include/LLVMCodeGen.h
// -*- mode: c++ -*-
#pragma once
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/PassManager.h"
#include "AST.h"
#include "ContextManager.h"
#include "Driver.h"
// AST-walking LLVM IR generator: one visit* method per AST node kind, each
// returning the llvm::Value produced for that node.
class LLVMCodeGen {
  const Driver *_driver;
  std::unique_ptr<llvm::Module> _module;            // module being populated
  std::unique_ptr<llvm::FunctionPassManager> _fpm;  // per-function passes
  llvm::IRBuilder<> _builder;                       // IR insertion helper
  ContextManager _context;                          // symbol/scope bookkeeping
  std::map<std::string, int> _constants;            // named integer constants
public:
  // NOTE(review): the raw pointers are adopted into unique_ptr members here,
  // so this constructor takes ownership of `module` and `fpm` — callers must
  // not delete or retain them; confirm at call sites.
  LLVMCodeGen(llvm::Module *module, llvm::FunctionPassManager *fpm) :
    _module(module), _fpm(fpm), _builder(llvm::getGlobalContext()),
    _context(module) { }
  void codegen(const Driver &, std::string);
  // Transfers ownership of the finished module to the caller; this object's
  // _module is left empty afterwards.
  std::unique_ptr<llvm::Module> &&getModule() { return std::move(_module); }
private:
  // Report a codegen error for the given message; returns the error value.
  llvm::Value *error(std::string);
  // Dispatch on the dynamic AST node type.
  llvm::Value *visit(const std::shared_ptr<ast::AST>);
  llvm::Value *visitCompUnit(const ast::CompUnit &);
  llvm::Value *visitDecl(const ast::Decl &);
  llvm::Value *visitConstDef(const ast::ConstDef &);
  llvm::Value *visitVar(const ast::Var &);
  llvm::Value *visitFuncDef(const ast::FuncDef &);
  llvm::Value *visitExtFunc(const ast::ExtFunc &);
  llvm::Value *visitParam(const ast::Param &);
  llvm::Value *visitBlock(const ast::Block &);
  llvm::Value *visitAsgnStmt(const ast::AsgnStmt &);
  llvm::Value *visitExp(const ast::Exp &);
  llvm::Value *visitLVal(const ast::LVal &);
  llvm::Value *visitNumber(const ast::Number &);
  llvm::Value *visitCond(const ast::Cond &);
  llvm::Value *visitWhileStmt(const ast::WhileStmt &);
  llvm::Value *visitIfStmt(const ast::IfStmt &);
  llvm::Value *visitFuncCall(const ast::FuncCall &);
  llvm::Value *visitCallExp(const ast::CallExp &);
  llvm::Value *visitRetStmt(const ast::RetStmt &);
};
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from optparse import OptionParser
import numpy as np
import flydra_analysis.a2.core_analysis as core_analysis
import flydra_core.align as align
from flydra_core.reconstruct import Reconstructor, DEFAULT_WATER_REFRACTIVE_INDEX
import flydra_core.water as water
from . import ransac
import cgtypes # cgkit 1.x
import os
from flydra_core.common_variables import WATER_ROOTS_EPS
# Shorthand for the SVD used by the plane-fitting code below.
svd = np.linalg.svd

# Degrees-to-radians conversion factor.
D2R = np.pi / 180.0
def norm(vec):
    """Return *vec* scaled to unit Euclidean length."""
    length = np.sqrt(np.sum(vec ** 2))
    return vec / length
def cgmat2np(cgkit_mat):
    """Convert a cgkit Mat3/Mat4 to the equivalent numpy array.

    cgkit's flat ``toList()`` ordering is the transpose of numpy's row-major
    layout, hence the final ``.T``.
    """
    flat = np.array(cgkit_mat.toList())
    count = len(flat)
    if count == 16:
        flat.shape = 4, 4
    elif count == 9:
        flat.shape = 3, 3
    else:
        raise ValueError("unknown shape")
    return flat.T
def test_cgmat2np():
    """Sanity-check cgmat2np: a cgkit 90-degree rotation about +z must act on
    numpy vectors exactly as it acts on cgkit vectors."""
    point1 = (1, 0, 0)
    point1_out = (0, 1, 0)
    cg_quat = cgtypes.quat().fromAngleAxis(90.0 * D2R, (0, 0, 1))
    cg_in = cgtypes.vec3(point1)

    # Rotate using cgkit's own matrix type as the reference result.
    m_cg = cg_quat.toMat3()
    cg_out = m_cg * cg_in
    cg_out_tup = (cg_out[0], cg_out[1], cg_out[2])
    assert np.allclose(cg_out_tup, point1_out)

    # The converted numpy matrix must produce the same rotation.
    m_np = cgmat2np(m_cg)
    np_out = np.dot(m_np, point1)
    assert np.allclose(np_out, point1_out)
class PlaneModelHelper:
    """Least-squares plane model with the fit/get_error interface expected
    by the ransac module.

    A plane is represented by homogeneous coefficients ``(a, b, c, d)`` such
    that ``a*x + b*y + c*z + d == 0``.
    """

    def fit(self, data):
        """Fit a plane to an (N, 3) array-like of points.

        Uses the SVD null-space method: the right-singular vector with the
        smallest singular value of the homogeneous matrix [x y z 1] is the
        best-fit plane. See http://stackoverflow.com/a/10904220/1633026
        """
        # Fix: `np.float` was a deprecated alias of the builtin `float` and
        # was removed in NumPy 1.24; use the builtin directly (same dtype).
        data = np.array(data, dtype=float)
        assert data.ndim == 2
        assert data.shape[1] == 3
        nrows = len(data)

        # Homogeneous coordinates: each row is [x, y, z, 1].
        G = np.empty((nrows, 4))
        G[:, :3] = data
        G[:, 3] = 1.0
        u, d, vt = np.linalg.svd(G, full_matrices=True)
        # Last right-singular vector = direction of smallest variance.
        plane_model = vt[3, :]
        assert plane_model.ndim == 1
        assert plane_model.shape[0] == 4
        return plane_model

    def get_error(self, data, plane_model):
        """Return the perpendicular distance of each point in `data`
        ((N, 3) array) to the plane `plane_model` ((4,) coefficients).

        See http://mathworld.wolfram.com/Point-PlaneDistance.html
        """
        assert data.ndim == 2
        n_pts = data.shape[0]
        assert data.shape[1] == 3
        assert plane_model.ndim == 1
        assert plane_model.shape[0] == 4

        a, b, c, d = plane_model
        denom = np.sqrt(a ** 2 + b ** 2 + c ** 2)

        x0 = data[:, 0]
        y0 = data[:, 1]
        z0 = data[:, 2]
        numer = np.abs(a * x0 + b * y0 + c * z0 + d)

        all_distances = numer / denom
        assert all_distances.ndim == 1
        assert all_distances.shape[0] == n_pts
        return all_distances
def doit(
    filename=None, obj_only=None, do_ransac=False, show=False,
):
    """Align a flydra calibration so the fitted data plane becomes z == 0.

    Loads MLE 3D positions from `filename`, fits a plane (least squares or
    RANSAC), rotates/translates the calibration so that plane maps to z == 0
    with the data centroid at the origin, attaches a water refraction model,
    and writes '<filename root>-water-aligned.xml'. With show=True, also
    displays diagnostic matplotlib plots.
    """
    # get original 3D points -------------------------------
    ca = core_analysis.get_global_CachingAnalyzer()
    obj_ids, use_obj_ids, is_mat_file, data_file, extra = ca.initial_file_load(filename)

    if obj_only is not None:
        use_obj_ids = np.array(obj_only)

    x = []
    y = []
    z = []
    for obj_id in use_obj_ids:
        obs_rows = ca.load_dynamics_free_MLE_position(obj_id, data_file)
        # Keep only rows with a valid (non-NaN) position estimate.
        goodcond = ~np.isnan(obs_rows["x"])
        good_rows = obs_rows[goodcond]
        x.append(good_rows["x"])
        y.append(good_rows["y"])
        z.append(good_rows["z"])
    x = np.concatenate(x)
    y = np.concatenate(y)
    z = np.concatenate(z)

    recon = Reconstructor(cal_source=data_file)

    extra["kresults"].close()  # close file

    # NOTE(review): np.float was removed in NumPy 1.24; these `dtype=np.float`
    # usages (here and below) need to become the builtin `float` on modern
    # NumPy — confirm the pinned NumPy version before upgrading.
    data = np.empty((len(x), 3), dtype=np.float)
    data[:, 0] = x
    data[:, 1] = y
    data[:, 2] = z

    # calculate plane-of-best fit ------------
    helper = PlaneModelHelper()
    if not do_ransac:
        plane_params = helper.fit(data)
    else:
        # do RANSAC
        """
        n: the minimum number of data values required to fit the model
        k: the maximum number of iterations allowed in the algorithm
        t: a threshold value for determining when a data point fits a model
        d: the number of close data values required to assert that a model fits well to data
        """
        n = 20
        k = 100
        t = np.mean([np.std(x), np.std(y), np.std(z)])
        d = 100
        plane_params = ransac.ransac(data, helper, n, k, t, d, debug=False)

    # Calculate rotation matrix from plane-of-best-fit to z==0 --------
    orig_normal = norm(plane_params[:3])
    new_normal = np.array([0, 0, 1], dtype=np.float)

    # Rotate about the axis perpendicular to both normals, by the angle
    # between them (quaternion from angle/axis).
    rot_axis = norm(np.cross(orig_normal, new_normal))
    cos_angle = np.dot(orig_normal, new_normal)
    angle = np.arccos(cos_angle)

    q = cgtypes.quat().fromAngleAxis(angle, rot_axis)
    m = q.toMat3()
    R = cgmat2np(m)

    # Calculate aligned data without translation -----------------
    s = 1.0
    t = np.array([0, 0, 0], dtype=np.float)
    aligned_data = align.align_points(s, R, t, data.T).T

    # Calculate aligned data so that mean point is origin -----------------
    t = -np.mean(aligned_data[:, :3], axis=0)
    aligned_data = align.align_points(s, R, t, data.T).T

    # Apply the same similarity transform to the calibration itself.
    M = align.build_xform(s, R, t)
    r2 = recon.get_aligned_copy(M)

    wateri = water.WaterInterface(
        refractive_index=DEFAULT_WATER_REFRACTIVE_INDEX, water_roots_eps=WATER_ROOTS_EPS
    )
    r2.add_water(wateri)

    dst = os.path.splitext(filename)[0] + "-water-aligned.xml"
    r2.save_to_xml_filename(dst)
    print("saved to", dst)

    if show:
        import matplotlib.pyplot as plt
        from pymvg.plot_utils import plot_system
        from mpl_toolkits.mplot3d import Axes3D

        # Raw vs aligned scatter plots (x-y and x-z projections).
        fig = plt.figure()
        ax1 = fig.add_subplot(221)
        ax1.plot(data[:, 0], data[:, 1], "b.")
        ax1.set_xlabel("x")
        ax1.set_ylabel("y")

        ax2 = fig.add_subplot(222)
        ax2.plot(data[:, 0], data[:, 2], "b.")
        ax2.set_xlabel("x")
        ax2.set_ylabel("z")

        ax3 = fig.add_subplot(223)
        ax3.plot(aligned_data[:, 0], aligned_data[:, 1], "b.")
        ax3.set_xlabel("x")
        ax3.set_ylabel("y")

        ax4 = fig.add_subplot(224)
        ax4.plot(aligned_data[:, 0], aligned_data[:, 2], "b.")
        ax4.set_xlabel("x")
        ax4.set_ylabel("z")

        # 3D view of the aligned camera system over a z == 0 reference grid.
        fig2 = plt.figure("cameras")
        ax = fig2.add_subplot(111, projection="3d")
        system = r2.convert_to_pymvg(ignore_water=True)
        plot_system(ax, system)
        x = np.linspace(-0.1, 0.1, 10)
        y = np.linspace(-0.1, 0.1, 10)
        X, Y = np.meshgrid(x, y)
        Z = np.zeros_like(X)
        ax.plot(X.ravel(), Y.ravel(), Z.ravel(), "b.")
        ax.set_title("aligned camera positions")
        plt.show()
def main():
    """CLI entry point: parse arguments and run the alignment via doit().

    Usage: %prog FILE [options]
    """
    parser = OptionParser("%prog FILE [options]")
    parser.add_option("--obj-only", type="string")
    parser.add_option("--ransac", action="store_true", default=False)
    parser.add_option("--show", action="store_true", default=False)
    options, args = parser.parse_args()
    h5_filename = args[0]
    obj_only = options.obj_only
    if obj_only is not None:
        # Expand "1,2,5-7"-style sequences into a list of object ids.
        obj_only = core_analysis.parse_seq(obj_only)
    doit(
        filename=h5_filename,
        obj_only=obj_only,
        do_ransac=options.ransac,
        show=options.show,
    )


if __name__ == "__main__":
    main()
|
#!/bin/bash
# Smoke test: build only dune-common (default build directory) with
# dunecontrol, then run its Python test target. Any failing command
# aborts the script (set -e).
set -e
#
# The minimal test case: Only building dune-common with the default build directory
#
DUNECONTROL_OPTS="--opts=./testcases/common-build/config.opts --module=dune-common"
./dune-common/bin/dunecontrol $DUNECONTROL_OPTS all
# Testing the Python code
./dune-common/bin/dunecontrol $DUNECONTROL_OPTS make test_python
|
#!/bin/bash
# Build a deterministic fingerprint of the Gradle dependency inputs:
# hash every build.gradle*, dependencies.kt and gradle-wrapper.properties
# file under the current directory into $1, sorted so the result is
# stable across runs (useful as a CI cache key).
RESULT_FILE=$1
if [ -f "$RESULT_FILE" ]; then
rm "$RESULT_FILE"
fi
touch "$RESULT_FILE"
# Print only the MD5 digest (second field of openssl's output) of a file.
checksum_file() {
openssl md5 "$1" | awk '{print $2}'
}
FILES=()
# NUL-delimited read so filenames containing whitespace survive intact.
while read -r -d ''; do
FILES+=("$REPLY")
done < <(find . -type f \( -name "build.gradle*" -o -name "dependencies.kt" -o -name "gradle-wrapper.properties" \) -print0)
# Loop through files and append MD5 to result file.
# "${FILES[@]}" must stay quoted: the previous unquoted form re-split
# entries on whitespace, undoing the NUL-delimited read above.
for FILE in "${FILES[@]}"; do
checksum_file "$FILE" >> "$RESULT_FILE"
done
# Sort so the fingerprint is independent of find's traversal order.
sort "$RESULT_FILE" -o "$RESULT_FILE"
|
#!/bin/bash
# Bootstrap a local Redash stack via docker-compose: create the schema,
# add a root user and a Postgres data source, then start everything.
set -eu
docker-compose -f run-redash/docker-compose.yaml run --rm server create_db
docker-compose -f run-redash/docker-compose.yaml run --rm server /app/manage.py users create_root octocat@users.noreply.github.com root_user --password root_password
# NOTE(review): the data-source options below set "user": "password" --
# user and password values look transposed; verify against the Postgres
# container's actual credentials before relying on this data source.
docker-compose -f run-redash/docker-compose.yaml run --rm server /app/manage.py ds new postgres --type pg --options '{"dbname": "postgres", "host": "127.0.0.1", "password": "postgres", "user": "password"}'
docker-compose -f run-redash/docker-compose.yaml up -d
|
#
# For use by Private Cloud team. May safely be removed.
#
#
# Executing over ssh, don't be so noisy or needy.
# $TERM must be quoted (and defaulted): it is often unset in exactly the
# non-interactive shells this check targets, and the unquoted form made
# '[' error out instead of comparing.
[ "${TERM:-}" == "dumb" ] && S=1 Q=1

# This file is meant to be sourced after an OpenStack credentials file.
if [ ! "$OS_USERNAME" ]; then
echo "Source your credentials first."
return
fi

[ ${Q=0} -eq 0 ] && echo
[ ${Q=0} -eq 0 ] && echo "Importing Private Cloud Common Functions..."

# Kernel clock ticks per second, for process-time math elsewhere.
HZ=`getconf CLK_TCK`

################
[ ${Q=0} -eq 0 ] && echo " - ix() - Quickly post things to ix.io"
# Post stdin (or the file named by $1) to the ix.io pastebin.
function ix() { curl -F 'f:1=<-' http://ix.io < "${1:-/dev/stdin}"; }
################
# Derive release identifiers used by version-dependent branches below:
#   RPC_RELEASE - major RPC version (alpha suffixes stripped)
#   OSA_VERSION - openstack-ansible version from /etc/openstack-release
# Falls back to RPC 9 when neither marker file yields a value.
if [ -f /etc/rpc-release ]; then
if [ "$( grep DISTRIB_RELEASE /etc/rpc-release 2> /dev/null)" ]; then
source /etc/rpc-release
# Major component only, e.g. "11.1rc2" -> 11.
RPC_RELEASE=`echo $DISTRIB_RELEASE | cut -d. -f1 | tr -d '[:alpha:]'`
fi
fi
if [ -f /etc/openstack-release ]; then
if [ "$( grep DISTRIB_RELEASE /etc/openstack-release 2> /dev/null)" ]; then
# Strips quotes from the value. NOTE(review): 'print $x' uses the gsub
# substitution count as a field index -- it happens to print $2 when the
# value carried two quotes; confirm intent before touching this awk.
OSA_VERSION=$(awk -F= '/DISTRIB_RELEASE/ { x=gsub(/"/,"",$2); print $x }' /etc/openstack-release | tr -d '[:alpha:]')
fi
fi
test -z "$OSA_VERSION" -a -z "$RPC_RELEASE" && OSA_VERSION=9.0.0 #Fallback to RPC9
test -z "$RPC_RELEASE" && RPC_RELEASE=$( echo $OSA_VERSION | cut -d '.' -f1)
###############
# These functions are intented to run inside containers only to determine the
# current virtual environment for all OS services after kilo
# Locate and activate the neutron virtualenv inside a neutron container.
# Sets VENV_ACTIVATE/VENV_PATH and VENV_ENABLED=1 on success, otherwise
# VENV_ENABLED=0.
function rpc-get-neutron-venv {
#Upstart
VENV_ACTIVATE=$( awk '/\..*\/bin\/activate/ {print $2}' /etc/init/neutron-*.conf 2> /dev/null |tail -1 )
#SystemD - only override the Upstart result when a systemd unit actually
# yields a path; the previous unconditional assignment clobbered a valid
# Upstart value (and leaked awk errors) on hosts without systemd units.
SYSTEMD_ACTIVATE=$( awk -F'--config-file' '/ExecStart=/ { gsub(/(ExecStart=|neutron[^\/]+$)/,"",$1); print $1 "activate" }' /etc/systemd/system/neutron*.service 2> /dev/null | tail -1)
[ -n "$SYSTEMD_ACTIVATE" ] && VENV_ACTIVATE=$SYSTEMD_ACTIVATE
unset SYSTEMD_ACTIVATE
if [ -n "$VENV_ACTIVATE" ]; then
VENV_PATH=$( dirname $VENV_ACTIVATE )
if [ -d "$VENV_PATH" ]; then
VENV_ENABLED=1
source ${VENV_ACTIVATE}
else
VENV_ENABLED=0
fi
else
VENV_ENABLED=0
fi
}
# Locate and activate the nova virtualenv referenced by Upstart configs.
# Sets VENV_ACTIVATE/VENV_PATH and VENV_ENABLED=1 on success, otherwise
# VENV_ENABLED=0.
function rpc-get-nova-venv {
VENV_ACTIVATE=$( awk '/\..*\/bin\/activate/ {print $2}' /etc/init/nova-*.conf 2> /dev/null |tail -1 )
# Assume disabled until a usable activate script is found.
VENV_ENABLED=0
if [ -n "$VENV_ACTIVATE" ]; then
VENV_PATH=$( dirname $VENV_ACTIVATE )
if [ -d "$VENV_PATH" ]; then
VENV_ENABLED=1
source ${VENV_ACTIVATE}
fi
fi
}
################
[ ${Q=0} -eq 0 ] && echo " - rpc-hypervisor-vms() - Display all hypervisors and associated instances"
# List every non-deleted instance grouped under its hypervisor, with
# flavor-like sizing details. Requires direct access to the nova MySQL DB.
function rpc-hypervisor-vms {
which mysql > /dev/null 2>&1
if [ $? -eq 0 ]; then
# Joins instance_system_metadata to instances; the metadata row filter
# ('key' = instance_type_name) yields one row per live instance.
mysql -te 'select host as "Hypervisor", instances.display_name as "Instance Name",image_ref as "Image", vm_state as State, vcpus as "VCPUs", memory_mb as "RAM", root_gb as "Root", ephemeral_gb as "Ephem" from instance_system_metadata left join instances on instance_system_metadata.instance_uuid=instances.uuid where instance_uuid in (select uuid from instances where deleted = 0) and `key` = "instance_type_name" order by host,display_name' nova
else
echo "'mysql' not found. Go to there."
fi
}
################
[ ${Q=0} -eq 0 ] && echo " - rpc-hypervisor-free() - Display free resources on each Hypervisor, as reported by MySQL"
# Show free RAM/VCPUs/disk per hypervisor from the nova DB, scaled by the
# allocation ratios in nova.conf (read locally, or from a nova_conductor
# container when no local nova.conf exists).
function rpc-hypervisor-free {
if [ ! -s /etc/nova/nova.conf ]; then
NOVACONTAINER=`lxc-ls -1 | grep nova_conductor | tail -1`
if [ "$NOVACONTAINER" ]; then
# Run the awk lookups inside the container.
LXC="lxc-attach -n $NOVACONTAINER -- "
else
LXC=
fi
fi
CPU_RATIO=`$LXC awk -F= '/^cpu_allocation_ratio/ {print $2}' /etc/nova/nova.conf`
RAM_RATIO=`$LXC awk -F= '/^ram_allocation_ratio/ {print $2}' /etc/nova/nova.conf`
DISK_RATIO=`$LXC awk -F= '/^disk_allocation_ratio/ {print $2}' /etc/nova/nova.conf`
[ ! "$CPU_RATIO" ] && CPU_RATIO=1 && echo "Unable to find cpu_allocation_ratio in nova.conf. Using 1.0"
[ ! "$RAM_RATIO" ] && RAM_RATIO=1 && echo "Unable to find ram_allocation_ratio in nova.conf. Using 1.0"
[ ! "$DISK_RATIO" ] && DISK_RATIO=1 && echo "Unable to find disk_allocation_ratio in nova.conf. Using 1.0"
which mysql > /dev/null 2>&1
if [ $? -eq 0 ]; then
mysql -te "select hypervisor_hostname as Hypervisor,((memory_mb*${RAM_RATIO})-memory_mb_used)/1024 as FreeMemGB,(vcpus*${CPU_RATIO})-vcpus_used as FreeVCPUs, (free_disk_gb*${DISK_RATIO}) as FreeDiskGB,running_vms ActiveVMs from compute_nodes where deleted = 0;" nova
else
echo "'mysql' not found. Go to there."
fi
# Previously only CPU_RATIO/RAM_RATIO were cleaned up; DISK_RATIO and the
# container-lookup variables leaked into the sourcing shell.
unset CPU_RATIO RAM_RATIO DISK_RATIO NOVACONTAINER LXC
}
################
[ ${Q=0} -eq 0 ] && echo " - rpc-filter() - Replace stinky UUIDs with refreshing descriptive names inline"
# Pipe filter: rewrites UUIDs on stdin into human-readable names using
# the sed expression lists built by rpc-environment-scan().
function rpc-filter {
replist=`echo ${tenant_repl}${host_repl}${net_repl}${flav_repl}${img_repl}${user_repl} | tr -d "\n"`
OLDIFS=$IFS
# Empty IFS keeps each input line intact (no word splitting on read).
IFS=''
while read inputs; do
echo $inputs | sed -e "${replist}"
done
IFS=$OLDIFS
unset replist
}
################
[ ${Q=0} -eq 0 ] && echo " - rpc-iscsi-generate-sessions() - Generate list of commands to re-initiate currently open iscsi sessions"
# Emit one 'iscsiadm -m node ... -l' login command per currently open
# iSCSI session (field 3 is host:port, field 4 the target IQN).
function rpc-iscsi-generate-sessions() {
iscsiadm --mode session | awk '{ sub(/:.*/, "", $3); printf "iscsiadm -m node -T %s -p %s -l\n", $4, $3 }'
}
################
# Advertise the function under its real name (the banner previously said
# "rpc-common-errors-scan", which does not exist in this file).
[ ${Q=0} -eq 0 ] && echo " - rpc-v4-common-errors-scan() - Pretty much what it sounds like"
# One-shot health sweep for RPC v4 nodes: MySQL replication, OVS/dhcp
# wiring, dead tap ports, under-populated bridges and disk capacity.
function rpc-v4-common-errors-scan() {
echo "Checking for common issues..."
echo -n " - MySQL Replication "
MYSQL_SLAVE=`mysql -e "show slave status"`
if [ "$MYSQL_SLAVE" ]; then
# Both Slave_IO_Running and Slave_SQL_Running rows must be present.
mysql -e "show slave status \G" | egrep 'Slave_(IO|SQL)_Running' | wc -l | grep 2 > /dev/null 2>&1
[ $? -ne 0 ] && echo -n "Local Slave Broken"
# check remote slave, too - steal from rpchousecall
fi
unset MYSQL_SLAVE
echo " - OpenVSwitch"
# Networks without dhcp agents
if [ ! "$OS_NETCMD" ]; then
echo "Skipping dhcp scan due to lack of networking-related openstack commands in this environment"
else
for net in `$OS_NETCMD net-list | awk '/[0-9]/ {print $2}'`; do $OS_NETCMD dhcp-agent-list-hosting-net $net | grep True > /dev/null 2>&1; [ $? -eq 0 ] && echo "	[OK] `echo $net | rpc-filter`"; done
fi
# Dead taps
# tag 4095 is OVS's "dead VLAN" marker for orphaned tap ports.
ovs-vsctl show | grep -A1 \"tap | egrep "tag: 4095" > /dev/null 2>&1
[ $? -eq 0 ] && echo "Dead Taps Detected." || echo "[OK]"
# Network namespaces that don't exist
# fill me in laterz
# bridges with less than 2 ports
for bridge in `ovs-vsctl list-br | egrep 'eth|bond'`; do
PORTS=`ovs-vsctl list-ports $bridge | wc -l`
if [ $PORTS -lt 2 ]; then
echo "	$bridge has less than two ports attached:"
ovs-vsctl list-ports $bridge
fi
done
unset PORTS
echo " - Operating System:"
echo -n "   - Disk: "
# Flag any local/NFS filesystem at 90%+ usage.
df -P -t ext2 -t ext3 -t ext4 -t xfs -t nfs | awk '{print $5}' | tr -d \% | egrep '^[0-9]+$' | egrep '^9[0-9]' > /dev/null 2>&1
[ $? -eq 0 ] && echo "Disk reaching capacity. Investigate" || echo "[OK]"
echo "Done!"
}
################
[ ${Q=0} -eq 0 ] && echo " - rpc-bondflip() - Change given bondX to backup NIC"
# Force the given bond to fail over to its backup NIC by bouncing the
# currently active slave, then report old vs new active slave.
function rpc-bondflip() {
if [ $# -ne 1 ]; then
echo "Usage: rpc-bondflip <bond>"
return 1
fi
if [ ! -f /proc/net/bonding/$1 ]; then
echo "No such bond: $1"
return 1
fi
ACTIVESLAVE=`awk '/Active Slave/ { print $4 }' /proc/net/bonding/$1`
#ifenslave -c $1 $(egrep 'Slave Interface' /proc/net/bonding/$1 | awk '{print $3}' | grep -v $ACTIVESLAVE | head -1)
ifdown $ACTIVESLAVE > /dev/null 2>&1
# Was '1>&1' (a no-op redirect that let stderr leak); silence stderr the
# same way the ifdown above does.
ifup $ACTIVESLAVE > /dev/null 2>&1
NEWACTIVE=`awk '/Active Slave/ { print $4 }' /proc/net/bonding/$1`
echo "$1 was active on $ACTIVESLAVE, now $NEWACTIVE"
unset ACTIVESLAVE NEWACTIVE
}
################
#[ ${Q=0} -eq 0 ] && echo " - rpc-port-stats() - Show live interface usage by port"
# Disabled stub: prints a warning and returns immediately. Everything
# after the early 'return' is dead code retained for a future rework.
function rpc-port-stats() {
echo "Don't use this yet.  Fixing it to be awesome"
return
if [ $# -ne 1 ]; then
echo "Usage: rpc-port-stats <port-id>"
return
fi
# Neutron tap device names embed the first 11 chars of the port UUID.
port=tap${1:0:11}
if [ ! "$( echo $port | egrep '^[0-9a-z]{8}-[0-9a-z]{2}$' )" ]; then
echo "Inavlid port: $1"
echo "Usage: rpc-port-stats <port-id>"
return
fi
echo "Using $1($port)"
# meaty stuff goes here
rm $tmpfile
unset CTR tmpfile port br_int_port
# Come back some day and clean up all the TX_ RX_ vars :/
for x in RX TX; do
for v in pkts bytes drop errs frame over crc coll; do
for t in OLD DELTA; do
echo unset ${x}_${v} ${x}_${v}_${t}
done
done
done
}
################
[ ${Q=0} -eq 0 ] && echo " - rpc-environment-scan() - Update list of internal filters"
# Build the sed replacement lists (tenant/user/network/instance/flavor/
# image UUID -> name) consumed by rpc-filter().
function rpc-environment-scan() {
echo "Scanning environment.  Please hold..."
echo " - RPC Version $RPC_RELEASE"
test -n "$OSA_VERSION" && echo " - OSA Version $OSA_VERSION"
# Previously: test -x `which keystone` -a `which openstack` -- that form
# breaks when either binary is missing (empty unquoted expansion) and the
# second operand was a non-empty-string test, not an -x test.
if ! which keystone > /dev/null 2>&1 || ! which openstack > /dev/null 2>&1; then
echo -e "\nMissing local openstack binaries. Not scanning environment." && return
fi
echo " - Keystone"
if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then
tenant_repl=`openstack project list | awk '/[0-9]/ {print "s/"$2"/[[Tenant: "$4"]]/g;"}'`
user_repl=`openstack user list --domain=default | awk '/[0-9]/ {print "s/"$2"/[[User: "$4"]]/g;"}'`
else
tenant_repl=`keystone tenant-list | awk '/[0-9]/ {print "s/"$2"/[[Tenant: "$4"]]/g;"}'`
user_repl=`keystone user-list | awk '/[0-9]/ {print "s/"$2"/[[User: "$4"]]/g;"}'`
fi
echo " - Networking (${OS_NETCMD="None"})"
[ "$OS_NETCMD" ] && net_repl=`$OS_NETCMD net-list | awk '/[0-9]/ {print "s/"$2"/[[Network: "$4"]]/g;"}'`
echo " - Nova"
host_repl=`nova list | awk '/[0-9]/ {print "s/"$2"/[[Instance: "$4"]]/g;"}' 2> /dev/null`
flav_repl=`nova flavor-list | awk -F\| '/[0-9]/ {print "s/"$3"/[[Flavor: "$8"v,"$4"M,"$5"\/"$6"G,"$7"swap]]/g;"}' 2>/dev/null | tr -d " "`
echo " - Glance"
img_repl=`glance image-list | awk -F\| '/[0-9]/ {gsub(/[ ]+/, "", $2);gsub(/^ /, "", $3);print "s/"$2"/[[Img: "$3"]]/g;"}'`
echo "Done!"
}
################
[ ${Q=0} -eq 0 ] && echo " - rpc-os-version-check() - Are we running latest availble version?"
# Report whether the installed nova-common package matches the newest
# candidate version (Installed == Candidate => one unique version).
function rpc-os-version-check() {
if apt-cache policy nova-common | egrep 'Installed|Candidate' | cut -d: -f3 | sort -ur | wc -l | egrep 1 > /dev/null 2>&1; then
echo -n "Running Latest Available Version: "
else
echo -n "NOT Running Latest Available Version: "
fi
apt-cache policy nova-common | egrep 'Installed|Candidate' | cut -d: -f3 | sort -ur | head -1
}
################
[ ${Q=0} -eq 0 ] && echo " - rpc-instance-test-networking() - Test instance networking."
# Verify one instance's networking from inside its qdhcp namespace:
# port-22 reachability via nc, then (over ssh with the rpc_support key)
# an outbound ping to 8.8.8.8. On RPC >= 9 it first re-executes itself
# inside a neutron agents container, or on the primary neutron_dhcp_agent
# host via ansible. Returns 0 only on full success.
function rpc-instance-test-networking() {
if [ ! "$1" ]; then
echo "Must pass instance UUID or Name"
return
fi
if [ $RPC_RELEASE -ge 9 ]; then
echo "Attempting to find neutron namespaces"
CONTAINER=`lxc-ls -1 | egrep 'neutron(_|-)agents' | tail -1`
LXC="lxc-attach -n $CONTAINER -- "
if [ -n "$CONTAINER" ]; then
echo -e "\nUsing [$CONTAINER:]\n"
# Copy this library into the container and re-run this function there.
$LXC curl -L -s -o /tmp/pccommon.sh https://raw.githubusercontent.com/rcbops/openstack-ops/master/playbooks/files/rpc-o-support/pccommon.sh
$LXC bash -c "source /root/openrc ; S=1 Q=1 source /tmp/pccommon.sh ; rpc-get-neutron-venv ; rpc-instance-test-networking $1"
$LXC rm /tmp/pccommon.sh
unset CONTAINER LXC
return
fi
# Prepare primary neutron_dhcp_agent from deployment node only
. /usr/local/bin/openstack-ansible.rc 2>/dev/null
# NOTE(review): '[ cmd && cmd ]' is not valid test syntax and
# '$( ... | grep -q ... )' always expands empty, so this condition
# errors at runtime. Likely intent:
#   if which ansible > /dev/null && ! ansible --list-hosts neutron_dhcp_agent | grep -q 'hosts (0)'; then
if [ $( which ansible) && ! $( ansible --list-hosts neutron_dhcp_agent |grep -q 'hosts (0)' ) ]; then
echo "Using ansible host $( ansible --list-hosts neutron_dhcp_agent[0] )"
ansible neutron_dhcp_agent[0] -m shell -a "curl -L -s -o /tmp/pccommon.sh https://raw.githubusercontent.com/rcbops/openstack-ops/master/playbooks/files/rpc-o-support/pccommon.sh"
ansible neutron_dhcp_agent[0] -m shell -a "source /root/openrc ; S=1 Q=1 source /tmp/pccommon.sh ; rpc-get-neutron-venv ; rpc-instance-test-networking $1"
return
fi
fi
# Prepare on primary neutron_dhcp_agent
if [ "$( ip netns |egrep 'qdhcp|qrouter' )" ]; then
echo "Found local qrouter or qdhcp namespaces"
else
echo "Failed. Giving up."
return
fi
[ ! "$OS_NETCMD" ] && echo "Unable to find networking subsystem. Giving up." && return
ID=$1
[ -s $HOME/.ssh/rpc_support ] && KEY="-i $HOME/.ssh/rpc_support"
TMPFILE=/tmp/.nova-test-networking-$$-$ID
nova show $ID 2> /dev/null > $TMPFILE
# Pull IP, network name and hypervisor from the 'nova show' table.
IP=`awk -F\| '/ network / { print $3 }' $TMPFILE | tr -d ' '`
NETNAME=`awk '/ network / { print $2 }' $TMPFILE`
HYP=`awk '/OS-EXT-SRV-ATTR:host/ { print $4 }' $TMPFILE`
rm -f $TMPFILE
# 'neutron ... -f shell' prints var="..." assignments; eval imports them.
eval `neutron net-show -Fid -f shell $NETNAME`
NETID=$id
unset id
echo -ne "[$HYP:$NETNAME]\t: "
CMD="nc -w1 $IP 22 "
NSWRAP="ip netns exec qdhcp-$NETID"
#echo $NSWRAP $CMD
$NSWRAP $CMD | grep SSH > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo -n "[SSH PORT: SUCCESS] "
eval `neutron net-show -Fsubnets -f shell $NETID`
eval `neutron subnet-show -Fgateway_ip -f shell $( echo $subnets | cut -d\  -f1)`
if [ "$gateway_ip" ]; then
# If we can SSH, let's ping out...
CMD="ping -c1 -w2 8.8.8.8"
NSWRAP="ip netns exec qdhcp-$NETID ssh -q -o StrictHostKeyChecking=no $KEY ubuntu@$IP"
$NSWRAP $CMD > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo "[PING GOOGLE: SUCCESS]"
RET=0
else
echo "[PING GOOGLE: FAILURE]"
RET=1
fi
else
echo "[PING GOOGLE: No Gateway - SKIPPING]"
RET=1
fi
else
echo "[SSH PORT: FAILED]"
RET=1
fi
unset KEY IP NETNAME NETID CMD NSWRAP
return $RET
}
################
[ ${Q=0} -eq 0 ] && echo " - rpc-instance-per-network() - Per network, spin up an instance on given hypervisor, ping, and tear down"
# Boot one throwaway test instance per (non-external) network on the
# given hypervisor ("compute" or "AZ:compute"), wait for spawn and boot,
# run rpc-instance-test-networking against each, then delete everything
# (instances and the temporary flavor).
function rpc-instance-per-network() {
UUID_LIST=""
if [ $RPC_RELEASE -ge 9 ]; then
if [ ! "$( hostname | egrep 'neutron(_|-)agents' )" ]; then
echo "Attempting to find neutron namespaces"
CONTAINER=`lxc-ls -1 | egrep 'neutron(_|-)agents' | tail -1`
LXC="lxc-attach -n $CONTAINER -- "
if [ -n "$CONTAINER" ]; then
echo -e "\nUsing [$CONTAINER]:\n"
# Re-execute this function inside the neutron agents container.
$LXC curl -L -s -o /tmp/pccommon.sh https://raw.githubusercontent.com/rcbops/openstack-ops/master/playbooks/files/rpc-o-support/pccommon.sh
$LXC bash -c "source /root/openrc ; S=1 Q=1 source /tmp/pccommon.sh ; rpc-get-neutron-venv; rpc-instance-per-network $1"
$LXC rm /tmp/pccommon.sh
unset CONTAINER LXC
return
fi
# Prepare primary neutron_dhcp_agent from deployment node only
. /usr/local/bin/openstack-ansible.rc 2>/dev/null
# NOTE(review): '[ cmd && cmd ]' is invalid test syntax and the grep -q
# substitution is always empty -- this branch errors at runtime; see the
# equivalent note in rpc-instance-test-networking().
if [ $( which ansible) && ! $( ansible --list-hosts neutron_dhcp_agent |grep -q 'hosts (0)' ) ]; then
echo "Using ansible host $( ansible --list-hosts neutron_dhcp_agent[0] )"
ansible neutron_dhcp_agent[0] -m shell -a "curl -L -s -o /tmp/pccommon.sh https://raw.githubusercontent.com/rcbops/openstack-ops/master/playbooks/files/rpc-o-support/pccommon.sh"
ansible neutron_dhcp_agent[0] -m shell -a "source /root/openrc ; S=1 Q=1 source /tmp/pccommon.sh ; rpc-get-neutron-venv ; rpc-instance-per-network $1"
return
fi
fi
fi
# Prepare on primary neutron_dhcp_agent
if [ "$( ip netns |egrep 'qdhcp|qrouter' )" ]; then
echo "Found local qrouter or qdhcp namespaces"
else
echo "Failed. Giving up."
return
fi
[ ! "$OS_NETCMD" ] && echo "Unable to find networking subsystem. Giving up." && return
if [ ! "$1" ]; then
echo "Must pass a compute or AZ:Compute combo."
return
fi
# Accept either "compute" (AZ defaults to nova) or "AZ:compute".
if [ "$( echo $1 | grep : )" ]; then
AZ=`echo $1 | cut -d: -f1`
COMPUTE=`echo $1 | cut -d: -f2`
else
AZ="nova"
COMPUTE=$1
fi
# Column layout of 'nova service-list' differs between RPC 4 and later.
case $RPC_RELEASE in
4) VALID_COMPUTE=`nova service-list --binary nova-compute | awk '/[0-9]/ {print $4}' | grep $COMPUTE`
;;
*) VALID_COMPUTE=`nova service-list --binary nova-compute | awk '/[0-9]/ {print $6}' | grep $COMPUTE`
;;
esac
if [ ! "$VALID_COMPUTE" ]; then
echo "Compute node $COMPUTE doesn't exist."
unset VALID_COMPUTE AZ COMPUTE
return
else
unset VALID_COMPUTE
fi
# Use the most recent ubuntu-ish image and a throwaway 512M/10G flavor.
IMAGE=`glance image-list | awk 'tolower($4) ~ /ubuntu/ {print $2}' | tail -1`
case $RPC_RELEASE in
4) KEYNAME="controller-id_rsa"
;;
*) KEYNAME="rpc_support"
esac
nova flavor-create rpctest-$$-flavor rpctest-$$-flavor 512 10 1 > /dev/null 2>&1
for NET in `$OS_NETCMD net-list | awk -F\| '$2 ~ /[0-9]+/ { print $2 }'`; do
unset router_external
# Imports router_external= from 'net-show -f shell' (':' mapped to '_').
eval `neutron net-show -Frouter:external -f shell $NET | tr : _`
if [ "$router_external" == "True" ]; then
echo "Skipping $NET due to router:external tag"
continue
fi
echo "Spinning up instance on network $NET"
INSTANCE_NAME="rpctest-$$-NET-${NET}"
NEWID=`nova boot --image $IMAGE \
--flavor rpctest-$$-flavor \
--security-group rpc-support \
--key-name $KEYNAME \
--nic net-id=$NET \
--availability-zone $AZ:$COMPUTE \
$INSTANCE_NAME | awk '/ id / { print $4 }'`
UUID_LIST="${NEWID} ${UUID_LIST}"
unset INSTANCE_NAME
done
unset IMAGE router_external
unset SPAWNED_UUID_LIST
# Gate each phase: only spawned instances are boot-checked, only booted
# ones are network-tested.
for UUID in $UUID_LIST; do
rpc-instance-waitfor-spawn $UUID 60
[ $? -eq 0 ] && SPAWNED_UUID_LIST="$UUID $SPAWNED_UUID_LIST" || echo "No further testing will be performed on this instance."
done
unset BOOTED_UUID_LIST
for UUID in $SPAWNED_UUID_LIST; do
rpc-instance-waitfor-boot $UUID 180
[ $? -eq 0 ] && BOOTED_UUID_LIST="$UUID $BOOTED_UUID_LIST" || echo "No further testing will be performed on this instance."
done
unset SPAWNED_UUID_LIST
echo "Testing Instances..."
for ID in $BOOTED_UUID_LIST; do
rpc-instance-test-networking $ID
done
unset BOOTED_UUID_LIST
echo -n "Deleting instances..."
for ID in $UUID_LIST; do
echo -n "."
nova delete $ID > /dev/null
sleep 1
done
nova flavor-delete rpctest-$$-flavor > /dev/null 2>&1
echo
unset UUID_LIST ID
}
################
[ ${Q=0} -eq 0 ] && echo " - rpc-instance-per-network-per-hypervisor() - Per network, spin up an instance on each hypervisor, ping, and tear down"
# For every (non-external) network, boot one throwaway instance on EVERY
# hypervisor, wait for spawn/boot, network-test each, then delete the
# instances and the temporary flavor. Heavyweight full-mesh check.
function rpc-instance-per-network-per-hypervisor() {
if [ $RPC_RELEASE -ge 9 ]; then
echo "Attempting to find neutron namespaces"
CONTAINER=`lxc-ls -1 | egrep 'neutron(_|-)agents' | tail -1`
LXC="lxc-attach -n $CONTAINER -- "
if [ -n "$CONTAINER" ]; then
echo -e "\nUsing [$CONTAINER]:\n"
# Re-execute this function inside the neutron agents container.
$LXC curl -L -s -o /tmp/pccommon.sh https://raw.githubusercontent.com/rcbops/openstack-ops/master/playbooks/files/rpc-o-support/pccommon.sh
$LXC bash -c "source /root/openrc ; S=1 Q=1 source /tmp/pccommon.sh ; rpc-get-neutron-venv; rpc-instance-per-network-per-hypervisor"
$LXC rm /tmp/pccommon.sh
unset CONTAINER LXC
return
fi
# Prepare primary neutron_dhcp_agent from deployment node only
. /usr/local/bin/openstack-ansible.rc 2>/dev/null
# NOTE(review): '[ cmd && cmd ]' is invalid test syntax (see note in
# rpc-instance-test-networking). Also, the second ansible call below
# runs rpc-instance-test-networking rather than this function --
# looks like a copy-paste slip; confirm intended behavior.
if [ $( which ansible) && ! $( ansible --list-hosts neutron_dhcp_agent |grep -q 'hosts (0)' ) ]; then
echo "Using ansible host $( ansible --list-hosts neutron_dhcp_agent[0] )"
ansible neutron_dhcp_agent[0] -m shell -a "curl -L -s -o /tmp/pccommon.sh https://raw.githubusercontent.com/rcbops/openstack-ops/master/playbooks/files/rpc-o-support/pccommon.sh"
ansible neutron_dhcp_agent[0] -m shell -a "source /root/openrc ; S=1 Q=1 source /tmp/pccommon.sh ; rpc-get-neutron-venv ; rpc-instance-test-networking $1"
return
fi
fi
# Prepare on primary neutron_dhcp_agent
if [ "$( ip netns |egrep 'qdhcp|qrouter' )" ]; then
echo "Found local qrouter or qdhcp namespaces"
else
echo "Failed. Giving up."
return
fi
[ ! "$OS_NETCMD" ] && echo "Unable to find networking subsystem. Giving up." && return
# Most recent ubuntu-ish image plus a throwaway 512M/10G flavor.
IMAGE=`glance image-list | awk 'tolower($4) ~ /ubuntu/ {print $2}' | tail -1`
case $RPC_RELEASE in
4) KEYNAME="controller-id_rsa"
;;
*) KEYNAME="rpc_support"
esac
nova flavor-create rpctest-$$-flavor rpctest-$$-flavor 512 10 1 > /dev/null 2>&1
for NET in `$OS_NETCMD net-list | awk -F\| '$4 ~ /[0-9]+/ { print $2 }' | sort -R`; do
unset router_external
# Imports router_external= from 'net-show -f shell' (':' mapped to '_').
eval `neutron net-show -Frouter:external -f shell $NET | tr : _`
if [ "$router_external" == "True" ]; then
echo "Skipping $NET due to router:external tag"
continue
fi
echo -n "Spinning up instance per hypervisor on network $NET..."
UUID_LIST=""
# Column layout of 'nova service-list' differs between RPC 4 and later.
case $RPC_RELEASE in
4) COMPUTES=`nova service-list --binary nova-compute | awk '/[0-9]/ {print $4}'`
;;
*) COMPUTES=`nova service-list --binary nova-compute | awk '/[0-9]/ {print $6}'`
esac
for COMPUTE in $COMPUTES; do
case $RPC_RELEASE in
4) AZ=`nova service-list --binary nova-compute --host $COMPUTE | awk '/[0-9]/ {print $6}'`
;;
*) AZ=`nova service-list --binary nova-compute --host $COMPUTE | awk '/[0-9]/ {print $8}'`
esac
echo -n "."
INSTANCE_NAME="rpctest-$$-${COMPUTE}-${NET}"
CMD="nova boot --image $IMAGE \
--flavor rpctest-$$-flavor \
--security-group rpc-support \
--key-name $KEYNAME \
--nic net-id=$NET \
--availability-zone ${AZ}:${COMPUTE} \
$INSTANCE_NAME"
NEWID=`$CMD | awk '/ id / { print $4 }'`
UUID_LIST="${NEWID} ${UUID_LIST}"
done;
echo
unset SPAWNED_UUID_LIST
# Gate each phase: only spawned instances are boot-checked, only booted
# ones are network-tested.
for UUID in $UUID_LIST; do
rpc-instance-waitfor-spawn $UUID 30
[ $? -eq 0 ] && SPAWNED_UUID_LIST="$UUID $SPAWNED_UUID_LIST" || echo "^^^ No further testing will be performed on this instance ^^^"
done;
unset BOOTED_UUID_LIST
for UUID in $SPAWNED_UUID_LIST; do
rpc-instance-waitfor-boot $UUID 120
[ $? -eq 0 ] && BOOTED_UUID_LIST="$UUID $BOOTED_UUID_LIST" || echo "^^^ No further testing will be performed on this instance ^^^"
done
unset SPAWNED_UUID_LIST
for UUID in $BOOTED_UUID_LIST; do
rpc-instance-test-networking $UUID
done
unset BOOTED_UUID_LIST
echo
echo -n "Deleting instances..."
for ID in $UUID_LIST; do
echo -n "."
nova delete $ID > /dev/null
sleep 1
done
echo;echo
unset UUID_LIST NEWID INSTANCE_NAME TIMEOUT CTR DONE UUID
done
nova flavor-delete rpctest-$$-flavor > /dev/null 2>&1
unset UUID_LIST NEWID IMAGE INSTANCE_NAME TIMEOUT CTR DONE KEY UUID
}
################
[ ${Q=0} -eq 0 ] && echo " - rpc-sg-rules() - Makes security groups easier to read. Pass it a Security Group ID (not name)"
# Pretty-print the rules of a security group (by ID, not name) straight
# from the neutron/quantum database, one aligned row per rule.
function rpc-sg-rules () {
if [ ! "$1" ]; then
echo "Usage: rpc-sg-rules <securityGroupID>"
return 1
fi
# $OS_NETCMD doubles as the database name here (neutron or quantum).
if [ "$( echo $OS_NETCMD | egrep '(neutron|quantum)')" ]; then
RULES=`mysql -BN -e "select remote_group_id,direction,ethertype,protocol,port_range_min,port_range_max,remote_ip_prefix from securitygrouprules where security_group_id = '$1' order by direction desc,port_range_min asc" $OS_NETCMD | tr '\t' ,`
else
echo Broked
fi
echo
echo -e "Dir\tType\tProto\tPortMin\tPortMax\tRemoteIP\t\tRemoteGroupID"
echo -e "-----------------------------------------------------------------------------------------"
# Each rule arrives as a comma-joined row; NULL fields are rendered as
# underscores (or blank for the group ID) to keep columns aligned.
for RULE in $RULES; do
RGID=`echo $RULE | cut -d, -f1 | sed 's/NULL/ /g'`
DIR=`echo $RULE | cut -d, -f2 | sed 's/NULL/_______/g'`
ETHER=`echo $RULE | cut -d, -f3 | sed 's/NULL/_______/g'`
PROT=`echo $RULE | cut -d, -f4 | sed 's/NULL/_______/g'`
PMIN=`echo $RULE | cut -d, -f5 | sed 's/NULL/_______/g'`
PMAX=`echo $RULE | cut -d, -f6 | sed 's/NULL/_______/g'`
RIP=`echo $RULE | cut -d, -f7 | sed 's/NULL/_______/g'`
if [ "$RIP" == "_______" ]; then
RIP="$RIP\t\t"
fi
echo -e "$DIR\t$ETHER\t$PROT\t$PMIN\t$PMAX\t$RIP\t$RGID"
done
unset RULES RULE RGID DIR ETHER PROT PMIN PMAX RIP
}
[ ${Q=0} -eq 0 ] && echo " - rpc-image-check() - Shows all running instances and the state of their base images (active/deleted)"
# For every instance (all tenants), print its backing image ID and that
# image's current glance status (active/deleted).
function rpc-image-check () {
printf "%-37s:: %-39s:: Img State\n" "Instance ID" "Image ID"
# NOTE(review): 'nova list --all-' relies on CLI prefix matching of
# --all-tenants; confirm the installed client accepts the truncation.
for i in `nova list --all- | awk '/[0-9]/ {print $2}'`; do echo -n "$i :: "; IMG=`nova show $i | awk -F\| '$2 ~ /image/ {print $3}' | egrep -o '\([0-9a-z-]+\)\s*$' | tr -d ' '`; echo -n "$IMG :: "; glance image-show ` echo $IMG | tr -d '()'` | awk '$2 ~ /status/ {print $4}'; done
}
[ ${Q=0} -eq 0 ] && echo " - rpc-user-roles() - List all users and their roles across all tenants"
# List every user's roles in every tenant/project, using the v3
# openstack CLI or the v2 keystone CLI depending on
# OS_IDENTITY_API_VERSION.
function rpc-user-roles () {
if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then
for D in default `openstack domain list | awk '/[0-9]+/ { print $2 }'`; do
for U in `openstack user list --domain $D | awk '/[0-9]/ { print $4 }'`; do
echo "User [$U] ::"
for T in `openstack project list | awk '/[0-9]/ {print $4}'`; do
# Was "awk '/@/ {print $42'" -- an unterminated awk program that
# errored at runtime. $2 is the Role column of the whitespace-split
# 'openstack role assignment list --names' table rows (which contain
# user@domain, hence the /@/ filter).
for R in `openstack role assignment list --user $U --project $T --names | awk '/@/ {print $2}'`; do
[ ${HDR=0} == 0 ] && echo -n "	Tenant [$T]: "
HDR=1
echo -n "$R "
done
[ ${HDR=0} == 1 ] && echo
unset HDR
done
echo
done
done
else
for U in `keystone user-list | awk '/[0-9]/ { print $4 }'`; do
echo "User [$U] ::"
for T in `keystone tenant-list | awk '/[0-9]/ {print $4}'`; do
for R in `keystone user-role-list --user $U --tenant $T | awk '/[0-9]/ {print $4}'`; do
[ ${HDR=0} == 0 ] && echo -n "	Tenant [$T]: "
HDR=1
echo -n "$R "
done
[ ${HDR=0} == 1 ] && echo
unset HDR
done
echo
done
fi
}
#[ ${Q=0} -eq 0 ] && echo " - rpc-update-pccommon() - Grabs the latest version of pccommon.sh if there is one"
# Fetch the latest pccommon.sh from GitHub and, if it differs from the
# local copy ($1, default ./pccommon.sh), replace and re-source it.
function rpc-update-pccommon () {
GITHUB="https://raw.githubusercontent.com/rcbops/openstack-ops/master/pccommon.sh"
# Was '[ !"$1" ]' -- without the space that is a non-empty-string test on
# the literal "!<arg>", which is always true, so an explicit path
# argument was silently ignored.
[ ! "$1" ] && PCCOMMON="./pccommon.sh" || PCCOMMON=$1
if [ -s "$PCCOMMON" ]; then
TMPFILE="/tmp/$$.pccommon.upgrade"
curl -s $GITHUB > $TMPFILE 2>&1
if [ $? -ne 0 ]; then
echo "Error connecting to github - not attempting pccommon upgrade."
rm -f $TMPFILE
return 1
fi
EXISTING_SUM=`md5sum $PCCOMMON | cut -d\  -f1`
GITHUB_SUM=`md5sum $TMPFILE | cut -d\  -f1`
if [ "$EXISTING_SUM" != "$GITHUB_SUM" ]; then
echo
echo "**********************************************"
echo "New Version available, upgrading and executing"
echo "**********************************************"
mv $TMPFILE $PCCOMMON
. $PCCOMMON
else
echo "Running latest available version of pccommon"
# Previously the downloaded copy was left behind in /tmp on this path.
rm -f $TMPFILE
fi
fi
}
# Shows swap usage per process
[ ${Q=0} -eq 0 ] && echo " - swap-usage() - Shows current usage of swap memory, by process"
# Report per-process and total swap usage by summing the Swap: fields of
# each process's /proc/<pid>/smaps. Requires root (smaps is restricted).
function swap-usage
{
if [ $UID -ne 0 ]; then
echo "Must be run as root"
return
fi
for PID in `ps -A -o \%p --no-headers | egrep -o '[0-9]+'` ; do
if [ -d /proc/$PID ]; then
# argv[0]; /proc/<pid>/cmdline is NUL-separated.
PROGNAME=`cat /proc/$PID/cmdline | tr '\000' '\t' | cut -f1`
# Sum all Swap: lines (kB) across the process's mappings.
for SWAP in `grep Swap /proc/$PID/smaps 2>/dev/null| awk '{ print $2 }'`; do
SUM=$(( $SUM+$SWAP ))
done
[ $SUM -ne 0 ] && echo "PID=$PID - Swap used: ${SUM}kb - ( $PROGNAME )"
OVERALL=$(( $OVERALL+$SUM ))
SUM=0
fi
done
# Humanize the total via dc (2 decimal places).
if [ $OVERALL -gt $(( 1024 * 1024 )) ]; then
HUMAN="$( echo 2 k $OVERALL 1024 / 1024 / p | dc )GB"
else
if [ $OVERALL -gt 1024 ]; then
HUMAN="$( echo 2 k $OVERALL 1024 / p | dc )MB"
else
HUMAN="${OVERALL}KB"
fi
fi
echo "Overall swap used: ${HUMAN}"
unset HUMAN OVERALL SUM PID
}
# Performs cinder volume verification on cinder servers:
# * For each existing volume, there must be an underlying LVM
# -- Pull list of volumes, SSH to cinder nodes if and check lvs
[ ${Q=0} -eq 0 ] && echo " - rpc-cinder-verify-lvm() - Audit cinder volumes to verify underlying LVM"
# Audit LVM-backed cinder volumes and snapshots: each one listed by
# cinder must have a matching LV on its backing host (checked locally or
# over ssh). Prints PASS/FAIL per volume and per snapshot.
function rpc-cinder-verify-lvm
{
VOLHOST=`cinder list --all-t --fields os-vol-host-attr:host | awk '$4 ~ /@lvm/ {print $2","$4}'`
for volhost in $VOLHOST; do
VOL=`echo $volhost | cut -d, -f1`
HOST=`echo $volhost | cut -d, -f2 | cut -d@ -f1 | cut -d. -f1`
VOLSNAP=`cinder snapshot-list --all- --volume-id=$VOL | awk '/[0-9]/ {print $2}'`
if [ "$(hostname | grep $HOST)" ]; then
VOLEXISTS=`lvs | grep volume-$VOL`
else
VOLEXISTS=`ssh -q $HOST lvs \| grep volume-$VOL`
if [ $? == 255 ]; then
echo "$VOL [ Unable to connect ] $HOST"
fi
fi
if [ "$VOLEXISTS" ]; then
echo "$VOL [ PASS ] @ $HOST"
else
echo "$VOL [ FAIL ] @ $HOST"
fi
for snap in $VOLSNAP; do
if [ "$(hostname | grep $HOST)" ]; then
# Was 'grep volume-$VOL' (copy-paste from the volume check above),
# which made the local-host snapshot check pass whenever the parent
# volume existed; match the snapshot LV like the ssh branch does.
SNAPEXISTS=`lvs | grep _snapshot-$snap`
else
SNAPEXISTS=`ssh -q $HOST lvs \| grep _snapshot-$snap`
if [ $? == 255 ]; then
echo "$snap [ Unable to connect ] $HOST"
fi
fi
if [ "$SNAPEXISTS" ]; then
echo "$snap [ PASS ] @ $HOST (snapshot of $VOL)"
else
echo "$snap[ FAIL ] @ $HOST (snapshot of $VOL)"
fi
done
unset VOLEXISTS SNAPEXISTS
done
unset VOLHOST VOL HOST VOLEXISTS
}
# Performs cinder volume verification on hypervisors:
# * For each existing volume, there
# -- Pull list of volumes, SSH to cinder nodes if and check lvs
[ ${Q=0} -eq 0 ] && echo " - rpc-cinder-verify-attach() - Audit cinder volumes to verify instance attachments"
# Audit cinder volume attachments: for every attached volume, confirm the
# instance's libvirt domain XML on its hypervisor actually references the
# volume (checked locally or over ssh). Prints PASS/FAIL per volume.
function rpc-cinder-verify-attach
{
# Rows: "<volume-id>,<attached-instance-id>" for attached volumes only.
VOLINST=`cinder list --all-t | awk -F\| '$9 ~ /[0-9]/ {gsub(" ","",$2); gsub(" ","",$9); print $2","$9}'`
for volinst in $VOLINST; do
VOL=`echo $volinst | cut -d, -f1`
INST=`echo $volinst | cut -d, -f2 | cut -d. -f1`
# One nova call collects both hypervisor hostname and libvirt name.
HYPID=`nova show --minimal $INST | awk '/hypervisor_hostname/ {hyp = $4}; /instance_name/ {name = $4}; END {print hyp","name}'`
HYP=`echo $HYPID | cut -d, -f1 | cut -d. -f1`
ID=`echo $HYPID | cut -d, -f2`
if [ "$(hostname | grep $HYP)" ]; then
ATTACHED=`virsh dumpxml $ID | grep volume-$VOL`
else
ATTACHED=`ssh -q $HYP virsh dumpxml $ID \| grep volume-$VOL`
[ $? == 255 ] && echo "$VOL [ Unable to connect ] $HYP"
fi
if [ "$ATTACHED" ]; then
echo "$VOL [ PASS ] @ $HYP/$INST:$ID"
else
echo "$VOL [ FAIL ] @ $HYP/$INST:$ID"
fi
done
unset VOLINST VOL INST HYPID HYP ID ATTACHED
}
[ ${Q=0} -eq 0 ] && echo " - rpc-image2base() - Given an image ID, will translate to _base image name."
# Thanks to Mark Deverter
# Translate a Glance image ID into the filename of its cached _base
# image (nova names base images after the SHA1 digest of the image ID).
function rpc-image2base {
if [ -z "$1" ]; then
echo "Must supply image ID"
return
fi
printf 'Image ID: %s\n' "$1"
printf 'Base Image Filename: '
echo -n $1 | sha1sum | cut -d\  -f1
}
################
# Unlisted helper functions
# Poll 'nova show' every 2s until the instance leaves BUILD or the
# timeout expires. Returns 0 when ACTIVE, 1 on timeout, 2 on any other
# terminal state (e.g. ERROR).
function rpc-instance-waitfor-spawn() {
if [ $# -ne 2 ]; then
echo -e "Usage: rpc-instance-waitfor-spawn <instance> <timeout>"
return
fi
ID=$1
SPAWN_TIMEOUT=$2
echo -n "-- Waiting up to $SPAWN_TIMEOUT seconds for $ID to spawn..."
CTR=0
STATE=`nova show $ID | awk '/ status / { print $4 }'`
while [ "${STATE="BUILD"}" == "BUILD" -a $CTR -lt $SPAWN_TIMEOUT ]; do
STATE=`nova show $ID | awk '/ status / { print $4 }'`
CTR=$(( $CTR + 2 ))
echo -n "."
sleep 2
done
# ID is also unset again at the end; harmless double cleanup.
unset DONE ID
if [ $CTR -ge $SPAWN_TIMEOUT ]; then
echo "Timed out"
RET=1
else
if [ "$STATE" != "ACTIVE" ]; then
echo "*ERROR*"
RET=2
else
echo "Done"
RET=0
fi
fi
unset STATE SPAWN_TIMEOUT CTR ID
return $RET
}
# Poll `nova console-log` until the console shows a successful boot signature
# (cloud-init finished / ssh starting) or a known network failure, or until
# the timeout expires. Returns 0 on success, 1 on timeout, 3 if the instance
# cannot be queried, 254/253 for the two recognized network failures.
function rpc-instance-waitfor-boot() {
if [ $# -ne 2 ]; then
echo -e "Usage: rpc-instance-waitfor-boot <instance> <timeout>"
return 1
fi
ID=$1
BOOT_TIMEOUT=$2
echo -n "-- Waiting up to $BOOT_TIMEOUT seconds for $ID to boot..."
# Bail out early if the instance cannot even be shown
nova show $ID > /dev/null 2>&1
[ $? -gt 0 ] && echo "$ID Broken somehow. Giving Up." && return 3
CTR=0
TMPFILE=/tmp/.nova-console-$$-$ID
FAILED=0
SUCCESS=0
while [ $FAILED -eq 0 -a $SUCCESS -eq 0 -a $CTR -lt $BOOT_TIMEOUT ]; do
nova console-log $ID 2> /dev/null > $TMPFILE
# Test for success
egrep -i '(^cloud-init .* finished|starting.*ssh)' $TMPFILE > /dev/null 2>&1
if [ $? -eq 0 ]; then
SUCCESS=1
RET=0
fi
# Test(s) For failure
egrep -i '(Route info failed)' $TMPFILE > /dev/null 2>&1
if [ $? -eq 0 ]; then
FAILED=1
MSG="Networking not functional, no routes"
RET=254
fi
# -z/-P lets grep match the two-line cloud-init-nonet signature across the newline
grep -Pzo 'waiting 10 seconds for network device\ncloud-init-nonet' $TMPFILE > /dev/null 2>&1
if [ $? -eq 0 ]; then
FAILED=1
MSG="Networking not functional, timed out"
RET=253
fi
CTR=$(( $CTR + 5 ))
[ $FAILED -eq 0 -a $SUCCESS -eq 0 ] && sleep 5
echo -n "."
done
rm -f $TMPFILE
unset TMPFILE
if [ $CTR -ge $BOOT_TIMEOUT ]; then
echo "Timed out"
RET=1
else
if [ $FAILED -gt 0 ]; then
echo "Failed: $MSG"
else
echo "Done"
RET=0
fi
fi
# NOTE(review): 'R' below looks like a typo (MSG is never unset; RET must
# survive until the return on the next line) -- confirm intent.
unset BOOT_TIMEOUT CTR R ID FAILED SUCCESS
return $RET
}
# Print the RAID layout of a Dell server via omreport:
# controllers -> virtual disks (state/type/size) -> member physical disks.
function dell_raid_layout
{
    OMLOCATIONS="/opt/dell/srvadmin/bin/omreport /usr/bin/omreport"
    OM=
    for location in $OMLOCATIONS; do
        [ -x $location ] && OM=$location
    done
    if [ ! "$OM" ]; then
        echo "Couldn't find OMREPORT $OM"
        return
    fi
    TMPFILE=/tmp/.raid_layout.$$
    CONTROLLERS=`$OM storage controller | awk '/^ID/ { print $3 }'`
    for ctrl in $CONTROLLERS; do
        echo "* Controller $ctrl"
        # dump all pdisks on controller to TMPFILE
        $OM storage pdisk controller=$ctrl > ${TMPFILE}.pdisks
        # dump info for all vdisks on controller
        $OM storage vdisk controller=$ctrl > ${TMPFILE}.vdisks
        VDISKS=`awk '/^ID/ { print $3 }' ${TMPFILE}.vdisks`
        for vdisk in $VDISKS; do
            # (removed a redundant VDISKS= reassignment the original repeated
            # on every iteration of this loop -- it recomputed the same list)
            SEDFILTER="/ID\s*:\s+$vdisk/,/^\s*$/"
            RAIDSIZE=`sed -rn "$SEDFILTER { /^Size/p}" ${TMPFILE}.vdisks | awk '{ print $3 " " $4}'`
            RAIDSTATE=`sed -rn "$SEDFILTER { /^Status/p}" ${TMPFILE}.vdisks | awk '{ print $3}'`
            RAIDTYPE=`sed -rn "$SEDFILTER { /^Layout/p}" ${TMPFILE}.vdisks | awk '{ print $3}'`
            echo "|-Virtual Disk $vdisk [$RAIDSTATE] ($RAIDTYPE @ $RAIDSIZE)"
            # Get IDs for pdisks involved
            PDISKS=`$OM storage pdisk vdisk=$vdisk controller=$ctrl | awk '/^ID/ { print $3}'`
            for pdisk in $PDISKS; do
                SEDFILTER="/^ID\s*:\s*$pdisk/,/^\s*$/"
                DISKSTATE=`sed -rn "$SEDFILTER { /^Status/p}" ${TMPFILE}.pdisks | awk '{print $3}'`
                DISKSIZE=`sed -rn "$SEDFILTER { /^Used/p}" ${TMPFILE}.pdisks | awk '{print $6 " " $7}'`
                echo "| |-- Disk $pdisk [$DISKSTATE] $DISKSIZE"
            done
        done
        rm -f ${TMPFILE}.pdisks
        rm -f ${TMPFILE}.vdisks
    done
}
# Print the wall-clock start time of process $1.
# Field 22 of /proc/PID/stat is the process start time in clock ticks since
# boot; subtracting it (in seconds) from system uptime gives the age, which
# is fed to date(1).
# NOTE(review): relies on $HZ (clock ticks per second) being defined
# elsewhere in this file -- confirm it is set before this is called.
function pid_start {
SYS_START=`cat /proc/uptime | cut -d\ -f1 | cut -d. -f1`
PROC_START=`cat /proc/$1/stat | cut -d\ -f22`
PROC_START=$(( $PROC_START / $HZ ))
PROC_UPTIME=$(( $SYS_START - $PROC_START ))
PROC_START=`date -d "-${PROC_UPTIME} seconds"`
echo "$PROC_START"
unset SYS_START PROC_START PROC_UPTIME
}
# Print the age of process $1 as "Dd, Hh, Mm, Ss".
# Computes total age in seconds from /proc/uptime and /proc/PID/stat field 22,
# then splits it into day/hour/minute/second components.
# NOTE(review): relies on $HZ (clock ticks per second) being defined
# elsewhere in this file -- confirm it is set before this is called.
function pid_age {
SYS_START=`cat /proc/uptime | cut -d\ -f1 | cut -d. -f1`
PROC_START=`cat /proc/$1/stat | cut -d\ -f22`
PROC_START=$(( $PROC_START / $HZ ))
UPSEC=$(( $SYS_START - $PROC_START ))
UPMIN=$(( $UPSEC / 60 ))
UPHR=$(( $UPSEC / 60 / 60 ))
UPDAY=$(( $UPSEC / 60 / 60 / 24 ))
# strip whole days from the hour count, whole hours from the minute count, etc.
DAYHR=$(( $UPDAY * 24 )); UPHR=$(( $UPHR - $DAYHR ))
HRMIN=$(( $UPHR * 60 )); UPMIN=$(( $UPMIN - $HRMIN ))
MINSEC=$(( $UPDAY * 24 * 60 * 60 + $UPHR * 60 * 60 + $UPMIN * 60 )); UPSEC=$(( $UPSEC - $MINSEC ))
echo "${UPDAY}d, ${UPHR}h, ${UPMIN}m, ${UPSEC}s"
unset SYS_START PROC_START UPSEC UPMIN UPHR UPDAY DAYHR HRMIN MINSEC
}
# Convert a size in KB to a human-readable string (K/M/G/T/P/E suffix).
# Repeatedly divides by 1024 (integer, via dc) until the value drops below
# 1024*1024, counting the divisions in 'power', then prints the final value
# with 3 digits of dc precision plus the matching suffix.
function humanize_kb () {
scale=( K M G T P E )
if [ $# -ne 1 ]; then
echo "Usage: humanize_kb <value>"
return
fi
val=$1
while [ $val -gt $(( 1024 * 1024 )) ]; do
val=$( echo $val 1024 / p | dc )
# ${power=0} defaults the counter on first use
power=$(( ${power=0} + 1 ))
done
# "3 k" sets dc's precision to 3 decimal places for the final division
final=`echo 3 k $val 1024 / p | dc`
echo "$final${scale[${power=0}]}"
unset power final val scale
}
# Detect the HA VIP network namespace ("vips") used by HA deployments.
ip netns | grep '^vips$' > /dev/null 2>&1
[ $? -eq 0 ] && V4_HA=1
# Pick the first available network CLI. `command -v` replaces the previous
# backtick-`which` idiom, which executed the (empty) output of a command
# substitution just to recover its exit status.
if command -v neutron > /dev/null 2>&1; then
    OS_NETCMD="neutron"
elif command -v quantum > /dev/null 2>&1; then
    OS_NETCMD="quantum"
elif command -v nova > /dev/null 2>&1; then
    OS_NETCMD="nova"
else
    OS_NETCMD=""
fi
[ ${Q=0} -eq 0 ] && echo "Done!"
if [ ${S=0} -eq 0 ]; then
    rpc-environment-scan
fi
#rpc-update-pccommon
|
/*
This file is part of the JitCat library.
Copyright (C) <NAME> 2018
Distributed under the MIT License (license terms are at http://opensource.org/licenses/MIT).
*/
#include "jitcat/CustomTypeInfo.h"
#include "jitcat/CatClassDefinition.h"
#include "jitcat/CatRuntimeContext.h"
#include "jitcat/Configuration.h"
#include "jitcat/CustomTypeMemberInfo.h"
#include "jitcat/CustomTypeMemberFunctionInfo.h"
#include "jitcat/ReflectableHandle.h"
#include "jitcat/StaticMemberInfo.h"
#include "jitcat/Tools.h"
#include "jitcat/TypeCaster.h"
#include "jitcat/TypeRegistry.h"
#include <cassert>
#include <iostream>
using namespace jitcat::Reflection;
//Constructs a named custom type with no backing class definition.
//The type starts at size 0; storage grows as members are added through the
//add*Member functions. The isConstType flag is stored as-is.
CustomTypeInfo::CustomTypeInfo(const char* typeName, bool isConstType):
TypeInfo(typeName, 0, std::make_unique<CustomObjectTypeCaster>(this)),
classDefinition(nullptr),
isConstType(isConstType),
defaultData(nullptr),
triviallyCopyable(true),
defaultConstructorFunction(nullptr),
destructorFunction(nullptr),
dylib(nullptr)
{
}
//Constructs a custom type backed by a script class definition; the type name
//is taken from the class definition itself.
CustomTypeInfo::CustomTypeInfo(AST::CatClassDefinition* classDefinition):
TypeInfo(classDefinition->getClassName().c_str(), 0, std::make_unique<CustomObjectTypeCaster>(this)),
classDefinition(classDefinition),
isConstType(false),
defaultData(nullptr),
triviallyCopyable(true),
defaultConstructorFunction(nullptr),
destructorFunction(nullptr),
dylib(nullptr)
{
}
//Destroys the default-value buffer (running member destructors and freeing
//the allocation) if one was ever created.
CustomTypeInfo::~CustomTypeInfo()
{
if (defaultData != nullptr)
{
instanceDestructor(defaultData);
}
}
//Adds a double member with the given default value, writing the default into
//the type's default buffer and into every already-existing instance.
//NOTE(review): the parameter is declared float (kept to match the external
//declaration); consider changing the interface to double.
TypeMemberInfo* CustomTypeInfo::addDoubleMember(const std::string& memberName, float defaultValue, bool isWritable, bool isConst)
{
	//Widen to double BEFORE copying: the member slot is sizeof(double), and
	//memcpy-ing sizeof(double) bytes out of the 4-byte float parameter (as the
	//original code did) read past its storage and stored a garbage value.
	const double value = defaultValue;
	unsigned char* data = increaseDataSize(sizeof(double));
	memcpy(data, &value, sizeof(double));
	unsigned int offset = (unsigned int)(data - defaultData);
	if (defaultData == nullptr)
	{
		offset = 0;
	}
	//Patch the default into every live instance as well.
	std::set<Reflectable*>::iterator end = instances.end();
	for (std::set<Reflectable*>::iterator iter = instances.begin(); iter != end; ++iter)
	{
		memcpy((unsigned char*)(*iter) + offset, &value, sizeof(double));
	}
	TypeMemberInfo* memberInfo = new CustomBasicTypeMemberInfo<double>(memberName, offset, CatGenericType::createDoubleType(isWritable, isConst));
	std::string lowerCaseMemberName = Tools::toLowerCase(memberName);
	TypeInfo::addMember(lowerCaseMemberName, memberInfo);
	return memberInfo;
}
//Adds a float member with the given default value.
//The default is written into the type's default-value buffer and patched into
//every instance that already exists.
TypeMemberInfo* CustomTypeInfo::addFloatMember(const std::string& memberName, float defaultValue, bool isWritable, bool isConst)
{
	unsigned char* slot = increaseDataSize(sizeof(float));
	memcpy(slot, &defaultValue, sizeof(float));
	unsigned int memberOffset = defaultData == nullptr ? 0 : (unsigned int)(slot - defaultData);
	for (Reflectable* instance : instances)
	{
		memcpy((unsigned char*)instance + memberOffset, &defaultValue, sizeof(float));
	}
	TypeMemberInfo* memberInfo = new CustomBasicTypeMemberInfo<float>(memberName, memberOffset, CatGenericType::createFloatType(isWritable, isConst));
	TypeInfo::addMember(Tools::toLowerCase(memberName), memberInfo);
	return memberInfo;
}
//Adds an int member with the given default value.
//The default is written into the type's default-value buffer and patched into
//every instance that already exists.
TypeMemberInfo* CustomTypeInfo::addIntMember(const std::string& memberName, int defaultValue, bool isWritable, bool isConst)
{
	unsigned char* slot = increaseDataSize(sizeof(int));
	memcpy(slot, &defaultValue, sizeof(int));
	unsigned int memberOffset = defaultData == nullptr ? 0 : (unsigned int)(slot - defaultData);
	for (Reflectable* instance : instances)
	{
		memcpy((unsigned char*)instance + memberOffset, &defaultValue, sizeof(int));
	}
	TypeMemberInfo* memberInfo = new CustomBasicTypeMemberInfo<int>(memberName, memberOffset, CatGenericType::createIntType(isWritable, isConst));
	TypeInfo::addMember(Tools::toLowerCase(memberName), memberInfo);
	return memberInfo;
}
//Adds a bool member with the given default value.
//The default is written into the type's default-value buffer and patched into
//every instance that already exists.
TypeMemberInfo* CustomTypeInfo::addBoolMember(const std::string& memberName, bool defaultValue, bool isWritable, bool isConst)
{
	unsigned char* slot = increaseDataSize(sizeof(bool));
	memcpy(slot, &defaultValue, sizeof(bool));
	unsigned int memberOffset = defaultData == nullptr ? 0 : (unsigned int)(slot - defaultData);
	for (Reflectable* instance : instances)
	{
		memcpy((unsigned char*)instance + memberOffset, &defaultValue, sizeof(bool));
	}
	TypeMemberInfo* memberInfo = new CustomBasicTypeMemberInfo<bool>(memberName, memberOffset, CatGenericType::createBoolType(isWritable, isConst));
	TypeInfo::addMember(Tools::toLowerCase(memberName), memberInfo);
	return memberInfo;
}
//Adds a string member initialised with a copy of defaultValue.
//Strings require construction/destruction, so adding one makes the type
//non-trivially-copyable.
TypeMemberInfo* CustomTypeInfo::addStringMember(const std::string& memberName, const Configuration::CatString& defaultValue, bool isWritable, bool isConst)
{
	triviallyCopyable = false;
	unsigned char* slot = increaseDataSize(sizeof(Configuration::CatString));
	unsigned int memberOffset = defaultData == nullptr ? 0 : (unsigned int)(slot - defaultData);
	//Placement-construct a copy of the default in every live instance...
	for (Reflectable* instance : instances)
	{
		new ((unsigned char*)instance + memberOffset) Configuration::CatString(defaultValue);
	}
	//...and in the default-value buffer itself.
	new (slot) Configuration::CatString(defaultValue);
	TypeMemberInfo* memberInfo = new CustomTypeObjectDataMemberInfo(memberName, memberOffset, CatGenericType::createStringType(isWritable, isConst));
	TypeInfo::addMember(Tools::toLowerCase(memberName), memberInfo);
	return memberInfo;
}
//Adds an object member. For non-value ownership the member is stored as a
//ReflectableHandle (initialised to defaultValue, which may be null); value
//ownership falls through to addDataObjectMember for inline storage.
TypeMemberInfo* CustomTypeInfo::addObjectMember(const std::string& memberName, unsigned char* defaultValue, TypeInfo* objectTypeInfo, TypeOwnershipSemantics ownershipSemantics, bool isWritable, bool isConst)
{
	triviallyCopyable = false;
	if (ownershipSemantics != TypeOwnershipSemantics::Value)
	{
		CatGenericType type = CatGenericType(objectTypeInfo, isWritable, isConst).toHandle(ownershipSemantics, isWritable, isConst);
		std::size_t offset = addReflectableHandle(reinterpret_cast<Reflectable*>(defaultValue));
		objectTypeInfo->addDependentType(this);
		//Reuse the handle type computed above; the original rebuilt the
		//identical CatGenericType expression a second time here.
		TypeMemberInfo* memberInfo = new CustomTypeObjectMemberInfo(memberName, offset, type);
		std::string lowerCaseMemberName = Tools::toLowerCase(memberName);
		TypeInfo::addMember(lowerCaseMemberName, memberInfo);
		//"$"-prefixed members expose their own members as deferred members.
		if (Tools::startsWith(memberName, "$"))
		{
			addDeferredMembers(memberInfo);
		}
		return memberInfo;
	}
	else
	{
		return addDataObjectMember(memberName, objectTypeInfo);
	}
}
//Adds a by-value (inline) object member of the given type.
//Storage for the nested object is appended to this type's data block and the
//object is default-constructed in the default buffer and in every live
//instance. Self-containment (objectTypeInfo == this) is rejected because the
//type's size would be unbounded.
TypeMemberInfo* CustomTypeInfo::addDataObjectMember(const std::string& memberName, TypeInfo* objectTypeInfo)
{
if (objectTypeInfo != this)
{
//A nested object may itself require non-trivial copies.
triviallyCopyable = triviallyCopyable && objectTypeInfo->isTriviallyCopyable();
unsigned char* data = increaseDataSize(objectTypeInfo->getTypeSize());
unsigned int offset = (unsigned int)(data - defaultData);
if (defaultData == nullptr)
{
offset = 0;
}
//Construct the nested object in every live instance, then in the default buffer.
std::set<Reflectable*>::iterator end = instances.end();
for (std::set<Reflectable*>::iterator iter = instances.begin(); iter != end; ++iter)
{
objectTypeInfo->placementConstruct((unsigned char*)(*iter) + offset, objectTypeInfo->getTypeSize());
}
objectTypeInfo->placementConstruct(data, objectTypeInfo->getTypeSize());
TypeMemberInfo* memberInfo = new CustomTypeObjectDataMemberInfo(memberName, offset, CatGenericType(CatGenericType(objectTypeInfo, true, false), TypeOwnershipSemantics::Value, false, false, false));
std::string lowerCaseMemberName = Tools::toLowerCase(memberName);
TypeInfo::addMember(lowerCaseMemberName, memberInfo);
objectTypeInfo->addDependentType(this);
//"$"-prefixed members expose their own members as deferred members.
if (Tools::startsWith(memberName, "$"))
{
addDeferredMembers(memberInfo);
}
return memberInfo;
}
else
{
assert(false);
return nullptr;
}
}
//Adds a member of the given type with a zero/empty/null default value,
//dispatching to the matching type-specific add*Member overload.
//Returns nullptr for unsupported types.
TypeMemberInfo* CustomTypeInfo::addMember(const std::string& memberName, const CatGenericType& type)
{
	const bool writable = type.isWritable();
	const bool constant = type.isConst();
	if (type.isFloatType())
	{
		return addFloatMember(memberName, 0.0f, writable, constant);
	}
	if (type.isDoubleType())
	{
		return addDoubleMember(memberName, 0.0, writable, constant);
	}
	if (type.isIntType())
	{
		return addIntMember(memberName, 0, writable, constant);
	}
	if (type.isBoolType())
	{
		return addBoolMember(memberName, false, writable, constant);
	}
	if (type.isStringValueType())
	{
		return addStringMember(memberName, "", writable, constant);
	}
	if (type.isPointerToReflectableObjectType())
	{
		return addObjectMember(memberName, nullptr, type.getPointeeType()->getObjectType(), type.getOwnershipSemantics(), writable, constant);
	}
	if (type.isReflectableObjectType())
	{
		return addDataObjectMember(memberName, type.getObjectType());
	}
	return nullptr;
}
//Adds a static double member with the given default value.
//Storage is heap-allocated and owned via staticData.
StaticMemberInfo* CustomTypeInfo::addStaticDoubleMember(const std::string& memberName, double defaultValue, bool isWritable, bool isConst)
{
	constexpr std::size_t dataSize = sizeof(double);
	unsigned char* memberData = new unsigned char[dataSize];
	staticData.emplace_back(memberData);
	memcpy(memberData, &defaultValue, dataSize);
	std::string lowerCaseMemberName = Tools::toLowerCase(memberName);
	//Honor isWritable/isConst (previously accepted but ignored), matching the
	//non-static addDoubleMember.
	StaticMemberInfo* memberInfo = new StaticBasicTypeMemberInfo<double>(memberName, reinterpret_cast<double*>(memberData), CatGenericType::createDoubleType(isWritable, isConst));
	staticMembers.emplace(lowerCaseMemberName, memberInfo);
	return memberInfo;
}
//Adds a static float member with the given default value.
//Storage is heap-allocated and owned via staticData.
StaticMemberInfo* CustomTypeInfo::addStaticFloatMember(const std::string& memberName, float defaultValue, bool isWritable, bool isConst)
{
	constexpr std::size_t dataSize = sizeof(float);
	unsigned char* memberData = new unsigned char[dataSize];
	staticData.emplace_back(memberData);
	memcpy(memberData, &defaultValue, dataSize);
	std::string lowerCaseMemberName = Tools::toLowerCase(memberName);
	//Honor isWritable/isConst (previously accepted but ignored), matching the
	//non-static addFloatMember.
	StaticMemberInfo* memberInfo = new StaticBasicTypeMemberInfo<float>(memberName, reinterpret_cast<float*>(memberData), CatGenericType::createFloatType(isWritable, isConst));
	staticMembers.emplace(lowerCaseMemberName, memberInfo);
	return memberInfo;
}
//Adds a static int member with the given default value.
//Storage is heap-allocated and owned via staticData.
StaticMemberInfo* CustomTypeInfo::addStaticIntMember(const std::string& memberName, int defaultValue, bool isWritable, bool isConst)
{
	constexpr std::size_t dataSize = sizeof(int);
	unsigned char* memberData = new unsigned char[dataSize];
	staticData.emplace_back(memberData);
	memcpy(memberData, &defaultValue, dataSize);
	std::string lowerCaseMemberName = Tools::toLowerCase(memberName);
	//Honor isWritable/isConst (previously accepted but ignored), matching the
	//non-static addIntMember.
	StaticMemberInfo* memberInfo = new StaticBasicTypeMemberInfo<int>(memberName, reinterpret_cast<int*>(memberData), CatGenericType::createIntType(isWritable, isConst));
	staticMembers.emplace(lowerCaseMemberName, memberInfo);
	return memberInfo;
}
//Adds a static bool member with the given default value.
//Storage is heap-allocated and owned via staticData.
StaticMemberInfo* CustomTypeInfo::addStaticBoolMember(const std::string& memberName, bool defaultValue, bool isWritable, bool isConst)
{
	constexpr std::size_t dataSize = sizeof(bool);
	unsigned char* memberData = new unsigned char[dataSize];
	staticData.emplace_back(memberData);
	memcpy(memberData, &defaultValue, dataSize);
	std::string lowerCaseMemberName = Tools::toLowerCase(memberName);
	//Honor isWritable/isConst (previously accepted but ignored), matching the
	//non-static addBoolMember.
	StaticMemberInfo* memberInfo = new StaticBasicTypeMemberInfo<bool>(memberName, reinterpret_cast<bool*>(memberData), CatGenericType::createBoolType(isWritable, isConst));
	staticMembers.emplace(lowerCaseMemberName, memberInfo);
	return memberInfo;
}
//Adds a static string member initialised with a copy of defaultValue.
//Storage is heap-allocated, placement-constructed, and owned via staticData.
StaticMemberInfo* CustomTypeInfo::addStaticStringMember(const std::string& memberName, const Configuration::CatString& defaultValue, bool isWritable, bool isConst)
{
	unsigned char* storage = new unsigned char[sizeof(Configuration::CatString)];
	staticData.emplace_back(storage);
	new (storage) Configuration::CatString(defaultValue);
	StaticMemberInfo* memberInfo = new StaticClassObjectMemberInfo(memberName, storage, CatGenericType::createStringType(isWritable, isConst));
	staticMembers.emplace(Tools::toLowerCase(memberName), memberInfo);
	return memberInfo;
}
//Adds a static object member. For non-value ownership the member is stored as
//a ReflectableHandle copy-constructed from a temporary handle to defaultValue
//(which may be null); value ownership falls through to
//addStaticDataObjectMember.
StaticMemberInfo* CustomTypeInfo::addStaticObjectMember(const std::string& memberName, unsigned char* defaultValue, TypeInfo* objectTypeInfo, TypeOwnershipSemantics ownershipSemantics, bool isWritable, bool isConst)
{
if (ownershipSemantics != TypeOwnershipSemantics::Value)
{
CatGenericType type = CatGenericType(objectTypeInfo, isWritable, isConst).toHandle(ownershipSemantics, isWritable, isConst);
constexpr std::size_t dataSize = sizeof(ReflectableHandle);
unsigned char* memberData = new unsigned char[dataSize];
staticData.emplace_back(memberData);
//Register the dependency between this type and the member's object type.
objectTypeInfo->addDependentType(this);
ReflectableHandle handle(reinterpret_cast<Reflectable*>(defaultValue));
//Copy-construct the handle into the freshly allocated static storage.
type.copyConstruct(memberData, dataSize, reinterpret_cast<unsigned char*>(&handle), dataSize);
StaticMemberInfo* memberInfo = new StaticClassHandleMemberInfo(memberName, reinterpret_cast<ReflectableHandle*>(memberData), type);
std::string lowerCaseMemberName = Tools::toLowerCase(memberName);
staticMembers.emplace(lowerCaseMemberName, memberInfo);
return memberInfo;
}
else
{
return addStaticDataObjectMember(memberName, objectTypeInfo);
}
}
//Adds a static member storing an object of objectTypeInfo by value.
//The storage is heap-allocated, default-constructed in place, and owned via
//staticData. Self-containment is rejected (the size would be unbounded).
StaticMemberInfo* CustomTypeInfo::addStaticDataObjectMember(const std::string& memberName, TypeInfo* objectTypeInfo)
{
if (objectTypeInfo != this)
{
std::size_t dataSize = objectTypeInfo->getTypeSize();
unsigned char* memberData = new unsigned char[dataSize];
staticData.emplace_back(memberData);
objectTypeInfo->placementConstruct(memberData, objectTypeInfo->getTypeSize());
StaticMemberInfo* memberInfo = new StaticClassObjectMemberInfo(memberName, memberData, CatGenericType(CatGenericType(objectTypeInfo, false, false), TypeOwnershipSemantics::Value, false, false, false));
std::string lowerCaseMemberName = Tools::toLowerCase(memberName);
staticMembers.emplace(lowerCaseMemberName, memberInfo);
objectTypeInfo->addDependentType(this);
return memberInfo;
}
else
{
assert(false);
return nullptr;
}
}
//Adds a static member of the given type with a zero/empty/null default,
//dispatching to the matching addStatic*Member overload.
//Returns nullptr for unsupported types.
StaticMemberInfo* CustomTypeInfo::addStaticMember(const std::string& memberName, const CatGenericType& type)
{
	const bool writable = type.isWritable();
	const bool constant = type.isConst();
	if (type.isFloatType())
	{
		return addStaticFloatMember(memberName, 0.0f, writable, constant);
	}
	if (type.isDoubleType())
	{
		return addStaticDoubleMember(memberName, 0.0, writable, constant);
	}
	if (type.isIntType())
	{
		return addStaticIntMember(memberName, 0, writable, constant);
	}
	if (type.isBoolType())
	{
		return addStaticBoolMember(memberName, false, writable, constant);
	}
	if (type.isStringValueType())
	{
		return addStaticStringMember(memberName, "", writable, constant);
	}
	if (type.isPointerToReflectableObjectType())
	{
		return addStaticObjectMember(memberName, nullptr, type.getPointeeType()->getObjectType(), type.getOwnershipSemantics(), writable, constant);
	}
	if (type.isReflectableObjectType())
	{
		return addStaticDataObjectMember(memberName, type.getObjectType());
	}
	return nullptr;
}
//Registers a script-defined member function under its lower-cased name.
CustomTypeMemberFunctionInfo* CustomTypeInfo::addMemberFunction(const std::string& memberFunctionName, const CatGenericType& thisType, AST::CatFunctionDefinition* functionDefinition)
{
	CustomTypeMemberFunctionInfo* info = new CustomTypeMemberFunctionInfo(functionDefinition, thisType);
	std::string lowerCaseName = Tools::toLowerCase(memberFunctionName);
	memberFunctions.emplace(lowerCaseName, info);
	return info;
}
bool CustomTypeInfo::setDefaultConstructorFunction(const std::string& constructorFunctionName)
{
SearchFunctionSignature sig(constructorFunctionName, {});
MemberFunctionInfo* functionInfo = getMemberFunctionInfo(sig);
if (functionInfo != nullptr)
{
defaultConstructorFunction = functionInfo;
return true;
}
return false;
}
bool jitcat::Reflection::CustomTypeInfo::setDestructorFunction(const std::string& destructorFunctionName)
{
SearchFunctionSignature sig(destructorFunctionName, {});
MemberFunctionInfo* functionInfo = getMemberFunctionInfo(sig);
if (functionInfo != nullptr)
{
destructorFunction = functionInfo;
return true;
}
return false;
}
void CustomTypeInfo::removeMember(const std::string& memberName)
{
TypeMemberInfo* memberInfo = releaseMember(memberName);
if (memberInfo != nullptr)
{
removedMembers.emplace_back(memberInfo);
}
}
//Returns the buffer holding this type's default member values, viewed as a
//Reflectable (may be null until the first member is added).
Reflectable* CustomTypeInfo::getDefaultInstance()
{
return reinterpret_cast<Reflectable*>(defaultData);
}
//Custom types are runtime/script-defined, as opposed to reflected C++ types.
bool CustomTypeInfo::isCustomType() const
{
return true;
}
//Returns the backing class definition, or nullptr for types built without one.
jitcat::AST::CatClassDefinition* CustomTypeInfo::getClassDefinition()
{
return classDefinition;
}
//Constructs an instance of this type in 'buffer' and registers it in the
//instances set. Uses the script-defined default constructor when one exists
//(JIT-compiled under LLVM, interpreted otherwise); falls back to copying the
//default-value buffer.
void CustomTypeInfo::placementConstruct(unsigned char* buffer, std::size_t bufferSize) const
{
	//std::set::insert is already a no-op when the element exists, so the
	//original's separate find() before inserting was redundant.
	instances.insert(reinterpret_cast<Reflectable*>(buffer));
	if (defaultConstructorFunction != nullptr)
	{
		if constexpr (Configuration::enableLLVM)
		{
			//Call the JIT-compiled constructor through its native address.
			reinterpret_cast<void(*)(unsigned char*)>(defaultConstructorFunction->getFunctionAddress().functionAddress)(buffer);
		}
		else
		{
			//Interpreted fallback: invoke the constructor via the AST.
			std::any base = reinterpret_cast<Reflectable*>(buffer);
			CatRuntimeContext tempContext("temp");
			defaultConstructorFunction->call(&tempContext, base, {});
		}
	}
	else
	{
		//No script-defined constructor: clone the default-value buffer.
		createDataCopy(defaultData, typeSize, buffer, bufferSize);
	}
	if constexpr (Configuration::logJitCatObjectConstructionEvents)
	{
		if (bufferSize > 0 && buffer != nullptr)
		{
			std::cout << "(CustomTypeInfo::placementConstruct) Placement constructed " << typeName << " at "<< std::hex << reinterpret_cast<uintptr_t>(buffer) << "\n";
		}
	}
}
//Destroys an instance of this type in place.
//Uses the script-defined destructor when one exists (JIT-compiled under LLVM,
//interpreted otherwise), otherwise destructs each member individually; the
//buffer is then unregistered from the instances set.
//NOTE(review): when a script destructor runs, members are not individually
//destructed here -- presumably the generated destructor handles that; confirm.
void CustomTypeInfo::placementDestruct(unsigned char* buffer, std::size_t bufferSize)
{
if (destructorFunction != nullptr)
{
if constexpr (Configuration::enableLLVM)
{
//Call the JIT-compiled destructor through its native function address.
reinterpret_cast<void(*)(unsigned char*)>(destructorFunction->getFunctionAddress().functionAddress)(buffer);
}
else
{
//Interpreted fallback: invoke the destructor via the AST.
std::any base = reinterpret_cast<Reflectable*>(buffer);
CatRuntimeContext tempContext("temp");
destructorFunction->call(&tempContext, base, {});
}
}
else
{
instanceDestructorInPlace(buffer);
}
removeInstance(reinterpret_cast<Reflectable*>(buffer));
if constexpr (Configuration::logJitCatObjectConstructionEvents)
{
if (bufferSize > 0 && buffer != nullptr)
{
std::cout << "(CustomTypeInfo::placementDestruct) Placement destructed " << typeName << " at " << std::hex << reinterpret_cast<uintptr_t>(buffer) << "\n";
}
}
}
void CustomTypeInfo::copyConstruct(unsigned char* targetBuffer, std::size_t targetBufferSize, const unsigned char* sourceBuffer, std::size_t sourceBufferSize)
{
std::size_t typeSize = getTypeSize();
assert(typeSize <= targetBufferSize && typeSize <= sourceBufferSize);
createDataCopy(sourceBuffer, typeSize, targetBuffer, typeSize);
if constexpr (Configuration::logJitCatObjectConstructionEvents)
{
if (targetBufferSize > 0 && targetBuffer != nullptr)
{
std::cout << "(CustomTypeInfo::copyConstruct) Copy constructed " << typeName << " at " << std::hex << reinterpret_cast<uintptr_t>(targetBuffer) << " from " << std::hex << reinterpret_cast<uintptr_t>(sourceBuffer) << "\n";
}
}
}
void CustomTypeInfo::moveConstruct(unsigned char* targetBuffer, std::size_t targetBufferSize, unsigned char* sourceBuffer, std::size_t sourceBufferSize)
{
std::size_t typeSize = getTypeSize();
assert(targetBufferSize >= typeSize && sourceBufferSize >= typeSize);
assert(sourceBuffer != nullptr || sourceBufferSize == 0);
assert(targetBuffer != nullptr);
if (triviallyCopyable)
{
memcpy(targetBuffer, sourceBuffer, typeSize);
}
else
{
auto end = membersByOrdinal.end();
for (auto iter = membersByOrdinal.begin(); iter != end; ++iter)
{
if (iter->second->isDeferred())
{
continue;
}
std::size_t memberOffset = static_cast<CustomMemberInfo*>(iter->second)->memberOffset;
iter->second->catType.moveConstruct(&targetBuffer[memberOffset], iter->second->catType.getTypeSize(), &sourceBuffer[memberOffset], iter->second->catType.getTypeSize());
}
}
if constexpr (Configuration::logJitCatObjectConstructionEvents)
{
if (targetBufferSize > 0 && targetBuffer != nullptr)
{
std::cout << "(CustomTypeInfo::moveConstruct) Move constructed " << typeName << " at " << std::hex << reinterpret_cast<uintptr_t>(targetBuffer) << " from " << std::hex << reinterpret_cast<uintptr_t>(sourceBuffer) << "\n";
}
}
}
//True while no member requiring construction/destruction (string, handle,
//non-trivial nested object) has been added.
bool CustomTypeInfo::isTriviallyCopyable() const
{
return triviallyCopyable;
}
//Deletable only when no other type depends on this one and no instances
//remain alive. Uses empty() instead of size() == 0 (the idiomatic form).
bool CustomTypeInfo::canBeDeleted() const
{
	return dependentTypes.empty() && instances.empty();
}
//Returns the LLVM JIT dylib associated with this type (may be null).
llvm::orc::JITDylib* CustomTypeInfo::getDylib() const
{
return dylib;
}
//Associates this type with an LLVM JIT dylib.
void CustomTypeInfo::setDylib(llvm::orc::JITDylib* generatedDylib)
{
dylib = generatedDylib;
}
//Destructs the instance at 'data' in place and frees its buffer.
//Only valid for buffers allocated with new[] (as done by increaseDataSize).
void CustomTypeInfo::instanceDestructor(unsigned char* data)
{
instanceDestructorInPlace(data);
delete[] data;
if constexpr (Configuration::logJitCatObjectConstructionEvents)
{
std::cout << "(CustomTypeInfo::instanceDestructor) deallocated buffer of size " << std::dec << typeSize << ": " << std::hex << reinterpret_cast<uintptr_t>(data) << "\n";
}
}
void CustomTypeInfo::instanceDestructorInPlace(unsigned char* data)
{
auto end = membersByOrdinal.end();
for (auto iter = membersByOrdinal.begin(); iter != end; ++iter)
{
if (iter->second->isDeferred())
{
continue;
}
else
{
CustomMemberInfo* customMember = static_cast<CustomMemberInfo*>(iter->second);
iter->second->catType.placementDestruct(&data[customMember->memberOffset], customMember->catType.getTypeSize());
}
}
Reflectable::placementDestruct(reinterpret_cast<Reflectable*>(data));
}
//Appends storage for a ReflectableHandle member and placement-constructs it
//(pointing at defaultValue, which may be null) in the default buffer and in
//every live instance. Returns the member's byte offset.
std::size_t CustomTypeInfo::addReflectableHandle(Reflectable* defaultValue)
{
unsigned char* data = increaseDataSize(sizeof(ReflectableHandle));
std::size_t offset = (data - defaultData);
//NOTE(review): defaultData is (re)assigned by increaseDataSize, so this
//defensive reset appears unreachable -- confirm before removing.
if (defaultData == nullptr)
{
offset = 0;
}
std::set<Reflectable*>::iterator end = instances.end();
for (std::set<Reflectable*>::iterator iter = instances.begin(); iter != end; ++iter)
{
new ((unsigned char*)(*iter) + offset) ReflectableHandle(defaultValue);
}
new (data) ReflectableHandle(defaultValue);
return offset;
}
//Grows the default-data buffer and every live instance's buffer by 'amount'
//bytes. Returns a pointer to the start of the newly added region within the
//(possibly relocated) default buffer.
unsigned char* CustomTypeInfo::increaseDataSize(std::size_t amount)
{
std::size_t oldSize = typeSize;
increaseDataSize(defaultData, amount, typeSize);
typeSize += amount;
//Each instance buffer is reallocated, so the instances set must be rebuilt
//with the relocated pointers.
std::set<Reflectable*>::iterator end = instances.end();
std::set<Reflectable*> replacedSet;
for (std::set<Reflectable*>::iterator iter = instances.begin(); iter != end; ++iter)
{
unsigned char* data = (unsigned char*)(*iter);
increaseDataSize(data, amount, oldSize);
replacedSet.insert(reinterpret_cast<Reflectable*>(data));
}
instances = replacedSet;
return defaultData + oldSize;
}
//Reallocates 'data' (currentSize bytes) into a buffer grown by 'amount',
//migrating contents, retargeting ReflectableHandles that pointed at the old
//buffer, and zero-initialising the new tail. 'data' is updated in place.
void CustomTypeInfo::increaseDataSize(unsigned char*& data, std::size_t amount, std::size_t currentSize)
{
	unsigned char* oldData = data;
	std::size_t newSize = currentSize + amount;
	//Allocate and log once; the original duplicated this allocation and the
	//logging statement in both branches of the if below.
	data = new unsigned char[newSize];
	if constexpr (Configuration::logJitCatObjectConstructionEvents)
	{
		std::cout << "(CustomTypeInfo::increaseDataSize) Allocated buffer of size " << std::dec << newSize << ": " << std::hex << reinterpret_cast<uintptr_t>(data) << "\n";
	}
	if (oldData != nullptr
		&& currentSize != 0)
	{
		//Copy the old contents over, retarget handles, then destroy the old buffer.
		createDataCopy(oldData, currentSize, data, newSize);
		Reflectable::replaceReflectable(reinterpret_cast<Reflectable*>(oldData), reinterpret_cast<Reflectable*>(data));
		instanceDestructor(oldData);
	}
	else
	{
		Reflectable::replaceReflectable(reinterpret_cast<Reflectable*>(oldData), reinterpret_cast<Reflectable*>(data));
	}
	//Initialise the additional memory to zero
	memset(data + currentSize, 0, amount);
}
//Copies an instance of this type from sourceData into the uninitialised
//buffer copyData. Trivial members are covered by the raw memcpy; non-trivial
//members (strings, handles, nested objects) are then copy-constructed over
//their raw-copied bytes.
void CustomTypeInfo::createDataCopy(const unsigned char* sourceData, std::size_t sourceSize, unsigned char* copyData, std::size_t copySize) const
{
	assert(copySize >= sourceSize);
	assert(sourceData != nullptr || sourceSize == 0);
	assert(copyData != nullptr);
	//Raw-copy everything first (the original repeated this memcpy in a
	//redundant else branch; also, '&copyData' below had been corrupted into
	//an HTML '&copy;' entity).
	memcpy(copyData, sourceData, sourceSize);
	if (!triviallyCopyable)
	{
		auto end = membersByOrdinal.end();
		for (auto iter = membersByOrdinal.begin(); iter != end; ++iter)
		{
			if (iter->second->isDeferred())
			{
				continue;
			}
			std::size_t memberOffset = static_cast<CustomMemberInfo*>(iter->second)->memberOffset;
			iter->second->catType.copyConstruct(&copyData[memberOffset], iter->second->catType.getTypeSize(), &sourceData[memberOffset], iter->second->catType.getTypeSize());
		}
	}
}
//Unregisters an instance from the instances set.
//set::erase(key) is a no-op when the key is absent, so the original
//find-then-erase sequence was redundant.
void CustomTypeInfo::removeInstance(Reflectable* instance)
{
	instances.erase(instance);
}
|
#!/bin/bash
# Resolve the directory that holds the project's executables/scripts.
# Modern $(...) command substitution replaces the legacy backtick form
# (easier to nest and to read; behaviour is identical).
BIN_DIR=$(./define_bin_dir.sh)
cat > fsp.tmp << EOF
% ---------------------------------- FINITE-SOURCE RUPTURE MODEL --------------------------------
%
% Event : NEAR COAST OF CENTRAL CHILE 2010/02/27 [Hayes (NEIC,2014)]
% EventTAG: p000h7rfHAYES
%
% Loc : LAT = -36.2200 LON = -73.1740 DEP = 25.0
% Size : LEN = 350 km WID = 53.25 km Mw = 8.85 Mo = 2.4510603e+22 Nm
% Mech : STRK = 18.7 DIP = 17.5 RAKE = 112 Htop = 3.53 km
% Rupt : HypX = 37.5 km Hypz = 26.625 km avTr = 5.9 s avVr = 2.11 km/s
%
% ---------------------------------- inversion-related parameters --------------------------------
%
% Invs : Nx = 14 Nz = 3 Fmin = 0.002 Hz Fmax = 1 Hz
% Invs : Dx = 25 km Dz = 17.75 km
% Invs : Ntw = 8 Nsg = 5 (# of time-windows,# of fault segments)
% Invs : LEN = 3 s SHF = 1.5 s (time-window length and time-shift)
% SVF : Asymetriccosine (type of slip-velocity function used)
%
% Data : SGM TELE TRIL LEVEL GPS INSAR SURF OTHER
% Data : 0 48 0 0 0 0 48 0
% Data : 0 61.17 0 0 0 0 61.17 0
% Data : 0 36.63 0 0 0 0 36.63 0
%
%--------------------------------------------------------------------------------------------------
%
% VELOCITY-DENSITY STRUCTURE
% No. of layers = 6
%
% DEPTH P-VEL S-VEL DENS QP QS
% [km] [km/s] [km/s] [g/cm^3]
% 0.00 2.50 1.20 2.10 1000 500
% 1.00 6.00 3.40 2.70 1000 500
% 11.00 6.60 3.70 2.90 1000 500
% 22.00 7.20 4.00 3.10 1000 500
% 32.00 8.08 4.47 3.38 1200 500
% 228.00 8.59 4.66 3.45 360 140
%
%--------------------------------------------------------------------------------------------------
% 3-Feb-2017 created by GPH (ghayes@usgs.gov)
%--------------------------------------------------------------------------------------------------
%
% SOURCE MODEL PARAMETERS
% X,Y,Z coordinates in km; SLIP in m
% if applicable: RAKE in deg, RISE in s, TRUP in s, slip in each TW in m
%
% Coordinates are given for center of each subfault or segment: |'|
% Origin of local coordinate system at epicenter: X (EW) = 0, Y (NS) = 0
%--------------------------------------------------------------------------------------------------
%--------------------------- MULTISEGMENT MODEL ---------------------------------------------------
%--------------------------------------------------------------------------------------------------
% SEGMENT # 1: STRIKE = 18.7 deg DIP = 17.5 deg
% LEN = 350 km WID = 53.25 km
% depth to top: Z2top = 16.99 km
% coordinates of top-center:
% LAT = -34.9625012594156, LON = -73.002813177712
% hypocenter on SEG # 1 : along-strike (X) = 37.5, down-dip (Z) = 26.625
% Nsbfs = 42 subfaults
%--------------------------------------------------------------------------------------------------
% LAT LON X==EW Y==NS Z SLIP RAKE TRUP RISE SF_MOMENT
%--------------------------------------------------------------------------------------------------
-36.3843 -73.4423 -32.0372 -15.6045 19.6625 6.3921 122.9948 16.0000 6.0000 1.13e+20
-36.1712 -73.3529 -24.0753 8.1160 19.6625 1.1696 87.6819 4.0000 1.5000 2.06e+19
-35.9581 -73.2635 -16.0695 31.8291 19.6625 0.1471 70.9816 16.0000 3.0000 2.59e+18
-35.7449 -73.1740 -8.0109 55.5460 19.6625 1.4115 125.1143 24.0000 12.0000 2.49e+19
-35.5318 -73.0846 0.0778 79.2442 19.6625 8.6343 102.4856 32.0000 7.5000 1.52e+20
-35.3187 -72.9952 8.2143 102.9294 19.6625 14.4901 108.9657 40.0000 9.0000 2.55e+20
-35.1056 -72.9058 16.3893 126.6127 19.6625 10.3908 120.6460 56.0000 12.0000 1.83e+20
-34.8925 -72.8164 24.6163 150.2885 19.6625 9.2366 102.8717 64.0000 1.5000 1.63e+20
-34.6794 -72.7270 32.8768 173.9569 19.6625 9.1188 128.2137 68.0000 12.0000 1.61e+20
-34.4663 -72.6376 41.1796 197.6176 19.6625 5.9541 109.6114 76.0000 1.5000 1.05e+20
-34.2532 -72.5482 49.5293 221.2708 19.6625 2.3619 128.7441 104.0000 10.5000 4.16e+19
-34.0401 -72.4588 57.9162 244.9165 19.6625 2.2150 148.1253 124.0000 4.5000 3.90e+19
-33.8270 -72.3694 66.3448 268.5545 19.6625 0.9315 144.5585 144.0000 3.0000 1.64e+19
-33.6139 -72.2799 74.8288 292.1849 19.6625 0.1307 125.3914 160.0000 6.0000 2.30e+18
-36.4331 -73.2634 -16.0125 -20.9862 25.0000 12.2985 97.2480 4.0000 1.5000 2.71e+20
-36.2200 -73.1740 -8.0120 2.7195 25.0000 6.6988 112.8563 0.0000 7.5000 1.47e+20
-36.0069 -73.0846 0.0279 26.4178 25.0000 0.2575 147.8169 12.0000 9.0000 5.67e+18
-35.7938 -72.9952 8.1205 50.1087 25.0000 0.9203 85.4143 24.0000 10.5000 2.03e+19
-35.5807 -72.9058 16.2475 73.7920 25.0000 7.9613 92.6950 40.0000 9.0000 1.75e+20
-35.3676 -72.8164 24.4221 97.4624 25.0000 12.1735 105.1502 44.0000 4.5000 2.68e+20
-35.1545 -72.7270 32.6352 121.1308 25.0000 9.7945 121.9535 64.0000 6.0000 2.16e+20
-34.9414 -72.6375 40.9046 144.7916 25.0000 9.8108 91.1716 68.0000 4.5000 2.16e+20
-34.7283 -72.5481 49.2029 168.4449 25.0000 10.5390 100.4897 68.0000 6.0000 2.32e+20
-34.5152 -72.4587 57.5526 192.0906 25.0000 3.9522 84.0947 80.0000 6.0000 8.70e+19
-34.3021 -72.3693 65.9352 215.7288 25.0000 0.3420 147.8737 108.0000 9.0000 7.53e+18
-34.0889 -72.2799 74.3597 239.3706 25.0000 2.1901 125.5808 136.0000 12.0000 4.82e+19
-33.8758 -72.1905 82.8303 262.9935 25.0000 0.2637 135.8309 152.0000 7.5000 5.80e+18
-33.6627 -72.1011 91.3376 286.6089 25.0000 0.0645 92.1426 168.0000 10.5000 1.42e+18
-36.4819 -73.0845 -0.0079 -26.3974 30.3375 16.2212 111.5059 16.0000 1.5000 3.57e+20
-36.2688 -72.9951 8.0314 -2.7066 30.3375 10.4041 120.8134 4.0000 4.5000 2.29e+20
-36.0557 -72.9057 16.1099 20.9769 30.3375 1.4696 158.7084 16.0000 6.0000 3.23e+19
-35.8426 -72.8163 24.2365 44.6529 30.3375 0.0557 135.0947 24.0000 7.5000 1.22e+18
-35.6295 -72.7269 32.4108 68.3213 30.3375 3.9006 115.1179 40.0000 1.5000 8.59e+19
-35.4164 -72.6375 40.6192 91.9767 30.3375 9.6075 128.4157 40.0000 7.5000 2.11e+20
-35.2033 -72.5481 48.8749 115.6302 30.3375 10.5964 103.1460 56.0000 3.0000 2.33e+20
-34.9902 -72.4587 57.1687 139.2761 30.3375 13.0931 102.3453 56.0000 7.5000 2.88e+20
-34.7771 -72.3693 65.5094 162.9144 30.3375 14.0243 107.1655 76.0000 1.5000 3.09e+20
-34.5640 -72.2799 73.8877 186.5451 30.3375 5.2135 118.1755 92.0000 12.0000 1.15e+20
-34.3509 -72.1904 82.3170 210.1682 30.3375 0.8277 78.7801 112.0000 9.0000 1.82e+19
-34.1378 -72.1010 90.7835 233.7837 30.3375 4.2257 114.3804 136.0000 10.5000 9.30e+19
-33.9247 -72.0116 99.2914 257.3916 30.3375 2.4323 154.7378 164.0000 10.5000 5.35e+19
-33.7116 -71.9222 107.8359 280.9919 30.3375 0.1322 124.3315 172.0000 3.0000 2.91e+18
%--------------------------------------------------------------------------------------------------
% SEGMENT # 2: STRIKE = 18.7 deg DIP = 10 deg
% LEN = 350 km WID = 71 km
% depth to top: Z2top = 4.66 km
% coordinates of top-center:
% LAT = -34.7615117743998, LON = -73.7416689683824
% hypocenter on SEG # 1 : along-strike (X) = 37.5, down-dip (Z) = 26.625
% Nsbfs = 56 subfaults
%--------------------------------------------------------------------------------------------------
% LAT LON X==EW Y==NS Z SLIP RAKE TRUP RISE SF_MOMENT
%--------------------------------------------------------------------------------------------------
-36.1833 -74.1801 -98.5956 6.3291 6.2058 4.1210 107.6475 48.0000 0.0000 5.71e+19
-35.9702 -74.0904 -90.7622 30.1110 6.2058 4.3215 134.9567 36.0000 0.0000 5.99e+19
-35.7571 -74.0007 -82.8931 53.8854 6.2058 2.6999 139.1175 40.0000 9.0000 3.74e+19
-35.5440 -73.9110 -74.9751 77.6525 6.2058 4.3335 92.7834 44.0000 1.5000 6.00e+19
-35.3309 -73.8213 -67.0128 101.4123 6.2058 4.9374 93.9477 64.0000 4.5000 6.84e+19
-35.1178 -73.7316 -59.0066 125.1647 6.2058 5.4590 97.8479 80.0000 1.5000 7.56e+19
-34.9047 -73.6419 -50.9566 148.9096 6.2058 2.8393 95.1763 76.0000 10.5000 3.93e+19
-34.6916 -73.5522 -42.8676 172.6471 6.2058 0.0779 71.8833 84.0000 6.0000 1.08e+18
-34.4785 -73.4625 -34.7308 196.3771 6.2058 0.0407 70.7329 92.0000 1.5000 5.63e+17
-34.2654 -73.3728 -26.5555 220.0997 6.2058 0.0391 72.1364 112.0000 1.5000 5.42e+17
-34.0523 -73.2831 -18.3282 243.8148 6.2058 0.0371 72.2003 120.0000 10.5000 5.13e+17
-33.8392 -73.1934 -10.0675 267.5223 6.2058 0.0611 71.4040 124.0000 7.5000 8.46e+17
-33.6261 -73.1037 -1.7644 291.2222 6.2058 0.0756 70.7606 132.0000 10.5000 1.05e+18
-33.4129 -73.0141 6.5715 314.9258 6.2058 0.0612 71.5554 144.0000 1.5000 8.48e+17
-36.2337 -73.9948 -81.9131 0.8977 9.2881 6.2513 91.0186 48.0000 4.5000 8.66e+19
-36.0206 -73.9051 -74.0443 24.6642 9.2881 5.6905 119.7881 40.0000 6.0000 7.88e+19
-35.8075 -73.8154 -66.1264 48.4234 9.2881 7.7000 145.1587 52.0000 12.0000 1.07e+20
-35.5944 -73.7257 -58.1732 72.1751 9.2881 8.6551 116.1854 48.0000 9.0000 1.20e+20
-35.3813 -73.6360 -50.1714 95.9195 9.2881 11.1465 103.6269 60.0000 12.0000 1.54e+20
-35.1682 -73.5463 -42.1303 119.6565 9.2881 12.6741 117.0220 72.0000 3.0000 1.76e+20
-34.9551 -73.4566 -34.0364 143.3860 9.2881 9.7873 112.6451 72.0000 9.0000 1.36e+20
-34.7420 -73.3669 -25.9083 167.1080 9.2881 8.2465 123.7802 72.0000 9.0000 1.14e+20
-34.5289 -73.2772 -17.7325 190.8226 9.2881 7.9953 116.2880 92.0000 6.0000 1.11e+20
-34.3158 -73.1875 -9.5183 214.5296 9.2881 5.7315 126.1249 112.0000 1.5000 7.94e+19
-34.1027 -73.0978 -1.2569 238.2292 9.2881 0.0999 72.4532 120.0000 9.0000 1.38e+18
-33.8896 -73.0082 7.0332 261.9211 9.2881 0.0918 71.6881 128.0000 9.0000 1.27e+18
-33.6765 -72.9185 15.3794 285.6055 9.2881 0.0208 71.7729 132.0000 12.0000 2.88e+17
-33.4634 -72.8288 23.7675 309.2823 9.2881 0.1831 99.1366 144.0000 4.5000 2.54e+18
-36.2842 -73.8095 -65.2516 -4.5768 12.3703 4.9684 113.0315 24.0000 10.5000 8.75e+19
-36.0711 -73.7198 -57.3428 19.1744 12.3703 6.5361 109.7698 28.0000 3.0000 1.15e+20
-35.8580 -73.6301 -49.3896 42.9183 12.3703 6.6519 124.4132 32.0000 9.0000 1.17e+20
-35.6449 -73.5404 -41.3922 66.6547 12.3703 8.8180 118.4518 44.0000 9.0000 1.55e+20
-35.4318 -73.4507 -33.3508 90.3837 12.3703 14.3138 103.5219 52.0000 10.5000 2.52e+20
-35.2187 -73.3610 -25.2657 114.1052 12.3703 16.3244 113.8650 60.0000 4.5000 2.88e+20
-35.0056 -73.2713 -17.1417 137.8193 12.3703 16.8144 108.6317 68.0000 6.0000 2.96e+20
-34.7924 -73.1816 -8.9699 161.5370 12.3703 16.1243 109.5655 68.0000 1.5000 2.84e+20
-34.5793 -73.0919 -0.7550 185.2361 12.3703 14.3317 96.3931 80.0000 3.0000 2.52e+20
-34.3662 -73.0023 7.4935 208.9276 12.3703 12.4070 95.6711 100.0000 10.5000 2.19e+20
-34.1531 -72.9126 15.7892 232.6115 12.3703 2.6457 101.0362 120.0000 7.5000 4.66e+19
-33.9400 -72.8229 24.1271 256.2879 12.3703 1.7370 84.8696 124.0000 9.0000 3.06e+19
-33.7269 -72.7332 32.5118 279.9567 12.3703 0.9041 135.5455 132.0000 12.0000 1.59e+19
-33.5138 -72.6435 40.9337 303.6179 12.3703 0.0237 138.0370 148.0000 10.5000 4.17e+17
-36.3346 -73.6242 -48.6159 -10.0719 15.4526 5.3656 139.7118 12.0000 1.5000 9.45e+19
-36.1215 -73.5345 -40.6626 13.6640 15.4526 5.5962 116.3852 20.0000 7.5000 9.86e+19
-35.9084 -73.4448 -32.6695 37.3925 15.4526 3.7373 124.6089 28.0000 1.5000 6.58e+19
-35.6953 -73.3551 -24.6323 61.1135 15.4526 4.9010 133.1759 32.0000 10.5000 8.63e+19
-35.4822 -73.2654 -16.5513 84.8271 15.4526 11.6252 100.3359 52.0000 7.5000 2.05e+20
-35.2691 -73.1758 -8.4358 108.5332 15.4526 16.9582 116.4245 60.0000 4.5000 2.99e+20
-35.0560 -73.0861 -0.2724 132.2318 15.4526 16.2895 122.6051 64.0000 6.0000 2.87e+20
-34.8429 -72.9964 7.9386 155.9229 15.4526 14.2540 112.1542 80.0000 12.0000 2.51e+20
-34.6298 -72.9067 16.1880 179.6064 15.4526 12.6042 118.0709 72.0000 3.0000 2.22e+20
-34.4167 -72.8170 24.4892 203.2824 15.4526 8.0679 109.3127 76.0000 12.0000 1.42e+20
-34.2036 -72.7273 32.8237 226.9508 15.4526 1.7074 125.5437 108.0000 7.5000 3.01e+19
-33.9905 -72.6376 41.2049 250.6116 15.4526 1.4320 99.4126 124.0000 3.0000 2.52e+19
-33.7774 -72.5479 49.6235 274.2648 15.4526 2.3450 136.7264 144.0000 6.0000 4.13e+19
-33.5643 -72.4582 58.0839 297.9103 15.4526 0.0950 95.7748 156.0000 3.0000 1.67e+18
%--------------------------------------------------------------------------------------------------
% SEGMENT # 3: STRIKE = 18.7 deg DIP = 23.5 deg
% LEN = 350 km WID = 53.25 km
% depth to top: Z2top = 34.74 km
% coordinates of top-center:
% LAT = -35.1061937102391, LON = -72.4743519197046
% hypocenter on SEG # 1 : along-strike (X) = 37.5, down-dip (Z) = 26.625
% Nsbfs = 42 subfaults
%--------------------------------------------------------------------------------------------------
% LAT LON X==EW Y==NS Z SLIP RAKE TRUP RISE SF_MOMENT
%--------------------------------------------------------------------------------------------------
-36.5280 -72.9146 15.4810 -31.6428 38.2855 10.6865 91.9385 16.0000 9.0000 3.20e+20
-36.3149 -72.8248 23.5892 -7.9664 38.2855 13.5795 109.4861 12.0000 10.5000 4.07e+20
-36.1018 -72.7350 31.7458 15.7025 38.2855 2.9481 108.8203 20.0000 0.0000 8.83e+19
-35.8887 -72.6452 39.9414 39.3638 38.2855 0.5862 103.4852 28.0000 0.0000 1.76e+19
-35.6756 -72.5554 48.1849 63.0176 38.2855 3.2037 71.2410 36.0000 3.0000 9.60e+19
-35.4624 -72.4656 56.4671 86.6748 38.2855 6.6272 126.2589 48.0000 9.0000 1.99e+20
-35.2493 -72.3758 64.8012 110.3133 38.2855 6.8422 106.7618 72.0000 7.5000 2.05e+20
-35.0362 -72.2860 73.1688 133.9442 38.2855 11.3562 114.5809 68.0000 9.0000 3.40e+20
-34.8231 -72.1962 81.5835 157.5673 38.2855 10.9350 104.9041 84.0000 4.5000 3.28e+20
-34.6100 -72.1064 90.0357 181.1829 38.2855 4.2961 92.5136 88.0000 1.5000 1.29e+20
-34.3969 -72.0166 98.5300 204.7908 38.2855 3.6298 104.8478 104.0000 12.0000 1.09e+20
-34.1838 -71.9268 107.0659 228.3909 38.2855 6.6549 138.9937 124.0000 3.0000 1.99e+20
-33.9707 -71.8370 115.6480 251.9833 38.2855 4.9643 157.8830 160.0000 10.5000 1.49e+20
-33.7576 -71.7472 124.2667 275.5680 38.2855 0.0428 82.6800 176.0000 1.5000 1.28e+18
-36.5749 -72.7419 30.8930 -36.8990 45.3633 6.4925 96.2177 24.0000 4.5000 1.95e+20
-36.3618 -72.6521 39.0432 -13.2370 45.3633 9.7651 113.6104 16.0000 6.0000 2.93e+20
-36.1487 -72.5623 47.2326 10.4175 45.3633 4.0847 153.7122 24.0000 4.5000 1.22e+20
-35.9356 -72.4725 55.4700 34.0643 45.3633 1.3387 146.9030 40.0000 12.0000 4.01e+19
-35.7225 -72.3827 63.7461 57.7036 45.3633 0.7050 92.1100 40.0000 4.5000 2.11e+19
-35.5094 -72.2929 72.0697 81.3352 45.3633 4.4761 95.5885 52.0000 1.5000 1.34e+20
-35.2963 -72.2031 80.4316 104.9591 45.3633 2.3866 146.7741 68.0000 1.5000 7.15e+19
-35.0832 -72.1133 88.8360 128.5755 45.3633 5.8835 130.5969 68.0000 12.0000 1.76e+20
-34.8701 -72.0234 97.2963 152.1840 45.3633 8.0050 113.2867 76.0000 6.0000 2.40e+20
-34.6570 -71.9336 105.7851 175.7849 45.3633 4.2349 96.4618 80.0000 1.5000 1.27e+20
-34.4439 -71.8438 114.3157 199.3781 45.3633 2.9265 139.3516 92.0000 3.0000 8.77e+19
-34.2308 -71.7540 122.8925 222.9636 45.3633 10.3126 141.9533 108.0000 1.5000 3.09e+20
-34.0177 -71.6642 131.5061 246.5414 45.3633 4.2509 157.3371 156.0000 3.0000 1.27e+20
-33.8046 -71.5744 140.1608 270.1114 45.3633 0.1680 110.6492 172.0000 3.0000 5.04e+18
-36.6219 -72.5691 46.2906 -42.1938 52.4411 4.3295 120.3463 28.0000 6.0000 1.30e+20
-36.4088 -72.4793 54.4828 -18.5463 52.4411 3.9519 103.2711 32.0000 1.5000 1.18e+20
-36.1957 -72.3895 62.7097 5.0937 52.4411 1.0217 94.9010 36.0000 12.0000 3.06e+19
-35.9826 -72.2997 70.9843 28.7260 52.4411 1.7315 110.6010 44.0000 10.5000 5.19e+19
-35.7695 -72.2099 79.2976 52.3508 52.4411 1.2039 82.2898 44.0000 1.5000 3.61e+19
-35.5564 -72.1201 87.6582 75.9678 52.4411 2.0075 74.9873 52.0000 12.0000 6.02e+19
-35.3433 -72.0303 96.0570 99.5772 52.4411 0.9796 94.5302 60.0000 3.0000 2.94e+19
-35.1302 -71.9405 104.5027 123.1789 52.4411 3.2295 104.5017 76.0000 9.0000 9.68e+19
-34.9171 -71.8507 112.9861 146.7730 52.4411 4.9754 152.7641 92.0000 3.0000 1.49e+20
-34.7040 -71.7609 121.5113 170.3593 52.4411 1.0964 155.9462 88.0000 3.0000 3.29e+19
-34.4909 -71.6711 130.0829 193.9378 52.4411 0.2474 152.7010 96.0000 1.5000 7.41e+18
-34.2778 -71.5813 138.6914 217.5086 52.4411 4.9760 145.8189 116.0000 10.5000 1.49e+20
-34.0646 -71.4915 147.3414 241.0828 52.4411 1.8186 144.4535 144.0000 12.0000 5.45e+19
-33.8515 -71.4016 156.0414 264.6380 52.4411 0.0688 114.7581 168.0000 1.5000 2.06e+18
%--------------------------------------------------------------------------------------------------
% SEGMENT # 4: STRIKE = 8.7 deg DIP = 17.5 deg
% LEN = 300 km WID = 71 km
% depth to top: Z2top = 16.99 km
% coordinates of top-center:
% LAT = -37.7956734874351, LON = -73.875733125741
% hypocenter on SEG # 1 : along-strike (X) = 37.5, down-dip (Z) = 26.625
% Nsbfs = 48 subfaults
%--------------------------------------------------------------------------------------------------
% LAT LON X==EW Y==NS Z SLIP RAKE TRUP RISE SF_MOMENT
%--------------------------------------------------------------------------------------------------
-39.0354 -73.9694 -77.1007 -312.1174 19.6625 0.0881 123.0779 160.0000 10.5000 1.55e+18
-38.8130 -73.9271 -73.6455 -287.3554 19.6625 4.1916 77.7512 156.0000 12.0000 7.38e+19
-38.5907 -73.8848 -70.1625 -262.6060 19.6625 3.5855 71.8103 148.0000 1.5000 6.32e+19
-38.3683 -73.8425 -66.6650 -237.8471 19.6625 0.5706 125.8236 136.0000 7.5000 1.01e+19
-38.1459 -73.8002 -63.1399 -213.0898 19.6625 1.4841 69.6321 112.0000 0.0000 2.61e+19
-37.9235 -73.7579 -59.5962 -188.3341 19.6625 1.9068 121.2868 84.0000 0.0000 3.36e+19
-37.7011 -73.7155 -56.0120 -163.5799 19.6625 4.1266 92.6777 72.0000 4.5000 7.27e+19
-37.4787 -73.6732 -52.4225 -138.8274 19.6625 6.4054 92.6468 64.0000 9.0000 1.13e+20
-37.2563 -73.6309 -48.8059 -114.0765 19.6625 5.1549 85.6012 56.0000 4.5000 9.08e+19
-37.0339 -73.5886 -45.1712 -89.3272 19.6625 7.5001 123.9610 44.0000 12.0000 1.32e+20
-36.8115 -73.5463 -41.5096 -64.5796 19.6625 11.4107 106.6475 36.0000 3.0000 2.01e+20
-36.5891 -73.5040 -37.8301 -39.8335 19.6625 11.4747 109.9497 28.0000 1.5000 2.02e+20
-39.0585 -73.7822 -60.9047 -314.5496 25.0000 0.1143 66.6456 156.0000 1.5000 2.51e+18
-38.8361 -73.7399 -57.4009 -289.7947 25.0000 2.9272 60.8819 144.0000 3.0000 6.44e+19
-38.6137 -73.6975 -53.8695 -265.0413 25.0000 0.8107 135.1820 144.0000 7.5000 1.78e+19
-38.3913 -73.6552 -50.3150 -240.2896 25.0000 1.0053 132.7415 132.0000 6.0000 2.21e+19
-38.1689 -73.6129 -46.7462 -215.5395 25.0000 1.7848 147.5126 112.0000 6.0000 3.93e+19
-37.9465 -73.5706 -43.1501 -190.7909 25.0000 2.0813 132.9885 88.0000 12.0000 4.58e+19
-37.7241 -73.5283 -39.5313 -166.0440 25.0000 4.6113 86.8079 68.0000 12.0000 1.01e+20
-37.5017 -73.4860 -35.8898 -141.2987 25.0000 4.2352 80.4314 68.0000 1.5000 9.32e+19
-37.2793 -73.4436 -32.2169 -116.5550 25.0000 3.6800 105.3077 56.0000 7.5000 8.10e+19
-37.0569 -73.4013 -28.5348 -91.8129 25.0000 9.7544 132.4953 48.0000 9.0000 2.15e+20
-36.8345 -73.3590 -24.8305 -67.0726 25.0000 12.6905 114.9819 28.0000 4.5000 2.79e+20
-36.6122 -73.3167 -21.0995 -42.3449 25.0000 15.8599 103.7769 20.0000 3.0000 3.49e+20
-39.0815 -73.5949 -44.7188 -317.0027 30.3375 0.0003 142.9629 152.0000 1.5000 6.25e+15
-38.8591 -73.5526 -41.1621 -292.2549 30.3375 2.3127 114.0167 140.0000 4.5000 5.09e+19
-38.6367 -73.5103 -37.5866 -267.5087 30.3375 1.2723 134.6645 144.0000 10.5000 2.80e+19
-38.4143 -73.4680 -33.9924 -242.7642 30.3375 0.1254 110.9950 136.0000 10.5000 2.76e+18
-38.1920 -73.4257 -30.3667 -218.0323 30.3375 2.5572 90.9389 116.0000 3.0000 5.63e+19
-37.9696 -73.3833 -26.7139 -193.2910 30.3375 1.9898 64.9793 96.0000 10.5000 4.38e+19
-37.7472 -73.3410 -23.0517 -168.5513 30.3375 3.8986 101.6265 80.0000 7.5000 8.58e+19
-37.5248 -73.2987 -19.3625 -143.8133 30.3375 3.4953 125.1805 72.0000 1.5000 7.69e+19
-37.3024 -73.2564 -15.6554 -119.0769 30.3375 3.8183 89.9073 48.0000 9.0000 8.40e+19
-37.0800 -73.2141 -11.9172 -94.3421 30.3375 8.0360 120.4964 40.0000 6.0000 1.77e+20
-36.8576 -73.1718 -8.1657 -69.6090 30.3375 10.6860 101.6958 28.0000 12.0000 2.35e+20
-36.6352 -73.1294 -4.3833 -44.8776 30.3375 14.0553 98.2955 28.0000 4.5000 3.09e+20
-39.1046 -73.4077 -28.5431 -319.4989 35.6751 0.1275 141.4487 148.0000 1.5000 3.82e+18
-38.8822 -73.3654 -24.9377 -294.7582 35.6751 1.3524 144.6593 152.0000 7.5000 4.05e+19
-38.6598 -73.3230 -21.3093 -270.0193 35.6751 1.5425 148.3688 148.0000 3.0000 4.62e+19
-38.4374 -73.2807 -17.6625 -245.2819 35.6751 0.7843 102.6891 140.0000 10.5000 2.35e+19
-38.2150 -73.2384 -13.9931 -220.5462 35.6751 1.6657 69.9633 116.0000 3.0000 4.99e+19
-37.9926 -73.1961 -10.3010 -195.8121 35.6751 0.6880 108.6488 100.0000 12.0000 2.06e+19
-37.7702 -73.1538 -6.5866 -171.0797 35.6751 2.4978 91.9950 92.0000 9.0000 7.49e+19
-37.5478 -73.1115 -2.8542 -146.3490 35.6751 3.8491 141.9863 80.0000 7.5000 1.15e+20
-37.3254 -73.0691 0.9137 -121.6199 35.6751 2.9694 142.6638 56.0000 4.5000 8.90e+19
-37.1030 -73.0268 4.6904 -96.8925 35.6751 3.7193 125.5543 40.0000 1.5000 1.11e+20
-36.8806 -72.9845 8.4979 -72.1667 35.6751 4.3977 75.9162 36.0000 1.5000 1.32e+20
-36.6582 -72.9422 12.3185 -47.4427 35.6751 3.6038 97.4013 28.0000 1.5000 1.08e+20
%--------------------------------------------------------------------------------------------------
% SEGMENT # 5: STRIKE = 8.7 deg DIP = 10 deg
% LEN = 300 km WID = 71 km
% depth to top: Z2top = 4.88 km
% coordinates of top-center:
% LAT = -37.5882758966096, LON = -74.5957768421918
% hypocenter on SEG # 1 : along-strike (X) = 37.5, down-dip (Z) = 26.625
% Nsbfs = 48 subfaults
%--------------------------------------------------------------------------------------------------
% LAT LON X==EW Y==NS Z SLIP RAKE TRUP RISE SF_MOMENT
%--------------------------------------------------------------------------------------------------
-38.8280 -74.6892 -139.9450 -289.8389 6.4260 0.1207 79.1445 172.0000 12.0000 1.67e+18
-38.6056 -74.6470 -136.6791 -265.0500 6.4260 2.4538 136.3835 164.0000 7.5000 3.40e+19
-38.3832 -74.6048 -133.3850 -240.2624 6.4260 0.9225 146.7198 152.0000 4.5000 1.28e+19
-38.1608 -74.5626 -130.0757 -215.4765 6.4260 2.7177 148.6773 132.0000 1.5000 3.76e+19
-37.9384 -74.5203 -126.7296 -190.6918 6.4260 4.8299 149.3494 120.0000 3.0000 6.69e+19
-37.7160 -74.4781 -123.3687 -165.9088 6.4260 6.0748 149.3318 112.0000 12.0000 8.41e+19
-37.4936 -74.4359 -119.9842 -141.1273 6.4260 4.0589 148.4058 96.0000 0.0000 5.62e+19
-37.2712 -74.3937 -116.5764 -116.3473 6.4260 2.6431 85.9705 88.0000 0.0000 3.66e+19
-37.0488 -74.3515 -113.1498 -91.5689 6.4260 2.8642 66.2682 64.0000 3.0000 3.97e+19
-36.8264 -74.3093 -109.6957 -66.7919 6.4260 6.6321 61.7194 52.0000 4.5000 9.19e+19
-36.6040 -74.2671 -106.2185 -42.0165 6.4260 9.0524 61.4098 48.0000 1.5000 1.25e+20
-36.3817 -74.2249 -102.7183 -17.2538 6.4260 7.0499 63.2580 52.0000 1.5000 9.76e+19
-38.8518 -74.4963 -123.1935 -292.2180 9.5082 0.1355 141.8439 172.0000 1.5000 1.88e+18
-38.6294 -74.4541 -119.8733 -267.4361 9.5082 3.4683 129.0298 160.0000 3.0000 4.80e+19
-38.4070 -74.4119 -116.5382 -242.6559 9.5082 3.3357 60.2643 152.0000 1.5000 4.62e+19
-38.1846 -74.3697 -113.1706 -217.8770 9.5082 3.8311 97.2828 136.0000 10.5000 5.31e+19
-37.9622 -74.3275 -109.7839 -193.0997 9.5082 5.2500 119.1407 120.0000 3.0000 7.27e+19
-37.7398 -74.2853 -106.3781 -168.3240 9.5082 5.5106 148.0872 104.0000 7.5000 7.63e+19
-37.5174 -74.2431 -102.9402 -143.5496 9.5082 3.1312 69.2662 88.0000 6.0000 4.34e+19
-37.2950 -74.2009 -99.4880 -118.7770 9.5082 6.3269 104.3382 80.0000 9.0000 8.76e+19
-37.0726 -74.1587 -96.0082 -94.0058 9.5082 4.4965 74.9791 60.0000 1.5000 6.23e+19
-36.8502 -74.1165 -92.5100 -69.2362 9.5082 7.5613 72.3651 56.0000 1.5000 1.05e+20
-36.6278 -74.0743 -88.9845 -44.4682 9.5082 10.8924 62.0004 40.0000 4.5000 1.51e+20
-36.4055 -74.0321 -85.4361 -19.7128 9.5082 7.9929 65.9641 44.0000 7.5000 1.11e+20
-38.8756 -74.3035 -106.4610 -294.6311 12.5905 0.1301 76.5389 180.0000 10.5000 2.29e+18
-38.6532 -74.2613 -103.0910 -269.8565 12.5905 3.5165 62.2756 164.0000 12.0000 6.20e+19
-38.4308 -74.2191 -99.7017 -245.0834 12.5905 4.6959 64.2191 152.0000 1.5000 8.27e+19
-38.2084 -74.1769 -96.2890 -220.3118 12.5905 2.9249 89.7126 132.0000 4.5000 5.15e+19
-37.9860 -74.1347 -92.8528 -195.5417 12.5905 4.8004 76.5464 112.0000 1.5000 8.46e+19
-37.7636 -74.0925 -89.3935 -170.7732 12.5905 5.2916 127.5256 100.0000 12.0000 9.32e+19
-37.5412 -74.0503 -85.9109 -146.0063 12.5905 6.6459 87.7918 72.0000 6.0000 1.17e+20
-37.3188 -74.0081 -82.4054 -121.2409 12.5905 11.9568 126.9333 68.0000 4.5000 2.11e+20
-37.0964 -73.9659 -78.8769 -96.4770 12.5905 7.4594 114.1151 60.0000 12.0000 1.31e+20
-36.8740 -73.9236 -75.3212 -71.7147 12.5905 9.2030 97.2244 48.0000 9.0000 1.62e+20
-36.6516 -73.8814 -71.7473 -46.9541 12.5905 9.8853 84.2838 36.0000 6.0000 1.74e+20
-36.4292 -73.8392 -68.1552 -22.1950 12.5905 6.8903 96.8901 32.0000 3.0000 1.21e+20
-38.8994 -74.1106 -89.7261 -297.0781 15.6727 0.0522 78.1238 172.0000 3.0000 9.20e+17
-38.6770 -74.0684 -86.3105 -272.3107 15.6727 3.9914 78.0271 164.0000 12.0000 7.03e+19
-38.4546 -74.0262 -82.8714 -247.5449 15.6727 2.3933 80.7250 152.0000 1.5000 4.22e+19
-38.2322 -73.9840 -79.4091 -222.7805 15.6727 1.4632 116.5442 132.0000 1.5000 2.58e+19
-38.0098 -73.9418 -75.9235 -198.0178 15.6727 0.2739 139.9621 116.0000 3.0000 4.83e+18
-37.7874 -73.8996 -72.4104 -173.2566 15.6727 3.8022 148.5589 100.0000 1.5000 6.70e+19
-37.5650 -73.8574 -68.8832 -148.4970 15.6727 7.1475 75.9601 80.0000 3.0000 1.26e+20
-37.3426 -73.8152 -65.3287 -123.7389 15.6727 10.6342 100.3228 72.0000 1.5000 1.87e+20
-37.1202 -73.7730 -61.7515 -98.9825 15.6727 7.7309 94.6888 56.0000 3.0000 1.36e+20
-36.8978 -73.7308 -58.1516 -74.2276 15.6727 8.3131 107.8228 48.0000 10.5000 1.46e+20
-36.6754 -73.6886 -54.5337 -49.4744 15.6727 9.5839 108.7549 44.0000 1.5000 1.69e+20
-36.4530 -73.6464 -50.8890 -24.7228 15.6727 9.4311 111.8163 32.0000 1.5000 1.66e+20
%--------------------------------------------------------------------------------------------------
EOF
# Compute surface displacements from the finite-source (FSP) model written
# to fsp.tmp above, writing results to test_surf_disp.
${BIN_DIR}/surf_disp.sh FSP fsp.tmp -o test_surf_disp
# Remove the auto-generated limits file left behind by o92util.
rm -f o92util_auto_lims.dat
|
<filename>logx/log.go
package logx
import (
"fmt"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// NewZap builds a *zap.Logger from a human-readable level string
// (e.g. "debug", "info", "warn").
//
// When production is true the standard zap production configuration is
// used; otherwise the development configuration is used with capitalised,
// colourised level names. The level string is parsed into the config
// before the logger is built; parse and build failures are returned as
// wrapped errors.
func NewZap(level string, production bool) (*zap.Logger, error) {
	var cfg zap.Config
	if production {
		cfg = zap.NewProductionConfig()
	} else {
		cfg = zap.NewDevelopmentConfig()
		cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
	}

	if err := cfg.Level.UnmarshalText([]byte(level)); err != nil {
		return nil, fmt.Errorf("failed to parse log level: %w", err)
	}

	logger, err := cfg.Build()
	if err != nil {
		return nil, fmt.Errorf("failed to build logger: %w", err)
	}
	return logger, nil
}
|
/* eslint-disable no-undef */
const {
getFirstItem,
getLength,
getLastItem,
sumNums,
multiplyNums,
contains,
removeDuplicates
} = require('../src/project-4');
// Callback-style utility tests for project-4. Every utility under test
// passes its result to a callback instead of returning it, so each test
// captures the callback argument into a local variable before asserting.
//
// Fixes vs. previous revision:
// - typo in the `contains` test description ("is" -> "if");
// - the "should not mutate the original array" test never actually checked
//   that the input array was left unchanged — that assertion is added.
describe('Project 4', () => {
  describe('getFirstItem', () => {
    it('should pass the first item from the collection to the cb', () => {
      const collection = ['a', 'b', 'c', 'd'];
      let firstItem;

      getFirstItem(collection, (first) => {
        firstItem = first;
      });

      expect(firstItem).toBe('a');
    });
  });

  describe('getLength', () => {
    it('should pass the length of the collection to the cb', () => {
      const collection = [true, false, {}, []];
      let collectionLength;

      getLength(collection, (length) => {
        collectionLength = length;
      });

      expect(collectionLength).toBe(4);
    });
  });

  describe('getLastItem', () => {
    it('should pass the last item from an array into the provided cb', () => {
      const collection1 = [1, 2, 3];
      const collection2 = ['a', 'b'];
      const collection3 = [true, false, true, null];
      const lastItems = [];

      getLastItem(collection1, (lastItem) => {
        lastItems.push(lastItem);
      });
      getLastItem(collection2, (lastItem) => {
        lastItems.push(lastItem);
      });
      getLastItem(collection3, (lastItem) => {
        lastItems.push(lastItem);
      });

      expect(lastItems).toEqual([3, 'b', null]);
    });
  });

  describe('sumNums', () => {
    it('should sum the numbers together and pass the sum to the cb', () => {
      let sum;

      sumNums(5, 10, (result) => {
        sum = result;
      });
      expect(sum).toBe(15);

      sumNums(-5, 5, (result) => {
        sum = result;
      });
      expect(sum).toBe(0);
    });
  });

  describe('multiplyNums', () => {
    it('should multiply the numbers together and pass the product to the cb', () => {
      let product;

      multiplyNums(5, 10, (result) => {
        product = result;
      });
      expect(product).toBe(50);

      multiplyNums(-5, 5, (result) => {
        product = result;
      });
      expect(product).toBe(-25);
    });
  });

  describe('contains', () => {
    it('should pass true to cb if the collection contains the specified item', () => {
      const collection = ['a', 'b', 'c', 'd'];
      let containsItem;

      contains(collection, 'd', (result) => {
        containsItem = result;
      });

      expect(containsItem).toBe(true);
    });

    it('should return false if the item is not contained in the array', () => {
      const collection = ['a', 'b', 'c', 'd'];
      let containsItem;

      contains(collection, 55, (result) => {
        containsItem = result;
      });

      expect(containsItem).toBe(false);
    });

    it('should work with array references', () => {
      // Identity (reference) equality: a distinct-but-equal empty array
      // must NOT match the nested array stored in the collection.
      const nestedArray = [];
      const collection = ['a', 'b', 'c', 'd', nestedArray];
      let containsItem;

      contains(collection, nestedArray, (result) => {
        containsItem = result;
      });
      expect(containsItem).toBe(true);

      contains(collection, [], (result) => {
        containsItem = result;
      });
      expect(containsItem).toBe(false);
    });
  });

  describe('removeDuplicates', () => {
    it('should remove duplicates from an array', () => {
      const arr = ['a', 'b', 'c', 'c'];
      let duplicateFreeArray;

      removeDuplicates(arr, (result) => {
        duplicateFreeArray = result;
      });

      expect(duplicateFreeArray).toEqual(['a', 'b', 'c']);
    });

    it('should not mutate the original array', () => {
      const arr = ['a', 'b', 'c', 'c'];
      let duplicateFreeArray;

      removeDuplicates(arr, (result) => {
        duplicateFreeArray = result;
      });

      expect(Array.isArray(duplicateFreeArray)).toBe(true);
      expect(duplicateFreeArray).not.toBe(arr);
      // The input array itself must be left untouched.
      expect(arr).toEqual(['a', 'b', 'c', 'c']);
    });
  });
});
|
// Small demo script: a constant, an alert helper, a sum helper, and a log.

// Demo constant.
const a = 1;

// Shows a browser alert dialog; defined here but never invoked.
const b = () => {
  alert(1);
};

// Returns the sum of two numbers.
const sum = (a, b) => a + b;

console.log(0);
|
package com.github.daggerok.client;
import com.github.daggerok.employee.Employee;
import org.eclipse.microprofile.rest.client.inject.RegisterRestClient;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import java.util.List;
import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
/**
 * MicroProfile type-safe REST client for the employee service.
 *
 * <p>The base URI is presumably supplied via MicroProfile Config
 * ({@code <fqcn>/mp-rest/url}) — TODO confirm against the application
 * configuration.
 */
@Path("/")
@RegisterRestClient
public interface EmployeeRestClient {

    /**
     * Fetches all employees from the remote service.
     *
     * @return the list of employees deserialized from the JSON response
     */
    @GET
    @Produces(APPLICATION_JSON)
    List<Employee> getEmployees();
}
|
<reponame>zenglongGH/spresense
// Doxygen-generated search index fragment: maps the URL-encoded search key
// 'theory_20of_20operation' to the "Theory of Operation" page. Generated
// output — do not edit by hand.
var searchData=
[
  ['theory_20of_20operation',['Theory of Operation',['../theoryOperation.html',1,'']]]
];
|
<reponame>eengineergz/Lambda
// https://repl.it/student/assignments/395908/model_solution?fromSubmissionId=1559885
// https://youtu.be/wNPKVuKBWxo
/**
 * Returns true when n is prime and is part of a twin-prime pair,
 * i.e. n - 2 or n + 2 is also prime.
 *
 * The leftover debug console.log has been removed: it printed
 * "`${num} % ${i} === not zero`" BEFORE the divisibility check ran, so it
 * also logged (misleadingly) for divisors where the remainder WAS zero.
 *
 * @param {number} n - integer to test
 * @returns {boolean}
 */
function isTwinPrime(n) {
  // Trial division up to sqrt(num); values below 2 are not prime.
  function isPrime(num) {
    for (let i = 2; i <= Math.sqrt(num); i++) {
      if (num % i === 0) return false;
    }
    return num >= 2;
  }
  return isPrime(n) && (isPrime(n - 2) || isPrime(n + 2));
}
// Demo output: 119 = 7 * 17, so it is not prime (and not a twin prime).
// Typo fix in the logged message: "sqaure" -> "square".
console.log(`the square root of 119 is ${Math.sqrt(119)}`);
console.log(isTwinPrime(119));
|
<gh_stars>1-10
package string_handle;
import java.io.BufferedReader;
import java.io.InputStreamReader;
/**
 *
 * @author minchoba
 * Baekjoon problem 2386: Dobby's English Study.
 * For each input line of the form "c sentence", counts how many characters
 * in the sentence match the reference character c, case-insensitively.
 *
 * @see https://www.acmicpc.net/problem/2386/
 *
 */
public class Boj2386 {
    private static final char EXIT = '#';
    private static final char SPACE = ' ';
    private static final String NEW_LINE = "\n";

    public static void main(String[] args) throws Exception{
        // Read input through a buffered reader
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        StringBuilder sb = new StringBuilder();

        while(true) {
            char[] words = br.readLine().toCharArray();
            if(words[0] == EXIT) break; // Stop the loop when the input line starts with '#'

            int res = 0;
            char tmp = ' ';
            if(words[0] >= 'a' && words[0] <= 'z') { // Reference char is lowercase a-z: tmp is its uppercase A-Z form
                tmp = (char) (words[0] - 32);
            }
            else { // Otherwise the reference is uppercase: tmp is its lowercase form
                tmp = (char) (words[0] + 32);
            }

            // Index 0 is the reference character and index 1 is the separating
            // space, so the sentence to scan starts at index 2.
            for(int i = 2; i < words.length; i++) {
                if(words[i] == SPACE) continue; // Skip spaces
                if(words[i] == words[0] || words[i] == tmp) { // Character matches the reference in either case
                    res++; // increment the match count
                }
            }
            sb.append(words[0]).append(SPACE).append(res).append(NEW_LINE); // Buffer this line's result
        }
        System.out.println(sb.toString()); // Print all results at once
    }
}
|
<filename>src/app/admin/admin.component.ts
import {Component, OnDestroy} from '@angular/core';
import {UserService} from '../user-service/user.service';
import {User} from '../entity/user';
import {Subscription} from 'rxjs';
/**
 * Top-level admin page component: exposes the site title and the
 * currently signed-in user to its template.
 */
@Component({
  selector: 'admin',
  templateUrl: './admin.html',
  styleUrls: ['./admin.less']
})
export class Admin implements OnDestroy {
  // Collects subscriptions so they can all be torn down in ngOnDestroy.
  private _subscription = new Subscription();

  // SITE_TITLE is presumably a build-time global injected by the bundler
  // (e.g. webpack DefinePlugin) — TODO confirm where it is declared.
  site_title = SITE_TITLE;

  // Latest value emitted by UserService.userInfo; unset until the first emission.
  user: User;

  constructor(private _userService: UserService) {
    // Keep `user` in sync with the service's user-info stream.
    this._subscription.add(
      this._userService.userInfo
        .subscribe((u) => {
          this.user = u;
        })
    );
  }

  ngOnDestroy(): void {
    // Unsubscribe to avoid leaking the userInfo subscription.
    this._subscription.unsubscribe();
  }
}
|
// Returns the providers whose name or description contains the keyword,
// compared case-insensitively.
function filterSolidProviders(providers: SolidProvider[], keyword: string): SolidProvider[] {
  const needle = keyword.toLowerCase();
  const matches = (p: SolidProvider): boolean =>
    p.name.toLowerCase().includes(needle) || p.desc.toLowerCase().includes(needle);
  return providers.filter(matches);
}
|
<gh_stars>0
// Builds the schema: zipcodes, then users and posts which both reference it.
// Tables are created parent-first so the foreign keys can resolve.
exports.up = function (knex) {
  return knex.schema
    .createTable("zipcodes", (tbl) => {
      tbl.increments("id").unsigned();
      tbl.integer("zipCode").unsigned().notNullable();
    })
    .createTable("users", (tbl) => {
      tbl.increments("id").unsigned();
      tbl.text("email").unique().notNullable();
      tbl.text("password").notNullable();
      tbl.boolean("isGovernmentOfficial").notNullable();
      // FK -> zipcodes.id; renames and deletes cascade to users.
      tbl
        .integer("zip_id")
        .unsigned()
        .notNullable()
        .references("id")
        .inTable("zipcodes")
        .onUpdate("CASCADE")
        .onDelete("CASCADE");
    })
    .createTable("posts", (tbl) => {
      tbl.increments().unsigned();
      tbl.text("issue").notNullable();
      tbl.text("description").notNullable();
      tbl.binary("photo"); // optional image payload
      // FK -> zipcodes.id
      tbl
        .integer("zip_id")
        .unsigned()
        .notNullable()
        .references("id")
        .inTable("zipcodes")
        .onUpdate("CASCADE")
        .onDelete("CASCADE");
      // FK -> users.id (post author)
      tbl
        .integer("user_id")
        .unsigned()
        .notNullable()
        .references("id")
        .inTable("users")
        .onUpdate("CASCADE")
        .onDelete("CASCADE");
    });
};
exports.down = function (knex) {
return knex.schema
.dropTableIfExists("posts")
.dropTableIfExists("users")
.dropTableIfExists("zipcodes");
};
|
# Print the kernel name of every block device, one per line.
# The original requested NAME,LABEL,MODEL and then used awk to keep only
# the first field; asking lsblk for just NAME makes the awk step (and the
# field-shifting risk of empty LABEL cells in raw output) unnecessary.
lsblk --output NAME -nr
|
/*
* Copyright © 2016 <<EMAIL>> http://io7m.com
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
* IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package com.io7m.jfunctional;
import com.io7m.jequality.annotations.EqualityStructural;
import java.io.Serializable;
/**
 * <p> The type of optional values. </p>
 *
 * <p> Methods whose name ends in an underscore ({@code map_},
 * {@code mapPartial_}) are procedure variants: they run a side effect and
 * return nothing, in contrast to the value-returning {@code map} family. </p>
 *
 * @param <T> The type of values.
 */
@EqualityStructural
public interface OptionType<T> extends Serializable
{
/**
 * <p> If this value is {@code Some(x)}, evaluate {@code p(x)}.
 * Otherwise, do nothing. </p>
 *
 * @param p The procedure
 *
 * @since 1.1.0
 */
void map_(final ProcedureType<T> p);
/**
 * <p> If this value is {@code Some(x)}, evaluate {@code p(x)}.
 * Otherwise, do nothing. </p>
 *
 * @param p The procedure
 * @param <E> The type of exceptions thrown by the procedure.
 *
 * @throws E Propagated from {@code p}
 * @since 1.1.0
 */
<E extends Throwable> void mapPartial_(
final PartialProcedureType<T, E> p)
throws E;
/**
 * Accept a visitor.
 *
 * @param v The visitor
 * @param <U> The type of values returned by the visitor.
 *
 * @return The value returned by the visitor.
 */
<U> U accept(
final OptionVisitorType<T, U> v);
/**
 * Accept a partial visitor.
 *
 * @param v The visitor
 * @param <U> The type of values returned by the visitor.
 * @param <E> The type of exceptions thrown by the visitor.
 *
 * @return The value returned by the visitor.
 *
 * @throws E If the visitor throws {@code E}.
 */
<U, E extends Throwable> U acceptPartial(
final OptionPartialVisitorType<T, U, E> v)
throws E;
/**
 * @return {@code true} if the current value is {@link None}.
 */
boolean isNone();
/**
 * @return {@code true} if the current value is {@link Some}.
 */
boolean isSome();
/**
 * <p> If this value is {@code Some(x)}, return {@link Option#of(Object)}
 * with {@code f(x)}. Otherwise, return {@link None}. </p>
 *
 * @param <U> The type of returned values.
 * @param f The map function.
 *
 * @return An optional value of type {@code U}.
 */
<U> OptionType<U> map(
FunctionType<T, U> f);
/**
 * <p> If this value is {@code Some(x)}, return {@link Option#of(Object)}
 * with {@code f(x)}. Otherwise, return {@link None}. </p>
 *
 * @param <E> The type of exceptions raised.
 * @param <U> The type of returned values.
 * @param f The map function.
 *
 * @return An optional value of type {@code U}.
 *
 * @throws E If {@code f} throws {@code E}.
 */
<U, E extends Throwable> OptionType<U> mapPartial(
PartialFunctionType<T, U, E> f)
throws E;
}
|
#!/bin/bash
# One-shot post-upgrade cleanup: remove the PostgreSQL 9.2.18 data directory
# left behind by a GitLab upgrade. Safe to re-run — rm -rf ignores a missing path.
echo 'Cleaning up leftover files from upgrade...'
rm -rf /var/opt/gitlab/postgresql/data.9.2.18
|
<gh_stars>1-10
package com.report.application.service;
import com.report.application.domain.Report;
import com.report.application.domain.snapshot.ReportSnapshot;
import com.report.application.domain.vo.CharacterPhrase;
import com.report.application.domain.vo.FilmCharacter;
import com.report.application.domain.vo.PlanetName;
import com.report.application.dto.FilmCharacterRecord;
import com.report.application.port.driven.SwapiRepository;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.modelmapper.ModelMapper;
import java.util.Collections;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.*;
@ExtendWith(MockitoExtension.class)
// Unit tests for ReportFulfillment: all collaborators are Mockito mocks, so
// only the fulfillment orchestration logic itself is exercised.
class ReportFulfillmentTest {
@Mock
private Report report;
@Mock
private ReportSnapshot snapshot;
@Mock
private PlanetName planetName;
@Mock
private CharacterPhrase characterPhrase;
@Mock
private FilmCharacter filmCharacter;
@Mock
private FilmCharacterRecord filmCharacterRecord;
@Mock
private SwapiRepository swapiRepository;
@Mock
private ModelMapper modelMapper;
@InjectMocks
private ReportFulfillment reportFulfillment;
@Test
@DisplayName("Filling null with FilmCharacters")
void shouldNotAcceptNullValue() {
// When & Then — the @NonNull contract on fillWithFilmCharacters rejects null
NullPointerException exception = assertThrows(
NullPointerException.class,
() -> reportFulfillment.fillWithFilmCharacters(null)
);
assertTrue(exception
.getMessage()
.contains("is marked non-null but is null")
);
}
@Test
@DisplayName("Filling Report with FilmCharacters, when they don't meet the criteria")
void shouldMarkReportAsCompleteWithoutAddingFilmCharacters() {
// Given — repository returns no matching records
prepareSnapshotStubbing();
when(swapiRepository.getFilmCharacterRecordsThatMeetTheCriteria(planetName, characterPhrase))
.thenReturn(Collections.emptyList());
// When
reportFulfillment.fillWithFilmCharacters(report);
// Then — nothing added, but the report is still marked complete
verify(report, never())
.addFilmCharacter(any(FilmCharacter.class));
verify(report).markComplete();
}
@Test
@DisplayName("Filling Report with FilmCharacters, when they do meet the criteria")
void shouldMarkReportAsCompleteWithAddingFilmCharacter() {
// Given — one matching record, mapped to a domain FilmCharacter
prepareSnapshotStubbing();
when(swapiRepository.getFilmCharacterRecordsThatMeetTheCriteria(planetName, characterPhrase))
.thenReturn(Collections.singletonList(filmCharacterRecord));
when(modelMapper.map(filmCharacterRecord, FilmCharacter.class))
.thenReturn(filmCharacter);
// When
reportFulfillment.fillWithFilmCharacters(report);
// Then
verify(report).addFilmCharacter(filmCharacter);
verify(report).markComplete();
}
// Common stubbing: the report snapshot exposes the search criteria.
private void prepareSnapshotStubbing() {
when(report.toSnapshot())
.thenReturn(snapshot);
when(snapshot.getPlanetName())
.thenReturn(planetName);
when(snapshot.getCharacterPhrase())
.thenReturn(characterPhrase);
}
}
|
#!/bin/zsh
#
# Run script template for Mille jobs
#
# Adjustments might be needed for CMSSW environment.
#
# In the very beginning of this script, stager requests for the files will be added.
# these defaults will be overwritten by MPS
RUNDIR=$HOME/scratch0/some/path
MSSDIR=/castor/cern.ch/user/u/username/another/path
MSSDIRPOOL=

# Salvage log and monitoring files into $RUNDIR before the job dies;
# installed below as a signal handler for batch-system signals.
clean_up () {
#try to recover log files and root files
echo try to recover log files and root files ...
cp -p *.log $RUNDIR
cp -p *.log.gz $RUNDIR
cp -p millePedeMonitor*root $RUNDIR
exit
}
#LSF signals according to http://batch.web.cern.ch/batch/lsf-return-codes.html
trap clean_up HUP INT TERM SEGV USR2 XCPU XFSZ IO
# a helper function to repeatedly try failing copy commands
untilSuccess () {
# trying "${1} ${2} ${3} > /dev/null" until success, if ${4} is a
# positive number run {1} with -f flag and using --cksum md5,
# break after ${5} tries (with four arguments do up to 5 tries).
if [[ ${#} -lt 4 || ${#} -gt 5 ]]
then
echo ${0} needs 4 or 5 arguments
return 1
fi
TRIES=0
MAX_TRIES=5
if [[ ${#} -eq 5 ]]
then
MAX_TRIES=${5}
fi
# first attempt; the while-condition below re-checks $? of the most
# recent copy attempt each time round the loop
if [[ ${4} -gt 0 ]]
then
${1} -f --cksum md5 ${2} ${3} > /dev/null
else
${1} ${2} ${3} > /dev/null
fi
while [[ ${?} -ne 0 ]]
do # if not successful, retry...
if [[ ${TRIES} -ge ${MAX_TRIES} ]]
then # ... but not until infinity!
if [[ ${4} -gt 0 ]]
then
echo ${0}: Give up doing \"${1} -f --cksum md5 ${2} ${3} \> /dev/null\".
return 1
else
echo ${0}: Give up doing \"${1} ${2} ${3} \> /dev/null\".
return 1
fi
fi
TRIES=$((${TRIES}+1))
if [[ ${4} -gt 0 ]]
then
echo ${0}: WARNING, problems with \"${1} -f --cksum md5 ${2} ${3} \> /dev/null\", try again.
sleep $((${TRIES}*5)) # before each retry wait a little longer...
${1} -f --cksum md5 ${2} ${3} > /dev/null
else
echo ${0}: WARNING, problems with \"${1} ${2} ${3} \> /dev/null\", try again.
sleep $((${TRIES}*5)) # before each retry wait a little longer...
${1} ${2} ${3} > /dev/null
fi
done
if [[ ${4} -gt 0 ]]
then
echo successfully executed \"${1} -f --cksum md5 ${2} ${3} \> /dev/null\"
else
echo successfully executed \"${1} ${2} ${3} \> /dev/null\"
fi
return 0
}
export X509_USER_PROXY=${RUNDIR}/.user_proxy
# The batch job directory (will vanish after job end):
BATCH_DIR=$(pwd)
echo "Running at $(date) \n on $HOST \n in directory $BATCH_DIR."
# set up the CMS environment (choose your release and working area):
cd CMSSW_RELEASE_AREA
echo Setting up $(pwd) as CMSSW environment.
eval `scram runtime -sh`
rehash
cd $BATCH_DIR
echo The running directory is $(pwd).
# Execute. The cfg file name will be overwritten by MPS
time cmsRun the.cfg
gzip -f *.log
gzip milleBinaryISN.dat
echo "\nDirectory content after running cmsRun and zipping log+dat files:"
ls -lh
# Copy everything you need to MPS directory of your job,
# but you might want to copy less stuff to save disk space
# (separate cp's for each item, otherwise you lose all if one file is missing):
cp -p *.log.gz $RUNDIR
# store millePedeMonitor also in $RUNDIR, below is backup in $MSSDIR
cp -p millePedeMonitor*root $RUNDIR
# Copy MillePede binary file to Castor
# Must use different command for the cmscafuser pool
if [ "$MSSDIRPOOL" != "cmscafuser" ]; then
# Not using cmscafuser pool => rfcp command must be used
export STAGE_SVCCLASS=$MSSDIRPOOL
export STAGER_TRACE=
nsrm -f $MSSDIR/milleBinaryISN.dat.gz
echo "rfcp milleBinaryISN.dat.gz $MSSDIR/"
untilSuccess rfcp milleBinaryISN.dat.gz $MSSDIR/ 0
untilSuccess rfcp treeFile*root $MSSDIR/treeFileISN.root 0
untilSuccess rfcp millePedeMonitor*root $MSSDIR/millePedeMonitorISN.root 0
else
# cmscafuser pool => strip the castor prefix and copy via xrdcp with checksum
MSSCAFDIR=`echo $MSSDIR | perl -pe 's/\/castor\/cern.ch\/cms//gi'`
# ensure the directories exist
mkdir -p ${MSSCAFDIR}/binaries
mkdir -p ${MSSCAFDIR}/tree_files
mkdir -p ${MSSCAFDIR}/monitors
# copy the files
echo "xrdcp -f milleBinaryISN.dat.gz ${MSSCAFDIR}/binaries/milleBinaryISN.dat.gz > /dev/null"
untilSuccess xrdcp milleBinaryISN.dat.gz ${MSSCAFDIR}/binaries/milleBinaryISN.dat.gz 1
untilSuccess xrdcp treeFile.root ${MSSCAFDIR}/tree_files/treeFileISN.root 1
untilSuccess xrdcp millePedeMonitorISN.root ${MSSCAFDIR}/monitors/millePedeMonitorISN.root 1
fi
|
import deepFreeze from 'deep-freeze';
import { expect } from 'chai';
import sinon from 'sinon';
import { beregnVarighet } from './metrikkerUtils';
import { TID_INNSENDING_SYKEPENGESOKNAD_SELVSTENDIG, UTFYLLING_STARTET } from '../enums/metrikkerEnums';
describe('metrikkerUtils', () => {
let state;
let event1;
let event2;
let event3;
let event4;
let event5;
let event6;
let tid1;
let tid2;
let tid3;
let tid4;
let tid5;
let tid6;
let clock;
beforeEach(() => {
// Freeze time with sinon fake timers so the tid* timestamps are
// deterministic; each tick() advances the fake clock in milliseconds.
clock = sinon.useFakeTimers(new Date('2018-02-17'));
tid1 = new Date();
clock.tick(500);
tid2 = new Date();
clock.tick(500);
tid3 = new Date();
clock.tick(45632);
tid4 = new Date();
clock.tick(800);
tid5 = new Date();
clock.tick(1401);
tid6 = new Date();
event1 = {
type: UTFYLLING_STARTET,
ressursId: 'min-sykmelding-id',
tid: tid1,
};
event2 = {
type: UTFYLLING_STARTET,
ressursId: 'min-sykmelding-id-2',
tid: tid2,
};
event3 = {
type: UTFYLLING_STARTET,
ressursId: 'min-sykmelding-id',
tid: tid3,
};
event4 = {
type: 'SYKMELDING_SENDT',
ressursId: 'min-sykmelding-id',
tid: tid4,
};
event5 = {
type: 'UTFYLLING_STARTET',
ressursId: 'min-selvstendig-soknadPt-id',
tid: tid5,
};
event6 = {
type: 'SOKNAD_SENDT',
ressursId: 'min-selvstendig-soknadPt-id',
tid: tid6,
};
// Events are deliberately stored out of chronological order to check
// that beregnVarighet does not rely on array ordering.
state = {
metrikker: {
data: [event2, event1, event3, event4, event6, event5],
},
};
});
afterEach(() => {
// Restore the real clock so other suites are unaffected.
clock.restore();
});
describe('beregnVarighet', () => {
it('Skal returnere riktig tid for innsending av søknad for selvstendig næringsdrivende', () => {
// deepFreeze guards against beregnVarighet mutating the state
const tid = beregnVarighet(deepFreeze(state), {
type: TID_INNSENDING_SYKEPENGESOKNAD_SELVSTENDIG,
ressursId: 'min-selvstendig-soknadPt-id',
});
expect(tid).to.equal(tid6.getTime() - tid5.getTime());
});
});
});
|
import { integration } from '../testing/index.js';
const { Tester } = integration();
let device;
let deviceId;
// Fresh device per test; all record fixtures below are attached to this id.
beforeEach(async () => {
  device = await Tester.hasDevice({ name: '<NAME>' });
  deviceId = device.id;
});
it('should get all records', async () => {
  // Fixtures are inserted newest-first; the expectation below shows the API
  // returns them ordered by time ascending (record2 before record1).
  const [record1, record2] = await Tester.hasRecords([
    { time: '2020-01-02', deviceId, temperature: 12.0, humidity: 30.01 },
    { time: '2020-01-01', deviceId, temperature: 30.5, humidity: 90.5 },
  ]);
  const { errors, data } = await Tester.query({
    query: `
      query {
        records {
          deviceId
          time
          temperature
          humidity
        }
      }
    `,
  });
  expect(errors).toBeUndefined();
  expect(data).toEqual({
    records: [record2, record1],
  });
});
// Fixed the typo in the test description ("enriche" -> "enrich").
// The query asks for dewPoint, which is not stored with the fixture and is
// therefore computed by the resolver.
it('should enrich records', async () => {
  await Tester.hasRecords([{ time: '2020-01-02', deviceId, temperature: 10.0, humidity: 60 }]);
  const {
    errors,
    data: {
      records: [record],
    },
  } = await Tester.query({
    query: `
      query {
        records {
          time
          deviceId,
          temperature,
          humidity,
          dewPoint,
        }
      }
    `,
    variables: { deviceId },
  });
  expect(errors).toBeUndefined();
  expect(record).toEqual({
    time: '2020-01-02T00:00:00.000Z',
    deviceId,
    temperature: 10.0,
    humidity: 60,
    dewPoint: 2.6,
  });
});
describe('recordsForDevice', () => {
  const graphQuery = `
    query recordsForDevice($deviceId: ID!, $from: Date, $to: Date) {
      recordsForDevice(deviceId: $deviceId, from: $from, to: $to) {
        time
        deviceId,
        temperature,
        humidity
      }
    }
  `;
  let record1;
  let record2;
  let record3;
  let record4;
  beforeEach(async () => {
    // The extra device proves the resolver filters by deviceId: its record
    // must never appear in any of the expectations below.
    const anotherDevice = await Tester.hasDevice({ name: 'Another Device' });
    [record1, record2, record3, record4] = await Tester.hasRecords([
      { time: '2020-01-01', deviceId, temperature: 12.0, humidity: 50.0 },
      { time: '2020-01-02', deviceId, temperature: 12.0, humidity: 50.0 },
      { time: '2020-01-03', deviceId, temperature: 23.0, humidity: 60.0 },
      { time: '2020-01-04', deviceId, temperature: 23.0, humidity: 60.0 },
      { time: '2020-01-01', deviceId: anotherDevice.id, temperature: 23, humidity: 70.0 },
    ]);
  });
  it('should get records for device', async () => {
    const { errors, data } = await Tester.query({
      query: graphQuery,
      variables: { deviceId },
    });
    expect(errors).toBeUndefined();
    expect(data.recordsForDevice).toHaveLength(4);
    expect(data).toEqual({
      recordsForDevice: [record1, record2, record3, record4],
    });
  });
  it('should get records for device in time range', async () => {
    // Both bounds are inclusive.
    const { errors, data } = await Tester.query({
      query: graphQuery,
      variables: { deviceId, from: '2020-01-02', to: '2020-01-03' },
    });
    expect(errors).toBeUndefined();
    expect(data.recordsForDevice).toHaveLength(2);
    expect(data).toEqual({
      recordsForDevice: [record2, record3],
    });
  });
  // Fixed the typo in the description ("recrods" -> "records").
  it('should get records for device "from" date', async () => {
    const { errors, data } = await Tester.query({
      query: graphQuery,
      variables: { deviceId, from: '2020-01-02' },
    });
    expect(errors).toBeUndefined();
    expect(data.recordsForDevice).toHaveLength(3);
    expect(data).toEqual({
      recordsForDevice: [record2, record3, record4],
    });
  });
  it('should get records for device "to" date', async () => {
    const { errors, data } = await Tester.query({
      query: graphQuery,
      variables: { deviceId, to: '2020-01-03' },
    });
    expect(errors).toBeUndefined();
    expect(data.recordsForDevice).toHaveLength(3);
    expect(data).toEqual({
      recordsForDevice: [record1, record2, record3],
    });
  });
});
it('should create record', async () => {
  const result = await Tester.mutate({
    mutation: `
      mutation createRecord($deviceId: ID!, $temperature: Float, $humidity: Float) {
        createRecord(deviceId: $deviceId, temperature: $temperature, humidity: $humidity) {
          deviceId,
          temperature,
          humidity,
          dewPoint
        }
      }
    `,
    variables: { deviceId, temperature: 12.0, humidity: 60.5 },
  });
  expect(result.errors).toBeUndefined();
  // dewPoint is derived by the resolver, not supplied in the mutation.
  expect(result.data).toEqual({
    createRecord: {
      deviceId,
      temperature: 12.0,
      humidity: 60.5,
      dewPoint: 4.6,
    },
  });
  // The row is persisted with a server-generated timestamp.
  const records = await Tester.grabFromDB('records');
  expect(records).toContainEqual({ time: expect.any(String), deviceId, temperature: 12.0, humidity: 60.5 });
});
it('should delete records for device', async () => {
  // The second device's record must survive the targeted delete.
  const anotherDevice = await Tester.hasDevice({ name: 'Another Device' });
  await Tester.hasRecords([
    { time: '2020-01-01', deviceId },
    { time: '2020-01-01', deviceId },
    { time: '2020-01-01', deviceId: anotherDevice.id },
  ]);
  const result = await Tester.mutate({
    mutation: `
      mutation deleteRecordsForDevice($deviceId: ID!) {
        deleteRecordsForDevice(deviceId: $deviceId)
      }
    `,
    variables: { deviceId },
  });
  expect(result.errors).toBeUndefined();
  const records = await Tester.grabFromDB('records');
  expect(records).toHaveLength(1);
  expect(records[0]).toMatchObject({
    deviceId: anotherDevice.id,
  });
});
|
<reponame>NATroutter/HubCore<filename>src/net/natroutter/hubcore/features/SelectorItems/HubItem.java
package net.natroutter.hubcore.features.SelectorItems;
import net.natroutter.natlibs.objects.BaseItem;
/**
 * Immutable descriptor for one hub selector entry: a stable identifier,
 * the inventory slot it occupies, and the item displayed in that slot.
 */
public record HubItem(String id, Integer slot,
BaseItem item) {
}
|
#!/bin/bash
#
# Build jekyll site and store site files in ./_site
# v2.0
# https://github.com/cotes2020/jekyll-theme-chirpy
# © 2019 Cotes Chung
# Published under MIT License
set -eu

# Production Jekyll build command; the destination flag is appended in _build.
CMD="JEKYLL_ENV=production bundle exec jekyll b"
# Repository root = parent of the directory containing this script.
WORK_DIR=$(dirname $(dirname $(realpath "$0")))
CONTAINER=${WORK_DIR}/.container   # throwaway working copy for the build
DEST=${WORK_DIR}/_site             # default output directory (overridable via -d)
# Print CLI usage.
_help() {
  echo "Usage:"
  echo
  echo "   bash build.sh [options]"
  echo
  echo "Options:"
  echo "     -b, --baseurl <URL>         The site relative url that start with slash, e.g. '/project'"
  echo "     -h, --help                  Print the help information"
  echo "     -d, --destination <DIR>     Destination directory (defaults to ./_site)"
}
# Stage a pristine working copy in $CONTAINER so the build never touches the
# checkout: clean old artifacts, then copy the tree (including .git, which
# the lastmod script needs) into a temp dir and move it into place.
# All path expansions are quoted — the unquoted originals broke when the
# checkout path contained spaces — and the mktemp call is split from the
# 'local' declaration so a mktemp failure is not masked under 'set -e'.
_init() {
  cd "$WORK_DIR"

  if [[ -d "$CONTAINER" ]]; then
    rm -rf "$CONTAINER"
  fi

  if [[ -d _site ]]; then
    jekyll clean
  fi

  local _temp
  _temp=$(mktemp -d)
  cp -r * "$_temp"
  cp -r .git "$_temp"
  mv "$_temp" "$CONTAINER"
}
# Run the generator scripts and the Jekyll build inside the staged copy,
# optionally auto-commit the output if $DEST is itself a git repo, then
# discard the staging directory.
_build() {
  cd $CONTAINER
  echo "$ cd $(pwd)"

  bash _scripts/sh/create_pages.sh
  bash _scripts/sh/dump_lastmod.sh

  CMD+=" -d ${DEST}"
  echo "\$ $CMD"
  eval $CMD
  echo -e "\nBuild success, the site files have been placed in '${DEST}'."

  if [[ -d ${DEST}/.git ]]; then
    if [[ ! -z $(git -C $DEST status -s) ]]; then
      git -C $DEST add .
      git -C $DEST commit -m "[Automation] Update site files." -q
      echo -e "\nPlease push the changes of $DEST to remote master branch.\n"
    fi
  fi

  cd .. && rm -rf $CONTAINER
}
# Abort with usage help when an option's required argument is missing/empty.
_check_unset() {
  if [[ -z ${1:+unset} ]]
  then
    _help
    exit 1
  fi
}
# Parse command-line options, then stage and build the site.
main() {
  while [[ $# -gt 0 ]]
  do
    opt="$1"
    case $opt in
      -b|--baseurl)
        local _baseurl="$2"
        if [[ -z "$_baseurl" ]]; then
          _baseurl='""'
        fi
        CMD+=" -b $_baseurl"
        shift
        shift
        ;;
      -d|--destination)
        # Quote the argument: the unquoted $2 word-split on paths with
        # spaces, and an omitted argument crashed under 'set -u' instead
        # of showing the help text.
        _check_unset "${2:-}"
        DEST=$(realpath "$2")
        shift
        shift
        ;;
      -h|--help)
        _help
        exit 0
        ;;
      *) # unknown option
        _help
        exit 1
        ;;
    esac
  done

  _init
  _build
}

main "$@"
|
<reponame>lheureuxe13/oppia
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for issues."""
from __future__ import absolute_import
from __future__ import unicode_literals
import importlib
import os
from core import feconf
from core.platform import models
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
class Registry:
    """Registry of all issues."""

    # Dict mapping issue types to instances of the issues.
    _issues = {}

    @classmethod
    def get_all_issue_types(cls):
        """Get a list of all issue types.

        Returns:
            list(str). The list of all allowed issue types.
        """
        return stats_models.ALLOWED_ISSUE_TYPES

    @classmethod
    def _refresh(cls):
        """Initializes the mapping between issue types to instances of the issue
        classes.
        """
        cls._issues.clear()
        for issue_type in cls.get_all_issue_types():
            # The issue module lives at <ISSUES_DIR>/<type>/<type> and defines
            # a class named exactly like the issue type.
            module_path_parts = feconf.ISSUES_DIR.split(os.sep)
            module_path_parts.extend([issue_type, issue_type])
            module = importlib.import_module('.'.join(module_path_parts))
            clazz = getattr(module, issue_type)
            ancestor_names = [
                base_class.__name__ for base_class in clazz.__bases__
            ]
            # Only register concrete issue classes.
            if 'BaseExplorationIssueSpec' in ancestor_names:
                cls._issues[clazz.__name__] = clazz()

    @classmethod
    def get_all_issues(cls):
        """Get a list of instances of all issues.

        Returns:
            list(*). A list of all issue class instances. Classes all have
            "BaseExplorationIssueSpec" as an ancestor class.
        """
        # Idiomatic emptiness check (was: len(cls._issues) == 0).
        if not cls._issues:
            cls._refresh()
        return list(cls._issues.values())

    @classmethod
    def get_issue_by_type(cls, issue_type):
        """Gets an issue by its type.

        Refreshes once if the issue is not found; subsequently, throws a
        KeyError.

        Args:
            issue_type: str. Type of the issue.

        Returns:
            *. An instance of the corresponding issue class. This class has
            "BaseExplorationIssueSpec" as an ancestor class.
        """
        if issue_type not in cls._issues:
            cls._refresh()
        return cls._issues[issue_type]
|
<gh_stars>1-10
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import os
try:
    # Local development: credentials come from the project's secrets module.
    from ..secrets import *
except ModuleNotFoundError:
    # Deployed environment: fall back to environment variables.
    # NOTE(review): os.environ.get returns None for a missing variable, which
    # would make the URL concatenation below raise TypeError — confirm these
    # are always set in deployment.
    DB_USER = os.environ.get('DB_USER')
    DB_PASS = os.environ.get('DB_PASS')
    DB_HOST = os.environ.get('DB_HOST')
# postgresql://<user>:<pass>@<host>:5432/iot2
SQLALCHEMY_DATABASE_URL = "postgresql://" + \
    DB_USER + ":" +\
    DB_PASS + "@" + \
    DB_HOST + ":5432/iot2"
# Module-level engine and session factory shared by the application.
engine = create_engine(SQLALCHEMY_DATABASE_URL,
                       # connect_args={"check_same_thread": False}
                       )
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
import json
def read_task_from_jsonl(data_file):
    '''Read a .jsonl file and return the ``task`` field of every line.'''
    tasks = []
    with open(data_file) as fin:
        for line in fin:
            record = json.loads(line)
            tasks.append(record['task'])
    return tasks
#! /bin/bash
# Collect the wall-clock timing line from every run's output file.
grep "Simulation time (seconds)" script_*/output.out
|
#!/usr/bin/env bash
# ------------------------------------------------------------------------------
#
# Program: initpost.sh
# Author: Vitor Britto
# Description: script to create an initial structure for my posts.
#
# Usage: ./initpost.sh [options] <post name>
#
# Options:
# -h, --help output instructions
# -c, --create create post
#
# Alias: alias newpost="bash ~/path/to/script/initpost.sh"
#
# Example:
# ./initpost.sh -c How to replace strings with sed
#
# Important Notes:
# - This script was created to generate new markdown files for my blog.
#
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# | VARIABLES |
# ------------------------------------------------------------------------------
# CORE: Do not change these lines
# ----------------------------------------------------------------
# Title = every argument after the option flag, verbatim.
POST_TITLE="${@:2:$(($#-1))}"
# Slug = title with spaces replaced by dashes and lower-cased.
POST_NAME="$(echo ${@:2:$(($#-1))} | sed -e 's/ /-/g' | sed "y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/")"
CURRENT_DATE="$(date -u +'%Y-%m-%d')"
TIME=$(date -u +"%T")
# Jekyll post filename convention: YYYY-MM-DD-slug.md
FILE_NAME="${CURRENT_DATE}-${POST_NAME}.md"

# ----------------------------------------------------------------
# SETTINGS: your configuration goes here
# ----------------------------------------------------------------

# Set your destination folder
BINPATH=$(cd `dirname $0`; pwd)
POSTPATH="${BINPATH}/_posts"
DIST_FOLDER="$POSTPATH"

# Set your blog URL
BLOG_URL="https://pejuang-onlien.github.io/"

# Set your assets URL
ASSETS_URL="assets/img/"
# ----------------------------------------------------------------
# ------------------------------------------------------------------------------
# | UTILS |
# ------------------------------------------------------------------------------
# Header logging (blue arrow prefix)
e_header() {
    printf "$(tput setaf 38)→ %s$(tput sgr0)\n" "$@"
}

# Success logging (green check prefix)
e_success() {
    printf "$(tput setaf 76)✔ %s$(tput sgr0)\n" "$@"
}

# Error logging (red cross prefix)
e_error() {
    printf "$(tput setaf 1)✖ %s$(tput sgr0)\n" "$@"
}

# Warning logging (yellow bang prefix)
e_warning() {
    printf "$(tput setaf 3)! %s$(tput sgr0)\n" "$@"
}
# ------------------------------------------------------------------------------
# | MAIN FUNCTIONS |
# ------------------------------------------------------------------------------
# Everybody need some help
initpost_help() {
# Usage text is emitted verbatim from the heredoc below.
cat <<EOT
------------------------------------------------------------------------------
INIT POST - A shortcut to create an initial structure for my posts.
------------------------------------------------------------------------------
Usage: ./initpost.sh [options] <post name>
Options:
-h, --help output instructions
-c, --create create post
Example:
./initpost.sh -c How to replace strings with sed
Important Notes:
- This script was created to generate new text files to my blog.
Copyright (c) Vitor Britto
Licensed under the MIT license.
------------------------------------------------------------------------------
EOT
}
# Initial Content
initpost_content() {
echo "---"
echo "date: ${CURRENT_DATE} ${TIME}"
echo "layout: post"
echo "title: \"${POST_TITLE}\""
echo "subtitle:"
echo "description:"
echo "image:"
echo "optimized_image:"
echo "category:"
echo "tags:"
echo "author:"
echo "paginate: false"
echo "---"
}
# Create the post file in the destination folder unless it already exists.
initpost_file() {
    # Check the *destination* path: the original tested "$FILE_NAME" in the
    # current directory, so an existing post in ${DIST_FOLDER} was silently
    # overwritten instead of triggering the warning branch.
    if [ ! -f "${DIST_FOLDER}/${FILE_NAME}" ]; then
        e_header "Creating template..."
        initpost_content > "${DIST_FOLDER}/${FILE_NAME}"
        e_success "Initial post successfully created!"
    else
        e_warning "File already exist."
        exit 1
    fi
}
# ------------------------------------------------------------------------------
# | INITIALIZE PROGRAM |
# ------------------------------------------------------------------------------
# Dispatch on the first CLI flag; everything after it is the post title.
main() {
    # Show help
    if [[ "${1}" == "-h" || "${1}" == "--help" ]]; then
        initpost_help "${1}"
        exit
    fi

    # Create — forward arguments verbatim with "$@"; the previous unquoted
    # $* re-split multi-word titles and expanded glob characters in them.
    if [[ "${1}" == "-c" || "${1}" == "--create" ]]; then
        initpost_file "$@"
        exit
    fi
}

# Initialize
main "$@"
|
#!/bin/bash
# Decommission every lab node from the NetQ inventory, in the same order as
# the original command list (management, spines, firewalls, leaves, borders,
# servers).
for node in \
    oob-mgmt-switch oob-mgmt-server netq-ts \
    spine01 spine02 spine03 spine04 \
    fw1 fw2 \
    leaf01 leaf02 leaf03 leaf04 \
    border01 border02 \
    server01 server02 server03 server04 \
    server05 server06 server07 server08
do
    netq decommission "$node"
done
|
<reponame>liugangtaotie/vue-admin-box
import request from '@/utils/system/request'
/** Shared helper: every user API endpoint is a POST against the mock backend. */
function postUser(url: string, data: object) {
  return request({
    url,
    method: 'post',
    baseURL: '/mock',
    data
  })
}

/** Fetch the user list. */
export function getData(data: object) {
  return postUser('/system/user/list', data)
}

/** Create a new user. */
export function add(data: object) {
  return postUser('/system/user/add', data)
}

/** Update an existing user. */
export function update(data: object) {
  return postUser('/system/user/update', data)
}

/** Change a user's enabled/disabled status. */
export function updateStatus(data: object) {
  return postUser('/system/user/updateStatus', data)
}

/** Delete users. */
export function del(data: object) {
  return postUser('/system/user/del', data)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.