import React, {
useCallback,
useEffect,
useMemo,
useState,
useRef,
} from 'react';
import { useWallet, useWalletPublicKeys } from '../utils/wallet';
import { decodeMessage } from '../utils/transactions';
import { useConnection, useSolanaExplorerUrlSuffix } from '../utils/connection';
import {
Typography,
Divider,
Switch,
FormControlLabel,
SnackbarContent,
} from '@material-ui/core';
import CircularProgress from '@material-ui/core/CircularProgress';
import Box from '@material-ui/core/Box';
import Card from '@material-ui/core/Card';
import CardContent from '@material-ui/core/CardContent';
import CardActions from '@material-ui/core/CardActions';
import Button from '@material-ui/core/Button';
import ImportExportIcon from '@material-ui/icons/ImportExport';
import { makeStyles } from '@material-ui/core/styles';
import assert from 'assert';
import bs58 from 'bs58';
import nacl from 'tweetnacl';
import NewOrder from '../components/instructions/NewOrder';
import UnknownInstruction from '../components/instructions/UnknownInstruction';
import WarningIcon from '@material-ui/icons/Warning';
import SystemInstruction from '../components/instructions/SystemInstruction';
import DexInstruction from '../components/instructions/DexInstruction';
import TokenInstruction from '../components/instructions/TokenInstruction';
import { useLocalStorageState } from '../utils/utils';
export default function PopupPage({ opener }) {
const wallet = useWallet();
const origin = useMemo(() => {
let params = new URLSearchParams(window.location.hash.slice(1));
return params.get('origin');
}, []);
const postMessage = useCallback(
(message) => {
opener.postMessage({ jsonrpc: '2.0', ...message }, origin);
},
[opener, origin],
);
const [connectedAccount, setConnectedAccount] = useState(null);
const hasConnectedAccount = !!connectedAccount;
const [requests, setRequests] = useState([]);
const [autoApprove, setAutoApprove] = useState(false);
// Send a disconnect event if this window is closed, this component is
// unmounted, or setConnectedAccount(null) is called.
useEffect(() => {
if (hasConnectedAccount) {
function unloadHandler() {
postMessage({ method: 'disconnected' });
}
window.addEventListener('beforeunload', unloadHandler);
return () => {
unloadHandler();
window.removeEventListener('beforeunload', unloadHandler);
};
}
}, [hasConnectedAccount, postMessage]);
// Disconnect if the user switches to a different wallet.
useEffect(() => {
if (
connectedAccount &&
!connectedAccount.publicKey.equals(wallet.publicKey)
) {
setConnectedAccount(null);
}
}, [connectedAccount, wallet]);
// Push requests from the parent window into a queue.
useEffect(() => {
function messageHandler(e) {
if (e.origin === origin && e.source === window.opener) {
if (e.data.method !== 'signTransaction') {
postMessage({ error: 'Unsupported method', id: e.data.id });
return;
}
setRequests((requests) => [...requests, e.data]);
}
}
window.addEventListener('message', messageHandler);
return () => window.removeEventListener('message', messageHandler);
}, [origin, postMessage]);
if (
!connectedAccount ||
!connectedAccount.publicKey.equals(wallet.publicKey)
) {
// Approve the parent page to connect to this wallet.
function connect(autoApprove) {
setConnectedAccount(wallet.account);
postMessage({
method: 'connected',
params: { publicKey: wallet.publicKey.toBase58(), autoApprove },
});
setAutoApprove(autoApprove);
focusParent();
}
return <ApproveConnectionForm origin={origin} onApprove={connect} />;
}
if (requests.length > 0) {
const request = requests[0];
assert(request.method === 'signTransaction');
const message = bs58.decode(request.params.message);
function sendSignature() {
setRequests((requests) => requests.slice(1));
postMessage({
result: {
signature: bs58.encode(
nacl.sign.detached(message, wallet.account.secretKey),
),
publicKey: wallet.publicKey.toBase58(),
},
id: request.id,
});
if (requests.length === 1) {
focusParent();
}
}
function sendReject() {
setRequests((requests) => requests.slice(1));
postMessage({
error: 'Transaction cancelled',
id: request.id,
});
if (requests.length === 1) {
focusParent();
}
}
return (
<ApproveSignatureForm
key={request.id}
autoApprove={autoApprove}
origin={origin}
message={message}
onApprove={sendSignature}
onReject={sendReject}
/>
);
}
return (
<Typography>Please keep this window open in the background.</Typography>
);
}
/**
* Switch focus to the parent window. This requires that the parent runs
* `window.name = 'parent'` before opening the popup.
*/
function focusParent() {
window.open('', 'parent');
}
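// Illustrative parent-side setup (not part of this file; names and URL are examples
// only): the opening page is expected to name itself and pass its origin in the
// URL hash before opening this popup, roughly:
//   window.name = 'parent';
//   window.open(walletUrl + '#origin=' + encodeURIComponent(window.location.origin), '_blank');
// `window.open('', 'parent')` above can then re-focus that named window, and the
// popup reads the dapp origin back out of the hash in the `origin` memo of PopupPage.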
const useStyles = makeStyles((theme) => ({
connection: {
marginTop: theme.spacing(3),
marginBottom: theme.spacing(3),
textAlign: 'center',
},
transaction: {
wordBreak: 'break-all',
},
approveButton: {
backgroundColor: '#43a047',
color: 'white',
},
actions: {
justifyContent: 'space-between',
},
snackbarRoot: {
backgroundColor: theme.palette.background.paper,
},
warningMessage: {
margin: theme.spacing(1),
color: theme.palette.text.primary,
},
warningIcon: {
marginRight: theme.spacing(1),
fontSize: 24,
},
warningTitle: {
color: theme.palette.warning.light,
fontWeight: 600,
fontSize: 16,
alignItems: 'center',
display: 'flex',
},
warningContainer: {
marginTop: theme.spacing(1),
},
divider: {
marginTop: theme.spacing(2),
marginBottom: theme.spacing(2),
},
}));
function ApproveConnectionForm({ origin, onApprove }) {
const wallet = useWallet();
const classes = useStyles();
const [autoApprove, setAutoApprove] = useState(false);
let [dismissed, setDismissed] = useLocalStorageState(
'dismissedAutoApproveWarning',
false,
);
return (
<Card>
<CardContent>
<Typography variant="h6" component="h1" gutterBottom>
Allow this site to access your Solana account?
</Typography>
<div className={classes.connection}>
<Typography>{origin}</Typography>
<ImportExportIcon fontSize="large" />
<Typography>{wallet.publicKey.toBase58()}</Typography>
</div>
<Typography>Only connect with sites you trust.</Typography>
<Divider className={classes.divider} />
<FormControlLabel
control={
<Switch
checked={autoApprove}
onChange={() => setAutoApprove(!autoApprove)}
color="primary"
/>
}
label={`Automatically approve transactions from ${origin}`}
/>
{!dismissed && autoApprove && (
<SnackbarContent
className={classes.warningContainer}
message={
<div>
<span className={classes.warningTitle}>
<WarningIcon className={classes.warningIcon} />
Use at your own risk.
</span>
<Typography className={classes.warningMessage}>
This setting allows sending some transactions on your behalf
without requesting your permission for the remainder of this
session.
</Typography>
</div>
}
action={[
<Button onClick={() => setDismissed('1')}>I understand</Button>,
]}
classes={{ root: classes.snackbarRoot }}
/>
)}
</CardContent>
<CardActions className={classes.actions}>
<Button onClick={window.close}>Cancel</Button>
<Button
color="primary"
onClick={() => onApprove(autoApprove)}
disabled={!dismissed && autoApprove}
>
Connect
</Button>
</CardActions>
</Card>
);
}
function isSafeInstruction(publicKeys, owner, instructions) {
let unsafe = false;
const states = {
CREATED: 0,
OWNED: 1,
CLOSED_TO_OWNED_DESTINATION: 2,
};
const accountStates = {};
function isOwned(pubkey) {
if (!pubkey) {
return false;
}
if (
publicKeys?.some((ownedAccountPubkey) =>
ownedAccountPubkey.equals(pubkey),
)
) {
return true;
}
return accountStates[pubkey.toBase58()] === states.OWNED;
}
instructions.forEach((instruction) => {
if (!instruction) {
unsafe = true;
} else {
if (['cancelOrder', 'matchOrders'].includes(instruction.type)) {
// It is always considered safe to cancel orders, match orders
} else if (instruction.type === 'systemCreate') {
let { newAccountPubkey } = instruction.data;
if (!newAccountPubkey) {
unsafe = true;
} else {
accountStates[newAccountPubkey.toBase58()] = states.CREATED;
}
} else if (instruction.type === 'newOrder') {
// New order instructions are safe if the owner is this wallet
let { openOrdersPubkey, ownerPubkey } = instruction.data;
if (ownerPubkey && owner.equals(ownerPubkey)) {
accountStates[openOrdersPubkey.toBase58()] = states.OWNED;
} else {
unsafe = true;
}
} else if (instruction.type === 'initializeAccount') {
// New SPL token accounts are only considered safe if they are owned by this wallet and newly created
let { ownerPubkey, accountPubkey } = instruction.data;
if (
owner &&
ownerPubkey &&
owner.equals(ownerPubkey) &&
accountPubkey &&
accountStates[accountPubkey.toBase58()] === states.CREATED
) {
accountStates[accountPubkey.toBase58()] = states.OWNED;
} else {
unsafe = true;
}
} else if (instruction.type === 'settleFunds') {
// Settling funds is only safe if the destinations are owned
let { basePubkey, quotePubkey } = instruction.data;
if (!isOwned(basePubkey) || !isOwned(quotePubkey)) {
unsafe = true;
}
} else if (instruction.type === 'closeAccount') {
// Closing is only safe if the destination is owned
let { sourcePubkey, destinationPubkey } = instruction.data;
if (isOwned(destinationPubkey)) {
accountStates[sourcePubkey.toBase58()] =
states.CLOSED_TO_OWNED_DESTINATION;
} else {
unsafe = true;
}
} else {
unsafe = true;
}
}
});
// Check that all accounts are owned
if (
Object.values(accountStates).some(
(state) =>
![states.CLOSED_TO_OWNED_DESTINATION, states.OWNED].includes(state),
)
) {
unsafe = true;
}
return !unsafe;
}
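// Illustrative walk-through (comment only): a sequence like
//   systemCreate(newAccountPubkey = A)
//   -> initializeAccount(accountPubkey = A, ownerPubkey = wallet)
//   -> newOrder(openOrdersPubkey = B, ownerPubkey = wallet)
//   -> settleFunds(basePubkey / quotePubkey owned by the wallet)
//   -> closeAccount(sourcePubkey = A, destinationPubkey owned by the wallet)
// leaves every tracked account in the OWNED or CLOSED_TO_OWNED_DESTINATION state,
// so isSafeInstruction returns true; any unrecognized instruction type marks the
// whole transaction as unsafe.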
function ApproveSignatureForm({
origin,
message,
onApprove,
onReject,
autoApprove,
}) {
const classes = useStyles();
const explorerUrlSuffix = useSolanaExplorerUrlSuffix();
const connection = useConnection();
const wallet = useWallet();
const [publicKeys] = useWalletPublicKeys();
const [parsing, setParsing] = useState(true);
const [instructions, setInstructions] = useState(null);
const buttonRef = useRef();
useEffect(() => {
decodeMessage(connection, wallet, message).then((instructions) => {
setInstructions(instructions);
setParsing(false);
});
}, [message, connection, wallet]);
const validator = useMemo(() => {
return {
safe:
publicKeys &&
instructions &&
isSafeInstruction(publicKeys, wallet.publicKey, instructions),
};
}, [publicKeys, instructions, wallet]);
useEffect(() => {
if (validator.safe && autoApprove) {
console.log('Auto approving safe transaction');
onApprove();
} else {
// brings window to front when we receive new instructions
// this needs to be executed from wallet instead of adapter
// to ensure chrome brings window to front
window.focus();
// scroll to approve button and focus it to enable approve with enter
if (buttonRef.current) {
buttonRef.current.scrollIntoView({ behavior: 'smooth' });
setTimeout(() => buttonRef.current.focus(), 50);
}
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [validator, autoApprove, buttonRef]);
const onOpenAddress = (address) => {
address &&
window.open(
'https://explorer.solana.com/address/' + address + explorerUrlSuffix,
'_blank',
);
};
const getContent = (instruction) => {
switch (instruction?.type) {
case 'cancelOrder':
case 'matchOrders':
case 'settleFunds':
return (
<DexInstruction
instruction={instruction}
onOpenAddress={onOpenAddress}
/>
);
case 'closeAccount':
case 'initializeAccount':
case 'transfer':
case 'approve':
case 'mintTo':
return (
<TokenInstruction
instruction={instruction}
onOpenAddress={onOpenAddress}
/>
);
case 'systemCreate':
case 'systemTransfer':
return (
<SystemInstruction
instruction={instruction}
onOpenAddress={onOpenAddress}
/>
);
case 'newOrder':
return (
<NewOrder instruction={instruction} onOpenAddress={onOpenAddress} />
);
default:
return <UnknownInstruction instruction={instruction} />;
}
};
return (
<Card>
<CardContent>
{parsing ? (
<>
<div
style={{
display: 'flex',
alignItems: 'flex-end',
marginBottom: 20,
}}
>
<CircularProgress style={{ marginRight: 20 }} />
<Typography
variant="subtitle1"
style={{ fontWeight: 'bold' }}
gutterBottom
>
Parsing transaction:
</Typography>
</div>
<Typography style={{ wordBreak: 'break-all' }}>
{bs58.encode(message)}
</Typography>
</>
) : (
<>
<Typography variant="h6" gutterBottom>
{instructions
? `${origin} wants to:`
: `Unknown transaction data`}
</Typography>
{instructions ? (
instructions.map((instruction, i) => (
<Box style={{ marginTop: 20 }} key={i}>
{getContent(instruction)}
<Divider style={{ marginTop: 20 }} />
</Box>
))
) : (
<>
<Typography
variant="subtitle1"
style={{ fontWeight: 'bold' }}
gutterBottom
>
Unknown transaction:
</Typography>
<Typography style={{ wordBreak: 'break-all' }}>
{bs58.encode(message)}
</Typography>
</>
)}
{!validator.safe && (
<SnackbarContent
className={classes.warningContainer}
message={
<div>
<span className={classes.warningTitle}>
<WarningIcon className={classes.warningIcon} />
Nonstandard DEX transaction
</span>
<Typography className={classes.warningMessage}>
Sollet does not recognize this transaction as a standard
Serum DEX transaction
</Typography>
</div>
}
classes={{ root: classes.snackbarRoot }}
/>
)}
</>
)}
</CardContent>
<CardActions className={classes.actions}>
<Button onClick={onReject}>Cancel</Button>
<Button
ref={buttonRef}
className={classes.approveButton}
variant="contained"
color="primary"
onClick={onApprove}
>
Approve
</Button>
</CardActions>
</Card>
);
}
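// Illustrative message flow between a dapp adapter and this popup, inferred from
// the handlers above (field values are examples only):
//   dapp  -> popup: { id: 1, method: 'signTransaction', params: { message: <bs58 transaction message> } }
//   popup -> dapp:  { jsonrpc: '2.0', id: 1, result: { signature: <bs58 ed25519 signature>, publicKey: <base58> } }
//   popup -> dapp (rejected):   { jsonrpc: '2.0', id: 1, error: 'Transaction cancelled' }
//   popup -> dapp (on connect): { jsonrpc: '2.0', method: 'connected', params: { publicKey: <base58>, autoApprove: <bool> } }
//   popup -> dapp (on close):   { jsonrpc: '2.0', method: 'disconnected' }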
|
import torch
import os
import time
import json
import numpy as np
from collections import defaultdict
from speaker import Speaker
from utils import read_vocab,write_vocab,build_vocab,Tokenizer,padding_idx,timeSince, read_img_features
import utils
from env import R2RBatch
from agent import Seq2SeqAgent
from eval import Evaluation
from param import args
import warnings
warnings.filterwarnings("ignore")
from tensorboardX import SummaryWriter
log_dir = 'snap/%s' % args.name
if not os.path.exists(log_dir):
os.makedirs(log_dir)
TRAIN_VOCAB = 'tasks/R2R/data/train_vocab.txt'
TRAINVAL_VOCAB = 'tasks/R2R/data/trainval_vocab.txt'
feedback_method = args.feedback # teacher or sample
print(args)
def train_speaker(train_env, tok, n_iters, log_every=500, val_envs={}):
writer = SummaryWriter(logdir=log_dir)
listner = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = Speaker(train_env, listner, tok)
if args.fast_train:
log_every = 40
best_bleu = defaultdict(lambda: 0)
best_loss = defaultdict(lambda: 1232)
for idx in range(0, n_iters, log_every):
interval = min(log_every, n_iters - idx)
# Train for log_every interval
speaker.env = train_env
speaker.train(interval) # Train interval iters
print()
print("Iter: %d" % idx)
# Evaluation
for env_name, (env, evaluator) in val_envs.items():
if 'train' in env_name: # Ignore the large training set for efficiency
continue
print("............ Evaluating %s ............." % env_name)
speaker.env = env
path2inst, loss, word_accu, sent_accu = speaker.valid()
path_id = next(iter(path2inst.keys()))
print("Inference: ", tok.decode_sentence(path2inst[path_id]))
print("GT: ", evaluator.gt[str(path_id)]['instructions'])
bleu_score, precisions = evaluator.bleu_score(path2inst)
# Tensorboard log
writer.add_scalar("bleu/%s" % (env_name), bleu_score, idx)
writer.add_scalar("loss/%s" % (env_name), loss, idx)
writer.add_scalar("word_accu/%s" % (env_name), word_accu, idx)
writer.add_scalar("sent_accu/%s" % (env_name), sent_accu, idx)
writer.add_scalar("bleu4/%s" % (env_name), precisions[3], idx)
# Save the model according to the bleu score
if bleu_score > best_bleu[env_name]:
best_bleu[env_name] = bleu_score
print('Save the model with %s BEST env bleu %0.4f' % (env_name, bleu_score))
speaker.save(idx, os.path.join(log_dir, 'state_dict', 'best_%s_bleu' % env_name))
if loss < best_loss[env_name]:
best_loss[env_name] = loss
print('Save the model with %s BEST env loss %0.4f' % (env_name, loss))
speaker.save(idx, os.path.join(log_dir, 'state_dict', 'best_%s_loss' % env_name))
# Screen print out
print("Bleu 1: %0.4f Bleu 2: %0.4f, Bleu 3 :%0.4f, Bleu 4: %0.4f" % tuple(precisions))
def train(train_env, tok, n_iters, log_every=100, val_envs={}, aug_env=None):
writer = SummaryWriter(logdir=log_dir)
listner = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = None
if args.self_train:
speaker = Speaker(train_env, listner, tok)
if args.speaker is not None:
print("Load the speaker from %s." % args.speaker)
speaker.load(args.speaker)
start_iter = 0
if args.load is not None:
print("LOAD THE listener from %s" % args.load)
start_iter = listner.load(os.path.join(args.load))
start = time.time()
best_val = {'val_seen': {"accu": 0., "state":"", 'update':False},
'val_unseen': {"accu": 0., "state":"", 'update':False}}
if args.fast_train:
log_every = 40
for idx in range(start_iter, start_iter+n_iters, log_every):
listner.logs = defaultdict(list)
interval = min(log_every, n_iters-idx)
iter = idx + interval
# Train for log_every interval
if aug_env is None: # The default training process
listner.env = train_env
listner.train(interval, feedback=feedback_method) # Train interval iters
else:
if args.accumulate_grad:
for _ in range(interval // 2):
listner.zero_grad()
listner.env = train_env
# Train with GT data
args.ml_weight = 0.2
listner.accumulate_gradient(feedback_method)
listner.env = aug_env
# Train with Back Translation
args.ml_weight = 0.6 # Sem-Configuration
listner.accumulate_gradient(feedback_method, speaker=speaker)
listner.optim_step()
else:
for _ in range(interval // 2):
# Train with GT data
listner.env = train_env
args.ml_weight = 0.2
listner.train(1, feedback=feedback_method)
# Train with Back Translation
listner.env = aug_env
args.ml_weight = 0.6
listner.train(1, feedback=feedback_method, speaker=speaker)
# Log the training stats to tensorboard
total = max(sum(listner.logs['total']), 1)
length = max(len(listner.logs['critic_loss']), 1)
critic_loss = sum(listner.logs['critic_loss']) / total #/ length / args.batchSize
entropy = sum(listner.logs['entropy']) / total #/ length / args.batchSize
predict_loss = sum(listner.logs['us_loss']) / max(len(listner.logs['us_loss']), 1)
writer.add_scalar("loss/critic", critic_loss, idx)
writer.add_scalar("policy_entropy", entropy, idx)
writer.add_scalar("loss/unsupervised", predict_loss, idx)
writer.add_scalar("total_actions", total, idx)
writer.add_scalar("max_length", length, idx)
print("total_actions", total)
print("max_length", length)
# Run validation
loss_str = ""
for env_name, (env, evaluator) in val_envs.items():
listner.env = env
# Get validation loss under the same conditions as training
iters = None if args.fast_train or env_name != 'train' else 20 # 20 * 64 = 1280
# Get validation distance from goal under test evaluation conditions
listner.test(use_dropout=False, feedback='argmax', iters=iters)
result = listner.get_results()
score_summary, _ = evaluator.score(result)
loss_str += ", %s " % env_name
for metric,val in score_summary.items():
if metric in ['success_rate']:
writer.add_scalar("accuracy/%s" % env_name, val, idx)
if env_name in best_val:
if val > best_val[env_name]['accu']:
best_val[env_name]['accu'] = val
best_val[env_name]['update'] = True
loss_str += ', %s: %.3f' % (metric, val)
for env_name in best_val:
if best_val[env_name]['update']:
best_val[env_name]['state'] = 'Iter %d %s' % (iter, loss_str)
best_val[env_name]['update'] = False
listner.save(idx, os.path.join("snap", args.name, "state_dict", "best_%s" % (env_name)))
print(('%s (%d %d%%) %s' % (timeSince(start, float(iter)/n_iters),
iter, float(iter)/n_iters*100, loss_str)))
if iter % 1000 == 0:
print("BEST RESULT TILL NOW")
for env_name in best_val:
print(env_name, best_val[env_name]['state'])
if iter % 50000 == 0:
listner.save(idx, os.path.join("snap", args.name, "state_dict", "Iter_%06d" % (iter)))
listner.save(idx, os.path.join("snap", args.name, "state_dict", "LAST_iter%d" % (idx)))
def valid(train_env, tok, val_envs={}):
agent = Seq2SeqAgent(train_env, "", tok, args.maxAction)
print("Loaded the listener model at iter %d from %s" % (agent.load(args.load), args.load))
for env_name, (env, evaluator) in val_envs.items():
agent.logs = defaultdict(list)
agent.env = env
iters = None
agent.test(use_dropout=False, feedback='argmax', iters=iters)
result = agent.get_results()
if env_name != '':
score_summary, _ = evaluator.score(result)
loss_str = "Env name: %s" % env_name
for metric,val in score_summary.items():
loss_str += ', %s: %.4f' % (metric, val)
print(loss_str)
if args.submit:
json.dump(
result,
open(os.path.join(log_dir, "submit_%s.json" % env_name), 'w'),
sort_keys=True, indent=4, separators=(',', ': ')
)
def beam_valid(train_env, tok, val_envs={}):
listener = Seq2SeqAgent(train_env, "", tok, args.maxAction)
speaker = Speaker(train_env, listener, tok)
if args.speaker is not None:
print("Load the speaker from %s." % args.speaker)
speaker.load(args.speaker)
print("Loaded the listener model at iter % d" % listener.load(args.load))
final_log = ""
for env_name, (env, evaluator) in val_envs.items():
listener.logs = defaultdict(list)
listener.env = env
listener.beam_search_test(speaker)
results = listener.results
def cal_score(x, alpha, avg_speaker, avg_listener):
speaker_score = sum(x["speaker_scores"]) * alpha
if avg_speaker:
speaker_score /= len(x["speaker_scores"])
# normalizer = sum(math.log(k) for k in x['listener_actions'])
normalizer = 0.
listener_score = (sum(x["listener_scores"]) + normalizer) * (1-alpha)
if avg_listener:
listener_score /= len(x["listener_scores"])
return speaker_score + listener_score
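# Illustrative example (comment only): with alpha = 0.5 and no averaging, a candidate
# whose speaker_scores sum to -3.0 and listener_scores sum to -1.0 gets
# 0.5 * (-3.0) + 0.5 * (-1.0) = -2.0; the max(...) calls below keep the trajectory
# with the highest combined log-score.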
if args.param_search:
# Search for the best speaker / listener ratio
interval = 0.01
logs = []
for avg_speaker in [False, True]:
for avg_listener in [False, True]:
for alpha in np.arange(0, 1 + interval, interval):
result_for_eval = []
for key in results:
result_for_eval.append({
"instr_id": key,
"trajectory": max(results[key]['paths'],
key=lambda x: cal_score(x, alpha, avg_speaker, avg_listener)
)['trajectory']
})
score_summary, _ = evaluator.score(result_for_eval)
for metric,val in score_summary.items():
if metric in ['success_rate']:
print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
(avg_speaker, avg_listener, alpha, val))
logs.append((avg_speaker, avg_listener, alpha, val))
tmp_result = "Env Name %s\n" % (env_name) + \
"Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f\n" % max(logs, key=lambda x: x[3])
print(tmp_result)
# print("Env Name %s" % (env_name))
# print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
# max(logs, key=lambda x: x[3]))
final_log += tmp_result
print()
else:
avg_speaker = True
avg_listener = True
alpha = args.alpha
result_for_eval = []
for key in results:
result_for_eval.append({
"instr_id": key,
"trajectory": [(vp, 0, 0) for vp in results[key]['dijk_path']] + \
max(results[key]['paths'],
key=lambda x: cal_score(x, alpha, avg_speaker, avg_listener)
)['trajectory']
})
# result_for_eval = utils.add_exploration(result_for_eval)
score_summary, _ = evaluator.score(result_for_eval)
if env_name != 'test':
loss_str = "Env Name: %s" % env_name
for metric, val in score_summary.items():
if metric in ['success_rate']:
print("Avg speaker %s, Avg listener %s, For the speaker weight %0.4f, the result is %0.4f" %
(avg_speaker, avg_listener, alpha, val))
loss_str += ",%s: %0.4f " % (metric, val)
print(loss_str)
print()
if args.submit:
json.dump(
result_for_eval,
open(os.path.join(log_dir, "submit_%s.json" % env_name), 'w'),
sort_keys=True, indent=4, separators=(',', ': ')
)
print(final_log)
def setup():
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# Check for vocabs
if not os.path.exists(TRAIN_VOCAB):
write_vocab(build_vocab(splits=['train']), TRAIN_VOCAB)
if not os.path.exists(TRAINVAL_VOCAB):
write_vocab(build_vocab(splits=['train','val_seen','val_unseen']), TRAINVAL_VOCAB)
def train_val():
''' Train on the training set, and validate on seen and unseen splits. '''
# args.fast_train = True
setup()
# Create a batch training environment that will also preprocess text
vocab = read_vocab(TRAIN_VOCAB)
tok = Tokenizer(vocab=vocab, encoding_length=args.maxInput)
feat_dict = read_img_features(args.features)
featurized_scans = set([key.split("_")[0] for key in list(feat_dict.keys())])
train_env = R2RBatch(feat_dict, batch_size=args.batchSize, splits=['train'], tokenizer=tok)
from collections import OrderedDict
val_env_names = ['val_unseen', 'val_seen']
if args.submit:
val_env_names.append('test')
else:
pass
#val_env_names.append('train')
if not args.beam:
val_env_names.append("train")
val_envs = OrderedDict(
((split,
(R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split], tokenizer=tok),
Evaluation([split], featurized_scans, tok))
)
for split in val_env_names
)
)
if args.train == 'listener':
train(train_env, tok, args.iters, val_envs=val_envs)
elif args.train == 'validlistener':
if args.beam:
beam_valid(train_env, tok, val_envs=val_envs)
else:
valid(train_env, tok, val_envs=val_envs)
elif args.train == 'speaker':
train_speaker(train_env, tok, args.iters, val_envs=val_envs)
elif args.train == 'validspeaker':
valid_speaker(tok, val_envs)
else:
assert False
def valid_speaker(tok, val_envs):
import tqdm
listner = Seq2SeqAgent(None, "", tok, args.maxAction)
speaker = Speaker(None, listner, tok)
speaker.load(args.load)
for env_name, (env, evaluator) in val_envs.items():
if env_name == 'train':
continue
print("............ Evaluating %s ............." % env_name)
speaker.env = env
path2inst, loss, word_accu, sent_accu = speaker.valid(wrapper=tqdm.tqdm)
path_id = next(iter(path2inst.keys()))
print("Inference: ", tok.decode_sentence(path2inst[path_id]))
print("GT: ", evaluator.gt[path_id]['instructions'])
pathXinst = list(path2inst.items())
name2score = evaluator.lang_eval(pathXinst, no_metrics={'METEOR'})
score_string = " "
for score_name, score in name2score.items():
score_string += "%s_%s: %0.4f " % (env_name, score_name, score)
print("For env %s" % env_name)
print(score_string)
print("Average Length %0.4f" % utils.average_length(path2inst))
def train_val_augment():
"""
Train the listener with the augmented data
"""
setup()
# Create a batch training environment that will also preprocess text
vocab = read_vocab(TRAIN_VOCAB)
tok = Tokenizer(vocab=vocab, encoding_length=args.maxInput)
# Load the env img features
feat_dict = read_img_features(args.features)
featurized_scans = set([key.split("_")[0] for key in list(feat_dict.keys())])
# Load the augmentation data
aug_path = args.aug
# Create the training environment
train_env = R2RBatch(feat_dict, batch_size=args.batchSize,
splits=['train'], tokenizer=tok)
aug_env = R2RBatch(feat_dict, batch_size=args.batchSize,
splits=[aug_path], tokenizer=tok, name='aug')
# Printing out the statistics of the dataset
stats = train_env.get_statistics()
print("The training data_size is : %d" % train_env.size())
print("The average instruction length of the dataset is %0.4f." % (stats['length']))
print("The average action length of the dataset is %0.4f." % (stats['path']))
stats = aug_env.get_statistics()
print("The augmentation data size is %d" % aug_env.size())
print("The average instruction length of the dataset is %0.4f." % (stats['length']))
print("The average action length of the dataset is %0.4f." % (stats['path']))
# Setup the validation data
val_envs = {split: (R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split],
tokenizer=tok), Evaluation([split], featurized_scans, tok))
for split in ['train', 'val_seen', 'val_unseen']}
# Start training
train(train_env, tok, args.iters, val_envs=val_envs, aug_env=aug_env)
if __name__ == "__main__":
if args.train in ['speaker', 'rlspeaker', 'validspeaker',
'listener', 'validlistener']:
train_val()
elif args.train == 'auglistener':
train_val_augment()
else:
assert False
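# Illustrative invocations (assuming param.py exposes these options under the same
# names used via `args` above; the flag spellings are not verified here):
#   python train.py --train speaker       --name speaker_run --iters 80000
#   python train.py --train listener      --name agent_run   --iters 80000
#   python train.py --train auglistener   --name bt_run      --aug <aug_split> --speaker <speaker_ckpt>
#   python train.py --train validlistener --name eval_run    --load <listener_ckpt>  # add --beam / --submit as needed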
|
/*
* This header is generated by classdump-dyld 1.0
* on Sunday, September 27, 2020 at 12:32:51 PM Mountain Standard Time
* Operating System: Version 14.0 (Build 18A373)
* Image Source: /usr/lib/libAWDSupportFramework.dylib
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos.
*/
#import <libAWDSupportFramework.dylib/libAWDSupportFramework.dylib-Structs.h>
#import <ProtocolBuffer/PBCodable.h>
#import <libobjc.A.dylib/NSCopying.h>
@class NSMutableArray;
@interface AWDWiFiMetricsManagerRangingReport : PBCodable <NSCopying> {
unsigned long long _timestamp;
unsigned _awdlLatency;
unsigned _numMeasurements;
unsigned _peerMasterChannel;
unsigned _peerPreferredChannel;
unsigned _peerPreferredChannelFlags;
unsigned _protocolVersion;
unsigned _rangingBandwidth;
unsigned _rangingChannel;
unsigned _rangingChannelQuality;
unsigned _rangingLatency;
unsigned _resultFlags;
unsigned _resultStatus;
NSMutableArray* _rttSamples;
unsigned _selfMasterChannel;
unsigned _selfPreferredChannel;
unsigned _selfPreferredChannelFlags;
unsigned _validCount;
SCD_Struct_AW30 _has;
}
@property (assign,nonatomic) BOOL hasTimestamp;
@property (assign,nonatomic) unsigned long long timestamp; //@synthesize timestamp=_timestamp - In the implementation block
@property (assign,nonatomic) BOOL hasSelfPreferredChannel;
@property (assign,nonatomic) unsigned selfPreferredChannel; //@synthesize selfPreferredChannel=_selfPreferredChannel - In the implementation block
@property (assign,nonatomic) BOOL hasSelfPreferredChannelFlags;
@property (assign,nonatomic) unsigned selfPreferredChannelFlags; //@synthesize selfPreferredChannelFlags=_selfPreferredChannelFlags - In the implementation block
@property (assign,nonatomic) BOOL hasSelfMasterChannel;
@property (assign,nonatomic) unsigned selfMasterChannel; //@synthesize selfMasterChannel=_selfMasterChannel - In the implementation block
@property (assign,nonatomic) BOOL hasPeerPreferredChannel;
@property (assign,nonatomic) unsigned peerPreferredChannel; //@synthesize peerPreferredChannel=_peerPreferredChannel - In the implementation block
@property (assign,nonatomic) BOOL hasPeerPreferredChannelFlags;
@property (assign,nonatomic) unsigned peerPreferredChannelFlags; //@synthesize peerPreferredChannelFlags=_peerPreferredChannelFlags - In the implementation block
@property (assign,nonatomic) BOOL hasPeerMasterChannel;
@property (assign,nonatomic) unsigned peerMasterChannel; //@synthesize peerMasterChannel=_peerMasterChannel - In the implementation block
@property (assign,nonatomic) BOOL hasProtocolVersion;
@property (assign,nonatomic) unsigned protocolVersion; //@synthesize protocolVersion=_protocolVersion - In the implementation block
@property (assign,nonatomic) BOOL hasRangingChannel;
@property (assign,nonatomic) unsigned rangingChannel; //@synthesize rangingChannel=_rangingChannel - In the implementation block
@property (assign,nonatomic) BOOL hasRangingBandwidth;
@property (assign,nonatomic) unsigned rangingBandwidth; //@synthesize rangingBandwidth=_rangingBandwidth - In the implementation block
@property (assign,nonatomic) BOOL hasResultFlags;
@property (assign,nonatomic) unsigned resultFlags; //@synthesize resultFlags=_resultFlags - In the implementation block
@property (assign,nonatomic) BOOL hasResultStatus;
@property (assign,nonatomic) unsigned resultStatus; //@synthesize resultStatus=_resultStatus - In the implementation block
@property (assign,nonatomic) BOOL hasValidCount;
@property (assign,nonatomic) unsigned validCount; //@synthesize validCount=_validCount - In the implementation block
@property (assign,nonatomic) BOOL hasNumMeasurements;
@property (assign,nonatomic) unsigned numMeasurements; //@synthesize numMeasurements=_numMeasurements - In the implementation block
@property (assign,nonatomic) BOOL hasAwdlLatency;
@property (assign,nonatomic) unsigned awdlLatency; //@synthesize awdlLatency=_awdlLatency - In the implementation block
@property (assign,nonatomic) BOOL hasRangingLatency;
@property (assign,nonatomic) unsigned rangingLatency; //@synthesize rangingLatency=_rangingLatency - In the implementation block
@property (nonatomic,retain) NSMutableArray * rttSamples; //@synthesize rttSamples=_rttSamples - In the implementation block
@property (assign,nonatomic) BOOL hasRangingChannelQuality;
@property (assign,nonatomic) unsigned rangingChannelQuality; //@synthesize rangingChannelQuality=_rangingChannelQuality - In the implementation block
+(Class)rttSamplesType;
-(unsigned)protocolVersion;
-(void)setHasTimestamp:(BOOL)arg1 ;
-(id)copyWithZone:(NSZone*)arg1 ;
-(void)setProtocolVersion:(unsigned)arg1 ;
-(void)writeTo:(id)arg1 ;
-(BOOL)readFrom:(id)arg1 ;
-(void)dealloc;
-(unsigned long long)timestamp;
-(void)setTimestamp:(unsigned long long)arg1 ;
-(void)mergeFrom:(id)arg1 ;
-(void)copyTo:(id)arg1 ;
-(BOOL)isEqual:(id)arg1 ;
-(unsigned)resultStatus;
-(BOOL)hasTimestamp;
-(BOOL)hasProtocolVersion;
-(unsigned long long)hash;
-(void)setResultStatus:(unsigned)arg1 ;
-(void)setHasProtocolVersion:(BOOL)arg1 ;
-(id)description;
-(id)dictionaryRepresentation;
-(unsigned)peerMasterChannel;
-(unsigned)peerPreferredChannel;
-(void)setPeerMasterChannel:(unsigned)arg1 ;
-(void)setPeerPreferredChannel:(unsigned)arg1 ;
-(void)setRttSamples:(NSMutableArray *)arg1 ;
-(void)addRttSamples:(id)arg1 ;
-(unsigned long long)rttSamplesCount;
-(void)clearRttSamples;
-(id)rttSamplesAtIndex:(unsigned long long)arg1 ;
-(void)setSelfPreferredChannel:(unsigned)arg1 ;
-(void)setHasSelfPreferredChannel:(BOOL)arg1 ;
-(BOOL)hasSelfPreferredChannel;
-(void)setSelfPreferredChannelFlags:(unsigned)arg1 ;
-(void)setHasSelfPreferredChannelFlags:(BOOL)arg1 ;
-(BOOL)hasSelfPreferredChannelFlags;
-(void)setSelfMasterChannel:(unsigned)arg1 ;
-(void)setHasSelfMasterChannel:(BOOL)arg1 ;
-(BOOL)hasSelfMasterChannel;
-(void)setHasPeerPreferredChannel:(BOOL)arg1 ;
-(BOOL)hasPeerPreferredChannel;
-(void)setPeerPreferredChannelFlags:(unsigned)arg1 ;
-(void)setHasPeerPreferredChannelFlags:(BOOL)arg1 ;
-(BOOL)hasPeerPreferredChannelFlags;
-(void)setHasPeerMasterChannel:(BOOL)arg1 ;
-(BOOL)hasPeerMasterChannel;
-(void)setRangingChannel:(unsigned)arg1 ;
-(void)setHasRangingChannel:(BOOL)arg1 ;
-(BOOL)hasRangingChannel;
-(void)setRangingBandwidth:(unsigned)arg1 ;
-(void)setHasRangingBandwidth:(BOOL)arg1 ;
-(BOOL)hasRangingBandwidth;
-(void)setResultFlags:(unsigned)arg1 ;
-(void)setHasResultFlags:(BOOL)arg1 ;
-(BOOL)hasResultFlags;
-(void)setHasResultStatus:(BOOL)arg1 ;
-(BOOL)hasResultStatus;
-(void)setValidCount:(unsigned)arg1 ;
-(void)setHasValidCount:(BOOL)arg1 ;
-(BOOL)hasValidCount;
-(void)setNumMeasurements:(unsigned)arg1 ;
-(void)setHasNumMeasurements:(BOOL)arg1 ;
-(BOOL)hasNumMeasurements;
-(void)setAwdlLatency:(unsigned)arg1 ;
-(void)setHasAwdlLatency:(BOOL)arg1 ;
-(BOOL)hasAwdlLatency;
-(void)setRangingLatency:(unsigned)arg1 ;
-(void)setHasRangingLatency:(BOOL)arg1 ;
-(BOOL)hasRangingLatency;
-(void)setRangingChannelQuality:(unsigned)arg1 ;
-(void)setHasRangingChannelQuality:(BOOL)arg1 ;
-(BOOL)hasRangingChannelQuality;
-(unsigned)selfPreferredChannel;
-(unsigned)selfPreferredChannelFlags;
-(unsigned)selfMasterChannel;
-(unsigned)peerPreferredChannelFlags;
-(unsigned)rangingChannel;
-(unsigned)rangingBandwidth;
-(unsigned)resultFlags;
-(unsigned)validCount;
-(unsigned)numMeasurements;
-(unsigned)awdlLatency;
-(unsigned)rangingLatency;
-(NSMutableArray *)rttSamples;
-(unsigned)rangingChannelQuality;
@end
|
#include "vertexbuffer.h"
#include <kinc/graphics5/vertexbuffer.h>
#include <kinc/backend/SystemMicrosoft.h>
#include <kinc/graphics4/graphics.h>
kinc_g5_vertex_buffer_t *_current_vertex_buffer = NULL;
void kinc_g5_vertex_buffer_init(kinc_g5_vertex_buffer_t *buffer, int count, kinc_g5_vertex_structure_t *structure, bool gpuMemory, int instanceDataStepRate) {
buffer->impl.myCount = count;
buffer->impl.lastStart = -1;
buffer->impl.lastCount = -1;
// static_assert(sizeof(D3D12VertexBufferView) == sizeof(D3D12_VERTEX_BUFFER_VIEW), "Something is wrong with D3D12IVertexBufferView");
buffer->impl.myStride = 0;
for (int i = 0; i < structure->size; ++i) {
buffer->impl.myStride += kinc_g4_vertex_data_size(structure->elements[i].data);
}
int uploadBufferSize = buffer->impl.myStride * buffer->impl.myCount;
D3D12_HEAP_PROPERTIES heapProperties;
heapProperties.Type = D3D12_HEAP_TYPE_UPLOAD;
heapProperties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
heapProperties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
heapProperties.CreationNodeMask = 1;
heapProperties.VisibleNodeMask = 1;
D3D12_RESOURCE_DESC resourceDesc;
resourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
resourceDesc.Alignment = 0;
resourceDesc.Width = uploadBufferSize;
resourceDesc.Height = 1;
resourceDesc.DepthOrArraySize = 1;
resourceDesc.MipLevels = 1;
resourceDesc.Format = DXGI_FORMAT_UNKNOWN;
resourceDesc.SampleDesc.Count = 1;
resourceDesc.SampleDesc.Quality = 0;
resourceDesc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
resourceDesc.Flags = D3D12_RESOURCE_FLAG_NONE;
device->lpVtbl->CreateCommittedResource(device, &heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDesc, D3D12_RESOURCE_STATE_GENERIC_READ, NULL,
&IID_ID3D12Resource, &buffer->impl.uploadBuffer);
// device_->CreateCommittedResource(&CD3DX12_HEAP_PROPERTIES (D3D12_HEAP_TYPE_DEFAULT), D3D12_HEAP_FLAG_NONE,
// &CD3DX12_RESOURCE_DESC::Buffer(uploadBufferSize),
// D3D12_RESOURCE_STATE_COPY_DEST, nullptr, IID_PPV_ARGS(&vertexBuffer));
buffer->impl.view.BufferLocation = buffer->impl.uploadBuffer->lpVtbl->GetGPUVirtualAddress(buffer->impl.uploadBuffer);
buffer->impl.view.SizeInBytes = uploadBufferSize;
buffer->impl.view.StrideInBytes = buffer->impl.myStride;
}
void kinc_g5_vertex_buffer_destroy(kinc_g5_vertex_buffer_t *buffer) {
// vb->Release();
// delete[] vertices;
}
float *kinc_g5_vertex_buffer_lock_all(kinc_g5_vertex_buffer_t *buffer) {
return kinc_g5_vertex_buffer_lock(buffer, 0, kinc_g5_vertex_buffer_count(buffer));
}
float *kinc_g5_vertex_buffer_lock(kinc_g5_vertex_buffer_t *buffer, int start, int count) {
buffer->impl.lastStart = start;
buffer->impl.lastCount = count;
void *p;
D3D12_RANGE range;
range.Begin = start * buffer->impl.myStride;
range.End = range.Begin + count * buffer->impl.myStride;
buffer->impl.uploadBuffer->lpVtbl->Map(buffer->impl.uploadBuffer, 0, &range, &p);
byte *bytes = (byte *)p;
bytes += start * buffer->impl.myStride;
return (float *)bytes;
}
void kinc_g5_vertex_buffer_unlock_all(kinc_g5_vertex_buffer_t *buffer) {
D3D12_RANGE range;
range.Begin = buffer->impl.lastStart * buffer->impl.myStride;
range.End = range.Begin + buffer->impl.lastCount * buffer->impl.myStride;
buffer->impl.uploadBuffer->lpVtbl->Unmap(buffer->impl.uploadBuffer, 0, &range);
// view.BufferLocation = uploadBuffer->GetGPUVirtualAddress() + myStart * myStride;
// commandList->CopyBufferRegion(vertexBuffer, 0, uploadBuffer, 0, count() * stride());
// CD3DX12_RESOURCE_BARRIER barriers[1] = { CD3DX12_RESOURCE_BARRIER::Transition(vertexBuffer, D3D12_RESOURCE_STATE_COPY_DEST,
// D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER) };
// commandList->ResourceBarrier(1, barriers);
}
void kinc_g5_vertex_buffer_unlock(kinc_g5_vertex_buffer_t *buffer, int count) {
D3D12_RANGE range;
range.Begin = buffer->impl.lastStart * buffer->impl.myStride;
range.End = range.Begin + count * buffer->impl.myStride;
buffer->impl.uploadBuffer->lpVtbl->Unmap(buffer->impl.uploadBuffer, 0, &range);
// view.BufferLocation = uploadBuffer->GetGPUVirtualAddress() + myStart * myStride;
// commandList->CopyBufferRegion(vertexBuffer, 0, uploadBuffer, 0, count() * stride());
// CD3DX12_RESOURCE_BARRIER barriers[1] = { CD3DX12_RESOURCE_BARRIER::Transition(vertexBuffer, D3D12_RESOURCE_STATE_COPY_DEST,
// D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER) };
// commandList->ResourceBarrier(1, barriers);
}
int kinc_g5_internal_vertex_buffer_set(kinc_g5_vertex_buffer_t *buffer, int offset) {
// UINT stride = myStride;
// UINT offset = 0;
// context->IASetVertexBuffers(0, 1, &vb, &stride, &offset);
_current_vertex_buffer = buffer;
return 0;
}
int kinc_g5_vertex_buffer_count(kinc_g5_vertex_buffer_t *buffer) {
return buffer->impl.myCount;
}
int kinc_g5_vertex_buffer_stride(kinc_g5_vertex_buffer_t *buffer) {
return buffer->impl.myStride;
}
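/* Illustrative usage sketch (comment only; the vertex-structure setup is assumed and
   is not defined in this file):
       kinc_g5_vertex_buffer_t vb;
       kinc_g5_vertex_buffer_init(&vb, 3, &structure, true, 0);
       float *v = kinc_g5_vertex_buffer_lock_all(&vb);
       // ... write kinc_g5_vertex_buffer_count(&vb) * kinc_g5_vertex_buffer_stride(&vb) bytes of vertex data ...
       kinc_g5_vertex_buffer_unlock_all(&vb);
       kinc_g5_internal_vertex_buffer_set(&vb, 0);
   lock/unlock map and unmap the D3D12 upload heap created in _init; lock returns a
   pointer already offset by start * stride bytes. */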
|
from django.db import migrations
FILENAME = 'api/migrations/create_audit_trigger.sql'
def create_trigger(apps, schema_editor):
if schema_editor.connection.vendor != 'postgresql':
return
with open(FILENAME, 'r') as file:
sql = file.read()
with schema_editor.connection.cursor() as cursor:
cursor.execute(sql)
def drop_trigger(apps, schema_editor):
if schema_editor.connection.vendor != 'postgresql':
return
with schema_editor.connection.cursor() as cursor:
cursor.execute('drop schema tfrs_audit cascade;')
class Migration(migrations.Migration):
dependencies = [
('api', '0020_credittradecomment_trade_history_at_creation'),
]
operations = [
migrations.RunPython(create_trigger, drop_trigger)
]
|
'use strict';
const express = require('express');
const nodeRandom = require('node-random');
const request = require('request');
function random() {
return new Promise((resolve, reject) => {
request({
url: 'https://www.random.org/integers/',
method: 'get',
qs: {
num: 3,
min: 1,
max: 10,
base: 10,
format: 'plain',
col: 1,
},
}, function(err, res, body) {
if (!err && res && res.statusCode === 200) {
return resolve(processTextResult(body));
}
reject(err || new Error(res.statusCode));
});
});
}
function processTextResult(text) {
return text
.trim()
.split('\n')
.map((n) => parseInt(n, 10) - 1);
}
const app = express();
app.use('/node_modules', express.static('node_modules'));
app.use('/public', express.static('public'));
app.use(express.static('public'));
app.get('/api/lucky', (req, res, next) => {
random()
.then((nums) => {
res.json({
arr: nums,
});
})
.catch(function(e) {
// fallback
nodeRandom.integers({
number: 3,
minimum: 0,
maximum: 9,
colums: 1,
base: 10
}, function(error, nums) {
if (error) return next(error);
res.json({
arr: nums,
});
})
});
});
app.listen(3333, () => console.log('Started at :3333'));
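// Illustrative request/response (values are examples only):
//   GET http://localhost:3333/api/lucky  ->  { "arr": [2, 7, 4] }
// i.e. three integers in 0..9, taken from random.org or from the node-random fallback.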
|
# Generated by Django 3.0.7 on 2021-09-18 12:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blogs', '0041_auto_20210808_2118'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=200)),
('url', models.CharField(max_length=200, unique=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blogs.Blog')),
],
),
]
|
import tempfile
from pygromos.files.simulation_parameters import imd
from pygromos.files.qmmm import qmmm
from pygromos.tests.test_files.general_file_functions import general_file_tests
from pygromos.tests.in_testfiles import in_test_file_path
root_in = in_test_file_path+"/qmmm"
in_path_imd = root_in+"/md.imd"
in_path_qmmm = root_in+"/menthol-methanol-dmf.qmmm"
from pygromos.tests.test_files import out_test_root_dir
root_out = tempfile.mkdtemp(dir=out_test_root_dir, prefix="qmmm_")
out_path_imd = root_out+"/out_qmmm.imd"
out_path_qmmm = root_out+"/out_qmmm.qmmm"
class test_qmmm_imd(general_file_tests):
__test__ = True
class_type = imd.Imd
in_file_path = in_path_imd
root_out = root_out
def test_parsing_test_file(self):
imd_file = self.class_type(self.in_file_path)
return 0
def test_to_string(self):
imd_file = self.class_type(self.in_file_path)
print(imd_file)
return 0
def test_write_out(self):
imd_file = self.class_type(self.in_file_path)
imd_file.TITLE.content = "NEW TEST!"
imd_file.write(out_path_imd)
return 0
class test_qmmm(general_file_tests):
__test__ = True
class_type = qmmm.QMMM
in_file_path = in_path_qmmm
root_out = root_out
def test_parsing_test_file(self):
qmmm_file = self.class_type(self.in_file_path)
return 0
def test_to_string(self):
qmmm_file = self.class_type(self.in_file_path)
print(qmmm_file)
return 0
def test_write_out(self):
qmmm_file = self.class_type(self.in_file_path)
qmmm_file.TITLE.content = "NEW TEST!"
qmmm_file.write(out_path_qmmm)
return 0
|
"""Contains the base class for ball devices."""
import asyncio
from collections import deque
from mpf.core.events import QueuedEvent, event_handler
from mpf.devices.ball_device.ball_count_handler import BallCountHandler
from mpf.devices.ball_device.ball_device_ejector import BallDeviceEjector
from mpf.core.delays import DelayManager
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.system_wide_device import SystemWideDevice
from mpf.core.utility_functions import Util
from mpf.devices.ball_device.incoming_balls_handler import IncomingBallsHandler, IncomingBall
from mpf.devices.ball_device.outgoing_balls_handler import OutgoingBallsHandler, OutgoingBall
@DeviceMonitor("available_balls", _state="state", counted_balls="balls")
class BallDevice(SystemWideDevice):
"""Base class for a 'Ball Device' in a pinball machine.
A ball device is anything that can hold one or more balls, such as a
trough, an eject hole, a VUK, a catapult, etc.
Args: Same as Device.
"""
config_section = 'ball_devices'
collection = 'ball_devices'
class_label = 'ball_device'
__slots__ = ["delay", "available_balls", "_target_on_unexpected_ball", "_source_devices", "_ball_requests",
"ejector", "ball_count_handler", "incoming_balls_handler", "outgoing_balls_handler",
"counted_balls", "_state"]
def __init__(self, machine, name):
"""Initialise ball device."""
super().__init__(machine, name)
self.delay = DelayManager(machine)
self.available_balls = 0
"""Number of balls that are available to be ejected. This differs from
`balls` since it's possible that this device could have balls that are
being used for some other eject, and thus not available."""
self._target_on_unexpected_ball = None
# Device will eject to this target when it captures an unexpected ball
self._source_devices = list()
# Ball devices that have this device listed among their eject targets
self._ball_requests = deque()
# deque of tuples that holds requests from target devices for balls
# that this device could fulfil
# each tuple is (target device, boolean player_controlled flag)
self.ejector = None # type: BallDeviceEjector
self.ball_count_handler = None # type: BallCountHandler
self.incoming_balls_handler = None # type: IncomingBallsHandler
self.outgoing_balls_handler = None # type: OutgoingBallsHandler
# mirrored from ball_count_handler to make it observable by the monitor
self.counted_balls = 0
self._state = "idle"
def set_eject_state(self, state):
"""Set the current device state."""
self.info_log("State: %s", state)
self._state = state
@property
def balls(self):
"""Return the number of balls we expect in the near future."""
if self._state in ["ball_left", "failed_confirm"]:
return self.counted_balls - 1
return self.counted_balls
@event_handler(11)
def event_entrance(self, **kwargs):
"""Event handler for entrance events."""
del kwargs
self.ball_count_handler.counter.received_entrance_event()
async def _initialize(self):
"""Initialize right away."""
await super()._initialize()
self._configure_targets()
self.ball_count_handler = BallCountHandler(self)
self.incoming_balls_handler = IncomingBallsHandler(self)
self.outgoing_balls_handler = OutgoingBallsHandler(self)
# delay ball counters because we have to wait for switches to be ready
self.machine.events.add_handler('init_phase_2', self._initialize_late)
# check to make sure no switches from this device are tagged with
# playfield_active, because ball devices have their own logic for
# working with the playfield and this will break things. Plus, a ball
# in a ball device is not technically on the playfield.
switch_set = set()
for section in ('hold_switches', 'ball_switches'):
for switch in self.config[section]:
switch_set.add(switch)
if self.config['entrance_switch']:
switch_set.add(self.config['entrance_switch'])
if self.config['jam_switch']:
switch_set.add(self.config['jam_switch'])
for switch in switch_set:
if switch and '{}_active'.format(self.config['captures_from'].name) in switch.tags:
self.raise_config_error(
"Ball device '{}' uses switch '{}' which has a "
"'{}_active' tag. This is handled internally by the device. Remove the "
"redundant '{}_active' tag from that switch.".format(
self.name, switch.name, self.config['captures_from'].name,
self.config['captures_from'].name), 13)
def _initialize_late(self, queue: QueuedEvent, **kwargs):
"""Create ball counters."""
del kwargs
queue.wait()
complete_future = asyncio.ensure_future(self._initialize_async(), loop=self.machine.clock.loop)
complete_future.add_done_callback(lambda x: queue.clear())
def stop_device(self):
"""Stop device."""
self.debug_log("Stopping ball device")
if self.ball_count_handler:
self.ball_count_handler.stop()
self.incoming_balls_handler.stop()
self.outgoing_balls_handler.stop()
async def expected_ball_received(self):
"""Handle an expected ball."""
# post enter event
unclaimed_balls = await self._post_enter_event(unclaimed_balls=0, new_available_balls=0)
# there might still be unclaimed balls (e.g. because of a ball_routing)
self._balls_added_callback(0, unclaimed_balls)
async def unexpected_ball_received(self):
"""Handle an unexpected ball."""
# capture from playfield
await self._post_capture_from_playfield_event()
# post enter event
unclaimed_balls = await self._post_enter_event(unclaimed_balls=1, new_available_balls=1)
# add available_balls and route unclaimed ball to the default target
self._balls_added_callback(1, unclaimed_balls)
async def handle_mechanial_eject_during_idle(self):
"""Handle mechanical eject."""
# handle lost balls via outgoing balls handler (if mechanical eject)
self.config['eject_targets'][0].available_balls += 1
eject = OutgoingBall(self.config['eject_targets'][0])
eject.eject_timeout = self.config['eject_timeouts'][eject.target] / 1000
eject.max_tries = self.config['max_eject_attempts']
eject.mechanical = True
eject.already_left = True
self.outgoing_balls_handler.add_eject_to_queue(eject)
async def lost_idle_ball(self):
"""Lost an ball while the device was idle."""
# handle lost balls
self.warning_log("Ball disappeared while idle. This should not normally happen.")
self.available_balls -= 1
self.config['ball_missing_target'].add_missing_balls(1)
await self._balls_missing(1)
async def lost_ejected_ball(self, target):
"""Handle an outgoing lost ball."""
# follow path and check if we should request a new ball to the target or cancel the path
if target.is_playfield():
raise AssertionError("Lost a ball to playfield {}. This should not happen".format(target))
if target.cancel_path_if_target_is(self, self.config['ball_missing_target']):
# add ball to default target because it would have gone there anyway
self.warning_log("Path to %s canceled. Assuming the ball jumped to %s.", target,
self.config['ball_missing_target'])
elif target.find_available_ball_in_path(self):
self.warning_log("Path is not going to ball_missing_target %s. Restoring path by requesting new ball to "
"target %s.", self.config['ball_missing_target'], target)
# remove one ball first because it will get a new one with the eject
target.available_balls -= 1
self.eject(target=target)
else:
self.warning_log("Failed to restore the path. If you can reproduce this please report in the forum!")
self.config['ball_missing_target'].add_missing_balls(1)
await self._balls_missing(1)
async def lost_incoming_ball(self, source):
"""Handle lost ball which was confirmed to have left source."""
del source
if self.cancel_path_if_target_is(self, self.config['ball_missing_target']):
# add ball to default target
self.warning_log("Path to canceled. Assuming the ball jumped to %s.", self.config['ball_missing_target'])
elif self.find_available_ball_in_path(self):
self.warning_log("Path is not going to ball_missing_target %s. Restoring path by requesting a new ball.",
self.config['ball_missing_target'])
self.available_balls -= 1
self.request_ball()
else:
self.warning_log("Failed to restore the path. If you can reproduce this please report in the forum!")
self.config['ball_missing_target'].add_missing_balls(1)
await self._balls_missing(1)
def cancel_path_if_target_is(self, start, target):
"""Check if the ball is going to a certain target and cancel the path in that case."""
return self.outgoing_balls_handler.cancel_path_if_target_is(start, target)
def find_available_ball_in_path(self, start):
"""Try to remove available ball at the end of the path."""
return self.outgoing_balls_handler.find_available_ball_in_path(start)
async def _initialize_async(self):
"""Count balls without handling them as new."""
await self.ball_count_handler.initialise()
await self.incoming_balls_handler.initialise()
await self.outgoing_balls_handler.initialise()
self.available_balls = self.ball_count_handler.handled_balls
async def _post_capture_from_playfield_event(self):
await self.machine.events.post_async('balldevice_captured_from_{}'.format(
self.config['captures_from'].name),
balls=1)
'''event: balldevice_captured_from_(device)
desc: A ball device has just captured a ball from the device called
(device)
args:
balls: The number of balls that were captured.
'''
async def _post_enter_event(self, unclaimed_balls, new_available_balls):
self.debug_log("Processing new ball")
result = await self.machine.events.post_relay_async('balldevice_{}_ball_enter'.format(
self.name),
new_balls=1,
unclaimed_balls=unclaimed_balls,
new_available_balls=new_available_balls,
device=self)
'''event: balldevice_(name)_ball_enter
desc: A ball (or balls) have just entered the ball device called
"name".
Note that this is a relay event based on the "unclaimed_balls" arg. Any
unclaimed balls in the relay will be processed as new balls entering
this device.
Please be aware that we did not add those balls to balls or available_balls of the device during this event.
args:
unclaimed_balls: The number of balls that have not yet been claimed.
device: A reference to the ball device object that is posting this
event.
'''
return result['unclaimed_balls']
def add_incoming_ball(self, incoming_ball: IncomingBall):
"""Notify this device that there is a ball heading its way."""
self.incoming_balls_handler.add_incoming_ball(incoming_ball)
def remove_incoming_ball(self, incoming_ball: IncomingBall):
"""Remove a ball from the incoming balls queue."""
self.incoming_balls_handler.remove_incoming_ball(incoming_ball)
def wait_for_ready_to_receive(self, source):
"""Wait until this device is ready to receive a ball."""
return self.ball_count_handler.wait_for_ready_to_receive(source)
@property
def requested_balls(self):
"""Return the number of requested balls."""
return len(self._ball_requests)
def _source_device_balls_available(self, **kwargs) -> None:
del kwargs
if self._ball_requests:
(target, player_controlled) = self._ball_requests.popleft()
self._setup_or_queue_eject_to_target(target, player_controlled)
# ---------------------- End of state handling code -----------------------
def _parse_config(self):
# ensure eject timeouts list matches the length of the eject targets
if (len(self.config['eject_timeouts']) <
len(self.config['eject_targets'])):
self.config['eject_timeouts'] += ["10s"] * (
len(self.config['eject_targets']) -
len(self.config['eject_timeouts']))
if (len(self.config['ball_missing_timeouts']) <
len(self.config['eject_targets'])):
self.config['ball_missing_timeouts'] += ["20s"] * (
len(self.config['eject_targets']) -
len(self.config['ball_missing_timeouts']))
timeouts_list = self.config['eject_timeouts']
self.config['eject_timeouts'] = dict()
for i in range(len(self.config['eject_targets'])):
self.config['eject_timeouts'][self.config['eject_targets'][i]] = (
Util.string_to_ms(timeouts_list[i]))
timeouts_list = self.config['ball_missing_timeouts']
self.config['ball_missing_timeouts'] = dict()
for i in range(len(self.config['eject_targets'])):
self.config['ball_missing_timeouts'][
self.config['eject_targets'][i]] = (
Util.string_to_ms(timeouts_list[i]))
# End code to create timeouts list ------------------------------------
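# Illustrative example (comment only): with eject_targets [playfield, trough] and
# eject_timeouts ['3s'], the list is padded to ['3s', '10s'] above and converted to
# {playfield: 3000, trough: 10000} milliseconds via Util.string_to_ms.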
# cannot have ball switches and capacity
if self.config['ball_switches'] and self.config['ball_capacity']:
self.raise_config_error("Cannot use capacity and ball switches.", 3)
elif not self.config['ball_capacity'] and not self.config['ball_switches']:
self.raise_config_error("Need ball capcity if there are no switches.", 2)
elif self.config['ball_switches']:
self.config['ball_capacity'] = len(self.config['ball_switches'])
@property
def capacity(self):
"""Return the ball capacity."""
return self.config['ball_capacity']
def _validate_config(self):
# perform logical validation
# a device cannot have hold_coil and eject_coil
if (not self.config['eject_coil'] and not self.config['hold_coil'] and
not self.config['mechanical_eject'] and not self.config.get('ejector', False)):
self.raise_config_error('Configuration error in {} ball device. '
'Device needs an eject_coil, a hold_coil, or '
'"mechanical_eject: True"'.format(self.name), 4)
# mechanical eject is only supported for devices with at most one ball switch
if (len(self.config['ball_switches']) > 1 and
self.config['mechanical_eject']):
self.raise_config_error('Configuration error in {} ball device. '
'mechanical_eject can only be used with '
'devices that have 1 ball switch'.
format(self.name), 5)
# make sure timeouts are reasonable:
# exit_count_delay < all eject_timeout
if self.config['exit_count_delay'] > min(
self.config['eject_timeouts'].values()):
self.raise_config_error('Configuration error in {} ball device. '
'all eject_timeouts have to be larger than '
'exit_count_delay'.
format(self.name), 6)
# entrance_count_delay < all eject_timeout
if self.config['entrance_count_delay'] > min(
self.config['eject_timeouts'].values()):
self.raise_config_error('Configuration error in {} ball device. '
'all eject_timeouts have to be larger than '
'entrance_count_delay'.
format(self.name), 7)
# all eject_timeout < all ball_missing_timeouts
if max(self.config['eject_timeouts'].values()) > min(
self.config['ball_missing_timeouts'].values()):
self.raise_config_error('Configuration error in {} ball device. '
'all ball_missing_timeouts have to be larger '
'than all eject_timeouts'.
format(self.name), 8)
# all ball_missing_timeouts < incoming ball timeout
if max(self.config['ball_missing_timeouts'].values()) > 60000:
self.raise_config_error('Configuration error in {} ball device. '
'incoming ball timeout has to be larger '
'than all ball_missing_timeouts'.
format(self.name), 9)
if (self.config['confirm_eject_type'] == "switch" and
not self.config['confirm_eject_switch']):
self.raise_config_error("When using confirm_eject_type switch you " +
"to specify a confirm_eject_switch", 7)
if (self.config['confirm_eject_type'] == "event" and
not self.config['confirm_eject_event']):
self.raise_config_error("When using confirm_eject_type event you " +
"to specify a confirm_eject_event", 14)
if "ball_add_live" in self.tags:
self.raise_config_error("Using \"tag: ball_add_live\" is deprecated. Please use default_source_device "
"in your playfield section instead.", 10)
if "drain" in self.tags and "trough" not in self.tags and not self.find_next_trough():
self.raise_config_error("No path to trough but device is tagged as drain", 11)
if ("drain" not in self.tags and "trough" not in self.tags and
not self.find_path_to_target(self._target_on_unexpected_ball)):
self.raise_config_error("BallDevice {} has no path to target_on_unexpected_ball '{}'".format(
self.name, self._target_on_unexpected_ball.name), 12)
def load_config(self, config):
"""Load config."""
super().load_config(config)
# load targets and timeouts
self._parse_config()
def _configure_targets(self):
if self.config['target_on_unexpected_ball']:
self._target_on_unexpected_ball = self.config['target_on_unexpected_ball']
else:
self._target_on_unexpected_ball = self.config['captures_from']
# validate that configuration is valid
self._validate_config()
ejector_config = self.config.get("ejector", {})
# no ejector config. support legacy config
if not ejector_config:
if self.config['eject_coil']:
if self.config['eject_coil_enable_time']:
ejector_config["class"] = "mpf.devices.ball_device.enable_coil_ejector.EnableCoilEjector"
else:
ejector_config["class"] = "mpf.devices.ball_device.pulse_coil_ejector.PulseCoilEjector"
elif self.config['hold_coil']:
ejector_config["class"] = "mpf.devices.ball_device.hold_coil_ejector.HoldCoilEjector"
if not ejector_config:
self.debug_log("Device does not have any ejector.")
else:
ejector_class = Util.string_to_class(ejector_config["class"])
if not ejector_class:
self.raise_config_error("Could not load ejector {}".format(ejector_config["class"]), 1)
self.ejector = ejector_class(ejector_config, self, self.machine)
if self.ejector and self.config['ball_search_order']:
self.config['captures_from'].ball_search.register(
self.config['ball_search_order'], self.ejector.ball_search,
self.name)
# Register events to watch for ejects targeted at this device
for device in self.machine.ball_devices.values():
if device.is_playfield():
continue
for target in device.config['eject_targets']:
if target.name == self.name:
self._source_devices.append(device)
break
# register event handler for available balls at source devices
self.machine.events.add_handler(
'balldevice_balls_available',
self._source_device_balls_available)
def _balls_added_callback(self, new_balls, unclaimed_balls):
# If we still have unclaimed_balls here, that means that no one claimed
# them, so essentially they're "stuck." So we just eject them unless
# this device is tagged 'trough' in which case we let it keep them.
self.debug_log("Adding ball")
self.available_balls += new_balls
if unclaimed_balls and self.available_balls > 0:
if 'trough' in self.tags:
# ball already reached trough. everything is fine
pass
elif 'drain' in self.tags:
# try to eject to next trough
trough = self.find_next_trough()
if not trough:
raise AssertionError("Could not find path to trough")
for dummy_iterator in range(unclaimed_balls):
self._setup_or_queue_eject_to_target(trough)
else:
target = self._target_on_unexpected_ball
# try to eject to configured target
path = self.find_path_to_target(target)
if not path:
raise AssertionError("Could not find path to playfield {}".format(target.name))
self.info_log("Ejecting %s unexpected balls using path %s", unclaimed_balls, path)
for dummy_iterator in range(unclaimed_balls):
self.setup_eject_chain(path, not self.config['auto_fire_on_unexpected_ball'])
# we might have ball requests locally. serve them first
if self._ball_requests:
self._source_device_balls_available()
# tell targets that we have balls available
for dummy_iterator in range(new_balls):
self.machine.events.post_boolean('balldevice_balls_available')
self.machine.events.post('balldevice_{}_ball_entered'.format(self.name), new_balls=new_balls, device=self)
'''event: balldevice_(name)_ball_entered
desc: One or more balls have just entered the ball device called
"name".
The balls have also been added to the balls and available_balls counts of the device.
args:
new_balls: The number of new balls that have not been claimed (by locks or similar).
device: A reference to the ball device object that is posting this
event.
'''
async def _balls_missing(self, balls):
# Called when ball_count finds that balls are missing from this device
self.info_log("%s ball(s) missing from device. Mechanical eject?"
" %s", abs(balls), self.config['mechanical_eject'])
await self.machine.events.post_async('balldevice_{}_ball_missing'.format(self.name), balls=abs(balls))
'''event: balldevice_(name)_ball_missing
desc: The device (name) is missing a ball. Note this event is
posted in addition to the generic *balldevice_ball_missing* event.
args:
balls: The number of balls that are missing
'''
await self.machine.events.post_async('balldevice_ball_missing', balls=abs(balls), name=self.name)
'''event: balldevice_ball_missing
desc: A ball is missing from a device.
args:
balls: The number of balls that are missing
name: Name of device which lost the ball
'''
@property
def state(self):
"""Return the device state."""
return self._state
def find_one_available_ball(self, path=deque()):
"""Find a path to a source device which has at least one available ball."""
# copy path
path = deque(path)
# prevent loops
if self in path:
return False
path.appendleft(self)
if self.available_balls > 0 and len(path) > 1:
return path
for source in self._source_devices:
full_path = source.find_one_available_ball(path=path)
if full_path:
return full_path
return False
@event_handler(1)
def event_request_ball(self, balls=1, **kwargs):
"""Handle request_ball control event."""
del kwargs
self.request_ball(balls)
def request_ball(self, balls=1):
"""Request that one or more balls is added to this device.
Args:
balls: Integer of the number of balls that should be added to this
device. A value of -1 will cause this device to try to fill
itself.
"""
self.debug_log("Requesting Ball(s). Balls=%s", balls)
for dummy_iterator in range(balls):
self._setup_or_queue_eject_to_target(self)
return balls
def _setup_or_queue_eject_to_target(self, target, player_controlled=False):
path_to_target = self.find_path_to_target(target)
if target != self and not path_to_target:
raise AssertionError("Do not know how to eject to {}".format(target.name))
if self.available_balls > 0 and self != target:
path = path_to_target
else:
path = self.find_one_available_ball()
if not path:
# put into queue here
self._ball_requests.append((target, player_controlled))
return False
if target != self:
path_to_target.popleft() # remove self from path
path.extend(path_to_target)
path[0].setup_eject_chain(path, player_controlled)
return True
def setup_player_controlled_eject(self, target=None):
"""Set up a player controlled eject."""
self.info_log("Setting up player-controlled eject. Balls: %s, "
"Target: %s, player_controlled_eject_event: %s",
1, target,
self.config['player_controlled_eject_event'])
if self.config['mechanical_eject'] or (
self.config['player_controlled_eject_event'] and self.ejector):
self._setup_or_queue_eject_to_target(target, True)
else:
self.eject(target=target)
def setup_eject_chain(self, path, player_controlled=False):
"""Set up an eject chain."""
path = deque(path)
if self.available_balls <= 0:
raise AssertionError("Tried to setup an eject chain, but there are"
" no available balls. Device: {}, Path: {}"
.format(self.name, path))
self.available_balls -= 1
target = path[len(path) - 1]
source = path.popleft()
if source != self:
raise AssertionError("Path starts somewhere else!")
self.setup_eject_chain_next_hop(path, player_controlled)
target.available_balls += 1
self.machine.events.post_boolean('balldevice_balls_available')
'''event: balldevice_balls_available
desc: A device has balls available to be ejected.
'''
def setup_eject_chain_next_hop(self, path, player_controlled):
"""Set up one hop of the eject chain."""
next_hop = path.popleft()
self.debug_log("Adding eject chain")
if next_hop not in self.config['eject_targets']:
raise AssertionError("Broken path")
eject = OutgoingBall(next_hop)
eject.eject_timeout = self.config['eject_timeouts'][next_hop] / 1000
eject.max_tries = self.config['max_eject_attempts']
eject.mechanical = player_controlled
self.outgoing_balls_handler.add_eject_to_queue(eject)
# check if we traversed the whole path
if path:
next_hop.setup_eject_chain_next_hop(path, player_controlled)
def find_next_trough(self):
"""Find next trough after device."""
# are we a trough?
if 'trough' in self.tags:
return self
# otherwise find any target which can
for target_device in self.config['eject_targets']:
if target_device.is_playfield():
continue
trough = target_device.find_next_trough()
if trough:
return trough
return False
def find_path_to_target(self, target):
"""Find a path to this target."""
# if we can eject to target directly just do it
if target in self.config['eject_targets']:
path = deque()
path.appendleft(target)
path.appendleft(self)
return path
# otherwise find any target which can
for target_device in self.config['eject_targets']:
if target_device.is_playfield():
continue
path = target_device.find_path_to_target(target)
if path:
path.appendleft(self)
return path
return False
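# Illustrative example (not part of the original source; device names are
# hypothetical): if trough ejects to plunger_lane and plunger_lane ejects to
# the playfield, trough.find_path_to_target(playfield) recurses through
# plunger_lane and returns deque([trough, plunger_lane, playfield]), the full
# chain of devices the ball travels through.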
@event_handler(2)
def event_eject(self, balls=1, target=None, **kwargs):
"""Handle eject control event."""
del kwargs
self.eject(balls, target)
def eject(self, balls=1, target=None) -> int:
"""Eject balls to target.
Return the number of balls found for eject. The remaining balls are queued for eject when available.
"""
if not target:
target = self._target_on_unexpected_ball
self.info_log('Adding %s ball(s) to the eject_queue with target %s.',
balls, target)
balls_found = 0
# add request to queue
for dummy_iterator in range(balls):
if self._setup_or_queue_eject_to_target(target):
balls_found += 1
return balls_found
@event_handler(3)
def event_eject_all(self, target=None, **kwargs):
"""Handle eject_all control event."""
del kwargs
self.eject_all(target)
def eject_all(self, target=None) -> bool:
"""Eject all the balls from this device.
Args:
target: The string or BallDevice target for this eject. Default of
None means `playfield`.
Returns True if there are balls to eject. False if this device is empty.
"""
self.debug_log("Ejecting all balls")
if self.available_balls > 0:
self.eject(balls=self.available_balls, target=target)
return True
return False
@event_handler(10)
def event_hold(self, **kwargs):
"""Event handler for hold event."""
del kwargs
# TODO: remove when migrating config to ejectors
self.ejector.hold()
@classmethod
def is_playfield(cls):
"""Return True if this ball device is a Playfield-type device, False if it's a regular ball device."""
return False
|
/* */
"use strict";
module.exports = function(Promise, INTERNAL, tryConvertToPromise, apiRejection) {
var util = require('./util');
var raceLater = function(promise) {
return promise.then(function(array) {
return race(array, promise);
});
};
function race(promises, parent) {
var maybePromise = tryConvertToPromise(promises);
if (maybePromise instanceof Promise) {
return raceLater(maybePromise);
} else {
promises = util.asArray(promises);
if (promises === null)
return apiRejection("expecting an array or an iterable object but got " + util.classString(promises));
}
var ret = new Promise(INTERNAL);
if (parent !== undefined) {
ret._propagateFrom(parent, 3);
}
var fulfill = ret._fulfill;
var reject = ret._reject;
for (var i = 0,
len = promises.length; i < len; ++i) {
var val = promises[i];
if (val === undefined && !(i in promises)) {
continue;
}
Promise.cast(val)._then(fulfill, reject, undefined, ret, null);
}
return ret;
}
Promise.race = function(promises) {
return race(promises, undefined);
};
Promise.prototype.race = function() {
return race(this, undefined);
};
};
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributions import Categorical, constraints
from pyro.distributions.torch_distribution import TorchDistribution
from pyro.distributions.util import sum_leftmost
class MixtureOfDiagNormalsSharedCovariance(TorchDistribution):
"""
Mixture of Normal distributions with diagonal covariance matrices.
That is, this distribution is a mixture with K components, where each
component distribution is a D-dimensional Normal distribution with a
D-dimensional mean parameter loc and a D-dimensional diagonal covariance
matrix specified by a scale parameter `coord_scale`. The K different
component means are gathered into the parameter `locs` and the scale
parameter is shared between all K components. The mixture weights are
controlled by a K-dimensional vector of softmax logits, `component_logits`.
This distribution implements pathwise derivatives for samples from the
distribution.
See reference [1] for details on the implementations of the pathwise
derivative. Please consider citing this reference if you use the pathwise
derivative in your research. Note that this distribution does not support
dimension D = 1.
[1] Pathwise Derivatives for Multivariate Distributions, Martin Jankowiak &
Theofanis Karaletsos. arXiv:1806.01856
:param torch.Tensor locs: K x D mean matrix
:param torch.Tensor coord_scale: shared D-dimensional scale vector
:param torch.Tensor component_logits: K-dimensional vector of softmax logits
"""
has_rsample = True
arg_constraints = {
"locs": constraints.real,
"coord_scale": constraints.positive,
"component_logits": constraints.real,
}
def __init__(self, locs, coord_scale, component_logits):
self.batch_mode = locs.dim() > 2
assert (
self.batch_mode or locs.dim() == 2
), "The locs parameter in MixtureOfDiagNormals should be K x D dimensional (or ... x B x K x D in batch mode)"
if not self.batch_mode:
assert (
coord_scale.dim() == 1
), "The coord_scale parameter in MixtureOfDiagNormals should be D dimensional"
assert (
component_logits.dim() == 1
), "The component_logits parameter in MixtureOfDiagNormals should be K dimensional"
assert component_logits.size(0) == locs.size(0)
batch_shape = ()
else:
assert (
coord_scale.dim() > 1
), "The coord_scale parameter in MixtureOfDiagNormals should be ... x B x D dimensional"
assert (
component_logits.dim() > 1
), "The component_logits parameter in MixtureOfDiagNormals should be ... x B x K dimensional"
assert component_logits.size(-1) == locs.size(-2)
batch_shape = tuple(locs.shape[:-2])
self.locs = locs
self.coord_scale = coord_scale
self.component_logits = component_logits
self.dim = locs.size(-1)
if self.dim < 2:
raise NotImplementedError("This distribution does not support D = 1")
self.categorical = Categorical(logits=component_logits)
self.probs = self.categorical.probs
super().__init__(batch_shape=batch_shape, event_shape=(self.dim,))
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(
MixtureOfDiagNormalsSharedCovariance, _instance
)
new.batch_mode = True
batch_shape = torch.Size(batch_shape)
new.dim = self.dim
new.locs = self.locs.expand(batch_shape + self.locs.shape[-2:])
coord_scale_shape = -1 if self.batch_mode else -2
new.coord_scale = self.coord_scale.expand(
batch_shape + self.coord_scale.shape[coord_scale_shape:]
)
new.component_logits = self.component_logits.expand(
batch_shape + self.component_logits.shape[-1:]
)
new.categorical = self.categorical.expand(batch_shape)
new.probs = self.probs.expand(batch_shape + self.probs.shape[-1:])
super(MixtureOfDiagNormalsSharedCovariance, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def log_prob(self, value):
coord_scale = (
self.coord_scale.unsqueeze(-2) if self.batch_mode else self.coord_scale
)
epsilon = (value.unsqueeze(-2) - self.locs) / coord_scale # L B K D
eps_sqr = 0.5 * torch.pow(epsilon, 2.0).sum(-1) # L B K
eps_sqr_min = torch.min(eps_sqr, -1)[0] # L B
result = self.categorical.logits + (
-eps_sqr + eps_sqr_min.unsqueeze(-1)
) # L B K
result = torch.logsumexp(result, dim=-1) # L B
result = result - (0.5 * math.log(2.0 * math.pi) * float(self.dim))
result = result - (torch.log(self.coord_scale).sum(-1))
result = result - eps_sqr_min
return result
def rsample(self, sample_shape=torch.Size()):
which = self.categorical.sample(sample_shape)
return _MixDiagNormalSharedCovarianceSample.apply(
self.locs,
self.coord_scale,
self.component_logits,
self.probs,
which,
sample_shape + self.coord_scale.shape,
)
class _MixDiagNormalSharedCovarianceSample(Function):
@staticmethod
def forward(ctx, locs, coord_scale, component_logits, pis, which, noise_shape):
dim = coord_scale.size(-1)
white = torch.randn(noise_shape, dtype=locs.dtype, device=locs.device)
n_unsqueezes = locs.dim() - which.dim()
for _ in range(n_unsqueezes):
which = which.unsqueeze(-1)
expand_tuple = tuple(which.shape[:-1] + (dim,))
loc = torch.gather(locs, -2, which.expand(expand_tuple)).squeeze(-2)
z = loc + coord_scale * white
ctx.save_for_backward(z, coord_scale, locs, component_logits, pis)
return z
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
z, coord_scale, locs, component_logits, pis = ctx.saved_tensors
K = component_logits.size(-1)
batch_dims = coord_scale.dim() - 1
g = grad_output # l b i
z_tilde = z / coord_scale # l b i
locs_tilde = locs / coord_scale.unsqueeze(-2) # b j i
mu_ab = locs_tilde.unsqueeze(-2) - locs_tilde.unsqueeze(-3) # b k j i
mu_ab_norm = torch.pow(mu_ab, 2.0).sum(-1).sqrt() # b k j
mu_ab /= mu_ab_norm.unsqueeze(-1) # b k j i
diagonals = torch.empty((K,), dtype=torch.long, device=z.device)
torch.arange(K, out=diagonals)
mu_ab[..., diagonals, diagonals, :] = 0.0
mu_ll_ab = (locs_tilde.unsqueeze(-2) * mu_ab).sum(-1) # b k j
z_ll_ab = (z_tilde.unsqueeze(-2).unsqueeze(-2) * mu_ab).sum(-1) # l b k j
z_perp_ab = (
z_tilde.unsqueeze(-2).unsqueeze(-2) - z_ll_ab.unsqueeze(-1) * mu_ab
) # l b k j i
z_perp_ab_sqr = torch.pow(z_perp_ab, 2.0).sum(-1) # l b k j
epsilons = z_tilde.unsqueeze(-2) - locs_tilde # l b j i
log_qs = -0.5 * torch.pow(epsilons, 2.0) # l b j i
log_q_j = log_qs.sum(-1, keepdim=True) # l b j 1
log_q_j_max = torch.max(log_q_j, -2, keepdim=True)[0]
q_j_prime = torch.exp(log_q_j - log_q_j_max) # l b j 1
q_j = torch.exp(log_q_j) # l b j 1
q_tot = (pis.unsqueeze(-1) * q_j).sum(-2) # l b 1
q_tot_prime = (pis.unsqueeze(-1) * q_j_prime).sum(-2).unsqueeze(-1) # l b 1 1
root_two = math.sqrt(2.0)
mu_ll_ba = torch.transpose(mu_ll_ab, -1, -2)
logits_grad = torch.erf((z_ll_ab - mu_ll_ab) / root_two) - torch.erf(
(z_ll_ab + mu_ll_ba) / root_two
)
logits_grad *= torch.exp(-0.5 * z_perp_ab_sqr) # l b k j
# bi lbi bkji
mu_ab_sigma_g = ((coord_scale * g).unsqueeze(-2).unsqueeze(-2) * mu_ab).sum(
-1
) # l b k j
logits_grad *= -mu_ab_sigma_g * pis.unsqueeze(-2) # l b k j
logits_grad = pis * sum_leftmost(
logits_grad.sum(-1) / q_tot, -(1 + batch_dims)
) # b k
logits_grad *= math.sqrt(0.5 * math.pi)
# b j l b j 1 l b i l b 1 1
prefactor = (
pis.unsqueeze(-1) * q_j_prime * g.unsqueeze(-2) / q_tot_prime
) # l b j i
locs_grad = sum_leftmost(prefactor, -(2 + batch_dims)) # b j i
coord_scale_grad = sum_leftmost(prefactor * epsilons, -(2 + batch_dims)).sum(
-2
) # b i
return locs_grad, coord_scale_grad, logits_grad, None, None, None
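# Illustrative usage sketch (not part of the original module): a K=3 component,
# D=2 dimensional mixture with a shared diagonal scale. Shapes follow the
# parameter descriptions in the class docstring above.
if __name__ == "__main__":
    locs = torch.randn(3, 2)            # K x D component means
    coord_scale = torch.rand(2) + 0.5   # shared D-dimensional scale
    component_logits = torch.zeros(3)   # uniform mixture weights
    mix = MixtureOfDiagNormalsSharedCovariance(locs, coord_scale, component_logits)
    x = mix.rsample(sample_shape=torch.Size([5]))  # reparameterized samples, 5 x D
    print(x.shape, mix.log_prob(x).shape)          # torch.Size([5, 2]) torch.Size([5])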
|
"""
ASGI config for sudokucreator project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sudokucreator.settings')
application = get_asgi_application()
|
"""Common get info functions for OSPF"""
# Python
import logging
import datetime
import re
# Genie
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.utils.timeout import Timeout
log = logging.getLogger(__name__)
def get_ospf_interface_and_area(device):
""" Retrieve interface for ospf on junos device
Args:
device ('obj'): Device object
Returns:
interface and area value dictionary
"""
try:
out = device.parse("show ospf interface brief")
except SchemaEmptyParserError as spe:
raise SchemaEmptyParserError(
"Could not parse output for"
" command 'show ospf interface brief'") from spe
key_val = {}
try:
interface_dict = out["instance"]["master"]["areas"]
for k, v in interface_dict.items():
for interface in v["interfaces"].keys():
key_val.update({interface: k})
except KeyError as ke:
raise KeyError("Key issue with exception: {}".format(str(ke))) from ke
return key_val
def get_ospf_spf_scheduled_time(log):
"""
Get OSPF spf scheduled time in log 'Jun 12 03:32:19.068983 OSPF SPF scheduled for topology default in 8s'
Args:
log ('str'): log string
Returns:
date time ('str')
"""
# Jun 12 03:32:19.068983 OSPF SPF scheduled for topology default in 8s
p_scheduled = (r'(?P<date>\S+\s+\d+) (?P<scheduled_time>\d+\:\d+\:\d+\.\d+) '
r'OSPF SPF scheduled for topology default in (?P<spf_change>\d+)s')
m = re.match(p_scheduled, log)
try:
if m:
group = m.groupdict()
scheduled_time = group['scheduled_time']
return scheduled_time
except KeyError as e:
raise KeyError(f"Key issue with exception: {str(e)}") from e
def get_ospf_spf_start_time(log):
"""
Get OSPF spf start time in log 'Jun 12 03:40:19.068983 Starting full SPF for topology default'
Args:
log ('str'): log string
Returns:
date time ('str')
"""
# Jun 12 03:40:19.068983 Starting full SPF for topology default
p_start = (
r'(?P<date>\S+\s+\d+) (?P<start_time>\d+\:\d+\:\d+\.\d+) Starting full SPF for topology default'
)
m = re.match(p_start, log)
try:
if m:
group = m.groupdict()
start_time = group['start_time']
return start_time
except KeyError as e:
raise KeyError(f"Key issue with exception: {str(e)}") from e
return None
def get_ospf_database_checksum(device, lsa_type=None):
""" Get ospf data base checksum data in a list
Args:
device (obj): Device object
lsa_type (str, optional): LSA type to check for. Defaults to None.
Returns:
list: List of checksums
"""
try:
out = device.parse('show ospf database')
except SchemaEmptyParserError:
return list()
ret_list = []
# Example dict
# {
# 'ospf-database-information': {
# 'ospf-database': [{
# 'lsa-type': 'Router',
# 'checksum': '0xa9b6',
# }]
# }
# }
for entry_ in out.q.get_values('ospf-database'):
if lsa_type and entry_.get('lsa-type') != lsa_type:
continue
if entry_.get('checksum'):
ret_list.append(entry_.get('checksum'))
return ret_list
def get_ospf_router_id(device):
""" Retrieve ospf router id
Args:
device (obj): Device object
"""
try:
output = device.parse('show ospf overview')
except SchemaEmptyParserError:
return None
try:
return output.q.get_values('ospf-router-id', 0)
except Exception as e:
log.info("Error retrieving router ID: {e}".format(e=e))
return None
def get_ospf_neighbors_instance_state_count(device, expected_neighbor_state='Full', max_time=60, check_interval=10):
""" Get ospf neighbors instance state count
Args:
device (obj): Device object
expected_neighbor_state (str): Expected neighbor state. Defaults to 'Full'.
max_time (int, optional): Maximum timeout time. Defaults to 60 seconds.
check_interval (int, optional): Check interval. Defaults to 10 seconds.
"""
try:
out = device.parse('show ospf neighbor instance all')
except SchemaEmptyParserError:
return None
state_count = out.q.contains_key_value('ospf-neighbor-state',
expected_neighbor_state).count()
return state_count
def get_ospf_neighbor_count(device, expected_state=None, output=None, max_time=60, check_interval=10):
""" Get ospf neighbors count
Args:
device (`obj`): Device object
expected_state (`str`): Expected neighbor state. Defaults to None
output (`str`): output of show ospf neighbor. Default to None
max_time (`int`, optional): Maximum timeout time. Defaults to 60 seconds.
check_interval (`int`, optional): Check interval. Defaults to 10 seconds.
"""
try:
if output:
out = device.parse('show ospf neighbor', output=output)
else:
out = device.parse('show ospf neighbor')
except SchemaEmptyParserError:
return 0
# example out out
# {
# "ospf-neighbor-information": {
# "ospf-neighbor": [
# {
# "activity-timer": "32",
# "interface-name": "ge-0/0/0.0",
# "neighbor-address": "12.0.0.2",
# "neighbor-id": "2.2.2.2",
# "neighbor-priority": "128",
# "ospf-neighbor-state": "Full"
# },
if expected_state:
return len(out.q.contains_key_value('ospf-neighbor-state', expected_state))
else:
return len(out.q.get_values('ospf-neighbor'))
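# Illustrative usage sketch (not part of the original module): the two SPF log
# helpers can be exercised standalone with the sample lines quoted in their
# docstrings; each returns the timestamp portion of a matching line.
if __name__ == '__main__':
    scheduled = get_ospf_spf_scheduled_time(
        'Jun 12 03:32:19.068983 OSPF SPF scheduled for topology default in 8s')
    started = get_ospf_spf_start_time(
        'Jun 12 03:40:19.068983 Starting full SPF for topology default')
    print(scheduled, started)  # expected: 03:32:19.068983 03:40:19.068983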
|
from __future__ import division, print_function
def mask_nii_2_hdf5(in_files, mask_files, hdf5_file, folder_alias):
"""masks data in in_files with masks in mask_files,
to be stored in an hdf5 file
Takes a list of 3D or 4D fMRI nifti-files and masks the
data with all masks in the list of nifti-files mask_files.
These files are assumed to represent the same space, i.e.
that of the functional acquisitions.
These are saved in hdf5_file, in the folder folder_alias.
Parameters
----------
in_files : list
list of absolute path to functional nifti-files.
all nifti files are assumed to have the same ndim
mask_files : list
list of absolute path to mask nifti-files.
mask_files are assumed to be 3D
hdf5_file : str
absolute path to hdf5 file.
folder_alias : str
name of the to-be-created folder in the hdf5 file.
Returns
-------
hdf5_file : str
absolute path to hdf5 file.
"""
import nibabel as nib
import os.path as op
import numpy as np
import tables
success = True
mask_data = [np.array(nib.load(mf).get_data(), dtype = bool) for mf in mask_files]
nifti_data = [nib.load(nf).get_data() for nf in in_files]
mask_names = [op.split(mf)[-1].split('_vol.nii.gz')[0] for mf in mask_files]
nifti_names = [op.split(nf)[-1].split('.nii.gz')[0] for nf in in_files]
h5file = tables.open_file(hdf5_file, mode = "a", title = hdf5_file)
# get or make group for alias folder
try:
folder_alias_run_group = h5file.get_node("/", name = folder_alias, classname='Group')
except tables.NoSuchNodeError:
print('Adding group ' + folder_alias + ' to this file')
folder_alias_run_group = h5file.create_group("/", folder_alias, folder_alias)
for (roi, roi_name) in zip(mask_data, mask_names):
# get or make group for alias/roi
try:
run_group = h5file.get_node(where = "/" + folder_alias, name = roi_name, classname='Group')
except tables.NoSuchNodeError:
print('Adding group ' + folder_alias + '_' + roi_name + ' to this file')
run_group = h5file.create_group("/" + folder_alias, roi_name, folder_alias + '_' + roi_name)
h5file.create_array(run_group, roi_name, roi, roi_name + ' mask file for reconstituting nii data from masked data')
for (nii_d, nii_name) in zip(nifti_data, nifti_names):
print('roi: %s, nifti: %s'%(roi_name, nii_name))
n_dims = len(nii_d.shape)
if n_dims == 3:
these_roi_data = nii_d[roi]
elif n_dims == 4: # timeseries data, last dimension is time.
these_roi_data = nii_d[roi,:]
else:
print("n_dims in data {nifti} do not fit with mask".format(nii_name))
success = False
h5file.create_array(run_group, nii_name, these_roi_data, roi_name + ' data from ' + nii_name)
h5file.close()
return hdf5_file
def roi_data_from_hdf(data_types_wildcards, roi_name_wildcard, hdf5_file, folder_alias):
"""takes data_type data from masks stored in hdf5_file
Takes a list of 4D fMRI nifti-files and masks the
data with all masks in the list of nifti-files mask_files.
These files are assumed to represent the same space, i.e.
that of the functional acquisitions.
These are saved in hdf5_file, in the folder folder_alias.
Parameters
----------
data_types_wildcards : list
list of data types to be loaded.
correspond to nifti_names in mask_2_hdf5
roi_name_wildcard : str
wildcard for masks.
corresponds to mask_name in mask_2_hdf5.
hdf5_file : str
absolute path to hdf5 file.
folder_alias : str
name of the folder in the hdf5 file from which data
should be loaded.
Returns
-------
output_data : list
list of numpy arrays corresponding to data_types and roi_name_wildcards
"""
import tables
import itertools
import fnmatch
import numpy as np
from IPython import embed as shell
h5file = tables.open_file(hdf5_file, mode = "r")
try:
folder_alias_run_group = h5file.get_node(where = '/', name = folder_alias, classname='Group')
except tables.NoSuchNodeError:
# import actual data
print('No group ' + folder_alias + ' in this file')
# return None
all_roi_names = h5file.list_nodes(where = '/' + folder_alias, classname = 'Group')
roi_names = [rn._v_name for rn in all_roi_names if roi_name_wildcard in rn._v_name]
if len(roi_names) == 0:
print('No rois corresponding to ' + roi_name_wildcard + ' in group ' + folder_alias)
# return None
data_arrays = []
for roi_name in roi_names:
try:
roi_node = h5file.get_node(where = '/' + folder_alias, name = roi_name, classname='Group')
except tables.NoSuchNodeError:
print('No data corresponding to ' + roi_name + ' in group ' + folder_alias)
continue
all_data_array_names = h5file.list_nodes(where = '/' + folder_alias + '/' + roi_name)
data_array_names = [adan._v_name for adan in all_data_array_names]
selected_data_array_names = list(itertools.chain(*[fnmatch.filter(data_array_names, dtwc) for dtwc in data_types_wildcards]))
# if sort_data_types:
selected_data_array_names = sorted(selected_data_array_names)
if len(selected_data_array_names) == 0:
print('No data corresponding to ' + str(selected_data_array_names) + ' in group /' + folder_alias + '/' + roi_name)
pass
else:
print('Taking data corresponding to ' + str(selected_data_array_names) + ' from group /' + folder_alias + '/' + roi_name)
data_arrays.append([])
for dan in selected_data_array_names:
data_arrays[-1].append(getattr(roi_node, dan).read())
print('Taken data corresponding to ' + str(selected_data_array_names) + ' from group /' + folder_alias + '/' + roi_name)
data_arrays[-1] = np.hstack(data_arrays[-1]) # stack across timepoints or other values per voxel
if len(data_arrays[-1].shape) == 1:
data_arrays[-1] = data_arrays[-1][:,np.newaxis]
all_roi_data_np = np.vstack(data_arrays) # stack across regions to create a single array of voxels by values (i.e. timepoints)
h5file.close()
return all_roi_data_np
def convert_mapper_data_to_session(workflow_output_directory, sub_id, hires_2_session_reg, example_func, str_repl = ['/rl/', '/map/'], stat_re = 'tf.feat/stats/*stat'):
import os.path as op
import glob
import nipype.pipeline as pe
from nipype.interfaces import fsl
from nipype.interfaces import freesurfer
from nipype.interfaces.utility import Function, IdentityInterface
import nipype.interfaces.io as nio
from IPython import embed as shell
input_folder = workflow_output_directory.replace(str_repl[0], str_repl[1])
input_files = glob.glob(op.join(input_folder, stat_re + '*.nii.gz'))
input_files.append(op.join(input_folder, 'reg', 'example_func.nii.gz'))
### NODES
input_node = pe.Node(IdentityInterface(
fields=['input_files',
'output_folder',
'mapper_2_hires_reg',
'hires_2_session_reg',
'template_file']), name='inputspec')
output_node = pe.Node(IdentityInterface(
fields=['output_files']), name='outputspec')
input_node.inputs.input_files = input_files
input_node.inputs.output_folder = workflow_output_directory # op.join(workflow_output_directory, 'mapper_stat')
input_node.inputs.mapper_2_hires_reg = op.join(input_folder, 'reg', 'example_func2highres.mat')
input_node.inputs.hires_2_session_reg = hires_2_session_reg # op.join(workflow_output_directory, 'reg', 'highres2example_func.mat')
input_node.inputs.template_file = example_func # op.join(workflow_output_directory, 'reg', 'example_func.nii.gz')
concat_N = pe.Node(fsl.ConvertXFM(concat_xfm = True), name = 'concat_Mapper')
vol_trans_node = pe.MapNode(interface=fsl.ApplyXfm(apply_xfm = True, interp = 'sinc', padding_size = 0), name='vol_trans', iterfield = ['in_file'])
datasink = pe.Node(nio.DataSink(), name='sinker')
datasink.inputs.parameterization = False
### WORKFLOW
convert_mapper_data_to_session_workflow = pe.Workflow(name='mapper2session')
convert_mapper_data_to_session_workflow.connect(input_node, 'mapper_2_hires_reg', concat_N, 'in_file')
convert_mapper_data_to_session_workflow.connect(input_node, 'hires_2_session_reg', concat_N, 'in_file2')
convert_mapper_data_to_session_workflow.connect(concat_N, 'out_file', vol_trans_node, 'in_matrix_file')
convert_mapper_data_to_session_workflow.connect(input_node, 'input_files', vol_trans_node, 'in_file')
convert_mapper_data_to_session_workflow.connect(input_node, 'template_file', vol_trans_node, 'reference')
convert_mapper_data_to_session_workflow.connect(input_node, 'output_folder', datasink, 'base_directory')
convert_mapper_data_to_session_workflow.connect(vol_trans_node, 'out_file', datasink, 'mapper_stat')
convert_mapper_data_to_session_workflow.connect(concat_N, 'out_file', datasink, 'mapper_stat.mat')
convert_mapper_data_to_session_workflow.connect(vol_trans_node, 'out_file', output_node, 'output_files')
convert_mapper_data_to_session_workflow.run('MultiProc', plugin_args={'n_procs': 24})
out_files = glob.glob(op.join(workflow_output_directory, 'mapper_stat', '*.nii.gz'))
return out_files
def natural_sort(l):
import re
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
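# Illustrative example (not part of the original source):
# natural_sort(['run10.nii.gz', 'run2.nii.gz', 'run1.nii.gz']) returns
# ['run1.nii.gz', 'run2.nii.gz', 'run10.nii.gz'], because digit groups are
# compared as integers rather than character by character.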
def import_MNI_masks(mask_folder, example_func, standard2example_func, output_folder):
"""Import MNI space masks to example_func space.
"""
import os.path as op
import glob
import nipype.pipeline as pe
from nipype.interfaces import fsl
from nipype.interfaces.utility import Function, IdentityInterface
import nipype.interfaces.io as nio
from IPython import embed as shell
### NODES
input_node = pe.Node(IdentityInterface(
fields=['masks',
'output_folder',
'standard2example_func',
'template_file']), name='inputspec')
output_node = pe.Node(IdentityInterface(
fields=['output_files']), name='outputspec')
input_node.inputs.masks = glob.glob(op.join(mask_folder, '*.nii.gz'))
input_node.inputs.output_folder = output_folder # op.join(workflow_output_directory, 'mapper_stat')
input_node.inputs.standard2example_func = standard2example_func
input_node.inputs.template_file = example_func # op.join(workflow_output_directory, 'reg', 'example_func.nii.gz')
datasink = pe.Node(nio.DataSink(), name='sinker')
datasink.inputs.parameterization = False
thresh_node = pe.MapNode(fsl.Threshold(thresh = 0.001, args = '-bin'), name='thresh', iterfield = ['in_file'])
vol_trans_node = pe.MapNode(interface=fsl.ApplyXfm(apply_xfm = True, interp = 'sinc', padding_size = 0, datatype = 'int'), name='vol_trans', iterfield = ['in_file'])
### WORKFLOW
import_MNI_masks_workflow = pe.Workflow(name='import_MNI_masks')
import_MNI_masks_workflow.connect(input_node, 'masks', thresh_node, 'in_file')
import_MNI_masks_workflow.connect(thresh_node, 'out_file', vol_trans_node, 'in_file')
import_MNI_masks_workflow.connect(input_node, 'standard2example_func', vol_trans_node, 'in_matrix_file')
import_MNI_masks_workflow.connect(input_node, 'template_file', vol_trans_node, 'reference')
import_MNI_masks_workflow.connect(input_node, 'output_folder', datasink, 'base_directory')
import_MNI_masks_workflow.connect(vol_trans_node, 'out_file', output_node, 'output_files')
import_MNI_masks_workflow.connect(vol_trans_node, 'out_file', datasink, 'roi.MNI')
import_MNI_masks_workflow.run('MultiProc', plugin_args={'n_procs': 24})
out_files = glob.glob(op.join(output_folder, 'roi', 'MNI', '*.nii.gz'))
return out_files
|
/*
* Copyright 2020 u-blox Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _U_CFG_TEST_PLATFORM_SPECIFIC_H_
#define _U_CFG_TEST_PLATFORM_SPECIFIC_H_
/* Only bring in #includes specifically related to the test framework. */
/** @file
* @brief Porting layer and configuration items passed in at application
* level when executing tests on the NRF52 platform.
*/
/* ----------------------------------------------------------------
* COMPILE-TIME MACROS: UNITY RELATED
* -------------------------------------------------------------- */
/** Macro to wrap a test assertion and map it to our Unity port.
*/
#define U_PORT_TEST_ASSERT(condition) U_PORT_UNITY_TEST_ASSERT(condition)
/** Macro to wrap the definition of a test function and
* map it to our Unity port.
*/
#define U_PORT_TEST_FUNCTION(name, group) U_PORT_UNITY_TEST_FUNCTION(name, \
group)
/* ----------------------------------------------------------------
* COMPILE-TIME MACROS: HEAP RELATED
* -------------------------------------------------------------- */
/** The minimum free heap space permitted, i.e. what's left for
* user code. This is assuming a heap size of 40 kbytes (set
* in the Makefile/FreeRTOSConfig.h file and the SES XML file).
*/
#define U_CFG_TEST_HEAP_MIN_FREE_BYTES (1024 * 8)
/* ----------------------------------------------------------------
* COMPILE-TIME MACROS: OS RELATED
* -------------------------------------------------------------- */
/** The stack size to use for the test task created during OS testing.
*/
#define U_CFG_TEST_OS_TASK_STACK_SIZE_BYTES 1280
/** The task priority to use for the task created during OS
* testing: make sure that the priority of the task RUNNING
* the tests is lower than this.
*/
#define U_CFG_TEST_OS_TASK_PRIORITY (U_CFG_OS_PRIORITY_MIN + 5)
/** The minimum free stack space permitted for the main task,
* basically what's left as a margin for user code.
*/
#define U_CFG_TEST_OS_MAIN_TASK_MIN_FREE_STACK_BYTES (1024 * 5)
/* ----------------------------------------------------------------
* COMPILE-TIME MACROS: HW RELATED
* -------------------------------------------------------------- */
/** Pin A for GPIO testing: will be used as an output and
* must be connected to pin B via a 1k resistor.
*/
#ifndef U_CFG_TEST_PIN_A
# define U_CFG_TEST_PIN_A 38 // AKA 1.06
#endif
/** Pin B for GPIO testing: will be used as both an input and
* an open drain output and must be connected both to pin A via
* a 1k resistor and directly to pin C.
*/
#ifndef U_CFG_TEST_PIN_B
# define U_CFG_TEST_PIN_B 39 // AKA 1.07
#endif
/** Pin C for GPIO testing: must be connected to pin B,
* will be used as an input only.
*/
#ifndef U_CFG_TEST_PIN_C
# define U_CFG_TEST_PIN_C 40 // AKA 1.08
#endif
/** UART HW block for UART driver testing.
*/
#ifndef U_CFG_TEST_UART_A
# define U_CFG_TEST_UART_A 1
#endif
/** UART HW block for UART driver loopback testing where
* two UARTs are employed.
*/
#ifndef U_CFG_TEST_UART_B
# define U_CFG_TEST_UART_B -1
#endif
/** The baud rate to test the UART at.
*/
#ifndef U_CFG_TEST_BAUD_RATE
# define U_CFG_TEST_BAUD_RATE 115200
#endif
/** The length of UART buffer to use during testing.
*/
#ifndef U_CFG_TEST_UART_BUFFER_LENGTH_BYTES
# define U_CFG_TEST_UART_BUFFER_LENGTH_BYTES 1024
#endif
/** Tx pin for UART testing: should be connected either to the
* Rx UART pin or to U_CFG_TEST_PIN_UART_B_RXD if that is
* connected.
*/
#ifndef U_CFG_TEST_PIN_UART_A_TXD
# define U_CFG_TEST_PIN_UART_A_TXD 42 // AKA 1.10
#endif
/** Macro to return the TXD pin for UART A: on some
* platforms this is not a simple define.
*/
#define U_CFG_TEST_PIN_UART_A_TXD_GET U_CFG_TEST_PIN_UART_A_TXD
/** Rx pin for UART testing: should be connected either to the
* Tx UART pin or to U_CFG_TEST_PIN_UART_B_TXD if that is
* connected.
*/
#ifndef U_CFG_TEST_PIN_UART_A_RXD
# define U_CFG_TEST_PIN_UART_A_RXD 43 // AKA 1.11
#endif
/** Macro to return the RXD pin for UART A: on some
* platforms this is not a simple define.
*/
#define U_CFG_TEST_PIN_UART_A_RXD_GET U_CFG_TEST_PIN_UART_A_RXD
/** CTS pin for UART testing: should be connected either to the
* RTS UART pin or to U_CFG_TEST_PIN_UART_B_RTS if that is
* connected.
*/
#ifndef U_CFG_TEST_PIN_UART_A_CTS
# define U_CFG_TEST_PIN_UART_A_CTS 44 // AKA 1.12
#endif
/** Macro to return the CTS pin for UART A: on some
* platforms this is not a simple define.
*/
#define U_CFG_TEST_PIN_UART_A_CTS_GET U_CFG_TEST_PIN_UART_A_CTS
/** RTS pin for UART testing: should be connected either to the
* CTS UART pin or to U_CFG_TEST_PIN_UART_B_CTS if that is
* connected.
*/
#ifndef U_CFG_TEST_PIN_UART_A_RTS
# define U_CFG_TEST_PIN_UART_A_RTS 45 // AKA 1.13
#endif
/** Macro to return the RTS pin for UART A: on some
* platforms this is not a simple define.
*/
#define U_CFG_TEST_PIN_UART_A_RTS_GET U_CFG_TEST_PIN_UART_A_RTS
/** Tx pin for dual-UART testing: if present should be connected to
* U_CFG_TEST_PIN_UART_A_RXD.
*/
#ifndef U_CFG_TEST_PIN_UART_B_TXD
# define U_CFG_TEST_PIN_UART_B_TXD -1
#endif
/** Rx pin for dual-UART testing: if present should be connected to
* U_CFG_TEST_PIN_UART_A_TXD.
*/
#ifndef U_CFG_TEST_PIN_UART_B_RXD
# define U_CFG_TEST_PIN_UART_B_RXD -1
#endif
/** CTS pin for dual-UART testing: if present should be connected to
* U_CFG_TEST_PIN_UART_A_RTS.
*/
#ifndef U_CFG_TEST_PIN_UART_B_CTS
# define U_CFG_TEST_PIN_UART_B_CTS -1
#endif
/** RTS pin for UART testing: if present should be connected to
* U_CFG_TEST_PIN_UART_A_CTS.
*/
#ifndef U_CFG_TEST_PIN_UART_B_RTS
# define U_CFG_TEST_PIN_UART_B_RTS -1
#endif
#endif // _U_CFG_TEST_PLATFORM_SPECIFIC_H_
// End of file
|
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#!/usr/bin/env python
from cltree import basic,symbol,constant,variable
b = basic()
c = constant()
s = symbol()
v = variable()
assert isinstance(b,basic)
assert not isinstance(b,symbol)
assert not isinstance(b,constant)
assert not isinstance(b,variable)
assert isinstance(c,basic)
assert isinstance(c,constant)
assert not isinstance(c,symbol)
assert not isinstance(c,variable)
assert not isinstance(s,basic)
assert isinstance(s,symbol)
assert not isinstance(s,constant)
assert not isinstance(s,variable)
assert isinstance(v,basic)
assert not isinstance(v,symbol)
assert not isinstance(v,constant)
assert isinstance(v,variable)
print('b=',b)
assert repr(b)=='cltree.basic()'
print('s=',s)
assert repr(s)!='cltree.wrapped_symbol()' # because not isinstance(s,basic)
print('c=',c)
assert repr(c)=='cltree.constant()'
print('v=',v)
assert repr(v)=='cltree.wrapped_variable()'
print('ok')
|
#this code write by Afshin Zolfaghari
#https://github.com/AfshinZlfgh/
#24 nov 2018
#19:35 (Tehran time)
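# Illustrative usage (assumed from the argv handling below; the script name is
# hypothetical and not part of the original):
#   python2 decrypt_pumax.py key.bin ./decrypted/
# argv[1] is the XOR key file, argv[2] the output directory, which needs a
# trailing slash because it is concatenated directly with the output filename.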
import sys
import os
key = bytearray(open(sys.argv[1], 'rb').read())
outputPath = sys.argv[2]
inputFiles = []
for(dirpath, dirnames, filenames) in os.walk("./"):
inputFiles.extend(filenames)
break
for f in inputFiles:
if f.endswith(".pumax"):
print "decrypting %s ..."%f
inputFile = bytearray(open(("./"+f), 'rb').read())
if len(key) < len(inputFile):
size = len(key)
else:
size = len(inputFile)
print "file size is: %d"%len(inputFile)
for i in range(size):
inputFile[i] = inputFile[i] ^ key[i]
open(outputPath+f[:-6], 'wb').write(inputFile)
print "[*] decrypted \033[1;33m%s\033[1;m saved to \033[1;33m%s\033[1;m\n"%(f, (outputPath+f[:-6]))
|
const mongoose = require('mongoose');
const Schema = mongoose.Schema;
/**
* Database schema for fridge
* @module cricket/fridge.server.model
* @name Cricket Fridge Model
* @type Model
*/
/**
* @external USER
* @see {@link user-model.html}
*/
/**
* @external SCENARIO
* @see {@link scenario-model.html}
*/
/**
* @external PHAGE
* @see {@link phage-model.html}
*/
const CricketFridgeSchema = new Schema({
/**
* @member {external:USER} owner - user who owns the fridge
*/
owner: {
type: Schema.ObjectId,
ref: 'User'
},
/**
* @member {external:SCENARIO} scenario - scenario the fridge is for
*/
scenario: {
type: Schema.ObjectId,
ref: 'CricketScenario'
},
/**
* @member {boolean} accessGranted - has access been granted by instructor
* - when `false`, phage strains are the same for all users
* - when `true`, phage are generated using random numbers
* @default false
*/
accessGranted: {
type: Boolean,
default: false
},
/**
* @member {external:PHAGE[]} strains - list of phage strains
* in the fridge
*/
strains: [{
type: Schema.ObjectId,
ref: 'CricketPhage'
}],
/**
* @member {String} scenarioDetails
* - stringified object of the scenario details generated when
* the fridge was created and is needed for performing
* experiments
* - includes `interMuteDist`, `intraMuteDist`, `mutationFreq`, `recombinationFreq`,
* `deleteSizes`, `deleteSpots`, `usedDeleteSpots`,
* `usedShiftSpots`, `wtGene`, `realStops`, `framesStopList`
*/
scenarioDetails: String,
/**
* @member {String} guesses - stringified object of user's
* guesses for locations of deletions where the key is the
* strain number of the guess and the value is an array of
* boolean values indicating if guessed a deletion
* @example
* "{'1': [false, false, false, false, true, true, ...],
* '2': [true, true, true, false, false, false, ...],
* '3': [false, false, false, false, false, false, ...]
* }"
*/
guesses: String
});
CricketFridgeSchema.set('toJSON',{getters: true});
mongoose.model('CricketFridge', CricketFridgeSchema);
|
#!/usr/bin/env python
"""Simple parsers for OS X files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import datetime
import io
import os
import stat
import biplist
from future.utils import string_types
from grr_response_core.lib import parser
from grr_response_core.lib import parsers
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import plist as rdf_plist
class OSXUsersParser(parser.ArtifactFilesMultiParser):
"""Parser for Glob of /Users/*."""
output_types = [rdf_client.User]
supported_artifacts = ["MacOSUsers"]
blacklist = ["Shared"]
def ParseMultiple(self, stat_entries, knowledge_base):
"""Parse the StatEntry objects."""
_ = knowledge_base
for stat_entry in stat_entries:
# TODO: `st_mode` has to be an `int`, not `StatMode`.
if stat.S_ISDIR(int(stat_entry.st_mode)):
homedir = stat_entry.pathspec.path
username = os.path.basename(homedir)
if username not in self.blacklist:
yield rdf_client.User(username=username, homedir=homedir)
# TODO(hanuszczak): Why is a command parser in a file called `osx_file_parsers`?
class OSXSPHardwareDataTypeParser(parser.CommandParser):
"""Parser for the Hardware Data from System Profiler."""
output_types = [rdf_client.HardwareInfo]
supported_artifacts = ["OSXSPHardwareDataType"]
def Parse(self, cmd, args, stdout, stderr, return_val, knowledge_base):
"""Parse the system profiler output. We get it in the form of a plist."""
_ = stderr, args, knowledge_base # Unused
self.CheckReturn(cmd, return_val)
try:
plist = biplist.readPlist(io.BytesIO(stdout))
except biplist.InvalidPlistException as error:
raise parsers.ParseError("Failed to parse a plist file", cause=error)
if len(plist) > 1:
raise parsers.ParseError("SPHardwareDataType plist has too many items.")
hardware_list = plist[0]["_items"][0]
serial_number = hardware_list.get("serial_number", None)
system_product_name = hardware_list.get("machine_model", None)
bios_version = hardware_list.get("boot_rom_version", None)
yield rdf_client.HardwareInfo(
serial_number=serial_number,
bios_version=bios_version,
system_product_name=system_product_name)
class OSXLaunchdPlistParser(parsers.SingleFileParser):
"""Parse Launchd plist files into LaunchdPlist objects."""
output_types = [rdf_plist.LaunchdPlist]
supported_artifacts = [
"MacOSLaunchAgentsPlistFiles", "MacOSLaunchDaemonsPlistFiles"
]
def ParseFile(self, knowledge_base, pathspec, filedesc):
del knowledge_base # Unused.
del pathspec # Unused.
kwargs = {}
try:
kwargs["aff4path"] = filedesc.urn
except AttributeError:
pass
direct_copy_items = [
"Label", "Disabled", "UserName", "GroupName", "Program",
"StandardInPath", "StandardOutPath", "StandardErrorPath",
"LimitLoadToSessionType", "EnableGlobbing", "EnableTransactions",
"OnDemand", "RunAtLoad", "RootDirectory", "WorkingDirectory", "Umask",
"TimeOut", "ExitTimeOut", "ThrottleInterval", "InitGroups",
"StartOnMount", "StartInterval", "Debug", "WaitForDebugger", "Nice",
"ProcessType", "AbandonProcessGroup", "LowPriorityIO", "LaunchOnlyOnce"
]
string_array_items = [
"LimitLoadToHosts", "LimitLoadFromHosts", "LimitLoadToSessionType",
"ProgramArguments", "WatchPaths", "QueueDirectories"
]
flag_only_items = ["SoftResourceLimits", "HardResourceLimits", "Sockets"]
plist = {}
try:
plist = biplist.readPlist(filedesc)
except (biplist.InvalidPlistException, ValueError, IOError) as e:
plist["Label"] = "Could not parse plist: %s" % e
# These are items that can be directly copied
for key in direct_copy_items:
kwargs[key] = plist.get(key)
# These could be a string, they could be an array, we don't know and neither
# does Apple so we check.
for key in string_array_items:
elements = plist.get(key)
if isinstance(elements, string_types):
kwargs[key] = [elements]
else:
kwargs[key] = elements
# These are complex items that can appear in multiple data structures
# so we only flag on their existence
for key in flag_only_items:
if plist.get(key):
kwargs[key] = True
if plist.get("inetdCompatability") is not None:
kwargs["inetdCompatabilityWait"] = plist.get("inetdCompatability").get(
"Wait")
keepalive = plist.get("KeepAlive")
if isinstance(keepalive, bool) or keepalive is None:
kwargs["KeepAlive"] = keepalive
else:
keepalivedict = {}
keepalivedict["SuccessfulExit"] = keepalive.get("SuccessfulExit")
keepalivedict["NetworkState"] = keepalive.get("NetworkState")
pathstates = keepalive.get("PathState")
if pathstates is not None:
keepalivedict["PathState"] = []
for pathstate in pathstates:
keepalivedict["PathState"].append(
rdf_plist.PlistBoolDictEntry(
name=pathstate, value=pathstates[pathstate]))
otherjobs = keepalive.get("OtherJobEnabled")
if otherjobs is not None:
keepalivedict["OtherJobEnabled"] = []
for otherjob in otherjobs:
keepalivedict["OtherJobEnabled"].append(
rdf_plist.PlistBoolDictEntry(
name=otherjob, value=otherjobs[otherjob]))
kwargs["KeepAliveDict"] = rdf_plist.LaunchdKeepAlive(**keepalivedict)
envvars = plist.get("EnvironmentVariables")
if envvars is not None:
kwargs["EnvironmentVariables"] = []
for envvar in envvars:
kwargs["EnvironmentVariables"].append(
rdf_plist.PlistStringDictEntry(name=envvar, value=envvars[envvar]))
startcalendarinterval = plist.get("StartCalendarInterval")
if startcalendarinterval is not None:
if isinstance(startcalendarinterval, dict):
kwargs["StartCalendarInterval"] = [
rdf_plist.LaunchdStartCalendarIntervalEntry(
Minute=startcalendarinterval.get("Minute"),
Hour=startcalendarinterval.get("Hour"),
Day=startcalendarinterval.get("Day"),
Weekday=startcalendarinterval.get("Weekday"),
Month=startcalendarinterval.get("Month"))
]
else:
kwargs["StartCalendarInterval"] = []
for entry in startcalendarinterval:
kwargs["StartCalendarInterval"].append(
rdf_plist.LaunchdStartCalendarIntervalEntry(
Minute=entry.get("Minute"),
Hour=entry.get("Hour"),
Day=entry.get("Day"),
Weekday=entry.get("Weekday"),
Month=entry.get("Month")))
yield rdf_plist.LaunchdPlist(**kwargs)
class OSXInstallHistoryPlistParser(parsers.SingleFileParser):
"""Parse InstallHistory plist files into SoftwarePackage objects."""
output_types = [rdf_client.SoftwarePackages]
supported_artifacts = ["MacOSInstallationHistory"]
def ParseFile(self, knowledge_base, pathspec, filedesc):
del knowledge_base # Unused.
del pathspec # Unused.
try:
plist = biplist.readPlist(filedesc)
except biplist.InvalidPlistException as error:
raise parsers.ParseError("Failed to parse a plist file", cause=error)
if not isinstance(plist, list):
raise parsers.ParseError(
"InstallHistory plist is a '%s', expecting a list" % type(plist))
packages = []
for sw in plist:
packages.append(
rdf_client.SoftwarePackage.Installed(
name=sw.get("displayName"),
version=sw.get("displayVersion"),
description=",".join(sw.get("packageIdentifiers")),
# TODO(hanuszczak): make installed_on an RDFDatetime
installed_on=_DateToEpoch(sw.get("date"))))
if packages:
yield rdf_client.SoftwarePackages(packages=packages)
def _DateToEpoch(date):
"""Converts python datetime to epoch microseconds."""
tz_zero = datetime.datetime.utcfromtimestamp(0)
diff_sec = int((date - tz_zero).total_seconds())
return diff_sec * 1000000
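# Illustrative example (not part of the original module):
# _DateToEpoch(datetime.datetime(1970, 1, 1, 0, 0, 1)) == 1000000,
# i.e. one second after the Unix epoch expressed in microseconds.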
|
import torch
import torch.nn as nn
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torchvision.models as torch_models
import torch.nn.functional as F
import torch.optim as optim
class ConvNetMaker(nn.Module):
"""
Creates a simple (plane) convolutional neural network
"""
def __init__(self, layers):
"""
Makes a cnn using the provided list of layers specification
The details of this list are available in the paper
:param layers: a list of strings, representing layers like ['Conv32', 'MaxPool', 'FC10']
"""
super(ConvNetMaker, self).__init__()
self.conv_layers = []
self.fc_layers = []
h, w, d = 32, 32, 3
previous_layer_filter_count = 3
previous_layer_size = h * w * d
num_fc_layers_remained = len([1 for l in layers if l.startswith('FC')])
for layer in layers:
if layer.startswith('Conv'):
filter_count = int(layer[4:])
self.conv_layers += [nn.Conv2d(previous_layer_filter_count, filter_count, kernel_size=3, padding=1),
nn.BatchNorm2d(filter_count), nn.ReLU(inplace=True)]
previous_layer_filter_count = filter_count
d = filter_count
previous_layer_size = h * w * d
elif layer.startswith('MaxPool'):
self.conv_layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
h, w = int(h / 2.0), int(w / 2.0)
previous_layer_size = h * w * d
elif layer.startswith('FC'):
num_fc_layers_remained -= 1
current_layer_size = int(layer[2:])
if num_fc_layers_remained == 0:
self.fc_layers += [nn.Linear(previous_layer_size, current_layer_size)]
else:
self.fc_layers += [nn.Linear(previous_layer_size, current_layer_size), nn.ReLU(inplace=True)]
previous_layer_size = current_layer_size
conv_layers = self.conv_layers
fc_layers = self.fc_layers
self.conv_layers = nn.Sequential(*conv_layers)
self.fc_layers = nn.Sequential(*fc_layers)
def forward(self, x):
x = self.conv_layers(x)
x = x.view(x.size(0), -1)
x = self.fc_layers(x)
return x
plane_cifar10_book = {
'2': ['Conv16', 'MaxPool', 'Conv16', 'MaxPool', 'FC10'],
'4': ['Conv16', 'Conv16', 'MaxPool', 'Conv32', 'Conv32', 'MaxPool', 'FC10'],
'6': ['Conv16', 'Conv16', 'MaxPool', 'Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool', 'FC10'],
'8': ['Conv16', 'Conv16', 'MaxPool', 'Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool',
'Conv128', 'Conv128','MaxPool', 'FC64', 'FC10'],
'10': ['Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool', 'Conv128', 'Conv128', 'MaxPool',
'Conv256', 'Conv256', 'Conv256', 'Conv256' , 'MaxPool', 'FC128' ,'FC10'],
}
plane_cifar100_book = {
'2': ['Conv32', 'MaxPool', 'Conv32', 'MaxPool', 'FC100'],
'4': ['Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool', 'FC100'],
'6': ['Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool','Conv128', 'Conv128' ,'FC100'],
'8': ['Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool', 'Conv128', 'Conv128', 'MaxPool',
'Conv256', 'Conv256','MaxPool', 'FC64', 'FC100'],
'10': ['Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool', 'Conv128', 'Conv128', 'MaxPool',
'Conv256', 'Conv256', 'Conv256', 'Conv256' , 'MaxPool', 'FC512', 'FC100'],
}
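# A minimal usage sketch (not from the original source): build the 4-layer
# CIFAR-10 variant from the book above and push a dummy batch of 32x32 RGB
# images through it; the output should carry one logit per class.
if __name__ == '__main__':
    model = ConvNetMaker(plane_cifar10_book['4'])
    dummy_batch = torch.randn(8, 3, 32, 32)  # (batch, channels, height, width)
    logits = model(dummy_batch)
    print(logits.shape)  # expected: torch.Size([8, 10])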
|
import Axios from '@/utils/interceptor'
const BASE_URL = '/system/config'
export function getSystemConfig() {
return Axios({
method: 'get',
baseURL: BASE_URL,
url: '/getSystemConfig'
})
}
export function updateSystemConfig(systemConfig) {
return Axios({
method: 'put',
baseURL: BASE_URL,
url: '/updateSystemConfig',
data: systemConfig
})
}
|
#
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:twisted
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import _thrift.services.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class UserModel:
"""
Attributes:
- user_id
- username
- password
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'user_id', None, None, ), # 1
(2, TType.STRING, 'username', None, None, ), # 2
(3, TType.STRING, 'password', None, None, ), # 3
)
def __init__(self, user_id=None, username=None, password=None,):
self.user_id = user_id
self.username = username
self.password = password
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.user_id = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.username = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.password = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('UserModel')
if self.user_id is not None:
oprot.writeFieldBegin('user_id', TType.I32, 1)
oprot.writeI32(self.user_id)
oprot.writeFieldEnd()
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 2)
oprot.writeString(self.username)
oprot.writeFieldEnd()
if self.password is not None:
oprot.writeFieldBegin('password', TType.STRING, 3)
oprot.writeString(self.password)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
import * as Constants from '../../common/constants'
import s from '../../common/locales/strings'
export default function (state = null, action) {
switch (action.type) {
    case Constants.AUTH_UPDATE_PASSWORD: {
// action.data.passwordStatus
const status = action.data.passwordStatus
const array = [
{ title: s.strings.must_ten_characters, value: !status.tooShort },
{
title: s.strings.must_one_lowercase,
value: !status.noLowerCase
},
{
title: s.strings.must_one_uppercase,
value: !status.noUpperCase
},
{ title: s.strings.must_one_number, value: !status.noNumber }
]
return {
passed: status.passed,
secondsToCrack: action.data.passwordCheckString,
list: array
      }
    }
default:
return state
}
}
|
'''
XXX:
Author: Pontus Stenetorp <pontus stenetorp se>
Version: 2011-04-09
'''
from sys import path as sys_path
from os.path import join as path_join
from os.path import dirname
from itertools import chain
from liblinear import LibLinearClassifier
sys_path.append(path_join(dirname(__file__), '..'))
from features import AbstractFeature, SIMPLE_SPAN_INTERNAL_CLASSES
from classifier.simstring.classifier import (SimStringEnsembleFeature,
SimStringGazetterEnsembleFeature, TsuruokaEnsembleFeature)
class SimpleInternalEnsembleFeature(AbstractFeature):
def __init__(self):
self.features = [c() for c in chain(
SIMPLE_SPAN_INTERNAL_CLASSES,
(
CollinsPatternStringFeature,
CollapsedCollinsPatternStringFeature,
FormattingStringFeature,
),
)
]
def get_id(self):
return 'SIMPLE-INTERNAL-ENSEMBLE'
def featurise(self, document, sentence, annotation):
for feature in self.features:
for f_tup in feature.featurise(document, sentence, annotation):
#print feature.get_id(), f_tup[0], f_tup[1]
yield (f_tup[0] + '-(<' + feature.get_id() + '>)', f_tup[1])
#assert False
from features import (
# Single
CollinsPatternStringFeature,
CollapsedCollinsPatternStringFeature,
FormattingStringFeature,
# Sentence
SentenceCollinsPatternStringFeature,
SentenceCollapsedCollinsPatternStringFeature,
SentenceLowerCaseStringFeature,
SentencePorterStemStringFeature,
# Window
WindowStringFeature,
WindowLowerCaseStringFeature,
WindowPrefixStringFeature,
WindowSuffixStringFeature,
WindowPorterStemStringFeature,
WindowCollinsPatternStringFeature,
WindowCollapsedCollinsPatternStringFeature,
WindowFormattingStringFeature,
)
class CompetitiveEnsembleFeature(AbstractFeature):
def __init__(self):
self.features = [c() for c in chain(
SIMPLE_SPAN_INTERNAL_CLASSES,
# Single classes
(
# Single token internal
CollinsPatternStringFeature,
CollapsedCollinsPatternStringFeature,
FormattingStringFeature,
# Sentence level
#SentenceCollinsPatternStringFeature,
#SentenceCollapsedCollinsPatternStringFeature,
#SentenceLowerCaseStringFeature,
#SentencePorterStemStringFeature,
# Word window level
WindowStringFeature,
#WindowLowerCaseStringFeature,
#WindowPrefixStringFeature,
#WindowSuffixStringFeature,
#WindowPorterStemStringFeature,
#WindowCollinsPatternStringFeature,
#WindowCollapsedCollinsPatternStringFeature,
#WindowFormattingStringFeature,
)
)]
def get_id(self):
return 'COMPETITIVE-ENSEMBLE'
def featurise(self, document, sentence, annotation):
for feature in self.features:
for f_tup in feature.featurise(document, sentence, annotation):
#print feature.get_id(), f_tup[0], f_tup[1]
yield (f_tup[0] + '-(<' + feature.get_id() + '>)', f_tup[1])
#assert False
'''
class SimStringCompetitiveEnsembleFeature(CompetitiveEnsembleFeature):
def __init__(self):
CompetitiveEnsembleFeature.__init__(self)
self.features.add(SimStringGazetterEnsembleFeature)
'''
class SimpleInternalEnsembleClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = SimpleInternalEnsembleFeature
class CompetitiveEnsembleClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = CompetitiveEnsembleFeature
DONT_FILTER_TURKU = True
class SimStringCompetitiveEnsembleFeature(AbstractFeature):
def __init__(self):
self.features = [c() for c in [
#CompetitiveEnsembleFeature,
#SimpleInternalEnsembleFeature,
# SimString Features
SimStringEnsembleFeature,
#SimStringGazetterEnsembleFeature
# TODO: Contextual SimString
]]
def get_id(self):
return 'SIMSTRING-COMPETITIVE-ENSEMBLE'
def featurise(self, document, sentence, annotation):
for feature in self.features:
for f_tup in feature.featurise(document, sentence, annotation):
if DONT_FILTER_TURKU or 'turku' not in f_tup[0]:
yield (f_tup[0] + '-(<' + feature.get_id() + '>)', f_tup[1])
class SimStringCompetitiveEnsembleClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = SimStringCompetitiveEnsembleFeature
class InternalFeature(AbstractFeature):
def __init__(self):
self.features = [c() for c in [
#CompetitiveEnsembleFeature,
SimpleInternalEnsembleFeature,
# SimString Features
#SimStringEnsembleFeature,
#SimStringGazetterEnsembleFeature
# TODO: Contextual SimString
]]
def get_id(self):
return 'INTERNAL'
def featurise(self, document, sentence, annotation):
for feature in self.features:
for f_tup in feature.featurise(document, sentence, annotation):
yield (f_tup[0] + '-(<' + feature.get_id() + '>)', f_tup[1])
class InternalClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = InternalFeature
from features import (SpanBoWFeature, SpanHeadFeature, SpanHeadWindowFeature)
class NPInternalFeature(AbstractFeature):
def __init__(self):
self.features =[c() for c in (
InternalFeature,
SpanBoWFeature,
SpanHeadFeature,
SpanHeadWindowFeature,
)]
def get_id(self):
return 'NP-INTERNAL'
def featurise(self, document, sentence, annotation):
for feature in self.features:
for f_tup in feature.featurise(document, sentence, annotation):
yield (f_tup[0] + '-(<' + feature.get_id() + '>)', f_tup[1])
class NPInternalClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = NPInternalFeature
class SimStringInternalFeature(AbstractFeature):
def __init__(self):
self.features = [c() for c in [
#CompetitiveEnsembleFeature,
SimpleInternalEnsembleFeature,
#XXX: XXX: HACK! REMOVE!
#WindowStringFeature,
# SimString Features
SimStringEnsembleFeature,
#SimStringGazetterEnsembleFeature
# TODO: Contextual SimString
]]
def get_id(self):
return 'SIMSTRING-INTERNAL'
def featurise(self, document, sentence, annotation):
for feature in self.features:
for f_tup in feature.featurise(document, sentence, annotation):
if DONT_FILTER_TURKU or 'turku' not in f_tup[0]:
yield (f_tup[0] + '-(<' + feature.get_id() + '>)', f_tup[1])
class SimStringInternalClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = SimStringInternalFeature
class SimStringNPInternalFeature(AbstractFeature):
def __init__(self):
self.features =[c() for c in (
SimStringInternalFeature,
NPInternalFeature,
)]
def get_id(self):
return 'SIMSTRING-NP-INTERNAL'
def featurise(self, document, sentence, annotation):
for feature in self.features:
for f_tup in feature.featurise(document, sentence, annotation):
yield (f_tup[0] + '-(<' + feature.get_id() + '>)', f_tup[1])
class SimStringNPInternalClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = SimStringNPInternalFeature
class SimStringTsuruokaFeature(AbstractFeature):
def __init__(self):
self.features = [c() for c in [
#CompetitiveEnsembleFeature,
# SimString Features
TsuruokaEnsembleFeature,
SimStringEnsembleFeature,
#SimStringGazetterEnsembleFeature
# TODO: Contextual SimString
]]
def get_id(self):
return 'SIMSTRING-TSURUOKA'
def featurise(self, document, sentence, annotation):
for feature in self.features:
for f_tup in feature.featurise(document, sentence, annotation):
if DONT_FILTER_TURKU or 'turku' not in f_tup[0]:
yield (f_tup[0] + '-(<' + feature.get_id() + '>)', f_tup[1])
class SimStringTsuruokaInternalFeature(AbstractFeature):
def __init__(self):
self.features = [c() for c in [
#CompetitiveEnsembleFeature,
SimpleInternalEnsembleFeature,
# SimString Features
TsuruokaEnsembleFeature,
SimStringEnsembleFeature,
#SimStringGazetterEnsembleFeature
# TODO: Contextual SimString
]]
def get_id(self):
return 'SIMSTRING-TSURUOKA-INTERNAL'
def featurise(self, document, sentence, annotation):
for feature in self.features:
for f_tup in feature.featurise(document, sentence, annotation):
if DONT_FILTER_TURKU or 'turku' not in f_tup[0]:
yield (f_tup[0] + '-(<' + feature.get_id() + '>)', f_tup[1])
class SimStringTsuruokaInternalClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = SimStringTsuruokaInternalFeature
class SimStringTsuruokaClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = SimStringTsuruokaFeature
class TsuruokaInternalFeature(AbstractFeature):
def __init__(self):
self.features = [c() for c in [
#CompetitiveEnsembleFeature,
SimpleInternalEnsembleFeature,
# SimString Features
TsuruokaEnsembleFeature,
#SimStringGazetterEnsembleFeature
# TODO: Contextual SimString
]]
def get_id(self):
return 'TSURUOKA-INTERNAL'
def featurise(self, document, sentence, annotation):
for feature in self.features:
for f_tup in feature.featurise(document, sentence, annotation):
if DONT_FILTER_TURKU or 'turku' not in f_tup[0]:
yield (f_tup[0] + '-(<' + feature.get_id() + '>)', f_tup[1])
class TsuruokaInternalClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = TsuruokaInternalFeature
class TsuruokaClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = TsuruokaEnsembleFeature
class GazetterInternalFeature(AbstractFeature):
def __init__(self):
self.features = [c() for c in [
#CompetitiveEnsembleFeature,
SimpleInternalEnsembleFeature,
# SimString Features
#SimStringEnsembleFeature,
SimStringGazetterEnsembleFeature
# TODO: Contextual SimString
]]
def get_id(self):
return 'SIMSTRING-GAZETTER'
def featurise(self, document, sentence, annotation):
for feature in self.features:
for f_tup in feature.featurise(document, sentence, annotation):
if DONT_FILTER_TURKU or 'turku' not in f_tup[0]:
yield (f_tup[0] + '-(<' + feature.get_id() + '>)', f_tup[1])
class GazetterInternalClassifier(LibLinearClassifier):
def __init__(self):
LibLinearClassifier.__init__(self)
self.feature_class = GazetterInternalFeature
|
import StreamSplitter from 'stream-splitter';
// Download operations
const downloadOperations = [
{
type: 'start',
pattern: /Starting download/,
props: []
},
{
type: 'progress',
pattern: /\[\s+(\d+)%][\s.]+\[\s*(\d+\.?\d)KB\/s]/,
props: [
'progress',
'speed'
]
},
{
type: 'connection-finished',
pattern: /Connection\s(\d+)\sfinished/,
props: [
'number'
]
}
];
// Get a stream that splits on a token
export function splitStream(stream, token = '\n') {
return stream.pipe(StreamSplitter(token));
}
// Parse stdout and return operation type and params
export function parseProgress(rawDownloadProgress) {
const operation = downloadOperations.find(item => rawDownloadProgress.match(item.pattern));
if (!operation) {
return {
type: 'none'
};
}
const matchedPattern = rawDownloadProgress.match(operation.pattern);
matchedPattern.shift();
const result = operation.props.reduce((acc, curr) => (
{
type: acc.type,
data: {
...acc.data,
[curr]: matchedPattern.shift()
}
}
), {
type: operation.type,
data: {}
});
return result;
}
|
import React from 'react'
import { FormattedMessage } from 'react-intl'
import { NavLink, Link } from 'react-router-dom'
import FA from 'react-fontawesome'
const Header = () => {
return (
<nav className="navbar navbar-expand-lg navbar-dark bg-dark">
<a className="navbar-brand" href="/"><FormattedMessage id="AppName"/></a>
<button className="navbar-toggler" type="button" data-toggle="collapse" data-target = "#navbarColor01" aria-controls = "navbarColor01" aria-expanded = "false" aria-label = "Toggle navigation">
<span className="navbar-toggler-icon"></span>
</button>
<div className="collapse navbar-collapse">
<ul className="navbar-nav mr-auto">
<li className="nav-item">
<NavLink to="/alerts" className="nav-link"><FormattedMessage id="menu.alerts"/></NavLink>
</li>
<li className="nav-item">
<NavLink to="/silences" className="nav-link"><FormattedMessage id="menu.silences"/></NavLink>
</li>
<li className="nav-item">
<NavLink to="/status" className="nav-link"><FormattedMessage id="menu.status"/></NavLink>
</li>
<li className="nav-item">
<NavLink to="/enter" className="nav-link"><FormattedMessage id="menu.setting"/></NavLink>
</li>
</ul>
<form className="form-inline">
<Link to="/silences/new" className="btn btn-outline-info my-2 my-sm-0"><FA name="bell-slash"/> <FormattedMessage id="alerts.new_silence"/></Link>
</form>
</div>
</nav>
)
}
export default Header;
|
#!/usr/bin/env python3
import argparse
import json
import os
from PIL import Image
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument("input")
args = parser.parse_args()
img = Image.open(args.input)
base_name = os.path.splitext(args.input)[0]
print(f"size: {img.size}")
print(f"info: {img.info}")
with open(f"{base_name}.json", "w") as f1:
json.dump(img.info, f1)
f1.write("\n")
as_bytes = img.convert("RGBA").tobytes()
    outname = base_name + ".bin"
print(f"Writing to {outname}")
with open(outname, "wb") as f2:
f2.write(as_bytes)
return 0
if __name__ == "__main__":
raise SystemExit(main())
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import unhexlify
from decimal import Decimal, ROUND_DOWN
from subprocess import CalledProcessError
import hashlib
import inspect
import json
import logging
import os
import re
import time
import unittest
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
from typing import Callable, Optional
logger = logging.getLogger("TestFramework.utils")
# Added in Vadercoin for easier rebranding to other projects.
config_file = "vadercoin.conf"
# Assert functions
##################
def assert_approx(v, vexp, vspan=0.00001):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s VADE too low! (Should be %s VADE)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s VADE too high! (Should be %s VADE)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode: int, output: str, fun: Callable, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode: the process return code.
output: [a substring of] the process output.
fun: the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code: Optional[int], message: Optional[str], fun: Callable, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code: the error code returned by the RPC call (defined in src/rpc/protocol.h).
Set to None if checking the error code is not required.
message: [a substring of] the error string returned by the RPC call.
Set to None if checking the error string is not required.
fun: the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError("Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError("String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError("String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting VADE values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def EncodeDecimal(o):
if isinstance(o, Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until_helper(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
"""Sleep until the predicate resolves to be True.
Warning: Note that this method is not recommended to be used in tests as it is
not aware of the context of the test framework. Using the `wait_until()` members
from `VadercoinTestFramework` or `P2PInterface` class ensures the timeout is
properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in
`p2p.py` has a preset lock.
"""
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
timeout = timeout * timeout_factor
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
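# A minimal sketch (not from the original suite): poll a plain in-memory
# counter until the predicate becomes true, using the default 60 second timeout.
def _example_wait_until_helper():
    state = {"ticks": 0}
    def _advance():
        state["ticks"] += 1
        return state["ticks"] >= 3
    wait_until_helper(_advance)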
def sha256sum_file(filename):
h = hashlib.sha256()
with open(filename, 'rb') as f:
d = f.read(4096)
while len(d) > 0:
h.update(d)
d = f.read(4096)
return h.digest()
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 12
# Don't assign rpc or p2p ports lower than this
PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000))
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
        coveragedir (str): Directory to write the RPC coverage log file to
    Returns:
        AuthServiceProxy: convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = int(timeout)
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert n <= MAX_NODES
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, chain, rpchost):
rpc_u, rpc_p = get_auth_cookie(datadir, chain)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n, chain):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
write_config(os.path.join(datadir, config_file), n=n, chain=chain)
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def write_config(config_path, *, n, chain, extra_config=""):
# Translate chain subdirectory name to config name
if chain == 'testnet3':
chain_name_conf_arg = 'testnet'
chain_name_conf_section = 'test'
else:
chain_name_conf_arg = chain
chain_name_conf_section = chain
with open(config_path, 'w', encoding='utf8') as f:
if chain_name_conf_arg:
f.write("{}=1\n".format(chain_name_conf_arg))
if chain_name_conf_section:
f.write("[{}]\n".format(chain_name_conf_section))
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("fallbackfee=0.0002\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("dnsseed=0\n")
f.write("fixedseeds=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
f.write("natpmp=0\n")
f.write("shrinkdebugfile=0\n")
# To improve SQLite wallet performance so that the tests don't timeout, use -unsafesqlitesync
f.write("unsafesqlitesync=1\n")
f.write(extra_config)
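# A minimal sketch (not from the original suite): PortSeed.n must be set before
# ports can be derived, after which a throwaway regtest config can be written.
def _example_write_config():
    import tempfile
    PortSeed.n = 1
    path = os.path.join(tempfile.mkdtemp(), config_file)
    write_config(path, n=0, chain='regtest')
    return path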
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, config_file), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir, chain):
user = None
password = None
if os.path.isfile(os.path.join(datadir, config_file)):
with open(os.path.join(datadir, config_file), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
try:
with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
except OSError:
pass
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir, chain):
if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, chain, ".cookie"))
def softfork_active(node, key):
"""Return whether a softfork is active."""
return node.getblockchaininfo()['softforks'][key]['active']
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for _ in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert len(utxos) >= count
return utxos
def chain_transaction(node, parent_txids, vouts, value, fee, num_outputs):
"""Build and send a transaction that spends the given inputs (specified
by lists of parent_txid:vout each), with the desired total value and fee,
equally divided up to the desired number of outputs.
Returns a tuple with the txid and the amount sent per output.
"""
send_value = satoshi_round((value - fee)/num_outputs)
inputs = []
for (txid, vout) in zip(parent_txids, vouts):
inputs.append({'txid' : txid, 'vout' : vout})
outputs = {}
for _ in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs, 0, True)
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx['hex'])
fulltx = node.getrawtransaction(txid, 1)
assert len(fulltx['vout']) == num_outputs # make sure we didn't generate a change output
return (txid, send_value)
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for _ in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = []
from .messages import CTxOut
txout = CTxOut()
txout.nValue = 0
txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
for _ in range(128):
txouts.append(txout)
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
from .messages import tx_from_hex
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
tx = tx_from_hex(rawtx)
for txout in txouts:
tx.vout.append(txout)
newtx = tx.serialize().hex()
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], 0)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
    # Each transaction is roughly 66k bytes; 14 of them come close to the
    # 1MB block limit.
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if addr == tx["vout"][i]["scriptPubKey"]["address"]:
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
def modinv(a, n):
"""Compute the modular inverse of a modulo n using the extended Euclidean
Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
"""
# TODO: Change to pow(a, -1, n) available in Python 3.8
t1, t2 = 0, 1
r1, r2 = n, a
while r2 != 0:
q = r1 // r2
t1, t2 = t2, t1 - q * t2
r1, r2 = r2, r1 - q * r2
if r1 > 1:
return None
if t1 < 0:
t1 += n
return t1
class TestFrameworkUtil(unittest.TestCase):
def test_modinv(self):
test_vectors = [
[7, 11],
[11, 29],
[90, 13],
[1891, 3797],
[6003722857, 77695236973],
]
for a, n in test_vectors:
self.assertEqual(modinv(a, n), pow(a, n-2, n))
|
export default function(nodes, v) {
nodes.forEach(function(n) {
n[v.__x] = n[v.__x] || 0;
n[v.__y] = n[v.__y] || 0;
n[v.__width] = n[v.__width] || v.size[0];
n[v.__height] = n[v.__height] || v.size[1];
n[v.__cx] = n[v.__cx] || n[v.__x] + n[v.__width] / 2;
n[v.__cy] = n[v.__cy] || n[v.__y] + n[v.__height] / 2;
});
return nodes;
}
|
import React from 'react';
import {Flex} from '@rebass/grid/emotion';
import {NavBox, SEO, Header} from '../components';
import {BlockText, CenteredFlex} from '../components/ui';
// TODO: could these be autogenerated from the set of product types/associated images?
import XboxOne from '../images/xbox-one.png';
import XboxOneS from '../images/xbox-one-s.png';
import PS4 from '../images/ps4.png';
import PS4Slim from '../images/ps4-slim.png';
import NintendoSwitch from '../images/nintendo-switch.png';
import Accessory from '../images/controller-grip.png';
const ShopPage = () => {
return (
<Flex flexDirection="column">
<Header fixed={true} />
<SEO title="Home" keywords={['console style', 'console skins']} />
<CenteredFlex mt="2em">
<h1
css={{
fontSize: '3em',
borderBottom: '1px solid black',
textAlign: 'center',
paddingBottom: '0.5em',
width: '50%'
}}
>
<BlockText>Catalog</BlockText>
</h1>
</CenteredFlex>
<CenteredFlex justifyContent="space-evenly" mt="3em">
{/* TODO: links for console types should be autogenerated based on provided product types */}
<NavBox
subpages={[
{title: 'Xbox One', image: XboxOne, link: 'xbox-one-skins'},
{title: 'Xbox One S', image: XboxOneS, link: 'xbox-one-s-skins'}
]}
></NavBox>
<NavBox
subpages={[
{title: 'PlayStation 4', image: PS4, link: 'playstation-4-skins'},
{
title: 'PlayStation 4 Slim',
image: PS4Slim,
link: 'playstation-4-slim-skins'
}
]}
></NavBox>
</CenteredFlex>
<CenteredFlex justifyContent="space-evenly" mt="4em">
<NavBox
subpages={[
{
title: 'Nintendo Switch',
image: NintendoSwitch,
link: 'nintendo-switch-skins'
}
]}
></NavBox>
<NavBox
subpages={[
{
title: 'Accessories',
image: Accessory,
link: 'nintendo-switch-skins'
}
]}
></NavBox>
</CenteredFlex>
</Flex>
);
};
export default ShopPage;
|
/**
* @author Richard Davey <rich@photonstorm.com>
* @copyright 2016 Photon Storm Ltd.
* @license {@link https://github.com/photonstorm/phaser/blob/master/license.txt|MIT License}
*/
/**
* The Crop component provides the ability to crop a texture based Game Object to a defined rectangle,
* which can be updated in real-time.
*
* @class
*/
Phaser.Component.Crop = function () {};
Phaser.Component.Crop.prototype = {
/**
* The Rectangle used to crop the texture this Game Object uses.
* Set this property via `crop`.
* If you modify this property directly you must call `updateCrop` in order to have the change take effect.
* @property {Phaser.Rectangle} cropRect
* @default
*/
cropRect: null,
/**
* @property {Phaser.Rectangle} _crop - Internal cache var.
* @private
*/
_crop: null,
/**
* Crop allows you to crop the texture being used to display this Game Object.
* Setting a crop rectangle modifies the core texture frame. The Game Object width and height properties will be adjusted accordingly.
*
* Cropping takes place from the top-left and can be modified in real-time either by providing an updated rectangle object to this method,
* or by modifying `cropRect` property directly and then calling `updateCrop`.
*
* The rectangle object given to this method can be either a `Phaser.Rectangle` or any other object
* so long as it has public `x`, `y`, `width`, `height`, `right` and `bottom` properties.
*
* A reference to the rectangle is stored in `cropRect` unless the `copy` parameter is `true`,
* in which case the values are duplicated to a local object.
*
* @method
* @param {Phaser.Rectangle} rect - The Rectangle used during cropping. Pass null or no parameters to clear a previously set crop rectangle.
* @param {boolean} [copy=false] - If false `cropRect` will be stored as a reference to the given rect. If true it will copy the rect values into a local Phaser Rectangle object stored in cropRect.
*/
crop: function (rect, copy) {
if (copy === undefined) { copy = false; }
if (rect)
{
if (copy && this.cropRect !== null)
{
this.cropRect.setTo(rect.x, rect.y, rect.width, rect.height);
}
else if (copy && this.cropRect === null)
{
this.cropRect = new Phaser.Rectangle(rect.x, rect.y, rect.width, rect.height);
}
else
{
this.cropRect = rect;
}
this.updateCrop();
}
else
{
this._crop = null;
this.cropRect = null;
this.resetFrame();
}
},
/**
* If you have set a crop rectangle on this Game Object via `crop` and since modified the `cropRect` property,
* or the rectangle it references, then you need to update the crop frame by calling this method.
*
* @method
*/
updateCrop: function () {
if (!this.cropRect)
{
return;
}
var oldX = this.texture.crop.x;
var oldY = this.texture.crop.y;
var oldW = this.texture.crop.width;
var oldH = this.texture.crop.height;
this._crop = Phaser.Rectangle.clone(this.cropRect, this._crop);
this._crop.x += this._frame.x;
this._crop.y += this._frame.y;
var cx = Math.max(this._frame.x, this._crop.x);
var cy = Math.max(this._frame.y, this._crop.y);
var cw = Math.min(this._frame.right, this._crop.right) - cx;
var ch = Math.min(this._frame.bottom, this._crop.bottom) - cy;
this.texture.crop.x = cx;
this.texture.crop.y = cy;
this.texture.crop.width = cw;
this.texture.crop.height = ch;
this.texture.frame.width = Math.min(cw, this.cropRect.width);
this.texture.frame.height = Math.min(ch, this.cropRect.height);
this.texture.width = this.texture.frame.width;
this.texture.height = this.texture.frame.height;
this.texture._updateUvs();
if (this.tint !== 0xffffff && (oldX !== cx || oldY !== cy || oldW !== cw || oldH !== ch))
{
this.texture.requiresReTint = true;
}
}
};
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django_extensions.db.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("meupet", "0025_auto_20161106_1143")]
operations = [
migrations.AddField(model_name="pet", name="active", field=models.BooleanField(default=True)),
migrations.AddField(
model_name="pet", name="request_key", field=models.CharField(blank=True, max_length=40)
),
migrations.AddField(
model_name="pet", name="request_sent", field=models.DateTimeField(blank=True, null=True)
),
migrations.AlterField(
model_name="pet",
name="created",
field=django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
migrations.AlterField(
model_name="pet",
name="modified",
field=django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
migrations.AlterField(
model_name="pet",
name="profile_picture",
field=models.ImageField(upload_to="pet_profiles", help_text="Maximum image size is 8MB"),
),
migrations.AlterField(
model_name="pet",
name="sex",
field=models.CharField(choices=[("FE", "Female"), ("MA", "Male")], blank=True, max_length=2),
),
migrations.AlterField(
model_name="pet",
name="size",
field=models.CharField(
choices=[("SM", "Small"), ("MD", "Medium"), ("LG", "Large")], blank=True, max_length=2
),
),
migrations.AlterField(
model_name="pet",
name="status",
field=models.CharField(
choices=[("MI", "Missing"), ("FA", "For Adoption"), ("AD", "Adopted"), ("FO", "Found")],
max_length=2,
default="MI",
),
),
]
|
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import math
import os
import pathlib
import shutil
import time
from typing import Any, Callable, Dict, List, Tuple
import numpy as np
import pytest
from torch.utils.data import DataLoader
from composer.datasets.streaming import StreamingDataset, StreamingDatasetWriter
from composer.utils import dist
@pytest.fixture
def remote_local(tmp_path: pathlib.Path) -> Tuple[str, str]:
remote = tmp_path / "remote"
local = tmp_path / "local"
remote.mkdir()
local.mkdir()
return str(remote), str(local)
def get_fake_samples_decoders(num_samples: int) -> Tuple[List[Dict[str, bytes]], Dict[str, Callable[[bytes], Any]]]:
samples = [{"uid": f"{ix:06}".encode("utf-8"), "data": (3 * ix).to_bytes(4, "big")} for ix in range(num_samples)]
decoders = {
"uid": lambda uid_bytes: uid_bytes.decode("utf-8"),
"data": lambda data_bytes: int.from_bytes(data_bytes, "big")
}
return samples, decoders
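# A minimal sketch (not one of the original tests): the decoders invert the byte
# encodings used by the synthetic samples above.
def _example_decode_sample():
    samples, decoders = get_fake_samples_decoders(4)
    raw = samples[2]
    return decoders["uid"](raw["uid"]), decoders["data"](raw["data"])  # ('000002', 6)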
def write_synthetic_streaming_dataset(dirname: str, samples: List[Dict[str, bytes]], shard_size_limit: int) -> None:
first_sample_fields = list(samples[0].keys())
with StreamingDatasetWriter(dirname=dirname, fields=first_sample_fields,
shard_size_limit=shard_size_limit) as writer:
writer.write_samples(samples=samples)
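# A minimal sketch (not one of the original tests): write a tiny synthetic
# dataset into a temporary directory and list the shard/index files produced.
def _example_write_tiny_dataset():
    import tempfile
    samples, _ = get_fake_samples_decoders(8)
    dirname = tempfile.mkdtemp()
    write_synthetic_streaming_dataset(dirname=dirname, samples=samples, shard_size_limit=1 << 8)
    return sorted(os.listdir(dirname))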
@pytest.mark.timeout(10)
@pytest.mark.parametrize("num_samples", [100, 10000])
@pytest.mark.parametrize("shard_size_limit", [1 << 8, 1 << 16, 1 << 24])
def test_writer(remote_local: Tuple[str, str], num_samples: int, shard_size_limit: int) -> None:
dirname, _ = remote_local
samples, _ = get_fake_samples_decoders(num_samples)
first_sample_values = samples[0].values()
first_sample_byte_sizes = np.array([len(v) for v in first_sample_values], dtype=np.int64)
first_sample_bytes = len(first_sample_byte_sizes.tobytes() + b''.join(first_sample_values))
expected_samples_per_shard = shard_size_limit // first_sample_bytes
expected_num_shards = math.ceil(num_samples / expected_samples_per_shard)
expected_num_files = expected_num_shards + 1 # the index file
write_synthetic_streaming_dataset(dirname=dirname, samples=samples, shard_size_limit=shard_size_limit)
files = os.listdir(dirname)
assert len(files) == expected_num_files, f"Files written ({len(files)}) != expected ({expected_num_files})."
@pytest.mark.timeout(10)
@pytest.mark.parametrize("batch_size", [None, 1, 2])
@pytest.mark.parametrize("share_remote_local", [False, True])
@pytest.mark.parametrize("shuffle", [False, True])
def test_reader(remote_local: Tuple[str, str], batch_size: int, share_remote_local: bool, shuffle: bool):
num_samples = 117
shard_size_limit = 1 << 8
samples, decoders = get_fake_samples_decoders(num_samples)
remote, local = remote_local
if share_remote_local:
local = remote
write_synthetic_streaming_dataset(dirname=remote, samples=samples, shard_size_limit=shard_size_limit)
# Build StreamingDataset
dataset = StreamingDataset(remote=remote, local=local, shuffle=shuffle, decoders=decoders, batch_size=batch_size)
# Test basic sample order
rcvd_samples = 0
shuffle_matches = 0
for ix, sample in enumerate(dataset):
rcvd_samples += 1
uid = sample["uid"]
data = sample["data"]
expected_uid = f"{ix:06}"
expected_data = 3 * ix
if shuffle:
shuffle_matches += (expected_uid == uid)
else:
            assert uid == expected_uid, f"sample ix={ix} has uid={uid}, expected {expected_uid}"
assert data == expected_data, f"sample ix={ix} has data={data}, expected {expected_data}"
# If shuffling, there should be few matches
# The probability of k matches in a random permutation is ~1/(e*(k!))
if shuffle:
assert shuffle_matches < 10
# Test length
assert rcvd_samples == num_samples, f"Only received {rcvd_samples} samples, expected {num_samples}"
assert len(dataset) == num_samples, f"Got dataset length={len(dataset)} samples, expected {num_samples}"
@pytest.mark.timeout(10)
@pytest.mark.parametrize("created_ago", [0.5, 3])
@pytest.mark.parametrize("timeout", [1])
def test_reader_after_crash(remote_local: Tuple[str, str], created_ago: float, timeout: float):
num_samples = 117
shard_size_limit = 1 << 8
samples, decoders = get_fake_samples_decoders(num_samples)
remote, local = remote_local
write_synthetic_streaming_dataset(dirname=remote, samples=samples, shard_size_limit=shard_size_limit)
shutil.copy(os.path.join(remote, "index.mds"), os.path.join(local, "index.mds.tmp"))
shutil.copy(os.path.join(remote, "000003.mds"), os.path.join(local, "000003.mds.tmp"))
time.sleep(created_ago)
dataset = StreamingDataset(remote=remote, local=local, shuffle=False, decoders=decoders, timeout=timeout)
# Iterate over dataset and make sure there are no TimeoutErrors
for _ in dataset:
pass
@pytest.mark.parametrize(
"share_remote_local",
[
True,
pytest.param(False, marks=pytest.mark.xfail(reason="__getitem__ currently expects shards to exist")),
],
)
def test_reader_getitem(remote_local: Tuple[str, str], share_remote_local: bool):
num_samples = 117
shard_size_limit = 1 << 8
samples, decoders = get_fake_samples_decoders(num_samples)
remote, local = remote_local
if share_remote_local:
local = remote
write_synthetic_streaming_dataset(dirname=remote, samples=samples, shard_size_limit=shard_size_limit)
# Build StreamingDataset
dataset = StreamingDataset(remote=remote, local=local, shuffle=False, decoders=decoders)
# Test retrieving random sample
_ = dataset[17]
@pytest.mark.daily()
@pytest.mark.timeout(10)
@pytest.mark.parametrize("batch_size", [1, 2, 5])
@pytest.mark.parametrize("drop_last", [False, True])
@pytest.mark.parametrize("num_workers", [1, 2, 3])
@pytest.mark.parametrize("persistent_workers", [
False,
pytest.param(
True,
marks=pytest.mark.xfail(
reason=
"PyTorch DataLoader has non-deterministic worker cycle iterator when `persistent_workers=True`. Fixed in Mar 2022, likely landing PyTorch 1.12: https://github.com/pytorch/pytorch/pull/73675"
)),
])
@pytest.mark.parametrize("shuffle", [False, True])
def test_dataloader_single_device(remote_local: Tuple[str, str], batch_size: int, drop_last: bool, num_workers: int,
persistent_workers: bool, shuffle: bool):
num_samples = 31
shard_size_limit = 1 << 6
samples, decoders = get_fake_samples_decoders(num_samples)
remote, local = remote_local
write_synthetic_streaming_dataset(dirname=remote, samples=samples, shard_size_limit=shard_size_limit)
# Build StreamingDataset
dataset = StreamingDataset(remote=remote, local=local, shuffle=shuffle, decoders=decoders, batch_size=batch_size)
# Build DataLoader
dataloader = DataLoader(dataset=dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=drop_last,
persistent_workers=persistent_workers)
# Expected number of batches based on batch_size and drop_last
expected_num_batches = (num_samples // batch_size) if drop_last else math.ceil(num_samples / batch_size)
expected_num_samples = expected_num_batches * batch_size if drop_last else num_samples
# Iterate over DataLoader
rcvd_batches = 0
sample_order = []
for batch_ix, batch in enumerate(dataloader):
rcvd_batches += 1
# Every batch should be complete except (maybe) final one
if batch_ix + 1 < expected_num_batches:
assert len(batch["uid"]) == batch_size
else:
if drop_last:
assert len(batch["uid"]) == batch_size
else:
assert len(batch["uid"]) <= batch_size
for uid in batch["uid"]:
sample_order.append(int(uid))
# Test dataloader length
assert len(dataloader) == expected_num_batches
assert rcvd_batches == expected_num_batches
# Test that all samples arrived
assert len(sample_order) == expected_num_samples
if not drop_last:
assert len(set(sample_order)) == num_samples
# Iterate over the dataloader again to check shuffle behavior
second_sample_order = []
for batch_ix, batch in enumerate(dataloader):
for uid in batch["uid"]:
second_sample_order.append(int(uid))
assert len(sample_order) == len(second_sample_order)
if shuffle:
assert sample_order != second_sample_order
else:
assert sample_order == second_sample_order
@pytest.mark.daily()
@pytest.mark.timeout(10)
@pytest.mark.world_size(2)
@pytest.mark.parametrize("batch_size", [4])
@pytest.mark.parametrize("drop_last", [False, True])
@pytest.mark.parametrize("multinode", [False, True])
@pytest.mark.parametrize("num_samples", [30, 31])
@pytest.mark.parametrize("num_workers", [1, 3])
@pytest.mark.parametrize("shuffle", [False, True])
def test_dataloader_multi_device(remote_local: Tuple[str, str], batch_size: int, drop_last: bool, multinode: bool,
num_samples: int, num_workers: int, shuffle: bool):
if multinode:
# Force different nodes
os.environ["LOCAL_RANK"] = str(0)
os.environ["NODE_RANK"] = str(dist.get_global_rank())
os.environ["LOCAL_WORLD_SIZE"] = str(1)
global_device = dist.get_global_rank()
global_num_devices = dist.get_world_size()
node_rank = dist.get_node_rank()
assert batch_size % global_num_devices == 0
device_batch_size = batch_size // global_num_devices
shard_size_limit = 1 << 6
samples, decoders = get_fake_samples_decoders(num_samples)
# Create globally shared remote, and node-local folders
remote_local_list = list(remote_local)
dist.broadcast_object_list(remote_local_list)
remote, local = remote_local_list
node_local = os.path.join(local, str(node_rank))
# Create remote dataset on global device 0
if global_device == 0:
write_synthetic_streaming_dataset(dirname=remote, samples=samples, shard_size_limit=shard_size_limit)
dist.barrier()
# Build StreamingDataset
dataset = StreamingDataset(
remote=remote,
local=node_local,
shuffle=shuffle,
decoders=decoders,
batch_size=device_batch_size,
)
# Build DataLoader
dataloader = DataLoader(dataset=dataset,
batch_size=device_batch_size,
num_workers=num_workers,
drop_last=drop_last,
persistent_workers=False)
# Expected number of samples and batches based on global_num_devices, batch_size and drop_last
device_compatible_num_samples = global_num_devices * math.ceil(num_samples / global_num_devices)
expected_num_batches = (device_compatible_num_samples //
batch_size) if drop_last else math.ceil(device_compatible_num_samples / batch_size)
expected_num_samples = expected_num_batches * batch_size if drop_last else device_compatible_num_samples
# Iterate over DataLoader
rcvd_batches = 0
sample_order = []
for batch_ix, batch in enumerate(dataloader):
rcvd_batches += 1
# Every batch should be complete except (maybe) final one
if batch_ix + 1 < expected_num_batches:
assert len(batch["uid"]) == device_batch_size
else:
if drop_last:
assert len(batch["uid"]) == device_batch_size
else:
assert len(batch["uid"]) <= device_batch_size
device_batch_uids = [int(uid) for uid in batch["uid"]]
all_device_batch_uids = dist.all_gather_object(device_batch_uids)
for uids in all_device_batch_uids:
sample_order += uids
# Test dataloader length
assert len(dataloader) == expected_num_batches
assert rcvd_batches == expected_num_batches
# Test that all samples arrived
assert len(sample_order) == expected_num_samples
if not drop_last:
assert len(set(sample_order)) == num_samples
# Iterate over the dataloader again to check shuffle behavior
second_sample_order = []
for batch_ix, batch in enumerate(dataloader):
device_batch_uids = [int(uid) for uid in batch["uid"]]
all_device_batch_uids = dist.all_gather_object(device_batch_uids)
for uids in all_device_batch_uids:
second_sample_order += uids
assert len(sample_order) == len(second_sample_order)
if shuffle:
assert sample_order != second_sample_order
else:
assert sample_order == second_sample_order
|
/*
FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
This file is part of the FreeRTOS distribution.
FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.
***************************************************************************
>>! NOTE: The modification to the GPL is included to allow you to !<<
>>! distribute a combined work that includes FreeRTOS without being !<<
>>! obliged to provide the source code for proprietary components !<<
>>! outside of the FreeRTOS kernel. !<<
***************************************************************************
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. Full license text is available on the following
link: http://www.freertos.org/a00114.html
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that is more than just the market leader, it *
* is the industry's de facto standard. *
* *
* Help yourself get started quickly while simultaneously helping *
* to support the FreeRTOS project by purchasing a FreeRTOS *
* tutorial book, reference manual, or both: *
* http://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
the FAQ page "My application does not run, what could be wrong?". Have you
defined configASSERT()?
http://www.FreeRTOS.org/support - In return for receiving this top quality
embedded software for free we request you assist our global community by
participating in the support forum.
http://www.FreeRTOS.org/training - Investing in training allows your team to
be as productive as possible as early as possible. Now you can receive
FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
Ltd, and the world's leading authority on the world's leading RTOS.
http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.
http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
licenses offer ticketed support, indemnification and commercial middleware.
http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
mission critical applications that require provable dependability.
1 tab == 4 spaces!
*/
/* Standard includes. */
#include <stdlib.h>
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
/* Check the configuration. */
#if( configMAX_PRIORITIES > 32 )
#error configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32. It is very rare that a system requires more than 10 to 15 different priorities as tasks that share a priority will time slice.
#endif
#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
#ifndef configSETUP_TICK_INTERRUPT
#error configSETUP_TICK_INTERRUPT() must be defined in FreeRTOSConfig.h to call the function that sets up the tick interrupt.
#endif
#ifndef configCLEAR_TICK_INTERRUPT
#error configCLEAR_TICK_INTERRUPT must be defined in FreeRTOSConfig.h to clear whichever interrupt was used to generate the tick interrupt.
#endif
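/* Illustrative sketch only: a FreeRTOSConfig.h for this port would typically map
the two hooks above to board specific functions, for example (the function names
below are hypothetical placeholders, not provided by this port):
#define configSETUP_TICK_INTERRUPT()    vConfigureTickInterrupt()
#define configCLEAR_TICK_INTERRUPT()    vClearTickInterrupt()
where vConfigureTickInterrupt() programs a hardware timer to call
FreeRTOS_Tick_Handler() at the tick rate and vClearTickInterrupt() acknowledges
that timer's interrupt. */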
/* A critical section is exited when the critical section nesting count reaches
this value. */
#define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 )
/* Tasks are not created with a floating point context, but can be given a
floating point context after they have been created. A variable is stored as
part of the tasks context that holds portNO_FLOATING_POINT_CONTEXT if the task
does not have an FPU context, or any other value if the task does have an FPU
context. */
#define portNO_FLOATING_POINT_CONTEXT ( ( StackType_t ) 0 )
/* Constants required to setup the initial task context. */
#define portINITIAL_SPSR ( ( StackType_t ) 0x1f ) /* System mode, ARM mode, IRQ enabled FIQ enabled. */
#define portTHUMB_MODE_BIT ( ( StackType_t ) 0x20 )
#define portTHUMB_MODE_ADDRESS ( 0x01UL )
/* Masks all bits in the APSR other than the mode bits. */
#define portAPSR_MODE_BITS_MASK ( 0x1F )
/* The value of the mode bits in the APSR when the CPU is executing in user
mode. */
#define portAPSR_USER_MODE ( 0x10 )
/* Let the user override the pre-loading of the initial LR with the address of
prvTaskExitError() in case it messes up unwinding of the stack in the
debugger. */
#ifdef configTASK_RETURN_ADDRESS
#define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
#else
#define portTASK_RETURN_ADDRESS prvTaskExitError
#endif
/*-----------------------------------------------------------*/
/*
* Starts the first task executing. This function is necessarily written in
* assembly code so is implemented in portASM.s.
*/
extern void vPortRestoreTaskContext( void );
/*
* Used to catch tasks that attempt to return from their implementing function.
*/
static void prvTaskExitError( void );
/*-----------------------------------------------------------*/
/* A variable is used to keep track of the critical section nesting. This
variable has to be stored as part of the task context and must be initialised to
a non zero value to ensure interrupts don't inadvertently become unmasked before
the scheduler starts. As it is stored as part of the task context it will
automatically be set to 0 when the first task is started. */
volatile uint32_t ulCriticalNesting = 9999UL;
/* Saved as part of the task context. If ulPortTaskHasFPUContext is non-zero then
a floating point context must be saved and restored for the task. */
volatile uint32_t ulPortTaskHasFPUContext = pdFALSE;
/* Set to 1 to pend a context switch from an ISR. */
volatile uint32_t ulPortYieldRequired = pdFALSE;
/* Counts the interrupt nesting depth. A context switch is only performed if
the nesting depth is 0. */
volatile uint32_t ulPortInterruptNesting = 0UL;
/*-----------------------------------------------------------*/
/*
* See header file for description.
*/
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
{
/* Setup the initial stack of the task. The stack is set exactly as
expected by the portRESTORE_CONTEXT() macro.
The first real value on the stack is the status register, which is set for
system mode, with interrupts enabled. A few NULLs are added first to ensure
GDB does not try decoding a non-existent return address. */
*pxTopOfStack = ( StackType_t ) NULL;
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) NULL;
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) NULL;
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) portINITIAL_SPSR;
if( ( ( uint32_t ) pxCode & portTHUMB_MODE_ADDRESS ) != 0x00UL )
{
/* The task will start in THUMB mode. */
*pxTopOfStack |= portTHUMB_MODE_BIT;
}
pxTopOfStack--;
/* Next the return address, which in this case is the start of the task. */
*pxTopOfStack = ( StackType_t ) pxCode;
pxTopOfStack--;
/* Next all the registers other than the stack pointer. */
*pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* R14 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x12121212; /* R12 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x11111111; /* R11 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x10101010; /* R10 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x09090909; /* R9 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x08080808; /* R8 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x07070707; /* R7 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x06060606; /* R6 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x05050505; /* R5 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x04040404; /* R4 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x03030303; /* R3 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x02020202; /* R2 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x01010101; /* R1 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
pxTopOfStack--;
/* The task will start with a critical nesting count of 0 as interrupts are
enabled. */
*pxTopOfStack = portNO_CRITICAL_NESTING;
pxTopOfStack--;
/* The task will start without a floating point context. A task that uses
the floating point hardware must call vPortTaskUsesFPU() before executing
any floating point instructions. */
*pxTopOfStack = portNO_FLOATING_POINT_CONTEXT;
return pxTopOfStack;
}
/*-----------------------------------------------------------*/
static void prvTaskExitError( void )
{
/* A function that implements a task must not exit or attempt to return to
its caller as there is nothing to return to. If a task wants to exit it
should instead call vTaskDelete( NULL ).
Artificially force an assert() to be triggered if configASSERT() is
defined, then stop here so application writers can catch the error. */
configASSERT( ulPortInterruptNesting == ~0UL );
portDISABLE_INTERRUPTS();
for( ;; );
}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void )
{
uint32_t ulAPSR;
/* Only continue if the CPU is not in User mode. The CPU must be in a
Privileged mode for the scheduler to start. */
__asm volatile ( "MRS %0, APSR" : "=r" ( ulAPSR ) );
ulAPSR &= portAPSR_MODE_BITS_MASK;
configASSERT( ulAPSR != portAPSR_USER_MODE );
if( ulAPSR != portAPSR_USER_MODE )
{
/* Start the timer that generates the tick ISR. */
portDISABLE_INTERRUPTS();
configSETUP_TICK_INTERRUPT();
/* Start the first task executing. */
vPortRestoreTaskContext();
}
/* Will only get here if vTaskStartScheduler() was called with the CPU in
a non-privileged mode or the binary point register was not set to its lowest
possible value. prvTaskExitError() is referenced to prevent a compiler
warning about it being defined but not referenced in the case that the user
defines their own exit address. */
( void ) prvTaskExitError;
return 0;
}
/*-----------------------------------------------------------*/
void vPortEndScheduler( void )
{
/* Not implemented in ports where there is nothing to return to.
Artificially force an assert. */
configASSERT( ulCriticalNesting == 1000UL );
}
/*-----------------------------------------------------------*/
void vPortEnterCritical( void )
{
portDISABLE_INTERRUPTS();
/* Now that interrupts are disabled, ulCriticalNesting can be accessed
directly. Increment ulCriticalNesting to keep a count of how many times
portENTER_CRITICAL() has been called. */
ulCriticalNesting++;
/* This is not the interrupt safe version of the enter critical function so
assert() if it is being called from an interrupt context. Only API
functions that end in "FromISR" can be used in an interrupt. Only assert if
the critical nesting count is 1 to protect against recursive calls if the
assert function also uses a critical section. */
if( ulCriticalNesting == 1 )
{
configASSERT( ulPortInterruptNesting == 0 );
}
}
/*-----------------------------------------------------------*/
void vPortExitCritical( void )
{
if( ulCriticalNesting > portNO_CRITICAL_NESTING )
{
/* Decrement the nesting count as the critical section is being
exited. */
ulCriticalNesting--;
/* If the nesting level has reached zero then all interrupt
priorities must be re-enabled. */
if( ulCriticalNesting == portNO_CRITICAL_NESTING )
{
/* Critical nesting has reached zero so all interrupt priorities
should be unmasked. */
portENABLE_INTERRUPTS();
}
}
}
/*-----------------------------------------------------------*/
void FreeRTOS_Tick_Handler( void )
{
uint32_t ulInterruptStatus;
ulInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
/* Increment the RTOS tick. */
if( xTaskIncrementTick() != pdFALSE )
{
ulPortYieldRequired = pdTRUE;
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( ulInterruptStatus );
configCLEAR_TICK_INTERRUPT();
}
/*-----------------------------------------------------------*/
void vPortTaskUsesFPU( void )
{
uint32_t ulInitialFPSCR = 0;
/* A task is registering the fact that it needs an FPU context. Set the
FPU flag (which is saved as part of the task context). */
ulPortTaskHasFPUContext = pdTRUE;
/* Initialise the floating point status register. */
__asm volatile ( "FMXR FPSCR, %0" :: "r" (ulInitialFPSCR) );
}
/*-----------------------------------------------------------*/
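/* Illustrative sketch (not part of the original port): per the comments in
pxPortInitialiseStack() and vPortTaskUsesFPU() above, a task must call
vPortTaskUsesFPU() before executing its first floating point instruction.
The hypothetical task below shows the intended calling pattern and is excluded
from the build with #if 0. */
#if 0
static void prvExampleFpuTask( void *pvParameters )
{
    volatile float fAccumulator = 0.0f;

    ( void ) pvParameters;

    /* Give this task a floating point context before any FP instructions run. */
    vPortTaskUsesFPU();

    for( ;; )
    {
        fAccumulator += 1.5f;
    }
}
#endif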
|
# Copyright 2017-2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main API for Authbox.
Your business logic should subclass BaseDispatcher and set up your peripherals
in its __init__ method. Most simple uses will use callbacks for everything.
See two_button.py as an example workflow.
Peripherals are kept in other files in this same package, and should be listed
in CLASS_REGISTRY so they can be loaded lazily.
"""
from __future__ import print_function
import sys
import threading
import traceback
import types
from authbox.compat import queue
from RPi import GPIO
# The line above simplifies imports for other modules that are already importing from api.
# TODO give each object a logger and use that instead of prints
CLASS_REGISTRY = [
"authbox.badgereader_hid_keystroking.HIDKeystrokingReader",
"authbox.badgereader_wiegand_gpio.WiegandGPIOReader",
"authbox.gpio_button.Button",
"authbox.gpio_relay.Relay",
"authbox.gpio_buzzer.Buzzer",
"authbox.timer.Timer",
]
# Add this to event_queue to request a graceful shutdown.
SHUTDOWN_SENTINEL = object()
class BaseDispatcher(object):
def __init__(self, config):
self.config = config
self.event_queue = queue.Queue() # unbounded
self.threads = []
def load_config_object(self, name, **kwargs):
# N.b. args are from config, kwargs are passed from python.
# This sometimes causes confusing error messages like
# "takes at least 5 arguments (5 given)".
config_items = split_escaped(self.config.get("pins", name), preserve=True)
objs = []
for item in config_items:
options = list(split_escaped(item.strip(), glue=":"))
cls_name = options[0]
for c in CLASS_REGISTRY:
if c.endswith("." + cls_name):
cls = _import(c)
break
else:
# This is a Python for-else, which executes if the body above didn't
# execute 'break'.
raise Exception("Unknown item", name)
print("Instantiating", cls, self.event_queue, name, options[1:], kwargs)
obj = cls(self.event_queue, name, *options[1:], **kwargs)
objs.append(obj)
self.threads.append(obj)
if len(objs) == 1:
setattr(self, name, obj)
else:
setattr(self, name, MultiProxy(objs))
def run_loop(self):
# Doesn't really support calling run_loop() more than once
for th in self.threads:
th.start()
try:
while True:
# We pass a small timeout because .get(block=True) without it causes
# trouble handling Ctrl-C.
try:
item = self.event_queue.get(timeout=1.0)
except queue.Empty:
continue
if item is SHUTDOWN_SENTINEL:
break
# These only happen here to serialize access regardless of what thread
# handled it.
func, args = item[0], item[1:]
try:
func(*args)
except Exception as e:
traceback.print_exc()
print("Got exception", repr(e), "executing", func, args)
except KeyboardInterrupt:
print("Got Ctrl-C, shutting down.")
# Assuming all threads are daemonized, we will now shut down.
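# Illustrative sketch (hypothetical names, not shipped code): a minimal
# dispatcher subclass wires a single peripheral from the [pins] config section
# and then runs the event loop.  The "badge_reader" key and the on_scan
# callback keyword are assumptions for illustration; see two_button.py for the
# real workflow.
#
#     class ExampleDispatcher(BaseDispatcher):
#         def __init__(self, config):
#             super(ExampleDispatcher, self).__init__(config)
#             # Instantiates whichever CLASS_REGISTRY class the config names,
#             # passing the callback through as a keyword argument.
#             self.load_config_object("badge_reader", on_scan=self.on_scan)
#
#         def on_scan(self, badge_id):
#             print("Badge scanned:", badge_id)
#
#     # ExampleDispatcher(config).run_loop()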
class BaseDerivedThread(threading.Thread):
def __init__(self, event_queue, config_name):
# TODO should they also have numeric ids?
thread_name = "%s %s" % (self.__class__.__name__, config_name)
super(BaseDerivedThread, self).__init__(name=thread_name)
self.daemon = True
self.event_queue = event_queue
self.config_name = config_name
def run(self):
while True:
try:
self.run_inner()
except Exception:
traceback.print_exc()
class BasePinThread(BaseDerivedThread):
def __init__(
self, event_queue, config_name, input_pin, output_pin, initial_output=GPIO.LOW
):
super(BasePinThread, self).__init__(event_queue, config_name)
self.input_pin = input_pin
self.output_pin = output_pin
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False) # for reusing pins
if self.input_pin:
GPIO.setup(self.input_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
if self.output_pin:
GPIO.setup(self.output_pin, GPIO.OUT, initial=initial_output)
class BaseWiegandPinThread(BaseDerivedThread):
def __init__(self, event_queue, config_name, d0_pin, d1_pin):
super(BaseWiegandPinThread, self).__init__(event_queue, config_name)
self.d0_pin = d0_pin
self.d1_pin = d1_pin
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False) # for reusing pins
if self.d0_pin:
GPIO.setup(self.d0_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
if self.d1_pin:
GPIO.setup(self.d1_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class NoMatchingDevice(Exception):
"""Generic exception for missing devices."""
def _import(name):
module, object_name = name.rsplit(".", 1)
# The return value of __import__ requires walking the dots, so
# this is a fairly standard workaround that's easier. Intermediate
# names appear to always get added to sys.modules.
__import__(module)
return getattr(sys.modules[module], object_name)
class MultiMethodProxy(object):
def __init__(self, objs, meth):
self.objs = objs
self.meth = meth
def __call__(self, *args, **kwargs):
for i in self.objs:
getattr(i, self.meth)(*args, **kwargs)
class MultiProxy(object):
def __init__(self, objs):
self.objs = objs
def __getattr__(self, name):
if isinstance(getattr(self.objs[0], name), types.MethodType):
return MultiMethodProxy(self.objs, name)
else:
return getattr(self.objs[0], name)
def split_escaped(s, glue=",", preserve=False):
"""Handle single-char escapes using backslash."""
buf = []
it = iter(s)
for c in it:
if c == glue:
yield "".join(buf)
del buf[:]
elif c == "\\":
if preserve:
buf.append(c)
c = next(it)
buf.append(c)
else:
buf.append(c)
if buf:
yield "".join(buf)
|
import enum
import typing
from uecp.commands.base import (
UECPCommand,
UECPCommandDecodeElementCodeMismatchError,
UECPCommandDecodeNotEnoughData,
)
from uecp.commands.mixins import InvalidDataSetNumber
class SiteEncoderAddressSetCommandMode(enum.IntEnum):
REMOVE_SINGLE = 0b00
ADD_SINGLE = 0b01
REMOVE_ALL = 0b10
@UECPCommand.register_type
class SiteAddressSetCommand(UECPCommand):
ELEMENT_CODE = 0x23
def __init__(self, mode: SiteEncoderAddressSetCommandMode, site_address: int):
self._mode: SiteEncoderAddressSetCommandMode = (
SiteEncoderAddressSetCommandMode.ADD_SINGLE
)
self._site_address = 0
self.mode = mode
self.site_address = site_address
@property
def mode(self) -> SiteEncoderAddressSetCommandMode:
return self._mode
@mode.setter
def mode(self, value):
self._mode = SiteEncoderAddressSetCommandMode(value)
@property
def site_address(self) -> int:
return self._site_address
@site_address.setter
def site_address(self, value):
value = int(value)
if not (0 <= value <= 0x3FF):
raise ValueError(
f"Site address must be in range of 0 to 0x3ff, {value:#x} given"
)
self._site_address = value
def encode(self) -> list[int]:
return [
self.ELEMENT_CODE,
int(self._mode),
self._site_address >> 8,
self._site_address & 0xFF,
]
@classmethod
def create_from(
cls, data: typing.Union[bytes, list[int]]
) -> tuple["SiteAddressSetCommand", int]:
data = list(data)
if len(data) < 4:
raise UECPCommandDecodeNotEnoughData(len(data), 4)
mec, mode, site_address_high, site_address_low = data[0:4]
if mec != cls.ELEMENT_CODE:
raise UECPCommandDecodeElementCodeMismatchError(mec, cls.ELEMENT_CODE)
site_address = site_address_high << 8 | site_address_low
self = cls(
mode=SiteEncoderAddressSetCommandMode(mode), site_address=site_address
)
return self, 4
@UECPCommand.register_type
class EncoderAddressSetCommand(UECPCommand):
ELEMENT_CODE = 0x27
def __init__(self, mode: SiteEncoderAddressSetCommandMode, encoder_address: int):
self._mode: SiteEncoderAddressSetCommandMode = (
SiteEncoderAddressSetCommandMode.ADD_SINGLE
)
self._encoder_address = 0
self.mode = mode
self.encoder_address = encoder_address
@property
def mode(self) -> SiteEncoderAddressSetCommandMode:
return self._mode
@mode.setter
def mode(self, value):
self._mode = SiteEncoderAddressSetCommandMode(value)
@property
def encoder_address(self) -> int:
return self._encoder_address
@encoder_address.setter
def encoder_address(self, value):
value = int(value)
if not (0 <= value <= 0x3F):
raise ValueError(
f"Encoder address must be in range of 0 to 0x3f, {value:#x} given"
)
self._encoder_address = value
def encode(self) -> list[int]:
return [
self.ELEMENT_CODE,
int(self._mode),
self._encoder_address,
]
@classmethod
def create_from(
cls, data: typing.Union[bytes, list[int]]
) -> tuple["EncoderAddressSetCommand", int]:
data = list(data)
if len(data) < 3:
raise UECPCommandDecodeNotEnoughData(len(data), 3)
mec, mode, encoder_address = data[0:3]
if mec != cls.ELEMENT_CODE:
raise UECPCommandDecodeElementCodeMismatchError(mec, cls.ELEMENT_CODE)
return (
cls(
mode=SiteEncoderAddressSetCommandMode(mode),
encoder_address=encoder_address,
),
3,
)
@enum.unique
class CommunicationMode(enum.IntEnum):
UNIDIRECTIONAL = 0
BIDIRECTIONAL_REQUESTED_RESPONSE = 1
BIDIRECTIONAL_SPONTANEOUS_RESPONSE = 2
@UECPCommand.register_type
class CommunicationModeSetCommand(UECPCommand):
ELEMENT_CODE = 0x2C
def __init__(self, mode: CommunicationMode):
self._mode = CommunicationMode(mode)
@property
def mode(self) -> CommunicationMode:
return self._mode
@mode.setter
def mode(self, value):
self._mode = CommunicationMode(value)
def encode(self) -> list[int]:
return [self.ELEMENT_CODE, int(self._mode)]
@classmethod
def create_from(
cls, data: typing.Union[bytes, list[int]]
) -> tuple["CommunicationModeSetCommand", int]:
data = list(data)
if len(data) < 2:
raise UECPCommandDecodeNotEnoughData(len(data), 2)
mec, mode = data[0:2]
if mec != cls.ELEMENT_CODE:
raise UECPCommandDecodeElementCodeMismatchError(mec, cls.ELEMENT_CODE)
return cls(mode=CommunicationMode(mode)), 2
@UECPCommand.register_type
class DataSetSelectCommand(UECPCommand):
ELEMENT_CODE = 0x1C
def __init__(self, select_data_set_number: int):
self._select_data_set_number = 0
self.select_data_set_number = select_data_set_number
@property
def select_data_set_number(self) -> int:
return self._select_data_set_number
@select_data_set_number.setter
def select_data_set_number(self, value: int):
try:
if value == int(value):
value = int(value)
else:
raise ValueError()
except ValueError:
raise InvalidDataSetNumber(value)
if not (0x01 <= value <= 0xFF):
raise InvalidDataSetNumber(value)
self._select_data_set_number = value
def encode(self) -> list[int]:
return [self.ELEMENT_CODE, self._select_data_set_number]
@classmethod
def create_from(
cls, data: typing.Union[bytes, list[int]]
) -> tuple["DataSetSelectCommand", int]:
data = list(data)
if len(data) < 2:
raise UECPCommandDecodeNotEnoughData(len(data), 2)
mec, select_data_set_number = data[0:2]
if mec != cls.ELEMENT_CODE:
raise UECPCommandDecodeElementCodeMismatchError(mec, cls.ELEMENT_CODE)
return cls(select_data_set_number=select_data_set_number), 2
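# Illustrative round trip, derived from the classes above (values shown as
# plain ints; 0x23 == 35, 0x1C == 28):
#
#     >>> SiteAddressSetCommand(SiteEncoderAddressSetCommandMode.ADD_SINGLE, 0x123).encode()
#     [35, 1, 1, 35]
#     >>> DataSetSelectCommand(5).encode()
#     [28, 5]
#     >>> cmd, consumed = CommunicationModeSetCommand.create_from([0x2C, 1])
#     >>> cmd.mode, consumed
#     (<CommunicationMode.BIDIRECTIONAL_REQUESTED_RESPONSE: 1>, 2)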
|
from django.contrib import admin
from .models import Organization, Request
# Register your models here.
class OrganizationAdmin(admin.ModelAdmin):
list_display = ('name', 'owner')
admin.site.register(Organization, OrganizationAdmin)
admin.site.register(Request)
|
function townsToJSON(input) {
    let towns = [];
    // Skip the header row, then trim the leading "| " and trailing " |" cell borders.
    for (const line of input.slice(1)) {
        let slicedLine = line.slice(2, -2);
        let [town, latitude, longitude] = slicedLine.split(' | ');
        // toFixed(2) rounds to two decimals as a string; JSON.parse converts it back to a number.
        let townObj = {
            Town: town,
            Latitude: JSON.parse(Number(latitude).toFixed(2)),
            Longitude: JSON.parse(Number(longitude).toFixed(2))
        };
        towns.push(townObj);
    }
    console.log(JSON.stringify(towns));
}
townsToJSON(['| Town | Latitude | Longitude |',
'| Sofia | 42.696552 | 23.32601 |',
'| Beijing | 39.913818 | 116.363625 |'])
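// Expected console output for the sample call above: toFixed(2) rounds each
// coordinate to two decimals and JSON.parse drops any trailing zero, giving:
// [{"Town":"Sofia","Latitude":42.7,"Longitude":23.33},{"Town":"Beijing","Latitude":39.91,"Longitude":116.36}]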
|
/** PURE_IMPORTS_START .._Subject,.._util_tryCatch,.._util_errorObject,.._OuterSubscriber,.._util_subscribeToResult PURE_IMPORTS_END */
var __extends = (this && this.__extends) || function (d, b) {
for (var p in b)
if (b.hasOwnProperty(p))
d[p] = b[p];
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
import { Subject } from '../Subject';
import { tryCatch } from '../util/tryCatch';
import { errorObject } from '../util/errorObject';
import { OuterSubscriber } from '../OuterSubscriber';
import { subscribeToResult } from '../util/subscribeToResult';
/**
* Returns an Observable that mirrors the source Observable with the exception of an `error`. If the source Observable
* calls `error`, this method will emit the Throwable that caused the error to the Observable returned from `notifier`.
* If that Observable calls `complete` or `error` then this method will call `complete` or `error` on the child
* subscription. Otherwise this method will resubscribe to the source Observable.
*
* <img src="./img/retryWhen.png" width="100%">
*
* @param {function(errors: Observable): Observable} notifier - Receives an Observable of notifications with which a
* user can `complete` or `error`, aborting the retry.
* @return {Observable} The source Observable modified with retry logic.
* @method retryWhen
* @owner Observable
*/
export function retryWhen(notifier) {
return function (source) { return source.lift(new RetryWhenOperator(notifier, source)); };
}
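/*
 * Illustrative usage sketch (not part of this file; assumes an RxJS version
 * where Observable.prototype.pipe exists and `delay`/`take` are imported from
 * their usual operator entry points):
 *
 *   source$.pipe(
 *     retryWhen(errors => errors.pipe(delay(1000), take(3)))
 *   ).subscribe(observer);
 *
 * Each error from source$ is pushed into the `errors` notifier input; every
 * emission of the returned notifier triggers a resubscription to source$, and
 * once the notifier completes (here, after three errors) the retrying stops
 * and the subscriber completes.
 */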
var RetryWhenOperator = /*@__PURE__*/ (/*@__PURE__*/ function () {
function RetryWhenOperator(notifier, source) {
this.notifier = notifier;
this.source = source;
}
RetryWhenOperator.prototype.call = function (subscriber, source) {
return source.subscribe(new RetryWhenSubscriber(subscriber, this.notifier, this.source));
};
return RetryWhenOperator;
}());
/**
* We need this JSDoc comment for affecting ESDoc.
* @ignore
* @extends {Ignored}
*/
var RetryWhenSubscriber = /*@__PURE__*/ (/*@__PURE__*/ function (_super) {
__extends(RetryWhenSubscriber, _super);
function RetryWhenSubscriber(destination, notifier, source) {
_super.call(this, destination);
this.notifier = notifier;
this.source = source;
}
RetryWhenSubscriber.prototype.error = function (err) {
if (!this.isStopped) {
var errors = this.errors;
var retries = this.retries;
var retriesSubscription = this.retriesSubscription;
if (!retries) {
errors = new Subject();
retries = tryCatch(this.notifier)(errors);
if (retries === errorObject) {
return _super.prototype.error.call(this, errorObject.e);
}
retriesSubscription = subscribeToResult(this, retries);
}
else {
this.errors = null;
this.retriesSubscription = null;
}
this._unsubscribeAndRecycle();
this.errors = errors;
this.retries = retries;
this.retriesSubscription = retriesSubscription;
errors.next(err);
}
};
/** @deprecated internal use only */ RetryWhenSubscriber.prototype._unsubscribe = function () {
var _a = this, errors = _a.errors, retriesSubscription = _a.retriesSubscription;
if (errors) {
errors.unsubscribe();
this.errors = null;
}
if (retriesSubscription) {
retriesSubscription.unsubscribe();
this.retriesSubscription = null;
}
this.retries = null;
};
RetryWhenSubscriber.prototype.notifyNext = function (outerValue, innerValue, outerIndex, innerIndex, innerSub) {
var _a = this, errors = _a.errors, retries = _a.retries, retriesSubscription = _a.retriesSubscription;
this.errors = null;
this.retries = null;
this.retriesSubscription = null;
this._unsubscribeAndRecycle();
this.errors = errors;
this.retries = retries;
this.retriesSubscription = retriesSubscription;
this.source.subscribe(this);
};
return RetryWhenSubscriber;
}(OuterSubscriber));
//# sourceMappingURL=retryWhen.js.map
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base64 import standard_b64encode as b64enc
import copy
from collections import defaultdict
from itertools import chain, ifilter, imap
import operator
import os
import sys
import shlex
import traceback
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
import warnings
from heapq import heappush, heappop, heappushpop
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, pack_long
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler
from pyspark.storagelevel import StorageLevel
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
def _extract_concise_traceback():
tb = traceback.extract_stack()
if len(tb) == 0:
return "I'm lost!"
# HACK: This function is in a file called 'rdd.py' in the top level of
# everything PySpark. Just trim off the directory name and assume
# everything in that tree is PySpark guts.
file, line, module, what = tb[len(tb) - 1]
sparkpath = os.path.dirname(file)
first_spark_frame = len(tb) - 1
for i in range(0, len(tb)):
file, line, fun, what = tb[i]
if file.startswith(sparkpath):
first_spark_frame = i
break
if first_spark_frame == 0:
file, line, fun, what = tb[0]
return "%s at %s:%d" % (fun, file, line)
sfile, sline, sfun, swhat = tb[first_spark_frame]
ufile, uline, ufun, uwhat = tb[first_spark_frame-1]
return "%s at %s:%d" % (sfun, ufile, uline)
_spark_stack_depth = 0
class _JavaStackTrace(object):
def __init__(self, sc):
self._traceback = _extract_concise_traceback()
self._context = sc
def __enter__(self):
global _spark_stack_depth
if _spark_stack_depth == 0:
self._context._jsc.setCallSite(self._traceback)
_spark_stack_depth += 1
def __exit__(self, type, value, tb):
global _spark_stack_depth
_spark_stack_depth -= 1
if _spark_stack_depth == 0:
self._context._jsc.setCallSite(None)
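# Usage note, grounded in the action methods below (e.g. collect() and take()):
# RDD actions wrap their Py4J calls in this context manager so the call site
# recorded on the Java side points at the user's Python code rather than at
# PySpark internals, for example:
#
#     with _JavaStackTrace(self.context) as st:
#         bytesInJava = self._jrdd.collect().iterator()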
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
def __repr__(self):
return self._jrdd.toString()
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self._jrdd.cache()
return self
def persist(self, storageLevel):
"""
Set this RDD's storage level to persist its values across operations after the first time
it is computed. This can only be used to assign a new storage level if the RDD does not
have a storage level set yet.
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
else:
return None
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
"""
def func(split, iterator): return imap(f, iterator)
return PipelinedRDD(self, func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator): return chain.from_iterable(imap(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator): return f(iterator)
return self.mapPartitionsWithIndex(func)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator): return ifilter(f, iterator)
return self.mapPartitions(func)
def distinct(self):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x) \
.map(lambda (x, _): x)
def sample(self, withReplacement, fraction, seed):
"""
Return a sampled subset of this RDD (relies on numpy and falls back
on default random generator if numpy is unavailable).
>>> sc.parallelize(range(0, 100)).sample(False, 0.1, 2).collect() #doctest: +SKIP
[2, 3, 20, 21, 24, 41, 42, 66, 67, 89, 90, 98]
"""
assert fraction >= 0.0, "Invalid fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed):
"""
Return a fixed-size sampled subset of this RDD (currently requires numpy).
>>> sc.parallelize(range(0, 10)).takeSample(True, 10, 1) #doctest: +SKIP
[4, 2, 1, 8, 2, 7, 0, 4, 1, 4]
"""
fraction = 0.0
total = 0
multiplier = 3.0
initialCount = self.count()
maxSelected = 0
if (num < 0):
raise ValueError
if (initialCount == 0):
return list()
if initialCount > sys.maxint - 1:
maxSelected = sys.maxint - 1
else:
maxSelected = initialCount
if num > initialCount and not withReplacement:
total = maxSelected
fraction = multiplier * (maxSelected + 1) / initialCount
else:
fraction = multiplier * (num + 1) / initialCount
total = num
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < total:
if seed > sys.maxint - 2:
seed = -1
seed += 1
samples = self.sample(withReplacement, fraction, seed).collect()
sampler = RDDSampler(withReplacement, fraction, seed+1)
sampler.shuffle(samples)
return samples[0:total]
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
return rdd
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
return RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
def _reserialize(self):
if self._jrdd_deserializer == self.ctx.serializer:
return self
else:
return self.map(lambda x: x, preservesPartitioning=True)
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc = lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5), ('little', 4), ('Mary', 1), ('was', 8), ('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self.ctx.defaultParallelism
bounds = list()
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
if numPartitions > 1:
rddSize = self.count()
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
samples = sorted(samples, reverse=(not ascending), key=keyfunc)
# we have numPartitions many parts but one of them has
# an implicit boundary
for i in range(0, numPartitions - 1):
index = (len(samples) - 1) * (i + 1) / numPartitions
bounds.append(samples[index])
def rangePartitionFunc(k):
p = 0
while p < len(bounds) and keyfunc(k) > bounds[p]:
p += 1
if ascending:
return p
else:
return numPartitions-1-p
def mapFunc(iterator):
yield sorted(iterator, reverse=(not ascending), key=lambda (k, v): keyfunc(k))
return (self.partitionBy(numPartitions, partitionFunc=rangePartitionFunc)
.mapPartitions(mapFunc,preservesPartitioning=True)
.flatMap(lambda x: x, preservesPartitioning=True))
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator): yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize([1, 2, 3]).pipe('cat').collect()
['1', '2', '3']
"""
def func(iterator):
pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
out.write(str(obj).rstrip('\n') + '\n')
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip('\n') for x in pipe.stdout)
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
yield None
self.mapPartitions(processPartition).collect() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with _JavaStackTrace(self.context) as st:
bytesInJava = self._jrdd.collect().iterator()
return list(self._collect_iterator_through_file(bytesInJava))
def _collect_iterator_through_file(self, iterator):
# Transferring lots of data through Py4J can be slow because
# socket.readline() is inefficient. Instead, we'll dump the data to a
# file and read it back.
tempFile = NamedTemporaryFile(delete=False, dir=self.ctx._temp_dir)
tempFile.close()
self.ctx._writeToFile(iterator, tempFile.name)
# Read the data into Python and deserialize it:
with open(tempFile.name, 'rb') as tempFile:
for item in self._jrdd_deserializer.load_stream(tempFile):
yield item
os.unlink(tempFile.name)
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
"""
def func(iterator):
acc = None
for obj in iterator:
if acc is None:
acc = obj
else:
acc = f(obj, acc)
if acc is not None:
yield acc
vals = self.mapPartitions(func).collect()
return reduce(f, vals)
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
# TODO: aggregate
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which corrects for bias in
estimating the standard deviation by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects for bias in
estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for (k, v) in m2.iteritems():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num):
"""
Get the top N elements from a RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().top(2)
[6, 5]
"""
def topIterator(iterator):
q = []
for k in iterator:
if len(q) < num:
heappush(q, k)
else:
heappushpop(q, k)
yield q
def merge(a, b):
return next(topIterator(a + b))
return sorted(self.mapPartitions(topIterator).reduce(merge), reverse=True)
def take(self, num):
"""
Take the first num elements of the RDD.
This currently scans the partitions *one by one*, so it will be slow if
a lot of partitions are required. In that case, use L{collect} to get
the whole RDD instead.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
"""
def takeUpToNum(iterator):
taken = 0
while taken < num:
yield next(iterator)
taken += 1
# Take only up to num elements from each partition we try
mapped = self.mapPartitions(takeUpToNum)
items = []
# TODO(shivaram): Similar to the scala implementation, update the take
# method to scan multiple splits based on an estimate of how many elements
# we have per-split.
with _JavaStackTrace(self.context) as st:
for partition in range(mapped._jrdd.splits().size()):
partitionsToTake = self.ctx._gateway.new_array(self.ctx._jvm.int, 1)
partitionsToTake[0] = partition
iterator = mapped._jrdd.collectPartitions(partitionsToTake)[0].iterator()
items.extend(mapped._collect_iterator_through_file(iterator))
if len(items) >= num:
break
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
"""
return self.take(1)[0]
def saveAsTextFile(self, path):
"""
Save this RDD as a text file, using string representations of elements.
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
yield x.encode("utf-8")
keyed = PipelinedRDD(self, func)
keyed._bypass_serializer = True
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for (k, v) in iterator:
m[k] = v if k not in m else func(m[k], v)
yield m
def mergeMaps(m1, m2):
for (k, v) in m2.iteritems():
m1[k] = v if k not in m1 else func(m1[k], v)
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in other have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
def partitionBy(self, numPartitions, partitionFunc=hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> set(sets[0]).intersection(set(sets[1]))
set([])
"""
if numPartitions is None:
numPartitions = self.ctx.defaultParallelism
# Transferring O(n) objects to Java is too expensive. Instead, we'll
# form the hash buckets in Python, transferring O(numPartitions) objects
# to Java. Each object is a (splitNumber, [objects]) pair.
outputSerializer = self.ctx._unbatched_serializer
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
for (k, v) in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
for (split, items) in buckets.iteritems():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = PipelinedRDD(self, add_shuffle_key)
keyed._bypass_serializer = True
with _JavaStackTrace(self.context) as st:
pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
partitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = pairRDD.partitionBy(partitioner).values()
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
# This is required so that id(partitionFunc) remains unique, even if
# partitionFunc is a lambda:
rdd._partitionFunc = partitionFunc
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def f(x): return x
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self.ctx.defaultParallelism
def combineLocally(iterator):
combiners = {}
for x in iterator:
(k, v) = x
if k not in combiners:
combiners[k] = createCombiner(v)
else:
combiners[k] = mergeValue(combiners[k], v)
return combiners.iteritems()
locally_combined = self.mapPartitions(combineLocally)
shuffled = locally_combined.partitionBy(numPartitions)
def _mergeCombiners(iterator):
combiners = {}
for (k, v) in iterator:
if k not in combiners:
combiners[k] = v
else:
combiners[k] = mergeCombiners(combiners[k], v)
return combiners.iteritems()
return shuffled.mapPartitions(_mergeCombiners)
def foldByKey(self, zeroValue, func, numPartitions=None):
"""
Merge the values for each key using an associative function "func" and a neutral "zeroValue"
which may be added to the result an arbitrary number of times, and must not change
the result (e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> rdd.foldByKey(0, add).collect()
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda v: func(zeroValue, v), func, func, numPartitions)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD into numPartitions partitions.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(x.groupByKey().collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
return a + b
return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
numPartitions)
# TODO: add tests
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
"""
flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
"""
map_values_fn = lambda (k, v): (k, f(v))
return self.map(map_values_fn, preservesPartitioning=True)
# TODO: support varargs cogroup of several RDDs.
def groupWith(self, other):
"""
Alias for cogroup.
"""
return self.cogroup(other)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as well
as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.cogroup(y).collect())
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup(self, other, numPartitions)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching key
in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
filter_func = lambda (key, vals): len(vals[0]) > 0 and len(vals[1]) == 0
map_func = lambda (key, vals): [(key, val) for val in vals[0]]
return self.cogroup(other, numPartitions).filter(filter_func).flatMap(map_func)
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
rdd = other.map(lambda x: (x, True)) # note: here 'True' is just a placeholder
return self.map(lambda x: (x, True)).subtractByKey(rdd).map(lambda tpl: tpl[0]) # note: here 'True' is just a placeholder
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> sorted(x.cogroup(y).collect())
[(0, ([0], [0])), (1, ([1], [1])), (2, ([], [2])), (3, ([], [3])), (4, ([2], [4]))]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD. Internally, this uses
a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider using `coalesce`,
which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the first element in each RDD,
second element in each RDD, etc. Assumes that the two RDDs have the same number of
partitions and the same number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def name(self):
"""
Return the name of this RDD.
"""
name_ = self._jrdd.name()
if not name_:
return None
return name_.encode('utf-8')
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.setName('RDD1')
>>> rdd1.name()
'RDD1'
"""
self._jrdd.setName(name)
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if not debug_string:
return None
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, 1)
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
# TODO: `lookup` is disabled because we can't make direct comparisons based
# on the key; we need to compare the hash of the key to the hash of the
# keys in the pairs. This could be an expensive operation, since those
# hashes aren't retained.
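    # A hedged sketch (not part of this file's API): until `lookup` is supported, the
    # values for a single key can be emulated with a plain filter over the pair RDD.
    # The names `rdd` and `wanted` below are illustrative, and this scans the whole
    # dataset instead of using any partitioning information:
    #
    #     def lookup_values(rdd, wanted):
    #         """Collect every value whose key equals `wanted` (full-scan fallback)."""
    #         return rdd.filter(lambda kv: kv[0] == wanted).map(lambda kv: kv[1]).collect()
    #
    #     # e.g. lookup_values(sc.parallelize([("a", 1), ("b", 2), ("a", 3)]), "a") -> [1, 3]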
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
serializer = NoOpSerializer()
else:
serializer = self.ctx.serializer
command = (self.func, self._prev_jrdd_deserializer, serializer)
pickled_command = CloudPickleSerializer().dumps(command)
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in self.ctx._pickled_broadcast_vars],
self.ctx._gateway._gateway_client)
self.ctx._pickled_broadcast_vars.clear()
class_tag = self._prev_jrdd.classTag()
env = MapConverter().convert(self.ctx.environment,
self.ctx._gateway._gateway_client)
includes = ListConverter().convert(self.ctx._python_includes,
self.ctx._gateway._gateway_client)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_command), env, includes, self.preservesPartitioning,
self.ctx.pythonExec, broadcast_vars, self.ctx._javaAccumulator,
class_tag)
self._jrdd_val = python_rdd.asJavaRDD()
return self._jrdd_val
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs,optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
/**
 * Dynamically load Vuex store modules via webpack's require.context.
 * File names in the modules directory must match the names of the exported objects.
 */
const requireModule = require.context('./modules/', false, /\.js$/)
let modules = {}
requireModule.keys().forEach(fileName => {
const moduleName = fileName.replace(/(\.\/|\.js)/g, '')
modules[moduleName] = requireModule(fileName).default
})
export default modules
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import actions_pb2 as actions__pb2
class OnlineActionHandlerStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self._remote_execute = channel.unary_unary(
'/OnlineActionHandler/_remote_execute',
request_serializer=actions__pb2.OnlineActionRequest.SerializeToString,
response_deserializer=actions__pb2.OnlineActionResponse.FromString,
)
self._remote_reload = channel.unary_unary(
'/OnlineActionHandler/_remote_reload',
request_serializer=actions__pb2.ReloadRequest.SerializeToString,
response_deserializer=actions__pb2.ReloadResponse.FromString,
)
self._health_check = channel.unary_unary(
'/OnlineActionHandler/_health_check',
request_serializer=actions__pb2.HealthCheckRequest.SerializeToString,
response_deserializer=actions__pb2.HealthCheckResponse.FromString,
)
class OnlineActionHandlerServicer(object):
# missing associated documentation comment in .proto file
pass
def _remote_execute(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def _remote_reload(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def _health_check(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OnlineActionHandlerServicer_to_server(servicer, server):
rpc_method_handlers = {
'_remote_execute': grpc.unary_unary_rpc_method_handler(
servicer._remote_execute,
request_deserializer=actions__pb2.OnlineActionRequest.FromString,
response_serializer=actions__pb2.OnlineActionResponse.SerializeToString,
),
'_remote_reload': grpc.unary_unary_rpc_method_handler(
servicer._remote_reload,
request_deserializer=actions__pb2.ReloadRequest.FromString,
response_serializer=actions__pb2.ReloadResponse.SerializeToString,
),
'_health_check': grpc.unary_unary_rpc_method_handler(
servicer._health_check,
request_deserializer=actions__pb2.HealthCheckRequest.FromString,
response_serializer=actions__pb2.HealthCheckResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'OnlineActionHandler', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BatchActionHandlerStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self._remote_execute = channel.unary_unary(
'/BatchActionHandler/_remote_execute',
request_serializer=actions__pb2.BatchActionRequest.SerializeToString,
response_deserializer=actions__pb2.BatchActionResponse.FromString,
)
self._remote_reload = channel.unary_unary(
'/BatchActionHandler/_remote_reload',
request_serializer=actions__pb2.ReloadRequest.SerializeToString,
response_deserializer=actions__pb2.ReloadResponse.FromString,
)
self._health_check = channel.unary_unary(
'/BatchActionHandler/_health_check',
request_serializer=actions__pb2.HealthCheckRequest.SerializeToString,
response_deserializer=actions__pb2.HealthCheckResponse.FromString,
)
class BatchActionHandlerServicer(object):
# missing associated documentation comment in .proto file
pass
def _remote_execute(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def _remote_reload(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def _health_check(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_BatchActionHandlerServicer_to_server(servicer, server):
rpc_method_handlers = {
'_remote_execute': grpc.unary_unary_rpc_method_handler(
servicer._remote_execute,
request_deserializer=actions__pb2.BatchActionRequest.FromString,
response_serializer=actions__pb2.BatchActionResponse.SerializeToString,
),
'_remote_reload': grpc.unary_unary_rpc_method_handler(
servicer._remote_reload,
request_deserializer=actions__pb2.ReloadRequest.FromString,
response_serializer=actions__pb2.ReloadResponse.SerializeToString,
),
'_health_check': grpc.unary_unary_rpc_method_handler(
servicer._health_check,
request_deserializer=actions__pb2.HealthCheckRequest.FromString,
response_serializer=actions__pb2.HealthCheckResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'BatchActionHandler', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
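# A hedged usage sketch (not part of the generated module): a client could invoke the
# OnlineActionHandler service roughly like this. The target address and the empty request
# below are illustrative assumptions rather than values defined in actions.proto:
#
#     channel = grpc.insecure_channel('localhost:50051')
#     stub = OnlineActionHandlerStub(channel)
#     response = stub._remote_execute(actions__pb2.OnlineActionRequest())
#     print(response)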
|
/* Solution1
exercise
[1,0,2,3,0,4,5,0] , len = 8
0 2 3 4 6 7
in = [1,0,2,3,4,5,0,8] , len = 8
0 2 3 4 5 6 8
out = [1,0,0,2,3,4,5,0]
in = [1,2,3]
0 1 2
algo:
s1. iterate left to right, adding 1 to the count for a non-zero element and 2 for a zero; stop once the count reaches n-1
s2. set wr_idx to n-1 and rd_idx to where step 1 stopped, then walk rd_idx back to 0, writing each element once and each zero twice
*/
class Solution1 {
public:
void duplicateZeros(vector<int>& arr) {
int i, cnt=-1, n = arr.size();
for(i = 0 ; i < n && cnt<(n-1) ; i++){
if(arr[i]==0)
cnt+=2;
else
cnt+=1;
}
int wr_i = n-1;
if(cnt==n){ //special case, 0 at the end
arr[wr_i--] = arr[i-1];
i--;
}
for(i=i-1; i>=0 ; i--){
if(arr[i]==0){
arr[wr_i--] = arr[i];
}
arr[wr_i--] = arr[i];
}
}
};
/* Solution 2: same idea, more concise.
   The write index j starts from the back of the extended array (length n + number of zeros),
   but it only writes into arr while j < n.
*/
class Solution{
public:
void duplicateZeros(vector<int>& arr) {
int n = arr.size(), j = n + count(arr.begin(), arr.end(), 0) - 1;
for(auto it = arr.rbegin(); it!=arr.rend() ; it++){
if(*it==0){
if(j<n) arr[j] = 0;
j--;
}
if(j<n) arr[j] = *it;
j--;
}
}
};
TEST(Solution, Test1){
Solution s;
vector<int> q, exp;
q = {1,0,2,3,0,4,5,0};
exp = {1,0,0,2,3,0,0,4};
s.duplicateZeros(q);
EXPECT_EQ(q, exp);
q = {1,2,3};
exp = {1,2,3};
s.duplicateZeros(q);
EXPECT_EQ(q, exp);
}
|
#!/usr/bin/env python
# Threat Wrangler
# Created by: nkphysics https://github.com/nkphysics
# License: Apache 2.0
# Dependent libraries
import pandas as pd
import argparse as ap
import os
import pathlib as pl
import requests
import json
import datetime as dt
class Threat_Wrangler(object):
def __init__(self):
store_stat = os.path.exists("store.csv")
api_urls = [r"https://otx.alienvault.com/api/v1", r"https://threatfox-api.abuse.ch/api/v1/"]
        self.store = pd.read_csv("store.csv", index_col=None) if store_stat else pd.DataFrame({"URL": api_urls, "API_KEY": [0, 0]})
self.pulses = []
log_stat = os.path.exists("LOG.csv")
        self.log = pd.read_csv("LOG.csv") if log_stat else pd.DataFrame({"Title": [], "FILE_PATH": [], "Date Added": []})
def check_store(self):
"""
# Checks the API key store index for API Keys
"""
for i in self.store.index:
if self.store.loc[i, "API_KEY"] == 0:
print("No API KEY on record!")
key_in = str(
input("Enter API KEY for " + str(self.store.loc[i, "URL"]) + ":> ")
)
self.store.loc[i, "API_KEY"] = key_in
self.store.to_csv("store.csv", index=False)
else:
pass
def writeout(self, frame, name):
ct = name
base_dir = os.getcwd()
date = dt.datetime.now()
date_str = date.strftime("%Y-%m-%d_T%H%M%S")
outpath = pl.Path(base_dir, str(ct) + str(date_str) + ".csv")
frame.to_csv(outpath, index=False)
print("IOC file written out at: " + str(outpath))
nl = pd.Series(
data=[
ct + date_str,
outpath,
date_str,
],
index=["Title", "FILE_PATH", "Date Added"],
)
self.log = self.log.append(nl, ignore_index=True)
self.log.to_csv("LOG.csv", index=False)
def pullOTX(self):
# Pulls all of the subscribed pulses for a user
r = requests.get(
self.store.loc[0, "URL"] + "/pulses/subscribed?page=1&limit=100",
headers={"X-OTX-API-KEY": self.store.loc[0, "API_KEY"]},
)
pull0 = 0
if r.status_code == 200:
pull0 = r.text
else:
print("Issue in Retrieval from AlienVault")
while pull0:
print("*** Pulling Pulses ***")
pulses = json.loads(pull0)
if "results" in pulses:
for i in pulses["results"]:
pulse_title = i["name"]
self.pulses.append(i)
pull0 = None
break # Only here since there are so many IOCs being pulled that the actual pulling takes forever
if "next" in pulses:
if pulses["next"]:
pull0 = requests.get(
pulses["next"],
headers={"X-OTX-API-KEY": self.store.loc[0, "API_KEY"]},
).text
for i in self.pulses:
self.IOC_write(i)
print("All IOCs Written and Logged")
def pull_fox(self, time):
# Pulls all IOCs from threatfox
        if time is None:
            time = 1
print("Pulling ThreatFox IOCs")
r = requests.post(
self.store.loc[1, "URL"],
headers={"X-API-TOKEN": ""},
data=json.dumps({"query": "get_iocs", "days": time}),
)
pull0 = 0
if r.status_code == 200:
pull0 = r.text
else:
print("Issue with Retrieval from ThreatFox")
load = json.loads(pull0)
iocs = []
tags = []
for i in load["data"]:
if i["ioc_type"] == "ip:port":
ioc = i["ioc"].split(":")[0]
else:
ioc = i["ioc"]
iocs.append(ioc)
tags.append(i["malware_printable"])
df = pd.DataFrame({"Indicator": iocs, "Tag": tags})
self.writeout(df, "ThreatFox")
def show_ps(self):
# shows the pulses that were pulled (Currently not in use, but useful for future functionality)
ioc_c = 0
print("Retrieved Pulses: ")
for i in self.pulses:
title = i["name"]
author = i["author_name"]
created = i["created"]
print(str(ioc_c) + ". " + str(title))
print(" // Created:" + str(created))
print(" // Author:" + str(author))
ioc_c = ioc_c + 1
def IOC_write(self, i):
# Writes the IOC .csv files and updates the log file
title = i["name"]
tags = i["tags"]
print("Pulse: " + str(title))
iocs = []
print("Suggested Tags: " + str(tags))
for k in i["indicators"]:
iocs.append(k["indicator"])
ttags = []
ct = str(input("Tag Name: "))
while len(ttags) < len(iocs):
ttags.append(ct)
ioc_df = pd.DataFrame({"Indicator": iocs, "Tag": ttags})
self.writeout(ioc_df, ttags[0])
    def pull(self, source, time):
        # Dispatch a pull to the requested threat-intel source
        self.check_store()
        av = ['otx', 'OTX', 'alienvault', 'AlienVault', 'av', 'AV']
        tf = ['threatfox', 'threat fox', 'ThreatFox', 'tf', 'TF']
        if source in av:
            self.pullOTX()
        elif source in tf:
            self.pull_fox(time)
        else:
            print("Source Not Found")
def main(self):
p = ap.ArgumentParser()
p.add_argument("command", type=str)
p.add_argument("source", type=str, nargs="?")
p.add_argument("time_frame", type=int, nargs="?")
args = p.parse_args()
if args.command == "Pull" or args.command == "pull":
self.pull(args.source, args.time_frame)
else:
print("Unknown Command: " + str(args.command))
    def cat_IOCs(self):
        # Writes out any pulled pulses whose titles are not already in the log
        logged_titles = list(self.log["Title"])
        for i in self.pulses:
            if i["name"] not in logged_titles:
                self.IOC_write(i)
        print("All IOCs Written and Logged")
if __name__ == "__main__":
tw = Threat_Wrangler()
tw.main()
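    # Illustrative invocations (the script file name is an assumption, not defined here):
    #   python threat_wrangler.py pull otx
    #   python threat_wrangler.py pull tf 3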
|
define({"topics":[{"title":"Edition-Section System","shortdesc":"<p class=\"shortdesc\">File organization depends on two basic folder types<\/p>","href":"source\/t_edition_section_system.html","attributes":{"data-id":"t_edition_section_system"},"menu":{"hasChildren":true},"tocID":"t_edition_section_system-d909e2372","next":"t_edition_section_system-d909e2372"},{"title":"Folder names","shortdesc":"<p class=\"shortdesc\">As the <a href=\"glossary\/g_ocr.html\" title=\"Optical Character Recognition, a method of identifying letter forms in images and recovering their textual data as text.\"><dfn class=\"term\">OCR<\/dfn><\/a> workflow passes through its various stages, production moves into specific folders for each stage. Their names and contents are given below: <\/p>","href":"source\/r_folder_names.html","attributes":{"data-id":"folder_names"},"menu":{"hasChildren":false},"tocID":"folder_names-d909e2534","topics":[]},{"title":"Repositories","shortdesc":"<p class=\"shortdesc\">A guide to the different repositories used to store <a href=\"glossary\/g_ocr_project.html\" title=\"A collection of files used by ABBYY FineReader and stored as a single group.\"><dfn class=\"term\">ocr-project<\/dfn><\/a> data.<\/p>","href":"source\/c_repos_all.html","attributes":{"data-id":"repos-all"},"menu":{"hasChildren":true},"tocID":"repos-all-d909e2590","next":"repos-all-d909e2590"},{"title":"Setting Up the Repositories","shortdesc":"<p class=\"shortdesc\">Create local copies of the remote repositories<\/p>","href":"source\/t_setting_up_the_repositories.html","attributes":{"data-id":"t_setting_up_the_repositories"},"menu":{"hasChildren":true},"tocID":"t_setting_up_the_repositories-d909e3666","next":"t_setting_up_the_repositories-d909e3666"}]});
|
import { module, test } from 'qunit';
import { setupTest } from 'ember-qunit';
module('Unit | Controller | events/view/tickets/add order', function(hooks) {
setupTest(hooks);
test('it exists', function(assert) {
let controller = this.owner.lookup('controller:events/view/tickets/add-order');
assert.ok(controller);
});
});
|
import { logSms, logError } from '../../../shared/logger';
import ApigError from '../ApigError';
import createMessage from '../dynamodb/operations/createMessage';
import invokePusher from './invokePusher';
// Processes an inbound message
export default function processMessage({ id, sender, receiver, content, timestamp }) {
if (!(id && sender && receiver && content && timestamp)) {
logError('Invalid sms, responding 400', JSON.stringify({ id, sender, receiver, content, timestamp }));
return Promise.reject(new ApigError(400, 'Invalid sms'));
}
logSms('Processing SMS from', sender, 'to', receiver);
return Promise.all([
invokePusher({ sender, content }), // Send websocket push
createMessage({ id, sender, receiver, content, timestamp }) // Add to database
  ]).then(() => true); // Save bandwidth?
}
|
from unittest.mock import Mock
import asyncio
import websockets
from interactive_python import Connection, GzipEncoding
from ._util import AsyncTestCase, async_test, resolve, fixture
sample_method = '{"id":0,"type":"method","method":"some_method",' \
'"params":{"foo": 42}}'
class TestInteractiveConnection(AsyncTestCase):
def setUp(self):
super(TestInteractiveConnection, self).setUp()
self._mock_socket = Mock()
self._mock_socket.close = asyncio.Future(loop=self._loop)
self._mock_socket.close.set_result(None)
send_future = asyncio.Future(loop=self._loop)
send_future.set_result(None)
self._mock_socket.send.return_value = send_future
self._queue = asyncio.Queue(loop=self._loop)
self._connection = Connection(socket=self._mock_socket, loop=self._loop)
self._mock_socket.recv = self._queue.get
self._queue.put_nowait('{"type":"method","method":"hello","seq":1}')
def tearDown(self):
if self._connection._recv_task is not None:
self._connection._recv_task.cancel()
super(TestInteractiveConnection, self).tearDown()
async def _upgrade_to_gzip(self):
result = await asyncio.gather(
self._connection.set_compression(GzipEncoding()),
self._queue.put(
'{"id":0,"type":"reply","result":{"scheme":"gzip"},"seq":2}'),
loop=self._loop)
self.assertTrue(result[0])
self.assertEqual('gzip', self._connection._encoding.name())
@async_test
def test_sends_method_calls(self):
yield from self._connection.connect()
results = yield from asyncio.gather(
self._connection.call('square', 2),
self._queue.put('{"id":0,"type":"reply","result":4,"seq":2}'),
loop=self._loop)
self.assertEqual(4, results[0])
self.assertJsonEqual(
self._mock_socket.send.call_args[0][0],
{'type': 'method', 'method': 'square',
'params': 2, 'id': 0, 'seq': 0}
)
@async_test
def test_times_out_calls(self):
yield from self._connection.connect()
with self.assertRaises(asyncio.TimeoutError):
yield from self._connection.call('square', 2, timeout=0.1)
@async_test
def test_upgrades_compression(self):
yield from self._connection.connect()
yield from self._upgrade_to_gzip()
result = yield from asyncio.gather(
self._connection.call('square', 2),
self._queue.put(fixture('gzipped_square_reply', 'rb')),
loop=self._loop)
        self.assertEqual(4, result[0])
self.assertIsInstance(self._mock_socket.send.call_args[0][0], bytes)
@async_test
def test_does_not_upgrade_if_the_server_denies(self):
yield from self._connection.connect()
result = yield from asyncio.gather(
self._connection.set_compression(GzipEncoding()),
self._queue.put(
'{"id":0,"type":"reply","result":{"scheme":"text"},"seq":2}'),
loop=self._loop)
self.assertFalse(result[0])
self.assertEqual('text', self._connection._encoding.name())
@async_test
def test_falls_back_if_given_unknown_bytes(self):
yield from self._connection.connect()
yield from self._upgrade_to_gzip()
self.assertEqual('gzip', self._connection._encoding.name())
yield from self._queue.put(fixture('gzipped_square_reply', 'rb')[::-1])
yield from asyncio.sleep(0, loop=self._loop)
yield from self._queue.put('{"id":0,"type":"reply",'
'"result":{"scheme":"text"},"seq":2}')
yield from asyncio.sleep(0, loop=self._loop)
self.assertEqual('text', self._connection._encoding.name())
self.assertJsonEqual(
self._mock_socket.send.call_args[0][0],
{'type': 'method', 'method': 'setCompression', 'params': {
'scheme': ['text']}, 'id': 1, 'seq': 2})
@async_test
def test_queues_packets(self):
yield from self._connection.connect()
self._queue.put_nowait(sample_method)
has_packet = yield from self._connection.has_packet()
self.assertTrue(has_packet)
self.assertJsonEqual(self._connection.get_packet().data, '{"foo":42}')
self.assertIsNone(self._connection.get_packet())
@async_test
def test_handles_connection_closed(self):
yield from self._connection.connect()
def raise_closed():
raise websockets.ConnectionClosed(4000, "")
yield from asyncio.sleep(0)
self._mock_socket.recv = raise_closed
self._queue.put_nowait(sample_method)
has_packet = yield from self._connection.has_packet()
self.assertTrue(has_packet) # reads what we pushed to get unblocked
has_packet = yield from self._connection.has_packet()
self.assertFalse(has_packet) # gets a connection closed
|
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import CardsList, PackProfile, PDF, RenderSpec
class UserSerializer(serializers.HyperlinkedModelSerializer):
packprofiles = serializers.HyperlinkedRelatedField(many=True,
view_name='packprofile-detail',
read_only=True)
cardslists = serializers.HyperlinkedRelatedField(many=True,
view_name='cardslist-detail',
read_only=True)
renderspecs = serializers.HyperlinkedRelatedField(many=True,
view_name='renderspec-detail',
read_only=True)
pdfs = serializers.HyperlinkedRelatedField(many=True,
view_name='pdf-detail',
read_only=True)
class Meta:
model = User
fields = ('url', 'id', 'username', 'packprofiles', 'cardslists', 'renderspecs', 'pdfs')
class BaseSerializerMixin:
owner = serializers.ReadOnlyField(source='owner.username')
common_fields = ('url', 'id', 'owner', 'created')
class PackProfileSerializer(serializers.HyperlinkedModelSerializer, BaseSerializerMixin):
class Meta:
model = PackProfile
fields = BaseSerializerMixin.common_fields + ('value', 'color_name')
class CardsListSerializer(serializers.HyperlinkedModelSerializer, BaseSerializerMixin):
class Meta:
model = CardsList
fields = BaseSerializerMixin.common_fields + ('name', 'cards', 'is_black', 'profile')
class RenderSpecSerializer(serializers.HyperlinkedModelSerializer, BaseSerializerMixin):
class Meta:
model = RenderSpec
fields = BaseSerializerMixin.common_fields + ('name', 'append_color', 'packs')
class PDFSerializer(serializers.HyperlinkedModelSerializer, BaseSerializerMixin):
# download = serializers.HyperlinkedRelatedField()
class Meta:
model = PDF
fields = BaseSerializerMixin.common_fields + ('uuid', 'render_spec')
|
// Copyright (c) 2011-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOINNOOMY_QT_ADDRESSTABLEMODEL_H
#define BITCOINNOOMY_QT_ADDRESSTABLEMODEL_H
#include <QAbstractTableModel>
#include <QStringList>
class AddressTablePriv;
class WalletModel;
class CWallet;
/**
Qt model of the address book in the core. This allows views to access and modify the address book.
*/
class AddressTableModel : public QAbstractTableModel
{
Q_OBJECT
public:
explicit AddressTableModel(CWallet* wallet, WalletModel* parent = 0);
~AddressTableModel();
enum ColumnIndex {
Label = 0, /**< User specified label */
Address = 1 /**< BitcoinNoomy address */
};
enum RoleIndex {
TypeRole = Qt::UserRole /**< Type of address (#Send or #Receive) */
};
/** Return status of edit/insert operation */
enum EditStatus {
OK, /**< Everything ok */
NO_CHANGES, /**< No changes were made during edit operation */
INVALID_ADDRESS, /**< Unparseable address */
DUPLICATE_ADDRESS, /**< Address already in address book */
WALLET_UNLOCK_FAILURE, /**< Wallet could not be unlocked to create new receiving address */
KEY_GENERATION_FAILURE /**< Generating a new public key for a receiving address failed */
};
static const QString Send; /**< Specifies send address */
static const QString Receive; /**< Specifies receive address */
/** @name Methods overridden from QAbstractTableModel
@{*/
int rowCount(const QModelIndex& parent) const;
int columnCount(const QModelIndex& parent) const;
QVariant data(const QModelIndex& index, int role) const;
bool setData(const QModelIndex& index, const QVariant& value, int role);
QVariant headerData(int section, Qt::Orientation orientation, int role) const;
QModelIndex index(int row, int column, const QModelIndex& parent) const;
bool removeRows(int row, int count, const QModelIndex& parent = QModelIndex());
Qt::ItemFlags flags(const QModelIndex& index) const;
/*@}*/
/* Add an address to the model.
Returns the added address on success, and an empty string otherwise.
*/
QString addRow(const QString& type, const QString& label, const QString& address);
/* Look up label for address in address book, if not found return empty string.
*/
QString labelForAddress(const QString& address) const;
/* Look up row index of an address in the model.
Return -1 if not found.
*/
int lookupAddress(const QString& address) const;
EditStatus getEditStatus() const { return editStatus; }
private:
WalletModel* walletModel;
CWallet* wallet;
AddressTablePriv* priv;
QStringList columns;
EditStatus editStatus;
/** Notify listeners that data changed. */
void emitDataChanged(int index);
public Q_SLOTS:
/* Update address list from core.
*/
void updateEntry(const QString& address, const QString& label, bool isMine, const QString& purpose, int status);
friend class AddressTablePriv;
};
#endif // BITCOINNOOMY_QT_ADDRESSTABLEMODEL_H
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copies a handful of files over to the remote filesystem.
# The source for this operation ought to be part of the
# build process; this is currently a bit ad-hoc.
import os
import posixpath
import logging
import shutil
import django.core.serializers
from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User
from hadoop import cluster
import jobsub.conf
import jobsub.models
from jobsub.submit import Submission
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
# The setup_level value for the CheckForSetup table
JOBSUB_SETUP_LEVEL = 200 # Stands for Hue 2.0.0
class Command(NoArgsCommand):
"""Creates file system for testing."""
def handle_noargs(self, **options):
remote_fs = cluster.get_hdfs()
if hasattr(remote_fs, "setuser"):
remote_fs.setuser(remote_fs.DEFAULT_USER)
LOG.info("Using remote fs: %s" % str(remote_fs))
# Create remote data directory if needed
remote_data_dir = Submission.create_data_dir(remote_fs)
# Copy over examples/
for dirname in ("examples",):
local_dir = os.path.join(jobsub.conf.LOCAL_DATA_DIR.get(), dirname)
remote_dir = posixpath.join(remote_data_dir, dirname)
copy_dir(local_dir, remote_fs, remote_dir)
# Copy over sample_data/
copy_dir(jobsub.conf.SAMPLE_DATA_DIR.get(),
remote_fs,
posixpath.join(remote_data_dir, "sample_data"))
# Write out the models too
fixture_path = os.path.join(os.path.dirname(__file__), "..", "..", "fixtures", "example_data.xml")
examples = django.core.serializers.deserialize("xml", open(fixture_path))
sample_user = None
sample_oozie_designs = []
sample_oozie_abstract_actions = {} # pk -> object
sample_oozie_concrete_actions = {} # oozieaction_ptr_id -> object
for example in examples:
if isinstance(example.object, User):
sample_user = example
elif isinstance(example.object, jobsub.models.OozieDesign):
sample_oozie_designs.append(example)
elif type(example.object) in (jobsub.models.OozieMapreduceAction,
jobsub.models.OozieJavaAction,
jobsub.models.OozieStreamingAction):
key = example.object.oozieaction_ptr_id
sample_oozie_concrete_actions[key] = example
elif type(example.object) is jobsub.models.OozieAction:
key = example.object.pk
sample_oozie_abstract_actions[key] = example
else:
raise Exception(_("Unexpected fixture type."))
if sample_user is None:
raise Exception(_("Expected sample user fixture."))
# Create the sample user if it doesn't exist
try:
sample_user.object = User.objects.get(username=sample_user.object.username)
except User.DoesNotExist:
sample_user.object.pk = None
sample_user.object.id = None
sample_user.save()
# Create the designs
for d in sample_oozie_designs:
#
# OozieDesign ----many-to-one---> OozieAction
#
# OozieMapreduceAction -----one-to-one---> OozieAction
# OozieStreamingAction -----one-to-one---> OozieAction
# OozieJavaAction -----one-to-one---> OozieAction
#
# We find the OozieAction pk and link everything back together
#
abstract_action_id = d.object.root_action_id
abstract_action = sample_oozie_abstract_actions[abstract_action_id]
concrete_action = sample_oozie_concrete_actions[str(abstract_action_id)]
concrete_action.object.action_type = abstract_action.object.action_type
concrete_action.object.pk = None
concrete_action.object.id = None
concrete_action.object.save()
d.object.id = None
d.object.pk = None
d.object.owner_id = sample_user.object.id
d.object.root_action = concrete_action.object
d.object.save()
# Upon success, write to the database
try:
entry = jobsub.models.CheckForSetup.objects.get(id=1)
except jobsub.models.CheckForSetup.DoesNotExist:
entry = jobsub.models.CheckForSetup(id=1)
entry.setup_run = True
entry.setup_level = JOBSUB_SETUP_LEVEL
entry.save()
def has_been_setup(self):
"""
Returns true if we think job sub examples have been setup.
"""
try:
entry = jobsub.models.CheckForSetup.objects.get(id=1)
except jobsub.models.CheckForSetup.DoesNotExist:
return False
return entry.setup_run and entry.setup_level >= JOBSUB_SETUP_LEVEL
def copy_dir(local_dir, remote_fs, remote_dir):
# Hadoop mkdir is always recursive.
remote_fs.mkdir(remote_dir)
for f in os.listdir(local_dir):
local_src = os.path.join(local_dir, f)
remote_dst = posixpath.join(remote_dir, f)
copy_file(local_src, remote_fs, remote_dst)
CHUNK_SIZE = 65536
def copy_file(local_src, remote_fs, remote_dst):
if remote_fs.exists(remote_dst):
LOG.info("%s already exists. Skipping." % remote_dst)
return
else:
LOG.info("%s does not exist. trying to copy" % remote_dst)
if os.path.isfile(local_src):
src = file(local_src)
try:
dst = remote_fs.open(remote_dst, "w")
try:
shutil.copyfileobj(src, dst, CHUNK_SIZE)
LOG.info("Copied %s -> %s" % (local_src, remote_dst))
finally:
dst.close()
finally:
src.close()
else:
LOG.info("Skipping %s (not a file)" % local_src)
|
import React, { Component } from 'react';
// import { connect } from 'react-redux'
import { Button, Layout, message } from 'antd';
// import { hashHistory } from 'react-router'
import {
  fetchModuleList, // fetch the module list
  fetchModuleDelete, // delete a module
  fetchModuleDetail, // fetch module detail
  fetchChangeModuleStatus, // change module status
  fetchModuleUpdateDetail, // update module detail
  fetchModuleAdd, // add a module
  fetchButtonList, // fetch the button permission list
} from '@apis/manage';
import ModuleList from './moduleList';
import ModuleModal from './modal/moduleAdd'; // add/edit module modal
import ButtonModal from './modal/buttonModal'; // button permission list modal
import AddButtonModal from './modal/addButtonModal'; // add/edit button permission modal
const { Content } = Layout;
// Declare the component and export it
export default class userManage extends Component {
  // Initialize page state and bind event handler methods
constructor(props) {
super(props);
this.state = {
title: '新增菜单',
pid: '',
itemId: '',
type: '',
values: {
id: '',
key: '',
module: '',
name: '',
sort: '',
type: '',
},
moduleDetailResult: {
id: '',
key: '',
module: '',
name: '',
sort: '',
type: '',
},
Visible: false,
buttonVisible: false,
addButtonVisible: false,
      buttonEditState: '', // whether the button modal is in add or edit mode
      butttonListLoading: false, // button list loading state
      buttonEditData: {},
      buttonDataSource: [], // button list data
      tableListLoading: false, // table list loading state
      tableDataSource: [], // table list data
};
this.moduleAdd = this.moduleAdd.bind(this);
this.handleOk = this.handleOk.bind(this);
this.handleCancel = this.handleCancel.bind(this);
this.handleDelete = this.handleDelete.bind(this);
this.handleModify = this.handleModify.bind(this);
this.handleAddNode = this.handleAddNode.bind(this);
this.handleChangeStatus = this.handleChangeStatus.bind(this);
this.buttonList = this.buttonList.bind(this);
this.addButton = this.addButton.bind(this);
this.handleAdd = this.handleAdd.bind(this);
this.handleAddCancel = this.handleAddCancel.bind(this);
this.cancelButton = this.cancelButton.bind(this);
this.editButton = this.editButton.bind(this);
}
  // Lifecycle hook invoked before the component mounts
componentWillMount() {
    // Only super administrators should see module management permissions
if (!(sessionStorage.getItem('roleName') === '0')) {
// if (!(sessionStorage.getItem('roleName') === '超级管理员' && sessionStorage.getItem('usercode') === 'admin')) {
// hashHistory.goBack()
// return
}
this.getTableList();
}
  // Delete a module
handleDelete(id) {
fetchModuleDelete({ id: id }, (result) => {
message.success(result.msg);
this.getTableList();
});
}
  // Edit a module
handleModify(id, parentid) {
fetchModuleDetail({ id: id }, (result) => {
this.setState({
Visible: true,
title: '修改菜单',
pid: parentid,
itemId: id,
type: 'modify',
});
});
}
  // Change module status
handleChangeStatus(id, val) {
fetchChangeModuleStatus({ id: id, status: val }, (result) => {
this.getTableList();
});
}
  // Add a module
moduleAdd() {
this.setState({
Visible: true,
title: '新增菜单',
pid: '',
type: 'add',
});
}
  // Add a child menu under a module
handleAddNode(id) {
this.setState({
Visible: true,
title: '新增子菜单',
pid: id,
type: 'add',
});
}
  // Called after the form is saved
handleOk() {
this.getTableList();
this.setState({ Visible: false });
}
  // Hide the add/edit modal
handleCancel() {
this.setState({ Visible: false, type: 'add' });
}
  // Show the button permission modal
buttonList(id, parentid) {
this.setState(
{
buttonVisible: true,
pid: parentid,
itemId: id,
},
() => {
this.getButtonList();
},
);
}
  // Close the button permission list
cancelButton() {
this.setState({
buttonVisible: false,
});
}
  // Add a button permission
addButton() {
this.setState({
buttonEditState: 'add',
addButtonVisible: true,
title: '新增按钮权限',
});
}
  // Add or edit a button permission
handleAdd(params) {
if (this.state.buttonEditState !== 'add') {
fetchModuleUpdateDetail(
{ ...params, parentId: this.state.itemId },
(result) => {
message.success(result.msg);
this.handleAddCancel();
},
);
} else {
fetchModuleAdd({ ...params, parentId: this.state.itemId }, (result) => {
message.success(result.msg);
this.handleAddCancel();
});
}
}
  // Cancel without saving
handleAddCancel() {
this.setState(
{
addButtonVisible: false,
buttonEditData: {},
},
() => {
this.getButtonList();
},
);
}
  // Edit button data
editButton(params) {
this.setState({
buttonEditState: 'edit',
buttonEditData: params,
addButtonVisible: true,
title: '修改按钮权限',
});
}
getButtonList = () => {
this.setState(
{
butttonListLoading: true,
},
() => {
fetchButtonList({ id: this.state.itemId }, (result) => {
this.setState({
butttonListLoading: false,
buttonDataSource: result.data.list,
});
});
},
);
};
getTableList() {
this.setState(
{
tableListLoading: true,
},
() => {
fetchModuleList({}, (result) => {
this.setState({
tableListLoading: false,
tableDataSource: result.data.list,
});
});
},
);
}
// footer() {
// return (
// <div>
// <Button type="primary">确定</Button>
// <Button>取消</Button>
// </div>
// );
// }
render() {
const {
buttonEditState,
buttonEditData,
butttonListLoading,
buttonDataSource,
tableListLoading,
tableDataSource,
moduleDetailResult,
} = this.state;
const thevalue =
this.state.type === 'modify' ? moduleDetailResult : this.state.values;
return (
<div className="page page-scrollfix page-usermanage page-modulemanage">
<Layout>
<Layout className="page-body">
<Content>
{/* <div className="page-header">
<div className="text-right">
<Button type="primary" onClick={this.moduleAdd} > 新增模块</Button>
</div>
</div> */}
<div className="page-content">
<ModuleList
dataSource={tableDataSource}
loading={tableListLoading}
// scroll={{ y: global.$GLOBALCONFIG.PAGEHEIGHT - 165 }}
onDelete={this.handleDelete}
onModify={this.handleModify}
onUpdataStatus={this.handleChangeStatus}
onAddNode={this.handleAddNode}
buttonList={this.buttonList}
/>
</div>
<div className="page-footer">
<div className="page-footer-buttons">
<Button type="primary" onClick={this.moduleAdd}>
{' '}
新增模块
</Button>
</div>
</div>
</Content>
</Layout>
</Layout>
{this.state.Visible ? (
<ModuleModal
handleOk={this.handleOk}
visible={this.state.Visible}
title={this.state.title}
pid={this.state.pid}
itemId={this.state.itemId}
values={thevalue}
type={this.state.type}
onCancel={this.handleCancel}
/>
) : null}
{this.state.buttonVisible ? (
<ButtonModal
visible={this.state.buttonVisible}
pid={this.state.pid}
itemId={this.state.itemId}
addButton={this.addButton}
cancelButton={this.cancelButton}
editButton={this.editButton}
listLoading={butttonListLoading}
dataSource={buttonDataSource}
updateList={this.getButtonList}
/>
) : null}
{this.state.addButtonVisible ? (
<AddButtonModal
title={this.state.title}
visible={this.state.addButtonVisible}
onCancel={this.handleAddCancel}
handleAdd={this.handleAdd}
state={buttonEditState}
buttonEditData={buttonEditData}
/>
) : null}
</div>
);
}
}
|
const http = require('http');
const express = require('express');
const path = require('path');
const app = express();
const server = http.Server(app);
app.use(express.static('./'));
app.use(express.json());
server.listen(9000, function() {
console.log("Server running at: http://localhost:" + 9000)
});
app.get('/', function (req, res) {
  res.sendFile(path.join(__dirname, 'index.html'));
});
app.get('/about', function (req, res) {
  res.sendFile(path.join(__dirname, 'about.html'));
});
|
// Copyright (C) 2016 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
esid: sec-%typedarray%.prototype.slice
description: >
_TypedArray_.prototype has no own property "slice"
includes: [testBigIntTypedArray.js]
features: [BigInt, TypedArray]
---*/
testWithBigIntTypedArrayConstructors(function(TA) {
assert.sameValue(TA.prototype.hasOwnProperty("slice"), false);
});
|
#include <stdio.h>
int main(){
    // do-while: the body runs at least once, then repeats while the condition holds
    int i = 0;
    do
    {
        printf("i = %d\n", i);
        i++;
    } while (i < 3);
    return 0;
}
|
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.decorators import tags
from cloudroast.compute.fixtures import ComputeFixture
class ServerMetadataTest(ComputeFixture):
@classmethod
def setUpClass(cls):
super(ServerMetadataTest, cls).setUpClass()
server_response = cls.server_behaviors.create_active_server()
cls.server = server_response.entity
cls.resources.add(cls.server.id, cls.servers_client.delete_server)
def setUp(self):
super(ServerMetadataTest, self).setUp()
self.meta = {'meta_key_1': 'meta_value_1',
'meta_key_2': 'meta_value_2'}
self.servers_client.set_server_metadata(self.server.id, self.meta)
@tags(type='positive', net='no')
def test_list_server_metadata(self):
"""All metadata key/value pairs for a server should be returned"""
metadata_response = self.servers_client.list_server_metadata(
self.server.id)
metadata = metadata_response.entity
self.assertEqual(200, metadata_response.status_code)
self.assertEqual(metadata.get('meta_key_1'), 'meta_value_1')
self.assertEqual(metadata.get('meta_key_2'), 'meta_value_2')
@tags(type='positive', net='no')
def test_set_server_metadata(self):
"""The server's metadata should be replaced with the provided values"""
meta = {'meta1': 'data1'}
server_response = self.server_behaviors.create_active_server(
metadata=meta)
server = server_response.entity
self.resources.add(server.id, self.servers_client.delete_server)
new_meta = {'meta2': 'data2', 'meta3': 'data3'}
metadata_response = self.servers_client.set_server_metadata(
server.id, new_meta)
metadata = metadata_response.entity
self.assertEqual(200, metadata_response.status_code)
self.assertEqual(metadata.get('meta2'), 'data2')
self.assertEqual(metadata.get('meta3'), 'data3')
self.assertNotIn(
'meta1', metadata,
msg='The key should have been removed after setting new metadata')
actual_metadata_response = self.servers_client.list_server_metadata(
server.id)
actual_metadata = actual_metadata_response.entity
self.assertEqual(actual_metadata.get('meta2'), 'data2')
self.assertEqual(actual_metadata.get('meta3'), 'data3')
self.assertNotIn('meta1', actual_metadata)
@tags(type='positive', net='no')
def test_update_server_metadata(self):
"""The server's metadata should be updated to the provided values"""
meta = {'key1': 'alt1', 'key2': 'alt2', 'meta_key_1': 'alt3'}
metadata_response = self.servers_client.update_server_metadata(
self.server.id, meta)
metadata = metadata_response.entity
self.assertEqual(200, metadata_response.status_code)
self.assertEqual(metadata.get('key1'), 'alt1')
self.assertEqual(metadata.get('key2'), 'alt2')
self.assertEqual(metadata.get('meta_key_1'), 'alt3')
self.assertEqual(metadata.get('meta_key_2'), 'meta_value_2')
#Verify the values have been updated to the proper values
actual_metadata_response = self.servers_client.list_server_metadata(
self.server.id)
actual_metadata = actual_metadata_response.entity
self.assertEqual(actual_metadata.get('key1'), 'alt1')
self.assertEqual(actual_metadata.get('key2'), 'alt2')
self.assertEqual(actual_metadata.get('meta_key_1'), 'alt3')
self.assertEqual(actual_metadata.get('meta_key_2'), 'meta_value_2')
@tags(type='positive', net='no')
def test_get_server_metadata_item(self):
"""The value for a specific metadata key should be returned"""
metadata_response = self.servers_client.get_server_metadata_item(
self.server.id, 'meta_key_1')
metadata = metadata_response.entity
self.assertEqual(metadata.get('meta_key_1'), 'meta_value_1')
@tags(type='positive', net='no')
def test_set_server_metadata_item(self):
"""The value provided for the given meta item should be updated"""
metadata_response = self.servers_client.set_server_metadata_item(
self.server.id, 'meta_key_2', 'nova')
metadata = metadata_response.entity
self.assertEqual(200, metadata_response.status_code)
self.assertEqual(metadata.get('meta_key_2'), 'nova')
actual_metadata_response = self.servers_client.list_server_metadata(
self.server.id)
actual_metadata = actual_metadata_response.entity
self.assertEqual(actual_metadata.get('meta_key_2'), 'nova')
self.assertEqual(actual_metadata.get('meta_key_1'), 'meta_value_1')
@tags(type='positive', net='no')
def test_add_new_server_metadata_item(self):
""" The metadata item should be added to the server"""
metadata_response = self.servers_client.set_server_metadata_item(
self.server.id, 'meta_key_3', 'meta_value_3')
metadata = metadata_response.entity
self.assertEqual(200, metadata_response.status_code)
self.assertEqual(metadata.get('meta_key_3'), 'meta_value_3')
actual_metadata_response = self.servers_client.list_server_metadata(
self.server.id)
actual_metadata = actual_metadata_response.entity
self.assertEqual(actual_metadata.get('meta_key_3'), 'meta_value_3')
self.assertEqual(actual_metadata.get('meta_key_2'), 'meta_value_2')
self.assertEqual(actual_metadata.get('meta_key_1'), 'meta_value_1')
@tags(type='positive', net='no')
def test_delete_server_metadata_item(self):
"""The metadata value/key pair should be deleted from the server"""
response = self.servers_client.delete_server_metadata_item(
self.server.id, 'meta_key_1')
self.assertEqual(204, response.status_code)
metadata_response = self.servers_client.list_server_metadata(
self.server.id)
metadata = metadata_response.entity
self.assertNotIn('meta_key_1', metadata)
|
var express = require("express");
var router = express.Router();
var mongoose = require("mongoose");
var Post = mongoose.model("Post");
// if user is authenticated in the session, call the next() to call the next request handler
function isAuthenticated(req, res, next) {
//allow all get request methods
if (req.method === "GET") {
return next();
}
if (req.isAuthenticated()) {
return next();
}
// if the user is not authenticated then redirect him to the login page
return res.redirect("/#login");
}
//Register the authentication middleware
router.use("/posts", isAuthenticated);
// API for ALL POSTS
// prettier-ignore
router.route("/posts")
//creates a new post
.post(function(req, res){
var post = new Post();
post.content = req.body.content;
post.created_by = req.body.created_by;
post.save(function(err, post) {
if (err){
return res.send(err);
}
return res.json(post);
});
})
//gets all posts
.get(function(req, res){
Post.find(function(err, posts){
if(err){
return res.send(500, err);
}
return res.send(posts);
});
});
// API for ONE SPECIFIC POST
// prettier-ignore
router.route('/posts/:id')
//updates specified post
.put(function(req, res){
Post.findById(req.params.id, function(err, post){
    if(err)
      return res.send(err);
// Set new values
post.created_by = req.body.created_by;
post.content = req.body.content;
// Save updated post
post.save(function(err, post){
        if(err)
          return res.send(err);
res.json(post);
});
});
})
.get(function(req, res){
Post.findById(req.params.id, function(err, post){
    if(err)
      return res.send(err);
res.json(post);
});
})
//deletes the post
.delete(function(req, res) {
Post.remove({
_id: req.params.id
}, function(err) {
      if (err)
        return res.send(err);
res.json("deleted :(");
});
});
module.exports = router;
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from autohoot import autodiff as ad
from autohoot import backend as T
def test_add_jacobian(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[2, 2])
x2 = ad.Variable(name="x2", shape=[2, 2])
y = x1 + x2
jacobian_x2, = ad.jacobians(y, [x2])
executor = ad.Executor([y, jacobian_x2])
x1_val = T.tensor([[1, 1], [1, 1]])
x2_val = T.tensor([[1, 1], [1, 1]])
y_val, jacobian_x2_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val
})
I = T.identity(2)
expected_jacobian_x2_val = T.einsum("ac,bd->abcd", I, I)
assert isinstance(y, ad.Node)
assert isinstance(jacobian_x2, ad.Node)
assert T.array_equal(y_val, x1_val + x2_val)
assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
def test_add_jacobian_scalar(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[])
x2 = ad.Variable(name="x2", shape=[])
y = x1 + x2
jacobian_x2, = ad.jacobians(y, [x2])
executor = ad.Executor([y, jacobian_x2])
x1_val = T.tensor(1.)
x2_val = T.tensor(1.)
y_val, jacobian_x2_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val
})
expected_jacobian_x2_val = T.tensor(1.)
assert isinstance(y, ad.Node)
assert isinstance(jacobian_x2, ad.Node)
assert T.array_equal(y_val, x1_val + x2_val)
assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
def test_chainjacobian(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[2, 2, 2])
x2 = ad.Variable(name="x2", shape=[2, 2, 2])
x1.set_in_indices_length(1)
x2.set_in_indices_length(2)
y = ad.chainjacobian(x1, x2)
executor = ad.Executor([y])
x1_val = T.tensor([[[1, 1], [1, 1]], [[1, 1], [1, 1]]])
x2_val = T.tensor([[[1, 1], [1, 1]], [[1, 1], [1, 1]]])
y_val, = executor.run(feed_dict={x1: x1_val, x2: x2_val})
expected_y_val = T.einsum("abc,bcd->ad", x1_val, x2_val)
assert isinstance(y, ad.Node)
assert T.array_equal(y_val, expected_y_val)
def test_add_jacobian_w_chain(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[2, 2])
x2 = ad.Variable(name="x2", shape=[2, 2])
x3 = ad.Variable(name="x3", shape=[2, 2])
y = x1 + x2
z = y + x3
jacobian_x2, = ad.jacobians(z, [x2])
executor = ad.Executor([z, jacobian_x2])
x1_val = T.tensor([[1, 1], [1, 1]])
x2_val = T.tensor([[1, 1], [1, 1]])
x3_val = T.tensor([[1, 1], [1, 1]])
z_val, jacobian_x2_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val,
x3: x3_val
})
I = T.identity(2)
# jacobian_z_y = T.einsum("ae,bf->abef", I, I)
# jacobian_y_x2 = T.einsum("ec,fd->efcd", I, I)
# jacobian_z_x2 = T.einsum("abef,efcd->abcd", jacobian_z_y, jacobian_y_x2)
# = T.einsum("ae,bf,ec,fd->abcd", I, I, I, I)
# = T.einsum("ac,bd->abcd", I, I)
expected_jacobian_x2_val = T.einsum("ac,bd->abcd", I, I)
assert isinstance(z, ad.Node)
assert isinstance(jacobian_x2, ad.Node)
assert T.array_equal(z_val, x1_val + x2_val + x3_val)
assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
def test_add_jacobian_scalar_w_chain(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[])
x2 = ad.Variable(name="x2", shape=[])
x3 = ad.Variable(name="x3", shape=[])
y = x1 + x2
z = y + x3
jacobian_x2, = ad.jacobians(z, [x2])
executor = ad.Executor([z, jacobian_x2])
x1_val = T.tensor(1.)
x2_val = T.tensor(1.)
x3_val = T.tensor(1.)
z_val, jacobian_x2_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val,
x3: x3_val
})
expected_jacobian_x2_val = T.tensor(1.)
assert isinstance(z, ad.Node)
assert isinstance(jacobian_x2, ad.Node)
assert T.array_equal(z_val, x1_val + x2_val + x3_val)
assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
def test_sub_jacobian(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[2, 2])
x2 = ad.Variable(name="x2", shape=[2, 2])
y = x1 - x2
jacobian_x1, jacobian_x2 = ad.jacobians(y, [x1, x2])
executor = ad.Executor([y, jacobian_x1, jacobian_x2])
x1_val = T.tensor([[1, 1], [1, 1]])
x2_val = T.tensor([[1, 1], [1, 1]])
y_val, jacobian_x1_val, jacobian_x2_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val
})
I = T.identity(2)
expected_jacobian_x1_val = T.einsum("ac,bd->abcd", I, I)
expected_jacobian_x2_val = -T.einsum("ac,bd->abcd", I, I)
assert isinstance(y, ad.Node)
assert isinstance(jacobian_x2, ad.Node)
assert T.array_equal(y_val, x1_val - x2_val)
assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
def test_sub_jacobian_w_chain(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[2, 2])
x2 = ad.Variable(name="x2", shape=[2, 2])
x3 = ad.Variable(name="x3", shape=[2, 2])
y = x1 - x2
z = x3 - y
jacobian_x2, = ad.jacobians(z, [x2])
executor = ad.Executor([z, jacobian_x2])
x1_val = T.tensor([[1, 1], [1, 1]])
x2_val = T.tensor([[1, 1], [1, 1]])
x3_val = T.tensor([[1, 1], [1, 1]])
z_val, jacobian_x2_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val,
x3: x3_val
})
I = T.identity(2)
expected_jacobian_x2_val = T.einsum("ac,bd->abcd", I, I)
assert isinstance(z, ad.Node)
assert isinstance(jacobian_x2, ad.Node)
assert T.array_equal(z_val, x3_val - x1_val + x2_val)
assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
def test_mul_jacobian(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[2, 2])
x2 = ad.Variable(name="x2", shape=[2, 2])
y = x1 * x2
jacobian_x1, jacobian_x2 = ad.jacobians(y, [x1, x2])
executor = ad.Executor([y, jacobian_x1, jacobian_x2])
x1_val = T.tensor([[1., 2.], [3., 4.]])
x2_val = T.tensor([[5., 6.], [7., 8.]])
y_val, jacobian_x1_val, jacobian_x2_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val
})
I = T.identity(2)
expected_jacobian_x1_val = T.einsum("ai,bj,ij->abij", I, I, x2_val)
expected_jacobian_x2_val = T.einsum("ai,bj,ij->abij", I, I, x1_val)
assert isinstance(y, ad.Node)
assert T.array_equal(y_val, x1_val * x2_val)
assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
def test_three_mul_jacobian(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[2, 2])
x2 = ad.Variable(name="x2", shape=[2, 2])
x3 = ad.Variable(name="x3", shape=[2, 2])
y = x1 * x2 * x3
jacobian_x1, = ad.jacobians(y, [x1])
executor = ad.Executor([y, jacobian_x1])
x1_val = T.tensor([[1., 2.], [3., 4.]])
x2_val = T.tensor([[5., 6.], [7., 8.]])
x3_val = T.tensor([[9., 10.], [11., 12.]])
y_val, jacobian_x1_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val,
x3: x3_val
})
I = T.identity(2)
expected_jacobian_x1_val = T.einsum("ai,bj,ij,ij->abij", I, I, x2_val,
x3_val)
assert isinstance(y, ad.Node)
assert T.array_equal(y_val, x1_val * x2_val * x3_val)
assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
def test_three_mul_jacobian_scalars(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[])
x2 = ad.Variable(name="x2", shape=[])
x3 = ad.Variable(name="x3", shape=[])
y = x1 * x2 * x3
jacobian_x1, = ad.jacobians(y, [x1])
executor = ad.Executor([y, jacobian_x1])
x1_val = T.tensor(1.)
x2_val = T.tensor(2.)
x3_val = T.tensor(3.)
y_val, jacobian_x1_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val,
x3: x3_val
})
expected_jacobian_x1_val = x2_val * x3_val
assert isinstance(y, ad.Node)
assert T.array_equal(y_val, x1_val * x2_val * x3_val)
assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
def test_mul_jacobian_scalars(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[])
x2 = ad.Variable(name="x2", shape=[])
y = x1 * x2
jacobian_x1, jacobian_x2 = ad.jacobians(y, [x1, x2])
executor = ad.Executor([y, jacobian_x1, jacobian_x2])
x1_val = T.tensor(1.)
x2_val = T.tensor(2.)
y_val, jacobian_x1_val, jacobian_x2_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val
})
expected_jacobian_x1_val = x2_val
expected_jacobian_x2_val = x1_val
assert isinstance(y, ad.Node)
assert T.array_equal(y_val, x1_val * x2_val)
assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
def test_mul_jacobian_one_scalar(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[])
x2 = ad.Variable(name="x2", shape=[2, 2])
# test both cases of left and right multiply a scalar
for y in [x1 * x2, x2 * x1]:
jacobian_x1, jacobian_x2 = ad.jacobians(y, [x1, x2])
executor = ad.Executor([y, jacobian_x1, jacobian_x2])
x1_val = T.tensor(2.)
x2_val = T.tensor([[5., 6.], [7., 8.]])
y_val, jacobian_x1_val, jacobian_x2_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val
})
I = T.identity(2)
expected_jacobian_x1_val = T.einsum("ai,bj,ij->ab", I, I, x2_val)
expected_jacobian_x2_val = x1_val * T.einsum("ai,bj->abij", I, I)
assert isinstance(y, ad.Node)
assert T.array_equal(y_val, x1_val * x2_val)
assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
def test_mul_const_jacobian(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x2", shape=[2, 2])
jacobian_x1, = ad.jacobians(2 * x1, [x1])
executor = ad.Executor([jacobian_x1])
x1_val = T.tensor([[5., 6.], [7., 8.]])
jacobian_x1_val, = executor.run(feed_dict={x1: x1_val})
I = T.identity(2)
expected_jacobian_x1_val = 2 * T.einsum("ai,bj->abij", I, I)
assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
def test_jacobian_einsum(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x1 = ad.Variable(name="x1", shape=[3, 3, 3])
x2 = ad.Variable(name="x2", shape=[3, 3, 3])
y = ad.einsum("ikl,jkl->ijk", x1, x2)
jacobian_x1, jacobian_x2 = ad.jacobians(y, [x1, x2])
executor = ad.Executor([y, jacobian_x1, jacobian_x2])
x1_val = T.random((3, 3, 3))
x2_val = T.random((3, 3, 3))
y_val, jacobian_x1_val, jacobian_x2_val = executor.run(feed_dict={
x1: x1_val,
x2: x2_val,
})
I = T.identity(3)
expected_jacobian_x1_val = T.einsum("im,kn,jno->ijkmno", I, I, x2_val)
expected_jacobian_x2_val = T.einsum("jm,kn,ino->ijkmno", I, I, x1_val)
assert isinstance(y, ad.Node)
assert T.array_equal(y_val, T.einsum("ikl,jkl->ijk", x1_val, x2_val))
assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
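# Derivation of the expected Jacobians above: y_ijk = sum_l x1_ikl * x2_jkl,
# so d(y_ijk)/d(x1_mno) = delta_im * delta_kn * x2_jno, which is the einsum
# "im,kn,jno->ijkmno" (and symmetrically for x2).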
def test_jacobian_summation_einsum(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x = ad.Variable(name="x", shape=[2, 2])
x_sum = ad.einsum('ij->', x)
grad_x, = ad.jacobians(x_sum, [x])
executor = ad.Executor([x_sum, grad_x])
x_val = T.tensor([[1., 2.], [3., 4.]])
x_sum_val, grad_x_val = executor.run(feed_dict={x: x_val})
expected_x_sum_val = T.sum(x_val)
expected_grad_x_val = T.ones_like(x_val)
assert T.array_equal(x_sum_val, expected_x_sum_val)
assert T.array_equal(grad_x_val, expected_grad_x_val)
def test_jacobian_summation_einsum_2(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x = ad.Variable(name="x", shape=[2, 2])
y = ad.Variable(name="y", shape=[2, 2])
out = ad.einsum('ij,ab->ab', x, y)
grad_x, = ad.jacobians(out, [x])
executor = ad.Executor([out, grad_x])
x_val = T.tensor([[1., 2.], [3., 4.]])
y_val = T.tensor([[5., 6.], [7., 8.]])
out_val, grad_x_val = executor.run(feed_dict={x: x_val, y: y_val})
expected_out_val = T.einsum('ij,ab->ab', x_val, y_val)
expected_grad_x_val = T.einsum('ij,ab->abij', T.ones(x_val.shape),
y_val)
assert T.array_equal(out_val, expected_out_val)
assert T.array_equal(grad_x_val, expected_grad_x_val)
def test_jacobian_trace_einsum(backendopt):
for datatype in backendopt:
if datatype == 'taco':
continue
T.set_backend(datatype)
x = ad.Variable(name="x", shape=[2, 2])
trace = ad.einsum('ii->', x)
grad_x, = ad.jacobians(trace, [x])
executor = ad.Executor([trace, grad_x])
x_val = T.tensor([[1., 2.], [3., 4.]])
trace_val, grad_x_val = executor.run(feed_dict={x: x_val})
expected_trace_val = T.einsum('ii->', x_val)
expected_grad_x_val = T.identity(2)
assert T.array_equal(trace_val, expected_trace_val)
assert T.array_equal(grad_x_val, expected_grad_x_val)
def test_hessian_quadratic(backendopt):
for datatype in backendopt:
T.set_backend(datatype)
x = ad.Variable(name="x", shape=[3])
H = ad.Variable(name="H", shape=[3, 3])
y = ad.einsum("i,ij,j->", x, H, x)
hessian = ad.hessian(y, [x])
executor = ad.Executor([hessian[0][0]])
x_val = T.random([3])
H_val = T.random((3, 3))
hessian_val, = executor.run(feed_dict={x: x_val, H: H_val})
assert T.array_equal(hessian_val, H_val + T.transpose(H_val))
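# Sanity check on the expected value: for the quadratic form
# y = sum_ij x_i * H_ij * x_j, the gradient is (H + H^T) x, so the Hessian
# is the constant matrix H + H^T asserted above.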
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django_orghierarchy.models import Organization
from ..models import DataSource, Event, Image, PublicationStatus
class TestImage(TestCase):
def setUp(self):
user_model = get_user_model()
self.user = user_model.objects.create(username='testuser')
self.data_source = DataSource.objects.create(
id='ds',
name='data-source',
api_key="test_api_key",
user_editable=True,
)
self.org = Organization.objects.create(
name='org',
origin_id='org',
data_source=self.data_source,
)
self.image = Image.objects.create(
name='image',
data_source=self.data_source,
publisher=self.org,
url='http://fake.url/image/',
)
def test_can_be_edited_by_super_user(self):
self.user.is_superuser = True
self.user.save()
can_be_edited = self.image.can_be_edited_by(self.user)
self.assertTrue(can_be_edited)
def test_can_be_edited_by_random_user(self):
can_be_edited = self.image.can_be_edited_by(self.user)
self.assertFalse(can_be_edited)
def test_can_be_edited_by_regular_user(self):
self.org.regular_users.add(self.user)
can_be_edited = self.image.can_be_edited_by(self.user)
self.assertFalse(can_be_edited)
def test_can_be_edited_by_admin_user(self):
self.org.admin_users.add(self.user)
can_be_edited = self.image.can_be_edited_by(self.user)
self.assertTrue(can_be_edited)
class TestEvent(TestCase):
def setUp(self):
user_model = get_user_model()
self.user = user_model.objects.create(username='testuser')
self.data_source = DataSource.objects.create(
id='ds',
name='data-source',
api_key="test_api_key",
user_editable=True,
)
self.org = Organization.objects.create(
name='org',
origin_id='org',
data_source=self.data_source,
)
self.event_1 = Event.objects.create(
id='ds:event-1',
name='event-1',
data_source=self.data_source,
publisher=self.org,
publication_status=PublicationStatus.DRAFT,
)
self.event_2 = Event.objects.create(
id='ds:event-2',
name='event-2',
data_source=self.data_source,
publisher=self.org,
publication_status=PublicationStatus.PUBLIC,
)
def test_can_be_edited_by_super_user(self):
self.user.is_superuser = True
self.user.save()
can_be_edited = self.event_1.can_be_edited_by(self.user)
self.assertTrue(can_be_edited)
can_be_edited = self.event_2.can_be_edited_by(self.user)
self.assertTrue(can_be_edited)
def test_can_be_edited_by_random_user(self):
can_be_edited = self.event_1.can_be_edited_by(self.user)
self.assertFalse(can_be_edited)
can_be_edited = self.event_2.can_be_edited_by(self.user)
self.assertFalse(can_be_edited)
def test_can_be_edited_by_regular_user(self):
self.org.regular_users.add(self.user)
can_be_edited = self.event_1.can_be_edited_by(self.user)
self.assertTrue(can_be_edited) # can edit draft event
can_be_edited = self.event_2.can_be_edited_by(self.user)
self.assertFalse(can_be_edited) # cannot edit public event
def test_can_be_edited_by_admin_user(self):
self.org.admin_users.add(self.user)
can_be_edited = self.event_1.can_be_edited_by(self.user)
self.assertTrue(can_be_edited)
can_be_edited = self.event_2.can_be_edited_by(self.user)
self.assertTrue(can_be_edited)
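# Permission matrix exercised by the tests above: superusers and organization
# admins can edit any event; organization regular users can edit draft events
# only; users with no role in the organization can edit nothing.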
|
# SPDX-FileCopyrightText: 2020 The Magma Authors.
# SPDX-FileCopyrightText: 2022 Open Networking Foundation <support@opennetworking.org>
#
# SPDX-License-Identifier: BSD-3-Clause
from typing import Any, Callable, Dict, List, Optional, Type
from common.service import MagmaService
from data_models import transform_for_enb, transform_for_magma
from data_models.data_model import DataModel, TrParam
from data_models.data_model_parameters import (
ParameterName,
TrParameterType,
)
from device_config.enodeb_config_postprocessor import (
EnodebConfigurationPostProcessor,
)
from device_config.enodeb_configuration import EnodebConfiguration
from devices.baicells_qafb import (
BaicellsQafbGetObjectParametersState,
BaicellsQafbWaitGetTransientParametersState,
)
from devices.device_utils import EnodebDeviceName
from state_machines.enb_acs_impl import BasicEnodebAcsStateMachine
from state_machines.enb_acs_states import (
AddObjectsState,
DeleteObjectsState,
EnbSendRebootState,
EndSessionState,
EnodebAcsState,
ErrorState,
GetParametersState,
GetRPCMethodsState,
SendGetTransientParametersState,
SetParameterValuesState,
WaitEmptyMessageState,
WaitGetParametersState,
WaitInformMRebootState,
WaitInformState,
WaitRebootResponseState,
WaitSetParameterValuesState,
)
class BaicellsQAFAHandler(BasicEnodebAcsStateMachine):
def __init__(
self,
service: MagmaService,
) -> None:
self._state_map = {}
super().__init__(service=service, use_param_key=False)
def reboot_asap(self) -> None:
self.transition('reboot')
def is_enodeb_connected(self) -> bool:
return not isinstance(self.state, WaitInformState)
def _init_state_map(self) -> None:
self._state_map = {
'wait_inform': WaitInformState(self, when_done='get_rpc_methods'),
'get_rpc_methods': GetRPCMethodsState(self, when_done='wait_empty', when_skip='get_transient_params'),
'wait_empty': WaitEmptyMessageState(self, when_done='get_transient_params'),
'get_transient_params': SendGetTransientParametersState(self, when_done='wait_get_transient_params'),
'wait_get_transient_params': BaicellsQafbWaitGetTransientParametersState(self, when_get='get_params', when_get_obj_params='get_obj_params', when_delete='delete_objs', when_add='add_objs', when_set='set_params', when_skip='end_session'),
'get_params': GetParametersState(self, when_done='wait_get_params'),
'wait_get_params': WaitGetParametersState(self, when_done='get_obj_params'),
'get_obj_params': BaicellsQafbGetObjectParametersState(self, when_delete='delete_objs', when_add='add_objs', when_set='set_params', when_skip='end_session'),
'delete_objs': DeleteObjectsState(self, when_add='add_objs', when_skip='set_params'),
'add_objs': AddObjectsState(self, when_done='set_params'),
'set_params': SetParameterValuesState(self, when_done='wait_set_params'),
'wait_set_params': WaitSetParameterValuesState(self, when_done='check_get_params', when_apply_invasive='check_get_params'),
'check_get_params': GetParametersState(self, when_done='check_wait_get_params', request_all_params=True),
'check_wait_get_params': WaitGetParametersState(self, when_done='end_session'),
'end_session': EndSessionState(self),
# These states are only entered through manual user intervention
'reboot': EnbSendRebootState(self, when_done='wait_reboot'),
'wait_reboot': WaitRebootResponseState(self, when_done='wait_post_reboot_inform'),
'wait_post_reboot_inform': WaitInformMRebootState(self, when_done='wait_empty', when_timeout='wait_inform'),
# The states below are entered when an unexpected message type is
# received
'unexpected_fault': ErrorState(self, inform_transition_target='wait_inform'),
}
@property
def device_name(self) -> str:
return EnodebDeviceName.BAICELLS_QAFA
@property
def data_model_class(self) -> Type[DataModel]:
return BaicellsQAFATrDataModel
@property
def config_postprocessor(self) -> EnodebConfigurationPostProcessor:
return BaicellsQAFATrConfigurationInitializer()
@property
def state_map(self) -> Dict[str, EnodebAcsState]:
return self._state_map
@property
def disconnected_state_name(self) -> str:
return 'wait_inform'
@property
def unexpected_fault_state_name(self) -> str:
return 'unexpected_fault'
class BaicellsQAFATrDataModel(DataModel):
"""
Class to represent relevant data model parameters from TR-196/TR-098.
This class is effectively read-only.
This model specifically targets Qualcomm-based BaiCells units running
QAFA firmware.
These units have the following idiosyncrasies (on account of running TR-098):
- Parameter content root is different (InternetGatewayDevice)
- GetParameter queries with a wildcard e.g. InternetGatewayDevice. do
not respond with the full tree (we have to query all parameters)
- MME status is not exposed - we assume the MME is connected if
the eNodeB is transmitting (OpState=true)
- Parameters such as band capability/duplex config
are rooted under `boardconf.` and not the device config root
- Parameters like Admin state, CellReservedForOperatorUse,
Duplex mode, DL bandwidth and Band capability have different
formats than on Intel-based Baicells units, so values must be
formatted before configuration and transformed when read back
from eNodeB state.
- Num PLMNs is not reported by these units
"""
# Mapping of TR parameter paths to aliases
DEVICE_PATH = 'InternetGatewayDevice.'
FAPSERVICE_PATH = DEVICE_PATH + 'Services.FAPService.1.'
EEPROM_PATH = 'boardconf.status.eepromInfo.'
PARAMETERS = {
# Top-level objects
ParameterName.DEVICE: TrParam(DEVICE_PATH, True, TrParameterType.OBJECT, False),
ParameterName.FAP_SERVICE: TrParam(FAPSERVICE_PATH, True, TrParameterType.OBJECT, False),
# Qualcomm units do not expose MME_Status (we assume the eNB is connected to the MME whenever it is broadcasting, i.e. OpState=true)
ParameterName.MME_STATUS: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.OpState', True, TrParameterType.BOOLEAN, False),
ParameterName.GPS_LAT: TrParam(DEVICE_PATH + 'FAP.GPS.latitude', True, TrParameterType.STRING, False),
ParameterName.GPS_LONG: TrParam(DEVICE_PATH + 'FAP.GPS.longitude', True, TrParameterType.STRING, False),
ParameterName.SW_VERSION: TrParam(DEVICE_PATH + 'DeviceInfo.SoftwareVersion', True, TrParameterType.STRING, False),
ParameterName.SERIAL_NUMBER: TrParam(DEVICE_PATH + 'DeviceInfo.SerialNumber', True, TrParameterType.STRING, False),
# Capabilities
ParameterName.DUPLEX_MODE_CAPABILITY: TrParam(EEPROM_PATH + 'div_multiple', True, TrParameterType.STRING, False),
ParameterName.BAND_CAPABILITY: TrParam(EEPROM_PATH + 'work_mode', True, TrParameterType.STRING, False),
# RF-related parameters
ParameterName.EARFCNDL: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.EARFCNDL', True, TrParameterType.INT, False),
ParameterName.PCI: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.PhyCellID', True, TrParameterType.INT, False),
ParameterName.DL_BANDWIDTH: TrParam(DEVICE_PATH + 'Services.RfConfig.1.RfCarrierCommon.carrierBwMhz', True, TrParameterType.INT, False),
ParameterName.SUBFRAME_ASSIGNMENT: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.PHY.TDDFrame.SubFrameAssignment', True, TrParameterType.INT, False),
ParameterName.SPECIAL_SUBFRAME_PATTERN: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.PHY.TDDFrame.SpecialSubframePatterns', True, TrParameterType.INT, False),
ParameterName.CELL_ID: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.Common.CellIdentity', True, TrParameterType.UNSIGNED_INT, False),
# Other LTE parameters
ParameterName.ADMIN_STATE: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.AdminState', False, TrParameterType.STRING, False),
ParameterName.OP_STATE: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.OpState', True, TrParameterType.BOOLEAN, False),
ParameterName.RF_TX_STATUS: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.OpState', True, TrParameterType.BOOLEAN, False),
# Core network parameters
ParameterName.MME_IP: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.Gateway.S1SigLinkServerList', True, TrParameterType.STRING, False),
ParameterName.MME_PORT: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.Gateway.S1SigLinkPort', True, TrParameterType.INT, False),
# This parameter is standard but doesn't exist
# ParameterName.NUM_PLMNS: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNListNumberOfEntries', True, TrParameterType.INT, False),
ParameterName.TAC: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.EPC.TAC', True, TrParameterType.INT, False),
ParameterName.IP_SEC_ENABLE: TrParam('boardconf.ipsec.ipsecConfig.onBoot', False, TrParameterType.BOOLEAN, False),
# Management server parameters
ParameterName.PERIODIC_INFORM_ENABLE: TrParam(DEVICE_PATH + 'ManagementServer.PeriodicInformEnable', False, TrParameterType.BOOLEAN, False),
ParameterName.PERIODIC_INFORM_INTERVAL: TrParam(DEVICE_PATH + 'ManagementServer.PeriodicInformInterval', False, TrParameterType.INT, False),
# Performance management parameters
ParameterName.PERF_MGMT_ENABLE: TrParam(DEVICE_PATH + 'FAP.PerfMgmt.Config.Enable', False, TrParameterType.BOOLEAN, False),
ParameterName.PERF_MGMT_UPLOAD_INTERVAL: TrParam(DEVICE_PATH + 'FAP.PerfMgmt.Config.PeriodicUploadInterval', False, TrParameterType.INT, False),
ParameterName.PERF_MGMT_UPLOAD_URL: TrParam(DEVICE_PATH + 'FAP.PerfMgmt.Config.URL', False, TrParameterType.STRING, False),
}
NUM_PLMNS_IN_CONFIG = 6
TRANSFORMS_FOR_ENB = {
ParameterName.CELL_BARRED: transform_for_enb.invert_cell_barred,
}
for i in range(1, NUM_PLMNS_IN_CONFIG + 1):
TRANSFORMS_FOR_ENB[ParameterName.PLMN_N_CELL_RESERVED % i] = transform_for_enb.cell_reserved
PARAMETERS[ParameterName.PLMN_N % i] = TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.' % i, True, TrParameterType.STRING, False)
PARAMETERS[ParameterName.PLMN_N_CELL_RESERVED % i] = TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.CellReservedForOperatorUse' % i, True, TrParameterType.STRING, False)
PARAMETERS[ParameterName.PLMN_N_ENABLE % i] = TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.Enable' % i, True, TrParameterType.BOOLEAN, False)
PARAMETERS[ParameterName.PLMN_N_PRIMARY % i] = TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.IsPrimary' % i, True, TrParameterType.BOOLEAN, False)
PARAMETERS[ParameterName.PLMN_N_PLMNID % i] = TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.PLMNID' % i, True, TrParameterType.STRING, False)
TRANSFORMS_FOR_ENB[ParameterName.ADMIN_STATE] = transform_for_enb.admin_state
TRANSFORMS_FOR_MAGMA = {
# We don't set these parameters
ParameterName.BAND_CAPABILITY: transform_for_magma.band_capability,
ParameterName.DUPLEX_MODE_CAPABILITY: transform_for_magma.duplex_mode,
}
@classmethod
def get_parameter(cls, param_name: ParameterName) -> Optional[TrParam]:
return cls.PARAMETERS.get(param_name)
@classmethod
def _get_magma_transforms(
cls,
) -> Dict[ParameterName, Callable[[Any], Any]]:
return cls.TRANSFORMS_FOR_MAGMA
@classmethod
def _get_enb_transforms(cls) -> Dict[ParameterName, Callable[[Any], Any]]:
return cls.TRANSFORMS_FOR_ENB
@classmethod
def get_load_parameters(cls) -> List[ParameterName]:
"""
Load all the parameters instead of a subset.
"""
return list(cls.PARAMETERS.keys())
@classmethod
def get_num_plmns(cls) -> int:
return cls.NUM_PLMNS_IN_CONFIG
@classmethod
def get_parameter_names(cls) -> List[ParameterName]:
excluded_params = [
str(ParameterName.DEVICE),
str(ParameterName.FAP_SERVICE),
]
names = list(
filter(
lambda x: (not str(x).startswith('PLMN'))
and (str(x) not in excluded_params),
cls.PARAMETERS.keys(),
),
)
return names
@classmethod
def get_numbered_param_names(
cls,
) -> Dict[ParameterName, List[ParameterName]]:
names = {}
for i in range(1, cls.NUM_PLMNS_IN_CONFIG + 1):
params = []
params.append(ParameterName.PLMN_N_CELL_RESERVED % i)
params.append(ParameterName.PLMN_N_ENABLE % i)
params.append(ParameterName.PLMN_N_PRIMARY % i)
params.append(ParameterName.PLMN_N_PLMNID % i)
names[ParameterName.PLMN_N % i] = params
return names
class BaicellsQAFATrConfigurationInitializer(EnodebConfigurationPostProcessor):
def postprocess(self, mconfig: Any, service_cfg: Any, desired_cfg: EnodebConfiguration) -> None:
desired_cfg.delete_parameter(ParameterName.ADMIN_STATE)
|
from irekua_database.models import Role
from .utils import BaseFilter
search_fields = (
'name',
)
class Filter(BaseFilter):
class Meta:
model = Role
fields = (
'name',
)
|
from abc import ABC
from discord.ext import commands
import discord
intents = discord.Intents.default()
startup_extensions = ["Moderation", "Help"]
guild_ids_list = ["ID HERE"] # integer Only
class ScamBot(commands.Bot, ABC):
def __init__(self):
super().__init__(intents=intents, command_prefix=commands.when_mentioned_or("!"), help_command=None)
async def on_ready(self):
for extension in startup_extensions:
try:
self.load_extension(extension)
print('{} Loaded!'.format(extension))
except Exception as e:
exc = '{}: {}'.format(type(e).__name__, e)
print('Failed to load extension {}\n{}'.format(extension, exc))
print('Logged in as')
print(self.user.name)
print(self.user.id)
print('------')
bot = ScamBot()
# Slash commands. To use guild_id_list, add the guild_ids=guild_ids_list argument to every @bot.slash_command
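# e.g. (guild IDs are placeholders until guild_ids_list holds real integers):
# @bot.slash_command(hidden=True, guild_ids=guild_ids_list)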
@commands.is_owner()
@bot.slash_command(hidden=True)
async def load(ctx, extension_name: str):
try:
bot.load_extension(extension_name)
except (AttributeError, ImportError, discord.ExtensionNotFound, discord.ExtensionAlreadyLoaded) as e:
await ctx.respond("```py\n{}: {}\n```".format(type(e).__name__, str(e)))
return
await ctx.respond("{} loaded.".format(extension_name))
@commands.is_owner()
@bot.slash_command(hidden=True)
async def unload(ctx, extension_name: str):
try:
bot.unload_extension(extension_name)
except (AttributeError, ImportError, discord.ExtensionNotFound, discord.ExtensionNotLoaded) as e:
await ctx.respond("```py\n{}: {}\n```".format(type(e).__name__, str(e)))
return
await ctx.respond("{} unloaded.".format(extension_name))
@commands.is_owner()
@bot.slash_command(hidden=True)
async def reload(ctx, extension_name: str):
try:
bot.reload_extension(extension_name)
except (AttributeError, ImportError, discord.ExtensionNotLoaded) as e:
await ctx.respond("```py\n{}: {}\n```".format(type(e).__name__, str(e)))
return
await ctx.respond("{} reloaded.".format(extension_name))
if __name__ == "__main__":
bot.run('Bot Token Here')
|
export default {
html: `
<input>
`,
ssrHtml: `
<input>
`,
async test({ assert, component, target }) {
const input = target.querySelector('input');
assert.equal(input.value, '');
component.x = null;
assert.equal(input.value, '');
component.x = undefined;
assert.equal(input.value, '');
component.x = 'string';
component.x = undefined;
assert.equal(input.value, '');
component.x = 0;
assert.equal(input.value, '0');
component.x = undefined;
assert.equal(input.value, '');
}
};
|
from dagster.core.decorator_utils import (
InvalidDecoratedFunctionInfo,
split_function_parameters,
validate_decorated_fn_non_positionals,
validate_decorated_fn_positionals,
)
def decorated_function_one_positional():
def foo(bar):
return bar
return foo
def decorated_function_two_positionals_one_kwarg():
def foo_kwarg(bar, baz, qux=True):
return bar, baz, qux
return foo_kwarg
def test_get_function_positional_parameters_ok():
positionals, non_positionals = split_function_parameters(
decorated_function_one_positional(), ['bar']
)
validate_decorated_fn_positionals(positionals, ['bar'])
validate_decorated_fn_non_positionals(set(), non_positionals)
assert 'bar' in {positional.name for positional in positionals}
assert not non_positionals
def test_get_function_positional_parameters_multiple():
positionals, non_positionals = split_function_parameters(
decorated_function_two_positionals_one_kwarg(), ['bar', 'baz']
)
validate_decorated_fn_positionals(positionals, ['bar', 'baz'])
validate_decorated_fn_non_positionals({'qux'}, non_positionals)
assert {positional.name for positional in positionals} == {'bar', 'baz'}
assert {non_positional.name for non_positional in non_positionals} == {'qux'}
def test_get_function_positional_parameters_invalid():
positionals, _ = split_function_parameters(decorated_function_one_positional(), ['bat'])
assert validate_decorated_fn_positionals(positionals, ['bat']) == 'bat'
def test_get_function_non_positional_parameters_invalid():
_, non_positionals = split_function_parameters(
decorated_function_two_positionals_one_kwarg(), ['bar', 'baz']
)
invalid_function_info = validate_decorated_fn_non_positionals(set(), non_positionals)
assert invalid_function_info.error_type == InvalidDecoratedFunctionInfo.TYPES['missing_name']
|
import os
import re
import sys
import time
import signal
import traceback
import logging
import boto3
import subprocess
import six
import warnings
import pkgutil
from localstack import constants, config
from localstack.constants import (ENV_DEV, DEFAULT_REGION, LOCALSTACK_VENV_FOLDER,
DEFAULT_PORT_S3_BACKEND, DEFAULT_PORT_APIGATEWAY_BACKEND,
DEFAULT_PORT_SNS_BACKEND, DEFAULT_PORT_CLOUDFORMATION_BACKEND)
from localstack.config import (USE_SSL, PORT_ROUTE53, PORT_S3,
PORT_FIREHOSE, PORT_LAMBDA, PORT_SNS, PORT_REDSHIFT, PORT_CLOUDWATCH,
PORT_DYNAMODBSTREAMS, PORT_SES, PORT_ES, PORT_CLOUDFORMATION, PORT_APIGATEWAY,
PORT_SSM)
from localstack.utils import common, persistence
from localstack.utils.common import (run, TMP_THREADS, in_ci, run_cmd_safe,
TIMESTAMP_FORMAT, FuncThread, ShellCommandThread, mkdir)
from localstack.utils.analytics import event_publisher
from localstack.services import generic_proxy, install
from localstack.services.firehose import firehose_api
from localstack.services.awslambda import lambda_api
from localstack.services.dynamodbstreams import dynamodbstreams_api
from localstack.services.es import es_api
from localstack.services.generic_proxy import GenericProxy
# flag to indicate whether signal handlers have been set up already
SIGNAL_HANDLERS_SETUP = False
# maps plugin scope ("services", "commands") to flags which indicate whether plugins have been loaded
PLUGINS_LOADED = {}
# flag to indicate whether we've received and processed the stop signal
INFRA_STOPPED = False
# default backend host address
DEFAULT_BACKEND_HOST = '127.0.0.1'
# set up logger
LOGGER = logging.getLogger(os.path.basename(__file__))
# map of service plugins, mapping from service name to plugin details
SERVICE_PLUGINS = {}
# plugin scopes
PLUGIN_SCOPE_SERVICES = 'services'
PLUGIN_SCOPE_COMMANDS = 'commands'
# log format strings
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s: %(message)s'
LOG_DATE_FORMAT = TIMESTAMP_FORMAT
# -----------------
# PLUGIN UTILITIES
# -----------------
class Plugin(object):
def __init__(self, name, start, check=None, listener=None):
self.plugin_name = name
self.start_function = start
self.listener = listener
self.check_function = check
def start(self, asynchronous):
kwargs = {
'asynchronous': asynchronous
}
if self.listener:
kwargs['update_listener'] = self.listener
return self.start_function(**kwargs)
def check(self, expect_shutdown=False, print_error=False):
if not self.check_function:
return
return self.check_function(expect_shutdown=expect_shutdown, print_error=print_error)
def name(self):
return self.plugin_name
def register_plugin(plugin):
SERVICE_PLUGINS[plugin.name()] = plugin
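# Minimal usage sketch (service name and functions are hypothetical):
# register_plugin(Plugin('myservice', start=start_myservice, check=check_myservice))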
def load_plugin_from_path(file_path, scope=None):
if os.path.exists(file_path):
module = re.sub(r'(^|.+/)([^/]+)/plugins.py', r'\2', file_path)
method_name = 'register_localstack_plugins'
scope = scope or PLUGIN_SCOPE_SERVICES
if scope == PLUGIN_SCOPE_COMMANDS:
method_name = 'register_localstack_commands'
try:
namespace = {}
exec('from %s.plugins import %s' % (module, method_name), namespace)
method_to_execute = namespace[method_name]
except Exception:
# no plugins module (or import error) for this package - skip it silently
return
try:
return method_to_execute()
except Exception as e:
LOGGER.warning('Unable to load plugins from file %s: %s' % (file_path, e))
def load_plugins(scope=None):
scope = scope or PLUGIN_SCOPE_SERVICES
if PLUGINS_LOADED.get(scope, None):
return
setup_logging()
loaded_files = []
result = []
for module in pkgutil.iter_modules():
file_path = None
if six.PY3 and not isinstance(module, tuple):
file_path = '%s/%s/plugins.py' % (module.module_finder.path, module.name)
elif six.PY3 or isinstance(module[0], pkgutil.ImpImporter):
if hasattr(module[0], 'path'):
file_path = '%s/%s/plugins.py' % (module[0].path, module[1])
if file_path and file_path not in loaded_files:
plugin_config = load_plugin_from_path(file_path, scope=scope)
if plugin_config:
result.append(plugin_config)
loaded_files.append(file_path)
# set global flag
PLUGINS_LOADED[scope] = result
return result
# -----------------
# API ENTRY POINTS
# -----------------
def start_apigateway(port=PORT_APIGATEWAY, asynchronous=False, update_listener=None):
return start_moto_server('apigateway', port, name='API Gateway', asynchronous=asynchronous,
backend_port=DEFAULT_PORT_APIGATEWAY_BACKEND, update_listener=update_listener)
def start_s3(port=PORT_S3, asynchronous=False, update_listener=None):
return start_moto_server('s3', port, name='S3', asynchronous=asynchronous,
backend_port=DEFAULT_PORT_S3_BACKEND, update_listener=update_listener)
def start_sns(port=PORT_SNS, asynchronous=False, update_listener=None):
return start_moto_server('sns', port, name='SNS', asynchronous=asynchronous,
backend_port=DEFAULT_PORT_SNS_BACKEND, update_listener=update_listener)
def start_cloudformation(port=PORT_CLOUDFORMATION, asynchronous=False, update_listener=None):
return start_moto_server('cloudformation', port, name='CloudFormation', asynchronous=asynchronous,
backend_port=DEFAULT_PORT_CLOUDFORMATION_BACKEND, update_listener=update_listener)
def start_cloudwatch(port=PORT_CLOUDWATCH, asynchronous=False):
return start_moto_server('cloudwatch', port, name='CloudWatch', asynchronous=asynchronous)
def start_redshift(port=PORT_REDSHIFT, asynchronous=False):
return start_moto_server('redshift', port, name='Redshift', asynchronous=asynchronous)
def start_route53(port=PORT_ROUTE53, asynchronous=False):
return start_moto_server('route53', port, name='Route53', asynchronous=asynchronous)
def start_ses(port=PORT_SES, asynchronous=False):
return start_moto_server('ses', port, name='SES', asynchronous=asynchronous)
def start_elasticsearch_service(port=PORT_ES, asynchronous=False):
return start_local_api('ES', port, method=es_api.serve, asynchronous=asynchronous)
def start_firehose(port=PORT_FIREHOSE, asynchronous=False):
return start_local_api('Firehose', port, method=firehose_api.serve, asynchronous=asynchronous)
def start_dynamodbstreams(port=PORT_DYNAMODBSTREAMS, asynchronous=False):
return start_local_api('DynamoDB Streams', port, method=dynamodbstreams_api.serve, asynchronous=asynchronous)
def start_lambda(port=PORT_LAMBDA, asynchronous=False):
return start_local_api('Lambda', port, method=lambda_api.serve, asynchronous=asynchronous)
def start_ssm(port=PORT_SSM, asynchronous=False):
return start_moto_server('ssm', port, name='SSM', asynchronous=asynchronous)
# ---------------
# HELPER METHODS
# ---------------
def setup_logging():
# determine and set log level
log_level = logging.DEBUG if is_debug() else logging.INFO
logging.basicConfig(level=log_level, format=LOG_FORMAT, datefmt=LOG_DATE_FORMAT)
# disable some logs and warnings
warnings.filterwarnings('ignore')
logging.captureWarnings(True)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.ERROR)
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
def get_service_protocol():
return 'https' if USE_SSL else 'http'
def restore_persisted_data(apis):
for api in apis:
persistence.restore_persisted_data(api)
def register_signal_handlers():
global SIGNAL_HANDLERS_SETUP
if SIGNAL_HANDLERS_SETUP:
return
# register signal handlers
def signal_handler(signal, frame):
stop_infra()
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
SIGNAL_HANDLERS_SETUP = True
def is_debug():
return os.environ.get('DEBUG', '').strip() not in ['', '0', 'false']
def do_run(cmd, asynchronous, print_output=False):
sys.stdout.flush()
if asynchronous:
if is_debug():
print_output = True
outfile = subprocess.PIPE if print_output else None
t = ShellCommandThread(cmd, outfile=outfile)
t.start()
TMP_THREADS.append(t)
return t
else:
return run(cmd)
def start_proxy_for_service(service_name, port, default_backend_port, update_listener, quiet=False, params={}):
# check if we have a custom backend configured
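# e.g. for service_name='s3', an S3_BACKEND environment variable overrides
# the default backend URL constructed below (the value is up to the user,
# e.g. a hypothetical S3_BACKEND=http://localhost:4572)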
custom_backend_url = os.environ.get('%s_BACKEND' % service_name.upper())
backend_url = custom_backend_url or ('http://%s:%s' % (DEFAULT_BACKEND_HOST, default_backend_port))
return start_proxy(port, backend_url=backend_url, update_listener=update_listener, quiet=quiet, params=params)
def start_proxy(port, backend_url, update_listener, quiet=False, params={}):
proxy_thread = GenericProxy(port=port, forward_url=backend_url,
ssl=USE_SSL, update_listener=update_listener, quiet=quiet, params=params)
proxy_thread.start()
TMP_THREADS.append(proxy_thread)
return proxy_thread
def start_moto_server(key, port, name=None, backend_port=None, asynchronous=False, update_listener=None):
moto_server_cmd = '%s/bin/moto_server' % LOCALSTACK_VENV_FOLDER
if not os.path.exists(moto_server_cmd):
moto_server_cmd = run('which moto_server').strip()
cmd = 'VALIDATE_LAMBDA_S3=0 %s %s -p %s -H %s' % (moto_server_cmd, key, backend_port or port, constants.BIND_HOST)
if not name:
name = key
print('Starting mock %s (%s port %s)...' % (name, get_service_protocol(), port))
if backend_port:
start_proxy_for_service(key, port, backend_port, update_listener)
elif USE_SSL:
cmd += ' --ssl'
return do_run(cmd, asynchronous)
def start_local_api(name, port, method, asynchronous=False):
print('Starting mock %s service (%s port %s)...' % (name, get_service_protocol(), port))
if asynchronous:
thread = FuncThread(method, port, quiet=True)
thread.start()
TMP_THREADS.append(thread)
return thread
else:
method(port)
def stop_infra():
global INFRA_STOPPED
if INFRA_STOPPED:
return
event_publisher.fire_event(event_publisher.EVENT_STOP_INFRA)
generic_proxy.QUIET = True
common.cleanup(files=True, quiet=True)
common.cleanup_resources()
lambda_api.cleanup()
time.sleep(2)
# TODO: optimize this (takes too long currently)
# check_infra(retries=2, expect_shutdown=True)
INFRA_STOPPED = True
def check_aws_credentials():
session = boto3.Session()
credentials = None
try:
credentials = session.get_credentials()
except Exception:
pass
if not credentials:
# set temporary dummy credentials
os.environ['AWS_ACCESS_KEY_ID'] = 'LocalStackDummyAccessKey'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'LocalStackDummySecretKey'
session = boto3.Session()
credentials = session.get_credentials()
assert credentials
# -----------------------------
# INFRASTRUCTURE HEALTH CHECKS
# -----------------------------
def check_infra(retries=8, expect_shutdown=False, apis=None, additional_checks=[]):
# guard against 'name in apis' failing when no explicit list is given
if apis is None:
apis = list(config.SERVICE_PORTS.keys())
try:
print_error = retries <= 0
# loop through plugins and check service status
for name, plugin in SERVICE_PLUGINS.items():
if name in apis:
try:
plugin.check(expect_shutdown=expect_shutdown, print_error=print_error)
except Exception as e:
LOGGER.warning('Service "%s" not yet available, retrying...' % name)
raise e
for additional in additional_checks:
additional(expect_shutdown=expect_shutdown)
except Exception as e:
if retries <= 0:
LOGGER.error('Error checking state of local environment (after some retries): %s' % traceback.format_exc())
raise e
time.sleep(3)
check_infra(retries - 1, expect_shutdown=expect_shutdown, apis=apis, additional_checks=additional_checks)
# -------------
# DOCKER STARTUP
# -------------
def start_infra_in_docker():
# load plugins before starting the docker container
plugin_configs = load_plugins()
plugin_run_params = ' '.join([
entry.get('docker', {}).get('run_flags', '') for entry in plugin_configs])
services = os.environ.get('SERVICES', '')
entrypoint = os.environ.get('ENTRYPOINT', '')
cmd = os.environ.get('CMD', '')
image_name = os.environ.get('IMAGE_NAME', constants.DOCKER_IMAGE_NAME)
service_ports = config.SERVICE_PORTS
force_noninteractive = os.environ.get('FORCE_NONINTERACTIVE', '')
# construct port mappings
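# Example of what the loop below computes: sorted ports
# [4567, 4568, 4569, 4571] collapse into ranges [[4567, 4569], [4571, 4571]],
# later rendered as '-p 4567-4569:4567-4569 -p 4571:4571' (ports illustrative).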
ports_list = sorted(service_ports.values())
start_port = 0
last_port = 0
port_ranges = []
for i in range(0, len(ports_list)):
if not start_port:
start_port = ports_list[i]
if not last_port:
last_port = ports_list[i]
if ports_list[i] > last_port + 1:
port_ranges.append([start_port, last_port])
start_port = ports_list[i]
elif i >= len(ports_list) - 1:
port_ranges.append([start_port, ports_list[i]])
last_port = ports_list[i]
port_mappings = ' '.join(
'-p {start}-{end}:{start}-{end}'.format(start=entry[0], end=entry[1])
if entry[0] < entry[1] else '-p {port}:{port}'.format(port=entry[0])
for entry in port_ranges)
if services:
port_mappings = ''
for service, port in service_ports.items():
port_mappings += ' -p {port}:{port}'.format(port=port)
env_str = ''
for env_var in config.CONFIG_ENV_VARS:
value = os.environ.get(env_var, None)
if value is not None:
env_str += '-e %s="%s" ' % (env_var, value)
data_dir_mount = ''
data_dir = os.environ.get('DATA_DIR', None)
if data_dir is not None:
container_data_dir = '/tmp/localstack_data'
data_dir_mount = '-v "%s:%s" ' % (data_dir, container_data_dir)
env_str += '-e DATA_DIR="%s" ' % container_data_dir
interactive = '' if force_noninteractive or in_ci() else '-it '
# append space if parameter is set
entrypoint = '%s ' % entrypoint if entrypoint else entrypoint
plugin_run_params = '%s ' % plugin_run_params if plugin_run_params else plugin_run_params
docker_cmd = ('docker run %s%s%s%s' +
'-p 8080:8080 %s %s' +
'-v "%s:/tmp/localstack" -v "%s:%s" ' +
'-e DOCKER_HOST="unix://%s" ' +
'-e HOST_TMP_FOLDER="%s" "%s" %s') % (
interactive, entrypoint, env_str, plugin_run_params, port_mappings, data_dir_mount,
config.TMP_FOLDER, config.DOCKER_SOCK, config.DOCKER_SOCK, config.DOCKER_SOCK,
config.HOST_TMP_FOLDER, image_name, cmd
)
mkdir(config.TMP_FOLDER)
run_cmd_safe(cmd='chmod -R 777 "%s"' % config.TMP_FOLDER)
print(docker_cmd)
t = ShellCommandThread(docker_cmd, outfile=subprocess.PIPE)
t.start()
time.sleep(2)
t.process.wait()
sys.exit(t.process.returncode)
# -------------
# MAIN STARTUP
# -------------
def start_infra(asynchronous=False, apis=None):
try:
# load plugins
load_plugins()
event_publisher.fire_event(event_publisher.EVENT_START_INFRA)
# set up logging
setup_logging()
if not apis:
apis = list(config.SERVICE_PORTS.keys())
# set environment
os.environ['AWS_REGION'] = DEFAULT_REGION
os.environ['ENV'] = ENV_DEV
# register signal handlers
register_signal_handlers()
# make sure AWS credentials are configured, otherwise boto3 bails on us
check_aws_credentials()
# install libs if not present
install.install_components(apis)
# Some services take a bit to come up
sleep_time = 3
# start services
thread = None
if 'elasticsearch' in apis or 'es' in apis:
sleep_time = max(sleep_time, 8)
# loop through plugins and start each service
for name, plugin in SERVICE_PLUGINS.items():
if name in apis:
t1 = plugin.start(asynchronous=True)
thread = thread or t1
time.sleep(sleep_time)
# check that all infra components are up and running
check_infra(apis=apis)
# restore persisted data
restore_persisted_data(apis=apis)
print('Ready.')
sys.stdout.flush()
if not asynchronous and thread:
# this is a bit of an ugly hack, but we need to make sure that we
# stay in the execution context of the main thread, otherwise our
# signal handlers don't work
while True:
time.sleep(1)
return thread
except KeyboardInterrupt as e:
print('Shutdown')
except Exception as e:
print('Error starting infrastructure: %s %s' % (e, traceback.format_exc()))
sys.stdout.flush()
raise e
finally:
if not asynchronous:
stop_infra()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for the FileFinder flow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import glob
import hashlib
import io
import os
from absl import app
from future.utils import itervalues
import mock
from grr_response_client import vfs
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import temp
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import data_store_utils
from grr_response_server import db
from grr_response_server import file_store
from grr_response_server import flow
from grr_response_server import flow_base
from grr_response_server.aff4_objects import aff4_grr
from grr_response_server.aff4_objects import standard as aff4_standard
# TODO(user):
# TestFileFinderFlow.testTreatsGlobsAsPathsWhenMemoryPathTypeIsUsed expects
# auditing system to work. Refactor and remove the unused import
# pylint: disable=unused-import
from grr_response_server.flows.general import audit as _
# pylint: enable=unused-import
from grr_response_server.flows.general import file_finder
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import action_mocks
from grr.test_lib import db_test_lib
from grr.test_lib import filesystem_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
# pylint:mode=test
@db_test_lib.DualDBTest
class TestFileFinderFlow(flow_test_lib.FlowTestsBaseclass):
"""Test the FileFinder flow."""
flow_base_cls = flow.GRRFlow
def FileNameToURN(self, fname):
return rdfvalue.RDFURN(self.client_id).Add("/fs/os").Add(
os.path.join(self.base_path, "searching", fname))
def FilenameToPathComponents(self, fname):
path = os.path.join(self.base_path, "searching", fname).lstrip("/")
return tuple(path.split(os.path.sep))
EXPECTED_HASHES = {
"auth.log": ("67b8fc07bd4b6efc3b2dce322e8ddf609b540805",
"264eb6ff97fc6c37c5dd4b150cb0a797",
"91c8d6287a095a6fa6437dac50ffe3fe5c5e0d06dff"
"3ae830eedfce515ad6451"),
"dpkg.log": ("531b1cfdd337aa1663f7361b2fd1c8fe43137f4a",
"26973f265ce5ecc1f86bc413e65bfc1d",
"48303a1e7ceec679f6d417b819f42779575ffe8eabf"
"9c880d286a1ee074d8145"),
"dpkg_false.log": ("a2c9cc03c613a44774ae97ed6d181fe77c13e01b",
"ab48f3548f311c77e75ac69ac4e696df",
"a35aface4b45e3f1a95b0df24efc50e14fbedcaa6a7"
"50ba32358eaaffe3c4fb0")
}
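# Each tuple above is (sha1, md5, sha256), matching the order in which
# CheckFilesHashed() and CheckReplies() below index into it.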
def CheckFilesHashed(self, fnames):
"""Checks the returned hashes."""
for fname in fnames:
try:
file_hashes = self.EXPECTED_HASHES[fname]
except KeyError:
raise RuntimeError("Can't check unexpected result for correct "
"hashes: %s" % fname)
if data_store.RelationalDBReadEnabled(
category="vfs") and data_store.RelationalDBReadEnabled(
category="filestore"):
path_info = data_store.REL_DB.ReadPathInfo(
self.client_id.Basename(),
rdf_objects.PathInfo.PathType.OS,
components=self.FilenameToPathComponents(fname))
hash_obj = path_info.hash_entry
else:
with aff4.FACTORY.Open(
self.FileNameToURN(fname), token=self.token) as fd:
hash_obj = fd.Get(fd.Schema.HASH)
self.assertEqual(str(hash_obj.sha1), file_hashes[0])
self.assertEqual(str(hash_obj.md5), file_hashes[1])
self.assertEqual(str(hash_obj.sha256), file_hashes[2])
def CheckFilesNotHashed(self, fnames):
for fname in fnames:
if data_store.RelationalDBReadEnabled(category="vfs"):
try:
path_info = data_store.REL_DB.ReadPathInfo(
self.client_id.Basename(),
rdf_objects.PathInfo.PathType.OS,
components=self.FilenameToPathComponents(fname))
self.assertFalse(path_info.HasField("hash_entry"))
except db.UnknownPathError:
pass # No path at all, everything is okay.
else:
with aff4.FACTORY.Open(
self.FileNameToURN(fname), token=self.token) as fd:
self.assertIsNone(fd.Get(fd.Schema.HASH))
def CheckFilesDownloaded(self, fnames):
for fname in fnames:
if data_store.RelationalDBReadEnabled(category="vfs"):
path_info = data_store.REL_DB.ReadPathInfo(
self.client_id.Basename(),
rdf_objects.PathInfo.PathType.OS,
components=self.FilenameToPathComponents(fname))
size = path_info.stat_entry.st_size
else:
file_urn = self.FileNameToURN(fname)
with aff4.FACTORY.Open(file_urn, token=self.token) as fd:
size = fd.Get(fd.Schema.SIZE)
with io.open(os.path.join(self.base_path, "searching", fname), "rb") as fd:
test_data = fd.read()
self.assertEqual(size, len(test_data))
if data_store.RelationalDBReadEnabled(category="filestore"):
fd = file_store.OpenFile(
db.ClientPath(
self.client_id.Basename(),
rdf_objects.PathInfo.PathType.OS,
components=self.FilenameToPathComponents(fname)))
# Make sure we can actually read the file.
self.assertEqual(fd.read(), test_data)
def CheckFilesNotDownloaded(self, fnames):
for fname in fnames:
if data_store.RelationalDBReadEnabled(category="filestore"):
try:
file_store.OpenFile(
db.ClientPath(
self.client_id.Basename(),
rdf_objects.PathInfo.PathType.OS,
components=self.FilenameToPathComponents(fname)))
self.fail("Found downloaded file: %s" % fname)
except file_store.FileHasNoContentError:
pass
else:
file_urn = self.FileNameToURN(fname)
with aff4.FACTORY.Open(file_urn, token=self.token) as fd:
# Directories have no size attribute.
if fd.Get(fd.Schema.TYPE) == aff4_standard.VFSDirectory.__name__:
continue
self.assertEqual(fd.Get(fd.Schema.SIZE), 0)
def CheckFiles(self, fnames, results):
if fnames is None:
self.assertFalse(results)
return
# If results are expected, check that they are present in the results.
# Also check that there are no other files.
self.assertLen(results, len(fnames))
for r in results:
self.assertIsInstance(r, rdf_file_finder.FileFinderResult)
self.assertCountEqual(
[r.stat_entry.AFF4Path(self.client_id).Basename() for r in results],
fnames)
def CheckReplies(self, replies, action, expected_files):
reply_count = 0
for reply in replies:
self.assertIsInstance(reply, rdf_file_finder.FileFinderResult)
reply_count += 1
if action == rdf_file_finder.FileFinderAction.Action.STAT:
self.assertTrue(reply.stat_entry)
self.assertFalse(reply.hash_entry)
elif action == rdf_file_finder.FileFinderAction.Action.DOWNLOAD:
self.assertTrue(reply.stat_entry)
self.assertTrue(reply.hash_entry)
elif action == rdf_file_finder.FileFinderAction.Action.HASH:
self.assertTrue(reply.stat_entry)
self.assertTrue(reply.hash_entry)
if action != rdf_file_finder.FileFinderAction.Action.STAT:
# Check that file's hash is correct.
file_basename = reply.stat_entry.pathspec.Basename()
try:
file_hashes = self.EXPECTED_HASHES[file_basename]
except KeyError:
raise RuntimeError("Can't check unexpected result for correct "
"hashes: %s" % file_basename)
self.assertEqual(str(reply.hash_entry.sha1), file_hashes[0])
self.assertEqual(str(reply.hash_entry.md5), file_hashes[1])
self.assertEqual(str(reply.hash_entry.sha256), file_hashes[2])
self.assertEqual(reply_count, len(expected_files))
def RunFlow(self, paths=None, conditions=None, action=None):
self.last_session_id = flow_test_lib.TestFlowHelper(
file_finder.FileFinder.__name__,
self.client_mock,
client_id=self.client_id,
paths=paths or [self.path],
pathtype=rdf_paths.PathSpec.PathType.OS,
action=action,
conditions=conditions,
token=self.token)
return flow_test_lib.GetFlowResults(self.client_id, self.last_session_id)
def RunFlowAndCheckResults(
self,
conditions=None,
action=rdf_file_finder.FileFinderAction.Action.STAT,
expected_files=None,
non_expected_files=None,
paths=None):
if not isinstance(action, rdf_file_finder.FileFinderAction):
action = rdf_file_finder.FileFinderAction(action_type=action)
action_type = action.action_type
conditions = conditions or []
expected_files = expected_files or []
non_expected_files = non_expected_files or []
results = self.RunFlow(paths=paths, conditions=conditions, action=action)
self.CheckReplies(results, action_type, expected_files)
self.CheckFiles(expected_files, results)
if action_type == rdf_file_finder.FileFinderAction.Action.STAT:
self.CheckFilesNotDownloaded(expected_files + non_expected_files)
self.CheckFilesNotHashed(expected_files + non_expected_files)
elif action_type == rdf_file_finder.FileFinderAction.Action.DOWNLOAD:
self.CheckFilesHashed(expected_files)
self.CheckFilesNotHashed(non_expected_files)
self.CheckFilesDownloaded(expected_files)
self.CheckFilesNotDownloaded(non_expected_files)
# Downloaded files are hashed to allow for deduping.
elif action_type == rdf_file_finder.FileFinderAction.Action.HASH:
self.CheckFilesNotDownloaded(expected_files + non_expected_files)
self.CheckFilesHashed(expected_files)
self.CheckFilesNotHashed(non_expected_files)
return results
def setUp(self):
super(TestFileFinderFlow, self).setUp()
self.client_mock = action_mocks.FileFinderClientMockWithTimestamps()
self.fixture_path = os.path.join(self.base_path, "searching")
self.path = os.path.join(self.fixture_path, "*.log")
self.client_id = self.SetupClient(0)
def testFileFinderStatActionWithoutConditions(self):
self.RunFlowAndCheckResults(
action=rdf_file_finder.FileFinderAction.Action.STAT,
expected_files=["auth.log", "dpkg.log", "dpkg_false.log"])
def testFileFinderStat(self):
files_to_check = [
# Some files.
"netgroup",
"osx_fsdata",
# Matches lsb-release, lsb-release-bad, lsb-release-notubuntu
"lsb-release*",
# Some directories.
"a",
"checks",
"profiles"
]
paths = [os.path.join(self.fixture_path, name) for name in files_to_check]
expected_files = []
for name in paths:
for result in glob.glob(name):
expected_files.append(self.FileNameToURN(os.path.basename(result)))
# There was a bug in FileFinder with files/directories in the root dir.
paths.append("/bin")
expected_files.append(self.client_id.Add("fs/os/bin"))
results = self.RunFlow(
action=rdf_file_finder.FileFinderAction(
action_type=rdf_file_finder.FileFinderAction.Action.STAT),
paths=paths)
stat_entries = [result.stat_entry for result in results]
result_paths = [stat.AFF4Path(self.client_id) for stat in stat_entries]
self.assertCountEqual(expected_files, result_paths)
FS_NODUMP_FL = 0x00000040
FS_UNRM_FL = 0x00000002
def testFileFinderStatExtFlags(self):
with temp.AutoTempFilePath() as temp_filepath:
filesystem_test_lib.Chattr(temp_filepath, attrs=["+d"])
action = rdf_file_finder.FileFinderAction.Stat()
results = self.RunFlow(action=action, paths=[temp_filepath])
self.assertLen(results, 1)
stat_entry = results[0].stat_entry
self.assertTrue(stat_entry.st_flags_linux & self.FS_NODUMP_FL)
self.assertFalse(stat_entry.st_flags_linux & self.FS_UNRM_FL)
def testFileFinderStatExtAttrs(self):
with temp.AutoTempFilePath() as temp_filepath:
filesystem_test_lib.SetExtAttr(
temp_filepath, name=b"user.bar", value=b"quux")
filesystem_test_lib.SetExtAttr(
temp_filepath, name=b"user.baz", value=b"norf")
action = rdf_file_finder.FileFinderAction.Stat(collect_ext_attrs=True)
results = self.RunFlow(action=action, paths=[temp_filepath])
self.assertLen(results, 1)
stat_entry = results[0].stat_entry
self.assertCountEqual(stat_entry.ext_attrs, [
rdf_client_fs.ExtAttr(name=b"user.bar", value=b"quux"),
rdf_client_fs.ExtAttr(name=b"user.baz", value=b"norf"),
])
def testFileFinderDownloadActionWithMultiplePathsAndFilesInFilestore(self):
# Do a first run to put all files into the file store.
self.RunFlowAndCheckResults(
action=rdf_file_finder.FileFinderAction.Action.DOWNLOAD,
expected_files=["auth.log", "dpkg.log", "dpkg_false.log"])
# This will get the file contents from the filestore instead of collecting
# them.
self.RunFlowAndCheckResults(
action=rdf_file_finder.FileFinderAction.Action.DOWNLOAD,
expected_files=["auth.log", "dpkg.log", "dpkg_false.log"])
def testFileFinderDownloadActionWithoutConditions(self):
self.RunFlowAndCheckResults(
action=rdf_file_finder.FileFinderAction.Action.DOWNLOAD,
expected_files=["auth.log", "dpkg.log", "dpkg_false.log"])
def testFileFinderHashActionWithoutConditions(self):
self.RunFlowAndCheckResults(
action=rdf_file_finder.FileFinderAction.Action.HASH,
expected_files=["auth.log", "dpkg.log", "dpkg_false.log"])
CONDITION_TESTS_ACTIONS = sorted(
set(itervalues(rdf_file_finder.FileFinderAction.Action.enum_dict)))
def testLiteralMatchConditionWithDifferentActions(self):
expected_files = ["auth.log"]
non_expected_files = ["dpkg.log", "dpkg_false.log"]
match = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
mode=rdf_file_finder.FileFinderContentsLiteralMatchCondition.Mode
.ALL_HITS,
bytes_before=10,
bytes_after=10,
literal=b"session opened for user dearjohn")
literal_condition = rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type
.CONTENTS_LITERAL_MATCH,
contents_literal_match=match)
for action in self.CONDITION_TESTS_ACTIONS:
results = self.RunFlowAndCheckResults(
action=action,
conditions=[literal_condition],
expected_files=expected_files,
non_expected_files=non_expected_files)
# Check that the results' matches fields are correctly filled.
self.assertLen(results, 1)
self.assertLen(results[0].matches, 1)
self.assertEqual(results[0].matches[0].offset, 350)
self.assertEqual(results[0].matches[0].data,
"session): session opened for user dearjohn by (uid=0")
def testLiteralMatchConditionWithHexEncodedValue(self):
match = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
mode=rdf_file_finder.FileFinderContentsLiteralMatchCondition.Mode
.FIRST_HIT,
bytes_before=10,
bytes_after=10,
literal=b"\x4D\x5A\x90")
literal_condition = rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type
.CONTENTS_LITERAL_MATCH,
contents_literal_match=match)
paths = [os.path.join(os.path.dirname(self.fixture_path), "hello.exe")]
results = self.RunFlow(paths=paths, conditions=[literal_condition])
# Check that the results' matches fields are correctly filled. Expecting a
# match from hello.exe
self.assertLen(results, 1)
self.assertLen(results[0].matches, 1)
self.assertEqual(results[0].matches[0].offset, 0)
self.assertEqual(results[0].matches[0].data,
b"MZ\x90\x00\x03\x00\x00\x00\x04\x00\x00\x00\xff")
def testRegexMatchConditionWithDifferentActions(self):
expected_files = ["auth.log"]
non_expected_files = ["dpkg.log", "dpkg_false.log"]
regex_condition = rdf_file_finder.FileFinderCondition(
condition_type=(
rdf_file_finder.FileFinderCondition.Type.CONTENTS_REGEX_MATCH),
contents_regex_match=(
rdf_file_finder.FileFinderContentsRegexMatchCondition(
mode="ALL_HITS",
bytes_before=10,
bytes_after=10,
regex=b"session opened for user .*?john")))
for action in self.CONDITION_TESTS_ACTIONS:
results = self.RunFlowAndCheckResults(
action=action,
conditions=[regex_condition],
expected_files=expected_files,
non_expected_files=non_expected_files)
self.assertLen(results, 1)
self.assertLen(results[0].matches, 1)
self.assertEqual(results[0].matches[0].offset, 350)
self.assertEqual(results[0].matches[0].data,
"session): session opened for user dearjohn by (uid=0")
def testTwoRegexMatchConditionsWithDifferentActions1(self):
expected_files = ["auth.log"]
non_expected_files = ["dpkg.log", "dpkg_false.log"]
regex_condition1 = rdf_file_finder.FileFinderCondition(
condition_type=(
rdf_file_finder.FileFinderCondition.Type.CONTENTS_REGEX_MATCH),
contents_regex_match=(
rdf_file_finder.FileFinderContentsRegexMatchCondition(
mode="ALL_HITS",
bytes_before=10,
bytes_after=10,
regex=b"session opened for user .*?john")))
regex_condition2 = rdf_file_finder.FileFinderCondition(
condition_type=(
rdf_file_finder.FileFinderCondition.Type.CONTENTS_REGEX_MATCH),
contents_regex_match=(
rdf_file_finder.FileFinderContentsRegexMatchCondition(
mode="ALL_HITS",
bytes_before=10,
bytes_after=10,
regex=b"format.*should")))
for action in self.CONDITION_TESTS_ACTIONS:
results = self.RunFlowAndCheckResults(
action=action,
conditions=[regex_condition1, regex_condition2],
expected_files=expected_files,
non_expected_files=non_expected_files)
self.assertLen(results, 1)
self.assertLen(results[0].matches, 2)
self.assertEqual(results[0].matches[0].offset, 350)
self.assertEqual(results[0].matches[0].data,
"session): session opened for user dearjohn by (uid=0")
self.assertEqual(results[0].matches[1].offset, 513)
self.assertEqual(results[0].matches[1].data,
"rong line format.... should not be he")
def testTwoRegexMatchConditionsWithDifferentActions2(self):
expected_files = ["auth.log"]
non_expected_files = ["dpkg.log", "dpkg_false.log"]
regex_condition1 = rdf_file_finder.FileFinderCondition(
condition_type=(
rdf_file_finder.FileFinderCondition.Type.CONTENTS_REGEX_MATCH),
contents_regex_match=(
rdf_file_finder.FileFinderContentsRegexMatchCondition(
mode="ALL_HITS",
bytes_before=10,
bytes_after=10,
regex=b"session opened for user .*?john")))
regex_condition2 = rdf_file_finder.FileFinderCondition(
condition_type=(
rdf_file_finder.FileFinderCondition.Type.CONTENTS_REGEX_MATCH),
contents_regex_match=(
rdf_file_finder.FileFinderContentsRegexMatchCondition(
mode="FIRST_HIT", bytes_before=10, bytes_after=10,
regex=b".*")))
for action in self.CONDITION_TESTS_ACTIONS:
results = self.RunFlowAndCheckResults(
action=action,
conditions=[regex_condition1, regex_condition2],
expected_files=expected_files,
non_expected_files=non_expected_files)
self.assertLen(results, 1)
self.assertLen(results[0].matches, 2)
self.assertEqual(results[0].matches[0].offset, 350)
self.assertEqual(results[0].matches[0].data,
"session): session opened for user dearjohn by (uid=0")
self.assertEqual(results[0].matches[1].offset, 0)
self.assertEqual(results[0].matches[1].length, 770)
def testSizeConditionWithDifferentActions(self):
expected_files = ["dpkg.log", "dpkg_false.log"]
non_expected_files = ["auth.log"]
sizes = [
os.stat(os.path.join(self.fixture_path, f)).st_size
for f in expected_files
]
size_condition = rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.SIZE,
size=rdf_file_finder.FileFinderSizeCondition(
max_file_size=max(sizes) + 1))
for action in self.CONDITION_TESTS_ACTIONS:
self.RunFlowAndCheckResults(
action=action,
conditions=[size_condition],
expected_files=expected_files,
non_expected_files=non_expected_files)
def testDownloadAndHashActionSizeLimitWithSkipPolicy(self):
expected_files = ["dpkg.log", "dpkg_false.log"]
non_expected_files = ["auth.log"]
sizes = [
os.stat(os.path.join(self.fixture_path, f)).st_size
for f in expected_files
]
hash_action = rdf_file_finder.FileFinderAction.Hash(
max_size=max(sizes) + 1, oversized_file_policy="SKIP")
download_action = rdf_file_finder.FileFinderAction.Download(
max_size=max(sizes) + 1, oversized_file_policy="SKIP")
for action in [hash_action, download_action]:
self.RunFlowAndCheckResults(
paths=[self.path],
action=action,
expected_files=expected_files,
non_expected_files=non_expected_files)
def testDownloadAndHashActionSizeLimitWithHashTruncatedPolicy(self):
image_path = os.path.join(self.base_path, "test_img.dd")
# Read a bit more than a typical chunk (600 * 1024).
expected_size = 750 * 1024
with io.open(image_path, "rb") as fd:
expected_data = fd.read(expected_size)
d = hashlib.sha1()
d.update(expected_data)
expected_hash = d.digest()
hash_action = rdf_file_finder.FileFinderAction.Hash(
max_size=expected_size, oversized_file_policy="HASH_TRUNCATED")
download_action = rdf_file_finder.FileFinderAction.Download(
max_size=expected_size, oversized_file_policy="HASH_TRUNCATED")
for action in [hash_action, download_action]:
results = self.RunFlow(paths=[image_path], action=action)
if data_store.RelationalDBReadEnabled("vfs"):
with self.assertRaises(file_store.FileHasNoContentError):
self._ReadTestFile(["test_img.dd"],
path_type=rdf_objects.PathInfo.PathType.OS)
path_info = self._ReadTestPathInfo(
["test_img.dd"], path_type=rdf_objects.PathInfo.PathType.OS)
self.assertEqual(path_info.hash_entry.sha1, expected_hash)
self.assertEqual(path_info.hash_entry.num_bytes, expected_size)
else:
urn = rdfvalue.RDFURN(self.client_id).Add("/fs/os").Add(image_path)
vfs_file = aff4.FACTORY.Open(urn, token=self.token)
# Make sure just a VFSFile got written.
self.assertIsInstance(vfs_file, aff4_grr.VFSFile)
hash_entry = data_store_utils.GetFileHashEntry(vfs_file)
self.assertEqual(hash_entry.sha1, expected_hash)
flow_reply = results[0]
self.assertEqual(flow_reply.hash_entry.sha1, expected_hash)
def testDownloadActionSizeLimitWithDownloadTruncatedPolicy(self):
image_path = os.path.join(self.base_path, "test_img.dd")
# Read a bit more than a typical chunk (600 * 1024).
expected_size = 750 * 1024
action = rdf_file_finder.FileFinderAction.Download(
max_size=expected_size, oversized_file_policy="DOWNLOAD_TRUNCATED")
results = self.RunFlow(paths=[image_path], action=action)
with io.open(image_path, "rb") as fd:
expected_data = fd.read(expected_size)
d = hashlib.sha1()
d.update(expected_data)
expected_hash = d.digest()
if data_store.RelationalDBReadEnabled("vfs"):
data = self._ReadTestFile(["test_img.dd"],
path_type=rdf_objects.PathInfo.PathType.OS)
self.assertEqual(data, expected_data)
path_info = self._ReadTestPathInfo(
["test_img.dd"], path_type=rdf_objects.PathInfo.PathType.OS)
self.assertEqual(path_info.hash_entry.sha1, expected_hash)
self.assertEqual(path_info.hash_entry.num_bytes, expected_size)
else:
urn = rdfvalue.RDFURN(self.client_id).Add("/fs/os").Add(image_path)
blobimage = aff4.FACTORY.Open(urn, token=self.token)
# Make sure a VFSBlobImage got written.
self.assertIsInstance(blobimage, aff4_grr.VFSBlobImage)
self.assertLen(blobimage, expected_size)
data = blobimage.read(100 * expected_size)
self.assertEqual(data, expected_data)
hash_obj = data_store_utils.GetFileHashEntry(blobimage)
self.assertEqual(hash_obj.sha1, expected_hash)
flow_reply = results[0]
self.assertEqual(flow_reply.hash_entry.sha1, expected_hash)
def testSizeAndRegexConditionsWithDifferentActions(self):
files_over_size_limit = ["auth.log"]
filtered_files = ["dpkg.log", "dpkg_false.log"]
expected_files = []
non_expected_files = files_over_size_limit + filtered_files
sizes = [
os.stat(os.path.join(self.fixture_path, f)).st_size
for f in files_over_size_limit
]
size_condition = rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.SIZE,
size=rdf_file_finder.FileFinderSizeCondition(
max_file_size=min(sizes) - 1))
regex_condition = rdf_file_finder.FileFinderCondition(
condition_type=(
rdf_file_finder.FileFinderCondition.Type.CONTENTS_REGEX_MATCH),
contents_regex_match=rdf_file_finder
.FileFinderContentsRegexMatchCondition(
mode=(rdf_file_finder.FileFinderContentsRegexMatchCondition.Mode
.ALL_HITS),
bytes_before=10,
bytes_after=10,
regex=b"session opened for user .*?john"))
for action in self.CONDITION_TESTS_ACTIONS:
self.RunFlowAndCheckResults(
action=action,
conditions=[size_condition, regex_condition],
expected_files=expected_files,
non_expected_files=non_expected_files)
# Check that order of conditions doesn't influence results
for action in self.CONDITION_TESTS_ACTIONS:
self.RunFlowAndCheckResults(
action=action,
conditions=[regex_condition, size_condition],
expected_files=expected_files,
non_expected_files=non_expected_files)
def testModificationTimeConditionWithDifferentActions(self):
expected_files = ["dpkg.log", "dpkg_false.log"]
non_expected_files = ["auth.log"]
change_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1444444440)
modification_time_condition = rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type
.MODIFICATION_TIME,
modification_time=rdf_file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=change_time))
for action in self.CONDITION_TESTS_ACTIONS:
self.RunFlowAndCheckResults(
action=action,
conditions=[modification_time_condition],
expected_files=expected_files,
non_expected_files=non_expected_files)
def testAccessTimeConditionWithDifferentActions(self):
expected_files = ["dpkg.log", "dpkg_false.log"]
non_expected_files = ["auth.log"]
change_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1444444440)
access_time_condition = rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.ACCESS_TIME,
access_time=rdf_file_finder.FileFinderAccessTimeCondition(
min_last_access_time=change_time))
for action in self.CONDITION_TESTS_ACTIONS:
self.RunFlowAndCheckResults(
action=action,
conditions=[access_time_condition],
expected_files=expected_files,
non_expected_files=non_expected_files)
def testInodeChangeTimeConditionWithDifferentActions(self):
expected_files = ["dpkg.log", "dpkg_false.log"]
non_expected_files = ["auth.log"]
change_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1444444440)
inode_change_time_condition = rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type
.INODE_CHANGE_TIME,
inode_change_time=rdf_file_finder.FileFinderInodeChangeTimeCondition(
min_last_inode_change_time=change_time))
for action in self.CONDITION_TESTS_ACTIONS:
self.RunFlowAndCheckResults(
action=action,
conditions=[inode_change_time_condition],
expected_files=expected_files,
non_expected_files=non_expected_files)
def _RunTSKFileFinder(self, paths):
image_path = os.path.join(self.base_path, "ntfs_img.dd")
with utils.Stubber(
vfs, "VFS_VIRTUALROOTS", {
rdf_paths.PathSpec.PathType.TSK:
rdf_paths.PathSpec(
path=image_path, pathtype="OS", offset=63 * 512)
}):
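# Note (added for clarity): 63 * 512 = 32256, i.e. the NTFS filesystem starts
# 63 sectors into the image. That is why the relational-DB path components
# below use "ntfs_img.dd:32256" while the AFF4 URNs use the sector-based
# suffix "ntfs_img.dd:63".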
action = rdf_file_finder.FileFinderAction.Action.DOWNLOAD
with test_lib.SuppressLogs():
flow_test_lib.TestFlowHelper(
file_finder.FileFinder.__name__,
self.client_mock,
client_id=self.client_id,
paths=paths,
pathtype=rdf_paths.PathSpec.PathType.TSK,
action=rdf_file_finder.FileFinderAction(action_type=action),
token=self.token)
def _ListTestChildPathInfos(self,
path_components,
path_type=rdf_objects.PathInfo.PathType.TSK):
components = self.base_path.strip("/").split("/")
components += path_components
return data_store.REL_DB.ListChildPathInfos(self.client_id.Basename(),
path_type, tuple(components))
def _ReadTestPathInfo(self,
path_components,
path_type=rdf_objects.PathInfo.PathType.TSK):
components = self.base_path.strip("/").split("/")
components += path_components
return data_store.REL_DB.ReadPathInfo(self.client_id.Basename(), path_type,
tuple(components))
def _ReadTestFile(self,
path_components,
path_type=rdf_objects.PathInfo.PathType.TSK):
components = self.base_path.strip("/").split("/")
components += path_components
fd = file_store.OpenFile(
db.ClientPath(
self.client_id.Basename(), path_type, components=tuple(components)))
return fd.read(10000000)
def testRecursiveADSHandling(self):
"""This tests some more obscure NTFS features - ADSs on directories."""
self._RunTSKFileFinder(["adstest/**"])
self._CheckDir()
self._CheckSubdir()
def testADSHandling(self):
self._RunTSKFileFinder(["adstest/*"])
self._CheckDir()
def _CheckDir(self):
if data_store.RelationalDBReadEnabled("vfs"):
self._CheckDirRelational()
else:
self._CheckDirAFF4()
def _CheckDirRelational(self):
children = self._ListTestChildPathInfos(["ntfs_img.dd:32256", "adstest"])
# There should be four entries:
# one file, one directory, and one ADS for each.
self.assertLen(children, 4)
data = self._ReadTestFile(["ntfs_img.dd:32256", "adstest", "a.txt"])
self.assertEqual(data, "This is a.txt")
data = self._ReadTestFile(["ntfs_img.dd:32256", "adstest", "a.txt:ads.txt"])
self.assertEqual(data, "This is the ads for a.txt")
data = self._ReadTestFile(["ntfs_img.dd:32256", "adstest", "dir:ads.txt"])
self.assertEqual(data, "This is the dir ads")
def _CheckDirAFF4(self):
output = self.client_id.Add("fs/tsk").Add(
self.base_path).Add("ntfs_img.dd:63").Add("adstest")
results = list(aff4.FACTORY.Open(output, token=self.token).OpenChildren())
# There should be four entries:
# one file, one directory, and one ADS for each.
self.assertLen(results, 4)
counter = collections.Counter([type(x) for x in results])
# There should be one directory and three files. It's important that all
# ADSs have been created as files or we won't be able to access the data.
self.assertEqual(counter[aff4_grr.VFSBlobImage], 3)
self.assertEqual(counter[aff4_standard.VFSDirectory], 1)
# Make sure we can access all the data.
fd = aff4.FACTORY.Open(output.Add("a.txt"), token=self.token)
self.assertEqual(fd.read(100), "This is a.txt")
fd = aff4.FACTORY.Open(output.Add("a.txt:ads.txt"), token=self.token)
self.assertEqual(fd.read(100), "This is the ads for a.txt")
fd = aff4.FACTORY.Open(output.Add("dir:ads.txt"), token=self.token)
self.assertEqual(fd.read(100), "This is the dir ads")
def _CheckSubdir(self):
if data_store.RelationalDBReadEnabled("filestore"):
self._CheckSubdirRelational()
else:
self._CheckSubdirAFF4()
def _CheckSubdirRelational(self):
base_components = ["ntfs_img.dd:32256", "adstest", "dir"]
children = self._ListTestChildPathInfos(base_components)
# There should be three entries: two files, one has an ADS.
self.assertLen(children, 3)
data = self._ReadTestFile(base_components + ["b.txt"])
self.assertEqual(data, "This is b.txt")
data = self._ReadTestFile(base_components + ["b.txt:ads.txt"])
self.assertEqual(data, "This is the ads for b.txt")
data = self._ReadTestFile(base_components + ["no_ads.txt"])
self.assertEqual(data, "This file has no ads")
def _CheckSubdirAFF4(self):
# Also in the subdirectory.
output = self.client_id.Add("fs/tsk").Add(
self.base_path).Add("ntfs_img.dd:63").Add("adstest").Add("dir")
fd = aff4.FACTORY.Open(output, token=self.token)
results = list(fd.OpenChildren())
# Here we have two files, one has an ads.
self.assertLen(results, 3)
base_urn = fd.urn
# Make sure we can access all the data.
fd = aff4.FACTORY.Open(base_urn.Add("b.txt"), token=self.token)
self.assertEqual(fd.read(100), "This is b.txt")
fd = aff4.FACTORY.Open(base_urn.Add("b.txt:ads.txt"), token=self.token)
self.assertEqual(fd.read(100), "This is the ads for b.txt")
# This tests for a regression where ADS data attached to the base directory
# leaked into files inside the directory.
fd = aff4.FACTORY.Open(base_urn.Add("no_ads.txt"), token=self.token)
self.assertEqual(fd.read(100), "This file has no ads")
def testEmptyPathListDoesNothing(self):
flow_test_lib.TestFlowHelper(
file_finder.FileFinder.__name__,
self.client_mock,
client_id=self.client_id,
paths=[],
pathtype=rdf_paths.PathSpec.PathType.OS,
token=self.token)
class RelationalFlowFileFinderTest(db_test_lib.RelationalDBEnabledMixin,
TestFileFinderFlow):
flow_base_cls = flow_base.FlowBase
def testUseExternalStores(self):
if not data_store.RelationalDBReadEnabled("filestore"):
self.skipTest("Test uses relational filestore.")
with temp.AutoTempDirPath(remove_non_empty=True) as tempdir:
path = os.path.join(tempdir, "foo")
with io.open(path, "w") as fd:
fd.write("some content")
paths = [path]
action = rdf_file_finder.FileFinderAction(
action_type=rdf_file_finder.FileFinderAction.Action.DOWNLOAD)
action.download.use_external_stores = False
with mock.patch.object(file_store.EXTERNAL_FILE_STORE, "AddFiles") as efs:
flow_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(file_finder.FileFinder),
self.client_mock,
client_id=self.client_id,
paths=paths,
pathtype=rdf_paths.PathSpec.PathType.OS,
action=action,
process_non_regular_files=True,
token=self.token)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 1)
self.assertEqual(efs.call_count, 0)
# Change the file or the file finder will see that it was downloaded
# already and skip it.
with io.open(path, "w") as fd:
fd.write("some other content")
action.download.use_external_stores = True
with mock.patch.object(file_store.EXTERNAL_FILE_STORE, "AddFiles") as efs:
flow_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(file_finder.FileFinder),
self.client_mock,
client_id=self.client_id,
paths=paths,
pathtype=rdf_paths.PathSpec.PathType.OS,
action=action,
process_non_regular_files=True,
token=self.token)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 1)
self.assertEqual(efs.call_count, 1)
@db_test_lib.DualDBTest
class TestClientFileFinderFlow(flow_test_lib.FlowTestsBaseclass):
"""Test the ClientFileFinder flow."""
def setUp(self):
super(TestClientFileFinderFlow, self).setUp()
self.client_id = self.SetupClient(0)
def _RunCFF(self, paths, action):
flow_id = flow_test_lib.TestFlowHelper(
file_finder.ClientFileFinder.__name__,
action_mocks.ClientFileFinderClientMock(),
client_id=self.client_id,
paths=paths,
pathtype=rdf_paths.PathSpec.PathType.OS,
action=rdf_file_finder.FileFinderAction(action_type=action),
process_non_regular_files=True,
token=self.token)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
return results, flow_id
def testClientFileFinder(self):
paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
action = rdf_file_finder.FileFinderAction.Action.STAT
results, _ = self._RunCFF(paths, action)
self.assertLen(results, 5)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
for p in results
]
self.assertCountEqual(relpaths, [
"History.plist", "History.xml.plist", "test.plist",
"parser_test/com.google.code.grr.plist",
"parser_test/InstallHistory.plist"
])
def testUseExternalStores(self):
if not data_store.RelationalDBReadEnabled("filestore"):
self.skipTest("Test uses relational filestore.")
paths = [os.path.join(self.base_path, "test.plist")]
action = rdf_file_finder.FileFinderAction(
action_type=rdf_file_finder.FileFinderAction.Action.DOWNLOAD)
action.download.use_external_stores = False
with mock.patch.object(file_store.EXTERNAL_FILE_STORE, "AddFiles") as efs:
flow_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(file_finder.ClientFileFinder),
action_mocks.ClientFileFinderClientMock(),
client_id=self.client_id,
paths=paths,
pathtype=rdf_paths.PathSpec.PathType.OS,
action=action,
process_non_regular_files=True,
token=self.token)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 1)
self.assertEqual(efs.call_count, 0)
action.download.use_external_stores = True
with mock.patch.object(file_store.EXTERNAL_FILE_STORE, "AddFiles") as efs:
flow_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(file_finder.ClientFileFinder),
action_mocks.ClientFileFinderClientMock(),
client_id=self.client_id,
paths=paths,
pathtype=rdf_paths.PathSpec.PathType.OS,
action=action,
process_non_regular_files=True,
token=self.token)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 1)
self.assertEqual(efs.call_count, 1)
def _VerifyDownloadedFiles(self, results):
if data_store.RelationalDBReadEnabled("filestore"):
for r in results:
original_path = r.stat_entry.pathspec.path
fd = file_store.OpenFile(
db.ClientPath(
self.client_id.Basename(),
rdf_objects.PathInfo.PathType.OS,
components=original_path.strip("/").split("/")))
with io.open(original_path, "rb") as orig_fd:
self.assertEqual(fd.read(), orig_fd.read())
else:
for r in results:
file_urn = r.stat_entry.AFF4Path(self.client_id)
fd = aff4.FACTORY.Open(file_urn, token=self.token)
data = fd.read()
with io.open(r.stat_entry.pathspec.path, "rb") as orig_fd:
self.assertEqual(data, orig_fd.read())
def testFileWithMoreThanOneChunk(self):
path = os.path.join(self.base_path, "History.plist")
s = os.stat(path).st_size
action = rdf_file_finder.FileFinderAction.Download(chunk_size=s // 4)
flow_id = flow_test_lib.TestFlowHelper(
file_finder.ClientFileFinder.__name__,
action_mocks.ClientFileFinderClientMock(),
client_id=self.client_id,
paths=[path],
pathtype=rdf_paths.PathSpec.PathType.OS,
action=action,
process_non_regular_files=True,
token=self.token)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 1)
self._VerifyDownloadedFiles(results)
def testFileWithExactlyOneChunk(self):
path = os.path.join(self.base_path, "History.plist")
s = os.stat(path).st_size
action = rdf_file_finder.FileFinderAction.Download(chunk_size=s * 2)
flow_id = flow_test_lib.TestFlowHelper(
file_finder.ClientFileFinder.__name__,
action_mocks.ClientFileFinderClientMock(),
client_id=self.client_id,
paths=[path],
pathtype=rdf_paths.PathSpec.PathType.OS,
action=action,
process_non_regular_files=True,
token=self.token)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 1)
self._VerifyDownloadedFiles(results)
def testClientFileFinderDownload(self):
paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
action = rdf_file_finder.FileFinderAction.Action.DOWNLOAD
results, _ = self._RunCFF(paths, action)
self.assertLen(results, 5)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
for p in results
]
self.assertCountEqual(relpaths, [
"History.plist", "History.xml.plist", "test.plist",
"parser_test/com.google.code.grr.plist",
"parser_test/InstallHistory.plist"
])
self._VerifyDownloadedFiles(results)
def testClientFileFinderPathCasing(self):
paths = [
os.path.join(self.base_path, "PARSER_TEST/*.plist"),
os.path.join(self.base_path, "history.plist"),
os.path.join(self.base_path, "InstallHistory.plist")
]
action = rdf_file_finder.FileFinderAction.Action.STAT
results, _ = self._RunCFF(paths, action)
self.assertLen(results, 3)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
for p in results
]
self.assertCountEqual(relpaths, [
"History.plist", "parser_test/InstallHistory.plist",
"parser_test/com.google.code.grr.plist"
])
def _SetupUnicodePath(self, path):
try:
dir_path = os.path.join(path, u"厨房")
os.mkdir(dir_path)
except UnicodeEncodeError:
self.skipTest("Test needs a unicode capable file system.")
file_path = os.path.join(dir_path, u"卫浴洁.txt")
with io.open(file_path, "w") as f:
f.write(u"hello world!")
def testClientFileFinderUnicodeRegex(self):
self._SetupUnicodePath(self.temp_dir)
paths = [
os.path.join(self.temp_dir, "*"),
os.path.join(self.temp_dir, u"厨房/*.txt")
]
action = rdf_file_finder.FileFinderAction.Action.STAT
results, _ = self._RunCFF(paths, action)
self.assertLen(results, 2)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.temp_dir)
for p in results
]
self.assertCountEqual(relpaths, [u"厨房", u"厨房/卫浴洁.txt"])
def testClientFileFinderUnicodeLiteral(self):
self._SetupUnicodePath(self.temp_dir)
paths = [os.path.join(self.temp_dir, u"厨房/卫浴洁.txt")]
action = rdf_file_finder.FileFinderAction.Action.STAT
results, _ = self._RunCFF(paths, action)
self.assertLen(results, 1)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.temp_dir)
for p in results
]
self.assertCountEqual(relpaths, [u"厨房/卫浴洁.txt"])
def testPathInterpolation(self):
bar = rdf_client.User(username="bar")
baz = rdf_client.User(username="baz")
self.client_id = self.SetupClient(
0, system="foo", fqdn="norf", users=[bar, baz])
with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
self._Touch(os.path.join(temp_dirpath, "foo", "bar"))
self._Touch(os.path.join(temp_dirpath, "foo", "baz"))
self._Touch(os.path.join(temp_dirpath, "foo", "quux"))
self._Touch(os.path.join(temp_dirpath, "thud", "norf", "plugh"))
self._Touch(os.path.join(temp_dirpath, "thud", "norf", "blargh"))
paths = [
os.path.join(temp_dirpath, "%%os%%", "%%users.username%%"),
os.path.join(temp_dirpath, "thud", "%%fqdn%%", "plugh"),
]
action = rdf_file_finder.FileFinderAction.Action.STAT
results, flow_id = self._RunCFF(paths, action)
result_paths = [result.stat_entry.pathspec.path for result in results]
self.assertCountEqual(result_paths, [
os.path.join(temp_dirpath, "foo", "bar"),
os.path.join(temp_dirpath, "foo", "baz"),
os.path.join(temp_dirpath, "thud", "norf", "plugh")
])
# Also check that the argument protobuf still has the original values.
flow_obj = flow_test_lib.GetFlowObj(self.client_id, flow_id)
args = flow_obj.args
self.assertCountEqual(args.paths, paths)
# TODO(hanuszczak): Similar function can be found in other modules. It should
# be implemented once in the test library.
def _Touch(self, filepath):
dirpath = os.path.dirname(filepath)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
with io.open(filepath, "wb"):
pass
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
# changelog.py - changelog class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from .i18n import _
from .node import (
bin,
hex,
nullid,
)
from .thirdparty import (
attr,
)
from . import (
encoding,
error,
revlog,
util,
)
_defaultextra = {'branch': 'default'}
def _string_escape(text):
"""
>>> from .pycompat import bytechr as chr
>>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
>>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
>>> s
'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
>>> res = _string_escape(s)
>>> s == util.unescapestr(res)
True
"""
# subset of the string_escape codec
text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
return text.replace('\0', '\\0')
def decodeextra(text):
"""
>>> from .pycompat import bytechr as chr
>>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
... ).items())
[('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
>>> sorted(decodeextra(encodeextra({b'foo': b'bar',
... b'baz': chr(92) + chr(0) + b'2'})
... ).items())
[('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
"""
extra = _defaultextra.copy()
for l in text.split('\0'):
if l:
if '\\0' in l:
# fix up \0 without getting into trouble with \\0
l = l.replace('\\\\', '\\\\\n')
l = l.replace('\\0', '\0')
l = l.replace('\n', '')
k, v = util.unescapestr(l).split(':', 1)
extra[k] = v
return extra
def encodeextra(d):
# keys must be sorted to produce a deterministic changelog entry
items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
return "\0".join(items)
def stripdesc(desc):
"""strip trailing whitespace and leading and trailing empty lines"""
return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
class appender(object):
'''the changelog index must be updated last on disk, so we use this class
to delay writes to it'''
def __init__(self, vfs, name, mode, buf):
self.data = buf
fp = vfs(name, mode)
self.fp = fp
self.offset = fp.tell()
self.size = vfs.fstat(fp).st_size
self._end = self.size
def end(self):
return self._end
def tell(self):
return self.offset
def flush(self):
pass
def close(self):
self.fp.close()
def seek(self, offset, whence=0):
'''virtual file offset spans real file and data'''
if whence == 0:
self.offset = offset
elif whence == 1:
self.offset += offset
elif whence == 2:
self.offset = self.end() + offset
if self.offset < self.size:
self.fp.seek(self.offset)
def read(self, count=-1):
'''only trick here is reads that span real file and data'''
ret = ""
if self.offset < self.size:
s = self.fp.read(count)
ret = s
self.offset += len(s)
if count > 0:
count -= len(s)
if count != 0:
doff = self.offset - self.size
self.data.insert(0, "".join(self.data))
del self.data[1:]
s = self.data[0][doff:doff + count]
self.offset += len(s)
ret += s
return ret
def write(self, s):
self.data.append(bytes(s))
self.offset += len(s)
self._end += len(s)
def _divertopener(opener, target):
"""build an opener that writes in 'target.a' instead of 'target'"""
def _divert(name, mode='r', checkambig=False):
if name != target:
return opener(name, mode)
return opener(name + ".a", mode)
return _divert
def _delayopener(opener, target, buf):
"""build an opener that stores chunks in 'buf' instead of 'target'"""
def _delay(name, mode='r', checkambig=False):
if name != target:
return opener(name, mode)
return appender(opener, name, mode, buf)
return _delay
@attr.s
class _changelogrevision(object):
# Extensions might modify _defaultextra, so let the constructor below pass
# it in
extra = attr.ib()
manifest = attr.ib(default=nullid)
user = attr.ib(default='')
date = attr.ib(default=(0, 0))
files = attr.ib(default=attr.Factory(list))
description = attr.ib(default='')
class changelogrevision(object):
"""Holds results of a parsed changelog revision.
Changelog revisions consist of multiple pieces of data, including
the manifest node, user, and date. This object exposes a view into
the parsed object.
"""
__slots__ = (
u'_offsets',
u'_text',
)
def __new__(cls, text):
if not text:
return _changelogrevision(extra=_defaultextra)
self = super(changelogrevision, cls).__new__(cls)
# We could return here and implement the following as an __init__.
# But doing it here is equivalent and saves an extra function call.
# format used:
# nodeid\n : manifest node in ascii
# user\n : user, no \n or \r allowed
# time tz extra\n : date (time is int or float, timezone is int)
# : extra is metadata, encoded and separated by '\0'
# : older versions ignore it
# files\n\n : files modified by the cset, no \n or \r allowed
# (.*) : comment (free text, ideally utf-8)
#
# changelog v0 doesn't use extra
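# Illustrative example of the layout above (made-up values, not taken from
# a real repository):
#   "<40-char manifest hex>\n"
#   "Jane Doe <jane@example.org>\n"
#   "1500000000 -7200 branch:stable\n"
#   "dir/file1.py\ndir/file2.py\n"
#   "\n"
#   "commit message (free text)"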
nl1 = text.index('\n')
nl2 = text.index('\n', nl1 + 1)
nl3 = text.index('\n', nl2 + 1)
# The list of files may be empty, which means nl3 is the first of the
# double newline that precedes the description.
if text[nl3 + 1:nl3 + 2] == '\n':
doublenl = nl3
else:
doublenl = text.index('\n\n', nl3 + 1)
self._offsets = (nl1, nl2, nl3, doublenl)
self._text = text
return self
@property
def manifest(self):
return bin(self._text[0:self._offsets[0]])
@property
def user(self):
off = self._offsets
return encoding.tolocal(self._text[off[0] + 1:off[1]])
@property
def _rawdate(self):
off = self._offsets
dateextra = self._text[off[1] + 1:off[2]]
return dateextra.split(' ', 2)[0:2]
@property
def _rawextra(self):
off = self._offsets
dateextra = self._text[off[1] + 1:off[2]]
fields = dateextra.split(' ', 2)
if len(fields) != 3:
return None
return fields[2]
@property
def date(self):
raw = self._rawdate
time = float(raw[0])
# Various tools did silly things with the timezone.
try:
timezone = int(raw[1])
except ValueError:
timezone = 0
return time, timezone
@property
def extra(self):
raw = self._rawextra
if raw is None:
return _defaultextra
return decodeextra(raw)
@property
def files(self):
off = self._offsets
if off[2] == off[3]:
return []
return self._text[off[2] + 1:off[3]].split('\n')
@property
def description(self):
return encoding.tolocal(self._text[self._offsets[3] + 2:])
class changelog(revlog.revlog):
def __init__(self, opener, trypending=False):
"""Load a changelog revlog using an opener.
If ``trypending`` is true, we attempt to load the index from a
``00changelog.i.a`` file instead of the default ``00changelog.i``.
The ``00changelog.i.a`` file contains index (and possibly inline
revision) data for a transaction that hasn't been finalized yet.
It exists in a separate file to facilitate readers (such as
hooks processes) accessing data before a transaction is finalized.
"""
if trypending and opener.exists('00changelog.i.a'):
indexfile = '00changelog.i.a'
else:
indexfile = '00changelog.i'
datafile = '00changelog.d'
revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
checkambig=True, mmaplargeindex=True)
if self._initempty:
# changelogs don't benefit from generaldelta
self.version &= ~revlog.FLAG_GENERALDELTA
self._generaldelta = False
# Delta chains for changelogs tend to be very small because entries
# tend to be small and don't delta well with each other. So disable delta
# chains.
self.storedeltachains = False
self._realopener = opener
self._delayed = False
self._delaybuf = None
self._divert = False
self.filteredrevs = frozenset()
def tip(self):
"""filtered version of revlog.tip"""
for i in xrange(len(self) - 1, -2, -1):
if i not in self.filteredrevs:
return self.node(i)
def __contains__(self, rev):
"""filtered version of revlog.__contains__"""
return (0 <= rev < len(self)
and rev not in self.filteredrevs)
def __iter__(self):
"""filtered version of revlog.__iter__"""
if len(self.filteredrevs) == 0:
return revlog.revlog.__iter__(self)
def filterediter():
for i in xrange(len(self)):
if i not in self.filteredrevs:
yield i
return filterediter()
def revs(self, start=0, stop=None):
"""filtered version of revlog.revs"""
for i in super(changelog, self).revs(start, stop):
if i not in self.filteredrevs:
yield i
@util.propertycache
def nodemap(self):
# XXX need filtering too
self.rev(self.node(0))
return self._nodecache
def reachableroots(self, minroot, heads, roots, includepath=False):
return self.index.reachableroots2(minroot, heads, roots, includepath)
def headrevs(self):
if self.filteredrevs:
try:
return self.index.headrevsfiltered(self.filteredrevs)
# AttributeError covers non-c-extension environments and
# old c extensions without filter handling.
except AttributeError:
return self._headrevs()
return super(changelog, self).headrevs()
def strip(self, *args, **kwargs):
# XXX make something better than assert
# We can't expect proper strip behavior if we are filtered.
assert not self.filteredrevs
super(changelog, self).strip(*args, **kwargs)
def rev(self, node):
"""filtered version of revlog.rev"""
r = super(changelog, self).rev(node)
if r in self.filteredrevs:
raise error.FilteredLookupError(hex(node), self.indexfile,
_('filtered node'))
return r
def node(self, rev):
"""filtered version of revlog.node"""
if rev in self.filteredrevs:
raise error.FilteredIndexError(rev)
return super(changelog, self).node(rev)
def linkrev(self, rev):
"""filtered version of revlog.linkrev"""
if rev in self.filteredrevs:
raise error.FilteredIndexError(rev)
return super(changelog, self).linkrev(rev)
def parentrevs(self, rev):
"""filtered version of revlog.parentrevs"""
if rev in self.filteredrevs:
raise error.FilteredIndexError(rev)
return super(changelog, self).parentrevs(rev)
def flags(self, rev):
"""filtered version of revlog.flags"""
if rev in self.filteredrevs:
raise error.FilteredIndexError(rev)
return super(changelog, self).flags(rev)
def delayupdate(self, tr):
"delay visibility of index updates to other readers"
if not self._delayed:
if len(self) == 0:
self._divert = True
if self._realopener.exists(self.indexfile + '.a'):
self._realopener.unlink(self.indexfile + '.a')
self.opener = _divertopener(self._realopener, self.indexfile)
else:
self._delaybuf = []
self.opener = _delayopener(self._realopener, self.indexfile,
self._delaybuf)
self._delayed = True
tr.addpending('cl-%i' % id(self), self._writepending)
tr.addfinalize('cl-%i' % id(self), self._finalize)
def _finalize(self, tr):
"finalize index updates"
self._delayed = False
self.opener = self._realopener
# move redirected index data back into place
if self._divert:
assert not self._delaybuf
tmpname = self.indexfile + ".a"
nfile = self.opener.open(tmpname)
nfile.close()
self.opener.rename(tmpname, self.indexfile, checkambig=True)
elif self._delaybuf:
fp = self.opener(self.indexfile, 'a', checkambig=True)
fp.write("".join(self._delaybuf))
fp.close()
self._delaybuf = None
self._divert = False
# split when we're done
self.checkinlinesize(tr)
def _writepending(self, tr):
"create a file containing the unfinalized state for pretxnchangegroup"
if self._delaybuf:
# make a temporary copy of the index
fp1 = self._realopener(self.indexfile)
pendingfilename = self.indexfile + ".a"
# register as a temp file to ensure cleanup on failure
tr.registertmp(pendingfilename)
# write existing data
fp2 = self._realopener(pendingfilename, "w")
fp2.write(fp1.read())
# add pending data
fp2.write("".join(self._delaybuf))
fp2.close()
# switch modes so finalize can simply rename
self._delaybuf = None
self._divert = True
self.opener = _divertopener(self._realopener, self.indexfile)
if self._divert:
return True
return False
def checkinlinesize(self, tr, fp=None):
if not self._delayed:
revlog.revlog.checkinlinesize(self, tr, fp)
def read(self, node):
"""Obtain data from a parsed changelog revision.
Returns a 6-tuple of:
- manifest node in binary
- author/user as a localstr
- date as a 2-tuple of (time, timezone)
- list of files
- commit message as a localstr
- dict of extra metadata
Unless you need to access all fields, consider calling
``changelogrevision`` instead, as it is faster for partial object
access.
"""
c = changelogrevision(self.revision(node))
return (
c.manifest,
c.user,
c.date,
c.files,
c.description,
c.extra
)
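# Illustrative caller-side usage (sketch only, not part of this module):
#   manifest, user, date, files, desc, extra = cl.read(node)
# or, when only some fields are needed:
#   rev = cl.changelogrevision(node)
#   author, message = rev.user, rev.description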
def changelogrevision(self, nodeorrev):
"""Obtain a ``changelogrevision`` for a node or revision."""
return changelogrevision(self.revision(nodeorrev))
def readfiles(self, node):
"""
short version of read that only returns the files modified by the cset
"""
text = self.revision(node)
if not text:
return []
last = text.index("\n\n")
l = text[:last].split('\n')
return l[3:]
def add(self, manifest, files, desc, transaction, p1, p2,
user, date=None, extra=None):
# Convert to UTF-8 encoded bytestrings as the very first
# thing: calling any method on a localstr object will turn it
# into a str object and the cached UTF-8 string is thus lost.
user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
user = user.strip()
# An empty username or a username with a "\n" will make the
# revision text contain two "\n\n" sequences -> corrupt
# repository since read cannot unpack the revision.
if not user:
raise error.RevlogError(_("empty username"))
if "\n" in user:
raise error.RevlogError(_("username %s contains a newline")
% repr(user))
desc = stripdesc(desc)
if date:
parseddate = "%d %d" % util.parsedate(date)
else:
parseddate = "%d %d" % util.makedate()
if extra:
branch = extra.get("branch")
if branch in ("default", ""):
del extra["branch"]
elif branch in (".", "null", "tip"):
raise error.RevlogError(_('the name \'%s\' is reserved')
% branch)
if extra:
extra = encodeextra(extra)
parseddate = "%s %s" % (parseddate, extra)
l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
text = "\n".join(l)
return self.addrevision(text, transaction, len(self), p1, p2)
def branchinfo(self, rev):
"""return the branch name and open/close state of a revision
This function exists because creating a changectx object
just to access this is costly."""
extra = self.read(rev)[5]
return encoding.tolocal(extra.get("branch")), 'close' in extra
def _addrevision(self, node, rawtext, transaction, *args, **kwargs):
# overlay over the standard revlog._addrevision to track the new
# revision on the transaction.
rev = len(self)
node = super(changelog, self)._addrevision(node, rawtext, transaction,
*args, **kwargs)
revs = transaction.changes.get('revs')
if revs is not None:
revs.add(rev)
return node
|
import glob
import json
import os
import click
import numpy as np
import rasterio as rio
import skimage as ski
from rasterio.plot import reshape_as_raster
@click.group()
def cli():
pass
@click.command()
@click.argument('path')
def get_sun_elevation_azimuth(path):
"""Return (sun_elevation, sun_azimuth) read from a metadata JSON file."""
with open(path) as f:
metadata = json.load(f)
return (metadata['properties']['sun_elevation'], metadata['properties']['sun_azimuth'])
@click.command()
@click.argument('path')
def save_blank_water_mask(path):
"""Used for testing ATSA where we know there is no water.
In the future we can use the MOD44W water mask product or something else.
"""
test = rio.open(path)
meta = test.profile
meta.update(count=1) # update since we are writing a single band
b1_array, b2_array, b3_array, b4_array = test.read()
fake_mask = np.zeros(b1_array.shape)
with rio.open('fake_mask.tif', 'w', **meta) as dst:
dst.write(fake_mask.astype('uint16'), 1)
@click.command()
@click.argument('stackname')
@click.argument('paths', nargs=-1)
def stack_t_series(paths, stackname):
"""
Stack images along the band axis. All tifs must have the same extent
and be given in sorted order by date.
"""
arrs = [ski.io.imread(path) for path in paths]
stacked = reshape_as_raster(np.dstack(arrs))
img = rio.open(paths[0])
meta=img.profile
meta.update(count=len(arrs)*arrs[0].shape[2])
with rio.open(stackname, 'w', **meta) as dst:
dst.write(stacked)
print("Saved Time Series with " + str(len(arrs)) + " images and " + str(arrs[0].shape[2]) + " bands each")
cli.add_command(get_sun_elevation_azimuth)
cli.add_command(save_blank_water_mask)
cli.add_command(stack_t_series)
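# Illustrative CLI usage (script name and paths are hypothetical; depending
# on the click version the command names may use dashes instead of
# underscores):
#   python this_script.py get_sun_elevation_azimuth scene_metadata.json
#   python this_script.py save_blank_water_mask scene_SR.tif
#   python this_script.py stack_t_series stacked.tif scene1_SR.tif scene2_SR.tif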
if __name__ == "__main__":
sr_pattern = "/home/rave/cloud-free-planet/notebooks/jan_april_2018_100ovp_50maxcloud/*SR*.tif"
img_paths = glob.glob(sr_pattern)
img_paths = sorted(img_paths)
meta_pattern = "/home/rave/cloud-free-planet/notebooks/jan-may/*metadata.json"
meta_paths = glob.glob(meta_pattern)
# The functions above are click commands; when calling them directly from
# Python (rather than via the CLI) use the wrapped callbacks.
angles = [get_sun_elevation_azimuth.callback(p) for p in meta_paths]
with open('angles.txt', 'w') as f:
for tup in angles:
f.write('%s %s\n' % tup)
save_blank_water_mask.callback(img_paths[0])
stack_t_series.callback(img_paths, "stacked.tif")
cli()
|
/*
* Copyright 2010 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrContext_DEFINED
#define GrContext_DEFINED
#include "GrClip.h"
#include "GrColor.h"
#include "GrPaint.h"
#include "GrPathRendererChain.h"
#include "GrRenderTarget.h"
#include "GrTextureProvider.h"
#include "SkMatrix.h"
#include "SkPathEffect.h"
#include "SkTypes.h"
class GrAARectRenderer;
class GrBatchFontCache;
class GrCaps;
struct GrContextOptions;
class GrDrawContext;
class GrDrawTarget;
class GrFragmentProcessor;
class GrGpu;
class GrGpuTraceMarker;
class GrIndexBuffer;
class GrLayerCache;
class GrOvalRenderer;
class GrPath;
class GrPathRenderer;
class GrPipelineBuilder;
class GrResourceEntry;
class GrResourceCache;
class GrResourceProvider;
class GrTestTarget;
class GrTextBlobCache;
class GrTextContext;
class GrTextureParams;
class GrVertexBuffer;
class GrStrokeInfo;
class GrSoftwarePathRenderer;
class SK_API GrContext : public SkRefCnt {
public:
/**
* Creates a GrContext for a backend context.
*/
static GrContext* Create(GrBackend, GrBackendContext, const GrContextOptions& options);
static GrContext* Create(GrBackend, GrBackendContext);
/**
* Only defined in test apps.
*/
static GrContext* CreateMockContext();
virtual ~GrContext();
/**
* The GrContext normally assumes that no outsider is setting state
* within the underlying 3D API's context/device/whatever. This call informs
* the context that the state was modified and it should resend. Shouldn't
* be called frequently for good performance.
* The flag bits, state, are dependent on which backend is used by the
* context, either GL or D3D (possible in future).
*/
void resetContext(uint32_t state = kAll_GrBackendState);
/**
* Callback function to allow classes to cleanup on GrContext destruction.
* The 'info' field is filled in with the 'info' passed to addCleanUp.
*/
typedef void (*PFCleanUpFunc)(const GrContext* context, void* info);
/**
* Add a function to be called from within GrContext's destructor.
* This gives classes a chance to free resources held on a per context basis.
* The 'info' parameter will be stored and passed to the callback function.
*/
void addCleanUp(PFCleanUpFunc cleanUp, void* info) {
CleanUpData* entry = fCleanUpData.push();
entry->fFunc = cleanUp;
entry->fInfo = info;
}
/**
* Abandons all GPU resources and assumes the underlying backend 3D API
* context is no longer usable. Call this if you have lost the associated
* GPU context, and thus internal texture, buffer, etc. references/IDs are
* now invalid. Should be called even when GrContext is no longer going to
* be used for two reasons:
* 1) ~GrContext will not try to free the objects in the 3D API.
* 2) Any GrGpuResources created by this GrContext that outlive it
*    will be marked as invalid (GrGpuResource::wasDestroyed()) and
* when they're destroyed no 3D API calls will be made.
* Content drawn since the last GrContext::flush() may be lost. After this
* function is called the only valid action on the GrContext or
* GrGpuResources it created is to destroy them.
*/
void abandonContext();
///////////////////////////////////////////////////////////////////////////
// Resource Cache
/**
* Return the current GPU resource cache limits.
*
* @param maxResources If non-null, returns maximum number of resources that
* can be held in the cache.
* @param maxResourceBytes If non-null, returns maximum number of bytes of
* video memory that can be held in the cache.
*/
void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;
/**
* Gets the current GPU resource cache usage.
*
* @param resourceCount If non-null, returns the number of resources that are held in the
* cache.
* @param maxResourceBytes If non-null, returns the total number of bytes of video memory held
* in the cache.
*/
void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;
/**
* Specify the GPU resource cache limits. If the current cache exceeds either
* of these, it will be purged (LRU) to keep the cache within these limits.
*
* @param maxResources The maximum number of resources that can be held in
* the cache.
* @param maxResourceBytes The maximum number of bytes of video memory
* that can be held in the cache.
*/
void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
GrTextureProvider* textureProvider() { return fTextureProvider; }
const GrTextureProvider* textureProvider() const { return fTextureProvider; }
/**
* Frees GPU resources created by the context. Can be called to reduce GPU memory
* pressure.
*/
void freeGpuResources();
/**
* Purge all the unlocked resources from the cache.
* This entry point is mainly meant for timing texture uploads
* and is not defined in normal builds of Skia.
*/
void purgeAllUnlockedResources();
/** Access the context capabilities */
const GrCaps* caps() const { return fCaps; }
/**
* Returns the recommended sample count for a render target when using this
* context.
*
* @param config the configuration of the render target.
* @param dpi the display density in dots per inch.
*
* @return sample count that should perform well and have good enough
* rendering quality for the display. Alternatively returns 0 if
* MSAA is not supported or recommended to be used by default.
*/
int getRecommendedSampleCount(GrPixelConfig config, SkScalar dpi) const;
/**
* Returns a helper object to orchestrate draws.
* Callers should take a ref if they rely on the GrDrawContext sticking around.
* NULL will be returned if the context has been abandoned.
*
* @param surfaceProps the surface properties (mainly defines text drawing)
*
* @return a draw context
*/
GrDrawContext* drawContext(const SkSurfaceProps* surfaceProps = NULL) {
return fDrawingMgr.drawContext(surfaceProps);
}
///////////////////////////////////////////////////////////////////////////
// Misc.
/**
* Flags that affect flush() behavior.
*/
enum FlushBits {
/**
* A client may reach a point where it has partially rendered a frame
* through a GrContext that it knows the user will never see. This flag
* causes the flush to skip submission of deferred content to the 3D API
* during the flush.
*/
kDiscard_FlushBit = 0x2,
};
/**
* Call to ensure all drawing to the context has been issued to the
* underlying 3D API.
* @param flagsBitfield flags that control the flushing behavior. See
* FlushBits.
*/
void flush(int flagsBitfield = 0);
void flushIfNecessary() {
if (fFlushToReduceCacheSize) {
this->flush();
}
}
/**
* These flags can be used with the read/write pixels functions below.
*/
enum PixelOpsFlags {
/** The GrContext will not be flushed before the surface read or write. This means that
the read or write may occur before previous draws have executed. */
kDontFlush_PixelOpsFlag = 0x1,
/** Any surface writes should be flushed to the backend 3D API after the surface operation
is complete */
kFlushWrites_PixelOp = 0x2,
/** The src for write or dst read is unpremultiplied. This is only respected if both the
config src and dst configs are an RGBA/BGRA 8888 format. */
kUnpremul_PixelOpsFlag = 0x4,
};
/**
* Reads a rectangle of pixels from a surface.
* @param surface the surface to read from.
* @param left left edge of the rectangle to read (inclusive)
* @param top top edge of the rectangle to read (inclusive)
* @param width width of rectangle to read in pixels.
* @param height height of rectangle to read in pixels.
* @param config the pixel config of the destination buffer
* @param buffer memory to read the rectangle into.
* @param rowBytes      number of bytes between consecutive rows. Zero means rows are tightly
* packed.
* @param pixelOpsFlags see PixelOpsFlags enum above.
*
* @return true if the read succeeded, false if not. The read can fail because of an
*         unsupported pixel config.
*/
bool readSurfacePixels(GrSurface* surface,
int left, int top, int width, int height,
GrPixelConfig config, void* buffer,
size_t rowBytes = 0,
uint32_t pixelOpsFlags = 0);
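// Illustrative caller-side sketch (buffer sizing and surface/config names are
// assumptions, not part of this header): read back an RGBA8888 rectangle into
// a tightly packed client buffer.
//   SkAutoMalloc pixels(width * height * 4);
//   bool ok = context->readSurfacePixels(surface, 0, 0, width, height,
//                                        kRGBA_8888_GrPixelConfig,
//                                        pixels.get());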
/**
* Writes a rectangle of pixels to a surface.
* @param surface the surface to write to.
* @param left left edge of the rectangle to write (inclusive)
* @param top top edge of the rectangle to write (inclusive)
* @param width width of rectangle to write in pixels.
* @param height height of rectangle to write in pixels.
* @param config the pixel config of the source buffer
* @param buffer memory to read pixels from
* @param rowBytes number of bytes between consecutive rows. Zero
* means rows are tightly packed.
* @param pixelOpsFlags see PixelOpsFlags enum above.
* @return true if the write succeeded, false if not. The write can fail because of an
* unsupported combination of surface and src configs.
*/
bool writeSurfacePixels(GrSurface* surface,
int left, int top, int width, int height,
GrPixelConfig config, const void* buffer,
size_t rowBytes,
uint32_t pixelOpsFlags = 0);
/**
* Copies a rectangle of texels from src to dst.
* @param dst the surface to copy to.
* @param src the surface to copy from.
* @param srcRect the rectangle of the src that should be copied.
* @param dstPoint the translation applied when writing the srcRect's pixels to the dst.
* @param pixelOpsFlags see PixelOpsFlags enum above. (kUnpremul_PixelOpsFlag is not allowed).
*/
void copySurface(GrSurface* dst,
GrSurface* src,
const SkIRect& srcRect,
const SkIPoint& dstPoint,
uint32_t pixelOpsFlags = 0);
/** Helper that copies the whole surface but fails when the two surfaces are not identically
sized. */
bool copySurface(GrSurface* dst, GrSurface* src) {
if (NULL == dst || NULL == src || dst->width() != src->width() ||
dst->height() != src->height()) {
return false;
}
this->copySurface(dst, src, SkIRect::MakeWH(dst->width(), dst->height()),
SkIPoint::Make(0,0));
return true;
}
/**
* After this returns any pending writes to the surface will have been issued to the backend 3D API.
*/
void flushSurfaceWrites(GrSurface* surface);
/**
* Finalizes all pending reads and writes to the surface and also performs an MSAA resolve
* if necessary.
*
* It is not necessary to call this before reading the render target via Skia/GrContext.
* GrContext will detect when it must perform a resolve before reading pixels back from the
* surface or using it as a texture.
*/
void prepareSurfaceForExternalIO(GrSurface*);
/**
* An ID associated with this context, guaranteed to be unique.
*/
uint32_t uniqueID() { return fUniqueID; }
///////////////////////////////////////////////////////////////////////////
// Functions intended for internal use only.
GrGpu* getGpu() { return fGpu; }
const GrGpu* getGpu() const { return fGpu; }
GrBatchFontCache* getBatchFontCache() { return fBatchFontCache; }
GrLayerCache* getLayerCache() { return fLayerCache.get(); }
GrTextBlobCache* getTextBlobCache() { return fTextBlobCache; }
bool abandoned() const { return fDrawingMgr.abandoned(); }
GrResourceProvider* resourceProvider() { return fResourceProvider; }
const GrResourceProvider* resourceProvider() const { return fResourceProvider; }
GrResourceCache* getResourceCache() { return fResourceCache; }
// Called by tests that draw directly to the context via GrDrawTarget
void getTestTarget(GrTestTarget*);
void addGpuTraceMarker(const GrGpuTraceMarker* marker);
void removeGpuTraceMarker(const GrGpuTraceMarker* marker);
GrPathRenderer* getPathRenderer(
const GrDrawTarget* target,
const GrPipelineBuilder*,
const SkMatrix& viewMatrix,
const SkPath& path,
const GrStrokeInfo& stroke,
bool allowSW,
GrPathRendererChain::DrawType drawType = GrPathRendererChain::kColor_DrawType,
GrPathRendererChain::StencilSupport* stencilSupport = NULL);
/** Prints cache stats to the string if GR_CACHE_STATS == 1. */
void dumpCacheStats(SkString*) const;
void printCacheStats() const;
/** Prints GPU stats to the string if GR_GPU_STATS == 1. */
void dumpGpuStats(SkString*) const;
void printGpuStats() const;
private:
GrGpu* fGpu;
const GrCaps* fCaps;
GrResourceCache* fResourceCache;
// this union exists because the inheritance of GrTextureProvider->GrResourceProvider
// is in a private header.
union {
GrResourceProvider* fResourceProvider;
GrTextureProvider* fTextureProvider;
};
GrBatchFontCache* fBatchFontCache;
SkAutoTDelete<GrLayerCache> fLayerCache;
SkAutoTDelete<GrTextBlobCache> fTextBlobCache;
GrPathRendererChain* fPathRendererChain;
GrSoftwarePathRenderer* fSoftwarePathRenderer;
// Set by OverBudgetCB() to request that GrContext flush before exiting a draw.
bool fFlushToReduceCacheSize;
bool fDidTestPMConversions;
int fPMToUPMConversion;
int fUPMToPMConversion;
struct CleanUpData {
PFCleanUpFunc fFunc;
void* fInfo;
};
SkTDArray<CleanUpData> fCleanUpData;
const uint32_t fUniqueID;
GrContext(); // init must be called after the constructor.
bool init(GrBackend, GrBackendContext, const GrContextOptions& options);
// Currently the DrawingMgr stores a separate GrDrawContext for each
// combination of text drawing options (pixel geometry x DFT use)
// and hands the appropriate one back given the user's request.
// All of the GrDrawContexts still land in the same GrDrawTarget!
//
// In the future this class will allocate a new GrDrawContext for
// each GrRenderTarget/GrDrawTarget and manage the DAG.
class DrawingMgr {
public:
DrawingMgr() : fDrawTarget(NULL) {
sk_bzero(fDrawContext, sizeof(fDrawContext));
}
~DrawingMgr();
void init(GrContext* context);
void abandon();
bool abandoned() const { return NULL == fDrawTarget; }
void purgeResources();
void reset();
void flush();
// Callers should take a ref if they rely on the GrDrawContext sticking around.
// NULL will be returned if the context has been abandoned.
GrDrawContext* drawContext(const SkSurfaceProps* surfaceProps);
private:
void cleanup();
friend class GrContext; // for access to fDrawTarget for testing
static const int kNumPixelGeometries = 5; // The different pixel geometries
static const int kNumDFTOptions = 2; // DFT or no DFT
GrContext* fContext;
GrDrawTarget* fDrawTarget;
GrDrawContext* fDrawContext[kNumPixelGeometries][kNumDFTOptions];
};
DrawingMgr fDrawingMgr;
void initMockContext();
void initCommon();
/**
* These functions create premul <-> unpremul effects if it is possible to generate a pair
* of effects that make a readToUPM->writeToPM->readToUPM cycle invariant. Otherwise, they
* return NULL.
*/
const GrFragmentProcessor* createPMToUPMEffect(GrProcessorDataManager*, GrTexture*,
bool swapRAndB, const SkMatrix&);
const GrFragmentProcessor* createUPMToPMEffect(GrProcessorDataManager*, GrTexture*,
bool swapRAndB, const SkMatrix&);
/**
* This callback allows the resource cache to callback into the GrContext
* when the cache is still over budget after a purge.
*/
static void OverBudgetCB(void* data);
/**
* A callback similar to the above for use by the TextBlobCache
* TODO move textblob draw calls below context so we can use the call above.
*/
static void TextBlobCacheOverBudgetCB(void* data);
typedef SkRefCnt INHERITED;
};
#endif
|
#!/usr/bin/env python3
import unittest
import events
class FakeMaterials():
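# Test double for the materials provider: 'closest' always reports a fixed
# 12 Ly distance to the configured system, and 'matches'/'local' only key
# off the 'lat' field of the canned result.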
def __init__(self, system_name, body_name=None, lat=None, lon=None, mats=None):
self.system_name = system_name
self.body_name = body_name
self.lat = lat
self.lon = lon
self.mats = mats if mats is not None else []
self.res = { 'system': self.system_name, 'body': self.body_name,
'lat': self.lat, 'lon': self.lon, 'materials': self.mats,
'x': 0, 'y': 0, 'z': 0 }
def closest(self, pos1, mats, types):
if not self.system_name:
return None
return (12, self.res)
def matches(self, loc):
# We only check lat for a match
return self.res if self.res['lat'] and self.res['lat'] == loc['lat'] else None
def local(self, system, planet):
return [ self.res ] if self.res['lat'] else []
class NoneVisited():
def is_visited(self, loc):
return False
def set_visited(self, loc):
self._captured_visit = loc
def captured_visit(self):
return self._captured_visit
class EventsTest(unittest.TestCase):
def test_fsd_event_empty(self):
"""
Simple test we return nothing if we aren't given coords
"""
ev = events.EventEngine(FakeMaterials(None), None, NoneVisited())
self.assertIsNone(ev.process( { 'event': 'FSDJump' }, {} ))
def test_fsd_event_wrong_system(self):
"""
test when systems do not match we ask for the system to show
"""
mats = FakeMaterials('Sol', 'Luna')
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual(("Go to Sol (12 Ly)", mats.res, False), ev.process( { 'event': 'FSDJump', 'StarPos': [ 0, 0, 0] , 'StarSystem': 'Arcturus'}, {} ))
def test_fsd_event_correct_system(self):
"""
test when systems do match we ask for the planet(s) to show
"""
mats = FakeMaterials('Sol', 'Mercury')
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual(("Supercruise to Sol Mercury", mats.res, True), ev.process( { 'event': 'FSDJump', 'StarPos': [ 0, 0, 0] , 'StarSystem': 'Sol'}, {} ))
def test_fsd_event_correct_system_case_wrong(self):
"""
test when systems do match we ask for the planet(s) to show and
the data has case differences from the events
"""
mats = FakeMaterials('SOL', 'MERCURY')
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual(("Supercruise to SOL MERCURY", mats.res, True), ev.process( { 'event': 'FSDJump', 'StarPos': [ 0, 0, 0] , 'StarSystem': 'Sol'}, {} ))
def test_location_event_correct_system(self):
"""
test when systems do match we ask for the planet(s) to show
(Location event no longer causes nav events)
"""
mats = FakeMaterials('Sol', 'Venus')
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual(("Supercruise to Sol Venus", mats.res, True), ev.process( { 'event': 'Location', 'StarPos': [ 0, 0, 0] , 'StarSystem': 'Sol'}, {} ))
def test_approach_body_event_correct_system(self):
"""
test when system and body do match we ask for the coordinates
to go to
"""
mats = FakeMaterials('Sol', 'Venus', 12, 15, [ 'Iron' ] )
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual(("Land at target", mats.res, True), ev.process( { 'event': 'ApproachBody', 'StarPos': [ 0, 0, 0] , 'StarSystem': 'Sol','Body': 'Venus'}, {} ))
def test_approach_body_event_correct_system_already_targeted(self):
"""
test when system and body match but we're already targeting a location
then we just keep targeting that, so we don't jump around.
"""
current_target = { 'target': 'fake' }
mats = FakeMaterials('Sol', 'Venus', 12, 15, [ 'Iron' ] )
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual(("Land at target", current_target, True), ev.process( { 'event': 'ApproachBody', 'StarPos': [ 0, 0, 0] , 'StarSystem': 'Sol','Body': 'Venus'}, {}, current_target ))
def test_startup_event_correct_system(self):
"""
test when systems do match we ask for the planet(s) to show
"""
mats = FakeMaterials('Sol', 'Earth')
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual(("Supercruise to Sol Earth", mats.res, True), ev.process( { 'event': 'StartUp', 'StarPos': [ 0, 0, 0] , 'StarSystem': 'Sol'}, {} ))
def test_touchdown_at_target(self):
"""
test that we note success when we hit a target
"""
visited = NoneVisited()
mats = FakeMaterials('Sol', 'Earth', 13, 67, ['Iron', 'Gold'])
ev = events.EventEngine(mats, ['Gold'], visited)
res = ev.process( { 'event': 'Touchdown', 'Latitude': 13, 'Longitude': 67}, {'StarSystem': 'Sol', "Body": 'Earth', 'StarPos': [ 0, 0, 0]} )
self.assertEqual("Collect Gold", res[0])
self.assertFalse(res[2])
self.assertEqual( mats.res, visited.captured_visit())
def test_touchdown_at_target_wrong_case(self):
"""
test that we note success when we hit a target but the dataset
case doesn't match the ones from the events
"""
visited = NoneVisited()
mats = FakeMaterials('SOL', 'EARTH', 13, 67, ['Iron', 'Gold'])
ev = events.EventEngine(mats, ['Gold'], visited)
res = ev.process( { 'event': 'Touchdown', 'Latitude': 13, 'Longitude': 67}, {'StarSystem': 'Sol', 'Body': 'Earth', 'StarPos': [ 0, 0, 0]} )
self.assertEqual("Collect Gold",res[0])
self.assertFalse(res[2])
self.assertEqual( mats.res, visited.captured_visit())
def test_liftoff_event_wrong_system(self):
"""
test when systems do not match we ask for the system to show
"""
mats = FakeMaterials('Sol', 'Earth')
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual(("Go to Sol (12 Ly)", mats.res, False), ev.process( { 'event': 'Liftoff'}, { 'StarPos': [ 0, 0, 0] , 'StarSystem': 'Arcturus'} ))
def test_liftoff_event_correct_system(self):
"""
test when systems do match we ask for the planet(s) to show
"""
mats = FakeMaterials('Sol', 'Mercury')
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual(("Supercruise to Sol Mercury", mats.res, True), ev.process( { 'event': 'Liftoff'}, { 'StarPos': [ 0, 0, 0] , 'StarSystem': 'Sol'} ))
def test_liftoff_event_correct_body(self):
"""
test when we are lifting off from a body that has a site we have not
yet visited - probably means multiple sites on a body
"""
mats = FakeMaterials('Sol', 'Mercury', lat=1, lon=2)
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual(("Land at target", mats.res, True), ev.process( { 'event': 'Liftoff'}, { 'StarPos': [ 0, 0, 0] , 'StarSystem': 'Sol', 'Body': 'Mercury'} ))
def test_short_body(self):
mats = FakeMaterials('Sol', 'Mercury')
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual("Earth", ev.short_body("Sol", "Earth"))
self.assertEqual("1 a", ev.short_body("Achenar", "Achenar 1 a"))
def test_make_params(self):
mats = FakeMaterials('Sol', 'Mercury')
ev = events.EventEngine(mats, None, NoneVisited())
self.assertEqual( { 'a': 1} , ev.make_params({ 'a': 1 }, { 'a': 2 }))
self.assertEqual( { 'a': 2, 'b': 2} , ev.make_params({ 'b': 2 }, { 'a': 2 }))
self.assertEqual( { 'ShortBody': 'Mars', 'Body': 'Mars', 'StarSystem': 'Sol'} , ev.make_params({ 'StarSystem': 'Sol' }, { 'Body': 'Mars' }))
self.assertEqual( { 'ShortBody': '1', 'Body': 'Achenar 1', 'StarSystem': 'Achenar'} , ev.make_params({ 'StarSystem': 'Achenar' }, { 'Body': 'Achenar 1' }))
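        # Note (added): the asserts above pin down make_params' contract as this
        # suite exercises it: keys from the first (event) dict win on conflict,
        # the two dicts are merged otherwise, and when both 'StarSystem' and
        # 'Body' are present a 'ShortBody' entry is derived by stripping the
        # system name from the body name.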
if __name__ == '__main__':
unittest.main()
|
// FIXME This is a workaround.
// @see https://github.com/facebook/react/issues/7386
jest.mock('react-dom');
import React from 'react';
import {ConnectedRevertedForm} from './RevertNotification';
import renderer from 'react-test-renderer';
import createStore from '../../redux/create';
const sampleState = require('./RevertNotification.json');
test('RevertedNotificationForm with required attributes', () => {
const store = createStore(Object.assign({}, sampleState));
const component = renderer.create(
<ConnectedRevertedForm
store={store}
message="email template default text"
onClose={() => {return null}}
appID={17}
/>
);
let tree = component.toJSON();
expect(tree).toMatchSnapshot();
});
|
import { h } from 'vue'
export default {
name: "SkipForwardCircleFill",
vendor: "Ph",
type: "",
tags: ["skip","forward","circle","fill"],
render() {
return h(
"svg",
{"xmlns":"http://www.w3.org/2000/svg","viewBox":"0 0 256 256","class":"v-icon","fill":"currentColor","data-name":"ph-skip-forward-circle-fill","innerHTML":" <rect width='256' height='256' fill='none'/> <path d='M128,24A104,104,0,1,0,232,128,104.12041,104.12041,0,0,0,128,24Zm36,136a8,8,0,0,1-16,0V137.61426l-43.5625,29.042A7.99612,7.99612,0,0,1,92,160V96a7.99612,7.99612,0,0,1,12.4375-6.65625L148,118.38574V96a8,8,0,0,1,16,0Z'/>"},
)
}
}
|
import { __assign } from "tslib";
import * as React from 'react';
import { StyledIconBase } from '../../StyledIconBase';
export var ArrowRightDown = React.forwardRef(function (props, ref) {
var attrs = {
"fill": "currentColor",
"xmlns": "http://www.w3.org/2000/svg",
};
return (React.createElement(StyledIconBase, __assign({ iconAttrs: attrs, iconVerticalAlign: "middle", iconViewBox: "0 0 24 24" }, props, { ref: ref }),
React.createElement("path", { fill: "none", d: "M0 0h24v24H0z", key: "k0" }),
React.createElement("path", { d: "M11.637 13.05L5.98 7.395 7.394 5.98l5.657 5.657L18 6.687V18H6.687z", key: "k1" })));
});
ArrowRightDown.displayName = 'ArrowRightDown';
export var ArrowRightDownDimensions = { height: 24, width: 24 };
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in spectral_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops
def _FFTSizeForGrad(grad, rank):
return math_ops.reduce_prod(array_ops.shape(grad)[-rank:])
@ops.RegisterGradient("FFT")
def _FFTGrad(_, grad):
size = math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
return spectral_ops.ifft(grad) * math_ops.complex(size, 0.)
@ops.RegisterGradient("IFFT")
def _IFFTGrad(_, grad):
rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
return spectral_ops.fft(grad) * math_ops.complex(rsize, 0.)
@ops.RegisterGradient("FFT2D")
def _FFT2DGrad(_, grad):
size = math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
return spectral_ops.ifft2d(grad) * math_ops.complex(size, 0.)
@ops.RegisterGradient("IFFT2D")
def _IFFT2DGrad(_, grad):
rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
return spectral_ops.fft2d(grad) * math_ops.complex(rsize, 0.)
@ops.RegisterGradient("FFT3D")
def _FFT3DGrad(_, grad):
size = math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
return spectral_ops.ifft3d(grad) * math_ops.complex(size, 0.)
@ops.RegisterGradient("IFFT3D")
def _IFFT3DGrad(_, grad):
rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
return spectral_ops.fft3d(grad) * math_ops.complex(rsize, 0.)
def _RFFTGradHelper(rank, irfft_fn):
"""Returns a gradient function for an RFFT of the provided rank."""
# Can't happen because we don't register a gradient for RFFT3D.
assert rank in (1, 2), "Gradient for RFFT3D is not implemented."
def _Grad(op, grad):
"""A gradient function for RFFT with the provided `rank` and `irfft_fn`."""
fft_length = op.inputs[1]
input_shape = array_ops.shape(op.inputs[0])
is_even = math_ops.cast(1 - (fft_length[-1] % 2), dtypes.complex64)
def _TileForBroadcasting(matrix, t):
expanded = array_ops.reshape(
matrix,
array_ops.concat([
array_ops.ones([array_ops.rank(t) - 2], dtypes.int32),
array_ops.shape(matrix)
], 0))
return array_ops.tile(
expanded, array_ops.concat([array_ops.shape(t)[:-2], [1, 1]], 0))
def _MaskMatrix(length):
# TODO(rjryan): Speed up computation of twiddle factors using the
# following recurrence relation and cache them across invocations of RFFT.
#
# t_n = exp(sqrt(-1) * pi * n^2 / line_len)
# for n = 0, 1,..., line_len-1.
# For n > 2, use t_n = t_{n-1}^2 / t_{n-2} * t_1^2
a = array_ops.tile(
array_ops.expand_dims(math_ops.range(length), 0), (length, 1))
b = array_ops.transpose(a, [1, 0])
return math_ops.exp(-2j * np.pi * math_ops.cast(a * b, dtypes.complex64) /
math_ops.cast(length, dtypes.complex64))
def _YMMask(length):
"""A sequence of [1+0j, -1+0j, 1+0j, -1+0j, ...] with length `length`."""
return math_ops.cast(1 - 2 * (math_ops.range(length) % 2),
dtypes.complex64)
y0 = grad[..., 0:1]
if rank == 1:
ym = grad[..., -1:]
extra_terms = y0 + is_even * ym * _YMMask(input_shape[-1])
elif rank == 2:
# Create a mask matrix for y0 and ym.
base_mask = _MaskMatrix(input_shape[-2])
# Tile base_mask to match y0 in shape so that we can batch-matmul the
# inner 2 dimensions.
tiled_mask = _TileForBroadcasting(base_mask, y0)
y0_term = math_ops.matmul(tiled_mask, math_ops.conj(y0))
extra_terms = y0_term
ym = grad[..., -1:]
ym_term = math_ops.matmul(tiled_mask, math_ops.conj(ym))
inner_dim = input_shape[-1]
ym_term = array_ops.tile(
ym_term,
array_ops.concat([
array_ops.ones([array_ops.rank(grad) - 1], dtypes.int32),
[inner_dim]
], 0)) * _YMMask(inner_dim)
extra_terms += is_even * ym_term
# The gradient of RFFT is the IRFFT of the incoming gradient times a scaling
# factor, plus some additional terms to make up for the components dropped
# due to Hermitian symmetry.
input_size = math_ops.to_float(_FFTSizeForGrad(op.inputs[0], rank))
irfft = irfft_fn(grad, fft_length)
return 0.5 * (irfft * input_size + math_ops.real(extra_terms)), None
return _Grad
def _IRFFTGradHelper(rank, rfft_fn):
"""Returns a gradient function for an IRFFT of the provided rank."""
# Can't happen because we don't register a gradient for IRFFT3D.
assert rank in (1, 2), "Gradient for IRFFT3D is not implemented."
def _Grad(op, grad):
"""A gradient function for IRFFT with the provided `rank` and `rfft_fn`."""
# Generate a simple mask like [1.0, 2.0, ..., 2.0, 1.0] for even-length FFTs
# and [1.0, 2.0, ..., 2.0] for odd-length FFTs. To reduce extra ops in the
# graph we special-case the situation where the FFT length and last
# dimension of the input are known at graph construction time.
fft_length = op.inputs[1]
is_odd = math_ops.mod(fft_length[-1], 2)
input_last_dimension = array_ops.shape(op.inputs[0])[-1]
mask = array_ops.concat(
[[1.0], 2.0 * array_ops.ones([input_last_dimension - 2 + is_odd]),
array_ops.ones([1 - is_odd])], 0)
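    # Worked example (added): for an even fft_length of 4 the RFFT output has
    # 3 bins and is_odd == 0, giving mask == [1.0, 2.0, 1.0]; for an odd
    # fft_length of 5 (also 3 bins, is_odd == 1) it gives [1.0, 2.0, 2.0].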
rsize = math_ops.reciprocal(math_ops.to_float(_FFTSizeForGrad(grad, rank)))
# The gradient of IRFFT is the RFFT of the incoming gradient times a scaling
# factor and a mask. The mask scales the gradient for the Hermitian
# symmetric components of the RFFT by a factor of two, since these
# components are de-duplicated in the RFFT.
rfft = rfft_fn(grad, fft_length)
return rfft * math_ops.cast(rsize * mask, dtypes.complex64), None
return _Grad
ops.RegisterGradient("RFFT")(_RFFTGradHelper(1, spectral_ops.irfft))
ops.RegisterGradient("IRFFT")(_IRFFTGradHelper(1, spectral_ops.rfft))
ops.RegisterGradient("RFFT2D")(_RFFTGradHelper(2, spectral_ops.irfft2d))
ops.RegisterGradient("IRFFT2D")(_IRFFTGradHelper(2, spectral_ops.rfft2d))
|
// @flow
import path from 'path';
import format from 'string-format';
import fs from 'fs-extra';
import R from 'ramda';
import json from 'comment-json';
import logger from '../../logger/logger';
import {
BIT_MAP,
OLD_BIT_MAP,
DEFAULT_INDEX_NAME,
COMPONENT_ORIGINS,
DEFAULT_SEPARATOR,
DEFAULT_INDEX_EXTS,
BIT_VERSION,
VERSION_DELIMITER,
COMPILER_ENV_TYPE,
TESTER_ENV_TYPE,
COMPONENT_DIR
} from '../../constants';
import { InvalidBitMap, MissingMainFile, MissingBitMapComponent } from './exceptions';
import { BitId, BitIds } from '../../bit-id';
import {
outputFile,
pathNormalizeToLinux,
pathJoinLinux,
isDir,
pathIsInside,
stripTrailingChar,
sortObject
} from '../../utils';
import ComponentMap from './component-map';
import type { ComponentMapFile, ComponentOrigin, PathChange } from './component-map';
import type { PathLinux, PathOsBased, PathOsBasedRelative, PathOsBasedAbsolute, PathRelative } from '../../utils/path';
import type { BitIdStr } from '../../bit-id/bit-id';
import GeneralError from '../../error/general-error';
import InvalidConfigDir from './exceptions/invalid-config-dir';
import ComponentConfig from '../config';
import ConfigDir from './config-dir';
import WorkspaceConfig from '../config/workspace-config';
export type BitMapComponents = { [componentId: string]: ComponentMap };
export type PathChangeResult = { id: BitId, changes: PathChange[] };
export type IgnoreFilesDirs = { files: PathLinux[], dirs: PathLinux[] };
export default class BitMap {
projectRoot: string;
mapPath: string;
components: BitMapComponents;
hasChanged: boolean;
version: string;
paths: { [path: string]: BitId }; // path => componentId
pathsLowerCase: { [path: string]: BitId }; // path => componentId
markAsChangedBinded: Function;
_cacheIds: { [origin: string]: ?BitIds };
allTrackDirs: ?{ [trackDir: PathLinux]: BitId };
constructor(projectRoot: string, mapPath: string, version: string) {
this.projectRoot = projectRoot;
this.mapPath = mapPath;
this.components = {};
this.hasChanged = false;
this.version = version;
this.paths = {};
this.pathsLowerCase = {};
this._cacheIds = {};
this.markAsChangedBinded = this.markAsChanged.bind(this);
}
markAsChanged() {
this.hasChanged = true;
this._invalidateCache();
}
setComponent(bitId: BitId, componentMap: ComponentMap) {
const id = bitId.toString();
if (!bitId.hasVersion() && bitId.scope) {
throw new GeneralError(`invalid bitmap id ${id}, a component must have a version when a scope-name is included`);
}
if (componentMap.origin !== COMPONENT_ORIGINS.NESTED) {
// make sure there are no duplications (same name)
const similarIds = this.findSimilarIds(bitId, true);
if (similarIds.length) {
throw new GeneralError(`your id ${id} is duplicated with ${similarIds.toString()}`);
}
}
componentMap.id = bitId;
this.components[id] = componentMap;
this.markAsChanged();
}
setComponentProp(id: BitId, propName: $Keys<ComponentMap>, val: any) {
const componentMap = this.getComponent(id, { ignoreScopeAndVersion: true });
// $FlowFixMe
componentMap[propName] = val;
this.markAsChanged();
return componentMap;
}
isEmpty() {
return R.isEmpty(this.components);
}
removeComponentProp(id: BitId, propName: $Keys<ComponentMap>) {
const componentMap = this.getComponent(id, { ignoreScopeAndVersion: true });
// $FlowFixMe
delete componentMap[propName];
this.markAsChanged();
return componentMap;
}
static load(dirPath: PathOsBasedAbsolute): BitMap {
const { currentLocation, defaultLocation } = BitMap.getBitMapLocation(dirPath);
const mapFileContent = BitMap.loadRawSync(dirPath);
if (!mapFileContent || !currentLocation) {
return new BitMap(dirPath, defaultLocation, BIT_VERSION);
}
let componentsJson;
try {
componentsJson = json.parse(mapFileContent.toString('utf8'), null, true);
} catch (e) {
logger.error(e);
throw new InvalidBitMap(currentLocation, e.message);
}
const version = componentsJson.version;
// Don't treat version like component
delete componentsJson.version;
const bitMap = new BitMap(dirPath, currentLocation, version);
bitMap.loadComponents(componentsJson);
return bitMap;
}
static loadRawSync(dirPath: PathOsBasedAbsolute): ?Buffer {
const { currentLocation } = BitMap.getBitMapLocation(dirPath);
if (!currentLocation) {
logger.info(`bit.map: unable to find an existing ${BIT_MAP} file. Will create a new one if needed`);
return undefined;
}
const mapFileContent = fs.readFileSync(currentLocation);
return mapFileContent;
}
static getBitMapLocation(dirPath: PathOsBasedAbsolute) {
const defaultLocation = path.join(dirPath, BIT_MAP);
const oldLocation = path.join(dirPath, OLD_BIT_MAP);
const getCurrentLocation = (): ?PathOsBased => {
if (fs.existsSync(defaultLocation)) return defaultLocation;
if (fs.existsSync(oldLocation)) return oldLocation;
return null;
};
const currentLocation = getCurrentLocation();
return { currentLocation, defaultLocation };
}
/**
* if resetHard, delete the bitMap file.
* Otherwise, try to load it and only if the file is corrupted then delete it.
*/
static reset(dirPath: PathOsBasedAbsolute, resetHard: boolean): void {
const bitMapPath = path.join(dirPath, BIT_MAP);
const deleteBitMapFile = () => {
logger.info(`deleting the bitMap file at ${bitMapPath}`);
fs.removeSync(bitMapPath);
};
if (resetHard) {
deleteBitMapFile();
return;
}
try {
BitMap.load(dirPath);
} catch (err) {
if (err instanceof InvalidBitMap) {
deleteBitMapFile();
return;
}
throw err;
}
}
/**
* Return files and dirs which need to be ignored since they are config files / dirs
* @param {*} configDir
* @param {*} rootDir
* @param {*} compilerFilesPaths
* @param {*} testerFilesPaths
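   * Example (added, hypothetical values): for configDir '{COMPONENT_DIR}/{ENV_TYPE}'
   * and rootDir 'components/foo', the returned dirs list would contain
   * 'components/foo/compiler' and 'components/foo/tester'.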
*/
static resolveIgnoreFilesAndDirs(
configDir?: PathLinux,
rootDir: PathLinux,
compilerFilesPaths: PathLinux[] = [],
testerFilesPaths: PathLinux[] = []
) {
const ignoreList = {
files: [],
dirs: []
};
if (!configDir) return ignoreList;
if (configDir.startsWith(`{${COMPONENT_DIR}}`)) {
const resolvedConfigDir = format(configDir, { [COMPONENT_DIR]: rootDir, ENV_TYPE: '' });
const allEnvFilesPaths = compilerFilesPaths.concat(testerFilesPaths);
allEnvFilesPaths.forEach((file) => {
const ignoreFile = pathJoinLinux(resolvedConfigDir, file);
ignoreList.files.push(ignoreFile);
});
const configDirWithoutCompDir = format(configDir, { [COMPONENT_DIR]: '', ENV_TYPE: '{ENV_TYPE}' });
      // There are nested folders to ignore
if (configDirWithoutCompDir !== '' && configDirWithoutCompDir !== '/') {
const configDirWithoutCompAndEnvsDir = format(configDir, { [COMPONENT_DIR]: '', ENV_TYPE: '' });
        // There is a nested folder which is not one of the env folders - ignore it completely
if (configDirWithoutCompAndEnvsDir !== '' && configDirWithoutCompAndEnvsDir !== '/') {
const resolvedDirWithoutEnvType = format(configDir, { [COMPONENT_DIR]: rootDir, ENV_TYPE: '' });
ignoreList.dirs.push(stripTrailingChar(resolvedDirWithoutEnvType, '/'));
} else {
const resolvedCompilerConfigDir = format(configDir, {
[COMPONENT_DIR]: rootDir,
ENV_TYPE: COMPILER_ENV_TYPE
});
const resolvedTesterConfigDir = format(configDir, { [COMPONENT_DIR]: rootDir, ENV_TYPE: TESTER_ENV_TYPE });
ignoreList.dirs.push(resolvedCompilerConfigDir, resolvedTesterConfigDir);
}
}
} else {
// Ignore the whole dir since this dir is only for config files
const dirToIgnore = format(configDir, { ENV_TYPE: '' });
ignoreList.dirs.push(dirToIgnore);
}
return ignoreList;
}
/**
   * this is a temporary method until the ConfigDir class is merged into master
*/
static parseConfigDir(configDir: ConfigDir, rootDir: string) {
const configDirResolved = {};
configDirResolved.compiler = configDir.getResolved({
componentDir: rootDir,
envType: COMPILER_ENV_TYPE
}).linuxDirPath;
configDirResolved.tester = configDir.getResolved({ componentDir: rootDir, envType: TESTER_ENV_TYPE }).linuxDirPath;
return configDirResolved;
}
loadComponents(componentsJson: Object) {
Object.keys(componentsJson).forEach((componentId) => {
const componentFromJson = componentsJson[componentId];
const idHasScope = (): boolean => {
if (componentFromJson.origin !== COMPONENT_ORIGINS.AUTHORED) return true;
if ('exported' in componentFromJson) {
return componentFromJson.exported;
}
// backward compatibility
return BitId.parseObsolete(componentId).hasScope();
};
componentFromJson.id = BitId.parse(componentId, idHasScope());
const componentMap = ComponentMap.fromJson(componentsJson[componentId]);
componentMap.setMarkAsChangedCb(this.markAsChangedBinded);
this.components[componentId] = componentMap;
});
}
getAllComponents(origin?: ComponentOrigin | ComponentOrigin[]): BitMapComponents {
if (!origin) return this.components;
const isOriginMatch = component => component.origin === origin;
// $FlowFixMe we know origin is an array in that case
const isOriginMatchArray = component => origin.includes(component.origin);
const filter = Array.isArray(origin) ? isOriginMatchArray : isOriginMatch;
return R.filter(filter, this.components);
}
/**
* We should ignore ejected config files and dirs
   * Files might be in the root dir, in which case we ignore them directly (taking them from the bit.json).
   * They might be in internal dirs, in which case we ignore the dir completely.
*/
async getConfigDirsAndFilesToIgnore(
consumerPath: PathLinux,
consumerConfig: WorkspaceConfig
): Promise<IgnoreFilesDirs> {
const ignoreList = {
files: [],
dirs: []
};
const populateIgnoreListP = R.values(this.components).map(async (component: ComponentMap) => {
const configDir = component.configDir;
const componentDir = component.getComponentDir();
if (configDir && componentDir) {
const resolvedBaseConfigDir = component.getBaseConfigDir() || '';
const fullConfigDir = path.join(consumerPath, resolvedBaseConfigDir);
const componentPkgJsonDir = component.rootDir ? path.join(consumerPath, component.rootDir) : null;
const componentConfig = await ComponentConfig.load(componentPkgJsonDir, fullConfigDir, consumerConfig);
const compilerObj = R.values(componentConfig.compiler)[0];
const compilerFilesObj = compilerObj && compilerObj.files ? compilerObj.files : undefined;
const testerObj = R.values(componentConfig.tester)[0];
const testerFilesObj = testerObj && testerObj.files ? testerObj.files : undefined;
const compilerFiles = compilerFilesObj ? R.values(compilerFilesObj) : [];
const testerFiles = testerFilesObj ? R.values(testerFilesObj) : [];
        // R.values above might return an array of values that are not strings,
        // which would not match the expected input of resolveIgnoreFilesAndDirs
const toIgnore = BitMap.resolveIgnoreFilesAndDirs(
configDir.linuxDirPath,
componentDir,
// $FlowFixMe - see comment above
compilerFiles,
// $FlowFixMe - see comment above
testerFiles
);
ignoreList.files = ignoreList.files.concat(toIgnore.files);
ignoreList.dirs = ignoreList.dirs.concat(toIgnore.dirs);
}
});
await Promise.all(populateIgnoreListP);
return ignoreList;
}
getAllBitIds(origin?: ComponentOrigin[]): BitIds {
const ids = (componentMaps: ComponentMap[]) => BitIds.fromArray(componentMaps.map(c => c.id));
const getIdsOfOrigin = (oneOrigin?: ComponentOrigin): BitIds => {
const cacheKey = oneOrigin || 'all';
if (this._cacheIds[cacheKey]) return this._cacheIds[cacheKey];
const allComponents = R.values(this.components);
const components = oneOrigin ? allComponents.filter(c => c.origin === oneOrigin) : allComponents;
const componentIds = ids(components);
this._cacheIds[cacheKey] = componentIds;
return componentIds;
};
if (!origin) return getIdsOfOrigin();
return BitIds.fromArray(R.flatten(origin.map(oneOrigin => getIdsOfOrigin(oneOrigin))));
}
/**
* get existing bitmap bit-id by bit-id.
* throw an exception if not found
* @see also getBitIdIfExist
*/
getBitId(
bitId: BitId,
{
ignoreVersion = false,
ignoreScopeAndVersion = false
}: {
ignoreVersion?: boolean,
ignoreScopeAndVersion?: boolean
} = {}
): BitId {
if (!(bitId instanceof BitId)) {
throw new TypeError(`BitMap.getBitId expects bitId to be an instance of BitId, instead, got ${bitId}`);
}
const allIds = this.getAllBitIds();
const exactMatch = allIds.search(bitId);
if (exactMatch) return exactMatch;
if (ignoreVersion) {
const matchWithoutVersion = allIds.searchWithoutVersion(bitId);
if (matchWithoutVersion) return matchWithoutVersion;
}
if (ignoreScopeAndVersion) {
const matchWithoutScopeAndVersion = allIds.searchWithoutScopeAndVersion(bitId);
if (matchWithoutScopeAndVersion) return matchWithoutScopeAndVersion;
}
throw new MissingBitMapComponent(bitId.toString());
}
/**
* get existing bitmap bit-id by bit-id
* don't throw an exception if not found
* @see also getBitId
*/
getBitIdIfExist(
bitId: BitId,
{
ignoreVersion = false,
ignoreScopeAndVersion = false
}: {
ignoreVersion?: boolean,
ignoreScopeAndVersion?: boolean
} = {}
): ?BitId {
try {
const existingBitId = this.getBitId(bitId, { ignoreVersion, ignoreScopeAndVersion });
return existingBitId;
} catch (err) {
if (err instanceof MissingBitMapComponent) return null;
throw err;
}
}
/**
* get componentMap from bitmap by bit-id.
* throw an exception if not found.
* @see also getComponentIfExist
*/
getComponent(
bitId: BitId,
{
ignoreVersion = false,
ignoreScopeAndVersion = false
}: {
ignoreVersion?: boolean,
ignoreScopeAndVersion?: boolean
} = {}
): ComponentMap {
const existingBitId: BitId = this.getBitId(bitId, { ignoreVersion, ignoreScopeAndVersion });
return this.components[existingBitId.toString()];
}
/**
* get componentMap from bitmap by bit-id
* don't throw an exception if not found
* @see also getComponent
*/
getComponentIfExist(
bitId: BitId,
{
ignoreVersion = false,
ignoreScopeAndVersion = false
}: {
ignoreVersion?: boolean,
ignoreScopeAndVersion?: boolean
} = {}
): ?ComponentMap {
try {
const componentMap = this.getComponent(bitId, { ignoreVersion, ignoreScopeAndVersion });
return componentMap;
} catch (err) {
if (err instanceof MissingBitMapComponent) return null;
throw err;
}
}
getNonNestedComponentIfExist(bitId: BitId): ?ComponentMap {
const nonNestedIds = this.getAllBitIds([COMPONENT_ORIGINS.IMPORTED, COMPONENT_ORIGINS.AUTHORED]);
const id: ?BitId = nonNestedIds.searchWithoutScopeAndVersion(bitId);
if (!id) return null;
return this.getComponent(id);
}
getComponentPreferNonNested(bitId: BitId): ?ComponentMap {
return this.getNonNestedComponentIfExist(bitId) || this.getComponentIfExist(bitId, { ignoreVersion: true });
}
getAuthoredAndImportedBitIds(): BitIds {
return this.getAllBitIds([COMPONENT_ORIGINS.AUTHORED, COMPONENT_ORIGINS.IMPORTED]);
}
getAuthoredExportedComponents(): BitId[] {
const authoredIds = this.getAllBitIds([COMPONENT_ORIGINS.AUTHORED]);
return authoredIds.filter(id => id.hasScope());
}
validateConfigDir(compId: string, configDir: PathLinux): boolean {
const components = this.getAllComponents();
if (configDir.startsWith('./')) {
configDir = configDir.replace('./', '');
}
const comps = R.pickBy((component) => {
const compDir = component.getComponentDir();
if (compDir && pathIsInside(configDir, compDir)) {
return true;
}
const compConfigDir =
component.configDir && component.configDir instanceof ConfigDir
? component.configDir.getResolved({ componentDir: compDir || '' }).getEnvTypeCleaned().linuxDirPath
: null;
if (compConfigDir && pathIsInside(configDir, compConfigDir)) {
return true;
}
return false;
}, components);
if (!R.isEmpty(comps)) {
const id = R.keys(comps)[0];
const stringId = BitId.parse(id).toStringWithoutVersion();
if (compId !== stringId) {
throw new InvalidConfigDir(stringId);
}
}
return true;
}
_makePathRelativeToProjectRoot(pathToChange: PathRelative): PathOsBasedRelative {
const absolutePath = path.resolve(pathToChange);
return path.relative(this.projectRoot, absolutePath);
}
_searchMainFile(baseMainFile: string, files: ComponentMapFile[], rootDir: ?PathLinux): ?PathLinux {
// search for an exact relative-path
let mainFileFromFiles = files.find(file => file.relativePath === baseMainFile);
if (mainFileFromFiles) return baseMainFile;
if (rootDir) {
const mainFileUsingRootDir = files.find(file => pathJoinLinux(rootDir, file.relativePath) === baseMainFile);
if (mainFileUsingRootDir) return mainFileUsingRootDir.relativePath;
}
// search for a file-name
const potentialMainFiles = files.filter(file => file.name === baseMainFile);
if (!potentialMainFiles.length) return null;
    // when several files meet the criteria, choose the one closest to the root
const sortByNumOfDirs = (a, b) =>
a.relativePath.split(DEFAULT_SEPARATOR).length - b.relativePath.split(DEFAULT_SEPARATOR).length;
potentialMainFiles.sort(sortByNumOfDirs);
mainFileFromFiles = R.head(potentialMainFiles);
return mainFileFromFiles.relativePath;
}
  _getMainFile(mainFile: ?PathLinux, componentIdStr: string): PathLinux {
const componentMap: ComponentMap = this.components[componentIdStr];
const files = componentMap.files.filter(file => !file.test);
// scenario 1) user entered mainFile => search the mainFile in the files array
if (mainFile) {
const foundMainFile = this._searchMainFile(mainFile, files, componentMap.rootDir);
if (foundMainFile) return foundMainFile;
throw new MissingMainFile(componentIdStr, mainFile, files.map(file => path.normalize(file.relativePath)));
}
// scenario 2) user didn't enter mainFile and the component has only one file => use that file as the main file.
if (files.length === 1) return files[0].relativePath;
// scenario 3) user didn't enter mainFile and the component has multiple files => search for default main files (such as index.js)
let searchResult;
DEFAULT_INDEX_EXTS.forEach((ext) => {
// TODO: can be improved - stop loop if finding main file
if (!searchResult) {
const mainFileNameToSearch = `${DEFAULT_INDEX_NAME}.${ext}`;
searchResult = this._searchMainFile(mainFileNameToSearch, files, componentMap.rootDir);
}
});
if (searchResult) return searchResult;
const mainFileString = `${DEFAULT_INDEX_NAME}.[${DEFAULT_INDEX_EXTS.join(', ')}]`;
throw new MissingMainFile(componentIdStr, mainFileString, files.map(file => path.normalize(file.relativePath)));
}
/**
* find ids that have the same name but different version
* if compareWithoutScope is false, the scope should be identical in addition to the name
*/
findSimilarIds(id: BitId, compareWithoutScope: boolean = false): BitIds {
const allIds = this.getAllBitIds([COMPONENT_ORIGINS.IMPORTED, COMPONENT_ORIGINS.AUTHORED]);
const similarIds = allIds.filter((existingId: BitId) => {
const isSimilar = compareWithoutScope
? existingId.isEqualWithoutScopeAndVersion(id)
: existingId.isEqualWithoutVersion(id);
return isSimilar && !existingId.isEqual(id);
});
return BitIds.fromArray(similarIds);
}
deleteOlderVersionsOfComponent(componentId: BitId): void {
const similarIds = this.findSimilarIds(componentId);
similarIds.forEach((id) => {
const idStr = id.toString();
logger.debugAndAddBreadCrumb(
'BitMap.deleteOlderVersionsOfComponent',
'deleting an older version {idStr} of an existing component {componentId}',
{ idStr, componentId: componentId.toString() }
);
this._removeFromComponentsArray(id);
});
}
/**
* --- Don't use this function when you have the ID parsed. Use this.getBitId() instead ---
*
* id entered by the user may or may not include scope-name
* search for a similar id in the bitmap and return the full BitId
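   * e.g. a user-entered id such as 'object/foo' (hypothetical) may resolve to
   * the stored id 'my-scope/object/foo@0.0.1'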
*/
getExistingBitId(id: BitIdStr, shouldThrow: boolean = true): ?BitId {
if (!R.is(String, id)) {
throw new TypeError(`BitMap.getExistingBitId expects id to be a string, instead, got ${typeof id}`);
}
const components: ComponentMap[] = R.values(this.components);
const idHasVersion = id.includes(VERSION_DELIMITER);
// start with a more strict comparison. assume the id from the user has a scope name
const componentWithScope = components.find((componentMap: ComponentMap) => {
return idHasVersion ? componentMap.id.toString() === id : componentMap.id.toStringWithoutVersion() === id;
});
if (componentWithScope) return componentWithScope.id;
// continue with searching without the scope name
const idWithoutVersion = BitId.getStringWithoutVersion(id);
const componentWithoutScope = components.find((componentMap: ComponentMap) => {
return idHasVersion
? componentMap.id.toStringWithoutScope() === id
: componentMap.id.toStringWithoutScopeAndVersion() === idWithoutVersion;
});
if (componentWithoutScope) return componentWithoutScope.id;
if (shouldThrow) {
throw new MissingBitMapComponent(id);
}
return null;
}
/**
* check if both arrays are equal according to their 'relativePath', regardless the order
*/
_areFilesArraysEqual(filesA: ComponentMapFile[], filesB: ComponentMapFile[]): boolean {
if (filesA.length !== filesB.length) return false;
const cmp = (x, y) => x.relativePath === y.relativePath;
const diff = R.differenceWith(cmp, filesA, filesB);
if (!diff.length) return true;
return false;
}
/**
* add files from filesB that are not in filesA
*/
mergeFilesArray(filesA: ComponentMapFile[], filesB: ComponentMapFile[]): ComponentMapFile[] {
return R.unionWith(R.eqBy(R.prop('relativePath')), filesA, filesB);
}
addComponent({
componentId,
files,
mainFile,
origin,
rootDir,
configDir,
trackDir,
originallySharedDir,
wrapDir
}: {
componentId: BitId,
files: ComponentMapFile[],
mainFile?: PathOsBased,
origin: ComponentOrigin,
rootDir?: PathOsBasedAbsolute | PathOsBasedRelative,
configDir?: ?ConfigDir,
trackDir?: PathOsBased,
originallySharedDir?: ?PathLinux,
wrapDir?: ?PathLinux
}): ComponentMap {
const componentIdStr = componentId.toString();
logger.debug(`adding to bit.map ${componentIdStr}`);
if (this.components[componentIdStr]) {
      logger.info(`bit.map: updating an existing component ${componentIdStr}`);
this.components[componentIdStr].files = files;
if (mainFile) {
this.components[componentIdStr].mainFile = this._getMainFile(pathNormalizeToLinux(mainFile), componentIdStr);
}
} else {
if (origin === COMPONENT_ORIGINS.IMPORTED || origin === COMPONENT_ORIGINS.AUTHORED) {
// if there are older versions, the user is updating an existing component, delete old ones from bit.map
this.deleteOlderVersionsOfComponent(componentId);
}
// $FlowFixMe not easy to fix, we can't instantiate ComponentMap with mainFile because we don't have it yet
const componentMap = new ComponentMap({ files, origin });
componentMap.setMarkAsChangedCb(this.markAsChangedBinded);
this.setComponent(componentId, componentMap);
this.components[componentIdStr].mainFile = this._getMainFile(pathNormalizeToLinux(mainFile), componentIdStr);
}
if (rootDir) {
this.components[componentIdStr].rootDir = pathNormalizeToLinux(rootDir);
}
if (configDir) {
this.components[componentIdStr].configDir = configDir;
}
if (trackDir) {
this.components[componentIdStr].trackDir = pathNormalizeToLinux(trackDir);
}
if (wrapDir) {
this.components[componentIdStr].wrapDir = wrapDir;
}
this.components[componentIdStr].removeTrackDirIfNeeded();
if (originallySharedDir) {
this.components[componentIdStr].originallySharedDir = originallySharedDir;
}
this.sortValidateAndMarkAsChanged(componentIdStr);
return this.components[componentIdStr];
}
addFilesToComponent({ componentId, files }: { componentId: BitId, files: ComponentMapFile[] }): ComponentMap {
const componentIdStr = componentId.toString();
if (!this.components[componentIdStr]) {
      throw new GeneralError(`unable to add files to a non-existent component ${componentIdStr}`);
}
    logger.info(`bit.map: updating an existing component ${componentIdStr}`);
this.components[componentIdStr].files = files;
this.sortValidateAndMarkAsChanged(componentIdStr);
return this.components[componentIdStr];
}
sortValidateAndMarkAsChanged(componentIdStr: BitIdStr) {
this.components[componentIdStr].sort();
this.components[componentIdStr].validate();
this.markAsChanged();
}
_invalidateCache = () => {
this.paths = {};
this.pathsLowerCase = {};
this._cacheIds = {};
this.allTrackDirs = undefined;
};
_removeFromComponentsArray(componentId: BitId) {
delete this.components[componentId.toString()];
this.markAsChanged();
}
removeComponent(bitId: BitId) {
const bitmapComponent = this.getBitIdIfExist(bitId, { ignoreScopeAndVersion: true });
if (bitmapComponent) this._removeFromComponentsArray(bitmapComponent);
return bitmapComponent;
}
removeComponents(ids: BitIds) {
return ids.map(id => this.removeComponent(id));
}
isExistWithSameVersion(id: BitId) {
return id.hasVersion() && this.components[id.toString()];
}
/**
* needed after exporting or tagging a component.
   * We don't support export/tag of nested components, only authored or imported. For authored/imported
   * components, only one instance with the same component-name can exist in the file-system. As a result, we can strip the
* scope-name and the version, find the older version in bit.map and update the id with the new one.
*/
updateComponentId(id: BitId, updateScopeOnly: boolean = false): BitId {
const newIdString = id.toString();
const similarIds = this.findSimilarIds(id, true);
if (!similarIds.length) {
logger.debug(`bit-map: no need to update ${newIdString}`);
return id;
}
if (similarIds.length > 1) {
throw new GeneralError(`Your ${BIT_MAP} file has more than one version of ${id.toStringWithoutScopeAndVersion()} and they
are authored or imported. This scenario is not supported`);
}
const oldId: BitId = similarIds[0];
const oldIdStr = oldId.toString();
const newId = updateScopeOnly ? oldId.changeScope(id.scope) : id;
if (newId.isEqual(oldId)) {
logger.debug(`bit-map: no need to update ${oldIdStr}`);
return oldId;
}
logger.debug(`BitMap: updating an older component ${oldIdStr} with a newer component ${newId.toString()}`);
const componentMap = this.components[oldIdStr];
if (componentMap.origin === COMPONENT_ORIGINS.NESTED) {
throw new Error('updateComponentId should not manipulate Nested components');
}
this._removeFromComponentsArray(oldId);
this.setComponent(newId, componentMap);
this.markAsChanged();
return newId;
}
/**
* Return a potential componentMap if file is supposed to be part of it
   * by a path that exists in the files object
*
* @param {string} componentPath relative to consumer - as stored in bit.map files object
   * @returns {BitMapComponents} componentMaps keyed by component id
*/
getComponentObjectOfFileByPath(componentPath: string): BitMapComponents {
const components = this.getAllComponents();
return R.pickBy(component => pathIsInside(componentPath, component.rootDir || this.projectRoot), components);
}
/**
* Return a component id as listed in bit.map file
   * by a path that exists in the files object
*
* @param {string} componentPath relative to consumer - as stored in bit.map files object
* @returns {BitId} component id
* @memberof BitMap
*/
getComponentIdByPath(componentPath: string, caseSensitive: boolean = true): BitId {
this._populateAllPaths();
return caseSensitive ? this.paths[componentPath] : this.pathsLowerCase[componentPath.toLowerCase()];
}
_populateAllPaths() {
if (R.isEmpty(this.paths)) {
Object.keys(this.components).forEach((componentId) => {
const component = this.components[componentId];
component.files.forEach((file) => {
const relativeToConsumer = component.rootDir
? pathJoinLinux(component.rootDir, file.relativePath)
: file.relativePath;
this.paths[relativeToConsumer] = component.id;
this.pathsLowerCase[relativeToConsumer.toLowerCase()] = component.id;
});
});
}
}
getAllTrackDirs() {
if (!this.allTrackDirs) {
this.allTrackDirs = {};
Object.keys(this.components).forEach((componentId) => {
const component = this.components[componentId];
if (!component.trackDir) return;
// $FlowFixMe
this.allTrackDirs[component.trackDir] = component.id;
});
}
return this.allTrackDirs;
}
updatePathLocation(
from: PathOsBasedRelative,
to: PathOsBasedRelative,
existingPath: PathOsBasedAbsolute
): PathChangeResult[] {
const isPathDir = isDir(existingPath);
const allChanges = [];
Object.keys(this.components).forEach((componentId) => {
const componentMap: ComponentMap = this.components[componentId];
const changes = isPathDir ? componentMap.updateDirLocation(from, to) : componentMap.updateFileLocation(from, to);
if (changes && changes.length) allChanges.push({ id: componentMap.id.clone(), changes });
});
if (R.isEmpty(allChanges)) {
const errorMsg = isPathDir
? `directory ${from} is not a tracked component`
: `the file ${existingPath} is untracked`;
throw new GeneralError(errorMsg);
}
this.markAsChanged();
return allChanges;
}
/**
* remove the id property before saving the components to the file as they are redundant with the keys
*/
toObjects(): Object {
const components = {};
Object.keys(this.components).forEach((id) => {
const componentMap = this.components[id].clone();
if (componentMap.origin === COMPONENT_ORIGINS.AUTHORED) {
componentMap.exported = componentMap.id.hasScope();
}
delete componentMap.id;
components[id] = componentMap.toPlainObject();
});
return sortObject(components);
}
/**
* do not call this function directly, let consumer.onDestroy() call it.
* consumer.onDestroy() is being called (manually) at the end of the command process.
* the risk of calling this method in other places is a parallel writing of this file, which
* may result in a damaged file
*/
async write(): Promise<any> {
if (!this.hasChanged) return null;
logger.debug('writing to bit.map');
const bitMapContent = this.getContent();
return outputFile({ filePath: this.mapPath, content: JSON.stringify(bitMapContent, null, 4) });
}
getContent(): Object {
const bitMapContent = Object.assign({}, this.toObjects(), { version: this.version });
return bitMapContent;
}
}
|
# Copyright (c) 2019 Markus Ressel
# .
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# .
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# .
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
from aiogram.types import Message
from telegram_click_aio.permission.base import Permission
from tests import TestBase
class TruePermission(Permission):
async def evaluate(self, message: Message):
return True
class FalsePermission(Permission):
async def evaluate(self, message: Message):
return False
def _create_message_mock(chat_id: int = -12345678, chat_type: str = "private", message_id: int = 12345678,
user_id: int = 12345678, username: str = "myusername") -> Message:
"""
Helper method to create an "Update" object with mocked content
"""
import aiogram
message = lambda: None # type: Message
user = aiogram.types.User(
id=user_id,
username=username,
first_name="Max",
is_bot=False
)
chat = aiogram.types.Chat(id=chat_id, type=chat_type)
date = datetime.datetime.now().timestamp()
message = aiogram.types.Message(
message_id=message_id,
date=date,
)
message.from_user = user
message.chat = chat
return message
class PermissionTest(TestBase):
async def test_permission_nobody(self):
from telegram_click_aio.permission import NOBODY
permission = NOBODY
message = None
self.assertFalse(await permission.evaluate(message))
async def test_permission_username(self):
from telegram_click_aio.permission import USER_NAME
permission = USER_NAME("markusressel")
valid_message = _create_message_mock(username="markusressel")
self.assertTrue(await permission.evaluate(valid_message))
invalid_message = _create_message_mock(username="markus")
self.assertFalse(await permission.evaluate(invalid_message))
invalid_message = _create_message_mock(username="other")
self.assertFalse(await permission.evaluate(invalid_message))
invalid_message = _create_message_mock(username=None)
self.assertFalse(await permission.evaluate(invalid_message))
async def test_permission_user_id(self):
from telegram_click_aio.permission import USER_ID
permission = USER_ID(12345678)
valid_message = _create_message_mock(user_id=12345678)
self.assertTrue(await permission.evaluate(valid_message))
invalid_update = _create_message_mock(user_id=87654321)
self.assertFalse(await permission.evaluate(invalid_update))
async def test_permission_merged_and(self):
merged_permission = FalsePermission() & FalsePermission()
self.assertFalse(await merged_permission.evaluate(None))
merged_permission = TruePermission() & FalsePermission()
self.assertFalse(await merged_permission.evaluate(None))
merged_permission = FalsePermission() & TruePermission()
self.assertFalse(await merged_permission.evaluate(None))
merged_permission = TruePermission() & TruePermission()
self.assertTrue(await merged_permission.evaluate(None))
async def test_permission_merged_or(self):
merged_permission = FalsePermission() | FalsePermission()
self.assertFalse(await merged_permission.evaluate(None))
merged_permission = TruePermission() | FalsePermission()
self.assertTrue(await merged_permission.evaluate(None))
merged_permission = FalsePermission() | TruePermission()
self.assertTrue(await merged_permission.evaluate(None))
merged_permission = TruePermission() | TruePermission()
self.assertTrue(await merged_permission.evaluate(None))
async def test_permission_not(self):
not_permission = ~ TruePermission()
self.assertFalse(await not_permission.evaluate(None))
not_permission = ~ FalsePermission()
self.assertTrue(await not_permission.evaluate(None))
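# Added note: the merged/inverted permissions above rely on Permission
# overloading Python's bitwise operators. A minimal sketch of that pattern,
# with hypothetical class names (the library's real internals may differ):
#
#     class Permission:
#         def __and__(self, other): return MergedPermission(self, other, all)
#         def __or__(self, other): return MergedPermission(self, other, any)
#         def __invert__(self): return NotPermission(self)
#
#     class NotPermission(Permission):
#         def __init__(self, inner): self.inner = inner
#         async def evaluate(self, message):
#             return not await self.inner.evaluate(message)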
|
"""Init file for Supervisor Docker object."""
from ipaddress import IPv4Address
import logging
import os
from typing import Awaitable
from awesomeversion.awesomeversion import AwesomeVersion
import docker
import requests
from ..coresys import CoreSysAttributes
from ..exceptions import DockerError
from .interface import DockerInterface
_LOGGER: logging.Logger = logging.getLogger(__name__)
class DockerSupervisor(DockerInterface, CoreSysAttributes):
"""Docker Supervisor wrapper for Supervisor."""
@property
def name(self) -> str:
"""Return name of Docker container."""
return os.environ["SUPERVISOR_NAME"]
@property
def ip_address(self) -> IPv4Address:
"""Return IP address of this container."""
return self.sys_docker.network.supervisor
@property
def privileged(self) -> bool:
"""Return True if the container run with Privileged."""
return self.meta_host.get("Privileged", False)
def _attach(self, version: AwesomeVersion) -> None:
"""Attach to running docker container.
        Must be run inside an executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err
self._meta = docker_container.attrs
_LOGGER.info(
"Attaching to Supervisor %s with version %s",
self.image,
self.sys_supervisor.version,
)
        # If already attached to the network
if docker_container in self.sys_docker.network.containers:
return
# Attach to network
_LOGGER.info("Connecting Supervisor to hassio-network")
self.sys_docker.network.attach_container(
docker_container,
alias=["supervisor"],
ipv4=self.sys_docker.network.supervisor,
)
def retag(self) -> Awaitable[None]:
"""Retag latest image to version."""
return self.sys_run_in_executor(self._retag)
def _retag(self) -> None:
"""Retag latest image to version.
        Must be run inside an executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_container.image.tag(self.image, tag=str(self.version))
docker_container.image.tag(self.image, tag="latest")
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't retag Supervisor version: %s", err)
raise DockerError() from err
def update_start_tag(self, image: str, version: AwesomeVersion) -> Awaitable[None]:
"""Update start tag to new version."""
return self.sys_run_in_executor(self._update_start_tag, image, version)
def _update_start_tag(self, image: str, version: AwesomeVersion) -> None:
"""Update start tag to new version.
        Must be run inside an executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_image = self.sys_docker.images.get(f"{image}:{version!s}")
# Find start tag
for tag in docker_container.image.tags:
start_image = tag.partition(":")[0]
start_tag = tag.partition(":")[2] or "latest"
                # Skip explicit version tags; only retag the image currently tagged 'latest'
if start_tag != "latest":
continue
docker_image.tag(start_image, start_tag)
docker_image.tag(start_image, version.string)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't fix start tag: %s", err)
raise DockerError() from err
|
module.exports={C:{"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0.00501,"49":0,"50":0,"51":0,"52":0.00501,"53":0,"54":0,"55":0,"56":0.00501,"57":0.01001,"58":0,"59":0.00501,"60":0,"61":0,"62":0,"63":0.02002,"64":0,"65":0.00501,"66":0,"67":0,"68":0.02002,"69":0,"70":0,"71":0,"72":0.00501,"73":0,"74":0,"75":0,"76":0,"77":0.00501,"78":0.05006,"79":0.04005,"80":0.71586,"81":0.18522,"82":0,"83":0,"3.5":0,"3.6":0},D:{"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0.00501,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0.00501,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0.00501,"32":0,"33":0,"34":0,"35":0.01001,"36":0,"37":0,"38":0.00501,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0.04505,"50":0,"51":0,"52":0,"53":0.01502,"54":0,"55":0.00501,"56":0,"57":0,"58":0.00501,"59":0.00501,"60":0.00501,"61":0,"62":0.00501,"63":0.01001,"64":0.00501,"65":0.02002,"66":0,"67":0.01502,"68":0,"69":0.01502,"70":0.02002,"71":0.01502,"72":0.01001,"73":0.00501,"74":0.02002,"75":0.05006,"76":0.04005,"77":0.02503,"78":0.05006,"79":0.05006,"80":0.11514,"81":0.09511,"83":0.12014,"84":1.69703,"85":12.9305,"86":0.03004,"87":0.02002,"88":0},F:{"9":0,"11":0,"12":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0,"50":0,"51":0.00501,"52":0,"53":0,"54":0,"55":0,"56":0,"57":0,"58":0,"60":0.02002,"62":0,"63":0,"64":0,"65":0,"66":0,"67":0,"68":0.03504,"69":0.00501,"70":0.34041,"71":0.04505,"9.5-9.6":0,"10.0-10.1":0,"10.5":0,"10.6":0,"11.1":0,"11.5":0,"11.6":0,"12.1":0},G:{"8":0,"14":4.34637,"3.2":0.02256,"4.0-4.1":0.0376,"4.2-4.3":0,"5.0-5.1":0.00752,"6.0-6.1":0.00376,"7.0-7.1":0.01504,"8.1-8.4":0.02632,"9.0-9.2":0.0188,"9.3":0.21807,"10.0-10.2":0.06016,"10.3":0.28575,"11.0-11.2":0.17671,"11.3-11.4":0.26319,"12.0-12.1":0.41358,"12.2-12.4":2.58301,"13.0-13.1":0.39478,"13.2":0.20679,"13.3":1.50769,"13.4-13.7":4.02302},E:{"4":0,"5":0,"6":0,"7":0,"8":0.00501,"9":0,"10":0,"11":0.00501,"12":0.01502,"13":0.11013,"14":0.1702,_:"0","3.1":0,"3.2":0,"5.1":0.09011,"6.1":0,"7.1":0,"9.1":0.00501,"10.1":0.01001,"11.1":0.04505,"12.1":0.10012,"13.1":0.97617},B:{"12":0.01001,"13":0.01502,"14":0.01001,"15":0.03004,"16":0.02503,"17":0.04505,"18":0.28534,"79":0,"80":0.00501,"81":0.00501,"83":0,"84":0.06508,"85":1.57188},I:{"3":0,"4":0.24705,_:"81","2.1":0,"2.2":0,"2.3":0.00575,"4.1":0.32174,"4.2-4.3":0.12065,"4.4":0,"4.4.3-4.4.4":0.78136},P:{"4":0.1393,"5.0-5.4":0.01072,"6.2-6.4":0.02143,"7.2-7.4":0.12859,"8.2":0.01072,"9.2":0.18217,"10.1":0.09644,"11.1-11.2":0.27861,"12.0":3.97549},A:{"6":0,"7":0,"8":0.05165,"9":0.00517,"10":0.01033,"11":1.06921,"5.5":0},K:{_:"0 10 11 12 11.1 11.5 12.1"},J:{"7":0,"10":0.00499},N:{"10":0,"11":0},M:{"0":0.17978},Q:{"10.4":0.00499},O:{"0":0.53935},H:{"0":0.27895},L:{"0":53.20025},S:{"2.5":0.00999},R:{_:"0"}};
|
def run_fancy_dti_analyses():
# Enables/disables interactive visualization
interactive = False
from dipy.core.gradients import gradient_table
from dipy.data import get_fnames
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti, load_nifti_data
hardi_fname, hardi_bval_fname, hardi_bvec_fname = get_fnames('stanford_hardi')
label_fname = get_fnames('stanford_labels')
data, affine, hardi_img = load_nifti(hardi_fname, return_img=True)
labels = load_nifti_data(label_fname)
bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname)
gtab = gradient_table(bvals, bvecs)
white_matter = (labels == 1) | (labels == 2)
from dipy.reconst.csdeconv import auto_response_ssst
from dipy.reconst.shm import CsaOdfModel
from dipy.data import default_sphere
from dipy.direction import peaks_from_model
response, ratio = auto_response_ssst(gtab, data, roi_radii=10, fa_thr=0.7)
csa_model = CsaOdfModel(gtab, sh_order=6)
csa_peaks = peaks_from_model(csa_model, data, default_sphere,
relative_peak_threshold=.8,
min_separation_angle=45,
mask=white_matter)
from dipy.viz import window, actor, has_fury
if has_fury:
ren = window.Scene()
ren.add(actor.peak_slicer(csa_peaks.peak_dirs,
csa_peaks.peak_values,
colors=None))
window.record(ren, out_path='csa_direction_field.png', size=(900, 900))
if interactive:
window.show(ren, size=(800, 800))
from dipy.tracking.stopping_criterion import ThresholdStoppingCriterion
stopping_criterion = ThresholdStoppingCriterion(csa_peaks.gfa, .25)
import matplotlib.pyplot as plt
sli = csa_peaks.gfa.shape[2] // 2
plt.figure('GFA')
plt.subplot(1, 2, 1).set_axis_off()
plt.imshow(csa_peaks.gfa[:, :, sli].T, cmap='gray', origin='lower')
plt.subplot(1, 2, 2).set_axis_off()
plt.imshow((csa_peaks.gfa[:, :, sli] > 0.25).T, cmap='gray', origin='lower')
plt.savefig('gfa_tracking_mask.png')
from dipy.tracking import utils
seed_mask = (labels == 2)
seeds = utils.seeds_from_mask(seed_mask, affine, density=[2, 2, 2])
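    # Note (added): density=[2, 2, 2] places 2 seeds per axis per voxel,
    # i.e. 8 seed points in every voxel of the seed mask.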
from dipy.tracking.local_tracking import LocalTracking
from dipy.tracking.streamline import Streamlines
    # Initialize LocalTracking. It returns a lazy generator; the actual
    # tracking computation happens when the generator is consumed below.
streamlines_generator = LocalTracking(csa_peaks, stopping_criterion, seeds,
affine=affine, step_size=.5)
# Generate streamlines object
streamlines = Streamlines(streamlines_generator)
from dipy.viz import colormap
if has_fury:
# Prepare the display objects.
        color = colormap.line_colors(streamlines)
        streamlines_actor = actor.line(streamlines, color)
# Create the 3D display.
r = window.Scene()
r.add(streamlines_actor)
        # Save a still image for this static example; for interactivity, use
        # window.show below.
window.record(r, out_path='tractogram_EuDX.png', size=(800, 800))
if interactive:
window.show(r)
from dipy.io.stateful_tractogram import Space, StatefulTractogram
from dipy.io.streamline import save_trk
sft = StatefulTractogram(streamlines, hardi_img, Space.RASMM)
save_trk(sft, "tractogram_EuDX.trk", streamlines)
if __name__ == "__main__":
    run_fancy_dti_analyses()
|
'use strict';
import { Dispatcher } from 'flux';
/** Creates a singular instance of Facebook's Dispatcher */
const appDispatcher = new Dispatcher();
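// Example usage (added; the payload shape is app-specific):
//   appDispatcher.register(payload => { /* route payload to stores */ });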
export default appDispatcher;
|
module.exports = {
entry: "./src/index.tsx",
mode: "development",
module: {
rules: [
{
test: /\.tsx?$/,
exclude: /node_modules/,
use: {
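                    // Assumption (added): the project's Babel config supplies
                    // @babel/preset-typescript (or similar), since babel-loader
                    // alone does not understand TypeScript syntax.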
loader: "babel-loader",
},
},
],
},
resolve: {
extensions: [".tsx", ".ts", ".js", ".jsx"],
},
};
|
// @flow
import React, { Component } from 'react';
import { groupBy } from 'ramda';
import { reduxForm, Field, FieldArray, change } from 'redux-form';
import styles from './ServiceDetails.css';
import { ServiceType } from '../../types/service';
import { buildService } from '../../utils/service.utils';
import DockerTasks from './DockerTasks';
import ListFields from '../ListFields';
const { dialog } = require('electron').remote;
type Props = {
createService: (service: ServiceType) => void,
updateService: (service: ServiceType) => void,
deleteService: (id: string) => void,
loadFormData: (data: any) => void,
dispatch: () => void,
handleSubmit: any,
submitting: boolean,
history: {
push: () => void
},
id: string | null,
npmscripts: { [key: string]: string }
};
const validate = values => {
const errors = {};
if (!values.name) {
errors.name = 'Required!';
}
return errors;
};
class ServiceCreate extends Component<Props> {
props: Props;
constructor(props) {
super(props);
this.dialog = dialog;
}
componentWillUnmount() {
const { loadFormData } = this.props;
loadFormData({});
}
groupByTasks = groupBy(task => task.type);
handleSubmit = data => {
const { props } = this;
const {
npmscripts,
createService,
updateService,
history
} = this.props;
const service: ServiceType = buildService(data, {
npmscripts
});
if (props.id) {
updateService(service);
} else {
createService(service);
}
history.push('/');
};
handleDelete = () => {
const { deleteService, id } = this.props;
if (id) {
deleteService(id);
}
this.redirectHome();
};
redirectHome = () => {
const { history } = this.props;
history.push('/');
};
setProjectDirWithDialog = () => {
const selectedDirs = this.dialog.showOpenDialog({
properties: ['openDirectory']
});
if (!(selectedDirs && selectedDirs.length)) {
return;
}
const projectDir = selectedDirs[0];
const { loadFormData, dispatch } = this.props;
dispatch(change('service', 'npmscripts', {}));
loadFormData({
projectDir
});
};
render() {
const { handleSubmit, submitting, npmscripts, id } = this.props;
return (
<div className="window-content">
<div className={`${styles.container}`}>
<form
onSubmit={handleSubmit(data => this.handleSubmit(data))}
>
<Field name="id" component="input" type="hidden" />
<div className="form-group">
<label htmlFor="name">Service name</label>
<Field
className="form-control"
name="name"
component="input"
type="text"
/>
</div>
<div className="form-group">
<button
className="btn btn-default"
type="button"
onClick={this.setProjectDirWithDialog}
>
Project Path
</button>
</div>
<Field
style={{ width: '100%' }}
className="form-control"
disabled
name="projectDir"
component="input"
placeholder="Select project path to add editor and load npm scripts"
type="text"
/>
<div className="gutter" />
<div className="well">
<div className="form-group">
<label className="pull-left">
ENV Variables
</label>
<FieldArray
name="envvars"
component={ListFields}
props={{
placeholder: 'NODE_ENV=production'
}}
/>
</div>
</div>
<div className="gutter" />
<div className="well">
<strong>Npm Scripts</strong>
{npmscripts &&
Object.keys(npmscripts).map(key => (
<div
className="form-group--checkbox"
key={key}
>
<Field
id={key}
name={`npmscripts.${key}`}
component="input"
type="checkbox"
/>
<label htmlFor={key}>{key}</label>
</div>
))}
</div>
<div className="gutter" />
<div className="well">
<div className="form-group">
<label className="pull-left">Dockers</label>
<FieldArray
name="dockers"
component={DockerTasks}
/>
</div>
</div>
<div className="gutter" />
<div className="form-actions">
<button
type="submit"
className="btn btn-form btn-primary"
disabled={submitting}
>
Save
</button>
<button
type="button"
onClick={this.redirectHome}
className="btn btn-form btn-positive"
>
{' '}
Cancel{' '}
</button>
{id && (
<button
type="button"
onClick={this.handleDelete}
className="btn btn-form btn-negative"
>
Delete
</button>
)}
</div>
</form>
<div className="gutter" />
</div>
</div>
);
}
}
export default reduxForm({
form: 'service',
validate,
enableReinitialize: true,
keepDirtyOnReinitialize: true,
destroyOnUnmount: true
})(ServiceCreate);
|
import pyautogui as pag
import time
def mayo():
    """Locate the mayo bottle on screen and drag it to the target position."""
    time.sleep(.75)
    mayo_bottle_location = pag.locateOnScreen('mayoBottle.PNG')
    if mayo_bottle_location is None:
        # Image not found this pass (e.g. occluded); retry on the next loop.
        return
    mayo_x, mayo_y = pag.center(mayo_bottle_location)
    pag.moveTo(mayo_x, mayo_y)
    pag.dragTo(859, 241, button='left')

while True:
    mayo()
|
#!/usr/bin/env python
import pika
from mist.api import config
QUEUES = ['command', 'machines', 'scripts', 'probe', 'ping',
'rules', 'deployments', 'mappings', 'networks', 'volumes',
'zones', 'buckets', 'default']
def delete_queues():
print('Deleting rabbitmq queues')
    # AMQP_URI is assumed to be a plain 'host:port' pair (no scheme).
    host, port = config.AMQP_URI.split(':')
    connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=host, port=int(port)))
channel = connection.channel()
for queue in QUEUES:
response = channel.queue_delete(queue=queue)
if isinstance(response.method, pika.spec.Queue.DeleteOk):
print(f'Successfully deleted queue {queue}')
else:
print(f'Failed to delete queue {queue}, response: {response}')
connection.close()
if __name__ == '__main__':
delete_queues()
|
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for operations that can be applied to the server.
Contains classes and utilities for creating operations that are to be
applied on the server.
"""
import errors
import random
import util
import sys
PROTOCOL_VERSION = '0.22'
# Operation Types
WAVELET_APPEND_BLIP = 'wavelet.appendBlip'
WAVELET_SET_TITLE = 'wavelet.setTitle'
WAVELET_ADD_PARTICIPANT = 'wavelet.participant.add'
WAVELET_DATADOC_SET = 'wavelet.datadoc.set'
WAVELET_MODIFY_TAG = 'wavelet.modifyTag'
WAVELET_MODIFY_PARTICIPANT_ROLE = 'wavelet.modifyParticipantRole'
BLIP_CONTINUE_THREAD = 'blip.continueThread'
BLIP_CREATE_CHILD = 'blip.createChild'
BLIP_DELETE = 'blip.delete'
DOCUMENT_APPEND_MARKUP = 'document.appendMarkup'
DOCUMENT_INLINE_BLIP_INSERT = 'document.inlineBlip.insert'
DOCUMENT_MODIFY = 'document.modify'
ROBOT_CREATE_WAVELET = 'robot.createWavelet'
ROBOT_FETCH_WAVE = 'robot.fetchWave'
ROBOT_NOTIFY = 'robot.notify'
ROBOT_SEARCH = 'robot.search'
# Always assign NOTIFY_OP_ID to the notify operation so
# we can easily filter it out later.
NOTIFY_OP_ID = '0'
class Operation(object):
"""Represents a generic operation applied on the server.
This operation class contains data that is filled in depending on the
operation type.
It can be used directly, but doing so will not result
in local, transient reflection of state on the blips. In other words,
creating a 'delete blip' operation will not remove the blip from the local
context for the duration of this session. It is better to use the OpBased
model classes directly instead.
"""
def __init__(self, method, opid, params):
"""Initializes this operation with contextual data.
Args:
method: Method to call or type of operation.
      opid: The id of the operation. Any callbacks will refer to this id.
      params: An operation-type-dependent dictionary of parameters.
"""
self.method = method
self.id = opid
self.params = params
def __str__(self):
return '%s[%s]%s' % (self.method, self.id, str(self.params))
def set_param(self, param, value):
self.params[param] = value
return self
def serialize(self, method_prefix=''):
"""Serialize the operation.
Args:
      method_prefix: prefix applied to each method name to allow for
          specifying a namespace.
Returns:
a dict representation of the operation.
"""
if method_prefix and not method_prefix.endswith('.'):
method_prefix += '.'
return {'method': method_prefix + self.method,
'id': self.id,
'params': util.serialize(self.params)}
def set_optional(self, param, value):
"""Sets an optional parameter.
If value is None or "", this is a no op. Otherwise it calls
set_param.
"""
if value == '' or value is None:
return self
else:
return self.set_param(param, value)
class OperationQueue(object):
"""Wraps the queuing of operations using easily callable functions.
The operation queue wraps single operations as functions and queues the
resulting operations in-order. Typically there shouldn't be a need to
call this directly unless operations are needed on entities outside
of the scope of the robot. For example, to modify a blip that
does not exist in the current context, you might specify the wave, wavelet
and blip id to generate an operation.
Any calls to this will not be reflected in the robot in any way.
For example, calling wavelet_append_blip will not result in a new blip
being added to the robot, only an operation to be applied on the
server.
"""
# Some class global counters:
_next_operation_id = 1
def __init__(self, proxy_for_id=None):
self.__pending = []
self._capability_hash = None
self._proxy_for_id = proxy_for_id
def _new_blipdata(self, wave_id, wavelet_id, initial_content='',
parent_blip_id=None):
"""Creates JSON of the blip used for this session."""
temp_blip_id = 'TBD_%s_%s' % (wavelet_id,
hex(random.randint(0, sys.maxint)))
return {'waveId': wave_id,
'waveletId': wavelet_id,
'blipId': temp_blip_id,
'content': initial_content,
'parentBlipId': parent_blip_id}
def _new_waveletdata(self, domain, participants):
"""Creates an ephemeral WaveletData instance used for this session.
Args:
domain: the domain to create the data for.
      participants: the participants initially on the wavelet.
Returns:
Blipdata (for the rootblip), WaveletData.
"""
wave_id = domain + '!TBD_%s' % hex(random.randint(0, sys.maxint))
wavelet_id = domain + '!conv+root'
root_blip_data = self._new_blipdata(wave_id, wavelet_id)
participants = set(participants)
wavelet_data = {'waveId': wave_id,
'waveletId': wavelet_id,
'rootBlipId': root_blip_data['blipId'],
'participants': participants}
return root_blip_data, wavelet_data
def __len__(self):
return len(self.__pending)
def __iter__(self):
return self.__pending.__iter__()
def clear(self):
self.__pending = []
def proxy_for(self, proxy):
"""Return a view of this operation queue with the proxying for set to proxy.
This method returns a new instance of an operation queue that shares the
operation list, but has a different proxying_for_id set so the robot using
this new queue will send out operations with the proxying_for field set.
"""
res = OperationQueue()
res.__pending = self.__pending
res._capability_hash = self._capability_hash
res._proxy_for_id = proxy
return res
def set_capability_hash(self, capability_hash):
self._capability_hash = capability_hash
def serialize(self, method_prefix=''):
first = Operation(ROBOT_NOTIFY,
NOTIFY_OP_ID,
{'capabilitiesHash': self._capability_hash,
'protocolVersion': PROTOCOL_VERSION})
operations = [first] + self.__pending
return [op.serialize(method_prefix=method_prefix) for op in operations]
def copy_operations(self, other_queue):
"""Copy the pending operations from other_queue into this one."""
for op in other_queue:
self.__pending.append(op)
def new_operation(self, method, wave_id, wavelet_id, props=None, **kwprops):
"""Creates and adds a new operation to the operation list."""
if props is None:
props = {}
props.update(kwprops)
if wave_id is not None:
props['waveId'] = wave_id
if wavelet_id is not None:
props['waveletId'] = wavelet_id
if self._proxy_for_id:
props['proxyingFor'] = self._proxy_for_id
operation = Operation(method,
'op%s' % OperationQueue._next_operation_id,
props)
self.__pending.append(operation)
OperationQueue._next_operation_id += 1
return operation
def wavelet_append_blip(self, wave_id, wavelet_id, initial_content=''):
"""Appends a blip to a wavelet.
Args:
wave_id: The wave id owning the containing wavelet.
wavelet_id: The wavelet id that this blip should be appended to.
initial_content: optionally the content to start with
Returns:
JSON representing the information of the new blip.
"""
blip_data = self._new_blipdata(wave_id, wavelet_id, initial_content)
self.new_operation(WAVELET_APPEND_BLIP, wave_id,
wavelet_id, blipData=blip_data)
return blip_data
def wavelet_add_participant(self, wave_id, wavelet_id, participant_id):
"""Adds a participant to a wavelet.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
participant_id: Id of the participant to add.
Returns:
      The operation created.
"""
return self.new_operation(WAVELET_ADD_PARTICIPANT, wave_id, wavelet_id,
participantId=participant_id)
def wavelet_datadoc_set(self, wave_id, wavelet_id, name, data):
"""Sets a key/value pair on the data document of a wavelet.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
name: The key name for this data.
data: The value of the data to set.
Returns:
The operation created.
"""
return self.new_operation(WAVELET_DATADOC_SET, wave_id, wavelet_id,
datadocName=name, datadocValue=data)
def robot_create_wavelet(self, domain, participants=None, message=''):
"""Creates a new wavelet.
Args:
domain: the domain to create the wave in
participants: initial participants on this wavelet or None if none
message: an optional payload that is returned with the corresponding
event.
Returns:
data for the root_blip, wavelet
"""
if participants is None:
participants = []
blip_data, wavelet_data = self._new_waveletdata(domain, participants)
op = self.new_operation(ROBOT_CREATE_WAVELET,
wave_id=wavelet_data['waveId'],
wavelet_id=wavelet_data['waveletId'],
waveletData=wavelet_data)
op.set_optional('message', message)
return blip_data, wavelet_data
def robot_search(self, query, index=None, num_results=None):
"""Execute a search request.
    For now this only makes sense in the data API. Wave does not maintain
    an index for robots, so no results will be returned in that scenario.
Args:
query: what to search for
index: what index to search from
num_results: how many results to return
Returns:
The operation created.
"""
op = self.new_operation(
ROBOT_SEARCH, wave_id=None, wavelet_id=None, query=query)
if index is not None:
op.set_param('index', index)
if num_results is not None:
op.set_param('numResults', num_results)
return op
def robot_fetch_wave(self, wave_id, wavelet_id,
raw_deltas_from_version=-1, return_raw_snapshot=False):
"""Requests a snapshot of the specified wavelet.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
raw_deltas_from_version: If specified, return a raw dump of the
delta history of this wavelet, starting at the given version.
This may return only part of the history; use additional
requests with higher raw_deltas_from_version parameters to
get the rest.
return_raw_snapshot: if true, return the raw data for this
wavelet.
Returns:
The operation created.
"""
op = self.new_operation(ROBOT_FETCH_WAVE, wave_id, wavelet_id)
if raw_deltas_from_version != -1:
op.set_param('rawDeltasFromVersion', raw_deltas_from_version)
if return_raw_snapshot:
op.set_param('returnRawSnapshot', return_raw_snapshot)
return op
def wavelet_set_title(self, wave_id, wavelet_id, title):
"""Sets the title of a wavelet.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
title: The title to set.
Returns:
The operation created.
"""
return self.new_operation(WAVELET_SET_TITLE, wave_id, wavelet_id,
waveletTitle=title)
def wavelet_modify_participant_role(
self, wave_id, wavelet_id, participant_id, role):
"""Modify the role of a participant on a wavelet.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
      participant_id: Id of the participant whose role is modified.
      role: the new role.
Returns:
      The operation created.
"""
return self.new_operation(WAVELET_MODIFY_PARTICIPANT_ROLE, wave_id,
wavelet_id, participantId=participant_id,
participantRole=role)
def wavelet_modify_tag(self, wave_id, wavelet_id, tag, modify_how=None):
"""Modifies a tag in a wavelet.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
tag: The tag (a string).
modify_how: (optional) how to apply the tag. The default is to add
the tag. Specify 'remove' to remove. Specify None or 'add' to
add.
Returns:
The operation created.
"""
return self.new_operation(WAVELET_MODIFY_TAG, wave_id, wavelet_id,
name=tag).set_optional("modify_how", modify_how)
def blip_create_child(self, wave_id, wavelet_id, blip_id):
"""Creates a child blip of another blip.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
Returns:
JSON of blip for which further operations can be applied.
"""
blip_data = self._new_blipdata(wave_id, wavelet_id, parent_blip_id=blip_id)
self.new_operation(BLIP_CREATE_CHILD, wave_id, wavelet_id,
blipId=blip_id,
blipData=blip_data)
return blip_data
def blip_continue_thread(self, wave_id, wavelet_id, blip_id):
"""Creates a blip in same thread as specified blip.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
Returns:
JSON of blip for which further operations can be applied.
"""
blip_data = self._new_blipdata(wave_id, wavelet_id)
self.new_operation(BLIP_CONTINUE_THREAD, wave_id, wavelet_id,
blipId=blip_id,
blipData=blip_data)
return blip_data
def blip_delete(self, wave_id, wavelet_id, blip_id):
"""Deletes the specified blip.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
Returns:
The operation created.
"""
return self.new_operation(BLIP_DELETE, wave_id, wavelet_id, blipId=blip_id)
def document_append_markup(self, wave_id, wavelet_id, blip_id, content):
"""Appends content with markup to a document.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
content: The markup content to append.
Returns:
The operation created.
"""
return self.new_operation(DOCUMENT_APPEND_MARKUP, wave_id, wavelet_id,
blipId=blip_id, content=content)
def document_modify(self, wave_id, wavelet_id, blip_id):
"""Creates and queues a document modify operation
The returned operation still needs to be filled with details before
it makes sense.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
Returns:
The operation created.
"""
return self.new_operation(DOCUMENT_MODIFY,
wave_id,
wavelet_id,
blipId=blip_id)
def document_inline_blip_insert(self, wave_id, wavelet_id, blip_id, position):
"""Inserts an inline blip at a specific location.
Args:
      wave_id: The wave id owning the wavelet this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
position: The position in the document to insert the blip.
Returns:
JSON data for the blip that was created for further operations.
"""
inline_blip_data = self._new_blipdata(wave_id, wavelet_id)
inline_blip_data['parentBlipId'] = blip_id
self.new_operation(DOCUMENT_INLINE_BLIP_INSERT, wave_id, wavelet_id,
blipId=blip_id,
index=position,
blipData=inline_blip_data)
return inline_blip_data
|
var path = require('path');
var apos = require('apostrophe')({
shortName: 'apostrophe-test',
// See lib/modules for basic project-level configuration of our modules
// responsible for serving static assets, managing page templates and
  // configuring user accounts.
modules: {
// Apostrophe module configuration
// Note: most configuration occurs in the respective
// modules' directories. See lib/apostrophe-assets/index.js for an example.
// However any modules that are not present by default in Apostrophe must at
// least have a minimal configuration here: `moduleName: {}`
// If a template is not found somewhere else, serve it from the top-level
// `views/` folder of the project
'apostrophe-templates': {
viewsFolderFallback: path.join(__dirname, 'views'),
},
'apostrophe-pages': {
filters: {
// Grab our ancestor pages, with two levels of subpages
ancestors: { children: { depth: 2 } },
// We usually want children of the current page, too
children: true,
},
// other apostrophe-pages options like `types` ...
},
'link-widgets': {},
'hero-widgets': {},
'page-button-widgets': {},
'page-link-widgets': {},
'navbar-widgets': {},
'drawer-widgets': {},
'one-column-widgets': {},
'two-column-widgets': {},
'three-column-widgets': {},
'single-image-widgets': {},
people: {},
'people-widgets': {
extend: 'apostrophe-pieces-widgets',
filters: {
projection: {
slug: 1,
title: 1,
type: 1,
tags: 1,
phone: 1,
thumbImg: 1,
},
},
},
},
});
|
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//
//------------------------------------------------------------------------------
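// Applies margin/display/border/font styles from the `vars` configuration to
// a d3 selection (presumably a d3plus UI element).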
module.exports = function(elem, vars) {
elem
.style("position", "relative")
.style("margin", vars.ui.margin.css)
.style("display", vars.ui.display.value)
.style("border-style", "solid")
.style("border-width", vars.ui.border + "px")
.style("font-family", vars.font.family.value)
.style("font-size", vars.font.size + "px")
.style("font-weight", vars.font.weight)
.style("letter-spacing", vars.font.spacing + "px")
}
|
# Write a Python program that reads an integer and asks the user to choose
# a conversion base: 1 for binary, 2 for octal, 3 for hexadecimal.
n = int(input('Enter an integer: '))
e = int(input('Choose the conversion base:\n type 1 for binary\n type 2 for octal\n type 3 for hexadecimal: '))
if e == 1:
    print(bin(n)[2:])
elif e == 2:
    print(oct(n)[2:])
elif e == 3:
    print(hex(n)[2:])
else:
    print('Invalid option')
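# Example session (illustrative): entering 10 with base 1 prints '1010',
# base 2 prints '12', and base 3 prints 'a'.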
|
__version__ = """1.12.0"""
|
import requests

response = requests.get("http://www.fateclins.edu.br/v4.0/vagasEstagioEmprego.php")
html = response.text
# Index of the first occurrence of 'Emprego' in the page (-1 if absent).
html_find = html.find('Emprego')
print(response.headers['content-type'])
|
//
// MBMrtdDetectorResult.h
// MicroblinkDev
//
// Created by Jura Skrlec on 20/03/2018.
//
#import <Foundation/Foundation.h>
#import "MBMicroblinkDefines.h"
#import "MBQuadWithSizeDetectorResult.h"
#import "MBQuadrangle.h"
NS_ASSUME_NONNULL_BEGIN
/**
* Detector that can perform detection of Machine Readable Travel Documents (MRTD).
*/
MB_CLASS_AVAILABLE_IOS(8.0)
@interface MBMrtdDetectorResult : MBQuadWithSizeDetectorResult <NSCopying>
MB_INIT_UNAVAILABLE
/**
* Returns the location of Machine Readable Zone in coordinate system of image in which detection was performed.
*/
@property (nonatomic, nullable, strong, readonly) MBQuadrangle *mrzLocation;
/**
* Returns the physical height in inches of Machine Readable Zone.
*/
@property (nonatomic, assign, readonly) CGFloat mrzPhysicalHeightInInches;
@end
NS_ASSUME_NONNULL_END
|
define([], function()
{
    var Button = function (text, callback, url)
    {
        this.DOM = document.createElement("button");
        this.DOM.innerHTML = text;
        this.DOM.onclick = callback;
        this.DOM.style.position = 'relative';
        this.DOM.style.padding = '4px';
        this.DOM.style.margin = '4px';
        this.DOM.style.cursor = 'pointer';
        if (typeof url !== "undefined")
        {
            var link = document.createElement("a");
            link.href = url;
            link.setAttribute('target', '_blank');
            link.appendChild(this.DOM);
            document.body.appendChild(link);
        }
        else
        {
            document.body.appendChild(this.DOM);
        }
    };
    return Button;
});
|
module.exports={A:{A:{"1":"E B A","2":"J C G TB"},B:{"1":"D X g H L"},C:{"1":"0 2 4 H L M N O P Q R S T U V W u Y Z a b c d e f K h i j k l m n o p q r w x v z t s","2":"1 RB F I J C G E B A D X g PB OB"},D:{"1":"0 2 4 8 x v z t s DB AB SB BB","2":"F I J C G E B A D X g H L M N O P Q R S T U V W u Y Z a b c d e","132":"f K h i j k l m n o p q r w"},E:{"2":"7 F I J C E B A CB EB FB HB IB JB","132":"G GB"},F:{"1":"K h i j k l m n o p q r y","2":"H L M N O P Q R","4":"5 6 A D LB MB NB QB","16":"E KB","132":"S T U V W u Y Z a b c d e f"},G:{"2":"3 7 9 A UB VB WB YB ZB aB bB","132":"G XB"},H:{"1":"cB"},I:{"1":"s","2":"1 3 F dB eB fB gB hB iB"},J:{"2":"C","132":"B"},K:{"1":"K y","4":"5 6 B A D"},L:{"1":"8"},M:{"1":"t"},N:{"1":"B A"},O:{"132":"jB"},P:{"1":"I","132":"F"},Q:{"132":"kB"},R:{"132":"lB"}},B:2,C:"SVG fragment identifiers"};
|
# Copyright (C) 2015-2021 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_signed_data_info import vscf_signed_data_info_t
class vscf_footer_info_t(Structure):
pass
class VscfFooterInfo(object):
"""Handle meta information about footer."""
def __init__(self):
"""Create underlying C context."""
self._ll = LowLevelLibs()
self._lib = self._ll.foundation
def vscf_footer_info_new(self):
vscf_footer_info_new = self._lib.vscf_footer_info_new
vscf_footer_info_new.argtypes = []
vscf_footer_info_new.restype = POINTER(vscf_footer_info_t)
return vscf_footer_info_new()
def vscf_footer_info_delete(self, ctx):
vscf_footer_info_delete = self._lib.vscf_footer_info_delete
vscf_footer_info_delete.argtypes = [POINTER(vscf_footer_info_t)]
vscf_footer_info_delete.restype = None
return vscf_footer_info_delete(ctx)
def vscf_footer_info_has_signed_data_info(self, ctx):
"""Retrun true if signed data info present."""
vscf_footer_info_has_signed_data_info = self._lib.vscf_footer_info_has_signed_data_info
vscf_footer_info_has_signed_data_info.argtypes = [POINTER(vscf_footer_info_t)]
vscf_footer_info_has_signed_data_info.restype = c_bool
return vscf_footer_info_has_signed_data_info(ctx)
def vscf_footer_info_signed_data_info(self, ctx):
"""Return signed data info."""
vscf_footer_info_signed_data_info = self._lib.vscf_footer_info_signed_data_info
vscf_footer_info_signed_data_info.argtypes = [POINTER(vscf_footer_info_t)]
vscf_footer_info_signed_data_info.restype = POINTER(vscf_signed_data_info_t)
return vscf_footer_info_signed_data_info(ctx)
def vscf_footer_info_set_data_size(self, ctx, data_size):
"""Set data size."""
vscf_footer_info_set_data_size = self._lib.vscf_footer_info_set_data_size
vscf_footer_info_set_data_size.argtypes = [POINTER(vscf_footer_info_t), c_size_t]
vscf_footer_info_set_data_size.restype = None
return vscf_footer_info_set_data_size(ctx, data_size)
def vscf_footer_info_data_size(self, ctx):
"""Return data size."""
vscf_footer_info_data_size = self._lib.vscf_footer_info_data_size
vscf_footer_info_data_size.argtypes = [POINTER(vscf_footer_info_t)]
vscf_footer_info_data_size.restype = c_size_t
return vscf_footer_info_data_size(ctx)
def vscf_footer_info_shallow_copy(self, ctx):
vscf_footer_info_shallow_copy = self._lib.vscf_footer_info_shallow_copy
vscf_footer_info_shallow_copy.argtypes = [POINTER(vscf_footer_info_t)]
vscf_footer_info_shallow_copy.restype = POINTER(vscf_footer_info_t)
return vscf_footer_info_shallow_copy(ctx)
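# A minimal usage sketch (assumes the underlying C foundation library is
# present and loadable via LowLevelLibs):
#
#   wrapper = VscfFooterInfo()
#   ctx = wrapper.vscf_footer_info_new()
#   wrapper.vscf_footer_info_set_data_size(ctx, 1024)
#   assert wrapper.vscf_footer_info_data_size(ctx) == 1024
#   wrapper.vscf_footer_info_delete(ctx)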
|
#!/usr/bin/env python3
# pylint: disable=no-self-use, too-few-public-methods
"""Test"""
import json
import unittest
from configparser import ConfigParser
import requests
from library.config_parser import config_section_parser
CONFIG = ConfigParser()
CONFIG.read("config/config.cfg")
# HEADERS = {}
# HEADERS['Content-Type'] = 'application/json'
PORT = '5000'
class APIRequest():
"""Class for APIRequest"""
# # INITIALIZE
# def __init__(self):
# """The Constructor APIRequest class"""
# pass
def api_post(self, url, data, head=None):
"""API Post"""
api_ip = config_section_parser(CONFIG, "IPS")['my']
api_protocol = config_section_parser(CONFIG, "IPS")['my_protocol']
api_endpoint = api_protocol + "://" + api_ip + ":" + PORT + url
headers = {}
headers['content-type'] = 'application/json'
        if head:
            # Merge caller-supplied headers (e.g. token/userid) into the defaults.
            headers = {**headers, **head}
req = requests.post(api_endpoint, data=json.dumps(data), headers=headers)
return req.json()
class Test(unittest.TestCase):
"""Class for Test"""
def setUp(self):
""" SET UP """
self.api_request = APIRequest()
self.user_id = ""
self.token = ""
self.email = config_section_parser(CONFIG, "ADMIN")['username']
self.password = config_section_parser(CONFIG, "ADMIN")['password']
self.test_user_login()
def test_user_login(self):
"""LOG IN"""
data = {}
data['email'] = self.email
data['password'] = self.password
response = self.api_request.api_post('/user/login', data)
self.token = response['token']
self.user_id = response['id']
self.assertEqual(response['status'], True)
def test_permission_create(self):
""" CREATE PERMISSION """
head = {}
head['token'] = str(self.token)
head['userid'] = str(self.user_id)
url = '/permission/create'
data = {}
data['permission_details'] = 'DETAILS'
data['permission_name'] = 'PERMISSION 103'
response = self.api_request.api_post(url, data, head=head)
self.assertEqual(response['status'], 'ok')
if __name__ == '__main__':
unittest.main()
|
from django import forms
from supplier.models import *
class SupplierForm(forms.ModelForm):
class Meta:
model = Supplier
exclude = ['date','materials']
|
# api/urls.py
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework.authtoken.views import obtain_auth_token
from .views import CreateView
from .views import DetailsView
urlpatterns = [
    url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^bucketlists/$', CreateView.as_view(), name="create"),
    url(r'^bucketlists/(?P<pk>[0-9]+)/$', DetailsView.as_view(), name="details"),
    # url(r'^users/$', UserView.as_view(), name="users"),
    # url(r'users/(?P<pk>[0-9]+)/$', UserDetailsView.as_view(), name="user_details"),
    url(r'^get-token/', obtain_auth_token),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
#ifndef SHADER_H
#define SHADER_H
#include <GL/glew.h>
#include <glm/glm.hpp>
#include <string>
#include <fstream>
#include <sstream>
#include <iostream>
class Shader {
public:
unsigned int ID;
Shader(const char *vertexPath, const char *fragmentPath) {
std::string vertexCode;
std::string fragmentCode;
std::ifstream vShaderFile;
std::ifstream fShaderFile;
vShaderFile.exceptions(std::ifstream::failbit | std::ifstream::badbit);
fShaderFile.exceptions(std::ifstream::failbit | std::ifstream::badbit);
try {
vShaderFile.open(vertexPath);
fShaderFile.open(fragmentPath);
std::stringstream vShaderStream, fShaderStream;
vShaderStream << vShaderFile.rdbuf();
fShaderStream << fShaderFile.rdbuf();
vShaderFile.close();
fShaderFile.close();
vertexCode = vShaderStream.str();
fragmentCode = fShaderStream.str();
}
        catch (const std::ifstream::failure &e) {
            std::cout << "ERROR::SHADER::FILE_NOT_SUCCESSFULLY_READ" << std::endl;
}
const char *vShaderCode = vertexCode.c_str();
const char *fShaderCode = fragmentCode.c_str();
unsigned int vertex, fragment;
vertex = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex, 1, &vShaderCode, NULL);
glCompileShader(vertex);
checkCompileErrors(vertex, "VERTEX");
fragment = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment, 1, &fShaderCode, NULL);
glCompileShader(fragment);
checkCompileErrors(fragment, "FRAGMENT");
ID = glCreateProgram();
glAttachShader(ID, vertex);
glAttachShader(ID, fragment);
glLinkProgram(ID);
checkCompileErrors(ID, "PROGRAM");
glDeleteShader(vertex);
glDeleteShader(fragment);
}
Shader() {}
void use() {
glUseProgram(ID);
}
void setBool(const std::string &name, bool value) const {
glUniform1i(glGetUniformLocation(ID, name.c_str()), (int) value);
}
void setInt(const std::string &name, int value) const {
glUniform1i(glGetUniformLocation(ID, name.c_str()), value);
}
void setFloat(const std::string &name, float value) const {
glUniform1f(glGetUniformLocation(ID, name.c_str()), value);
}
    void setMat4(const std::string &name, const glm::mat4 &value) {
        glUniformMatrix4fv(glGetUniformLocation(ID, name.c_str()), 1, GL_FALSE, &value[0][0]);
    }
    void setVec3(const std::string &name, float x, float y, float z) {
        glUniform3f(glGetUniformLocation(ID, name.c_str()), x, y, z);
    }
    void setVec3(const std::string &name, const glm::vec3 &value) {
        glUniform3f(glGetUniformLocation(ID, name.c_str()), value.x, value.y, value.z);
    }
private:
void checkCompileErrors(unsigned int shader, std::string type) {
int success;
char infoLog[1024];
if (type != "PROGRAM") {
glGetShaderiv(shader, GL_COMPILE_STATUS, &success);
if (!success) {
glGetShaderInfoLog(shader, 1024, NULL, infoLog);
std::cout << "ERROR::SHADER_COMPILATION_ERROR of type: " << type << "\n" << infoLog
<< "\n -- --------------------------------------------------- -- " << std::endl;
}
} else {
glGetProgramiv(shader, GL_LINK_STATUS, &success);
if (!success) {
glGetProgramInfoLog(shader, 1024, NULL, infoLog);
std::cout << "ERROR::PROGRAM_LINKING_ERROR of type: " << type << "\n" << infoLog
<< "\n -- --------------------------------------------------- -- " << std::endl;
}
}
}
};
#endif
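// A minimal usage sketch (assumes "shader.vs"/"shader.fs" exist on disk and a
// GL context plus glew have already been initialized):
//
//   Shader shader("shader.vs", "shader.fs");
//   shader.use();
//   shader.setFloat("uTime", 0.0f);
//   // ... bind a VAO and issue draw calls ...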
|
/*
* 153. Find Minimum in Rotated Sorted Array
*
* Q: https://leetcode.com/problems/find-minimum-in-rotated-sorted-array/
* A: https://leetcode.com/problems/find-minimum-in-rotated-sorted-array/discuss/716821/Javascript-and-C%2B%2B-solutions
*/
// verbose
let findMin = A => {
let N = A.length,
i = 0,
j = N - 1;
while (i < j) {
let k = Math.floor((i + j) / 2);
if (A[i] <= A[k] && A[k] <= A[j]) break; // case 1: i..j is sorted, thus A[i] is the minimum 🎯
if (A[j] <= A[i] && A[i] <= A[k]) i = k + 1; // case 2: k is in the 👈 left-most maximal-partition of A
if (A[k] <= A[j] && A[j] <= A[i]) j = k; // case 3: k is in the 👉 right-most minimal-partition of A
}
return A[i];
};
// concise (renamed so all three variants can coexist in one file)
let findMinConcise = A => {
let N = A.length,
i = 0,
j = N - 1;
while (i < j) {
let k = Math.floor((i + j) / 2);
if (A[i] <= A[j]) break; // case 1: i..j is sorted, thus A[i] is the minimum 🎯
if (A[i] <= A[k]) i = k + 1; // case 2: k is in the 👈 left-most maximal-partition of A
if (A[k] <= A[j]) j = k; // case 3: k is in the 👉 right-most minimal-partition of A
}
return A[i];
};
// more concise (renamed for the same reason)
let findMinMoreConcise = A => {
let N = A.length,
i = 0,
j = N - 1;
while (i < j && A[j] < A[i]) { // case 1: if not (A[j] < A[i]), then (A[i] <= A[j]), thus i..j is sorted and A[i] is the minimum 🎯
let k = Math.floor((i + j) / 2);
if (A[i] <= A[k]) i = k + 1; // case 2: k is in the 👈 left-most maximal-partition of A
if (A[k] <= A[j]) j = k; // case 3: k is in the 👉 right-most minimal-partition of A
}
return A[i];
};
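// Illustrative checks (hold for each variant): findMin([3,4,5,1,2]) === 1,
// findMin([4,5,6,7,0,1,2]) === 0, findMin([11,13,15,17]) === 11 (no rotation).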
|
#include <stdbool.h>
#include <stdlib.h>
#include "serialcon/serialcon.h"
bool add_command(serialcon_connection *conn, const char *cmd) {
list_of_cmds_t *new_elem = (list_of_cmds_t *)malloc(sizeof(list_of_cmds_t));
if (!new_elem)
return false;
new_elem->cmd.cmdline = cmd;
new_elem->cmd.exitcode = -1;
new_elem->next = NULL;
list_of_cmds_t *p = &conn->cmd_list;
while (p->next != NULL)
p = p->next;
p->next = new_elem;
return true;
}
void free_cmd_list(serialcon_connection *conn) {
list_of_cmds_t *p = conn->cmd_list.next;
while (p != NULL) {
list_of_cmds_t *tmp = p;
p = p->next;
free(tmp);
}
p = conn->cmd_list_done.next;
while (p != NULL) {
list_of_cmds_t *tmp = p;
p = p->next;
free(tmp);
}
}
/* Pops the next pending command, appends it to the done list, and returns a
 * pointer to it (or NULL if nothing is pending). */
command_t *next_cmd(serialcon_connection *conn) {
if (!conn->cmd_list.next) return NULL;
command_t *cmd = &conn->cmd_list.next->cmd;
list_of_cmds_t *new_done = conn->cmd_list.next;
conn->cmd_list.next = conn->cmd_list.next->next;
new_done->next = NULL;
list_of_cmds_t *runner_through_done_list = &conn->cmd_list_done;
while (runner_through_done_list->next != NULL)
runner_through_done_list = runner_through_done_list->next;
runner_through_done_list->next = new_done;
return cmd;
}
/* Returns the most recently completed command so the caller can inspect its
 * exit code, or NULL if no command has completed yet. */
command_t *failed_cmd(serialcon_connection *conn) {
list_of_cmds_t *runner_through_done_list = &conn->cmd_list_done;
if (!conn->cmd_list_done.next) return NULL;
while (runner_through_done_list->next != NULL)
runner_through_done_list = runner_through_done_list->next;
return &runner_through_done_list->cmd;
}
|