file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
utils.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
import itertools
import logging
import logging.handlers
import math
import os
import pprint
import socket
import subprocess
import time
from typing import Tuple
import editdistance
import mxnet as mx
import numpy as np
import sklearn.metrics as metrics
from mxnet import nd, gluon
from mxnet.gluon.loss import SigmoidBinaryCrossEntropyLoss
from tqdm import tqdm
from data import AsyncDataLoader
from experiments.aws_config import aws_config
logger = logging.getLogger()
class PaddedArray:
def __init__(self, values, value_lengths):
self.values = values
self.value_lengths = value_lengths
def as_in_context(self, ctx):
new_PA = PaddedArray(self.values.as_in_context(ctx), self.value_lengths.as_in_context(ctx))
return new_PA
def get_time():
os.environ['TZ'] = 'US/Pacific'
time.tzset()
t = time.strftime('%a_%b_%d_%Y_%H%Mhrs', time.localtime())
return t
def start_logging(log_dir, debug: bool = False):
logger = logging.getLogger()
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logger.setLevel(log_level)
if not any(type(i) == logging.StreamHandler for i in logger.handlers):
sh = logging.StreamHandler()
sh.setLevel(log_level)
logger.addHandler(sh)
file_handlers = [i for i in logger.handlers if type(i) == logging.FileHandler]
for h in file_handlers:
logger.removeHandler(h)
os.makedirs(log_dir, exist_ok=True)
logger.info('Logging to {}'.format(log_dir))
fh = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
fh.setLevel(log_level)
fh.setFormatter(
logging.Formatter(fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M'))
logger.addHandler(fh)
# Only works if you have an SMTP server like postfix running on your box
if aws_config['email_to_send_alerts_to'] and not any(
type(i) == logging.handlers.SMTPHandler for i in logger.handlers):
eh = logging.handlers.SMTPHandler('localhost',
'logger@{}'.format(socket.getfqdn()),
aws_config['email_to_send_alerts_to'],
'Log Message from {} about {}'.format(socket.getfqdn(), log_dir))
eh.setLevel(logging.ERROR)
eh.setFormatter(
logging.Formatter('Hey there, heads up:\n\n%(name)-12s: %(levelname)-8s %(message)s'))
logger.addHandler(eh)
return logger
def tuple_of_tuples_to_padded_array(tup_of_tups: Tuple[Tuple[int, ...], ...], ctx, pad_amount=None):
'''
Converts a tuple of tuples into a PaddedArray (i.e. glorified pair of nd.Arrays for working with SequenceMask)
Pads to the length of the longest tuple in the outer tuple, unless pad_amount is specified.
'''
value_lengths = nd.array([len(i) for i in tup_of_tups], dtype='float32',
ctx=ctx) # float type to play nice with SequenceMask later
if pad_amount is not None and value_lengths.max().asscalar() < pad_amount:
tup_of_tups = list(tup_of_tups)
tup_of_tups[0] = tup_of_tups[0] + (0,) * (pad_amount - len(tup_of_tups[0]))
values = list(itertools.zip_longest(*tup_of_tups, fillvalue=0))
values = nd.array(values, dtype='int32', ctx=ctx).T[:, :pad_amount]
return PaddedArray(values, value_lengths)
def evaluate_loss(data_loader: AsyncDataLoader, model, loss_fxn):
with data_loader as data_loader:
total_loss = nd.zeros((1,), ctx=data_loader.ctx[0])
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
losses = [loss_fxn(model(batch.data), batch.label, model.data_encoder) for batch in split_batch]
loss_sums = nd.concat(*[loss.sum().as_in_context(data_loader.ctx[0]) for loss in losses], dim=0)
total_loss += nd.sum(loss_sums)
total_loss.wait_to_read()
return total_loss.asscalar() / len(data_loader)
def evaluate_FITB_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in indicating the correct variable
'''
with data_loader as data_loader:
correct = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
correct += int(nd.dot(prediction, label).asscalar())
return correct / len(data_loader)
def evaluate_full_name_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in predicting the full true name, in batches
'''
logged_example = False
with data_loader as data_loader:
correct = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
if prediction == label:
correct += 1
return correct / len(data_loader)
def evaluate_subtokenwise_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in predicting each subtoken in the true names (with penalty for extra subtokens)
'''
logged_example = False
with data_loader as data_loader:
correct = 0
total = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
for i in range(min(len(prediction), len(label))):
if prediction[i] == label[i]:
correct += 1
total += max(len(prediction), len(label))
return correct / total
def evaluate_edit_distance(data_loader: AsyncDataLoader, model):
|
class FITBLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
label, _ = args
loss = SigmoidBinaryCrossEntropyLoss()
return loss(output, label)
class VarNamingLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
'''
Masks the outputs and returns the SoftMaxCrossEntropy loss
output is a (batch x max_name_length x len(all_node_name_subtokens)) tensor of name predictions for each graph
Note: last dimension of output are pre-softmax values - SoftmaxCrossEntropy does the softmax
'''
(label, _), data_encoder = args
softmax_xent = gluon.loss.SoftmaxCrossEntropyLoss(axis=2)
# Masking output to max(where_RNN_emitted_PAD_token, length_of_label)
output_preds = F.argmax(output, axis=2).asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == data_encoder.all_node_name_subtokens['__PAD__'])[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)))
else:
output_lengths.append(output_preds.shape[1])
output_lengths = F.array(output_lengths, ctx=output.context)
mask_lengths = F.maximum(output_lengths, label.value_lengths)
output = F.SequenceMask(output, use_sequence_length=True, sequence_length=mask_lengths, axis=1)
return softmax_xent(output, label.values)
class VarNamingGraphVocabLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
'''
Returns the Softmax Cross Entropy loss of a model with a graph vocab, in the style of a sentinel pointer network
Note: Unlike VarNamingLoss, this Loss DOES expect the last dimension of output to be probabilities summing to 1
'''
(label, _), data_encoder = args
joint_label, label_lengths = label.values, label.value_lengths
# We're using pick and not just sparse labels for XEnt b/c there can be multiple ways to point to the correct subtoken
loss = nd.pick(output, joint_label, axis=2)
# Masking outputs to max(length_of_output (based on emitting value 0), length_of_label)
output_preds = nd.argmax(output, axis=2).asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == 0)[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)) + 1)
else:
output_lengths.append(output.shape[1])
output_lengths = nd.array(output_lengths, ctx=output.context)
mask_lengths = nd.maximum(output_lengths, label_lengths)
loss = nd.SequenceMask(loss, value=1.0, use_sequence_length=True, sequence_length=mask_lengths, axis=1)
return nd.mean(-nd.log(loss), axis=0, exclude=True)
def s3_sync(source_path: str, target_path: str):
'''
Syncs the directory/file at source_path to target_path via the aws s3 CLI
'''
cmd = "aws s3 sync {} {} --profile {}".format(source_path, target_path, aws_config['local_config_profile_name'])
logger.info('Running: {}'.format(cmd))
subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
def s3_cp(source_path: str, target_path: str, recursive=False):
'''
Copies the directory/file at source_path to target_path via the aws s3 CLI
'''
if recursive:
recursive = '--recursive'
else:
recursive = ''
cmd = "aws s3 cp {} {} {} --profile {}".format(recursive, source_path, target_path,
aws_config['local_config_profile_name'])
logger.info('Running: {}'.format(cmd))
subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
| '''
Measures the mean (over instances) of the characterwise edit distance (Levenshtein distance) between predicted and true names
'''
logged_example = False
with data_loader as data_loader:
cum_edit_distance = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
pred_name = ''.join(prediction)
real_name = ''.join(label)
cum_edit_distance += editdistance.eval(pred_name, real_name)
return cum_edit_distance / len(data_loader)
pred = []
true = []
for i in tqdm(range(0, math.ceil(len(dataset) / n_batch))):
data = dataset[n_batch * i:n_batch * (i + 1)]
graph, label = model.batchify(data, ctx)
output = model(graph)
predictions = nd.argmax(output, axis=2)
# Masking output to max(length_of_output, length_of_label)
output_preds = predictions.asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == 0)[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)))
else:
output_lengths.append(model.max_name_length)
output_lengths = nd.array(output_lengths, ctx=ctx)
mask_lengths = nd.maximum(output_lengths, label.value_lengths)
output = nd.SequenceMask(predictions, value=-1, use_sequence_length=True, sequence_length=mask_lengths,
axis=1).asnumpy().astype('int32')
labels = nd.SequenceMask(label.values, value=-1, use_sequence_length=True,
sequence_length=mask_lengths.astype('int32'), axis=1).asnumpy()
pred += [i for i in output.flatten().tolist() if i >= 0]
true += [i for i in labels.flatten().tolist() if i >= 0]
return metrics.f1_score(true, pred, average='weighted') | identifier_body |
utils.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
import itertools
import logging
import logging.handlers
import math
import os
import pprint
import socket
import subprocess
import time
from typing import Tuple
import editdistance
import mxnet as mx
import numpy as np
import sklearn.metrics as metrics
from mxnet import nd, gluon
from mxnet.gluon.loss import SigmoidBinaryCrossEntropyLoss
from tqdm import tqdm
from data import AsyncDataLoader
from experiments.aws_config import aws_config
logger = logging.getLogger()
class PaddedArray:
def __init__(self, values, value_lengths):
self.values = values
self.value_lengths = value_lengths
def as_in_context(self, ctx):
new_PA = PaddedArray(self.values.as_in_context(ctx), self.value_lengths.as_in_context(ctx))
return new_PA
def get_time():
os.environ['TZ'] = 'US/Pacific'
time.tzset()
t = time.strftime('%a_%b_%d_%Y_%H%Mhrs', time.localtime())
return t
def start_logging(log_dir, debug: bool = False):
logger = logging.getLogger()
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logger.setLevel(log_level)
if not any(type(i) == logging.StreamHandler for i in logger.handlers):
sh = logging.StreamHandler()
sh.setLevel(log_level)
logger.addHandler(sh)
file_handlers = [i for i in logger.handlers if type(i) == logging.FileHandler]
for h in file_handlers:
logger.removeHandler(h)
os.makedirs(log_dir, exist_ok=True)
logger.info('Logging to {}'.format(log_dir))
fh = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
fh.setLevel(log_level)
fh.setFormatter(
logging.Formatter(fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M'))
logger.addHandler(fh)
# Only works if you have an SMTP server like postfix running on your box
if aws_config['email_to_send_alerts_to'] and not any(
type(i) == logging.handlers.SMTPHandler for i in logger.handlers):
eh = logging.handlers.SMTPHandler('localhost',
'logger@{}'.format(socket.getfqdn()),
aws_config['email_to_send_alerts_to'],
'Log Message from {} about {}'.format(socket.getfqdn(), log_dir))
eh.setLevel(logging.ERROR)
eh.setFormatter(
logging.Formatter('Hey there, heads up:\n\n%(name)-12s: %(levelname)-8s %(message)s'))
logger.addHandler(eh)
return logger
def tuple_of_tuples_to_padded_array(tup_of_tups: Tuple[Tuple[int, ...], ...], ctx, pad_amount=None):
'''
Converts a tuple of tuples into a PaddedArray (i.e. glorified pair of nd.Arrays for working with SequenceMask)
Pads to the length of the longest tuple in the outer tuple, unless pad_amount is specified.
'''
value_lengths = nd.array([len(i) for i in tup_of_tups], dtype='float32',
ctx=ctx) # float type to play nice with SequenceMask later
if pad_amount is not None and value_lengths.max().asscalar() < pad_amount:
tup_of_tups = list(tup_of_tups)
tup_of_tups[0] = tup_of_tups[0] + (0,) * (pad_amount - len(tup_of_tups[0]))
values = list(itertools.zip_longest(*tup_of_tups, fillvalue=0))
values = nd.array(values, dtype='int32', ctx=ctx).T[:, :pad_amount]
return PaddedArray(values, value_lengths)
def evaluate_loss(data_loader: AsyncDataLoader, model, loss_fxn):
with data_loader as data_loader:
total_loss = nd.zeros((1,), ctx=data_loader.ctx[0])
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
losses = [loss_fxn(model(batch.data), batch.label, model.data_encoder) for batch in split_batch]
loss_sums = nd.concat(*[loss.sum().as_in_context(data_loader.ctx[0]) for loss in losses], dim=0)
total_loss += nd.sum(loss_sums)
total_loss.wait_to_read()
return total_loss.asscalar() / len(data_loader)
def evaluate_FITB_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in indicating the correct variable
'''
with data_loader as data_loader:
correct = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
correct += int(nd.dot(prediction, label).asscalar())
return correct / len(data_loader)
def evaluate_full_name_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in predicting the full true name, in batches
'''
logged_example = False
with data_loader as data_loader:
correct = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
if prediction == label:
correct += 1
return correct / len(data_loader)
def evaluate_subtokenwise_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in predicting each subtoken in the true names (with penalty for extra subtokens)
'''
logged_example = False
with data_loader as data_loader:
correct = 0
total = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
for i in range(min(len(prediction), len(label))):
if prediction[i] == label[i]:
correct += 1
total += max(len(prediction), len(label))
return correct / total
def evaluate_edit_distance(data_loader: AsyncDataLoader, model):
'''
Measures the mean (over instances) of the characterwise edit distance (Levenshtein distance) between predicted and true names
'''
logged_example = False
with data_loader as data_loader:
cum_edit_distance = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
|
return cum_edit_distance / len(data_loader)
pred = []
true = []
for i in tqdm(range(0, math.ceil(len(dataset) / n_batch))):
data = dataset[n_batch * i:n_batch * (i + 1)]
graph, label = model.batchify(data, ctx)
output = model(graph)
predictions = nd.argmax(output, axis=2)
# Masking output to max(length_of_output, length_of_label)
output_preds = predictions.asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == 0)[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)))
else:
output_lengths.append(model.max_name_length)
output_lengths = nd.array(output_lengths, ctx=ctx)
mask_lengths = nd.maximum(output_lengths, label.value_lengths)
output = nd.SequenceMask(predictions, value=-1, use_sequence_length=True, sequence_length=mask_lengths,
axis=1).asnumpy().astype('int32')
labels = nd.SequenceMask(label.values, value=-1, use_sequence_length=True,
sequence_length=mask_lengths.astype('int32'), axis=1).asnumpy()
pred += [i for i in output.flatten().tolist() if i >= 0]
true += [i for i in labels.flatten().tolist() if i >= 0]
return metrics.f1_score(true, pred, average='weighted')
class FITBLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
label, _ = args
loss = SigmoidBinaryCrossEntropyLoss()
return loss(output, label)
class VarNamingLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
'''
Masks the outputs and returns the SoftMaxCrossEntropy loss
output is a (batch x max_name_length x len(all_node_name_subtokens)) tensor of name predictions for each graph
Note: last dimension of output are pre-softmax values - SoftmaxCrossEntropy does the softmax
'''
(label, _), data_encoder = args
softmax_xent = gluon.loss.SoftmaxCrossEntropyLoss(axis=2)
# Masking output to max(where_RNN_emitted_PAD_token, length_of_label)
output_preds = F.argmax(output, axis=2).asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == data_encoder.all_node_name_subtokens['__PAD__'])[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)))
else:
output_lengths.append(output_preds.shape[1])
output_lengths = F.array(output_lengths, ctx=output.context)
mask_lengths = F.maximum(output_lengths, label.value_lengths)
output = F.SequenceMask(output, use_sequence_length=True, sequence_length=mask_lengths, axis=1)
return softmax_xent(output, label.values)
class VarNamingGraphVocabLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
'''
Returns the Softmax Cross Entropy loss of a model with a graph vocab, in the style of a sentinel pointer network
Note: Unlike VarNamingLoss, this Loss DOES expect the last dimension of output to be probabilities summing to 1
'''
(label, _), data_encoder = args
joint_label, label_lengths = label.values, label.value_lengths
# We're using pick and not just sparse labels for XEnt b/c there can be multiple ways to point to the correct subtoken
loss = nd.pick(output, joint_label, axis=2)
# Masking outputs to max(length_of_output (based on emitting value 0), length_of_label)
output_preds = nd.argmax(output, axis=2).asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == 0)[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)) + 1)
else:
output_lengths.append(output.shape[1])
output_lengths = nd.array(output_lengths, ctx=output.context)
mask_lengths = nd.maximum(output_lengths, label_lengths)
loss = nd.SequenceMask(loss, value=1.0, use_sequence_length=True, sequence_length=mask_lengths, axis=1)
return nd.mean(-nd.log(loss), axis=0, exclude=True)
def s3_sync(source_path: str, target_path: str):
'''
Syncs the directory/file at source_path to target_path via the aws s3 CLI
'''
cmd = "aws s3 sync {} {} --profile {}".format(source_path, target_path, aws_config['local_config_profile_name'])
logger.info('Running: {}'.format(cmd))
subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
def s3_cp(source_path: str, target_path: str, recursive=False):
'''
Copies the directory/file at source_path to target_path via the aws s3 CLI
'''
if recursive:
recursive = '--recursive'
else:
recursive = ''
cmd = "aws s3 cp {} {} {} --profile {}".format(recursive, source_path, target_path,
aws_config['local_config_profile_name'])
logger.info('Running: {}'.format(cmd))
subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
| predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
pred_name = ''.join(prediction)
real_name = ''.join(label)
cum_edit_distance += editdistance.eval(pred_name, real_name) | conditional_block |
utils.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
import itertools
import logging
import logging.handlers
import math
import os
import pprint
import socket
import subprocess
import time
from typing import Tuple
import editdistance
import mxnet as mx
import numpy as np
import sklearn.metrics as metrics
from mxnet import nd, gluon
from mxnet.gluon.loss import SigmoidBinaryCrossEntropyLoss
from tqdm import tqdm
from data import AsyncDataLoader
from experiments.aws_config import aws_config
logger = logging.getLogger()
class PaddedArray:
def __init__(self, values, value_lengths):
self.values = values
self.value_lengths = value_lengths
def as_in_context(self, ctx):
new_PA = PaddedArray(self.values.as_in_context(ctx), self.value_lengths.as_in_context(ctx))
return new_PA
def get_time():
os.environ['TZ'] = 'US/Pacific'
time.tzset()
t = time.strftime('%a_%b_%d_%Y_%H%Mhrs', time.localtime())
return t
def start_logging(log_dir, debug: bool = False):
logger = logging.getLogger()
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logger.setLevel(log_level)
if not any(type(i) == logging.StreamHandler for i in logger.handlers):
sh = logging.StreamHandler()
sh.setLevel(log_level)
logger.addHandler(sh)
file_handlers = [i for i in logger.handlers if type(i) == logging.FileHandler]
for h in file_handlers:
logger.removeHandler(h)
os.makedirs(log_dir, exist_ok=True)
logger.info('Logging to {}'.format(log_dir))
fh = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
fh.setLevel(log_level)
fh.setFormatter(
logging.Formatter(fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M'))
logger.addHandler(fh)
# Only works if you have an SMTP server like postfix running on your box
if aws_config['email_to_send_alerts_to'] and not any(
type(i) == logging.handlers.SMTPHandler for i in logger.handlers):
eh = logging.handlers.SMTPHandler('localhost',
'logger@{}'.format(socket.getfqdn()),
aws_config['email_to_send_alerts_to'],
'Log Message from {} about {}'.format(socket.getfqdn(), log_dir))
eh.setLevel(logging.ERROR)
eh.setFormatter(
logging.Formatter('Hey there, heads up:\n\n%(name)-12s: %(levelname)-8s %(message)s'))
logger.addHandler(eh)
return logger
| Converts a tuple of tuples into a PaddedArray (i.e. glorified pair of nd.Arrays for working with SequenceMask)
Pads to the length of the longest tuple in the outer tuple, unless pad_amount is specified.
'''
value_lengths = nd.array([len(i) for i in tup_of_tups], dtype='float32',
ctx=ctx) # float type to play nice with SequenceMask later
if pad_amount is not None and value_lengths.max().asscalar() < pad_amount:
tup_of_tups = list(tup_of_tups)
tup_of_tups[0] = tup_of_tups[0] + (0,) * (pad_amount - len(tup_of_tups[0]))
values = list(itertools.zip_longest(*tup_of_tups, fillvalue=0))
values = nd.array(values, dtype='int32', ctx=ctx).T[:, :pad_amount]
return PaddedArray(values, value_lengths)
def evaluate_loss(data_loader: AsyncDataLoader, model, loss_fxn):
with data_loader as data_loader:
total_loss = nd.zeros((1,), ctx=data_loader.ctx[0])
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
losses = [loss_fxn(model(batch.data), batch.label, model.data_encoder) for batch in split_batch]
loss_sums = nd.concat(*[loss.sum().as_in_context(data_loader.ctx[0]) for loss in losses], dim=0)
total_loss += nd.sum(loss_sums)
total_loss.wait_to_read()
return total_loss.asscalar() / len(data_loader)
def evaluate_FITB_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in indicating the correct variable
'''
with data_loader as data_loader:
correct = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
correct += int(nd.dot(prediction, label).asscalar())
return correct / len(data_loader)
def evaluate_full_name_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in predicting the full true name, in batches
'''
logged_example = False
with data_loader as data_loader:
correct = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
if prediction == label:
correct += 1
return correct / len(data_loader)
def evaluate_subtokenwise_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in predicting each subtoken in the true names (with penalty for extra subtokens)
'''
logged_example = False
with data_loader as data_loader:
correct = 0
total = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
for i in range(min(len(prediction), len(label))):
if prediction[i] == label[i]:
correct += 1
total += max(len(prediction), len(label))
return correct / total
def evaluate_edit_distance(data_loader: AsyncDataLoader, model):
'''
Measures the mean (over instances) of the characterwise edit distance (Levenshtein distance) between predicted and true names
'''
logged_example = False
with data_loader as data_loader:
cum_edit_distance = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
pred_name = ''.join(prediction)
real_name = ''.join(label)
cum_edit_distance += editdistance.eval(pred_name, real_name)
return cum_edit_distance / len(data_loader)
pred = []
true = []
for i in tqdm(range(0, math.ceil(len(dataset) / n_batch))):
data = dataset[n_batch * i:n_batch * (i + 1)]
graph, label = model.batchify(data, ctx)
output = model(graph)
predictions = nd.argmax(output, axis=2)
# Masking output to max(length_of_output, length_of_label)
output_preds = predictions.asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == 0)[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)))
else:
output_lengths.append(model.max_name_length)
output_lengths = nd.array(output_lengths, ctx=ctx)
mask_lengths = nd.maximum(output_lengths, label.value_lengths)
output = nd.SequenceMask(predictions, value=-1, use_sequence_length=True, sequence_length=mask_lengths,
axis=1).asnumpy().astype('int32')
labels = nd.SequenceMask(label.values, value=-1, use_sequence_length=True,
sequence_length=mask_lengths.astype('int32'), axis=1).asnumpy()
pred += [i for i in output.flatten().tolist() if i >= 0]
true += [i for i in labels.flatten().tolist() if i >= 0]
return metrics.f1_score(true, pred, average='weighted')
class FITBLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
label, _ = args
loss = SigmoidBinaryCrossEntropyLoss()
return loss(output, label)
class VarNamingLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
'''
Masks the outputs and returns the SoftMaxCrossEntropy loss
output is a (batch x max_name_length x len(all_node_name_subtokens)) tensor of name predictions for each graph
Note: last dimension of output are pre-softmax values - SoftmaxCrossEntropy does the softmax
'''
(label, _), data_encoder = args
softmax_xent = gluon.loss.SoftmaxCrossEntropyLoss(axis=2)
# Masking output to max(where_RNN_emitted_PAD_token, length_of_label)
output_preds = F.argmax(output, axis=2).asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == data_encoder.all_node_name_subtokens['__PAD__'])[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)))
else:
output_lengths.append(output_preds.shape[1])
output_lengths = F.array(output_lengths, ctx=output.context)
mask_lengths = F.maximum(output_lengths, label.value_lengths)
output = F.SequenceMask(output, use_sequence_length=True, sequence_length=mask_lengths, axis=1)
return softmax_xent(output, label.values)
class VarNamingGraphVocabLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
'''
Returns the Softmax Cross Entropy loss of a model with a graph vocab, in the style of a sentinel pointer network
Note: Unlike VarNamingLoss, this Loss DOES expect the last dimension of output to be probabilities summing to 1
'''
(label, _), data_encoder = args
joint_label, label_lengths = label.values, label.value_lengths
# We're using pick and not just sparse labels for XEnt b/c there can be multiple ways to point to the correct subtoken
loss = nd.pick(output, joint_label, axis=2)
# Masking outputs to max(length_of_output (based on emitting value 0), length_of_label)
output_preds = nd.argmax(output, axis=2).asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == 0)[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)) + 1)
else:
output_lengths.append(output.shape[1])
output_lengths = nd.array(output_lengths, ctx=output.context)
mask_lengths = nd.maximum(output_lengths, label_lengths)
loss = nd.SequenceMask(loss, value=1.0, use_sequence_length=True, sequence_length=mask_lengths, axis=1)
return nd.mean(-nd.log(loss), axis=0, exclude=True)
def s3_sync(source_path: str, target_path: str):
'''
Syncs the directory/file at source_path to target_path via the aws s3 CLI
'''
cmd = "aws s3 sync {} {} --profile {}".format(source_path, target_path, aws_config['local_config_profile_name'])
logger.info('Running: {}'.format(cmd))
subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
def s3_cp(source_path: str, target_path: str, recursive=False):
'''
Copies the directory/file at source_path to target_path via the aws s3 CLI
'''
if recursive:
recursive = '--recursive'
else:
recursive = ''
cmd = "aws s3 cp {} {} {} --profile {}".format(recursive, source_path, target_path,
aws_config['local_config_profile_name'])
logger.info('Running: {}'.format(cmd))
subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy()) |
def tuple_of_tuples_to_padded_array(tup_of_tups: Tuple[Tuple[int, ...], ...], ctx, pad_amount=None):
''' | random_line_split |
utils.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
import itertools
import logging
import logging.handlers
import math
import os
import pprint
import socket
import subprocess
import time
from typing import Tuple
import editdistance
import mxnet as mx
import numpy as np
import sklearn.metrics as metrics
from mxnet import nd, gluon
from mxnet.gluon.loss import SigmoidBinaryCrossEntropyLoss
from tqdm import tqdm
from data import AsyncDataLoader
from experiments.aws_config import aws_config
logger = logging.getLogger()
class PaddedArray:
def __init__(self, values, value_lengths):
self.values = values
self.value_lengths = value_lengths
def as_in_context(self, ctx):
new_PA = PaddedArray(self.values.as_in_context(ctx), self.value_lengths.as_in_context(ctx))
return new_PA
def get_time():
os.environ['TZ'] = 'US/Pacific'
time.tzset()
t = time.strftime('%a_%b_%d_%Y_%H%Mhrs', time.localtime())
return t
def start_logging(log_dir, debug: bool = False):
logger = logging.getLogger()
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logger.setLevel(log_level)
if not any(type(i) == logging.StreamHandler for i in logger.handlers):
sh = logging.StreamHandler()
sh.setLevel(log_level)
logger.addHandler(sh)
file_handlers = [i for i in logger.handlers if type(i) == logging.FileHandler]
for h in file_handlers:
logger.removeHandler(h)
os.makedirs(log_dir, exist_ok=True)
logger.info('Logging to {}'.format(log_dir))
fh = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
fh.setLevel(log_level)
fh.setFormatter(
logging.Formatter(fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M'))
logger.addHandler(fh)
# Only works if you have an SMTP server like postfix running on your box
if aws_config['email_to_send_alerts_to'] and not any(
type(i) == logging.handlers.SMTPHandler for i in logger.handlers):
eh = logging.handlers.SMTPHandler('localhost',
'logger@{}'.format(socket.getfqdn()),
aws_config['email_to_send_alerts_to'],
'Log Message from {} about {}'.format(socket.getfqdn(), log_dir))
eh.setLevel(logging.ERROR)
eh.setFormatter(
logging.Formatter('Hey there, heads up:\n\n%(name)-12s: %(levelname)-8s %(message)s'))
logger.addHandler(eh)
return logger
def tuple_of_tuples_to_padded_array(tup_of_tups: Tuple[Tuple[int, ...], ...], ctx, pad_amount=None):
'''
Converts a tuple of tuples into a PaddedArray (i.e. glorified pair of nd.Arrays for working with SequenceMask)
Pads to the length of the longest tuple in the outer tuple, unless pad_amount is specified.
'''
value_lengths = nd.array([len(i) for i in tup_of_tups], dtype='float32',
ctx=ctx) # float type to play nice with SequenceMask later
if pad_amount is not None and value_lengths.max().asscalar() < pad_amount:
tup_of_tups = list(tup_of_tups)
tup_of_tups[0] = tup_of_tups[0] + (0,) * (pad_amount - len(tup_of_tups[0]))
values = list(itertools.zip_longest(*tup_of_tups, fillvalue=0))
values = nd.array(values, dtype='int32', ctx=ctx).T[:, :pad_amount]
return PaddedArray(values, value_lengths)
def evaluate_loss(data_loader: AsyncDataLoader, model, loss_fxn):
with data_loader as data_loader:
total_loss = nd.zeros((1,), ctx=data_loader.ctx[0])
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
losses = [loss_fxn(model(batch.data), batch.label, model.data_encoder) for batch in split_batch]
loss_sums = nd.concat(*[loss.sum().as_in_context(data_loader.ctx[0]) for loss in losses], dim=0)
total_loss += nd.sum(loss_sums)
total_loss.wait_to_read()
return total_loss.asscalar() / len(data_loader)
def evaluate_FITB_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in indicating the correct variable
'''
with data_loader as data_loader:
correct = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
correct += int(nd.dot(prediction, label).asscalar())
return correct / len(data_loader)
def evaluate_full_name_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in predicting the full true name, in batches
'''
logged_example = False
with data_loader as data_loader:
correct = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
if prediction == label:
correct += 1
return correct / len(data_loader)
def evaluate_subtokenwise_accuracy(data_loader: AsyncDataLoader, model):
'''
Measures the accuracy of the model in predicting each subtoken in the true names (with penalty for extra subtokens)
'''
logged_example = False
with data_loader as data_loader:
correct = 0
total = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
for i in range(min(len(prediction), len(label))):
if prediction[i] == label[i]:
correct += 1
total += max(len(prediction), len(label))
return correct / total
def evaluate_edit_distance(data_loader: AsyncDataLoader, model):
'''
Measures the mean (over instances) of the characterwise edit distance (Levenshtein distance) between predicted and true names
'''
logged_example = False
with data_loader as data_loader:
cum_edit_distance = 0
for split_batch, batch_length in tqdm(data_loader, total=data_loader.total_batches):
batches_outputs = [(batch, model(batch.data)) for batch in split_batch]
for batch, output in batches_outputs:
predictions_labels = model.unbatchify(batch, output)
for prediction, label in predictions_labels:
if not logged_example:
logger.info('Some example predictions:\n{}'.format(pprint.pformat(predictions_labels[:10])))
logged_example = True
pred_name = ''.join(prediction)
real_name = ''.join(label)
cum_edit_distance += editdistance.eval(pred_name, real_name)
return cum_edit_distance / len(data_loader)
pred = []
true = []
for i in tqdm(range(0, math.ceil(len(dataset) / n_batch))):
data = dataset[n_batch * i:n_batch * (i + 1)]
graph, label = model.batchify(data, ctx)
output = model(graph)
predictions = nd.argmax(output, axis=2)
# Masking output to max(length_of_output, length_of_label)
output_preds = predictions.asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == 0)[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)))
else:
output_lengths.append(model.max_name_length)
output_lengths = nd.array(output_lengths, ctx=ctx)
mask_lengths = nd.maximum(output_lengths, label.value_lengths)
output = nd.SequenceMask(predictions, value=-1, use_sequence_length=True, sequence_length=mask_lengths,
axis=1).asnumpy().astype('int32')
labels = nd.SequenceMask(label.values, value=-1, use_sequence_length=True,
sequence_length=mask_lengths.astype('int32'), axis=1).asnumpy()
pred += [i for i in output.flatten().tolist() if i >= 0]
true += [i for i in labels.flatten().tolist() if i >= 0]
return metrics.f1_score(true, pred, average='weighted')
class FITBLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
label, _ = args
loss = SigmoidBinaryCrossEntropyLoss()
return loss(output, label)
class VarNamingLoss(mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
'''
Masks the outputs and returns the SoftMaxCrossEntropy loss
output is a (batch x max_name_length x len(all_node_name_subtokens)) tensor of name predictions for each graph
Note: last dimension of output are pre-softmax values - SoftmaxCrossEntropy does the softmax
'''
(label, _), data_encoder = args
softmax_xent = gluon.loss.SoftmaxCrossEntropyLoss(axis=2)
# Masking output to max(where_RNN_emitted_PAD_token, length_of_label)
output_preds = F.argmax(output, axis=2).asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == data_encoder.all_node_name_subtokens['__PAD__'])[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)))
else:
output_lengths.append(output_preds.shape[1])
output_lengths = F.array(output_lengths, ctx=output.context)
mask_lengths = F.maximum(output_lengths, label.value_lengths)
output = F.SequenceMask(output, use_sequence_length=True, sequence_length=mask_lengths, axis=1)
return softmax_xent(output, label.values)
class | (mx.gluon.HybridBlock):
def hybrid_forward(self, F, output, *args, **kwargs):
'''
Returns the Softmax Cross Entropy loss of a model with a graph vocab, in the style of a sentinel pointer network
Note: Unlike VarNamingLoss, this Loss DOES expect the last dimension of output to be probabilities summing to 1
'''
(label, _), data_encoder = args
joint_label, label_lengths = label.values, label.value_lengths
# We're using pick and not just sparse labels for XEnt b/c there can be multiple ways to point to the correct subtoken
loss = nd.pick(output, joint_label, axis=2)
# Masking outputs to max(length_of_output (based on emitting value 0), length_of_label)
output_preds = nd.argmax(output, axis=2).asnumpy()
output_lengths = []
for row in output_preds:
end_token_idxs = np.where(row == 0)[0]
if len(end_token_idxs):
output_lengths.append(int(min(end_token_idxs)) + 1)
else:
output_lengths.append(output.shape[1])
output_lengths = nd.array(output_lengths, ctx=output.context)
mask_lengths = nd.maximum(output_lengths, label_lengths)
loss = nd.SequenceMask(loss, value=1.0, use_sequence_length=True, sequence_length=mask_lengths, axis=1)
return nd.mean(-nd.log(loss), axis=0, exclude=True)
def s3_sync(source_path: str, target_path: str):
'''
Syncs the directory/file at source_path to target_path via the aws s3 CLI
'''
cmd = "aws s3 sync {} {} --profile {}".format(source_path, target_path, aws_config['local_config_profile_name'])
logger.info('Running: {}'.format(cmd))
subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
def s3_cp(source_path: str, target_path: str, recursive=False):
'''
Copies the directory/file at source_path to target_path via the aws s3 CLI
'''
if recursive:
recursive = '--recursive'
else:
recursive = ''
cmd = "aws s3 cp {} {} {} --profile {}".format(recursive, source_path, target_path,
aws_config['local_config_profile_name'])
logger.info('Running: {}'.format(cmd))
subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
| VarNamingGraphVocabLoss | identifier_name |
main.rs | /*
--- Day 4: Passport Processing ---
You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world.
It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary.
Due to some questionable network security, you realize you might be able to solve both of these problems at the same time.
The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows:
byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)
Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines.
Here is an example batch file containing four passports:
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field).
The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid.
The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid.
According to the above rules, your improved system would report 2 valid passports.
Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid?
--- Part Two ---
The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick!
You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation:
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
Your job is to count the passports where all required fields are both present and valid according to the above rules. Here are some example values:
byr valid: 2002
byr invalid: 2003
hgt valid: 60in
hgt valid: 190cm
hgt invalid: 190in
hgt invalid: 190
hcl valid: #123abc
hcl invalid: #123abz
hcl invalid: 123abc
ecl valid: brn
ecl invalid: wat
pid valid: 000000001
pid invalid: 0123456789
Here are some invalid passports:
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
Here are some valid passports:
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional. In your batch file, how many passports are valid?
*/
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
fn main() {
let passports = read_in_passports("src/passports.txt");
println!("Passports with required fields = {}", check_required_fields(&passports));
let mut valid_passports = 0;
for passport in passports {
if validate_passport(&passport) {
valid_passports += 1;
}
}
println!("Valid passports = {}", valid_passports);
}
// struct Passport {
// byr: Option<u32>,
// iyr: Option<u32>,
// eyr: Option<u32>,
// hgt: Option<Height>,
// hcl: Option<String>,
// ecl: Option<String>,
// pid: Option<String>,
// cid: Option<String>,
// }
// impl Passport {
// fn update_field<T>(&self, field: &str, value: T) {
// match field {
// "byr" => self.byr = Option::from(value),
// "iyr"
// }
// }
// fn check_required_fields(&self) -> bool {
// self.byr.is_some() || self.iyr.is_some() || self.eyr.is_some() ||
// self.hgt.is_some() || self.hcl.is_some() || self.ecl.is_some() ||
// self.pid.is_some()
// }
// fn validate() {
// }
// }
// struct Height {
// h: u32,
// unit: String,
// }
/// Read in the passports from the defined file and return as a vector of
/// HashMap
///
/// # Arguments
///
/// * `filename` - the filename to read the passports from
fn read_in_passports(filename: &str) -> Vec< HashMap<String, String> > {
let mut passports: Vec< HashMap<String, String> > = Vec::new();
let file = File::open(filename).unwrap();
let reader = BufReader::new(file);
let mut lines = reader.lines();
// iterate over lines until end of file
loop {
let mut line = lines.next();
if line.is_none() {
break;
}
let mut entry = HashMap::new();
loop {
let lin = String::from(line.unwrap().unwrap().trim());
// let l = lin.trim();
if lin.is_empty() {
break;
}
for item in lin.split_whitespace() {
let mut pair = item.split(':');
let key = String::from(pair.next().unwrap());
let value = String::from(pair.next().unwrap());
entry.insert(key, value);
}
line = lines.next();
if line.is_none() {
break;
}
}
passports.push(entry);
}
passports
}
fn check_required_fields(passports: &Vec< HashMap<String, String> >) -> u32 {
// Check that all passports contain the required fields
// byr - iyr - eyr - hgt - hcl - ecl -pid
let required_fields = vec!(
String::from("byr"),
String::from("iyr"),
String::from("eyr"),
String::from("hgt"),
String::from("hcl"),
String::from("ecl"),
String::from("pid"));
let mut valid_passports_total = 0;
for passport in passports {
let mut valid = true;
for field in &required_fields {
if passport.contains_key(field) == false {
valid = false;
break;
}
}
valid_passports_total += if valid { 1 } else | ;
}
valid_passports_total
}
fn validate_passport(passport: &HashMap<String, String>) -> bool {
let mut result = true;
// Birth year
if let Some(byr) = passport.get("byr") {
result = result && validate_byr(byr);
} else {
result = false;
}
// Issue Year
if let Some(iyr) = passport.get("iyr") {
result = result && validate_iyr(iyr);
} else {
result = false;
}
// Expiration Year
if let Some(eyr) = passport.get("eyr") {
result = result && validate_eyr(eyr);
} else {
result = false;
}
// Height
if let Some(hgt) = passport.get("hgt") {
result = result && validate_hgt(hgt);
} else {
result = false;
}
// Hair Colour
if let Some(hcl) = passport.get("hcl") {
result = result && validate_hcl(hcl);
} else {
result = false;
}
// Eye Colour
if let Some(ecl) = passport.get("ecl") {
result = result && validate_ecl(ecl);
} else {
result = false;
}
// Passport ID
if let Some(pid) = passport.get("pid") {
result = result && validate_pid(pid);
} else {
result = false;
}
// Country ID - Ignored
result
}
fn validate_byr(field: &String) -> bool {
// Requirement are:
// - four digits
// - at least 1920 and at most 2002.
// Convert to number
if let Ok(year) = field.parse::< u32 >() {
year >= 1920 && year <= 2002
}
else {
false
}
}
fn validate_iyr(field: &String) -> bool {
// Requirement are:
// - four digits
// - at least 2010 and at most 2020.
// Convert to number
if let Ok(year) = field.parse::< u32 >() {
year >= 2010 && year <= 2020
}
else {
false
}
}
fn validate_eyr(field: &String) -> bool {
// Requirement are:
// - four digits
// - at least 2010 and at most 2020.
// Convert to number
if let Ok(year) = field.parse::< u32 >() {
year >= 2020 && year <= 2030
}
else {
false
}
}
fn validate_hgt(field: &String) -> bool {
// Requirement are:
// - a number followed by cm or in
// - if cm -> 150-193 inclusive
// - if in -> 59-76 inclusive
// Remove last two letters
let chars = field.chars().count();
if chars > 2 {
let measurement: String = field.chars().take(chars - 2).collect();
let measurement_type: String = field.chars().rev().take(2).collect();
// Convert to number
if let Ok(value) = measurement.parse::< u32 >() {
match measurement_type.as_str() {
// Strings are reversed!!!
"mc" => value >= 150 && value <= 193,
"ni" => value >= 59 && value <= 76,
_ => false
}
}
else {
false
}
} else {
false
}
}
fn validate_hcl(field: &String) -> bool {
// Requirement are:
// - starts with #
// - followed by 6 0-9 or a-f charachters
let valid_chars = vec!('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f');
let validate_chars = | hcl: &String | -> bool {
let mut result = true;
for c in hcl.chars() {
result = result && valid_chars.contains(&c);
}
result
};
let chars = field.chars().count();
if chars == 7 {
if field.chars().next().unwrap() == '#' {
let hcl: String = field.chars().rev().take(6).collect();
validate_chars(&hcl)
} else {
false
}
} else {
false
}
}
fn validate_ecl(field: &String) -> bool {
// Requirement are:
// - exactly one of: amb blu brn gry grn hzl oth
let valid_colors = vec!(
String::from("amb"),
String::from("blu"),
String::from("brn"),
String::from("gry"),
String::from("grn"),
String::from("hzl"),
String::from("oth") );
valid_colors.contains(field)
}
fn validate_pid(field: &String) -> bool {
// Requirement are:
// - nine-digit number, including leading zeroes
let valid_chars = vec!('0', '1', '2', '3', '4', '5', '6', '7', '8', '9');
let validate_chars = | hcl: &String | -> bool {
let mut result = true;
for c in hcl.chars() {
result = result && valid_chars.contains(&c);
}
result
};
let chars = field.chars().count();
if chars == 9 {
validate_chars(field)
} else {
false
}
}
| { 0 } | conditional_block |
main.rs | /*
--- Day 4: Passport Processing ---
You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world.
It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary.
Due to some questionable network security, you realize you might be able to solve both of these problems at the same time.
The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows:
byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)
Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines.
Here is an example batch file containing four passports:
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field).
The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid.
The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid.
According to the above rules, your improved system would report 2 valid passports.
Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid?
--- Part Two ---
The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick!
You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation:
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
Your job is to count the passports where all required fields are both present and valid according to the above rules. Here are some example values:
byr valid: 2002
byr invalid: 2003
hgt valid: 60in
hgt valid: 190cm
hgt invalid: 190in
hgt invalid: 190
hcl valid: #123abc
hcl invalid: #123abz
hcl invalid: 123abc
ecl valid: brn
ecl invalid: wat
pid valid: 000000001
pid invalid: 0123456789
Here are some invalid passports:
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
Here are some valid passports:
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional. In your batch file, how many passports are valid?
*/
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
fn main() {
let passports = read_in_passports("src/passports.txt");
println!("Passports with required fields = {}", check_required_fields(&passports));
let mut valid_passports = 0;
for passport in passports {
if validate_passport(&passport) {
valid_passports += 1;
}
}
println!("Valid passports = {}", valid_passports);
}
// struct Passport {
// byr: Option<u32>,
// iyr: Option<u32>,
// eyr: Option<u32>,
// hgt: Option<Height>,
// hcl: Option<String>,
// ecl: Option<String>,
// pid: Option<String>,
// cid: Option<String>,
// }
// impl Passport {
// fn update_field<T>(&self, field: &str, value: T) {
// match field {
// "byr" => self.byr = Option::from(value),
// "iyr"
// }
// }
// fn check_required_fields(&self) -> bool {
// self.byr.is_some() || self.iyr.is_some() || self.eyr.is_some() ||
// self.hgt.is_some() || self.hcl.is_some() || self.ecl.is_some() ||
// self.pid.is_some()
// }
// fn validate() {
// }
// }
// struct Height {
// h: u32,
// unit: String,
// }
/// Read in the passports from the defined file and return as a vector of
/// HashMap
///
/// # Arguments
///
/// * `filename` - the filename to read the passports from
fn read_in_passports(filename: &str) -> Vec< HashMap<String, String> > {
let mut passports: Vec< HashMap<String, String> > = Vec::new();
let file = File::open(filename).unwrap();
let reader = BufReader::new(file);
let mut lines = reader.lines();
// iterate over lines until end of file
loop {
let mut line = lines.next();
if line.is_none() {
break;
}
let mut entry = HashMap::new();
loop {
let lin = String::from(line.unwrap().unwrap().trim());
// let l = lin.trim();
if lin.is_empty() {
break;
}
for item in lin.split_whitespace() {
let mut pair = item.split(':');
let key = String::from(pair.next().unwrap());
let value = String::from(pair.next().unwrap());
entry.insert(key, value);
}
line = lines.next();
if line.is_none() {
break;
}
}
passports.push(entry);
}
passports
}
fn check_required_fields(passports: &Vec< HashMap<String, String> >) -> u32 {
// Check that all passports contain the required fields
// byr - iyr - eyr - hgt - hcl - ecl -pid
let required_fields = vec!(
String::from("byr"),
String::from("iyr"),
String::from("eyr"),
String::from("hgt"),
String::from("hcl"),
String::from("ecl"),
String::from("pid"));
let mut valid_passports_total = 0;
for passport in passports {
let mut valid = true;
for field in &required_fields {
if passport.contains_key(field) == false {
valid = false;
break;
}
}
valid_passports_total += if valid { 1 } else { 0 };
}
valid_passports_total
}
fn | (passport: &HashMap<String, String>) -> bool {
let mut result = true;
// Birth year
if let Some(byr) = passport.get("byr") {
result = result && validate_byr(byr);
} else {
result = false;
}
// Issue Year
if let Some(iyr) = passport.get("iyr") {
result = result && validate_iyr(iyr);
} else {
result = false;
}
// Expiration Year
if let Some(eyr) = passport.get("eyr") {
result = result && validate_eyr(eyr);
} else {
result = false;
}
// Height
if let Some(hgt) = passport.get("hgt") {
result = result && validate_hgt(hgt);
} else {
result = false;
}
// Hair Colour
if let Some(hcl) = passport.get("hcl") {
result = result && validate_hcl(hcl);
} else {
result = false;
}
// Eye Colour
if let Some(ecl) = passport.get("ecl") {
result = result && validate_ecl(ecl);
} else {
result = false;
}
// Passport ID
if let Some(pid) = passport.get("pid") {
result = result && validate_pid(pid);
} else {
result = false;
}
// Country ID - Ignored
result
}
fn validate_byr(field: &String) -> bool {
// Requirement are:
// - four digits
// - at least 1920 and at most 2002.
// Convert to number
if let Ok(year) = field.parse::< u32 >() {
year >= 1920 && year <= 2002
}
else {
false
}
}
fn validate_iyr(field: &String) -> bool {
// Requirement are:
// - four digits
// - at least 2010 and at most 2020.
// Convert to number
if let Ok(year) = field.parse::< u32 >() {
year >= 2010 && year <= 2020
}
else {
false
}
}
fn validate_eyr(field: &String) -> bool {
// Requirement are:
// - four digits
// - at least 2010 and at most 2020.
// Convert to number
if let Ok(year) = field.parse::< u32 >() {
year >= 2020 && year <= 2030
}
else {
false
}
}
fn validate_hgt(field: &String) -> bool {
// Requirement are:
// - a number followed by cm or in
// - if cm -> 150-193 inclusive
// - if in -> 59-76 inclusive
// Remove last two letters
let chars = field.chars().count();
if chars > 2 {
let measurement: String = field.chars().take(chars - 2).collect();
let measurement_type: String = field.chars().rev().take(2).collect();
// Convert to number
if let Ok(value) = measurement.parse::< u32 >() {
match measurement_type.as_str() {
// Strings are reversed!!!
"mc" => value >= 150 && value <= 193,
"ni" => value >= 59 && value <= 76,
_ => false
}
}
else {
false
}
} else {
false
}
}
/// Validate a hair colour: '#' followed by exactly six characters,
/// each 0-9 or lowercase a-f.
///
/// Replaces the per-call `Vec<char>` lookup table and the reversed-string
/// trick with a single forward iterator pass.
fn validate_hcl(field: &String) -> bool {
    if field.chars().count() != 7 {
        return false;
    }
    let mut it = field.chars();
    if it.next() != Some('#') {
        return false;
    }
    // `it` now holds the remaining six characters.
    it.all(|c| c.is_ascii_digit() || ('a'..='f').contains(&c))
}
/// Validate an eye colour: exactly one of amb blu brn gry grn hzl oth.
///
/// Compares against `&str` constants directly instead of allocating a
/// `Vec<String>` on every call.
fn validate_ecl(field: &String) -> bool {
    const VALID: [&str; 7] = ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"];
    VALID.contains(&field.as_str())
}
/// Validate a passport id: exactly nine ASCII digits (leading zeroes ok).
///
/// Replaces the per-call `Vec<char>` digit table with `is_ascii_digit`.
fn validate_pid(field: &String) -> bool {
    field.chars().count() == 9 && field.chars().all(|c| c.is_ascii_digit())
}
| validate_passport | identifier_name |
main.rs | /*
--- Day 4: Passport Processing ---
You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world.
It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary.
Due to some questionable network security, you realize you might be able to solve both of these problems at the same time.
The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows:
byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)
Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines.
Here is an example batch file containing four passports:
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field).
The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid.
The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid.
According to the above rules, your improved system would report 2 valid passports.
Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid?
--- Part Two ---
The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick!
You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation:
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
Your job is to count the passports where all required fields are both present and valid according to the above rules. Here are some example values:
byr valid: 2002
byr invalid: 2003
hgt valid: 60in
hgt valid: 190cm
hgt invalid: 190in
hgt invalid: 190 |
ecl valid: brn
ecl invalid: wat
pid valid: 000000001
pid invalid: 0123456789
Here are some invalid passports:
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
Here are some valid passports:
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional. In your batch file, how many passports are valid?
*/
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
/// Entry point: part one counts passports with all required fields,
/// part two additionally validates each field's value.
fn main() {
    let passports = read_in_passports("src/passports.txt");
    println!("Passports with required fields = {}", check_required_fields(&passports));
    let valid_passports = passports
        .iter()
        .filter(|p| validate_passport(p))
        .count();
    println!("Valid passports = {}", valid_passports);
}
// struct Passport {
// byr: Option<u32>,
// iyr: Option<u32>,
// eyr: Option<u32>,
// hgt: Option<Height>,
// hcl: Option<String>,
// ecl: Option<String>,
// pid: Option<String>,
// cid: Option<String>,
// }
// impl Passport {
// fn update_field<T>(&self, field: &str, value: T) {
// match field {
// "byr" => self.byr = Option::from(value),
// "iyr"
// }
// }
// fn check_required_fields(&self) -> bool {
// self.byr.is_some() || self.iyr.is_some() || self.eyr.is_some() ||
// self.hgt.is_some() || self.hcl.is_some() || self.ecl.is_some() ||
// self.pid.is_some()
// }
// fn validate() {
// }
// }
// struct Height {
// h: u32,
// unit: String,
// }
/// Read in the passports from the defined file and return as a vector of
/// HashMap.  Passports are blank-line-separated groups of
/// whitespace-separated `key:value` pairs.
///
/// Fix: the original pushed an empty HashMap for every leading or
/// consecutive blank line; empty groups are now skipped.
///
/// # Arguments
///
/// * `filename` - the filename to read the passports from
///
/// # Panics
///
/// Panics if the file cannot be opened, a line cannot be read, or an
/// item lacks a `:` separator (same as the original `unwrap`s).
fn read_in_passports(filename: &str) -> Vec< HashMap<String, String> > {
    let file = File::open(filename).unwrap();
    let reader = BufReader::new(file);
    let mut passports: Vec<HashMap<String, String>> = Vec::new();
    let mut entry: HashMap<String, String> = HashMap::new();
    for line in reader.lines() {
        let line = line.unwrap();
        let line = line.trim();
        if line.is_empty() {
            // Blank line ends the current passport; ignore runs of blanks.
            if !entry.is_empty() {
                passports.push(entry);
                entry = HashMap::new();
            }
            continue;
        }
        for item in line.split_whitespace() {
            let mut pair = item.split(':');
            let key = String::from(pair.next().unwrap());
            let value = String::from(pair.next().unwrap());
            entry.insert(key, value);
        }
    }
    // The last passport may not be followed by a blank line.
    if !entry.is_empty() {
        passports.push(entry);
    }
    passports
}
/// Count the passports that contain every required field
/// (byr, iyr, eyr, hgt, hcl, ecl, pid).  `cid` is deliberately optional.
///
/// Rewritten with iterator combinators; also drops the per-call
/// `Vec<String>` allocation and the `== false` anti-idiom.
fn check_required_fields(passports: &Vec< HashMap<String, String> >) -> u32 {
    const REQUIRED: [&str; 7] = ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"];
    passports
        .iter()
        .filter(|p| REQUIRED.iter().all(|f| p.contains_key(*f)))
        .count() as u32
}
fn validate_passport(passport: &HashMap<String, String>) -> bool {
let mut result = true;
// Birth year
if let Some(byr) = passport.get("byr") {
result = result && validate_byr(byr);
} else {
result = false;
}
// Issue Year
if let Some(iyr) = passport.get("iyr") {
result = result && validate_iyr(iyr);
} else {
result = false;
}
// Expiration Year
if let Some(eyr) = passport.get("eyr") {
result = result && validate_eyr(eyr);
} else {
result = false;
}
// Height
if let Some(hgt) = passport.get("hgt") {
result = result && validate_hgt(hgt);
} else {
result = false;
}
// Hair Colour
if let Some(hcl) = passport.get("hcl") {
result = result && validate_hcl(hcl);
} else {
result = false;
}
// Eye Colour
if let Some(ecl) = passport.get("ecl") {
result = result && validate_ecl(ecl);
} else {
result = false;
}
// Passport ID
if let Some(pid) = passport.get("pid") {
result = result && validate_pid(pid);
} else {
result = false;
}
// Country ID - Ignored
result
}
/// Validate a birth year: parses as an integer and must lie in 1920..=2002.
///
/// Any non-numeric value fails the parse and is rejected.
fn validate_byr(field: &String) -> bool {
    field
        .parse::<u32>()
        .map_or(false, |year| (1920..=2002).contains(&year))
}
/// Validate an issue year: parses as an integer and must lie in 2010..=2020.
fn validate_iyr(field: &String) -> bool {
    field
        .parse::<u32>()
        .map_or(false, |year| (2010..=2020).contains(&year))
}
/// Validate an expiration year: parses as an integer and must lie in
/// 2020..=2030.  (The original comment wrongly said 2010-2020; the code
/// and the puzzle statement both use 2020-2030.)
fn validate_eyr(field: &String) -> bool {
    field
        .parse::<u32>()
        .map_or(false, |year| (2020..=2030).contains(&year))
}
/// Validate a height: a number followed by a unit suffix.
/// "cm" requires 150..=193; "in" requires 59..=76; anything else fails.
///
/// Uses `ends_with` on the ASCII suffix instead of the original
/// reversed-character-iterator trick, so the "mc"/"ni" hack disappears.
fn validate_hgt(field: &String) -> bool {
    // Both suffixes are 2 ASCII bytes, so byte slicing below is safe.
    let (digits, is_cm) = if field.ends_with("cm") {
        (&field[..field.len() - 2], true)
    } else if field.ends_with("in") {
        (&field[..field.len() - 2], false)
    } else {
        return false;
    };
    match digits.parse::<u32>() {
        Ok(v) if is_cm => v >= 150 && v <= 193,
        Ok(v) => v >= 59 && v <= 76,
        Err(_) => false,
    }
}
/// Validate a hair colour: '#' followed by exactly six characters,
/// each 0-9 or lowercase a-f.
///
/// Replaces the per-call `Vec<char>` lookup table and the reversed-string
/// trick with a single forward iterator pass.
fn validate_hcl(field: &String) -> bool {
    if field.chars().count() != 7 {
        return false;
    }
    let mut it = field.chars();
    if it.next() != Some('#') {
        return false;
    }
    // `it` now holds the remaining six characters.
    it.all(|c| c.is_ascii_digit() || ('a'..='f').contains(&c))
}
/// Validate an eye colour: exactly one of amb blu brn gry grn hzl oth.
///
/// Compares against `&str` constants directly instead of allocating a
/// `Vec<String>` on every call.
fn validate_ecl(field: &String) -> bool {
    const VALID: [&str; 7] = ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"];
    VALID.contains(&field.as_str())
}
/// Validate a passport id: exactly nine ASCII digits (leading zeroes ok).
///
/// Replaces the per-call `Vec<char>` digit table with `is_ascii_digit`.
fn validate_pid(field: &String) -> bool {
    field.chars().count() == 9 && field.chars().all(|c| c.is_ascii_digit())
}
hcl valid: #123abc
hcl invalid: #123abz
hcl invalid: 123abc | random_line_split |
main.rs | /*
--- Day 4: Passport Processing ---
You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world.
It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary.
Due to some questionable network security, you realize you might be able to solve both of these problems at the same time.
The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows:
byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)
Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines.
Here is an example batch file containing four passports:
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field).
The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid.
The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid.
According to the above rules, your improved system would report 2 valid passports.
Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid?
--- Part Two ---
The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick!
You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation:
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
Your job is to count the passports where all required fields are both present and valid according to the above rules. Here are some example values:
byr valid: 2002
byr invalid: 2003
hgt valid: 60in
hgt valid: 190cm
hgt invalid: 190in
hgt invalid: 190
hcl valid: #123abc
hcl invalid: #123abz
hcl invalid: 123abc
ecl valid: brn
ecl invalid: wat
pid valid: 000000001
pid invalid: 0123456789
Here are some invalid passports:
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
Here are some valid passports:
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional. In your batch file, how many passports are valid?
*/
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
/// Entry point: part one counts passports with all required fields,
/// part two additionally validates each field's value.
fn main() {
    let passports = read_in_passports("src/passports.txt");
    println!("Passports with required fields = {}", check_required_fields(&passports));
    let valid_passports = passports
        .iter()
        .filter(|p| validate_passport(p))
        .count();
    println!("Valid passports = {}", valid_passports);
}
// struct Passport {
// byr: Option<u32>,
// iyr: Option<u32>,
// eyr: Option<u32>,
// hgt: Option<Height>,
// hcl: Option<String>,
// ecl: Option<String>,
// pid: Option<String>,
// cid: Option<String>,
// }
// impl Passport {
// fn update_field<T>(&self, field: &str, value: T) {
// match field {
// "byr" => self.byr = Option::from(value),
// "iyr"
// }
// }
// fn check_required_fields(&self) -> bool {
// self.byr.is_some() || self.iyr.is_some() || self.eyr.is_some() ||
// self.hgt.is_some() || self.hcl.is_some() || self.ecl.is_some() ||
// self.pid.is_some()
// }
// fn validate() {
// }
// }
// struct Height {
// h: u32,
// unit: String,
// }
/// Read in the passports from the defined file and return as a vector of
/// HashMap
///
/// # Arguments
///
/// * `filename` - the filename to read the passports from
fn read_in_passports(filename: &str) -> Vec< HashMap<String, String> > |
/// Count the passports that contain every required field
/// (byr, iyr, eyr, hgt, hcl, ecl, pid).  `cid` is deliberately optional.
///
/// Rewritten with iterator combinators; also drops the per-call
/// `Vec<String>` allocation and the `== false` anti-idiom.
fn check_required_fields(passports: &Vec< HashMap<String, String> >) -> u32 {
    const REQUIRED: [&str; 7] = ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"];
    passports
        .iter()
        .filter(|p| REQUIRED.iter().all(|f| p.contains_key(*f)))
        .count() as u32
}
fn validate_passport(passport: &HashMap<String, String>) -> bool {
let mut result = true;
// Birth year
if let Some(byr) = passport.get("byr") {
result = result && validate_byr(byr);
} else {
result = false;
}
// Issue Year
if let Some(iyr) = passport.get("iyr") {
result = result && validate_iyr(iyr);
} else {
result = false;
}
// Expiration Year
if let Some(eyr) = passport.get("eyr") {
result = result && validate_eyr(eyr);
} else {
result = false;
}
// Height
if let Some(hgt) = passport.get("hgt") {
result = result && validate_hgt(hgt);
} else {
result = false;
}
// Hair Colour
if let Some(hcl) = passport.get("hcl") {
result = result && validate_hcl(hcl);
} else {
result = false;
}
// Eye Colour
if let Some(ecl) = passport.get("ecl") {
result = result && validate_ecl(ecl);
} else {
result = false;
}
// Passport ID
if let Some(pid) = passport.get("pid") {
result = result && validate_pid(pid);
} else {
result = false;
}
// Country ID - Ignored
result
}
/// Validate a birth year: parses as an integer and must lie in 1920..=2002.
fn validate_byr(field: &String) -> bool {
    field
        .parse::<u32>()
        .map_or(false, |year| (1920..=2002).contains(&year))
}
/// Validate an issue year: parses as an integer and must lie in 2010..=2020.
fn validate_iyr(field: &String) -> bool {
    field
        .parse::<u32>()
        .map_or(false, |year| (2010..=2020).contains(&year))
}
/// Validate an expiration year: parses as an integer and must lie in
/// 2020..=2030.  (The original comment wrongly said 2010-2020; the code
/// and the puzzle statement both use 2020-2030.)
fn validate_eyr(field: &String) -> bool {
    field
        .parse::<u32>()
        .map_or(false, |year| (2020..=2030).contains(&year))
}
/// Validate a height: a number followed by a unit suffix.
/// "cm" requires 150..=193; "in" requires 59..=76; anything else fails.
///
/// Uses `ends_with` on the ASCII suffix instead of the original
/// reversed-character-iterator trick, so the "mc"/"ni" hack disappears.
fn validate_hgt(field: &String) -> bool {
    // Both suffixes are 2 ASCII bytes, so byte slicing below is safe.
    let (digits, is_cm) = if field.ends_with("cm") {
        (&field[..field.len() - 2], true)
    } else if field.ends_with("in") {
        (&field[..field.len() - 2], false)
    } else {
        return false;
    };
    match digits.parse::<u32>() {
        Ok(v) if is_cm => v >= 150 && v <= 193,
        Ok(v) => v >= 59 && v <= 76,
        Err(_) => false,
    }
}
/// Validate a hair colour: '#' followed by exactly six characters,
/// each 0-9 or lowercase a-f.
///
/// Replaces the per-call `Vec<char>` lookup table and the reversed-string
/// trick with a single forward iterator pass.
fn validate_hcl(field: &String) -> bool {
    if field.chars().count() != 7 {
        return false;
    }
    let mut it = field.chars();
    if it.next() != Some('#') {
        return false;
    }
    // `it` now holds the remaining six characters.
    it.all(|c| c.is_ascii_digit() || ('a'..='f').contains(&c))
}
/// Validate an eye colour: exactly one of amb blu brn gry grn hzl oth.
///
/// Compares against `&str` constants directly instead of allocating a
/// `Vec<String>` on every call.
fn validate_ecl(field: &String) -> bool {
    const VALID: [&str; 7] = ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"];
    VALID.contains(&field.as_str())
}
/// Validate a passport id: exactly nine ASCII digits (leading zeroes ok).
///
/// Replaces the per-call `Vec<char>` digit table with `is_ascii_digit`.
fn validate_pid(field: &String) -> bool {
    field.chars().count() == 9 && field.chars().all(|c| c.is_ascii_digit())
}
| {
let mut passports: Vec< HashMap<String, String> > = Vec::new();
let file = File::open(filename).unwrap();
let reader = BufReader::new(file);
let mut lines = reader.lines();
// iterate over lines until end of file
loop {
let mut line = lines.next();
if line.is_none() {
break;
}
let mut entry = HashMap::new();
loop {
let lin = String::from(line.unwrap().unwrap().trim());
// let l = lin.trim();
if lin.is_empty() {
break;
}
for item in lin.split_whitespace() {
let mut pair = item.split(':');
let key = String::from(pair.next().unwrap());
let value = String::from(pair.next().unwrap());
entry.insert(key, value);
}
line = lines.next();
if line.is_none() {
break;
}
}
passports.push(entry);
}
passports
} | identifier_body |
deck.go | package models
import (
"errors"
"fmt"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"reflect"
"strings"
"time"
)
// Deck is one flash-card deck row in the "deck" table; decks form a
// tree via ParentId (0 means a root deck — TODO confirm against schema).
type Deck struct {
	Id int `orm:"column(id);auto"`
	Title string `orm:"column(title);size(200);null"`
	ParentId int `orm:"column(parent_id);null"`
	// AllCardCount aggregates cards of this deck plus all descendants
	// (maintained by refreshDeck).
	AllCardCount int
	// OwnCardCount counts only cards directly in this deck.
	OwnCardCount int
}
// TableName tells the beego ORM which database table backs Deck.
func (t *Deck) TableName() string {
	const table = "deck"
	return table
}
// init registers the Deck model with the beego ORM at package load time
// so QueryTable(new(Deck)) and CRUD helpers work.
func init() {
	orm.RegisterModel(new(Deck))
}
// AddDeck insert a new Deck into database and returns
// last inserted Id on success.  A deck with an empty title is rejected.
func AddDeck(m *Deck) (id int64, err error) {
	o := orm.NewOrm()
	// Reject empty titles before touching the database.
	if len(m.Title) == 0 {
		return 0, errors.New("名称不能为空")
	}
	return o.Insert(m)
}
// GetDeckById retrieves Deck by Id. Returns error if
// Id doesn't exist
func GetDeckById(id int) (v *Deck, err error) {
	o := orm.NewOrm()
	deck := Deck{Id: id}
	if err = o.Read(&deck); err != nil {
		return nil, err
	}
	return &deck, nil
}
// GetAllDeck retrieves all Deck matches certain condition. Returns empty list if
// no records exist.
//
// query:  column -> value filters; dot-notation keys are rewritten to the
//         ORM's Object__Attribute form, and keys containing "isnull" take
//         a boolean value.
// fields: columns to return; empty means whole rows.
// sortby/order: parallel slices, or a single order applied to all fields.
// offset/limit: standard paging.
//
// Returns either []Deck values (no field projection) or maps of the
// requested fields.  This is beego's generated query boilerplate; the
// statement order is load-bearing, so only comments are added here.
func GetAllDeck(query map[string]string, fields []string, sortby []string, order []string,
	offset int64, limit int64) (ml []interface{}, err error) {
	o := orm.NewOrm()
	qs := o.QueryTable(new(Deck))
	// query k=v
	for k, v := range query {
		// rewrite dot-notation to Object__Attribute
		k = strings.Replace(k, ".", "__", -1)
		if strings.Contains(k, "isnull") {
			qs = qs.Filter(k, (v == "true" || v == "1"))
		} else {
			qs = qs.Filter(k, v)
		}
	}
	// order by:
	var sortFields []string
	if len(sortby) != 0 {
		if len(sortby) == len(order) {
			// 1) for each sort field, there is an associated order
			for i, v := range sortby {
				orderby := ""
				if order[i] == "desc" {
					orderby = "-" + v
				} else if order[i] == "asc" {
					orderby = v
				} else {
					return nil, errors.New("Error: Invalid order. Must be either [asc|desc]")
				}
				sortFields = append(sortFields, orderby)
			}
			qs = qs.OrderBy(sortFields...)
		} else if len(sortby) != len(order) && len(order) == 1 {
			// 2) there is exactly one order, all the sorted fields will be sorted by this order
			for _, v := range sortby {
				orderby := ""
				if order[0] == "desc" {
					orderby = "-" + v
				} else if order[0] == "asc" {
					orderby = v
				} else {
					return nil, errors.New("Error: Invalid order. Must be either [asc|desc]")
				}
				sortFields = append(sortFields, orderby)
			}
		} else if len(sortby) != len(order) && len(order) != 1 {
			return nil, errors.New("Error: 'sortby', 'order' sizes mismatch or 'order' size is not 1")
		}
	} else {
		if len(order) != 0 {
			return nil, errors.New("Error: unused 'order' fields")
		}
	}
	var l []Deck
	// NOTE(review): OrderBy is applied again here unconditionally (it was
	// already applied in branch 1 above) — harmless repeat, but verify.
	qs = qs.OrderBy(sortFields...)
	if _, err = qs.Limit(limit, offset).All(&l, fields...); err == nil {
		if len(fields) == 0 {
			for _, v := range l {
				ml = append(ml, v)
			}
		} else {
			// trim unused fields
			for _, v := range l {
				m := make(map[string]interface{})
				val := reflect.ValueOf(v)
				for _, fname := range fields {
					m[fname] = val.FieldByName(fname).Interface()
				}
				ml = append(ml, m)
			}
		}
		return ml, nil
	}
	return nil, err
}
// UpdateDeckById updates Deck by Id and returns error if
// the record to be updated doesn't exist, the update fails, or the new
// parent link would create a cycle in the deck tree.
//
// Fixes over the original:
//   - the transaction is rolled back (not leaked open) when Update or
//     CycleCheck fails;
//   - a failed Update is no longer silently committed and its error is
//     no longer lost to variable shadowing.
func UpdateDeckById(m *Deck) (err error) {
	o := orm.NewOrm()
	v := Deck{Id: m.Id}
	// ascertain id exists in the database
	if err = o.Read(&v); err != nil {
		return err
	}
	if err = o.Begin(); err != nil {
		return err
	}
	var num int64
	if num, err = o.Update(m); err != nil {
		o.Rollback()
		return err
	}
	fmt.Println("Number of records updated in database:", num)
	hasCycle, err := CycleCheck(&v, o)
	if err != nil {
		o.Rollback()
		return err
	}
	if hasCycle {
		o.Rollback()
		return errors.New("牌组有环")
	}
	return o.Commit()
}
// DeleteDeck deletes Deck by Id and returns error if
// the record to be deleted doesn't exist or the deck still has children.
func DeleteDeck(id int) (err error) {
	o := orm.NewOrm()
	// A deck with children must not be removed; the caller has to move
	// the children away first.
	childCount, err := o.QueryTable("deck").Filter("parent_id", id).Count()
	if err != nil {
		return err
	}
	if childCount > 0 {
		return errors.New("请先将子目录移走")
	}
	// ascertain id exists in the database
	v := Deck{Id: id}
	if err = o.Read(&v); err != nil {
		return err
	}
	num, err := o.Delete(&Deck{Id: id})
	if err == nil {
		fmt.Println("Number of records deleted in database:", num)
	}
	return err
}
// CycleCheck walks the deck tree breadth-first starting from `this` and
// reports true when a deck id is reached twice, i.e. the parent links
// contain a cycle.  The error result is kept for interface stability;
// it is always nil.
func CycleCheck(this *Deck, o orm.Ormer) (bool, error) {
	queue := GetSons(this, o)
	seen := map[int]int{this.Id: 1}
	for len(queue) > 0 {
		head := queue[0]
		queue = queue[1:]
		if seen[head.Id] == 1 {
			// Already visited: the tree revisits a node, so there is a cycle.
			return true, nil
		}
		seen[head.Id] = 1
		queue = append(queue, GetSons(head, o)...)
	}
	return false, nil
}
// GetSons returns the decks whose parent_id equals this deck's id
// (direct children only).
func GetSons(this *Deck, o orm.Ormer) []*Deck {
	children := []*Deck{}
	o.QueryTable("deck").Filter("parent_id", this.Id).All(&children)
	return children
}
// GetSubDirs returns every descendant deck of `this` (direct children
// first, then each child's full subtree); when with_self is true the
// deck itself is appended last.
func GetSubDirs(this *Deck, with_self bool)([]*Deck, error){
	if this == nil{
		return nil, errors.New("it's null dir")
	}
	childs := []*Deck{}
	o := orm.NewOrm()
	qs := o.QueryTable("deck")
	qs.Filter("parent_id", this.Id).All(&childs)
	// NOTE: Go evaluates the range expression once, so this loop only
	// visits the direct children captured at loop start; the descendants
	// appended below are NOT re-iterated, which keeps the recursion
	// from double-counting.
	for _, c := range(childs){
		child_sub_dirs, err := GetSubDirs(c, false)
		if err != nil{
			return nil, err
		}
		childs = append(childs, child_sub_dirs...)
	}
	if with_self{
		// leftover debug output via builtin println — consider removing
		println("with self", this.Id)
		childs = append(childs, this)
	}
	return childs, nil
}
// GetSonCard returns the cards stored directly in this deck
// (sub-decks are not included).
func GetSonCard(this *Deck) []*Card {
	o := orm.NewOrm()
	result := []*Card{}
	o.QueryTable("card").Filter("did", this.Id).All(&result)
	return result
}
// GetCards returns every card owned by `user` that lives in this deck
// or in any of its sub-decks.
func GetCards(this *Deck, user *User) ([]*Card, error) {
	// Collect the ids of the whole sub-tree, including this deck itself.
	subDirs, err := GetSubDirs(this, true)
	if err != nil {
		return nil, err
	}
	deckIds := []int{}
	for _, d := range subDirs {
		deckIds = append(deckIds, d.Id)
	}
	// Fetch the user's cards across all collected decks in one query.
	cards := []*Card{}
	o := orm.NewOrm()
	o.QueryTable("card").Filter("uid", user.Id).Filter("did__in", deckIds).All(&cards)
	return cards, nil
}
// syncCards reconciles the user's card rows with the note rows of this
// deck:
//  1. build one id map per side (note ids; existing cards keyed by note id),
//  2. cancel out the ids present in both maps,
//  3. insert a fresh card for every note that has none,
//  4. delete every card whose note is gone.
func syncCards(this *Deck, user *User)(error) {
	nidMap := map[int]int{}
	cidMap := map[int]*Card{}
	o := orm.NewOrm()
	qs1 := o.QueryTable("note")
	notes := []*Note{}
	qs1.Filter("did", this.Id).All(&notes)
	for i:=0; i<len(notes); i++{
		n := notes[i]
		nidMap[n.Id] = 1
	}
	qs2 := o.QueryTable("card")
	cards := []*Card{}
	qs2.Filter("uid", user.Id).Filter("did", this.Id).All(&cards)
	for i:=0; i<len(cards); i++{
		cidMap[cards[i].Note.Id] = cards[i]
	}
	// ids present on both sides need no action;
	// note ids without a matching card are collected for insertion
	allHave := map[int]int{}
	cidLess := map[int]int{}
	for _, note := range notes {
		nid := note.Id
		if cidMap[nid] != nil{
			allHave[nid] = 1
		}else{
			beego.Info("have no", nid)
			cidLess[nid] = 1
		}
	}
	for nid, _ := range allHave{
		delete(cidMap, nid)
		delete(nidMap, nid)
	}
	// cards left in cidMap have no backing note any more: remove them
	for _, card := range cidMap{
		o.Delete(card)
	}
	// notes without a card get a new level-0 card owned by this user
	for nid, _ := range cidLess{
		// NOTE(review): beego.Info does not expand %v like Printf;
		// nid is logged as a separate argument — verify intended output
		beego.Info("insert %v", nid)
		c := Card{Note: &Note{Id: nid}, Uid: user.Id, Did: this.Id, Level: 0, Loop: &Loop{Id: 1}}
		o.Insert(&c)
	}
	return nil
}
// GetReadyCards returns up to 100 of the user's cards in this deck tree
// whose next trigger time has already passed, with each returned card's
// note relation loaded.
func GetReadyCards(this *Deck, user *User)([]*Card, error){
	// First sync the card table against the deck's notes, then filter
	// the user's cards by ready time.
	err := syncCards(this, user)
	if err != nil{
		return nil, err
	}
	cards, err := GetCards(this, user)
	now := time.Now()
	ready_cards := []*Card{}
	if err != nil{
		return ready_cards, err
	}
	o := orm.NewOrm()
	// Keep only cards whose trigger moment is already in the past.
	for i:=0; i<len(cards); i++{
		card := cards[i]
		if card.NextTrigger.Unix() < now.Unix(){
			ready_cards = append(ready_cards, card)
		}
	}
	// Cap one study batch at 100 cards.
	if len(ready_cards) > 100{
		ready_cards = ready_cards[:100]
	}
	//relation cardInfo
	for i:=0; i<len(ready_cards); i++{
		card := ready_cards[i]
		o.LoadRelated(card, "note")
	}
	return ready_cards, nil
}
func buildMemCardFromAnkiCard(ankiCard *AnkiCard, o orm.Ormer) (*Card, error){
note := Note{Title: ankiCard.Q, Content: ankiCard.A, Type: "anki"}
nid, err := o.Insert(¬e)
if err != nil{
return nil, errors.New(fmt.Sprintf("insert note error, %v", err.Error()))
}
note.Id = int(nid)
newCard := Card{Note: ¬e, Loop: &Loop{Id: 1}}
_, e := o.Insert(&newCard)
if e != nil{
return nil, errors.New(fmt.Sprintf("insert card fail, %v", e.Error()))
}
return &newCard, nil
}
func copyCards(deck *AnkiDeck, newDir *Deck, user *User, o orm.Ormer) error{
ankiCards := []*AnkiCard{}
qs := o.QueryTable("anki_card")
qs.Filter("did", deck.DeckId).All(&ankiCards)
//批量插入notes
notes := []*Note{}
for i:=0; i<len(ankiCards); i++{
dc : | ds[i]
note := Note{Title: dc.Q, Content: dc.A, Type: "anki", Did: newDir.Id}
notes = append(notes, ¬e)
}
o.InsertMulti(len(notes), notes)
//根据notes批量插入cards
sql := fmt.Sprintf("insert into card (level, nid, did, finish, loop_id) select 0, id, did, 0, 1 from note where did=%v", newDir.Id)
_, err := o.Raw(sql).Exec()
return err
}
// GetRootDirs returns the decks associated with the given user.
// NOTE(review): the filter column "user_id" is not declared on the Deck
// struct above — verify it exists in the actual schema.
func GetRootDirs(user *User) ([]*Deck, error) {
	o := orm.NewOrm()
	dirs := []*Deck{}
	_, err := o.QueryTable("deck").Filter("user_id", user.Id).All(&dirs)
	return dirs, err
}
// CopyAnkiDeckToMemPlus clones the whole anki deck tree for `user`
// inside one transaction: it creates a Deck (plus a UserDeckRela) per
// anki deck, wires up the parent links, bulk-copies the cards, and
// finally records the trade outcome ("finish"/"fail").
//
// Fixes over the original:
//   - the first Insert's error was overwritten before it was checked;
//   - an insert error used to return with the transaction left open —
//     it is now rolled back.
func CopyAnkiDeckToMemPlus(trade *Trade, user *User) error {
	o := orm.NewOrm()
	o.Begin()
	var decks []*AnkiDeck
	o.QueryTable("anki_deck").All(&decks)
	deckMap := map[int64]*AnkiDeck{}
	for _, d := range decks {
		deckMap[d.DeckId] = d
	}
	// Create one new Deck and its user relation per anki deck.
	newDirs := map[int64]*Deck{}
	for _, deck := range deckMap {
		// An anki deck name may be qualified ("parent:child"); keep the leaf.
		name := deck.Name
		if names := strings.Split(deck.Name, ":"); len(names) > 1 {
			name = names[len(names)-1]
		}
		newDir := &Deck{Title: name}
		did, err := o.Insert(newDir)
		if err != nil {
			o.Rollback()
			return err
		}
		newDir.Id = int(did)
		rela := UserDeckRela{Uid: user.Id, Deck: &Deck{Id: newDir.Id}}
		if _, err = o.Insert(&rela); err != nil {
			o.Rollback()
			return err
		}
		newDirs[deck.DeckId] = newDir
	}
	// Wire up parent links and copy the cards.
	success := true
	for deckId, newDir := range newDirs {
		deck := deckMap[deckId]
		if deck.Pdid != 0 {
			// assumes every referenced parent deck id exists in newDirs — TODO confirm
			newDir.ParentId = newDirs[deck.Pdid].Id
			o.Update(newDir)
		}
		if err := copyCards(deck, newDir, user, o); err != nil {
			success = false
		}
	}
	// The trade status is persisted even on partial failure, matching the
	// original best-effort behaviour.
	if success {
		trade.Status = "finish"
	} else {
		trade.Status = "fail"
	}
	o.Update(trade)
	o.Commit()
	return nil
}
func refreshDeck(rela *UserDeckRela, o orm.Ormer, handles map[int]*UserDeckRela) (*UserDeckRela, error){
if d, ok := handles[rela.Id]; ok{
return d, nil
}
o.LoadRelated(rela, "did")
qs := o.QueryTable("card").Filter("did", rela.Deck.Id)
cardsCount, err := qs.Count()
if err != nil{
return nil, err
}
newCount, err := qs.Filter("level", 0).Count()
if err != nil{
return nil, err
}
now := time.Now()
readyCount, err := qs.Filter("trigger_start_time__lt", now).Count()
if err != nil{
return nil, err
}
rela.ReadyCount = int(readyCount)
rela.NewCount = int(newCount)
rela.Deck.OwnCardCount = int(cardsCount)
rela.Deck.AllCardCount = int(cardsCount)
son_decks := GetSons(rela.Deck, o)
son_deck_ids := []int{}
for i:=0; i<len(son_decks); i++{
dc := son_decks[i]
son_deck_ids = append(son_deck_ids, dc.Id)
}
son_rela_deck := []*UserDeckRela{}
if len(son_deck_ids) > 0{
o.QueryTable(TABLE_USER_DECK_RELA).Filter("did__in", son_deck_ids).Filter("uid", rela.Uid).All(&son_rela_deck)
for i:=0; i<len(son_rela_deck); i++{
son := son_rela_deck[i]
son, err = refreshDeck(son, o, handles)
if err != nil{
return nil, err
}
rela.ReadyCount += son.ReadyCount
rela.NewCount += son.NewCount
rela.Deck.AllCardCount += son.Deck.AllCardCount
fmt.Println(rela.Deck.Title, son.Deck.Title, son.Deck.AllCardCount)
}
}
o.Update(rela)
o.Update(rela.Deck)
handles[rela.Id] = rela
return rela, nil
}
func RefreshCount(relas []*UserDeckRela) {
o := orm.NewOrm()
handles := map[int]*UserDeckRela{}
for i:=0; i<len(relas); i++{
_, err := refreshDeck(relas[i], o, handles)
if err != nil{
fmt.Println(err.Error())
}
}
return
}
func GetDeckForUser(user *User)(decks []*Deck, err error){
o := orm.NewOrm()
relas := []*UserDeckRela{}
qs := o.QueryTable("user_deck_rela")
qs.Filter("uid", user.Id).All(&relas)
if err != nil{
return nil, err
}
ids := []int{}
for i:=0; i<len(relas); i++{
ids = append(ids, relas[i].Deck.Id)
}
if len(ids) != 0{
qs = o.QueryTable("deck")
qs.Filter("id__in", ids).All(&decks)
}
return decks, nil
} | = ankiCar | identifier_name |
deck.go | package models
import (
"errors"
"fmt"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"reflect"
"strings"
"time"
)
type Deck struct {
Id int `orm:"column(id);auto"`
Title string `orm:"column(title);size(200);null"`
ParentId int `orm:"column(parent_id);null"`
AllCardCount int
OwnCardCount int
}
func (t *Deck) TableName() string {
return "deck"
}
func init() {
orm.RegisterModel(new(Deck))
}
// AddDeck insert a new Deck into database and returns
// last inserted Id on success.
func AddDeck(m *Deck) (id int64, err error) {
o := orm.NewOrm()
if m.Title == ""{
return 0, errors.New("名称不能为空")
}
id, err = o.Insert(m)
return
}
// GetDeckById retrieves Deck by Id. Returns error if
// Id doesn't exist
func GetDeckById(id int) (v *Deck, err error) {
o := orm.NewOrm()
v = &Deck{Id: id}
if err = o.Read(v); err == nil {
return v, nil
}
return nil, err
}
// GetAllDeck retrieves all Deck matches certain condition. Returns empty list if
// no records exist
func GetAllDeck(query map[string]string, fields []string, sortby []string, order []string,
offset int64, limit int64) (ml []interface{}, err error) {
o := orm.NewOrm()
qs := o.QueryTable(new(Deck))
// query k=v
for k, v := range query {
// rewrite dot-notation to Object__Attribute
k = strings.Replace(k, ".", "__", -1)
if strings.Contains(k, "isnull") {
qs = qs.Filter(k, (v == "true" || v == "1"))
} else {
qs = qs.Filter(k, v)
}
}
// order by:
var sortFields []string
if len(sortby) != 0 {
if len(sortby) == len(order) {
// 1) for each sort field, there is an associated order
for i, v := range sortby {
orderby := ""
if order[i] == "desc" {
orderby = "-" + v
} else if order[i] == "asc" {
orderby = v
} else {
return nil, errors.New("Error: Invalid order. Must be either [asc|desc]")
}
sortFields = append(sortFields, orderby)
}
qs = qs.OrderBy(sortFields...)
} else if len(sortby) != len(order) && len(order) == 1 {
// 2) there is exactly one order, all the sorted fields will be sorted by this order
for _, v := range sortby {
orderby := ""
if order[0] == "desc" {
orderby = "-" + v
} else if order[0] == "asc" {
orderby = v
} else {
return nil, errors.New("Error: Invalid order. Must be either [asc|desc]")
}
sortFields = append(sortFields, orderby)
}
} else if len(sortby) != len(order) && len(order) != 1 {
return nil, errors.New("Error: 'sortby', 'order' sizes mismatch or 'order' size is not 1")
}
} else {
if len(order) != 0 {
return nil, errors.New("Error: unused 'order' fields")
}
}
var l []Deck
qs = qs.OrderBy(sortFields...)
if _, err = qs.Limit(limit, offset).All(&l, fields...); err == nil {
if len(fields) == 0 {
for _, v := range l {
ml = append(ml, v)
}
} else {
// trim unused fields
for _, v := range l {
m := make(map[string]interface{})
val := reflect.ValueOf(v)
for _, fname := range fields {
m[fname] = val.FieldByName(fname).Interface()
}
ml = append(ml, m)
}
}
return ml, nil
}
return nil, err
}
// UpdateKlgDir updates Deck by Id and returns error if
// the record to be updated doesn't exist
func UpdateDeckById(m *Deck) (err error) {
o := orm.NewOrm()
v := Deck{Id: m.Id}
// ascertain id exists in the database
if err = o.Read(&v); err == nil {
var num int64
o.Begin()
if num, err = o.Update(m); err == nil {
fmt.Println("Number of records updated in database:", num)
}
hasCycle, err := CycleCheck(&v, o)
if err != nil{
return err
}
if hasCycle{
o.Rollback()
return errors.New("牌组有环")
}else{
o.Commit()
}
}
return
}
// DeleteDeck deletes Deck by Id and returns error if
// the record to be deleted doesn't exist
func DeleteDeck(id int) (err error) {
o := orm.NewOrm()
qs := o.QueryTable("deck")
child_count, err := qs.Filter("parent_id", id).Count()
if err != nil{
return err
}
if child_count > 0{
return errors.New("请先将子目录移走")
}
v := Deck{Id: id}
// ascertain id exists in the database
if err = o.Read(&v); err == nil {
var num int64
if num, err = o.Delete(&Deck{Id: id}); err == nil {
fmt.Println("Number of records deleted in database:", num)
}
}
return
}
func CycleCheck(this *Deck, o orm.Ormer) (bool, error) {
childs := GetSons(this, o)
hasShow := map[int]int {this.Id: 1}
for len(childs) > 0{
pop := childs[0]
childs = childs[1:]
if hasShow[pop.Id] == 1{
return true, nil
}else{
hasShow[pop.Id] = 1
}
newChilds := GetSons(pop, o)
childs = append(childs, newChilds...)
}
return false, nil
}
func GetSons(this *Deck, o orm.Ormer)[]*Deck{
sons := []*Deck{}
qs := o.QueryTable("deck")
qs.Filter("parent_id", this.Id).All(&sons)
return sons
}
func GetSubDirs(this *Deck, with_self bool)([]*Deck, error){
if this == nil{
return nil, errors.New("it's null dir")
}
childs := []*Deck{}
o := orm.NewOrm()
qs := o.QueryTable("deck")
qs.Filter("parent_id", this.Id).All(&childs)
for _, c := range(childs){
child_sub_dirs, err := GetSubDirs(c, false)
if err != nil{
return nil, err
}
childs = append(childs, child_sub_dirs...)
}
if with_self{
println("with self", this.Id)
childs = append(childs, this)
}
return childs, nil
}
func GetSonCard(this *Deck)([]*Card){
cards := []*Card{}
o := orm.NewOrm()
qs := o.QueryTable("card")
qs.Filter("did", this.Id).All(&cards)
return cards
}
func GetCards(this *Deck, user *User)([]*Card, error){
//先获取所有子目录的id
subDirs, err := GetSubDirs(this, true)
if err != nil{
return nil, err
}
//获取所有task, 并且ready_time > now的
cards := []*Card{}
o := orm.NewOrm()
qs := o.QueryTable("card")
sub_dir_ids := []int{}
for _, sd := range(subDirs){
sub_dir_ids = append(sub_dir_ids, sd.Id)
}
qs.Filter("uid", user.Id).Filter("did__in", sub_dir_ids).All(&cards)
return cards, nil
}
func syncCards(this *Deck, user *User)(error) {
//1 生成左右两份 map
//2 将map中一样的部分消除
//3 将nid表多出来的,插入card
//4 将card表多出来的,删除
nidMap := map[int]int{}
cidMap := map[int]*Card{}
o := orm.NewOrm()
qs1 := o.QueryTable("note")
notes := []*Note{}
qs1.Filter("did", this.Id).All(¬es)
for i:=0; i<len(notes); i++{
n := notes[i]
nidMap[n.Id] = 1
}
qs2 := o.QueryTable("card")
cards := []*Card{}
qs2.Filter("uid", user.Id).Filter("did", this.Id).All(&cards)
for i:=0; i<len(cards); i++{
cidMap[cards[i].Note.Id] = cards[i]
}
// 都有的 不去管
// cid 没有的,添加进去
allHave := map[int]int{}
cidLess := map[int]int{}
for _, note := range notes {
nid := note.Id
if cidMap[nid] != nil{
allHave[nid] = 1
}else{
beego.Info("have no", nid)
cidLess[nid] = 1
}
}
for nid, _ := range allHave{
delete(cidMap, nid)
delete(nidMap, nid)
}
// cid有,nid没有的,删掉cid
for _, card := range cidMap{
o.Delete(card)
}
// nid有,cid没有的,添加到user名下
for nid, _ := range cidLess{
beego.Info("insert %v", nid)
c := Card{Note: &Note{Id: nid}, Uid: user.Id, Did: this.Id, Level: 0, Loop: &Loop{Id: 1}}
o.Insert(&c)
}
return nil
}
func GetReadyCards(this *Deck, user *User)([]*Card, error){
//筛选子目录下的所有card,然后过滤readytime
err := syncCards(this, user)
if err != nil{
return nil, err
}
cards, err := GetCards(this, user)
now := time.Now()
ready_cards := []*Card{}
if err != nil{
return ready_cards, err
}
o := orm.NewOrm()
// 筛选出来没有建立task的进行新建
for i:=0; i<len(cards); i++{
card := cards[i]
if card.NextTrigger.Unix() < now.Unix(){
ready_cards = append(ready_cards, card)
}
}
if len(ready_cards) > 100{
ready_cards = ready_cards[:100]
}
//relation cardInfo
for i:=0; i<len(ready_cards); i++{
card := ready_cards[i]
o.LoadRelated(card, "note")
}
return ready_cards, nil
}
func buildMemCardFromAnkiCard(ankiCard *AnkiCard, o orm.Ormer) (*Card, error){
note := Note{Title: ankiCard.Q, Content: ankiCard.A, Type: "anki"}
nid, err := o.Insert(¬e)
if err != nil{
return nil, errors.New(fmt.Sprintf("insert note error, %v", err.Error()))
}
note.Id = int(nid)
newCard := Card{Note: ¬e, Loop: &Loop{Id: 1}}
_, e := o.Insert(&newCard)
if e != nil{
return nil, errors.New(fmt.Sprintf("insert card fail, %v", e.Error()))
}
return &newCard, nil
}
func copyCards(deck *AnkiDeck, newDir *Deck, user *User, o orm.Ormer) error{
ankiCards := []*AnkiCard{}
qs := o.QueryTable("anki_card")
qs.Filter("did", deck.DeckId).All(&ankiCards)
//批量插入notes
notes := []*Note{}
for i:=0; i<len(ankiCards); i++{
dc := ankiCards[i]
note := Note{Title: dc.Q, Content: dc.A, Type: "anki", Did: newDir.Id}
notes = append(notes, ¬e)
}
o.InsertMulti(len(notes), notes)
//根据notes批量插入cards
sql := fmt.Sprintf("insert into card (level, nid, did, finish, loop_id) select 0, id, did, 0, 1 from note where did=%v", newDir.Id)
_, err := o.Raw(sql).Exec()
return err
}
func GetRootDirs(user *User) ([]*Deck, error) {
o := orm.NewOrm()
qs := o.QueryTable("deck")
dirs := []*Deck{}
_, err := qs.Filter("user_id", user.Id).All(&dirs)
return dirs, err
}
func CopyAnkiDeckToMemPlus(trade *Trade, user *User) error {
//先copy新deck
//批量插入note
//批量生成cards
o := orm.NewOrm()
o.Begin()
qs := o.QueryTable("anki_deck")
var decks []*AnkiDeck
qs.All(&decks)
deckMap := map[int64]*AnkiDeck{}
for i:=0; i<len(decks); i++{
deckMap[decks[i].DeckId] = decks[i]
}
newDirs := map[int64]*Deck{}
//初始化deck
for _, deck := range deckMap{
names := strings.Split(deck.Name, ":")
name := deck.Name
if len(names) > 1{
name = names[len(names)-1]
}
newDir := &Deck{Title: name}
did, err := o.Insert(newDir)
newDir.Id = int(did)
rela := UserDeckRela{Uid: user.Id, Deck: &Deck{Id: newDir.Id}}
_, err = o.Insert(&rela)
if err != nil{
return err
}
newDirs[deck.DeckId] = newDir
}
success := true
//copy deck
for deckId, newDir := range newDirs{
deck := deckMap[deckId]
if deck.Pdid != 0{
parentDir := newDirs[deck.Pdid]
newDir.ParentId = parentDir.Id
o.Update(newDir)
}
err := copyCards(deck, newDir, user, o)
if err != nil{
success = false
}
}
if success{
trade.Status = "finish"
}else{
trade.Status = "fail"
}
o.Update(trade)
o.Commit()
return nil
}
func refreshDeck(rela *UserDeckRela, o orm.Ormer, handles map[int]*UserDeckRela) (*UserDeckRela, error){
if d, ok := handles[rela.Id]; ok{
return d, n | a, "did")
qs := o.QueryTable("card").Filter("did", rela.Deck.Id)
cardsCount, err := qs.Count()
if err != nil{
return nil, err
}
newCount, err := qs.Filter("level", 0).Count()
if err != nil{
return nil, err
}
now := time.Now()
readyCount, err := qs.Filter("trigger_start_time__lt", now).Count()
if err != nil{
return nil, err
}
rela.ReadyCount = int(readyCount)
rela.NewCount = int(newCount)
rela.Deck.OwnCardCount = int(cardsCount)
rela.Deck.AllCardCount = int(cardsCount)
son_decks := GetSons(rela.Deck, o)
son_deck_ids := []int{}
for i:=0; i<len(son_decks); i++{
dc := son_decks[i]
son_deck_ids = append(son_deck_ids, dc.Id)
}
son_rela_deck := []*UserDeckRela{}
if len(son_deck_ids) > 0{
o.QueryTable(TABLE_USER_DECK_RELA).Filter("did__in", son_deck_ids).Filter("uid", rela.Uid).All(&son_rela_deck)
for i:=0; i<len(son_rela_deck); i++{
son := son_rela_deck[i]
son, err = refreshDeck(son, o, handles)
if err != nil{
return nil, err
}
rela.ReadyCount += son.ReadyCount
rela.NewCount += son.NewCount
rela.Deck.AllCardCount += son.Deck.AllCardCount
fmt.Println(rela.Deck.Title, son.Deck.Title, son.Deck.AllCardCount)
}
}
o.Update(rela)
o.Update(rela.Deck)
handles[rela.Id] = rela
return rela, nil
}
func RefreshCount(relas []*UserDeckRela) {
o := orm.NewOrm()
handles := map[int]*UserDeckRela{}
for i:=0; i<len(relas); i++{
_, err := refreshDeck(relas[i], o, handles)
if err != nil{
fmt.Println(err.Error())
}
}
return
}
func GetDeckForUser(user *User)(decks []*Deck, err error){
o := orm.NewOrm()
relas := []*UserDeckRela{}
qs := o.QueryTable("user_deck_rela")
qs.Filter("uid", user.Id).All(&relas)
if err != nil{
return nil, err
}
ids := []int{}
for i:=0; i<len(relas); i++{
ids = append(ids, relas[i].Deck.Id)
}
if len(ids) != 0{
qs = o.QueryTable("deck")
qs.Filter("id__in", ids).All(&decks)
}
return decks, nil
} | il
}
o.LoadRelated(rel | conditional_block |
deck.go | package models
import (
"errors"
"fmt"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"reflect"
"strings"
"time"
)
type Deck struct {
Id int `orm:"column(id);auto"`
Title string `orm:"column(title);size(200);null"`
ParentId int `orm:"column(parent_id);null"`
AllCardCount int
OwnCardCount int
}
func (t *Deck) TableName() string {
return "deck"
}
func init() {
orm.RegisterModel(new(Deck))
}
// AddDeck insert a new Deck into database and returns
// last inserted Id on success.
func AddDeck(m *Deck) (id int64, err error) {
o := orm.NewOrm()
if m.Title == ""{
return 0, errors.New("名称不能为空")
}
id, err = o.Insert(m)
return
}
// GetDeckById retrieves Deck by Id. Returns error if
// Id doesn't exist
func GetDeckById(id int) (v *Deck, err error) {
o := orm.NewOrm()
v = &Deck{Id: id}
if err = o.Read(v); err == nil {
return v, nil
}
return nil, err
}
// GetAllDeck retrieves all Deck matches certain condition. Returns empty list if
// no records exist
func GetAllDeck(query map[string]string, fields []string, sortby []string, order []string,
offset int64, limit int64) (ml []interface{}, err error) {
o := orm.NewOrm()
qs := o.QueryTable(new(Deck))
// query k=v
for k, v := range query {
// rewrite dot-notation to Object__Attribute
k = strings.Replace(k, ".", "__", -1)
if strings.Contains(k, "isnull") {
qs = qs.Filter(k, (v == "true" || v == "1"))
} else {
qs = qs.Filter(k, v)
}
}
// order by:
var sortFields []string
if len(sortby) != 0 {
if len(sortby) == len(order) {
// 1) for each sort field, there is an associated order
for i, v := range sortby {
orderby := ""
if order[i] == "desc" {
orderby = "-" + v
} else if order[i] == "asc" {
orderby = v
} else {
return nil, errors.New("Error: Invalid order. Must be either [asc|desc]")
}
sortFields = append(sortFields, orderby)
}
qs = qs.OrderBy(sortFields...)
} else if len(sortby) != len(order) && len(order) == 1 {
// 2) there is exactly one order, all the sorted fields will be sorted by this order
for _, v := range sortby {
orderby := ""
if order[0] == "desc" {
orderby = "-" + v
} else if order[0] == "asc" {
orderby = v
} else {
return nil, errors.New("Error: Invalid order. Must be either [asc|desc]")
}
sortFields = append(sortFields, orderby)
}
} else if len(sortby) != len(order) && len(order) != 1 {
return nil, errors.New("Error: 'sortby', 'order' sizes mismatch or 'order' size is not 1")
}
} else {
if len(order) != 0 {
return nil, errors.New("Error: unused 'order' fields")
}
}
var l []Deck
qs = qs.OrderBy(sortFields...)
if _, err = qs.Limit(limit, offset).All(&l, fields...); err == nil {
if len(fields) == 0 {
for _, v := range l {
ml = append(ml, v)
}
} else {
// trim unused fields
for _, v := range l {
m := make(map[string]interface{})
val := reflect.ValueOf(v)
for _, fname := range fields {
m[fname] = val.FieldByName(fname).Interface()
}
ml = append(ml, m)
}
}
return ml, nil
}
return nil, err
}
// UpdateKlgDir updates Deck by Id and returns error if
// the record to be updated doesn't exist
func UpdateDeckById(m *Deck) (err error) {
o := orm.NewOrm()
v := Deck{Id: m.Id}
// ascertain id exists in the database
if err = o.Read(&v); err == nil {
var num int64
o.Begin()
if num, err = o.Update(m); err == nil {
fmt.Println("Number of records updated in database:", num)
}
hasCycle, err := CycleCheck(&v, o)
if err != nil{
return err
}
if hasCycle{
o.Rollback()
return errors.New("牌组有环")
}else{
o.Commit()
}
}
return
}
// DeleteDeck deletes Deck by Id and returns error if
// the record to be deleted doesn't exist
func DeleteDeck(id int) (err error) {
o := orm.NewOrm()
qs := o.QueryTable("deck")
child_count, err := qs.Filter("parent_id", id).Count()
if err != nil{
return err
}
if child_count > 0{
return errors.New("请先将子目录移走")
}
v := Deck{Id: id}
// ascertain id exists in the database
if err = o.Read(&v); err == nil {
var num int64
if num, err = o.Delete(&Deck{Id: id}); err == nil {
fmt.Println("Number of records deleted in database:", num)
}
}
return
}
func CycleCheck(this *Deck, o orm.Ormer) (bool, error) {
childs := GetSons(this, o)
hasShow := map[int]int {this.Id: 1}
for len(childs) > 0{
pop := childs[0]
childs = childs[1:]
if hasShow[pop.Id] == 1{
return true, nil
}else{
hasShow[pop.Id] = 1
}
newChilds := GetSons(pop, o)
childs = append(childs, newChilds...)
}
return false, nil
}
func GetSons(this *Deck, o orm.Ormer)[]*Deck{
sons := []*Deck{}
qs := o.QueryTable("deck")
qs.Filter("parent_id", this.Id).All(&sons)
return sons
}
func GetSubDirs(this *Deck, with_self bool)([]*Deck, error){
if this == nil{
return nil, errors.New("it's null dir")
}
childs := []*Deck{}
o := orm.NewOrm()
qs := o.QueryTable("deck")
qs.Filter("parent_id", this.Id).All(&childs)
for _, c := range(childs){
child_sub_dirs, err := GetSubDirs(c, false)
if err != nil{
return nil, err
}
childs = append(childs, child_sub_dirs...)
}
if with_self{
println("with self", this.Id)
childs = append(childs, this)
}
return childs, nil
}
func GetSonCard(this *Deck)([]*Card){
cards := []*Card{}
o := orm.NewO | ser)([]*Card, error){
//先获取所有子目录的id
subDirs, err := GetSubDirs(this, true)
if err != nil{
return nil, err
}
//获取所有task, 并且ready_time > now的
cards := []*Card{}
o := orm.NewOrm()
qs := o.QueryTable("card")
sub_dir_ids := []int{}
for _, sd := range(subDirs){
sub_dir_ids = append(sub_dir_ids, sd.Id)
}
qs.Filter("uid", user.Id).Filter("did__in", sub_dir_ids).All(&cards)
return cards, nil
}
func syncCards(this *Deck, user *User)(error) {
//1 生成左右两份 map
//2 将map中一样的部分消除
//3 将nid表多出来的,插入card
//4 将card表多出来的,删除
nidMap := map[int]int{}
cidMap := map[int]*Card{}
o := orm.NewOrm()
qs1 := o.QueryTable("note")
notes := []*Note{}
qs1.Filter("did", this.Id).All(¬es)
for i:=0; i<len(notes); i++{
n := notes[i]
nidMap[n.Id] = 1
}
qs2 := o.QueryTable("card")
cards := []*Card{}
qs2.Filter("uid", user.Id).Filter("did", this.Id).All(&cards)
for i:=0; i<len(cards); i++{
cidMap[cards[i].Note.Id] = cards[i]
}
// 都有的 不去管
// cid 没有的,添加进去
allHave := map[int]int{}
cidLess := map[int]int{}
for _, note := range notes {
nid := note.Id
if cidMap[nid] != nil{
allHave[nid] = 1
}else{
beego.Info("have no", nid)
cidLess[nid] = 1
}
}
for nid, _ := range allHave{
delete(cidMap, nid)
delete(nidMap, nid)
}
// cid有,nid没有的,删掉cid
for _, card := range cidMap{
o.Delete(card)
}
// nid有,cid没有的,添加到user名下
for nid, _ := range cidLess{
beego.Info("insert %v", nid)
c := Card{Note: &Note{Id: nid}, Uid: user.Id, Did: this.Id, Level: 0, Loop: &Loop{Id: 1}}
o.Insert(&c)
}
return nil
}
func GetReadyCards(this *Deck, user *User)([]*Card, error){
//筛选子目录下的所有card,然后过滤readytime
err := syncCards(this, user)
if err != nil{
return nil, err
}
cards, err := GetCards(this, user)
now := time.Now()
ready_cards := []*Card{}
if err != nil{
return ready_cards, err
}
o := orm.NewOrm()
// 筛选出来没有建立task的进行新建
for i:=0; i<len(cards); i++{
card := cards[i]
if card.NextTrigger.Unix() < now.Unix(){
ready_cards = append(ready_cards, card)
}
}
if len(ready_cards) > 100{
ready_cards = ready_cards[:100]
}
//relation cardInfo
for i:=0; i<len(ready_cards); i++{
card := ready_cards[i]
o.LoadRelated(card, "note")
}
return ready_cards, nil
}
func buildMemCardFromAnkiCard(ankiCard *AnkiCard, o orm.Ormer) (*Card, error){
note := Note{Title: ankiCard.Q, Content: ankiCard.A, Type: "anki"}
nid, err := o.Insert(¬e)
if err != nil{
return nil, errors.New(fmt.Sprintf("insert note error, %v", err.Error()))
}
note.Id = int(nid)
newCard := Card{Note: ¬e, Loop: &Loop{Id: 1}}
_, e := o.Insert(&newCard)
if e != nil{
return nil, errors.New(fmt.Sprintf("insert card fail, %v", e.Error()))
}
return &newCard, nil
}
func copyCards(deck *AnkiDeck, newDir *Deck, user *User, o orm.Ormer) error{
ankiCards := []*AnkiCard{}
qs := o.QueryTable("anki_card")
qs.Filter("did", deck.DeckId).All(&ankiCards)
//批量插入notes
notes := []*Note{}
for i:=0; i<len(ankiCards); i++{
dc := ankiCards[i]
note := Note{Title: dc.Q, Content: dc.A, Type: "anki", Did: newDir.Id}
notes = append(notes, ¬e)
}
o.InsertMulti(len(notes), notes)
//根据notes批量插入cards
sql := fmt.Sprintf("insert into card (level, nid, did, finish, loop_id) select 0, id, did, 0, 1 from note where did=%v", newDir.Id)
_, err := o.Raw(sql).Exec()
return err
}
func GetRootDirs(user *User) ([]*Deck, error) {
o := orm.NewOrm()
qs := o.QueryTable("deck")
dirs := []*Deck{}
_, err := qs.Filter("user_id", user.Id).All(&dirs)
return dirs, err
}
func CopyAnkiDeckToMemPlus(trade *Trade, user *User) error {
//先copy新deck
//批量插入note
//批量生成cards
o := orm.NewOrm()
o.Begin()
qs := o.QueryTable("anki_deck")
var decks []*AnkiDeck
qs.All(&decks)
deckMap := map[int64]*AnkiDeck{}
for i:=0; i<len(decks); i++{
deckMap[decks[i].DeckId] = decks[i]
}
newDirs := map[int64]*Deck{}
//初始化deck
for _, deck := range deckMap{
names := strings.Split(deck.Name, ":")
name := deck.Name
if len(names) > 1{
name = names[len(names)-1]
}
newDir := &Deck{Title: name}
did, err := o.Insert(newDir)
newDir.Id = int(did)
rela := UserDeckRela{Uid: user.Id, Deck: &Deck{Id: newDir.Id}}
_, err = o.Insert(&rela)
if err != nil{
return err
}
newDirs[deck.DeckId] = newDir
}
success := true
//copy deck
for deckId, newDir := range newDirs{
deck := deckMap[deckId]
if deck.Pdid != 0{
parentDir := newDirs[deck.Pdid]
newDir.ParentId = parentDir.Id
o.Update(newDir)
}
err := copyCards(deck, newDir, user, o)
if err != nil{
success = false
}
}
if success{
trade.Status = "finish"
}else{
trade.Status = "fail"
}
o.Update(trade)
o.Commit()
return nil
}
func refreshDeck(rela *UserDeckRela, o orm.Ormer, handles map[int]*UserDeckRela) (*UserDeckRela, error){
if d, ok := handles[rela.Id]; ok{
return d, nil
}
o.LoadRelated(rela, "did")
qs := o.QueryTable("card").Filter("did", rela.Deck.Id)
cardsCount, err := qs.Count()
if err != nil{
return nil, err
}
newCount, err := qs.Filter("level", 0).Count()
if err != nil{
return nil, err
}
now := time.Now()
readyCount, err := qs.Filter("trigger_start_time__lt", now).Count()
if err != nil{
return nil, err
}
rela.ReadyCount = int(readyCount)
rela.NewCount = int(newCount)
rela.Deck.OwnCardCount = int(cardsCount)
rela.Deck.AllCardCount = int(cardsCount)
son_decks := GetSons(rela.Deck, o)
son_deck_ids := []int{}
for i:=0; i<len(son_decks); i++{
dc := son_decks[i]
son_deck_ids = append(son_deck_ids, dc.Id)
}
son_rela_deck := []*UserDeckRela{}
if len(son_deck_ids) > 0{
o.QueryTable(TABLE_USER_DECK_RELA).Filter("did__in", son_deck_ids).Filter("uid", rela.Uid).All(&son_rela_deck)
for i:=0; i<len(son_rela_deck); i++{
son := son_rela_deck[i]
son, err = refreshDeck(son, o, handles)
if err != nil{
return nil, err
}
rela.ReadyCount += son.ReadyCount
rela.NewCount += son.NewCount
rela.Deck.AllCardCount += son.Deck.AllCardCount
fmt.Println(rela.Deck.Title, son.Deck.Title, son.Deck.AllCardCount)
}
}
o.Update(rela)
o.Update(rela.Deck)
handles[rela.Id] = rela
return rela, nil
}
func RefreshCount(relas []*UserDeckRela) {
o := orm.NewOrm()
handles := map[int]*UserDeckRela{}
for i:=0; i<len(relas); i++{
_, err := refreshDeck(relas[i], o, handles)
if err != nil{
fmt.Println(err.Error())
}
}
return
}
func GetDeckForUser(user *User)(decks []*Deck, err error){
o := orm.NewOrm()
relas := []*UserDeckRela{}
qs := o.QueryTable("user_deck_rela")
qs.Filter("uid", user.Id).All(&relas)
if err != nil{
return nil, err
}
ids := []int{}
for i:=0; i<len(relas); i++{
ids = append(ids, relas[i].Deck.Id)
}
if len(ids) != 0{
qs = o.QueryTable("deck")
qs.Filter("id__in", ids).All(&decks)
}
return decks, nil
} | rm()
qs := o.QueryTable("card")
qs.Filter("did", this.Id).All(&cards)
return cards
}
func GetCards(this *Deck, user *U | identifier_body |
deck.go | package models
import (
"errors"
"fmt"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"reflect"
"strings"
"time"
)
type Deck struct {
Id int `orm:"column(id);auto"`
Title string `orm:"column(title);size(200);null"`
ParentId int `orm:"column(parent_id);null"`
AllCardCount int
OwnCardCount int
}
func (t *Deck) TableName() string {
return "deck"
}
func init() {
orm.RegisterModel(new(Deck))
}
// AddDeck insert a new Deck into database and returns
// last inserted Id on success.
func AddDeck(m *Deck) (id int64, err error) {
o := orm.NewOrm()
if m.Title == ""{
return 0, errors.New("名称不能为空")
}
id, err = o.Insert(m)
return
}
// GetDeckById retrieves Deck by Id. Returns error if
// Id doesn't exist
func GetDeckById(id int) (v *Deck, err error) {
o := orm.NewOrm()
v = &Deck{Id: id}
if err = o.Read(v); err == nil {
return v, nil
}
return nil, err
}
// GetAllDeck retrieves all Deck matches certain condition. Returns empty list if
// no records exist
func GetAllDeck(query map[string]string, fields []string, sortby []string, order []string,
offset int64, limit int64) (ml []interface{}, err error) {
o := orm.NewOrm()
qs := o.QueryTable(new(Deck))
// query k=v
for k, v := range query {
// rewrite dot-notation to Object__Attribute
k = strings.Replace(k, ".", "__", -1)
if strings.Contains(k, "isnull") {
qs = qs.Filter(k, (v == "true" || v == "1"))
} else {
qs = qs.Filter(k, v)
}
}
// order by:
var sortFields []string
if len(sortby) != 0 {
if len(sortby) == len(order) {
// 1) for each sort field, there is an associated order
for i, v := range sortby {
orderby := ""
if order[i] == "desc" {
orderby = "-" + v
} else if order[i] == "asc" {
orderby = v
} else {
return nil, errors.New("Error: Invalid order. Must be either [asc|desc]")
}
sortFields = append(sortFields, orderby)
}
qs = qs.OrderBy(sortFields...)
} else if len(sortby) != len(order) && len(order) == 1 {
// 2) there is exactly one order, all the sorted fields will be sorted by this order
for _, v := range sortby {
orderby := ""
if order[0] == "desc" {
orderby = "-" + v
} else if order[0] == "asc" {
orderby = v
} else {
return nil, errors.New("Error: Invalid order. Must be either [asc|desc]")
}
sortFields = append(sortFields, orderby)
}
} else if len(sortby) != len(order) && len(order) != 1 {
return nil, errors.New("Error: 'sortby', 'order' sizes mismatch or 'order' size is not 1")
}
} else {
if len(order) != 0 {
return nil, errors.New("Error: unused 'order' fields")
}
}
var l []Deck
qs = qs.OrderBy(sortFields...)
if _, err = qs.Limit(limit, offset).All(&l, fields...); err == nil {
if len(fields) == 0 {
for _, v := range l {
ml = append(ml, v)
}
} else {
// trim unused fields
for _, v := range l {
m := make(map[string]interface{})
val := reflect.ValueOf(v)
for _, fname := range fields {
m[fname] = val.FieldByName(fname).Interface()
}
ml = append(ml, m)
}
}
return ml, nil
}
return nil, err
}
// UpdateKlgDir updates Deck by Id and returns error if
// the record to be updated doesn't exist
func UpdateDeckById(m *Deck) (err error) {
o := orm.NewOrm()
v := Deck{Id: m.Id}
// ascertain id exists in the database
if err = o.Read(&v); err == nil {
var num int64
o.Begin()
if num, err = o.Update(m); err == nil {
fmt.Println("Number of records updated in database:", num)
}
hasCycle, err := CycleCheck(&v, o)
if err != nil{
return err
}
if hasCycle{
o.Rollback()
return errors.New("牌组有环")
}else{
o.Commit()
}
}
return
}
// DeleteDeck deletes Deck by Id and returns error if
// the record to be deleted doesn't exist
func DeleteDeck(id int) (err error) {
o := orm.NewOrm()
qs := o.QueryTable("deck")
child_count, err := qs.Filter("parent_id", id).Count()
if err != nil{
return err
}
if child_count > 0{
return errors.New("请先将子目录移走")
}
v := Deck{Id: id}
// ascertain id exists in the database
if err = o.Read(&v); err == nil {
var num int64
if num, err = o.Delete(&Deck{Id: id}); err == nil {
fmt.Println("Number of records deleted in database:", num)
}
}
return
}
func CycleCheck(this *Deck, o orm.Ormer) (bool, error) {
childs := GetSons(this, o)
hasShow := map[int]int {this.Id: 1}
for len(childs) > 0{
pop := childs[0]
childs = childs[1:]
if hasShow[pop.Id] == 1{
return true, nil
}else{
hasShow[pop.Id] = 1
}
newChilds := GetSons(pop, o)
childs = append(childs, newChilds...)
}
return false, nil
}
func GetSons(this *Deck, o orm.Ormer)[]*Deck{
sons := []*Deck{}
qs := o.QueryTable("deck")
qs.Filter("parent_id", this.Id).All(&sons)
return sons
}
func GetSubDirs(this *Deck, with_self bool)([]*Deck, error){
if this == nil{
return nil, errors.New("it's null dir")
}
childs := []*Deck{}
o := orm.NewOrm()
qs := o.QueryTable("deck")
qs.Filter("parent_id", this.Id).All(&childs)
for _, c := range(childs){
child_sub_dirs, err := GetSubDirs(c, false)
if err != nil{
return nil, err
}
childs = append(childs, child_sub_dirs...)
}
if with_self{
println("with self", this.Id)
childs = append(childs, this)
}
return childs, nil
}
func GetSonCard(this *Deck)([]*Card){
cards := []*Card{}
o := orm.NewOrm()
qs := o.QueryTable("card")
qs.Filter("did", this.Id).All(&cards)
return cards
}
func GetCards(this *Deck, user *User)([]*Card, error){
//先获取所有子目录的id
subDirs, err := GetSubDirs(this, true)
if err != nil{
return nil, err
}
//获取所有task, 并且ready_time > now的
cards := []*Card{}
o := orm.NewOrm()
qs := o.QueryTable("card")
sub_dir_ids := []int{}
for _, sd := range(subDirs){
sub_dir_ids = append(sub_dir_ids, sd.Id)
}
qs.Filter("uid", user.Id).Filter("did__in", sub_dir_ids).All(&cards)
return cards, nil
}
| //2 将map中一样的部分消除
//3 将nid表多出来的,插入card
//4 将card表多出来的,删除
nidMap := map[int]int{}
cidMap := map[int]*Card{}
o := orm.NewOrm()
qs1 := o.QueryTable("note")
notes := []*Note{}
qs1.Filter("did", this.Id).All(¬es)
for i:=0; i<len(notes); i++{
n := notes[i]
nidMap[n.Id] = 1
}
qs2 := o.QueryTable("card")
cards := []*Card{}
qs2.Filter("uid", user.Id).Filter("did", this.Id).All(&cards)
for i:=0; i<len(cards); i++{
cidMap[cards[i].Note.Id] = cards[i]
}
// 都有的 不去管
// cid 没有的,添加进去
allHave := map[int]int{}
cidLess := map[int]int{}
for _, note := range notes {
nid := note.Id
if cidMap[nid] != nil{
allHave[nid] = 1
}else{
beego.Info("have no", nid)
cidLess[nid] = 1
}
}
for nid, _ := range allHave{
delete(cidMap, nid)
delete(nidMap, nid)
}
// cid有,nid没有的,删掉cid
for _, card := range cidMap{
o.Delete(card)
}
// nid有,cid没有的,添加到user名下
for nid, _ := range cidLess{
beego.Info("insert %v", nid)
c := Card{Note: &Note{Id: nid}, Uid: user.Id, Did: this.Id, Level: 0, Loop: &Loop{Id: 1}}
o.Insert(&c)
}
return nil
}
func GetReadyCards(this *Deck, user *User)([]*Card, error){
//筛选子目录下的所有card,然后过滤readytime
err := syncCards(this, user)
if err != nil{
return nil, err
}
cards, err := GetCards(this, user)
now := time.Now()
ready_cards := []*Card{}
if err != nil{
return ready_cards, err
}
o := orm.NewOrm()
// 筛选出来没有建立task的进行新建
for i:=0; i<len(cards); i++{
card := cards[i]
if card.NextTrigger.Unix() < now.Unix(){
ready_cards = append(ready_cards, card)
}
}
if len(ready_cards) > 100{
ready_cards = ready_cards[:100]
}
//relation cardInfo
for i:=0; i<len(ready_cards); i++{
card := ready_cards[i]
o.LoadRelated(card, "note")
}
return ready_cards, nil
}
func buildMemCardFromAnkiCard(ankiCard *AnkiCard, o orm.Ormer) (*Card, error){
note := Note{Title: ankiCard.Q, Content: ankiCard.A, Type: "anki"}
nid, err := o.Insert(¬e)
if err != nil{
return nil, errors.New(fmt.Sprintf("insert note error, %v", err.Error()))
}
note.Id = int(nid)
newCard := Card{Note: ¬e, Loop: &Loop{Id: 1}}
_, e := o.Insert(&newCard)
if e != nil{
return nil, errors.New(fmt.Sprintf("insert card fail, %v", e.Error()))
}
return &newCard, nil
}
func copyCards(deck *AnkiDeck, newDir *Deck, user *User, o orm.Ormer) error{
ankiCards := []*AnkiCard{}
qs := o.QueryTable("anki_card")
qs.Filter("did", deck.DeckId).All(&ankiCards)
//批量插入notes
notes := []*Note{}
for i:=0; i<len(ankiCards); i++{
dc := ankiCards[i]
note := Note{Title: dc.Q, Content: dc.A, Type: "anki", Did: newDir.Id}
notes = append(notes, ¬e)
}
o.InsertMulti(len(notes), notes)
//根据notes批量插入cards
sql := fmt.Sprintf("insert into card (level, nid, did, finish, loop_id) select 0, id, did, 0, 1 from note where did=%v", newDir.Id)
_, err := o.Raw(sql).Exec()
return err
}
func GetRootDirs(user *User) ([]*Deck, error) {
o := orm.NewOrm()
qs := o.QueryTable("deck")
dirs := []*Deck{}
_, err := qs.Filter("user_id", user.Id).All(&dirs)
return dirs, err
}
func CopyAnkiDeckToMemPlus(trade *Trade, user *User) error {
//先copy新deck
//批量插入note
//批量生成cards
o := orm.NewOrm()
o.Begin()
qs := o.QueryTable("anki_deck")
var decks []*AnkiDeck
qs.All(&decks)
deckMap := map[int64]*AnkiDeck{}
for i:=0; i<len(decks); i++{
deckMap[decks[i].DeckId] = decks[i]
}
newDirs := map[int64]*Deck{}
//初始化deck
for _, deck := range deckMap{
names := strings.Split(deck.Name, ":")
name := deck.Name
if len(names) > 1{
name = names[len(names)-1]
}
newDir := &Deck{Title: name}
did, err := o.Insert(newDir)
newDir.Id = int(did)
rela := UserDeckRela{Uid: user.Id, Deck: &Deck{Id: newDir.Id}}
_, err = o.Insert(&rela)
if err != nil{
return err
}
newDirs[deck.DeckId] = newDir
}
success := true
//copy deck
for deckId, newDir := range newDirs{
deck := deckMap[deckId]
if deck.Pdid != 0{
parentDir := newDirs[deck.Pdid]
newDir.ParentId = parentDir.Id
o.Update(newDir)
}
err := copyCards(deck, newDir, user, o)
if err != nil{
success = false
}
}
if success{
trade.Status = "finish"
}else{
trade.Status = "fail"
}
o.Update(trade)
o.Commit()
return nil
}
func refreshDeck(rela *UserDeckRela, o orm.Ormer, handles map[int]*UserDeckRela) (*UserDeckRela, error){
if d, ok := handles[rela.Id]; ok{
return d, nil
}
o.LoadRelated(rela, "did")
qs := o.QueryTable("card").Filter("did", rela.Deck.Id)
cardsCount, err := qs.Count()
if err != nil{
return nil, err
}
newCount, err := qs.Filter("level", 0).Count()
if err != nil{
return nil, err
}
now := time.Now()
readyCount, err := qs.Filter("trigger_start_time__lt", now).Count()
if err != nil{
return nil, err
}
rela.ReadyCount = int(readyCount)
rela.NewCount = int(newCount)
rela.Deck.OwnCardCount = int(cardsCount)
rela.Deck.AllCardCount = int(cardsCount)
son_decks := GetSons(rela.Deck, o)
son_deck_ids := []int{}
for i:=0; i<len(son_decks); i++{
dc := son_decks[i]
son_deck_ids = append(son_deck_ids, dc.Id)
}
son_rela_deck := []*UserDeckRela{}
if len(son_deck_ids) > 0{
o.QueryTable(TABLE_USER_DECK_RELA).Filter("did__in", son_deck_ids).Filter("uid", rela.Uid).All(&son_rela_deck)
for i:=0; i<len(son_rela_deck); i++{
son := son_rela_deck[i]
son, err = refreshDeck(son, o, handles)
if err != nil{
return nil, err
}
rela.ReadyCount += son.ReadyCount
rela.NewCount += son.NewCount
rela.Deck.AllCardCount += son.Deck.AllCardCount
fmt.Println(rela.Deck.Title, son.Deck.Title, son.Deck.AllCardCount)
}
}
o.Update(rela)
o.Update(rela.Deck)
handles[rela.Id] = rela
return rela, nil
}
func RefreshCount(relas []*UserDeckRela) {
o := orm.NewOrm()
handles := map[int]*UserDeckRela{}
for i:=0; i<len(relas); i++{
_, err := refreshDeck(relas[i], o, handles)
if err != nil{
fmt.Println(err.Error())
}
}
return
}
func GetDeckForUser(user *User)(decks []*Deck, err error){
o := orm.NewOrm()
relas := []*UserDeckRela{}
qs := o.QueryTable("user_deck_rela")
qs.Filter("uid", user.Id).All(&relas)
if err != nil{
return nil, err
}
ids := []int{}
for i:=0; i<len(relas); i++{
ids = append(ids, relas[i].Deck.Id)
}
if len(ids) != 0{
qs = o.QueryTable("deck")
qs.Filter("id__in", ids).All(&decks)
}
return decks, nil
} | func syncCards(this *Deck, user *User)(error) {
//1 生成左右两份 map | random_line_split |
queryRedEnvelopesGrantCtrl.js | /**
* 红包发放查询
*/
angular.module('inspinia').controller('queryRedEnvelopesGrantCtrl',function($scope,$http,$state,$stateParams,$compile,$uibModal,SweetAlert,$log,i18nService,$document,$timeout){
i18nService.setCurrentLang('zh-cn'); //设置语言为中文
$scope.statusSelect=[{text:"全部",value:''},{text:"初始化",value:'-1'},{text:"发放中",value:'0'},{text:"已领完",value:'1'},
{text:"已到期",value:'2'}];
$scope.statusStr=angular.toJson($scope.statusSelect);
$scope.pushTypeSelect=$scope.redPushTypes;
$scope.pushTypeStr=angular.toJson($scope.pushTypeSelect);
$scope.receiveTypeSelect=$scope.redReceiveTypes;
$scope.receiveTypeStr=angular.toJson($scope.receiveTypeSelect);
$scope.busTypeSelect= $scope.redBusTypes;
$scope.busTypeStr=angular.toJson($scope.busTypeSelect);
$scope.pushAreaSelect=$scope.redPushAreas;
$scope.pushAreaStr=angular.toJson($scope.pushAreaSelect);
$scope.hasProfitSelect=[{text:"全部",value:''},{text:"是",value:'0'},{text:"否",value:'1'}];
$scope.hasProfitStr=angular.toJson($scope.hasProfitSelect);
$scope.statusRiskSelect=[{text:"全部",value:''},{text:"正常",value:'0'},{text:"已屏蔽",value:'1'}];
$scope.statusRiskStr=angular.toJson($scope.statusRiskSelect);
$scope.statusRecoverySelect=[{text:"全部",value:''},{text:"待处理",value:'0'},{text:"处理成功",value:'1'},
{text:"处理失败",value:'2'},{text:"处理中",value:'3'}];
$scope.statusRecoveryStr=angular.toJson($scope.statusRecoverySelect);
$scope.statusAccountSelect=[{text:"全部",value:''},{text:"待入账",value:'0'},{text:"已记账",value:'1'},
{text:"记账失败",value:'2'}];
$scope.statusAccountStr=angular.toJson($scope.statusAccountSelect);
$scope.recoveryTypeSelect=[{text:"全部",value:''},{text:"原路退回",value:'0'},{text:"归平台所有",value:'1'},
{text:"无需处理",value:'2'}];
$scope.recoveryTypeStr=angular.toJson($scope.recoveryTypeSelect);
//支付方式(0分润账户余额1微信支付2红包账户余额3内部账户4支付宝支付
$scope.payTypeSelect=[{text:"全部",value:''},{text:"分润账户余额",value:'0'},{text:"微信支付",value:'1'},
{text:"红包账户余额",value:'2'},{text:"内部账户",value:'3'},{text:"支付宝支付",value:'4'}];
$scope.payTypeStr=angular.toJson($scope.payTypeSelect);
//清空
$scope.clear=function() {
$scope.info = {
status: "", pushType: "", receiveType: "", busType: "", hasProfit: "", statusRisk: "",
recoveryType: "", statusRecovery: "", pushArea: "", payType: "", orgId: "",
createDateMin: moment(new Date().getTime() - 6 * 24 * 60 * 60 * 1000).format('YYYY-MM-DD') + ' 00:00:00',
createDateMax: moment(new Date().getTime()).format('YYYY-MM-DD') + ' 23:59:59'
};
}
$scope.clear();
$scope.allCount=0;
$scope.amountCount=0;
//查询所有银行家组织
$scope.orgInfoList = [];
$scope.getOrgInfoList = function () {
$http({
url:"superBank/getOrgInfoList",
method:"POST"
}).success(function(msg){
if(msg.status){
$scope.orgInfoList = msg.data;
$scope.orgInfoList.unshift({orgId:"",orgName:"全部"});
}
}).error(function(){
$scope.notice("获取组织信息异常");
})
};
$scope.getOrgInfoList();
$scope.query=function(){
if ($scope.loadImg) {
return;
}
$scope.loadImg = true;
$http.post("redEnvelopesGrant/selectByParam","info=" + angular.toJson($scope.info)+"&pageNo="+
$scope.paginationOptions.pageNo+"&pageSize="+$scope.paginationOptions.pageSize,
{headers: {'Content-Type': 'application/x-www-form-urlencoded'}})
.success(function(data){
if(data.status){
$scope.result=data.page.result;
$scope.allCount=data.page.totalCount;
$scope.gridOptions.totalItems = data.page.totalCount;
if(data.sunOrder!=null){
$scope.sunOrder=data.sunOrder;
}
}else{
$scope.notice(data.msg);
}
$scope.loadImg = false;
}).error(function () {
$scope.submitting = false;
$scope.loadImg = false;
$scope.notice('服务器异常,请稍后再试.');
});
}
//$scope.query();手动查询
$scope.gridOptions={ //配置表格
data: 'result',
paginationPageSize:10, //分页数量
paginationPageSizes: [10,20,50,100], //切换每页记录数
useExternalPagination: true, //开启拓展名
enableHorizontalScrollbar: true, //横向滚动条
enableVerticalScrollbar : true, //纵向滚动条
columnDefs:[ //表格数据
{ field: 'id',displayName:'红包ID',width:180 },
{ field: 'confId',displayName:'红包配置ID',width:180 },
{ field: 'busType',displayName:'业务类型',cellFilter:"formatDropping:"+$scope.busTypeStr,width:150},
{ field: 'pushType',displayName:'发放人类型',cellFilter:"formatDropping:"+$scope.pushTypeStr,width:150},
{ field: 'receiveType',displayName:'接收人类型',cellFilter:"formatDropping:"+$scope.receiveTypeStr,width:150},
{ field: 'orgName',displayName:'发放人组织名称',width:180 },
{ field: 'hasProfit',displayName:'是否收取佣金',cellFilter:"formatDropping:"+$scope.hasProfitStr,width:150},
{ field: 'status',displayName:'红包状态',cellFilter:"formatDropping:"+$scope.statusStr,width:150},
{ field: 'statusRisk',displayName:'风控状态',cellFilter:"formatDropping:"+$scope.statusRiskStr,width:150},
{ field: 'recoveryType',displayName:'剩余金额处理方式',cellFilter:"formatDropping:"+$scope.recoveryTypeStr,width:150},
{ field: 'statusRecovery',displayName:'剩余金额处理状态',cellFilter:"formatDropping:"+$scope.statusRecoveryStr,width:150},
{ field: 'pushArea',displayName:'发放范围',cellFilter:"formatDropping:"+$scope.pushAreaStr,width:150},
{ field: 'pushAmount',displayName:'红包金额',width:180,cellFilter:"currency:''" },
{ field: 'pushNum',displayName:'个数',width:180 },
// { field: 'pushEachAmount',displayName:'单个领取金额',width:180 },
{ field: 'pushUserCode',displayName:'发红包用户ID',width:180 },
{ field: 'pushRealName',displayName:'发红包用户姓名',width:180 },
{ field: 'pushUserName',displayName:'发红包用户昵称',width:180 },
{ field: 'pushUserPhone',displayName:'发红包手机号',width:180 },
{ field: 'dxUserCode',displayName:'单个定向接收用户ID',width:190 },
{ field: 'dxUserName',displayName:'单个定向接收用户昵称',width:190 },
{ field: 'dxUserPhone',displayName:'单个定向接收用户手机号',width:200 },
{ field: 'payType',displayName:'支付方式',cellFilter:"formatDropping:"+$scope.payTypeStr,width:150},
{ field: 'orderNo',displayName:'关联业务订单ID',width:180 },
{ field: 'payOrderNo',displayName:'关联支付订单ID',width:180 },
{ field: 'pushFee',displayName:'服务费',width:180,cellFilter:"currency:''" },
{ field: 'oneUserProfit',displayName:'一级分润',width:180,cellFilter:"currency:''" },
{ field: 'oneUserCode',displayName:'一级编号',width:180 },
{ field: 'twoUserProfit',displayName:'二级分润',width:180,cellFilter:"currency:''" },
{ field: 'twoUserCode',displayName:'二级编号',width:180 },
{ field: 'thrUserProfit',displayName:'三级分润',width:180,cellFilter:"currency:''" },
{ field: 'thrUserCode',displayName:'三级编号',width:180 },
{ field: 'fouUserProfit',displayName:'四级分润',width:180,cellFilter:"currency:''" },
{ field: 'fouUserCode',displayName:'四级编号',width:180 },
{ field: 'plateProfit',displayName:'平台分润',width:180,cellFilter:"currency:''" },
{ field: 'orgProfit',displayName:'OEM品牌分润',width:180,cellFilter:"currency:''" },
{ field: 'createDate',displayName:'红包创建时间',cellFilter: 'date:"yyyy-MM-dd HH:mm:ss"',width:180},
{ field: 'expDate',displayName:'红包失效时间',cellFilter: 'date:"yyyy-MM-dd HH:mm:ss"',width:180},
{ field: 'id',displayName:'操作',pinnedRight:true,width:180,
cellTemplate:'<div class="lh30"> ' +
'<a ui-sref="red.redEnvelopesGrantDetail({id:row.entity.id})" target="_blank" >详情</a>' +
'<a ng-show="row.entity.busType==0&&grid.appScope.hasPermit(\'redEnvelopesGrant.update\')" ui-sref="red.redEnvelopesGrantExamine({id:row.entity.id})" target="_blank" > | 审核</a>' + | $scope.gridApi = gridApi;
gridApi.pagination.on.paginationChanged($scope, function (newPage, pageSize) {
$scope.paginationOptions.pageNo = newPage;
$scope.paginationOptions.pageSize = pageSize;
$scope.query();
});
}
};
//<a ng-show="row.entity.busType==0&&row.entity.statusRisk==1&&grid.appScope.hasPermit(\'redEnvelopesGrant.updateStatusRisk\')" ng-click="grid.appScope.openStatusRisk(row.entity)" > | 取消关闭</a>
$scope.import=function(){
SweetAlert.swal({
title: "确认导出?",
showCancelButton: true,
confirmButtonColor: "#DD6B55",
confirmButtonText: "提交",
cancelButtonText: "取消",
closeOnConfirm: true,
closeOnCancel: true
},
function (isConfirm) {
if (isConfirm) {
location.href="redEnvelopesGrant/exportInfo?info="+encodeURI(angular.toJson($scope.info));
}
});
};
$scope.changeRecoveryType=function(){
if($scope.info.recoveryType=='2'){
$scope.info.statusRecovery='0';
}else{
$scope.info.statusRecovery='';
}
}
//风控关闭红包modal
$scope.modifyStatusRisk = function(entity){
$scope.redOrdersOption = {reason:'1', redOrderId:entity.id,status:'1'};
$('#riskModal').modal('show');
}
//风控关闭红包提交数据
$scope.riskClose = function(redOrdersOption){
$http({
url:'redEnvelopesGrant/updateStatusRisk',
method:'POST',
data:redOrdersOption
}).success(function(msg){
$scope.notice(msg.msg);
if(msg.status){
$scope.cancel();
$scope.query();
}
});
}
//风控开启红包
$scope.openStatusRisk = function(entity){
SweetAlert.swal({
title: "取消关闭?",
type: "warning",
showCancelButton: true,
confirmButtonColor: "#DD6B55",
confirmButtonText: "提交",
cancelButtonText: "取消",
closeOnConfirm: true,
closeOnCancel: true },
function (isConfirm) {
if (isConfirm) {
$scope.redOrdersOption = {redOrderId:entity.id,status:'0'};
$http({
url:'redEnvelopesGrant/updateStatusRisk',
method:'POST',
data:$scope.redOrdersOption
}).success(function(msg){
$scope.notice(msg.msg);
if(msg.status){
$scope.query();
}
});
}
});
}
$scope.cancel = function(){
$('#riskModal').modal('hide');
}
//页面绑定回车事件
$document.bind("keypress", function(event) {
$scope.$apply(function (){
if(event.keyCode == 13){
$scope.query();
}
})
});
}); | '<a ng-show="row.entity.busType==0&&row.entity.statusRisk==0&&grid.appScope.hasPermit(\'redEnvelopesGrant.updateStatusRisk\')" ng-click="grid.appScope.modifyStatusRisk(row.entity)" > | 风控关闭</a>' +
'</div>'
}
],
onRegisterApi: function(gridApi) { | random_line_split |
queryRedEnvelopesGrantCtrl.js | /**
* 红包发放查询
*/
angular.module('inspinia').controller('queryRedEnvelopesGrantCtrl',function($scope,$http,$state,$stateParams,$compile,$uibModal,SweetAlert,$log,i18nService,$document,$timeout){
i18nService.setCurrentLang('zh-cn'); //设置语言为中文
$scope.statusSelect=[{text:"全部",value:''},{text:"初始化",value:'-1'},{text:"发放中",value:'0'},{text:"已领完",value:'1'},
{text:"已到期",value:'2'}];
$scope.statusStr=angular.toJson($scope.statusSelect);
$scope.pushTypeSelect=$scope.redPushTypes;
$scope.pushTypeStr=angular.toJson($scope.pushTypeSelect);
$scope.receiveTypeSelect=$scope.redReceiveTypes;
$scope.receiveTypeStr=angular.toJson($scope.receiveTypeSelect);
$scope.busTypeSelect= $scope.redBusTypes;
$scope.busTypeStr=angular.toJson($scope.busTypeSelect);
$scope.pushAreaSelect=$scope.redPushAreas;
$scope.pushAreaStr=angular.toJson($scope.pushAreaSelect);
$scope.hasProfitSelect=[{text:"全部",value:''},{text:"是",value:'0'},{text:"否",value:'1'}];
$scope.hasProfitStr=angular.toJson($scope.hasProfitSelect);
$scope.statusRiskSelect=[{text:"全部",value:''},{text:"正常",value:'0'},{text:"已屏蔽",value:'1'}];
$scope.statusRiskStr=angular.toJson($scope.statusRiskSelect);
$scope.statusRecoverySelect=[{text:"全部",value:''},{text:"待处理",value:'0'},{text:"处理成功",value:'1'},
{text:"处理失败",value:'2'},{text:"处理中",value:'3'}];
$scope.statusRecoveryStr=angular.toJson($scope.statusRecoverySelect);
$scope.statusAccountSelect=[{text:"全部",value:''},{text:"待入账",value:'0'},{text:"已记账",value:'1'},
{text:"记账失败",value:'2'}];
$scope.statusAccountStr=angular.toJson($scope.statusAccountSelect);
$scope.recoveryTypeSelect=[{text:"全部",value:''},{text:"原路退回",value:'0'},{text:"归平台所有",value:'1'},
{text:"无需处理",value:'2'}];
$scope.recoveryTypeStr=angular.toJson($scope.recoveryTypeSelect);
//支付方式(0分润账户余额1微信支付2红包账户余额3内部账户4支付宝支付
$scope.payTypeSelect=[{text:"全部",value:''},{text:"分润账户余额",value:'0'},{text:"微信支付",value:'1'},
{text:"红包账户余额",value:'2'},{text:"内部账户",value:'3'},{text:"支付宝支付",value:'4'}];
$scope.payTypeStr=angular.toJson($scope.payTypeSelect);
//清空
$scope.clear=function() {
$scope.info = {
status: "", pushType: "", receiveType: "", busType: "", hasProfit: "", statusRisk: "",
recoveryType: "", statusRecovery: "", pushArea: "", payType: "", orgId: "",
createDateMin: moment(new Date().getTime() - 6 * 24 * 60 * 60 * 1000).format('YYYY-MM-DD') + ' 00:00:00',
createDateMax: moment(new Date().getTime()).format('YYYY-MM-DD') + ' 23:59:59'
};
}
$scope.clear();
$scope.allCount=0;
$scope.amountCount=0;
//查询所有银行家组织
$scope.orgInfoList = [];
$scope.getOrgInfoList = function () {
$http({
url:"superBank/getOrgInfoList",
method:"POST"
}).success(function(msg){
if(msg.status){
$scope.orgInfoList = msg.data;
$scope.orgInfoList.unshift({orgId:"",orgName:"全部"});
}
}).error(function(){
$scope.notice("获取组织信息异常");
})
};
$scope.getOrgInfoList();
$scope.query=function(){
if ($scope.loadImg) {
return;
}
$scope.loadImg = true;
$http.post("redEnvelopesGrant/selectByParam","info=" + angular.toJson($scope.info)+"&pageNo="+
$scope.paginationOptions.pageNo+"&pageSize="+$scope.paginationOptions.pageSize,
{headers: {'Content-Type': 'application/x-www-form-urlencoded'}})
.success(function(data){
if(data.status){
$scope.result=data.page.result;
$scope.allCount=data.page.totalCount;
$scope.gridOptions.totalItems = data.page.totalCount;
if(data.sunOrder!=null){
$scope.sunOrder=data.sunOrder;
}
}else{
$scope.notice(data.msg);
}
$scope.loadImg = false;
}).error(function () {
$scope.submitting = false;
$scope.loadImg = false;
$scope.notice('服务器异常,请稍后再试.');
});
}
//$scope.query();手动查询
$scope.gridOptions={ //配置表格
data: 'result',
paginationPageSize:10, //分页数量
paginationPageSizes: [10,20,50,100], //切换每页记录数
useExternalPagination: true, //开启拓展名
enableHorizontalScrollbar: true, //横向滚动条
enableVerticalScrollbar : true, //纵向滚动条
columnDefs:[ //表格数据
{ field: 'id',displayName:'红包ID',width:180 },
{ field: 'confId',displayName:'红包配置ID',width:180 },
{ field: 'busType',displayName:'业务类型',cellFilter:"formatDropping:"+$scope.busTypeStr,width:150},
{ field: 'pushType',displayName:'发放人类型',cellFilter:"formatDropping:"+$scope.pushTypeStr,width:150},
{ field: 'receiveType',displayName:'接收人类型',cellFilter:"formatDropping:"+$scope.receiveTypeStr,width:150},
{ field: 'orgName',displayName:'发放人组织名称',width:180 },
{ field: 'hasProfit',displayName:'是否收取佣金',cellFilter:"formatDropping:"+$scope.hasProfitStr,width:150},
{ field: 'status',displayName:'红包状态',cellFilter:"formatDropping:"+$scope.statusStr,width:150},
{ field: 'statusRisk',displayName:'风控状态',cellFilter:"formatDropping:"+$scope.statusRiskStr,width:150},
{ field: 'recoveryType',displayName:'剩余金额处理方式',cellFilter:"formatDropping:"+$scope.recoveryTypeStr,width:150},
{ field: 'statusRecovery',displayName:'剩余金额处理状态',cellFilter:"formatDropping:"+$scope.statusRecoveryStr,width:150},
{ field: 'pushArea',displayName:'发放范围',cellFilter:"formatDropping:"+$scope.pushAreaStr,width:150},
{ field: 'pushAmount',displayName:'红包金额',width:180,cellFilter:"currency:''" },
{ field: 'pushNum',displayName:'个数',width:180 },
// { field: 'pushEachAmount',displayName:'单个领取金额',width:180 },
{ field: 'pushUserCode',displayName:'发红包用户ID',width:180 },
{ field: 'pushRealName',displayName:'发红包用户姓名',width:180 },
{ field: 'pushUserName',displayName:'发红包用户昵称',width:180 },
{ field: 'pushUserPhone',displayName:'发红包手机号',width:180 },
{ field: 'dxUserCode',displayName:'单个定向接收用户ID',width:190 },
{ field: 'dxUserName',displayName:'单个定向接收用户昵称',width:190 },
{ field: 'dxUserPhone',displayName:'单个定向接收用户手机号',width:200 },
{ field: 'payType',displayName:'支付方式',cellFilter:"formatDropping:"+$scope.payTypeStr,width:150},
{ field: 'orderNo',displayName:'关联业务订单ID',width:180 },
{ field: 'payOrderNo',displayName:'关联支付订单ID',width:180 },
{ field: 'pushFee',displayName:'服务费',width:180,cellFilter:"currency:''" },
{ field: 'oneUserProfit',displayName:'一级分润',width:180,cellFilter:"currency:''" },
{ field: 'oneUserCode',displayName:'一级编号',width:180 },
{ field: 'twoUserProfit',displayName:'二级分润',width:180,cellFilter:"currency:''" },
{ field: 'twoUserCode',displayName:'二级编号',width:180 },
{ field: 'thrUserProfit',displayName:'三级分润',width:180,cellFilter:"currency:''" },
{ field: 'thrUserCode',displayName:'三级编号',width:180 },
{ field: 'fouUserProfit',displayName:'四级分润',width:180,cellFilter:"currency:''" },
{ field: 'fouUserCode',displayName:'四级编号',width:180 },
{ field: 'plateProfit',displayName:'平台分润',width:180,cellFilter:"currency:''" },
{ field: 'orgProfit',displayName:'OEM品牌分润',width:180,cellFilter:"currency:''" },
{ field: 'createDate',displayName:'红包创建时间',cellFilter: 'date:"yyyy-MM-dd HH:mm:ss"',width:180},
{ field: 'expDate',displayName:'红包失效时间',cellFilter: 'date:"yyyy-MM-dd HH:mm:ss"',width:180},
{ field: 'id',displayName:'操作',pinnedRight:true,width:180,
cellTemplate:'<div class="lh30"> ' +
'<a ui-sref="red.redEnvelopesGrantDetail({id:row.entity.id})" target="_blank" >详情</a>' +
'<a ng-show="row.entity.busType==0&&grid.appScope.hasPermit(\'redEnvelopesGrant.update\')" ui-sref="red.redEnvelopesGrantExamine({id:row.entity.id})" target="_blank" > | 审核</a>' +
'<a ng-show="row.entity.busType==0&&row.entity.statusRisk==0&&grid.appScope.hasPermit(\'redEnvelopesGrant.updateStatusRisk\')" ng-click="grid.appScope.modifyStatusRisk(row.entity)" > | 风控关闭</a>' +
'</div>'
}
],
onRegisterApi: function(gridApi) {
$scope.gridApi = gridApi;
gridApi.pagination.on.paginationChanged($scope, function (newPage, pageSize) {
$scope.paginationOptions.pageNo = newPage;
$scope.paginationOptions.pageSize = pageSize;
$scope.query();
});
}
};
//<a ng-show="row.entity.busType==0&&row.entity.statusRisk==1&&grid.appScope.hasPermit(\'redEnvelopesGrant.updateStatusRisk\')" ng-click="grid.appScope.openStatusRisk(row.entity)" > | 取消关闭</a>
$scope.import=function(){
SweetAlert.swal({
title: "确认导出?",
showCancelButton: true,
confirmButtonColor: "#DD6B55",
confirmButtonText: "提交",
cancelButtonText: "取消",
closeOnConfirm: true,
closeOnCancel: true
},
function (isConfirm) {
if (isConfirm) {
location.href="redEnvelopesGrant/exportInfo?info="+encodeURI(angular.toJson($scope.info));
}
});
};
$scope.changeRecoveryType=function(){
if($scope.info.recoveryType=='2'){
$scope.info.statusRecovery='0';
}else{
$scope.info.statusRecovery='';
}
}
//风控关闭红包modal
$scope.modifyStatusRisk = function(entity){
$scope.redOrdersOption = {reason:'1', redOrderId:entity.id,status:'1'};
$('#riskModal').modal('show');
}
//风控关闭红包提交数据
$scope.riskClose = function(redOrdersOption){
$http({
url:'redEnvelopesGrant/updateStatusRisk',
method:'POST',
data:redOrdersOption
}).success(function(msg){
$scope.notice(msg.msg);
if(msg.status){
$scope.cancel();
$scope.query();
}
});
}
//风控开启红包
$scope.openStatusRisk = function(entity){
SweetAlert.swal({
title: "取消关闭?",
type: "warning",
showCancelButton: true,
confirmButtonColor: "#DD6B55",
confirmButtonText: "提交",
cancelButtonText: "取消",
closeOnConfirm: true,
closeOnCancel: true },
function (isConfirm) {
if (isConfirm) {
$scope.redOrdersOption = {redOrderId:entity.id,status:'0'};
$http({
url:'redEnvelopesGrant/updateStatusRisk',
method:'POST',
data:$scope.redOrdersOption
}).success(function(msg){
$scope.notice(msg.msg);
if(msg.status){
$scope.query();
}
});
}
});
}
$scope.cancel = function(){
$('#riskModal').modal('hide');
}
//页面绑定回车事件
$document.bind("keypress", function(event) {
$scope.$apply(function (){
if(event.keyCode == 13){
$scope.query();
}
})
});
}); | conditional_block | ||
index.d.ts | // Type definitions for non-npm package PexRTC 26.0
// Project: https://docs.pexip.com/api_client/api_pexrtc.htm
// Definitions by: 10Clouds <https://github.com/10clouds>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
declare class | {
constructor();
/**
* ## Callbacks
*/
onCallTransfer: (alias: string) => void;
onChatMessage: (message: PexRTC.ChatMessage) => void;
onConnect: (stream: PexRTC.PexMediaStream | null) => void;
onConferenceUpdate: (properties: {
guest_muted: boolean;
locked: boolean;
started: boolean;
}) => void;
onDisconnect: (reason: string) => void;
onError: (reason: string) => void;
onLayoutUpdate: (layout: PexRTC.LayoutResponse) => void;
onLog: () => {};
onMicActivity: (activity: string) => void;
onParticipantCreate: (participant: PexRTC.AnyParticipant) => void;
onParticipantDelete: (
participant: Pick<PexRTC.AnyParticipant, 'uuid'>
) => void;
onParticipantUpdate: (participant: PexRTC.AnyParticipant) => void;
onPresentation: (
setting: boolean,
presenter: string | null,
uuid?: string
) => void;
onPresentationConnected: (stream: PexRTC.PexMediaStream) => void;
onPresentationDisconnected: (reason: string) => void;
onPresentationReload: (url: string) => void;
/**
* @deprecated in favor of onParticipantCreate/Update/Delete.
*/
onRosterList: (roster: any[]) => void;
onScreenshareConnected: (stream: PexRTC.PexMediaStream) => void;
onScreenshareStopped: (reason: string) => void;
onSetup: (
stream: PexRTC.PexMediaStream | null,
pin_status: PexRTC.PinStatus,
conference_extension?: 'standard' | 'mssip'
) => void;
onStageUpdate: (
stage: Array<{
participant_uuid: string;
stage_index: number;
vad: number;
}>
) => void;
/**
* ## Client control functions
*/
readonly makeCall: (
node: string,
conference: string,
name: string,
bandwidth?: number,
call_type?: PexRTC.CallTypes,
flash?: { [key: string]: unknown }
) => void;
readonly connect: (pin: string | null, extension?: string) => void;
readonly muteAudio: (setting: boolean) => boolean;
readonly muteVideo: (setting: boolean) => boolean;
readonly sendChatMessage: (message: string) => void;
readonly disconnect: () => void;
readonly disconnectcall: () => void;
readonly addCall: (call_type?: PexRTC.CallTypes) => void;
readonly renegotiate: (resend_sdp?: boolean) => void;
readonly getPresentation: () => void;
readonly stopPresentation: () => void;
readonly present: (type?: 'screen') => void;
readonly getMediaStatistics: () => PexRTC.Statistics;
/**
* ## Conference control functions
*/
readonly dialOut: (
destination: string,
protocol?: 'sip' | 'h323' | 'rtmp' | 'mssip' | 'auto',
role?: PexRTC.Role,
cb?: (res: { result: string[] }) => void,
params?: {
presentation_uri?: string;
streaming?: boolean;
dtmf_sequence?: string;
call_type?: 'video' | 'video-only' | 'audio';
keep_conference_alive?:
| 'keep_conference_alive'
| 'keep_conference_alive_if_multiple'
| 'keep_conference_alive_never';
remote_display_name?: string;
overlay_text?: string;
}
) => void;
readonly setConferenceLock: (setting: boolean) => void;
readonly setMuteAllGuests: (setting: boolean) => void;
readonly setParticipantMute: (uuid: string, setting: boolean) => void;
readonly videoMuted: (uuid?: string) => void;
readonly videoUnmuted: (uuid?: string) => void;
readonly setParticipantRxPresentation: (
uuid: string,
setting: boolean
) => void;
readonly setParticipantSpotlight: (uuid: string, setting: boolean) => void;
readonly setParticipantText: (uuid: string, text: string) => void;
readonly setRole: (uuid: string, role: PexRTC.ParticipantRole) => void;
readonly unlockParticipant: (uuid: string) => void;
readonly transferParticipant: (
uuid: string,
destination: string,
role: string,
pin?: string
) => void;
readonly startConference: () => void;
readonly disconnectParticipant: (uuid: string) => void;
readonly disconnectAll: () => void;
readonly sendDTMF: (digits: string, uuid: string) => void;
readonly sendFECC: (
action: 'start' | 'stop' | 'continue',
axis: 'pan' | 'tilt' | 'zoom',
direction: 'left' | 'right' | 'up' | 'down' | 'in' | 'out',
target: string | null,
timeout: number
) => void;
readonly setBuzz: () => void;
readonly clearBuzz: (uuid: string) => void;
readonly clearAllBuzz: () => void;
readonly transformLayout: (transforms: {
layout: PexRTC.LayoutTypes;
host_layout: PexRTC.LayoutTypes;
guest_layout: PexRTC.LayoutTypes;
streaming_indicator: boolean;
recording_indicator: boolean;
enable_active_speaker_indication: boolean;
enable_overlay_text: boolean;
free_form_overlay_text: string;
streaming: {
layout: PexRTC.LayoutTypes;
waiting_screen_enabled: boolean;
plus_n_pip_enabled: boolean;
indicators_enabled: boolean;
};
}) => void;
/**
* ## Instance variables
*
* A few additional configuration changes can be undertaken via instance
* variables on the PexRTC object, before calling makeCall:
*/
/**
* audio_source, video_source
* Values can be:
* null: default sources
* false: do not request
* string: a uuid of a media source gathered through device enumeration (Chrome only)
*/
audio_source: string | null | false;
video_source: string | null | false;
bandwidth_in: number;
bandwidth_out: number;
call_tag: string;
default_stun: string;
ice_timeout: number;
png_presentation: boolean;
recv_audio: boolean;
recv_video: boolean;
screenshare_fps: number;
turn_server: null | {
url: string;
username: string;
credential: string;
};
/**
* A MediaStream object to use instead of PexRTC calling getUserMedia
*/
user_media_stream: MediaStream;
/**
* A MediaStream object to use for presentation instead of
* PexRTC calling getDisplayMedia
*/
user_presentation_stream: MediaStream;
/**
* ## Fields
*
* The following fields on the PexRTC object are immutable but can be probed
* after onSetup, and provide useful information about the connection:
*/
readonly chat_enabled: boolean;
readonly current_service_type: PexRTC.ServiceType;
readonly role: PexRTC.Role;
readonly service_type: 'conference' | 'gateway' | 'test_call';
readonly uuid: string;
readonly version: string;
/**
* ## Undocumented fields
*/
readonly allow_1080p: boolean;
readonly analytics_enabled: boolean;
readonly basic_password: string;
readonly basic_username: string;
readonly call: {
readonly mutedAudio: boolean;
readonly mutedVideo: boolean;
};
readonly call_type: undefined;
readonly chrome_ver: number;
readonly conference: string;
readonly conference_extension: string;
readonly conference_name: string;
readonly conference_uri: string;
readonly display_name: string;
readonly dtmf_queue: {};
readonly edge_ver: number;
readonly error: string;
readonly event_listener: string;
readonly event_source: EventSource;
readonly onmessage: string;
readonly onopen: () => {};
readonly readyState: number;
readonly url: string;
readonly withCredentials: boolean;
readonly event_source_timeout: number;
readonly fecc_enabled: boolean;
readonly fecc_queue: {};
readonly firefox_ver: number;
readonly flash: undefined;
readonly force_hd: number;
readonly guests_can_present: boolean;
readonly h264_enabled: boolean;
readonly is_android: boolean;
readonly is_electron: boolean;
readonly is_mobile: boolean;
readonly last_ping: string;
readonly localStream: string;
readonly mutedAudio: boolean;
readonly mutedVideo: boolean;
readonly node: string;
readonly oneTimeToken: string;
readonly orig_bandwidth_in: number;
readonly orig_bandwidth_out: number;
readonly outstanding_requests: {};
readonly pc: string;
readonly pcConfig: {
readonly iceServers: number[];
};
readonly pin: string;
readonly pin_status: PexRTC.PinStatus;
readonly powerLineFrequency: number;
readonly presentation: string;
readonly presentation_event_id: string;
readonly presentation_msg: { status: string };
readonly registration_token: string;
readonly remote_call_type: string;
readonly return_media_stream: boolean;
readonly rosterList: { [key: string]: PexRTC.Participant };
readonly rtmp_enabled: boolean;
readonly rtsp_enabled: boolean;
readonly safari_ver: number;
readonly screenshare: string;
readonly screenshare_api: string;
readonly screenshare_height: number;
readonly screenshare_width: number;
readonly socket: string;
readonly state: string;
readonly stats: {};
readonly stats_interval: string;
readonly token: string;
readonly token_refresh: number;
readonly trans: {};
readonly use_trickle_ice: boolean;
readonly vp9_enabled: boolean;
readonly xhr_timeout: number;
}
declare namespace PexRTC {
type PexMediaStream = MediaStream | string;
type Role = 'HOST' | 'GUEST';
type PinStatus = 'none' | 'required' | 'optional';
type YesNo = 'YES' | 'NO';
type ParticipantRole = 'chair' | 'guest';
type LayoutTypes =
| '1:0'
| '1:7'
| '1:21'
| '2:21'
| '4:0'
| '5:7'
| 'ac';
type ServiceType =
| 'connecting'
| 'waiting_room'
| 'ivr'
| 'conference'
| 'lecture'
| 'gateway'
| 'test_call';
type CallTypes =
| 'presentation'
| 'screen'
| 'audioonly'
| 'recvonly'
| 'rtmp'
| 'stream'
| 'none';
interface LayoutResponse {
participants: string[];
view: LayoutTypes;
}
interface ChatMessage {
readonly origin: string;
readonly uuid: string;
readonly type: string;
readonly payload: string;
}
interface AudioStatistics {
readonly 'packets-sent': number;
readonly bitrate: string;
readonly codec: string;
readonly 'packets-lost': number;
readonly 'percentage-lost': string;
readonly 'percentage-lost-recent': string;
}
interface VideoStatistics extends AudioStatistics {
readonly framerate: number;
readonly resolution: string;
}
interface Statistics {
readonly incoming: {
audio: AudioStatistics;
video: VideoStatistics;
};
readonly outgoing: {
audio: AudioStatistics;
video: VideoStatistics;
};
}
interface Participant {
readonly api_url: string;
readonly buzz_time: number;
readonly call_direction: 'in' | 'out';
readonly call_tag: string;
readonly disconnect_supported: YesNo;
readonly display_name: string;
readonly encryption: string;
readonly external_node_uuid: string;
readonly fecc_supported: YesNo;
readonly has_media: boolean;
readonly is_audio_only_call: YesNo;
readonly is_external: boolean;
readonly is_muted: YesNo;
readonly is_presenting: YesNo;
readonly is_streaming_conference: boolean;
readonly is_video_call: YesNo;
readonly local_alias: string;
readonly mute_supported: YesNo;
readonly overlay_text: string;
readonly presentation_supported: YesNo;
readonly protocol: string;
readonly role: ParticipantRole;
readonly rx_presentation_policy: 'allow' | 'deny';
readonly service_type: ServiceType;
readonly spotlight: number;
readonly start_time: number;
readonly transfer_supported: YesNo;
readonly uri: string;
readonly uuid: string;
readonly vendor: string;
}
interface GuestParticipant extends Participant {
readonly role: 'guest';
}
interface HostParticipant extends Participant {
readonly role: 'chair';
}
type AnyParticipant =
| GuestParticipant
| HostParticipant;
}
| PexRTC | identifier_name |
index.d.ts | // Type definitions for non-npm package PexRTC 26.0
// Project: https://docs.pexip.com/api_client/api_pexrtc.htm
// Definitions by: 10Clouds <https://github.com/10clouds>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
declare class PexRTC {
constructor();
/**
* ## Callbacks
*/
onCallTransfer: (alias: string) => void;
onChatMessage: (message: PexRTC.ChatMessage) => void;
onConnect: (stream: PexRTC.PexMediaStream | null) => void;
onConferenceUpdate: (properties: {
guest_muted: boolean;
locked: boolean;
started: boolean;
}) => void;
onDisconnect: (reason: string) => void;
onError: (reason: string) => void;
onLayoutUpdate: (layout: PexRTC.LayoutResponse) => void;
onLog: () => {};
onMicActivity: (activity: string) => void;
onParticipantCreate: (participant: PexRTC.AnyParticipant) => void;
onParticipantDelete: (
participant: Pick<PexRTC.AnyParticipant, 'uuid'>
) => void;
onParticipantUpdate: (participant: PexRTC.AnyParticipant) => void;
onPresentation: (
setting: boolean,
presenter: string | null,
uuid?: string
) => void;
onPresentationConnected: (stream: PexRTC.PexMediaStream) => void;
onPresentationDisconnected: (reason: string) => void;
onPresentationReload: (url: string) => void;
/**
* @deprecated in favor of onParticipantCreate/Update/Delete.
*/
onRosterList: (roster: any[]) => void;
onScreenshareConnected: (stream: PexRTC.PexMediaStream) => void;
onScreenshareStopped: (reason: string) => void;
onSetup: (
stream: PexRTC.PexMediaStream | null,
pin_status: PexRTC.PinStatus,
conference_extension?: 'standard' | 'mssip'
) => void;
onStageUpdate: (
stage: Array<{
participant_uuid: string;
stage_index: number;
vad: number;
}>
) => void;
/**
* ## Client control functions
*/
readonly makeCall: (
node: string,
conference: string,
name: string,
bandwidth?: number,
call_type?: PexRTC.CallTypes,
flash?: { [key: string]: unknown }
) => void;
readonly connect: (pin: string | null, extension?: string) => void;
readonly muteAudio: (setting: boolean) => boolean;
readonly muteVideo: (setting: boolean) => boolean;
readonly sendChatMessage: (message: string) => void;
readonly disconnect: () => void;
readonly disconnectcall: () => void;
readonly addCall: (call_type?: PexRTC.CallTypes) => void;
readonly renegotiate: (resend_sdp?: boolean) => void;
readonly getPresentation: () => void;
readonly stopPresentation: () => void;
readonly present: (type?: 'screen') => void;
readonly getMediaStatistics: () => PexRTC.Statistics; | readonly dialOut: (
destination: string,
protocol?: 'sip' | 'h323' | 'rtmp' | 'mssip' | 'auto',
role?: PexRTC.Role,
cb?: (res: { result: string[] }) => void,
params?: {
presentation_uri?: string;
streaming?: boolean;
dtmf_sequence?: string;
call_type?: 'video' | 'video-only' | 'audio';
keep_conference_alive?:
| 'keep_conference_alive'
| 'keep_conference_alive_if_multiple'
| 'keep_conference_alive_never';
remote_display_name?: string;
overlay_text?: string;
}
) => void;
readonly setConferenceLock: (setting: boolean) => void;
readonly setMuteAllGuests: (setting: boolean) => void;
readonly setParticipantMute: (uuid: string, setting: boolean) => void;
readonly videoMuted: (uuid?: string) => void;
readonly videoUnmuted: (uuid?: string) => void;
readonly setParticipantRxPresentation: (
uuid: string,
setting: boolean
) => void;
readonly setParticipantSpotlight: (uuid: string, setting: boolean) => void;
readonly setParticipantText: (uuid: string, text: string) => void;
readonly setRole: (uuid: string, role: PexRTC.ParticipantRole) => void;
readonly unlockParticipant: (uuid: string) => void;
readonly transferParticipant: (
uuid: string,
destination: string,
role: string,
pin?: string
) => void;
readonly startConference: () => void;
readonly disconnectParticipant: (uuid: string) => void;
readonly disconnectAll: () => void;
readonly sendDTMF: (digits: string, uuid: string) => void;
readonly sendFECC: (
action: 'start' | 'stop' | 'continue',
axis: 'pan' | 'tilt' | 'zoom',
direction: 'left' | 'right' | 'up' | 'down' | 'in' | 'out',
target: string | null,
timeout: number
) => void;
readonly setBuzz: () => void;
readonly clearBuzz: (uuid: string) => void;
readonly clearAllBuzz: () => void;
readonly transformLayout: (transforms: {
layout: PexRTC.LayoutTypes;
host_layout: PexRTC.LayoutTypes;
guest_layout: PexRTC.LayoutTypes;
streaming_indicator: boolean;
recording_indicator: boolean;
enable_active_speaker_indication: boolean;
enable_overlay_text: boolean;
free_form_overlay_text: string;
streaming: {
layout: PexRTC.LayoutTypes;
waiting_screen_enabled: boolean;
plus_n_pip_enabled: boolean;
indicators_enabled: boolean;
};
}) => void;
/**
* ## Instance variables
*
* A few additional configuration changes can be undertaken via instance
* variables on the PexRTC object, before calling makeCall:
*/
/**
* audio_source, video_source
* Values can be:
* null: default sources
* false: do not request
* string: a uuid of a media source gathered through device enumeration (Chrome only)
*/
audio_source: string | null | false;
video_source: string | null | false;
bandwidth_in: number;
bandwidth_out: number;
call_tag: string;
default_stun: string;
ice_timeout: number;
png_presentation: boolean;
recv_audio: boolean;
recv_video: boolean;
screenshare_fps: number;
turn_server: null | {
url: string;
username: string;
credential: string;
};
/**
* A MediaStream object to use instead of PexRTC calling getUserMedia
*/
user_media_stream: MediaStream;
/**
* A MediaStream object to use for presentation instead of
* PexRTC calling getDisplayMedia
*/
user_presentation_stream: MediaStream;
/**
* ## Fields
*
* The following fields on the PexRTC object are immutable but can be probed
* after onSetup, and provide useful information about the connection:
*/
readonly chat_enabled: boolean;
readonly current_service_type: PexRTC.ServiceType;
readonly role: PexRTC.Role;
readonly service_type: 'conference' | 'gateway' | 'test_call';
readonly uuid: string;
readonly version: string;
/**
* ## Undocumented fields
*/
readonly allow_1080p: boolean;
readonly analytics_enabled: boolean;
readonly basic_password: string;
readonly basic_username: string;
readonly call: {
readonly mutedAudio: boolean;
readonly mutedVideo: boolean;
};
readonly call_type: undefined;
readonly chrome_ver: number;
readonly conference: string;
readonly conference_extension: string;
readonly conference_name: string;
readonly conference_uri: string;
readonly display_name: string;
readonly dtmf_queue: {};
readonly edge_ver: number;
readonly error: string;
readonly event_listener: string;
readonly event_source: EventSource;
readonly onmessage: string;
readonly onopen: () => {};
readonly readyState: number;
readonly url: string;
readonly withCredentials: boolean;
readonly event_source_timeout: number;
readonly fecc_enabled: boolean;
readonly fecc_queue: {};
readonly firefox_ver: number;
readonly flash: undefined;
readonly force_hd: number;
readonly guests_can_present: boolean;
readonly h264_enabled: boolean;
readonly is_android: boolean;
readonly is_electron: boolean;
readonly is_mobile: boolean;
readonly last_ping: string;
readonly localStream: string;
readonly mutedAudio: boolean;
readonly mutedVideo: boolean;
readonly node: string;
readonly oneTimeToken: string;
readonly orig_bandwidth_in: number;
readonly orig_bandwidth_out: number;
readonly outstanding_requests: {};
readonly pc: string;
readonly pcConfig: {
readonly iceServers: number[];
};
readonly pin: string;
readonly pin_status: PexRTC.PinStatus;
readonly powerLineFrequency: number;
readonly presentation: string;
readonly presentation_event_id: string;
readonly presentation_msg: { status: string };
readonly registration_token: string;
readonly remote_call_type: string;
readonly return_media_stream: boolean;
readonly rosterList: { [key: string]: PexRTC.Participant };
readonly rtmp_enabled: boolean;
readonly rtsp_enabled: boolean;
readonly safari_ver: number;
readonly screenshare: string;
readonly screenshare_api: string;
readonly screenshare_height: number;
readonly screenshare_width: number;
readonly socket: string;
readonly state: string;
readonly stats: {};
readonly stats_interval: string;
readonly token: string;
readonly token_refresh: number;
readonly trans: {};
readonly use_trickle_ice: boolean;
readonly vp9_enabled: boolean;
readonly xhr_timeout: number;
}
declare namespace PexRTC {
type PexMediaStream = MediaStream | string;
type Role = 'HOST' | 'GUEST';
type PinStatus = 'none' | 'required' | 'optional';
type YesNo = 'YES' | 'NO';
type ParticipantRole = 'chair' | 'guest';
type LayoutTypes =
| '1:0'
| '1:7'
| '1:21'
| '2:21'
| '4:0'
| '5:7'
| 'ac';
type ServiceType =
| 'connecting'
| 'waiting_room'
| 'ivr'
| 'conference'
| 'lecture'
| 'gateway'
| 'test_call';
type CallTypes =
| 'presentation'
| 'screen'
| 'audioonly'
| 'recvonly'
| 'rtmp'
| 'stream'
| 'none';
interface LayoutResponse {
participants: string[];
view: LayoutTypes;
}
interface ChatMessage {
readonly origin: string;
readonly uuid: string;
readonly type: string;
readonly payload: string;
}
interface AudioStatistics {
readonly 'packets-sent': number;
readonly bitrate: string;
readonly codec: string;
readonly 'packets-lost': number;
readonly 'percentage-lost': string;
readonly 'percentage-lost-recent': string;
}
interface VideoStatistics extends AudioStatistics {
readonly framerate: number;
readonly resolution: string;
}
interface Statistics {
readonly incoming: {
audio: AudioStatistics;
video: VideoStatistics;
};
readonly outgoing: {
audio: AudioStatistics;
video: VideoStatistics;
};
}
interface Participant {
readonly api_url: string;
readonly buzz_time: number;
readonly call_direction: 'in' | 'out';
readonly call_tag: string;
readonly disconnect_supported: YesNo;
readonly display_name: string;
readonly encryption: string;
readonly external_node_uuid: string;
readonly fecc_supported: YesNo;
readonly has_media: boolean;
readonly is_audio_only_call: YesNo;
readonly is_external: boolean;
readonly is_muted: YesNo;
readonly is_presenting: YesNo;
readonly is_streaming_conference: boolean;
readonly is_video_call: YesNo;
readonly local_alias: string;
readonly mute_supported: YesNo;
readonly overlay_text: string;
readonly presentation_supported: YesNo;
readonly protocol: string;
readonly role: ParticipantRole;
readonly rx_presentation_policy: 'allow' | 'deny';
readonly service_type: ServiceType;
readonly spotlight: number;
readonly start_time: number;
readonly transfer_supported: YesNo;
readonly uri: string;
readonly uuid: string;
readonly vendor: string;
}
interface GuestParticipant extends Participant {
readonly role: 'guest';
}
interface HostParticipant extends Participant {
readonly role: 'chair';
}
type AnyParticipant =
| GuestParticipant
| HostParticipant;
} |
/**
* ## Conference control functions
*/ | random_line_split |
store.go | package store
import (
"context"
"errors"
"fmt"
"io"
"log"
"net/url"
"strconv"
"strings"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
tpMongo "github.com/tidepool-org/go-common/clients/mongo"
)
const (
dataCollectionName = "deviceData"
dataSetsCollectionName = "deviceDataSets" // all datum with type == "upload" go in this collection as opposed to the deviceData collection. These act as the root/parent for all other data.
dataStoreAPIPrefix = "api/data/store "
RFC3339NanoSortable = "2006-01-02T15:04:05.00000000Z07:00"
medtronicDateFormat = "2006-01-02"
medtronicIndexDate = "2017-09-01"
)
type (
// StorageIterator - Interface for the query iterator
StorageIterator interface {
Next(context.Context) bool
Decode(interface{}) error
Close(context.Context) error
}
// Storage - Interface for our storage layer
Storage interface {
Close()
Ping() error
GetDeviceData(p *Params) StorageIterator
}
// MongoStoreClient - Mongo Storage Client
MongoStoreClient struct {
client *mongo.Client
context context.Context
database string
}
// SchemaVersion struct
SchemaVersion struct {
Minimum int
Maximum int
}
// Params struct
Params struct {
UserID string
Types []string
SubTypes []string
Date
*SchemaVersion
Carelink bool
Dexcom bool
DexcomDataSource bson.M
DeviceID string
Latest bool
Medtronic bool
MedtronicDate string
MedtronicUploadIds []string
UploadID string
}
// Date struct
Date struct {
Start time.Time
End time.Time
}
latestIterator struct {
results []bson.Raw
pos int
}
// multiStorageIterator is a StorageIterator reads from multiple iterators
// until there is no more data this is needed in the case that we are
// reading multiple types and need to read both uploads and data.
multiStorageIterator struct {
iters []StorageIterator
currentIterIdx int
}
)
func cleanDateString(dateString string) (time.Time, error) {
date := time.Time{}
if dateString == "" {
return date, nil
}
date, err := time.Parse(time.RFC3339Nano, dateString)
if err != nil {
return date, err
}
return date, nil
}
// GetParams parses a URL to set parameters
func GetParams(q url.Values, schema *SchemaVersion) (*Params, error) {
startDate, err := cleanDateString(q.Get("startDate"))
if err != nil {
return nil, err
}
endDate, err := cleanDateString(q.Get("endDate"))
if err != nil {
return nil, err
}
carelink := false
if values, ok := q["carelink"]; ok {
if len(values) < 1 {
return nil, errors.New("carelink parameter not valid")
}
carelink, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("carelink parameter not valid")
}
}
dexcom := false
if values, ok := q["dexcom"]; ok {
if len(values) < 1 {
return nil, errors.New("dexcom parameter not valid")
}
dexcom, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("dexcom parameter not valid")
}
}
latest := false
if values, ok := q["latest"]; ok {
if len(values) < 1 {
return nil, errors.New("latest parameter not valid")
}
latest, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("latest parameter not valid")
}
}
medtronic := false
if values, ok := q["medtronic"]; ok {
if len(values) < 1 {
return nil, errors.New("medtronic parameter not valid")
}
medtronic, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("medtronic parameter not valid")
}
}
p := &Params{
UserID: q.Get(":userID"),
DeviceID: q.Get("deviceId"),
UploadID: q.Get("uploadId"),
//the query params for type and subtype can contain multiple values seperated
//by a comma e.g. "type=smbg,cbg" so split them out into an array of values
Types: strings.Split(q.Get("type"), ","),
SubTypes: strings.Split(q.Get("subType"), ","),
Date: Date{startDate, endDate},
SchemaVersion: schema,
Carelink: carelink,
Dexcom: dexcom,
Latest: latest,
Medtronic: medtronic,
}
return p, nil
}
// NewMongoStoreClient creates a new MongoStoreClient
func NewMongoStoreClient(config *tpMongo.Config) *MongoStoreClient {
connectionString, err := config.ToConnectionString()
if err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Invalid MongoDB configuration: %s", err))
}
clientOptions := options.Client().ApplyURI(connectionString)
mongoClient, err := mongo.Connect(context.Background(), clientOptions)
if err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Invalid MongoDB connection string: %s", err))
}
return &MongoStoreClient{
client: mongoClient,
context: context.Background(),
database: config.Database,
}
}
// WithContext returns a shallow copy of c with its context changed
// to ctx. The provided ctx must be non-nil.
func (c *MongoStoreClient) WithContext(ctx context.Context) *MongoStoreClient {
if ctx == nil {
panic("nil context")
}
c2 := new(MongoStoreClient)
*c2 = *c
c2.context = ctx
return c2
}
// EnsureIndexes exist for the MongoDB collection. EnsureIndexes uses the Background() context, in order
// to pass back the MongoDB errors, rather than any context errors.
func (c *MongoStoreClient) EnsureIndexes() error {
medtronicIndexDateTime, _ := time.Parse(medtronicDateFormat, medtronicIndexDate)
dataIndexes := []mongo.IndexModel{
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "deviceModel", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "type", Value: "upload"},
{Key: "deviceModel", Value: bson.M{
"$exists": true,
}},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "origin.payload.device.manufacturer", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("HasMedtronicLoopDataAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "origin.payload.device.manufacturer", Value: "Medtronic"},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "time", Value: -1}, {Key: "type", Value: 1}},
Options: options.Index().
SetName("UserIdTimeWeighted_v2").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
},
),
},
}
if _, err := dataCollection(c).Indexes().CreateMany(context.Background(), dataIndexes); err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Unable to create indexes: %s", err))
}
// Not sure if all these indexes need to also be on the deviceDataSets collection.
dataSetsIndexes := []mongo.IndexModel{
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "deviceModel", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "type", Value: "upload"},
{Key: "deviceModel", Value: bson.M{
"$exists": true,
}},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "origin.payload.device.manufacturer", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("HasMedtronicLoopDataAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "origin.payload.device.manufacturer", Value: "Medtronic"},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "time", Value: -1}, {Key: "type", Value: 1}},
Options: options.Index().
SetName("UserIdTimeWeighted_v2").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
},
),
},
}
if _, err := dataSetsCollection(c).Indexes().CreateMany(context.Background(), dataSetsIndexes); err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Unable to create indexes: %s", err))
}
return nil
}
func dataCollection(msc *MongoStoreClient) *mongo.Collection {
return msc.client.Database(msc.database).Collection(dataCollectionName)
}
func dataSetsCollection(msc *MongoStoreClient) *mongo.Collection {
return msc.client.Database(msc.database).Collection(dataSetsCollectionName)
}
// generateMongoQuery takes in a number of parameters and constructs a mongo query
// to retrieve objects from the Tidepool database. It is used by the router.Add("GET", "/{userID}"
// endpoint, which implements the Tide-whisperer API. See that function for further documentation
// on parameters
func generateMongoQuery(p *Params) bson.M {
groupDataQuery := bson.M{
"_userId": p.UserID,
"_active": true}
//if optional parameters are present, then add them to the query
if len(p.Types) > 0 && p.Types[0] != "" {
groupDataQuery["type"] = bson.M{"$in": p.Types}
}
if len(p.SubTypes) > 0 && p.SubTypes[0] != "" {
groupDataQuery["subType"] = bson.M{"$in": p.SubTypes}
}
// The Golang implementation of time.RFC3339Nano does not use a fixed number of digits after the
// decimal point and therefore is not reliably sortable. And so we use our own custom format for
// database range queries that will properly sort any data with time stored as an ISO string.
// See https://github.com/golang/go/issues/19635
if !p.Date.Start.IsZero() && !p.Date.End.IsZero() {
groupDataQuery["time"] = bson.M{"$gte": p.Date.Start, "$lte": p.Date.End}
} else if !p.Date.Start.IsZero() {
groupDataQuery["time"] = bson.M{"$gte": p.Date.Start}
} else if !p.Date.End.IsZero() {
groupDataQuery["time"] = bson.M{"$lte": p.Date.End}
}
if !p.Carelink {
groupDataQuery["source"] = bson.M{"$ne": "carelink"}
}
if p.DeviceID != "" {
groupDataQuery["deviceId"] = p.DeviceID
}
// If we have an explicit upload ID to filter by, we don't need or want to apply any further
// data source-based filtering
if p.UploadID != "" {
groupDataQuery["uploadId"] = p.UploadID
} else {
andQuery := []bson.M{}
if !p.Dexcom && p.DexcomDataSource != nil {
dexcomQuery := []bson.M{
{"type": bson.M{"$ne": "cbg"}},
{"uploadId": bson.M{"$in": p.DexcomDataSource["dataSetIds"]}},
}
// more redundant OR query for multiple date field types
earliestDataTime := p.DexcomDataSource["earliestDataTime"].(primitive.DateTime).Time().UTC()
dexcomQuery = append(dexcomQuery,
bson.M{"time": bson.M{"$lt": earliestDataTime}},
)
latestDataTime := p.DexcomDataSource["latestDataTime"].(primitive.DateTime).Time().UTC()
dexcomQuery = append(dexcomQuery,
bson.M{"time": bson.M{"$gt": latestDataTime}},
)
andQuery = append(andQuery, bson.M{"$or": dexcomQuery})
}
if !p.Medtronic && len(p.MedtronicUploadIds) > 0 {
medtronicDateTime, err := time.Parse(medtronicDateFormat, p.MedtronicDate)
if err != nil {
medtronicDateTime, _ = time.Parse(time.RFC3339, p.MedtronicDate)
}
medtronicQuery := []bson.M{
{"time": bson.M{"$lt": medtronicDateTime}},
{"type": bson.M{"$nin": []string{"basal", "bolus", "cbg"}}},
{"uploadId": bson.M{"$nin": p.MedtronicUploadIds}},
}
andQuery = append(andQuery, bson.M{"$or": medtronicQuery})
}
if len(andQuery) > 0 {
groupDataQuery["$and"] = andQuery
}
}
return groupDataQuery
}
// Ping the MongoDB database
func (c *MongoStoreClient) Ping() error {
// do we have a store session
return c.client.Ping(c.context, nil)
}
// Disconnect from the MongoDB database
func (c *MongoStoreClient) Disconnect() error {
return c.client.Disconnect(c.context)
}
// HasMedtronicDirectData - check whether the userID has Medtronic data that has been uploaded via Uploader
func (c *MongoStoreClient) HasMedtronicDirectData(userID string) (bool, error) {
if userID == "" {
return false, errors.New("user id is missing")
}
query := bson.M{
"_userId": userID,
"type": "upload",
"_state": "closed",
"_active": true,
"deletedTime": bson.M{
"$exists": false,
},
"deviceManufacturers": "Medtronic",
}
// Try reading from both collections until migration from type=upload from
// deviceData to deviceDataSets is complete.
err := dataCollection(c).FindOne(c.context, query).Err()
if err != nil && err != mongo.ErrNoDocuments {
return false, err
}
if err == nil {
return true, nil
}
err = dataSetsCollection(c).FindOne(c.context, query).Err()
if err != nil && err != mongo.ErrNoDocuments {
return false, err
}
if err == mongo.ErrNoDocuments {
return false, nil
}
return err == nil, err
}
// GetDexcomDataSource - get
func (c *MongoStoreClient) GetDexcomDataSource(userID string) (bson.M, error) {
if userID == "" {
return nil, errors.New("user id is missing")
}
// `earliestDataTime` and `latestDataTime` are bson.Date fields. Internally, they are int64's
// so if they exist, the must be set to something, even if 0 (ie Unix epoch)
query := bson.M{
"userId": userID,
"providerType": "oauth",
"providerName": "dexcom",
"dataSetIds": bson.M{
"$exists": true,
"$not": bson.M{
"$size": 0,
},
},
"earliestDataTime": bson.M{
"$exists": true,
},
"latestDataTime": bson.M{
"$exists": true,
},
}
dataSource := bson.M{}
err := c.client.Database("tidepool").Collection("data_sources").FindOne(c.context, query).Decode(&dataSource)
if err == mongo.ErrNoDocuments {
return nil, nil
}
if err != nil {
return nil, err
}
return dataSource, nil
}
// HasMedtronicLoopDataAfter checks the database to see if Loop data exists for `userID` that originated
// from a Medtronic device after `date`
func (c *MongoStoreClient) HasMedtronicLoopDataAfter(userID string, date string) (bool, error) {
if userID == "" {
return false, errors.New("user id is missing")
}
if date == "" {
return false, errors.New("date is missing")
}
dateTime, err := time.Parse(medtronicDateFormat, date)
if err != nil {
dateTime, err = time.Parse(time.RFC3339, date)
}
if err != nil {
return false, errors.New("date is invalid")
}
opts := options.FindOne()
query := bson.M{
"_active": true,
"_userId": userID,
"time": bson.M{"$gte": dateTime},
"origin.payload.device.manufacturer": "Medtronic",
}
err = dataCollection(c).FindOne(c.context, query, opts).Err()
if err == nil {
return true, nil
}
if err != nil && err != mongo.ErrNoDocuments {
return false, err
}
err = dataSetsCollection(c).FindOne(c.context, query, opts).Err()
if err == mongo.ErrNoDocuments {
return false, nil | // GetLoopableMedtronicDirectUploadIdsAfter returns all Upload IDs for `userID` where Loop data was found
// for a Medtronic device after `date`.
func (c *MongoStoreClient) GetLoopableMedtronicDirectUploadIdsAfter(userID string, date string) ([]string, error) {
if userID == "" {
return nil, errors.New("user id is missing")
}
if date == "" {
return nil, errors.New("date is missing")
}
dateTime, err := time.Parse(medtronicDateFormat, date)
if err != nil {
dateTime, err = time.Parse(time.RFC3339, date)
}
if err != nil {
return nil, errors.New("date is invalid")
}
opts := options.Find()
opts.SetHint("GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime")
opts.SetProjection(bson.M{"_id": 0, "uploadId": 1})
query := bson.M{
"_active": true,
"_userId": userID,
"time": bson.M{"$gte": dateTime},
"type": "upload", // redundant since all types in collection is deviceDataSets is upload but just leaving the original query here.
"deviceModel": bson.M{"$in": []string{"523", "523K", "554", "723", "723K", "754"}},
}
var objects []struct {
UploadID string `bson:"uploadId"`
}
cursor, err := dataSetsCollection(c).Find(c.context, query, opts)
if err != nil {
return nil, err
}
defer cursor.Close(c.context)
err = cursor.All(c.context, &objects)
if err != nil {
return nil, err
}
uploadIds := make([]string, len(objects))
for index, object := range objects {
uploadIds[index] = object.UploadID
}
return uploadIds, nil
}
// GetDeviceData returns all the device data for a user
func (c *MongoStoreClient) GetDeviceData(p *Params) (StorageIterator, error) {
// _schemaVersion is still in the list of fields to remove. Although we don't query for it, data can still exist for it
// until BACK-1281 is done.
removeFieldsForReturn := bson.M{"_id": 0, "_userId": 0, "_groupId": 0, "_version": 0, "_active": 0, "_schemaVersion": 0, "createdTime": 0, "modifiedTime": 0, "_migrationMarker": 0}
if p.Latest {
latest := &latestIterator{pos: -1}
var typeRanges []string
if len(p.Types) > 0 && p.Types[0] != "" {
typeRanges = p.Types
} else {
typeRanges = []string{"physicalActivity", "basal", "cbg", "smbg", "bloodKetone", "bolus", "wizard", "deviceEvent", "food", "insulin", "cgmSettings", "pumpSettings", "reportedState", "upload"}
}
var err error
for _, theType := range typeRanges {
query := generateMongoQuery(p)
query["type"] = theType
opts := options.FindOne().SetProjection(removeFieldsForReturn).SetSort(bson.M{"time": -1})
// collections to search. stop at first collection that has data.
collection := dataCollection(c)
if theType == "upload" {
// Uploads are only in the deviceDataSets collection after migration completes.
collection = dataSetsCollection(c)
}
result, resultErr := collection.
FindOne(c.context, query, opts).
DecodeBytes()
if resultErr != nil {
if resultErr == mongo.ErrNoDocuments {
continue
}
err = resultErr
break
}
latest.results = append(latest.results, result)
}
return latest, err
}
opts := options.Find().SetProjection(removeFieldsForReturn)
// If query only needs to read from one collection use the collection directly.
switch {
case len(p.Types) == 1 && p.Types[0] == "upload":
return dataSetsCollection(c).Find(c.context, generateMongoQuery(p), opts)
// Have to check for empty string as sometimes that is the type sent.
case len(p.Types) > 0 && !contains("upload", p.Types) && p.Types[0] != "":
return dataCollection(c).Find(c.context, generateMongoQuery(p), opts)
}
// Otherwise query needs to read from both deviceData and deviceDataSets collection.
dataIter, err := dataCollection(c).Find(c.context, generateMongoQuery(p), opts)
if err != nil {
return nil, err
}
dataSetIter, err := dataSetsCollection(c).Find(c.context, generateMongoQuery(p), opts)
if err != nil {
return nil, err
}
return &multiStorageIterator{
iters: []StorageIterator{
dataIter,
dataSetIter,
},
}, nil
}
func (l *latestIterator) Next(context.Context) bool {
l.pos++
return l.pos < len(l.results)
}
func (l *latestIterator) Decode(result interface{}) error {
return bson.Unmarshal(l.results[l.pos], result)
}
func (l *latestIterator) Close(context.Context) error {
return nil
}
func (l *multiStorageIterator) Next(ctx context.Context) bool {
if l.currentIterIdx >= len(l.iters) {
return false
}
hasNext := l.iters[l.currentIterIdx].Next(ctx)
if hasNext {
return true
}
l.currentIterIdx++
return l.Next(ctx)
}
func (l *multiStorageIterator) Decode(result interface{}) error {
if l.currentIterIdx >= len(l.iters) {
return io.EOF
}
return l.iters[l.currentIterIdx].Decode(result)
}
func (l *multiStorageIterator) Close(ctx context.Context) error {
for _, iter := range l.iters {
if err := iter.Close(ctx); err != nil {
return err
}
}
return nil
}
func contains(needle string, haystack []string) bool {
for _, x := range haystack {
if needle == x {
return true
}
}
return false
} | }
return err == nil, err
}
| random_line_split |
store.go | package store
import (
"context"
"errors"
"fmt"
"io"
"log"
"net/url"
"strconv"
"strings"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
tpMongo "github.com/tidepool-org/go-common/clients/mongo"
)
const (
dataCollectionName = "deviceData"
dataSetsCollectionName = "deviceDataSets" // all datum with type == "upload" go in this collection as opposed to the deviceData collection. These act as the root/parent for all other data.
dataStoreAPIPrefix = "api/data/store "
RFC3339NanoSortable = "2006-01-02T15:04:05.00000000Z07:00"
medtronicDateFormat = "2006-01-02"
medtronicIndexDate = "2017-09-01"
)
type (
// StorageIterator - Interface for the query iterator
StorageIterator interface {
Next(context.Context) bool
Decode(interface{}) error
Close(context.Context) error
}
// Storage - Interface for our storage layer
Storage interface {
Close()
Ping() error
GetDeviceData(p *Params) StorageIterator
}
// MongoStoreClient - Mongo Storage Client
MongoStoreClient struct {
client *mongo.Client
context context.Context
database string
}
// SchemaVersion struct
SchemaVersion struct {
Minimum int
Maximum int
}
// Params struct
Params struct {
UserID string
Types []string
SubTypes []string
Date
*SchemaVersion
Carelink bool
Dexcom bool
DexcomDataSource bson.M
DeviceID string
Latest bool
Medtronic bool
MedtronicDate string
MedtronicUploadIds []string
UploadID string
}
// Date struct
Date struct {
Start time.Time
End time.Time
}
latestIterator struct {
results []bson.Raw
pos int
}
// multiStorageIterator is a StorageIterator reads from multiple iterators
// until there is no more data this is needed in the case that we are
// reading multiple types and need to read both uploads and data.
multiStorageIterator struct {
iters []StorageIterator
currentIterIdx int
}
)
func cleanDateString(dateString string) (time.Time, error) {
date := time.Time{}
if dateString == "" {
return date, nil
}
date, err := time.Parse(time.RFC3339Nano, dateString)
if err != nil {
return date, err
}
return date, nil
}
// GetParams parses a URL to set parameters
func GetParams(q url.Values, schema *SchemaVersion) (*Params, error) {
startDate, err := cleanDateString(q.Get("startDate"))
if err != nil {
return nil, err
}
endDate, err := cleanDateString(q.Get("endDate"))
if err != nil {
return nil, err
}
carelink := false
if values, ok := q["carelink"]; ok {
if len(values) < 1 {
return nil, errors.New("carelink parameter not valid")
}
carelink, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("carelink parameter not valid")
}
}
dexcom := false
if values, ok := q["dexcom"]; ok {
if len(values) < 1 {
return nil, errors.New("dexcom parameter not valid")
}
dexcom, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("dexcom parameter not valid")
}
}
latest := false
if values, ok := q["latest"]; ok {
if len(values) < 1 {
return nil, errors.New("latest parameter not valid")
}
latest, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("latest parameter not valid")
}
}
medtronic := false
if values, ok := q["medtronic"]; ok {
if len(values) < 1 {
return nil, errors.New("medtronic parameter not valid")
}
medtronic, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("medtronic parameter not valid")
}
}
p := &Params{
UserID: q.Get(":userID"),
DeviceID: q.Get("deviceId"),
UploadID: q.Get("uploadId"),
//the query params for type and subtype can contain multiple values seperated
//by a comma e.g. "type=smbg,cbg" so split them out into an array of values
Types: strings.Split(q.Get("type"), ","),
SubTypes: strings.Split(q.Get("subType"), ","),
Date: Date{startDate, endDate},
SchemaVersion: schema,
Carelink: carelink,
Dexcom: dexcom,
Latest: latest,
Medtronic: medtronic,
}
return p, nil
}
// NewMongoStoreClient creates a new MongoStoreClient
func NewMongoStoreClient(config *tpMongo.Config) *MongoStoreClient {
connectionString, err := config.ToConnectionString()
if err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Invalid MongoDB configuration: %s", err))
}
clientOptions := options.Client().ApplyURI(connectionString)
mongoClient, err := mongo.Connect(context.Background(), clientOptions)
if err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Invalid MongoDB connection string: %s", err))
}
return &MongoStoreClient{
client: mongoClient,
context: context.Background(),
database: config.Database,
}
}
// WithContext returns a shallow copy of c with its context changed
// to ctx. The provided ctx must be non-nil.
func (c *MongoStoreClient) WithContext(ctx context.Context) *MongoStoreClient {
if ctx == nil {
panic("nil context")
}
c2 := new(MongoStoreClient)
*c2 = *c
c2.context = ctx
return c2
}
// EnsureIndexes exist for the MongoDB collection. EnsureIndexes uses the Background() context, in order
// to pass back the MongoDB errors, rather than any context errors.
func (c *MongoStoreClient) EnsureIndexes() error {
medtronicIndexDateTime, _ := time.Parse(medtronicDateFormat, medtronicIndexDate)
dataIndexes := []mongo.IndexModel{
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "deviceModel", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "type", Value: "upload"},
{Key: "deviceModel", Value: bson.M{
"$exists": true,
}},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "origin.payload.device.manufacturer", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("HasMedtronicLoopDataAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "origin.payload.device.manufacturer", Value: "Medtronic"},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "time", Value: -1}, {Key: "type", Value: 1}},
Options: options.Index().
SetName("UserIdTimeWeighted_v2").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
},
),
},
}
if _, err := dataCollection(c).Indexes().CreateMany(context.Background(), dataIndexes); err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Unable to create indexes: %s", err))
}
// Not sure if all these indexes need to also be on the deviceDataSets collection.
dataSetsIndexes := []mongo.IndexModel{
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "deviceModel", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "type", Value: "upload"},
{Key: "deviceModel", Value: bson.M{
"$exists": true,
}},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "origin.payload.device.manufacturer", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("HasMedtronicLoopDataAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "origin.payload.device.manufacturer", Value: "Medtronic"},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "time", Value: -1}, {Key: "type", Value: 1}},
Options: options.Index().
SetName("UserIdTimeWeighted_v2").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
},
),
},
}
if _, err := dataSetsCollection(c).Indexes().CreateMany(context.Background(), dataSetsIndexes); err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Unable to create indexes: %s", err))
}
return nil
}
func dataCollection(msc *MongoStoreClient) *mongo.Collection {
return msc.client.Database(msc.database).Collection(dataCollectionName)
}
func dataSetsCollection(msc *MongoStoreClient) *mongo.Collection {
return msc.client.Database(msc.database).Collection(dataSetsCollectionName)
}
// generateMongoQuery takes in a number of parameters and constructs a mongo query
// to retrieve objects from the Tidepool database. It is used by the router.Add("GET", "/{userID}"
// endpoint, which implements the Tide-whisperer API. See that function for further documentation
// on parameters
func generateMongoQuery(p *Params) bson.M {
groupDataQuery := bson.M{
"_userId": p.UserID,
"_active": true}
//if optional parameters are present, then add them to the query
if len(p.Types) > 0 && p.Types[0] != "" {
groupDataQuery["type"] = bson.M{"$in": p.Types}
}
if len(p.SubTypes) > 0 && p.SubTypes[0] != "" {
groupDataQuery["subType"] = bson.M{"$in": p.SubTypes}
}
// The Golang implementation of time.RFC3339Nano does not use a fixed number of digits after the
// decimal point and therefore is not reliably sortable. And so we use our own custom format for
// database range queries that will properly sort any data with time stored as an ISO string.
// See https://github.com/golang/go/issues/19635
if !p.Date.Start.IsZero() && !p.Date.End.IsZero() {
groupDataQuery["time"] = bson.M{"$gte": p.Date.Start, "$lte": p.Date.End}
} else if !p.Date.Start.IsZero() {
groupDataQuery["time"] = bson.M{"$gte": p.Date.Start}
} else if !p.Date.End.IsZero() {
groupDataQuery["time"] = bson.M{"$lte": p.Date.End}
}
if !p.Carelink {
groupDataQuery["source"] = bson.M{"$ne": "carelink"}
}
if p.DeviceID != "" {
groupDataQuery["deviceId"] = p.DeviceID
}
// If we have an explicit upload ID to filter by, we don't need or want to apply any further
// data source-based filtering
if p.UploadID != "" {
groupDataQuery["uploadId"] = p.UploadID
} else {
andQuery := []bson.M{}
if !p.Dexcom && p.DexcomDataSource != nil {
dexcomQuery := []bson.M{
{"type": bson.M{"$ne": "cbg"}},
{"uploadId": bson.M{"$in": p.DexcomDataSource["dataSetIds"]}},
}
// more redundant OR query for multiple date field types
earliestDataTime := p.DexcomDataSource["earliestDataTime"].(primitive.DateTime).Time().UTC()
dexcomQuery = append(dexcomQuery,
bson.M{"time": bson.M{"$lt": earliestDataTime}},
)
latestDataTime := p.DexcomDataSource["latestDataTime"].(primitive.DateTime).Time().UTC()
dexcomQuery = append(dexcomQuery,
bson.M{"time": bson.M{"$gt": latestDataTime}},
)
andQuery = append(andQuery, bson.M{"$or": dexcomQuery})
}
if !p.Medtronic && len(p.MedtronicUploadIds) > 0 {
medtronicDateTime, err := time.Parse(medtronicDateFormat, p.MedtronicDate)
if err != nil {
medtronicDateTime, _ = time.Parse(time.RFC3339, p.MedtronicDate)
}
medtronicQuery := []bson.M{
{"time": bson.M{"$lt": medtronicDateTime}},
{"type": bson.M{"$nin": []string{"basal", "bolus", "cbg"}}},
{"uploadId": bson.M{"$nin": p.MedtronicUploadIds}},
}
andQuery = append(andQuery, bson.M{"$or": medtronicQuery})
}
if len(andQuery) > 0 {
groupDataQuery["$and"] = andQuery
}
}
return groupDataQuery
}
// Ping the MongoDB database
func (c *MongoStoreClient) Ping() error {
// do we have a store session
return c.client.Ping(c.context, nil)
}
// Disconnect from the MongoDB database
func (c *MongoStoreClient) Disconnect() error {
return c.client.Disconnect(c.context)
}
// HasMedtronicDirectData - check whether the userID has Medtronic data that has been uploaded via Uploader
func (c *MongoStoreClient) HasMedtronicDirectData(userID string) (bool, error) {
if userID == "" {
return false, errors.New("user id is missing")
}
query := bson.M{
"_userId": userID,
"type": "upload",
"_state": "closed",
"_active": true,
"deletedTime": bson.M{
"$exists": false,
},
"deviceManufacturers": "Medtronic",
}
// Try reading from both collections until migration from type=upload from
// deviceData to deviceDataSets is complete.
err := dataCollection(c).FindOne(c.context, query).Err()
if err != nil && err != mongo.ErrNoDocuments {
return false, err
}
if err == nil {
return true, nil
}
err = dataSetsCollection(c).FindOne(c.context, query).Err()
if err != nil && err != mongo.ErrNoDocuments {
return false, err
}
if err == mongo.ErrNoDocuments {
return false, nil
}
return err == nil, err
}
// GetDexcomDataSource - get
func (c *MongoStoreClient) GetDexcomDataSource(userID string) (bson.M, error) {
if userID == "" {
return nil, errors.New("user id is missing")
}
// `earliestDataTime` and `latestDataTime` are bson.Date fields. Internally, they are int64's
// so if they exist, the must be set to something, even if 0 (ie Unix epoch)
query := bson.M{
"userId": userID,
"providerType": "oauth",
"providerName": "dexcom",
"dataSetIds": bson.M{
"$exists": true,
"$not": bson.M{
"$size": 0,
},
},
"earliestDataTime": bson.M{
"$exists": true,
},
"latestDataTime": bson.M{
"$exists": true,
},
}
dataSource := bson.M{}
err := c.client.Database("tidepool").Collection("data_sources").FindOne(c.context, query).Decode(&dataSource)
if err == mongo.ErrNoDocuments {
return nil, nil
}
if err != nil {
return nil, err
}
return dataSource, nil
}
// HasMedtronicLoopDataAfter checks the database to see if Loop data exists for `userID` that originated
// from a Medtronic device after `date`
func (c *MongoStoreClient) HasMedtronicLoopDataAfter(userID string, date string) (bool, error) {
if userID == "" {
return false, errors.New("user id is missing")
}
if date == "" {
return false, errors.New("date is missing")
}
dateTime, err := time.Parse(medtronicDateFormat, date)
if err != nil {
dateTime, err = time.Parse(time.RFC3339, date)
}
if err != nil {
return false, errors.New("date is invalid")
}
opts := options.FindOne()
query := bson.M{
"_active": true,
"_userId": userID,
"time": bson.M{"$gte": dateTime},
"origin.payload.device.manufacturer": "Medtronic",
}
err = dataCollection(c).FindOne(c.context, query, opts).Err()
if err == nil {
return true, nil
}
if err != nil && err != mongo.ErrNoDocuments {
return false, err
}
err = dataSetsCollection(c).FindOne(c.context, query, opts).Err()
if err == mongo.ErrNoDocuments {
return false, nil
}
return err == nil, err
}
// GetLoopableMedtronicDirectUploadIdsAfter returns all Upload IDs for `userID` where Loop data was found
// for a Medtronic device after `date`.
func (c *MongoStoreClient) GetLoopableMedtronicDirectUploadIdsAfter(userID string, date string) ([]string, error) {
if userID == "" {
return nil, errors.New("user id is missing")
}
if date == "" {
return nil, errors.New("date is missing")
}
dateTime, err := time.Parse(medtronicDateFormat, date)
if err != nil {
dateTime, err = time.Parse(time.RFC3339, date)
}
if err != nil {
return nil, errors.New("date is invalid")
}
opts := options.Find()
opts.SetHint("GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime")
opts.SetProjection(bson.M{"_id": 0, "uploadId": 1})
query := bson.M{
"_active": true,
"_userId": userID,
"time": bson.M{"$gte": dateTime},
"type": "upload", // redundant since all types in collection is deviceDataSets is upload but just leaving the original query here.
"deviceModel": bson.M{"$in": []string{"523", "523K", "554", "723", "723K", "754"}},
}
var objects []struct {
UploadID string `bson:"uploadId"`
}
cursor, err := dataSetsCollection(c).Find(c.context, query, opts)
if err != nil {
return nil, err
}
defer cursor.Close(c.context)
err = cursor.All(c.context, &objects)
if err != nil {
return nil, err
}
uploadIds := make([]string, len(objects))
for index, object := range objects {
uploadIds[index] = object.UploadID
}
return uploadIds, nil
}
// GetDeviceData returns all the device data for a user
func (c *MongoStoreClient) | (p *Params) (StorageIterator, error) {
// _schemaVersion is still in the list of fields to remove. Although we don't query for it, data can still exist for it
// until BACK-1281 is done.
removeFieldsForReturn := bson.M{"_id": 0, "_userId": 0, "_groupId": 0, "_version": 0, "_active": 0, "_schemaVersion": 0, "createdTime": 0, "modifiedTime": 0, "_migrationMarker": 0}
if p.Latest {
latest := &latestIterator{pos: -1}
var typeRanges []string
if len(p.Types) > 0 && p.Types[0] != "" {
typeRanges = p.Types
} else {
typeRanges = []string{"physicalActivity", "basal", "cbg", "smbg", "bloodKetone", "bolus", "wizard", "deviceEvent", "food", "insulin", "cgmSettings", "pumpSettings", "reportedState", "upload"}
}
var err error
for _, theType := range typeRanges {
query := generateMongoQuery(p)
query["type"] = theType
opts := options.FindOne().SetProjection(removeFieldsForReturn).SetSort(bson.M{"time": -1})
// collections to search. stop at first collection that has data.
collection := dataCollection(c)
if theType == "upload" {
// Uploads are only in the deviceDataSets collection after migration completes.
collection = dataSetsCollection(c)
}
result, resultErr := collection.
FindOne(c.context, query, opts).
DecodeBytes()
if resultErr != nil {
if resultErr == mongo.ErrNoDocuments {
continue
}
err = resultErr
break
}
latest.results = append(latest.results, result)
}
return latest, err
}
opts := options.Find().SetProjection(removeFieldsForReturn)
// If query only needs to read from one collection use the collection directly.
switch {
case len(p.Types) == 1 && p.Types[0] == "upload":
return dataSetsCollection(c).Find(c.context, generateMongoQuery(p), opts)
// Have to check for empty string as sometimes that is the type sent.
case len(p.Types) > 0 && !contains("upload", p.Types) && p.Types[0] != "":
return dataCollection(c).Find(c.context, generateMongoQuery(p), opts)
}
// Otherwise query needs to read from both deviceData and deviceDataSets collection.
dataIter, err := dataCollection(c).Find(c.context, generateMongoQuery(p), opts)
if err != nil {
return nil, err
}
dataSetIter, err := dataSetsCollection(c).Find(c.context, generateMongoQuery(p), opts)
if err != nil {
return nil, err
}
return &multiStorageIterator{
iters: []StorageIterator{
dataIter,
dataSetIter,
},
}, nil
}
func (l *latestIterator) Next(context.Context) bool {
l.pos++
return l.pos < len(l.results)
}
func (l *latestIterator) Decode(result interface{}) error {
return bson.Unmarshal(l.results[l.pos], result)
}
func (l *latestIterator) Close(context.Context) error {
return nil
}
func (l *multiStorageIterator) Next(ctx context.Context) bool {
if l.currentIterIdx >= len(l.iters) {
return false
}
hasNext := l.iters[l.currentIterIdx].Next(ctx)
if hasNext {
return true
}
l.currentIterIdx++
return l.Next(ctx)
}
func (l *multiStorageIterator) Decode(result interface{}) error {
if l.currentIterIdx >= len(l.iters) {
return io.EOF
}
return l.iters[l.currentIterIdx].Decode(result)
}
func (l *multiStorageIterator) Close(ctx context.Context) error {
for _, iter := range l.iters {
if err := iter.Close(ctx); err != nil {
return err
}
}
return nil
}
func contains(needle string, haystack []string) bool {
for _, x := range haystack {
if needle == x {
return true
}
}
return false
}
| GetDeviceData | identifier_name |
store.go | package store
import (
"context"
"errors"
"fmt"
"io"
"log"
"net/url"
"strconv"
"strings"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
tpMongo "github.com/tidepool-org/go-common/clients/mongo"
)
const (
dataCollectionName = "deviceData"
dataSetsCollectionName = "deviceDataSets" // all datum with type == "upload" go in this collection as opposed to the deviceData collection. These act as the root/parent for all other data.
dataStoreAPIPrefix = "api/data/store "
RFC3339NanoSortable = "2006-01-02T15:04:05.00000000Z07:00"
medtronicDateFormat = "2006-01-02"
medtronicIndexDate = "2017-09-01"
)
type (
// StorageIterator - Interface for the query iterator
StorageIterator interface {
Next(context.Context) bool
Decode(interface{}) error
Close(context.Context) error
}
// Storage - Interface for our storage layer
Storage interface {
Close()
Ping() error
GetDeviceData(p *Params) StorageIterator
}
// MongoStoreClient - Mongo Storage Client
MongoStoreClient struct {
client *mongo.Client
context context.Context
database string
}
// SchemaVersion struct
SchemaVersion struct {
Minimum int
Maximum int
}
// Params struct
Params struct {
UserID string
Types []string
SubTypes []string
Date
*SchemaVersion
Carelink bool
Dexcom bool
DexcomDataSource bson.M
DeviceID string
Latest bool
Medtronic bool
MedtronicDate string
MedtronicUploadIds []string
UploadID string
}
// Date struct
Date struct {
Start time.Time
End time.Time
}
latestIterator struct {
results []bson.Raw
pos int
}
// multiStorageIterator is a StorageIterator reads from multiple iterators
// until there is no more data this is needed in the case that we are
// reading multiple types and need to read both uploads and data.
multiStorageIterator struct {
iters []StorageIterator
currentIterIdx int
}
)
func cleanDateString(dateString string) (time.Time, error) {
date := time.Time{}
if dateString == "" {
return date, nil
}
date, err := time.Parse(time.RFC3339Nano, dateString)
if err != nil {
return date, err
}
return date, nil
}
// GetParams parses a URL to set parameters
func GetParams(q url.Values, schema *SchemaVersion) (*Params, error) {
startDate, err := cleanDateString(q.Get("startDate"))
if err != nil {
return nil, err
}
endDate, err := cleanDateString(q.Get("endDate"))
if err != nil {
return nil, err
}
carelink := false
if values, ok := q["carelink"]; ok {
if len(values) < 1 {
return nil, errors.New("carelink parameter not valid")
}
carelink, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("carelink parameter not valid")
}
}
dexcom := false
if values, ok := q["dexcom"]; ok {
if len(values) < 1 {
return nil, errors.New("dexcom parameter not valid")
}
dexcom, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("dexcom parameter not valid")
}
}
latest := false
if values, ok := q["latest"]; ok {
if len(values) < 1 {
return nil, errors.New("latest parameter not valid")
}
latest, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("latest parameter not valid")
}
}
medtronic := false
if values, ok := q["medtronic"]; ok {
if len(values) < 1 |
medtronic, err = strconv.ParseBool(values[len(values)-1])
if err != nil {
return nil, errors.New("medtronic parameter not valid")
}
}
p := &Params{
UserID: q.Get(":userID"),
DeviceID: q.Get("deviceId"),
UploadID: q.Get("uploadId"),
//the query params for type and subtype can contain multiple values seperated
//by a comma e.g. "type=smbg,cbg" so split them out into an array of values
Types: strings.Split(q.Get("type"), ","),
SubTypes: strings.Split(q.Get("subType"), ","),
Date: Date{startDate, endDate},
SchemaVersion: schema,
Carelink: carelink,
Dexcom: dexcom,
Latest: latest,
Medtronic: medtronic,
}
return p, nil
}
// NewMongoStoreClient creates a new MongoStoreClient
func NewMongoStoreClient(config *tpMongo.Config) *MongoStoreClient {
connectionString, err := config.ToConnectionString()
if err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Invalid MongoDB configuration: %s", err))
}
clientOptions := options.Client().ApplyURI(connectionString)
mongoClient, err := mongo.Connect(context.Background(), clientOptions)
if err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Invalid MongoDB connection string: %s", err))
}
return &MongoStoreClient{
client: mongoClient,
context: context.Background(),
database: config.Database,
}
}
// WithContext returns a shallow copy of c with its context changed
// to ctx. The provided ctx must be non-nil.
func (c *MongoStoreClient) WithContext(ctx context.Context) *MongoStoreClient {
if ctx == nil {
panic("nil context")
}
c2 := new(MongoStoreClient)
*c2 = *c
c2.context = ctx
return c2
}
// EnsureIndexes exist for the MongoDB collection. EnsureIndexes uses the Background() context, in order
// to pass back the MongoDB errors, rather than any context errors.
func (c *MongoStoreClient) EnsureIndexes() error {
medtronicIndexDateTime, _ := time.Parse(medtronicDateFormat, medtronicIndexDate)
dataIndexes := []mongo.IndexModel{
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "deviceModel", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "type", Value: "upload"},
{Key: "deviceModel", Value: bson.M{
"$exists": true,
}},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "origin.payload.device.manufacturer", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("HasMedtronicLoopDataAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "origin.payload.device.manufacturer", Value: "Medtronic"},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "time", Value: -1}, {Key: "type", Value: 1}},
Options: options.Index().
SetName("UserIdTimeWeighted_v2").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
},
),
},
}
if _, err := dataCollection(c).Indexes().CreateMany(context.Background(), dataIndexes); err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Unable to create indexes: %s", err))
}
// Not sure if all these indexes need to also be on the deviceDataSets collection.
dataSetsIndexes := []mongo.IndexModel{
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "deviceModel", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "type", Value: "upload"},
{Key: "deviceModel", Value: bson.M{
"$exists": true,
}},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "origin.payload.device.manufacturer", Value: 1}, {Key: "fakefield", Value: 1}},
Options: options.Index().
SetName("HasMedtronicLoopDataAfter_v2_DateTime").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
{Key: "origin.payload.device.manufacturer", Value: "Medtronic"},
{Key: "time", Value: bson.M{
"$gte": medtronicIndexDateTime,
}},
},
),
},
{
Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "time", Value: -1}, {Key: "type", Value: 1}},
Options: options.Index().
SetName("UserIdTimeWeighted_v2").
SetBackground(true).
SetPartialFilterExpression(
bson.D{
{Key: "_active", Value: true},
},
),
},
}
if _, err := dataSetsCollection(c).Indexes().CreateMany(context.Background(), dataSetsIndexes); err != nil {
log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Unable to create indexes: %s", err))
}
return nil
}
func dataCollection(msc *MongoStoreClient) *mongo.Collection {
return msc.client.Database(msc.database).Collection(dataCollectionName)
}
func dataSetsCollection(msc *MongoStoreClient) *mongo.Collection {
return msc.client.Database(msc.database).Collection(dataSetsCollectionName)
}
// generateMongoQuery takes in a number of parameters and constructs a mongo query
// to retrieve objects from the Tidepool database. It is used by the router.Add("GET", "/{userID}"
// endpoint, which implements the Tide-whisperer API. See that function for further documentation
// on parameters
func generateMongoQuery(p *Params) bson.M {
groupDataQuery := bson.M{
"_userId": p.UserID,
"_active": true}
//if optional parameters are present, then add them to the query
if len(p.Types) > 0 && p.Types[0] != "" {
groupDataQuery["type"] = bson.M{"$in": p.Types}
}
if len(p.SubTypes) > 0 && p.SubTypes[0] != "" {
groupDataQuery["subType"] = bson.M{"$in": p.SubTypes}
}
// The Golang implementation of time.RFC3339Nano does not use a fixed number of digits after the
// decimal point and therefore is not reliably sortable. And so we use our own custom format for
// database range queries that will properly sort any data with time stored as an ISO string.
// See https://github.com/golang/go/issues/19635
if !p.Date.Start.IsZero() && !p.Date.End.IsZero() {
groupDataQuery["time"] = bson.M{"$gte": p.Date.Start, "$lte": p.Date.End}
} else if !p.Date.Start.IsZero() {
groupDataQuery["time"] = bson.M{"$gte": p.Date.Start}
} else if !p.Date.End.IsZero() {
groupDataQuery["time"] = bson.M{"$lte": p.Date.End}
}
if !p.Carelink {
groupDataQuery["source"] = bson.M{"$ne": "carelink"}
}
if p.DeviceID != "" {
groupDataQuery["deviceId"] = p.DeviceID
}
// If we have an explicit upload ID to filter by, we don't need or want to apply any further
// data source-based filtering
if p.UploadID != "" {
groupDataQuery["uploadId"] = p.UploadID
} else {
andQuery := []bson.M{}
if !p.Dexcom && p.DexcomDataSource != nil {
dexcomQuery := []bson.M{
{"type": bson.M{"$ne": "cbg"}},
{"uploadId": bson.M{"$in": p.DexcomDataSource["dataSetIds"]}},
}
// more redundant OR query for multiple date field types
earliestDataTime := p.DexcomDataSource["earliestDataTime"].(primitive.DateTime).Time().UTC()
dexcomQuery = append(dexcomQuery,
bson.M{"time": bson.M{"$lt": earliestDataTime}},
)
latestDataTime := p.DexcomDataSource["latestDataTime"].(primitive.DateTime).Time().UTC()
dexcomQuery = append(dexcomQuery,
bson.M{"time": bson.M{"$gt": latestDataTime}},
)
andQuery = append(andQuery, bson.M{"$or": dexcomQuery})
}
if !p.Medtronic && len(p.MedtronicUploadIds) > 0 {
medtronicDateTime, err := time.Parse(medtronicDateFormat, p.MedtronicDate)
if err != nil {
medtronicDateTime, _ = time.Parse(time.RFC3339, p.MedtronicDate)
}
medtronicQuery := []bson.M{
{"time": bson.M{"$lt": medtronicDateTime}},
{"type": bson.M{"$nin": []string{"basal", "bolus", "cbg"}}},
{"uploadId": bson.M{"$nin": p.MedtronicUploadIds}},
}
andQuery = append(andQuery, bson.M{"$or": medtronicQuery})
}
if len(andQuery) > 0 {
groupDataQuery["$and"] = andQuery
}
}
return groupDataQuery
}
// Ping the MongoDB database
func (c *MongoStoreClient) Ping() error {
// do we have a store session
return c.client.Ping(c.context, nil)
}
// Disconnect from the MongoDB database
func (c *MongoStoreClient) Disconnect() error {
return c.client.Disconnect(c.context)
}
// HasMedtronicDirectData - check whether the userID has Medtronic data that has been uploaded via Uploader
func (c *MongoStoreClient) HasMedtronicDirectData(userID string) (bool, error) {
if userID == "" {
return false, errors.New("user id is missing")
}
query := bson.M{
"_userId": userID,
"type": "upload",
"_state": "closed",
"_active": true,
"deletedTime": bson.M{
"$exists": false,
},
"deviceManufacturers": "Medtronic",
}
// Try reading from both collections until migration from type=upload from
// deviceData to deviceDataSets is complete.
err := dataCollection(c).FindOne(c.context, query).Err()
if err != nil && err != mongo.ErrNoDocuments {
return false, err
}
if err == nil {
return true, nil
}
err = dataSetsCollection(c).FindOne(c.context, query).Err()
if err != nil && err != mongo.ErrNoDocuments {
return false, err
}
if err == mongo.ErrNoDocuments {
return false, nil
}
return err == nil, err
}
// GetDexcomDataSource - get
func (c *MongoStoreClient) GetDexcomDataSource(userID string) (bson.M, error) {
if userID == "" {
return nil, errors.New("user id is missing")
}
// `earliestDataTime` and `latestDataTime` are bson.Date fields. Internally, they are int64's
// so if they exist, the must be set to something, even if 0 (ie Unix epoch)
query := bson.M{
"userId": userID,
"providerType": "oauth",
"providerName": "dexcom",
"dataSetIds": bson.M{
"$exists": true,
"$not": bson.M{
"$size": 0,
},
},
"earliestDataTime": bson.M{
"$exists": true,
},
"latestDataTime": bson.M{
"$exists": true,
},
}
dataSource := bson.M{}
err := c.client.Database("tidepool").Collection("data_sources").FindOne(c.context, query).Decode(&dataSource)
if err == mongo.ErrNoDocuments {
return nil, nil
}
if err != nil {
return nil, err
}
return dataSource, nil
}
// HasMedtronicLoopDataAfter checks the database to see if Loop data exists for `userID` that originated
// from a Medtronic device after `date`. `date` is accepted as "2006-01-02" or RFC3339.
func (c *MongoStoreClient) HasMedtronicLoopDataAfter(userID string, date string) (bool, error) {
	if userID == "" {
		return false, errors.New("user id is missing")
	}
	if date == "" {
		return false, errors.New("date is missing")
	}
	// Try the plain date layout first, then fall back to RFC3339.
	dateTime, err := time.Parse(medtronicDateFormat, date)
	if err != nil {
		dateTime, err = time.Parse(time.RFC3339, date)
	}
	if err != nil {
		return false, errors.New("date is invalid")
	}
	opts := options.FindOne()
	query := bson.M{
		"_active": true,
		"_userId": userID,
		"time":    bson.M{"$gte": dateTime},
		"origin.payload.device.manufacturer": "Medtronic",
	}
	// Check deviceData first; a hit short-circuits the second query.
	err = dataCollection(c).FindOne(c.context, query, opts).Err()
	if err == nil {
		return true, nil
	}
	if err != nil && err != mongo.ErrNoDocuments {
		return false, err
	}
	// Fall back to deviceDataSets.
	err = dataSetsCollection(c).FindOne(c.context, query, opts).Err()
	if err == mongo.ErrNoDocuments {
		return false, nil
	}
	return err == nil, err
}
// GetLoopableMedtronicDirectUploadIdsAfter returns all Upload IDs for `userID` where Loop data was found
// for a Medtronic device after `date`. `date` is accepted as "2006-01-02" or RFC3339.
func (c *MongoStoreClient) GetLoopableMedtronicDirectUploadIdsAfter(userID string, date string) ([]string, error) {
	if userID == "" {
		return nil, errors.New("user id is missing")
	}
	if date == "" {
		return nil, errors.New("date is missing")
	}
	// Try the plain date layout first, then fall back to RFC3339.
	dateTime, err := time.Parse(medtronicDateFormat, date)
	if err != nil {
		dateTime, err = time.Parse(time.RFC3339, date)
	}
	if err != nil {
		return nil, errors.New("date is invalid")
	}
	// Force the partial index created by EnsureIndexes; only uploadId is returned.
	opts := options.Find()
	opts.SetHint("GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime")
	opts.SetProjection(bson.M{"_id": 0, "uploadId": 1})
	query := bson.M{
		"_active": true,
		"_userId": userID,
		"time":    bson.M{"$gte": dateTime},
		"type":    "upload", // redundant since all types in collection is deviceDataSets is upload but just leaving the original query here.
		"deviceModel": bson.M{"$in": []string{"523", "523K", "554", "723", "723K", "754"}},
	}
	var objects []struct {
		UploadID string `bson:"uploadId"`
	}
	cursor, err := dataSetsCollection(c).Find(c.context, query, opts)
	if err != nil {
		return nil, err
	}
	defer cursor.Close(c.context)
	err = cursor.All(c.context, &objects)
	if err != nil {
		return nil, err
	}
	uploadIds := make([]string, len(objects))
	for index, object := range objects {
		uploadIds[index] = object.UploadID
	}
	return uploadIds, nil
}
// GetDeviceData returns all the device data for a user.
// When p.Latest is set, it instead returns the single most recent datum per
// requested type (or per a default list of types) via an in-memory iterator.
func (c *MongoStoreClient) GetDeviceData(p *Params) (StorageIterator, error) {
	// _schemaVersion is still in the list of fields to remove. Although we don't query for it, data can still exist for it
	// until BACK-1281 is done.
	removeFieldsForReturn := bson.M{"_id": 0, "_userId": 0, "_groupId": 0, "_version": 0, "_active": 0, "_schemaVersion": 0, "createdTime": 0, "modifiedTime": 0, "_migrationMarker": 0}
	if p.Latest {
		latest := &latestIterator{pos: -1}
		var typeRanges []string
		if len(p.Types) > 0 && p.Types[0] != "" {
			typeRanges = p.Types
		} else {
			// No explicit types requested: scan the full known set.
			typeRanges = []string{"physicalActivity", "basal", "cbg", "smbg", "bloodKetone", "bolus", "wizard", "deviceEvent", "food", "insulin", "cgmSettings", "pumpSettings", "reportedState", "upload"}
		}
		var err error
		for _, theType := range typeRanges {
			query := generateMongoQuery(p)
			query["type"] = theType
			// Newest first; take only the first match per type.
			opts := options.FindOne().SetProjection(removeFieldsForReturn).SetSort(bson.M{"time": -1})
			// collections to search. stop at first collection that has data.
			collection := dataCollection(c)
			if theType == "upload" {
				// Uploads are only in the deviceDataSets collection after migration completes.
				collection = dataSetsCollection(c)
			}
			result, resultErr := collection.
				FindOne(c.context, query, opts).
				DecodeBytes()
			if resultErr != nil {
				if resultErr == mongo.ErrNoDocuments {
					// No data of this type; try the next one.
					continue
				}
				err = resultErr
				break
			}
			latest.results = append(latest.results, result)
		}
		return latest, err
	}
	opts := options.Find().SetProjection(removeFieldsForReturn)
	// If query only needs to read from one collection use the collection directly.
	switch {
	case len(p.Types) == 1 && p.Types[0] == "upload":
		return dataSetsCollection(c).Find(c.context, generateMongoQuery(p), opts)
	// Have to check for empty string as sometimes that is the type sent.
	case len(p.Types) > 0 && !contains("upload", p.Types) && p.Types[0] != "":
		return dataCollection(c).Find(c.context, generateMongoQuery(p), opts)
	}
	// Otherwise query needs to read from both deviceData and deviceDataSets collection.
	dataIter, err := dataCollection(c).Find(c.context, generateMongoQuery(p), opts)
	if err != nil {
		return nil, err
	}
	dataSetIter, err := dataSetsCollection(c).Find(c.context, generateMongoQuery(p), opts)
	if err != nil {
		return nil, err
	}
	return &multiStorageIterator{
		iters: []StorageIterator{
			dataIter,
			dataSetIter,
		},
	}, nil
}
// Next advances the in-memory result cursor; the context is unused.
func (l *latestIterator) Next(context.Context) bool {
	l.pos++
	return l.pos < len(l.results)
}
// Decode unmarshals the current raw BSON document into result.
// Only valid after Next has returned true.
func (l *latestIterator) Decode(result interface{}) error {
	return bson.Unmarshal(l.results[l.pos], result)
}
// Close is a no-op; the results are held entirely in memory.
func (l *latestIterator) Close(context.Context) error {
	return nil
}
// Next advances to the next document, walking the underlying iterators in
// order until one yields a document or every iterator is exhausted.
func (l *multiStorageIterator) Next(ctx context.Context) bool {
	for l.currentIterIdx < len(l.iters) {
		if l.iters[l.currentIterIdx].Next(ctx) {
			return true
		}
		// Current iterator drained; move on to the next one.
		l.currentIterIdx++
	}
	return false
}
// Decode decodes the current document from the active iterator.
// Returns io.EOF once all iterators are exhausted.
func (l *multiStorageIterator) Decode(result interface{}) error {
	if l.currentIterIdx >= len(l.iters) {
		return io.EOF
	}
	return l.iters[l.currentIterIdx].Decode(result)
}
// Close closes every underlying iterator, stopping at the first error.
func (l *multiStorageIterator) Close(ctx context.Context) error {
	for _, iter := range l.iters {
		if err := iter.Close(ctx); err != nil {
			return err
		}
	}
	return nil
}
// contains reports whether needle appears in haystack.
func contains(needle string, haystack []string) bool {
	for i := 0; i < len(haystack); i++ {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
| {
return nil, errors.New("medtronic parameter not valid")
} | conditional_block |
store.go | package store
import (
"context"
"errors"
"fmt"
"io"
"log"
"net/url"
"strconv"
"strings"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
tpMongo "github.com/tidepool-org/go-common/clients/mongo"
)
const (
	// dataCollectionName holds all non-upload datum types.
	dataCollectionName     = "deviceData"
	dataSetsCollectionName = "deviceDataSets" // all datum with type == "upload" go in this collection as opposed to the deviceData collection. These act as the root/parent for all other data.
	// dataStoreAPIPrefix prefixes log messages emitted by this package.
	dataStoreAPIPrefix = "api/data/store "
	// RFC3339NanoSortable is a fixed-width time layout so ISO time strings sort
	// lexicographically (time.RFC3339Nano trims trailing zeros and does not).
	RFC3339NanoSortable = "2006-01-02T15:04:05.00000000Z07:00"
	// medtronicDateFormat is the plain-date layout accepted by the Medtronic queries.
	medtronicDateFormat = "2006-01-02"
	// medtronicIndexDate is the lower time bound baked into the partial indexes in EnsureIndexes.
	medtronicIndexDate = "2017-09-01"
)
type (
	// StorageIterator - Interface for the query iterator
	StorageIterator interface {
		Next(context.Context) bool
		Decode(interface{}) error
		Close(context.Context) error
	}
	// Storage - Interface for our storage layer
	Storage interface {
		Close()
		Ping() error
		GetDeviceData(p *Params) StorageIterator
	}
	// MongoStoreClient - Mongo Storage Client
	MongoStoreClient struct {
		client  *mongo.Client
		context context.Context
		// database is the name used by dataCollection/dataSetsCollection lookups.
		database string
	}
	// SchemaVersion struct - schema version bounds carried on Params.
	SchemaVersion struct {
		Minimum int
		Maximum int
	}
	// Params struct - query parameters parsed by GetParams and consumed by generateMongoQuery.
	Params struct {
		UserID   string
		Types    []string
		SubTypes []string
		Date
		*SchemaVersion
		Carelink         bool
		Dexcom           bool
		DexcomDataSource bson.M
		DeviceID         string
		Latest           bool
		Medtronic        bool
		MedtronicDate    string
		MedtronicUploadIds []string
		UploadID         string
	}
	// Date struct - optional time range applied to the "time" field.
	Date struct {
		Start time.Time
		End   time.Time
	}
	// latestIterator serves the per-type "latest" results from memory (see GetDeviceData).
	latestIterator struct {
		results []bson.Raw
		pos     int
	}
	// multiStorageIterator is a StorageIterator reads from multiple iterators
	// until there is no more data this is needed in the case that we are
	// reading multiple types and need to read both uploads and data.
	multiStorageIterator struct {
		iters          []StorageIterator
		currentIterIdx int
	}
)
// cleanDateString parses dateString as RFC3339Nano.
// An empty string yields the zero time with no error; a malformed string
// yields the zero time plus the parse error.
func cleanDateString(dateString string) (time.Time, error) {
	if dateString == "" {
		return time.Time{}, nil
	}
	// time.Parse already returns the zero time alongside any error.
	return time.Parse(time.RFC3339Nano, dateString)
}
// parseBoolQueryParam extracts the boolean query parameter `name` from q.
// When the parameter is repeated, the last occurrence wins (matching the
// original per-parameter stanzas). Absent parameters yield (false, nil);
// present-but-unparseable ones yield an error naming the parameter.
func parseBoolQueryParam(q url.Values, name string) (bool, error) {
	values, ok := q[name]
	if !ok {
		return false, nil
	}
	if len(values) < 1 {
		return false, errors.New(name + " parameter not valid")
	}
	result, err := strconv.ParseBool(values[len(values)-1])
	if err != nil {
		return false, errors.New(name + " parameter not valid")
	}
	return result, nil
}

// GetParams parses a URL to set parameters
func GetParams(q url.Values, schema *SchemaVersion) (*Params, error) {
	startDate, err := cleanDateString(q.Get("startDate"))
	if err != nil {
		return nil, err
	}
	endDate, err := cleanDateString(q.Get("endDate"))
	if err != nil {
		return nil, err
	}
	// The four boolean flags previously duplicated the same parse stanza;
	// behavior (including error messages) is unchanged.
	carelink, err := parseBoolQueryParam(q, "carelink")
	if err != nil {
		return nil, err
	}
	dexcom, err := parseBoolQueryParam(q, "dexcom")
	if err != nil {
		return nil, err
	}
	latest, err := parseBoolQueryParam(q, "latest")
	if err != nil {
		return nil, err
	}
	medtronic, err := parseBoolQueryParam(q, "medtronic")
	if err != nil {
		return nil, err
	}
	p := &Params{
		UserID:   q.Get(":userID"),
		DeviceID: q.Get("deviceId"),
		UploadID: q.Get("uploadId"),
		//the query params for type and subtype can contain multiple values seperated
		//by a comma e.g. "type=smbg,cbg" so split them out into an array of values
		Types:         strings.Split(q.Get("type"), ","),
		SubTypes:      strings.Split(q.Get("subType"), ","),
		Date:          Date{startDate, endDate},
		SchemaVersion: schema,
		Carelink:      carelink,
		Dexcom:        dexcom,
		Latest:        latest,
		Medtronic:     medtronic,
	}
	return p, nil
}
// NewMongoStoreClient creates a new MongoStoreClient connected per config.
// Any configuration or connection-string error is fatal (log.Fatal exits the process).
func NewMongoStoreClient(config *tpMongo.Config) *MongoStoreClient {
	connectionString, err := config.ToConnectionString()
	if err != nil {
		log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Invalid MongoDB configuration: %s", err))
	}
	clientOptions := options.Client().ApplyURI(connectionString)
	mongoClient, err := mongo.Connect(context.Background(), clientOptions)
	if err != nil {
		log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Invalid MongoDB connection string: %s", err))
	}
	return &MongoStoreClient{
		client:   mongoClient,
		context:  context.Background(),
		database: config.Database,
	}
}
// WithContext returns a shallow copy of c with its context changed
// to ctx. The provided ctx must be non-nil.
func (c *MongoStoreClient) WithContext(ctx context.Context) *MongoStoreClient {
	if ctx == nil {
		panic("nil context")
	}
	// Copy by value, then swap in the new context.
	clone := *c
	clone.context = ctx
	return &clone
}
// EnsureIndexes exist for the MongoDB collection. EnsureIndexes uses the Background() context, in order
// to pass back the MongoDB errors, rather than any context errors.
// The same index models are applied to both deviceData and deviceDataSets;
// previously the two identical []mongo.IndexModel literals were duplicated.
func (c *MongoStoreClient) EnsureIndexes() error {
	medtronicIndexDateTime, _ := time.Parse(medtronicDateFormat, medtronicIndexDate)
	indexes := []mongo.IndexModel{
		{
			Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "deviceModel", Value: 1}, {Key: "fakefield", Value: 1}},
			Options: options.Index().
				SetName("GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime").
				SetBackground(true).
				SetPartialFilterExpression(
					bson.D{
						{Key: "_active", Value: true},
						{Key: "type", Value: "upload"},
						{Key: "deviceModel", Value: bson.M{
							"$exists": true,
						}},
						{Key: "time", Value: bson.M{
							"$gte": medtronicIndexDateTime,
						}},
					},
				),
		},
		{
			Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "origin.payload.device.manufacturer", Value: 1}, {Key: "fakefield", Value: 1}},
			Options: options.Index().
				SetName("HasMedtronicLoopDataAfter_v2_DateTime").
				SetBackground(true).
				SetPartialFilterExpression(
					bson.D{
						{Key: "_active", Value: true},
						{Key: "origin.payload.device.manufacturer", Value: "Medtronic"},
						{Key: "time", Value: bson.M{
							"$gte": medtronicIndexDateTime,
						}},
					},
				),
		},
		{
			Keys: bson.D{{Key: "_userId", Value: 1}, {Key: "time", Value: -1}, {Key: "type", Value: 1}},
			Options: options.Index().
				SetName("UserIdTimeWeighted_v2").
				SetBackground(true).
				SetPartialFilterExpression(
					bson.D{
						{Key: "_active", Value: true},
					},
				),
		},
	}
	if _, err := dataCollection(c).Indexes().CreateMany(context.Background(), indexes); err != nil {
		log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Unable to create indexes: %s", err))
	}
	// Not sure if all these indexes need to also be on the deviceDataSets collection.
	if _, err := dataSetsCollection(c).Indexes().CreateMany(context.Background(), indexes); err != nil {
		log.Fatal(dataStoreAPIPrefix, fmt.Sprintf("Unable to create indexes: %s", err))
	}
	return nil
}
// dataCollection returns the deviceData collection for the client's database.
func dataCollection(msc *MongoStoreClient) *mongo.Collection {
	return msc.client.Database(msc.database).Collection(dataCollectionName)
}
// dataSetsCollection returns the deviceDataSets collection for the client's database.
func dataSetsCollection(msc *MongoStoreClient) *mongo.Collection {
	return msc.client.Database(msc.database).Collection(dataSetsCollectionName)
}
// generateMongoQuery takes in a number of parameters and constructs a mongo query
// to retrieve objects from the Tidepool database. It is used by the router.Add("GET", "/{userID}"
// endpoint, which implements the Tide-whisperer API. See that function for further documentation
// on parameters
func generateMongoQuery(p *Params) bson.M {
	groupDataQuery := bson.M{
		"_userId": p.UserID,
		"_active": true}
	//if optional parameters are present, then add them to the query
	if len(p.Types) > 0 && p.Types[0] != "" {
		groupDataQuery["type"] = bson.M{"$in": p.Types}
	}
	if len(p.SubTypes) > 0 && p.SubTypes[0] != "" {
		groupDataQuery["subType"] = bson.M{"$in": p.SubTypes}
	}
	// The Golang implementation of time.RFC3339Nano does not use a fixed number of digits after the
	// decimal point and therefore is not reliably sortable. And so we use our own custom format for
	// database range queries that will properly sort any data with time stored as an ISO string.
	// See https://github.com/golang/go/issues/19635
	if !p.Date.Start.IsZero() && !p.Date.End.IsZero() {
		groupDataQuery["time"] = bson.M{"$gte": p.Date.Start, "$lte": p.Date.End}
	} else if !p.Date.Start.IsZero() {
		groupDataQuery["time"] = bson.M{"$gte": p.Date.Start}
	} else if !p.Date.End.IsZero() {
		groupDataQuery["time"] = bson.M{"$lte": p.Date.End}
	}
	if !p.Carelink {
		// Exclude Carelink-sourced data unless explicitly requested.
		groupDataQuery["source"] = bson.M{"$ne": "carelink"}
	}
	if p.DeviceID != "" {
		groupDataQuery["deviceId"] = p.DeviceID
	}
	// If we have an explicit upload ID to filter by, we don't need or want to apply any further
	// data source-based filtering
	if p.UploadID != "" {
		groupDataQuery["uploadId"] = p.UploadID
	} else {
		andQuery := []bson.M{}
		if !p.Dexcom && p.DexcomDataSource != nil {
			// Exclude cbg data belonging to the Dexcom data source's upload
			// sets, except outside its [earliest, latest] data window.
			dexcomQuery := []bson.M{
				{"type": bson.M{"$ne": "cbg"}},
				{"uploadId": bson.M{"$in": p.DexcomDataSource["dataSetIds"]}},
			}
			// more redundant OR query for multiple date field types
			// NOTE(review): these type assertions panic if the stored fields are
			// not primitive.DateTime — GetDexcomDataSource's query guarantees the
			// fields exist, but not their BSON type; confirm upstream schema.
			earliestDataTime := p.DexcomDataSource["earliestDataTime"].(primitive.DateTime).Time().UTC()
			dexcomQuery = append(dexcomQuery,
				bson.M{"time": bson.M{"$lt": earliestDataTime}},
			)
			latestDataTime := p.DexcomDataSource["latestDataTime"].(primitive.DateTime).Time().UTC()
			dexcomQuery = append(dexcomQuery,
				bson.M{"time": bson.M{"$gt": latestDataTime}},
			)
			andQuery = append(andQuery, bson.M{"$or": dexcomQuery})
		}
		if !p.Medtronic && len(p.MedtronicUploadIds) > 0 {
			// Parse the cutoff date; fall back to RFC3339 (error ignored, matching original).
			medtronicDateTime, err := time.Parse(medtronicDateFormat, p.MedtronicDate)
			if err != nil {
				medtronicDateTime, _ = time.Parse(time.RFC3339, p.MedtronicDate)
			}
			medtronicQuery := []bson.M{
				{"time": bson.M{"$lt": medtronicDateTime}},
				{"type": bson.M{"$nin": []string{"basal", "bolus", "cbg"}}},
				{"uploadId": bson.M{"$nin": p.MedtronicUploadIds}},
			}
			andQuery = append(andQuery, bson.M{"$or": medtronicQuery})
		}
		if len(andQuery) > 0 {
			groupDataQuery["$and"] = andQuery
		}
	}
	return groupDataQuery
}
// Ping the MongoDB database.
// Issues a server ping using the client's stored context; returns the driver error, if any.
func (c *MongoStoreClient) Ping() error {
	// do we have a store session
	return c.client.Ping(c.context, nil)
}
// Disconnect from the MongoDB database, releasing the client's resources.
func (c *MongoStoreClient) Disconnect() error {
	return c.client.Disconnect(c.context)
}
// HasMedtronicDirectData - check whether the userID has Medtronic data that has been uploaded via Uploader.
// Returns true as soon as one matching document is found in either collection.
func (c *MongoStoreClient) HasMedtronicDirectData(userID string) (bool, error) {
	if userID == "" {
		return false, errors.New("user id is missing")
	}
	// Match closed, active, non-deleted Medtronic uploads for this user.
	query := bson.M{
		"_userId": userID,
		"type":    "upload",
		"_state":  "closed",
		"_active": true,
		"deletedTime": bson.M{
			"$exists": false,
		},
		"deviceManufacturers": "Medtronic",
	}
	// Try reading from both collections until migration from type=upload from
	// deviceData to deviceDataSets is complete.
	err := dataCollection(c).FindOne(c.context, query).Err()
	if err != nil && err != mongo.ErrNoDocuments {
		return false, err
	}
	if err == nil {
		// Found in deviceData; no need to consult the second collection.
		return true, nil
	}
	// Not found in deviceData; fall back to deviceDataSets.
	err = dataSetsCollection(c).FindOne(c.context, query).Err()
	if err != nil && err != mongo.ErrNoDocuments {
		return false, err
	}
	if err == mongo.ErrNoDocuments {
		return false, nil
	}
	return err == nil, err
}
// GetDexcomDataSource - get the user's Dexcom OAuth data-source document.
// Returns (nil, nil) when no matching document exists.
func (c *MongoStoreClient) GetDexcomDataSource(userID string) (bson.M, error) {
	if userID == "" {
		return nil, errors.New("user id is missing")
	}
	// `earliestDataTime` and `latestDataTime` are bson.Date fields. Internally, they are int64's
	// so if they exist, the must be set to something, even if 0 (ie Unix epoch)
	query := bson.M{
		"userId":       userID,
		"providerType": "oauth",
		"providerName": "dexcom",
		// Require a non-empty dataSetIds array.
		"dataSetIds": bson.M{
			"$exists": true,
			"$not": bson.M{
				"$size": 0,
			},
		},
		"earliestDataTime": bson.M{
			"$exists": true,
		},
		"latestDataTime": bson.M{
			"$exists": true,
		},
	}
	dataSource := bson.M{}
	// NOTE(review): the database name "tidepool" is hard-coded here, unlike
	// dataCollection/dataSetsCollection which use c.database — confirm intentional.
	err := c.client.Database("tidepool").Collection("data_sources").FindOne(c.context, query).Decode(&dataSource)
	if err == mongo.ErrNoDocuments {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return dataSource, nil
}
// HasMedtronicLoopDataAfter checks the database to see if Loop data exists for `userID` that originated
// from a Medtronic device after `date`. `date` is accepted as "2006-01-02" or RFC3339.
func (c *MongoStoreClient) HasMedtronicLoopDataAfter(userID string, date string) (bool, error) {
	if userID == "" {
		return false, errors.New("user id is missing")
	}
	if date == "" {
		return false, errors.New("date is missing")
	}
	// Try the plain date layout first, then fall back to RFC3339.
	dateTime, err := time.Parse(medtronicDateFormat, date)
	if err != nil {
		dateTime, err = time.Parse(time.RFC3339, date)
	}
	if err != nil {
		return false, errors.New("date is invalid")
	}
	opts := options.FindOne()
	query := bson.M{
		"_active": true,
		"_userId": userID,
		"time":    bson.M{"$gte": dateTime},
		"origin.payload.device.manufacturer": "Medtronic",
	}
	// Check deviceData first; a hit short-circuits the second query.
	err = dataCollection(c).FindOne(c.context, query, opts).Err()
	if err == nil {
		return true, nil
	}
	if err != nil && err != mongo.ErrNoDocuments {
		return false, err
	}
	// Fall back to deviceDataSets.
	err = dataSetsCollection(c).FindOne(c.context, query, opts).Err()
	if err == mongo.ErrNoDocuments {
		return false, nil
	}
	return err == nil, err
}
// GetLoopableMedtronicDirectUploadIdsAfter returns all Upload IDs for `userID` where Loop data was found
// for a Medtronic device after `date`. `date` is accepted as "2006-01-02" or RFC3339.
func (c *MongoStoreClient) GetLoopableMedtronicDirectUploadIdsAfter(userID string, date string) ([]string, error) {
	if userID == "" {
		return nil, errors.New("user id is missing")
	}
	if date == "" {
		return nil, errors.New("date is missing")
	}
	// Try the plain date layout first, then fall back to RFC3339.
	dateTime, err := time.Parse(medtronicDateFormat, date)
	if err != nil {
		dateTime, err = time.Parse(time.RFC3339, date)
	}
	if err != nil {
		return nil, errors.New("date is invalid")
	}
	// Force the partial index created by EnsureIndexes; only uploadId is returned.
	opts := options.Find()
	opts.SetHint("GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime")
	opts.SetProjection(bson.M{"_id": 0, "uploadId": 1})
	query := bson.M{
		"_active": true,
		"_userId": userID,
		"time":    bson.M{"$gte": dateTime},
		"type":    "upload", // redundant since all types in collection is deviceDataSets is upload but just leaving the original query here.
		"deviceModel": bson.M{"$in": []string{"523", "523K", "554", "723", "723K", "754"}},
	}
	var objects []struct {
		UploadID string `bson:"uploadId"`
	}
	cursor, err := dataSetsCollection(c).Find(c.context, query, opts)
	if err != nil {
		return nil, err
	}
	defer cursor.Close(c.context)
	err = cursor.All(c.context, &objects)
	if err != nil {
		return nil, err
	}
	uploadIds := make([]string, len(objects))
	for index, object := range objects {
		uploadIds[index] = object.UploadID
	}
	return uploadIds, nil
}
// GetDeviceData returns all the device data for a user.
// When p.Latest is set, it instead returns the single most recent datum per
// requested type (or per a default list of types) via an in-memory iterator.
func (c *MongoStoreClient) GetDeviceData(p *Params) (StorageIterator, error) {
	// _schemaVersion is still in the list of fields to remove. Although we don't query for it, data can still exist for it
	// until BACK-1281 is done.
	removeFieldsForReturn := bson.M{"_id": 0, "_userId": 0, "_groupId": 0, "_version": 0, "_active": 0, "_schemaVersion": 0, "createdTime": 0, "modifiedTime": 0, "_migrationMarker": 0}
	if p.Latest {
		latest := &latestIterator{pos: -1}
		var typeRanges []string
		if len(p.Types) > 0 && p.Types[0] != "" {
			typeRanges = p.Types
		} else {
			// No explicit types requested: scan the full known set.
			typeRanges = []string{"physicalActivity", "basal", "cbg", "smbg", "bloodKetone", "bolus", "wizard", "deviceEvent", "food", "insulin", "cgmSettings", "pumpSettings", "reportedState", "upload"}
		}
		var err error
		for _, theType := range typeRanges {
			query := generateMongoQuery(p)
			query["type"] = theType
			// Newest first; take only the first match per type.
			opts := options.FindOne().SetProjection(removeFieldsForReturn).SetSort(bson.M{"time": -1})
			// collections to search. stop at first collection that has data.
			collection := dataCollection(c)
			if theType == "upload" {
				// Uploads are only in the deviceDataSets collection after migration completes.
				collection = dataSetsCollection(c)
			}
			result, resultErr := collection.
				FindOne(c.context, query, opts).
				DecodeBytes()
			if resultErr != nil {
				if resultErr == mongo.ErrNoDocuments {
					// No data of this type; try the next one.
					continue
				}
				err = resultErr
				break
			}
			latest.results = append(latest.results, result)
		}
		return latest, err
	}
	opts := options.Find().SetProjection(removeFieldsForReturn)
	// If query only needs to read from one collection use the collection directly.
	switch {
	case len(p.Types) == 1 && p.Types[0] == "upload":
		return dataSetsCollection(c).Find(c.context, generateMongoQuery(p), opts)
	// Have to check for empty string as sometimes that is the type sent.
	case len(p.Types) > 0 && !contains("upload", p.Types) && p.Types[0] != "":
		return dataCollection(c).Find(c.context, generateMongoQuery(p), opts)
	}
	// Otherwise query needs to read from both deviceData and deviceDataSets collection.
	dataIter, err := dataCollection(c).Find(c.context, generateMongoQuery(p), opts)
	if err != nil {
		return nil, err
	}
	dataSetIter, err := dataSetsCollection(c).Find(c.context, generateMongoQuery(p), opts)
	if err != nil {
		return nil, err
	}
	return &multiStorageIterator{
		iters: []StorageIterator{
			dataIter,
			dataSetIter,
		},
	}, nil
}
// Next advances the in-memory result cursor; the context is unused.
func (l *latestIterator) Next(context.Context) bool {
	l.pos++
	return l.pos < len(l.results)
}
// Decode unmarshals the current raw BSON document into result.
// Only valid after Next has returned true.
func (l *latestIterator) Decode(result interface{}) error {
	return bson.Unmarshal(l.results[l.pos], result)
}
// Close is a no-op; the results are held entirely in memory.
func (l *latestIterator) Close(context.Context) error {
	return nil
}
// Next advances to the next document, walking the underlying iterators in
// order until one yields a document or every iterator is exhausted.
func (l *multiStorageIterator) Next(ctx context.Context) bool {
	for l.currentIterIdx < len(l.iters) {
		if l.iters[l.currentIterIdx].Next(ctx) {
			return true
		}
		// Current iterator drained; move on to the next one.
		l.currentIterIdx++
	}
	return false
}
// Decode decodes the current document from the active iterator.
// Returns io.EOF once all iterators are exhausted.
func (l *multiStorageIterator) Decode(result interface{}) error {
	if l.currentIterIdx >= len(l.iters) {
		return io.EOF
	}
	return l.iters[l.currentIterIdx].Decode(result)
}
// Close closes every underlying iterator, stopping at the first error.
func (l *multiStorageIterator) Close(ctx context.Context) error {
	for _, iter := range l.iters {
		if err := iter.Close(ctx); err != nil {
			return err
		}
	}
	return nil
}
// contains reports whether needle appears in haystack.
func contains(needle string, haystack []string) bool {
	for i := 0; i < len(haystack); i++ {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
commonAPI.js | /**
* 摘要:文章资讯 API处理类
* author:Gavin.guo
* date:2015/4/23
*/
var router = require('express').Router();
var request = require('request');
var constant = require('../../constant/constant');//引入常量
var config = require('../../resources/config');//引入配置
var crypto=require('crypto');//提取加密模块
var xml2js = require('xml2js');
var common = require('../../util/common'); //引入公共的js
var Utils = require('../../util/Utils'); //引入工具类js
var logger = require('../../resources/logConf').getLogger('commonAPI');
var SyllabusService = require('../../service/syllabusService');
var EmailService = require('../../service/emailService');
var articleService = require('../../service/articleService');
var ApiResult = require('../../util/ApiResult');
var errorMessage = require('../../util/errorMessage.js');
var Redirect4FXAPI = require('./redirect4FXAPI.js');
/**
 * Fetch 24k quote data.
 * Tries the cache server first; on a miss, pulls the XML feed from
 * config.web24kPriceUrl, converts it to JSON, and caches it for 5 seconds.
 */
router.get('/get24kPrice', function(req, res) {
    var cacheClient=require('../../cache/cacheClient');
    cacheClient.get("24kPrice", function(err, replayData){
        if(replayData){
            // Cache hit: the cached value is a JSON string.
            res.json(JSON.parse(replayData));
        }else{
            request(config.web24kPriceUrl,function(error, response, data){
                if (!error && response.statusCode == 200 && common.isValid(data)) {
                    var parser = new xml2js.Parser({ explicitArray : false, ignoreAttrs : false,attrkey:'attr' });
                    try{
                        parser.parseString(data,function(err, result){
                            if(err){
                                logger.error("get24kPrice>>>error:"+err);
                                result=null;
                            }
                            cacheClient.set("24kPrice",JSON.stringify(result));
                            cacheClient.expire("24kPrice", 5);// cache entry is valid for 5 seconds
                            res.json(result);
                        });
                    }catch(e){
                        logger.error("get24kPrice has error:"+e);
                        res.json(null);
                    }
                }else{
                    logger.error("get24kPrice has error:"+error);
                    res.json(null);
                }
            });
        }
    });
});
/**
 * Fetch instant news or professional commentary from the gateway API.
 * @param pageNo       page number
 * @param pageSize     page size
 * @param lang         language code
 * @param contentType1 2: instant news, 3: professional commentary
 * @param contentType2 optional secondary content-type filter
 */
router.get('/getNewsInfoList', function(req, res) {
    var pageNo=req.query["pageNo"],pageSize=req.query["pageSize"],lang=req.query["lang"],contentType1=req.query["contentType1"],contentType2=req.query["contentType2"];
    if(common.isBlank(pageNo)||common.isBlank(pageSize)||common.isBlank(lang)||common.isBlank(contentType1)){// invalid parameters -> empty result
        res.json(null);
    }else{
        // Build the md5(authorKey + timestamp) token the gateway expects.
        var time=Date.now();
        var md5 = crypto.createHash('md5');
        var gwApiAuthorKey='YHJK786sdbbmkyusd';// authorization key
        md5.update(gwApiAuthorKey+time);
        var token=md5.digest('hex');
        var param={token:token,platTypeKey:'web24k',timeStamp:time,lang:'zh',contenttype1:contentType1,siteflg:1,pageno:pageNo,pagesize:pageSize};
        if(common.isValid(contentType2)){
            param.contenttype2=contentType2;
        }
        request.post({strictSSL:false,url:(config.gwApiUrl+'/information/list'),form:param}, function(error,response,data){
            // Reconstructed: the original source line was corrupted here
            // ('i | as error:"+error);'); restored the obvious error branch.
            if(error){
                logger.error("getNewsInfoList has error:"+error);
                res.json(null);
            }else{
                try {
                    res.json(data ? JSON.parse(data) : null);
                }catch(e){
                    logger.error("getNewsInfoList has error:"+e);
                    res.json(null);
                }
            }
        });
    }
});
/**
 * Fetch the live-trading broadcast list from the gateway API.
 * @param platform platform key (e.g. "web24k")
 * @param dateStr  date string passed through to the gateway
 * @param lang     language code
 */
router.get('/getBroadStrateList', function(req, res) {
    var lang=req.query["lang"],platform=req.query["platform"],dateStr=req.query["dateStr"];
    if(common.isBlank(lang)||common.isBlank(platform)||common.isBlank(dateStr)){// invalid parameters -> empty result
        res.json(null);
    }else{
        // Build the md5(authorKey + timestamp) token; only web24k has a key here.
        var time=Date.now();
        var md5 = crypto.createHash('md5');
        var gwApiAuthorKey='',siteflg=0;
        if("web24k"==platform){
            gwApiAuthorKey='YHJK786sdbbmkyusd';// authorization key
            siteflg=1;
        }
        md5.update(gwApiAuthorKey+time);
        var token=md5.digest('hex');
        var param={token:token,platTypeKey:platform,timeStamp:time,lang:lang,datestr:dateStr,siteflg:siteflg};
        request.post({strictSSL:false,url:(config.gwApiUrl+'/broadcast/index.json'),form:param}, function(error,response,data){
            if(error){
                logger.error("getBroadStrateList has error:"+error);
                res.json(null);
            }else{
                try{
                    res.json(data?JSON.parse(data):null);
                }catch(e){
                    logger.error("getBroadStrateList has error:"+e);
                    res.json(null);
                }
            }
        });
    }
});
/**
 * Get the course schedule for a specified date/room.
 * Room/group defaults may come from the studio third-party config.
 */
router.get("/getCourse", function(req, res) {
    var loc_params = {
        type : req.query["type"],
        platform : req.query["platform"],
        groupType : req.query["groupType"],
        groupId : req.query["groupId"],
        flag : req.query["flag"]
    };
    // Third-party config (when present) overrides group type/id and default flag.
    var cfg = constant.studioThirdUsed.getConfig(loc_params.type, loc_params.platform);
    if(cfg){
        loc_params.groupType = cfg.groupType;
        loc_params.groupId = cfg.roomId;
        loc_params.flag = common.isValid(loc_params.flag)?loc_params.flag:cfg.flag;
    }
    if(!loc_params.groupType){
        res.json(ApiResult.result(errorMessage.code_1000, null));
        return;
    }
    // Some group types are served by the FX studio service instead.
    if(Redirect4FXAPI.needRedirect4Fxstudio(req, loc_params.groupType)){
        Redirect4FXAPI.redirect(req, res);
        return;
    }
    // Query the course schedule for today.
    SyllabusService.getCourse(loc_params.groupType, loc_params.groupId, new Date(), loc_params.flag, function(apiResult){
        res.json(apiResult);
    });
});
/**
 * Get the next scheduled courses for the specified analysts.
 * analystIds may be a comma-separated list (ASCII or full-width comma).
 */
router.get("/getNextCourses", function(req, res) {
    var loc_params = {
        type : req.query["type"],
        platform : req.query["platform"],
        groupType : req.query["groupType"],
        groupId : req.query["groupId"],
        analystIds : req.query["analystIds"]
    };
    // Third-party config (when present) overrides group type/id.
    var cfg = constant.studioThirdUsed.getConfig(loc_params.type, loc_params.platform);
    if(cfg){
        loc_params.groupType = cfg.groupType;
        loc_params.groupId = cfg.roomId;
    }
    if(!loc_params.groupType || !loc_params.groupId){
        res.json(null);
        return;
    }
    // Some group types are served by the FX studio service instead.
    if(Redirect4FXAPI.needRedirect4Fxstudio(req, loc_params.groupType)){
        Redirect4FXAPI.redirect(req, res);
        return;
    }
    if(loc_params.analystIds) {
        // Split on either ASCII or full-width comma.
        loc_params.analystIds = loc_params.analystIds.split(/[,,]/);
    }
    SyllabusService.getNextCources(new Date(), loc_params.groupType, loc_params.groupId, loc_params.analystIds, function(courses){
        res.json(courses);
    });
});
/**
 * Back up the course timetable (syllabus) for a given day.
 * Without a `date` query parameter, the previous day is backed up.
 */
router.get("/bakSyllabus", function(req, res) {
    var date = req.query["date"];
    var timezoneOffset = new Date().getTimezoneOffset() * 60000;
    // NOTE(review): `% 86400000` truncates to UTC midnight; adding
    // timezoneOffset presumably shifts to local midnight — confirm behavior
    // for timezones east of UTC (negative getTimezoneOffset()).
    if(date){
        date = new Date(date).getTime();
        date = new Date(date - (date % 86400000) + timezoneOffset);
    }else{// default: back up the previous day's timetable
        date = new Date().getTime();
        date = new Date(date - (date % 86400000) - 86400000 + timezoneOffset);
    }
    SyllabusService.bakSyllabus(date, function(isOK){
        res.json(ApiResult.result(null, isOK));
    });
});
/**
 * Send an e-mail via EmailService.
 * Body: { key, data } — data may be a JSON string or object; a `date`
 * field is filled in with the current timestamp when absent.
 */
router.post("/email", function(req, res) {
    var loc_params = {
        key : req.body["key"],
        data : req.body["data"]
    };
    if(typeof loc_params.data == "string"){
        try{
            loc_params.data = JSON.parse(loc_params.data);
        }catch(e){
            // Keep going with the raw value; a parse failure is only logged.
            logger.warn("parse JSON data error!" + e);
        }
    }
    if(!loc_params.data){
        loc_params.data = {};
    }
    if(!loc_params.data.date){
        loc_params.data.date = Utils.dateFormat(new Date(), "yyyy-MM-dd hh:mm:ss");
    }
    EmailService.send(loc_params.key, loc_params.data, function(result){
        res.json(result);
    });
});
/**
 * Fetch 24k CFTC position-ratio data from the upstream XML feed.
 * With limit=0 (default) only the first item of each column is returned,
 * keyed by column name; otherwise the full converted document is returned.
 */
router.get('/get24kCftc', function(req, res) {
    var limit = req.query['limit'] ? req.query['limit'] : 0; // default: only the first position-ratio entry per column
    request(config.web24k + '/cftc.xml', function(error, response, data){
        if (!error && response.statusCode == 200 && common.isValid(data)) {
            var parser = new xml2js.Parser({ explicitArray : false, ignoreAttrs : false, attrkey: 'attr' });
            try{
                parser.parseString(data, function(err, result){
                    if(err){
                        logger.error("get24kCftc>>>error:"+err);
                        result=null;
                    }
                    if(limit == 0){
                        // Take only the first item of each column and build a
                        // new object keyed by the column name.
                        var size = result.cftc.column.length;
                        var json = {};
                        var jsonData = [];
                        for(var i = 0; i < size; i++){
                            json[result.cftc.column[i].attr.name] = result.cftc.column[i].item[0].attr;
                            json[result.cftc.column[i].attr.name].name = result.cftc.column[i].attr.name;
                        }
                        res.json(json);
                    }
                    else{
                        // Return the full XML document converted to JSON.
                        res.json(result);
                    }
                });
            }catch(e){
                logger.error("get24kCftc has error:" + e);
                res.json(null);
            }
        }else{
            logger.error("get24kCftc has error:" + error);
            res.json(null);
        }
    });
});
/**
* 获取新闻快讯
*/
router.get('/getInformation', function(req, res){
var cacheClient = require('../../cache/cacheClient');
/*var date = new Date();//如需设置过期时间,则需要加入日期作为key的一部分
var key = "fx678_information"+date.getUTCFullYear()+(date.getUTCMonth()+1)+date.getUTCDate();*/
var key = "fx678_information";
cacheClient.get(key, function(err, result){
if(err){
logger.error("getInformationCache fail:" + err);
res.json({isOK:false, data:null});
}
else if(!result){
request(config.fx678ApiUrl + "/union/jdgjs/news/flash.xml", function(error, data){
if (!error && common.isValid(data.body)) {
var parser = new xml2js.Parser({ explicitArray : false, ignoreAttrs : false, attrkey: 'attr' });
try{
parser.parseString(data.body, function(parseError, result){
if(parseError){
logger.error("getInformation for fx678 parser>>>error:"+parseError);
res.json({isOK:false, data:null});
return;
}
cacheClient.set(key, JSON.stringify(result));
cacheClient.expire(key, 5*60);//设置有效时间
res.json({isOK:true, data:result});
});
}catch(e){
logger.error("getInformation for fx678 has error:" + e);
res.json({isOK:false, data:null});
}
}else{
logger.error("getInformation for fx678 has error:" + err);
res.json({isOK:false, data:null});
}
});
}
else{
res.json({isOK:true, data:JSON.parse(result)});//获取的结果是字符串,需要转为json对象
}
});
});
/**
* 更新点赞数或下载次数
*/
router.post('/modifyArticle', function(req, res){
var _id = req.body['id'] || req.query['id'];
var type = req.body['type'] || req.query['type'];
if(common.isBlank(_id) || common.isBlank(type)){
res.json({isOk: false, msg: '参数错误'});
return;
}
articleService.modifyPraiseOrDownloads(_id, type, function(apiResult){
res.json(apiResult);
});
});
module.exports = router; | f(error){
logger.error("getNewsInfoList h | conditional_block |
commonAPI.js | /**
* 摘要:文章资讯 API处理类
* author:Gavin.guo
* date:2015/4/23
*/
var router = require('express').Router();
var request = require('request');
var constant = require('../../constant/constant');//引入常量
var config = require('../../resources/config');//引入配置
var crypto=require('crypto');//提取加密模块
var xml2js = require('xml2js');
var common = require('../../util/common'); //引入公共的js
var Utils = require('../../util/Utils'); //引入工具类js
var logger = require('../../resources/logConf').getLogger('commonAPI');
var SyllabusService = require('../../service/syllabusService');
var EmailService = require('../../service/emailService');
var articleService = require('../../service/articleService');
var ApiResult = require('../../util/ApiResult');
var errorMessage = require('../../util/errorMessage.js');
var Redirect4FXAPI = require('./redirect4FXAPI.js');
/**
* 提取24k报价数据
* 先在缓存服务器中提取,没有则到24k链接中提取
*/
router.get('/get24kPrice', function(req, res) {
var cacheClient=require('../../cache/cacheClient');
cacheClient.get("24kPrice", function(err, replayData){
if(replayData){
res.json(JSON.parse(replayData));
}else{
request(config.web24kPriceUrl,function(error, response, data){
if (!error && response.statusCode == 200 && common.isValid(data)) {
var parser = new xml2js.Parser({ explicitArray : false, ignoreAttrs : false,attrkey:'attr' });
try{
parser.parseString(data,function(err, result){
if(err){
logger.error("get24kPrice>>>error:"+err);
result=null;
}
cacheClient.set("24kPrice",JSON.stringify(result));
cacheClient.expire("24kPrice", 5);//5秒钟有效
res.json(result);
});
}catch(e){
logger.error("get24kPrice has error:"+e);
res.json(null);
}
}else{
logger.error("get24kPrice has error:"+error);
res.json(null);
}
});
}
});
});
/**
* 提取即时资讯或专业评论
* @param pageNo
* @param pageSize
* @param lang
* @param contentType 2:即时资讯,3:专业评论
*/
router.get('/getNewsInfoList', function(req, res) {
var pageNo=req.query["pageNo"],pageSize=req.query["pageSize"],lang=req.query["lang"],contentType1=req.query["contentType1"],contentType2=req.query["contentType2"];
if(common.isBlank(pageNo)||common.isBlank(pageSize)||common.isBlank(lang)||common.isBlank(contentType1)){//参数输入有误,则返回空结果
res.json(null);
}else{
var time=Date.now();
var md5 = crypto.createHash('md5');
var gwApiAuthorKey='YHJK786sdbbmkyusd';//授权码
md5.update(gwApiAuthorKey+time);
var token=md5.digest('hex');
var param={token:token,platTypeKey:'web24k',timeStamp:time,lang:'zh',contenttype1:contentType1,siteflg:1,pageno:pageNo,pagesize:pageSize};
if(common.isValid(contentType2)){
param.contenttype2=contentType2;
}
request.post({strictSSL:false,url:(config.gwApiUrl+'/information/list'),form:param}, function(error,response,data){
if(error){
logger.error("getNewsInfoList has error:"+error);
res.json(null);
}else{
try {
res.json(data ? JSON.parse(data) : null);
}catch(e){
logger.error("getNewsInfoList has error:"+e);
res.json(null);
}
}
});
}
});
/**
* 提取实盘直播
* @param platform
* @param dateStr
* @param lang
*/
router.get('/getBroadStrateList', function(req, res) {
var lang=req.query["lang"],platform=req.query["platform"],dateStr=req.query["dateStr"];
if(common.isBlank(lang)||common.isBlank(platform)||common.isBlank(dateStr)){//参数输入有误,则返回空结果
res.json(null);
}else{
var time=Date.now();
var md5 = crypto.createHash('md5');
var gwApiAuthorKey='',siteflg=0;
if("web24k"==platform){
gwApiAuthorKey='YHJK786sdbbmkyusd';//授权码
siteflg=1;
}
md5.update(gwApiAuthorKey+time);
var token=md5.digest('hex');
var param={token:token,platTypeKey:platform,timeStamp:time,lang:lang,datestr:dateStr,siteflg:siteflg};
request.post({strictSSL:false,url:(config.gwApiUrl+'/broadcast/index.json'),form:param}, function(error,response,data){
if(error){
logger.error("getBroadStrateList has error:"+error);
res.json(null);
}else{
try{
res.json(data?JSON.parse(data):null);
}catch(e){
logger.error("getBroadStrateList has error:"+e);
res.json(null);
}
}
});
}
});
/**
* 获取指定日期课程安排
*/
router.get("/getCourse", function(req, res) {
var loc_params = {
type : req.query["type"],
platform : req.query["platform"],
groupType : req.query["groupType"],
groupId : req.query["groupId"],
flag : req.query["flag"]
};
var cfg = constant.studioThirdUsed.getConfig(loc_params.type, loc_params.platform);
if(cfg){
loc_params.groupType = cfg.groupType;
loc_params.groupId = cfg.roomId;
loc_params.flag = common.isValid(loc_params.flag)?loc_params.flag:cfg.flag;
}
if(!loc_params.groupType){
res.json(ApiResult.result(errorMessage.code_1000, null));
return;
}
if(Redirect4FXAPI.needRedirect4Fxstudio(req, loc_params.groupType)){
Redirect4FXAPI.redirect(req, res);
return;
}
//查询课程安排
| SyllabusService.getCourse(loc_params.groupType, loc_params.groupId, new Date(), loc_params.flag, function(apiResult){
res.json(apiResult);
});
});
/**
* 获取指定分析师的下次课程安排
*/
router.get("/getNextCourses", function(req, res) {
var loc_params = {
type : req.query["type"],
platform : req.query["platform"],
groupType : req.query["groupType"],
groupId : req.query["groupId"],
analystIds : req.query["analystIds"]
};
var cfg = constant.studioThirdUsed.getConfig(loc_params.type, loc_params.platform);
if(cfg){
loc_params.groupType = cfg.groupType;
loc_params.groupId = cfg.roomId;
}
if(!loc_params.groupType || !loc_params.groupId){
res.json(null);
return;
}
if(Redirect4FXAPI.needRedirect4Fxstudio(req, loc_params.groupType)){
Redirect4FXAPI.redirect(req, res);
return;
}
if(loc_params.analystIds) {
loc_params.analystIds = loc_params.analystIds.split(/[,,]/);
}
SyllabusService.getNextCources(new Date(), loc_params.groupType, loc_params.groupId, loc_params.analystIds, function(courses){
res.json(courses);
});
});
/**
* 备份课程表
*/
router.get("/bakSyllabus", function(req, res) {
var date = req.query["date"];
var timezoneOffset = new Date().getTimezoneOffset() * 60000;
if(date){
date = new Date(date).getTime();
date = new Date(date - (date % 86400000) + timezoneOffset);
}else{//默认备份前一天课程表
date = new Date().getTime();
date = new Date(date - (date % 86400000) - 86400000 + timezoneOffset);
}
SyllabusService.bakSyllabus(date, function(isOK){
res.json(ApiResult.result(null, isOK));
});
});
/**
* 发送电子邮件
*/
router.post("/email", function(req, res) {
var loc_params = {
key : req.body["key"],
data : req.body["data"]
};
if(typeof loc_params.data == "string"){
try{
loc_params.data = JSON.parse(loc_params.data);
}catch(e){
logger.warn("parse JSON data error!" + e);
}
}
if(!loc_params.data){
loc_params.data = {};
}
if(!loc_params.data.date){
loc_params.data.date = Utils.dateFormat(new Date(), "yyyy-MM-dd hh:mm:ss");
}
EmailService.send(loc_params.key, loc_params.data, function(result){
res.json(result);
});
});
/**
* 提取24kCFTC持仓比例数据
*/
router.get('/get24kCftc', function(req, res) {
var limit = req.query['limit'] ? req.query['limit'] : 0; //默认只取最新的一条持仓比例数据
request(config.web24k + '/cftc.xml', function(error, response, data){
if (!error && response.statusCode == 200 && common.isValid(data)) {
var parser = new xml2js.Parser({ explicitArray : false, ignoreAttrs : false, attrkey: 'attr' });
try{
parser.parseString(data, function(err, result){
if(err){
logger.error("get24kCftc>>>error:"+err);
result=null;
}
//res.json(result);
if(limit == 0){
//只取第一条数据并返回组成新的json数组
var size = result.cftc.column.length;
var json = {};
var jsonData = [];
for(var i = 0; i < size; i++){
//json.name = result.cftc.column[i].attr.name;
//json.item = result.cftc.column[i].item[0].attr;
json[result.cftc.column[i].attr.name] = result.cftc.column[i].item[0].attr;
json[result.cftc.column[i].attr.name].name = result.cftc.column[i].attr.name;
//jsonData.push(json);
}
res.json(json);
}
else{
//返回请求到的全部转换为json的数据
res.json(result);
}
});
}catch(e){
logger.error("get24kCftc has error:" + e);
res.json(null);
}
}else{
logger.error("get24kCftc has error:" + error);
res.json(null);
}
});
});
/**
* 获取新闻快讯
*/
router.get('/getInformation', function(req, res){
var cacheClient = require('../../cache/cacheClient');
/*var date = new Date();//如需设置过期时间,则需要加入日期作为key的一部分
var key = "fx678_information"+date.getUTCFullYear()+(date.getUTCMonth()+1)+date.getUTCDate();*/
var key = "fx678_information";
cacheClient.get(key, function(err, result){
if(err){
logger.error("getInformationCache fail:" + err);
res.json({isOK:false, data:null});
}
else if(!result){
request(config.fx678ApiUrl + "/union/jdgjs/news/flash.xml", function(error, data){
if (!error && common.isValid(data.body)) {
var parser = new xml2js.Parser({ explicitArray : false, ignoreAttrs : false, attrkey: 'attr' });
try{
parser.parseString(data.body, function(parseError, result){
if(parseError){
logger.error("getInformation for fx678 parser>>>error:"+parseError);
res.json({isOK:false, data:null});
return;
}
cacheClient.set(key, JSON.stringify(result));
cacheClient.expire(key, 5*60);//设置有效时间
res.json({isOK:true, data:result});
});
}catch(e){
logger.error("getInformation for fx678 has error:" + e);
res.json({isOK:false, data:null});
}
}else{
logger.error("getInformation for fx678 has error:" + err);
res.json({isOK:false, data:null});
}
});
}
else{
res.json({isOK:true, data:JSON.parse(result)});//获取的结果是字符串,需要转为json对象
}
});
});
/**
* 更新点赞数或下载次数
*/
router.post('/modifyArticle', function(req, res){
var _id = req.body['id'] || req.query['id'];
var type = req.body['type'] || req.query['type'];
if(common.isBlank(_id) || common.isBlank(type)){
res.json({isOk: false, msg: '参数错误'});
return;
}
articleService.modifyPraiseOrDownloads(_id, type, function(apiResult){
res.json(apiResult);
});
});
module.exports = router; | random_line_split | |
keyrebels_new.py | from collections import OrderedDict
import pandas as pd
import statistics
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.colors import LinearSegmentedColormap
EU_Groups = ["GUE-NGL", "ECR", "EPP", "S&D", "ALDE", "EFDD", "Greens-EFA", "Non-attached", "ENF"]
genders = ["male", "female"]
votes = ["FOR", "ABSTAINED", "NOT PRESENT", "AGAINST"]
all_countries = ["Estonia", "Latvia", "Lithuania", "Poland", "Czech Republic", "Slovakia", "Hungary", "Malta", "Cyprus",
"Slovenia", "Romania", "Bulgaria", "Croatia", "France", "Germany", "Italy", "Belgium", "Netherlands",
"Luxembourg", "United Kingdom", "Ireland",
"Denmark", "Greece", "Spain", "Portugal", "Austria", "Sweden", "Finland"]
country_groups = {
'eu13': ["Estonia", "Latvia", "Lithuania", "Poland", "Czech Republic", "Slovakia", "Hungary", "Malta", "Cyprus",
"Slovenia", "Romania", "Bulgaria", "Croatia"],
'eu15': ["France", "Germany", "Italy", "Belgium", "Netherlands", "Luxembourg", "United Kingdom", "Ireland",
"Denmark", "Greece", "Spain", "Portugal", "Austria", "Sweden", "Finland"]}
def outputToCsv(outputName, df):
fileName = "output/" + outputName + ".csv"
df.to_csv(fileName, index=False, encoding="utf-8-sig")
# function from https://matplotlib.org/
def survey(results, category_names, name):
"""
Parameters
----------
results : dict
A mapping from question labels to a list of answers per category.
It is assumed all lists contain the same number of entries and that
it matches the length of *category_names*.
category_names : list of str
The category labels.
"""
labels = list(results.keys())
data = np.array(list(results.values()))
data_cum = data.cumsum(axis=1, dtype=float)
category_colors = plt.get_cmap('RdYlGn_r')(
np.linspace(0.15, 0.85, data.shape[1]))
fig, ax = plt.subplots(figsize=(11, 5.95))
ax.invert_yaxis()
ax.xaxis.set_visible(False)
ax.set_xlim(0, np.sum(data, axis=1).max())
for i, (colname, color) in enumerate(zip(category_names, category_colors)):
widths = data[:, i]
starts = data_cum[:, i] - widths
# print(starts)
ax.barh(labels, widths, left=starts, height=0.5,
label=colname, color=color)
xcenters = starts + widths / 2
r, g, b, _ = color
text_color = 'white' if r * g * b < 0.2 else 'black'
for y, (x, c) in enumerate(zip(xcenters, widths)):
ax.text(x, y, str(float(c)), ha='center', va='center',
color=text_color)
ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),
loc='lower left', fontsize='small')
# ax.set_title("Proportion of MEPs' votes by political group")
# plt.subplots_adjust(left=0.1, bottom=0.1, right=0.1, top=0.8)
# plt.tight_layout()
plt.savefig(str(name) + "out.png")
plt.show
return fig, ax
# print(listEUGroups("Political_Group",df))
def listEUGroups(a, data):
p_groups = {}
for index, row in data.iterrows():
curGroup = row[a]
if curGroup not in p_groups.keys():
p_groups[curGroup] = 1
else:
p_groups[curGroup] += 1
return list(p_groups.keys())
def filterEUGroupRow(df, group):
return df[df.Political_Group == group]
def filterCountryGroups(df, group):
|
def filterGender(df, group):
return df[df.Gender == group]
def filterAge(df, criteria, threshold):
if criteria == "above":
return df[df.Age >= threshold]
elif criteria == "below":
return df[df.Age <= threshold]
def combineVotes(dict):
ordered = OrderedDict([('FOR', 0), ('ABSTAINED', 0), ('NOT PRESENT', 0), ('AGAINST', 0)])
for i in votes:
a = i + " - CORRECTED"
if a in dict.keys() and i in dict.keys():
dict[i] += dict.pop(a)
if i in dict.keys():
ordered[i] = dict[i]
return ordered
def toPercentage(data, vote):
voteCount = data[vote].value_counts().to_frame()
voteDict = voteCount.to_dict()[vote]
voteDict = combineVotes(voteDict)
voteTot = len(data.index)
for key, value in voteDict.items():
voteDict[key] = float("{0:.2f}".format((value / voteTot) * 100))
return voteDict
def minIndices(votePct, data):
"""
Parameters
----------
votePct - vote results (in percentages) per party
data - dataframe filtered by party
Returns
-------
dataframe row ids of minority voters in the party
"""
groupMinIds = list()
if (votePct["FOR"] < votePct["AGAINST"]):
groupMinIds.extend(data.index[data['_26_March_2019_Final_Vote'] == 'FOR'].tolist())
elif (votePct["AGAINST"] < votePct["FOR"]):
groupMinIds.extend(data.index[data['_26_March_2019_Final_Vote'] == 'AGAINST'].tolist())
return groupMinIds
def main():
cols_to_use = [0, 1, 2, 4, 5, 7, 8, 16]
df = pd.read_csv("data/dataset.csv", usecols=cols_to_use, encoding='utf-8-sig')
# voting for the passing but being in the party minority is more interesting
# is there a correlation in groups?
# minIndex = list()
# results = {}
results_for_output = {}
# for i in EU_Groups:
# # filters df by EP group i
# data = filterEUGroupRow(df, i)
# # gets percentages for the visual vote representation
# a = toPercentage(data, '_26_March_2019_Final_Vote')
# # if i != "Non-attached":
# # minIndex.extend(minIndices(a,data))
# results[i + ' (' + str(len(data.index)) + ')'] = list(a.values())
# this section gets indices of EP group minority MEPs (excluding non-attached)
# df2 = pd.read_csv("data/dataset.csv", encoding='utf-8-sig')
# outputToCsv("minorities", df2.iloc[minIndex, :])
# # is there a correlation in gender?
# results2 = {}
# for i in genders:
# data = filterGender(df, i)
# a = toPercentage(data, '_26_March_2019_Final_Vote')
# results2[i] = list(a.values())
#
# # is there a correlation in age?
results3 = {}
age_avg = 55.48666667
for i in ["above","below"]:
age_filtered = filterAge(df, i, age_avg)
a_vote_percent = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
results3[i] = list(a_vote_percent.values())
print(results3)
results_for_output["above avg. age"] = [52.03, 3.55, 12.94, 31.47]
results_for_output["below avg. age"] = [37.36, 6.18, 11.8, 44.66]
#
# # eu15/eu13 analyses?
# results4 = {}
# for j in country_groups:
# d = filterCountryGroups(df, country_groups[j])
# a = toPercentage(d, '_26_March_2019_Final_Vote')
# results4[j] = list(a.values())
# results5 = {}
# data = filterCountryGroups(df, country_groups['eu15'])
# print(data.head)
# for i in EU_Groups:
# data1 = filterEUGroupRow(data, i)
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results5[i+' eu15 ('+str(len(data1.index))+')'] = list(a.values())
#
# results6 = {}
# data = filterCountryGroups(df, country_groups['eu13'])
# print(data.head)
# for i in EU_Groups:
# data1 = filterEUGroupRow(data, i)
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results6[i + ' eu13 (' + str(len(data1.index)) + ')'] = list(a.values())
# results7 = {}
# for i in country_groups:
# data7 = filterCountryGroups(df, country_groups[i])
# print(i)
# for j in genders:
# print(j)
# data8 = filterGender(data7, j)
# a = toPercentage(data8, '_26_March_2019_Final_Vote')
# results7[i + " " + j] = list(a.values())
# results8 = {}
# for i in country_groups:
# data = filterCountryGroups(df, country_groups[i])
# age_avg = 55.48666667
# for j in ["above", "below"]:
# age_filtered = filterAge(data, j, age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results8[i + " " + j + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
# results9 = {}
# for i in genders:
# data = filterGender(df, i)
# age_avg = 55.48666667
# for j in ["above", "below"]:
# age_filtered = filterAge(data, j, age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results9[i + " " + j + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
# results10 = {}
# results11 = {}
# for i in EU_Groups:
# data = filterEUGroupRow(df, i)
# age_avg = 55.48666667
# age_filtered = filterAge(data, "above", age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results10[i + " " + 'above' + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
# age_filtered = filterAge(data, "below", age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results11[i + " " + 'below' + ' (' + str(len(age_filtered.index)) + ')'] = list(a.values())
# results12 = {}
# results13 = {}
# for i in EU_Groups:
# data = filterEUGroupRow(df, i)
# data1 = filterGender(data, "male")
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results12[i + " " + 'male' + ' ('+ str(len(data1.index)) + ')'] = list(a.values())
# data1 = filterGender(data, "female")
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results13[i + " " + 'female' + ' (' + str(len(data1.index)) + ')'] = list(a.values())
# DEPTH1
# survey(results, votes, "first-level/groups")
# survey(results2, votes,"first-level/gender")
# survey(results3, votes,"first-level/age")
survey(results_for_output, votes, "first-level/age")
# survey(results4, votes,"first-level/location")
# DEPTH2
# survey(results5, votes, "second-level/eu15+groups")
# survey(results6, votes, "second-level/eu13+groups")
# survey(results7, votes, "second-level/gender+location")
# survey(results8, votes, "second-level/age+location")
# survey(results9, votes, "second-level/gender+age")
# survey(results10, votes, "second-level/group+above")
# survey(results11, votes, "second-level/group+below")
# survey(results12, votes, "second-level/group+male")
# survey(results13, votes, "second-level/group+female")
plt.show()
if __name__ == '__main__':
main()
| return df[df['Country'].isin(group)] | identifier_body |
keyrebels_new.py | from collections import OrderedDict
import pandas as pd
import statistics
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.colors import LinearSegmentedColormap
EU_Groups = ["GUE-NGL", "ECR", "EPP", "S&D", "ALDE", "EFDD", "Greens-EFA", "Non-attached", "ENF"]
genders = ["male", "female"]
votes = ["FOR", "ABSTAINED", "NOT PRESENT", "AGAINST"]
all_countries = ["Estonia", "Latvia", "Lithuania", "Poland", "Czech Republic", "Slovakia", "Hungary", "Malta", "Cyprus",
"Slovenia", "Romania", "Bulgaria", "Croatia", "France", "Germany", "Italy", "Belgium", "Netherlands",
"Luxembourg", "United Kingdom", "Ireland",
"Denmark", "Greece", "Spain", "Portugal", "Austria", "Sweden", "Finland"]
country_groups = {
'eu13': ["Estonia", "Latvia", "Lithuania", "Poland", "Czech Republic", "Slovakia", "Hungary", "Malta", "Cyprus",
"Slovenia", "Romania", "Bulgaria", "Croatia"],
'eu15': ["France", "Germany", "Italy", "Belgium", "Netherlands", "Luxembourg", "United Kingdom", "Ireland",
"Denmark", "Greece", "Spain", "Portugal", "Austria", "Sweden", "Finland"]}
def outputToCsv(outputName, df):
fileName = "output/" + outputName + ".csv"
df.to_csv(fileName, index=False, encoding="utf-8-sig")
# function from https://matplotlib.org/
def survey(results, category_names, name):
"""
Parameters
----------
results : dict
A mapping from question labels to a list of answers per category.
It is assumed all lists contain the same number of entries and that
it matches the length of *category_names*.
category_names : list of str
The category labels.
"""
labels = list(results.keys())
data = np.array(list(results.values()))
data_cum = data.cumsum(axis=1, dtype=float)
category_colors = plt.get_cmap('RdYlGn_r')(
np.linspace(0.15, 0.85, data.shape[1]))
fig, ax = plt.subplots(figsize=(11, 5.95))
ax.invert_yaxis()
ax.xaxis.set_visible(False)
ax.set_xlim(0, np.sum(data, axis=1).max())
for i, (colname, color) in enumerate(zip(category_names, category_colors)):
widths = data[:, i]
starts = data_cum[:, i] - widths
# print(starts)
ax.barh(labels, widths, left=starts, height=0.5,
label=colname, color=color)
xcenters = starts + widths / 2
r, g, b, _ = color
text_color = 'white' if r * g * b < 0.2 else 'black'
for y, (x, c) in enumerate(zip(xcenters, widths)):
|
ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),
loc='lower left', fontsize='small')
# ax.set_title("Proportion of MEPs' votes by political group")
# plt.subplots_adjust(left=0.1, bottom=0.1, right=0.1, top=0.8)
# plt.tight_layout()
plt.savefig(str(name) + "out.png")
plt.show
return fig, ax
# print(listEUGroups("Political_Group",df))
def listEUGroups(a, data):
p_groups = {}
for index, row in data.iterrows():
curGroup = row[a]
if curGroup not in p_groups.keys():
p_groups[curGroup] = 1
else:
p_groups[curGroup] += 1
return list(p_groups.keys())
def filterEUGroupRow(df, group):
return df[df.Political_Group == group]
def filterCountryGroups(df, group):
return df[df['Country'].isin(group)]
def filterGender(df, group):
return df[df.Gender == group]
def filterAge(df, criteria, threshold):
if criteria == "above":
return df[df.Age >= threshold]
elif criteria == "below":
return df[df.Age <= threshold]
def combineVotes(dict):
ordered = OrderedDict([('FOR', 0), ('ABSTAINED', 0), ('NOT PRESENT', 0), ('AGAINST', 0)])
for i in votes:
a = i + " - CORRECTED"
if a in dict.keys() and i in dict.keys():
dict[i] += dict.pop(a)
if i in dict.keys():
ordered[i] = dict[i]
return ordered
def toPercentage(data, vote):
voteCount = data[vote].value_counts().to_frame()
voteDict = voteCount.to_dict()[vote]
voteDict = combineVotes(voteDict)
voteTot = len(data.index)
for key, value in voteDict.items():
voteDict[key] = float("{0:.2f}".format((value / voteTot) * 100))
return voteDict
def minIndices(votePct, data):
"""
Parameters
----------
votePct - vote results (in percentages) per party
data - dataframe filtered by party
Returns
-------
dataframe row ids of minority voters in the party
"""
groupMinIds = list()
if (votePct["FOR"] < votePct["AGAINST"]):
groupMinIds.extend(data.index[data['_26_March_2019_Final_Vote'] == 'FOR'].tolist())
elif (votePct["AGAINST"] < votePct["FOR"]):
groupMinIds.extend(data.index[data['_26_March_2019_Final_Vote'] == 'AGAINST'].tolist())
return groupMinIds
def main():
cols_to_use = [0, 1, 2, 4, 5, 7, 8, 16]
df = pd.read_csv("data/dataset.csv", usecols=cols_to_use, encoding='utf-8-sig')
# voting for the passing but being in the party minority is more interesting
# is there a correlation in groups?
# minIndex = list()
# results = {}
results_for_output = {}
# for i in EU_Groups:
# # filters df by EP group i
# data = filterEUGroupRow(df, i)
# # gets percentages for the visual vote representation
# a = toPercentage(data, '_26_March_2019_Final_Vote')
# # if i != "Non-attached":
# # minIndex.extend(minIndices(a,data))
# results[i + ' (' + str(len(data.index)) + ')'] = list(a.values())
# this section gets indices of EP group minority MEPs (excluding non-attached)
# df2 = pd.read_csv("data/dataset.csv", encoding='utf-8-sig')
# outputToCsv("minorities", df2.iloc[minIndex, :])
# # is there a correlation in gender?
# results2 = {}
# for i in genders:
# data = filterGender(df, i)
# a = toPercentage(data, '_26_March_2019_Final_Vote')
# results2[i] = list(a.values())
#
# # is there a correlation in age?
results3 = {}
age_avg = 55.48666667
for i in ["above","below"]:
age_filtered = filterAge(df, i, age_avg)
a_vote_percent = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
results3[i] = list(a_vote_percent.values())
print(results3)
results_for_output["above avg. age"] = [52.03, 3.55, 12.94, 31.47]
results_for_output["below avg. age"] = [37.36, 6.18, 11.8, 44.66]
#
# # eu15/eu13 analyses?
# results4 = {}
# for j in country_groups:
# d = filterCountryGroups(df, country_groups[j])
# a = toPercentage(d, '_26_March_2019_Final_Vote')
# results4[j] = list(a.values())
# results5 = {}
# data = filterCountryGroups(df, country_groups['eu15'])
# print(data.head)
# for i in EU_Groups:
# data1 = filterEUGroupRow(data, i)
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results5[i+' eu15 ('+str(len(data1.index))+')'] = list(a.values())
#
# results6 = {}
# data = filterCountryGroups(df, country_groups['eu13'])
# print(data.head)
# for i in EU_Groups:
# data1 = filterEUGroupRow(data, i)
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results6[i + ' eu13 (' + str(len(data1.index)) + ')'] = list(a.values())
# results7 = {}
# for i in country_groups:
# data7 = filterCountryGroups(df, country_groups[i])
# print(i)
# for j in genders:
# print(j)
# data8 = filterGender(data7, j)
# a = toPercentage(data8, '_26_March_2019_Final_Vote')
# results7[i + " " + j] = list(a.values())
# results8 = {}
# for i in country_groups:
# data = filterCountryGroups(df, country_groups[i])
# age_avg = 55.48666667
# for j in ["above", "below"]:
# age_filtered = filterAge(data, j, age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results8[i + " " + j + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
# results9 = {}
# for i in genders:
# data = filterGender(df, i)
# age_avg = 55.48666667
# for j in ["above", "below"]:
# age_filtered = filterAge(data, j, age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results9[i + " " + j + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
# results10 = {}
# results11 = {}
# for i in EU_Groups:
# data = filterEUGroupRow(df, i)
# age_avg = 55.48666667
# age_filtered = filterAge(data, "above", age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results10[i + " " + 'above' + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
# age_filtered = filterAge(data, "below", age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results11[i + " " + 'below' + ' (' + str(len(age_filtered.index)) + ')'] = list(a.values())
# results12 = {}
# results13 = {}
# for i in EU_Groups:
# data = filterEUGroupRow(df, i)
# data1 = filterGender(data, "male")
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results12[i + " " + 'male' + ' ('+ str(len(data1.index)) + ')'] = list(a.values())
# data1 = filterGender(data, "female")
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results13[i + " " + 'female' + ' (' + str(len(data1.index)) + ')'] = list(a.values())
# DEPTH1
# survey(results, votes, "first-level/groups")
# survey(results2, votes,"first-level/gender")
# survey(results3, votes,"first-level/age")
survey(results_for_output, votes, "first-level/age")
# survey(results4, votes,"first-level/location")
# DEPTH2
# survey(results5, votes, "second-level/eu15+groups")
# survey(results6, votes, "second-level/eu13+groups")
# survey(results7, votes, "second-level/gender+location")
# survey(results8, votes, "second-level/age+location")
# survey(results9, votes, "second-level/gender+age")
# survey(results10, votes, "second-level/group+above")
# survey(results11, votes, "second-level/group+below")
# survey(results12, votes, "second-level/group+male")
# survey(results13, votes, "second-level/group+female")
plt.show()
if __name__ == '__main__':
main()
| ax.text(x, y, str(float(c)), ha='center', va='center',
color=text_color) | conditional_block |
keyrebels_new.py | from collections import OrderedDict
import pandas as pd
import statistics
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.colors import LinearSegmentedColormap
EU_Groups = ["GUE-NGL", "ECR", "EPP", "S&D", "ALDE", "EFDD", "Greens-EFA", "Non-attached", "ENF"]
genders = ["male", "female"]
votes = ["FOR", "ABSTAINED", "NOT PRESENT", "AGAINST"]
all_countries = ["Estonia", "Latvia", "Lithuania", "Poland", "Czech Republic", "Slovakia", "Hungary", "Malta", "Cyprus",
"Slovenia", "Romania", "Bulgaria", "Croatia", "France", "Germany", "Italy", "Belgium", "Netherlands",
"Luxembourg", "United Kingdom", "Ireland",
"Denmark", "Greece", "Spain", "Portugal", "Austria", "Sweden", "Finland"]
country_groups = {
'eu13': ["Estonia", "Latvia", "Lithuania", "Poland", "Czech Republic", "Slovakia", "Hungary", "Malta", "Cyprus",
"Slovenia", "Romania", "Bulgaria", "Croatia"],
'eu15': ["France", "Germany", "Italy", "Belgium", "Netherlands", "Luxembourg", "United Kingdom", "Ireland",
"Denmark", "Greece", "Spain", "Portugal", "Austria", "Sweden", "Finland"]}
def outputToCsv(outputName, df):
fileName = "output/" + outputName + ".csv"
df.to_csv(fileName, index=False, encoding="utf-8-sig")
# function from https://matplotlib.org/
def survey(results, category_names, name):
    """Draw a horizontal stacked bar chart of vote shares and save it as PNG.

    Adapted from the matplotlib "discrete distribution" gallery example.

    Parameters
    ----------
    results : dict
        A mapping from question labels to a list of answers per category.
        It is assumed all lists contain the same number of entries and that
        it matches the length of *category_names*.
    category_names : list of str
        The category labels.
    name : str
        Path prefix for the saved image ("<name>out.png").

    Returns
    -------
    tuple
        The created matplotlib (fig, ax) pair.
    """
    labels = list(results.keys())
    data = np.array(list(results.values()))
    data_cum = data.cumsum(axis=1, dtype=float)
    # One color per category, sampled from the reversed red-yellow-green map.
    category_colors = plt.get_cmap('RdYlGn_r')(
        np.linspace(0.15, 0.85, data.shape[1]))

    fig, ax = plt.subplots(figsize=(11, 5.95))
    ax.invert_yaxis()
    ax.xaxis.set_visible(False)
    ax.set_xlim(0, np.sum(data, axis=1).max())

    for i, (colname, color) in enumerate(zip(category_names, category_colors)):
        widths = data[:, i]
        # Each segment starts where the previous categories' cumulative sum ends.
        starts = data_cum[:, i] - widths
        ax.barh(labels, widths, left=starts, height=0.5,
                label=colname, color=color)
        xcenters = starts + widths / 2

        # Pick a label color that stays readable on the segment fill.
        r, g, b, _ = color
        text_color = 'white' if r * g * b < 0.2 else 'black'
        for y, (x, c) in enumerate(zip(xcenters, widths)):
            ax.text(x, y, str(float(c)), ha='center', va='center',
                    color=text_color)
    ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),
              loc='lower left', fontsize='small')
    plt.savefig(str(name) + "out.png")
    # BUG FIX: the original ended with a bare `plt.show` (missing call
    # parentheses) -- a no-op attribute access. Removed as dead code; the
    # caller (main) invokes plt.show() once after all figures are built.
    return fig, ax
# print(listEUGroups("Political_Group",df))
def listEUGroups(a, data):
    """Return the distinct values of column *a* of *data*, in first-appearance order.

    Parameters
    ----------
    a : str
        Column name to scan (e.g. "Political_Group").
    data : pandas.DataFrame
        Frame holding the column.

    Returns
    -------
    list
        Unique column values, ordered by first occurrence.
    """
    # The original walked the frame row-by-row with iterrows() and tallied
    # counts it never used, only to return the dict's keys. Series.unique()
    # produces the same first-appearance ordering at C speed.
    return data[a].unique().tolist()
def filterEUGroupRow(df, group):
    """Rows of *df* whose Political_Group column equals *group*."""
    mask = df["Political_Group"] == group
    return df[mask]
def filterCountryGroups(df, group):
    """Rows of *df* whose Country column is one of the names in *group*."""
    in_group = df["Country"].isin(group)
    return df[in_group]
def | (df, group):
return df[df.Gender == group]
def filterAge(df, criteria, threshold):
    """Filter *df* by the Age column relative to *threshold*.

    Parameters
    ----------
    criteria : str
        "above" keeps rows with Age >= threshold; "below" keeps Age <= threshold.
        Rows exactly at the threshold are kept by BOTH filters (as in the
        original comparisons).
    threshold : number
        Age boundary.

    Raises
    ------
    ValueError
        For any other *criteria*. The original fell through and returned
        None, which only crashed later inside toPercentage.
    """
    if criteria == "above":
        return df[df.Age >= threshold]
    if criteria == "below":
        return df[df.Age <= threshold]
    raise ValueError("criteria must be 'above' or 'below', got {!r}".format(criteria))
def combineVotes(dict):
    """Fold "<VOTE> - CORRECTED" counts into their base vote and order the result.

    Parameters
    ----------
    dict : mapping
        Vote label -> count, possibly containing "<VOTE> - CORRECTED" keys.
        (The parameter name shadows the builtin but is kept unchanged for
        backward compatibility with keyword callers.)

    Returns
    -------
    OrderedDict
        FOR / ABSTAINED / NOT PRESENT / AGAINST counts, defaulting to 0.
        As in the original, a corrected count is merged only when the base
        vote key is also present.

    Notes
    -----
    BUG FIX: the original pop()ed corrected keys out of the *caller's* dict,
    mutating the argument as a side effect. This version reads without
    mutating.
    """
    ordered = OrderedDict([('FOR', 0), ('ABSTAINED', 0), ('NOT PRESENT', 0), ('AGAINST', 0)])
    for label in ordered:
        base = dict.get(label)
        if base is None:
            continue
        corrected = dict.get(label + " - CORRECTED", 0)
        ordered[label] = base + corrected
    return ordered
def toPercentage(data, vote):
    """Percentage breakdown of the *vote* column of *data*.

    Returns the OrderedDict produced by combineVotes, with each canonical
    vote category mapped to its share of ALL rows of *data*, rounded to two
    decimals.
    """
    # COMPAT FIX: value_counts() already yields a label -> count mapping.
    # The original round-trip through to_frame().to_dict()[vote] breaks on
    # pandas >= 2.0, where the counts column is named "count" rather than
    # after the source column.
    voteDict = combineVotes(data[vote].value_counts().to_dict())
    voteTot = len(data.index)
    for key, value in voteDict.items():
        voteDict[key] = float("{0:.2f}".format((value / voteTot) * 100))
    return voteDict
def minIndices(votePct, data):
    """
    Parameters
    ----------
    votePct - vote results (in percentages) per party
    data - dataframe filtered by party

    Returns
    -------
    dataframe row ids of minority voters in the party
    """
    vote_col = data['_26_March_2019_Final_Vote']
    # Whichever of FOR/AGAINST has the smaller share is the party minority.
    if votePct["FOR"] < votePct["AGAINST"]:
        minority = 'FOR'
    elif votePct["AGAINST"] < votePct["FOR"]:
        minority = 'AGAINST'
    else:
        return []  # exact tie: nobody counts as the minority
    return data.index[vote_col == minority].tolist()
def main():
    """Load the MEP voting dataset and render vote-breakdown charts.

    Most analyses (by political group, gender, location, and their pairwise
    combinations) are retained below as commented-out exploration code; only
    the age analysis executes, and the chart actually drawn comes from the
    hard-coded percentages assigned to results_for_output.
    """
    # Column subset of data/dataset.csv used by the analyses.
    cols_to_use = [0, 1, 2, 4, 5, 7, 8, 16]
    df = pd.read_csv("data/dataset.csv", usecols=cols_to_use, encoding='utf-8-sig')
    # voting for the passing but being in the party minority is more interesting
    # is there a correlation in groups?
    # minIndex = list()
    # results = {}
    results_for_output = {}
    # for i in EU_Groups:
    #     # filters df by EP group i
    #     data = filterEUGroupRow(df, i)
    #     # gets percentages for the visual vote representation
    #     a = toPercentage(data, '_26_March_2019_Final_Vote')
    #     # if i != "Non-attached":
    #     #     minIndex.extend(minIndices(a,data))
    #     results[i + ' (' + str(len(data.index)) + ')'] = list(a.values())
    # this section gets indices of EP group minority MEPs (excluding non-attached)
    # df2 = pd.read_csv("data/dataset.csv", encoding='utf-8-sig')
    # outputToCsv("minorities", df2.iloc[minIndex, :])
    # # is there a correlation in gender?
    # results2 = {}
    # for i in genders:
    #     data = filterGender(df, i)
    #     a = toPercentage(data, '_26_March_2019_Final_Vote')
    #     results2[i] = list(a.values())
    #
    # # is there a correlation in age?
    results3 = {}
    # Mean MEP age, precomputed offline; rows at exactly this age fall into
    # both the "above" and "below" buckets (filterAge uses >= / <=).
    age_avg = 55.48666667
    for i in ["above","below"]:
        age_filtered = filterAge(df, i, age_avg)
        a_vote_percent = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
        results3[i] = list(a_vote_percent.values())
    print(results3)
    # The plotted figure uses these fixed percentages, not the freshly
    # computed results3 above -- presumably frozen for the report; confirm
    # they still match the dataset before reuse.
    results_for_output["above avg. age"] = [52.03, 3.55, 12.94, 31.47]
    results_for_output["below avg. age"] = [37.36, 6.18, 11.8, 44.66]
    #
    # # eu15/eu13 analyses?
    # results4 = {}
    # for j in country_groups:
    #     d = filterCountryGroups(df, country_groups[j])
    #     a = toPercentage(d, '_26_March_2019_Final_Vote')
    #     results4[j] = list(a.values())
    # results5 = {}
    # data = filterCountryGroups(df, country_groups['eu15'])
    # print(data.head)
    # for i in EU_Groups:
    #     data1 = filterEUGroupRow(data, i)
    #     a = toPercentage(data1, '_26_March_2019_Final_Vote')
    #     results5[i+' eu15 ('+str(len(data1.index))+')'] = list(a.values())
    #
    # results6 = {}
    # data = filterCountryGroups(df, country_groups['eu13'])
    # print(data.head)
    # for i in EU_Groups:
    #     data1 = filterEUGroupRow(data, i)
    #     a = toPercentage(data1, '_26_March_2019_Final_Vote')
    #     results6[i + ' eu13 (' + str(len(data1.index)) + ')'] = list(a.values())
    # results7 = {}
    # for i in country_groups:
    #     data7 = filterCountryGroups(df, country_groups[i])
    #     print(i)
    #     for j in genders:
    #         print(j)
    #         data8 = filterGender(data7, j)
    #         a = toPercentage(data8, '_26_March_2019_Final_Vote')
    #         results7[i + " " + j] = list(a.values())
    # results8 = {}
    # for i in country_groups:
    #     data = filterCountryGroups(df, country_groups[i])
    #     age_avg = 55.48666667
    #     for j in ["above", "below"]:
    #         age_filtered = filterAge(data, j, age_avg)
    #         a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
    #         results8[i + " " + j + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
    # results9 = {}
    # for i in genders:
    #     data = filterGender(df, i)
    #     age_avg = 55.48666667
    #     for j in ["above", "below"]:
    #         age_filtered = filterAge(data, j, age_avg)
    #         a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
    #         results9[i + " " + j + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
    # results10 = {}
    # results11 = {}
    # for i in EU_Groups:
    #     data = filterEUGroupRow(df, i)
    #     age_avg = 55.48666667
    #     age_filtered = filterAge(data, "above", age_avg)
    #     a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
    #     results10[i + " " + 'above' + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
    #     age_filtered = filterAge(data, "below", age_avg)
    #     a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
    #     results11[i + " " + 'below' + ' (' + str(len(age_filtered.index)) + ')'] = list(a.values())
    # results12 = {}
    # results13 = {}
    # for i in EU_Groups:
    #     data = filterEUGroupRow(df, i)
    #     data1 = filterGender(data, "male")
    #     a = toPercentage(data1, '_26_March_2019_Final_Vote')
    #     results12[i + " " + 'male' + ' ('+ str(len(data1.index)) + ')'] = list(a.values())
    #     data1 = filterGender(data, "female")
    #     a = toPercentage(data1, '_26_March_2019_Final_Vote')
    #     results13[i + " " + 'female' + ' (' + str(len(data1.index)) + ')'] = list(a.values())
    # DEPTH1
    # survey(results, votes, "first-level/groups")
    # survey(results2, votes,"first-level/gender")
    # survey(results3, votes,"first-level/age")
    # Saves the chart to "first-level/ageout.png" (directory must exist).
    survey(results_for_output, votes, "first-level/age")
    # survey(results4, votes,"first-level/location")
    # DEPTH2
    # survey(results5, votes, "second-level/eu15+groups")
    # survey(results6, votes, "second-level/eu13+groups")
    # survey(results7, votes, "second-level/gender+location")
    # survey(results8, votes, "second-level/age+location")
    # survey(results9, votes, "second-level/gender+age")
    # survey(results10, votes, "second-level/group+above")
    # survey(results11, votes, "second-level/group+below")
    # survey(results12, votes, "second-level/group+male")
    # survey(results13, votes, "second-level/group+female")
    plt.show()
if __name__ == '__main__':
main()
| filterGender | identifier_name |
keyrebels_new.py | from collections import OrderedDict
import pandas as pd
import statistics
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.colors import LinearSegmentedColormap
EU_Groups = ["GUE-NGL", "ECR", "EPP", "S&D", "ALDE", "EFDD", "Greens-EFA", "Non-attached", "ENF"]
genders = ["male", "female"]
votes = ["FOR", "ABSTAINED", "NOT PRESENT", "AGAINST"]
all_countries = ["Estonia", "Latvia", "Lithuania", "Poland", "Czech Republic", "Slovakia", "Hungary", "Malta", "Cyprus",
"Slovenia", "Romania", "Bulgaria", "Croatia", "France", "Germany", "Italy", "Belgium", "Netherlands",
"Luxembourg", "United Kingdom", "Ireland",
"Denmark", "Greece", "Spain", "Portugal", "Austria", "Sweden", "Finland"]
country_groups = {
'eu13': ["Estonia", "Latvia", "Lithuania", "Poland", "Czech Republic", "Slovakia", "Hungary", "Malta", "Cyprus",
"Slovenia", "Romania", "Bulgaria", "Croatia"],
'eu15': ["France", "Germany", "Italy", "Belgium", "Netherlands", "Luxembourg", "United Kingdom", "Ireland",
"Denmark", "Greece", "Spain", "Portugal", "Austria", "Sweden", "Finland"]}
def outputToCsv(outputName, df):
fileName = "output/" + outputName + ".csv"
df.to_csv(fileName, index=False, encoding="utf-8-sig")
# function from https://matplotlib.org/
def survey(results, category_names, name):
"""
Parameters
----------
results : dict
A mapping from question labels to a list of answers per category.
It is assumed all lists contain the same number of entries and that
it matches the length of *category_names*.
category_names : list of str
The category labels.
"""
labels = list(results.keys())
data = np.array(list(results.values()))
data_cum = data.cumsum(axis=1, dtype=float)
category_colors = plt.get_cmap('RdYlGn_r')(
np.linspace(0.15, 0.85, data.shape[1]))
fig, ax = plt.subplots(figsize=(11, 5.95))
ax.invert_yaxis()
ax.xaxis.set_visible(False)
ax.set_xlim(0, np.sum(data, axis=1).max())
for i, (colname, color) in enumerate(zip(category_names, category_colors)):
widths = data[:, i]
starts = data_cum[:, i] - widths
# print(starts)
ax.barh(labels, widths, left=starts, height=0.5,
label=colname, color=color)
xcenters = starts + widths / 2
r, g, b, _ = color
text_color = 'white' if r * g * b < 0.2 else 'black'
for y, (x, c) in enumerate(zip(xcenters, widths)):
ax.text(x, y, str(float(c)), ha='center', va='center',
color=text_color)
ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),
loc='lower left', fontsize='small')
# ax.set_title("Proportion of MEPs' votes by political group")
# plt.subplots_adjust(left=0.1, bottom=0.1, right=0.1, top=0.8)
# plt.tight_layout()
plt.savefig(str(name) + "out.png")
plt.show
return fig, ax
# print(listEUGroups("Political_Group",df))
def listEUGroups(a, data):
p_groups = {}
for index, row in data.iterrows():
curGroup = row[a]
if curGroup not in p_groups.keys():
p_groups[curGroup] = 1
else:
p_groups[curGroup] += 1
return list(p_groups.keys())
def filterEUGroupRow(df, group):
return df[df.Political_Group == group]
def filterCountryGroups(df, group):
return df[df['Country'].isin(group)]
def filterGender(df, group):
|
def filterAge(df, criteria, threshold):
if criteria == "above":
return df[df.Age >= threshold]
elif criteria == "below":
return df[df.Age <= threshold]
def combineVotes(dict):
ordered = OrderedDict([('FOR', 0), ('ABSTAINED', 0), ('NOT PRESENT', 0), ('AGAINST', 0)])
for i in votes:
a = i + " - CORRECTED"
if a in dict.keys() and i in dict.keys():
dict[i] += dict.pop(a)
if i in dict.keys():
ordered[i] = dict[i]
return ordered
def toPercentage(data, vote):
voteCount = data[vote].value_counts().to_frame()
voteDict = voteCount.to_dict()[vote]
voteDict = combineVotes(voteDict)
voteTot = len(data.index)
for key, value in voteDict.items():
voteDict[key] = float("{0:.2f}".format((value / voteTot) * 100))
return voteDict
def minIndices(votePct, data):
"""
Parameters
----------
votePct - vote results (in percentages) per party
data - dataframe filtered by party
Returns
-------
dataframe row ids of minority voters in the party
"""
groupMinIds = list()
if (votePct["FOR"] < votePct["AGAINST"]):
groupMinIds.extend(data.index[data['_26_March_2019_Final_Vote'] == 'FOR'].tolist())
elif (votePct["AGAINST"] < votePct["FOR"]):
groupMinIds.extend(data.index[data['_26_March_2019_Final_Vote'] == 'AGAINST'].tolist())
return groupMinIds
def main():
cols_to_use = [0, 1, 2, 4, 5, 7, 8, 16]
df = pd.read_csv("data/dataset.csv", usecols=cols_to_use, encoding='utf-8-sig')
# voting for the passing but being in the party minority is more interesting
# is there a correlation in groups?
# minIndex = list()
# results = {}
results_for_output = {}
# for i in EU_Groups:
# # filters df by EP group i
# data = filterEUGroupRow(df, i)
# # gets percentages for the visual vote representation
# a = toPercentage(data, '_26_March_2019_Final_Vote')
# # if i != "Non-attached":
# # minIndex.extend(minIndices(a,data))
# results[i + ' (' + str(len(data.index)) + ')'] = list(a.values())
# this section gets indices of EP group minority MEPs (excluding non-attached)
# df2 = pd.read_csv("data/dataset.csv", encoding='utf-8-sig')
# outputToCsv("minorities", df2.iloc[minIndex, :])
# # is there a correlation in gender?
# results2 = {}
# for i in genders:
# data = filterGender(df, i)
# a = toPercentage(data, '_26_March_2019_Final_Vote')
# results2[i] = list(a.values())
#
# # is there a correlation in age?
results3 = {}
age_avg = 55.48666667
for i in ["above","below"]:
age_filtered = filterAge(df, i, age_avg)
a_vote_percent = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
results3[i] = list(a_vote_percent.values())
print(results3)
results_for_output["above avg. age"] = [52.03, 3.55, 12.94, 31.47]
results_for_output["below avg. age"] = [37.36, 6.18, 11.8, 44.66]
#
# # eu15/eu13 analyses?
# results4 = {}
# for j in country_groups:
# d = filterCountryGroups(df, country_groups[j])
# a = toPercentage(d, '_26_March_2019_Final_Vote')
# results4[j] = list(a.values())
# results5 = {}
# data = filterCountryGroups(df, country_groups['eu15'])
# print(data.head)
# for i in EU_Groups:
# data1 = filterEUGroupRow(data, i)
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results5[i+' eu15 ('+str(len(data1.index))+')'] = list(a.values())
#
# results6 = {}
# data = filterCountryGroups(df, country_groups['eu13'])
# print(data.head)
# for i in EU_Groups:
# data1 = filterEUGroupRow(data, i)
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results6[i + ' eu13 (' + str(len(data1.index)) + ')'] = list(a.values())
# results7 = {}
# for i in country_groups:
# data7 = filterCountryGroups(df, country_groups[i])
# print(i)
# for j in genders:
# print(j)
# data8 = filterGender(data7, j)
# a = toPercentage(data8, '_26_March_2019_Final_Vote')
# results7[i + " " + j] = list(a.values())
# results8 = {}
# for i in country_groups:
# data = filterCountryGroups(df, country_groups[i])
# age_avg = 55.48666667
# for j in ["above", "below"]:
# age_filtered = filterAge(data, j, age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results8[i + " " + j + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
# results9 = {}
# for i in genders:
# data = filterGender(df, i)
# age_avg = 55.48666667
# for j in ["above", "below"]:
# age_filtered = filterAge(data, j, age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results9[i + " " + j + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
# results10 = {}
# results11 = {}
# for i in EU_Groups:
# data = filterEUGroupRow(df, i)
# age_avg = 55.48666667
# age_filtered = filterAge(data, "above", age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results10[i + " " + 'above' + ' ('+ str(len(age_filtered.index)) + ')'] = list(a.values())
# age_filtered = filterAge(data, "below", age_avg)
# a = toPercentage(age_filtered, '_26_March_2019_Final_Vote')
# results11[i + " " + 'below' + ' (' + str(len(age_filtered.index)) + ')'] = list(a.values())
# results12 = {}
# results13 = {}
# for i in EU_Groups:
# data = filterEUGroupRow(df, i)
# data1 = filterGender(data, "male")
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results12[i + " " + 'male' + ' ('+ str(len(data1.index)) + ')'] = list(a.values())
# data1 = filterGender(data, "female")
# a = toPercentage(data1, '_26_March_2019_Final_Vote')
# results13[i + " " + 'female' + ' (' + str(len(data1.index)) + ')'] = list(a.values())
# DEPTH1
# survey(results, votes, "first-level/groups")
# survey(results2, votes,"first-level/gender")
# survey(results3, votes,"first-level/age")
survey(results_for_output, votes, "first-level/age")
# survey(results4, votes,"first-level/location")
# DEPTH2
# survey(results5, votes, "second-level/eu15+groups")
# survey(results6, votes, "second-level/eu13+groups")
# survey(results7, votes, "second-level/gender+location")
# survey(results8, votes, "second-level/age+location")
# survey(results9, votes, "second-level/gender+age")
# survey(results10, votes, "second-level/group+above")
# survey(results11, votes, "second-level/group+below")
# survey(results12, votes, "second-level/group+male")
# survey(results13, votes, "second-level/group+female")
plt.show()
if __name__ == '__main__':
main() | return df[df.Gender == group]
| random_line_split |
COCDataCrawler.py | import requests
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup
import bs4
import re
from enum import Enum
from xml.dom.minidom import Document
from xml.dom.minidom import parse
import xml.dom.minidom
import os
LOG_LEVEL = Enum('LOG_LEVEL',('Trace','Log'))
CURR_LOG_LEVEL = LOG_LEVEL.Log
class CFunTrace:
    """Scope tracer: prints on construction and again on garbage collection.

    Messages are emitted only while the module-wide level is at Trace.
    """

    def __init__(self, funName):
        self.__funName = funName
        self.__emit(' Entered...')

    def __del__(self):
        self.__emit(' Exited...')

    def __emit(self, suffix):
        # Suppressed unless the global level admits trace output.
        if CURR_LOG_LEVEL.value <= LOG_LEVEL.Trace.value:
            print(self.__funName + suffix)
class CLog:
    """Prints *funName* immediately when the global level is Log or finer."""

    def __init__(self, funName):
        self.__funName = funName
        loggable = CURR_LOG_LEVEL.value <= LOG_LEVEL.Log.value
        if loggable:
            print(self.__funName)
class CCard:
    """Value object pairing a card's name with its copy count in a deck."""

    def __init__(self, name, count):
        # Attributes are read directly by the crawlers and XML writers.
        self.name = name
        self.count = count
class CFigure:
    """A player character together with the deck of cards attributed to it."""

    def __init__(self, figure_name):
        _trace = CFunTrace('CFigure:__init__')
        self.name = figure_name
        self.cardlist = []
        CLog('添加人物:{nameStr}'.format(nameStr=figure_name))

    def addCard(self, name, count):
        """Append a CCard(name, count) entry to this figure's deck."""
        _trace = CFunTrace('CFigure:addCard()')
        self.cardlist.append(CCard(name, count))
        CLog('添加卡牌,名称:{nameStr},数量:{countStr}'.format(nameStr=name, countStr=count))
class CMonster:
    """Mutable record of one monster's wiki stats and scraped card list."""

    def __init__(self, name=''):
        self.name = name           # wiki page name (used to build the page URL)
        self.inGameNames = []      # filled from the "In-game Name" table row
        self.hp = ''
        self.draw = ''
        self.vpWorth = ''
        self.defaultMove = ''
        self.cardList = []         # CCard entries scraped from the wiki page
        self.pngName = ''          # portrait image alt text
        self.bGetAllInfo = False   # True once the wiki page was fully parsed
class CGetWebData:
def __init__(self):
    """Set up request headers, source URLs and empty result containers."""
    # Browser-like headers (incl. a forum session cookie) for figure requests.
    self.figureHeaders = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cookie': 'xf_user=15819%2C9cb5406f5957d4c36620c1cc70c424d6e6a08605; xf_session=14aa3e308c605034afe321882df74a44',
        'Host': 'forums.cardhunter.com',
        'Proxy-Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36', }
    # Forum thread pages listing the player characters.
    self.fugureURLs = ['http://forums.cardhunter.com/threads/coc-pcs.9397/',
                      'http://forums.cardhunter.com/threads/coc-pcs.9397/page-2']
    # Portrait base URL; the figure name is the filename that follows it.
    self.firstFindString = 'http://live.cardhunter.com/assets/large_portraits/'
    # Figure list (was: 人物列表) -- CFigure results.
    self.figureList = []
    # Monster-deck related state (was: 怪物卡组相关变量).
    self.monsterXMLPath = 'output/MonsterBuild.xml'
    self.monsterNameHtmPaths = ['input/MonsterName1.htm','input/MonsterName2.htm']
    self.monsterList = []  # monster list (was: 怪物列表) -- CMonster results
    # Headers for the per-monster wiki page requests.
    self.eachMonsterHeaders = {
        'Host': 'wiki.cardhuntria.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive',
        'Cookie': '_ga=GA1.2.216598363.1582447086; _gid=GA1.2.1486757786.1582447086; _gat=1',
        'Upgrade-Insecure-Requests': '1',
        'Cache-Control': 'max-age=0',}
def ProcessFigureUrl(self, url):
    """Scrape one forum thread page and append one CFigure per post.

    Each post ("blockquote") is expected to begin with a portrait image URL
    (firstFindString...png) whose basename is the figure name, followed by
    repeating (count, card-link) fragments at a fixed stride of 3 children.
    """
    response = requests.get(url, headers=self.figureHeaders)
    response.encoding = 'GBK'
    soup = BeautifulSoup(response.text, 'lxml')
    firstFindStringLength = len(self.firstFindString)
    for k in soup.find_all('blockquote'):
        img_url = str(k.contents[1])
        nstr = img_url.find(self.firstFindString)
        nend = img_url.find('.png')
        if nstr >= 0 and nend > nstr :
            # Figure name = portrait filename between the base URL and ".png".
            figureName = img_url[nstr + firstFindStringLength:nend]
            newFigure = CFigure(figureName)
            # Card fragments repeat every 3 children: contents[i+1] holds
            # "<count>x ", contents[i+2] holds the card's anchor markup.
            for i in range(2,len(k.contents),3):
                tmpStr = str(k.contents[i + 1])
                endIndex = tmpStr.find('x ')
                if endIndex >= 0:
                    cardCount = int(tmpStr[1:endIndex])  # parse the card count
                    tmpStr = str(k.contents[i + 2])
                    nCardStart = tmpStr.find(';">')
                    nCardEnd = tmpStr.find('</a>')
                    if nCardStart >= 0 and nCardEnd > nCardStart :
                        # Card name sits between the style attribute's ';">'
                        # and the closing anchor tag.
                        cardName = tmpStr[nCardStart + 3:nCardEnd]
                        newFigure.addCard(cardName,cardCount)
            self.figureList.append(newFigure)
def ProcessAllFigureUrls(self):
    """Scrape every configured forum page, then log the collected figure total."""
    for eachUrl in self.fugureURLs:
        self.ProcessFigureUrl(eachUrl)
    # Log message (Chinese): "figure data collection finished, count: N".
    log = CLog('人物数据采集完成,数量:{count}'.format(count = len(self.figureList)))
def AddTestData(self):
    """Populate figureList with two hard-coded figures for offline testing."""
    fixtures = [
        ("DwarfWizardF04C", [("Hard to Pin Down", 1), ("Vulnerable", 2)]),
        ("ElfWizardF01C", [("Elven Trickery", 4), ("Powerful Spark", 3)]),
    ]
    for figure_name, cards in fixtures:
        figure = CFigure(figure_name)
        for card_name, copies in cards:
            figure.addCard(card_name, copies)
        self.figureList.append(figure)
def WriteFigureDataToXML(self,filename):
    """Serialize self.figureList to *filename* as pretty-printed XML.

    Layout: <FigureBuild URL=...> / <Figure name=...> / <Card Name=... Count=...>.
    """
    doc = Document()  # create the DOM document (was: 创建doc)
    root = doc.createElement("FigureBuild")  # create the root node (was: 创建根节点)
    root.setAttribute("URL", self.fugureURLs[0])
    doc.appendChild(root)
    for eachFigure in self.figureList:
        figureNode = doc.createElement("Figure")
        figureNode.setAttribute("name", eachFigure.name)
        for eachCard in eachFigure.cardlist:
            cardNode = doc.createElement("Card")
            cardNode.setAttribute("Name", eachCard.name)
            cardNode.setAttribute("Count", str(eachCard.count))
            figureNode.appendChild(cardNode)
        root.appendChild(figureNode)
    with open(filename, 'w') as f:
        f.write(doc.toprettyxml(indent='\t'))
def ReadMonsterName(self):
    """Seed monsterList with names parsed from the saved wiki index pages.

    Only the region between the "<!-- bodycontent -->" and
    "<!-- /bodycontent -->" markers is scanned; each '<li><a href=' line
    contributes the value of its title="..." attribute as a monster name.
    """
    for eachPath in self.monsterNameHtmPaths:
        bStart = False
        for line in open(eachPath):
            if not bStart:
                if line.find('<!-- bodycontent -->') != -1:
                    bStart = True
            else:
                if line.find('<li><a href=') != -1:
                    startPos = line.find('title="')
                    endPos = line.find('">',startPos, len(line))
                    if startPos != -1 and endPos != -1:
                        # Name = text between 'title="' and the closing '">'.
                        newMonster = CMonster(line[startPos + 7:endPos])
                        # newMonster.inGameNames.append("ingame1")
                        # newMonster.inGameNames.append("ingame2")
                        # newCard1 = CCard("card1", 1)
                        # newCard2 = CCard("card2", 2)
                        # newMonster.cardList.append(newCard1)
                        # newMonster.cardList.append(newCard2)
                        self.monsterList.append(newMonster)
                else:
                    if line.find('<!-- /bodycontent -->') != -1:
                        break
def WriteMonsterBuildsToXML(self):
doc = Document() # 创建doc
root = doc.createElement("MonsterBuild") # 创建根节点
doc.appendChild(root)
for eachMonster in self.monsterList:
monsterNode = doc.createElement("Monster")
monsterNode.setAttribute("Name", eachMonster.name)
monsterNode.setAttribute("HP", eachMonster.hp)
monsterNode.setAttribute("Draw", eachMonster.draw)
monsterNode.setAttribute("VPWorth", eachMonster.vpWorth)
monsterNode.setAttribute("DefaultMove", eachMonster.defaultMove)
monsterNode.setAttribute("bGetAllInfo", str(eachMonster.bGetAllInfo))
for eachInGameName in eachMonster.inGameNames:
inGameNameNode = doc.createElement("InGameName")
inGameNameNode.setAttribute("Name", eachInGameName)
monsterNode.appendChild(inGameNameNode)
for eachCard in eachMonster.cardList:
cardNode = doc.createElement("Card")
cardNode.setAttribute("Name", eachCard.name)
cardNode.set | ml(indent='\t'))
def ReadMonsterBuildFromXML(self):
    """Rebuild self.monsterList from the cached MonsterBuild XML file.

    Inverse of WriteMonsterBuildsToXML: restores every monster attribute,
    its in-game aliases and its card list, so crawling can resume where a
    previous run stopped (bGetAllInfo marks finished monsters).
    """
    self.monsterList.clear()  # (idiom: stray trailing semicolons removed)
    dom = xml.dom.minidom.parse(self.monsterXMLPath)
    root = dom.documentElement
    monsters = root.getElementsByTagName("Monster")  # all <Monster> nodes
    for eachMonster in monsters:
        newMonster = CMonster()
        newMonster.name = eachMonster.getAttribute("Name")
        newMonster.hp = eachMonster.getAttribute("HP")
        newMonster.draw = eachMonster.getAttribute("Draw")
        newMonster.vpWorth = eachMonster.getAttribute("VPWorth")
        newMonster.defaultMove = eachMonster.getAttribute("DefaultMove")
        # XML attributes are strings; restore the boolean flag explicitly.
        newMonster.bGetAllInfo = eachMonster.getAttribute("bGetAllInfo") == "True"
        for eachInGameName in eachMonster.getElementsByTagName("InGameName"):
            newMonster.inGameNames.append(eachInGameName.getAttribute("Name"))
        for eachCard in eachMonster.getElementsByTagName("Card"):
            newCard = CCard(eachCard.getAttribute("Name"), int(eachCard.getAttribute("Count")))
            newMonster.cardList.append(newCard)
        self.monsterList.append(newMonster)
def CollectMonsterDataFromURL(self):
    """Fetch each unfinished monster's wiki page and parse its stats.

    Monsters already flagged bGetAllInfo are skipped, so the method can be
    re-run until everything has been collected. Timeouts and non-200
    responses are reported (messages are in Chinese) and left for a later
    run.
    """
    bAllDone = True
    for eachMonster in self.monsterList:
        if eachMonster.bGetAllInfo:
            continue
        bAllDone = False
        url = "http://wiki.cardhuntria.com/wiki/" + eachMonster.name
        try:
            response = requests.get(url, headers=self.eachMonsterHeaders,timeout=20)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # catching requests.RequestException would be safer.
            print("请求" + url +"超时")
            continue
        if response.status_code == 200:
            print(url + "请求成功")
            response.encoding = 'GBK'
            soup = BeautifulSoup(response.text, 'lxml')
            self.GetOneMonsterData(soup,eachMonster)
        else:
            print("{}请求失败,返回值为{}".format(url,response.status_code))
    if bAllDone:
        print("所有怪物卡已经抓取完成")
    else:
        print("仍有怪物卡未抓取完成")
def GetOneMonsterData(self,soup,newMonster):
    """Populate *newMonster* from the table rows of its parsed wiki page.

    Each <tr> is classified by the text of its <th>; the final else-branch
    treats any remaining row whose first cell contains ' x ' as the card
    list. Marks the monster bGetAllInfo = True when the page is processed.
    """
    for tr in soup.find_all('tr'):
        for th in tr.find_all('th'):
            if th.contents[0].find("In-game Name") != -1:
                tds = tr.find_all('td')
                # Aliases arrive as one comma-separated string.
                totalStr = tds[0].contents[0].rstrip()
                newMonster.inGameNames = totalStr.split(',')
                # print(newMonster.inGameNames)
            elif th.contents[0].find("HP") != -1:
                tds = tr.find_all('td')
                newMonster.hp = tds[0].contents[0].rstrip()
                # Second cell carries the portrait <img>; keep its alt text.
                newMonster.pngName = tds[1].contents[0].contents[0]['alt']
                # print(newMonster.hp)
                # print(newMonster.pngName)
            elif th.contents[0].find("Draw") != -1:
                tds = tr.find_all('td')
                newMonster.draw = tds[0].contents[0].rstrip()
                # print(newMonster.draw)
            elif th.contents[0].find("VP worth") != -1:
                tds = tr.find_all('td')
                newMonster.vpWorth = tds[0].contents[0].rstrip()
                # print(newMonster.vpWorth)
            elif th.contents[0].find("Default Move") != -1:
                tds = tr.find_all('td')
                # Guarded: some pages lack the nested element entirely.
                if len(tds) > 0 and len(tds[0].contents) > 1 and len(tds[0].contents[1].contents) > 0:
                    newMonster.defaultMove = tds[0].contents[1].contents[0].rstrip()
                # print(newMonster.defaultMove)
            else:
                tds = tr.find_all('td')
                if len(tds) > 0 and tds[0].contents[0].find(' x ') != -1:
                    # Card entries repeat as ("N x", <link>) pairs, stride 3.
                    for i in range(0, len(tds[0].contents) - 1, 3):
                        tmp = tds[0].contents[i].rstrip()
                        count = int(tmp.replace(' x', ''))
                        cardName = tds[0].contents[i+1].contents[0].rstrip()
                        newCard = CCard(cardName,count)
                        newMonster.cardList.append(newCard)
    newMonster.bGetAllInfo = True
def ProcessAllMonsterUrls(self):
    """Drive the monster crawl end-to-end.

    First run: build the name list from the local HTML dumps and write the
    seed XML. Later runs: reload the XML instead, crawl whatever is still
    missing, and write the (possibly partial) results back so progress is
    never lost. (Idiom: stray trailing semicolons removed.)
    """
    if not os.path.exists(self.monsterXMLPath):
        self.ReadMonsterName()
        self.WriteMonsterBuildsToXML()
    else:
        self.ReadMonsterBuildFromXML()
    self.CollectMonsterDataFromURL()
    self.WriteMonsterBuildsToXML()
# Module entry point: builds the crawler and immediately runs the monster
# pipeline. NOTE(review): this performs network I/O at import time; an
# `if __name__ == '__main__':` guard would be safer -- confirm intent.
g_GetWebData = CGetWebData()
# # 1. Test writing the XML file (was: 测试写xml文件)
# g_GetWebData.AddTestData()
# g_GetWebData.WriteFigureDataToXML("test.xml")
# # 2. Crawl the COC figure decks and write them to XML (was: 抓取COC人物卡组并写到xml文件)
# g_GetWebData.ProcessAllFigureUrls()
# g_GetWebData.WriteFigureDataToXML("FigureBuild.xml")
# 3. Crawl the COC monster decks and write them to XML (was: 抓取COC怪物卡组并写到xml文件)
g_GetWebData.ProcessAllMonsterUrls()
# Data structure (was: 数据结构):
#   n figure names:
#     cards
#       name
#       count
| Attribute("Count", str(eachCard.count))
monsterNode.appendChild(cardNode)
root.appendChild(monsterNode)
with open(self.monsterXMLPath, 'w') as f:
f.write(doc.toprettyx | conditional_block |
COCDataCrawler.py | import requests
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup
import bs4
import re
from enum import Enum
from xml.dom.minidom import Document
from xml.dom.minidom import parse
import xml.dom.minidom
import os
LOG_LEVEL = Enum('LOG_LEVEL',('Trace','Log'))
CURR_LOG_LEVEL = LOG_LEVEL.Log
class CFunTrace:
def __init__(self,funName):
self.__funName = funName
if CURR_LOG_LEVEL.value <= LOG_LEVEL.Trace.value:
print(self.__funName + ' Entered...')
def __del__(self):
if CURR_LOG_LEVEL.value <= LOG_LEVEL.Trace.value:
print(self.__funName + ' Exited...')
class CLog:
def __init__(self,funName):
self.__funName = funName
if CURR_LOG_LEVEL.value <= LOG_LEVEL.Log.value:
print(self.__funName)
class CCard:
def __init__(self,name,count):
self.name = name
self.count = count
class CFigure:
| __(self,name=''):
self.name = name
self.inGameNames = []
self.hp = ''
self.draw = ''
self.vpWorth = ''
self.defaultMove = ''
self.cardList = []
self.pngName = ''
self.bGetAllInfo = False
class CGetWebData:
def __init__(self):
self.figureHeaders = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'xf_user=15819%2C9cb5406f5957d4c36620c1cc70c424d6e6a08605; xf_session=14aa3e308c605034afe321882df74a44',
'Host': 'forums.cardhunter.com',
'Proxy-Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36', }
self.fugureURLs = ['http://forums.cardhunter.com/threads/coc-pcs.9397/',
'http://forums.cardhunter.com/threads/coc-pcs.9397/page-2']
self.firstFindString = 'http://live.cardhunter.com/assets/large_portraits/'
# 人物列表
self.figureList = []
# 怪物卡组相关变量
self.monsterXMLPath = 'output/MonsterBuild.xml'
self.monsterNameHtmPaths = ['input/MonsterName1.htm','input/MonsterName2.htm']
self.monsterList = [] # 怪物列表
self.eachMonsterHeaders = {
'Host': 'wiki.cardhuntria.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Cookie': '_ga=GA1.2.216598363.1582447086; _gid=GA1.2.1486757786.1582447086; _gat=1',
'Upgrade-Insecure-Requests': '1',
'Cache-Control': 'max-age=0',}
def ProcessFigureUrl(self, url):
response = requests.get(url, headers=self.figureHeaders)
response.encoding = 'GBK'
soup = BeautifulSoup(response.text, 'lxml')
firstFindStringLength = len(self.firstFindString)
for k in soup.find_all('blockquote'):
img_url = str(k.contents[1])
nstr = img_url.find(self.firstFindString)
nend = img_url.find('.png')
if nstr >= 0 and nend > nstr :
figureName = img_url[nstr + firstFindStringLength:nend]
newFigure = CFigure(figureName)
for i in range(2,len(k.contents),3):
tmpStr = str(k.contents[i + 1])
endIndex = tmpStr.find('x ')
if endIndex >= 0:
cardCount = int(tmpStr[1:endIndex]) # 求卡牌数量
tmpStr = str(k.contents[i + 2])
nCardStart = tmpStr.find(';">')
nCardEnd = tmpStr.find('</a>')
if nCardStart >= 0 and nCardEnd > nCardStart :
cardName = tmpStr[nCardStart + 3:nCardEnd]
newFigure.addCard(cardName,cardCount)
self.figureList.append(newFigure)
def ProcessAllFigureUrls(self):
for eachUrl in self.fugureURLs:
self.ProcessFigureUrl(eachUrl)
log = CLog('人物数据采集完成,数量:{count}'.format(count = len(self.figureList)))
def AddTestData(self):
newFigure = CFigure("DwarfWizardF04C")
newFigure.addCard("Hard to Pin Down", 1)
newFigure.addCard("Vulnerable", 2)
self.figureList.append(newFigure)
newFigure1 = CFigure("ElfWizardF01C")
newFigure1.addCard("Elven Trickery", 4)
newFigure1.addCard("Powerful Spark", 3)
self.figureList.append(newFigure1)
def WriteFigureDataToXML(self,filename):
doc = Document() #创建doc
root = doc.createElement("FigureBuild") #创建根节点
root.setAttribute("URL", self.fugureURLs[0])
doc.appendChild(root)
for eachFigure in self.figureList:
figureNode = doc.createElement("Figure")
figureNode.setAttribute("name", eachFigure.name)
for eachCard in eachFigure.cardlist:
cardNode = doc.createElement("Card")
cardNode.setAttribute("Name", eachCard.name)
cardNode.setAttribute("Count", str(eachCard.count))
figureNode.appendChild(cardNode)
root.appendChild(figureNode)
with open(filename, 'w') as f:
f.write(doc.toprettyxml(indent='\t'))
def ReadMonsterName(self):
for eachPath in self.monsterNameHtmPaths:
bStart = False
for line in open(eachPath):
if not bStart:
if line.find('<!-- bodycontent -->') != -1:
bStart = True
else:
if line.find('<li><a href=') != -1:
startPos = line.find('title="')
endPos = line.find('">',startPos, len(line))
if startPos != -1 and endPos != -1:
newMonster = CMonster(line[startPos + 7:endPos])
# newMonster.inGameNames.append("ingame1")
# newMonster.inGameNames.append("ingame2")
# newCard1 = CCard("card1", 1)
# newCard2 = CCard("card2", 2)
# newMonster.cardList.append(newCard1)
# newMonster.cardList.append(newCard2)
self.monsterList.append(newMonster)
else:
if line.find('<!-- /bodycontent -->') != -1:
break
def WriteMonsterBuildsToXML(self):
doc = Document() # 创建doc
root = doc.createElement("MonsterBuild") # 创建根节点
doc.appendChild(root)
for eachMonster in self.monsterList:
monsterNode = doc.createElement("Monster")
monsterNode.setAttribute("Name", eachMonster.name)
monsterNode.setAttribute("HP", eachMonster.hp)
monsterNode.setAttribute("Draw", eachMonster.draw)
monsterNode.setAttribute("VPWorth", eachMonster.vpWorth)
monsterNode.setAttribute("DefaultMove", eachMonster.defaultMove)
monsterNode.setAttribute("bGetAllInfo", str(eachMonster.bGetAllInfo))
for eachInGameName in eachMonster.inGameNames:
inGameNameNode = doc.createElement("InGameName")
inGameNameNode.setAttribute("Name", eachInGameName)
monsterNode.appendChild(inGameNameNode)
for eachCard in eachMonster.cardList:
cardNode = doc.createElement("Card")
cardNode.setAttribute("Name", eachCard.name)
cardNode.setAttribute("Count", str(eachCard.count))
monsterNode.appendChild(cardNode)
root.appendChild(monsterNode)
with open(self.monsterXMLPath, 'w') as f:
f.write(doc.toprettyxml(indent='\t'))
def ReadMonsterBuildFromXML(self):
self.monsterList.clear();
dom = xml.dom.minidom.parse(self.monsterXMLPath)
root = dom.documentElement
monsters = root.getElementsByTagName("Monster") # 获取Monster节点
# print(len(monsters))
for eachMonster in monsters:
newMonster = CMonster()
newMonster.name = eachMonster.getAttribute("Name")
newMonster.hp = eachMonster.getAttribute("HP")
newMonster.draw = eachMonster.getAttribute("Draw")
newMonster.vpWorth = eachMonster.getAttribute("VPWorth")
newMonster.defaultMove = eachMonster.getAttribute("DefaultMove")
newMonster.bGetAllInfo = eachMonster.getAttribute("bGetAllInfo") == "True"
inGameNames = eachMonster.getElementsByTagName("InGameName")
for eachInGameName in inGameNames:
newMonster.inGameNames.append(eachInGameName.getAttribute("Name"))
cards = eachMonster.getElementsByTagName("Card")
for eachCard in cards:
newCard = CCard(eachCard.getAttribute("Name"),int(eachCard.getAttribute("Count")))
newMonster.cardList.append(newCard)
self.monsterList.append(newMonster)
def CollectMonsterDataFromURL(self):
bAllDone = True
for eachMonster in self.monsterList:
if eachMonster.bGetAllInfo:
continue
bAllDone = False
url = "http://wiki.cardhuntria.com/wiki/" + eachMonster.name
try:
response = requests.get(url, headers=self.eachMonsterHeaders,timeout=20)
except:
print("请求" + url +"超时")
continue
if response.status_code == 200:
print(url + "请求成功")
response.encoding = 'GBK'
soup = BeautifulSoup(response.text, 'lxml')
self.GetOneMonsterData(soup,eachMonster)
else:
print("{}请求失败,返回值为{}".format(url,response.status_code))
if bAllDone:
print("所有怪物卡已经抓取完成")
else:
print("仍有怪物卡未抓取完成")
def GetOneMonsterData(self,soup,newMonster):
for tr in soup.find_all('tr'):
for th in tr.find_all('th'):
if th.contents[0].find("In-game Name") != -1:
tds = tr.find_all('td')
totalStr = tds[0].contents[0].rstrip()
newMonster.inGameNames = totalStr.split(',')
# print(newMonster.inGameNames)
elif th.contents[0].find("HP") != -1:
tds = tr.find_all('td')
newMonster.hp = tds[0].contents[0].rstrip()
newMonster.pngName = tds[1].contents[0].contents[0]['alt']
# print(newMonster.hp)
# print(newMonster.pngName)
elif th.contents[0].find("Draw") != -1:
tds = tr.find_all('td')
newMonster.draw = tds[0].contents[0].rstrip()
# print(newMonster.draw)
elif th.contents[0].find("VP worth") != -1:
tds = tr.find_all('td')
newMonster.vpWorth = tds[0].contents[0].rstrip()
# print(newMonster.vpWorth)
elif th.contents[0].find("Default Move") != -1:
tds = tr.find_all('td')
if len(tds) > 0 and len(tds[0].contents) > 1 and len(tds[0].contents[1].contents) > 0:
newMonster.defaultMove = tds[0].contents[1].contents[0].rstrip()
# print(newMonster.defaultMove)
else:
tds = tr.find_all('td')
if len(tds) > 0 and tds[0].contents[0].find(' x ') != -1:
for i in range(0, len(tds[0].contents) - 1, 3):
tmp = tds[0].contents[i].rstrip()
count = int(tmp.replace(' x', ''))
cardName = tds[0].contents[i+1].contents[0].rstrip()
newCard = CCard(cardName,count)
newMonster.cardList.append(newCard)
newMonster.bGetAllInfo = True
def ProcessAllMonsterUrls(self):
if not os.path.exists(self.monsterXMLPath):
self.ReadMonsterName();
self.WriteMonsterBuildsToXML();
else:
self.ReadMonsterBuildFromXML();
self.CollectMonsterDataFromURL();
self.WriteMonsterBuildsToXML();
g_GetWebData = CGetWebData()
# # 1.测试写xml文件
# g_GetWebData.AddTestData()
# g_GetWebData.WriteFigureDataToXML("test.xml")
# # 2.抓取COC人物卡组并写到xml文件
# g_GetWebData.ProcessAllFigureUrls()
# g_GetWebData.WriteFigureDataToXML("FigureBuild.xml")
# 3.抓取COC怪物卡组并写到xml文件
g_GetWebData.ProcessAllMonsterUrls()
# 数据结构:
# n个人物名称:
# 卡牌
# 名称
# 数量
| def __init__(self,figure_name):
funLog = CFunTrace('CFigure:__init__')
self.cardlist = []
self.name = figure_name
log = CLog('添加人物:{nameStr}'.format(nameStr = figure_name))
def addCard(self,name,count):
funLog = CFunTrace('CFigure:addCard()')
card = CCard(name,count)
self.cardlist.append(card)
log = CLog('添加卡牌,名称:{nameStr},数量:{countStr}'.format(nameStr = name,countStr = count))
class CMonster:
def __init | identifier_body |
COCDataCrawler.py | import requests
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup
import bs4
import re
from enum import Enum
from xml.dom.minidom import Document
from xml.dom.minidom import parse
import xml.dom.minidom
import os
LOG_LEVEL = Enum('LOG_LEVEL',('Trace','Log'))
CURR_LOG_LEVEL = LOG_LEVEL.Log
class CFunTrace:
def __init__(self,funName):
self.__funName = funName
if CURR_LOG_LEVEL.value <= LOG_LEVEL.Trace.value:
print(self.__funName + ' Entered...')
def __del__(self):
if CURR_LOG_LEVEL.value <= LOG_LEVEL.Trace.value:
print(self.__funName + ' Exited...')
| class CLog:
def __init__(self,funName):
self.__funName = funName
if CURR_LOG_LEVEL.value <= LOG_LEVEL.Log.value:
print(self.__funName)
class CCard:
def __init__(self,name,count):
self.name = name
self.count = count
class CFigure:
def __init__(self,figure_name):
funLog = CFunTrace('CFigure:__init__')
self.cardlist = []
self.name = figure_name
log = CLog('添加人物:{nameStr}'.format(nameStr = figure_name))
def addCard(self,name,count):
funLog = CFunTrace('CFigure:addCard()')
card = CCard(name,count)
self.cardlist.append(card)
log = CLog('添加卡牌,名称:{nameStr},数量:{countStr}'.format(nameStr = name,countStr = count))
class CMonster:
def __init__(self,name=''):
self.name = name
self.inGameNames = []
self.hp = ''
self.draw = ''
self.vpWorth = ''
self.defaultMove = ''
self.cardList = []
self.pngName = ''
self.bGetAllInfo = False
class CGetWebData:
def __init__(self):
self.figureHeaders = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'xf_user=15819%2C9cb5406f5957d4c36620c1cc70c424d6e6a08605; xf_session=14aa3e308c605034afe321882df74a44',
'Host': 'forums.cardhunter.com',
'Proxy-Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36', }
self.fugureURLs = ['http://forums.cardhunter.com/threads/coc-pcs.9397/',
'http://forums.cardhunter.com/threads/coc-pcs.9397/page-2']
self.firstFindString = 'http://live.cardhunter.com/assets/large_portraits/'
# 人物列表
self.figureList = []
# 怪物卡组相关变量
self.monsterXMLPath = 'output/MonsterBuild.xml'
self.monsterNameHtmPaths = ['input/MonsterName1.htm','input/MonsterName2.htm']
self.monsterList = [] # 怪物列表
self.eachMonsterHeaders = {
'Host': 'wiki.cardhuntria.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Cookie': '_ga=GA1.2.216598363.1582447086; _gid=GA1.2.1486757786.1582447086; _gat=1',
'Upgrade-Insecure-Requests': '1',
'Cache-Control': 'max-age=0',}
def ProcessFigureUrl(self, url):
response = requests.get(url, headers=self.figureHeaders)
response.encoding = 'GBK'
soup = BeautifulSoup(response.text, 'lxml')
firstFindStringLength = len(self.firstFindString)
for k in soup.find_all('blockquote'):
img_url = str(k.contents[1])
nstr = img_url.find(self.firstFindString)
nend = img_url.find('.png')
if nstr >= 0 and nend > nstr :
figureName = img_url[nstr + firstFindStringLength:nend]
newFigure = CFigure(figureName)
for i in range(2,len(k.contents),3):
tmpStr = str(k.contents[i + 1])
endIndex = tmpStr.find('x ')
if endIndex >= 0:
cardCount = int(tmpStr[1:endIndex]) # 求卡牌数量
tmpStr = str(k.contents[i + 2])
nCardStart = tmpStr.find(';">')
nCardEnd = tmpStr.find('</a>')
if nCardStart >= 0 and nCardEnd > nCardStart :
cardName = tmpStr[nCardStart + 3:nCardEnd]
newFigure.addCard(cardName,cardCount)
self.figureList.append(newFigure)
def ProcessAllFigureUrls(self):
for eachUrl in self.fugureURLs:
self.ProcessFigureUrl(eachUrl)
log = CLog('人物数据采集完成,数量:{count}'.format(count = len(self.figureList)))
def AddTestData(self):
newFigure = CFigure("DwarfWizardF04C")
newFigure.addCard("Hard to Pin Down", 1)
newFigure.addCard("Vulnerable", 2)
self.figureList.append(newFigure)
newFigure1 = CFigure("ElfWizardF01C")
newFigure1.addCard("Elven Trickery", 4)
newFigure1.addCard("Powerful Spark", 3)
self.figureList.append(newFigure1)
def WriteFigureDataToXML(self,filename):
doc = Document() #创建doc
root = doc.createElement("FigureBuild") #创建根节点
root.setAttribute("URL", self.fugureURLs[0])
doc.appendChild(root)
for eachFigure in self.figureList:
figureNode = doc.createElement("Figure")
figureNode.setAttribute("name", eachFigure.name)
for eachCard in eachFigure.cardlist:
cardNode = doc.createElement("Card")
cardNode.setAttribute("Name", eachCard.name)
cardNode.setAttribute("Count", str(eachCard.count))
figureNode.appendChild(cardNode)
root.appendChild(figureNode)
with open(filename, 'w') as f:
f.write(doc.toprettyxml(indent='\t'))
def ReadMonsterName(self):
for eachPath in self.monsterNameHtmPaths:
bStart = False
for line in open(eachPath):
if not bStart:
if line.find('<!-- bodycontent -->') != -1:
bStart = True
else:
if line.find('<li><a href=') != -1:
startPos = line.find('title="')
endPos = line.find('">',startPos, len(line))
if startPos != -1 and endPos != -1:
newMonster = CMonster(line[startPos + 7:endPos])
# newMonster.inGameNames.append("ingame1")
# newMonster.inGameNames.append("ingame2")
# newCard1 = CCard("card1", 1)
# newCard2 = CCard("card2", 2)
# newMonster.cardList.append(newCard1)
# newMonster.cardList.append(newCard2)
self.monsterList.append(newMonster)
else:
if line.find('<!-- /bodycontent -->') != -1:
break
def WriteMonsterBuildsToXML(self):
doc = Document() # 创建doc
root = doc.createElement("MonsterBuild") # 创建根节点
doc.appendChild(root)
for eachMonster in self.monsterList:
monsterNode = doc.createElement("Monster")
monsterNode.setAttribute("Name", eachMonster.name)
monsterNode.setAttribute("HP", eachMonster.hp)
monsterNode.setAttribute("Draw", eachMonster.draw)
monsterNode.setAttribute("VPWorth", eachMonster.vpWorth)
monsterNode.setAttribute("DefaultMove", eachMonster.defaultMove)
monsterNode.setAttribute("bGetAllInfo", str(eachMonster.bGetAllInfo))
for eachInGameName in eachMonster.inGameNames:
inGameNameNode = doc.createElement("InGameName")
inGameNameNode.setAttribute("Name", eachInGameName)
monsterNode.appendChild(inGameNameNode)
for eachCard in eachMonster.cardList:
cardNode = doc.createElement("Card")
cardNode.setAttribute("Name", eachCard.name)
cardNode.setAttribute("Count", str(eachCard.count))
monsterNode.appendChild(cardNode)
root.appendChild(monsterNode)
with open(self.monsterXMLPath, 'w') as f:
f.write(doc.toprettyxml(indent='\t'))
def ReadMonsterBuildFromXML(self):
self.monsterList.clear();
dom = xml.dom.minidom.parse(self.monsterXMLPath)
root = dom.documentElement
monsters = root.getElementsByTagName("Monster") # 获取Monster节点
# print(len(monsters))
for eachMonster in monsters:
newMonster = CMonster()
newMonster.name = eachMonster.getAttribute("Name")
newMonster.hp = eachMonster.getAttribute("HP")
newMonster.draw = eachMonster.getAttribute("Draw")
newMonster.vpWorth = eachMonster.getAttribute("VPWorth")
newMonster.defaultMove = eachMonster.getAttribute("DefaultMove")
newMonster.bGetAllInfo = eachMonster.getAttribute("bGetAllInfo") == "True"
inGameNames = eachMonster.getElementsByTagName("InGameName")
for eachInGameName in inGameNames:
newMonster.inGameNames.append(eachInGameName.getAttribute("Name"))
cards = eachMonster.getElementsByTagName("Card")
for eachCard in cards:
newCard = CCard(eachCard.getAttribute("Name"),int(eachCard.getAttribute("Count")))
newMonster.cardList.append(newCard)
self.monsterList.append(newMonster)
def CollectMonsterDataFromURL(self):
bAllDone = True
for eachMonster in self.monsterList:
if eachMonster.bGetAllInfo:
continue
bAllDone = False
url = "http://wiki.cardhuntria.com/wiki/" + eachMonster.name
try:
response = requests.get(url, headers=self.eachMonsterHeaders,timeout=20)
except:
print("请求" + url +"超时")
continue
if response.status_code == 200:
print(url + "请求成功")
response.encoding = 'GBK'
soup = BeautifulSoup(response.text, 'lxml')
self.GetOneMonsterData(soup,eachMonster)
else:
print("{}请求失败,返回值为{}".format(url,response.status_code))
if bAllDone:
print("所有怪物卡已经抓取完成")
else:
print("仍有怪物卡未抓取完成")
def GetOneMonsterData(self,soup,newMonster):
for tr in soup.find_all('tr'):
for th in tr.find_all('th'):
if th.contents[0].find("In-game Name") != -1:
tds = tr.find_all('td')
totalStr = tds[0].contents[0].rstrip()
newMonster.inGameNames = totalStr.split(',')
# print(newMonster.inGameNames)
elif th.contents[0].find("HP") != -1:
tds = tr.find_all('td')
newMonster.hp = tds[0].contents[0].rstrip()
newMonster.pngName = tds[1].contents[0].contents[0]['alt']
# print(newMonster.hp)
# print(newMonster.pngName)
elif th.contents[0].find("Draw") != -1:
tds = tr.find_all('td')
newMonster.draw = tds[0].contents[0].rstrip()
# print(newMonster.draw)
elif th.contents[0].find("VP worth") != -1:
tds = tr.find_all('td')
newMonster.vpWorth = tds[0].contents[0].rstrip()
# print(newMonster.vpWorth)
elif th.contents[0].find("Default Move") != -1:
tds = tr.find_all('td')
if len(tds) > 0 and len(tds[0].contents) > 1 and len(tds[0].contents[1].contents) > 0:
newMonster.defaultMove = tds[0].contents[1].contents[0].rstrip()
# print(newMonster.defaultMove)
else:
tds = tr.find_all('td')
if len(tds) > 0 and tds[0].contents[0].find(' x ') != -1:
for i in range(0, len(tds[0].contents) - 1, 3):
tmp = tds[0].contents[i].rstrip()
count = int(tmp.replace(' x', ''))
cardName = tds[0].contents[i+1].contents[0].rstrip()
newCard = CCard(cardName,count)
newMonster.cardList.append(newCard)
newMonster.bGetAllInfo = True
def ProcessAllMonsterUrls(self):
if not os.path.exists(self.monsterXMLPath):
self.ReadMonsterName();
self.WriteMonsterBuildsToXML();
else:
self.ReadMonsterBuildFromXML();
self.CollectMonsterDataFromURL();
self.WriteMonsterBuildsToXML();
g_GetWebData = CGetWebData()
# # 1.测试写xml文件
# g_GetWebData.AddTestData()
# g_GetWebData.WriteFigureDataToXML("test.xml")
# # 2.抓取COC人物卡组并写到xml文件
# g_GetWebData.ProcessAllFigureUrls()
# g_GetWebData.WriteFigureDataToXML("FigureBuild.xml")
# 3.抓取COC怪物卡组并写到xml文件
g_GetWebData.ProcessAllMonsterUrls()
# 数据结构:
# n个人物名称:
# 卡牌
# 名称
# 数量 | random_line_split | |
COCDataCrawler.py | import requests
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup
import bs4
import re
from enum import Enum
from xml.dom.minidom import Document
from xml.dom.minidom import parse
import xml.dom.minidom
import os
LOG_LEVEL = Enum('LOG_LEVEL',('Trace','Log'))
CURR_LOG_LEVEL = LOG_LEVEL.Log
class CFunTrace:
def __init__(self,funName):
self.__funName = funName
if CURR_LOG_LEVEL.value <= LOG_LEVEL.Trace.value:
print(self.__funName + ' Entered...')
def __del__(self):
if CURR_LOG_LEVEL.value <= LOG_LEVEL.Trace.value:
print(self.__funName + ' Exited...')
class CLog:
def __init__(self,funName):
self.__funName = funName
if CURR_LOG_LEVEL.value <= LOG_LEVEL.Log.value:
print(self.__funName)
class CCard:
def __init__(self,name,count):
self.name = name
self.count = count
class CFigure:
def __init__(self,figure_name):
funLog = CFunTrace('CFigure:__init__')
self.cardlist = []
self.name = figure_name
log = CLog('添加人物:{nameStr}'.format(nameStr = figure_name))
def addCard(self,name,count):
funLog = CFunTrace('CFigure:addCard()')
card = CCard(name,count)
self.cardlist.append(card)
log = CLog('添加卡牌,名称:{nameStr},数量:{countStr}'.format(nameStr = name,countStr = count))
class CMonster:
def __init__(self,name=''):
self.name = name
self.inGameNames = []
self.hp = ''
self.draw = ''
self.vpWorth = ''
self.defaultMove = ''
self.cardList = []
self.pngName = ''
self.bGetAllInfo = False
class CGetWebData:
def __init__(self):
self.figureHeaders = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'xf_user=15819%2C9cb5406f5957d4c36620c1cc70c424d6e6a08605; xf_session=14aa3e308c605034afe321882df74a44',
'Host': 'forums.cardhunter.com',
'Proxy-Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36', }
self.fugureURLs = ['http://forums.cardhunter.com/threads/coc-pcs.9397/',
'http://forums.cardhunter.com/threads/coc-pcs.9397/page-2']
self.firstFindString = 'http://live.cardhunter.com/assets/large_portraits/'
# 人物列表
self.figureList = []
# 怪物卡组相关变量
self.monsterXMLPath = 'output/MonsterBuild.xml'
self.monsterNameHtmPaths = ['input/MonsterName1.htm','input/MonsterName2.htm']
self.monsterList = [] # 怪物列表
self.eachMonsterHeaders = {
'Host': 'wiki.cardhuntria.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Cookie': '_ga=GA1.2.216598363.1582447086; _gid=GA1.2.1486757786.1582447086; _gat=1',
'Upgrade-Insecure-Requests': '1',
'Cache-Control': 'max-age=0',}
def ProcessFigureUrl(self, url):
response = requests.get(url, headers=self.figureHeaders)
response.encoding = 'GBK'
soup = BeautifulSoup(response.text, 'lxml')
firstFindStringLength = len(self.firstFindString)
for k in soup.find_all('blockquote'):
img_url = str(k.contents[1])
nstr = img_url.find(self.firstFindString)
nend = img_url.find('.png')
if nstr >= 0 and nend > nstr :
figureName = img_url[nstr + firstFindStringLength:nend]
newFigure = CFigure(figureName)
for i in range(2,len(k.contents),3):
tmpStr = str(k.contents[i + 1])
endIndex = tmpStr.find('x ')
if endIndex >= 0:
cardCount = int(tmpStr[1:endIndex]) # 求卡牌数量
tmpStr = str(k.contents[i + 2])
nCardStart = tmpStr.find(';">')
nCardEnd = tmpStr.find('</a>')
if nCardStart >= 0 and nCardEnd > nCardStart :
cardName = tmpStr[nCardStart + 3:nCardEnd]
newFigure.addCard(cardName,cardCount)
self.figureList.append(newFigure)
def ProcessAllFigureUrls(self):
for eachUrl in self.fugureURLs:
self.ProcessFigureUrl(eachUrl)
log = CLog('人物数据采集完成,数量:{count}'.format(count = len(self.figureList)))
def AddTestData(self):
newFigure = CFigure("DwarfWizardF04C")
newFigure.addCard("Hard to Pin Down", 1)
newFigure.addCard("Vulnerable", 2)
self.figureList.append(newFigure)
newFigure1 = CFigure("ElfWizardF01C")
newFigure1.addCard("Elven Trickery", 4)
newFigure1.addCard("Powerful Spark", 3)
self.figureList.append(newFigure1)
def WriteFigureDataToXML(self,filename):
doc = Document() #创建doc
root = doc.createElem | 创建根节点
root.setAttribute("URL", self.fugureURLs[0])
doc.appendChild(root)
for eachFigure in self.figureList:
figureNode = doc.createElement("Figure")
figureNode.setAttribute("name", eachFigure.name)
for eachCard in eachFigure.cardlist:
cardNode = doc.createElement("Card")
cardNode.setAttribute("Name", eachCard.name)
cardNode.setAttribute("Count", str(eachCard.count))
figureNode.appendChild(cardNode)
root.appendChild(figureNode)
with open(filename, 'w') as f:
f.write(doc.toprettyxml(indent='\t'))
def ReadMonsterName(self):
for eachPath in self.monsterNameHtmPaths:
bStart = False
for line in open(eachPath):
if not bStart:
if line.find('<!-- bodycontent -->') != -1:
bStart = True
else:
if line.find('<li><a href=') != -1:
startPos = line.find('title="')
endPos = line.find('">',startPos, len(line))
if startPos != -1 and endPos != -1:
newMonster = CMonster(line[startPos + 7:endPos])
# newMonster.inGameNames.append("ingame1")
# newMonster.inGameNames.append("ingame2")
# newCard1 = CCard("card1", 1)
# newCard2 = CCard("card2", 2)
# newMonster.cardList.append(newCard1)
# newMonster.cardList.append(newCard2)
self.monsterList.append(newMonster)
else:
if line.find('<!-- /bodycontent -->') != -1:
break
def WriteMonsterBuildsToXML(self):
doc = Document() # 创建doc
root = doc.createElement("MonsterBuild") # 创建根节点
doc.appendChild(root)
for eachMonster in self.monsterList:
monsterNode = doc.createElement("Monster")
monsterNode.setAttribute("Name", eachMonster.name)
monsterNode.setAttribute("HP", eachMonster.hp)
monsterNode.setAttribute("Draw", eachMonster.draw)
monsterNode.setAttribute("VPWorth", eachMonster.vpWorth)
monsterNode.setAttribute("DefaultMove", eachMonster.defaultMove)
monsterNode.setAttribute("bGetAllInfo", str(eachMonster.bGetAllInfo))
for eachInGameName in eachMonster.inGameNames:
inGameNameNode = doc.createElement("InGameName")
inGameNameNode.setAttribute("Name", eachInGameName)
monsterNode.appendChild(inGameNameNode)
for eachCard in eachMonster.cardList:
cardNode = doc.createElement("Card")
cardNode.setAttribute("Name", eachCard.name)
cardNode.setAttribute("Count", str(eachCard.count))
monsterNode.appendChild(cardNode)
root.appendChild(monsterNode)
with open(self.monsterXMLPath, 'w') as f:
f.write(doc.toprettyxml(indent='\t'))
def ReadMonsterBuildFromXML(self):
self.monsterList.clear();
dom = xml.dom.minidom.parse(self.monsterXMLPath)
root = dom.documentElement
monsters = root.getElementsByTagName("Monster") # 获取Monster节点
# print(len(monsters))
for eachMonster in monsters:
newMonster = CMonster()
newMonster.name = eachMonster.getAttribute("Name")
newMonster.hp = eachMonster.getAttribute("HP")
newMonster.draw = eachMonster.getAttribute("Draw")
newMonster.vpWorth = eachMonster.getAttribute("VPWorth")
newMonster.defaultMove = eachMonster.getAttribute("DefaultMove")
newMonster.bGetAllInfo = eachMonster.getAttribute("bGetAllInfo") == "True"
inGameNames = eachMonster.getElementsByTagName("InGameName")
for eachInGameName in inGameNames:
newMonster.inGameNames.append(eachInGameName.getAttribute("Name"))
cards = eachMonster.getElementsByTagName("Card")
for eachCard in cards:
newCard = CCard(eachCard.getAttribute("Name"),int(eachCard.getAttribute("Count")))
newMonster.cardList.append(newCard)
self.monsterList.append(newMonster)
def CollectMonsterDataFromURL(self):
bAllDone = True
for eachMonster in self.monsterList:
if eachMonster.bGetAllInfo:
continue
bAllDone = False
url = "http://wiki.cardhuntria.com/wiki/" + eachMonster.name
try:
response = requests.get(url, headers=self.eachMonsterHeaders,timeout=20)
except:
print("请求" + url +"超时")
continue
if response.status_code == 200:
print(url + "请求成功")
response.encoding = 'GBK'
soup = BeautifulSoup(response.text, 'lxml')
self.GetOneMonsterData(soup,eachMonster)
else:
print("{}请求失败,返回值为{}".format(url,response.status_code))
if bAllDone:
print("所有怪物卡已经抓取完成")
else:
print("仍有怪物卡未抓取完成")
def GetOneMonsterData(self,soup,newMonster):
for tr in soup.find_all('tr'):
for th in tr.find_all('th'):
if th.contents[0].find("In-game Name") != -1:
tds = tr.find_all('td')
totalStr = tds[0].contents[0].rstrip()
newMonster.inGameNames = totalStr.split(',')
# print(newMonster.inGameNames)
elif th.contents[0].find("HP") != -1:
tds = tr.find_all('td')
newMonster.hp = tds[0].contents[0].rstrip()
newMonster.pngName = tds[1].contents[0].contents[0]['alt']
# print(newMonster.hp)
# print(newMonster.pngName)
elif th.contents[0].find("Draw") != -1:
tds = tr.find_all('td')
newMonster.draw = tds[0].contents[0].rstrip()
# print(newMonster.draw)
elif th.contents[0].find("VP worth") != -1:
tds = tr.find_all('td')
newMonster.vpWorth = tds[0].contents[0].rstrip()
# print(newMonster.vpWorth)
elif th.contents[0].find("Default Move") != -1:
tds = tr.find_all('td')
if len(tds) > 0 and len(tds[0].contents) > 1 and len(tds[0].contents[1].contents) > 0:
newMonster.defaultMove = tds[0].contents[1].contents[0].rstrip()
# print(newMonster.defaultMove)
else:
tds = tr.find_all('td')
if len(tds) > 0 and tds[0].contents[0].find(' x ') != -1:
for i in range(0, len(tds[0].contents) - 1, 3):
tmp = tds[0].contents[i].rstrip()
count = int(tmp.replace(' x', ''))
cardName = tds[0].contents[i+1].contents[0].rstrip()
newCard = CCard(cardName,count)
newMonster.cardList.append(newCard)
newMonster.bGetAllInfo = True
def ProcessAllMonsterUrls(self):
if not os.path.exists(self.monsterXMLPath):
self.ReadMonsterName();
self.WriteMonsterBuildsToXML();
else:
self.ReadMonsterBuildFromXML();
self.CollectMonsterDataFromURL();
self.WriteMonsterBuildsToXML();
g_GetWebData = CGetWebData()
# # 1.测试写xml文件
# g_GetWebData.AddTestData()
# g_GetWebData.WriteFigureDataToXML("test.xml")
# # 2.抓取COC人物卡组并写到xml文件
# g_GetWebData.ProcessAllFigureUrls()
# g_GetWebData.WriteFigureDataToXML("FigureBuild.xml")
# 3.抓取COC怪物卡组并写到xml文件
g_GetWebData.ProcessAllMonsterUrls()
# 数据结构:
# n个人物名称:
# 卡牌
# 名称
# 数量
| ent("FigureBuild") # | identifier_name |
error.ts | const es = {};
/// Installation
export const NO_ADMIN_EMAIL = -400100; es[NO_ADMIN_EMAIL] = "Please input admin email";
/// Users
export const TEST = -11; es[TEST] = 'Test Error';
export const UNKNOWN = -12; es[UNKNOWN] = 'Unknown erorr. #info';
export const PERMISSION_DENIED_ADMIN_ONLY = -10; es[PERMISSION_DENIED_ADMIN_ONLY] = 'Permission denied. Only administrators are allowed.';
// export const UNHANDLED = -10; es[UNHANDLED] = 'Unhandled error message must have suggestion on which error is to be handled.'
export const NO_EMAIL = -50; es[NO_EMAIL] = 'No email address.';
export const NO_PASSWORD = -51; es[NO_PASSWORD] = 'No password.';
export const NO_NAME = -52; es[NO_NAME] = 'No name.';
export const NO_UID = -53; es[NO_UID] = 'No uid.';
export const UID_TOO_LONG = -54; es[UID_TOO_LONG] = 'UID is too long. Must be less than 128 characters.';
export const UID_CANNOT_CONTAIN_SLASH = -55; es[UID_CANNOT_CONTAIN_SLASH] = 'UID cannot contain slashes.';
export const TYPE_CHECK = -40010; es[TYPE_CHECK] = 'Type check error. name: #name, type should be #type';
export const NO_DOCUMENT_ID = -40025; es[NO_DOCUMENT_ID] = 'No documnet ID, #documentID';
export const DOCUMENT_ID_TOO_LONG = -40026; es[DOCUMENT_ID_TOO_LONG] = 'Document ID is too long.';
export const DOCUMENT_ID_CANNOT_CONTAIN_SLASH = -40027; es[DOCUMENT_ID_CANNOT_CONTAIN_SLASH] = 'Document ID cannot contain slash.';
export const WRONG_GENDER = -40061; es[WRONG_GENDER] = 'Wrong gender.';
export const WRONG_ROUTE = -40060; es[WRONG_ROUTE] = 'The given route is not exists. It is a wrong route.';
export const EMPTY_ROUTE = -40063; es[EMPTY_ROUTE] = 'Empty route.';
export const WRONG_METHOD = -40064; es[WRONG_METHOD] = 'Wrong method.';
export const ANONYMOUS_CANNOT_EDIT_PROFILE = -40070; es[ANONYMOUS_CANNOT_EDIT_PROFILE] = 'Anonymous cannot set/update profile.';
export const USER_ID_NOT_EXISTS_IN_USER_COLLECTION = -40020; es[USER_ID_NOT_EXISTS_IN_USER_COLLECTION] = 'User UID "#id" in users collection does not exists.';
// basic error message
// export const CATEGORY_LEVEL_ON_WRITE_MUST_CONTAIN_NUMBER = -40446; es[CATEGORY_LEVEL_ON_WRITE_MUST_CONTAIN_NUMBER] = 'Level on write field in category should be number.'
// export const CATEGORY_LEVEL_ON_READ_MUST_CONTAIN_NUMBER = -40447; es[CATEGORY_LEVEL_ON_READ_MUST_CONTAIN_NUMBER] = 'Level on read field in category should be a number.'
// export const CATEGORY_LEVEL_ON_LIST_MUST_CONTAIN_NUMBER = -40448; es[CATEGORY_LEVEL_ON_LIST_MUST_CONTAIN_NUMBER] = 'Level on list field in category should be a number.'
export const MUST_BE_A_NUMBER = -400204; es[MUST_BE_A_NUMBER] = '#value must be a number';
export const MUST_BE_AN_ARRAY = -400205; es[MUST_BE_AN_ARRAY] = '#value must be an array';
export const MUST_BE_AN_OBJECT = -400206; es[MUST_BE_AN_OBJECT] = '#value must be an object';
export const MUST_BE_A_BOOLEAN = -400207; es[MUST_BE_A_BOOLEAN] = '#value must be a boolean';
export const MUST_BE_A_STRING = -400208; es[MUST_BE_A_STRING] = '#value must be a string';
// documnets
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET = -40004; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET] = 'Document ID to get a data does not exist in database. collection: #collectionName, documentID: #documentID';
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_IN_TRANSACTION = -40006; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_IN_TRANSACTION] = 'Document ID to get a data in transaction does not exist in database. collection: #collectionName, documentID: #documentID';
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_UPDATE = -40008; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_UPDATE] = 'Document ID "#id" does not exsit for update.';
export const USER_NOT_LOGIN = -400210; es[USER_NOT_LOGIN] = 'User has not logged in. Or maybe the has a wrong(expired) ID token.';
export const NO_USER_DOCUMENT_ID = -400220; es[NO_USER_DOCUMENT_ID] = 'Empty document path for user collection.';
export const FAILED_TO_VERIFY_USER = -400230; es[FAILED_TO_VERIFY_USER] = 'Failed to verify who you are.';
export const FAILED_TO_CREATE_ANONYMOUS = -400232; es[FAILED_TO_CREATE_ANONYMOUS] = 'Failed to create anonymous account';
export const SYSTEM_ALREADY_INSTALLED = -400240; es[SYSTEM_ALREADY_INSTALLED] = 'System is already installed.';
export const COLLECTION_IS_NOT_SET = -400250; es[COLLECTION_IS_NOT_SET] = 'Collection name is NOT set on base class.';
// Posting errors
export const EMPTY_POST_BODY = -40301; es[EMPTY_POST_BODY] = 'Post body can\'t be empty';
// export const POST_HAS_NO_CATEGORY = -40302; es[POST_HAS_NO_CATEGORY] = 'Post must have category.';
export const NO_POST_ID = -40353; es[NO_POST_ID] = 'No post id. Post id is needed to identify the post.';
export const POST_ID_TOO_LONG = -40354; es[POST_ID_TOO_LONG] = 'post id is too long. Must be less than 128 characters.';
export const POST_ID_CANNOT_CONTAIN_SLASH = -40355; es[POST_ID_CANNOT_CONTAIN_SLASH] = 'post id cannot contain slashes.';
// export const POST_ID_CANNOT_SOLELY_CONSIST_DOT = -40356; es[POST_ID_CANNOT_SOLELY_CONSIST_DOT] = 'Post id or document id cannot be equal to dot [.] or double dot [..] ';
export const POST_ALREADY_EXISTS = -40357; es[POST_ALREADY_EXISTS] = 'Post ID already exists.';
export const ANONYMOUS_PASSWORD_IS_EMPTY = -40356; es[ANONYMOUS_PASSWORD_IS_EMPTY] = 'Anonymous must send a psassword to edit a post';
export const ANONYMOUS_WRONG_PASSWORD = -40357; es[ANONYMOUS_WRONG_PASSWORD] = 'Anonymous sent a wrong password.';
export const NOT_YOUR_POST = -40358; es[NOT_YOUR_POST] = 'This is not your post.';
export const NOT_OWNED_BY_ANONYMOUS = -40359; es[NOT_OWNED_BY_ANONYMOUS] = 'This is not owned by Anonymous. But you are Anonymous.';
// Categories
export const CATEGORY_ID_CANNOT_CONTAIN_SLASH = -40445; es[CATEGORY_ID_CANNOT_CONTAIN_SLASH] = 'Category id cannot contain slashes.';
export const ANONYMOUS_CANNOT_CREATE_CATEGORY = -40449; es[ANONYMOUS_CANNOT_CREATE_CATEGORY] = 'Anonymous cannot set a category';
export const CATEGORY_ALREADY_EXISTS = -40450; es[CATEGORY_ALREADY_EXISTS] = 'Category already exists. Category ID: #id';
export const CATEGORY_ID_TOO_LONG = -40454; es[CATEGORY_ID_TOO_LONG] = 'Category id is too long. Must be less than 128 characters.';
export const NO_CATEGORY_ID = -40460; es[NO_CATEGORY_ID] = 'No Category ID';
export const WRONG_CATEGORY_ID = -40462; es[WRONG_CATEGORY_ID] = 'Wrong Category ID. The Category may not exists. categoryId: "#categoryId"';
export const POST_CATEGORY_DOES_NOT_EXIST = -40464; es[POST_CATEGORY_DOES_NOT_EXIST] = 'Post category does not exists. categoryId: #categoryId';
// Firebase errors.
export const FIREBASE_CODE = -40900; es[FIREBASE_CODE] = 'Firebase error code';
export const FIREBASE_AUTH_UID_ALREADY_EXISTS = -40901; es[FIREBASE_AUTH_UID_ALREADY_EXISTS] = 'User already exists';
export const FIREBASE_ID_TOKEN_EXPIRED = -40902; es[FIREBASE_ID_TOKEN_EXPIRED] = 'User ID Token has expired.';
export const FIREBASE_FAILED_TO_DECODE_ID_TOKEN = -40905; es[FIREBASE_FAILED_TO_DECODE_ID_TOKEN] = 'Failed to verfiy who you are. The ID Token may be expired or invalid.';
export const FIREBASE_INVALID_PASSWORD = -40906; es[FIREBASE_INVALID_PASSWORD] = '';
export const FIREBASE_DO_NOT_ACCEPT_UNDEFINED = -40907; es[FIREBASE_DO_NOT_ACCEPT_UNDEFINED] = 'Undefined value is not accepted in firebase.';
// system
export const SYSTEM_NOT_INSTALLED = -400501; es[SYSTEM_NOT_INSTALLED] = 'System is not installed.';
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_SETTINGS_DOCUMENT = -400502; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_SETTINGS_DOCUMENT] = 'Document ID "#id" of settings does not exsits.';
/**
*
*
* @since 2018-02-26. `code` must be a number and it MUST be less than 0.
* @desc `code` must be a number less than 0.
* @desc When `Firestore` throws error, the error code is usually bigger than 0 and sometimes it is evena string like `auth/uid-already-exists`.
* We will convert it into our ERROR CODE.
*
* @desc So, if the `code` is number and less than 0, then it's an error. otherwise. it's not an error.
*
* if `code` is one of below, it's not an error.
* - a falsy value
* - string
* - number that is bigger than or equal to 0
*
* @desc When `BACKEND_ERROR_OBJECT` is discussed, it means `Backend Error Object`.
* While `Error Object` is stated, it means an Error Object that is any kind of error object.
* - It can be a `Backend Error Object` or `Firebase Error Object` or `Javascript Error Object`.
*
*
*/
export interface BACKEND_ERROR_OBJECT {
code: number;
message?: string;
}
/**
* Returns true if the input is an Error Object.
* @desc It may be a Firebase erorr object or Simple Javascript error objct.
* - Meaning, if `code` property exists and none falsy value, it returns true.
* - This conflicts the concept of `BACKEND_ERROR_OBJECT`.
* - the code of `BACKEND_ERROR_OBJECT` object can only hold number that is less than 0.
* - while the code of Error Object can hold any value. number or string.
*
* @param o any value. May be an Error Object.
*
*/
export function isErrorObject(o): boolean {
if (o) {
if (o['code'] !== void 0) {
if (typeof o['code'] === 'number') {
if (o['code'] < 0) {
return true;
}
}
}
}
return false;
}
/**
*
* Returns `BACKEND_ERROR_OBJECT` from a number or `Firebase error object`.
*
* @param code It may be a ERROR CODE or a `Error Object`.
*
* - If `Firestore Error Object` or `Javascript Error Object` was given, then it will be replaced as BACKEND_ERROR_OBJECT.
*
* @return
* - If the input is falsy value return the input is retuerned as it is.
* - if the input is not a number or Error Object, then the input is returned as it is.
* - Otherwise, Backend Error Object is returned.
*/
export function obj(code, info: object = {}): BACKEND_ERROR_OBJECT {
if (!code) return code; // if falsy, return as it is.
let re: BACKEND_ERROR_OBJECT;
if (typeof code === 'number') { // Backend Error Code
re = {
code: code,
message: es[code]
};
}
else if (typeof code === 'object' && code && code.code !== void 0) { // May be an Error Code Object or `Firestore Error` object.
const eo = code; // error object
if (eo['code'] === void 0) { // has no code? then it's not an error object.
return code; // just return as it is.
}
if (typeof eo['code'] === 'string' || (typeof eo['code'] === 'number' && eo['code'] > 0)) { // It is `Firestore` Error Object.
re = convertFirestoreErrorToBackendError(eo);
}
else {
re = {
code: code['code'],
message: code['message']
};
}
}
// if the input is not number or object. Just return as it was.
else {
return code;
}
re['message'] = patchWithMarker(re['message'], info);
return re;
}
/**
*
* Returns a string after patching error information.
* @param str Error string
* @param info Error information to patch into the string
*
* @see convertFirestoreErrorToBackendError() to know how to use.
*/
function patchWithMarker(str, info: object = null): string {
if (info === null || typeof info !== 'object') return str;
const keys = Object.keys(info);
if (!keys.length) return str;
for (const k of keys) {
str = str.replace('#' + k, (<string>info[k]));
}
return str;
}
/**
* Returns BACKEND_ERROR object.
* @param FireStoreErrorObject a Firebase Error Object.
*
*
*/
function convertFirestoreErrorToBackendError(FireStoreErrorObject): BACKEND_ERROR_OBJECT | {
let code = 0;
let message = '';
// console.log("convert; ", FireStoreErrorObject);
switch (FireStoreErrorObject['code']) {
case 5: /// convert firebase error message into backend error message with information.
code = DOCUMENT_ID_DOES_NOT_EXISTS_FOR_UPDATE;
message = es[code];
const error_object_message = <string>FireStoreErrorObject['message'];
let id = '';
const arr = error_object_message.split('x-users/');
if (arr.length === 2) {
id = arr[1];
}
else id = error_object_message;
message = patchWithMarker(es[code], { id: id });
break;
case 'auth/uid-already-exists':
code = FIREBASE_AUTH_UID_ALREADY_EXISTS;
message = FireStoreErrorObject['message'];
break;
case 'auth/argument-error':
if (FireStoreErrorObject['message'].indexOf('ID token has expired') !== -1) {
code = FIREBASE_ID_TOKEN_EXPIRED;
message = FireStoreErrorObject['message'];
}
break;
case 'auth/invalid-password':
code = FIREBASE_INVALID_PASSWORD;
message = <string>FireStoreErrorObject['message'];
break;
default:
return { code: FIREBASE_CODE, message: `Firebase error code. it is not converted. code: ${FireStoreErrorObject['code']}. message: ${FireStoreErrorObject['message']}` };
}
return {
code: code,
message: message
};
} | identifier_body | |
error.ts | const es = {};
/// Installation
export const NO_ADMIN_EMAIL = -400100; es[NO_ADMIN_EMAIL] = "Please input admin email";
/// Users
export const TEST = -11; es[TEST] = 'Test Error';
export const UNKNOWN = -12; es[UNKNOWN] = 'Unknown erorr. #info';
export const PERMISSION_DENIED_ADMIN_ONLY = -10; es[PERMISSION_DENIED_ADMIN_ONLY] = 'Permission denied. Only administrators are allowed.';
// export const UNHANDLED = -10; es[UNHANDLED] = 'Unhandled error message must have suggestion on which error is to be handled.'
export const NO_EMAIL = -50; es[NO_EMAIL] = 'No email address.';
export const NO_PASSWORD = -51; es[NO_PASSWORD] = 'No password.';
export const NO_NAME = -52; es[NO_NAME] = 'No name.';
export const NO_UID = -53; es[NO_UID] = 'No uid.';
export const UID_TOO_LONG = -54; es[UID_TOO_LONG] = 'UID is too long. Must be less than 128 characters.';
export const UID_CANNOT_CONTAIN_SLASH = -55; es[UID_CANNOT_CONTAIN_SLASH] = 'UID cannot contain slashes.';
export const TYPE_CHECK = -40010; es[TYPE_CHECK] = 'Type check error. name: #name, type should be #type';
export const NO_DOCUMENT_ID = -40025; es[NO_DOCUMENT_ID] = 'No documnet ID, #documentID';
export const DOCUMENT_ID_TOO_LONG = -40026; es[DOCUMENT_ID_TOO_LONG] = 'Document ID is too long.';
export const DOCUMENT_ID_CANNOT_CONTAIN_SLASH = -40027; es[DOCUMENT_ID_CANNOT_CONTAIN_SLASH] = 'Document ID cannot contain slash.';
export const WRONG_GENDER = -40061; es[WRONG_GENDER] = 'Wrong gender.';
export const WRONG_ROUTE = -40060; es[WRONG_ROUTE] = 'The given route is not exists. It is a wrong route.';
export const EMPTY_ROUTE = -40063; es[EMPTY_ROUTE] = 'Empty route.';
export const WRONG_METHOD = -40064; es[WRONG_METHOD] = 'Wrong method.';
export const ANONYMOUS_CANNOT_EDIT_PROFILE = -40070; es[ANONYMOUS_CANNOT_EDIT_PROFILE] = 'Anonymous cannot set/update profile.';
export const USER_ID_NOT_EXISTS_IN_USER_COLLECTION = -40020; es[USER_ID_NOT_EXISTS_IN_USER_COLLECTION] = 'User UID "#id" in users collection does not exists.';
// basic error message
// export const CATEGORY_LEVEL_ON_WRITE_MUST_CONTAIN_NUMBER = -40446; es[CATEGORY_LEVEL_ON_WRITE_MUST_CONTAIN_NUMBER] = 'Level on write field in category should be number.'
// export const CATEGORY_LEVEL_ON_READ_MUST_CONTAIN_NUMBER = -40447; es[CATEGORY_LEVEL_ON_READ_MUST_CONTAIN_NUMBER] = 'Level on read field in category should be a number.'
// export const CATEGORY_LEVEL_ON_LIST_MUST_CONTAIN_NUMBER = -40448; es[CATEGORY_LEVEL_ON_LIST_MUST_CONTAIN_NUMBER] = 'Level on list field in category should be a number.'
export const MUST_BE_A_NUMBER = -400204; es[MUST_BE_A_NUMBER] = '#value must be a number';
export const MUST_BE_AN_ARRAY = -400205; es[MUST_BE_AN_ARRAY] = '#value must be an array';
export const MUST_BE_AN_OBJECT = -400206; es[MUST_BE_AN_OBJECT] = '#value must be an object';
export const MUST_BE_A_BOOLEAN = -400207; es[MUST_BE_A_BOOLEAN] = '#value must be a boolean';
export const MUST_BE_A_STRING = -400208; es[MUST_BE_A_STRING] = '#value must be a string';
// documnets
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET = -40004; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET] = 'Document ID to get a data does not exist in database. collection: #collectionName, documentID: #documentID';
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_IN_TRANSACTION = -40006; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_IN_TRANSACTION] = 'Document ID to get a data in transaction does not exist in database. collection: #collectionName, documentID: #documentID';
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_UPDATE = -40008; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_UPDATE] = 'Document ID "#id" does not exsit for update.';
export const USER_NOT_LOGIN = -400210; es[USER_NOT_LOGIN] = 'User has not logged in. Or maybe the has a wrong(expired) ID token.';
export const NO_USER_DOCUMENT_ID = -400220; es[NO_USER_DOCUMENT_ID] = 'Empty document path for user collection.';
export const FAILED_TO_VERIFY_USER = -400230; es[FAILED_TO_VERIFY_USER] = 'Failed to verify who you are.';
export const FAILED_TO_CREATE_ANONYMOUS = -400232; es[FAILED_TO_CREATE_ANONYMOUS] = 'Failed to create anonymous account';
export const SYSTEM_ALREADY_INSTALLED = -400240; es[SYSTEM_ALREADY_INSTALLED] = 'System is already installed.';
export const COLLECTION_IS_NOT_SET = -400250; es[COLLECTION_IS_NOT_SET] = 'Collection name is NOT set on base class.';
// Posting errors
export const EMPTY_POST_BODY = -40301; es[EMPTY_POST_BODY] = 'Post body can\'t be empty';
// export const POST_HAS_NO_CATEGORY = -40302; es[POST_HAS_NO_CATEGORY] = 'Post must have category.';
export const NO_POST_ID = -40353; es[NO_POST_ID] = 'No post id. Post id is needed to identify the post.';
export const POST_ID_TOO_LONG = -40354; es[POST_ID_TOO_LONG] = 'post id is too long. Must be less than 128 characters.';
export const POST_ID_CANNOT_CONTAIN_SLASH = -40355; es[POST_ID_CANNOT_CONTAIN_SLASH] = 'post id cannot contain slashes.';
// export const POST_ID_CANNOT_SOLELY_CONSIST_DOT = -40356; es[POST_ID_CANNOT_SOLELY_CONSIST_DOT] = 'Post id or document id cannot be equal to dot [.] or double dot [..] ';
export const POST_ALREADY_EXISTS = -40357; es[POST_ALREADY_EXISTS] = 'Post ID already exists.';
export const ANONYMOUS_PASSWORD_IS_EMPTY = -40356; es[ANONYMOUS_PASSWORD_IS_EMPTY] = 'Anonymous must send a psassword to edit a post';
export const ANONYMOUS_WRONG_PASSWORD = -40357; es[ANONYMOUS_WRONG_PASSWORD] = 'Anonymous sent a wrong password.';
export const NOT_YOUR_POST = -40358; es[NOT_YOUR_POST] = 'This is not your post.';
export const NOT_OWNED_BY_ANONYMOUS = -40359; es[NOT_OWNED_BY_ANONYMOUS] = 'This is not owned by Anonymous. But you are Anonymous.';
// Categories
export const CATEGORY_ID_CANNOT_CONTAIN_SLASH = -40445; es[CATEGORY_ID_CANNOT_CONTAIN_SLASH] = 'Category id cannot contain slashes.';
export const ANONYMOUS_CANNOT_CREATE_CATEGORY = -40449; es[ANONYMOUS_CANNOT_CREATE_CATEGORY] = 'Anonymous cannot set a category';
export const CATEGORY_ALREADY_EXISTS = -40450; es[CATEGORY_ALREADY_EXISTS] = 'Category already exists. Category ID: #id';
export const CATEGORY_ID_TOO_LONG = -40454; es[CATEGORY_ID_TOO_LONG] = 'Category id is too long. Must be less than 128 characters.';
export const NO_CATEGORY_ID = -40460; es[NO_CATEGORY_ID] = 'No Category ID';
export const WRONG_CATEGORY_ID = -40462; es[WRONG_CATEGORY_ID] = 'Wrong Category ID. The Category may not exists. categoryId: "#categoryId"';
export const POST_CATEGORY_DOES_NOT_EXIST = -40464; es[POST_CATEGORY_DOES_NOT_EXIST] = 'Post category does not exists. categoryId: #categoryId';
// Firebase errors.
export const FIREBASE_CODE = -40900; es[FIREBASE_CODE] = 'Firebase error code';
export const FIREBASE_AUTH_UID_ALREADY_EXISTS = -40901; es[FIREBASE_AUTH_UID_ALREADY_EXISTS] = 'User already exists';
export const FIREBASE_ID_TOKEN_EXPIRED = -40902; es[FIREBASE_ID_TOKEN_EXPIRED] = 'User ID Token has expired.';
export const FIREBASE_FAILED_TO_DECODE_ID_TOKEN = -40905; es[FIREBASE_FAILED_TO_DECODE_ID_TOKEN] = 'Failed to verfiy who you are. The ID Token may be expired or invalid.';
export const FIREBASE_INVALID_PASSWORD = -40906; es[FIREBASE_INVALID_PASSWORD] = '';
export const FIREBASE_DO_NOT_ACCEPT_UNDEFINED = -40907; es[FIREBASE_DO_NOT_ACCEPT_UNDEFINED] = 'Undefined value is not accepted in firebase.';
// system
export const SYSTEM_NOT_INSTALLED = -400501; es[SYSTEM_NOT_INSTALLED] = 'System is not installed.';
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_SETTINGS_DOCUMENT = -400502; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_SETTINGS_DOCUMENT] = 'Document ID "#id" of settings does not exsits.';
/**
*
*
* @since 2018-02-26. `code` must be a number and it MUST be less than 0.
* @desc `code` must be a number less than 0.
* @desc When `Firestore` throws error, the error code is usually bigger than 0 and sometimes it is evena string like `auth/uid-already-exists`.
* We will convert it into our ERROR CODE.
*
* @desc So, if the `code` is number and less than 0, then it's an error. otherwise. it's not an error.
*
* if `code` is one of below, it's not an error.
* - a falsy value
* - string
* - number that is bigger than or equal to 0
*
* @desc When `BACKEND_ERROR_OBJECT` is discussed, it means `Backend Error Object`.
* While `Error Object` is stated, it means an Error Object that is any kind of error object.
* - It can be a `Backend Error Object` or `Firebase Error Object` or `Javascript Error Object`.
*
*
*/
export interface BACKEND_ERROR_OBJECT {
code: number;
message?: string;
}
/**
* Returns true if the input is an Error Object.
* @desc It may be a Firebase erorr object or Simple Javascript error objct.
* - Meaning, if `code` property exists and none falsy value, it returns true.
* - This conflicts the concept of `BACKEND_ERROR_OBJECT`.
* - the code of `BACKEND_ERROR_OBJECT` object can only hold number that is less than 0.
* - while the code of Error Object can hold any value. number or string.
*
* @param o any value. May be an Error Object.
*
*/
export function isErrorObject(o): boolean {
if (o) {
if (o['code'] !== void 0) {
if (typeof o['code'] === 'number') {
if (o['code'] < 0) {
return true;
}
}
}
}
return false;
}
/**
*
* Returns `BACKEND_ERROR_OBJECT` from a number or `Firebase error object`.
*
* @param code It may be a ERROR CODE or a `Error Object`.
*
* - If `Firestore Error Object` or `Javascript Error Object` was given, then it will be replaced as BACKEND_ERROR_OBJECT.
*
* @return
* - If the input is falsy value return the input is retuerned as it is.
* - if the input is not a number or Error Object, then the input is returned as it is.
* - Otherwise, Backend Error Object is returned.
*/
export function obj(code, info: object = {}): BACKEND_ERROR_OBJECT {
if (!code) return code; // if falsy, return as it is.
let re: BACKEND_ERROR_OBJECT;
if (typeof code === 'number') { // Backend Error Code
re = {
code: code,
message: es[code]
};
}
else if (typeof code === 'object' && code && code.code !== void 0) { // May be an Error Code Object or `Firestore Error` object.
const eo = code; // error object
if (eo['code'] === void 0) { // has no code? then it's not an error object.
return code; // just return as it is.
}
if (typeof eo['code'] === 'string' || (typeof eo['code'] === 'number' && eo['code'] > 0)) { // It is `Firestore` Error Object.
re = convertFirestoreErrorToBackendError(eo);
}
else {
re = {
code: code['code'],
message: code['message']
};
}
}
// if the input is not number or object. Just return as it was.
else {
return code;
}
re['message'] = patchWithMarker(re['message'], info);
return re;
}
/**
*
* Returns a string after patching error information.
* @param str Error string
* @param info Error information to patch into the string
*
* @see convertFirestoreErrorToBackendError() to know how to use.
*/
function | (str, info: object = null): string {
if (info === null || typeof info !== 'object') return str;
const keys = Object.keys(info);
if (!keys.length) return str;
for (const k of keys) {
str = str.replace('#' + k, (<string>info[k]));
}
return str;
}
/**
* Returns BACKEND_ERROR object.
* @param FireStoreErrorObject a Firebase Error Object.
*
*
*/
function convertFirestoreErrorToBackendError(FireStoreErrorObject): BACKEND_ERROR_OBJECT {
let code = 0;
let message = '';
// console.log("convert; ", FireStoreErrorObject);
switch (FireStoreErrorObject['code']) {
case 5: /// convert firebase error message into backend error message with information.
code = DOCUMENT_ID_DOES_NOT_EXISTS_FOR_UPDATE;
message = es[code];
const error_object_message = <string>FireStoreErrorObject['message'];
let id = '';
const arr = error_object_message.split('x-users/');
if (arr.length === 2) {
id = arr[1];
}
else id = error_object_message;
message = patchWithMarker(es[code], { id: id });
break;
case 'auth/uid-already-exists':
code = FIREBASE_AUTH_UID_ALREADY_EXISTS;
message = FireStoreErrorObject['message'];
break;
case 'auth/argument-error':
if (FireStoreErrorObject['message'].indexOf('ID token has expired') !== -1) {
code = FIREBASE_ID_TOKEN_EXPIRED;
message = FireStoreErrorObject['message'];
}
break;
case 'auth/invalid-password':
code = FIREBASE_INVALID_PASSWORD;
message = <string>FireStoreErrorObject['message'];
break;
default:
return { code: FIREBASE_CODE, message: `Firebase error code. it is not converted. code: ${FireStoreErrorObject['code']}. message: ${FireStoreErrorObject['message']}` };
}
return {
code: code,
message: message
};
}
| patchWithMarker | identifier_name |
error.ts | const es = {};
/// Installation
export const NO_ADMIN_EMAIL = -400100; es[NO_ADMIN_EMAIL] = "Please input admin email";
/// Users
export const TEST = -11; es[TEST] = 'Test Error';
export const UNKNOWN = -12; es[UNKNOWN] = 'Unknown erorr. #info';
export const PERMISSION_DENIED_ADMIN_ONLY = -10; es[PERMISSION_DENIED_ADMIN_ONLY] = 'Permission denied. Only administrators are allowed.';
// export const UNHANDLED = -10; es[UNHANDLED] = 'Unhandled error message must have suggestion on which error is to be handled.'
export const NO_EMAIL = -50; es[NO_EMAIL] = 'No email address.';
export const NO_PASSWORD = -51; es[NO_PASSWORD] = 'No password.';
export const NO_NAME = -52; es[NO_NAME] = 'No name.';
export const NO_UID = -53; es[NO_UID] = 'No uid.';
export const UID_TOO_LONG = -54; es[UID_TOO_LONG] = 'UID is too long. Must be less than 128 characters.';
export const UID_CANNOT_CONTAIN_SLASH = -55; es[UID_CANNOT_CONTAIN_SLASH] = 'UID cannot contain slashes.';
export const TYPE_CHECK = -40010; es[TYPE_CHECK] = 'Type check error. name: #name, type should be #type';
export const NO_DOCUMENT_ID = -40025; es[NO_DOCUMENT_ID] = 'No documnet ID, #documentID';
export const DOCUMENT_ID_TOO_LONG = -40026; es[DOCUMENT_ID_TOO_LONG] = 'Document ID is too long.';
export const DOCUMENT_ID_CANNOT_CONTAIN_SLASH = -40027; es[DOCUMENT_ID_CANNOT_CONTAIN_SLASH] = 'Document ID cannot contain slash.';
export const WRONG_GENDER = -40061; es[WRONG_GENDER] = 'Wrong gender.';
export const WRONG_ROUTE = -40060; es[WRONG_ROUTE] = 'The given route is not exists. It is a wrong route.';
export const EMPTY_ROUTE = -40063; es[EMPTY_ROUTE] = 'Empty route.';
export const WRONG_METHOD = -40064; es[WRONG_METHOD] = 'Wrong method.';
export const ANONYMOUS_CANNOT_EDIT_PROFILE = -40070; es[ANONYMOUS_CANNOT_EDIT_PROFILE] = 'Anonymous cannot set/update profile.';
export const USER_ID_NOT_EXISTS_IN_USER_COLLECTION = -40020; es[USER_ID_NOT_EXISTS_IN_USER_COLLECTION] = 'User UID "#id" in users collection does not exists.';
// basic error message
// export const CATEGORY_LEVEL_ON_WRITE_MUST_CONTAIN_NUMBER = -40446; es[CATEGORY_LEVEL_ON_WRITE_MUST_CONTAIN_NUMBER] = 'Level on write field in category should be number.'
// export const CATEGORY_LEVEL_ON_READ_MUST_CONTAIN_NUMBER = -40447; es[CATEGORY_LEVEL_ON_READ_MUST_CONTAIN_NUMBER] = 'Level on read field in category should be a number.'
// export const CATEGORY_LEVEL_ON_LIST_MUST_CONTAIN_NUMBER = -40448; es[CATEGORY_LEVEL_ON_LIST_MUST_CONTAIN_NUMBER] = 'Level on list field in category should be a number.'
export const MUST_BE_A_NUMBER = -400204; es[MUST_BE_A_NUMBER] = '#value must be a number';
export const MUST_BE_AN_ARRAY = -400205; es[MUST_BE_AN_ARRAY] = '#value must be an array';
export const MUST_BE_AN_OBJECT = -400206; es[MUST_BE_AN_OBJECT] = '#value must be an object';
export const MUST_BE_A_BOOLEAN = -400207; es[MUST_BE_A_BOOLEAN] = '#value must be a boolean';
export const MUST_BE_A_STRING = -400208; es[MUST_BE_A_STRING] = '#value must be a string';
| // documnets
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET = -40004; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET] = 'Document ID to get a data does not exist in database. collection: #collectionName, documentID: #documentID';
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_IN_TRANSACTION = -40006; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_IN_TRANSACTION] = 'Document ID to get a data in transaction does not exist in database. collection: #collectionName, documentID: #documentID';
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_UPDATE = -40008; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_UPDATE] = 'Document ID "#id" does not exsit for update.';
export const USER_NOT_LOGIN = -400210; es[USER_NOT_LOGIN] = 'User has not logged in. Or maybe the has a wrong(expired) ID token.';
export const NO_USER_DOCUMENT_ID = -400220; es[NO_USER_DOCUMENT_ID] = 'Empty document path for user collection.';
export const FAILED_TO_VERIFY_USER = -400230; es[FAILED_TO_VERIFY_USER] = 'Failed to verify who you are.';
export const FAILED_TO_CREATE_ANONYMOUS = -400232; es[FAILED_TO_CREATE_ANONYMOUS] = 'Failed to create anonymous account';
export const SYSTEM_ALREADY_INSTALLED = -400240; es[SYSTEM_ALREADY_INSTALLED] = 'System is already installed.';
export const COLLECTION_IS_NOT_SET = -400250; es[COLLECTION_IS_NOT_SET] = 'Collection name is NOT set on base class.';
// Posting errors
export const EMPTY_POST_BODY = -40301; es[EMPTY_POST_BODY] = 'Post body can\'t be empty';
// export const POST_HAS_NO_CATEGORY = -40302; es[POST_HAS_NO_CATEGORY] = 'Post must have category.';
export const NO_POST_ID = -40353; es[NO_POST_ID] = 'No post id. Post id is needed to identify the post.';
export const POST_ID_TOO_LONG = -40354; es[POST_ID_TOO_LONG] = 'post id is too long. Must be less than 128 characters.';
export const POST_ID_CANNOT_CONTAIN_SLASH = -40355; es[POST_ID_CANNOT_CONTAIN_SLASH] = 'post id cannot contain slashes.';
// export const POST_ID_CANNOT_SOLELY_CONSIST_DOT = -40356; es[POST_ID_CANNOT_SOLELY_CONSIST_DOT] = 'Post id or document id cannot be equal to dot [.] or double dot [..] ';
export const POST_ALREADY_EXISTS = -40357; es[POST_ALREADY_EXISTS] = 'Post ID already exists.';
export const ANONYMOUS_PASSWORD_IS_EMPTY = -40356; es[ANONYMOUS_PASSWORD_IS_EMPTY] = 'Anonymous must send a psassword to edit a post';
export const ANONYMOUS_WRONG_PASSWORD = -40357; es[ANONYMOUS_WRONG_PASSWORD] = 'Anonymous sent a wrong password.';
export const NOT_YOUR_POST = -40358; es[NOT_YOUR_POST] = 'This is not your post.';
export const NOT_OWNED_BY_ANONYMOUS = -40359; es[NOT_OWNED_BY_ANONYMOUS] = 'This is not owned by Anonymous. But you are Anonymous.';
// Categories
export const CATEGORY_ID_CANNOT_CONTAIN_SLASH = -40445; es[CATEGORY_ID_CANNOT_CONTAIN_SLASH] = 'Category id cannot contain slashes.';
export const ANONYMOUS_CANNOT_CREATE_CATEGORY = -40449; es[ANONYMOUS_CANNOT_CREATE_CATEGORY] = 'Anonymous cannot set a category';
export const CATEGORY_ALREADY_EXISTS = -40450; es[CATEGORY_ALREADY_EXISTS] = 'Category already exists. Category ID: #id';
export const CATEGORY_ID_TOO_LONG = -40454; es[CATEGORY_ID_TOO_LONG] = 'Category id is too long. Must be less than 128 characters.';
export const NO_CATEGORY_ID = -40460; es[NO_CATEGORY_ID] = 'No Category ID';
export const WRONG_CATEGORY_ID = -40462; es[WRONG_CATEGORY_ID] = 'Wrong Category ID. The Category may not exists. categoryId: "#categoryId"';
export const POST_CATEGORY_DOES_NOT_EXIST = -40464; es[POST_CATEGORY_DOES_NOT_EXIST] = 'Post category does not exists. categoryId: #categoryId';
// Firebase errors.
export const FIREBASE_CODE = -40900; es[FIREBASE_CODE] = 'Firebase error code';
export const FIREBASE_AUTH_UID_ALREADY_EXISTS = -40901; es[FIREBASE_AUTH_UID_ALREADY_EXISTS] = 'User already exists';
export const FIREBASE_ID_TOKEN_EXPIRED = -40902; es[FIREBASE_ID_TOKEN_EXPIRED] = 'User ID Token has expired.';
export const FIREBASE_FAILED_TO_DECODE_ID_TOKEN = -40905; es[FIREBASE_FAILED_TO_DECODE_ID_TOKEN] = 'Failed to verfiy who you are. The ID Token may be expired or invalid.';
export const FIREBASE_INVALID_PASSWORD = -40906; es[FIREBASE_INVALID_PASSWORD] = '';
export const FIREBASE_DO_NOT_ACCEPT_UNDEFINED = -40907; es[FIREBASE_DO_NOT_ACCEPT_UNDEFINED] = 'Undefined value is not accepted in firebase.';
// system
export const SYSTEM_NOT_INSTALLED = -400501; es[SYSTEM_NOT_INSTALLED] = 'System is not installed.';
export const DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_SETTINGS_DOCUMENT = -400502; es[DOCUMENT_ID_DOES_NOT_EXISTS_FOR_GET_SETTINGS_DOCUMENT] = 'Document ID "#id" of settings does not exsits.';
/**
*
*
* @since 2018-02-26. `code` must be a number and it MUST be less than 0.
* @desc `code` must be a number less than 0.
* @desc When `Firestore` throws error, the error code is usually bigger than 0 and sometimes it is evena string like `auth/uid-already-exists`.
* We will convert it into our ERROR CODE.
*
* @desc So, if the `code` is number and less than 0, then it's an error. otherwise. it's not an error.
*
* if `code` is one of below, it's not an error.
* - a falsy value
* - string
* - number that is bigger than or equal to 0
*
* @desc When `BACKEND_ERROR_OBJECT` is discussed, it means `Backend Error Object`.
* While `Error Object` is stated, it means an Error Object that is any kind of error object.
* - It can be a `Backend Error Object` or `Firebase Error Object` or `Javascript Error Object`.
*
*
*/
export interface BACKEND_ERROR_OBJECT {
code: number;
message?: string;
}
/**
* Returns true if the input is an Error Object.
* @desc It may be a Firebase erorr object or Simple Javascript error objct.
* - Meaning, if `code` property exists and none falsy value, it returns true.
* - This conflicts the concept of `BACKEND_ERROR_OBJECT`.
* - the code of `BACKEND_ERROR_OBJECT` object can only hold number that is less than 0.
* - while the code of Error Object can hold any value. number or string.
*
* @param o any value. May be an Error Object.
*
*/
export function isErrorObject(o): boolean {
if (o) {
if (o['code'] !== void 0) {
if (typeof o['code'] === 'number') {
if (o['code'] < 0) {
return true;
}
}
}
}
return false;
}
/**
*
* Returns `BACKEND_ERROR_OBJECT` from a number or `Firebase error object`.
*
* @param code It may be a ERROR CODE or a `Error Object`.
*
* - If `Firestore Error Object` or `Javascript Error Object` was given, then it will be replaced as BACKEND_ERROR_OBJECT.
*
* @return
* - If the input is falsy value return the input is retuerned as it is.
* - if the input is not a number or Error Object, then the input is returned as it is.
* - Otherwise, Backend Error Object is returned.
*/
export function obj(code, info: object = {}): BACKEND_ERROR_OBJECT {
if (!code) return code; // if falsy, return as it is.
let re: BACKEND_ERROR_OBJECT;
if (typeof code === 'number') { // Backend Error Code
re = {
code: code,
message: es[code]
};
}
else if (typeof code === 'object' && code && code.code !== void 0) { // May be an Error Code Object or `Firestore Error` object.
const eo = code; // error object
if (eo['code'] === void 0) { // has no code? then it's not an error object.
return code; // just return as it is.
}
if (typeof eo['code'] === 'string' || (typeof eo['code'] === 'number' && eo['code'] > 0)) { // It is `Firestore` Error Object.
re = convertFirestoreErrorToBackendError(eo);
}
else {
re = {
code: code['code'],
message: code['message']
};
}
}
// if the input is not number or object. Just return as it was.
else {
return code;
}
re['message'] = patchWithMarker(re['message'], info);
return re;
}
/**
*
* Returns a string after patching error information.
* @param str Error string
* @param info Error information to patch into the string
*
* @see convertFirestoreErrorToBackendError() to know how to use.
*/
function patchWithMarker(str, info: object = null): string {
if (info === null || typeof info !== 'object') return str;
const keys = Object.keys(info);
if (!keys.length) return str;
for (const k of keys) {
str = str.replace('#' + k, (<string>info[k]));
}
return str;
}
/**
* Returns BACKEND_ERROR object.
* @param FireStoreErrorObject a Firebase Error Object.
*
*
*/
function convertFirestoreErrorToBackendError(FireStoreErrorObject): BACKEND_ERROR_OBJECT {
let code = 0;
let message = '';
// console.log("convert; ", FireStoreErrorObject);
switch (FireStoreErrorObject['code']) {
case 5: /// convert firebase error message into backend error message with information.
code = DOCUMENT_ID_DOES_NOT_EXISTS_FOR_UPDATE;
message = es[code];
const error_object_message = <string>FireStoreErrorObject['message'];
let id = '';
const arr = error_object_message.split('x-users/');
if (arr.length === 2) {
id = arr[1];
}
else id = error_object_message;
message = patchWithMarker(es[code], { id: id });
break;
case 'auth/uid-already-exists':
code = FIREBASE_AUTH_UID_ALREADY_EXISTS;
message = FireStoreErrorObject['message'];
break;
case 'auth/argument-error':
if (FireStoreErrorObject['message'].indexOf('ID token has expired') !== -1) {
code = FIREBASE_ID_TOKEN_EXPIRED;
message = FireStoreErrorObject['message'];
}
break;
case 'auth/invalid-password':
code = FIREBASE_INVALID_PASSWORD;
message = <string>FireStoreErrorObject['message'];
break;
default:
return { code: FIREBASE_CODE, message: `Firebase error code. it is not converted. code: ${FireStoreErrorObject['code']}. message: ${FireStoreErrorObject['message']}` };
}
return {
code: code,
message: message
};
} | random_line_split | |
reader.rs | use std::fmt;
use crate::{Seq, RtpPacketBuilder};
/// Wrapper around a byte-slice of RTP data, providing accessor methods for the RTP header fields.
pub struct RtpReader<'a> {
buf: &'a [u8],
}
/// Reasons for `RtpHeader::new()` to fail
#[derive(Debug)]
pub enum RtpReaderError {
/// Buffer too short to be valid RTP packet
BufferTooShort(usize),
/// Only RTP version 2 supported
UnsupportedVersion(u8),
/// RTP headers truncated before end of buffer
HeadersTruncated {
/// The amount of data which was expected to be present (which may vary depending on flags
/// in the RTP header)
header_len: usize,
/// The actual amount of data that was available, which was found to be smaller than
/// `header_len`
buffer_len: usize,
},
/// The padding header at the end of the packet, if present, specifies the number of padding
/// bytes, including itself, and therefore cannot be less than `1`, or greater than the
/// available space.
PaddingLengthInvalid(u8),
}
impl<'a> RtpReader<'a> {
/// An RTP packet header is no fewer than 12 bytes long
pub const MIN_HEADER_LEN: usize = 12;
const EXTENSION_HEADER_LEN: usize = 4;
/// Tries to construct a new `RtpHeader` instance, or an `RtpReaderError` if the RTP data is
/// malformed.
///
/// In particular, if there is too little data in the given buffer, such that some later
/// attempt to access an RTP header field would need to access bytes that are not available,
/// then this method will fail up front, rather than allowing attempts to access any header
/// field to fail later on.
pub fn | (b: &'a [u8]) -> Result<RtpReader<'_>, RtpReaderError> {
if b.len() < Self::MIN_HEADER_LEN {
return Err(RtpReaderError::BufferTooShort(b.len()));
}
let r = RtpReader { buf: b };
if r.version() != 2 {
return Err(RtpReaderError::UnsupportedVersion(r.version()));
}
if r.extension_flag() {
let extension_start = r.csrc_end() + Self::EXTENSION_HEADER_LEN;
if extension_start > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: extension_start,
buffer_len: b.len(),
});
}
let extension_end = extension_start + r.extension_len();
if extension_end > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: extension_end,
buffer_len: b.len(),
});
}
}
if r.payload_offset() > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: r.payload_offset(),
buffer_len: b.len(),
});
}
if r.padding_flag() {
let post_header_bytes = b.len() - r.payload_offset();
// with 'padding' flag set, there must be at least a single byte after the headers to
// hold the padding length
if post_header_bytes == 0 {
return Err(RtpReaderError::HeadersTruncated {
header_len: r.payload_offset(),
buffer_len: b.len() - 1,
});
}
let pad_len = r.padding_len()?;
if r.payload_offset() + pad_len as usize > b.len() {
return Err(RtpReaderError::PaddingLengthInvalid(pad_len));
}
}
Ok(r)
}
/// Version field value (currently only version 2 is supported, so other values will not be
/// seen from this release of `rtp-rs`.
pub fn version(&self) -> u8 {
(self.buf[0] & 0b1100_0000) >> 6
}
/// Flag indicating if padding is present at the end of the payload data.
fn padding_flag(&self) -> bool {
(self.buf[0] & 0b0010_0000) != 0
}
/// Returns the size of the padding at the end of this packet, or `None` if the padding flag is
/// not set in the packet header
pub fn padding(&self) -> Option<u8> {
if self.padding_flag() {
Some(self.padding_len().unwrap())
} else {
None
}
}
fn extension_flag(&self) -> bool {
(self.buf[0] & 0b0001_0000) != 0
}
/// A count of the number of CSRC fields present in the RTP headers - may be `0`.
///
/// See [csrc()](#method.csrc).
pub fn csrc_count(&self) -> u8 {
self.buf[0] & 0b0000_1111
}
/// A 'marker', which may have some definition in the specific RTP profile in use
pub fn mark(&self) -> bool {
(self.buf[1] & 0b1000_0000) != 0
}
/// Indicates the type of content carried in this RTP packet.
///
/// A few types-values are defined in the standard, but in many applications of RTP the value
/// of this field needs to be agreed between sender and receiver by some mechanism outside of
/// RTP itself.
pub fn payload_type(&self) -> u8 {
self.buf[1] & 0b0111_1111
}
/// The sequence number of this particular packet.
///
/// Sequence numbers are 16 bits, and will wrap back to `0` after reaching the maximum 16-bit
/// value of `65535`.
///
/// Receivers can identify packet losses or reordering by inspecting the value of this field
/// across a sequence of received packets. The [`Seq`](struct.Seq.html) wrapper type helps
/// calling code reason about sequence number problems in the face of any wraparound that might
/// have legitimately happened.
pub fn sequence_number(&self) -> Seq {
Seq((self.buf[2] as u16) << 8 | (self.buf[3] as u16))
}
/// The timestamp of this packet, given in a timebase that relates to the particular
/// `payload_type` in use.
///
/// It is perfectly possible for successive packets in a sequence to have the same value, or
/// to have values that differ by arbitrarily large amounts.
///
/// Timestamps are 32 bits, and will wrap back to `0` after reaching the maximum 32 bit value
/// of `4294967295`.
pub fn timestamp(&self) -> u32 {
(self.buf[4] as u32) << 24
| (self.buf[5] as u32) << 16
| (self.buf[6] as u32) << 8
| (self.buf[7] as u32)
}
/// The _synchronisation source_ for this packet. Many applications of RTP do not use this
/// field.
pub fn ssrc(&self) -> u32 {
(self.buf[8] as u32) << 24
| (self.buf[9] as u32) << 16
| (self.buf[10] as u32) << 8
| (self.buf[11] as u32)
}
/// A potentially empty list of _contributing sources_ for this packet. Many applications of
/// RTP do not use this field.
pub fn csrc(&self) -> impl Iterator<Item = u32> + '_ {
self.buf[Self::MIN_HEADER_LEN..]
.chunks(4)
.take(self.csrc_count() as usize)
.map(|b| (b[0] as u32) << 24 | (b[1] as u32) << 16 | (b[2] as u32) << 8 | (b[3] as u32))
}
/// Returns the offset of the payload for the packet
pub fn payload_offset(&self) -> usize {
let offset = self.csrc_end();
if self.extension_flag() {
offset + Self::EXTENSION_HEADER_LEN + self.extension_len()
} else {
offset
}
}
fn csrc_end(&self) -> usize {
Self::MIN_HEADER_LEN + (4 * self.csrc_count()) as usize
}
/// Returns the payload data of this RTP packet, excluding the packet's headers and any
/// optional trailing padding.
pub fn payload(&self) -> &'a [u8] {
let pad = if self.padding_flag() {
// in Self::new(), we already checked this was Ok, and will not attempt an invalid
// slice below,
self.padding_len().unwrap() as usize
} else {
0
};
&self.buf[self.payload_offset()..self.buf.len() - pad]
}
fn extension_len(&self) -> usize {
let offset = self.csrc_end();
// The 16 bit extension length header gives a length in 32 bit (4 byte) units; 0 is a
// valid length.
4 * ((self.buf[offset + 2] as usize) << 8 | (self.buf[offset + 3] as usize))
}
// must only be used if padding() returns true
fn padding_len(&self) -> Result<u8, RtpReaderError> {
match self.buf[self.buf.len() - 1] {
0 => Err(RtpReaderError::PaddingLengthInvalid(0)),
l => Ok(l),
}
}
/// Returns details of the optional RTP header extension field. If there is an extension,
/// the first component of the resulting tuple is the extension id, and the second is a
/// byte-slice for the extension data value, to be interpreted by the application.
pub fn extension(&self) -> Option<(u16, &'a [u8])> {
if self.extension_flag() {
let offset = self.csrc_end();
let id = (self.buf[offset] as u16) << 8 | (self.buf[offset + 1] as u16);
let start = offset + 4;
Some((id, &self.buf[start..start + self.extension_len()]))
} else {
None
}
}
/// Create a `RtpPacketBuilder` from this packet. **Note** that padding from the original
/// packet will not be used by default, and must be defined on the resulting `RtpPacketBuilder`
/// if required.
///
/// The padding is not copied from the original since, while we do know how many padding bytes
/// were present, we don't know if the intent was to round to 2 bytes, 4 bytes, etc. Blindly
/// copying the padding could result in an incorrect result _if_ the payload is subsequently
/// changed for one with a different length.
///
/// If you know your output packets don't need padding, there is nothing more to do, since
/// that is the default for the resulting `RtpPacketBulder`.
///
/// If you know you output packets need padding to 4 bytes, then you _must_ explicitly specify
/// this using `builder.padded(Pad::round_to(4))` even if the source packet was already padded
/// to a 4 byte boundary.
pub fn create_builder(&self) -> RtpPacketBuilder<'a> {
let mut builder = RtpPacketBuilder::new()
.payload_type(self.payload_type())
.marked(self.mark())
.sequence(self.sequence_number())
.ssrc(self.ssrc())
.timestamp(self.timestamp())
.payload(self.payload());
if let Some(ext) = self.extension() {
builder = builder.extension(ext.0, ext.1);
}
for csrc in self.csrc() {
builder = builder.add_csrc(csrc);
}
builder
}
}
impl<'a> fmt::Debug for RtpReader<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("RtpReader")
.field("version", &self.version())
.field("padding", &self.padding())
.field("extension", &self.extension().map(|(id, _)| id))
.field("csrc_count", &self.csrc_count())
.field("mark", &self.mark())
.field("payload_type", &self.payload_type())
.field("sequence_number", &self.sequence_number())
.field("timestamp", &self.timestamp())
.field("ssrc", &self.ssrc())
.field("payload_length", &self.payload().len())
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::IntoSeqIterator;
const TEST_RTP_PACKET: [u8; 391] = [
0x80u8, 0xe0u8, 0x27u8, 0x38u8, 0x64u8, 0xe4u8, 0x05u8, 0xa7u8, 0xa2u8, 0x42u8, 0xafu8,
0x01u8, 0x3cu8, 0x41u8, 0xa4u8, 0xa3u8, 0x5du8, 0x13u8, 0xf9u8, 0xcau8, 0x2cu8, 0x7eu8,
0xa9u8, 0x77u8, 0xaau8, 0xdeu8, 0xf7u8, 0xcau8, 0xa4u8, 0x28u8, 0xfeu8, 0xdfu8, 0xc8u8,
0x68u8, 0xf1u8, 0xd9u8, 0x4fu8, 0x69u8, 0x96u8, 0xa0u8, 0x57u8, 0xbau8, 0xfbu8, 0x07u8,
0xc4u8, 0xc4u8, 0xd4u8, 0xfeu8, 0xf8u8, 0xc7u8, 0xb2u8, 0x0du8, 0x01u8, 0x12u8, 0x14u8,
0x36u8, 0x69u8, 0x75u8, 0xf2u8, 0xb4u8, 0xb5u8, 0xf2u8, 0x54u8, 0x2eu8, 0xc2u8, 0x66u8,
0x51u8, 0xebu8, 0x41u8, 0x80u8, 0x96u8, 0xceu8, 0x8eu8, 0x60u8, 0xb2u8, 0x44u8, 0xaeu8,
0xe5u8, 0x43u8, 0xadu8, 0x7bu8, 0x48u8, 0x89u8, 0x44u8, 0xb0u8, 0x48u8, 0x67u8, 0x6au8,
0x84u8, 0x7au8, 0x0au8, 0x8fu8, 0x71u8, 0x50u8, 0x69u8, 0xe6u8, 0xb1u8, 0x05u8, 0x40u8,
0xb9u8, 0x8cu8, 0xafu8, 0x42u8, 0xcbu8, 0x58u8, 0x83u8, 0xcbu8, 0x32u8, 0x64u8, 0xd2u8,
0x2au8, 0x7du8, 0x4eu8, 0xf5u8, 0xbcu8, 0x33u8, 0xfeu8, 0xb7u8, 0x0cu8, 0xe4u8, 0x8eu8,
0x38u8, 0xbcu8, 0x3au8, 0x1eu8, 0xd2u8, 0x56u8, 0x13u8, 0x23u8, 0x47u8, 0xcfu8, 0x42u8,
0xa9u8, 0xbbu8, 0xcfu8, 0x48u8, 0xf3u8, 0x11u8, 0xc7u8, 0xfdu8, 0x73u8, 0x2du8, 0xe1u8,
0xeau8, 0x47u8, 0x5cu8, 0x5du8, 0x11u8, 0x96u8, 0x1eu8, 0xc4u8, 0x70u8, 0x32u8, 0x77u8,
0xabu8, 0x31u8, 0x7au8, 0xb1u8, 0x22u8, 0x14u8, 0x8du8, 0x2bu8, 0xecu8, 0x3du8, 0x67u8,
0x97u8, 0xa4u8, 0x40u8, 0x21u8, 0x1eu8, 0xceu8, 0xb0u8, 0x63u8, 0x01u8, 0x75u8, 0x77u8,
0x03u8, 0x15u8, 0xcdu8, 0x35u8, 0xa1u8, 0x2fu8, 0x4bu8, 0xa0u8, 0xacu8, 0x8du8, 0xd7u8,
0x78u8, 0x02u8, 0x23u8, 0xcbu8, 0xfdu8, 0x82u8, 0x4eu8, 0x0bu8, 0x79u8, 0x7fu8, 0x39u8,
0x70u8, 0x26u8, 0x66u8, 0x37u8, 0xe9u8, 0x93u8, 0x91u8, 0x7bu8, 0xc4u8, 0x80u8, 0xa9u8,
0x18u8, 0x23u8, 0xb3u8, 0xa1u8, 0x04u8, 0x72u8, 0x53u8, 0xa0u8, 0xb4u8, 0xffu8, 0x79u8,
0x1fu8, 0x07u8, 0xe2u8, 0x5du8, 0x01u8, 0x7du8, 0x63u8, 0xc1u8, 0x16u8, 0x89u8, 0x23u8,
0x4au8, 0x17u8, 0xbbu8, 0x6du8, 0x0du8, 0x81u8, 0x1au8, 0xbbu8, 0x94u8, 0x5bu8, 0xcbu8,
0x2du8, 0xdeu8, 0x98u8, 0x40u8, 0x22u8, 0x62u8, 0x41u8, 0xc2u8, 0x9bu8, 0x95u8, 0x85u8,
0x60u8, 0xf0u8, 0xdeu8, 0x6fu8, 0xeeu8, 0x93u8, 0xccu8, 0x15u8, 0x76u8, 0xfbu8, 0xf8u8,
0x8au8, 0x1du8, 0xe1u8, 0x83u8, 0x12u8, 0xabu8, 0x25u8, 0x6au8, 0x7bu8, 0x89u8, 0xedu8,
0x70u8, 0x4eu8, 0xcdu8, 0x1eu8, 0xa9u8, 0xfcu8, 0xa8u8, 0x22u8, 0x91u8, 0x5fu8, 0x50u8,
0x68u8, 0x6au8, 0x35u8, 0xf7u8, 0xc1u8, 0x1eu8, 0x15u8, 0x37u8, 0xb4u8, 0x30u8, 0x62u8,
0x56u8, 0x1eu8, 0x2eu8, 0xe0u8, 0x2du8, 0xa4u8, 0x1eu8, 0x75u8, 0x5bu8, 0xc7u8, 0xd0u8,
0x5bu8, 0x9du8, 0xd0u8, 0x25u8, 0x76u8, 0xdfu8, 0xa7u8, 0x19u8, 0x12u8, 0x93u8, 0xf4u8,
0xebu8, 0x02u8, 0xf2u8, 0x4au8, 0x13u8, 0xe9u8, 0x1cu8, 0x17u8, 0xccu8, 0x11u8, 0x87u8,
0x9cu8, 0xa6u8, 0x40u8, 0x27u8, 0xb7u8, 0x2bu8, 0x9bu8, 0x6fu8, 0x23u8, 0x06u8, 0x2cu8,
0xc6u8, 0x6eu8, 0xc1u8, 0x9au8, 0xbdu8, 0x59u8, 0x37u8, 0xe9u8, 0x9eu8, 0x76u8, 0xf6u8,
0xc1u8, 0xbcu8, 0x81u8, 0x18u8, 0x60u8, 0xc9u8, 0x64u8, 0x0au8, 0xb3u8, 0x6eu8, 0xf3u8,
0x6bu8, 0xb9u8, 0xd0u8, 0xf6u8, 0xe0u8, 0x9bu8, 0x91u8, 0xc1u8, 0x0fu8, 0x96u8, 0xefu8,
0xbcu8, 0x5fu8, 0x8eu8, 0x86u8, 0x56u8, 0x5au8, 0xfcu8, 0x7au8, 0x8bu8, 0xddu8, 0x9au8,
0x1cu8, 0xf6u8, 0xb4u8, 0x85u8, 0xf4u8, 0xb0u8,
];
const TEST_RTP_PACKET_WITH_EXTENSION: [u8; 63] = [
144u8, 111u8, 79u8, 252u8, 224u8, 94u8, 104u8, 203u8, 30u8, 112u8, 208u8,
191u8, 190u8, 222u8, 0u8, 3u8, 34u8, 175u8, 185u8, 88u8, 49u8, 0u8, 171u8,
64u8, 48u8, 16u8, 219u8, 0u8, 104u8, 9u8, 136u8, 90u8, 174u8, 145u8, 68u8,
165u8, 227u8, 178u8, 187u8, 68u8, 166u8, 66u8, 235u8, 40u8, 171u8, 135u8,
30u8, 174u8, 130u8, 239u8, 205u8, 14u8, 211u8, 232u8, 65u8, 67u8, 153u8,
120u8, 63u8, 17u8, 101u8, 55u8, 17u8
];
#[test]
fn version() {
let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap();
assert_eq!(2, reader.version());
assert!(reader.padding().is_none());
assert!(reader.extension().is_none());
assert_eq!(0, reader.csrc_count());
assert!(reader.mark());
assert_eq!(96, reader.payload_type());
assert_eq!(Seq(10040), reader.sequence_number());
assert_eq!(1_692_665_255, reader.timestamp());
assert_eq!(0xa242_af01, reader.ssrc());
assert_eq!(379, reader.payload().len());
format!("{:?}", reader);
}
#[test]
fn padding() {
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
assert_eq!(2, reader.version());
assert!(reader.padding().is_none());
assert!(reader.extension().is_some());
assert_eq!(0, reader.csrc_count());
assert_eq!(111, reader.payload_type());
}
#[test]
fn padding_too_large() {
// 'padding' header-flag is on, and padding length (255) in final byte is larger than the
// buffer length. (Test data created by fuzzing.)
let data = [
0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x1, 0x0, 0xff, 0xa2, 0xa2, 0xa2, 0xa2,
0x90, 0x0, 0x0, 0x0, 0x0, 0xff,
];
assert!(RtpReader::new(&data).is_err());
}
#[test]
fn builder_juggle() {
let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap();
let buffer = reader.create_builder().build().unwrap();
assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET[..]);
}
#[test]
fn builder_juggle_extension() {
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
let buffer = reader.create_builder().build().unwrap();
assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET_WITH_EXTENSION[..]);
}
#[test]
fn builder_juggle_clear_payload() {
let new_payload = vec![];
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
let buffer = reader.create_builder()
.payload(&new_payload).build().unwrap();
let expected = &TEST_RTP_PACKET_WITH_EXTENSION[0..(3 + 4) * 4];
assert_eq!(&buffer.as_slice()[..], expected);
}
#[test]
fn seq() {
assert!(Seq(0).precedes(Seq(1)));
assert!(Seq(0xffff).precedes(Seq(0)));
assert!(Seq(0) < Seq(1));
assert!(Seq(0xffff) < Seq(0));
assert_eq!(-1, Seq(0) - Seq(1));
assert_eq!(1, Seq(1) - Seq(0));
assert_eq!(0, Seq(1) - Seq(1));
assert_eq!(1, Seq(0) - Seq(0xffff));
assert_eq!(-1, Seq(0xffff) - Seq(0));
let mut it = (Seq(0xfffe)..Seq(1)).seq_iter();
assert_eq!(Seq(0xfffe), it.next().unwrap());
assert_eq!(Seq(0xffff), it.next().unwrap());
assert_eq!(Seq(0x0000), it.next().unwrap());
assert_eq!(None, it.next());
}
}
| new | identifier_name |
reader.rs | use std::fmt;
use crate::{Seq, RtpPacketBuilder};
/// Wrapper around a byte-slice of RTP data, providing accessor methods for the RTP header fields.
pub struct RtpReader<'a> {
buf: &'a [u8],
}
/// Reasons for `RtpHeader::new()` to fail
#[derive(Debug)]
pub enum RtpReaderError {
/// Buffer too short to be valid RTP packet
BufferTooShort(usize),
/// Only RTP version 2 supported
UnsupportedVersion(u8),
/// RTP headers truncated before end of buffer
HeadersTruncated {
/// The amount of data which was expected to be present (which may vary depending on flags
/// in the RTP header)
header_len: usize,
/// The actual amount of data that was available, which was found to be smaller than
/// `header_len`
buffer_len: usize,
},
/// The padding header at the end of the packet, if present, specifies the number of padding
/// bytes, including itself, and therefore cannot be less than `1`, or greater than the
/// available space.
PaddingLengthInvalid(u8),
}
impl<'a> RtpReader<'a> {
/// An RTP packet header is no fewer than 12 bytes long
pub const MIN_HEADER_LEN: usize = 12;
const EXTENSION_HEADER_LEN: usize = 4;
/// Tries to construct a new `RtpHeader` instance, or an `RtpReaderError` if the RTP data is
/// malformed.
///
/// In particular, if there is too little data in the given buffer, such that some later
/// attempt to access an RTP header field would need to access bytes that are not available,
/// then this method will fail up front, rather than allowing attempts to access any header
/// field to fail later on.
pub fn new(b: &'a [u8]) -> Result<RtpReader<'_>, RtpReaderError> {
if b.len() < Self::MIN_HEADER_LEN {
return Err(RtpReaderError::BufferTooShort(b.len()));
}
let r = RtpReader { buf: b };
if r.version() != 2 {
return Err(RtpReaderError::UnsupportedVersion(r.version()));
}
if r.extension_flag() {
let extension_start = r.csrc_end() + Self::EXTENSION_HEADER_LEN;
if extension_start > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: extension_start,
buffer_len: b.len(),
});
}
let extension_end = extension_start + r.extension_len();
if extension_end > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: extension_end,
buffer_len: b.len(),
});
}
}
if r.payload_offset() > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: r.payload_offset(),
buffer_len: b.len(),
});
}
if r.padding_flag() {
let post_header_bytes = b.len() - r.payload_offset();
// with 'padding' flag set, there must be at least a single byte after the headers to
// hold the padding length
if post_header_bytes == 0 {
return Err(RtpReaderError::HeadersTruncated {
header_len: r.payload_offset(),
buffer_len: b.len() - 1,
});
}
let pad_len = r.padding_len()?;
if r.payload_offset() + pad_len as usize > b.len() {
return Err(RtpReaderError::PaddingLengthInvalid(pad_len));
}
}
Ok(r)
}
/// Version field value (currently only version 2 is supported, so other values will not be
/// seen from this release of `rtp-rs`.
pub fn version(&self) -> u8 {
(self.buf[0] & 0b1100_0000) >> 6
}
/// Flag indicating if padding is present at the end of the payload data.
fn padding_flag(&self) -> bool {
(self.buf[0] & 0b0010_0000) != 0
}
/// Returns the size of the padding at the end of this packet, or `None` if the padding flag is
/// not set in the packet header
pub fn padding(&self) -> Option<u8> {
if self.padding_flag() {
Some(self.padding_len().unwrap())
} else {
None
}
}
fn extension_flag(&self) -> bool {
(self.buf[0] & 0b0001_0000) != 0
}
/// A count of the number of CSRC fields present in the RTP headers - may be `0`.
///
/// See [csrc()](#method.csrc).
pub fn csrc_count(&self) -> u8 {
self.buf[0] & 0b0000_1111
}
/// A 'marker', which may have some definition in the specific RTP profile in use
pub fn mark(&self) -> bool {
(self.buf[1] & 0b1000_0000) != 0
}
/// Indicates the type of content carried in this RTP packet.
///
/// A few types-values are defined in the standard, but in many applications of RTP the value
/// of this field needs to be agreed between sender and receiver by some mechanism outside of
/// RTP itself.
pub fn payload_type(&self) -> u8 {
self.buf[1] & 0b0111_1111
}
/// The sequence number of this particular packet.
///
/// Sequence numbers are 16 bits, and will wrap back to `0` after reaching the maximum 16-bit
/// value of `65535`.
///
/// Receivers can identify packet losses or reordering by inspecting the value of this field
/// across a sequence of received packets. The [`Seq`](struct.Seq.html) wrapper type helps
/// calling code reason about sequence number problems in the face of any wraparound that might
/// have legitimately happened.
pub fn sequence_number(&self) -> Seq {
Seq((self.buf[2] as u16) << 8 | (self.buf[3] as u16))
}
/// The timestamp of this packet, given in a timebase that relates to the particular
/// `payload_type` in use.
///
/// It is perfectly possible for successive packets in a sequence to have the same value, or
/// to have values that differ by arbitrarily large amounts.
///
/// Timestamps are 32 bits, and will wrap back to `0` after reaching the maximum 32 bit value
/// of `4294967295`.
pub fn timestamp(&self) -> u32 {
(self.buf[4] as u32) << 24
| (self.buf[5] as u32) << 16
| (self.buf[6] as u32) << 8
| (self.buf[7] as u32)
}
/// The _synchronisation source_ for this packet. Many applications of RTP do not use this
/// field.
pub fn ssrc(&self) -> u32 {
(self.buf[8] as u32) << 24
| (self.buf[9] as u32) << 16
| (self.buf[10] as u32) << 8
| (self.buf[11] as u32)
}
/// A potentially empty list of _contributing sources_ for this packet. Many applications of
/// RTP do not use this field.
pub fn csrc(&self) -> impl Iterator<Item = u32> + '_ {
self.buf[Self::MIN_HEADER_LEN..]
.chunks(4)
.take(self.csrc_count() as usize)
.map(|b| (b[0] as u32) << 24 | (b[1] as u32) << 16 | (b[2] as u32) << 8 | (b[3] as u32))
}
/// Returns the offset of the payload for the packet
pub fn payload_offset(&self) -> usize {
let offset = self.csrc_end();
if self.extension_flag() {
offset + Self::EXTENSION_HEADER_LEN + self.extension_len()
} else {
offset
}
}
fn csrc_end(&self) -> usize {
Self::MIN_HEADER_LEN + (4 * self.csrc_count()) as usize
}
/// Returns the payload data of this RTP packet, excluding the packet's headers and any
/// optional trailing padding.
pub fn payload(&self) -> &'a [u8] {
let pad = if self.padding_flag() {
// in Self::new(), we already checked this was Ok, and will not attempt an invalid
// slice below,
self.padding_len().unwrap() as usize
} else {
0
};
&self.buf[self.payload_offset()..self.buf.len() - pad]
}
fn extension_len(&self) -> usize {
let offset = self.csrc_end();
// The 16 bit extension length header gives a length in 32 bit (4 byte) units; 0 is a
// valid length.
4 * ((self.buf[offset + 2] as usize) << 8 | (self.buf[offset + 3] as usize))
}
// must only be used if padding() returns true
fn padding_len(&self) -> Result<u8, RtpReaderError> {
match self.buf[self.buf.len() - 1] {
0 => Err(RtpReaderError::PaddingLengthInvalid(0)),
l => Ok(l),
}
}
/// Returns details of the optional RTP header extension field. If there is an extension,
/// the first component of the resulting tuple is the extension id, and the second is a
/// byte-slice for the extension data value, to be interpreted by the application.
pub fn extension(&self) -> Option<(u16, &'a [u8])> {
if self.extension_flag() {
let offset = self.csrc_end();
let id = (self.buf[offset] as u16) << 8 | (self.buf[offset + 1] as u16);
let start = offset + 4;
Some((id, &self.buf[start..start + self.extension_len()]))
} else {
None
}
}
/// Create a `RtpPacketBuilder` from this packet. **Note** that padding from the original
/// packet will not be used by default, and must be defined on the resulting `RtpPacketBuilder`
/// if required.
///
/// The padding is not copied from the original since, while we do know how many padding bytes
/// were present, we don't know if the intent was to round to 2 bytes, 4 bytes, etc. Blindly
/// copying the padding could result in an incorrect result _if_ the payload is subsequently
/// changed for one with a different length.
///
/// If you know your output packets don't need padding, there is nothing more to do, since
/// that is the default for the resulting `RtpPacketBulder`.
///
/// If you know you output packets need padding to 4 bytes, then you _must_ explicitly specify
/// this using `builder.padded(Pad::round_to(4))` even if the source packet was already padded
/// to a 4 byte boundary.
pub fn create_builder(&self) -> RtpPacketBuilder<'a> {
let mut builder = RtpPacketBuilder::new()
.payload_type(self.payload_type())
.marked(self.mark())
.sequence(self.sequence_number())
.ssrc(self.ssrc())
.timestamp(self.timestamp())
.payload(self.payload());
if let Some(ext) = self.extension() {
builder = builder.extension(ext.0, ext.1);
}
for csrc in self.csrc() {
builder = builder.add_csrc(csrc);
}
builder
}
}
impl<'a> fmt::Debug for RtpReader<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("RtpReader")
.field("version", &self.version())
.field("padding", &self.padding())
.field("extension", &self.extension().map(|(id, _)| id))
.field("csrc_count", &self.csrc_count())
.field("mark", &self.mark())
.field("payload_type", &self.payload_type())
.field("sequence_number", &self.sequence_number())
.field("timestamp", &self.timestamp())
.field("ssrc", &self.ssrc())
.field("payload_length", &self.payload().len())
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::IntoSeqIterator;
const TEST_RTP_PACKET: [u8; 391] = [
0x80u8, 0xe0u8, 0x27u8, 0x38u8, 0x64u8, 0xe4u8, 0x05u8, 0xa7u8, 0xa2u8, 0x42u8, 0xafu8,
0x01u8, 0x3cu8, 0x41u8, 0xa4u8, 0xa3u8, 0x5du8, 0x13u8, 0xf9u8, 0xcau8, 0x2cu8, 0x7eu8,
0xa9u8, 0x77u8, 0xaau8, 0xdeu8, 0xf7u8, 0xcau8, 0xa4u8, 0x28u8, 0xfeu8, 0xdfu8, 0xc8u8,
0x68u8, 0xf1u8, 0xd9u8, 0x4fu8, 0x69u8, 0x96u8, 0xa0u8, 0x57u8, 0xbau8, 0xfbu8, 0x07u8,
0xc4u8, 0xc4u8, 0xd4u8, 0xfeu8, 0xf8u8, 0xc7u8, 0xb2u8, 0x0du8, 0x01u8, 0x12u8, 0x14u8,
0x36u8, 0x69u8, 0x75u8, 0xf2u8, 0xb4u8, 0xb5u8, 0xf2u8, 0x54u8, 0x2eu8, 0xc2u8, 0x66u8,
0x51u8, 0xebu8, 0x41u8, 0x80u8, 0x96u8, 0xceu8, 0x8eu8, 0x60u8, 0xb2u8, 0x44u8, 0xaeu8,
0xe5u8, 0x43u8, 0xadu8, 0x7bu8, 0x48u8, 0x89u8, 0x44u8, 0xb0u8, 0x48u8, 0x67u8, 0x6au8,
0x84u8, 0x7au8, 0x0au8, 0x8fu8, 0x71u8, 0x50u8, 0x69u8, 0xe6u8, 0xb1u8, 0x05u8, 0x40u8,
0xb9u8, 0x8cu8, 0xafu8, 0x42u8, 0xcbu8, 0x58u8, 0x83u8, 0xcbu8, 0x32u8, 0x64u8, 0xd2u8,
0x2au8, 0x7du8, 0x4eu8, 0xf5u8, 0xbcu8, 0x33u8, 0xfeu8, 0xb7u8, 0x0cu8, 0xe4u8, 0x8eu8,
0x38u8, 0xbcu8, 0x3au8, 0x1eu8, 0xd2u8, 0x56u8, 0x13u8, 0x23u8, 0x47u8, 0xcfu8, 0x42u8,
0xa9u8, 0xbbu8, 0xcfu8, 0x48u8, 0xf3u8, 0x11u8, 0xc7u8, 0xfdu8, 0x73u8, 0x2du8, 0xe1u8,
0xeau8, 0x47u8, 0x5cu8, 0x5du8, 0x11u8, 0x96u8, 0x1eu8, 0xc4u8, 0x70u8, 0x32u8, 0x77u8,
0xabu8, 0x31u8, 0x7au8, 0xb1u8, 0x22u8, 0x14u8, 0x8du8, 0x2bu8, 0xecu8, 0x3du8, 0x67u8,
0x97u8, 0xa4u8, 0x40u8, 0x21u8, 0x1eu8, 0xceu8, 0xb0u8, 0x63u8, 0x01u8, 0x75u8, 0x77u8,
0x03u8, 0x15u8, 0xcdu8, 0x35u8, 0xa1u8, 0x2fu8, 0x4bu8, 0xa0u8, 0xacu8, 0x8du8, 0xd7u8,
0x78u8, 0x02u8, 0x23u8, 0xcbu8, 0xfdu8, 0x82u8, 0x4eu8, 0x0bu8, 0x79u8, 0x7fu8, 0x39u8,
0x70u8, 0x26u8, 0x66u8, 0x37u8, 0xe9u8, 0x93u8, 0x91u8, 0x7bu8, 0xc4u8, 0x80u8, 0xa9u8,
0x18u8, 0x23u8, 0xb3u8, 0xa1u8, 0x04u8, 0x72u8, 0x53u8, 0xa0u8, 0xb4u8, 0xffu8, 0x79u8,
0x1fu8, 0x07u8, 0xe2u8, 0x5du8, 0x01u8, 0x7du8, 0x63u8, 0xc1u8, 0x16u8, 0x89u8, 0x23u8,
0x4au8, 0x17u8, 0xbbu8, 0x6du8, 0x0du8, 0x81u8, 0x1au8, 0xbbu8, 0x94u8, 0x5bu8, 0xcbu8,
0x2du8, 0xdeu8, 0x98u8, 0x40u8, 0x22u8, 0x62u8, 0x41u8, 0xc2u8, 0x9bu8, 0x95u8, 0x85u8,
0x60u8, 0xf0u8, 0xdeu8, 0x6fu8, 0xeeu8, 0x93u8, 0xccu8, 0x15u8, 0x76u8, 0xfbu8, 0xf8u8,
0x8au8, 0x1du8, 0xe1u8, 0x83u8, 0x12u8, 0xabu8, 0x25u8, 0x6au8, 0x7bu8, 0x89u8, 0xedu8,
0x70u8, 0x4eu8, 0xcdu8, 0x1eu8, 0xa9u8, 0xfcu8, 0xa8u8, 0x22u8, 0x91u8, 0x5fu8, 0x50u8,
0x68u8, 0x6au8, 0x35u8, 0xf7u8, 0xc1u8, 0x1eu8, 0x15u8, 0x37u8, 0xb4u8, 0x30u8, 0x62u8,
0x56u8, 0x1eu8, 0x2eu8, 0xe0u8, 0x2du8, 0xa4u8, 0x1eu8, 0x75u8, 0x5bu8, 0xc7u8, 0xd0u8,
0x5bu8, 0x9du8, 0xd0u8, 0x25u8, 0x76u8, 0xdfu8, 0xa7u8, 0x19u8, 0x12u8, 0x93u8, 0xf4u8,
0xebu8, 0x02u8, 0xf2u8, 0x4au8, 0x13u8, 0xe9u8, 0x1cu8, 0x17u8, 0xccu8, 0x11u8, 0x87u8,
0x9cu8, 0xa6u8, 0x40u8, 0x27u8, 0xb7u8, 0x2bu8, 0x9bu8, 0x6fu8, 0x23u8, 0x06u8, 0x2cu8,
0xc6u8, 0x6eu8, 0xc1u8, 0x9au8, 0xbdu8, 0x59u8, 0x37u8, 0xe9u8, 0x9eu8, 0x76u8, 0xf6u8,
0xc1u8, 0xbcu8, 0x81u8, 0x18u8, 0x60u8, 0xc9u8, 0x64u8, 0x0au8, 0xb3u8, 0x6eu8, 0xf3u8,
0x6bu8, 0xb9u8, 0xd0u8, 0xf6u8, 0xe0u8, 0x9bu8, 0x91u8, 0xc1u8, 0x0fu8, 0x96u8, 0xefu8,
0xbcu8, 0x5fu8, 0x8eu8, 0x86u8, 0x56u8, 0x5au8, 0xfcu8, 0x7au8, 0x8bu8, 0xddu8, 0x9au8,
0x1cu8, 0xf6u8, 0xb4u8, 0x85u8, 0xf4u8, 0xb0u8,
];
const TEST_RTP_PACKET_WITH_EXTENSION: [u8; 63] = [
144u8, 111u8, 79u8, 252u8, 224u8, 94u8, 104u8, 203u8, 30u8, 112u8, 208u8,
191u8, 190u8, 222u8, 0u8, 3u8, 34u8, 175u8, 185u8, 88u8, 49u8, 0u8, 171u8,
64u8, 48u8, 16u8, 219u8, 0u8, 104u8, 9u8, 136u8, 90u8, 174u8, 145u8, 68u8,
165u8, 227u8, 178u8, 187u8, 68u8, 166u8, 66u8, 235u8, 40u8, 171u8, 135u8,
30u8, 174u8, 130u8, 239u8, 205u8, 14u8, 211u8, 232u8, 65u8, 67u8, 153u8,
120u8, 63u8, 17u8, 101u8, 55u8, 17u8
];
#[test]
fn version() {
let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap();
assert_eq!(2, reader.version());
assert!(reader.padding().is_none());
assert!(reader.extension().is_none());
assert_eq!(0, reader.csrc_count());
assert!(reader.mark());
assert_eq!(96, reader.payload_type());
assert_eq!(Seq(10040), reader.sequence_number());
assert_eq!(1_692_665_255, reader.timestamp());
assert_eq!(0xa242_af01, reader.ssrc());
assert_eq!(379, reader.payload().len());
format!("{:?}", reader);
}
#[test]
fn padding() |
#[test]
fn padding_too_large() {
// 'padding' header-flag is on, and padding length (255) in final byte is larger than the
// buffer length. (Test data created by fuzzing.)
let data = [
0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x1, 0x0, 0xff, 0xa2, 0xa2, 0xa2, 0xa2,
0x90, 0x0, 0x0, 0x0, 0x0, 0xff,
];
assert!(RtpReader::new(&data).is_err());
}
#[test]
fn builder_juggle() {
let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap();
let buffer = reader.create_builder().build().unwrap();
assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET[..]);
}
#[test]
fn builder_juggle_extension() {
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
let buffer = reader.create_builder().build().unwrap();
assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET_WITH_EXTENSION[..]);
}
#[test]
fn builder_juggle_clear_payload() {
let new_payload = vec![];
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
let buffer = reader.create_builder()
.payload(&new_payload).build().unwrap();
let expected = &TEST_RTP_PACKET_WITH_EXTENSION[0..(3 + 4) * 4];
assert_eq!(&buffer.as_slice()[..], expected);
}
#[test]
fn seq() {
assert!(Seq(0).precedes(Seq(1)));
assert!(Seq(0xffff).precedes(Seq(0)));
assert!(Seq(0) < Seq(1));
assert!(Seq(0xffff) < Seq(0));
assert_eq!(-1, Seq(0) - Seq(1));
assert_eq!(1, Seq(1) - Seq(0));
assert_eq!(0, Seq(1) - Seq(1));
assert_eq!(1, Seq(0) - Seq(0xffff));
assert_eq!(-1, Seq(0xffff) - Seq(0));
let mut it = (Seq(0xfffe)..Seq(1)).seq_iter();
assert_eq!(Seq(0xfffe), it.next().unwrap());
assert_eq!(Seq(0xffff), it.next().unwrap());
assert_eq!(Seq(0x0000), it.next().unwrap());
assert_eq!(None, it.next());
}
}
| {
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
assert_eq!(2, reader.version());
assert!(reader.padding().is_none());
assert!(reader.extension().is_some());
assert_eq!(0, reader.csrc_count());
assert_eq!(111, reader.payload_type());
} | identifier_body |
reader.rs | use std::fmt;
use crate::{Seq, RtpPacketBuilder};
/// Wrapper around a byte-slice of RTP data, providing accessor methods for the RTP header fields.
pub struct RtpReader<'a> {
buf: &'a [u8],
}
/// Reasons for `RtpHeader::new()` to fail
#[derive(Debug)]
pub enum RtpReaderError {
/// Buffer too short to be valid RTP packet
BufferTooShort(usize),
/// Only RTP version 2 supported
UnsupportedVersion(u8),
/// RTP headers truncated before end of buffer
HeadersTruncated {
/// The amount of data which was expected to be present (which may vary depending on flags
/// in the RTP header)
header_len: usize,
/// The actual amount of data that was available, which was found to be smaller than
/// `header_len`
buffer_len: usize,
},
/// The padding header at the end of the packet, if present, specifies the number of padding
/// bytes, including itself, and therefore cannot be less than `1`, or greater than the
/// available space.
PaddingLengthInvalid(u8),
}
impl<'a> RtpReader<'a> {
/// An RTP packet header is no fewer than 12 bytes long
pub const MIN_HEADER_LEN: usize = 12;
const EXTENSION_HEADER_LEN: usize = 4;
/// Tries to construct a new `RtpHeader` instance, or an `RtpReaderError` if the RTP data is
/// malformed.
///
/// In particular, if there is too little data in the given buffer, such that some later
/// attempt to access an RTP header field would need to access bytes that are not available,
/// then this method will fail up front, rather than allowing attempts to access any header
/// field to fail later on.
pub fn new(b: &'a [u8]) -> Result<RtpReader<'_>, RtpReaderError> {
if b.len() < Self::MIN_HEADER_LEN {
return Err(RtpReaderError::BufferTooShort(b.len()));
}
let r = RtpReader { buf: b };
if r.version() != 2 {
return Err(RtpReaderError::UnsupportedVersion(r.version()));
}
if r.extension_flag() {
let extension_start = r.csrc_end() + Self::EXTENSION_HEADER_LEN;
if extension_start > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: extension_start,
buffer_len: b.len(),
});
}
let extension_end = extension_start + r.extension_len();
if extension_end > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: extension_end,
buffer_len: b.len(),
});
}
}
if r.payload_offset() > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: r.payload_offset(),
buffer_len: b.len(),
});
}
if r.padding_flag() {
let post_header_bytes = b.len() - r.payload_offset();
// with 'padding' flag set, there must be at least a single byte after the headers to
// hold the padding length
if post_header_bytes == 0 {
return Err(RtpReaderError::HeadersTruncated {
header_len: r.payload_offset(),
buffer_len: b.len() - 1,
});
}
let pad_len = r.padding_len()?;
if r.payload_offset() + pad_len as usize > b.len() {
return Err(RtpReaderError::PaddingLengthInvalid(pad_len));
}
}
Ok(r)
}
/// Version field value (currently only version 2 is supported, so other values will not be
/// seen from this release of `rtp-rs`.
pub fn version(&self) -> u8 {
(self.buf[0] & 0b1100_0000) >> 6
}
/// Flag indicating if padding is present at the end of the payload data.
fn padding_flag(&self) -> bool {
(self.buf[0] & 0b0010_0000) != 0
}
/// Returns the size of the padding at the end of this packet, or `None` if the padding flag is
/// not set in the packet header
pub fn padding(&self) -> Option<u8> {
if self.padding_flag() {
Some(self.padding_len().unwrap())
} else {
None
}
}
fn extension_flag(&self) -> bool {
(self.buf[0] & 0b0001_0000) != 0
}
/// A count of the number of CSRC fields present in the RTP headers - may be `0`.
///
/// See [csrc()](#method.csrc).
pub fn csrc_count(&self) -> u8 {
self.buf[0] & 0b0000_1111
}
/// A 'marker', which may have some definition in the specific RTP profile in use
pub fn mark(&self) -> bool {
(self.buf[1] & 0b1000_0000) != 0
}
/// Indicates the type of content carried in this RTP packet.
///
/// A few types-values are defined in the standard, but in many applications of RTP the value
/// of this field needs to be agreed between sender and receiver by some mechanism outside of
/// RTP itself.
pub fn payload_type(&self) -> u8 {
self.buf[1] & 0b0111_1111
}
/// The sequence number of this particular packet.
///
/// Sequence numbers are 16 bits, and will wrap back to `0` after reaching the maximum 16-bit
/// value of `65535`.
///
/// Receivers can identify packet losses or reordering by inspecting the value of this field
/// across a sequence of received packets. The [`Seq`](struct.Seq.html) wrapper type helps
/// calling code reason about sequence number problems in the face of any wraparound that might
/// have legitimately happened.
pub fn sequence_number(&self) -> Seq {
Seq((self.buf[2] as u16) << 8 | (self.buf[3] as u16))
}
/// The timestamp of this packet, given in a timebase that relates to the particular
/// `payload_type` in use.
///
/// It is perfectly possible for successive packets in a sequence to have the same value, or
/// to have values that differ by arbitrarily large amounts.
///
/// Timestamps are 32 bits, and will wrap back to `0` after reaching the maximum 32 bit value
/// of `4294967295`.
pub fn timestamp(&self) -> u32 {
(self.buf[4] as u32) << 24
| (self.buf[5] as u32) << 16
| (self.buf[6] as u32) << 8
| (self.buf[7] as u32)
}
/// The _synchronisation source_ for this packet. Many applications of RTP do not use this
/// field.
pub fn ssrc(&self) -> u32 {
(self.buf[8] as u32) << 24
| (self.buf[9] as u32) << 16
| (self.buf[10] as u32) << 8
| (self.buf[11] as u32)
}
/// A potentially empty list of _contributing sources_ for this packet. Many applications of
/// RTP do not use this field.
pub fn csrc(&self) -> impl Iterator<Item = u32> + '_ {
self.buf[Self::MIN_HEADER_LEN..]
.chunks(4)
.take(self.csrc_count() as usize)
.map(|b| (b[0] as u32) << 24 | (b[1] as u32) << 16 | (b[2] as u32) << 8 | (b[3] as u32))
}
/// Returns the offset of the payload for the packet
pub fn payload_offset(&self) -> usize {
let offset = self.csrc_end();
if self.extension_flag() {
offset + Self::EXTENSION_HEADER_LEN + self.extension_len()
} else {
offset
}
}
fn csrc_end(&self) -> usize {
Self::MIN_HEADER_LEN + (4 * self.csrc_count()) as usize
}
/// Returns the payload data of this RTP packet, excluding the packet's headers and any
/// optional trailing padding.
pub fn payload(&self) -> &'a [u8] {
let pad = if self.padding_flag() {
// in Self::new(), we already checked this was Ok, and will not attempt an invalid
// slice below,
self.padding_len().unwrap() as usize
} else {
0
};
&self.buf[self.payload_offset()..self.buf.len() - pad]
}
fn extension_len(&self) -> usize {
let offset = self.csrc_end();
// The 16 bit extension length header gives a length in 32 bit (4 byte) units; 0 is a
// valid length.
4 * ((self.buf[offset + 2] as usize) << 8 | (self.buf[offset + 3] as usize))
}
// must only be used if padding() returns true
fn padding_len(&self) -> Result<u8, RtpReaderError> {
match self.buf[self.buf.len() - 1] {
0 => Err(RtpReaderError::PaddingLengthInvalid(0)),
l => Ok(l),
}
}
/// Returns details of the optional RTP header extension field. If there is an extension,
/// the first component of the resulting tuple is the extension id, and the second is a
/// byte-slice for the extension data value, to be interpreted by the application.
pub fn extension(&self) -> Option<(u16, &'a [u8])> {
if self.extension_flag() | else {
None
}
}
/// Create a `RtpPacketBuilder` from this packet. **Note** that padding from the original
/// packet will not be used by default, and must be defined on the resulting `RtpPacketBuilder`
/// if required.
///
/// The padding is not copied from the original since, while we do know how many padding bytes
/// were present, we don't know if the intent was to round to 2 bytes, 4 bytes, etc. Blindly
/// copying the padding could result in an incorrect result _if_ the payload is subsequently
/// changed for one with a different length.
///
/// If you know your output packets don't need padding, there is nothing more to do, since
/// that is the default for the resulting `RtpPacketBulder`.
///
/// If you know you output packets need padding to 4 bytes, then you _must_ explicitly specify
/// this using `builder.padded(Pad::round_to(4))` even if the source packet was already padded
/// to a 4 byte boundary.
pub fn create_builder(&self) -> RtpPacketBuilder<'a> {
let mut builder = RtpPacketBuilder::new()
.payload_type(self.payload_type())
.marked(self.mark())
.sequence(self.sequence_number())
.ssrc(self.ssrc())
.timestamp(self.timestamp())
.payload(self.payload());
if let Some(ext) = self.extension() {
builder = builder.extension(ext.0, ext.1);
}
for csrc in self.csrc() {
builder = builder.add_csrc(csrc);
}
builder
}
}
impl<'a> fmt::Debug for RtpReader<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("RtpReader")
.field("version", &self.version())
.field("padding", &self.padding())
.field("extension", &self.extension().map(|(id, _)| id))
.field("csrc_count", &self.csrc_count())
.field("mark", &self.mark())
.field("payload_type", &self.payload_type())
.field("sequence_number", &self.sequence_number())
.field("timestamp", &self.timestamp())
.field("ssrc", &self.ssrc())
.field("payload_length", &self.payload().len())
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::IntoSeqIterator;
const TEST_RTP_PACKET: [u8; 391] = [
0x80u8, 0xe0u8, 0x27u8, 0x38u8, 0x64u8, 0xe4u8, 0x05u8, 0xa7u8, 0xa2u8, 0x42u8, 0xafu8,
0x01u8, 0x3cu8, 0x41u8, 0xa4u8, 0xa3u8, 0x5du8, 0x13u8, 0xf9u8, 0xcau8, 0x2cu8, 0x7eu8,
0xa9u8, 0x77u8, 0xaau8, 0xdeu8, 0xf7u8, 0xcau8, 0xa4u8, 0x28u8, 0xfeu8, 0xdfu8, 0xc8u8,
0x68u8, 0xf1u8, 0xd9u8, 0x4fu8, 0x69u8, 0x96u8, 0xa0u8, 0x57u8, 0xbau8, 0xfbu8, 0x07u8,
0xc4u8, 0xc4u8, 0xd4u8, 0xfeu8, 0xf8u8, 0xc7u8, 0xb2u8, 0x0du8, 0x01u8, 0x12u8, 0x14u8,
0x36u8, 0x69u8, 0x75u8, 0xf2u8, 0xb4u8, 0xb5u8, 0xf2u8, 0x54u8, 0x2eu8, 0xc2u8, 0x66u8,
0x51u8, 0xebu8, 0x41u8, 0x80u8, 0x96u8, 0xceu8, 0x8eu8, 0x60u8, 0xb2u8, 0x44u8, 0xaeu8,
0xe5u8, 0x43u8, 0xadu8, 0x7bu8, 0x48u8, 0x89u8, 0x44u8, 0xb0u8, 0x48u8, 0x67u8, 0x6au8,
0x84u8, 0x7au8, 0x0au8, 0x8fu8, 0x71u8, 0x50u8, 0x69u8, 0xe6u8, 0xb1u8, 0x05u8, 0x40u8,
0xb9u8, 0x8cu8, 0xafu8, 0x42u8, 0xcbu8, 0x58u8, 0x83u8, 0xcbu8, 0x32u8, 0x64u8, 0xd2u8,
0x2au8, 0x7du8, 0x4eu8, 0xf5u8, 0xbcu8, 0x33u8, 0xfeu8, 0xb7u8, 0x0cu8, 0xe4u8, 0x8eu8,
0x38u8, 0xbcu8, 0x3au8, 0x1eu8, 0xd2u8, 0x56u8, 0x13u8, 0x23u8, 0x47u8, 0xcfu8, 0x42u8,
0xa9u8, 0xbbu8, 0xcfu8, 0x48u8, 0xf3u8, 0x11u8, 0xc7u8, 0xfdu8, 0x73u8, 0x2du8, 0xe1u8,
0xeau8, 0x47u8, 0x5cu8, 0x5du8, 0x11u8, 0x96u8, 0x1eu8, 0xc4u8, 0x70u8, 0x32u8, 0x77u8,
0xabu8, 0x31u8, 0x7au8, 0xb1u8, 0x22u8, 0x14u8, 0x8du8, 0x2bu8, 0xecu8, 0x3du8, 0x67u8,
0x97u8, 0xa4u8, 0x40u8, 0x21u8, 0x1eu8, 0xceu8, 0xb0u8, 0x63u8, 0x01u8, 0x75u8, 0x77u8,
0x03u8, 0x15u8, 0xcdu8, 0x35u8, 0xa1u8, 0x2fu8, 0x4bu8, 0xa0u8, 0xacu8, 0x8du8, 0xd7u8,
0x78u8, 0x02u8, 0x23u8, 0xcbu8, 0xfdu8, 0x82u8, 0x4eu8, 0x0bu8, 0x79u8, 0x7fu8, 0x39u8,
0x70u8, 0x26u8, 0x66u8, 0x37u8, 0xe9u8, 0x93u8, 0x91u8, 0x7bu8, 0xc4u8, 0x80u8, 0xa9u8,
0x18u8, 0x23u8, 0xb3u8, 0xa1u8, 0x04u8, 0x72u8, 0x53u8, 0xa0u8, 0xb4u8, 0xffu8, 0x79u8,
0x1fu8, 0x07u8, 0xe2u8, 0x5du8, 0x01u8, 0x7du8, 0x63u8, 0xc1u8, 0x16u8, 0x89u8, 0x23u8,
0x4au8, 0x17u8, 0xbbu8, 0x6du8, 0x0du8, 0x81u8, 0x1au8, 0xbbu8, 0x94u8, 0x5bu8, 0xcbu8,
0x2du8, 0xdeu8, 0x98u8, 0x40u8, 0x22u8, 0x62u8, 0x41u8, 0xc2u8, 0x9bu8, 0x95u8, 0x85u8,
0x60u8, 0xf0u8, 0xdeu8, 0x6fu8, 0xeeu8, 0x93u8, 0xccu8, 0x15u8, 0x76u8, 0xfbu8, 0xf8u8,
0x8au8, 0x1du8, 0xe1u8, 0x83u8, 0x12u8, 0xabu8, 0x25u8, 0x6au8, 0x7bu8, 0x89u8, 0xedu8,
0x70u8, 0x4eu8, 0xcdu8, 0x1eu8, 0xa9u8, 0xfcu8, 0xa8u8, 0x22u8, 0x91u8, 0x5fu8, 0x50u8,
0x68u8, 0x6au8, 0x35u8, 0xf7u8, 0xc1u8, 0x1eu8, 0x15u8, 0x37u8, 0xb4u8, 0x30u8, 0x62u8,
0x56u8, 0x1eu8, 0x2eu8, 0xe0u8, 0x2du8, 0xa4u8, 0x1eu8, 0x75u8, 0x5bu8, 0xc7u8, 0xd0u8,
0x5bu8, 0x9du8, 0xd0u8, 0x25u8, 0x76u8, 0xdfu8, 0xa7u8, 0x19u8, 0x12u8, 0x93u8, 0xf4u8,
0xebu8, 0x02u8, 0xf2u8, 0x4au8, 0x13u8, 0xe9u8, 0x1cu8, 0x17u8, 0xccu8, 0x11u8, 0x87u8,
0x9cu8, 0xa6u8, 0x40u8, 0x27u8, 0xb7u8, 0x2bu8, 0x9bu8, 0x6fu8, 0x23u8, 0x06u8, 0x2cu8,
0xc6u8, 0x6eu8, 0xc1u8, 0x9au8, 0xbdu8, 0x59u8, 0x37u8, 0xe9u8, 0x9eu8, 0x76u8, 0xf6u8,
0xc1u8, 0xbcu8, 0x81u8, 0x18u8, 0x60u8, 0xc9u8, 0x64u8, 0x0au8, 0xb3u8, 0x6eu8, 0xf3u8,
0x6bu8, 0xb9u8, 0xd0u8, 0xf6u8, 0xe0u8, 0x9bu8, 0x91u8, 0xc1u8, 0x0fu8, 0x96u8, 0xefu8,
0xbcu8, 0x5fu8, 0x8eu8, 0x86u8, 0x56u8, 0x5au8, 0xfcu8, 0x7au8, 0x8bu8, 0xddu8, 0x9au8,
0x1cu8, 0xf6u8, 0xb4u8, 0x85u8, 0xf4u8, 0xb0u8,
];
const TEST_RTP_PACKET_WITH_EXTENSION: [u8; 63] = [
144u8, 111u8, 79u8, 252u8, 224u8, 94u8, 104u8, 203u8, 30u8, 112u8, 208u8,
191u8, 190u8, 222u8, 0u8, 3u8, 34u8, 175u8, 185u8, 88u8, 49u8, 0u8, 171u8,
64u8, 48u8, 16u8, 219u8, 0u8, 104u8, 9u8, 136u8, 90u8, 174u8, 145u8, 68u8,
165u8, 227u8, 178u8, 187u8, 68u8, 166u8, 66u8, 235u8, 40u8, 171u8, 135u8,
30u8, 174u8, 130u8, 239u8, 205u8, 14u8, 211u8, 232u8, 65u8, 67u8, 153u8,
120u8, 63u8, 17u8, 101u8, 55u8, 17u8
];
#[test]
fn version() {
let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap();
assert_eq!(2, reader.version());
assert!(reader.padding().is_none());
assert!(reader.extension().is_none());
assert_eq!(0, reader.csrc_count());
assert!(reader.mark());
assert_eq!(96, reader.payload_type());
assert_eq!(Seq(10040), reader.sequence_number());
assert_eq!(1_692_665_255, reader.timestamp());
assert_eq!(0xa242_af01, reader.ssrc());
assert_eq!(379, reader.payload().len());
format!("{:?}", reader);
}
#[test]
fn padding() {
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
assert_eq!(2, reader.version());
assert!(reader.padding().is_none());
assert!(reader.extension().is_some());
assert_eq!(0, reader.csrc_count());
assert_eq!(111, reader.payload_type());
}
#[test]
fn padding_too_large() {
// 'padding' header-flag is on, and padding length (255) in final byte is larger than the
// buffer length. (Test data created by fuzzing.)
let data = [
0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x1, 0x0, 0xff, 0xa2, 0xa2, 0xa2, 0xa2,
0x90, 0x0, 0x0, 0x0, 0x0, 0xff,
];
assert!(RtpReader::new(&data).is_err());
}
#[test]
fn builder_juggle() {
let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap();
let buffer = reader.create_builder().build().unwrap();
assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET[..]);
}
#[test]
fn builder_juggle_extension() {
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
let buffer = reader.create_builder().build().unwrap();
assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET_WITH_EXTENSION[..]);
}
#[test]
fn builder_juggle_clear_payload() {
let new_payload = vec![];
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
let buffer = reader.create_builder()
.payload(&new_payload).build().unwrap();
let expected = &TEST_RTP_PACKET_WITH_EXTENSION[0..(3 + 4) * 4];
assert_eq!(&buffer.as_slice()[..], expected);
}
#[test]
fn seq() {
assert!(Seq(0).precedes(Seq(1)));
assert!(Seq(0xffff).precedes(Seq(0)));
assert!(Seq(0) < Seq(1));
assert!(Seq(0xffff) < Seq(0));
assert_eq!(-1, Seq(0) - Seq(1));
assert_eq!(1, Seq(1) - Seq(0));
assert_eq!(0, Seq(1) - Seq(1));
assert_eq!(1, Seq(0) - Seq(0xffff));
assert_eq!(-1, Seq(0xffff) - Seq(0));
let mut it = (Seq(0xfffe)..Seq(1)).seq_iter();
assert_eq!(Seq(0xfffe), it.next().unwrap());
assert_eq!(Seq(0xffff), it.next().unwrap());
assert_eq!(Seq(0x0000), it.next().unwrap());
assert_eq!(None, it.next());
}
}
| {
let offset = self.csrc_end();
let id = (self.buf[offset] as u16) << 8 | (self.buf[offset + 1] as u16);
let start = offset + 4;
Some((id, &self.buf[start..start + self.extension_len()]))
} | conditional_block |
reader.rs | use std::fmt;
use crate::{Seq, RtpPacketBuilder};
/// Wrapper around a byte-slice of RTP data, providing accessor methods for the RTP header fields.
pub struct RtpReader<'a> {
buf: &'a [u8],
}
/// Reasons for `RtpHeader::new()` to fail
#[derive(Debug)]
pub enum RtpReaderError {
/// Buffer too short to be valid RTP packet
BufferTooShort(usize),
/// Only RTP version 2 supported
UnsupportedVersion(u8),
/// RTP headers truncated before end of buffer
HeadersTruncated {
/// The amount of data which was expected to be present (which may vary depending on flags
/// in the RTP header)
header_len: usize,
/// The actual amount of data that was available, which was found to be smaller than
/// `header_len`
buffer_len: usize,
},
/// The padding header at the end of the packet, if present, specifies the number of padding
/// bytes, including itself, and therefore cannot be less than `1`, or greater than the
/// available space.
PaddingLengthInvalid(u8),
}
impl<'a> RtpReader<'a> {
/// An RTP packet header is no fewer than 12 bytes long
pub const MIN_HEADER_LEN: usize = 12;
const EXTENSION_HEADER_LEN: usize = 4;
/// Tries to construct a new `RtpHeader` instance, or an `RtpReaderError` if the RTP data is
/// malformed.
///
/// In particular, if there is too little data in the given buffer, such that some later
/// attempt to access an RTP header field would need to access bytes that are not available,
/// then this method will fail up front, rather than allowing attempts to access any header
/// field to fail later on.
pub fn new(b: &'a [u8]) -> Result<RtpReader<'_>, RtpReaderError> {
if b.len() < Self::MIN_HEADER_LEN {
return Err(RtpReaderError::BufferTooShort(b.len()));
}
let r = RtpReader { buf: b };
if r.version() != 2 {
return Err(RtpReaderError::UnsupportedVersion(r.version()));
}
if r.extension_flag() {
let extension_start = r.csrc_end() + Self::EXTENSION_HEADER_LEN;
if extension_start > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: extension_start,
buffer_len: b.len(),
});
}
let extension_end = extension_start + r.extension_len();
if extension_end > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: extension_end,
buffer_len: b.len(),
});
}
}
if r.payload_offset() > b.len() {
return Err(RtpReaderError::HeadersTruncated {
header_len: r.payload_offset(),
buffer_len: b.len(),
});
}
if r.padding_flag() {
let post_header_bytes = b.len() - r.payload_offset();
// with 'padding' flag set, there must be at least a single byte after the headers to
// hold the padding length
if post_header_bytes == 0 {
return Err(RtpReaderError::HeadersTruncated {
header_len: r.payload_offset(),
buffer_len: b.len() - 1,
});
}
let pad_len = r.padding_len()?;
if r.payload_offset() + pad_len as usize > b.len() {
return Err(RtpReaderError::PaddingLengthInvalid(pad_len));
}
}
Ok(r)
}
/// Version field value (currently only version 2 is supported, so other values will not be
/// seen from this release of `rtp-rs`.
pub fn version(&self) -> u8 {
(self.buf[0] & 0b1100_0000) >> 6
}
/// Flag indicating if padding is present at the end of the payload data.
fn padding_flag(&self) -> bool {
(self.buf[0] & 0b0010_0000) != 0
}
/// Returns the size of the padding at the end of this packet, or `None` if the padding flag is
/// not set in the packet header
pub fn padding(&self) -> Option<u8> {
if self.padding_flag() {
Some(self.padding_len().unwrap())
} else {
None
}
}
fn extension_flag(&self) -> bool {
(self.buf[0] & 0b0001_0000) != 0
}
/// A count of the number of CSRC fields present in the RTP headers - may be `0`.
///
/// See [csrc()](#method.csrc).
pub fn csrc_count(&self) -> u8 {
self.buf[0] & 0b0000_1111
}
/// A 'marker', which may have some definition in the specific RTP profile in use
pub fn mark(&self) -> bool {
(self.buf[1] & 0b1000_0000) != 0
}
/// Indicates the type of content carried in this RTP packet.
///
/// A few types-values are defined in the standard, but in many applications of RTP the value
/// of this field needs to be agreed between sender and receiver by some mechanism outside of
/// RTP itself.
pub fn payload_type(&self) -> u8 {
self.buf[1] & 0b0111_1111
}
/// The sequence number of this particular packet.
///
/// Sequence numbers are 16 bits, and will wrap back to `0` after reaching the maximum 16-bit
/// value of `65535`.
///
/// Receivers can identify packet losses or reordering by inspecting the value of this field
/// across a sequence of received packets. The [`Seq`](struct.Seq.html) wrapper type helps
/// calling code reason about sequence number problems in the face of any wraparound that might
/// have legitimately happened.
pub fn sequence_number(&self) -> Seq {
Seq((self.buf[2] as u16) << 8 | (self.buf[3] as u16))
}
/// The timestamp of this packet, given in a timebase that relates to the particular
/// `payload_type` in use.
///
/// It is perfectly possible for successive packets in a sequence to have the same value, or
/// to have values that differ by arbitrarily large amounts.
///
/// Timestamps are 32 bits, and will wrap back to `0` after reaching the maximum 32 bit value
/// of `4294967295`.
pub fn timestamp(&self) -> u32 {
(self.buf[4] as u32) << 24
| (self.buf[5] as u32) << 16
| (self.buf[6] as u32) << 8
| (self.buf[7] as u32)
}
/// The _synchronisation source_ for this packet. Many applications of RTP do not use this
/// field.
pub fn ssrc(&self) -> u32 {
(self.buf[8] as u32) << 24
| (self.buf[9] as u32) << 16
| (self.buf[10] as u32) << 8
| (self.buf[11] as u32)
}
/// A potentially empty list of _contributing sources_ for this packet. Many applications of
/// RTP do not use this field.
pub fn csrc(&self) -> impl Iterator<Item = u32> + '_ {
self.buf[Self::MIN_HEADER_LEN..]
.chunks(4)
.take(self.csrc_count() as usize)
.map(|b| (b[0] as u32) << 24 | (b[1] as u32) << 16 | (b[2] as u32) << 8 | (b[3] as u32))
}
/// Returns the offset of the payload for the packet
pub fn payload_offset(&self) -> usize {
let offset = self.csrc_end();
if self.extension_flag() {
offset + Self::EXTENSION_HEADER_LEN + self.extension_len()
} else {
offset
}
}
fn csrc_end(&self) -> usize {
Self::MIN_HEADER_LEN + (4 * self.csrc_count()) as usize
}
/// Returns the payload data of this RTP packet, excluding the packet's headers and any
/// optional trailing padding.
pub fn payload(&self) -> &'a [u8] {
let pad = if self.padding_flag() {
// in Self::new(), we already checked this was Ok, and will not attempt an invalid
// slice below,
self.padding_len().unwrap() as usize
} else {
0
};
&self.buf[self.payload_offset()..self.buf.len() - pad]
}
fn extension_len(&self) -> usize {
let offset = self.csrc_end();
// The 16 bit extension length header gives a length in 32 bit (4 byte) units; 0 is a
// valid length.
4 * ((self.buf[offset + 2] as usize) << 8 | (self.buf[offset + 3] as usize))
}
// must only be used if padding() returns true
fn padding_len(&self) -> Result<u8, RtpReaderError> {
match self.buf[self.buf.len() - 1] {
0 => Err(RtpReaderError::PaddingLengthInvalid(0)),
l => Ok(l),
}
}
/// Returns details of the optional RTP header extension field. If there is an extension,
/// the first component of the resulting tuple is the extension id, and the second is a
/// byte-slice for the extension data value, to be interpreted by the application.
pub fn extension(&self) -> Option<(u16, &'a [u8])> {
if self.extension_flag() {
let offset = self.csrc_end();
let id = (self.buf[offset] as u16) << 8 | (self.buf[offset + 1] as u16);
let start = offset + 4;
Some((id, &self.buf[start..start + self.extension_len()]))
} else {
None
}
}
/// Create a `RtpPacketBuilder` from this packet. **Note** that padding from the original
/// packet will not be used by default, and must be defined on the resulting `RtpPacketBuilder`
/// if required.
///
/// The padding is not copied from the original since, while we do know how many padding bytes
/// were present, we don't know if the intent was to round to 2 bytes, 4 bytes, etc. Blindly
/// copying the padding could result in an incorrect result _if_ the payload is subsequently
/// changed for one with a different length.
///
/// If you know your output packets don't need padding, there is nothing more to do, since
/// that is the default for the resulting `RtpPacketBulder`.
///
/// If you know you output packets need padding to 4 bytes, then you _must_ explicitly specify
/// this using `builder.padded(Pad::round_to(4))` even if the source packet was already padded
/// to a 4 byte boundary.
pub fn create_builder(&self) -> RtpPacketBuilder<'a> {
let mut builder = RtpPacketBuilder::new()
.payload_type(self.payload_type())
.marked(self.mark())
.sequence(self.sequence_number())
.ssrc(self.ssrc())
.timestamp(self.timestamp())
.payload(self.payload());
if let Some(ext) = self.extension() {
builder = builder.extension(ext.0, ext.1);
}
for csrc in self.csrc() {
builder = builder.add_csrc(csrc);
}
builder
}
}
impl<'a> fmt::Debug for RtpReader<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("RtpReader")
.field("version", &self.version())
.field("padding", &self.padding())
.field("extension", &self.extension().map(|(id, _)| id))
.field("csrc_count", &self.csrc_count())
.field("mark", &self.mark())
.field("payload_type", &self.payload_type())
.field("sequence_number", &self.sequence_number())
.field("timestamp", &self.timestamp())
.field("ssrc", &self.ssrc())
.field("payload_length", &self.payload().len())
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::IntoSeqIterator;
const TEST_RTP_PACKET: [u8; 391] = [
0x80u8, 0xe0u8, 0x27u8, 0x38u8, 0x64u8, 0xe4u8, 0x05u8, 0xa7u8, 0xa2u8, 0x42u8, 0xafu8,
0x01u8, 0x3cu8, 0x41u8, 0xa4u8, 0xa3u8, 0x5du8, 0x13u8, 0xf9u8, 0xcau8, 0x2cu8, 0x7eu8,
0xa9u8, 0x77u8, 0xaau8, 0xdeu8, 0xf7u8, 0xcau8, 0xa4u8, 0x28u8, 0xfeu8, 0xdfu8, 0xc8u8,
0x68u8, 0xf1u8, 0xd9u8, 0x4fu8, 0x69u8, 0x96u8, 0xa0u8, 0x57u8, 0xbau8, 0xfbu8, 0x07u8,
0xc4u8, 0xc4u8, 0xd4u8, 0xfeu8, 0xf8u8, 0xc7u8, 0xb2u8, 0x0du8, 0x01u8, 0x12u8, 0x14u8,
0x36u8, 0x69u8, 0x75u8, 0xf2u8, 0xb4u8, 0xb5u8, 0xf2u8, 0x54u8, 0x2eu8, 0xc2u8, 0x66u8,
0x51u8, 0xebu8, 0x41u8, 0x80u8, 0x96u8, 0xceu8, 0x8eu8, 0x60u8, 0xb2u8, 0x44u8, 0xaeu8,
0xe5u8, 0x43u8, 0xadu8, 0x7bu8, 0x48u8, 0x89u8, 0x44u8, 0xb0u8, 0x48u8, 0x67u8, 0x6au8,
0x84u8, 0x7au8, 0x0au8, 0x8fu8, 0x71u8, 0x50u8, 0x69u8, 0xe6u8, 0xb1u8, 0x05u8, 0x40u8,
0xb9u8, 0x8cu8, 0xafu8, 0x42u8, 0xcbu8, 0x58u8, 0x83u8, 0xcbu8, 0x32u8, 0x64u8, 0xd2u8,
0x2au8, 0x7du8, 0x4eu8, 0xf5u8, 0xbcu8, 0x33u8, 0xfeu8, 0xb7u8, 0x0cu8, 0xe4u8, 0x8eu8,
0x38u8, 0xbcu8, 0x3au8, 0x1eu8, 0xd2u8, 0x56u8, 0x13u8, 0x23u8, 0x47u8, 0xcfu8, 0x42u8,
0xa9u8, 0xbbu8, 0xcfu8, 0x48u8, 0xf3u8, 0x11u8, 0xc7u8, 0xfdu8, 0x73u8, 0x2du8, 0xe1u8,
0xeau8, 0x47u8, 0x5cu8, 0x5du8, 0x11u8, 0x96u8, 0x1eu8, 0xc4u8, 0x70u8, 0x32u8, 0x77u8,
0xabu8, 0x31u8, 0x7au8, 0xb1u8, 0x22u8, 0x14u8, 0x8du8, 0x2bu8, 0xecu8, 0x3du8, 0x67u8,
0x97u8, 0xa4u8, 0x40u8, 0x21u8, 0x1eu8, 0xceu8, 0xb0u8, 0x63u8, 0x01u8, 0x75u8, 0x77u8,
0x03u8, 0x15u8, 0xcdu8, 0x35u8, 0xa1u8, 0x2fu8, 0x4bu8, 0xa0u8, 0xacu8, 0x8du8, 0xd7u8,
0x78u8, 0x02u8, 0x23u8, 0xcbu8, 0xfdu8, 0x82u8, 0x4eu8, 0x0bu8, 0x79u8, 0x7fu8, 0x39u8,
0x70u8, 0x26u8, 0x66u8, 0x37u8, 0xe9u8, 0x93u8, 0x91u8, 0x7bu8, 0xc4u8, 0x80u8, 0xa9u8,
0x18u8, 0x23u8, 0xb3u8, 0xa1u8, 0x04u8, 0x72u8, 0x53u8, 0xa0u8, 0xb4u8, 0xffu8, 0x79u8,
0x1fu8, 0x07u8, 0xe2u8, 0x5du8, 0x01u8, 0x7du8, 0x63u8, 0xc1u8, 0x16u8, 0x89u8, 0x23u8,
0x4au8, 0x17u8, 0xbbu8, 0x6du8, 0x0du8, 0x81u8, 0x1au8, 0xbbu8, 0x94u8, 0x5bu8, 0xcbu8,
0x2du8, 0xdeu8, 0x98u8, 0x40u8, 0x22u8, 0x62u8, 0x41u8, 0xc2u8, 0x9bu8, 0x95u8, 0x85u8,
0x60u8, 0xf0u8, 0xdeu8, 0x6fu8, 0xeeu8, 0x93u8, 0xccu8, 0x15u8, 0x76u8, 0xfbu8, 0xf8u8,
0x8au8, 0x1du8, 0xe1u8, 0x83u8, 0x12u8, 0xabu8, 0x25u8, 0x6au8, 0x7bu8, 0x89u8, 0xedu8,
0x70u8, 0x4eu8, 0xcdu8, 0x1eu8, 0xa9u8, 0xfcu8, 0xa8u8, 0x22u8, 0x91u8, 0x5fu8, 0x50u8,
0x68u8, 0x6au8, 0x35u8, 0xf7u8, 0xc1u8, 0x1eu8, 0x15u8, 0x37u8, 0xb4u8, 0x30u8, 0x62u8,
0x56u8, 0x1eu8, 0x2eu8, 0xe0u8, 0x2du8, 0xa4u8, 0x1eu8, 0x75u8, 0x5bu8, 0xc7u8, 0xd0u8,
0x5bu8, 0x9du8, 0xd0u8, 0x25u8, 0x76u8, 0xdfu8, 0xa7u8, 0x19u8, 0x12u8, 0x93u8, 0xf4u8,
0xebu8, 0x02u8, 0xf2u8, 0x4au8, 0x13u8, 0xe9u8, 0x1cu8, 0x17u8, 0xccu8, 0x11u8, 0x87u8,
0x9cu8, 0xa6u8, 0x40u8, 0x27u8, 0xb7u8, 0x2bu8, 0x9bu8, 0x6fu8, 0x23u8, 0x06u8, 0x2cu8,
0xc6u8, 0x6eu8, 0xc1u8, 0x9au8, 0xbdu8, 0x59u8, 0x37u8, 0xe9u8, 0x9eu8, 0x76u8, 0xf6u8,
0xc1u8, 0xbcu8, 0x81u8, 0x18u8, 0x60u8, 0xc9u8, 0x64u8, 0x0au8, 0xb3u8, 0x6eu8, 0xf3u8,
0x6bu8, 0xb9u8, 0xd0u8, 0xf6u8, 0xe0u8, 0x9bu8, 0x91u8, 0xc1u8, 0x0fu8, 0x96u8, 0xefu8,
0xbcu8, 0x5fu8, 0x8eu8, 0x86u8, 0x56u8, 0x5au8, 0xfcu8, 0x7au8, 0x8bu8, 0xddu8, 0x9au8,
0x1cu8, 0xf6u8, 0xb4u8, 0x85u8, 0xf4u8, 0xb0u8,
];
const TEST_RTP_PACKET_WITH_EXTENSION: [u8; 63] = [
144u8, 111u8, 79u8, 252u8, 224u8, 94u8, 104u8, 203u8, 30u8, 112u8, 208u8,
191u8, 190u8, 222u8, 0u8, 3u8, 34u8, 175u8, 185u8, 88u8, 49u8, 0u8, 171u8,
64u8, 48u8, 16u8, 219u8, 0u8, 104u8, 9u8, 136u8, 90u8, 174u8, 145u8, 68u8,
165u8, 227u8, 178u8, 187u8, 68u8, 166u8, 66u8, 235u8, 40u8, 171u8, 135u8,
30u8, 174u8, 130u8, 239u8, 205u8, 14u8, 211u8, 232u8, 65u8, 67u8, 153u8,
120u8, 63u8, 17u8, 101u8, 55u8, 17u8
];
#[test]
fn version() {
let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap();
assert_eq!(2, reader.version());
assert!(reader.padding().is_none());
assert!(reader.extension().is_none());
assert_eq!(0, reader.csrc_count());
assert!(reader.mark());
assert_eq!(96, reader.payload_type());
assert_eq!(Seq(10040), reader.sequence_number());
assert_eq!(1_692_665_255, reader.timestamp());
assert_eq!(0xa242_af01, reader.ssrc());
assert_eq!(379, reader.payload().len());
format!("{:?}", reader);
}
#[test]
fn padding() {
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
assert_eq!(2, reader.version()); | }
#[test]
fn padding_too_large() {
// 'padding' header-flag is on, and padding length (255) in final byte is larger than the
// buffer length. (Test data created by fuzzing.)
let data = [
0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x1, 0x0, 0xff, 0xa2, 0xa2, 0xa2, 0xa2,
0x90, 0x0, 0x0, 0x0, 0x0, 0xff,
];
assert!(RtpReader::new(&data).is_err());
}
#[test]
fn builder_juggle() {
let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap();
let buffer = reader.create_builder().build().unwrap();
assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET[..]);
}
#[test]
fn builder_juggle_extension() {
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
let buffer = reader.create_builder().build().unwrap();
assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET_WITH_EXTENSION[..]);
}
#[test]
fn builder_juggle_clear_payload() {
let new_payload = vec![];
let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap();
let buffer = reader.create_builder()
.payload(&new_payload).build().unwrap();
let expected = &TEST_RTP_PACKET_WITH_EXTENSION[0..(3 + 4) * 4];
assert_eq!(&buffer.as_slice()[..], expected);
}
#[test]
fn seq() {
assert!(Seq(0).precedes(Seq(1)));
assert!(Seq(0xffff).precedes(Seq(0)));
assert!(Seq(0) < Seq(1));
assert!(Seq(0xffff) < Seq(0));
assert_eq!(-1, Seq(0) - Seq(1));
assert_eq!(1, Seq(1) - Seq(0));
assert_eq!(0, Seq(1) - Seq(1));
assert_eq!(1, Seq(0) - Seq(0xffff));
assert_eq!(-1, Seq(0xffff) - Seq(0));
let mut it = (Seq(0xfffe)..Seq(1)).seq_iter();
assert_eq!(Seq(0xfffe), it.next().unwrap());
assert_eq!(Seq(0xffff), it.next().unwrap());
assert_eq!(Seq(0x0000), it.next().unwrap());
assert_eq!(None, it.next());
}
} | assert!(reader.padding().is_none());
assert!(reader.extension().is_some());
assert_eq!(0, reader.csrc_count());
assert_eq!(111, reader.payload_type()); | random_line_split |
lib.rs | //! Linear regression
//!
//! `linreg` calculates linear regressions for two dimensional measurements, also known as
//! [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression).
//!
//! Base for all calculations of linear regression is the simple model found in
//! https://en.wikipedia.org/wiki/Ordinary_least_squares#Simple_linear_regression_model.
//!
//! ## Example use
//!
//! ```rust
//! use linreg::{linear_regression, linear_regression_of};
//!
//! // Example 1: x and y values stored in two different vectors
//! let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
//! let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
//!
//!
//! // Example 2: x and y values stored as tuples
//! let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0),
//! (2.0, 4.0),
//! (3.0, 5.0),
//! (4.0, 4.0),
//! (5.0, 5.0)];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
//!
//!
//! // Example 3: directly operating on integer (converted to float as required)
//! let xs: Vec<u8> = vec![1, 2, 3, 4, 5];
//! let ys: Vec<u8> = vec![2, 4, 5, 4, 5];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
//! ```
#![no_std]
extern crate num_traits;
use num_traits::float::FloatCore;
#[cfg(test)]
#[macro_use]
extern crate std;
use core::iter::Iterator;
use core::iter::Sum;
use displaydoc::Display;
/// The kinds of errors that can occur when calculating a linear regression.
#[derive(Copy, Clone, Display, Debug, PartialEq)]
pub enum Error {
/// The slope is too steep to represent, approaching infinity.
TooSteep,
/// Failed to calculate mean.
///
/// This means the input was empty or had too many elements.
Mean,
/// Lengths of the inputs are different.
InputLenDif,
/// Can't compute linear regression of zero elements
NoElements,
}
/// Single-pass simple linear regression.
///
/// Similar to `lin_reg`, but does not require a mean value to be computed in advance and thus
/// does not require a second pass over the input data.
///
/// Returns `Ok((slope, intercept))` of the regression line.
///
/// # Errors
///
/// Errors if the number of elements is too large to be represented as `F` or
/// the slope is too steep to represent, approaching infinity.
pub fn lin_reg_imprecise<I, F>(xys: I) -> Result<(F, F), Error>
where
F: FloatCore,
I: Iterator<Item = (F, F)>,
{
details::lin_reg_imprecise_components(xys)?.finish()
}
/// A module containing the building parts of the main API.
/// You can use these if you want to have more control over the linear regression
mod details {
use super::Error;
use num_traits::float::FloatCore;
/// Low level linear regression primitive for pushing values instead of fetching them
/// from an iterator
#[derive(Debug)]
pub struct Accumulator<F: FloatCore> {
x_mean: F,
y_mean: F,
x_mul_y_mean: F,
x_squared_mean: F,
n: usize,
}
impl<F: FloatCore> Default for Accumulator<F> {
fn default() -> Self {
Self::new()
}
}
impl<F: FloatCore> Accumulator<F> {
pub fn new() -> Self {
Self {
x_mean: F::zero(),
y_mean: F::zero(),
x_mul_y_mean: F::zero(),
x_squared_mean: F::zero(),
n: 0,
}
}
pub fn push(&mut self, x: F, y: F) {
self.x_mean = self.x_mean + x;
self.y_mean = self.y_mean + y;
self.x_mul_y_mean = self.x_mul_y_mean + x * y;
self.x_squared_mean = self.x_squared_mean + x * x;
self.n += 1;
}
pub fn normalize(&mut self) -> Result<(), Error> {
match self.n {
1 => return Ok(()),
0 => return Err(Error::NoElements),
_ => {}
}
let n = F::from(self.n).ok_or(Error::Mean)?;
self.n = 1;
self.x_mean = self.x_mean / n;
self.y_mean = self.y_mean / n;
self.x_mul_y_mean = self.x_mul_y_mean / n;
self.x_squared_mean = self.x_squared_mean / n;
Ok(())
}
pub fn parts(mut self) -> Result<(F, F, F, F), Error> {
self.normalize()?;
let Self {
x_mean,
y_mean,
x_mul_y_mean,
x_squared_mean,
..
} = self;
Ok((x_mean, y_mean, x_mul_y_mean, x_squared_mean))
}
pub fn finish(self) -> Result<(F, F), Error> {
let (x_mean, y_mean, x_mul_y_mean, x_squared_mean) = self.parts()?;
let slope = (x_mul_y_mean - x_mean * y_mean) / (x_squared_mean - x_mean * x_mean);
let intercept = y_mean - slope * x_mean;
if slope.is_nan() {
return Err(Error::TooSteep);
}
Ok((slope, intercept))
}
}
pub fn lin_reg_imprecise_components<I, F>(xys: I) -> Result<Accumulator<F>, Error>
where
F: FloatCore,
I: Iterator<Item = (F, F)>,
{
let mut acc = Accumulator::new();
for (x, y) in xys {
acc.push(x, y);
}
acc.normalize()?;
Ok(acc)
}
}
/// Calculates a linear regression with a known mean.
///
/// Lower-level linear regression function. Assumes that `x_mean` and `y_mean`
/// have already been calculated. Returns `Error::DivByZero` if
///
/// * the slope is too steep to represent, approaching infinity.
///
/// Since there is a mean, this function assumes that `xs` and `ys` are both non-empty.
///
/// Returns `Ok((slope, intercept))` of the regression line.
pub fn lin_reg<I, F>(xys: I, x_mean: F, y_mean: F) -> Result<(F, F), Error>
where
I: Iterator<Item = (F, F)>,
F: FloatCore,
{
// SUM (x-mean(x))^2
let mut xxm2 = F::zero();
// SUM (x-mean(x)) (y-mean(y))
let mut xmym2 = F::zero();
for (x, y) in xys {
xxm2 = xxm2 + (x - x_mean) * (x - x_mean);
xmym2 = xmym2 + (x - x_mean) * (y - y_mean);
}
let slope = xmym2 / xxm2;
// we check for divide-by-zero after the fact
if slope.is_nan() {
return Err(Error::TooSteep);
}
let intercept = y_mean - slope * x_mean;
Ok((slope, intercept))
}
/// Two-pass simple linear regression from slices.
///
/// Calculates the linear regression from two slices, one for x- and one for y-values, by
/// calculating the mean and then calling `lin_reg`.
///
/// Returns `Ok(slope, intercept)` of the regression line.
///
/// # Errors
///
/// Returns an error if
///
/// * `xs` and `ys` differ in length
/// * `xs` or `ys` are empty
/// * the slope is too steep to represent, approaching infinity
/// * the number of elements cannot be represented as an `F`
///
pub fn linear_regression<X, Y, F>(xs: &[X], ys: &[Y]) -> Result<(F, F), Error>
where
X: Clone + Into<F>,
Y: Clone + Into<F>,
F: FloatCore + Sum,
{
if xs.len() != ys.len() {
return Err(Error::InputLenDif);
}
if xs.is_empty() |
let x_sum: F = xs.iter().cloned().map(Into::into).sum();
let n = F::from(xs.len()).ok_or(Error::Mean)?;
let x_mean = x_sum / n;
let y_sum: F = ys.iter().cloned().map(Into::into).sum();
let y_mean = y_sum / n;
lin_reg(
xs.iter()
.map(|i| i.clone().into())
.zip(ys.iter().map(|i| i.clone().into())),
x_mean,
y_mean,
)
}
/// Two-pass linear regression from tuples.
///
/// Calculates the linear regression from a slice of tuple values by first calculating the mean
/// before calling `lin_reg`.
///
/// Returns `Ok(slope, intercept)` of the regression line.
///
/// # Errors
///
/// Returns an error if
///
/// * `xys` is empty
/// * the slope is too steep to represent, approaching infinity
/// * the number of elements cannot be represented as an `F`
pub fn linear_regression_of<X, Y, F>(xys: &[(X, Y)]) -> Result<(F, F), Error>
where
X: Clone + Into<F>,
Y: Clone + Into<F>,
F: FloatCore,
{
if xys.is_empty() {
return Err(Error::Mean);
}
// We're handrolling the mean computation here, because our generic implementation can't handle tuples.
// If we ran the generic impl on each tuple field, that would be very cache inefficient
let n = F::from(xys.len()).ok_or(Error::Mean)?;
let (x_sum, y_sum) = xys
.iter()
.cloned()
.fold((F::zero(), F::zero()), |(sx, sy), (x, y)| {
(sx + x.into(), sy + y.into())
});
let x_mean = x_sum / n;
let y_mean = y_sum / n;
lin_reg(
xys.iter()
.map(|(x, y)| (x.clone().into(), y.clone().into())),
x_mean,
y_mean,
)
}
#[cfg(test)]
mod tests {
use std::vec::Vec;
use super::*;
#[test]
fn float_slices_regression() {
let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
}
#[test]
fn lin_reg_imprecises_vs_linreg() {
let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
let (x1, y1) = lin_reg_imprecise(xs.iter().cloned().zip(ys.iter().cloned())).unwrap();
let (x2, y2): (f64, f64) = linear_regression(&xs, &ys).unwrap();
assert!(f64::abs(x1 - x2) < 0.00001);
assert!(f64::abs(y1 - y2) < 0.00001);
}
#[test]
fn int_slices_regression() {
let xs: Vec<u8> = vec![1, 2, 3, 4, 5];
let ys: Vec<u8> = vec![2, 4, 5, 4, 5];
assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
}
#[test]
fn float_tuples_regression() {
let tuples: Vec<(f32, f32)> =
vec![(1.0, 2.0), (2.0, 4.0), (3.0, 5.0), (4.0, 4.0), (5.0, 5.0)];
assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
}
#[test]
fn int_tuples_regression() {
let tuples: Vec<(u32, u32)> = vec![(1, 2), (2, 4), (3, 5), (4, 4), (5, 5)];
assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
}
}
| {
return Err(Error::Mean);
} | conditional_block |
lib.rs | //! Linear regression
//!
//! `linreg` calculates linear regressions for two dimensional measurements, also known as
//! [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression).
//!
//! Base for all calculations of linear regression is the simple model found in
//! https://en.wikipedia.org/wiki/Ordinary_least_squares#Simple_linear_regression_model.
//!
//! ## Example use
//!
//! ```rust
//! use linreg::{linear_regression, linear_regression_of};
//!
//! // Example 1: x and y values stored in two different vectors
//! let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
//! let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
//!
//!
//! // Example 2: x and y values stored as tuples
//! let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0),
//! (2.0, 4.0),
//! (3.0, 5.0),
//! (4.0, 4.0),
//! (5.0, 5.0)];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
//!
//!
//! // Example 3: directly operating on integer (converted to float as required)
//! let xs: Vec<u8> = vec![1, 2, 3, 4, 5];
//! let ys: Vec<u8> = vec![2, 4, 5, 4, 5];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
//! ```
#![no_std]
extern crate num_traits;
use num_traits::float::FloatCore;
#[cfg(test)]
#[macro_use]
extern crate std;
use core::iter::Iterator;
use core::iter::Sum;
use displaydoc::Display;
/// The kinds of errors that can occur when calculating a linear regression.
#[derive(Copy, Clone, Display, Debug, PartialEq)]
pub enum Error {
/// The slope is too steep to represent, approaching infinity.
TooSteep,
/// Failed to calculate mean.
///
/// This means the input was empty or had too many elements.
Mean,
/// Lengths of the inputs are different.
InputLenDif,
/// Can't compute linear regression of zero elements
NoElements,
}
/// Single-pass simple linear regression.
///
/// Similar to `lin_reg`, but does not require a mean value to be computed in advance and thus
/// does not require a second pass over the input data.
///
/// Returns `Ok((slope, intercept))` of the regression line.
///
/// # Errors
///
/// Errors if the number of elements is too large to be represented as `F` or
/// the slope is too steep to represent, approaching infinity.
pub fn lin_reg_imprecise<I, F>(xys: I) -> Result<(F, F), Error>
where
F: FloatCore,
I: Iterator<Item = (F, F)>,
{
details::lin_reg_imprecise_components(xys)?.finish()
}
/// A module containing the building parts of the main API.
/// You can use these if you want to have more control over the linear regression
mod details {
use super::Error;
use num_traits::float::FloatCore;
/// Low level linear regression primitive for pushing values instead of fetching them
/// from an iterator
#[derive(Debug)]
pub struct Accumulator<F: FloatCore> {
x_mean: F,
y_mean: F,
x_mul_y_mean: F,
x_squared_mean: F,
n: usize,
}
impl<F: FloatCore> Default for Accumulator<F> {
fn default() -> Self {
Self::new()
}
}
impl<F: FloatCore> Accumulator<F> {
pub fn new() -> Self {
Self {
x_mean: F::zero(),
y_mean: F::zero(),
x_mul_y_mean: F::zero(),
x_squared_mean: F::zero(),
n: 0,
}
}
pub fn push(&mut self, x: F, y: F) {
self.x_mean = self.x_mean + x;
self.y_mean = self.y_mean + y;
self.x_mul_y_mean = self.x_mul_y_mean + x * y;
self.x_squared_mean = self.x_squared_mean + x * x;
self.n += 1;
}
pub fn normalize(&mut self) -> Result<(), Error> |
pub fn parts(mut self) -> Result<(F, F, F, F), Error> {
self.normalize()?;
let Self {
x_mean,
y_mean,
x_mul_y_mean,
x_squared_mean,
..
} = self;
Ok((x_mean, y_mean, x_mul_y_mean, x_squared_mean))
}
pub fn finish(self) -> Result<(F, F), Error> {
let (x_mean, y_mean, x_mul_y_mean, x_squared_mean) = self.parts()?;
let slope = (x_mul_y_mean - x_mean * y_mean) / (x_squared_mean - x_mean * x_mean);
let intercept = y_mean - slope * x_mean;
if slope.is_nan() {
return Err(Error::TooSteep);
}
Ok((slope, intercept))
}
}
pub fn lin_reg_imprecise_components<I, F>(xys: I) -> Result<Accumulator<F>, Error>
where
F: FloatCore,
I: Iterator<Item = (F, F)>,
{
let mut acc = Accumulator::new();
for (x, y) in xys {
acc.push(x, y);
}
acc.normalize()?;
Ok(acc)
}
}
/// Calculates a linear regression with a known mean.
///
/// Lower-level linear regression function. Assumes that `x_mean` and `y_mean`
/// have already been calculated. Returns `Error::DivByZero` if
///
/// * the slope is too steep to represent, approaching infinity.
///
/// Since there is a mean, this function assumes that `xs` and `ys` are both non-empty.
///
/// Returns `Ok((slope, intercept))` of the regression line.
pub fn lin_reg<I, F>(xys: I, x_mean: F, y_mean: F) -> Result<(F, F), Error>
where
I: Iterator<Item = (F, F)>,
F: FloatCore,
{
// SUM (x-mean(x))^2
let mut xxm2 = F::zero();
// SUM (x-mean(x)) (y-mean(y))
let mut xmym2 = F::zero();
for (x, y) in xys {
xxm2 = xxm2 + (x - x_mean) * (x - x_mean);
xmym2 = xmym2 + (x - x_mean) * (y - y_mean);
}
let slope = xmym2 / xxm2;
// we check for divide-by-zero after the fact
if slope.is_nan() {
return Err(Error::TooSteep);
}
let intercept = y_mean - slope * x_mean;
Ok((slope, intercept))
}
/// Two-pass simple linear regression from slices.
///
/// Calculates the linear regression from two slices, one for x- and one for y-values, by
/// calculating the mean and then calling `lin_reg`.
///
/// Returns `Ok(slope, intercept)` of the regression line.
///
/// # Errors
///
/// Returns an error if
///
/// * `xs` and `ys` differ in length
/// * `xs` or `ys` are empty
/// * the slope is too steep to represent, approaching infinity
/// * the number of elements cannot be represented as an `F`
///
pub fn linear_regression<X, Y, F>(xs: &[X], ys: &[Y]) -> Result<(F, F), Error>
where
X: Clone + Into<F>,
Y: Clone + Into<F>,
F: FloatCore + Sum,
{
if xs.len() != ys.len() {
return Err(Error::InputLenDif);
}
if xs.is_empty() {
return Err(Error::Mean);
}
let x_sum: F = xs.iter().cloned().map(Into::into).sum();
let n = F::from(xs.len()).ok_or(Error::Mean)?;
let x_mean = x_sum / n;
let y_sum: F = ys.iter().cloned().map(Into::into).sum();
let y_mean = y_sum / n;
lin_reg(
xs.iter()
.map(|i| i.clone().into())
.zip(ys.iter().map(|i| i.clone().into())),
x_mean,
y_mean,
)
}
/// Two-pass linear regression from tuples.
///
/// Calculates the linear regression from a slice of tuple values by first calculating the mean
/// before calling `lin_reg`.
///
/// Returns `Ok(slope, intercept)` of the regression line.
///
/// # Errors
///
/// Returns an error if
///
/// * `xys` is empty
/// * the slope is too steep to represent, approaching infinity
/// * the number of elements cannot be represented as an `F`
pub fn linear_regression_of<X, Y, F>(xys: &[(X, Y)]) -> Result<(F, F), Error>
where
X: Clone + Into<F>,
Y: Clone + Into<F>,
F: FloatCore,
{
if xys.is_empty() {
return Err(Error::Mean);
}
// We're handrolling the mean computation here, because our generic implementation can't handle tuples.
// If we ran the generic impl on each tuple field, that would be very cache inefficient
let n = F::from(xys.len()).ok_or(Error::Mean)?;
let (x_sum, y_sum) = xys
.iter()
.cloned()
.fold((F::zero(), F::zero()), |(sx, sy), (x, y)| {
(sx + x.into(), sy + y.into())
});
let x_mean = x_sum / n;
let y_mean = y_sum / n;
lin_reg(
xys.iter()
.map(|(x, y)| (x.clone().into(), y.clone().into())),
x_mean,
y_mean,
)
}
#[cfg(test)]
mod tests {
use std::vec::Vec;
use super::*;
#[test]
fn float_slices_regression() {
let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
}
#[test]
fn lin_reg_imprecises_vs_linreg() {
let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
let (x1, y1) = lin_reg_imprecise(xs.iter().cloned().zip(ys.iter().cloned())).unwrap();
let (x2, y2): (f64, f64) = linear_regression(&xs, &ys).unwrap();
assert!(f64::abs(x1 - x2) < 0.00001);
assert!(f64::abs(y1 - y2) < 0.00001);
}
#[test]
fn int_slices_regression() {
let xs: Vec<u8> = vec![1, 2, 3, 4, 5];
let ys: Vec<u8> = vec![2, 4, 5, 4, 5];
assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
}
#[test]
fn float_tuples_regression() {
let tuples: Vec<(f32, f32)> =
vec![(1.0, 2.0), (2.0, 4.0), (3.0, 5.0), (4.0, 4.0), (5.0, 5.0)];
assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
}
#[test]
fn int_tuples_regression() {
let tuples: Vec<(u32, u32)> = vec![(1, 2), (2, 4), (3, 5), (4, 4), (5, 5)];
assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
}
}
| {
match self.n {
1 => return Ok(()),
0 => return Err(Error::NoElements),
_ => {}
}
let n = F::from(self.n).ok_or(Error::Mean)?;
self.n = 1;
self.x_mean = self.x_mean / n;
self.y_mean = self.y_mean / n;
self.x_mul_y_mean = self.x_mul_y_mean / n;
self.x_squared_mean = self.x_squared_mean / n;
Ok(())
} | identifier_body |
lib.rs | //! Linear regression
//!
//! `linreg` calculates linear regressions for two dimensional measurements, also known as
//! [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression).
//!
//! Base for all calculations of linear regression is the simple model found in
//! https://en.wikipedia.org/wiki/Ordinary_least_squares#Simple_linear_regression_model.
//!
//! ## Example use
//!
//! ```rust
//! use linreg::{linear_regression, linear_regression_of};
//!
//! // Example 1: x and y values stored in two different vectors
//! let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
//! let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
//!
//!
//! // Example 2: x and y values stored as tuples
//! let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0),
//! (2.0, 4.0),
//! (3.0, 5.0),
//! (4.0, 4.0),
//! (5.0, 5.0)];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
//!
//!
//! // Example 3: directly operating on integer (converted to float as required)
//! let xs: Vec<u8> = vec![1, 2, 3, 4, 5];
//! let ys: Vec<u8> = vec![2, 4, 5, 4, 5];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
//! ```
#![no_std]
extern crate num_traits;
use num_traits::float::FloatCore;
#[cfg(test)]
#[macro_use]
extern crate std;
use core::iter::Iterator;
use core::iter::Sum;
use displaydoc::Display;
/// The kinds of errors that can occur when calculating a linear regression.
#[derive(Copy, Clone, Display, Debug, PartialEq)]
pub enum Error {
/// The slope is too steep to represent, approaching infinity.
TooSteep,
/// Failed to calculate mean.
///
/// This means the input was empty or had too many elements.
Mean,
/// Lengths of the inputs are different.
InputLenDif,
/// Can't compute linear regression of zero elements
NoElements,
}
/// Single-pass simple linear regression.
///
/// Similar to `lin_reg`, but does not require a mean value to be computed in advance and thus
/// does not require a second pass over the input data.
///
/// Returns `Ok((slope, intercept))` of the regression line.
///
/// # Errors
///
/// Errors if the number of elements is too large to be represented as `F` or
/// the slope is too steep to represent, approaching infinity.
pub fn lin_reg_imprecise<I, F>(xys: I) -> Result<(F, F), Error>
where
F: FloatCore,
I: Iterator<Item = (F, F)>,
{
details::lin_reg_imprecise_components(xys)?.finish()
}
/// A module containing the building parts of the main API.
/// You can use these if you want to have more control over the linear regression
mod details {
use super::Error;
use num_traits::float::FloatCore;
/// Low level linear regression primitive for pushing values instead of fetching them
/// from an iterator
#[derive(Debug)]
pub struct | <F: FloatCore> {
x_mean: F,
y_mean: F,
x_mul_y_mean: F,
x_squared_mean: F,
n: usize,
}
impl<F: FloatCore> Default for Accumulator<F> {
fn default() -> Self {
Self::new()
}
}
impl<F: FloatCore> Accumulator<F> {
pub fn new() -> Self {
Self {
x_mean: F::zero(),
y_mean: F::zero(),
x_mul_y_mean: F::zero(),
x_squared_mean: F::zero(),
n: 0,
}
}
pub fn push(&mut self, x: F, y: F) {
self.x_mean = self.x_mean + x;
self.y_mean = self.y_mean + y;
self.x_mul_y_mean = self.x_mul_y_mean + x * y;
self.x_squared_mean = self.x_squared_mean + x * x;
self.n += 1;
}
pub fn normalize(&mut self) -> Result<(), Error> {
match self.n {
1 => return Ok(()),
0 => return Err(Error::NoElements),
_ => {}
}
let n = F::from(self.n).ok_or(Error::Mean)?;
self.n = 1;
self.x_mean = self.x_mean / n;
self.y_mean = self.y_mean / n;
self.x_mul_y_mean = self.x_mul_y_mean / n;
self.x_squared_mean = self.x_squared_mean / n;
Ok(())
}
pub fn parts(mut self) -> Result<(F, F, F, F), Error> {
self.normalize()?;
let Self {
x_mean,
y_mean,
x_mul_y_mean,
x_squared_mean,
..
} = self;
Ok((x_mean, y_mean, x_mul_y_mean, x_squared_mean))
}
pub fn finish(self) -> Result<(F, F), Error> {
let (x_mean, y_mean, x_mul_y_mean, x_squared_mean) = self.parts()?;
let slope = (x_mul_y_mean - x_mean * y_mean) / (x_squared_mean - x_mean * x_mean);
let intercept = y_mean - slope * x_mean;
if slope.is_nan() {
return Err(Error::TooSteep);
}
Ok((slope, intercept))
}
}
pub fn lin_reg_imprecise_components<I, F>(xys: I) -> Result<Accumulator<F>, Error>
where
F: FloatCore,
I: Iterator<Item = (F, F)>,
{
let mut acc = Accumulator::new();
for (x, y) in xys {
acc.push(x, y);
}
acc.normalize()?;
Ok(acc)
}
}
/// Calculates a linear regression with a known mean.
///
/// Lower-level linear regression function. Assumes that `x_mean` and `y_mean`
/// have already been calculated. Returns `Error::DivByZero` if
///
/// * the slope is too steep to represent, approaching infinity.
///
/// Since there is a mean, this function assumes that `xs` and `ys` are both non-empty.
///
/// Returns `Ok((slope, intercept))` of the regression line.
pub fn lin_reg<I, F>(xys: I, x_mean: F, y_mean: F) -> Result<(F, F), Error>
where
I: Iterator<Item = (F, F)>,
F: FloatCore,
{
// SUM (x-mean(x))^2
let mut xxm2 = F::zero();
// SUM (x-mean(x)) (y-mean(y))
let mut xmym2 = F::zero();
for (x, y) in xys {
xxm2 = xxm2 + (x - x_mean) * (x - x_mean);
xmym2 = xmym2 + (x - x_mean) * (y - y_mean);
}
let slope = xmym2 / xxm2;
// we check for divide-by-zero after the fact
if slope.is_nan() {
return Err(Error::TooSteep);
}
let intercept = y_mean - slope * x_mean;
Ok((slope, intercept))
}
/// Two-pass simple linear regression from slices.
///
/// Calculates the linear regression from two slices, one for x- and one for y-values, by
/// calculating the mean and then calling `lin_reg`.
///
/// Returns `Ok(slope, intercept)` of the regression line.
///
/// # Errors
///
/// Returns an error if
///
/// * `xs` and `ys` differ in length
/// * `xs` or `ys` are empty
/// * the slope is too steep to represent, approaching infinity
/// * the number of elements cannot be represented as an `F`
///
pub fn linear_regression<X, Y, F>(xs: &[X], ys: &[Y]) -> Result<(F, F), Error>
where
X: Clone + Into<F>,
Y: Clone + Into<F>,
F: FloatCore + Sum,
{
if xs.len() != ys.len() {
return Err(Error::InputLenDif);
}
if xs.is_empty() {
return Err(Error::Mean);
}
let x_sum: F = xs.iter().cloned().map(Into::into).sum();
let n = F::from(xs.len()).ok_or(Error::Mean)?;
let x_mean = x_sum / n;
let y_sum: F = ys.iter().cloned().map(Into::into).sum();
let y_mean = y_sum / n;
lin_reg(
xs.iter()
.map(|i| i.clone().into())
.zip(ys.iter().map(|i| i.clone().into())),
x_mean,
y_mean,
)
}
/// Two-pass linear regression from tuples.
///
/// Calculates the linear regression from a slice of tuple values by first calculating the mean
/// before calling `lin_reg`.
///
/// Returns `Ok(slope, intercept)` of the regression line.
///
/// # Errors
///
/// Returns an error if
///
/// * `xys` is empty
/// * the slope is too steep to represent, approaching infinity
/// * the number of elements cannot be represented as an `F`
pub fn linear_regression_of<X, Y, F>(xys: &[(X, Y)]) -> Result<(F, F), Error>
where
X: Clone + Into<F>,
Y: Clone + Into<F>,
F: FloatCore,
{
if xys.is_empty() {
return Err(Error::Mean);
}
// We're handrolling the mean computation here, because our generic implementation can't handle tuples.
// If we ran the generic impl on each tuple field, that would be very cache inefficient
let n = F::from(xys.len()).ok_or(Error::Mean)?;
let (x_sum, y_sum) = xys
.iter()
.cloned()
.fold((F::zero(), F::zero()), |(sx, sy), (x, y)| {
(sx + x.into(), sy + y.into())
});
let x_mean = x_sum / n;
let y_mean = y_sum / n;
lin_reg(
xys.iter()
.map(|(x, y)| (x.clone().into(), y.clone().into())),
x_mean,
y_mean,
)
}
#[cfg(test)]
mod tests {
use std::vec::Vec;
use super::*;
#[test]
fn float_slices_regression() {
let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
}
#[test]
fn lin_reg_imprecises_vs_linreg() {
let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
let (x1, y1) = lin_reg_imprecise(xs.iter().cloned().zip(ys.iter().cloned())).unwrap();
let (x2, y2): (f64, f64) = linear_regression(&xs, &ys).unwrap();
assert!(f64::abs(x1 - x2) < 0.00001);
assert!(f64::abs(y1 - y2) < 0.00001);
}
#[test]
fn int_slices_regression() {
let xs: Vec<u8> = vec![1, 2, 3, 4, 5];
let ys: Vec<u8> = vec![2, 4, 5, 4, 5];
assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
}
#[test]
fn float_tuples_regression() {
let tuples: Vec<(f32, f32)> =
vec![(1.0, 2.0), (2.0, 4.0), (3.0, 5.0), (4.0, 4.0), (5.0, 5.0)];
assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
}
#[test]
fn int_tuples_regression() {
let tuples: Vec<(u32, u32)> = vec![(1, 2), (2, 4), (3, 5), (4, 4), (5, 5)];
assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
}
}
| Accumulator | identifier_name |
lib.rs | //! Linear regression
//!
//! `linreg` calculates linear regressions for two dimensional measurements, also known as
//! [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression).
//!
//! Base for all calculations of linear regression is the simple model found in
//! https://en.wikipedia.org/wiki/Ordinary_least_squares#Simple_linear_regression_model.
//!
//! ## Example use
//!
//! ```rust
//! use linreg::{linear_regression, linear_regression_of};
//!
//! // Example 1: x and y values stored in two different vectors
//! let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
//! let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
//!
//!
//! // Example 2: x and y values stored as tuples
//! let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0),
//! (2.0, 4.0),
//! (3.0, 5.0),
//! (4.0, 4.0),
//! (5.0, 5.0)];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
//!
//!
//! // Example 3: directly operating on integer (converted to float as required)
//! let xs: Vec<u8> = vec![1, 2, 3, 4, 5];
//! let ys: Vec<u8> = vec![2, 4, 5, 4, 5];
//!
//! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
//! ```
#![no_std]
extern crate num_traits;
use num_traits::float::FloatCore;
#[cfg(test)]
#[macro_use]
extern crate std;
use core::iter::Iterator;
use core::iter::Sum;
use displaydoc::Display;
/// The kinds of errors that can occur when calculating a linear regression.
#[derive(Copy, Clone, Display, Debug, PartialEq)]
pub enum Error {
/// The slope is too steep to represent, approaching infinity.
TooSteep,
/// Failed to calculate mean.
///
/// This means the input was empty or had too many elements.
Mean,
/// Lengths of the inputs are different.
InputLenDif,
/// Can't compute linear regression of zero elements
NoElements,
}
/// Single-pass simple linear regression.
///
/// Similar to `lin_reg`, but does not require a mean value to be computed in advance and thus
/// does not require a second pass over the input data.
///
/// Returns `Ok((slope, intercept))` of the regression line.
///
/// # Errors
///
/// Errors if the number of elements is too large to be represented as `F` or
/// the slope is too steep to represent, approaching infinity.
pub fn lin_reg_imprecise<I, F>(xys: I) -> Result<(F, F), Error>
where
F: FloatCore,
I: Iterator<Item = (F, F)>,
{
details::lin_reg_imprecise_components(xys)?.finish()
}
/// A module containing the building parts of the main API.
/// You can use these if you want to have more control over the linear regression
mod details {
use super::Error;
use num_traits::float::FloatCore;
/// Low level linear regression primitive for pushing values instead of fetching them
/// from an iterator
#[derive(Debug)]
pub struct Accumulator<F: FloatCore> {
x_mean: F,
y_mean: F,
x_mul_y_mean: F,
x_squared_mean: F,
n: usize,
}
impl<F: FloatCore> Default for Accumulator<F> {
fn default() -> Self {
Self::new()
}
}
impl<F: FloatCore> Accumulator<F> {
pub fn new() -> Self {
Self {
x_mean: F::zero(),
y_mean: F::zero(),
x_mul_y_mean: F::zero(),
x_squared_mean: F::zero(),
n: 0,
}
}
pub fn push(&mut self, x: F, y: F) {
self.x_mean = self.x_mean + x;
self.y_mean = self.y_mean + y;
self.x_mul_y_mean = self.x_mul_y_mean + x * y;
self.x_squared_mean = self.x_squared_mean + x * x;
self.n += 1;
}
pub fn normalize(&mut self) -> Result<(), Error> {
match self.n {
1 => return Ok(()),
0 => return Err(Error::NoElements),
_ => {}
}
let n = F::from(self.n).ok_or(Error::Mean)?;
self.n = 1;
self.x_mean = self.x_mean / n;
self.y_mean = self.y_mean / n;
self.x_mul_y_mean = self.x_mul_y_mean / n;
self.x_squared_mean = self.x_squared_mean / n;
Ok(())
}
pub fn parts(mut self) -> Result<(F, F, F, F), Error> {
self.normalize()?;
let Self {
x_mean,
y_mean,
x_mul_y_mean,
x_squared_mean,
..
} = self;
Ok((x_mean, y_mean, x_mul_y_mean, x_squared_mean))
}
pub fn finish(self) -> Result<(F, F), Error> {
let (x_mean, y_mean, x_mul_y_mean, x_squared_mean) = self.parts()?;
let slope = (x_mul_y_mean - x_mean * y_mean) / (x_squared_mean - x_mean * x_mean);
let intercept = y_mean - slope * x_mean;
if slope.is_nan() {
return Err(Error::TooSteep);
}
Ok((slope, intercept))
}
}
pub fn lin_reg_imprecise_components<I, F>(xys: I) -> Result<Accumulator<F>, Error>
where
F: FloatCore,
I: Iterator<Item = (F, F)>,
{
let mut acc = Accumulator::new();
for (x, y) in xys {
acc.push(x, y);
}
acc.normalize()?;
Ok(acc)
}
}
/// Calculates a linear regression with a known mean.
///
/// Lower-level linear regression function. Assumes that `x_mean` and `y_mean`
/// have already been calculated. Returns `Error::DivByZero` if
///
/// * the slope is too steep to represent, approaching infinity.
///
/// Since there is a mean, this function assumes that `xs` and `ys` are both non-empty.
///
/// Returns `Ok((slope, intercept))` of the regression line.
pub fn lin_reg<I, F>(xys: I, x_mean: F, y_mean: F) -> Result<(F, F), Error>
where
I: Iterator<Item = (F, F)>,
F: FloatCore,
{
// SUM (x-mean(x))^2
let mut xxm2 = F::zero();
// SUM (x-mean(x)) (y-mean(y))
let mut xmym2 = F::zero();
for (x, y) in xys {
xxm2 = xxm2 + (x - x_mean) * (x - x_mean);
xmym2 = xmym2 + (x - x_mean) * (y - y_mean);
}
let slope = xmym2 / xxm2;
// we check for divide-by-zero after the fact
if slope.is_nan() {
return Err(Error::TooSteep);
}
let intercept = y_mean - slope * x_mean;
Ok((slope, intercept))
}
/// Two-pass simple linear regression from slices.
///
/// Calculates the linear regression from two slices, one for x- and one for y-values, by
/// calculating the mean and then calling `lin_reg`.
///
/// Returns `Ok(slope, intercept)` of the regression line.
///
/// # Errors
///
/// Returns an error if
///
/// * `xs` and `ys` differ in length
/// * `xs` or `ys` are empty
/// * the slope is too steep to represent, approaching infinity
/// * the number of elements cannot be represented as an `F`
///
pub fn linear_regression<X, Y, F>(xs: &[X], ys: &[Y]) -> Result<(F, F), Error>
where
X: Clone + Into<F>,
Y: Clone + Into<F>,
F: FloatCore + Sum,
{
if xs.len() != ys.len() {
return Err(Error::InputLenDif);
}
if xs.is_empty() {
return Err(Error::Mean);
}
let x_sum: F = xs.iter().cloned().map(Into::into).sum();
let n = F::from(xs.len()).ok_or(Error::Mean)?;
let x_mean = x_sum / n;
let y_sum: F = ys.iter().cloned().map(Into::into).sum();
let y_mean = y_sum / n;
lin_reg(
xs.iter()
.map(|i| i.clone().into())
.zip(ys.iter().map(|i| i.clone().into())),
x_mean,
y_mean,
)
}
/// Two-pass linear regression from tuples.
///
/// Calculates the linear regression from a slice of tuple values by first calculating the mean
/// before calling `lin_reg`.
///
/// Returns `Ok(slope, intercept)` of the regression line.
///
/// # Errors
///
/// Returns an error if
///
/// * `xys` is empty
/// * the slope is too steep to represent, approaching infinity
/// * the number of elements cannot be represented as an `F`
pub fn linear_regression_of<X, Y, F>(xys: &[(X, Y)]) -> Result<(F, F), Error>
where
X: Clone + Into<F>,
Y: Clone + Into<F>,
F: FloatCore,
{
if xys.is_empty() {
return Err(Error::Mean);
}
// We're handrolling the mean computation here, because our generic implementation can't handle tuples. | .fold((F::zero(), F::zero()), |(sx, sy), (x, y)| {
(sx + x.into(), sy + y.into())
});
let x_mean = x_sum / n;
let y_mean = y_sum / n;
lin_reg(
xys.iter()
.map(|(x, y)| (x.clone().into(), y.clone().into())),
x_mean,
y_mean,
)
}
#[cfg(test)]
mod tests {
use std::vec::Vec;
use super::*;
#[test]
fn float_slices_regression() {
let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
}
#[test]
fn lin_reg_imprecises_vs_linreg() {
let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0];
let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0];
let (x1, y1) = lin_reg_imprecise(xs.iter().cloned().zip(ys.iter().cloned())).unwrap();
let (x2, y2): (f64, f64) = linear_regression(&xs, &ys).unwrap();
assert!(f64::abs(x1 - x2) < 0.00001);
assert!(f64::abs(y1 - y2) < 0.00001);
}
#[test]
fn int_slices_regression() {
let xs: Vec<u8> = vec![1, 2, 3, 4, 5];
let ys: Vec<u8> = vec![2, 4, 5, 4, 5];
assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys));
}
#[test]
fn float_tuples_regression() {
let tuples: Vec<(f32, f32)> =
vec![(1.0, 2.0), (2.0, 4.0), (3.0, 5.0), (4.0, 4.0), (5.0, 5.0)];
assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
}
#[test]
fn int_tuples_regression() {
let tuples: Vec<(u32, u32)> = vec![(1, 2), (2, 4), (3, 5), (4, 4), (5, 5)];
assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples));
}
} | // If we ran the generic impl on each tuple field, that would be very cache inefficient
let n = F::from(xys.len()).ok_or(Error::Mean)?;
let (x_sum, y_sum) = xys
.iter()
.cloned() | random_line_split |
rro_test.go | // Copyright 2020 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package java
import (
"reflect"
"strings"
"testing"
"android/soong/android"
"android/soong/shared"
)
func TestRuntimeResourceOverlay(t *testing.T) |
func TestRuntimeResourceOverlay_JavaDefaults(t *testing.T) {
ctx, config := testJava(t, `
java_defaults {
name: "rro_defaults",
theme: "default_theme",
product_specific: true,
aaptflags: ["--keep-raw-values"],
}
runtime_resource_overlay {
name: "foo_with_defaults",
defaults: ["rro_defaults"],
}
runtime_resource_overlay {
name: "foo_barebones",
}
`)
//
// RRO module with defaults
//
m := ctx.ModuleForTests("foo_with_defaults", "android_common")
// Check AAPT2 link flags.
aapt2Flags := strings.Split(m.Output("package-res.apk").Args["flags"], " ")
expectedFlags := []string{"--keep-raw-values", "--no-resource-deduping", "--no-resource-removal"}
absentFlags := android.RemoveListFromList(expectedFlags, aapt2Flags)
if len(absentFlags) > 0 {
t.Errorf("expected values, %q are missing in aapt2 link flags, %q", absentFlags, aapt2Flags)
}
// Check device location.
path := android.AndroidMkEntriesForTest(t, ctx, m.Module())[0].EntryMap["LOCAL_MODULE_PATH"]
expectedPath := []string{shared.JoinPath("out/target/product/test_device/product/overlay/default_theme")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", config, expectedPath, path)
//
// RRO module without defaults
//
m = ctx.ModuleForTests("foo_barebones", "android_common")
// Check AAPT2 link flags.
aapt2Flags = strings.Split(m.Output("package-res.apk").Args["flags"], " ")
unexpectedFlags := "--keep-raw-values"
if inList(unexpectedFlags, aapt2Flags) {
t.Errorf("unexpected value, %q is present in aapt2 link flags, %q", unexpectedFlags, aapt2Flags)
}
// Check device location.
path = android.AndroidMkEntriesForTest(t, ctx, m.Module())[0].EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/system/overlay")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", config, expectedPath, path)
}
func TestOverrideRuntimeResourceOverlay(t *testing.T) {
ctx, _ := testJava(t, `
runtime_resource_overlay {
name: "foo_overlay",
certificate: "platform",
product_specific: true,
sdk_version: "current",
}
override_runtime_resource_overlay {
name: "bar_overlay",
base: "foo_overlay",
package_name: "com.android.bar.overlay",
target_package_name: "com.android.bar",
}
`)
expectedVariants := []struct {
moduleName string
variantName string
apkPath string
overrides []string
targetVariant string
packageFlag string
targetPackageFlag string
}{
{
variantName: "android_common",
apkPath: "out/soong/target/product/test_device/product/overlay/foo_overlay.apk",
overrides: nil,
targetVariant: "android_common",
packageFlag: "",
targetPackageFlag: "",
},
{
variantName: "android_common_bar_overlay",
apkPath: "out/soong/target/product/test_device/product/overlay/bar_overlay.apk",
overrides: []string{"foo_overlay"},
targetVariant: "android_common_bar",
packageFlag: "com.android.bar.overlay",
targetPackageFlag: "com.android.bar",
},
}
for _, expected := range expectedVariants {
variant := ctx.ModuleForTests("foo_overlay", expected.variantName)
// Check the final apk name
variant.Output(expected.apkPath)
// Check if the overrides field values are correctly aggregated.
mod := variant.Module().(*RuntimeResourceOverlay)
if !reflect.DeepEqual(expected.overrides, mod.properties.Overrides) {
t.Errorf("Incorrect overrides property value, expected: %q, got: %q",
expected.overrides, mod.properties.Overrides)
}
// Check aapt2 flags.
res := variant.Output("package-res.apk")
aapt2Flags := res.Args["flags"]
checkAapt2LinkFlag(t, aapt2Flags, "rename-manifest-package", expected.packageFlag)
checkAapt2LinkFlag(t, aapt2Flags, "rename-resources-package", "")
checkAapt2LinkFlag(t, aapt2Flags, "rename-overlay-target-package", expected.targetPackageFlag)
}
}
func TestEnforceRRO_propagatesToDependencies(t *testing.T) {
testCases := []struct {
name string
enforceRROTargets []string
rroDirs map[string][]string
}{
{
name: "no RRO",
enforceRROTargets: nil,
rroDirs: map[string][]string{
"foo": nil,
"bar": nil,
},
},
{
name: "enforce RRO on all",
enforceRROTargets: []string{"*"},
rroDirs: map[string][]string{
"foo": {"product/vendor/blah/overlay/lib2/res"},
"bar": {"product/vendor/blah/overlay/lib2/res"},
},
},
{
name: "enforce RRO on foo",
enforceRROTargets: []string{"foo"},
rroDirs: map[string][]string{
"foo": {"product/vendor/blah/overlay/lib2/res"},
"bar": {"product/vendor/blah/overlay/lib2/res"},
},
},
}
productResourceOverlays := []string{
"product/vendor/blah/overlay",
}
fs := android.MockFS{
"lib2/res/values/strings.xml": nil,
"product/vendor/blah/overlay/lib2/res/values/strings.xml": nil,
}
bp := `
android_app {
name: "foo",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib"],
}
android_app {
name: "bar",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib"],
}
android_library {
name: "lib",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib2"],
}
android_library {
name: "lib2",
sdk_version: "current",
resource_dirs: ["lib2/res"],
}
`
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
result := android.GroupFixturePreparers(
PrepareForTestWithJavaDefaultModules,
PrepareForTestWithOverlayBuildComponents,
fs.AddToFixture(),
android.FixtureModifyProductVariables(func(variables android.FixtureProductVariables) {
variables.ProductResourceOverlays = productResourceOverlays
if testCase.enforceRROTargets != nil {
variables.EnforceRROTargets = testCase.enforceRROTargets
}
}),
).RunTestWithBp(t, bp)
modules := []string{"foo", "bar"}
for _, moduleName := range modules {
module := result.ModuleForTests(moduleName, "android_common")
mkEntries := android.AndroidMkEntriesForTest(t, result.TestContext, module.Module())[0]
actualRRODirs := mkEntries.EntryMap["LOCAL_SOONG_PRODUCT_RRO_DIRS"]
if !reflect.DeepEqual(actualRRODirs, testCase.rroDirs[moduleName]) {
t.Errorf("exected %s LOCAL_SOONG_PRODUCT_RRO_DIRS entry: %v\ngot:%q",
moduleName, testCase.rroDirs[moduleName], actualRRODirs)
}
}
})
}
}
| {
fs := android.MockFS{
"baz/res/res/values/strings.xml": nil,
"bar/res/res/values/strings.xml": nil,
}
bp := `
runtime_resource_overlay {
name: "foo",
certificate: "platform",
lineage: "lineage.bin",
product_specific: true,
static_libs: ["bar"],
resource_libs: ["baz"],
aaptflags: ["--keep-raw-values"],
}
runtime_resource_overlay {
name: "foo_themed",
certificate: "platform",
product_specific: true,
theme: "faza",
overrides: ["foo"],
}
android_library {
name: "bar",
resource_dirs: ["bar/res"],
}
android_app {
name: "baz",
sdk_version: "current",
resource_dirs: ["baz/res"],
}
`
result := android.GroupFixturePreparers(
PrepareForTestWithJavaDefaultModules,
PrepareForTestWithOverlayBuildComponents,
fs.AddToFixture(),
).RunTestWithBp(t, bp)
m := result.ModuleForTests("foo", "android_common")
// Check AAPT2 link flags.
aapt2Flags := m.Output("package-res.apk").Args["flags"]
expectedFlags := []string{"--keep-raw-values", "--no-resource-deduping", "--no-resource-removal"}
absentFlags := android.RemoveListFromList(expectedFlags, strings.Split(aapt2Flags, " "))
if len(absentFlags) > 0 {
t.Errorf("expected values, %q are missing in aapt2 link flags, %q", absentFlags, aapt2Flags)
}
// Check overlay.list output for static_libs dependency.
overlayList := android.PathsRelativeToTop(m.Output("aapt2/overlay.list").Inputs)
staticLibPackage := "out/soong/.intermediates/bar/android_common/package-res.apk"
if !inList(staticLibPackage, overlayList) {
t.Errorf("Stactic lib res package %q missing in overlay list: %q", staticLibPackage, overlayList)
}
// Check AAPT2 link flags for resource_libs dependency.
resourceLibFlag := "-I " + "out/soong/.intermediates/baz/android_common/package-res.apk"
if !strings.Contains(aapt2Flags, resourceLibFlag) {
t.Errorf("Resource lib flag %q missing in aapt2 link flags: %q", resourceLibFlag, aapt2Flags)
}
// Check cert signing flag.
signedApk := m.Output("signed/foo.apk")
lineageFlag := signedApk.Args["flags"]
expectedLineageFlag := "--lineage lineage.bin"
if expectedLineageFlag != lineageFlag {
t.Errorf("Incorrect signing lineage flags, expected: %q, got: %q", expectedLineageFlag, lineageFlag)
}
signingFlag := signedApk.Args["certificates"]
expected := "build/make/target/product/security/platform.x509.pem build/make/target/product/security/platform.pk8"
if expected != signingFlag {
t.Errorf("Incorrect signing flags, expected: %q, got: %q", expected, signingFlag)
}
androidMkEntries := android.AndroidMkEntriesForTest(t, result.TestContext, m.Module())[0]
path := androidMkEntries.EntryMap["LOCAL_CERTIFICATE"]
expectedPath := []string{"build/make/target/product/security/platform.x509.pem"}
if !reflect.DeepEqual(path, expectedPath) {
t.Errorf("Unexpected LOCAL_CERTIFICATE value: %v, expected: %v", path, expectedPath)
}
// Check device location.
path = androidMkEntries.EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/product/overlay")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", result.Config, expectedPath, path)
// A themed module has a different device location
m = result.ModuleForTests("foo_themed", "android_common")
androidMkEntries = android.AndroidMkEntriesForTest(t, result.TestContext, m.Module())[0]
path = androidMkEntries.EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/product/overlay/faza")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", result.Config, expectedPath, path)
overrides := androidMkEntries.EntryMap["LOCAL_OVERRIDES_PACKAGES"]
expectedOverrides := []string{"foo"}
if !reflect.DeepEqual(overrides, expectedOverrides) {
t.Errorf("Unexpected LOCAL_OVERRIDES_PACKAGES value: %v, expected: %v", overrides, expectedOverrides)
}
} | identifier_body |
rro_test.go | // Copyright 2020 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package java
import (
"reflect"
"strings"
"testing"
"android/soong/android"
"android/soong/shared"
)
func TestRuntimeResourceOverlay(t *testing.T) {
fs := android.MockFS{
"baz/res/res/values/strings.xml": nil,
"bar/res/res/values/strings.xml": nil,
}
bp := `
runtime_resource_overlay {
name: "foo",
certificate: "platform",
lineage: "lineage.bin",
product_specific: true,
static_libs: ["bar"],
resource_libs: ["baz"],
aaptflags: ["--keep-raw-values"],
}
runtime_resource_overlay {
name: "foo_themed",
certificate: "platform",
product_specific: true,
theme: "faza",
overrides: ["foo"],
}
android_library {
name: "bar",
resource_dirs: ["bar/res"],
}
android_app {
name: "baz",
sdk_version: "current",
resource_dirs: ["baz/res"],
}
`
result := android.GroupFixturePreparers(
PrepareForTestWithJavaDefaultModules,
PrepareForTestWithOverlayBuildComponents,
fs.AddToFixture(),
).RunTestWithBp(t, bp)
m := result.ModuleForTests("foo", "android_common")
// Check AAPT2 link flags.
aapt2Flags := m.Output("package-res.apk").Args["flags"]
expectedFlags := []string{"--keep-raw-values", "--no-resource-deduping", "--no-resource-removal"}
absentFlags := android.RemoveListFromList(expectedFlags, strings.Split(aapt2Flags, " "))
if len(absentFlags) > 0 {
t.Errorf("expected values, %q are missing in aapt2 link flags, %q", absentFlags, aapt2Flags)
}
// Check overlay.list output for static_libs dependency.
overlayList := android.PathsRelativeToTop(m.Output("aapt2/overlay.list").Inputs)
staticLibPackage := "out/soong/.intermediates/bar/android_common/package-res.apk"
if !inList(staticLibPackage, overlayList) {
t.Errorf("Stactic lib res package %q missing in overlay list: %q", staticLibPackage, overlayList)
}
// Check AAPT2 link flags for resource_libs dependency.
resourceLibFlag := "-I " + "out/soong/.intermediates/baz/android_common/package-res.apk"
if !strings.Contains(aapt2Flags, resourceLibFlag) {
t.Errorf("Resource lib flag %q missing in aapt2 link flags: %q", resourceLibFlag, aapt2Flags)
}
// Check cert signing flag.
signedApk := m.Output("signed/foo.apk")
lineageFlag := signedApk.Args["flags"]
expectedLineageFlag := "--lineage lineage.bin"
if expectedLineageFlag != lineageFlag {
t.Errorf("Incorrect signing lineage flags, expected: %q, got: %q", expectedLineageFlag, lineageFlag)
}
signingFlag := signedApk.Args["certificates"]
expected := "build/make/target/product/security/platform.x509.pem build/make/target/product/security/platform.pk8"
if expected != signingFlag {
t.Errorf("Incorrect signing flags, expected: %q, got: %q", expected, signingFlag)
}
androidMkEntries := android.AndroidMkEntriesForTest(t, result.TestContext, m.Module())[0]
path := androidMkEntries.EntryMap["LOCAL_CERTIFICATE"]
expectedPath := []string{"build/make/target/product/security/platform.x509.pem"}
if !reflect.DeepEqual(path, expectedPath) {
t.Errorf("Unexpected LOCAL_CERTIFICATE value: %v, expected: %v", path, expectedPath)
}
// Check device location.
path = androidMkEntries.EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/product/overlay")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", result.Config, expectedPath, path)
// A themed module has a different device location
m = result.ModuleForTests("foo_themed", "android_common")
androidMkEntries = android.AndroidMkEntriesForTest(t, result.TestContext, m.Module())[0]
path = androidMkEntries.EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/product/overlay/faza")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", result.Config, expectedPath, path)
overrides := androidMkEntries.EntryMap["LOCAL_OVERRIDES_PACKAGES"]
expectedOverrides := []string{"foo"}
if !reflect.DeepEqual(overrides, expectedOverrides) {
t.Errorf("Unexpected LOCAL_OVERRIDES_PACKAGES value: %v, expected: %v", overrides, expectedOverrides)
}
}
func TestRuntimeResourceOverlay_JavaDefaults(t *testing.T) {
ctx, config := testJava(t, `
java_defaults {
name: "rro_defaults",
theme: "default_theme",
product_specific: true,
aaptflags: ["--keep-raw-values"],
}
runtime_resource_overlay {
name: "foo_with_defaults",
defaults: ["rro_defaults"],
}
runtime_resource_overlay {
name: "foo_barebones",
}
`)
//
// RRO module with defaults
//
m := ctx.ModuleForTests("foo_with_defaults", "android_common")
// Check AAPT2 link flags.
aapt2Flags := strings.Split(m.Output("package-res.apk").Args["flags"], " ")
expectedFlags := []string{"--keep-raw-values", "--no-resource-deduping", "--no-resource-removal"}
absentFlags := android.RemoveListFromList(expectedFlags, aapt2Flags)
if len(absentFlags) > 0 {
t.Errorf("expected values, %q are missing in aapt2 link flags, %q", absentFlags, aapt2Flags)
}
// Check device location.
path := android.AndroidMkEntriesForTest(t, ctx, m.Module())[0].EntryMap["LOCAL_MODULE_PATH"]
expectedPath := []string{shared.JoinPath("out/target/product/test_device/product/overlay/default_theme")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", config, expectedPath, path)
//
// RRO module without defaults
//
m = ctx.ModuleForTests("foo_barebones", "android_common")
// Check AAPT2 link flags.
aapt2Flags = strings.Split(m.Output("package-res.apk").Args["flags"], " ")
unexpectedFlags := "--keep-raw-values"
if inList(unexpectedFlags, aapt2Flags) {
t.Errorf("unexpected value, %q is present in aapt2 link flags, %q", unexpectedFlags, aapt2Flags)
}
// Check device location.
path = android.AndroidMkEntriesForTest(t, ctx, m.Module())[0].EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/system/overlay")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", config, expectedPath, path)
}
func | (t *testing.T) {
ctx, _ := testJava(t, `
runtime_resource_overlay {
name: "foo_overlay",
certificate: "platform",
product_specific: true,
sdk_version: "current",
}
override_runtime_resource_overlay {
name: "bar_overlay",
base: "foo_overlay",
package_name: "com.android.bar.overlay",
target_package_name: "com.android.bar",
}
`)
expectedVariants := []struct {
moduleName string
variantName string
apkPath string
overrides []string
targetVariant string
packageFlag string
targetPackageFlag string
}{
{
variantName: "android_common",
apkPath: "out/soong/target/product/test_device/product/overlay/foo_overlay.apk",
overrides: nil,
targetVariant: "android_common",
packageFlag: "",
targetPackageFlag: "",
},
{
variantName: "android_common_bar_overlay",
apkPath: "out/soong/target/product/test_device/product/overlay/bar_overlay.apk",
overrides: []string{"foo_overlay"},
targetVariant: "android_common_bar",
packageFlag: "com.android.bar.overlay",
targetPackageFlag: "com.android.bar",
},
}
for _, expected := range expectedVariants {
variant := ctx.ModuleForTests("foo_overlay", expected.variantName)
// Check the final apk name
variant.Output(expected.apkPath)
// Check if the overrides field values are correctly aggregated.
mod := variant.Module().(*RuntimeResourceOverlay)
if !reflect.DeepEqual(expected.overrides, mod.properties.Overrides) {
t.Errorf("Incorrect overrides property value, expected: %q, got: %q",
expected.overrides, mod.properties.Overrides)
}
// Check aapt2 flags.
res := variant.Output("package-res.apk")
aapt2Flags := res.Args["flags"]
checkAapt2LinkFlag(t, aapt2Flags, "rename-manifest-package", expected.packageFlag)
checkAapt2LinkFlag(t, aapt2Flags, "rename-resources-package", "")
checkAapt2LinkFlag(t, aapt2Flags, "rename-overlay-target-package", expected.targetPackageFlag)
}
}
func TestEnforceRRO_propagatesToDependencies(t *testing.T) {
testCases := []struct {
name string
enforceRROTargets []string
rroDirs map[string][]string
}{
{
name: "no RRO",
enforceRROTargets: nil,
rroDirs: map[string][]string{
"foo": nil,
"bar": nil,
},
},
{
name: "enforce RRO on all",
enforceRROTargets: []string{"*"},
rroDirs: map[string][]string{
"foo": {"product/vendor/blah/overlay/lib2/res"},
"bar": {"product/vendor/blah/overlay/lib2/res"},
},
},
{
name: "enforce RRO on foo",
enforceRROTargets: []string{"foo"},
rroDirs: map[string][]string{
"foo": {"product/vendor/blah/overlay/lib2/res"},
"bar": {"product/vendor/blah/overlay/lib2/res"},
},
},
}
productResourceOverlays := []string{
"product/vendor/blah/overlay",
}
fs := android.MockFS{
"lib2/res/values/strings.xml": nil,
"product/vendor/blah/overlay/lib2/res/values/strings.xml": nil,
}
bp := `
android_app {
name: "foo",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib"],
}
android_app {
name: "bar",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib"],
}
android_library {
name: "lib",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib2"],
}
android_library {
name: "lib2",
sdk_version: "current",
resource_dirs: ["lib2/res"],
}
`
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
result := android.GroupFixturePreparers(
PrepareForTestWithJavaDefaultModules,
PrepareForTestWithOverlayBuildComponents,
fs.AddToFixture(),
android.FixtureModifyProductVariables(func(variables android.FixtureProductVariables) {
variables.ProductResourceOverlays = productResourceOverlays
if testCase.enforceRROTargets != nil {
variables.EnforceRROTargets = testCase.enforceRROTargets
}
}),
).RunTestWithBp(t, bp)
modules := []string{"foo", "bar"}
for _, moduleName := range modules {
module := result.ModuleForTests(moduleName, "android_common")
mkEntries := android.AndroidMkEntriesForTest(t, result.TestContext, module.Module())[0]
actualRRODirs := mkEntries.EntryMap["LOCAL_SOONG_PRODUCT_RRO_DIRS"]
if !reflect.DeepEqual(actualRRODirs, testCase.rroDirs[moduleName]) {
t.Errorf("exected %s LOCAL_SOONG_PRODUCT_RRO_DIRS entry: %v\ngot:%q",
moduleName, testCase.rroDirs[moduleName], actualRRODirs)
}
}
})
}
}
| TestOverrideRuntimeResourceOverlay | identifier_name |
rro_test.go | // Copyright 2020 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package java
import (
"reflect"
"strings"
"testing"
"android/soong/android"
"android/soong/shared"
)
func TestRuntimeResourceOverlay(t *testing.T) {
fs := android.MockFS{
"baz/res/res/values/strings.xml": nil,
"bar/res/res/values/strings.xml": nil,
}
bp := `
runtime_resource_overlay {
name: "foo",
certificate: "platform",
lineage: "lineage.bin",
product_specific: true,
static_libs: ["bar"],
resource_libs: ["baz"],
aaptflags: ["--keep-raw-values"],
}
runtime_resource_overlay {
name: "foo_themed",
certificate: "platform",
product_specific: true,
theme: "faza",
overrides: ["foo"], | }
android_app {
name: "baz",
sdk_version: "current",
resource_dirs: ["baz/res"],
}
`
result := android.GroupFixturePreparers(
PrepareForTestWithJavaDefaultModules,
PrepareForTestWithOverlayBuildComponents,
fs.AddToFixture(),
).RunTestWithBp(t, bp)
m := result.ModuleForTests("foo", "android_common")
// Check AAPT2 link flags.
aapt2Flags := m.Output("package-res.apk").Args["flags"]
expectedFlags := []string{"--keep-raw-values", "--no-resource-deduping", "--no-resource-removal"}
absentFlags := android.RemoveListFromList(expectedFlags, strings.Split(aapt2Flags, " "))
if len(absentFlags) > 0 {
t.Errorf("expected values, %q are missing in aapt2 link flags, %q", absentFlags, aapt2Flags)
}
// Check overlay.list output for static_libs dependency.
overlayList := android.PathsRelativeToTop(m.Output("aapt2/overlay.list").Inputs)
staticLibPackage := "out/soong/.intermediates/bar/android_common/package-res.apk"
if !inList(staticLibPackage, overlayList) {
t.Errorf("Stactic lib res package %q missing in overlay list: %q", staticLibPackage, overlayList)
}
// Check AAPT2 link flags for resource_libs dependency.
resourceLibFlag := "-I " + "out/soong/.intermediates/baz/android_common/package-res.apk"
if !strings.Contains(aapt2Flags, resourceLibFlag) {
t.Errorf("Resource lib flag %q missing in aapt2 link flags: %q", resourceLibFlag, aapt2Flags)
}
// Check cert signing flag.
signedApk := m.Output("signed/foo.apk")
lineageFlag := signedApk.Args["flags"]
expectedLineageFlag := "--lineage lineage.bin"
if expectedLineageFlag != lineageFlag {
t.Errorf("Incorrect signing lineage flags, expected: %q, got: %q", expectedLineageFlag, lineageFlag)
}
signingFlag := signedApk.Args["certificates"]
expected := "build/make/target/product/security/platform.x509.pem build/make/target/product/security/platform.pk8"
if expected != signingFlag {
t.Errorf("Incorrect signing flags, expected: %q, got: %q", expected, signingFlag)
}
androidMkEntries := android.AndroidMkEntriesForTest(t, result.TestContext, m.Module())[0]
path := androidMkEntries.EntryMap["LOCAL_CERTIFICATE"]
expectedPath := []string{"build/make/target/product/security/platform.x509.pem"}
if !reflect.DeepEqual(path, expectedPath) {
t.Errorf("Unexpected LOCAL_CERTIFICATE value: %v, expected: %v", path, expectedPath)
}
// Check device location.
path = androidMkEntries.EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/product/overlay")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", result.Config, expectedPath, path)
// A themed module has a different device location
m = result.ModuleForTests("foo_themed", "android_common")
androidMkEntries = android.AndroidMkEntriesForTest(t, result.TestContext, m.Module())[0]
path = androidMkEntries.EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/product/overlay/faza")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", result.Config, expectedPath, path)
overrides := androidMkEntries.EntryMap["LOCAL_OVERRIDES_PACKAGES"]
expectedOverrides := []string{"foo"}
if !reflect.DeepEqual(overrides, expectedOverrides) {
t.Errorf("Unexpected LOCAL_OVERRIDES_PACKAGES value: %v, expected: %v", overrides, expectedOverrides)
}
}
func TestRuntimeResourceOverlay_JavaDefaults(t *testing.T) {
ctx, config := testJava(t, `
java_defaults {
name: "rro_defaults",
theme: "default_theme",
product_specific: true,
aaptflags: ["--keep-raw-values"],
}
runtime_resource_overlay {
name: "foo_with_defaults",
defaults: ["rro_defaults"],
}
runtime_resource_overlay {
name: "foo_barebones",
}
`)
//
// RRO module with defaults
//
m := ctx.ModuleForTests("foo_with_defaults", "android_common")
// Check AAPT2 link flags.
aapt2Flags := strings.Split(m.Output("package-res.apk").Args["flags"], " ")
expectedFlags := []string{"--keep-raw-values", "--no-resource-deduping", "--no-resource-removal"}
absentFlags := android.RemoveListFromList(expectedFlags, aapt2Flags)
if len(absentFlags) > 0 {
t.Errorf("expected values, %q are missing in aapt2 link flags, %q", absentFlags, aapt2Flags)
}
// Check device location.
path := android.AndroidMkEntriesForTest(t, ctx, m.Module())[0].EntryMap["LOCAL_MODULE_PATH"]
expectedPath := []string{shared.JoinPath("out/target/product/test_device/product/overlay/default_theme")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", config, expectedPath, path)
//
// RRO module without defaults
//
m = ctx.ModuleForTests("foo_barebones", "android_common")
// Check AAPT2 link flags.
aapt2Flags = strings.Split(m.Output("package-res.apk").Args["flags"], " ")
unexpectedFlags := "--keep-raw-values"
if inList(unexpectedFlags, aapt2Flags) {
t.Errorf("unexpected value, %q is present in aapt2 link flags, %q", unexpectedFlags, aapt2Flags)
}
// Check device location.
path = android.AndroidMkEntriesForTest(t, ctx, m.Module())[0].EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/system/overlay")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", config, expectedPath, path)
}
func TestOverrideRuntimeResourceOverlay(t *testing.T) {
ctx, _ := testJava(t, `
runtime_resource_overlay {
name: "foo_overlay",
certificate: "platform",
product_specific: true,
sdk_version: "current",
}
override_runtime_resource_overlay {
name: "bar_overlay",
base: "foo_overlay",
package_name: "com.android.bar.overlay",
target_package_name: "com.android.bar",
}
`)
expectedVariants := []struct {
moduleName string
variantName string
apkPath string
overrides []string
targetVariant string
packageFlag string
targetPackageFlag string
}{
{
variantName: "android_common",
apkPath: "out/soong/target/product/test_device/product/overlay/foo_overlay.apk",
overrides: nil,
targetVariant: "android_common",
packageFlag: "",
targetPackageFlag: "",
},
{
variantName: "android_common_bar_overlay",
apkPath: "out/soong/target/product/test_device/product/overlay/bar_overlay.apk",
overrides: []string{"foo_overlay"},
targetVariant: "android_common_bar",
packageFlag: "com.android.bar.overlay",
targetPackageFlag: "com.android.bar",
},
}
for _, expected := range expectedVariants {
variant := ctx.ModuleForTests("foo_overlay", expected.variantName)
// Check the final apk name
variant.Output(expected.apkPath)
// Check if the overrides field values are correctly aggregated.
mod := variant.Module().(*RuntimeResourceOverlay)
if !reflect.DeepEqual(expected.overrides, mod.properties.Overrides) {
t.Errorf("Incorrect overrides property value, expected: %q, got: %q",
expected.overrides, mod.properties.Overrides)
}
// Check aapt2 flags.
res := variant.Output("package-res.apk")
aapt2Flags := res.Args["flags"]
checkAapt2LinkFlag(t, aapt2Flags, "rename-manifest-package", expected.packageFlag)
checkAapt2LinkFlag(t, aapt2Flags, "rename-resources-package", "")
checkAapt2LinkFlag(t, aapt2Flags, "rename-overlay-target-package", expected.targetPackageFlag)
}
}
func TestEnforceRRO_propagatesToDependencies(t *testing.T) {
testCases := []struct {
name string
enforceRROTargets []string
rroDirs map[string][]string
}{
{
name: "no RRO",
enforceRROTargets: nil,
rroDirs: map[string][]string{
"foo": nil,
"bar": nil,
},
},
{
name: "enforce RRO on all",
enforceRROTargets: []string{"*"},
rroDirs: map[string][]string{
"foo": {"product/vendor/blah/overlay/lib2/res"},
"bar": {"product/vendor/blah/overlay/lib2/res"},
},
},
{
name: "enforce RRO on foo",
enforceRROTargets: []string{"foo"},
rroDirs: map[string][]string{
"foo": {"product/vendor/blah/overlay/lib2/res"},
"bar": {"product/vendor/blah/overlay/lib2/res"},
},
},
}
productResourceOverlays := []string{
"product/vendor/blah/overlay",
}
fs := android.MockFS{
"lib2/res/values/strings.xml": nil,
"product/vendor/blah/overlay/lib2/res/values/strings.xml": nil,
}
bp := `
android_app {
name: "foo",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib"],
}
android_app {
name: "bar",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib"],
}
android_library {
name: "lib",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib2"],
}
android_library {
name: "lib2",
sdk_version: "current",
resource_dirs: ["lib2/res"],
}
`
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
result := android.GroupFixturePreparers(
PrepareForTestWithJavaDefaultModules,
PrepareForTestWithOverlayBuildComponents,
fs.AddToFixture(),
android.FixtureModifyProductVariables(func(variables android.FixtureProductVariables) {
variables.ProductResourceOverlays = productResourceOverlays
if testCase.enforceRROTargets != nil {
variables.EnforceRROTargets = testCase.enforceRROTargets
}
}),
).RunTestWithBp(t, bp)
modules := []string{"foo", "bar"}
for _, moduleName := range modules {
module := result.ModuleForTests(moduleName, "android_common")
mkEntries := android.AndroidMkEntriesForTest(t, result.TestContext, module.Module())[0]
actualRRODirs := mkEntries.EntryMap["LOCAL_SOONG_PRODUCT_RRO_DIRS"]
if !reflect.DeepEqual(actualRRODirs, testCase.rroDirs[moduleName]) {
t.Errorf("exected %s LOCAL_SOONG_PRODUCT_RRO_DIRS entry: %v\ngot:%q",
moduleName, testCase.rroDirs[moduleName], actualRRODirs)
}
}
})
}
} | }
android_library {
name: "bar",
resource_dirs: ["bar/res"], | random_line_split |
rro_test.go | // Copyright 2020 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package java
import (
"reflect"
"strings"
"testing"
"android/soong/android"
"android/soong/shared"
)
func TestRuntimeResourceOverlay(t *testing.T) {
fs := android.MockFS{
"baz/res/res/values/strings.xml": nil,
"bar/res/res/values/strings.xml": nil,
}
bp := `
runtime_resource_overlay {
name: "foo",
certificate: "platform",
lineage: "lineage.bin",
product_specific: true,
static_libs: ["bar"],
resource_libs: ["baz"],
aaptflags: ["--keep-raw-values"],
}
runtime_resource_overlay {
name: "foo_themed",
certificate: "platform",
product_specific: true,
theme: "faza",
overrides: ["foo"],
}
android_library {
name: "bar",
resource_dirs: ["bar/res"],
}
android_app {
name: "baz",
sdk_version: "current",
resource_dirs: ["baz/res"],
}
`
result := android.GroupFixturePreparers(
PrepareForTestWithJavaDefaultModules,
PrepareForTestWithOverlayBuildComponents,
fs.AddToFixture(),
).RunTestWithBp(t, bp)
m := result.ModuleForTests("foo", "android_common")
// Check AAPT2 link flags.
aapt2Flags := m.Output("package-res.apk").Args["flags"]
expectedFlags := []string{"--keep-raw-values", "--no-resource-deduping", "--no-resource-removal"}
absentFlags := android.RemoveListFromList(expectedFlags, strings.Split(aapt2Flags, " "))
if len(absentFlags) > 0 {
t.Errorf("expected values, %q are missing in aapt2 link flags, %q", absentFlags, aapt2Flags)
}
// Check overlay.list output for static_libs dependency.
overlayList := android.PathsRelativeToTop(m.Output("aapt2/overlay.list").Inputs)
staticLibPackage := "out/soong/.intermediates/bar/android_common/package-res.apk"
if !inList(staticLibPackage, overlayList) {
t.Errorf("Stactic lib res package %q missing in overlay list: %q", staticLibPackage, overlayList)
}
// Check AAPT2 link flags for resource_libs dependency.
resourceLibFlag := "-I " + "out/soong/.intermediates/baz/android_common/package-res.apk"
if !strings.Contains(aapt2Flags, resourceLibFlag) {
t.Errorf("Resource lib flag %q missing in aapt2 link flags: %q", resourceLibFlag, aapt2Flags)
}
// Check cert signing flag.
signedApk := m.Output("signed/foo.apk")
lineageFlag := signedApk.Args["flags"]
expectedLineageFlag := "--lineage lineage.bin"
if expectedLineageFlag != lineageFlag {
t.Errorf("Incorrect signing lineage flags, expected: %q, got: %q", expectedLineageFlag, lineageFlag)
}
signingFlag := signedApk.Args["certificates"]
expected := "build/make/target/product/security/platform.x509.pem build/make/target/product/security/platform.pk8"
if expected != signingFlag {
t.Errorf("Incorrect signing flags, expected: %q, got: %q", expected, signingFlag)
}
androidMkEntries := android.AndroidMkEntriesForTest(t, result.TestContext, m.Module())[0]
path := androidMkEntries.EntryMap["LOCAL_CERTIFICATE"]
expectedPath := []string{"build/make/target/product/security/platform.x509.pem"}
if !reflect.DeepEqual(path, expectedPath) |
// Check device location.
path = androidMkEntries.EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/product/overlay")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", result.Config, expectedPath, path)
// A themed module has a different device location
m = result.ModuleForTests("foo_themed", "android_common")
androidMkEntries = android.AndroidMkEntriesForTest(t, result.TestContext, m.Module())[0]
path = androidMkEntries.EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/product/overlay/faza")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", result.Config, expectedPath, path)
overrides := androidMkEntries.EntryMap["LOCAL_OVERRIDES_PACKAGES"]
expectedOverrides := []string{"foo"}
if !reflect.DeepEqual(overrides, expectedOverrides) {
t.Errorf("Unexpected LOCAL_OVERRIDES_PACKAGES value: %v, expected: %v", overrides, expectedOverrides)
}
}
func TestRuntimeResourceOverlay_JavaDefaults(t *testing.T) {
ctx, config := testJava(t, `
java_defaults {
name: "rro_defaults",
theme: "default_theme",
product_specific: true,
aaptflags: ["--keep-raw-values"],
}
runtime_resource_overlay {
name: "foo_with_defaults",
defaults: ["rro_defaults"],
}
runtime_resource_overlay {
name: "foo_barebones",
}
`)
//
// RRO module with defaults
//
m := ctx.ModuleForTests("foo_with_defaults", "android_common")
// Check AAPT2 link flags.
aapt2Flags := strings.Split(m.Output("package-res.apk").Args["flags"], " ")
expectedFlags := []string{"--keep-raw-values", "--no-resource-deduping", "--no-resource-removal"}
absentFlags := android.RemoveListFromList(expectedFlags, aapt2Flags)
if len(absentFlags) > 0 {
t.Errorf("expected values, %q are missing in aapt2 link flags, %q", absentFlags, aapt2Flags)
}
// Check device location.
path := android.AndroidMkEntriesForTest(t, ctx, m.Module())[0].EntryMap["LOCAL_MODULE_PATH"]
expectedPath := []string{shared.JoinPath("out/target/product/test_device/product/overlay/default_theme")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", config, expectedPath, path)
//
// RRO module without defaults
//
m = ctx.ModuleForTests("foo_barebones", "android_common")
// Check AAPT2 link flags.
aapt2Flags = strings.Split(m.Output("package-res.apk").Args["flags"], " ")
unexpectedFlags := "--keep-raw-values"
if inList(unexpectedFlags, aapt2Flags) {
t.Errorf("unexpected value, %q is present in aapt2 link flags, %q", unexpectedFlags, aapt2Flags)
}
// Check device location.
path = android.AndroidMkEntriesForTest(t, ctx, m.Module())[0].EntryMap["LOCAL_MODULE_PATH"]
expectedPath = []string{shared.JoinPath("out/target/product/test_device/system/overlay")}
android.AssertStringPathsRelativeToTopEquals(t, "LOCAL_MODULE_PATH", config, expectedPath, path)
}
func TestOverrideRuntimeResourceOverlay(t *testing.T) {
ctx, _ := testJava(t, `
runtime_resource_overlay {
name: "foo_overlay",
certificate: "platform",
product_specific: true,
sdk_version: "current",
}
override_runtime_resource_overlay {
name: "bar_overlay",
base: "foo_overlay",
package_name: "com.android.bar.overlay",
target_package_name: "com.android.bar",
}
`)
expectedVariants := []struct {
moduleName string
variantName string
apkPath string
overrides []string
targetVariant string
packageFlag string
targetPackageFlag string
}{
{
variantName: "android_common",
apkPath: "out/soong/target/product/test_device/product/overlay/foo_overlay.apk",
overrides: nil,
targetVariant: "android_common",
packageFlag: "",
targetPackageFlag: "",
},
{
variantName: "android_common_bar_overlay",
apkPath: "out/soong/target/product/test_device/product/overlay/bar_overlay.apk",
overrides: []string{"foo_overlay"},
targetVariant: "android_common_bar",
packageFlag: "com.android.bar.overlay",
targetPackageFlag: "com.android.bar",
},
}
for _, expected := range expectedVariants {
variant := ctx.ModuleForTests("foo_overlay", expected.variantName)
// Check the final apk name
variant.Output(expected.apkPath)
// Check if the overrides field values are correctly aggregated.
mod := variant.Module().(*RuntimeResourceOverlay)
if !reflect.DeepEqual(expected.overrides, mod.properties.Overrides) {
t.Errorf("Incorrect overrides property value, expected: %q, got: %q",
expected.overrides, mod.properties.Overrides)
}
// Check aapt2 flags.
res := variant.Output("package-res.apk")
aapt2Flags := res.Args["flags"]
checkAapt2LinkFlag(t, aapt2Flags, "rename-manifest-package", expected.packageFlag)
checkAapt2LinkFlag(t, aapt2Flags, "rename-resources-package", "")
checkAapt2LinkFlag(t, aapt2Flags, "rename-overlay-target-package", expected.targetPackageFlag)
}
}
func TestEnforceRRO_propagatesToDependencies(t *testing.T) {
testCases := []struct {
name string
enforceRROTargets []string
rroDirs map[string][]string
}{
{
name: "no RRO",
enforceRROTargets: nil,
rroDirs: map[string][]string{
"foo": nil,
"bar": nil,
},
},
{
name: "enforce RRO on all",
enforceRROTargets: []string{"*"},
rroDirs: map[string][]string{
"foo": {"product/vendor/blah/overlay/lib2/res"},
"bar": {"product/vendor/blah/overlay/lib2/res"},
},
},
{
name: "enforce RRO on foo",
enforceRROTargets: []string{"foo"},
rroDirs: map[string][]string{
"foo": {"product/vendor/blah/overlay/lib2/res"},
"bar": {"product/vendor/blah/overlay/lib2/res"},
},
},
}
productResourceOverlays := []string{
"product/vendor/blah/overlay",
}
fs := android.MockFS{
"lib2/res/values/strings.xml": nil,
"product/vendor/blah/overlay/lib2/res/values/strings.xml": nil,
}
bp := `
android_app {
name: "foo",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib"],
}
android_app {
name: "bar",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib"],
}
android_library {
name: "lib",
sdk_version: "current",
resource_dirs: [],
static_libs: ["lib2"],
}
android_library {
name: "lib2",
sdk_version: "current",
resource_dirs: ["lib2/res"],
}
`
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
result := android.GroupFixturePreparers(
PrepareForTestWithJavaDefaultModules,
PrepareForTestWithOverlayBuildComponents,
fs.AddToFixture(),
android.FixtureModifyProductVariables(func(variables android.FixtureProductVariables) {
variables.ProductResourceOverlays = productResourceOverlays
if testCase.enforceRROTargets != nil {
variables.EnforceRROTargets = testCase.enforceRROTargets
}
}),
).RunTestWithBp(t, bp)
modules := []string{"foo", "bar"}
for _, moduleName := range modules {
module := result.ModuleForTests(moduleName, "android_common")
mkEntries := android.AndroidMkEntriesForTest(t, result.TestContext, module.Module())[0]
actualRRODirs := mkEntries.EntryMap["LOCAL_SOONG_PRODUCT_RRO_DIRS"]
if !reflect.DeepEqual(actualRRODirs, testCase.rroDirs[moduleName]) {
t.Errorf("exected %s LOCAL_SOONG_PRODUCT_RRO_DIRS entry: %v\ngot:%q",
moduleName, testCase.rroDirs[moduleName], actualRRODirs)
}
}
})
}
}
| {
t.Errorf("Unexpected LOCAL_CERTIFICATE value: %v, expected: %v", path, expectedPath)
} | conditional_block |
youtubeService.go | package youtubeservice
import (
"errors"
"net/url"
"sort"
"strings"
"code.mine/dating_server/gateway"
"code.mine/dating_server/mapping"
"code.mine/dating_server/repo"
"code.mine/dating_server/types"
"github.com/agnivade/levenshtein"
stemmer "github.com/agonopol/go-stem"
"github.com/bbalet/stopwords"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
)
const (
developerKey = "AIzaSyBhFXscTPZr892Uj5h2wRghkFAqTPYtcEg"
)
// YoutubeController -
type YoutubeController struct {
gateway gateway.Gateway
repo repo.Repo
}
// YOUTUBE ID IS IN THE URL
// GetYoutubeVideoDetails -
func (c *YoutubeController) GetYoutubeVideoDetails(videoURL *string) (*types.UserVideoItem, error) {
// https://www.googleapis.com/youtube/v3/videos?id=D95qIe5pLuA&key=AIzaSyBhFXscTPZr892Uj5h2wRghkFAqTPYtcEg&part=snippet,statistics,topicDetails
if videoURL == nil {
return nil, errors.New("Need url")
}
// attempt to get the video is
u, err := url.Parse(mapping.StrToV(videoURL))
if err != nil {
return nil, err
}
var videoID *string
values := u.Query()["id"]
if len(values) == 0 {
videoID, err = c.gateway.GetYoutubeVideoID(videoURL)
if err != nil {
return nil, errors.New("could not get video ID")
}
}
response, err := c.gateway.GetYoutubeVideoDetails(videoID)
if err != nil {
return nil, err
}
// baseURL := "https://www.googleapis.com/youtube/v3/videos?"
// url := fmt.Sprintf("%s&id=%s&key=%s&part=snippet,statistics,topicDetails", baseURL, *videoID, developerKey)
// req, err := http.NewRequest("GET", url, nil)
// if err != nil {
// return nil, err
// }
// response := &types.UserVideoItem{}
// client := &http.Client{}
// resp, err := client.Do(req)
// if err != nil {
// return nil, err
// }
// err = json.NewDecoder(resp.Body).Decode(&response)
// if err != nil {
// return nil, err
// }
// if response == nil {
// return nil, errors.New("response is nil")
// }
return response, nil
}
// GetYoutubeVideoID -
func (c *YoutubeController) | (youtubeURL *string) (*string, error) {
if youtubeURL == nil {
return nil, errors.New("Need url")
}
_, err := c.gateway.GetYoutubeVideoID(youtubeURL)
if err != nil {
return nil, err
}
return nil, err
// baseURL := "https://www.googleapis.com/youtube/v3/search?"
// url := fmt.Sprintf("%spart=%s&maxResults=1&q=%s&type=video&key=%s", baseURL, "snippet", *youtubeURL, developerKey)
// req, err := http.NewRequest("GET", url, nil)
// if err != nil {
// return nil, err
// }
// response := &types.VideoIDResponse{}
// client := &http.Client{}
// resp, err := client.Do(req)
// if err != nil {
// return nil, err
// }
// err = json.NewDecoder(resp.Body).Decode(&response)
// if err != nil {
// return nil, err
// }
// if response == nil {
// return nil, errors.New("response is nil")
// }
// if len(response.Items) < 1 {
// return nil, errors.New("response.items is empty")
// }
// videoResponse := response.Items[0]
// if videoResponse.ID == nil {
// return nil, errors.New("item ID is nil")
// }
// if videoResponse.ID.VideoID == nil {
// return nil, errors.New("videoID is nil")
// }
// return videoResponse.ID.VideoID, nil
}
// https://blog.codecentric.de/en/2017/08/gomock-tutorial/
// GetEligibleUsers -
func (c *YoutubeController) GetEligibleUsers(user *types.User) ([]*types.User, error) {
// city := user.City
// partnerGender := user.PartnerGender
// get all eligible uuids.
// c, err := DB.GetCollection("users")
// if err != nil {
// return nil, err
// }
filters := &bson.M{
"zipcode": user.Zipcode,
"partnerGender": user.Gender, // ppl looking for my gender
"gender": user.PartnerGender, // their gender is what I want.
}
options := options.Find()
options.SetLimit(int64(50000))
users, err := c.repo.GetUsersByFilter(filters, options)
if err != nil {
return nil, err
}
// cursor, err := c.Find(context.Background(), filters, options)
// users := []*types.User{}
// if err = cursor.All(context.Background(), &users); err != nil {
// return nil, err
// }
return users, nil
}
// TODO – add in title words to tags
// RankAndMatchYoutubeVideos -
func (c *YoutubeController) RankAndMatchYoutubeVideos(user *types.User) ([]*types.User, error) {
if user == nil {
return nil, errors.New("user is nil")
}
// make sure user cant get themselves
users, err := c.GetEligibleUsers(user)
if err != nil {
return nil, err
}
userUUIDToUser := map[string]*types.User{}
userUUIDs := []*string{}
for _, user := range users {
userUUIDs = append(userUUIDs, user.UUID)
userUUIDToUser[*user.UUID] = user
}
// get video by user
userVideos, err := c.repo.GetVideosByUserUUID(user.UUID)
if err != nil {
return nil, err
}
youtubeVideoCandidates, err := c.repo.GetVideosByAllUserUUIDs(userUUIDs)
if err != nil {
return nil, err
}
sortedVideoScoreList := c.GetSortedVideoList(userVideos, youtubeVideoCandidates)
processedUUIDs := map[string]bool{}
sortedUsers := []*types.User{}
for _, videoScore := range sortedVideoScoreList {
if !processedUUIDs[*videoScore.Video.UserUUID] {
processedUUIDs[*videoScore.Video.UserUUID] = true
sortedUsers = append(sortedUsers, userUUIDToUser[*videoScore.Video.UserUUID])
}
}
if len(sortedUsers) <= 3 {
return sortedUsers, nil
}
return sortedUsers[:3], nil
}
// make sure this is a set
func getTagsFromVideo(video *types.UserVideoItem) []string {
userTags := []string{}
for _, tag := range video.Items[0].Snippet.Tags {
words := strings.Split(tag, " ")
for _, w := range words {
userTags = append(userTags, w)
}
}
titleWords := strings.Split(video.Items[0].Snippet.Title, " ")
for _, w := range titleWords {
userTags = append(userTags, w)
}
descriptionWords := strings.Split(video.Items[0].Snippet.Description, " ")
for _, w := range descriptionWords {
userTags = append(userTags, w)
}
freeFormText := strings.Join(userTags, " ")
freeFormText = CleanString(freeFormText)
userTags = strings.Split(freeFormText, " ")
processedWords := []string{}
for _, w := range userTags {
w = GetStemOfWord(w)
w = strings.ToLower(w)
if len(w) > 1 {
processedWords = append(processedWords, w)
}
}
return processedWords
}
// Score -
type Score struct {
Score float64
Video *types.UserVideoItem
}
// we want to do this by video so we can detect a particularly strong match
// between videos
// you can also match by most common words – stem the word
// you can also use this as a dictionary
// https://gist.github.com/dgp/1b24bf2961521bd75d6c
// https://techpostplus.com/youtube-video-categories-list-faqs-and-solutions/#YouTube_video_category_name_and_id_list
// match similar categories
// GetSortedVideoList -
func (c *YoutubeController) GetSortedVideoList(
userVideos []*types.UserVideoItem,
candidateVideos []*types.UserVideoItem,
) []Score {
scores := []Score{}
for _, userVideo := range userVideos {
// need to do CleanString somehow on these tags
userTags := getTagsFromVideo(userVideo)
userCategoryID := userVideo.Items[0].Snippet.CategoryID
var totalScore float64
// for every video in the candidate videos, check how well they match up against
// the user tags
for _, video := range candidateVideos {
videoTags := getTagsFromVideo(video)
wordFrequencyCandidateVideos := map[string]int{}
for _, tag := range videoTags {
wordFrequencyCandidateVideos[tag]++
// start off with 5 as an arbitrary value
// remember to clean text of stop words!
// you can also prob do better here with different levels of word frequnecy
}
for _, tag := range userTags {
if wordFrequencyCandidateVideos[tag] >= 6 {
totalScore += 6
} else if wordFrequencyCandidateVideos[tag] >= 4 {
totalScore += 4
} else if wordFrequencyCandidateVideos[tag] >= 2 {
totalScore += 2
}
}
// number of similar words
similarWordsScore := calculateDistanceScoreBetweenTags(userTags, videoTags)
totalScore += float64(similarWordsScore) * 2
categoryScore := calculateCategoryScore(userCategoryID, video.Items[0].Snippet.CategoryID)
totalScore += categoryScore * 5
videoScore := Score{
Score: totalScore,
Video: video,
}
scores = append(scores, videoScore)
}
}
sort.Slice(scores, func(i, j int) bool {
return scores[i].Score < scores[j].Score
})
return scores
}
// you could have secondary scores as well
// + scores of if they're both in each others slices vs just 1
func calculateCategoryScore(userCategoryID int, candidateCategoryID int) float64 {
if VideoCategoryMap[userCategoryID] == nil || VideoCategoryMap[candidateCategoryID] == nil {
return 0
}
containsCandidateCategory := false
containsUserCategory := false
for _, category := range VideoCategoryMap[userCategoryID] {
if category == candidateCategoryID {
containsCandidateCategory = true
}
}
for _, category := range VideoCategoryMap[candidateCategoryID] {
if category == userCategoryID {
containsUserCategory = true
}
}
if containsCandidateCategory && containsUserCategory {
return 2
}
if containsCandidateCategory && containsUserCategory {
return 1
}
return 0
}
// I think you should calculate by word, not by tag
// may want to stem each word in the phrase too
// you can also check if there's like 5 words or something that are in common that are more then 5 chars long
// if so you can give one particular score
// if not, then do the fuzzy matching.
// add in title, description
func calculateDistanceScoreBetweenTags(userTags, videoTags []string) int {
count := 0
for _, userTag := range userTags {
for _, videoTag := range videoTags {
distance := float64(levenshtein.ComputeDistance(userTag, videoTag))
// if its not the SAME exact word...
// but requires less then a few tarnsitions...
// so we are counting the number of similar words
if distance != 0 && distance < 4 {
count++
}
}
}
// remember that lower the distance, higher the similarity
return count
}
// CleanString -
func CleanString(text string) string {
text = strings.Replace(text, "\n", " ", -1)
// regHTTP := regexp.MustCompile(`/^https.*$/`)
// regWWW := regexp.MustCompile(`/^www.*$/`)
// text = reg.ReplaceAllString(text, "")
if strings.Contains(text, "http") {
text = ""
}
if strings.Contains(text, "https") {
text = ""
}
if strings.Contains(text, "www") {
text = ""
}
text = RemoveStopWords(text)
return text
}
// RemoveStopWords -
func RemoveStopWords(text string) string {
cleanedString := stopwords.CleanString(text, "en", false)
return cleanedString
}
// GetStemsOfText -
func GetStemsOfText(text string) string {
s := strings.Split(text, " ")
for i, v := range s {
s[i] = GetStemOfWord(v)
}
return strings.Join(s, " ")
}
// GetStemOfWord –
func GetStemOfWord(word string) string {
wordAsBytes := []byte((word))
res := string(stemmer.Stem(wordAsBytes))
return res
}
// func (youtube *YoutubeController) GetTopMatches(videos)
| GetYoutubeVideoID | identifier_name |
youtubeService.go | package youtubeservice
import (
"errors"
"net/url"
"sort"
"strings"
"code.mine/dating_server/gateway"
"code.mine/dating_server/mapping"
"code.mine/dating_server/repo"
"code.mine/dating_server/types"
"github.com/agnivade/levenshtein"
stemmer "github.com/agonopol/go-stem"
"github.com/bbalet/stopwords"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
)
const (
developerKey = "AIzaSyBhFXscTPZr892Uj5h2wRghkFAqTPYtcEg"
)
// YoutubeController -
type YoutubeController struct {
gateway gateway.Gateway
repo repo.Repo
}
// YOUTUBE ID IS IN THE URL
// GetYoutubeVideoDetails -
func (c *YoutubeController) GetYoutubeVideoDetails(videoURL *string) (*types.UserVideoItem, error) {
// https://www.googleapis.com/youtube/v3/videos?id=D95qIe5pLuA&key=AIzaSyBhFXscTPZr892Uj5h2wRghkFAqTPYtcEg&part=snippet,statistics,topicDetails
if videoURL == nil {
return nil, errors.New("Need url")
}
// attempt to get the video is
u, err := url.Parse(mapping.StrToV(videoURL))
if err != nil {
return nil, err
} | videoID, err = c.gateway.GetYoutubeVideoID(videoURL)
if err != nil {
return nil, errors.New("could not get video ID")
}
}
response, err := c.gateway.GetYoutubeVideoDetails(videoID)
if err != nil {
return nil, err
}
// baseURL := "https://www.googleapis.com/youtube/v3/videos?"
// url := fmt.Sprintf("%s&id=%s&key=%s&part=snippet,statistics,topicDetails", baseURL, *videoID, developerKey)
// req, err := http.NewRequest("GET", url, nil)
// if err != nil {
// return nil, err
// }
// response := &types.UserVideoItem{}
// client := &http.Client{}
// resp, err := client.Do(req)
// if err != nil {
// return nil, err
// }
// err = json.NewDecoder(resp.Body).Decode(&response)
// if err != nil {
// return nil, err
// }
// if response == nil {
// return nil, errors.New("response is nil")
// }
return response, nil
}
// GetYoutubeVideoID -
func (c *YoutubeController) GetYoutubeVideoID(youtubeURL *string) (*string, error) {
if youtubeURL == nil {
return nil, errors.New("Need url")
}
_, err := c.gateway.GetYoutubeVideoID(youtubeURL)
if err != nil {
return nil, err
}
return nil, err
// baseURL := "https://www.googleapis.com/youtube/v3/search?"
// url := fmt.Sprintf("%spart=%s&maxResults=1&q=%s&type=video&key=%s", baseURL, "snippet", *youtubeURL, developerKey)
// req, err := http.NewRequest("GET", url, nil)
// if err != nil {
// return nil, err
// }
// response := &types.VideoIDResponse{}
// client := &http.Client{}
// resp, err := client.Do(req)
// if err != nil {
// return nil, err
// }
// err = json.NewDecoder(resp.Body).Decode(&response)
// if err != nil {
// return nil, err
// }
// if response == nil {
// return nil, errors.New("response is nil")
// }
// if len(response.Items) < 1 {
// return nil, errors.New("response.items is empty")
// }
// videoResponse := response.Items[0]
// if videoResponse.ID == nil {
// return nil, errors.New("item ID is nil")
// }
// if videoResponse.ID.VideoID == nil {
// return nil, errors.New("videoID is nil")
// }
// return videoResponse.ID.VideoID, nil
}
// https://blog.codecentric.de/en/2017/08/gomock-tutorial/
// GetEligibleUsers -
func (c *YoutubeController) GetEligibleUsers(user *types.User) ([]*types.User, error) {
// city := user.City
// partnerGender := user.PartnerGender
// get all eligible uuids.
// c, err := DB.GetCollection("users")
// if err != nil {
// return nil, err
// }
filters := &bson.M{
"zipcode": user.Zipcode,
"partnerGender": user.Gender, // ppl looking for my gender
"gender": user.PartnerGender, // their gender is what I want.
}
options := options.Find()
options.SetLimit(int64(50000))
users, err := c.repo.GetUsersByFilter(filters, options)
if err != nil {
return nil, err
}
// cursor, err := c.Find(context.Background(), filters, options)
// users := []*types.User{}
// if err = cursor.All(context.Background(), &users); err != nil {
// return nil, err
// }
return users, nil
}
// TODO – add in title words to tags
// RankAndMatchYoutubeVideos -
func (c *YoutubeController) RankAndMatchYoutubeVideos(user *types.User) ([]*types.User, error) {
if user == nil {
return nil, errors.New("user is nil")
}
// make sure user cant get themselves
users, err := c.GetEligibleUsers(user)
if err != nil {
return nil, err
}
userUUIDToUser := map[string]*types.User{}
userUUIDs := []*string{}
for _, user := range users {
userUUIDs = append(userUUIDs, user.UUID)
userUUIDToUser[*user.UUID] = user
}
// get video by user
userVideos, err := c.repo.GetVideosByUserUUID(user.UUID)
if err != nil {
return nil, err
}
youtubeVideoCandidates, err := c.repo.GetVideosByAllUserUUIDs(userUUIDs)
if err != nil {
return nil, err
}
sortedVideoScoreList := c.GetSortedVideoList(userVideos, youtubeVideoCandidates)
processedUUIDs := map[string]bool{}
sortedUsers := []*types.User{}
for _, videoScore := range sortedVideoScoreList {
if !processedUUIDs[*videoScore.Video.UserUUID] {
processedUUIDs[*videoScore.Video.UserUUID] = true
sortedUsers = append(sortedUsers, userUUIDToUser[*videoScore.Video.UserUUID])
}
}
if len(sortedUsers) <= 3 {
return sortedUsers, nil
}
return sortedUsers[:3], nil
}
// make sure this is a set
func getTagsFromVideo(video *types.UserVideoItem) []string {
userTags := []string{}
for _, tag := range video.Items[0].Snippet.Tags {
words := strings.Split(tag, " ")
for _, w := range words {
userTags = append(userTags, w)
}
}
titleWords := strings.Split(video.Items[0].Snippet.Title, " ")
for _, w := range titleWords {
userTags = append(userTags, w)
}
descriptionWords := strings.Split(video.Items[0].Snippet.Description, " ")
for _, w := range descriptionWords {
userTags = append(userTags, w)
}
freeFormText := strings.Join(userTags, " ")
freeFormText = CleanString(freeFormText)
userTags = strings.Split(freeFormText, " ")
processedWords := []string{}
for _, w := range userTags {
w = GetStemOfWord(w)
w = strings.ToLower(w)
if len(w) > 1 {
processedWords = append(processedWords, w)
}
}
return processedWords
}
// Score -
type Score struct {
Score float64
Video *types.UserVideoItem
}
// we want to do this by video so we can detect a particularly strong match
// between videos
// you can also match by most common words – stem the word
// you can also use this as a dictionary
// https://gist.github.com/dgp/1b24bf2961521bd75d6c
// https://techpostplus.com/youtube-video-categories-list-faqs-and-solutions/#YouTube_video_category_name_and_id_list
// match similar categories
// GetSortedVideoList -
func (c *YoutubeController) GetSortedVideoList(
userVideos []*types.UserVideoItem,
candidateVideos []*types.UserVideoItem,
) []Score {
scores := []Score{}
for _, userVideo := range userVideos {
// need to do CleanString somehow on these tags
userTags := getTagsFromVideo(userVideo)
userCategoryID := userVideo.Items[0].Snippet.CategoryID
var totalScore float64
// for every video in the candidate videos, check how well they match up against
// the user tags
for _, video := range candidateVideos {
videoTags := getTagsFromVideo(video)
wordFrequencyCandidateVideos := map[string]int{}
for _, tag := range videoTags {
wordFrequencyCandidateVideos[tag]++
// start off with 5 as an arbitrary value
// remember to clean text of stop words!
// you can also prob do better here with different levels of word frequnecy
}
for _, tag := range userTags {
if wordFrequencyCandidateVideos[tag] >= 6 {
totalScore += 6
} else if wordFrequencyCandidateVideos[tag] >= 4 {
totalScore += 4
} else if wordFrequencyCandidateVideos[tag] >= 2 {
totalScore += 2
}
}
// number of similar words
similarWordsScore := calculateDistanceScoreBetweenTags(userTags, videoTags)
totalScore += float64(similarWordsScore) * 2
categoryScore := calculateCategoryScore(userCategoryID, video.Items[0].Snippet.CategoryID)
totalScore += categoryScore * 5
videoScore := Score{
Score: totalScore,
Video: video,
}
scores = append(scores, videoScore)
}
}
sort.Slice(scores, func(i, j int) bool {
return scores[i].Score < scores[j].Score
})
return scores
}
// you could have secondary scores as well
// + scores of if they're both in each others slices vs just 1
func calculateCategoryScore(userCategoryID int, candidateCategoryID int) float64 {
if VideoCategoryMap[userCategoryID] == nil || VideoCategoryMap[candidateCategoryID] == nil {
return 0
}
containsCandidateCategory := false
containsUserCategory := false
for _, category := range VideoCategoryMap[userCategoryID] {
if category == candidateCategoryID {
containsCandidateCategory = true
}
}
for _, category := range VideoCategoryMap[candidateCategoryID] {
if category == userCategoryID {
containsUserCategory = true
}
}
if containsCandidateCategory && containsUserCategory {
return 2
}
if containsCandidateCategory && containsUserCategory {
return 1
}
return 0
}
// I think you should calculate by word, not by tag
// may want to stem each word in the phrase too
// you can also check if there's like 5 words or something that are in common that are more then 5 chars long
// if so you can give one particular score
// if not, then do the fuzzy matching.
// add in title, description
func calculateDistanceScoreBetweenTags(userTags, videoTags []string) int {
count := 0
for _, userTag := range userTags {
for _, videoTag := range videoTags {
distance := float64(levenshtein.ComputeDistance(userTag, videoTag))
// if its not the SAME exact word...
// but requires less then a few tarnsitions...
// so we are counting the number of similar words
if distance != 0 && distance < 4 {
count++
}
}
}
// remember that lower the distance, higher the similarity
return count
}
// CleanString -
func CleanString(text string) string {
text = strings.Replace(text, "\n", " ", -1)
// regHTTP := regexp.MustCompile(`/^https.*$/`)
// regWWW := regexp.MustCompile(`/^www.*$/`)
// text = reg.ReplaceAllString(text, "")
if strings.Contains(text, "http") {
text = ""
}
if strings.Contains(text, "https") {
text = ""
}
if strings.Contains(text, "www") {
text = ""
}
text = RemoveStopWords(text)
return text
}
// RemoveStopWords -
func RemoveStopWords(text string) string {
cleanedString := stopwords.CleanString(text, "en", false)
return cleanedString
}
// GetStemsOfText -
func GetStemsOfText(text string) string {
s := strings.Split(text, " ")
for i, v := range s {
s[i] = GetStemOfWord(v)
}
return strings.Join(s, " ")
}
// GetStemOfWord –
func GetStemOfWord(word string) string {
wordAsBytes := []byte((word))
res := string(stemmer.Stem(wordAsBytes))
return res
}
// func (youtube *YoutubeController) GetTopMatches(videos) | var videoID *string
values := u.Query()["id"]
if len(values) == 0 { | random_line_split |
youtubeService.go | package youtubeservice
import (
"errors"
"net/url"
"sort"
"strings"
"code.mine/dating_server/gateway"
"code.mine/dating_server/mapping"
"code.mine/dating_server/repo"
"code.mine/dating_server/types"
"github.com/agnivade/levenshtein"
stemmer "github.com/agonopol/go-stem"
"github.com/bbalet/stopwords"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
)
const (
developerKey = "AIzaSyBhFXscTPZr892Uj5h2wRghkFAqTPYtcEg"
)
// YoutubeController -
type YoutubeController struct {
gateway gateway.Gateway
repo repo.Repo
}
// YOUTUBE ID IS IN THE URL
// GetYoutubeVideoDetails -
func (c *YoutubeController) GetYoutubeVideoDetails(videoURL *string) (*types.UserVideoItem, error) {
// https://www.googleapis.com/youtube/v3/videos?id=D95qIe5pLuA&key=AIzaSyBhFXscTPZr892Uj5h2wRghkFAqTPYtcEg&part=snippet,statistics,topicDetails
if videoURL == nil {
return nil, errors.New("Need url")
}
// attempt to get the video is
u, err := url.Parse(mapping.StrToV(videoURL))
if err != nil {
return nil, err
}
var videoID *string
values := u.Query()["id"]
if len(values) == 0 {
videoID, err = c.gateway.GetYoutubeVideoID(videoURL)
if err != nil {
return nil, errors.New("could not get video ID")
}
}
response, err := c.gateway.GetYoutubeVideoDetails(videoID)
if err != nil {
return nil, err
}
// baseURL := "https://www.googleapis.com/youtube/v3/videos?"
// url := fmt.Sprintf("%s&id=%s&key=%s&part=snippet,statistics,topicDetails", baseURL, *videoID, developerKey)
// req, err := http.NewRequest("GET", url, nil)
// if err != nil {
// return nil, err
// }
// response := &types.UserVideoItem{}
// client := &http.Client{}
// resp, err := client.Do(req)
// if err != nil {
// return nil, err
// }
// err = json.NewDecoder(resp.Body).Decode(&response)
// if err != nil {
// return nil, err
// }
// if response == nil {
// return nil, errors.New("response is nil")
// }
return response, nil
}
// GetYoutubeVideoID -
func (c *YoutubeController) GetYoutubeVideoID(youtubeURL *string) (*string, error) {
if youtubeURL == nil {
return nil, errors.New("Need url")
}
_, err := c.gateway.GetYoutubeVideoID(youtubeURL)
if err != nil {
return nil, err
}
return nil, err
// baseURL := "https://www.googleapis.com/youtube/v3/search?"
// url := fmt.Sprintf("%spart=%s&maxResults=1&q=%s&type=video&key=%s", baseURL, "snippet", *youtubeURL, developerKey)
// req, err := http.NewRequest("GET", url, nil)
// if err != nil {
// return nil, err
// }
// response := &types.VideoIDResponse{}
// client := &http.Client{}
// resp, err := client.Do(req)
// if err != nil {
// return nil, err
// }
// err = json.NewDecoder(resp.Body).Decode(&response)
// if err != nil {
// return nil, err
// }
// if response == nil {
// return nil, errors.New("response is nil")
// }
// if len(response.Items) < 1 {
// return nil, errors.New("response.items is empty")
// }
// videoResponse := response.Items[0]
// if videoResponse.ID == nil {
// return nil, errors.New("item ID is nil")
// }
// if videoResponse.ID.VideoID == nil {
// return nil, errors.New("videoID is nil")
// }
// return videoResponse.ID.VideoID, nil
}
// https://blog.codecentric.de/en/2017/08/gomock-tutorial/
// GetEligibleUsers -
func (c *YoutubeController) GetEligibleUsers(user *types.User) ([]*types.User, error) {
// city := user.City
// partnerGender := user.PartnerGender
// get all eligible uuids.
// c, err := DB.GetCollection("users")
// if err != nil {
// return nil, err
// }
filters := &bson.M{
"zipcode": user.Zipcode,
"partnerGender": user.Gender, // ppl looking for my gender
"gender": user.PartnerGender, // their gender is what I want.
}
options := options.Find()
options.SetLimit(int64(50000))
users, err := c.repo.GetUsersByFilter(filters, options)
if err != nil {
return nil, err
}
// cursor, err := c.Find(context.Background(), filters, options)
// users := []*types.User{}
// if err = cursor.All(context.Background(), &users); err != nil {
// return nil, err
// }
return users, nil
}
// TODO – add in title words to tags
// RankAndMatchYoutubeVideos -
func (c *YoutubeController) RankAndMatchYoutubeVideos(user *types.User) ([]*types.User, error) {
if user == nil {
return nil, errors.New("user is nil")
}
// make sure user cant get themselves
users, err := c.GetEligibleUsers(user)
if err != nil {
return nil, err
}
userUUIDToUser := map[string]*types.User{}
userUUIDs := []*string{}
for _, user := range users {
userUUIDs = append(userUUIDs, user.UUID)
userUUIDToUser[*user.UUID] = user
}
// get video by user
userVideos, err := c.repo.GetVideosByUserUUID(user.UUID)
if err != nil {
return nil, err
}
youtubeVideoCandidates, err := c.repo.GetVideosByAllUserUUIDs(userUUIDs)
if err != nil {
return nil, err
}
sortedVideoScoreList := c.GetSortedVideoList(userVideos, youtubeVideoCandidates)
processedUUIDs := map[string]bool{}
sortedUsers := []*types.User{}
for _, videoScore := range sortedVideoScoreList {
if !processedUUIDs[*videoScore.Video.UserUUID] {
processedUUIDs[*videoScore.Video.UserUUID] = true
sortedUsers = append(sortedUsers, userUUIDToUser[*videoScore.Video.UserUUID])
}
}
if len(sortedUsers) <= 3 {
return sortedUsers, nil
}
return sortedUsers[:3], nil
}
// make sure this is a set
func getTagsFromVideo(video *types.UserVideoItem) []string {
userTags := []string{}
for _, tag := range video.Items[0].Snippet.Tags {
words := strings.Split(tag, " ")
for _, w := range words {
userTags = append(userTags, w)
}
}
titleWords := strings.Split(video.Items[0].Snippet.Title, " ")
for _, w := range titleWords {
userTags = append(userTags, w)
}
descriptionWords := strings.Split(video.Items[0].Snippet.Description, " ")
for _, w := range descriptionWords {
userTags = append(userTags, w)
}
freeFormText := strings.Join(userTags, " ")
freeFormText = CleanString(freeFormText)
userTags = strings.Split(freeFormText, " ")
processedWords := []string{}
for _, w := range userTags {
w = GetStemOfWord(w)
w = strings.ToLower(w)
if len(w) > 1 {
processedWords = append(processedWords, w)
}
}
return processedWords
}
// Score -
type Score struct {
Score float64
Video *types.UserVideoItem
}
// we want to do this by video so we can detect a particularly strong match
// between videos
// you can also match by most common words – stem the word
// you can also use this as a dictionary
// https://gist.github.com/dgp/1b24bf2961521bd75d6c
// https://techpostplus.com/youtube-video-categories-list-faqs-and-solutions/#YouTube_video_category_name_and_id_list
// match similar categories
// GetSortedVideoList -
func (c *YoutubeController) GetSortedVideoList(
userVideos []*types.UserVideoItem,
candidateVideos []*types.UserVideoItem,
) []Score {
scores := []Score{}
for _, userVideo := range userVideos {
// need to do CleanString somehow on these tags
userTags := getTagsFromVideo(userVideo)
userCategoryID := userVideo.Items[0].Snippet.CategoryID
var totalScore float64
// for every video in the candidate videos, check how well they match up against
// the user tags
for _, video := range candidateVideos {
videoTags := getTagsFromVideo(video)
wordFrequencyCandidateVideos := map[string]int{}
for _, tag := range videoTags {
wordFrequencyCandidateVideos[tag]++
// start off with 5 as an arbitrary value
// remember to clean text of stop words!
// you can also prob do better here with different levels of word frequnecy
}
for _, tag := range userTags {
if wordFrequencyCandidateVideos[tag] >= 6 {
totalScore += 6
} else if wordFrequencyCandidateVideos[tag] >= 4 {
totalScore += 4
} else if wordFrequencyCandidateVideos[tag] >= 2 {
totalScore += 2
}
}
// number of similar words
similarWordsScore := calculateDistanceScoreBetweenTags(userTags, videoTags)
totalScore += float64(similarWordsScore) * 2
categoryScore := calculateCategoryScore(userCategoryID, video.Items[0].Snippet.CategoryID)
totalScore += categoryScore * 5
videoScore := Score{
Score: totalScore,
Video: video,
}
scores = append(scores, videoScore)
}
}
sort.Slice(scores, func(i, j int) bool {
return scores[i].Score < scores[j].Score
})
return scores
}
// you could have secondary scores as well
// + scores of if they're both in each others slices vs just 1
func calculateCategoryScore(userCategoryID int, candidateCategoryID int) float64 {
if | think you should calculate by word, not by tag
// may want to stem each word in the phrase too
// you can also check if there's like 5 words or something that are in common that are more then 5 chars long
// if so you can give one particular score
// if not, then do the fuzzy matching.
// add in title, description
func calculateDistanceScoreBetweenTags(userTags, videoTags []string) int {
count := 0
for _, userTag := range userTags {
for _, videoTag := range videoTags {
distance := float64(levenshtein.ComputeDistance(userTag, videoTag))
// if its not the SAME exact word...
// but requires less then a few tarnsitions...
// so we are counting the number of similar words
if distance != 0 && distance < 4 {
count++
}
}
}
// remember that lower the distance, higher the similarity
return count
}
// CleanString -
func CleanString(text string) string {
text = strings.Replace(text, "\n", " ", -1)
// regHTTP := regexp.MustCompile(`/^https.*$/`)
// regWWW := regexp.MustCompile(`/^www.*$/`)
// text = reg.ReplaceAllString(text, "")
if strings.Contains(text, "http") {
text = ""
}
if strings.Contains(text, "https") {
text = ""
}
if strings.Contains(text, "www") {
text = ""
}
text = RemoveStopWords(text)
return text
}
// RemoveStopWords -
func RemoveStopWords(text string) string {
cleanedString := stopwords.CleanString(text, "en", false)
return cleanedString
}
// GetStemsOfText -
func GetStemsOfText(text string) string {
s := strings.Split(text, " ")
for i, v := range s {
s[i] = GetStemOfWord(v)
}
return strings.Join(s, " ")
}
// GetStemOfWord –
func GetStemOfWord(word string) string {
wordAsBytes := []byte((word))
res := string(stemmer.Stem(wordAsBytes))
return res
}
// func (youtube *YoutubeController) GetTopMatches(videos)
| VideoCategoryMap[userCategoryID] == nil || VideoCategoryMap[candidateCategoryID] == nil {
return 0
}
containsCandidateCategory := false
containsUserCategory := false
for _, category := range VideoCategoryMap[userCategoryID] {
if category == candidateCategoryID {
containsCandidateCategory = true
}
}
for _, category := range VideoCategoryMap[candidateCategoryID] {
if category == userCategoryID {
containsUserCategory = true
}
}
if containsCandidateCategory && containsUserCategory {
return 2
}
if containsCandidateCategory && containsUserCategory {
return 1
}
return 0
}
// I | identifier_body |
youtubeService.go | package youtubeservice
import (
"errors"
"net/url"
"sort"
"strings"
"code.mine/dating_server/gateway"
"code.mine/dating_server/mapping"
"code.mine/dating_server/repo"
"code.mine/dating_server/types"
"github.com/agnivade/levenshtein"
stemmer "github.com/agonopol/go-stem"
"github.com/bbalet/stopwords"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
)
const (
developerKey = "AIzaSyBhFXscTPZr892Uj5h2wRghkFAqTPYtcEg"
)
// YoutubeController -
type YoutubeController struct {
gateway gateway.Gateway
repo repo.Repo
}
// YOUTUBE ID IS IN THE URL
// GetYoutubeVideoDetails -
func (c *YoutubeController) GetYoutubeVideoDetails(videoURL *string) (*types.UserVideoItem, error) {
// https://www.googleapis.com/youtube/v3/videos?id=D95qIe5pLuA&key=AIzaSyBhFXscTPZr892Uj5h2wRghkFAqTPYtcEg&part=snippet,statistics,topicDetails
if videoURL == nil {
return nil, errors.New("Need url")
}
// attempt to get the video is
u, err := url.Parse(mapping.StrToV(videoURL))
if err != nil {
return nil, err
}
var videoID *string
values := u.Query()["id"]
if len(values) == 0 {
videoID, err = c.gateway.GetYoutubeVideoID(videoURL)
if err != nil {
return nil, errors.New("could not get video ID")
}
}
response, err := c.gateway.GetYoutubeVideoDetails(videoID)
if err != nil {
return nil, err
}
// baseURL := "https://www.googleapis.com/youtube/v3/videos?"
// url := fmt.Sprintf("%s&id=%s&key=%s&part=snippet,statistics,topicDetails", baseURL, *videoID, developerKey)
// req, err := http.NewRequest("GET", url, nil)
// if err != nil {
// return nil, err
// }
// response := &types.UserVideoItem{}
// client := &http.Client{}
// resp, err := client.Do(req)
// if err != nil {
// return nil, err
// }
// err = json.NewDecoder(resp.Body).Decode(&response)
// if err != nil {
// return nil, err
// }
// if response == nil {
// return nil, errors.New("response is nil")
// }
return response, nil
}
// GetYoutubeVideoID -
func (c *YoutubeController) GetYoutubeVideoID(youtubeURL *string) (*string, error) {
if youtubeURL == nil {
return nil, errors.New("Need url")
}
_, err := c.gateway.GetYoutubeVideoID(youtubeURL)
if err != nil {
return nil, err
}
return nil, err
// baseURL := "https://www.googleapis.com/youtube/v3/search?"
// url := fmt.Sprintf("%spart=%s&maxResults=1&q=%s&type=video&key=%s", baseURL, "snippet", *youtubeURL, developerKey)
// req, err := http.NewRequest("GET", url, nil)
// if err != nil {
// return nil, err
// }
// response := &types.VideoIDResponse{}
// client := &http.Client{}
// resp, err := client.Do(req)
// if err != nil {
// return nil, err
// }
// err = json.NewDecoder(resp.Body).Decode(&response)
// if err != nil {
// return nil, err
// }
// if response == nil {
// return nil, errors.New("response is nil")
// }
// if len(response.Items) < 1 {
// return nil, errors.New("response.items is empty")
// }
// videoResponse := response.Items[0]
// if videoResponse.ID == nil {
// return nil, errors.New("item ID is nil")
// }
// if videoResponse.ID.VideoID == nil {
// return nil, errors.New("videoID is nil")
// }
// return videoResponse.ID.VideoID, nil
}
// https://blog.codecentric.de/en/2017/08/gomock-tutorial/
// GetEligibleUsers -
func (c *YoutubeController) GetEligibleUsers(user *types.User) ([]*types.User, error) {
// city := user.City
// partnerGender := user.PartnerGender
// get all eligible uuids.
// c, err := DB.GetCollection("users")
// if err != nil {
// return nil, err
// }
filters := &bson.M{
"zipcode": user.Zipcode,
"partnerGender": user.Gender, // ppl looking for my gender
"gender": user.PartnerGender, // their gender is what I want.
}
options := options.Find()
options.SetLimit(int64(50000))
users, err := c.repo.GetUsersByFilter(filters, options)
if err != nil |
// cursor, err := c.Find(context.Background(), filters, options)
// users := []*types.User{}
// if err = cursor.All(context.Background(), &users); err != nil {
// return nil, err
// }
return users, nil
}
// TODO – add in title words to tags
// RankAndMatchYoutubeVideos -
func (c *YoutubeController) RankAndMatchYoutubeVideos(user *types.User) ([]*types.User, error) {
if user == nil {
return nil, errors.New("user is nil")
}
// make sure user cant get themselves
users, err := c.GetEligibleUsers(user)
if err != nil {
return nil, err
}
userUUIDToUser := map[string]*types.User{}
userUUIDs := []*string{}
for _, user := range users {
userUUIDs = append(userUUIDs, user.UUID)
userUUIDToUser[*user.UUID] = user
}
// get video by user
userVideos, err := c.repo.GetVideosByUserUUID(user.UUID)
if err != nil {
return nil, err
}
youtubeVideoCandidates, err := c.repo.GetVideosByAllUserUUIDs(userUUIDs)
if err != nil {
return nil, err
}
sortedVideoScoreList := c.GetSortedVideoList(userVideos, youtubeVideoCandidates)
processedUUIDs := map[string]bool{}
sortedUsers := []*types.User{}
for _, videoScore := range sortedVideoScoreList {
if !processedUUIDs[*videoScore.Video.UserUUID] {
processedUUIDs[*videoScore.Video.UserUUID] = true
sortedUsers = append(sortedUsers, userUUIDToUser[*videoScore.Video.UserUUID])
}
}
if len(sortedUsers) <= 3 {
return sortedUsers, nil
}
return sortedUsers[:3], nil
}
// make sure this is a set
func getTagsFromVideo(video *types.UserVideoItem) []string {
userTags := []string{}
for _, tag := range video.Items[0].Snippet.Tags {
words := strings.Split(tag, " ")
for _, w := range words {
userTags = append(userTags, w)
}
}
titleWords := strings.Split(video.Items[0].Snippet.Title, " ")
for _, w := range titleWords {
userTags = append(userTags, w)
}
descriptionWords := strings.Split(video.Items[0].Snippet.Description, " ")
for _, w := range descriptionWords {
userTags = append(userTags, w)
}
freeFormText := strings.Join(userTags, " ")
freeFormText = CleanString(freeFormText)
userTags = strings.Split(freeFormText, " ")
processedWords := []string{}
for _, w := range userTags {
w = GetStemOfWord(w)
w = strings.ToLower(w)
if len(w) > 1 {
processedWords = append(processedWords, w)
}
}
return processedWords
}
// Score -
type Score struct {
Score float64
Video *types.UserVideoItem
}
// we want to do this by video so we can detect a particularly strong match
// between videos
// you can also match by most common words – stem the word
// you can also use this as a dictionary
// https://gist.github.com/dgp/1b24bf2961521bd75d6c
// https://techpostplus.com/youtube-video-categories-list-faqs-and-solutions/#YouTube_video_category_name_and_id_list
// match similar categories
// GetSortedVideoList -
func (c *YoutubeController) GetSortedVideoList(
userVideos []*types.UserVideoItem,
candidateVideos []*types.UserVideoItem,
) []Score {
scores := []Score{}
for _, userVideo := range userVideos {
// need to do CleanString somehow on these tags
userTags := getTagsFromVideo(userVideo)
userCategoryID := userVideo.Items[0].Snippet.CategoryID
var totalScore float64
// for every video in the candidate videos, check how well they match up against
// the user tags
for _, video := range candidateVideos {
videoTags := getTagsFromVideo(video)
wordFrequencyCandidateVideos := map[string]int{}
for _, tag := range videoTags {
wordFrequencyCandidateVideos[tag]++
// start off with 5 as an arbitrary value
// remember to clean text of stop words!
// you can also prob do better here with different levels of word frequnecy
}
for _, tag := range userTags {
if wordFrequencyCandidateVideos[tag] >= 6 {
totalScore += 6
} else if wordFrequencyCandidateVideos[tag] >= 4 {
totalScore += 4
} else if wordFrequencyCandidateVideos[tag] >= 2 {
totalScore += 2
}
}
// number of similar words
similarWordsScore := calculateDistanceScoreBetweenTags(userTags, videoTags)
totalScore += float64(similarWordsScore) * 2
categoryScore := calculateCategoryScore(userCategoryID, video.Items[0].Snippet.CategoryID)
totalScore += categoryScore * 5
videoScore := Score{
Score: totalScore,
Video: video,
}
scores = append(scores, videoScore)
}
}
sort.Slice(scores, func(i, j int) bool {
return scores[i].Score < scores[j].Score
})
return scores
}
// you could have secondary scores as well
// + scores of if they're both in each others slices vs just 1
func calculateCategoryScore(userCategoryID int, candidateCategoryID int) float64 {
if VideoCategoryMap[userCategoryID] == nil || VideoCategoryMap[candidateCategoryID] == nil {
return 0
}
containsCandidateCategory := false
containsUserCategory := false
for _, category := range VideoCategoryMap[userCategoryID] {
if category == candidateCategoryID {
containsCandidateCategory = true
}
}
for _, category := range VideoCategoryMap[candidateCategoryID] {
if category == userCategoryID {
containsUserCategory = true
}
}
if containsCandidateCategory && containsUserCategory {
return 2
}
if containsCandidateCategory && containsUserCategory {
return 1
}
return 0
}
// I think you should calculate by word, not by tag
// may want to stem each word in the phrase too
// you can also check if there's like 5 words or something that are in common that are more then 5 chars long
// if so you can give one particular score
// if not, then do the fuzzy matching.
// add in title, description
func calculateDistanceScoreBetweenTags(userTags, videoTags []string) int {
count := 0
for _, userTag := range userTags {
for _, videoTag := range videoTags {
distance := float64(levenshtein.ComputeDistance(userTag, videoTag))
// if its not the SAME exact word...
// but requires less then a few tarnsitions...
// so we are counting the number of similar words
if distance != 0 && distance < 4 {
count++
}
}
}
// remember that lower the distance, higher the similarity
return count
}
// CleanString -
func CleanString(text string) string {
text = strings.Replace(text, "\n", " ", -1)
// regHTTP := regexp.MustCompile(`/^https.*$/`)
// regWWW := regexp.MustCompile(`/^www.*$/`)
// text = reg.ReplaceAllString(text, "")
if strings.Contains(text, "http") {
text = ""
}
if strings.Contains(text, "https") {
text = ""
}
if strings.Contains(text, "www") {
text = ""
}
text = RemoveStopWords(text)
return text
}
// RemoveStopWords -
func RemoveStopWords(text string) string {
cleanedString := stopwords.CleanString(text, "en", false)
return cleanedString
}
// GetStemsOfText -
func GetStemsOfText(text string) string {
s := strings.Split(text, " ")
for i, v := range s {
s[i] = GetStemOfWord(v)
}
return strings.Join(s, " ")
}
// GetStemOfWord –
func GetStemOfWord(word string) string {
wordAsBytes := []byte((word))
res := string(stemmer.Stem(wordAsBytes))
return res
}
// func (youtube *YoutubeController) GetTopMatches(videos)
| {
return nil, err
} | conditional_block |
mod.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
use core::mem::transmute;
use core::mem::MaybeUninit;
use core::ptr;
use core::sync::atomic::fence;
use core::sync::atomic::Ordering::*;
use atomic::Atomic;
use lazy_static::lazy_static;
use crate::callbacks;
use crate::signal;
use crate::slot_map::SlotKey;
use crate::slot_map::SlotMap;
use crate::tool::Tool;
use crate::tool::ToolGlobal;
pub mod guard;
pub type HandlerInput = libc::siginfo_t;
/// We need to keep track of the sigaction that the user specified or what was
/// originally provided as a default separately from what we execute directly as
/// a signal handler.
#[derive(Debug, Clone, Copy)]
struct SigActionPair {
/// Prisinte sigaction provided by the user or os
guest_facing_action: libc::sigaction,
/// The actual sigaction we are using
internal_action: libc::sigaction,
}
impl SigActionPair {
/// Create a new SigActionPair from the original sig action and an override
/// for the default handler. The created pair will contain the original
/// action, and a synthetic action with the handler replaced if an override
/// is provided or if the the sa_sigaction is one of the non-function-
/// pointer values (`SI_DFL`, `SI_ERR`, `SI_IGN`)
fn new(original: libc::sigaction, override_handler: Option<libc::sighandler_t>) -> Self {
let mut internal_action = original.clone();
// This is safe because it is only reading from a mut static that is
// guaranteed to have been completely set before this function
// is called
internal_action.sa_sigaction = unsafe {
match (original.sa_sigaction, override_handler) {
(_, Some(override_handler)) => override_handler,
(libc::SIG_DFL, _) => DEFAULT_EXIT_HANDLER
.expect("Default handlers should be set before registering actions"),
(libc::SIG_IGN, _) => DEFAULT_IGNORE_HANDLER
.expect("Default handlers should be set before registering actions"),
(libc::SIG_ERR, _) => DEFAULT_ERROR_HANDLER
.expect("Default handlers should be set before registering actions"),
(default_action, None) => default_action,
}
};
SigActionPair {
guest_facing_action: original,
internal_action,
}
}
}
lazy_static! {
/// This is where we are storing the registered actions for each signal.
/// We have to store them as Options for now because our slot map requires
/// its stored type to implement default
static ref HANDLER_SLOT_MAP: SlotMap<Option<SigActionPair>> = SlotMap::new();
}
// The sighandler_t type has some values that aren't pointers that are still
// valid. They aren't executable, so we need an executable version that we
// control for each. Those are below
/// Storage of our default handler for the libc::SIG_DFL
static mut DEFAULT_EXIT_HANDLER: Option<libc::sighandler_t> = None;
/// Storage of our default handler for the libc::SIG_IGN
static mut DEFAULT_IGNORE_HANDLER: Option<libc::sighandler_t> = None;
/// Storage of our default handler for the libc::SIG_ERR
static mut DEFAULT_ERROR_HANDLER: Option<libc::sighandler_t> = None;
/// This function invokes the function specified by the given sigaction directly
/// with the given signal value or siginfo as arguments depending on whether
/// the sigaction's flags indicate it is expecting a sigaction or siginfo.
/// Note. In the case that the action is requesting sigaction, the 3rd argument
/// to the handler will always be null. The specifications for sigaction say the
/// third argument is a pointer to the context for the signal being raised, but
/// we cannot guarantee that context will be valid with the handler function is
/// executed. It also seems like that argument's use is rare, so we are omitting
/// it for the time being. When T122210155, we should be able to provide the ctx
/// argument without introducing unsafety.
unsafe fn invoke_signal_handler(
signal_val: libc::c_int,
action: &libc::sigaction,
sig_info: libc::siginfo_t,
) |
/// Register the given sigaction as the default. Optionally an override function
/// can be passed in that will us to change the default handler for an action
fn insert_action(
sigaction: libc::sigaction,
override_default_handler: Option<libc::sighandler_t>,
) -> SlotKey {
HANDLER_SLOT_MAP.insert(Some(SigActionPair::new(
sigaction,
override_default_handler,
)))
}
/// Register a signal handler for the guest and return the sigaction currently
/// registered for the specified signal
#[allow(dead_code)]
pub fn register_guest_handler(signal_value: i32, new_action: libc::sigaction) -> libc::sigaction {
register_guest_handler_impl(signal_value, new_action, false)
.expect("All signals should have pre-registered guest handlers before now")
}
/// This is our replacement for default handlers where
/// `libc::sighandler_t = libc::SIG_DFL` which is the default handler
/// value for almost all signals. This function will stop all threads in order
/// to raise thread-exit events for each
pub extern "C" fn default_exit_handler<T: ToolGlobal>(
_signal_value: libc::c_int,
_siginfo: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
callbacks::exit_group::<T>(0);
}
/// This is our replacement for default handlers where
/// `libc::sighandler_t = libc::SIG_IGN` which is the default handler
/// value for lots of signals. This function does nothing, but allows uniform
/// treatment of function pointers in signal handlers (instead of checking for)
///specific values of sighandler_t before calling
pub extern "C" fn default_ignore_handler<T: ToolGlobal>(
_signal_value: libc::c_int,
_siginfo: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
}
/// This is our replacement for default handlers where
/// `libc::sighandler_t = libc::SIG_ERR` which is the default handler
/// value for signals representing unrecoverable errors (SIGILL, SIGSEGV, etc).
/// This function will stop all threads in order to raise thread-exit events
/// for each, but the error code will be non-zero
pub extern "C" fn default_error_handler<T: ToolGlobal>(
_signal_value: libc::c_int,
_siginfo: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
callbacks::exit_group::<T>(1);
}
/// This macro defines the functions and constants and api for signals based on
/// an input set of signal. There should only be one invocation of the macro,
/// and it is below. It allows us to express the list of signals we are
/// supporting with properties on each to deal with edge cases
macro_rules! generate_signal_handlers {
(
default_exit_handler: $default_exit_handler_fn:expr,
default_ignore_handler: $default_ignore_handler_fn:expr,
default_error_handler: $default_error_handler_fn:expr,
signals: [$($signal_name:ident $({
$(override_default = $override_default_handler:expr;)?
$(guest_handler_allowed = $guest_handler_allowed:expr;)?
})?),+$(,)?]) => {
/// All signal values as i32
mod signal_values {
$(
pub const $signal_name: i32 = libc::$signal_name as i32;
)+
}
/// Storage for the slot keys that point to the handlers for each signal
mod handler_keys {
use super::*;
$(
pub static $signal_name: Atomic<Option<SlotKey>> = Atomic::new(None);
)+
}
/// Handler functions for each signal
mod reverie_handlers {
use super::*;
$(
#[allow(non_snake_case)]
pub fn $signal_name(handler_input: HandlerInput) {
if let Some(Some(SigActionPair {
internal_action,
..
})) = handler_keys::$signal_name
.load(Relaxed)
.and_then(|key| HANDLER_SLOT_MAP.get(key))
{
unsafe {
invoke_signal_handler(
signal_values::$signal_name as libc::c_int,
internal_action,
handler_input,
);
}
}
}
)+
}
/// This is the function that will be registered for all signals.
/// guest and default handlers for each signal will be dispatched from
/// here using the global sequencer to prevent signals from interfering
/// with reverie or its tool's state
pub extern "C" fn central_handler<T: ToolGlobal>(
real_signal_value: i32,
sig_info_ptr: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
let wrapped_handler = match real_signal_value {
$(
signal_values::$signal_name => reverie_handlers::$signal_name,
)+
_ => panic!("Invalid signal {}", real_signal_value)
};
let sig_info = unsafe { *sig_info_ptr };
T::global().handle_signal_event(real_signal_value);
signal::guard::invoke_guarded(wrapped_handler, sig_info);
}
/// This is the funtion that needs to be called to initialize all the
/// signal handling machinery. This will register our central handler
/// for all signals
pub fn register_central_handler<T: ToolGlobal>() {
// Register the default handler functions that correspond to the
// scalar sighandler_t behaviors. This is safe because this will
// only be done before the first syscall is handled, and only
// one thread will be active.
unsafe {
DEFAULT_EXIT_HANDLER = Some($default_exit_handler_fn
as *const libc::c_void
as libc::sighandler_t);
DEFAULT_IGNORE_HANDLER = Some($default_ignore_handler_fn
as *const libc::c_void
as libc::sighandler_t);
DEFAULT_ERROR_HANDLER = Some($default_error_handler_fn
as *const libc::c_void
as libc::sighandler_t);
}
// To make sure handlers are set before continuing
fence(SeqCst);
$( unsafe {
let sa_sigaction = central_handler::<T>
as extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void)
as *mut libc::c_void
as libc::sighandler_t;
let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit();
assert_eq!(0, libc::sigemptyset(sa_mask.as_mut_ptr()), "Failed to create sigset");
libc::sigaddset(sa_mask.as_mut_ptr(), signal_values::$signal_name);
let action = libc::sigaction {
sa_sigaction,
sa_mask: sa_mask.assume_init(),
sa_flags: 0x14000000,
sa_restorer: None,
};
let mut original_action : MaybeUninit<libc::sigaction>
= MaybeUninit::uninit();
assert_eq!(0, libc::sigaction(
signal_values::$signal_name as libc::c_int,
&action as *const libc::sigaction,
original_action.as_mut_ptr(),
), "Failed to register central handler for {}", stringify!($signal_name));
let override_default_handler = None $($(
.or(Some(
$override_default_handler as *const libc::c_void as libc::sighandler_t)
)
)?)?;
let handler_key = insert_action(
original_action.assume_init(),
override_default_handler,
);
handler_keys::$signal_name.store(Some(handler_key), SeqCst);
} )+
}
/// Register the given action for the given signal. The force-allow
/// flag means that the handler will be registered even if guest
/// handlers are disallowed for the given signal. Return a copy of the
/// sigaction that was previously associated with the given signal
fn register_guest_handler_impl(
signal_value: i32,
new_action: libc::sigaction,
force_allow: bool
) -> Option<libc::sigaction> {
let (handler_key, guest_handler_allowed, signal_name) = match signal_value {
$(
signal_values::$signal_name => {
let allowed = force_allow || (true $($( && $guest_handler_allowed)?)?);
let signal_name = stringify!($signal_name);
(&handler_keys::$signal_name, allowed, signal_name)
},
)+
_ => panic!("Invalid signal {}", signal_value)
};
if !guest_handler_allowed {
panic!("Guest handler registration for {} is not supported", signal_name);
}
let new_action_key = insert_action(new_action, None);
let old_action_key_opt = handler_key.swap(Some(new_action_key), Relaxed);
// The first time this function is called, there won't be a stored
// key for every signal action, but if there is return it. It is
// safe because the key being used must have come from the same
// map, and because no elements are deleted, the get operation
// will always succeed
old_action_key_opt.map(|old_action_key| unsafe {
HANDLER_SLOT_MAP.get_unchecked(old_action_key).unwrap().guest_facing_action
})
}
/// Get the sigaction registered for the given signal if there is one.
/// The returned sigaction will either be the original default sigaction
/// set by default for the application or the unaltered sigaction
/// registered by the user
#[allow(dead_code)]
pub fn get_registered_guest_handler(
signal_value: i32
) -> libc::sigaction {
let current_action_key = match signal_value {
$(
signal_values::$signal_name => {
handler_keys::$signal_name
.load(Relaxed)
.expect("All signals should have guest handlers before now")
}
)+
_ => panic!("Invalid signal {}", signal_value)
};
// This is safe because the key being used must have come from the
// same map, and because no elements are deleted, the get operation
// will always succeed
unsafe {
HANDLER_SLOT_MAP.get_unchecked(current_action_key)
.unwrap().guest_facing_action
}
}
};
}
generate_signal_handlers! {
default_exit_handler: default_exit_handler::<T>,
default_ignore_handler: default_ignore_handler::<T>,
default_error_handler: default_error_handler::<T>,
signals: [
SIGHUP,
SIGINT,
SIGQUIT,
// SIGILL, <- needs special synchronous handling Todo(T129735993)
SIGTRAP,
SIGABRT,
SIGBUS,
SIGFPE,
// SIGKILL, <- cannot be handled directly Todo(T129348205)
SIGUSR1,
// SIGSEGV, <- needs special synchronous handling Todo(T129735993)
SIGUSR2,
SIGPIPE,
SIGALRM,
SIGTERM,
SIGSTKFLT {
// This is our controlled exit signal. If the guest tries to
// register a handler for it, we will panic rather than chancining
// undefined behavior
override_default = crate::callbacks::handle_exit_signal::<T>;
guest_handler_allowed = false;
},
// SIGCHLD, <- Causing problems in test_rr_syscallbuf_sigstop T128095829
SIGCONT,
// SIGSTOP, <- cannot be handled directly Todo(T129348205)
SIGTSTP,
SIGTTIN,
SIGTTOU,
SIGURG,
SIGXCPU,
SIGXFSZ,
SIGVTALRM,
SIGPROF,
SIGWINCH,
SIGIO,
SIGPWR,
SIGSYS,
]
}
| {
if action.sa_flags & libc::SA_SIGINFO > 0 {
let to_run: extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) =
transmute(action.sa_sigaction as *const libc::c_void);
to_run(
signal_val,
&sig_info as *const libc::siginfo_t,
ptr::null::<libc::c_void>(),
);
} else {
let to_run: extern "C" fn(libc::c_int) =
transmute(action.sa_sigaction as *const libc::c_void);
to_run(signal_val);
}
} | identifier_body |
mod.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
use core::mem::transmute;
use core::mem::MaybeUninit;
use core::ptr;
use core::sync::atomic::fence;
use core::sync::atomic::Ordering::*;
use atomic::Atomic;
use lazy_static::lazy_static;
use crate::callbacks;
use crate::signal;
use crate::slot_map::SlotKey;
use crate::slot_map::SlotMap;
use crate::tool::Tool;
use crate::tool::ToolGlobal;
pub mod guard;
pub type HandlerInput = libc::siginfo_t;
/// We need to keep track of the sigaction that the user specified or what was
/// originally provided as a default separately from what we execute directly as
/// a signal handler.
#[derive(Debug, Clone, Copy)]
struct SigActionPair {
/// Prisinte sigaction provided by the user or os
guest_facing_action: libc::sigaction,
/// The actual sigaction we are using
internal_action: libc::sigaction,
}
impl SigActionPair {
/// Create a new SigActionPair from the original sig action and an override
/// for the default handler. The created pair will contain the original
/// action, and a synthetic action with the handler replaced if an override
/// is provided or if the the sa_sigaction is one of the non-function-
/// pointer values (`SI_DFL`, `SI_ERR`, `SI_IGN`)
fn new(original: libc::sigaction, override_handler: Option<libc::sighandler_t>) -> Self {
let mut internal_action = original.clone();
// This is safe because it is only reading from a mut static that is
// guaranteed to have been completely set before this function
// is called
internal_action.sa_sigaction = unsafe {
match (original.sa_sigaction, override_handler) {
(_, Some(override_handler)) => override_handler,
(libc::SIG_DFL, _) => DEFAULT_EXIT_HANDLER
.expect("Default handlers should be set before registering actions"),
(libc::SIG_IGN, _) => DEFAULT_IGNORE_HANDLER
.expect("Default handlers should be set before registering actions"),
(libc::SIG_ERR, _) => DEFAULT_ERROR_HANDLER
.expect("Default handlers should be set before registering actions"),
(default_action, None) => default_action,
}
};
SigActionPair {
guest_facing_action: original,
internal_action,
}
}
}
lazy_static! {
/// This is where we are storing the registered actions for each signal.
/// We have to store them as Options for now because our slot map requires
/// its stored type to implement default
static ref HANDLER_SLOT_MAP: SlotMap<Option<SigActionPair>> = SlotMap::new();
}
// The sighandler_t type has some values that aren't pointers that are still
// valid. They aren't executable, so we need an executable version that we
// control for each. Those are below
/// Storage of our default handler for the libc::SIG_DFL
static mut DEFAULT_EXIT_HANDLER: Option<libc::sighandler_t> = None;
/// Storage of our default handler for the libc::SIG_IGN
static mut DEFAULT_IGNORE_HANDLER: Option<libc::sighandler_t> = None;
/// Storage of our default handler for the libc::SIG_ERR
static mut DEFAULT_ERROR_HANDLER: Option<libc::sighandler_t> = None;
/// This function invokes the function specified by the given sigaction directly
/// with the given signal value or siginfo as arguments depending on whether
/// the sigaction's flags indicate it is expecting a sigaction or siginfo.
/// Note. In the case that the action is requesting sigaction, the 3rd argument
/// to the handler will always be null. The specifications for sigaction say the
/// third argument is a pointer to the context for the signal being raised, but
/// we cannot guarantee that context will be valid with the handler function is
/// executed. It also seems like that argument's use is rare, so we are omitting
/// it for the time being. When T122210155, we should be able to provide the ctx
/// argument without introducing unsafety.
unsafe fn invoke_signal_handler(
signal_val: libc::c_int,
action: &libc::sigaction,
sig_info: libc::siginfo_t,
) {
if action.sa_flags & libc::SA_SIGINFO > 0 {
let to_run: extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) =
transmute(action.sa_sigaction as *const libc::c_void);
to_run(
signal_val,
&sig_info as *const libc::siginfo_t,
ptr::null::<libc::c_void>(),
);
} else {
let to_run: extern "C" fn(libc::c_int) =
transmute(action.sa_sigaction as *const libc::c_void);
to_run(signal_val);
}
}
/// Register the given sigaction as the default. Optionally an override function
/// can be passed in that will us to change the default handler for an action
fn insert_action(
sigaction: libc::sigaction,
override_default_handler: Option<libc::sighandler_t>,
) -> SlotKey {
HANDLER_SLOT_MAP.insert(Some(SigActionPair::new(
sigaction,
override_default_handler,
)))
}
/// Register a signal handler for the guest and return the sigaction currently
/// registered for the specified signal
#[allow(dead_code)]
pub fn register_guest_handler(signal_value: i32, new_action: libc::sigaction) -> libc::sigaction {
register_guest_handler_impl(signal_value, new_action, false)
.expect("All signals should have pre-registered guest handlers before now")
}
/// This is our replacement for default handlers where
/// `libc::sighandler_t = libc::SIG_DFL` which is the default handler
/// value for almost all signals. This function will stop all threads in order
/// to raise thread-exit events for each
pub extern "C" fn default_exit_handler<T: ToolGlobal>(
_signal_value: libc::c_int,
_siginfo: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
callbacks::exit_group::<T>(0);
} | /// treatment of function pointers in signal handlers (instead of checking for)
///specific values of sighandler_t before calling
pub extern "C" fn default_ignore_handler<T: ToolGlobal>(
_signal_value: libc::c_int,
_siginfo: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
}
/// This is our replacement for default handlers where
/// `libc::sighandler_t = libc::SIG_ERR` which is the default handler
/// value for signals representing unrecoverable errors (SIGILL, SIGSEGV, etc).
/// This function will stop all threads in order to raise thread-exit events
/// for each, but the error code will be non-zero
pub extern "C" fn default_error_handler<T: ToolGlobal>(
_signal_value: libc::c_int,
_siginfo: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
callbacks::exit_group::<T>(1);
}
/// This macro defines the functions and constants and api for signals based on
/// an input set of signal. There should only be one invocation of the macro,
/// and it is below. It allows us to express the list of signals we are
/// supporting with properties on each to deal with edge cases
macro_rules! generate_signal_handlers {
(
default_exit_handler: $default_exit_handler_fn:expr,
default_ignore_handler: $default_ignore_handler_fn:expr,
default_error_handler: $default_error_handler_fn:expr,
signals: [$($signal_name:ident $({
$(override_default = $override_default_handler:expr;)?
$(guest_handler_allowed = $guest_handler_allowed:expr;)?
})?),+$(,)?]) => {
/// All signal values as i32
mod signal_values {
$(
pub const $signal_name: i32 = libc::$signal_name as i32;
)+
}
/// Storage for the slot keys that point to the handlers for each signal
mod handler_keys {
use super::*;
$(
pub static $signal_name: Atomic<Option<SlotKey>> = Atomic::new(None);
)+
}
/// Handler functions for each signal
mod reverie_handlers {
use super::*;
$(
#[allow(non_snake_case)]
pub fn $signal_name(handler_input: HandlerInput) {
if let Some(Some(SigActionPair {
internal_action,
..
})) = handler_keys::$signal_name
.load(Relaxed)
.and_then(|key| HANDLER_SLOT_MAP.get(key))
{
unsafe {
invoke_signal_handler(
signal_values::$signal_name as libc::c_int,
internal_action,
handler_input,
);
}
}
}
)+
}
/// This is the function that will be registered for all signals.
/// guest and default handlers for each signal will be dispatched from
/// here using the global sequencer to prevent signals from interfering
/// with reverie or its tool's state
pub extern "C" fn central_handler<T: ToolGlobal>(
real_signal_value: i32,
sig_info_ptr: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
let wrapped_handler = match real_signal_value {
$(
signal_values::$signal_name => reverie_handlers::$signal_name,
)+
_ => panic!("Invalid signal {}", real_signal_value)
};
let sig_info = unsafe { *sig_info_ptr };
T::global().handle_signal_event(real_signal_value);
signal::guard::invoke_guarded(wrapped_handler, sig_info);
}
/// This is the funtion that needs to be called to initialize all the
/// signal handling machinery. This will register our central handler
/// for all signals
pub fn register_central_handler<T: ToolGlobal>() {
// Register the default handler functions that correspond to the
// scalar sighandler_t behaviors. This is safe because this will
// only be done before the first syscall is handled, and only
// one thread will be active.
unsafe {
DEFAULT_EXIT_HANDLER = Some($default_exit_handler_fn
as *const libc::c_void
as libc::sighandler_t);
DEFAULT_IGNORE_HANDLER = Some($default_ignore_handler_fn
as *const libc::c_void
as libc::sighandler_t);
DEFAULT_ERROR_HANDLER = Some($default_error_handler_fn
as *const libc::c_void
as libc::sighandler_t);
}
// To make sure handlers are set before continuing
fence(SeqCst);
$( unsafe {
let sa_sigaction = central_handler::<T>
as extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void)
as *mut libc::c_void
as libc::sighandler_t;
let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit();
assert_eq!(0, libc::sigemptyset(sa_mask.as_mut_ptr()), "Failed to create sigset");
libc::sigaddset(sa_mask.as_mut_ptr(), signal_values::$signal_name);
let action = libc::sigaction {
sa_sigaction,
sa_mask: sa_mask.assume_init(),
sa_flags: 0x14000000,
sa_restorer: None,
};
let mut original_action : MaybeUninit<libc::sigaction>
= MaybeUninit::uninit();
assert_eq!(0, libc::sigaction(
signal_values::$signal_name as libc::c_int,
&action as *const libc::sigaction,
original_action.as_mut_ptr(),
), "Failed to register central handler for {}", stringify!($signal_name));
let override_default_handler = None $($(
.or(Some(
$override_default_handler as *const libc::c_void as libc::sighandler_t)
)
)?)?;
let handler_key = insert_action(
original_action.assume_init(),
override_default_handler,
);
handler_keys::$signal_name.store(Some(handler_key), SeqCst);
} )+
}
/// Register the given action for the given signal. The force-allow
/// flag means that the handler will be registered even if guest
/// handlers are disallowed for the given signal. Return a copy of the
/// sigaction that was previously associated with the given signal
fn register_guest_handler_impl(
signal_value: i32,
new_action: libc::sigaction,
force_allow: bool
) -> Option<libc::sigaction> {
let (handler_key, guest_handler_allowed, signal_name) = match signal_value {
$(
signal_values::$signal_name => {
let allowed = force_allow || (true $($( && $guest_handler_allowed)?)?);
let signal_name = stringify!($signal_name);
(&handler_keys::$signal_name, allowed, signal_name)
},
)+
_ => panic!("Invalid signal {}", signal_value)
};
if !guest_handler_allowed {
panic!("Guest handler registration for {} is not supported", signal_name);
}
let new_action_key = insert_action(new_action, None);
let old_action_key_opt = handler_key.swap(Some(new_action_key), Relaxed);
// The first time this function is called, there won't be a stored
// key for every signal action, but if there is return it. It is
// safe because the key being used must have come from the same
// map, and because no elements are deleted, the get operation
// will always succeed
old_action_key_opt.map(|old_action_key| unsafe {
HANDLER_SLOT_MAP.get_unchecked(old_action_key).unwrap().guest_facing_action
})
}
/// Get the sigaction registered for the given signal if there is one.
/// The returned sigaction will either be the original default sigaction
/// set by default for the application or the unaltered sigaction
/// registered by the user
#[allow(dead_code)]
pub fn get_registered_guest_handler(
signal_value: i32
) -> libc::sigaction {
let current_action_key = match signal_value {
$(
signal_values::$signal_name => {
handler_keys::$signal_name
.load(Relaxed)
.expect("All signals should have guest handlers before now")
}
)+
_ => panic!("Invalid signal {}", signal_value)
};
// This is safe because the key being used must have come from the
// same map, and because no elements are deleted, the get operation
// will always succeed
unsafe {
HANDLER_SLOT_MAP.get_unchecked(current_action_key)
.unwrap().guest_facing_action
}
}
};
}
generate_signal_handlers! {
default_exit_handler: default_exit_handler::<T>,
default_ignore_handler: default_ignore_handler::<T>,
default_error_handler: default_error_handler::<T>,
signals: [
SIGHUP,
SIGINT,
SIGQUIT,
// SIGILL, <- needs special synchronous handling Todo(T129735993)
SIGTRAP,
SIGABRT,
SIGBUS,
SIGFPE,
// SIGKILL, <- cannot be handled directly Todo(T129348205)
SIGUSR1,
// SIGSEGV, <- needs special synchronous handling Todo(T129735993)
SIGUSR2,
SIGPIPE,
SIGALRM,
SIGTERM,
SIGSTKFLT {
// This is our controlled exit signal. If the guest tries to
// register a handler for it, we will panic rather than chancining
// undefined behavior
override_default = crate::callbacks::handle_exit_signal::<T>;
guest_handler_allowed = false;
},
// SIGCHLD, <- Causing problems in test_rr_syscallbuf_sigstop T128095829
SIGCONT,
// SIGSTOP, <- cannot be handled directly Todo(T129348205)
SIGTSTP,
SIGTTIN,
SIGTTOU,
SIGURG,
SIGXCPU,
SIGXFSZ,
SIGVTALRM,
SIGPROF,
SIGWINCH,
SIGIO,
SIGPWR,
SIGSYS,
]
} |
/// This is our replacement for default handlers where
/// `libc::sighandler_t = libc::SIG_IGN` which is the default handler
/// value for lots of signals. This function does nothing, but allows uniform | random_line_split |
mod.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
use core::mem::transmute;
use core::mem::MaybeUninit;
use core::ptr;
use core::sync::atomic::fence;
use core::sync::atomic::Ordering::*;
use atomic::Atomic;
use lazy_static::lazy_static;
use crate::callbacks;
use crate::signal;
use crate::slot_map::SlotKey;
use crate::slot_map::SlotMap;
use crate::tool::Tool;
use crate::tool::ToolGlobal;
pub mod guard;
pub type HandlerInput = libc::siginfo_t;
/// We need to keep track of the sigaction that the user specified or what was
/// originally provided as a default separately from what we execute directly as
/// a signal handler.
#[derive(Debug, Clone, Copy)]
struct SigActionPair {
/// Prisinte sigaction provided by the user or os
guest_facing_action: libc::sigaction,
/// The actual sigaction we are using
internal_action: libc::sigaction,
}
impl SigActionPair {
/// Create a new SigActionPair from the original sig action and an override
/// for the default handler. The created pair will contain the original
/// action, and a synthetic action with the handler replaced if an override
/// is provided or if the the sa_sigaction is one of the non-function-
/// pointer values (`SI_DFL`, `SI_ERR`, `SI_IGN`)
fn | (original: libc::sigaction, override_handler: Option<libc::sighandler_t>) -> Self {
let mut internal_action = original.clone();
// This is safe because it is only reading from a mut static that is
// guaranteed to have been completely set before this function
// is called
internal_action.sa_sigaction = unsafe {
match (original.sa_sigaction, override_handler) {
(_, Some(override_handler)) => override_handler,
(libc::SIG_DFL, _) => DEFAULT_EXIT_HANDLER
.expect("Default handlers should be set before registering actions"),
(libc::SIG_IGN, _) => DEFAULT_IGNORE_HANDLER
.expect("Default handlers should be set before registering actions"),
(libc::SIG_ERR, _) => DEFAULT_ERROR_HANDLER
.expect("Default handlers should be set before registering actions"),
(default_action, None) => default_action,
}
};
SigActionPair {
guest_facing_action: original,
internal_action,
}
}
}
lazy_static! {
/// This is where we are storing the registered actions for each signal.
/// We have to store them as Options for now because our slot map requires
/// its stored type to implement default
static ref HANDLER_SLOT_MAP: SlotMap<Option<SigActionPair>> = SlotMap::new();
}
// The sighandler_t type has some values that aren't pointers that are still
// valid. They aren't executable, so we need an executable version that we
// control for each. Those are below
/// Storage of our default handler for the libc::SIG_DFL
static mut DEFAULT_EXIT_HANDLER: Option<libc::sighandler_t> = None;
/// Storage of our default handler for the libc::SIG_IGN
static mut DEFAULT_IGNORE_HANDLER: Option<libc::sighandler_t> = None;
/// Storage of our default handler for the libc::SIG_ERR
static mut DEFAULT_ERROR_HANDLER: Option<libc::sighandler_t> = None;
/// This function invokes the function specified by the given sigaction directly
/// with the given signal value or siginfo as arguments depending on whether
/// the sigaction's flags indicate it is expecting a sigaction or siginfo.
/// Note. In the case that the action is requesting sigaction, the 3rd argument
/// to the handler will always be null. The specifications for sigaction say the
/// third argument is a pointer to the context for the signal being raised, but
/// we cannot guarantee that context will be valid with the handler function is
/// executed. It also seems like that argument's use is rare, so we are omitting
/// it for the time being. When T122210155, we should be able to provide the ctx
/// argument without introducing unsafety.
unsafe fn invoke_signal_handler(
signal_val: libc::c_int,
action: &libc::sigaction,
sig_info: libc::siginfo_t,
) {
if action.sa_flags & libc::SA_SIGINFO > 0 {
let to_run: extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) =
transmute(action.sa_sigaction as *const libc::c_void);
to_run(
signal_val,
&sig_info as *const libc::siginfo_t,
ptr::null::<libc::c_void>(),
);
} else {
let to_run: extern "C" fn(libc::c_int) =
transmute(action.sa_sigaction as *const libc::c_void);
to_run(signal_val);
}
}
/// Register the given sigaction as the default. Optionally an override function
/// can be passed in that will us to change the default handler for an action
fn insert_action(
sigaction: libc::sigaction,
override_default_handler: Option<libc::sighandler_t>,
) -> SlotKey {
HANDLER_SLOT_MAP.insert(Some(SigActionPair::new(
sigaction,
override_default_handler,
)))
}
/// Register a signal handler for the guest and return the sigaction currently
/// registered for the specified signal
#[allow(dead_code)]
pub fn register_guest_handler(signal_value: i32, new_action: libc::sigaction) -> libc::sigaction {
register_guest_handler_impl(signal_value, new_action, false)
.expect("All signals should have pre-registered guest handlers before now")
}
/// This is our replacement for default handlers where
/// `libc::sighandler_t = libc::SIG_DFL` which is the default handler
/// value for almost all signals. This function will stop all threads in order
/// to raise thread-exit events for each
pub extern "C" fn default_exit_handler<T: ToolGlobal>(
_signal_value: libc::c_int,
_siginfo: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
callbacks::exit_group::<T>(0);
}
/// This is our replacement for default handlers where
/// `libc::sighandler_t = libc::SIG_IGN` which is the default handler
/// value for lots of signals. This function does nothing, but allows uniform
/// treatment of function pointers in signal handlers (instead of checking for)
///specific values of sighandler_t before calling
pub extern "C" fn default_ignore_handler<T: ToolGlobal>(
_signal_value: libc::c_int,
_siginfo: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
}
/// This is our replacement for default handlers where
/// `libc::sighandler_t = libc::SIG_ERR` which is the default handler
/// value for signals representing unrecoverable errors (SIGILL, SIGSEGV, etc).
/// This function will stop all threads in order to raise thread-exit events
/// for each, but the error code will be non-zero
pub extern "C" fn default_error_handler<T: ToolGlobal>(
_signal_value: libc::c_int,
_siginfo: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
callbacks::exit_group::<T>(1);
}
/// This macro defines the functions and constants and api for signals based on
/// an input set of signal. There should only be one invocation of the macro,
/// and it is below. It allows us to express the list of signals we are
/// supporting with properties on each to deal with edge cases
macro_rules! generate_signal_handlers {
(
default_exit_handler: $default_exit_handler_fn:expr,
default_ignore_handler: $default_ignore_handler_fn:expr,
default_error_handler: $default_error_handler_fn:expr,
signals: [$($signal_name:ident $({
$(override_default = $override_default_handler:expr;)?
$(guest_handler_allowed = $guest_handler_allowed:expr;)?
})?),+$(,)?]) => {
/// All signal values as i32
mod signal_values {
$(
pub const $signal_name: i32 = libc::$signal_name as i32;
)+
}
/// Storage for the slot keys that point to the handlers for each signal
mod handler_keys {
use super::*;
$(
pub static $signal_name: Atomic<Option<SlotKey>> = Atomic::new(None);
)+
}
/// Handler functions for each signal
mod reverie_handlers {
use super::*;
$(
#[allow(non_snake_case)]
pub fn $signal_name(handler_input: HandlerInput) {
if let Some(Some(SigActionPair {
internal_action,
..
})) = handler_keys::$signal_name
.load(Relaxed)
.and_then(|key| HANDLER_SLOT_MAP.get(key))
{
unsafe {
invoke_signal_handler(
signal_values::$signal_name as libc::c_int,
internal_action,
handler_input,
);
}
}
}
)+
}
/// This is the function that will be registered for all signals.
/// guest and default handlers for each signal will be dispatched from
/// here using the global sequencer to prevent signals from interfering
/// with reverie or its tool's state
pub extern "C" fn central_handler<T: ToolGlobal>(
real_signal_value: i32,
sig_info_ptr: *const libc::siginfo_t,
_ctx: *const libc::c_void,
) {
let wrapped_handler = match real_signal_value {
$(
signal_values::$signal_name => reverie_handlers::$signal_name,
)+
_ => panic!("Invalid signal {}", real_signal_value)
};
let sig_info = unsafe { *sig_info_ptr };
T::global().handle_signal_event(real_signal_value);
signal::guard::invoke_guarded(wrapped_handler, sig_info);
}
/// This is the funtion that needs to be called to initialize all the
/// signal handling machinery. This will register our central handler
/// for all signals
pub fn register_central_handler<T: ToolGlobal>() {
// Register the default handler functions that correspond to the
// scalar sighandler_t behaviors. This is safe because this will
// only be done before the first syscall is handled, and only
// one thread will be active.
unsafe {
DEFAULT_EXIT_HANDLER = Some($default_exit_handler_fn
as *const libc::c_void
as libc::sighandler_t);
DEFAULT_IGNORE_HANDLER = Some($default_ignore_handler_fn
as *const libc::c_void
as libc::sighandler_t);
DEFAULT_ERROR_HANDLER = Some($default_error_handler_fn
as *const libc::c_void
as libc::sighandler_t);
}
// To make sure handlers are set before continuing
fence(SeqCst);
$( unsafe {
let sa_sigaction = central_handler::<T>
as extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void)
as *mut libc::c_void
as libc::sighandler_t;
let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit();
assert_eq!(0, libc::sigemptyset(sa_mask.as_mut_ptr()), "Failed to create sigset");
libc::sigaddset(sa_mask.as_mut_ptr(), signal_values::$signal_name);
let action = libc::sigaction {
sa_sigaction,
sa_mask: sa_mask.assume_init(),
sa_flags: 0x14000000,
sa_restorer: None,
};
let mut original_action : MaybeUninit<libc::sigaction>
= MaybeUninit::uninit();
assert_eq!(0, libc::sigaction(
signal_values::$signal_name as libc::c_int,
&action as *const libc::sigaction,
original_action.as_mut_ptr(),
), "Failed to register central handler for {}", stringify!($signal_name));
let override_default_handler = None $($(
.or(Some(
$override_default_handler as *const libc::c_void as libc::sighandler_t)
)
)?)?;
let handler_key = insert_action(
original_action.assume_init(),
override_default_handler,
);
handler_keys::$signal_name.store(Some(handler_key), SeqCst);
} )+
}
/// Register the given action for the given signal. The force-allow
/// flag means that the handler will be registered even if guest
/// handlers are disallowed for the given signal. Return a copy of the
/// sigaction that was previously associated with the given signal
fn register_guest_handler_impl(
signal_value: i32,
new_action: libc::sigaction,
force_allow: bool
) -> Option<libc::sigaction> {
let (handler_key, guest_handler_allowed, signal_name) = match signal_value {
$(
signal_values::$signal_name => {
let allowed = force_allow || (true $($( && $guest_handler_allowed)?)?);
let signal_name = stringify!($signal_name);
(&handler_keys::$signal_name, allowed, signal_name)
},
)+
_ => panic!("Invalid signal {}", signal_value)
};
if !guest_handler_allowed {
panic!("Guest handler registration for {} is not supported", signal_name);
}
let new_action_key = insert_action(new_action, None);
let old_action_key_opt = handler_key.swap(Some(new_action_key), Relaxed);
// The first time this function is called, there won't be a stored
// key for every signal action, but if there is return it. It is
// safe because the key being used must have come from the same
// map, and because no elements are deleted, the get operation
// will always succeed
old_action_key_opt.map(|old_action_key| unsafe {
HANDLER_SLOT_MAP.get_unchecked(old_action_key).unwrap().guest_facing_action
})
}
/// Get the sigaction registered for the given signal if there is one.
/// The returned sigaction will either be the original default sigaction
/// set by default for the application or the unaltered sigaction
/// registered by the user
#[allow(dead_code)]
pub fn get_registered_guest_handler(
signal_value: i32
) -> libc::sigaction {
let current_action_key = match signal_value {
$(
signal_values::$signal_name => {
handler_keys::$signal_name
.load(Relaxed)
.expect("All signals should have guest handlers before now")
}
)+
_ => panic!("Invalid signal {}", signal_value)
};
// This is safe because the key being used must have come from the
// same map, and because no elements are deleted, the get operation
// will always succeed
unsafe {
HANDLER_SLOT_MAP.get_unchecked(current_action_key)
.unwrap().guest_facing_action
}
}
};
}
generate_signal_handlers! {
default_exit_handler: default_exit_handler::<T>,
default_ignore_handler: default_ignore_handler::<T>,
default_error_handler: default_error_handler::<T>,
signals: [
SIGHUP,
SIGINT,
SIGQUIT,
// SIGILL, <- needs special synchronous handling Todo(T129735993)
SIGTRAP,
SIGABRT,
SIGBUS,
SIGFPE,
// SIGKILL, <- cannot be handled directly Todo(T129348205)
SIGUSR1,
// SIGSEGV, <- needs special synchronous handling Todo(T129735993)
SIGUSR2,
SIGPIPE,
SIGALRM,
SIGTERM,
SIGSTKFLT {
// This is our controlled exit signal. If the guest tries to
// register a handler for it, we will panic rather than chancining
// undefined behavior
override_default = crate::callbacks::handle_exit_signal::<T>;
guest_handler_allowed = false;
},
// SIGCHLD, <- Causing problems in test_rr_syscallbuf_sigstop T128095829
SIGCONT,
// SIGSTOP, <- cannot be handled directly Todo(T129348205)
SIGTSTP,
SIGTTIN,
SIGTTOU,
SIGURG,
SIGXCPU,
SIGXFSZ,
SIGVTALRM,
SIGPROF,
SIGWINCH,
SIGIO,
SIGPWR,
SIGSYS,
]
}
| new | identifier_name |
featureExtractors.py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 17:42:18 2018
@author: Tim
"""
import music21 as m21
import music21.features.jSymbolic as jsym
import scipy.stats
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
# round all duration values to this many digits!
# some are stored as fractions and that's just inconvenient
ROUND_DURS_DIGITS = 5
# N.B. THE HEADERS ARE:
# 0: tunefamily
# 1: songid
# 2: motifid
# 3: begintime
# 4: endtime
# 5: duration
# 6: startindex
# 7: endindex
# 8: numberofnotes
# 9: motifclass
# 10: description
# 11: annotator
# 12: changes
# try to fetch a single motif
# def extractMotif(annEntry, songs):
# """
# given a row from the annotation file and the database of score files,
# return the notes of theassociated motif and some of its metadata as a
# dictionary.
# """
#
# songName = annEntry[1]
# inStart = int(annEntry[6])
# numNotes = int(annEntry[8])
#
# #add number of ties before start index from start index; meertens
# #DOESN'T count tied notes as notes but music21 DOES
# allNotes = songs[songName].score.flat.notes.stream()
# #subtract 1 here to get the first note of the occurence in the slice
# #so that we can get rid of it if it's a rest
# beforeSlice = allNotes[:inStart-1]
# numTies = 0
# for n in beforeSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
# inStart += numTies
#
# #do the same for ties inside of the snippet, but also keep track of where
# #they are and save that information with the motif so we don't have to go
# #through this procedure again
# numTies = 0
# inSlice = allNotes[inStart:(inStart+numNotes)]
# for n in inSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
#
# #this new numNotes will work with music21
# numNotes += numTies
#
# #NOW we know that we have the actual motif!
# motif = allNotes[inStart:(inStart+numNotes)]
#
# return {'notes':motif,
# 'startInd':inStart,
# 'endInd':(inStart+numNotes),
# 'songID':annEntry[1],
# 'motifClass':annEntry[9],
# 'duration':annEntry[5]}
# annotated first starting at 0, but tied notes are only counted for the onset
# must disregard tied notes when doing start/end indices tabarnak
# so: consider the list of notes up to the first index. if there's n ties
# that live behind the start index, increment the start index by n. when done,
# look 8 notes ahead and do the same thing
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
"""
given song name, occurrence start, occurrence end, and the database of score files,
return the notes of the associated pattern occurrence
useTies is a boolean determining whether or not tied notes count as
two notes or one for the purpose of indexing (true for 1, false for 2)
necessary bc MTC-ANN indexing doesn't count
"""
# inStart = int(annEntry[6])
# numNotes = int(annEntry[8])
numNotes = inEnd - inStart + 1 # including endpoints
# add number of ties before start index from start index; meertens
# DOESN'T count tied notes as notes but music21 DOES
allNotes = songs[songName].score.flat.notes.stream()
# subtract 1 here to get the first note of the occurence in the slice
# so that we can get rid of it if it's a rest
if(useTies):
beforeSlice = allNotes[:inStart-1]
numTies = 0
for n in beforeSlice:
if(n.tie is not None):
if(n.tie.type == 'start'):
numTies += 1
inStart += numTies
# do the same for ties inside of the snippet, but also keep track of where
# they are and save that information with the pattOcc so we don't have to go
# through this procedure again (TODO)
numTies = 0
inSlice = allNotes[inStart:(inStart+numNotes)]
for n in inSlice:
if(n.tie is not None):
if(n.tie.type == 'start'):
numTies += 1
# this new numNotes will work with music21
numNotes += numTies
pattOcc = allNotes[inStart:(inStart+numNotes)]
return pattOcc
def getFeaturesForSongs(score):
vec = {}
mel = score.flat.notes.stream()
noteNums = [x.pitch.midi for x in mel]
intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
couInt = dict(Counter(intervals))
for k in couInt.keys():
couInt[k] /= len(intervals)
vec['interval_probs'] = couInt
vec['pitch_mean'] = np.mean(noteNums)
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
couRtm = dict(Counter(noteDurs))
for k in couRtm.keys():
couRtm[k] /= len(noteDurs)
vec['duration_probs'] = couRtm
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014
# HISTOGRAMS:
# interval counting
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
for n in range(-3, 3):
num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
return vec
# single method that is passed an entry from the motifs dict
# and the database of songs and returns a dict that is a feature
# vector for that motif.
def getFeaturesForOccurrences(cur_class, songs):
max_length_occ = 10
vec = {}
mel = cur_class.score
# for now just remove rests
noteNums = [x.pitch.midi for x in mel]
intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
highest = max(noteNums)
lowest = min(noteNums)
vec['numNotes'] = len(noteNums)
vec['pitch_highest'] = highest
vec['pitch_lowest'] = lowest
vec['pitch_range'] = highest-lowest
vec['pitch_num_classes'] = len(set(noteNums))
vec['pitch_mean'] = np.mean(noteNums)
vec['pitch_std'] = np.std(noteNums)
vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
# pitch counting
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
vec['interval_max'] = max(np.abs(intervals))
vec['interval_min'] = min(np.abs(intervals))
vec['interval_largest_asc'] = max([max(intervals), 0])
vec['interval_largest_desc'] = min([min(intervals), 0])
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(noteNums)-1])
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
# interval counting
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
# -1 if monotonically down, 1 if up, else 0
if all([np.sign(x) == 1 for x in intervals]):
vec['interval_strict_asc_or_desc'] = 1
elif all([np.sign(x) == -1 for x in intervals]):
vec['interval_strict_asc_or_desc'] = -1
else:
vec['interval_strict_asc_or_desc'] = 0
# rhythmic properties
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
vec['rhythm_duration'] = sum(noteDurs)
vec['rhythm_longest_note'] = max(noteDurs)
vec['rhythm_shortest_note'] = min(noteDurs)
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014
vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs)-1]
# rhythm counting
for n in range(-3, 3):
num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
# POLYFIT IDEA
yCoords = [y - noteNums[0] for y in noteNums]
xtemp = [float(x.offset) / vec['rhythm_duration'] for x in mel]
xCoords = [x - xtemp[0] for x in xtemp]
# print(str(xCoords) + " vs " + str(yCoords))
polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
vec['polyfit_1'] = polyFit1[0][0]
vec['polyfit_residual_1'] = 0
if polyFit1[1].size > 0:
vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
vec['polyfit_2'] = 0
vec['polyfit_residual_2'] = 0
vec['polyfit_3'] = 0
vec['polyfit_residual_3'] = 0
if len(noteNums) >= 3:
polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
vec['polyfit_2'] = polyFit2[0][0]
if polyFit2[1].size > 0:
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
if len(noteNums) >= 4:
polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
vec['polyfit_3'] = polyFit3[0][0]
if polyFit3[1].size > 0:
vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
# add sequence representation of occurrence
zeros = [0 for i in range(max_length_occ)]
for i in range(max_length_occ):
vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
# differences between song and this motif
songVec = songs[cur_class.songName].songFeatures
song_diff_keys = [
'interval_mean',
'rhythm_variability',
'rhythm_density',
'interval_signs',
'pitch_mean',
'interval_prop_small',
'interval_prop_large'
]
song_diff_keys += [x for x in vec.keys() if '_count' in x]
for key in song_diff_keys:
vec['diff_' + key] = songVec[key] - vec[key]
# songScore = songs[motif['songName']]['score'].flat.notes.stream()
# songScoreNums = [x.pitch.midi for x in songScore]
# vec['intervalFollowing'] = 0
# if motif['endInd'] + 1 < len(songScoreNums):
# vec['intervalFollowing'] = songScoreNums[motif['endInd'] + 1] - noteNums[-1]
# vec['intervalPreceding'] = 0
# if motif['endInd'] - 1 > 0:
# vec['intervalPreceding'] = songScoreNums[motif['endInd'] - 1] - noteNums[0]
sumIntProbs = 1
for i in intervals:
|
vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
sumDurProbs = 1
for d in noteDurs:
sumDurProbs *= songVec['duration_probs'][d]
vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
vec['rhythm_starts_on_downbeat'] = 0
vec['rhythm_crosses_measure'] = 0
vec['rhythm_start_beat_str'] = 0
vec['rhythm_last_beat_str'] = 0
try:
noteBeats = [x.beat for x in mel]
vec['rhythm_starts_on_downbeat'] = (noteBeats[0] == 1.0)
vec['rhythm_crosses_measure'] = sum([noteBeats[n] < noteBeats[n-1] for n in range(1, len(noteBeats))]) > 0
# figure out how to tell if note has associated time signature
noteStr = [x.beatStrength for x in mel]
vec['rhythm_start_beat_str'] = np.log(noteStr[0])
vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr)-1])
except m21.Music21ObjectException:
# this is not a good solution.
pass
# send it back
return vec
def getFeaturesForClasses(patternClass, occs, songs):
# take the average/std over all occurrences
vec = {}
vec['numOccs'] = len(patternClass.occNames)
occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()
for fk in occFeatureKeys:
allOccVals = [occs[occName].occFeatures[fk] for occName in patternClass.occNames]
vec["avg_" + fk] = np.mean(allOccVals)
vec["std_" + fk] = np.std(allOccVals)
scores = [occs[oc].score.flat for oc in patternClass.occNames]
noteNums = [[x.pitch.midi for x in mel] for mel in scores]
noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS)
for x in mel] for mel in scores]
flatNums = [x for subList in noteNums for x in subList]
vec['num_notes_total'] = len(flatNums)
vec['unique_pitch_prop_content'] = \
len(set(tuple(x) for x in noteNums)) / vec['numOccs']
vec['unique_rhythm_prop_content'] = \
len(set(tuple(x) for x in noteDurs)) / vec['numOccs']
pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec['numOccs'])]
vec['prop_unique_content'] = \
len(set(tuple(x) for x in pitchAndDurs)) / vec['numOccs']
return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest, pClasses, pOccs):
# so: we want to take a sample of our huge number of generated pattern classes
# such that the number of occurrences and average cardinality doesn't easily
# distinguish our sample from the annotated group.
# perform a quick and dirty knn to get a bunch of generated class names
# whose cardinalities and numOccs somewhat match the annotated data.
indexPairs = np.arange(len(annPClassNames))
indexPairs = np.concatenate([indexPairs, indexPairs])
np.random.shuffle(indexPairs)
indexPairs = np.split(indexPairs, len(indexPairs)/2)
# deep copy!
genPClassNamesCopy = list(genPClassNames)
filtGenPClassNames = []
for i in range(len(annPClassNames)):
tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
tarNumOccs = len(tar1.occNames)
tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
tarNumNotes = np.mean(tar2Notes)
candidateNameList = []
# calculate how close each generated class is to these parameters
for gcn in genPClassNamesCopy:
cand = pClasses[gcn]
candNumOccs = len(cand.occNames)
candNotes = [len(pOccs[on].score) for on in cand.occNames]
candNumNotes = np.mean(candNotes)
candScore = (candNumOccs - tarNumOccs)**2 + (candNumNotes - tarNumNotes)**2
candidateNameList.append([candScore, gcn])
# from the kNearest closest generated classes, choose one and remove
# that one from the copy array
candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
chop = candidateNameList[0:kNearest]
choice = chop[np.random.choice(kNearest)][1]
filtGenPClassNames.append(choice)
genPClassNamesCopy.remove(choice)
return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
chunk_len = int(np.floor(len(inp) / num_chunks))
chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]
if len(chunks) > num_chunks:
for i, x in enumerate(chunks[num_chunks]):
chunks[i].append(x)
del chunks[num_chunks]
return chunks
# just for testing: get all features
# plt.plot(sorted(inspectFeature('classAvg_pitch_mean',pClasses,genPClassNames + annPClassNames)))
def inspectFeature(featureName, table, tableNames, featsType="classFeatures"):
ret = []
for tn in tableNames:
item = table[tn]
ret.append(item[featsType][featureName])
return ret
def scatterFeatures(fn1, fn2, table, tableNames):
xs = []
ys = []
types = []
for tn in tableNames:
item = table[tn]
xs.append(item.classFeatures[fn1])
ys.append(item.classFeatures[fn2])
if item['type'] == 'ann':
types.append('r')
else:
types.append('k')
print(types)
plt.scatter(xs, ys, c=types)
plt.xlabel(fn1)
plt.ylabel(fn2)
plt.show()
return
| sumIntProbs *= songVec['interval_probs'][i] | conditional_block |
featureExtractors.py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 17:42:18 2018
@author: Tim
"""
import music21 as m21
import music21.features.jSymbolic as jsym
import scipy.stats
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
# round all duration values to this many digits!
# some are stored as fractions and that's just inconvenient
ROUND_DURS_DIGITS = 5
# N.B. THE HEADERS ARE:
# 0: tunefamily
# 1: songid
# 2: motifid
# 3: begintime
# 4: endtime
# 5: duration
# 6: startindex
# 7: endindex
# 8: numberofnotes
# 9: motifclass
# 10: description
# 11: annotator
# 12: changes
# try to fetch a single motif
# def extractMotif(annEntry, songs):
# """
# given a row from the annotation file and the database of score files,
# return the notes of theassociated motif and some of its metadata as a
# dictionary.
# """
#
# songName = annEntry[1]
# inStart = int(annEntry[6])
# numNotes = int(annEntry[8])
#
# #add number of ties before start index from start index; meertens
# #DOESN'T count tied notes as notes but music21 DOES
# allNotes = songs[songName].score.flat.notes.stream()
# #subtract 1 here to get the first note of the occurence in the slice
# #so that we can get rid of it if it's a rest
# beforeSlice = allNotes[:inStart-1]
# numTies = 0
# for n in beforeSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
# inStart += numTies
#
# #do the same for ties inside of the snippet, but also keep track of where
# #they are and save that information with the motif so we don't have to go
# #through this procedure again
# numTies = 0
# inSlice = allNotes[inStart:(inStart+numNotes)]
# for n in inSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
#
# #this new numNotes will work with music21
# numNotes += numTies
#
# #NOW we know that we have the actual motif!
# motif = allNotes[inStart:(inStart+numNotes)]
#
# return {'notes':motif,
# 'startInd':inStart,
# 'endInd':(inStart+numNotes),
# 'songID':annEntry[1],
# 'motifClass':annEntry[9],
# 'duration':annEntry[5]}
# annotated first starting at 0, but tied notes are only counted for the onset
# must disregard tied notes when doing start/end indices tabarnak
# so: consider the list of notes up to the first index. if there's n ties
# that live behind the start index, increment the start index by n. when done,
# look 8 notes ahead and do the same thing
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
"""
given song name, occurrence start, occurrence end, and the database of score files,
return the notes of the associated pattern occurrence
useTies is a boolean determining whether or not tied notes count as
two notes or one for the purpose of indexing (true for 1, false for 2)
necessary bc MTC-ANN indexing doesn't count
"""
# inStart = int(annEntry[6])
# numNotes = int(annEntry[8])
numNotes = inEnd - inStart + 1 # including endpoints
# add number of ties before start index from start index; meertens
# DOESN'T count tied notes as notes but music21 DOES
allNotes = songs[songName].score.flat.notes.stream()
# subtract 1 here to get the first note of the occurence in the slice
# so that we can get rid of it if it's a rest
if(useTies):
beforeSlice = allNotes[:inStart-1]
numTies = 0
for n in beforeSlice:
if(n.tie is not None):
if(n.tie.type == 'start'):
numTies += 1
inStart += numTies
# do the same for ties inside of the snippet, but also keep track of where
# they are and save that information with the pattOcc so we don't have to go
# through this procedure again (TODO)
numTies = 0
inSlice = allNotes[inStart:(inStart+numNotes)]
for n in inSlice:
if(n.tie is not None):
if(n.tie.type == 'start'):
numTies += 1
# this new numNotes will work with music21
numNotes += numTies
pattOcc = allNotes[inStart:(inStart+numNotes)]
return pattOcc
def getFeaturesForSongs(score):
vec = {}
mel = score.flat.notes.stream()
noteNums = [x.pitch.midi for x in mel]
intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
couInt = dict(Counter(intervals))
for k in couInt.keys():
couInt[k] /= len(intervals)
vec['interval_probs'] = couInt
vec['pitch_mean'] = np.mean(noteNums)
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
couRtm = dict(Counter(noteDurs))
for k in couRtm.keys():
couRtm[k] /= len(noteDurs)
vec['duration_probs'] = couRtm
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014
# HISTOGRAMS:
# interval counting
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
for n in range(-3, 3):
num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
return vec
# single method that is passed an entry from the motifs dict
# and the database of songs and returns a dict that is a feature
# vector for that motif.
def getFeaturesForOccurrences(cur_class, songs):
max_length_occ = 10
vec = {}
mel = cur_class.score
# for now just remove rests
noteNums = [x.pitch.midi for x in mel]
intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
highest = max(noteNums)
lowest = min(noteNums)
vec['numNotes'] = len(noteNums)
vec['pitch_highest'] = highest
vec['pitch_lowest'] = lowest
vec['pitch_range'] = highest-lowest
vec['pitch_num_classes'] = len(set(noteNums))
vec['pitch_mean'] = np.mean(noteNums)
vec['pitch_std'] = np.std(noteNums)
vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
# pitch counting
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
vec['interval_max'] = max(np.abs(intervals))
vec['interval_min'] = min(np.abs(intervals))
vec['interval_largest_asc'] = max([max(intervals), 0])
vec['interval_largest_desc'] = min([min(intervals), 0])
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(noteNums)-1])
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
# interval counting
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
# -1 if monotonically down, 1 if up, else 0
if all([np.sign(x) == 1 for x in intervals]):
vec['interval_strict_asc_or_desc'] = 1
elif all([np.sign(x) == -1 for x in intervals]):
vec['interval_strict_asc_or_desc'] = -1
else:
vec['interval_strict_asc_or_desc'] = 0
# rhythmic properties
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
vec['rhythm_duration'] = sum(noteDurs)
vec['rhythm_longest_note'] = max(noteDurs)
vec['rhythm_shortest_note'] = min(noteDurs)
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014
vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs)-1]
# rhythm counting
for n in range(-3, 3):
num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
# POLYFIT IDEA
yCoords = [y - noteNums[0] for y in noteNums]
xtemp = [float(x.offset) / vec['rhythm_duration'] for x in mel]
xCoords = [x - xtemp[0] for x in xtemp]
# print(str(xCoords) + " vs " + str(yCoords))
polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
vec['polyfit_1'] = polyFit1[0][0]
vec['polyfit_residual_1'] = 0
if polyFit1[1].size > 0:
vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
vec['polyfit_2'] = 0
vec['polyfit_residual_2'] = 0
vec['polyfit_3'] = 0
vec['polyfit_residual_3'] = 0
if len(noteNums) >= 3:
polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
vec['polyfit_2'] = polyFit2[0][0]
if polyFit2[1].size > 0:
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
if len(noteNums) >= 4:
polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
vec['polyfit_3'] = polyFit3[0][0]
if polyFit3[1].size > 0:
vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
# add sequence representation of occurrence
zeros = [0 for i in range(max_length_occ)]
for i in range(max_length_occ):
vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
# differences between song and this motif
songVec = songs[cur_class.songName].songFeatures
song_diff_keys = [
'interval_mean',
'rhythm_variability',
'rhythm_density',
'interval_signs',
'pitch_mean',
'interval_prop_small',
'interval_prop_large'
]
song_diff_keys += [x for x in vec.keys() if '_count' in x]
for key in song_diff_keys:
vec['diff_' + key] = songVec[key] - vec[key]
# songScore = songs[motif['songName']]['score'].flat.notes.stream()
# songScoreNums = [x.pitch.midi for x in songScore]
# vec['intervalFollowing'] = 0
# if motif['endInd'] + 1 < len(songScoreNums):
# vec['intervalFollowing'] = songScoreNums[motif['endInd'] + 1] - noteNums[-1]
# vec['intervalPreceding'] = 0
# if motif['endInd'] - 1 > 0:
# vec['intervalPreceding'] = songScoreNums[motif['endInd'] - 1] - noteNums[0]
sumIntProbs = 1
for i in intervals:
sumIntProbs *= songVec['interval_probs'][i]
vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
sumDurProbs = 1
for d in noteDurs:
sumDurProbs *= songVec['duration_probs'][d]
vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
vec['rhythm_starts_on_downbeat'] = 0
vec['rhythm_crosses_measure'] = 0
vec['rhythm_start_beat_str'] = 0
vec['rhythm_last_beat_str'] = 0
try:
noteBeats = [x.beat for x in mel]
vec['rhythm_starts_on_downbeat'] = (noteBeats[0] == 1.0)
vec['rhythm_crosses_measure'] = sum([noteBeats[n] < noteBeats[n-1] for n in range(1, len(noteBeats))]) > 0
# figure out how to tell if note has associated time signature
noteStr = [x.beatStrength for x in mel]
vec['rhythm_start_beat_str'] = np.log(noteStr[0])
vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr)-1])
except m21.Music21ObjectException:
# this is not a good solution.
pass
# send it back
return vec
def getFeaturesForClasses(patternClass, occs, songs):
# take the average/std over all occurrences
vec = {}
vec['numOccs'] = len(patternClass.occNames)
occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()
for fk in occFeatureKeys:
allOccVals = [occs[occName].occFeatures[fk] for occName in patternClass.occNames]
vec["avg_" + fk] = np.mean(allOccVals)
vec["std_" + fk] = np.std(allOccVals)
scores = [occs[oc].score.flat for oc in patternClass.occNames]
noteNums = [[x.pitch.midi for x in mel] for mel in scores]
noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS)
for x in mel] for mel in scores]
flatNums = [x for subList in noteNums for x in subList]
vec['num_notes_total'] = len(flatNums)
vec['unique_pitch_prop_content'] = \
len(set(tuple(x) for x in noteNums)) / vec['numOccs']
vec['unique_rhythm_prop_content'] = \
len(set(tuple(x) for x in noteDurs)) / vec['numOccs']
pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec['numOccs'])]
vec['prop_unique_content'] = \
len(set(tuple(x) for x in pitchAndDurs)) / vec['numOccs']
return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest, pClasses, pOccs):
# so: we want to take a sample of our huge number of generated pattern classes
# such that the number of occurrences and average cardinality doesn't easily
# distinguish our sample from the annotated group.
# perform a quick and dirty knn to get a bunch of generated class names
# whose cardinalities and numOccs somewhat match the annotated data.
indexPairs = np.arange(len(annPClassNames))
indexPairs = np.concatenate([indexPairs, indexPairs])
np.random.shuffle(indexPairs)
indexPairs = np.split(indexPairs, len(indexPairs)/2)
# deep copy!
genPClassNamesCopy = list(genPClassNames)
filtGenPClassNames = []
for i in range(len(annPClassNames)):
tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
tarNumOccs = len(tar1.occNames)
tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
tarNumNotes = np.mean(tar2Notes)
candidateNameList = []
# calculate how close each generated class is to these parameters
for gcn in genPClassNamesCopy:
cand = pClasses[gcn]
candNumOccs = len(cand.occNames)
candNotes = [len(pOccs[on].score) for on in cand.occNames]
candNumNotes = np.mean(candNotes)
candScore = (candNumOccs - tarNumOccs)**2 + (candNumNotes - tarNumNotes)**2
candidateNameList.append([candScore, gcn])
# from the kNearest closest generated classes, choose one and remove
# that one from the copy array
candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
chop = candidateNameList[0:kNearest]
choice = chop[np.random.choice(kNearest)][1]
filtGenPClassNames.append(choice)
genPClassNamesCopy.remove(choice)
return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
chunk_len = int(np.floor(len(inp) / num_chunks))
chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]
if len(chunks) > num_chunks:
for i, x in enumerate(chunks[num_chunks]):
chunks[i].append(x)
del chunks[num_chunks]
return chunks
# just for testing: get all features
# plt.plot(sorted(inspectFeature('classAvg_pitch_mean',pClasses,genPClassNames + annPClassNames)))
def inspectFeature(featureName, table, tableNames, featsType="classFeatures"):
|
def scatterFeatures(fn1, fn2, table, tableNames):
xs = []
ys = []
types = []
for tn in tableNames:
item = table[tn]
xs.append(item.classFeatures[fn1])
ys.append(item.classFeatures[fn2])
if item['type'] == 'ann':
types.append('r')
else:
types.append('k')
print(types)
plt.scatter(xs, ys, c=types)
plt.xlabel(fn1)
plt.ylabel(fn2)
plt.show()
return
| ret = []
for tn in tableNames:
item = table[tn]
ret.append(item[featsType][featureName])
return ret | identifier_body |
featureExtractors.py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 17:42:18 2018
@author: Tim
"""
import music21 as m21
import music21.features.jSymbolic as jsym
import scipy.stats
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
# round all duration values to this many digits!
# some are stored as fractions and that's just inconvenient
ROUND_DURS_DIGITS = 5
# N.B. THE HEADERS ARE:
# 0: tunefamily
# 1: songid
# 2: motifid
# 3: begintime
# 4: endtime
# 5: duration
# 6: startindex
# 7: endindex
# 8: numberofnotes
# 9: motifclass
# 10: description
# 11: annotator
# 12: changes
# try to fetch a single motif
# def extractMotif(annEntry, songs):
# """
# given a row from the annotation file and the database of score files,
# return the notes of theassociated motif and some of its metadata as a
# dictionary.
# """
#
# songName = annEntry[1]
# inStart = int(annEntry[6])
# numNotes = int(annEntry[8])
#
# #add number of ties before start index from start index; meertens
# #DOESN'T count tied notes as notes but music21 DOES
# allNotes = songs[songName].score.flat.notes.stream()
# #subtract 1 here to get the first note of the occurence in the slice
# #so that we can get rid of it if it's a rest
# beforeSlice = allNotes[:inStart-1]
# numTies = 0
# for n in beforeSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
# inStart += numTies
#
# #do the same for ties inside of the snippet, but also keep track of where
# #they are and save that information with the motif so we don't have to go
# #through this procedure again
# numTies = 0
# inSlice = allNotes[inStart:(inStart+numNotes)]
# for n in inSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
#
# #this new numNotes will work with music21
# numNotes += numTies
#
# #NOW we know that we have the actual motif!
# motif = allNotes[inStart:(inStart+numNotes)]
#
# return {'notes':motif,
# 'startInd':inStart,
# 'endInd':(inStart+numNotes),
# 'songID':annEntry[1],
# 'motifClass':annEntry[9],
# 'duration':annEntry[5]}
# annotated first starting at 0, but tied notes are only counted for the onset
# must disregard tied notes when doing start/end indices tabarnak
# so: consider the list of notes up to the first index. if there's n ties
# that live behind the start index, increment the start index by n. when done,
# look 8 notes ahead and do the same thing
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
"""
given song name, occurrence start, occurrence end, and the database of score files,
return the notes of the associated pattern occurrence
useTies is a boolean determining whether or not tied notes count as
two notes or one for the purpose of indexing (true for 1, false for 2)
necessary bc MTC-ANN indexing doesn't count
"""
# inStart = int(annEntry[6])
# numNotes = int(annEntry[8])
numNotes = inEnd - inStart + 1 # including endpoints
# add number of ties before start index from start index; meertens
# DOESN'T count tied notes as notes but music21 DOES
allNotes = songs[songName].score.flat.notes.stream()
# subtract 1 here to get the first note of the occurence in the slice
# so that we can get rid of it if it's a rest
if(useTies):
beforeSlice = allNotes[:inStart-1]
numTies = 0
for n in beforeSlice:
if(n.tie is not None):
if(n.tie.type == 'start'):
numTies += 1
inStart += numTies
# do the same for ties inside of the snippet, but also keep track of where
# they are and save that information with the pattOcc so we don't have to go
# through this procedure again (TODO)
numTies = 0
inSlice = allNotes[inStart:(inStart+numNotes)]
for n in inSlice:
if(n.tie is not None):
if(n.tie.type == 'start'):
numTies += 1
# this new numNotes will work with music21
numNotes += numTies
pattOcc = allNotes[inStart:(inStart+numNotes)]
return pattOcc
def getFeaturesForSongs(score):
vec = {}
mel = score.flat.notes.stream()
noteNums = [x.pitch.midi for x in mel]
intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
couInt = dict(Counter(intervals))
for k in couInt.keys():
couInt[k] /= len(intervals)
vec['interval_probs'] = couInt
vec['pitch_mean'] = np.mean(noteNums)
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
couRtm = dict(Counter(noteDurs))
for k in couRtm.keys():
couRtm[k] /= len(noteDurs)
vec['duration_probs'] = couRtm
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014
# HISTOGRAMS:
# interval counting
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
for n in range(-3, 3):
num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
return vec
# single method that is passed an entry from the motifs dict
# and the database of songs and returns a dict that is a feature
# vector for that motif.
def getFeaturesForOccurrences(cur_class, songs):
max_length_occ = 10
vec = {} | mel = cur_class.score
# for now just remove rests
noteNums = [x.pitch.midi for x in mel]
intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
highest = max(noteNums)
lowest = min(noteNums)
vec['numNotes'] = len(noteNums)
vec['pitch_highest'] = highest
vec['pitch_lowest'] = lowest
vec['pitch_range'] = highest-lowest
vec['pitch_num_classes'] = len(set(noteNums))
vec['pitch_mean'] = np.mean(noteNums)
vec['pitch_std'] = np.std(noteNums)
vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
# pitch counting
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
vec['interval_max'] = max(np.abs(intervals))
vec['interval_min'] = min(np.abs(intervals))
vec['interval_largest_asc'] = max([max(intervals), 0])
vec['interval_largest_desc'] = min([min(intervals), 0])
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(noteNums)-1])
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
# interval counting
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
# -1 if monotonically down, 1 if up, else 0
if all([np.sign(x) == 1 for x in intervals]):
vec['interval_strict_asc_or_desc'] = 1
elif all([np.sign(x) == -1 for x in intervals]):
vec['interval_strict_asc_or_desc'] = -1
else:
vec['interval_strict_asc_or_desc'] = 0
# rhythmic properties
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
vec['rhythm_duration'] = sum(noteDurs)
vec['rhythm_longest_note'] = max(noteDurs)
vec['rhythm_shortest_note'] = min(noteDurs)
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014
vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs)-1]
# rhythm counting
for n in range(-3, 3):
num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
# POLYFIT IDEA
yCoords = [y - noteNums[0] for y in noteNums]
xtemp = [float(x.offset) / vec['rhythm_duration'] for x in mel]
xCoords = [x - xtemp[0] for x in xtemp]
# print(str(xCoords) + " vs " + str(yCoords))
polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
vec['polyfit_1'] = polyFit1[0][0]
vec['polyfit_residual_1'] = 0
if polyFit1[1].size > 0:
vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
vec['polyfit_2'] = 0
vec['polyfit_residual_2'] = 0
vec['polyfit_3'] = 0
vec['polyfit_residual_3'] = 0
if len(noteNums) >= 3:
polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
vec['polyfit_2'] = polyFit2[0][0]
if polyFit2[1].size > 0:
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
if len(noteNums) >= 4:
polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
vec['polyfit_3'] = polyFit3[0][0]
if polyFit3[1].size > 0:
vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
# add sequence representation of occurrence
zeros = [0 for i in range(max_length_occ)]
for i in range(max_length_occ):
vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
# differences between song and this motif
songVec = songs[cur_class.songName].songFeatures
song_diff_keys = [
'interval_mean',
'rhythm_variability',
'rhythm_density',
'interval_signs',
'pitch_mean',
'interval_prop_small',
'interval_prop_large'
]
song_diff_keys += [x for x in vec.keys() if '_count' in x]
for key in song_diff_keys:
vec['diff_' + key] = songVec[key] - vec[key]
# songScore = songs[motif['songName']]['score'].flat.notes.stream()
# songScoreNums = [x.pitch.midi for x in songScore]
# vec['intervalFollowing'] = 0
# if motif['endInd'] + 1 < len(songScoreNums):
# vec['intervalFollowing'] = songScoreNums[motif['endInd'] + 1] - noteNums[-1]
# vec['intervalPreceding'] = 0
# if motif['endInd'] - 1 > 0:
# vec['intervalPreceding'] = songScoreNums[motif['endInd'] - 1] - noteNums[0]
sumIntProbs = 1
for i in intervals:
sumIntProbs *= songVec['interval_probs'][i]
vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
sumDurProbs = 1
for d in noteDurs:
sumDurProbs *= songVec['duration_probs'][d]
vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
vec['rhythm_starts_on_downbeat'] = 0
vec['rhythm_crosses_measure'] = 0
vec['rhythm_start_beat_str'] = 0
vec['rhythm_last_beat_str'] = 0
try:
noteBeats = [x.beat for x in mel]
vec['rhythm_starts_on_downbeat'] = (noteBeats[0] == 1.0)
vec['rhythm_crosses_measure'] = sum([noteBeats[n] < noteBeats[n-1] for n in range(1, len(noteBeats))]) > 0
# figure out how to tell if note has associated time signature
noteStr = [x.beatStrength for x in mel]
vec['rhythm_start_beat_str'] = np.log(noteStr[0])
vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr)-1])
except m21.Music21ObjectException:
# this is not a good solution.
pass
# send it back
return vec
def getFeaturesForClasses(patternClass, occs, songs):
    """Aggregate occurrence-level features into class-level features.

    patternClass: record with an `occNames` list naming its occurrences.
    occs: dict mapping occurrence name -> record with `occFeatures` (dict)
        and a music21 `score`.
    songs: unused here; kept for signature parity with the other feature
        extractors in this module.
    Returns a dict with avg_/std_ of every occurrence feature plus
    content-uniqueness proportions.
    """
    # take the average/std over all occurrences
    vec = {}
    vec['numOccs'] = len(patternClass.occNames)
    # assumes every occurrence shares the same feature keys as the first one
    occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()
    for fk in occFeatureKeys:
        allOccVals = [occs[occName].occFeatures[fk] for occName in patternClass.occNames]
        vec["avg_" + fk] = np.mean(allOccVals)
        vec["std_" + fk] = np.std(allOccVals)
    scores = [occs[oc].score.flat for oc in patternClass.occNames]
    noteNums = [[x.pitch.midi for x in mel] for mel in scores]
    noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS)
                 for x in mel] for mel in scores]
    flatNums = [x for subList in noteNums for x in subList]
    vec['num_notes_total'] = len(flatNums)
    # proportion of occurrences whose pitch (resp. rhythm) sequence is unique
    vec['unique_pitch_prop_content'] = \
        len(set(tuple(x) for x in noteNums)) / vec['numOccs']
    vec['unique_rhythm_prop_content'] = \
        len(set(tuple(x) for x in noteDurs)) / vec['numOccs']
    # uniqueness of the combined (pitch sequence, duration sequence) content
    pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec['numOccs'])]
    vec['prop_unique_content'] = \
        len(set(tuple(x) for x in pitchAndDurs)) / vec['numOccs']
    return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest, pClasses, pOccs):
    """Sample generated pattern classes that resemble the annotated ones.

    Returns len(annPClassNames) names drawn without replacement from
    genPClassNames, chosen so numOccs / mean cardinality roughly match the
    annotated classes.
    """
    # so: we want to take a sample of our huge number of generated pattern classes
    # such that the number of occurrences and average cardinality doesn't easily
    # distinguish our sample from the annotated group.
    # perform a quick and dirty knn to get a bunch of generated class names
    # whose cardinalities and numOccs somewhat match the annotated data.
    # pair annotated classes at random: one of each pair supplies the target
    # numOccs, the other the target mean note count
    indexPairs = np.arange(len(annPClassNames))
    indexPairs = np.concatenate([indexPairs, indexPairs])
    np.random.shuffle(indexPairs)
    indexPairs = np.split(indexPairs, len(indexPairs)/2)
    # copy of the name list only — entries themselves are never mutated
    genPClassNamesCopy = list(genPClassNames)
    filtGenPClassNames = []
    for i in range(len(annPClassNames)):
        tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
        tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
        tarNumOccs = len(tar1.occNames)
        tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
        tarNumNotes = np.mean(tar2Notes)
        candidateNameList = []
        # calculate how close each generated class is to these parameters
        for gcn in genPClassNamesCopy:
            cand = pClasses[gcn]
            candNumOccs = len(cand.occNames)
            candNotes = [len(pOccs[on].score) for on in cand.occNames]
            candNumNotes = np.mean(candNotes)
            # squared distance in (numOccs, mean note count) space
            candScore = (candNumOccs - tarNumOccs)**2 + (candNumNotes - tarNumNotes)**2
            candidateNameList.append([candScore, gcn])
        # from the kNearest closest generated classes, choose one and remove
        # that one from the copy array
        candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
        chop = candidateNameList[0:kNearest]
        # NOTE(review): if fewer than kNearest candidates remain, `chop` is
        # shorter than kNearest and np.random.choice(kNearest) can produce an
        # out-of-range index — should be np.random.choice(len(chop)).
        choice = chop[np.random.choice(kNearest)][1]
        filtGenPClassNames.append(choice)
        genPClassNamesCopy.remove(choice)
    return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
    """Split list `inp` into at most `num_chunks` lists covering every element.

    Chunks are floor(len(inp) / num_chunks) long; surplus elements are
    redistributed round-robin over the leading chunks.

    BUGFIX: the original only folded back ONE surplus chunk, so inputs that
    produced two or more extra chunks (e.g. len 11 into 4) silently returned
    more than num_chunks chunks; it also raised ValueError (range step 0)
    when len(inp) < num_chunks.
    """
    # guard against a zero step when the input is shorter than num_chunks
    chunk_len = max(1, int(np.floor(len(inp) / num_chunks)))
    chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]
    # fold ALL surplus chunks back into the first num_chunks, round-robin
    leftover = [x for extra in chunks[num_chunks:] for x in extra]
    chunks = chunks[:num_chunks]
    for i, x in enumerate(leftover):
        chunks[i % num_chunks].append(x)
    return chunks
# just for testing: get all features
# plt.plot(sorted(inspectFeature('classAvg_pitch_mean',pClasses,genPClassNames + annPClassNames)))
def inspectFeature(featureName, table, tableNames, featsType="classFeatures"):
    """Collect one feature's value across the named table entries.

    featureName: key inside each entry's feature dict.
    table: dict of records indexed by name; each record is subscriptable
        with the feature-dict key `featsType`.
    tableNames: names to read, in the order the result should follow.
    featsType: which feature dict to read (e.g. "classFeatures", "occFeatures").
    Returns the list of values, one per name in tableNames.
    """
    ret = []
    for tn in tableNames:
        item = table[tn]
        ret.append(item[featsType][featureName])
    return ret
def scatterFeatures(fn1, fn2, table, tableNames):
xs = []
ys = []
types = []
for tn in tableNames:
item = table[tn]
xs.append(item.classFeatures[fn1])
ys.append(item.classFeatures[fn2])
if item['type'] == 'ann':
types.append('r')
else:
types.append('k')
print(types)
plt.scatter(xs, ys, c=types)
plt.xlabel(fn1)
plt.ylabel(fn2)
plt.show()
return | random_line_split | |
featureExtractors.py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 17:42:18 2018
@author: Tim
"""
import music21 as m21
import music21.features.jSymbolic as jsym
import scipy.stats
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
# round all duration values to this many digits!
# some are stored as fractions and that's just inconvenient
ROUND_DURS_DIGITS = 5
# N.B. THE HEADERS ARE:
# 0: tunefamily
# 1: songid
# 2: motifid
# 3: begintime
# 4: endtime
# 5: duration
# 6: startindex
# 7: endindex
# 8: numberofnotes
# 9: motifclass
# 10: description
# 11: annotator
# 12: changes
# try to fetch a single motif
# def extractMotif(annEntry, songs):
# """
# given a row from the annotation file and the database of score files,
# return the notes of the associated motif and some of its metadata as a
# dictionary.
# """
#
# songName = annEntry[1]
# inStart = int(annEntry[6])
# numNotes = int(annEntry[8])
#
# #add number of ties before start index from start index; meertens
# #DOESN'T count tied notes as notes but music21 DOES
# allNotes = songs[songName].score.flat.notes.stream()
# subtract 1 here to get the first note of the occurrence in the slice
# #so that we can get rid of it if it's a rest
# beforeSlice = allNotes[:inStart-1]
# numTies = 0
# for n in beforeSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
# inStart += numTies
#
# #do the same for ties inside of the snippet, but also keep track of where
# #they are and save that information with the motif so we don't have to go
# #through this procedure again
# numTies = 0
# inSlice = allNotes[inStart:(inStart+numNotes)]
# for n in inSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
#
# #this new numNotes will work with music21
# numNotes += numTies
#
# #NOW we know that we have the actual motif!
# motif = allNotes[inStart:(inStart+numNotes)]
#
# return {'notes':motif,
# 'startInd':inStart,
# 'endInd':(inStart+numNotes),
# 'songID':annEntry[1],
# 'motifClass':annEntry[9],
# 'duration':annEntry[5]}
# annotated first starting at 0, but tied notes are only counted for the onset
# must disregard tied notes when computing start/end indices
# so: consider the list of notes up to the first index. if there's n ties
# that live behind the start index, increment the start index by n. when done,
# look 8 notes ahead and do the same thing
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
    """
    Given a song name, occurrence start/end indices, and the database of
    score files, return the notes of the associated pattern occurrence.
    useTies is a boolean determining whether or not tied notes count as
    two notes or one for the purpose of indexing (true for 1, false for 2);
    necessary bc MTC-ANN indexing doesn't count tie continuations but
    music21 does.
    """
    # inStart = int(annEntry[6])
    # numNotes = int(annEntry[8])
    numNotes = inEnd - inStart + 1  # including endpoints
    allNotes = songs[songName].score.flat.notes.stream()
    # NOTE(review): indentation reconstructed — both tie corrections are taken
    # to apply only when useTies is set, since with useTies False the indices
    # already count tie continuations; confirm against the original layout.
    if(useTies):
        # add number of ties before start index to the start index; Meertens
        # DOESN'T count tied notes as notes but music21 DOES
        # subtract 1 here to get the first note of the occurrence in the slice
        # so that we can get rid of it if it's a rest
        beforeSlice = allNotes[:inStart-1]
        numTies = 0
        for n in beforeSlice:
            if(n.tie is not None):
                if(n.tie.type == 'start'):
                    numTies += 1
        inStart += numTies
        # do the same for ties inside of the snippet, but also keep track of where
        # they are and save that information with the pattOcc so we don't have to go
        # through this procedure again (TODO)
        numTies = 0
        inSlice = allNotes[inStart:(inStart+numNotes)]
        for n in inSlice:
            if(n.tie is not None):
                if(n.tie.type == 'start'):
                    numTies += 1
        # this new numNotes will work with music21
        numNotes += numTies
    pattOcc = allNotes[inStart:(inStart+numNotes)]
    return pattOcc
def getFeaturesForSongs(score):
    """Compute a song-level feature vector from a music21 score.

    Returns a dict with interval/duration probability tables, summary
    statistics, and normalized histogram counts; consumed later by
    getFeaturesForOccurrences for song-relative features.
    """
    vec = {}
    mel = score.flat.notes.stream()
    noteNums = [x.pitch.midi for x in mel]
    # NOTE(review): a one-note melody gives len(intervals) == 0 and the
    # divisions below raise ZeroDivisionError — confirm callers never pass one.
    intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
    # empirical probability of each melodic interval
    couInt = dict(Counter(intervals))
    for k in couInt.keys():
        couInt[k] /= len(intervals)
    vec['interval_probs'] = couInt
    vec['pitch_mean'] = np.mean(noteNums)
    vec['interval_mean'] = np.mean(np.abs(intervals))
    # net melodic direction in [-1, 1]
    vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
    vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
    vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
    # durations rounded because some arrive as fractions.Fraction
    noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
    couRtm = dict(Counter(noteDurs))
    for k in couRtm.keys():
        couRtm[k] /= len(noteDurs)
    vec['duration_probs'] = couRtm
    vec['rhythm_density'] = np.mean(noteDurs)
    vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])  # from Collins 2014
    # HISTOGRAMS:
    # interval counting
    for n in range(13):
        num = len([x for x in intervals if abs(x) == n])
        vec['interval_count_' + str(n)] = num / len(intervals)
    # pitch-class counting
    for n in range(12):
        num = len([x for x in noteNums if abs(x) % 12 == n])
        vec['pitch_class_count_' + str(n)] = num / len(noteNums)
    # duration counting, bucketed by powers of two
    for n in range(-3, 3):
        num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
        vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
    return vec
# single method that is passed an entry from the motifs dict
# and the database of songs and returns a dict that is a feature
# vector for that motif.
def getFeaturesForOccurrences(cur_class, songs):
    """Compute a feature dict for a single pattern occurrence.

    cur_class: occurrence record with a music21 `score` (its notes) and the
        `songName` of its parent song.
    songs: dict of song records; each must already carry `songFeatures`
        (see getFeaturesForSongs) for the diff/expectation features.
    Returns a dict of pitch, interval, rhythm, contour (polyfit), sequence,
    and song-relative features. Assumes at least 2 notes in the occurrence.
    """
    # occurrences longer than this are truncated in the seq_* features
    max_length_occ = 10
    vec = {}
    mel = cur_class.score
    # for now just remove rests
    noteNums = [x.pitch.midi for x in mel]
    intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
    highest = max(noteNums)
    lowest = min(noteNums)
    vec['numNotes'] = len(noteNums)
    vec['pitch_highest'] = highest
    vec['pitch_lowest'] = lowest
    vec['pitch_range'] = highest-lowest
    vec['pitch_num_classes'] = len(set(noteNums))
    vec['pitch_mean'] = np.mean(noteNums)
    vec['pitch_std'] = np.std(noteNums)
    # relative position (0..1) of the extreme pitches within the occurrence
    vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
    vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
    # pitch counting
    for n in range(12):
        num = len([x for x in noteNums if abs(x) % 12 == n])
        vec['pitch_class_count_' + str(n)] = num / len(noteNums)
    vec['interval_max'] = max(np.abs(intervals))
    vec['interval_min'] = min(np.abs(intervals))
    vec['interval_largest_asc'] = max([max(intervals), 0])
    vec['interval_largest_desc'] = min([min(intervals), 0])
    vec['interval_mean'] = np.mean(np.abs(intervals))
    vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
    vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
    # sign of first-note-minus-last-note (overall fall = +1, rise = -1)
    vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(noteNums)-1])
    vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
    # interval counting
    for n in range(13):
        num = len([x for x in intervals if abs(x) == n])
        vec['interval_count_' + str(n)] = num / len(intervals)
    # -1 if monotonically down, 1 if up, else 0
    if all([np.sign(x) == 1 for x in intervals]):
        vec['interval_strict_asc_or_desc'] = 1
    elif all([np.sign(x) == -1 for x in intervals]):
        vec['interval_strict_asc_or_desc'] = -1
    else:
        vec['interval_strict_asc_or_desc'] = 0
    # rhythmic properties
    noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
    vec['rhythm_duration'] = sum(noteDurs)
    vec['rhythm_longest_note'] = max(noteDurs)
    vec['rhythm_shortest_note'] = min(noteDurs)
    vec['rhythm_density'] = np.mean(noteDurs)
    vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])  # from Collins 2014
    vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs)-1]
    # rhythm counting
    for n in range(-3, 3):
        num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
        vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
    # POLYFIT IDEA: fit low-order polynomials to the pitch contour
    # (pitch relative to first note vs. time normalized by total duration)
    yCoords = [y - noteNums[0] for y in noteNums]
    xtemp = [float(x.offset) / vec['rhythm_duration'] for x in mel]
    xCoords = [x - xtemp[0] for x in xtemp]
    # print(str(xCoords) + " vs " + str(yCoords))
    polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
    vec['polyfit_1'] = polyFit1[0][0]
    # np.polyfit returns an empty residual array for degenerate fits
    vec['polyfit_residual_1'] = 0
    if polyFit1[1].size > 0:
        vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
    vec['polyfit_2'] = 0
    vec['polyfit_residual_2'] = 0
    vec['polyfit_3'] = 0
    vec['polyfit_residual_3'] = 0
    # higher-order fits only when there are enough points to constrain them
    if len(noteNums) >= 3:
        polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
        vec['polyfit_2'] = polyFit2[0][0]
        if polyFit2[1].size > 0:
            vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
    if len(noteNums) >= 4:
        polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
        vec['polyfit_3'] = polyFit3[0][0]
        if polyFit3[1].size > 0:
            vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
    # add sequence representation of occurrence, zero-padded/truncated
    # to max_length_occ entries
    zeros = [0 for i in range(max_length_occ)]
    for i in range(max_length_occ):
        vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
        vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
        vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
    # differences between song and this motif
    songVec = songs[cur_class.songName].songFeatures
    song_diff_keys = [
        'interval_mean',
        'rhythm_variability',
        'rhythm_density',
        'interval_signs',
        'pitch_mean',
        'interval_prop_small',
        'interval_prop_large'
    ]
    song_diff_keys += [x for x in vec.keys() if '_count' in x]
    for key in song_diff_keys:
        vec['diff_' + key] = songVec[key] - vec[key]
    # songScore = songs[motif['songName']]['score'].flat.notes.stream()
    # songScoreNums = [x.pitch.midi for x in songScore]
    # vec['intervalFollowing'] = 0
    # if motif['endInd'] + 1 < len(songScoreNums):
    #     vec['intervalFollowing'] = songScoreNums[motif['endInd'] + 1] - noteNums[-1]
    # vec['intervalPreceding'] = 0
    # if motif['endInd'] - 1 > 0:
    #     vec['intervalPreceding'] = songScoreNums[motif['endInd'] - 1] - noteNums[0]
    # log-likelihood of this interval sequence under the song's
    # empirical interval distribution (independence assumption)
    sumIntProbs = 1
    for i in intervals:
        sumIntProbs *= songVec['interval_probs'][i]
    vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
    sumDurProbs = 1
    for d in noteDurs:
        sumDurProbs *= songVec['duration_probs'][d]
    vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
    vec['rhythm_starts_on_downbeat'] = 0
    vec['rhythm_crosses_measure'] = 0
    vec['rhythm_start_beat_str'] = 0
    vec['rhythm_last_beat_str'] = 0
    try:
        noteBeats = [x.beat for x in mel]
        vec['rhythm_starts_on_downbeat'] = (noteBeats[0] == 1.0)
        # a beat number lower than its predecessor implies a barline was crossed
        vec['rhythm_crosses_measure'] = sum([noteBeats[n] < noteBeats[n-1] for n in range(1, len(noteBeats))]) > 0
        # figure out how to tell if note has associated time signature
        noteStr = [x.beatStrength for x in mel]
        vec['rhythm_start_beat_str'] = np.log(noteStr[0])
        vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr)-1])
    except m21.Music21ObjectException:
        # this is not a good solution: beat features silently keep their
        # zero defaults when music21 cannot resolve beat positions.
        pass
    # send it back
    return vec
def getFeaturesForClasses(patternClass, occs, songs):
    """Aggregate occurrence-level features into class-level features.

    BUGFIX: restored the function name, which was corrupted to `def | (...)`
    in the source; the name matches the identical definition earlier in
    this file.

    patternClass: record with an `occNames` list naming its occurrences.
    occs: dict mapping occurrence name -> record with `occFeatures` (dict)
        and a music21 `score`.
    songs: unused here; kept for signature parity with the other extractors.
    Returns a dict with avg_/std_ of every occurrence feature plus
    content-uniqueness proportions.
    """
    # take the average/std over all occurrences
    vec = {}
    vec['numOccs'] = len(patternClass.occNames)
    # assumes every occurrence shares the same feature keys as the first one
    occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()
    for fk in occFeatureKeys:
        allOccVals = [occs[occName].occFeatures[fk] for occName in patternClass.occNames]
        vec["avg_" + fk] = np.mean(allOccVals)
        vec["std_" + fk] = np.std(allOccVals)
    scores = [occs[oc].score.flat for oc in patternClass.occNames]
    noteNums = [[x.pitch.midi for x in mel] for mel in scores]
    noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS)
                 for x in mel] for mel in scores]
    flatNums = [x for subList in noteNums for x in subList]
    vec['num_notes_total'] = len(flatNums)
    # proportion of occurrences whose pitch (resp. rhythm) sequence is unique
    vec['unique_pitch_prop_content'] = \
        len(set(tuple(x) for x in noteNums)) / vec['numOccs']
    vec['unique_rhythm_prop_content'] = \
        len(set(tuple(x) for x in noteDurs)) / vec['numOccs']
    # uniqueness of the combined (pitch sequence, duration sequence) content
    pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec['numOccs'])]
    vec['prop_unique_content'] = \
        len(set(tuple(x) for x in pitchAndDurs)) / vec['numOccs']
    return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest, pClasses, pOccs):
    """Sample generated pattern classes that resemble the annotated ones.

    We want a sample of the generated classes such that occurrence count and
    average cardinality don't easily distinguish the sample from the
    annotated group. A quick-and-dirty kNN: for each annotated class (pairing
    targets drawn at random from the annotated set), score every remaining
    generated class by squared distance in (numOccs, mean note count) space,
    pick uniformly among the kNearest best, and remove the pick from the
    pool (sampling without replacement).

    Returns len(annPClassNames) names drawn from genPClassNames.
    """
    # pair annotated classes at random: one of each pair supplies the target
    # numOccs, the other the target mean note count
    indexPairs = np.arange(len(annPClassNames))
    indexPairs = np.concatenate([indexPairs, indexPairs])
    np.random.shuffle(indexPairs)
    indexPairs = np.split(indexPairs, len(indexPairs)/2)
    # copy of the name list only — entries themselves are never mutated
    genPClassNamesCopy = list(genPClassNames)
    filtGenPClassNames = []
    for i in range(len(annPClassNames)):
        tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
        tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
        tarNumOccs = len(tar1.occNames)
        tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
        tarNumNotes = np.mean(tar2Notes)
        candidateNameList = []
        # calculate how close each generated class is to these parameters
        for gcn in genPClassNamesCopy:
            cand = pClasses[gcn]
            candNumOccs = len(cand.occNames)
            candNotes = [len(pOccs[on].score) for on in cand.occNames]
            candNumNotes = np.mean(candNotes)
            candScore = (candNumOccs - tarNumOccs)**2 + (candNumNotes - tarNumNotes)**2
            candidateNameList.append([candScore, gcn])
        # from the kNearest closest generated classes, choose one and remove
        # that one from the copy array
        candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
        chop = candidateNameList[0:kNearest]
        # BUGFIX: index the actual (possibly shorter) slice; the original
        # np.random.choice(kNearest) could exceed len(chop) whenever fewer
        # than kNearest candidates remained in the pool.
        choice = chop[np.random.choice(len(chop))][1]
        filtGenPClassNames.append(choice)
        genPClassNamesCopy.remove(choice)
    return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
    """Split list `inp` into at most `num_chunks` lists covering every element.

    Chunks are floor(len(inp) / num_chunks) long; surplus elements are
    redistributed round-robin over the leading chunks.

    BUGFIX: the original only folded back ONE surplus chunk, so inputs that
    produced two or more extra chunks (e.g. len 11 into 4) silently returned
    more than num_chunks chunks; it also raised ValueError (range step 0)
    when len(inp) < num_chunks.
    """
    # guard against a zero step when the input is shorter than num_chunks
    chunk_len = max(1, int(np.floor(len(inp) / num_chunks)))
    chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]
    # fold ALL surplus chunks back into the first num_chunks, round-robin
    leftover = [x for extra in chunks[num_chunks:] for x in extra]
    chunks = chunks[:num_chunks]
    for i, x in enumerate(leftover):
        chunks[i % num_chunks].append(x)
    return chunks
# just for testing: get all features
# plt.plot(sorted(inspectFeature('classAvg_pitch_mean',pClasses,genPClassNames + annPClassNames)))
def inspectFeature(featureName, table, tableNames, featsType="classFeatures"):
    """Collect one feature's value across the named table entries.

    featureName: key inside each entry's feature dict.
    table: dict of records indexed by name; each record is subscriptable
        with the feature-dict key `featsType`.
    tableNames: names to read, in the order the result should follow.
    featsType: which feature dict to read (e.g. "classFeatures", "occFeatures").
    Returns the list of values, one per name in tableNames.
    """
    return [table[name][featsType][featureName] for name in tableNames]
def scatterFeatures(fn1, fn2, table, tableNames):
    """Scatter-plot class feature fn1 against fn2 for the named entries.

    Entries whose type is 'ann' (annotated) are drawn red; all others
    (generated) black. Blocks on plt.show(); returns nothing.
    """
    xs = []
    ys = []
    types = []
    for tn in tableNames:
        item = table[tn]
        xs.append(item.classFeatures[fn1])
        ys.append(item.classFeatures[fn2])
        # NOTE(review): attribute access above vs subscript access below —
        # confirm table entries really support both, else item['type'] raises.
        if item['type'] == 'ann':
            types.append('r')
        else:
            types.append('k')
    print(types)  # NOTE(review): debug print — consider removing
    plt.scatter(xs, ys, c=types)
    plt.xlabel(fn1)
    plt.ylabel(fn2)
    plt.show()
    return
| getFeaturesForClasses | identifier_name |
single_train.py | import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
import logging
import itertools
import torch
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from setproctitle import setproctitle
from bisect import bisect
from datetime import datetime
import numpy as np
from data.dataset import VisDialDataset
from visdial.encoders import Encoder
from visdial.decoders import Decoder
from visdial.model import EncoderDecoderModel
from visdial.utils.checkpointing import CheckpointManager, load_checkpoint
from single_evaluation import Evaluation
class MVAN(object):
    def __init__(self, hparams):
        """Store hparams, seed all RNGs, select the device, set process title.

        hparams: experiment config; must provide random_seed (list),
            gpu_ids (list, first entry < 0 means CPU), dataset_version,
            and model_name.
        """
        self.hparams = hparams
        self._logger = logging.getLogger(__name__)
        # seed every RNG source with the first configured seed, and force
        # deterministic cuDNN (slower but reproducible)
        np.random.seed(hparams.random_seed[0])
        torch.manual_seed(hparams.random_seed[0])
        torch.cuda.manual_seed_all(hparams.random_seed[0])
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        # first gpu id selects the primary device; negative means CPU
        self.device = (
            torch.device("cuda", self.hparams.gpu_ids[0])
            if self.hparams.gpu_ids[0] >= 0
            else torch.device("cpu")
        )
        setproctitle(hparams.dataset_version + '_' + hparams.model_name + '_' + str(hparams.random_seed[0]))
# def _build_data_process(self):
def _build_dataloader(self):
# =============================================================================
# SETUP DATASET, DATALOADER
# =============================================================================
|
    def _build_model(self):
        """Build encoder/decoder, loss, optimizer, and LR scheduler.

        Requires _build_dataloader() to have run (uses self.train_dataset).
        Sets self.model, self.criterion, self.iterations, self.optimizer,
        self.scheduler.
        """
        # =============================================================================
        #   MODEL : Encoder, Decoder
        # =============================================================================
        print('\t* Building model...')
        # Pass vocabulary to construct Embedding layer.
        encoder = Encoder(self.hparams, self.train_dataset.vocabulary)
        decoder = Decoder(self.hparams, self.train_dataset.vocabulary)
        print("Encoder: {}".format(self.hparams.encoder))
        print("Decoder: {}".format(self.hparams.decoder))
        # New: Initializing word_embed using GloVe
        if self.hparams.glove_npy != '':
            encoder.word_embed.weight.data = torch.from_numpy(np.load(self.hparams.glove_npy))
            print("Loaded glove vectors from {}".format(self.hparams.glove_npy))
        # Share word embedding between encoder and decoder.
        decoder.word_embed = encoder.word_embed
        # Wrap encoder and decoder in a model.
        self.model = EncoderDecoderModel(encoder, decoder)
        self.model = self.model.to(self.device)
        # Use Multi-GPUs
        if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:
            self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)
        # =============================================================================
        #   CRITERION
        # =============================================================================
        # discriminative decoder: CE over answer options;
        # generative decoder: token-level CE ignoring padding
        if "disc" in self.hparams.decoder:
            self.criterion = nn.CrossEntropyLoss()
        elif "gen" in self.hparams.decoder:
            self.criterion = nn.CrossEntropyLoss(ignore_index=self.train_dataset.vocabulary.PAD_INDEX)
        # Total Iterations -> for learning rate scheduler
        if self.hparams.training_splits == "trainval":
            self.iterations = (len(self.train_dataset) + len(self.valid_dataset)) // self.hparams.virtual_batch_size
        else:
            self.iterations = len(self.train_dataset) // self.hparams.virtual_batch_size
        # =============================================================================
        #   OPTIMIZER, SCHEDULER
        # =============================================================================
        def lr_lambda_fun(current_iteration: int) -> float:
            """Returns a learning rate multiplier.
            Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,
            and then gets multiplied by `lr_gamma` every time a milestone is crossed.
            """
            current_epoch = float(current_iteration) / self.iterations
            if current_epoch <= self.hparams.warmup_epochs:
                # linear warmup from warmup_factor to 1.0
                alpha = current_epoch / float(self.hparams.warmup_epochs)
                return self.hparams.warmup_factor * (1.0 - alpha) + alpha
            else:
                return_val = 1.0
                # two milestone schedules: lr_gamma decay, then lr_gamma2 decay
                if current_epoch >= self.hparams.lr_milestones[0] and current_epoch < self.hparams.lr_milestones2[0]:
                    idx = bisect(self.hparams.lr_milestones, current_epoch)
                    return_val = pow(self.hparams.lr_gamma, idx)
                elif current_epoch >= self.hparams.lr_milestones2[0]:
                    idx = bisect(self.hparams.lr_milestones2, current_epoch)
                    return_val = self.hparams.lr_gamma * pow(self.hparams.lr_gamma2, idx)
                return return_val
        if self.hparams.lr_scheduler == "LambdaLR":
            self.optimizer = optim.Adam(self.model.parameters(), lr=self.hparams.initial_lr)
            self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lr_lambda_fun)
        else:
            raise NotImplementedError
        print(
            """
            # -------------------------------------------------------------------------
            #   Model Build Finished
            # -------------------------------------------------------------------------
            """
        )
    def _setup_training(self):
        """Prepare checkpointing/tensorboard and optionally resume training.

        Sets self.summary_writer, self.checkpoint_manager, self.start_epoch;
        when hparams.load_pthpath is given, restores model/optimizer state
        and resumes from the epoch encoded in the checkpoint filename.
        """
        # NOTE(review): self.save_dirpath is only assigned when the default
        # 'checkpoints/' is configured — confirm hparams always carries an
        # absolute path otherwise.
        if self.hparams.save_dirpath == 'checkpoints/':
            self.save_dirpath = os.path.join(self.hparams.root_dir, self.hparams.save_dirpath)
        self.summary_writer = SummaryWriter(self.save_dirpath)
        self.checkpoint_manager = CheckpointManager(
            self.model, self.optimizer, self.save_dirpath, hparams=self.hparams
        )
        # If loading from checkpoint, adjust start epoch and load parameters.
        if self.hparams.load_pthpath == "":
            self.start_epoch = 1
        else:
            # "path/to/checkpoint_xx.pth" -> xx
            self.start_epoch = int(self.hparams.load_pthpath.split("_")[-1][:-4])
            self.start_epoch += 1
            model_state_dict, optimizer_state_dict = load_checkpoint(self.hparams.load_pthpath)
            # DataParallel wraps the real model in .module
            if isinstance(self.model, nn.DataParallel):
                self.model.module.load_state_dict(model_state_dict)
            else:
                self.model.load_state_dict(model_state_dict)
            self.optimizer.load_state_dict(optimizer_state_dict)
            self.previous_model_path = self.hparams.load_pthpath
            print("Loaded model from {}".format(self.hparams.load_pthpath))
        print(
            """
            # -------------------------------------------------------------------------
            #   Setup Training Finished
            # -------------------------------------------------------------------------
            """
        )
    def _loss_fn(self, epoch, batch, output):
        """Cross-entropy between decoder output and the target answers.

        epoch: unused; kept for interface symmetry with callers.
        batch: uses "ans_ind" (option index) for discriminative decoders,
            "ans_out" (token ids) for generative ones.
        output: decoder scores; flattened to (N, num_classes) for the loss.
        Returns a scalar loss tensor.
        """
        target = (batch["ans_ind"] if "disc" in self.hparams.decoder else batch["ans_out"])
        batch_loss = self.criterion(output.view(-1, output.size(-1)), target.view(-1).to(self.device))
        return batch_loss
    def train(self):
        """Main training loop: gradient-accumulated updates, logging,
        per-epoch checkpointing and validation.

        Builds the dataloader/model/training state, then for each epoch runs
        batched forward/backward passes, stepping the optimizer only once
        `virtual_batch_size` samples have accumulated.
        """
        self._build_dataloader()
        self._build_model()
        self._setup_training()
        # Evaluation Setup
        evaluation = Evaluation(self.hparams, model=self.model, split="val")
        # Forever increasing counter to keep track of iterations (for tensorboard log).
        global_iteration_step = (self.start_epoch - 1) * self.iterations
        running_loss = 0.0  # New
        train_begin = datetime.utcnow()  # New
        print(
            """
            # -------------------------------------------------------------------------
            #   Model Train Starts (NEW)
            # -------------------------------------------------------------------------
            """
        )
        for epoch in range(self.start_epoch, self.hparams.num_epochs):
            self.model.train()
            # -------------------------------------------------------------------------
            #   ON EPOCH START  (combine dataloaders if training on train + val)
            # -------------------------------------------------------------------------
            combined_dataloader = itertools.chain(self.train_dataloader)
            print(f"\nTraining for epoch {epoch}:", "Total Iter:", self.iterations)
            tqdm_batch_iterator = tqdm(combined_dataloader)
            accumulate_batch = 0  # taesun New
            for i, batch in enumerate(tqdm_batch_iterator):
                # shallow copy so device-moved tensors don't clobber the batch
                buffer_batch = batch.copy()
                for key in batch:
                    buffer_batch[key] = buffer_batch[key].to(self.device)
                output = self.model(buffer_batch)
                batch_loss = self._loss_fn(epoch, batch, output)
                batch_loss.backward()
                # gradient accumulation: step only after virtual_batch_size
                # samples (or on the last batch of the epoch)
                accumulate_batch += batch["img_ids"].shape[0]
                if self.hparams.virtual_batch_size == accumulate_batch \
                        or i == (len(self.train_dataset) // self.hparams.train_batch_size):  # last batch
                    self.optimizer.step()
                    # --------------------------------------------------------------------
                    # update running loss (EMA) and decay learning rates
                    # --------------------------------------------------------------------
                    if running_loss > 0.0:
                        running_loss = 0.95 * running_loss + 0.05 * batch_loss.item()
                    else:
                        running_loss = batch_loss.item()
                    self.optimizer.zero_grad()
                    accumulate_batch = 0
                    # NOTE(review): passing the step to scheduler.step() is a
                    # deprecated signature in newer torch — confirm version.
                    self.scheduler.step(global_iteration_step)
                    global_iteration_step += 1
                    # torch.cuda.empty_cache()
                    description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
                        datetime.utcnow() - train_begin,
                        epoch,
                        global_iteration_step, running_loss,
                        self.optimizer.param_groups[0]['lr'])
                    tqdm_batch_iterator.set_description(description)
                    # tensorboard
                    if global_iteration_step % self.hparams.tensorboard_step == 0:
                        description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
                            datetime.utcnow() - train_begin,
                            epoch,
                            global_iteration_step, running_loss,
                            self.optimizer.param_groups[0]['lr'],
                        )
                        self._logger.info(description)
                        # tensorboard writing scalar
                        self.summary_writer.add_scalar(
                            "train/loss", batch_loss, global_iteration_step
                        )
                        self.summary_writer.add_scalar(
                            "train/lr", self.optimizer.param_groups[0]["lr"], global_iteration_step
                        )
            # -------------------------------------------------------------------------
            #   ON EPOCH END  (checkpointing and validation)
            # -------------------------------------------------------------------------
            self.checkpoint_manager.step(epoch)
            self.previous_model_path = os.path.join(self.checkpoint_manager.ckpt_dirpath, "checkpoint_%d.pth" % (epoch))
            self._logger.info(self.previous_model_path)
            # for v0.9, only evaluate at the final epoch
            if epoch < self.hparams.num_epochs - 1 and self.hparams.dataset_version == '0.9':
                continue
            torch.cuda.empty_cache()
            evaluation.run_evaluate(self.previous_model_path, global_iteration_step, self.summary_writer,
                                    os.path.join(self.checkpoint_manager.ckpt_dirpath, "ranks_%d_valid.json" % epoch))
            torch.cuda.empty_cache()
return self.previous_model_path | old_split = "train" if self.hparams.dataset_version == "0.9" else None
self.train_dataset = VisDialDataset(
self.hparams,
overfit=self.hparams.overfit,
split="train",
old_split = old_split
)
collate_fn = None
if "dan" in self.hparams.img_feature_type:
collate_fn = self.train_dataset.collate_fn
self.train_dataloader = DataLoader(
self.train_dataset,
batch_size=self.hparams.train_batch_size,
num_workers=self.hparams.cpu_workers,
shuffle=True,
drop_last=True,
collate_fn=collate_fn,
)
print("""
# -------------------------------------------------------------------------
# DATALOADER FINISHED
# -------------------------------------------------------------------------
""") | identifier_body |
single_train.py | import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
import logging
import itertools
import torch
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from setproctitle import setproctitle
from bisect import bisect
from datetime import datetime
import numpy as np
from data.dataset import VisDialDataset
from visdial.encoders import Encoder
from visdial.decoders import Decoder
from visdial.model import EncoderDecoderModel
from visdial.utils.checkpointing import CheckpointManager, load_checkpoint
from single_evaluation import Evaluation
class MVAN(object):
def __init__(self, hparams):
self.hparams = hparams
self._logger = logging.getLogger(__name__)
np.random.seed(hparams.random_seed[0])
torch.manual_seed(hparams.random_seed[0])
torch.cuda.manual_seed_all(hparams.random_seed[0])
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
self.device = (
torch.device("cuda", self.hparams.gpu_ids[0])
if self.hparams.gpu_ids[0] >= 0
else torch.device("cpu")
)
setproctitle(hparams.dataset_version + '_' + hparams.model_name + '_' + str(hparams.random_seed[0]))
# def _build_data_process(self):
def _build_dataloader(self):
# =============================================================================
# SETUP DATASET, DATALOADER
# =============================================================================
old_split = "train" if self.hparams.dataset_version == "0.9" else None
self.train_dataset = VisDialDataset(
self.hparams,
overfit=self.hparams.overfit,
split="train",
old_split = old_split
)
collate_fn = None
if "dan" in self.hparams.img_feature_type:
collate_fn = self.train_dataset.collate_fn
self.train_dataloader = DataLoader(
self.train_dataset,
batch_size=self.hparams.train_batch_size,
num_workers=self.hparams.cpu_workers,
shuffle=True,
drop_last=True,
collate_fn=collate_fn,
)
print("""
# -------------------------------------------------------------------------
# DATALOADER FINISHED
# -------------------------------------------------------------------------
""")
def _build_model(self):
# =============================================================================
# MODEL : Encoder, Decoder
# =============================================================================
print('\t* Building model...')
# Pass vocabulary to construct Embedding layer.
encoder = Encoder(self.hparams, self.train_dataset.vocabulary)
decoder = Decoder(self.hparams, self.train_dataset.vocabulary)
print("Encoder: {}".format(self.hparams.encoder))
print("Decoder: {}".format(self.hparams.decoder))
# New: Initializing word_embed using GloVe
if self.hparams.glove_npy != '':
encoder.word_embed.weight.data = torch.from_numpy(np.load(self.hparams.glove_npy))
print("Loaded glove vectors from {}".format(self.hparams.glove_npy))
# Share word embedding between encoder and decoder.
decoder.word_embed = encoder.word_embed
# Wrap encoder and decoder in a model.
self.model = EncoderDecoderModel(encoder, decoder)
self.model = self.model.to(self.device)
# Use Multi-GPUs
if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:
self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)
# =============================================================================
# CRITERION
# =============================================================================
if "disc" in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss()
elif "gen" in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss(ignore_index=self.train_dataset.vocabulary.PAD_INDEX)
# Total Iterations -> for learning rate scheduler
if self.hparams.training_splits == "trainval":
self.iterations = (len(self.train_dataset) + len(self.valid_dataset)) // self.hparams.virtual_batch_size
else:
self.iterations = len(self.train_dataset) // self.hparams.virtual_batch_size
# =============================================================================
# OPTIMIZER, SCHEDULER
# =============================================================================
def lr_lambda_fun(current_iteration: int) -> float:
"""Returns a learning rate multiplier.
Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,
and then gets multiplied by `lr_gamma` every time a milestone is crossed.
"""
current_epoch = float(current_iteration) / self.iterations
if current_epoch <= self.hparams.warmup_epochs:
alpha = current_epoch / float(self.hparams.warmup_epochs)
return self.hparams.warmup_factor * (1.0 - alpha) + alpha
else:
return_val = 1.0
if current_epoch >= self.hparams.lr_milestones[0] and current_epoch < self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones, current_epoch)
return_val = pow(self.hparams.lr_gamma, idx)
elif current_epoch >= self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones2, current_epoch)
return_val = self.hparams.lr_gamma * pow(self.hparams.lr_gamma2, idx)
return return_val
if self.hparams.lr_scheduler == "LambdaLR":
self.optimizer = optim.Adam(self.model.parameters(), lr=self.hparams.initial_lr)
self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lr_lambda_fun)
else:
raise NotImplementedError
print(
"""
# -------------------------------------------------------------------------
# Model Build Finished
# -------------------------------------------------------------------------
"""
)
def _setup_training(self):
if self.hparams.save_dirpath == 'checkpoints/':
self.save_dirpath = os.path.join(self.hparams.root_dir, self.hparams.save_dirpath)
self.summary_writer = SummaryWriter(self.save_dirpath)
self.checkpoint_manager = CheckpointManager(
self.model, self.optimizer, self.save_dirpath, hparams=self.hparams
)
# If loading from checkpoint, adjust start epoch and load parameters.
if self.hparams.load_pthpath == "":
self.start_epoch = 1
else:
# "path/to/checkpoint_xx.pth" -> xx
self.start_epoch = int(self.hparams.load_pthpath.split("_")[-1][:-4])
self.start_epoch += 1
model_state_dict, optimizer_state_dict = load_checkpoint(self.hparams.load_pthpath)
if isinstance(self.model, nn.DataParallel):
self.model.module.load_state_dict(model_state_dict)
else:
self.model.load_state_dict(model_state_dict)
self.optimizer.load_state_dict(optimizer_state_dict)
self.previous_model_path = self.hparams.load_pthpath
print("Loaded model from {}".format(self.hparams.load_pthpath))
print(
"""
# -------------------------------------------------------------------------
# Setup Training Finished
# -------------------------------------------------------------------------
"""
)
def _loss_fn(self, epoch, batch, output):
target = (batch["ans_ind"] if "disc" in self.hparams.decoder else batch["ans_out"])
batch_loss = self.criterion(output.view(-1, output.size(-1)), target.view(-1).to(self.device))
return batch_loss
def train(self):
self._build_dataloader()
self._build_model()
self._setup_training()
# Evaluation Setup
evaluation = Evaluation(self.hparams, model=self.model, split="val")
# Forever increasing counter to keep track of iterations (for tensorboard log).
global_iteration_step = (self.start_epoch - 1) * self.iterations
running_loss = 0.0 # New
train_begin = datetime.utcnow() # New
print(
"""
# -------------------------------------------------------------------------
# Model Train Starts (NEW)
# -------------------------------------------------------------------------
"""
)
for epoch in range(self.start_epoch, self.hparams.num_epochs):
self.model.train()
# -------------------------------------------------------------------------
# ON EPOCH START (combine dataloaders if training on train + val)
# -------------------------------------------------------------------------
combined_dataloader = itertools.chain(self.train_dataloader)
print(f"\nTraining for epoch {epoch}:", "Total Iter:", self.iterations)
tqdm_batch_iterator = tqdm(combined_dataloader)
accumulate_batch = 0 # taesun New
for i, batch in enumerate(tqdm_batch_iterator):
buffer_batch = batch.copy()
for key in batch:
buffer_batch[key] = buffer_batch[key].to(self.device)
output = self.model(buffer_batch)
batch_loss = self._loss_fn(epoch, batch, output)
batch_loss.backward()
accumulate_batch += batch["img_ids"].shape[0]
if self.hparams.virtual_batch_size == accumulate_batch \
or i == (len(self.train_dataset) // self.hparams.train_batch_size): # last batch
self.optimizer.step()
# --------------------------------------------------------------------
# Update running loss and decay learning rates
# --------------------------------------------------------------------
if running_loss > 0.0:
running_loss = 0.95 * running_loss + 0.05 * batch_loss.item()
else:
running_loss = batch_loss.item()
self.optimizer.zero_grad()
accumulate_batch = 0
self.scheduler.step(global_iteration_step)
global_iteration_step += 1
# torch.cuda.empty_cache()
description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
datetime.utcnow() - train_begin,
epoch,
global_iteration_step, running_loss,
self.optimizer.param_groups[0]['lr'])
tqdm_batch_iterator.set_description(description)
# tensorboard
if global_iteration_step % self.hparams.tensorboard_step == 0:
description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
datetime.utcnow() - train_begin,
epoch,
global_iteration_step, running_loss,
self.optimizer.param_groups[0]['lr'],
)
self._logger.info(description)
# tensorboard writing scalar
self.summary_writer.add_scalar(
"train/loss", batch_loss, global_iteration_step
)
self.summary_writer.add_scalar(
"train/lr", self.optimizer.param_groups[0]["lr"], global_iteration_step
)
# -------------------------------------------------------------------------
# ON EPOCH END (checkpointing and validation)
# -------------------------------------------------------------------------
self.checkpoint_manager.step(epoch)
self.previous_model_path = os.path.join(self.checkpoint_manager.ckpt_dirpath, "checkpoint_%d.pth" % (epoch))
self._logger.info(self.previous_model_path)
| evaluation.run_evaluate(self.previous_model_path, global_iteration_step, self.summary_writer,
os.path.join(self.checkpoint_manager.ckpt_dirpath, "ranks_%d_valid.json" % epoch))
torch.cuda.empty_cache()
return self.previous_model_path | if epoch < self.hparams.num_epochs - 1 and self.hparams.dataset_version == '0.9':
continue
torch.cuda.empty_cache() | random_line_split |
single_train.py | import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
import logging
import itertools
import torch
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from setproctitle import setproctitle
from bisect import bisect
from datetime import datetime
import numpy as np
from data.dataset import VisDialDataset
from visdial.encoders import Encoder
from visdial.decoders import Decoder
from visdial.model import EncoderDecoderModel
from visdial.utils.checkpointing import CheckpointManager, load_checkpoint
from single_evaluation import Evaluation
class MVAN(object):
def __init__(self, hparams):
self.hparams = hparams
self._logger = logging.getLogger(__name__)
np.random.seed(hparams.random_seed[0])
torch.manual_seed(hparams.random_seed[0])
torch.cuda.manual_seed_all(hparams.random_seed[0])
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
self.device = (
torch.device("cuda", self.hparams.gpu_ids[0])
if self.hparams.gpu_ids[0] >= 0
else torch.device("cpu")
)
setproctitle(hparams.dataset_version + '_' + hparams.model_name + '_' + str(hparams.random_seed[0]))
# def _build_data_process(self):
def _build_dataloader(self):
# =============================================================================
# SETUP DATASET, DATALOADER
# =============================================================================
old_split = "train" if self.hparams.dataset_version == "0.9" else None
self.train_dataset = VisDialDataset(
self.hparams,
overfit=self.hparams.overfit,
split="train",
old_split = old_split
)
collate_fn = None
if "dan" in self.hparams.img_feature_type:
collate_fn = self.train_dataset.collate_fn
self.train_dataloader = DataLoader(
self.train_dataset,
batch_size=self.hparams.train_batch_size,
num_workers=self.hparams.cpu_workers,
shuffle=True,
drop_last=True,
collate_fn=collate_fn,
)
print("""
# -------------------------------------------------------------------------
# DATALOADER FINISHED
# -------------------------------------------------------------------------
""")
def _build_model(self):
# =============================================================================
# MODEL : Encoder, Decoder
# =============================================================================
print('\t* Building model...')
# Pass vocabulary to construct Embedding layer.
encoder = Encoder(self.hparams, self.train_dataset.vocabulary)
decoder = Decoder(self.hparams, self.train_dataset.vocabulary)
print("Encoder: {}".format(self.hparams.encoder))
print("Decoder: {}".format(self.hparams.decoder))
# New: Initializing word_embed using GloVe
if self.hparams.glove_npy != '':
encoder.word_embed.weight.data = torch.from_numpy(np.load(self.hparams.glove_npy))
print("Loaded glove vectors from {}".format(self.hparams.glove_npy))
# Share word embedding between encoder and decoder.
decoder.word_embed = encoder.word_embed
# Wrap encoder and decoder in a model.
self.model = EncoderDecoderModel(encoder, decoder)
self.model = self.model.to(self.device)
# Use Multi-GPUs
if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:
self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)
# =============================================================================
# CRITERION
# =============================================================================
if "disc" in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss()
elif "gen" in self.hparams.decoder:
|
# Total Iterations -> for learning rate scheduler
if self.hparams.training_splits == "trainval":
self.iterations = (len(self.train_dataset) + len(self.valid_dataset)) // self.hparams.virtual_batch_size
else:
self.iterations = len(self.train_dataset) // self.hparams.virtual_batch_size
# =============================================================================
# OPTIMIZER, SCHEDULER
# =============================================================================
def lr_lambda_fun(current_iteration: int) -> float:
"""Returns a learning rate multiplier.
Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,
and then gets multiplied by `lr_gamma` every time a milestone is crossed.
"""
current_epoch = float(current_iteration) / self.iterations
if current_epoch <= self.hparams.warmup_epochs:
alpha = current_epoch / float(self.hparams.warmup_epochs)
return self.hparams.warmup_factor * (1.0 - alpha) + alpha
else:
return_val = 1.0
if current_epoch >= self.hparams.lr_milestones[0] and current_epoch < self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones, current_epoch)
return_val = pow(self.hparams.lr_gamma, idx)
elif current_epoch >= self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones2, current_epoch)
return_val = self.hparams.lr_gamma * pow(self.hparams.lr_gamma2, idx)
return return_val
if self.hparams.lr_scheduler == "LambdaLR":
self.optimizer = optim.Adam(self.model.parameters(), lr=self.hparams.initial_lr)
self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lr_lambda_fun)
else:
raise NotImplementedError
print(
"""
# -------------------------------------------------------------------------
# Model Build Finished
# -------------------------------------------------------------------------
"""
)
def _setup_training(self):
if self.hparams.save_dirpath == 'checkpoints/':
self.save_dirpath = os.path.join(self.hparams.root_dir, self.hparams.save_dirpath)
self.summary_writer = SummaryWriter(self.save_dirpath)
self.checkpoint_manager = CheckpointManager(
self.model, self.optimizer, self.save_dirpath, hparams=self.hparams
)
# If loading from checkpoint, adjust start epoch and load parameters.
if self.hparams.load_pthpath == "":
self.start_epoch = 1
else:
# "path/to/checkpoint_xx.pth" -> xx
self.start_epoch = int(self.hparams.load_pthpath.split("_")[-1][:-4])
self.start_epoch += 1
model_state_dict, optimizer_state_dict = load_checkpoint(self.hparams.load_pthpath)
if isinstance(self.model, nn.DataParallel):
self.model.module.load_state_dict(model_state_dict)
else:
self.model.load_state_dict(model_state_dict)
self.optimizer.load_state_dict(optimizer_state_dict)
self.previous_model_path = self.hparams.load_pthpath
print("Loaded model from {}".format(self.hparams.load_pthpath))
print(
"""
# -------------------------------------------------------------------------
# Setup Training Finished
# -------------------------------------------------------------------------
"""
)
def _loss_fn(self, epoch, batch, output):
target = (batch["ans_ind"] if "disc" in self.hparams.decoder else batch["ans_out"])
batch_loss = self.criterion(output.view(-1, output.size(-1)), target.view(-1).to(self.device))
return batch_loss
def train(self):
self._build_dataloader()
self._build_model()
self._setup_training()
# Evaluation Setup
evaluation = Evaluation(self.hparams, model=self.model, split="val")
# Forever increasing counter to keep track of iterations (for tensorboard log).
global_iteration_step = (self.start_epoch - 1) * self.iterations
running_loss = 0.0 # New
train_begin = datetime.utcnow() # New
print(
"""
# -------------------------------------------------------------------------
# Model Train Starts (NEW)
# -------------------------------------------------------------------------
"""
)
for epoch in range(self.start_epoch, self.hparams.num_epochs):
self.model.train()
# -------------------------------------------------------------------------
# ON EPOCH START (combine dataloaders if training on train + val)
# -------------------------------------------------------------------------
combined_dataloader = itertools.chain(self.train_dataloader)
print(f"\nTraining for epoch {epoch}:", "Total Iter:", self.iterations)
tqdm_batch_iterator = tqdm(combined_dataloader)
accumulate_batch = 0 # taesun New
for i, batch in enumerate(tqdm_batch_iterator):
buffer_batch = batch.copy()
for key in batch:
buffer_batch[key] = buffer_batch[key].to(self.device)
output = self.model(buffer_batch)
batch_loss = self._loss_fn(epoch, batch, output)
batch_loss.backward()
accumulate_batch += batch["img_ids"].shape[0]
if self.hparams.virtual_batch_size == accumulate_batch \
or i == (len(self.train_dataset) // self.hparams.train_batch_size): # last batch
self.optimizer.step()
# --------------------------------------------------------------------
# Update running loss and decay learning rates
# --------------------------------------------------------------------
if running_loss > 0.0:
running_loss = 0.95 * running_loss + 0.05 * batch_loss.item()
else:
running_loss = batch_loss.item()
self.optimizer.zero_grad()
accumulate_batch = 0
self.scheduler.step(global_iteration_step)
global_iteration_step += 1
# torch.cuda.empty_cache()
description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
datetime.utcnow() - train_begin,
epoch,
global_iteration_step, running_loss,
self.optimizer.param_groups[0]['lr'])
tqdm_batch_iterator.set_description(description)
# tensorboard
if global_iteration_step % self.hparams.tensorboard_step == 0:
description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
datetime.utcnow() - train_begin,
epoch,
global_iteration_step, running_loss,
self.optimizer.param_groups[0]['lr'],
)
self._logger.info(description)
# tensorboard writing scalar
self.summary_writer.add_scalar(
"train/loss", batch_loss, global_iteration_step
)
self.summary_writer.add_scalar(
"train/lr", self.optimizer.param_groups[0]["lr"], global_iteration_step
)
# -------------------------------------------------------------------------
# ON EPOCH END (checkpointing and validation)
# -------------------------------------------------------------------------
self.checkpoint_manager.step(epoch)
self.previous_model_path = os.path.join(self.checkpoint_manager.ckpt_dirpath, "checkpoint_%d.pth" % (epoch))
self._logger.info(self.previous_model_path)
if epoch < self.hparams.num_epochs - 1 and self.hparams.dataset_version == '0.9':
continue
torch.cuda.empty_cache()
evaluation.run_evaluate(self.previous_model_path, global_iteration_step, self.summary_writer,
os.path.join(self.checkpoint_manager.ckpt_dirpath, "ranks_%d_valid.json" % epoch))
torch.cuda.empty_cache()
return self.previous_model_path | self.criterion = nn.CrossEntropyLoss(ignore_index=self.train_dataset.vocabulary.PAD_INDEX) | conditional_block |
single_train.py | import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
import logging
import itertools
import torch
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from setproctitle import setproctitle
from bisect import bisect
from datetime import datetime
import numpy as np
from data.dataset import VisDialDataset
from visdial.encoders import Encoder
from visdial.decoders import Decoder
from visdial.model import EncoderDecoderModel
from visdial.utils.checkpointing import CheckpointManager, load_checkpoint
from single_evaluation import Evaluation
class MVAN(object):
def __init__(self, hparams):
self.hparams = hparams
self._logger = logging.getLogger(__name__)
np.random.seed(hparams.random_seed[0])
torch.manual_seed(hparams.random_seed[0])
torch.cuda.manual_seed_all(hparams.random_seed[0])
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
self.device = (
torch.device("cuda", self.hparams.gpu_ids[0])
if self.hparams.gpu_ids[0] >= 0
else torch.device("cpu")
)
setproctitle(hparams.dataset_version + '_' + hparams.model_name + '_' + str(hparams.random_seed[0]))
# def _build_data_process(self):
def _build_dataloader(self):
# =============================================================================
# SETUP DATASET, DATALOADER
# =============================================================================
old_split = "train" if self.hparams.dataset_version == "0.9" else None
self.train_dataset = VisDialDataset(
self.hparams,
overfit=self.hparams.overfit,
split="train",
old_split = old_split
)
collate_fn = None
if "dan" in self.hparams.img_feature_type:
collate_fn = self.train_dataset.collate_fn
self.train_dataloader = DataLoader(
self.train_dataset,
batch_size=self.hparams.train_batch_size,
num_workers=self.hparams.cpu_workers,
shuffle=True,
drop_last=True,
collate_fn=collate_fn,
)
print("""
# -------------------------------------------------------------------------
# DATALOADER FINISHED
# -------------------------------------------------------------------------
""")
def _build_model(self):
# =============================================================================
# MODEL : Encoder, Decoder
# =============================================================================
print('\t* Building model...')
# Pass vocabulary to construct Embedding layer.
encoder = Encoder(self.hparams, self.train_dataset.vocabulary)
decoder = Decoder(self.hparams, self.train_dataset.vocabulary)
print("Encoder: {}".format(self.hparams.encoder))
print("Decoder: {}".format(self.hparams.decoder))
# New: Initializing word_embed using GloVe
if self.hparams.glove_npy != '':
encoder.word_embed.weight.data = torch.from_numpy(np.load(self.hparams.glove_npy))
print("Loaded glove vectors from {}".format(self.hparams.glove_npy))
# Share word embedding between encoder and decoder.
decoder.word_embed = encoder.word_embed
# Wrap encoder and decoder in a model.
self.model = EncoderDecoderModel(encoder, decoder)
self.model = self.model.to(self.device)
# Use Multi-GPUs
if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:
self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)
# =============================================================================
# CRITERION
# =============================================================================
if "disc" in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss()
elif "gen" in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss(ignore_index=self.train_dataset.vocabulary.PAD_INDEX)
# Total Iterations -> for learning rate scheduler
if self.hparams.training_splits == "trainval":
self.iterations = (len(self.train_dataset) + len(self.valid_dataset)) // self.hparams.virtual_batch_size
else:
self.iterations = len(self.train_dataset) // self.hparams.virtual_batch_size
# =============================================================================
# OPTIMIZER, SCHEDULER
# =============================================================================
def | (current_iteration: int) -> float:
"""Returns a learning rate multiplier.
Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,
and then gets multiplied by `lr_gamma` every time a milestone is crossed.
"""
current_epoch = float(current_iteration) / self.iterations
if current_epoch <= self.hparams.warmup_epochs:
alpha = current_epoch / float(self.hparams.warmup_epochs)
return self.hparams.warmup_factor * (1.0 - alpha) + alpha
else:
return_val = 1.0
if current_epoch >= self.hparams.lr_milestones[0] and current_epoch < self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones, current_epoch)
return_val = pow(self.hparams.lr_gamma, idx)
elif current_epoch >= self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones2, current_epoch)
return_val = self.hparams.lr_gamma * pow(self.hparams.lr_gamma2, idx)
return return_val
if self.hparams.lr_scheduler == "LambdaLR":
self.optimizer = optim.Adam(self.model.parameters(), lr=self.hparams.initial_lr)
self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lr_lambda_fun)
else:
raise NotImplementedError
print(
"""
# -------------------------------------------------------------------------
# Model Build Finished
# -------------------------------------------------------------------------
"""
)
def _setup_training(self):
if self.hparams.save_dirpath == 'checkpoints/':
self.save_dirpath = os.path.join(self.hparams.root_dir, self.hparams.save_dirpath)
self.summary_writer = SummaryWriter(self.save_dirpath)
self.checkpoint_manager = CheckpointManager(
self.model, self.optimizer, self.save_dirpath, hparams=self.hparams
)
# If loading from checkpoint, adjust start epoch and load parameters.
if self.hparams.load_pthpath == "":
self.start_epoch = 1
else:
# "path/to/checkpoint_xx.pth" -> xx
self.start_epoch = int(self.hparams.load_pthpath.split("_")[-1][:-4])
self.start_epoch += 1
model_state_dict, optimizer_state_dict = load_checkpoint(self.hparams.load_pthpath)
if isinstance(self.model, nn.DataParallel):
self.model.module.load_state_dict(model_state_dict)
else:
self.model.load_state_dict(model_state_dict)
self.optimizer.load_state_dict(optimizer_state_dict)
self.previous_model_path = self.hparams.load_pthpath
print("Loaded model from {}".format(self.hparams.load_pthpath))
print(
"""
# -------------------------------------------------------------------------
# Setup Training Finished
# -------------------------------------------------------------------------
"""
)
def _loss_fn(self, epoch, batch, output):
target = (batch["ans_ind"] if "disc" in self.hparams.decoder else batch["ans_out"])
batch_loss = self.criterion(output.view(-1, output.size(-1)), target.view(-1).to(self.device))
return batch_loss
def train(self):
self._build_dataloader()
self._build_model()
self._setup_training()
# Evaluation Setup
evaluation = Evaluation(self.hparams, model=self.model, split="val")
# Forever increasing counter to keep track of iterations (for tensorboard log).
global_iteration_step = (self.start_epoch - 1) * self.iterations
running_loss = 0.0 # New
train_begin = datetime.utcnow() # New
print(
"""
# -------------------------------------------------------------------------
# Model Train Starts (NEW)
# -------------------------------------------------------------------------
"""
)
for epoch in range(self.start_epoch, self.hparams.num_epochs):
self.model.train()
# -------------------------------------------------------------------------
# ON EPOCH START (combine dataloaders if training on train + val)
# -------------------------------------------------------------------------
combined_dataloader = itertools.chain(self.train_dataloader)
print(f"\nTraining for epoch {epoch}:", "Total Iter:", self.iterations)
tqdm_batch_iterator = tqdm(combined_dataloader)
accumulate_batch = 0 # taesun New
for i, batch in enumerate(tqdm_batch_iterator):
buffer_batch = batch.copy()
for key in batch:
buffer_batch[key] = buffer_batch[key].to(self.device)
output = self.model(buffer_batch)
batch_loss = self._loss_fn(epoch, batch, output)
batch_loss.backward()
accumulate_batch += batch["img_ids"].shape[0]
if self.hparams.virtual_batch_size == accumulate_batch \
or i == (len(self.train_dataset) // self.hparams.train_batch_size): # last batch
self.optimizer.step()
# --------------------------------------------------------------------
# Update running loss and decay learning rates
# --------------------------------------------------------------------
if running_loss > 0.0:
running_loss = 0.95 * running_loss + 0.05 * batch_loss.item()
else:
running_loss = batch_loss.item()
self.optimizer.zero_grad()
accumulate_batch = 0
self.scheduler.step(global_iteration_step)
global_iteration_step += 1
# torch.cuda.empty_cache()
description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
datetime.utcnow() - train_begin,
epoch,
global_iteration_step, running_loss,
self.optimizer.param_groups[0]['lr'])
tqdm_batch_iterator.set_description(description)
# tensorboard
if global_iteration_step % self.hparams.tensorboard_step == 0:
description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
datetime.utcnow() - train_begin,
epoch,
global_iteration_step, running_loss,
self.optimizer.param_groups[0]['lr'],
)
self._logger.info(description)
# tensorboard writing scalar
self.summary_writer.add_scalar(
"train/loss", batch_loss, global_iteration_step
)
self.summary_writer.add_scalar(
"train/lr", self.optimizer.param_groups[0]["lr"], global_iteration_step
)
# -------------------------------------------------------------------------
# ON EPOCH END (checkpointing and validation)
# -------------------------------------------------------------------------
self.checkpoint_manager.step(epoch)
self.previous_model_path = os.path.join(self.checkpoint_manager.ckpt_dirpath, "checkpoint_%d.pth" % (epoch))
self._logger.info(self.previous_model_path)
if epoch < self.hparams.num_epochs - 1 and self.hparams.dataset_version == '0.9':
continue
torch.cuda.empty_cache()
evaluation.run_evaluate(self.previous_model_path, global_iteration_step, self.summary_writer,
os.path.join(self.checkpoint_manager.ckpt_dirpath, "ranks_%d_valid.json" % epoch))
torch.cuda.empty_cache()
return self.previous_model_path | lr_lambda_fun | identifier_name |
settings.py | # -*- coding: utf-8 -*-
# Scrapy settings for amazonFrontCrawl project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'amazonFrontCrawl'
SPIDER_MODULES = ['amazonFrontCrawl.spiders']
NEWSPIDER_MODULE = 'amazonFrontCrawl.spiders'
COMMANDS_MODULE = 'amazonFrontCrawl.commands'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'amazonFrontCrawl (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'amazonFrontCrawl.middlewares.AmazonfrontcrawlSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'amazonFrontCrawl.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'amazonFrontCrawl.pipelines.AmazonfrontcrawlPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# log
LOG_FILE = '/home/zz/projects/amazonFrontCrawl/amazonFrontCrawl.log'
# LOG_LEVEL = 'INFO'
LOG_LEVEL = 'WARNING' #OPTIONS: NOTEST,DEBUG,INFO,WARNING,ERROR,CRITICAL
# download
FILES_STORE = '/home/zz/projects/amazonFrontCrawl/spider_downloads'
# 90 days of delay for files expiration
FILES_EXPIRES = 30
# user agents
# USER_AGENTS = [ \
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1" \
# "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", \
# "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1", \
# "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5", \
# "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5", \
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
# "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
# "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3", \
# "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24", \
# "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
# ]
USER_AGENTS = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5", | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]
# PROXIES copy from http://www.xicidaili.com/
PROXIES = [
# {'ip_port': '123.169.90.79:808', 'user_pass': ''},
# {'ip_port': '110.73.0.72:8123', 'user_pass': ''},
# {'ip_port': '60.171.96.194:51345', 'user_pass': ''},
# {'ip_port': '125.73.40.123:8118', 'user_pass': ''},
# {'ip_port': '115.197.142.193:8118', 'user_pass': ''},
# {'ip_port': '125.88.74.122:83', 'user_pass': ''},
# {'ip_port': 'proxy.abuyun.com:9020', 'user_pass': 'H8898418F770549D:5923ACEE06EF0248'},
]
# proxyHost = "proxy.abuyun.com"
# proxyPort = "9020"
# proxyUser = "H522863F43AU23PD"
# proxyPass = "83729A106193F254"
# middleware
DOWNLOADER_MIDDLEWARES = {
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'amazonFrontCrawl.middlewares.RandomUserAgent.RandomUserAgentMiddleware': 300,
'amazonFrontCrawl.middlewares.RandomProxy.RandomProxyMiddleware': 400,
'amazonFrontCrawl.middlewares.JSMiddleware.PhantomJSMiddleware' : 543,
'amazonFrontCrawl.middlewares.AdvertMiddleware.AdvertMiddleware' : 550,
# 'amazonFrontCrawl.middlewares.JavaScriptMiddleware.JavaScriptMiddleware' : 543,
# 'scrapy_splash.SplashCookiesMiddleware': 723,
# 'scrapy_splash.SplashMiddleware': 725,
# 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
# SPIDER_MIDDLEWARES = {
# 'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
# }
# 设置一个去重的类
# DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
# 如果你使用scrapy http 缓存系统,那你就有必要启用这个scrapy-splash的缓存系统
# HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
# parse js/javascript/java script
# http://www.cnblogs.com/zhonghuasong/p/5976003.html
# SPLASH_URL = 'http://localhost:8050'
# SPLASH_URL = 'http://127.0.0.1:8050/'
# MySQL configuration
MYSQL_HOST = '192.168.2.97' # 127.0.0.1
#MYSQL_DBNAME = 'amazonfrontcrawl'
MYSQL_DBNAME = 'amazonfrontcrawl_dev'
MYSQL_USER = 'lepython'
MYSQL_PASSWD = 'qaz123456'
MYSQL_PORT = 3306
# MYSQL_HOST = '192.168.2.98' # 127.0.0.1
# MYSQL_DBNAME = 'amazonFrontCrawl'
# MYSQL_USER = 'root'
# MYSQL_PASSWD = 'root123'
# MYSQL_PORT = 3306
# item pipelines
ITEM_PIPELINES = {
'amazonFrontCrawl.pipelines.ProductInsalesPipeline': 300, # save to MySQL DB
'amazonFrontCrawl.pipelines.JsonWithEncodingPipeline': 300, # save to file
}
# tables
AMAZON_REF_START_URLS = 'amazon_ref_start_urls'
AMAZON_REF_PRODUCT_LIST = 'amazon_ref_product_list'
AMAZON_REF_KEYWORD_LIST = 'amazon_ref_keyword_list'
AMAZON_REF_TODAY_DEALS = 'amazon_ref_today_deals'
AMAZON_REF_SHOP_LIST = 'amazon_ref_shop_list'
AMAZON_REF_TOP_REVIEWER_LIST = 'amazon_ref_top_reviewer_list'
#
KEYWORD_SEARCH_PAGES_MAX = 10
CONCURRENT_ITEMS = 5
CONCURRENT_REQUESTS = 5 | "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6", | random_line_split |
set1.rs | pub fn base64_encode(bytes: &[u8]) -> Result<String, &'static str> {
let mut result = String::new();
for group in bytes.chunks(3) {
let extended = match group.len() {
1 => [group[0], 0, 0],
2 => [group[0], group[1], 0],
3 => [group[0], group[1], group[2]],
_ => return Err("chunk too large!"),
};
for i in 0..=3 {
let sextet = match i {
0 => ((extended[0] & 0xFC) >> 2),
1 => ((extended[0] & 0x03) << 4) | ((extended[1] & 0xF0) >> 4),
2 => ((extended[1] & 0x0F) << 2) | ((extended[2] & 0xC0) >> 6),
3 => ((extended[2] & 0x3F) << 0),
_ => return Err("too many groups!"),
};
let symbol: char = match sextet {
c @ 0...25 => char::from(0x41 + c),
c @ 26...51 => char::from(0x61 + c - 26),
c @ 52...61 => char::from(0x30 + c - 52),
62 => '+',
63 => '/',
_ => return Err("too many bits!"),
};
if (group.len() as i8) - (i as i8) >= 0 {
result.push(symbol);
} else {
result.push('=');
}
}
}
return Ok(result);
}
pub fn base64_decode(encoded: &str) -> Result<Vec<u8>, &'static str> {
let mut result: Vec<u8> = Vec::with_capacity(encoded.len() * 3 / 4);
let encoded_stripped = encoded
.as_bytes()
.iter()
.cloned()
.filter(|letter| match *letter {
b'\n' => false,
_ => true,
})
.collect::<Vec<u8>>();
for group in encoded_stripped.chunks(4) {
if group.len() != 4 {
return Err("chunk too small!");
}
let mut padding: i8 = 0;
let sextets = group
.iter()
.map(|letter| match *letter {
c @ b'A'...b'Z' => Ok(c as u8 - 0x41),
c @ b'a'...b'z' => Ok(c as u8 - 0x61 + 26),
c @ b'0'...b'9' => Ok(c as u8 - 0x30 + 52),
b'+' => Ok(62),
b'/' => Ok(63),
b'=' => {
padding += 1;
Ok(0)
}
_ => Err("illegal character!"),
})
.collect::<Result<Vec<u8>, &'static str>>()?;
for i in 0..=2 {
let octet = match i {
0 => ((sextets[0] & 0x3F) << 2) | ((sextets[1] & 0x30) >> 4),
1 => ((sextets[1] & 0x0F) << 4) | ((sextets[2] & 0x3C) >> 2),
2 => ((sextets[2] & 0x03) << 6) | ((sextets[3] & 0x3F) >> 0),
_ => return Err("too many octets!"),
};
if (i as i8) < (3 - padding) {
result.push(octet);
}
}
}
return Ok(result);
}
pub fn xor(a: &[u8], b: &[u8]) -> Result<Vec<u8>, &'static str> {
if a.len() != b.len() {
return Err("buffer size mismatch");
}
let result = a.iter()
.zip(b)
.map(|pair| match pair {
(&aa, &bb) => aa ^ bb,
})
.collect::<Vec<u8>>();
return Ok(result);
}
pub fn xor_in_place(data: &mut [u8], other: &[u8]) -> Result<usize, &'static str> {
if data.len() != other.len() {
return Err("buffer size mismatch");
}
let xor_count = data.iter_mut()
.zip(other)
.map(|(data_elem, other_elem)| {
*data_elem ^= other_elem;
})
.count();
return Ok(xor_count);
}
pub fn | (plaintext: &[u8], key: &[u8]) -> Vec<u8> {
let result = plaintext
.iter()
.zip(key.iter().cycle())
.map(|pair| match pair {
(&aa, &bb) => aa ^ bb,
})
.collect::<Vec<u8>>();
return result;
}
use std::collections::BTreeMap;
pub fn char_freq_score(text: &[u8]) -> f64 {
let mut non_printable_count = 0;
let letter_freq: BTreeMap<u8, f64> = btreemap! {
b'a' => 0.08167,
b'b' => 0.01492,
b'c' => 0.02782,
b'd' => 0.04253,
b'e' => 0.12702,
b'f' => 0.02228,
b'g' => 0.02015,
b'h' => 0.06094,
b'i' => 0.06966,
b'j' => 0.00153,
b'k' => 0.00772,
b'l' => 0.04025,
b'm' => 0.02406,
b'n' => 0.06749,
b'o' => 0.07507,
b'p' => 0.01929,
b'q' => 0.00095,
b'r' => 0.05987,
b's' => 0.06327,
b't' => 0.09056,
b'u' => 0.02758,
b'v' => 0.00978,
b'w' => 0.02360,
b'x' => 0.00150,
b'y' => 0.01974,
b'z' => 0.00074,
};
let mut letter_counts: BTreeMap<u8, u32> = BTreeMap::new();
for letter in b'a'..=b'z' {
letter_counts.insert(letter, 0);
}
let mut num_letters = 0;
for letter in text {
match *letter {
// null
0 => {}
// non-printable characters
1...9 => non_printable_count += 1,
// newline
10 => {}
// more non-printable characters
11...31 => non_printable_count += 1,
// space
32 => {}
// printable symbols, including digits (ascii '!' - '@')
33...64 => {}
// upper-case letters
c @ 65...90 => {
*letter_counts.get_mut(&(c - 65 + 97)).unwrap() += 1;
num_letters += 1;
}
// more printable symbols (ascii '[' - '`')
91...96 => {}
// lower-case letters
c @ 97...122 => {
*letter_counts.get_mut(&c).unwrap() += 1;
num_letters += 1;
}
// more printable symbols (ascii '{' - '~')
123...126 => {}
// non-printable characters
_ => non_printable_count += 1,
}
}
if num_letters == 0 {
return 10000.0 + (non_printable_count as f64 * 500.0);
}
let mut chisquared = 0.0;
for (key, prob) in letter_freq {
chisquared += (num_letters as f64)
* ((*letter_counts.get(&key).unwrap() as f64 / num_letters as f64) - prob).powf(2.0)
/ prob;
}
return chisquared + (non_printable_count as f64 * 500.0);
}
extern crate bit_vec;
use self::bit_vec::BitVec;
pub fn hamming_distance(a: &[u8], b: &[u8]) -> Result<u32, &'static str> {
if a.len() != b.len() {
return Err("sequences must have same length");
}
let result = a.iter()
.zip(b.iter())
.map(|(aa, bb)| -> u32 {
BitVec::from_bytes(&[aa ^ bb])
.iter()
.map(|val| val as u32)
.sum()
})
.sum();
return Ok(result);
}
pub fn find_best_single_byte_xor(ciphertext: &[u8]) -> u8 {
let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256);
for i in 0..=256 {
let key: Vec<u8> = vec![i as u8; ciphertext.len()];
if let Ok(decoded_bytes) = xor(ciphertext, &key) {
let score = char_freq_score(&decoded_bytes);
decoded.push((score, i as u8, decoded_bytes));
}
}
decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let (_, key, _) = decoded[0];
return key;
}
pub fn to_hex(data: &[u8]) -> String {
data.iter()
.map(|b| format!("{:02x}", b))
.collect::<Vec<String>>()
.join("")
}
#[cfg(test)]
mod tests {
use set1;
extern crate hex;
use self::hex::FromHex;
#[test]
fn base64_encode() {
let example_hex = "49276d206b696c6c696e6720796f757220627261696e206c\
696b65206120706f69736f6e6f7573206d757368726f6f6d";
let example_bytes = Vec::from_hex(example_hex).unwrap();
if let Ok(b64) = set1::base64_encode(&example_bytes) {
assert_eq!(
b64,
"SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"
);
} else {
panic!();
}
let test = "foobar".as_bytes();
if let Ok(b64) = set1::base64_encode(&test) {
assert_eq!(b64, "Zm9vYmFy");
} else {
panic!();
}
}
#[test]
fn base64_decode() {
let example_b64 = "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t";
if let Ok(example) = set1::base64_decode(example_b64) {
assert_eq!(
example,
Vec::from_hex(
"49276d206b696c6c696e6720796f757220627261696e206c\
696b65206120706f69736f6e6f7573206d757368726f6f6d"
).unwrap()
);
} else {
panic!();
}
let b64 = "Zm9vYmFy";
if let Ok(test) = set1::base64_decode(&b64) {
assert_eq!(test, "foobar".as_bytes());
} else {
panic!();
}
}
#[test]
fn xor() {
let a = "1c0111001f010100061a024b53535009181c";
let b = "686974207468652062756c6c277320657965";
let res = "746865206b696420646f6e277420706c6179";
let a_bytes = Vec::from_hex(a).unwrap();
let b_bytes = Vec::from_hex(b).unwrap();
let res_bytes = Vec::from_hex(res).unwrap();
match set1::xor(&a_bytes, &b_bytes) {
Ok(r) => assert_eq!(r, res_bytes),
Err(str) => panic!(str),
};
}
use std::collections::BTreeMap;
use std::str;
#[test]
fn single_byte_xor_cipher() {
let encoded = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736";
let encoded_bytes = Vec::from_hex(encoded).unwrap();
//don't want to use a map here, because we'll lose any values with the same score
//let mut decoded: BTreeMap<u64, (u8, Vec<u8>)> = BTreeMap::new();
let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256);
for i in 0..=256 {
let key: Vec<u8> = vec![i as u8; encoded_bytes.len()];
if let Ok(decoded_bytes) = set1::xor(&encoded_bytes, &key) {
let score = set1::char_freq_score(&decoded_bytes);
//decoded.insert((score * 1000.0) as u64, (i as u8, decoded_bytes));
decoded.push((score, i as u8, decoded_bytes));
}
}
decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
//let &(key, ref value) = decoded.values().next().unwrap();
let (_, key, ref value) = decoded[0];
assert_eq!(key, 88);
assert_eq!(
str::from_utf8(value.as_slice()).unwrap(),
"Cooking MC's like a pound of bacon"
);
}
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
#[test]
fn detect_single_char_xor() {
let file = File::open("challenge-data/4.txt").unwrap();
let reader = BufReader::new(file);
let mut decoded = BTreeMap::new();
let mut line_num = 0;
for line in reader.lines() {
if let Ok(line) = line {
let line_bytes = Vec::from_hex(line).unwrap();
for i in 0..=256 {
let key: Vec<u8> = vec![i as u8; line_bytes.len()];
if let Ok(decoded_bytes) = set1::xor(&line_bytes, &key) {
let score = set1::char_freq_score(&decoded_bytes);
decoded.insert((score * 1000.0) as u64, (line_num, i as u8, decoded_bytes));
}
}
}
line_num += 1;
}
let mut found = false;
for (score, &(line, key, ref value)) in decoded.iter() {
let score: f64 = *score as f64 / 1000.0;
if score < 100.0 {
if line == 170 && key == 53 {
let value = str::from_utf8(value).unwrap();
assert_eq!(value, "Now that the party is jumping\n");
found = true;
}
}
}
assert!(found, "decrypted string not found!");
}
#[test]
fn repeating_key_xor() {
let plaintext = "Burning 'em, if you ain't quick and nimble\n\
I go crazy when I hear a cymbal";
let key = "ICE";
let plaintext = plaintext.as_bytes();
let key = key.as_bytes();
let ciphertext = set1::xor_repeat(&plaintext, &key);
let ciphertext_ref = "0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226\
324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c692b20\
283165286326302e27282f";
let ciphertext_ref = Vec::from_hex(ciphertext_ref).unwrap();
assert_eq!(ciphertext, ciphertext_ref);
}
#[test]
fn hamming_distance() {
assert_eq!(
set1::hamming_distance("this is a test".as_bytes(), "wokka wokka!!!".as_bytes())
.unwrap(),
37
);
}
#[test]
fn break_repeating_key_xor() {
let mut f = File::open("challenge-data/6.txt").unwrap();
let mut encoded = String::new();
f.read_to_string(&mut encoded).unwrap();
let decoded = set1::base64_decode(&encoded).unwrap();
let mut results: Vec<(f32, usize)> = Vec::with_capacity(40);
for keysize in 2..=40 {
let sequences = decoded.chunks(keysize).collect::<Vec<&[u8]>>();
let norm_distances = sequences
.chunks(2)
.filter(|maybe_pair| maybe_pair.len() == 2)
.filter(|maybe_same_len| maybe_same_len[0].len() == maybe_same_len[1].len())
.map(|pair| {
set1::hamming_distance(pair[0], pair[1]).unwrap() as f32 / keysize as f32
})
.collect::<Vec<f32>>();
let norm_dist_avg: f32 = &norm_distances.iter().sum() / norm_distances.len() as f32;
results.push((norm_dist_avg, keysize));
}
results.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let keysize = results[0].1;
assert_eq!(keysize, 29);
let sequences = decoded.chunks(keysize).collect::<Vec<&[u8]>>();
let mut transposed: Vec<Vec<u8>> = Vec::with_capacity(keysize);
for i in 0..keysize {
let mut line = Vec::with_capacity(sequences.len());
for j in 0..sequences.len() {
if i < sequences[j].len() {
line.push(sequences[j][i]);
}
}
transposed.push(line);
}
let mut key: Vec<u8> = Vec::with_capacity(keysize);
for block in transposed {
let key_byte = set1::find_best_single_byte_xor(&block);
key.push(key_byte);
}
assert_eq!(
str::from_utf8(&key).unwrap(),
"Terminator X: Bring the noise"
);
let plaintext = set1::xor_repeat(&decoded, &key);
let plaintext = str::from_utf8(&plaintext).unwrap();
let mut f = File::open("challenge-data/6_plaintext.txt").unwrap();
let mut plaintext_ref = String::new();
f.read_to_string(&mut plaintext_ref).unwrap();
assert_eq!(plaintext, plaintext_ref);
}
extern crate openssl;
use self::openssl::symm;
use self::openssl::symm::Cipher;
use std::io::prelude::*;
#[test]
fn aes_ecb_mode() {
let mut f = File::open("challenge-data/7.txt").unwrap();
let mut encoded = String::new();
f.read_to_string(&mut encoded).unwrap();
let decoded = set1::base64_decode(&encoded).unwrap();
let plaintext = symm::decrypt(
Cipher::aes_128_ecb(),
"YELLOW SUBMARINE".as_bytes(),
None,
&decoded,
).unwrap();
let plaintext = str::from_utf8(&plaintext).unwrap();
let mut f = File::open("challenge-data/7_plaintext.txt").unwrap();
let mut plaintext_ref = String::new();
f.read_to_string(&mut plaintext_ref).unwrap();
assert_eq!(plaintext, plaintext_ref);
}
#[test]
fn detect_aes_ecb_mode() {
let f = File::open("challenge-data/8.txt").unwrap();
let reader = BufReader::new(f);
let mut results: Vec<(u32, usize, Vec<u8>)> = Vec::new();
let mut linenum = 0;
for line in reader.lines() {
if let Ok(line) = line {
let line_bytes = Vec::from_hex(line).unwrap();
let sequences = line_bytes.chunks(16).collect::<Vec<&[u8]>>();
let mut scores: Vec<u32> = Vec::new();
for i in 0..sequences.len() {
for j in (i + 1)..sequences.len() {
let score = set1::hamming_distance(sequences[i], sequences[j]).unwrap();
scores.push(score);
}
}
scores.sort();
results.push((scores[0], linenum, line_bytes.clone()));
linenum += 1;
}
}
results.sort_by_key(|elem| elem.0);
assert_eq!(results[0].1, 132);
}
}
| xor_repeat | identifier_name |
set1.rs | pub fn base64_encode(bytes: &[u8]) -> Result<String, &'static str> {
let mut result = String::new();
for group in bytes.chunks(3) {
let extended = match group.len() {
1 => [group[0], 0, 0],
2 => [group[0], group[1], 0],
3 => [group[0], group[1], group[2]],
_ => return Err("chunk too large!"),
};
for i in 0..=3 {
let sextet = match i {
0 => ((extended[0] & 0xFC) >> 2),
1 => ((extended[0] & 0x03) << 4) | ((extended[1] & 0xF0) >> 4),
2 => ((extended[1] & 0x0F) << 2) | ((extended[2] & 0xC0) >> 6),
3 => ((extended[2] & 0x3F) << 0),
_ => return Err("too many groups!"),
};
let symbol: char = match sextet {
c @ 0...25 => char::from(0x41 + c),
c @ 26...51 => char::from(0x61 + c - 26),
c @ 52...61 => char::from(0x30 + c - 52),
62 => '+',
63 => '/',
_ => return Err("too many bits!"),
};
if (group.len() as i8) - (i as i8) >= 0 {
result.push(symbol);
} else {
result.push('=');
}
}
}
return Ok(result);
}
pub fn base64_decode(encoded: &str) -> Result<Vec<u8>, &'static str> {
let mut result: Vec<u8> = Vec::with_capacity(encoded.len() * 3 / 4);
let encoded_stripped = encoded
.as_bytes()
.iter()
.cloned()
.filter(|letter| match *letter {
b'\n' => false,
_ => true,
})
.collect::<Vec<u8>>();
for group in encoded_stripped.chunks(4) {
if group.len() != 4 {
return Err("chunk too small!");
}
let mut padding: i8 = 0;
let sextets = group
.iter()
.map(|letter| match *letter {
c @ b'A'...b'Z' => Ok(c as u8 - 0x41),
c @ b'a'...b'z' => Ok(c as u8 - 0x61 + 26),
c @ b'0'...b'9' => Ok(c as u8 - 0x30 + 52),
b'+' => Ok(62),
b'/' => Ok(63),
b'=' => {
padding += 1;
Ok(0)
}
_ => Err("illegal character!"),
})
.collect::<Result<Vec<u8>, &'static str>>()?;
for i in 0..=2 {
let octet = match i {
0 => ((sextets[0] & 0x3F) << 2) | ((sextets[1] & 0x30) >> 4),
1 => ((sextets[1] & 0x0F) << 4) | ((sextets[2] & 0x3C) >> 2),
2 => ((sextets[2] & 0x03) << 6) | ((sextets[3] & 0x3F) >> 0),
_ => return Err("too many octets!"),
};
if (i as i8) < (3 - padding) {
result.push(octet);
}
}
}
return Ok(result);
}
pub fn xor(a: &[u8], b: &[u8]) -> Result<Vec<u8>, &'static str> {
if a.len() != b.len() {
return Err("buffer size mismatch");
}
let result = a.iter()
.zip(b)
.map(|pair| match pair {
(&aa, &bb) => aa ^ bb,
})
.collect::<Vec<u8>>();
return Ok(result);
}
pub fn xor_in_place(data: &mut [u8], other: &[u8]) -> Result<usize, &'static str> {
if data.len() != other.len() {
return Err("buffer size mismatch");
}
let xor_count = data.iter_mut()
.zip(other)
.map(|(data_elem, other_elem)| {
*data_elem ^= other_elem;
})
.count();
return Ok(xor_count);
}
pub fn xor_repeat(plaintext: &[u8], key: &[u8]) -> Vec<u8> {
let result = plaintext
.iter()
.zip(key.iter().cycle())
.map(|pair| match pair {
(&aa, &bb) => aa ^ bb,
})
.collect::<Vec<u8>>();
return result;
}
use std::collections::BTreeMap;
pub fn char_freq_score(text: &[u8]) -> f64 {
let mut non_printable_count = 0;
let letter_freq: BTreeMap<u8, f64> = btreemap! {
b'a' => 0.08167,
b'b' => 0.01492,
b'c' => 0.02782,
b'd' => 0.04253,
b'e' => 0.12702,
b'f' => 0.02228,
b'g' => 0.02015,
b'h' => 0.06094,
b'i' => 0.06966,
b'j' => 0.00153,
b'k' => 0.00772,
b'l' => 0.04025,
b'm' => 0.02406,
b'n' => 0.06749,
b'o' => 0.07507,
b'p' => 0.01929,
b'q' => 0.00095,
b'r' => 0.05987,
b's' => 0.06327,
b't' => 0.09056,
b'u' => 0.02758,
b'v' => 0.00978,
b'w' => 0.02360,
b'x' => 0.00150,
b'y' => 0.01974,
b'z' => 0.00074,
};
let mut letter_counts: BTreeMap<u8, u32> = BTreeMap::new();
for letter in b'a'..=b'z' {
letter_counts.insert(letter, 0);
}
let mut num_letters = 0;
for letter in text {
match *letter {
// null
0 => {}
// non-printable characters
1...9 => non_printable_count += 1,
// newline
10 => {}
// more non-printable characters
11...31 => non_printable_count += 1,
// space
32 => {}
// printable symbols, including digits (ascii '!' - '@')
33...64 => {}
// upper-case letters
c @ 65...90 => {
*letter_counts.get_mut(&(c - 65 + 97)).unwrap() += 1;
num_letters += 1;
}
// more printable symbols (ascii '[' - '`')
91...96 => {}
// lower-case letters
c @ 97...122 => {
*letter_counts.get_mut(&c).unwrap() += 1;
num_letters += 1;
}
// more printable symbols (ascii '{' - '~')
123...126 => {}
// non-printable characters
_ => non_printable_count += 1,
}
}
if num_letters == 0 {
return 10000.0 + (non_printable_count as f64 * 500.0);
}
let mut chisquared = 0.0;
for (key, prob) in letter_freq {
chisquared += (num_letters as f64)
* ((*letter_counts.get(&key).unwrap() as f64 / num_letters as f64) - prob).powf(2.0)
/ prob;
}
return chisquared + (non_printable_count as f64 * 500.0);
}
extern crate bit_vec;
use self::bit_vec::BitVec;
pub fn hamming_distance(a: &[u8], b: &[u8]) -> Result<u32, &'static str> {
if a.len() != b.len() {
return Err("sequences must have same length");
}
let result = a.iter()
.zip(b.iter())
.map(|(aa, bb)| -> u32 {
BitVec::from_bytes(&[aa ^ bb])
.iter()
.map(|val| val as u32)
.sum()
})
.sum();
return Ok(result);
}
pub fn find_best_single_byte_xor(ciphertext: &[u8]) -> u8 {
let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256);
for i in 0..=256 {
let key: Vec<u8> = vec![i as u8; ciphertext.len()];
if let Ok(decoded_bytes) = xor(ciphertext, &key) {
let score = char_freq_score(&decoded_bytes);
decoded.push((score, i as u8, decoded_bytes));
}
}
decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let (_, key, _) = decoded[0];
return key;
}
pub fn to_hex(data: &[u8]) -> String {
data.iter()
.map(|b| format!("{:02x}", b))
.collect::<Vec<String>>()
.join("")
}
#[cfg(test)]
mod tests {
use set1;
extern crate hex;
use self::hex::FromHex;
#[test]
fn base64_encode() {
let example_hex = "49276d206b696c6c696e6720796f757220627261696e206c\
696b65206120706f69736f6e6f7573206d757368726f6f6d";
let example_bytes = Vec::from_hex(example_hex).unwrap();
if let Ok(b64) = set1::base64_encode(&example_bytes) {
assert_eq!(
b64,
"SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"
);
} else {
panic!();
}
let test = "foobar".as_bytes();
if let Ok(b64) = set1::base64_encode(&test) {
assert_eq!(b64, "Zm9vYmFy");
} else {
panic!();
}
}
#[test]
fn base64_decode() {
let example_b64 = "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t";
if let Ok(example) = set1::base64_decode(example_b64) {
assert_eq!(
example,
Vec::from_hex(
"49276d206b696c6c696e6720796f757220627261696e206c\
696b65206120706f69736f6e6f7573206d757368726f6f6d"
).unwrap()
);
} else {
panic!();
}
let b64 = "Zm9vYmFy";
if let Ok(test) = set1::base64_decode(&b64) {
assert_eq!(test, "foobar".as_bytes());
} else {
panic!();
}
}
#[test]
fn xor() {
let a = "1c0111001f010100061a024b53535009181c";
let b = "686974207468652062756c6c277320657965";
let res = "746865206b696420646f6e277420706c6179";
let a_bytes = Vec::from_hex(a).unwrap();
let b_bytes = Vec::from_hex(b).unwrap();
let res_bytes = Vec::from_hex(res).unwrap();
match set1::xor(&a_bytes, &b_bytes) {
Ok(r) => assert_eq!(r, res_bytes),
Err(str) => panic!(str),
};
}
use std::collections::BTreeMap;
use std::str;
#[test]
fn single_byte_xor_cipher() {
let encoded = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736";
let encoded_bytes = Vec::from_hex(encoded).unwrap();
//don't want to use a map here, because we'll lose any values with the same score
//let mut decoded: BTreeMap<u64, (u8, Vec<u8>)> = BTreeMap::new();
let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256);
for i in 0..=256 {
let key: Vec<u8> = vec![i as u8; encoded_bytes.len()];
if let Ok(decoded_bytes) = set1::xor(&encoded_bytes, &key) {
let score = set1::char_freq_score(&decoded_bytes);
//decoded.insert((score * 1000.0) as u64, (i as u8, decoded_bytes));
decoded.push((score, i as u8, decoded_bytes));
}
}
decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
//let &(key, ref value) = decoded.values().next().unwrap();
let (_, key, ref value) = decoded[0];
assert_eq!(key, 88);
assert_eq!(
str::from_utf8(value.as_slice()).unwrap(),
"Cooking MC's like a pound of bacon"
);
}
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
#[test]
fn detect_single_char_xor() {
let file = File::open("challenge-data/4.txt").unwrap();
let reader = BufReader::new(file);
let mut decoded = BTreeMap::new();
let mut line_num = 0;
for line in reader.lines() {
if let Ok(line) = line {
let line_bytes = Vec::from_hex(line).unwrap();
for i in 0..=256 {
let key: Vec<u8> = vec![i as u8; line_bytes.len()];
if let Ok(decoded_bytes) = set1::xor(&line_bytes, &key) {
let score = set1::char_freq_score(&decoded_bytes);
decoded.insert((score * 1000.0) as u64, (line_num, i as u8, decoded_bytes));
}
}
}
line_num += 1;
}
let mut found = false;
for (score, &(line, key, ref value)) in decoded.iter() {
let score: f64 = *score as f64 / 1000.0;
if score < 100.0 {
if line == 170 && key == 53 {
let value = str::from_utf8(value).unwrap();
assert_eq!(value, "Now that the party is jumping\n");
found = true;
}
}
}
assert!(found, "decrypted string not found!");
}
#[test]
fn repeating_key_xor() |
#[test]
fn hamming_distance() {
assert_eq!(
set1::hamming_distance("this is a test".as_bytes(), "wokka wokka!!!".as_bytes())
.unwrap(),
37
);
}
#[test]
fn break_repeating_key_xor() {
let mut f = File::open("challenge-data/6.txt").unwrap();
let mut encoded = String::new();
f.read_to_string(&mut encoded).unwrap();
let decoded = set1::base64_decode(&encoded).unwrap();
let mut results: Vec<(f32, usize)> = Vec::with_capacity(40);
for keysize in 2..=40 {
let sequences = decoded.chunks(keysize).collect::<Vec<&[u8]>>();
let norm_distances = sequences
.chunks(2)
.filter(|maybe_pair| maybe_pair.len() == 2)
.filter(|maybe_same_len| maybe_same_len[0].len() == maybe_same_len[1].len())
.map(|pair| {
set1::hamming_distance(pair[0], pair[1]).unwrap() as f32 / keysize as f32
})
.collect::<Vec<f32>>();
let norm_dist_avg: f32 = &norm_distances.iter().sum() / norm_distances.len() as f32;
results.push((norm_dist_avg, keysize));
}
results.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let keysize = results[0].1;
assert_eq!(keysize, 29);
let sequences = decoded.chunks(keysize).collect::<Vec<&[u8]>>();
let mut transposed: Vec<Vec<u8>> = Vec::with_capacity(keysize);
for i in 0..keysize {
let mut line = Vec::with_capacity(sequences.len());
for j in 0..sequences.len() {
if i < sequences[j].len() {
line.push(sequences[j][i]);
}
}
transposed.push(line);
}
let mut key: Vec<u8> = Vec::with_capacity(keysize);
for block in transposed {
let key_byte = set1::find_best_single_byte_xor(&block);
key.push(key_byte);
}
assert_eq!(
str::from_utf8(&key).unwrap(),
"Terminator X: Bring the noise"
);
let plaintext = set1::xor_repeat(&decoded, &key);
let plaintext = str::from_utf8(&plaintext).unwrap();
let mut f = File::open("challenge-data/6_plaintext.txt").unwrap();
let mut plaintext_ref = String::new();
f.read_to_string(&mut plaintext_ref).unwrap();
assert_eq!(plaintext, plaintext_ref);
}
extern crate openssl;
use self::openssl::symm;
use self::openssl::symm::Cipher;
use std::io::prelude::*;
#[test]
fn aes_ecb_mode() {
let mut f = File::open("challenge-data/7.txt").unwrap();
let mut encoded = String::new();
f.read_to_string(&mut encoded).unwrap();
let decoded = set1::base64_decode(&encoded).unwrap();
let plaintext = symm::decrypt(
Cipher::aes_128_ecb(),
"YELLOW SUBMARINE".as_bytes(),
None,
&decoded,
).unwrap();
let plaintext = str::from_utf8(&plaintext).unwrap();
let mut f = File::open("challenge-data/7_plaintext.txt").unwrap();
let mut plaintext_ref = String::new();
f.read_to_string(&mut plaintext_ref).unwrap();
assert_eq!(plaintext, plaintext_ref);
}
#[test]
fn detect_aes_ecb_mode() {
let f = File::open("challenge-data/8.txt").unwrap();
let reader = BufReader::new(f);
let mut results: Vec<(u32, usize, Vec<u8>)> = Vec::new();
let mut linenum = 0;
for line in reader.lines() {
if let Ok(line) = line {
let line_bytes = Vec::from_hex(line).unwrap();
let sequences = line_bytes.chunks(16).collect::<Vec<&[u8]>>();
let mut scores: Vec<u32> = Vec::new();
for i in 0..sequences.len() {
for j in (i + 1)..sequences.len() {
let score = set1::hamming_distance(sequences[i], sequences[j]).unwrap();
scores.push(score);
}
}
scores.sort();
results.push((scores[0], linenum, line_bytes.clone()));
linenum += 1;
}
}
results.sort_by_key(|elem| elem.0);
assert_eq!(results[0].1, 132);
}
}
| {
let plaintext = "Burning 'em, if you ain't quick and nimble\n\
I go crazy when I hear a cymbal";
let key = "ICE";
let plaintext = plaintext.as_bytes();
let key = key.as_bytes();
let ciphertext = set1::xor_repeat(&plaintext, &key);
let ciphertext_ref = "0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226\
324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c692b20\
283165286326302e27282f";
let ciphertext_ref = Vec::from_hex(ciphertext_ref).unwrap();
assert_eq!(ciphertext, ciphertext_ref);
} | identifier_body |
set1.rs | pub fn base64_encode(bytes: &[u8]) -> Result<String, &'static str> {
let mut result = String::new();
for group in bytes.chunks(3) {
let extended = match group.len() {
1 => [group[0], 0, 0],
2 => [group[0], group[1], 0],
3 => [group[0], group[1], group[2]],
_ => return Err("chunk too large!"),
};
for i in 0..=3 {
let sextet = match i {
0 => ((extended[0] & 0xFC) >> 2),
1 => ((extended[0] & 0x03) << 4) | ((extended[1] & 0xF0) >> 4),
2 => ((extended[1] & 0x0F) << 2) | ((extended[2] & 0xC0) >> 6),
3 => ((extended[2] & 0x3F) << 0),
_ => return Err("too many groups!"),
};
let symbol: char = match sextet {
c @ 0...25 => char::from(0x41 + c),
c @ 26...51 => char::from(0x61 + c - 26),
c @ 52...61 => char::from(0x30 + c - 52),
62 => '+',
63 => '/',
_ => return Err("too many bits!"),
};
if (group.len() as i8) - (i as i8) >= 0 {
result.push(symbol);
} else {
result.push('=');
}
}
}
return Ok(result);
}
pub fn base64_decode(encoded: &str) -> Result<Vec<u8>, &'static str> {
let mut result: Vec<u8> = Vec::with_capacity(encoded.len() * 3 / 4);
let encoded_stripped = encoded
.as_bytes()
.iter()
.cloned()
.filter(|letter| match *letter {
b'\n' => false,
_ => true,
})
.collect::<Vec<u8>>();
for group in encoded_stripped.chunks(4) {
if group.len() != 4 {
return Err("chunk too small!");
}
let mut padding: i8 = 0;
let sextets = group
.iter()
.map(|letter| match *letter {
c @ b'A'...b'Z' => Ok(c as u8 - 0x41),
c @ b'a'...b'z' => Ok(c as u8 - 0x61 + 26),
c @ b'0'...b'9' => Ok(c as u8 - 0x30 + 52),
b'+' => Ok(62),
b'/' => Ok(63),
b'=' => {
padding += 1;
Ok(0)
}
_ => Err("illegal character!"),
})
.collect::<Result<Vec<u8>, &'static str>>()?;
for i in 0..=2 {
let octet = match i {
0 => ((sextets[0] & 0x3F) << 2) | ((sextets[1] & 0x30) >> 4),
1 => ((sextets[1] & 0x0F) << 4) | ((sextets[2] & 0x3C) >> 2),
2 => ((sextets[2] & 0x03) << 6) | ((sextets[3] & 0x3F) >> 0),
_ => return Err("too many octets!"),
};
if (i as i8) < (3 - padding) {
result.push(octet);
}
}
}
return Ok(result);
}
pub fn xor(a: &[u8], b: &[u8]) -> Result<Vec<u8>, &'static str> {
if a.len() != b.len() {
return Err("buffer size mismatch");
}
let result = a.iter()
.zip(b)
.map(|pair| match pair {
(&aa, &bb) => aa ^ bb,
})
.collect::<Vec<u8>>();
return Ok(result);
}
pub fn xor_in_place(data: &mut [u8], other: &[u8]) -> Result<usize, &'static str> {
if data.len() != other.len() {
return Err("buffer size mismatch");
}
let xor_count = data.iter_mut()
.zip(other)
.map(|(data_elem, other_elem)| {
*data_elem ^= other_elem;
})
.count();
return Ok(xor_count);
}
pub fn xor_repeat(plaintext: &[u8], key: &[u8]) -> Vec<u8> {
let result = plaintext
.iter()
.zip(key.iter().cycle())
.map(|pair| match pair {
(&aa, &bb) => aa ^ bb,
})
.collect::<Vec<u8>>();
return result;
}
use std::collections::BTreeMap;
pub fn char_freq_score(text: &[u8]) -> f64 {
let mut non_printable_count = 0;
let letter_freq: BTreeMap<u8, f64> = btreemap! {
b'a' => 0.08167,
b'b' => 0.01492,
b'c' => 0.02782,
b'd' => 0.04253,
b'e' => 0.12702,
b'f' => 0.02228,
b'g' => 0.02015,
b'h' => 0.06094,
b'i' => 0.06966,
b'j' => 0.00153,
b'k' => 0.00772,
b'l' => 0.04025,
b'm' => 0.02406,
b'n' => 0.06749,
b'o' => 0.07507,
b'p' => 0.01929,
b'q' => 0.00095,
b'r' => 0.05987,
b's' => 0.06327,
b't' => 0.09056,
b'u' => 0.02758,
b'v' => 0.00978,
b'w' => 0.02360,
b'x' => 0.00150,
b'y' => 0.01974,
b'z' => 0.00074,
};
let mut letter_counts: BTreeMap<u8, u32> = BTreeMap::new();
for letter in b'a'..=b'z' {
letter_counts.insert(letter, 0);
}
let mut num_letters = 0;
for letter in text {
match *letter {
// null
0 => {}
// non-printable characters
1...9 => non_printable_count += 1,
// newline
10 => {}
// more non-printable characters
11...31 => non_printable_count += 1,
// space
32 => {}
// printable symbols, including digits (ascii '!' - '@')
33...64 => {}
// upper-case letters
c @ 65...90 => {
*letter_counts.get_mut(&(c - 65 + 97)).unwrap() += 1;
num_letters += 1;
}
// more printable symbols (ascii '[' - '`')
91...96 => {}
// lower-case letters
c @ 97...122 => {
*letter_counts.get_mut(&c).unwrap() += 1;
num_letters += 1;
}
// more printable symbols (ascii '{' - '~')
123...126 => {}
// non-printable characters
_ => non_printable_count += 1,
}
}
if num_letters == 0 {
return 10000.0 + (non_printable_count as f64 * 500.0);
}
let mut chisquared = 0.0;
for (key, prob) in letter_freq {
chisquared += (num_letters as f64)
* ((*letter_counts.get(&key).unwrap() as f64 / num_letters as f64) - prob).powf(2.0)
/ prob;
}
return chisquared + (non_printable_count as f64 * 500.0);
}
extern crate bit_vec;
use self::bit_vec::BitVec;
pub fn hamming_distance(a: &[u8], b: &[u8]) -> Result<u32, &'static str> {
if a.len() != b.len() {
return Err("sequences must have same length");
}
let result = a.iter()
.zip(b.iter())
.map(|(aa, bb)| -> u32 {
BitVec::from_bytes(&[aa ^ bb])
.iter()
.map(|val| val as u32)
.sum()
})
.sum();
return Ok(result);
}
pub fn find_best_single_byte_xor(ciphertext: &[u8]) -> u8 {
let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256);
for i in 0..=256 {
let key: Vec<u8> = vec![i as u8; ciphertext.len()];
if let Ok(decoded_bytes) = xor(ciphertext, &key) {
let score = char_freq_score(&decoded_bytes);
decoded.push((score, i as u8, decoded_bytes));
}
}
decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let (_, key, _) = decoded[0];
return key;
}
pub fn to_hex(data: &[u8]) -> String {
data.iter()
.map(|b| format!("{:02x}", b))
.collect::<Vec<String>>()
.join("")
}
#[cfg(test)]
mod tests {
use set1;
extern crate hex;
use self::hex::FromHex;
#[test]
fn base64_encode() {
let example_hex = "49276d206b696c6c696e6720796f757220627261696e206c\
696b65206120706f69736f6e6f7573206d757368726f6f6d";
let example_bytes = Vec::from_hex(example_hex).unwrap();
if let Ok(b64) = set1::base64_encode(&example_bytes) {
assert_eq!(
b64,
"SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"
);
} else {
panic!();
}
let test = "foobar".as_bytes();
if let Ok(b64) = set1::base64_encode(&test) {
assert_eq!(b64, "Zm9vYmFy");
} else {
panic!();
}
}
#[test]
fn base64_decode() {
let example_b64 = "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t";
if let Ok(example) = set1::base64_decode(example_b64) {
assert_eq!(
example,
Vec::from_hex(
"49276d206b696c6c696e6720796f757220627261696e206c\
696b65206120706f69736f6e6f7573206d757368726f6f6d"
).unwrap()
);
} else {
panic!();
}
let b64 = "Zm9vYmFy";
if let Ok(test) = set1::base64_decode(&b64) {
assert_eq!(test, "foobar".as_bytes());
} else {
panic!(); | #[test]
fn xor() {
let a = "1c0111001f010100061a024b53535009181c";
let b = "686974207468652062756c6c277320657965";
let res = "746865206b696420646f6e277420706c6179";
let a_bytes = Vec::from_hex(a).unwrap();
let b_bytes = Vec::from_hex(b).unwrap();
let res_bytes = Vec::from_hex(res).unwrap();
match set1::xor(&a_bytes, &b_bytes) {
Ok(r) => assert_eq!(r, res_bytes),
Err(str) => panic!(str),
};
}
use std::collections::BTreeMap;
use std::str;
#[test]
fn single_byte_xor_cipher() {
let encoded = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736";
let encoded_bytes = Vec::from_hex(encoded).unwrap();
//don't want to use a map here, because we'll lose any values with the same score
//let mut decoded: BTreeMap<u64, (u8, Vec<u8>)> = BTreeMap::new();
let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256);
for i in 0..=256 {
let key: Vec<u8> = vec![i as u8; encoded_bytes.len()];
if let Ok(decoded_bytes) = set1::xor(&encoded_bytes, &key) {
let score = set1::char_freq_score(&decoded_bytes);
//decoded.insert((score * 1000.0) as u64, (i as u8, decoded_bytes));
decoded.push((score, i as u8, decoded_bytes));
}
}
decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
//let &(key, ref value) = decoded.values().next().unwrap();
let (_, key, ref value) = decoded[0];
assert_eq!(key, 88);
assert_eq!(
str::from_utf8(value.as_slice()).unwrap(),
"Cooking MC's like a pound of bacon"
);
}
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
#[test]
fn detect_single_char_xor() {
let file = File::open("challenge-data/4.txt").unwrap();
let reader = BufReader::new(file);
let mut decoded = BTreeMap::new();
let mut line_num = 0;
for line in reader.lines() {
if let Ok(line) = line {
let line_bytes = Vec::from_hex(line).unwrap();
for i in 0..=256 {
let key: Vec<u8> = vec![i as u8; line_bytes.len()];
if let Ok(decoded_bytes) = set1::xor(&line_bytes, &key) {
let score = set1::char_freq_score(&decoded_bytes);
decoded.insert((score * 1000.0) as u64, (line_num, i as u8, decoded_bytes));
}
}
}
line_num += 1;
}
let mut found = false;
for (score, &(line, key, ref value)) in decoded.iter() {
let score: f64 = *score as f64 / 1000.0;
if score < 100.0 {
if line == 170 && key == 53 {
let value = str::from_utf8(value).unwrap();
assert_eq!(value, "Now that the party is jumping\n");
found = true;
}
}
}
assert!(found, "decrypted string not found!");
}
#[test]
fn repeating_key_xor() {
let plaintext = "Burning 'em, if you ain't quick and nimble\n\
I go crazy when I hear a cymbal";
let key = "ICE";
let plaintext = plaintext.as_bytes();
let key = key.as_bytes();
let ciphertext = set1::xor_repeat(&plaintext, &key);
let ciphertext_ref = "0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226\
324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c692b20\
283165286326302e27282f";
let ciphertext_ref = Vec::from_hex(ciphertext_ref).unwrap();
assert_eq!(ciphertext, ciphertext_ref);
}
#[test]
fn hamming_distance() {
assert_eq!(
set1::hamming_distance("this is a test".as_bytes(), "wokka wokka!!!".as_bytes())
.unwrap(),
37
);
}
#[test]
fn break_repeating_key_xor() {
let mut f = File::open("challenge-data/6.txt").unwrap();
let mut encoded = String::new();
f.read_to_string(&mut encoded).unwrap();
let decoded = set1::base64_decode(&encoded).unwrap();
let mut results: Vec<(f32, usize)> = Vec::with_capacity(40);
for keysize in 2..=40 {
let sequences = decoded.chunks(keysize).collect::<Vec<&[u8]>>();
let norm_distances = sequences
.chunks(2)
.filter(|maybe_pair| maybe_pair.len() == 2)
.filter(|maybe_same_len| maybe_same_len[0].len() == maybe_same_len[1].len())
.map(|pair| {
set1::hamming_distance(pair[0], pair[1]).unwrap() as f32 / keysize as f32
})
.collect::<Vec<f32>>();
let norm_dist_avg: f32 = &norm_distances.iter().sum() / norm_distances.len() as f32;
results.push((norm_dist_avg, keysize));
}
results.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let keysize = results[0].1;
assert_eq!(keysize, 29);
let sequences = decoded.chunks(keysize).collect::<Vec<&[u8]>>();
let mut transposed: Vec<Vec<u8>> = Vec::with_capacity(keysize);
for i in 0..keysize {
let mut line = Vec::with_capacity(sequences.len());
for j in 0..sequences.len() {
if i < sequences[j].len() {
line.push(sequences[j][i]);
}
}
transposed.push(line);
}
let mut key: Vec<u8> = Vec::with_capacity(keysize);
for block in transposed {
let key_byte = set1::find_best_single_byte_xor(&block);
key.push(key_byte);
}
assert_eq!(
str::from_utf8(&key).unwrap(),
"Terminator X: Bring the noise"
);
let plaintext = set1::xor_repeat(&decoded, &key);
let plaintext = str::from_utf8(&plaintext).unwrap();
let mut f = File::open("challenge-data/6_plaintext.txt").unwrap();
let mut plaintext_ref = String::new();
f.read_to_string(&mut plaintext_ref).unwrap();
assert_eq!(plaintext, plaintext_ref);
}
extern crate openssl;
use self::openssl::symm;
use self::openssl::symm::Cipher;
use std::io::prelude::*;
#[test]
fn aes_ecb_mode() {
let mut f = File::open("challenge-data/7.txt").unwrap();
let mut encoded = String::new();
f.read_to_string(&mut encoded).unwrap();
let decoded = set1::base64_decode(&encoded).unwrap();
let plaintext = symm::decrypt(
Cipher::aes_128_ecb(),
"YELLOW SUBMARINE".as_bytes(),
None,
&decoded,
).unwrap();
let plaintext = str::from_utf8(&plaintext).unwrap();
let mut f = File::open("challenge-data/7_plaintext.txt").unwrap();
let mut plaintext_ref = String::new();
f.read_to_string(&mut plaintext_ref).unwrap();
assert_eq!(plaintext, plaintext_ref);
}
#[test]
fn detect_aes_ecb_mode() {
let f = File::open("challenge-data/8.txt").unwrap();
let reader = BufReader::new(f);
let mut results: Vec<(u32, usize, Vec<u8>)> = Vec::new();
let mut linenum = 0;
for line in reader.lines() {
if let Ok(line) = line {
let line_bytes = Vec::from_hex(line).unwrap();
let sequences = line_bytes.chunks(16).collect::<Vec<&[u8]>>();
let mut scores: Vec<u32> = Vec::new();
for i in 0..sequences.len() {
for j in (i + 1)..sequences.len() {
let score = set1::hamming_distance(sequences[i], sequences[j]).unwrap();
scores.push(score);
}
}
scores.sort();
results.push((scores[0], linenum, line_bytes.clone()));
linenum += 1;
}
}
results.sort_by_key(|elem| elem.0);
assert_eq!(results[0].1, 132);
}
} | }
}
| random_line_split |
alerts.go | package commands
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"os/signal"
"strings"
"time"
"io/ioutil"
"net/url"
"github.com/pkg/errors"
"github.com/prometheus/alertmanager/config"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"gopkg.in/alecthomas/kingpin.v2"
"github.com/grafana/cortex-tools/pkg/client"
"github.com/grafana/cortex-tools/pkg/printer"
)
var (
nonDuplicateAlerts = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "cortextool_alerts_single_source",
Help: "Alerts found by the alerts verify command that are coming from a single source rather than multiple sources..",
},
)
)
// AlertmanagerCommand configures and executes rule related cortex api operations
type AlertmanagerCommand struct {
ClientConfig client.Config
AlertmanagerURL url.URL
AlertmanagerConfigFile string
TemplateFiles []string
DisableColor bool
cli *client.CortexClient
}
// AlertCommand configures and executes rule related PromQL queries for alerts comparison.
type AlertCommand struct {
CortexURL string
IgnoreString string
IgnoreAlerts map[string]interface{}
SourceLabel string
NumSources int
GracePeriod int
CheckFrequency int
ClientConfig client.Config
cli *client.CortexClient
}
// Register rule related commands and flags with the kingpin application
func (a *AlertmanagerCommand) Register(app *kingpin.Application) {
alertCmd := app.Command("alertmanager", "View & edit alertmanager configs stored in cortex.").PreAction(a.setup)
alertCmd.Flag("address", "Address of the cortex cluster, alternatively set CORTEX_ADDRESS.").Envar("CORTEX_ADDRESS").Required().StringVar(&a.ClientConfig.Address)
alertCmd.Flag("id", "Cortex tenant id, alternatively set CORTEX_TENANT_ID.").Envar("CORTEX_TENANT_ID").Required().StringVar(&a.ClientConfig.ID)
alertCmd.Flag("authToken", "Authentication token for bearer token or JWT auth, alternatively set CORTEX_AUTH_TOKEN.").Default("").Envar("CORTEX_AUTH_TOKEN").StringVar(&a.ClientConfig.AuthToken)
alertCmd.Flag("user", "API user to use when contacting cortex, alternatively set CORTEX_API_USER. If empty, CORTEX_TENANT_ID will be used instead.").Default("").Envar("CORTEX_API_USER").StringVar(&a.ClientConfig.User)
alertCmd.Flag("key", "API key to use when contacting cortex, alternatively set CORTEX_API_KEY.").Default("").Envar("CORTEX_API_KEY").StringVar(&a.ClientConfig.Key)
alertCmd.Flag("tls-ca-path", "TLS CA certificate to verify cortex API as part of mTLS, alternatively set CORTEX_TLS_CA_PATH.").Default("").Envar("CORTEX_TLS_CA_PATH").StringVar(&a.ClientConfig.TLS.CAPath)
alertCmd.Flag("tls-cert-path", "TLS client certificate to authenticate with cortex API as part of mTLS, alternatively set CORTEX_TLS_CERT_PATH.").Default("").Envar("CORTEX_TLS_CERT_PATH").StringVar(&a.ClientConfig.TLS.CertPath)
alertCmd.Flag("tls-key-path", "TLS client certificate private key to authenticate with cortex API as part of mTLS, alternatively set CORTEX_TLS_KEY_PATH.").Default("").Envar("CORTEX_TLS_KEY_PATH").StringVar(&a.ClientConfig.TLS.KeyPath)
// Get Alertmanager Configs Command
getAlertsCmd := alertCmd.Command("get", "Get the alertmanager config currently in the cortex alertmanager.").Action(a.getConfig)
getAlertsCmd.Flag("disable-color", "disable colored output").BoolVar(&a.DisableColor)
alertCmd.Command("delete", "Delete the alertmanager config currently in the cortex alertmanager.").Action(a.deleteConfig)
loadalertCmd := alertCmd.Command("load", "load a set of rules to a designated cortex endpoint").Action(a.loadConfig)
loadalertCmd.Arg("config", "alertmanager configuration to load").Required().StringVar(&a.AlertmanagerConfigFile)
loadalertCmd.Arg("template-files", "The template files to load").ExistingFilesVar(&a.TemplateFiles)
}
func (a *AlertmanagerCommand) setup(k *kingpin.ParseContext) error {
cli, err := client.New(a.ClientConfig)
if err != nil {
return err
}
a.cli = cli
return nil
}
func (a *AlertmanagerCommand) getConfig(k *kingpin.ParseContext) error {
cfg, templates, err := a.cli.GetAlertmanagerConfig(context.Background())
if err != nil {
if err == client.ErrResourceNotFound {
log.Infof("no alertmanager config currently exist for this user")
return nil
}
return err
}
p := printer.New(a.DisableColor)
return p.PrintAlertmanagerConfig(cfg, templates)
}
func (a *AlertmanagerCommand) loadConfig(k *kingpin.ParseContext) error {
content, err := ioutil.ReadFile(a.AlertmanagerConfigFile)
if err != nil {
return errors.Wrap(err, "unable to load config file: "+a.AlertmanagerConfigFile)
}
cfg := string(content)
_, err = config.Load(cfg)
if err != nil {
return err
}
templates := map[string]string{}
for _, f := range a.TemplateFiles {
tmpl, err := ioutil.ReadFile(f)
if err != nil {
return errors.Wrap(err, "unable to load template file: "+f)
}
templates[f] = string(tmpl)
}
return a.cli.CreateAlertmanagerConfig(context.Background(), cfg, templates)
}
func (a *AlertmanagerCommand) deleteConfig(k *kingpin.ParseContext) error {
err := a.cli.DeleteAlermanagerConfig(context.Background())
if err != nil && err != client.ErrResourceNotFound {
return err
}
return nil
}
func (a *AlertCommand) Register(app *kingpin.Application) {
alertCmd := app.Command("alerts", "View active alerts in alertmanager.").PreAction(a.setup)
alertCmd.Flag("address", "Address of the cortex cluster, alternatively set CORTEX_ADDRESS.").Envar("CORTEX_ADDRESS").Required().StringVar(&a.ClientConfig.Address)
alertCmd.Flag("id", "Cortex tenant id, alternatively set CORTEX_TENANT_ID.").Envar("CORTEX_TENANT_ID").Required().StringVar(&a.ClientConfig.ID)
alertCmd.Flag("authToken", "Authentication token for bearer token or JWT auth, alternatively set CORTEX_AUTH_TOKEN.").Default("").Envar("CORTEX_AUTH_TOKEN").StringVar(&a.ClientConfig.AuthToken)
alertCmd.Flag("user", "API user to use when contacting cortex, alternatively set CORTEX_API_USER. If empty, CORTEX_TENANT_ID will be used instead.").Default("").Envar("CORTEX_API_USER").StringVar(&a.ClientConfig.User)
alertCmd.Flag("key", "API key to use when contacting cortex, alternatively set CORTEX_API_KEY.").Default("").Envar("CORTEX_API_KEY").StringVar(&a.ClientConfig.Key)
verifyAlertsCmd := alertCmd.Command("verify", "Verifies alerts in an alertmanager cluster are deduplicated; useful for verifying correct configuration when transferring from Prometheus to Cortex alert evaluation.").Action(a.verifyConfig)
verifyAlertsCmd.Flag("ignore-alerts", "A comma separated list of Alert names to ignore in deduplication checks.").StringVar(&a.IgnoreString)
verifyAlertsCmd.Flag("source-label", "Label to look for when deciding if two alerts are duplicates of eachother from separate sources.").Default("prometheus").StringVar(&a.SourceLabel)
verifyAlertsCmd.Flag("grace-period", "Grace period, don't consider alert groups with the incorrect amount of alert replicas erroneous unless the alerts have existed for more than this amount of time, in minutes.").Default("2").IntVar(&a.GracePeriod)
verifyAlertsCmd.Flag("frequency", "Setting this value will turn cortextool into a long-running process, running the alerts verify check every # of minutes specified").IntVar(&a.CheckFrequency)
}
func (a *AlertCommand) setup(k *kingpin.ParseContext) error {
cli, err := client.New(a.ClientConfig)
if err != nil {
return err
}
a.cli = cli
return nil
}
type queryResult struct {
Status string `json:"status"`
Data queryData `json:"data"`
}
type queryData struct {
ResultType string `json:"resultType"`
Result []metric `json:"result"`
}
type metric struct {
Metric map[string]string `json:"metric"`
}
func (a *AlertCommand) verifyConfig(k *kingpin.ParseContext) error {
var empty interface{}
if a.IgnoreString != "" {
a.IgnoreAlerts = make(map[string]interface{})
chunks := strings.Split(a.IgnoreString, ",")
for _, name := range chunks {
a.IgnoreAlerts[name] = empty
log.Info("Ignoring alerts with name: ", name)
}
}
lhs := fmt.Sprintf("ALERTS{source!=\"%s\", alertstate=\"firing\"} offset %dm unless ignoring(source) ALERTS{source=\"%s\", alertstate=\"firing\"}",
a.SourceLabel,
a.GracePeriod,
a.SourceLabel)
rhs := fmt.Sprintf("ALERTS{source=\"%s\", alertstate=\"firing\"} offset %dm unless ignoring(source) ALERTS{source!=\"%s\", alertstate=\"firing\"}",
a.SourceLabel,
a.GracePeriod,
a.SourceLabel)
query := fmt.Sprintf("%s or %s", lhs, rhs)
if a.CheckFrequency <= 0 {
_, err := a.runVerifyQuery(context.Background(), query)
return err
}
// Use a different registerer than default so we don't get all the Cortex metrics, but include Go runtime metrics.
goStats := collectors.NewGoCollector()
reg := prometheus.NewRegistry()
reg.MustRegister(nonDuplicateAlerts)
reg.MustRegister(goStats)
http.Handle("/metrics", promhttp.HandlerFor(
reg,
promhttp.HandlerOpts{},
))
go func() {
log.Fatal(http.ListenAndServe(":9090", nil))
}()
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
defer func() {
signal.Stop(c)
cancel()
}()
var lastErr error
var n int
go func() {
ticker := time.NewTicker(time.Duration(a.CheckFrequency) * time.Minute)
for {
n, lastErr = a.runVerifyQuery(ctx, query)
nonDuplicateAlerts.Set(float64(n))
select {
case <-c:
cancel()
return
case <-ticker.C:
continue
}
}
}()
<-ctx.Done()
return lastErr
}
func (a *AlertCommand) runVerifyQuery(ctx context.Context, query string) (int, error) {
res, err := a.cli.Query(ctx, query)
if err != nil {
return 0, err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return 0, err
}
defer res.Body.Close()
var data queryResult
err = json.Unmarshal(body, &data)
if err != nil {
return 0, err
}
for _, m := range data.Data.Result {
if _, ok := a.IgnoreAlerts[m.Metric["alertname"]]; !ok |
}
log.WithFields(log.Fields{"count": len(data.Data.Result)}).Infof("found mismatching alerts")
return len(data.Data.Result), nil
}
| {
log.WithFields(log.Fields{
"alertname": m.Metric["alertname"],
"state": m.Metric,
}).Infof("alert found that was not in both sources")
} | conditional_block |
alerts.go | package commands
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"os/signal"
"strings"
"time"
"io/ioutil"
"net/url"
"github.com/pkg/errors"
"github.com/prometheus/alertmanager/config"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"gopkg.in/alecthomas/kingpin.v2"
"github.com/grafana/cortex-tools/pkg/client"
"github.com/grafana/cortex-tools/pkg/printer"
)
var (
nonDuplicateAlerts = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "cortextool_alerts_single_source",
Help: "Alerts found by the alerts verify command that are coming from a single source rather than multiple sources..",
},
)
)
// AlertmanagerCommand configures and executes rule related cortex api operations
type AlertmanagerCommand struct {
ClientConfig client.Config
AlertmanagerURL url.URL
AlertmanagerConfigFile string
TemplateFiles []string
DisableColor bool
cli *client.CortexClient
}
// AlertCommand configures and executes rule related PromQL queries for alerts comparison.
type AlertCommand struct {
CortexURL string
IgnoreString string
IgnoreAlerts map[string]interface{}
SourceLabel string
NumSources int
GracePeriod int
CheckFrequency int
ClientConfig client.Config
cli *client.CortexClient
}
// Register rule related commands and flags with the kingpin application
func (a *AlertmanagerCommand) Register(app *kingpin.Application) {
alertCmd := app.Command("alertmanager", "View & edit alertmanager configs stored in cortex.").PreAction(a.setup)
alertCmd.Flag("address", "Address of the cortex cluster, alternatively set CORTEX_ADDRESS.").Envar("CORTEX_ADDRESS").Required().StringVar(&a.ClientConfig.Address)
alertCmd.Flag("id", "Cortex tenant id, alternatively set CORTEX_TENANT_ID.").Envar("CORTEX_TENANT_ID").Required().StringVar(&a.ClientConfig.ID)
alertCmd.Flag("authToken", "Authentication token for bearer token or JWT auth, alternatively set CORTEX_AUTH_TOKEN.").Default("").Envar("CORTEX_AUTH_TOKEN").StringVar(&a.ClientConfig.AuthToken)
alertCmd.Flag("user", "API user to use when contacting cortex, alternatively set CORTEX_API_USER. If empty, CORTEX_TENANT_ID will be used instead.").Default("").Envar("CORTEX_API_USER").StringVar(&a.ClientConfig.User)
alertCmd.Flag("key", "API key to use when contacting cortex, alternatively set CORTEX_API_KEY.").Default("").Envar("CORTEX_API_KEY").StringVar(&a.ClientConfig.Key)
alertCmd.Flag("tls-ca-path", "TLS CA certificate to verify cortex API as part of mTLS, alternatively set CORTEX_TLS_CA_PATH.").Default("").Envar("CORTEX_TLS_CA_PATH").StringVar(&a.ClientConfig.TLS.CAPath)
alertCmd.Flag("tls-cert-path", "TLS client certificate to authenticate with cortex API as part of mTLS, alternatively set CORTEX_TLS_CERT_PATH.").Default("").Envar("CORTEX_TLS_CERT_PATH").StringVar(&a.ClientConfig.TLS.CertPath)
alertCmd.Flag("tls-key-path", "TLS client certificate private key to authenticate with cortex API as part of mTLS, alternatively set CORTEX_TLS_KEY_PATH.").Default("").Envar("CORTEX_TLS_KEY_PATH").StringVar(&a.ClientConfig.TLS.KeyPath)
// Get Alertmanager Configs Command
getAlertsCmd := alertCmd.Command("get", "Get the alertmanager config currently in the cortex alertmanager.").Action(a.getConfig)
getAlertsCmd.Flag("disable-color", "disable colored output").BoolVar(&a.DisableColor)
alertCmd.Command("delete", "Delete the alertmanager config currently in the cortex alertmanager.").Action(a.deleteConfig)
loadalertCmd := alertCmd.Command("load", "load a set of rules to a designated cortex endpoint").Action(a.loadConfig)
loadalertCmd.Arg("config", "alertmanager configuration to load").Required().StringVar(&a.AlertmanagerConfigFile)
loadalertCmd.Arg("template-files", "The template files to load").ExistingFilesVar(&a.TemplateFiles)
}
func (a *AlertmanagerCommand) setup(k *kingpin.ParseContext) error {
cli, err := client.New(a.ClientConfig)
if err != nil {
return err
}
a.cli = cli
return nil
}
func (a *AlertmanagerCommand) getConfig(k *kingpin.ParseContext) error {
cfg, templates, err := a.cli.GetAlertmanagerConfig(context.Background())
if err != nil {
if err == client.ErrResourceNotFound {
log.Infof("no alertmanager config currently exist for this user")
return nil
}
return err
}
p := printer.New(a.DisableColor)
return p.PrintAlertmanagerConfig(cfg, templates)
}
func (a *AlertmanagerCommand) loadConfig(k *kingpin.ParseContext) error {
content, err := ioutil.ReadFile(a.AlertmanagerConfigFile)
if err != nil {
return errors.Wrap(err, "unable to load config file: "+a.AlertmanagerConfigFile)
}
cfg := string(content)
_, err = config.Load(cfg)
if err != nil {
return err
}
templates := map[string]string{}
for _, f := range a.TemplateFiles {
tmpl, err := ioutil.ReadFile(f)
if err != nil {
return errors.Wrap(err, "unable to load template file: "+f)
}
templates[f] = string(tmpl)
}
return a.cli.CreateAlertmanagerConfig(context.Background(), cfg, templates)
}
func (a *AlertmanagerCommand) deleteConfig(k *kingpin.ParseContext) error {
err := a.cli.DeleteAlermanagerConfig(context.Background())
if err != nil && err != client.ErrResourceNotFound {
return err
}
return nil
}
func (a *AlertCommand) Register(app *kingpin.Application) {
alertCmd := app.Command("alerts", "View active alerts in alertmanager.").PreAction(a.setup)
alertCmd.Flag("address", "Address of the cortex cluster, alternatively set CORTEX_ADDRESS.").Envar("CORTEX_ADDRESS").Required().StringVar(&a.ClientConfig.Address)
alertCmd.Flag("id", "Cortex tenant id, alternatively set CORTEX_TENANT_ID.").Envar("CORTEX_TENANT_ID").Required().StringVar(&a.ClientConfig.ID)
alertCmd.Flag("authToken", "Authentication token for bearer token or JWT auth, alternatively set CORTEX_AUTH_TOKEN.").Default("").Envar("CORTEX_AUTH_TOKEN").StringVar(&a.ClientConfig.AuthToken)
alertCmd.Flag("user", "API user to use when contacting cortex, alternatively set CORTEX_API_USER. If empty, CORTEX_TENANT_ID will be used instead.").Default("").Envar("CORTEX_API_USER").StringVar(&a.ClientConfig.User)
alertCmd.Flag("key", "API key to use when contacting cortex, alternatively set CORTEX_API_KEY.").Default("").Envar("CORTEX_API_KEY").StringVar(&a.ClientConfig.Key)
verifyAlertsCmd := alertCmd.Command("verify", "Verifies alerts in an alertmanager cluster are deduplicated; useful for verifying correct configuration when transferring from Prometheus to Cortex alert evaluation.").Action(a.verifyConfig)
verifyAlertsCmd.Flag("ignore-alerts", "A comma separated list of Alert names to ignore in deduplication checks.").StringVar(&a.IgnoreString)
verifyAlertsCmd.Flag("source-label", "Label to look for when deciding if two alerts are duplicates of eachother from separate sources.").Default("prometheus").StringVar(&a.SourceLabel)
verifyAlertsCmd.Flag("grace-period", "Grace period, don't consider alert groups with the incorrect amount of alert replicas erroneous unless the alerts have existed for more than this amount of time, in minutes.").Default("2").IntVar(&a.GracePeriod)
verifyAlertsCmd.Flag("frequency", "Setting this value will turn cortextool into a long-running process, running the alerts verify check every # of minutes specified").IntVar(&a.CheckFrequency)
}
func (a *AlertCommand) setup(k *kingpin.ParseContext) error {
cli, err := client.New(a.ClientConfig)
if err != nil {
return err
}
a.cli = cli
return nil
}
type queryResult struct {
Status string `json:"status"`
Data queryData `json:"data"`
}
type queryData struct {
ResultType string `json:"resultType"`
Result []metric `json:"result"`
}
type metric struct {
Metric map[string]string `json:"metric"`
}
func (a *AlertCommand) | (k *kingpin.ParseContext) error {
var empty interface{}
if a.IgnoreString != "" {
a.IgnoreAlerts = make(map[string]interface{})
chunks := strings.Split(a.IgnoreString, ",")
for _, name := range chunks {
a.IgnoreAlerts[name] = empty
log.Info("Ignoring alerts with name: ", name)
}
}
lhs := fmt.Sprintf("ALERTS{source!=\"%s\", alertstate=\"firing\"} offset %dm unless ignoring(source) ALERTS{source=\"%s\", alertstate=\"firing\"}",
a.SourceLabel,
a.GracePeriod,
a.SourceLabel)
rhs := fmt.Sprintf("ALERTS{source=\"%s\", alertstate=\"firing\"} offset %dm unless ignoring(source) ALERTS{source!=\"%s\", alertstate=\"firing\"}",
a.SourceLabel,
a.GracePeriod,
a.SourceLabel)
query := fmt.Sprintf("%s or %s", lhs, rhs)
if a.CheckFrequency <= 0 {
_, err := a.runVerifyQuery(context.Background(), query)
return err
}
// Use a different registerer than default so we don't get all the Cortex metrics, but include Go runtime metrics.
goStats := collectors.NewGoCollector()
reg := prometheus.NewRegistry()
reg.MustRegister(nonDuplicateAlerts)
reg.MustRegister(goStats)
http.Handle("/metrics", promhttp.HandlerFor(
reg,
promhttp.HandlerOpts{},
))
go func() {
log.Fatal(http.ListenAndServe(":9090", nil))
}()
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
defer func() {
signal.Stop(c)
cancel()
}()
var lastErr error
var n int
go func() {
ticker := time.NewTicker(time.Duration(a.CheckFrequency) * time.Minute)
for {
n, lastErr = a.runVerifyQuery(ctx, query)
nonDuplicateAlerts.Set(float64(n))
select {
case <-c:
cancel()
return
case <-ticker.C:
continue
}
}
}()
<-ctx.Done()
return lastErr
}
func (a *AlertCommand) runVerifyQuery(ctx context.Context, query string) (int, error) {
res, err := a.cli.Query(ctx, query)
if err != nil {
return 0, err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return 0, err
}
defer res.Body.Close()
var data queryResult
err = json.Unmarshal(body, &data)
if err != nil {
return 0, err
}
for _, m := range data.Data.Result {
if _, ok := a.IgnoreAlerts[m.Metric["alertname"]]; !ok {
log.WithFields(log.Fields{
"alertname": m.Metric["alertname"],
"state": m.Metric,
}).Infof("alert found that was not in both sources")
}
}
log.WithFields(log.Fields{"count": len(data.Data.Result)}).Infof("found mismatching alerts")
return len(data.Data.Result), nil
}
| verifyConfig | identifier_name |
alerts.go | package commands
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"os/signal"
"strings"
"time"
"io/ioutil"
"net/url"
"github.com/pkg/errors"
"github.com/prometheus/alertmanager/config"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"gopkg.in/alecthomas/kingpin.v2"
"github.com/grafana/cortex-tools/pkg/client"
"github.com/grafana/cortex-tools/pkg/printer"
)
var (
nonDuplicateAlerts = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "cortextool_alerts_single_source",
Help: "Alerts found by the alerts verify command that are coming from a single source rather than multiple sources..",
},
)
)
// AlertmanagerCommand configures and executes rule related cortex api operations
type AlertmanagerCommand struct {
ClientConfig client.Config
AlertmanagerURL url.URL
AlertmanagerConfigFile string
TemplateFiles []string
DisableColor bool
cli *client.CortexClient
}
// AlertCommand configures and executes rule related PromQL queries for alerts comparison.
type AlertCommand struct {
CortexURL string
IgnoreString string
IgnoreAlerts map[string]interface{}
SourceLabel string
NumSources int
GracePeriod int
CheckFrequency int
ClientConfig client.Config
cli *client.CortexClient
}
// Register rule related commands and flags with the kingpin application
func (a *AlertmanagerCommand) Register(app *kingpin.Application) {
alertCmd := app.Command("alertmanager", "View & edit alertmanager configs stored in cortex.").PreAction(a.setup)
alertCmd.Flag("address", "Address of the cortex cluster, alternatively set CORTEX_ADDRESS.").Envar("CORTEX_ADDRESS").Required().StringVar(&a.ClientConfig.Address)
alertCmd.Flag("id", "Cortex tenant id, alternatively set CORTEX_TENANT_ID.").Envar("CORTEX_TENANT_ID").Required().StringVar(&a.ClientConfig.ID)
alertCmd.Flag("authToken", "Authentication token for bearer token or JWT auth, alternatively set CORTEX_AUTH_TOKEN.").Default("").Envar("CORTEX_AUTH_TOKEN").StringVar(&a.ClientConfig.AuthToken)
alertCmd.Flag("user", "API user to use when contacting cortex, alternatively set CORTEX_API_USER. If empty, CORTEX_TENANT_ID will be used instead.").Default("").Envar("CORTEX_API_USER").StringVar(&a.ClientConfig.User)
alertCmd.Flag("key", "API key to use when contacting cortex, alternatively set CORTEX_API_KEY.").Default("").Envar("CORTEX_API_KEY").StringVar(&a.ClientConfig.Key)
alertCmd.Flag("tls-ca-path", "TLS CA certificate to verify cortex API as part of mTLS, alternatively set CORTEX_TLS_CA_PATH.").Default("").Envar("CORTEX_TLS_CA_PATH").StringVar(&a.ClientConfig.TLS.CAPath)
alertCmd.Flag("tls-cert-path", "TLS client certificate to authenticate with cortex API as part of mTLS, alternatively set CORTEX_TLS_CERT_PATH.").Default("").Envar("CORTEX_TLS_CERT_PATH").StringVar(&a.ClientConfig.TLS.CertPath)
alertCmd.Flag("tls-key-path", "TLS client certificate private key to authenticate with cortex API as part of mTLS, alternatively set CORTEX_TLS_KEY_PATH.").Default("").Envar("CORTEX_TLS_KEY_PATH").StringVar(&a.ClientConfig.TLS.KeyPath)
// Get Alertmanager Configs Command
getAlertsCmd := alertCmd.Command("get", "Get the alertmanager config currently in the cortex alertmanager.").Action(a.getConfig)
getAlertsCmd.Flag("disable-color", "disable colored output").BoolVar(&a.DisableColor)
alertCmd.Command("delete", "Delete the alertmanager config currently in the cortex alertmanager.").Action(a.deleteConfig)
loadalertCmd := alertCmd.Command("load", "load a set of rules to a designated cortex endpoint").Action(a.loadConfig)
loadalertCmd.Arg("config", "alertmanager configuration to load").Required().StringVar(&a.AlertmanagerConfigFile)
loadalertCmd.Arg("template-files", "The template files to load").ExistingFilesVar(&a.TemplateFiles)
}
func (a *AlertmanagerCommand) setup(k *kingpin.ParseContext) error {
cli, err := client.New(a.ClientConfig)
if err != nil {
return err
}
a.cli = cli
return nil
}
func (a *AlertmanagerCommand) getConfig(k *kingpin.ParseContext) error {
cfg, templates, err := a.cli.GetAlertmanagerConfig(context.Background())
if err != nil {
if err == client.ErrResourceNotFound {
log.Infof("no alertmanager config currently exist for this user")
return nil
}
return err
}
p := printer.New(a.DisableColor)
return p.PrintAlertmanagerConfig(cfg, templates)
}
func (a *AlertmanagerCommand) loadConfig(k *kingpin.ParseContext) error {
content, err := ioutil.ReadFile(a.AlertmanagerConfigFile)
if err != nil {
return errors.Wrap(err, "unable to load config file: "+a.AlertmanagerConfigFile)
}
cfg := string(content)
_, err = config.Load(cfg)
if err != nil {
return err
}
templates := map[string]string{}
for _, f := range a.TemplateFiles {
tmpl, err := ioutil.ReadFile(f)
if err != nil {
return errors.Wrap(err, "unable to load template file: "+f)
}
templates[f] = string(tmpl)
}
return a.cli.CreateAlertmanagerConfig(context.Background(), cfg, templates)
}
func (a *AlertmanagerCommand) deleteConfig(k *kingpin.ParseContext) error {
err := a.cli.DeleteAlermanagerConfig(context.Background())
if err != nil && err != client.ErrResourceNotFound {
return err
}
return nil
}
func (a *AlertCommand) Register(app *kingpin.Application) {
alertCmd := app.Command("alerts", "View active alerts in alertmanager.").PreAction(a.setup)
alertCmd.Flag("address", "Address of the cortex cluster, alternatively set CORTEX_ADDRESS.").Envar("CORTEX_ADDRESS").Required().StringVar(&a.ClientConfig.Address)
alertCmd.Flag("id", "Cortex tenant id, alternatively set CORTEX_TENANT_ID.").Envar("CORTEX_TENANT_ID").Required().StringVar(&a.ClientConfig.ID)
alertCmd.Flag("authToken", "Authentication token for bearer token or JWT auth, alternatively set CORTEX_AUTH_TOKEN.").Default("").Envar("CORTEX_AUTH_TOKEN").StringVar(&a.ClientConfig.AuthToken)
alertCmd.Flag("user", "API user to use when contacting cortex, alternatively set CORTEX_API_USER. If empty, CORTEX_TENANT_ID will be used instead.").Default("").Envar("CORTEX_API_USER").StringVar(&a.ClientConfig.User)
alertCmd.Flag("key", "API key to use when contacting cortex, alternatively set CORTEX_API_KEY.").Default("").Envar("CORTEX_API_KEY").StringVar(&a.ClientConfig.Key)
verifyAlertsCmd := alertCmd.Command("verify", "Verifies alerts in an alertmanager cluster are deduplicated; useful for verifying correct configuration when transferring from Prometheus to Cortex alert evaluation.").Action(a.verifyConfig)
verifyAlertsCmd.Flag("ignore-alerts", "A comma separated list of Alert names to ignore in deduplication checks.").StringVar(&a.IgnoreString)
verifyAlertsCmd.Flag("source-label", "Label to look for when deciding if two alerts are duplicates of eachother from separate sources.").Default("prometheus").StringVar(&a.SourceLabel)
verifyAlertsCmd.Flag("grace-period", "Grace period, don't consider alert groups with the incorrect amount of alert replicas erroneous unless the alerts have existed for more than this amount of time, in minutes.").Default("2").IntVar(&a.GracePeriod)
verifyAlertsCmd.Flag("frequency", "Setting this value will turn cortextool into a long-running process, running the alerts verify check every # of minutes specified").IntVar(&a.CheckFrequency)
}
func (a *AlertCommand) setup(k *kingpin.ParseContext) error {
cli, err := client.New(a.ClientConfig)
if err != nil {
return err
}
a.cli = cli
return nil
}
type queryResult struct {
Status string `json:"status"`
Data queryData `json:"data"`
}
type queryData struct {
ResultType string `json:"resultType"`
Result []metric `json:"result"`
}
type metric struct {
Metric map[string]string `json:"metric"`
}
func (a *AlertCommand) verifyConfig(k *kingpin.ParseContext) error {
var empty interface{}
if a.IgnoreString != "" {
a.IgnoreAlerts = make(map[string]interface{})
chunks := strings.Split(a.IgnoreString, ",")
for _, name := range chunks {
a.IgnoreAlerts[name] = empty
log.Info("Ignoring alerts with name: ", name)
}
}
lhs := fmt.Sprintf("ALERTS{source!=\"%s\", alertstate=\"firing\"} offset %dm unless ignoring(source) ALERTS{source=\"%s\", alertstate=\"firing\"}",
a.SourceLabel,
a.GracePeriod,
a.SourceLabel)
rhs := fmt.Sprintf("ALERTS{source=\"%s\", alertstate=\"firing\"} offset %dm unless ignoring(source) ALERTS{source!=\"%s\", alertstate=\"firing\"}",
a.SourceLabel,
a.GracePeriod,
a.SourceLabel)
query := fmt.Sprintf("%s or %s", lhs, rhs)
if a.CheckFrequency <= 0 {
_, err := a.runVerifyQuery(context.Background(), query)
return err
}
// Use a different registerer than default so we don't get all the Cortex metrics, but include Go runtime metrics.
goStats := collectors.NewGoCollector()
reg := prometheus.NewRegistry()
reg.MustRegister(nonDuplicateAlerts)
reg.MustRegister(goStats)
http.Handle("/metrics", promhttp.HandlerFor(
reg,
promhttp.HandlerOpts{},
))
go func() {
log.Fatal(http.ListenAndServe(":9090", nil))
}()
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
defer func() {
signal.Stop(c)
cancel()
}() |
go func() {
ticker := time.NewTicker(time.Duration(a.CheckFrequency) * time.Minute)
for {
n, lastErr = a.runVerifyQuery(ctx, query)
nonDuplicateAlerts.Set(float64(n))
select {
case <-c:
cancel()
return
case <-ticker.C:
continue
}
}
}()
<-ctx.Done()
return lastErr
}
func (a *AlertCommand) runVerifyQuery(ctx context.Context, query string) (int, error) {
res, err := a.cli.Query(ctx, query)
if err != nil {
return 0, err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return 0, err
}
defer res.Body.Close()
var data queryResult
err = json.Unmarshal(body, &data)
if err != nil {
return 0, err
}
for _, m := range data.Data.Result {
if _, ok := a.IgnoreAlerts[m.Metric["alertname"]]; !ok {
log.WithFields(log.Fields{
"alertname": m.Metric["alertname"],
"state": m.Metric,
}).Infof("alert found that was not in both sources")
}
}
log.WithFields(log.Fields{"count": len(data.Data.Result)}).Infof("found mismatching alerts")
return len(data.Data.Result), nil
} | var lastErr error
var n int | random_line_split |
alerts.go | package commands
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"os/signal"
"strings"
"time"
"io/ioutil"
"net/url"
"github.com/pkg/errors"
"github.com/prometheus/alertmanager/config"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"gopkg.in/alecthomas/kingpin.v2"
"github.com/grafana/cortex-tools/pkg/client"
"github.com/grafana/cortex-tools/pkg/printer"
)
var (
nonDuplicateAlerts = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "cortextool_alerts_single_source",
Help: "Alerts found by the alerts verify command that are coming from a single source rather than multiple sources..",
},
)
)
// AlertmanagerCommand configures and executes rule related cortex api operations
type AlertmanagerCommand struct {
ClientConfig client.Config
AlertmanagerURL url.URL
AlertmanagerConfigFile string
TemplateFiles []string
DisableColor bool
cli *client.CortexClient
}
// AlertCommand configures and executes rule related PromQL queries for alerts comparison.
type AlertCommand struct {
CortexURL string
IgnoreString string
IgnoreAlerts map[string]interface{}
SourceLabel string
NumSources int
GracePeriod int
CheckFrequency int
ClientConfig client.Config
cli *client.CortexClient
}
// Register rule related commands and flags with the kingpin application
func (a *AlertmanagerCommand) Register(app *kingpin.Application) {
alertCmd := app.Command("alertmanager", "View & edit alertmanager configs stored in cortex.").PreAction(a.setup)
alertCmd.Flag("address", "Address of the cortex cluster, alternatively set CORTEX_ADDRESS.").Envar("CORTEX_ADDRESS").Required().StringVar(&a.ClientConfig.Address)
alertCmd.Flag("id", "Cortex tenant id, alternatively set CORTEX_TENANT_ID.").Envar("CORTEX_TENANT_ID").Required().StringVar(&a.ClientConfig.ID)
alertCmd.Flag("authToken", "Authentication token for bearer token or JWT auth, alternatively set CORTEX_AUTH_TOKEN.").Default("").Envar("CORTEX_AUTH_TOKEN").StringVar(&a.ClientConfig.AuthToken)
alertCmd.Flag("user", "API user to use when contacting cortex, alternatively set CORTEX_API_USER. If empty, CORTEX_TENANT_ID will be used instead.").Default("").Envar("CORTEX_API_USER").StringVar(&a.ClientConfig.User)
alertCmd.Flag("key", "API key to use when contacting cortex, alternatively set CORTEX_API_KEY.").Default("").Envar("CORTEX_API_KEY").StringVar(&a.ClientConfig.Key)
alertCmd.Flag("tls-ca-path", "TLS CA certificate to verify cortex API as part of mTLS, alternatively set CORTEX_TLS_CA_PATH.").Default("").Envar("CORTEX_TLS_CA_PATH").StringVar(&a.ClientConfig.TLS.CAPath)
alertCmd.Flag("tls-cert-path", "TLS client certificate to authenticate with cortex API as part of mTLS, alternatively set CORTEX_TLS_CERT_PATH.").Default("").Envar("CORTEX_TLS_CERT_PATH").StringVar(&a.ClientConfig.TLS.CertPath)
alertCmd.Flag("tls-key-path", "TLS client certificate private key to authenticate with cortex API as part of mTLS, alternatively set CORTEX_TLS_KEY_PATH.").Default("").Envar("CORTEX_TLS_KEY_PATH").StringVar(&a.ClientConfig.TLS.KeyPath)
// Get Alertmanager Configs Command
getAlertsCmd := alertCmd.Command("get", "Get the alertmanager config currently in the cortex alertmanager.").Action(a.getConfig)
getAlertsCmd.Flag("disable-color", "disable colored output").BoolVar(&a.DisableColor)
alertCmd.Command("delete", "Delete the alertmanager config currently in the cortex alertmanager.").Action(a.deleteConfig)
loadalertCmd := alertCmd.Command("load", "load a set of rules to a designated cortex endpoint").Action(a.loadConfig)
loadalertCmd.Arg("config", "alertmanager configuration to load").Required().StringVar(&a.AlertmanagerConfigFile)
loadalertCmd.Arg("template-files", "The template files to load").ExistingFilesVar(&a.TemplateFiles)
}
func (a *AlertmanagerCommand) setup(k *kingpin.ParseContext) error |
func (a *AlertmanagerCommand) getConfig(k *kingpin.ParseContext) error {
cfg, templates, err := a.cli.GetAlertmanagerConfig(context.Background())
if err != nil {
if err == client.ErrResourceNotFound {
log.Infof("no alertmanager config currently exist for this user")
return nil
}
return err
}
p := printer.New(a.DisableColor)
return p.PrintAlertmanagerConfig(cfg, templates)
}
func (a *AlertmanagerCommand) loadConfig(k *kingpin.ParseContext) error {
content, err := ioutil.ReadFile(a.AlertmanagerConfigFile)
if err != nil {
return errors.Wrap(err, "unable to load config file: "+a.AlertmanagerConfigFile)
}
cfg := string(content)
_, err = config.Load(cfg)
if err != nil {
return err
}
templates := map[string]string{}
for _, f := range a.TemplateFiles {
tmpl, err := ioutil.ReadFile(f)
if err != nil {
return errors.Wrap(err, "unable to load template file: "+f)
}
templates[f] = string(tmpl)
}
return a.cli.CreateAlertmanagerConfig(context.Background(), cfg, templates)
}
func (a *AlertmanagerCommand) deleteConfig(k *kingpin.ParseContext) error {
err := a.cli.DeleteAlermanagerConfig(context.Background())
if err != nil && err != client.ErrResourceNotFound {
return err
}
return nil
}
func (a *AlertCommand) Register(app *kingpin.Application) {
alertCmd := app.Command("alerts", "View active alerts in alertmanager.").PreAction(a.setup)
alertCmd.Flag("address", "Address of the cortex cluster, alternatively set CORTEX_ADDRESS.").Envar("CORTEX_ADDRESS").Required().StringVar(&a.ClientConfig.Address)
alertCmd.Flag("id", "Cortex tenant id, alternatively set CORTEX_TENANT_ID.").Envar("CORTEX_TENANT_ID").Required().StringVar(&a.ClientConfig.ID)
alertCmd.Flag("authToken", "Authentication token for bearer token or JWT auth, alternatively set CORTEX_AUTH_TOKEN.").Default("").Envar("CORTEX_AUTH_TOKEN").StringVar(&a.ClientConfig.AuthToken)
alertCmd.Flag("user", "API user to use when contacting cortex, alternatively set CORTEX_API_USER. If empty, CORTEX_TENANT_ID will be used instead.").Default("").Envar("CORTEX_API_USER").StringVar(&a.ClientConfig.User)
alertCmd.Flag("key", "API key to use when contacting cortex, alternatively set CORTEX_API_KEY.").Default("").Envar("CORTEX_API_KEY").StringVar(&a.ClientConfig.Key)
verifyAlertsCmd := alertCmd.Command("verify", "Verifies alerts in an alertmanager cluster are deduplicated; useful for verifying correct configuration when transferring from Prometheus to Cortex alert evaluation.").Action(a.verifyConfig)
verifyAlertsCmd.Flag("ignore-alerts", "A comma separated list of Alert names to ignore in deduplication checks.").StringVar(&a.IgnoreString)
verifyAlertsCmd.Flag("source-label", "Label to look for when deciding if two alerts are duplicates of eachother from separate sources.").Default("prometheus").StringVar(&a.SourceLabel)
verifyAlertsCmd.Flag("grace-period", "Grace period, don't consider alert groups with the incorrect amount of alert replicas erroneous unless the alerts have existed for more than this amount of time, in minutes.").Default("2").IntVar(&a.GracePeriod)
verifyAlertsCmd.Flag("frequency", "Setting this value will turn cortextool into a long-running process, running the alerts verify check every # of minutes specified").IntVar(&a.CheckFrequency)
}
func (a *AlertCommand) setup(k *kingpin.ParseContext) error {
cli, err := client.New(a.ClientConfig)
if err != nil {
return err
}
a.cli = cli
return nil
}
type queryResult struct {
Status string `json:"status"`
Data queryData `json:"data"`
}
type queryData struct {
ResultType string `json:"resultType"`
Result []metric `json:"result"`
}
type metric struct {
Metric map[string]string `json:"metric"`
}
func (a *AlertCommand) verifyConfig(k *kingpin.ParseContext) error {
var empty interface{}
if a.IgnoreString != "" {
a.IgnoreAlerts = make(map[string]interface{})
chunks := strings.Split(a.IgnoreString, ",")
for _, name := range chunks {
a.IgnoreAlerts[name] = empty
log.Info("Ignoring alerts with name: ", name)
}
}
lhs := fmt.Sprintf("ALERTS{source!=\"%s\", alertstate=\"firing\"} offset %dm unless ignoring(source) ALERTS{source=\"%s\", alertstate=\"firing\"}",
a.SourceLabel,
a.GracePeriod,
a.SourceLabel)
rhs := fmt.Sprintf("ALERTS{source=\"%s\", alertstate=\"firing\"} offset %dm unless ignoring(source) ALERTS{source!=\"%s\", alertstate=\"firing\"}",
a.SourceLabel,
a.GracePeriod,
a.SourceLabel)
query := fmt.Sprintf("%s or %s", lhs, rhs)
if a.CheckFrequency <= 0 {
_, err := a.runVerifyQuery(context.Background(), query)
return err
}
// Use a different registerer than default so we don't get all the Cortex metrics, but include Go runtime metrics.
goStats := collectors.NewGoCollector()
reg := prometheus.NewRegistry()
reg.MustRegister(nonDuplicateAlerts)
reg.MustRegister(goStats)
http.Handle("/metrics", promhttp.HandlerFor(
reg,
promhttp.HandlerOpts{},
))
go func() {
log.Fatal(http.ListenAndServe(":9090", nil))
}()
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
defer func() {
signal.Stop(c)
cancel()
}()
var lastErr error
var n int
go func() {
ticker := time.NewTicker(time.Duration(a.CheckFrequency) * time.Minute)
for {
n, lastErr = a.runVerifyQuery(ctx, query)
nonDuplicateAlerts.Set(float64(n))
select {
case <-c:
cancel()
return
case <-ticker.C:
continue
}
}
}()
<-ctx.Done()
return lastErr
}
func (a *AlertCommand) runVerifyQuery(ctx context.Context, query string) (int, error) {
res, err := a.cli.Query(ctx, query)
if err != nil {
return 0, err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return 0, err
}
defer res.Body.Close()
var data queryResult
err = json.Unmarshal(body, &data)
if err != nil {
return 0, err
}
for _, m := range data.Data.Result {
if _, ok := a.IgnoreAlerts[m.Metric["alertname"]]; !ok {
log.WithFields(log.Fields{
"alertname": m.Metric["alertname"],
"state": m.Metric,
}).Infof("alert found that was not in both sources")
}
}
log.WithFields(log.Fields{"count": len(data.Data.Result)}).Infof("found mismatching alerts")
return len(data.Data.Result), nil
}
| {
cli, err := client.New(a.ClientConfig)
if err != nil {
return err
}
a.cli = cli
return nil
} | identifier_body |
blog_details_min.js | tocbot['init']({
'tocSelector': '.js-toc',
'contentSelector': '.js-toc-content',
'headingSelector': 'h1,\x20h2,\x20h3',
'hasInnerContainers': !![]
});
$('#payBtn')['popup']({'popup': $('.payButton.popup'), 'on': 'click', 'position': 'bottom\x20center'});
$('#catalog')['popup']({'popup': $('.catalog-click.popup'), 'on': 'click', 'position': 'left\x20center'});
$('.click-wechat')['popup']({'popup': $('.share-wechat'), 'on': 'click', 'position': 'left\x20center'});
var qrcode = new QRCode('qrcode', {
'text': qrcodeurl,
'width': 0x80,
'height': 0x80,
'colorDark': '#000000',
'colorLight': '#ffffff',
'correctLevel': QRCode['CorrectLevel']['H']
});
$('#toTop-div')['click'](function () {
var _0x43a5df = {
'pVsbF': function (_0x5384f8, _0x122261) {
return _0x5384f8(_0x122261);
}
};
_0x43a5df['pVsbF']($, window)['scrollTo'](0x0, 0x1f4);
});
var waypoint = new Waypoint({
'element': document['getElementById']('middle-content'), 'handler': function (_0x320b45) {
var _0x2a7476 = {
'Ckqhg': function (_0x469ff8, _0x5cd11c) {
return _0x469ff8 == _0x5cd11c;
}, 'bDtFE': 'down', 'BqmBe': function (_0x1df816, _0x4701b9) {
return _0x1df816(_0x4701b9);
}, 'eyVUT': '#toolbar-list', 'CEXJj': function (_0x4f2ccf, _0x23e0cc) {
return _0x4f2ccf(_0x23e0cc);
}
};
if (_0x2a7476['Ckqhg'](_0x320b45, _0x2a7476['bDtFE'])) {
_0x2a7476['BqmBe']($, _0x2a7476['eyVUT'])['show'](0x64);
} else {
_0x2a7476['CEXJj']($, _0x2a7476['eyVUT'])['hide'](0x1f4);
}
}
});
$('.ui.form')['form']({
'fields': {
'nickName': {
'identifier': 'nickName',
'rules': [{'type': 'empty', 'prompt': '评论昵称不能为空'}]
}, 'content': {'identifier': 'content', 'rules': [{'type': 'empty', 'prompt': '回复内容不能为空'}]}
}
});
$('#comment-btn-sub')['click'](function () {
var _0x19ea25 = {
'WMqVz': function (_0x568012, _0x2af04b) {
return _0x568012(_0x2af04b);
}, 'NAaLP': '.ui.form', 'hMYzs': 'validate\x20form', 'Tffrz': function (_0x142248) {
return _0x142248();
}
};
var _0x26e47e = _0x19ea25['WMqVz']($, _0x19ea25['NAaLP'])['form'](_0x19ea25['hMYzs']);
if (_0x26e47e) {
_0x19ea25['Tffrz'](postData);
}
});
function postData() {
var _0x283a49 = {
'nGPxR': function (_0xcb4636) {
return _0xcb4636();
}, 'qRqtq': function (_0xc5aaea, _0x5b91a3) {
return _0xc5aaea(_0x5b91a3);
}, 'ELzps': '#command-container', 'KBNIZ': '/comments', 'HdMeD': function (_0x45fc63, _0x16c1c0) {
return _0x45fc63(_0x16c1c0);
}, 'GIJiP': '[name=\x27parentComment.id\x27]', 'redww': function (_0x2573bf, _0x31ff05) {
return _0x2573bf(_0x31ff05);
}, 'KnlCA': '[name=\x27blogBean.id\x27]', 'Ypisp': function (_0x3a4cbe, _0x488760) {
return _0x3a4cbe(_0x488760);
}, 'TuTzK': '[name=\x27content\x27]'
};
_0x283a49['qRqtq']($, _0x283a49['ELzps'])['load'](_0x283a49['KBNIZ'], {
'parentComment.id': _0x283a49['HdMeD']($, _0x283a49['GIJiP'])['val'](),
'blogBean.id': _0x283a49['redww']($, _0x283a49['KnlCA'])['val'](),
'content': _0x283a49['Ypisp']($, _0x283a49['TuTzK'])['val']()
}, function (_0x29c0ab, _0xbe8458, _0x185af1) {
_0x283a49['nGPxR'](clearMessage);
});
}
function clearMessage() {
var _0x47d14d = {
'uqJae': function (_0x2b03ce, _0xb22fbb) {
return _0x2b03ce(_0xb22fbb);
}, 'giuBZ': '[name=\x27content\x27]', 'Xjmxp': function (_0x731754, _0x30f655) {
return _0x731754(_0x30f655);
}, 'STElQ': '[name=\x27parentComment.id\x27]', 'jJieZ': function (_0x46bac3, _0xd2eff0) {
return _0x46bac3(_0xd2eff0);
}, 'ruiil': 'placeholder', 'ImZig': '请输入评论信息...'
};
_0x47d14d['uqJae']($, _0x47d14d['giuBZ'])['val']('');
_0x47d14d['Xjmxp']($, _0x47d14d['STElQ'])['val'](-0x1);
_0x47d14d['jJieZ']($, _0x47d14d['giuBZ'])['attr'](_0x47d14d['ruiil'], _0x47d14d['ImZig']);
}
function reply(_0x4f512a) {
var _0x321d38 = {
| VGcy': '3|1|0|4|5|2',
'HeFaY': function (_0x1b0cd0, _0x2a80fc) {
return _0x1b0cd0(_0x2a80fc);
},
'spJaG': 'comment-nickname',
'ncEqy': '#comment-form',
'bmUZw': function (_0x409e74, _0x533775) {
return _0x409e74(_0x533775);
},
'GosxG': 'comment-id',
'ojyvd': '[name=\x27content\x27]',
'uzAtG': 'placeholder',
'UGoJl': function (_0x18108f, _0x3a66fc) {
return _0x18108f + _0x3a66fc;
},
'SJsRw': function (_0x12ddfd, _0x5420d5) {
return _0x12ddfd(_0x5420d5);
},
'BMfyE': '[name=\x27parentComment.id\x27]'
};
var _0x4153c2 = _0x321d38['iVGcy']['split']('|'), _0x1d690b = 0x0;
while (!![]) {
switch (_0x4153c2[_0x1d690b++]) {
case'0':
console['log'](_0x3f508c);
continue;
case'1':
var _0x3f508c = _0x321d38['HeFaY']($, _0x4f512a)['data'](_0x321d38['spJaG']);
continue;
case'2':
_0x321d38['HeFaY']($, window)['scrollTo'](_0x321d38['HeFaY']($, _0x321d38['ncEqy']), 0x1f4);
continue;
case'3':
var _0x3f4f7e = _0x321d38['bmUZw']($, _0x4f512a)['data'](_0x321d38['GosxG']);
continue;
case'4':
_0x321d38['bmUZw']($, _0x321d38['ojyvd'])['attr'](_0x321d38['uzAtG'], _0x321d38['UGoJl']('@', _0x3f508c))['focus']();
continue;
case'5':
_0x321d38['SJsRw']($, _0x321d38['BMfyE'])['val'](_0x3f4f7e);
continue;
}
break;
}
}
increaseViewCount();
function increaseViewCount() {
var _0x547753 = {
'phXPY': function (_0x2c0ee1, _0x2f87b7) {
return _0x2c0ee1(_0x2f87b7);
}, 'pRMne': '.views', 'MQdBm': 'viewId', 'ElsTJ': '获取数据出错!', 'LnJlb': function (_0x5694d3, _0x2622b8) {
return _0x5694d3 != _0x2622b8;
}, 'UGZdK': 'POST', 'nFSAq': '/addView', 'FNhau': 'json'
};
if (_0x547753['LnJlb']($['cookie'](_0x547753['MQdBm']), blogId)) {
$['ajax']({
'async': ![],
'type': _0x547753['UGZdK'],
'url': _0x547753['nFSAq'],
'data': {'id': blogId},
'dataType': _0x547753['FNhau'],
'success': function (_0x1d5219) {
_0x547753['phXPY']($, _0x547753['pRMne'])['html'](_0x1d5219);
$['cookie'](_0x547753['MQdBm'], blogId, {'path': '/'});
},
'error': function () {
_0x547753['phXPY'](alert, _0x547753['ElsTJ']);
}
});
}
}
$('#like_add')['click'](function () {
var _0x4cb468 = {
'emvlb': 'like_add', 'WADFd': 'countLike', 'deLPk': function (_0x6ee263, _0xc0c7e4) {
return _0x6ee263 != _0xc0c7e4;
}, 'gQVYx': 'likeId', 'YCDvV': function (_0x232617, _0x44591d, _0x458206, _0x1a33db) {
return _0x232617(_0x44591d, _0x458206, _0x1a33db);
}, 'itOwV': 'data-content', 'MTKBe': '你当前还没登录,无法点赞', 'lXVHD': function (_0x17f14a, _0x3379fe) {
return _0x17f14a(_0x3379fe);
}, 'ZnNtx': '#like_add'
};
var _0x169ecf = document['getElementById'](_0x4cb468['emvlb']);
var _0x2eb0c2 = document['getElementById'](_0x4cb468['WADFd']);
if (_0x4cb468['deLPk']($['cookie'](_0x4cb468['gQVYx']), blogId)) {
if (!hasUser) {
_0x4cb468['YCDvV'](postBlogAjax, 0x1, _0x169ecf, _0x2eb0c2);
} else {
_0x169ecf['setAttribute'](_0x4cb468['itOwV'], _0x4cb468['MTKBe']);
}
}
_0x4cb468['lXVHD']($, _0x4cb468['ZnNtx'])['popup']();
});
$('#opposition_add')['click'](function () {
var _0xbda76e = {
'wkmzn': 'opposition_add', 'PXHTe': 'countOpposition', 'QvLuC': function (_0x54e22a, _0x482b63) {
return _0x54e22a != _0x482b63;
}, 'ClCZO': 'likeId', 'QbJIw': function (_0xd61974, _0x11af7b, _0x51cfee, _0x39bf70) {
return _0xd61974(_0x11af7b, _0x51cfee, _0x39bf70);
}, 'HsWQc': 'data-content', 'ptDzr': '你当前还没登录,无法反对', 'SjiyU': function (_0x162153, _0x5a6f4a) {
return _0x162153(_0x5a6f4a);
}, 'YYcND': '#opposition_add'
};
var _0x53a506 = document['getElementById'](_0xbda76e['wkmzn']);
var _0x45c819 = document['getElementById'](_0xbda76e['PXHTe']);
if (_0xbda76e['QvLuC']($['cookie'](_0xbda76e['ClCZO']), blogId)) {
if (!hasUser) {
_0xbda76e['QbJIw'](postBlogAjax, 0x2, _0x53a506, _0x45c819);
} else {
_0x53a506['setAttribute'](_0xbda76e['HsWQc'], _0xbda76e['ptDzr']);
}
}
_0xbda76e['SjiyU']($, _0xbda76e['YYcND'])['popup']();
});
function postBlogAjax(_0x412555, _0x39d39d, _0x1aedd1) {
var _0x17a8a1 = {
'MVKgg': function (_0x50b4d0, _0x163584) {
return _0x50b4d0 == _0x163584;
}, 'lktox': '200', 'RaBsV': function (_0x14f0e8, _0x5763ce) {
return _0x14f0e8 + _0x5763ce;
}, 'DbieH': function (_0x20d280, _0x1d5220) {
return _0x20d280(_0x1d5220);
}, 'pLfBt': 'data-content', 'mmiew': 'likeId', 'qGPIq': 'POST', 'VrgVR': '/addlike', 'wfpEk': 'json'
};
$['ajax']({
'async': ![],
'type': _0x17a8a1['qGPIq'],
'url': _0x17a8a1['VrgVR'],
'data': {'id': blogId, 'typeId': _0x412555},
'dataType': _0x17a8a1['wfpEk'],
'success': function (_0xef938c) {
if (_0x17a8a1['MVKgg'](_0xef938c['code'], _0x17a8a1['lktox'])) {
_0x1aedd1['innerText'] = _0x17a8a1['RaBsV'](_0x17a8a1['DbieH'](Number, _0x1aedd1['innerText']), 0x1);
}
_0x39d39d['setAttribute'](_0x17a8a1['pLfBt'], _0xef938c['message']);
$['cookie'](_0x17a8a1['mmiew'], blogId, {'path': '/'});
}
});
} | 'i | identifier_name |
blog_details_min.js | tocbot['init']({
'tocSelector': '.js-toc',
'contentSelector': '.js-toc-content',
'headingSelector': 'h1,\x20h2,\x20h3',
'hasInnerContainers': !![]
});
$('#payBtn')['popup']({'popup': $('.payButton.popup'), 'on': 'click', 'position': 'bottom\x20center'});
$('#catalog')['popup']({'popup': $('.catalog-click.popup'), 'on': 'click', 'position': 'left\x20center'});
$('.click-wechat')['popup']({'popup': $('.share-wechat'), 'on': 'click', 'position': 'left\x20center'});
var qrcode = new QRCode('qrcode', {
'text': qrcodeurl,
'width': 0x80,
'height': 0x80,
'colorDark': '#000000',
'colorLight': '#ffffff',
'correctLevel': QRCode['CorrectLevel']['H']
});
$('#toTop-div')['click'](function () {
var _0x43a5df = {
'pVsbF': function (_0x5384f8, _0x122261) {
return _0x5384f8(_0x122261);
}
};
_0x43a5df['pVsbF']($, window)['scrollTo'](0x0, 0x1f4);
});
var waypoint = new Waypoint({
'element': document['getElementById']('middle-content'), 'handler': function (_0x320b45) {
var _0x2a7476 = {
'Ckqhg': function (_0x469ff8, _0x5cd11c) {
return _0x469ff8 == _0x5cd11c;
}, 'bDtFE': 'down', 'BqmBe': function (_0x1df816, _0x4701b9) {
return _0x1df816(_0x4701b9);
}, 'eyVUT': '#toolbar-list', 'CEXJj': function (_0x4f2ccf, _0x23e0cc) {
return _0x4f2ccf(_0x23e0cc);
}
};
if (_0x2a7476['Ckqhg'](_0x320b45, _0x2a7476['bDtFE'])) | else {
_0x2a7476['CEXJj']($, _0x2a7476['eyVUT'])['hide'](0x1f4);
}
}
});
$('.ui.form')['form']({
'fields': {
'nickName': {
'identifier': 'nickName',
'rules': [{'type': 'empty', 'prompt': '评论昵称不能为空'}]
}, 'content': {'identifier': 'content', 'rules': [{'type': 'empty', 'prompt': '回复内容不能为空'}]}
}
});
$('#comment-btn-sub')['click'](function () {
var _0x19ea25 = {
'WMqVz': function (_0x568012, _0x2af04b) {
return _0x568012(_0x2af04b);
}, 'NAaLP': '.ui.form', 'hMYzs': 'validate\x20form', 'Tffrz': function (_0x142248) {
return _0x142248();
}
};
var _0x26e47e = _0x19ea25['WMqVz']($, _0x19ea25['NAaLP'])['form'](_0x19ea25['hMYzs']);
if (_0x26e47e) {
_0x19ea25['Tffrz'](postData);
}
});
function postData() {
var _0x283a49 = {
'nGPxR': function (_0xcb4636) {
return _0xcb4636();
}, 'qRqtq': function (_0xc5aaea, _0x5b91a3) {
return _0xc5aaea(_0x5b91a3);
}, 'ELzps': '#command-container', 'KBNIZ': '/comments', 'HdMeD': function (_0x45fc63, _0x16c1c0) {
return _0x45fc63(_0x16c1c0);
}, 'GIJiP': '[name=\x27parentComment.id\x27]', 'redww': function (_0x2573bf, _0x31ff05) {
return _0x2573bf(_0x31ff05);
}, 'KnlCA': '[name=\x27blogBean.id\x27]', 'Ypisp': function (_0x3a4cbe, _0x488760) {
return _0x3a4cbe(_0x488760);
}, 'TuTzK': '[name=\x27content\x27]'
};
_0x283a49['qRqtq']($, _0x283a49['ELzps'])['load'](_0x283a49['KBNIZ'], {
'parentComment.id': _0x283a49['HdMeD']($, _0x283a49['GIJiP'])['val'](),
'blogBean.id': _0x283a49['redww']($, _0x283a49['KnlCA'])['val'](),
'content': _0x283a49['Ypisp']($, _0x283a49['TuTzK'])['val']()
}, function (_0x29c0ab, _0xbe8458, _0x185af1) {
_0x283a49['nGPxR'](clearMessage);
});
}
function clearMessage() {
var _0x47d14d = {
'uqJae': function (_0x2b03ce, _0xb22fbb) {
return _0x2b03ce(_0xb22fbb);
}, 'giuBZ': '[name=\x27content\x27]', 'Xjmxp': function (_0x731754, _0x30f655) {
return _0x731754(_0x30f655);
}, 'STElQ': '[name=\x27parentComment.id\x27]', 'jJieZ': function (_0x46bac3, _0xd2eff0) {
return _0x46bac3(_0xd2eff0);
}, 'ruiil': 'placeholder', 'ImZig': '请输入评论信息...'
};
_0x47d14d['uqJae']($, _0x47d14d['giuBZ'])['val']('');
_0x47d14d['Xjmxp']($, _0x47d14d['STElQ'])['val'](-0x1);
_0x47d14d['jJieZ']($, _0x47d14d['giuBZ'])['attr'](_0x47d14d['ruiil'], _0x47d14d['ImZig']);
}
function reply(_0x4f512a) {
var _0x321d38 = {
'iVGcy': '3|1|0|4|5|2',
'HeFaY': function (_0x1b0cd0, _0x2a80fc) {
return _0x1b0cd0(_0x2a80fc);
},
'spJaG': 'comment-nickname',
'ncEqy': '#comment-form',
'bmUZw': function (_0x409e74, _0x533775) {
return _0x409e74(_0x533775);
},
'GosxG': 'comment-id',
'ojyvd': '[name=\x27content\x27]',
'uzAtG': 'placeholder',
'UGoJl': function (_0x18108f, _0x3a66fc) {
return _0x18108f + _0x3a66fc;
},
'SJsRw': function (_0x12ddfd, _0x5420d5) {
return _0x12ddfd(_0x5420d5);
},
'BMfyE': '[name=\x27parentComment.id\x27]'
};
var _0x4153c2 = _0x321d38['iVGcy']['split']('|'), _0x1d690b = 0x0;
while (!![]) {
switch (_0x4153c2[_0x1d690b++]) {
case'0':
console['log'](_0x3f508c);
continue;
case'1':
var _0x3f508c = _0x321d38['HeFaY']($, _0x4f512a)['data'](_0x321d38['spJaG']);
continue;
case'2':
_0x321d38['HeFaY']($, window)['scrollTo'](_0x321d38['HeFaY']($, _0x321d38['ncEqy']), 0x1f4);
continue;
case'3':
var _0x3f4f7e = _0x321d38['bmUZw']($, _0x4f512a)['data'](_0x321d38['GosxG']);
continue;
case'4':
_0x321d38['bmUZw']($, _0x321d38['ojyvd'])['attr'](_0x321d38['uzAtG'], _0x321d38['UGoJl']('@', _0x3f508c))['focus']();
continue;
case'5':
_0x321d38['SJsRw']($, _0x321d38['BMfyE'])['val'](_0x3f4f7e);
continue;
}
break;
}
}
increaseViewCount();
function increaseViewCount() {
var _0x547753 = {
'phXPY': function (_0x2c0ee1, _0x2f87b7) {
return _0x2c0ee1(_0x2f87b7);
}, 'pRMne': '.views', 'MQdBm': 'viewId', 'ElsTJ': '获取数据出错!', 'LnJlb': function (_0x5694d3, _0x2622b8) {
return _0x5694d3 != _0x2622b8;
}, 'UGZdK': 'POST', 'nFSAq': '/addView', 'FNhau': 'json'
};
if (_0x547753['LnJlb']($['cookie'](_0x547753['MQdBm']), blogId)) {
$['ajax']({
'async': ![],
'type': _0x547753['UGZdK'],
'url': _0x547753['nFSAq'],
'data': {'id': blogId},
'dataType': _0x547753['FNhau'],
'success': function (_0x1d5219) {
_0x547753['phXPY']($, _0x547753['pRMne'])['html'](_0x1d5219);
$['cookie'](_0x547753['MQdBm'], blogId, {'path': '/'});
},
'error': function () {
_0x547753['phXPY'](alert, _0x547753['ElsTJ']);
}
});
}
}
$('#like_add')['click'](function () {
var _0x4cb468 = {
'emvlb': 'like_add', 'WADFd': 'countLike', 'deLPk': function (_0x6ee263, _0xc0c7e4) {
return _0x6ee263 != _0xc0c7e4;
}, 'gQVYx': 'likeId', 'YCDvV': function (_0x232617, _0x44591d, _0x458206, _0x1a33db) {
return _0x232617(_0x44591d, _0x458206, _0x1a33db);
}, 'itOwV': 'data-content', 'MTKBe': '你当前还没登录,无法点赞', 'lXVHD': function (_0x17f14a, _0x3379fe) {
return _0x17f14a(_0x3379fe);
}, 'ZnNtx': '#like_add'
};
var _0x169ecf = document['getElementById'](_0x4cb468['emvlb']);
var _0x2eb0c2 = document['getElementById'](_0x4cb468['WADFd']);
if (_0x4cb468['deLPk']($['cookie'](_0x4cb468['gQVYx']), blogId)) {
if (!hasUser) {
_0x4cb468['YCDvV'](postBlogAjax, 0x1, _0x169ecf, _0x2eb0c2);
} else {
_0x169ecf['setAttribute'](_0x4cb468['itOwV'], _0x4cb468['MTKBe']);
}
}
_0x4cb468['lXVHD']($, _0x4cb468['ZnNtx'])['popup']();
});
$('#opposition_add')['click'](function () {
var _0xbda76e = {
'wkmzn': 'opposition_add', 'PXHTe': 'countOpposition', 'QvLuC': function (_0x54e22a, _0x482b63) {
return _0x54e22a != _0x482b63;
}, 'ClCZO': 'likeId', 'QbJIw': function (_0xd61974, _0x11af7b, _0x51cfee, _0x39bf70) {
return _0xd61974(_0x11af7b, _0x51cfee, _0x39bf70);
}, 'HsWQc': 'data-content', 'ptDzr': '你当前还没登录,无法反对', 'SjiyU': function (_0x162153, _0x5a6f4a) {
return _0x162153(_0x5a6f4a);
}, 'YYcND': '#opposition_add'
};
var _0x53a506 = document['getElementById'](_0xbda76e['wkmzn']);
var _0x45c819 = document['getElementById'](_0xbda76e['PXHTe']);
if (_0xbda76e['QvLuC']($['cookie'](_0xbda76e['ClCZO']), blogId)) {
if (!hasUser) {
_0xbda76e['QbJIw'](postBlogAjax, 0x2, _0x53a506, _0x45c819);
} else {
_0x53a506['setAttribute'](_0xbda76e['HsWQc'], _0xbda76e['ptDzr']);
}
}
_0xbda76e['SjiyU']($, _0xbda76e['YYcND'])['popup']();
});
function postBlogAjax(_0x412555, _0x39d39d, _0x1aedd1) {
var _0x17a8a1 = {
'MVKgg': function (_0x50b4d0, _0x163584) {
return _0x50b4d0 == _0x163584;
}, 'lktox': '200', 'RaBsV': function (_0x14f0e8, _0x5763ce) {
return _0x14f0e8 + _0x5763ce;
}, 'DbieH': function (_0x20d280, _0x1d5220) {
return _0x20d280(_0x1d5220);
}, 'pLfBt': 'data-content', 'mmiew': 'likeId', 'qGPIq': 'POST', 'VrgVR': '/addlike', 'wfpEk': 'json'
};
$['ajax']({
'async': ![],
'type': _0x17a8a1['qGPIq'],
'url': _0x17a8a1['VrgVR'],
'data': {'id': blogId, 'typeId': _0x412555},
'dataType': _0x17a8a1['wfpEk'],
'success': function (_0xef938c) {
if (_0x17a8a1['MVKgg'](_0xef938c['code'], _0x17a8a1['lktox'])) {
_0x1aedd1['innerText'] = _0x17a8a1['RaBsV'](_0x17a8a1['DbieH'](Number, _0x1aedd1['innerText']), 0x1);
}
_0x39d39d['setAttribute'](_0x17a8a1['pLfBt'], _0xef938c['message']);
$['cookie'](_0x17a8a1['mmiew'], blogId, {'path': '/'});
}
});
} | {
_0x2a7476['BqmBe']($, _0x2a7476['eyVUT'])['show'](0x64);
} | conditional_block |
blog_details_min.js | tocbot['init']({
'tocSelector': '.js-toc',
'contentSelector': '.js-toc-content',
'headingSelector': 'h1,\x20h2,\x20h3',
'hasInnerContainers': !![]
});
$('#payBtn')['popup']({'popup': $('.payButton.popup'), 'on': 'click', 'position': 'bottom\x20center'});
$('#catalog')['popup']({'popup': $('.catalog-click.popup'), 'on': 'click', 'position': 'left\x20center'});
$('.click-wechat')['popup']({'popup': $('.share-wechat'), 'on': 'click', 'position': 'left\x20center'});
var qrcode = new QRCode('qrcode', {
'text': qrcodeurl,
'width': 0x80,
'height': 0x80,
'colorDark': '#000000',
'colorLight': '#ffffff',
'correctLevel': QRCode['CorrectLevel']['H']
});
$('#toTop-div')['click'](function () {
var _0x43a5df = {
'pVsbF': function (_0x5384f8, _0x122261) {
return _0x5384f8(_0x122261);
}
};
_0x43a5df['pVsbF']($, window)['scrollTo'](0x0, 0x1f4);
});
var waypoint = new Waypoint({
'element': document['getElementById']('middle-content'), 'handler': function (_0x320b45) {
var _0x2a7476 = {
'Ckqhg': function (_0x469ff8, _0x5cd11c) {
return _0x469ff8 == _0x5cd11c;
}, 'bDtFE': 'down', 'BqmBe': function (_0x1df816, _0x4701b9) {
return _0x1df816(_0x4701b9);
}, 'eyVUT': '#toolbar-list', 'CEXJj': function (_0x4f2ccf, _0x23e0cc) {
return _0x4f2ccf(_0x23e0cc);
}
};
if (_0x2a7476['Ckqhg'](_0x320b45, _0x2a7476['bDtFE'])) {
_0x2a7476['BqmBe']($, _0x2a7476['eyVUT'])['show'](0x64);
} else {
_0x2a7476['CEXJj']($, _0x2a7476['eyVUT'])['hide'](0x1f4);
}
}
});
$('.ui.form')['form']({
'fields': {
'nickName': {
'identifier': 'nickName',
'rules': [{'type': 'empty', 'prompt': '评论昵称不能为空'}]
}, 'content': {'identifier': 'content', 'rules': [{'type': 'empty', 'prompt': '回复内容不能为空'}]}
}
});
$('#comment-btn-sub')['click'](function () {
var _0x19ea25 = {
'WMqVz': function (_0x568012, _0x2af04b) {
return _0x568012(_0x2af04b);
}, 'NAaLP': '.ui.form', 'hMYzs': 'validate\x20form', 'Tffrz': function (_0x142248) {
return _0x142248();
}
};
var _0x26e47e = _0x19ea25['WMqVz']($, _0x19ea25['NAaLP'])['form'](_0x19ea25['hMYzs']);
if (_0x26e47e) {
_0x19ea25['Tffrz'](postData);
}
});
function postData() {
var _0x283a49 = {
'nGPxR': function (_0xcb4636) {
return _0xcb4636();
}, 'qRqtq': function (_0xc5aaea, _0x5b91a3) {
return _0xc5aaea(_0x5b91a3);
}, 'ELzps': '#command-container', 'KBNIZ': '/comments', 'HdMeD': function (_0x45fc63, _0x16c1c0) {
return _0x45fc63(_0x16c1c0);
}, 'GIJiP': '[name=\x27parentComment.id\x27]', 'redww': function (_0x2573bf, _0x31ff05) {
return _0x2573bf(_0x31ff05);
}, 'KnlCA': '[name=\x27blogBean.id\x27]', 'Ypisp': function (_0x3a4cbe, _0x488760) {
return _0x3a4cbe(_0x488760);
}, 'TuTzK': '[name=\x27content\x27]'
};
_0x283a49['qRqtq']($, _0x283a49['ELzps'])['load'](_0x283a49['KBNIZ'], {
'parentComment.id': _0x283a49['HdMeD']($, _0x283a49['GIJiP'])['val'](),
'blogBean.id': _0x283a49['redww']($, _0x283a49['KnlCA'])['val'](),
'content': _0x283a49['Ypisp']($, _0x283a49['TuTzK'])['val']()
}, function (_0x29c0ab, _0xbe8458, _0x185af1) {
_0x283a49['nGPxR'](clearMessage);
});
}
function clearMessage() {
var _0x47d14d = {
| 8 = {
'iVGcy': '3|1|0|4|5|2',
'HeFaY': function (_0x1b0cd0, _0x2a80fc) {
return _0x1b0cd0(_0x2a80fc);
},
'spJaG': 'comment-nickname',
'ncEqy': '#comment-form',
'bmUZw': function (_0x409e74, _0x533775) {
return _0x409e74(_0x533775);
},
'GosxG': 'comment-id',
'ojyvd': '[name=\x27content\x27]',
'uzAtG': 'placeholder',
'UGoJl': function (_0x18108f, _0x3a66fc) {
return _0x18108f + _0x3a66fc;
},
'SJsRw': function (_0x12ddfd, _0x5420d5) {
return _0x12ddfd(_0x5420d5);
},
'BMfyE': '[name=\x27parentComment.id\x27]'
};
var _0x4153c2 = _0x321d38['iVGcy']['split']('|'), _0x1d690b = 0x0;
while (!![]) {
switch (_0x4153c2[_0x1d690b++]) {
case'0':
console['log'](_0x3f508c);
continue;
case'1':
var _0x3f508c = _0x321d38['HeFaY']($, _0x4f512a)['data'](_0x321d38['spJaG']);
continue;
case'2':
_0x321d38['HeFaY']($, window)['scrollTo'](_0x321d38['HeFaY']($, _0x321d38['ncEqy']), 0x1f4);
continue;
case'3':
var _0x3f4f7e = _0x321d38['bmUZw']($, _0x4f512a)['data'](_0x321d38['GosxG']);
continue;
case'4':
_0x321d38['bmUZw']($, _0x321d38['ojyvd'])['attr'](_0x321d38['uzAtG'], _0x321d38['UGoJl']('@', _0x3f508c))['focus']();
continue;
case'5':
_0x321d38['SJsRw']($, _0x321d38['BMfyE'])['val'](_0x3f4f7e);
continue;
}
break;
}
}
increaseViewCount();
function increaseViewCount() {
var _0x547753 = {
'phXPY': function (_0x2c0ee1, _0x2f87b7) {
return _0x2c0ee1(_0x2f87b7);
}, 'pRMne': '.views', 'MQdBm': 'viewId', 'ElsTJ': '获取数据出错!', 'LnJlb': function (_0x5694d3, _0x2622b8) {
return _0x5694d3 != _0x2622b8;
}, 'UGZdK': 'POST', 'nFSAq': '/addView', 'FNhau': 'json'
};
if (_0x547753['LnJlb']($['cookie'](_0x547753['MQdBm']), blogId)) {
$['ajax']({
'async': ![],
'type': _0x547753['UGZdK'],
'url': _0x547753['nFSAq'],
'data': {'id': blogId},
'dataType': _0x547753['FNhau'],
'success': function (_0x1d5219) {
_0x547753['phXPY']($, _0x547753['pRMne'])['html'](_0x1d5219);
$['cookie'](_0x547753['MQdBm'], blogId, {'path': '/'});
},
'error': function () {
_0x547753['phXPY'](alert, _0x547753['ElsTJ']);
}
});
}
}
$('#like_add')['click'](function () {
var _0x4cb468 = {
'emvlb': 'like_add', 'WADFd': 'countLike', 'deLPk': function (_0x6ee263, _0xc0c7e4) {
return _0x6ee263 != _0xc0c7e4;
}, 'gQVYx': 'likeId', 'YCDvV': function (_0x232617, _0x44591d, _0x458206, _0x1a33db) {
return _0x232617(_0x44591d, _0x458206, _0x1a33db);
}, 'itOwV': 'data-content', 'MTKBe': '你当前还没登录,无法点赞', 'lXVHD': function (_0x17f14a, _0x3379fe) {
return _0x17f14a(_0x3379fe);
}, 'ZnNtx': '#like_add'
};
var _0x169ecf = document['getElementById'](_0x4cb468['emvlb']);
var _0x2eb0c2 = document['getElementById'](_0x4cb468['WADFd']);
if (_0x4cb468['deLPk']($['cookie'](_0x4cb468['gQVYx']), blogId)) {
if (!hasUser) {
_0x4cb468['YCDvV'](postBlogAjax, 0x1, _0x169ecf, _0x2eb0c2);
} else {
_0x169ecf['setAttribute'](_0x4cb468['itOwV'], _0x4cb468['MTKBe']);
}
}
_0x4cb468['lXVHD']($, _0x4cb468['ZnNtx'])['popup']();
});
$('#opposition_add')['click'](function () {
var _0xbda76e = {
'wkmzn': 'opposition_add', 'PXHTe': 'countOpposition', 'QvLuC': function (_0x54e22a, _0x482b63) {
return _0x54e22a != _0x482b63;
}, 'ClCZO': 'likeId', 'QbJIw': function (_0xd61974, _0x11af7b, _0x51cfee, _0x39bf70) {
return _0xd61974(_0x11af7b, _0x51cfee, _0x39bf70);
}, 'HsWQc': 'data-content', 'ptDzr': '你当前还没登录,无法反对', 'SjiyU': function (_0x162153, _0x5a6f4a) {
return _0x162153(_0x5a6f4a);
}, 'YYcND': '#opposition_add'
};
var _0x53a506 = document['getElementById'](_0xbda76e['wkmzn']);
var _0x45c819 = document['getElementById'](_0xbda76e['PXHTe']);
if (_0xbda76e['QvLuC']($['cookie'](_0xbda76e['ClCZO']), blogId)) {
if (!hasUser) {
_0xbda76e['QbJIw'](postBlogAjax, 0x2, _0x53a506, _0x45c819);
} else {
_0x53a506['setAttribute'](_0xbda76e['HsWQc'], _0xbda76e['ptDzr']);
}
}
_0xbda76e['SjiyU']($, _0xbda76e['YYcND'])['popup']();
});
function postBlogAjax(_0x412555, _0x39d39d, _0x1aedd1) {
var _0x17a8a1 = {
'MVKgg': function (_0x50b4d0, _0x163584) {
return _0x50b4d0 == _0x163584;
}, 'lktox': '200', 'RaBsV': function (_0x14f0e8, _0x5763ce) {
return _0x14f0e8 + _0x5763ce;
}, 'DbieH': function (_0x20d280, _0x1d5220) {
return _0x20d280(_0x1d5220);
}, 'pLfBt': 'data-content', 'mmiew': 'likeId', 'qGPIq': 'POST', 'VrgVR': '/addlike', 'wfpEk': 'json'
};
$['ajax']({
'async': ![],
'type': _0x17a8a1['qGPIq'],
'url': _0x17a8a1['VrgVR'],
'data': {'id': blogId, 'typeId': _0x412555},
'dataType': _0x17a8a1['wfpEk'],
'success': function (_0xef938c) {
if (_0x17a8a1['MVKgg'](_0xef938c['code'], _0x17a8a1['lktox'])) {
_0x1aedd1['innerText'] = _0x17a8a1['RaBsV'](_0x17a8a1['DbieH'](Number, _0x1aedd1['innerText']), 0x1);
}
_0x39d39d['setAttribute'](_0x17a8a1['pLfBt'], _0xef938c['message']);
$['cookie'](_0x17a8a1['mmiew'], blogId, {'path': '/'});
}
});
} | 'uqJae': function (_0x2b03ce, _0xb22fbb) {
return _0x2b03ce(_0xb22fbb);
}, 'giuBZ': '[name=\x27content\x27]', 'Xjmxp': function (_0x731754, _0x30f655) {
return _0x731754(_0x30f655);
}, 'STElQ': '[name=\x27parentComment.id\x27]', 'jJieZ': function (_0x46bac3, _0xd2eff0) {
return _0x46bac3(_0xd2eff0);
}, 'ruiil': 'placeholder', 'ImZig': '请输入评论信息...'
};
_0x47d14d['uqJae']($, _0x47d14d['giuBZ'])['val']('');
_0x47d14d['Xjmxp']($, _0x47d14d['STElQ'])['val'](-0x1);
_0x47d14d['jJieZ']($, _0x47d14d['giuBZ'])['attr'](_0x47d14d['ruiil'], _0x47d14d['ImZig']);
}
function reply(_0x4f512a) {
var _0x321d3 | identifier_body |
blog_details_min.js | tocbot['init']({
'tocSelector': '.js-toc',
'contentSelector': '.js-toc-content',
'headingSelector': 'h1,\x20h2,\x20h3',
'hasInnerContainers': !![]
});
$('#payBtn')['popup']({'popup': $('.payButton.popup'), 'on': 'click', 'position': 'bottom\x20center'});
$('#catalog')['popup']({'popup': $('.catalog-click.popup'), 'on': 'click', 'position': 'left\x20center'});
$('.click-wechat')['popup']({'popup': $('.share-wechat'), 'on': 'click', 'position': 'left\x20center'});
var qrcode = new QRCode('qrcode', {
'text': qrcodeurl,
'width': 0x80,
'height': 0x80,
'colorDark': '#000000',
'colorLight': '#ffffff',
'correctLevel': QRCode['CorrectLevel']['H']
});
$('#toTop-div')['click'](function () {
var _0x43a5df = {
'pVsbF': function (_0x5384f8, _0x122261) {
return _0x5384f8(_0x122261);
}
};
_0x43a5df['pVsbF']($, window)['scrollTo'](0x0, 0x1f4);
});
var waypoint = new Waypoint({
'element': document['getElementById']('middle-content'), 'handler': function (_0x320b45) {
var _0x2a7476 = {
'Ckqhg': function (_0x469ff8, _0x5cd11c) {
return _0x469ff8 == _0x5cd11c;
}, 'bDtFE': 'down', 'BqmBe': function (_0x1df816, _0x4701b9) {
return _0x1df816(_0x4701b9);
}, 'eyVUT': '#toolbar-list', 'CEXJj': function (_0x4f2ccf, _0x23e0cc) {
return _0x4f2ccf(_0x23e0cc);
}
};
if (_0x2a7476['Ckqhg'](_0x320b45, _0x2a7476['bDtFE'])) {
_0x2a7476['BqmBe']($, _0x2a7476['eyVUT'])['show'](0x64);
} else {
_0x2a7476['CEXJj']($, _0x2a7476['eyVUT'])['hide'](0x1f4);
}
}
});
$('.ui.form')['form']({
'fields': {
'nickName': {
'identifier': 'nickName',
'rules': [{'type': 'empty', 'prompt': '评论昵称不能为空'}]
}, 'content': {'identifier': 'content', 'rules': [{'type': 'empty', 'prompt': '回复内容不能为空'}]}
}
});
$('#comment-btn-sub')['click'](function () {
var _0x19ea25 = {
'WMqVz': function (_0x568012, _0x2af04b) {
return _0x568012(_0x2af04b);
}, 'NAaLP': '.ui.form', 'hMYzs': 'validate\x20form', 'Tffrz': function (_0x142248) {
return _0x142248();
}
};
var _0x26e47e = _0x19ea25['WMqVz']($, _0x19ea25['NAaLP'])['form'](_0x19ea25['hMYzs']);
if (_0x26e47e) {
_0x19ea25['Tffrz'](postData);
}
});
function postData() {
var _0x283a49 = {
'nGPxR': function (_0xcb4636) {
return _0xcb4636();
}, 'qRqtq': function (_0xc5aaea, _0x5b91a3) {
return _0xc5aaea(_0x5b91a3);
}, 'ELzps': '#command-container', 'KBNIZ': '/comments', 'HdMeD': function (_0x45fc63, _0x16c1c0) {
return _0x45fc63(_0x16c1c0);
}, 'GIJiP': '[name=\x27parentComment.id\x27]', 'redww': function (_0x2573bf, _0x31ff05) {
return _0x2573bf(_0x31ff05);
}, 'KnlCA': '[name=\x27blogBean.id\x27]', 'Ypisp': function (_0x3a4cbe, _0x488760) {
return _0x3a4cbe(_0x488760);
}, 'TuTzK': '[name=\x27content\x27]'
};
_0x283a49['qRqtq']($, _0x283a49['ELzps'])['load'](_0x283a49['KBNIZ'], {
'parentComment.id': _0x283a49['HdMeD']($, _0x283a49['GIJiP'])['val'](),
'blogBean.id': _0x283a49['redww']($, _0x283a49['KnlCA'])['val'](),
'content': _0x283a49['Ypisp']($, _0x283a49['TuTzK'])['val']()
}, function (_0x29c0ab, _0xbe8458, _0x185af1) {
_0x283a49['nGPxR'](clearMessage);
});
}
function clearMessage() {
var _0x47d14d = {
'uqJae': function (_0x2b03ce, _0xb22fbb) {
return _0x2b03ce(_0xb22fbb);
}, 'giuBZ': '[name=\x27content\x27]', 'Xjmxp': function (_0x731754, _0x30f655) {
return _0x731754(_0x30f655);
}, 'STElQ': '[name=\x27parentComment.id\x27]', 'jJieZ': function (_0x46bac3, _0xd2eff0) {
return _0x46bac3(_0xd2eff0);
}, 'ruiil': 'placeholder', 'ImZig': '请输入评论信息...'
};
_0x47d14d['uqJae']($, _0x47d14d['giuBZ'])['val']('');
_0x47d14d['Xjmxp']($, _0x47d14d['STElQ'])['val'](-0x1);
_0x47d14d['jJieZ']($, _0x47d14d['giuBZ'])['attr'](_0x47d14d['ruiil'], _0x47d14d['ImZig']);
}
function reply(_0x4f512a) {
var _0x321d38 = {
'iVGcy': '3|1|0|4|5|2',
'HeFaY': function (_0x1b0cd0, _0x2a80fc) {
return _0x1b0cd0(_0x2a80fc);
},
'spJaG': 'comment-nickname',
'ncEqy': '#comment-form',
'bmUZw': function (_0x409e74, _0x533775) {
return _0x409e74(_0x533775);
},
'GosxG': 'comment-id',
'ojyvd': '[name=\x27content\x27]',
'uzAtG': 'placeholder',
'UGoJl': function (_0x18108f, _0x3a66fc) {
return _0x18108f + _0x3a66fc;
},
'SJsRw': function (_0x12ddfd, _0x5420d5) {
return _0x12ddfd(_0x5420d5);
},
'BMfyE': '[name=\x27parentComment.id\x27]'
};
var _0x4153c2 = _0x321d38['iVGcy']['split']('|'), _0x1d690b = 0x0;
while (!![]) {
switch (_0x4153c2[_0x1d690b++]) {
case'0':
console['log'](_0x3f508c);
continue;
case'1':
var _0x3f508c = _0x321d38['HeFaY']($, _0x4f512a)['data'](_0x321d38['spJaG']);
continue;
case'2':
_0x321d38['HeFaY']($, window)['scrollTo'](_0x321d38['HeFaY']($, _0x321d38['ncEqy']), 0x1f4);
continue;
case'3':
var _0x3f4f7e = _0x321d38['bmUZw']($, _0x4f512a)['data'](_0x321d38['GosxG']);
continue;
case'4':
_0x321d38['bmUZw']($, _0x321d38['ojyvd'])['attr'](_0x321d38['uzAtG'], _0x321d38['UGoJl']('@', _0x3f508c))['focus']();
continue;
case'5':
_0x321d38['SJsRw']($, _0x321d38['BMfyE'])['val'](_0x3f4f7e);
continue;
}
break;
}
}
increaseViewCount();
function increaseViewCount() {
var _0x547753 = {
'phXPY': function (_0x2c0ee1, _0x2f87b7) {
return _0x2c0ee1(_0x2f87b7); | if (_0x547753['LnJlb']($['cookie'](_0x547753['MQdBm']), blogId)) {
$['ajax']({
'async': ![],
'type': _0x547753['UGZdK'],
'url': _0x547753['nFSAq'],
'data': {'id': blogId},
'dataType': _0x547753['FNhau'],
'success': function (_0x1d5219) {
_0x547753['phXPY']($, _0x547753['pRMne'])['html'](_0x1d5219);
$['cookie'](_0x547753['MQdBm'], blogId, {'path': '/'});
},
'error': function () {
_0x547753['phXPY'](alert, _0x547753['ElsTJ']);
}
});
}
}
$('#like_add')['click'](function () {
var _0x4cb468 = {
'emvlb': 'like_add', 'WADFd': 'countLike', 'deLPk': function (_0x6ee263, _0xc0c7e4) {
return _0x6ee263 != _0xc0c7e4;
}, 'gQVYx': 'likeId', 'YCDvV': function (_0x232617, _0x44591d, _0x458206, _0x1a33db) {
return _0x232617(_0x44591d, _0x458206, _0x1a33db);
}, 'itOwV': 'data-content', 'MTKBe': '你当前还没登录,无法点赞', 'lXVHD': function (_0x17f14a, _0x3379fe) {
return _0x17f14a(_0x3379fe);
}, 'ZnNtx': '#like_add'
};
var _0x169ecf = document['getElementById'](_0x4cb468['emvlb']);
var _0x2eb0c2 = document['getElementById'](_0x4cb468['WADFd']);
if (_0x4cb468['deLPk']($['cookie'](_0x4cb468['gQVYx']), blogId)) {
if (!hasUser) {
_0x4cb468['YCDvV'](postBlogAjax, 0x1, _0x169ecf, _0x2eb0c2);
} else {
_0x169ecf['setAttribute'](_0x4cb468['itOwV'], _0x4cb468['MTKBe']);
}
}
_0x4cb468['lXVHD']($, _0x4cb468['ZnNtx'])['popup']();
});
$('#opposition_add')['click'](function () {
var _0xbda76e = {
'wkmzn': 'opposition_add', 'PXHTe': 'countOpposition', 'QvLuC': function (_0x54e22a, _0x482b63) {
return _0x54e22a != _0x482b63;
}, 'ClCZO': 'likeId', 'QbJIw': function (_0xd61974, _0x11af7b, _0x51cfee, _0x39bf70) {
return _0xd61974(_0x11af7b, _0x51cfee, _0x39bf70);
}, 'HsWQc': 'data-content', 'ptDzr': '你当前还没登录,无法反对', 'SjiyU': function (_0x162153, _0x5a6f4a) {
return _0x162153(_0x5a6f4a);
}, 'YYcND': '#opposition_add'
};
var _0x53a506 = document['getElementById'](_0xbda76e['wkmzn']);
var _0x45c819 = document['getElementById'](_0xbda76e['PXHTe']);
if (_0xbda76e['QvLuC']($['cookie'](_0xbda76e['ClCZO']), blogId)) {
if (!hasUser) {
_0xbda76e['QbJIw'](postBlogAjax, 0x2, _0x53a506, _0x45c819);
} else {
_0x53a506['setAttribute'](_0xbda76e['HsWQc'], _0xbda76e['ptDzr']);
}
}
_0xbda76e['SjiyU']($, _0xbda76e['YYcND'])['popup']();
});
function postBlogAjax(_0x412555, _0x39d39d, _0x1aedd1) {
var _0x17a8a1 = {
'MVKgg': function (_0x50b4d0, _0x163584) {
return _0x50b4d0 == _0x163584;
}, 'lktox': '200', 'RaBsV': function (_0x14f0e8, _0x5763ce) {
return _0x14f0e8 + _0x5763ce;
}, 'DbieH': function (_0x20d280, _0x1d5220) {
return _0x20d280(_0x1d5220);
}, 'pLfBt': 'data-content', 'mmiew': 'likeId', 'qGPIq': 'POST', 'VrgVR': '/addlike', 'wfpEk': 'json'
};
$['ajax']({
'async': ![],
'type': _0x17a8a1['qGPIq'],
'url': _0x17a8a1['VrgVR'],
'data': {'id': blogId, 'typeId': _0x412555},
'dataType': _0x17a8a1['wfpEk'],
'success': function (_0xef938c) {
if (_0x17a8a1['MVKgg'](_0xef938c['code'], _0x17a8a1['lktox'])) {
_0x1aedd1['innerText'] = _0x17a8a1['RaBsV'](_0x17a8a1['DbieH'](Number, _0x1aedd1['innerText']), 0x1);
}
_0x39d39d['setAttribute'](_0x17a8a1['pLfBt'], _0xef938c['message']);
$['cookie'](_0x17a8a1['mmiew'], blogId, {'path': '/'});
}
});
} | }, 'pRMne': '.views', 'MQdBm': 'viewId', 'ElsTJ': '获取数据出错!', 'LnJlb': function (_0x5694d3, _0x2622b8) {
return _0x5694d3 != _0x2622b8;
}, 'UGZdK': 'POST', 'nFSAq': '/addView', 'FNhau': 'json'
}; | random_line_split |
ACL2011_v2.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import codecs
import logging
import os
import random
import re
import sys
from unixnlp.sys_utils import *
import subprocess
from subprocess import Popen
from collections import Counter
from os import path
from nltk import word_tokenize
from nltk.util import ngrams
from numpy import argsort
from scipy.sparse import coo_matrix
from sklearn import metrics
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from unixnlp.cat_ASCII import cat_ASCII
from unixnlp.similarity import l2_norm
from unixnlp.split_folds import split_folds
from unixnlp.wordmap import WordMap
DEFAULT_LIWC_HOME = path.join(path.dirname(__file__), "lib", "LIWC")
def LIWC(file, LIWC_home=DEFAULT_LIWC_HOME):
"""LIWC_sh = path.join(LIWC_home, "LIWC.sh")
cmd = " ".join([LIWC_sh, file])
p = Command(cmd, shell=True, universal_newlines=True)
(retcode, stdout, stderr) = p.run()"""
program = ['java', 'lib/LIWC/LIWC', '-in', 'test.txt', '-out', 'lib/LIWC/myout.txt', '-dic', 'LIWC2007_English080130.dic']
subprocess.call(program)
# extract features, unknown from stdout.
features = {}
unknown = {}
location = 0
for line in stdout.splitlines():
if line.startswith("Total number of words:"):
wc = int(line[line.find(":")+1:])
if wc == 0:
wc = 0.0000001
elif line.startswith("Categories:"):
location = 1
elif line.startswith("Unknown words:"):
location = 2
elif location == 1:
(feature, count, percent) = line.strip().replace(":", "").split()
features[feature] = (int(count), percent)
elif location == 2:
(feature, count) = line.strip().replace(":", "").split()
unknown[feature] = int(count)
# get tokens to set remaining features.
lines = []
with open(file) as h:
for line in h:
lines.append(line.split())
features["wc"] = (wc, "100%")
features["wps"] = (wc / len(lines), "{:.1%}".format(1 / len(lines)))
fdict = wc - sum(unknown.values())
features["dict"] = (fdict, "{:.1%}".format(fdict / wc))
fsixltr = sum([
sum([
1 for token in line
if len(token) > 6
]) for line in lines])
features["sixltr"] = (fsixltr, "{:.1%}".format(fsixltr / wc))
return (features, unknown)
def main():
parser = argparse.ArgumentParser(description="Replicates ACL 2011 results.")
parser.add_argument("--debug", action="store_true",
help="debug output")
parser.add_argument("--featuremap", metavar="FILE", default="featuremap",
help="location of feature map")
parser.add_argument("--nfolds", metavar="N", type=int, default=5,
help="number of folds to use for cross-validation")
parser.add_argument("--output", metavar="FILE", default="CV_output",
help="file prefix for saving CV output")
parser.add_argument("--top", metavar="N", type=int, default=0,
help="output top N features for each class")
parser.add_argument("data",
help="directory containing review data")
args = parser.parse_args()
# set logging level
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
# load featuremap
featuremap = WordMap(args.featuremap)
# load documents, ignoring the folds
if not path.isdir(args.data):
raise IOError("Data directory not found: {0}".format(args.data))
docs = {}
for (dirpath, dirnames, filenames) in os.walk(args.data):
for filename in filenames:
if filename.endswith(".txt"):
# example filename: t_hilton_1.txt, meaning truthful review of the Hilton hotel
(label, hotel, i) = filename.split("_")
# get review text
with codecs.open(path.join(dirpath, filename), 'r', 'utf8') as h:
text = h.read().strip()
# save review
docs.setdefault(hotel, []).append({
'_id': path.join(dirpath, filename),
'class': {'t': -1, 'd': 1}.get(label),
'hotel': hotel,
'text': text
})
# split docs into folds (stratified by hotel)
folds = []
hotels = docs.keys()
random.shuffle(hotels)
for (i, hotels_i) in enumerate(split_folds(hotels, args.nfolds)):
fold = DocumentCollection()
for hotel in hotels_i:
fold += DocumentCollection(docs[hotel])
folds.append(fold)
# split data into folds (no stratification)
#folds = map(DocumentCollection, split_folds(docs, args.nfolds))
# perform cross validation
CV_ids, CV_hotels, CV_labels, CV_preds, CV_binary_preds = [], [], [], [], [] # CV output
for (i, (train, test)) in enumerate(DocumentCollection.get_train_test(folds)):
# get training features and labels
logging.getLogger("fold_{0}".format(i)).debug("getting training features and labels")
train_features = get_features(train, featuremap, is_train=True)
(train_labels, _) = train.flatten()
# choose parameters using nested cross-validation on train folds and grid search
logging.getLogger("fold_{0}".format(i)).debug("tuning parameters")
grid = GridSearchCV(LinearSVC(loss='l1'),
{'C': [0.001, 0.002, 0.004, 0.006, 0.008,
0.01, 0.02, 0.04, 0.06, 0.08,
0.1, 0.2, 0.4, 0.6, 0.8,
1, 2, 4, 6, 8,
10, 20, 40, 60, 80,
100, 200, 400, 600, 800, 1000]}, cv=args.nfolds)
grid.fit(train_features, train_labels)
classifier = grid.best_estimator_
# train classifier using best parameters
logging.getLogger("fold_{0}".format(i)).debug("training w/ params ({0}) and labels ({1})".format(classifier, Counter(train_labels)))
classifier.fit(train_features, train_labels)
# get top N features
if args.top != 0:
logging.getLogger("fold_{0}".format(i)).debug("getting top {0} features per class".format(args.top))
for (label, topN) in get_top_features(classifier, args.top).items():
logging.getLogger("fold_{0}".format(i)).info("class {0}:".format(label))
for id in topN:
logging.getLogger("fold_{0}".format(i)).info(featuremap[id + 1])
# get test features and labels
logging.getLogger("fold_{0}".format(i)).debug("getting test features and labels")
test_features = get_features(test, featuremap, is_train=False)
(test_labels, _) = test.flatten()
# test
logging.getLogger("fold_{0}".format(i)).debug("testing (labels: {0})".format(Counter(test_labels)))
test_preds = list(map(lambda y: y[0], classifier.decision_function(test_features)))
# save CV output
CV_ids.extend(test['_id'])
CV_hotels.extend(test['hotel'])
CV_labels.extend(test_labels)
CV_preds.extend(test_preds)
CV_binary_preds.extend(map(lambda y: 1 if y > 0 else -1, test_preds))
# output various CV reports
logging.getLogger("CV").info(metrics.classification_report(CV_labels, CV_binary_preds))
logging.getLogger("CV").info("Per-class (P)recision, (R)ecall, (F)-score")
logging.getLogger("CV").info("\t-1\t1")
(precision, recall, fscore, support) = metrics.precision_recall_fscore_support(CV_labels, CV_binary_preds)
for (metric, results) in [("P", precision), ("R", recall), ("F", fscore)]:
logging.getLogger("CV").info("{0}\t{1}\t{2}".format(metric, results[0], results[1]))
logging.getLogger("CV").info("confusion matrix:")
logging.getLogger("CV").info("real\\pred\t-1\t1")
(c0, c1) = metrics.confusion_matrix(CV_labels, CV_binary_preds)
for (label, results) in [("-1", c0), ("1", c1)]:
logging.getLogger("CV").info("{0}\t{1}\t{2}".format(label, results[0], results[1]))
# save CV predictions
if args.output:
logging.getLogger("CV").debug("saving CV output to {0}".format(args.output))
with codecs.open(args.output, 'w', 'utf8') as h:
for (id, hotel, label, pred) in zip(CV_ids, CV_hotels, CV_labels, CV_preds):
print("{0}\t{1}\t{2}\t{3:f}".format(id, hotel, label, pred), file=h)
# safe featuremap for future use/reference
featuremap.write()
def get_features(dc, featuremap, is_train=True):
row, col, data = [], [], [] # for scikit-learn feature representation
for (d, (label, text)) in enumerate(dc):
# start with n-grams
features = get_ngrams(text, [1,2])
#add LIWC features
fopen = open("liwc.txt","w")
fopen.writelines(text)
fopen.close()
(liwc_features, _) = LIWC("liwc.txt")
new_features = get_liwcFeatures(liwc_features)
new_features = unit_normalize(new_features)
# unit normalization
features = unit_normalize(features)
features.update(new_features)
# convert features to scikit-learn format
for (feature, value) in features.items():
# map features to identifiers
id = None
if feature in featuremap or is_train: # ignore features not seen in training
id = featuremap[feature] - 1
# add feature value to scikit-learn representation
if id is not None:
row.append(d)
col.append(id)
data.append(value)
return coo_matrix( (data, (row, col) ), shape=[len(dc), len(featuremap)] ).tocsr()
def get_liwcFeatures(liwc_features):
features = {}
for feature in liwc_features:
features.update({"LIWC_"+feature: liwc_features[feature][0]})
return features
def get_ngrams(text, N):
text = cat_ASCII(text)
text = re.sub("\s+", " ", text)
text = text.lower()
features = Counter()
tokens = word_tokenize(text)
for n in N:
for ngram in ngrams(tokens, n):
feature = "{0}GRAMS_{1}".format(n, "__".join(ngram))
features.update({feature: 1})
return features
def unit_normalize(features):
norm = l2_norm(features)
return dict([(k, v / norm) for (k, v) in features.items()])
# extract the top N weighted features from the model learned by classifier
def get_top_features(classifier, N):
# get top features for each class
sorted_coefs = argsort(classifier.coef_[0])
return {
-1: sorted_coefs[:N].tolist(),
1: reversed(sorted_coefs[-N:].tolist())
}
class | :
def __init__(self, collection=[], label_key="class", text_key="text"):
self.collection = list(collection)
self.label_key = label_key
self.text_key = text_key
def __add__(self, other):
if self.label_key != other.label_key or self.text_key != other.text_key:
raise Exception("Mismatched keys!")
return DocumentCollection(self.collection + other.collection, self.label_key, self.text_key)
def __iadd__(self, other):
if self.label_key != other.label_key or self.text_key != other.text_key:
raise Exception("Mismatched keys!")
self.collection.extend(other.collection)
return self
def __getitem__(self, key):
return [doc[key] if key else None
for doc in self.collection]
def __len__(self):
return len(self.collection)
def __iter__(self):
for doc in self.collection:
yield (self._label(doc), self._text(doc))
raise StopIteration
def _label(self, doc):
return doc[self.label_key]
def _text(self, doc):
return doc[self.text_key]
def flatten(self):
return zip(*iter(self))
@staticmethod
def get_train_test(folds):
for (test_i, test) in enumerate(folds):
train_folds = [folds[i] for i in range(len(folds)) if i != test_i]
train = reduce(lambda a, b: a + b, train_folds, DocumentCollection())
yield (train, test)
if __name__ == '__main__':
main()
| DocumentCollection | identifier_name |
ACL2011_v2.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import codecs
import logging
import os
import random
import re
import sys
from unixnlp.sys_utils import *
import subprocess
from subprocess import Popen
from collections import Counter
from os import path
from nltk import word_tokenize
from nltk.util import ngrams
from numpy import argsort
from scipy.sparse import coo_matrix
from sklearn import metrics
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from unixnlp.cat_ASCII import cat_ASCII
from unixnlp.similarity import l2_norm
from unixnlp.split_folds import split_folds
from unixnlp.wordmap import WordMap
DEFAULT_LIWC_HOME = path.join(path.dirname(__file__), "lib", "LIWC")
def LIWC(file, LIWC_home=DEFAULT_LIWC_HOME):
"""LIWC_sh = path.join(LIWC_home, "LIWC.sh")
cmd = " ".join([LIWC_sh, file])
p = Command(cmd, shell=True, universal_newlines=True)
(retcode, stdout, stderr) = p.run()"""
program = ['java', 'lib/LIWC/LIWC', '-in', 'test.txt', '-out', 'lib/LIWC/myout.txt', '-dic', 'LIWC2007_English080130.dic']
subprocess.call(program)
# extract features, unknown from stdout.
features = {}
unknown = {}
location = 0
for line in stdout.splitlines():
if line.startswith("Total number of words:"):
wc = int(line[line.find(":")+1:])
if wc == 0:
wc = 0.0000001
elif line.startswith("Categories:"):
location = 1
elif line.startswith("Unknown words:"):
location = 2
elif location == 1:
(feature, count, percent) = line.strip().replace(":", "").split()
features[feature] = (int(count), percent)
elif location == 2:
(feature, count) = line.strip().replace(":", "").split()
unknown[feature] = int(count)
# get tokens to set remaining features.
lines = []
with open(file) as h:
for line in h:
lines.append(line.split())
features["wc"] = (wc, "100%")
features["wps"] = (wc / len(lines), "{:.1%}".format(1 / len(lines)))
fdict = wc - sum(unknown.values())
features["dict"] = (fdict, "{:.1%}".format(fdict / wc))
fsixltr = sum([
sum([
1 for token in line
if len(token) > 6
]) for line in lines])
features["sixltr"] = (fsixltr, "{:.1%}".format(fsixltr / wc))
return (features, unknown)
def main():
parser = argparse.ArgumentParser(description="Replicates ACL 2011 results.")
parser.add_argument("--debug", action="store_true",
help="debug output")
parser.add_argument("--featuremap", metavar="FILE", default="featuremap",
help="location of feature map")
parser.add_argument("--nfolds", metavar="N", type=int, default=5,
help="number of folds to use for cross-validation")
parser.add_argument("--output", metavar="FILE", default="CV_output",
help="file prefix for saving CV output")
parser.add_argument("--top", metavar="N", type=int, default=0,
help="output top N features for each class")
parser.add_argument("data",
help="directory containing review data")
args = parser.parse_args()
# set logging level
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
# load featuremap
featuremap = WordMap(args.featuremap)
# load documents, ignoring the folds
if not path.isdir(args.data):
raise IOError("Data directory not found: {0}".format(args.data))
docs = {}
for (dirpath, dirnames, filenames) in os.walk(args.data):
for filename in filenames:
if filename.endswith(".txt"):
# example filename: t_hilton_1.txt, meaning truthful review of the Hilton hotel
(label, hotel, i) = filename.split("_")
# get review text
with codecs.open(path.join(dirpath, filename), 'r', 'utf8') as h:
text = h.read().strip()
# save review
docs.setdefault(hotel, []).append({
'_id': path.join(dirpath, filename),
'class': {'t': -1, 'd': 1}.get(label),
'hotel': hotel,
'text': text
})
# split docs into folds (stratified by hotel)
folds = []
hotels = docs.keys()
random.shuffle(hotels)
for (i, hotels_i) in enumerate(split_folds(hotels, args.nfolds)):
fold = DocumentCollection()
for hotel in hotels_i:
fold += DocumentCollection(docs[hotel])
folds.append(fold)
# split data into folds (no stratification)
#folds = map(DocumentCollection, split_folds(docs, args.nfolds))
# perform cross validation
CV_ids, CV_hotels, CV_labels, CV_preds, CV_binary_preds = [], [], [], [], [] # CV output
for (i, (train, test)) in enumerate(DocumentCollection.get_train_test(folds)):
# get training features and labels
logging.getLogger("fold_{0}".format(i)).debug("getting training features and labels")
train_features = get_features(train, featuremap, is_train=True)
(train_labels, _) = train.flatten()
# choose parameters using nested cross-validation on train folds and grid search
logging.getLogger("fold_{0}".format(i)).debug("tuning parameters")
grid = GridSearchCV(LinearSVC(loss='l1'),
{'C': [0.001, 0.002, 0.004, 0.006, 0.008,
0.01, 0.02, 0.04, 0.06, 0.08,
0.1, 0.2, 0.4, 0.6, 0.8,
1, 2, 4, 6, 8,
10, 20, 40, 60, 80,
100, 200, 400, 600, 800, 1000]}, cv=args.nfolds)
grid.fit(train_features, train_labels)
classifier = grid.best_estimator_
# train classifier using best parameters
logging.getLogger("fold_{0}".format(i)).debug("training w/ params ({0}) and labels ({1})".format(classifier, Counter(train_labels)))
classifier.fit(train_features, train_labels)
# get top N features
if args.top != 0:
logging.getLogger("fold_{0}".format(i)).debug("getting top {0} features per class".format(args.top))
for (label, topN) in get_top_features(classifier, args.top).items():
logging.getLogger("fold_{0}".format(i)).info("class {0}:".format(label))
for id in topN:
logging.getLogger("fold_{0}".format(i)).info(featuremap[id + 1])
# get test features and labels
logging.getLogger("fold_{0}".format(i)).debug("getting test features and labels")
test_features = get_features(test, featuremap, is_train=False)
(test_labels, _) = test.flatten()
# test
logging.getLogger("fold_{0}".format(i)).debug("testing (labels: {0})".format(Counter(test_labels)))
test_preds = list(map(lambda y: y[0], classifier.decision_function(test_features)))
# save CV output
CV_ids.extend(test['_id'])
CV_hotels.extend(test['hotel'])
CV_labels.extend(test_labels)
CV_preds.extend(test_preds)
CV_binary_preds.extend(map(lambda y: 1 if y > 0 else -1, test_preds))
# output various CV reports
logging.getLogger("CV").info(metrics.classification_report(CV_labels, CV_binary_preds))
logging.getLogger("CV").info("Per-class (P)recision, (R)ecall, (F)-score")
logging.getLogger("CV").info("\t-1\t1")
(precision, recall, fscore, support) = metrics.precision_recall_fscore_support(CV_labels, CV_binary_preds)
for (metric, results) in [("P", precision), ("R", recall), ("F", fscore)]:
logging.getLogger("CV").info("{0}\t{1}\t{2}".format(metric, results[0], results[1]))
logging.getLogger("CV").info("confusion matrix:")
logging.getLogger("CV").info("real\\pred\t-1\t1")
(c0, c1) = metrics.confusion_matrix(CV_labels, CV_binary_preds)
for (label, results) in [("-1", c0), ("1", c1)]:
logging.getLogger("CV").info("{0}\t{1}\t{2}".format(label, results[0], results[1]))
# save CV predictions
if args.output:
logging.getLogger("CV").debug("saving CV output to {0}".format(args.output))
with codecs.open(args.output, 'w', 'utf8') as h:
for (id, hotel, label, pred) in zip(CV_ids, CV_hotels, CV_labels, CV_preds):
print("{0}\t{1}\t{2}\t{3:f}".format(id, hotel, label, pred), file=h)
# safe featuremap for future use/reference
featuremap.write()
def get_features(dc, featuremap, is_train=True):
row, col, data = [], [], [] # for scikit-learn feature representation
for (d, (label, text)) in enumerate(dc):
# start with n-grams
features = get_ngrams(text, [1,2])
#add LIWC features
fopen = open("liwc.txt","w")
fopen.writelines(text)
fopen.close()
(liwc_features, _) = LIWC("liwc.txt")
new_features = get_liwcFeatures(liwc_features)
new_features = unit_normalize(new_features)
# unit normalization
features = unit_normalize(features)
features.update(new_features)
# convert features to scikit-learn format
for (feature, value) in features.items():
# map features to identifiers
id = None
if feature in featuremap or is_train: # ignore features not seen in training
id = featuremap[feature] - 1
# add feature value to scikit-learn representation
if id is not None:
row.append(d)
col.append(id)
data.append(value)
return coo_matrix( (data, (row, col) ), shape=[len(dc), len(featuremap)] ).tocsr()
def get_liwcFeatures(liwc_features):
features = {}
for feature in liwc_features:
features.update({"LIWC_"+feature: liwc_features[feature][0]})
return features
def get_ngrams(text, N):
text = cat_ASCII(text)
text = re.sub("\s+", " ", text)
text = text.lower()
features = Counter()
tokens = word_tokenize(text)
for n in N:
for ngram in ngrams(tokens, n):
feature = "{0}GRAMS_{1}".format(n, "__".join(ngram))
features.update({feature: 1})
return features
def unit_normalize(features):
norm = l2_norm(features)
return dict([(k, v / norm) for (k, v) in features.items()])
# extract the top N weighted features from the model learned by classifier
def get_top_features(classifier, N):
# get top features for each class
sorted_coefs = argsort(classifier.coef_[0])
return {
-1: sorted_coefs[:N].tolist(),
1: reversed(sorted_coefs[-N:].tolist())
}
class DocumentCollection:
def __init__(self, collection=[], label_key="class", text_key="text"):
self.collection = list(collection)
self.label_key = label_key
self.text_key = text_key
def __add__(self, other):
if self.label_key != other.label_key or self.text_key != other.text_key:
raise Exception("Mismatched keys!")
return DocumentCollection(self.collection + other.collection, self.label_key, self.text_key)
def __iadd__(self, other):
if self.label_key != other.label_key or self.text_key != other.text_key:
raise Exception("Mismatched keys!")
self.collection.extend(other.collection)
return self
def __getitem__(self, key):
return [doc[key] if key else None
for doc in self.collection]
def __len__(self):
return len(self.collection)
def __iter__(self):
for doc in self.collection:
yield (self._label(doc), self._text(doc))
raise StopIteration
def _label(self, doc):
return doc[self.label_key]
def _text(self, doc):
|
def flatten(self):
return zip(*iter(self))
@staticmethod
def get_train_test(folds):
for (test_i, test) in enumerate(folds):
train_folds = [folds[i] for i in range(len(folds)) if i != test_i]
train = reduce(lambda a, b: a + b, train_folds, DocumentCollection())
yield (train, test)
if __name__ == '__main__':
main()
| return doc[self.text_key] | identifier_body |
ACL2011_v2.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import codecs
import logging
import os
import random
import re
import sys
from unixnlp.sys_utils import *
import subprocess
from subprocess import Popen
from collections import Counter
from os import path
from nltk import word_tokenize
from nltk.util import ngrams
from numpy import argsort
from scipy.sparse import coo_matrix
from sklearn import metrics
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from unixnlp.cat_ASCII import cat_ASCII
from unixnlp.similarity import l2_norm
from unixnlp.split_folds import split_folds
from unixnlp.wordmap import WordMap
DEFAULT_LIWC_HOME = path.join(path.dirname(__file__), "lib", "LIWC")
def LIWC(file, LIWC_home=DEFAULT_LIWC_HOME):
"""LIWC_sh = path.join(LIWC_home, "LIWC.sh")
cmd = " ".join([LIWC_sh, file])
p = Command(cmd, shell=True, universal_newlines=True)
(retcode, stdout, stderr) = p.run()"""
program = ['java', 'lib/LIWC/LIWC', '-in', 'test.txt', '-out', 'lib/LIWC/myout.txt', '-dic', 'LIWC2007_English080130.dic']
subprocess.call(program)
# extract features, unknown from stdout.
features = {}
unknown = {}
location = 0
for line in stdout.splitlines():
if line.startswith("Total number of words:"):
wc = int(line[line.find(":")+1:])
if wc == 0:
wc = 0.0000001
elif line.startswith("Categories:"):
location = 1
elif line.startswith("Unknown words:"):
location = 2
elif location == 1:
(feature, count, percent) = line.strip().replace(":", "").split()
features[feature] = (int(count), percent)
elif location == 2:
(feature, count) = line.strip().replace(":", "").split()
unknown[feature] = int(count)
# get tokens to set remaining features.
lines = []
with open(file) as h:
for line in h:
lines.append(line.split())
features["wc"] = (wc, "100%")
features["wps"] = (wc / len(lines), "{:.1%}".format(1 / len(lines)))
fdict = wc - sum(unknown.values())
features["dict"] = (fdict, "{:.1%}".format(fdict / wc))
fsixltr = sum([
sum([
1 for token in line
if len(token) > 6
]) for line in lines])
features["sixltr"] = (fsixltr, "{:.1%}".format(fsixltr / wc))
return (features, unknown)
def main():
parser = argparse.ArgumentParser(description="Replicates ACL 2011 results.")
parser.add_argument("--debug", action="store_true",
help="debug output")
parser.add_argument("--featuremap", metavar="FILE", default="featuremap",
help="location of feature map")
parser.add_argument("--nfolds", metavar="N", type=int, default=5,
help="number of folds to use for cross-validation")
parser.add_argument("--output", metavar="FILE", default="CV_output",
help="file prefix for saving CV output")
parser.add_argument("--top", metavar="N", type=int, default=0,
help="output top N features for each class")
parser.add_argument("data",
help="directory containing review data")
args = parser.parse_args()
# set logging level
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
# load featuremap
featuremap = WordMap(args.featuremap)
# load documents, ignoring the folds
if not path.isdir(args.data):
raise IOError("Data directory not found: {0}".format(args.data))
docs = {}
for (dirpath, dirnames, filenames) in os.walk(args.data):
for filename in filenames:
if filename.endswith(".txt"):
# example filename: t_hilton_1.txt, meaning truthful review of the Hilton hotel
(label, hotel, i) = filename.split("_")
# get review text
with codecs.open(path.join(dirpath, filename), 'r', 'utf8') as h:
text = h.read().strip()
# save review
docs.setdefault(hotel, []).append({
'_id': path.join(dirpath, filename),
'class': {'t': -1, 'd': 1}.get(label),
'hotel': hotel,
'text': text
})
# split docs into folds (stratified by hotel)
folds = []
hotels = docs.keys()
random.shuffle(hotels)
for (i, hotels_i) in enumerate(split_folds(hotels, args.nfolds)):
fold = DocumentCollection()
for hotel in hotels_i:
fold += DocumentCollection(docs[hotel])
folds.append(fold)
# split data into folds (no stratification)
#folds = map(DocumentCollection, split_folds(docs, args.nfolds))
# perform cross validation | # get training features and labels
logging.getLogger("fold_{0}".format(i)).debug("getting training features and labels")
train_features = get_features(train, featuremap, is_train=True)
(train_labels, _) = train.flatten()
# choose parameters using nested cross-validation on train folds and grid search
logging.getLogger("fold_{0}".format(i)).debug("tuning parameters")
grid = GridSearchCV(LinearSVC(loss='l1'),
{'C': [0.001, 0.002, 0.004, 0.006, 0.008,
0.01, 0.02, 0.04, 0.06, 0.08,
0.1, 0.2, 0.4, 0.6, 0.8,
1, 2, 4, 6, 8,
10, 20, 40, 60, 80,
100, 200, 400, 600, 800, 1000]}, cv=args.nfolds)
grid.fit(train_features, train_labels)
classifier = grid.best_estimator_
# train classifier using best parameters
logging.getLogger("fold_{0}".format(i)).debug("training w/ params ({0}) and labels ({1})".format(classifier, Counter(train_labels)))
classifier.fit(train_features, train_labels)
# get top N features
if args.top != 0:
logging.getLogger("fold_{0}".format(i)).debug("getting top {0} features per class".format(args.top))
for (label, topN) in get_top_features(classifier, args.top).items():
logging.getLogger("fold_{0}".format(i)).info("class {0}:".format(label))
for id in topN:
logging.getLogger("fold_{0}".format(i)).info(featuremap[id + 1])
# get test features and labels
logging.getLogger("fold_{0}".format(i)).debug("getting test features and labels")
test_features = get_features(test, featuremap, is_train=False)
(test_labels, _) = test.flatten()
# test
logging.getLogger("fold_{0}".format(i)).debug("testing (labels: {0})".format(Counter(test_labels)))
test_preds = list(map(lambda y: y[0], classifier.decision_function(test_features)))
# save CV output
CV_ids.extend(test['_id'])
CV_hotels.extend(test['hotel'])
CV_labels.extend(test_labels)
CV_preds.extend(test_preds)
CV_binary_preds.extend(map(lambda y: 1 if y > 0 else -1, test_preds))
# output various CV reports
logging.getLogger("CV").info(metrics.classification_report(CV_labels, CV_binary_preds))
logging.getLogger("CV").info("Per-class (P)recision, (R)ecall, (F)-score")
logging.getLogger("CV").info("\t-1\t1")
(precision, recall, fscore, support) = metrics.precision_recall_fscore_support(CV_labels, CV_binary_preds)
for (metric, results) in [("P", precision), ("R", recall), ("F", fscore)]:
logging.getLogger("CV").info("{0}\t{1}\t{2}".format(metric, results[0], results[1]))
logging.getLogger("CV").info("confusion matrix:")
logging.getLogger("CV").info("real\\pred\t-1\t1")
(c0, c1) = metrics.confusion_matrix(CV_labels, CV_binary_preds)
for (label, results) in [("-1", c0), ("1", c1)]:
logging.getLogger("CV").info("{0}\t{1}\t{2}".format(label, results[0], results[1]))
# save CV predictions
if args.output:
logging.getLogger("CV").debug("saving CV output to {0}".format(args.output))
with codecs.open(args.output, 'w', 'utf8') as h:
for (id, hotel, label, pred) in zip(CV_ids, CV_hotels, CV_labels, CV_preds):
print("{0}\t{1}\t{2}\t{3:f}".format(id, hotel, label, pred), file=h)
# safe featuremap for future use/reference
featuremap.write()
def get_features(dc, featuremap, is_train=True):
row, col, data = [], [], [] # for scikit-learn feature representation
for (d, (label, text)) in enumerate(dc):
# start with n-grams
features = get_ngrams(text, [1,2])
#add LIWC features
fopen = open("liwc.txt","w")
fopen.writelines(text)
fopen.close()
(liwc_features, _) = LIWC("liwc.txt")
new_features = get_liwcFeatures(liwc_features)
new_features = unit_normalize(new_features)
# unit normalization
features = unit_normalize(features)
features.update(new_features)
# convert features to scikit-learn format
for (feature, value) in features.items():
# map features to identifiers
id = None
if feature in featuremap or is_train: # ignore features not seen in training
id = featuremap[feature] - 1
# add feature value to scikit-learn representation
if id is not None:
row.append(d)
col.append(id)
data.append(value)
return coo_matrix( (data, (row, col) ), shape=[len(dc), len(featuremap)] ).tocsr()
def get_liwcFeatures(liwc_features):
features = {}
for feature in liwc_features:
features.update({"LIWC_"+feature: liwc_features[feature][0]})
return features
def get_ngrams(text, N):
text = cat_ASCII(text)
text = re.sub("\s+", " ", text)
text = text.lower()
features = Counter()
tokens = word_tokenize(text)
for n in N:
for ngram in ngrams(tokens, n):
feature = "{0}GRAMS_{1}".format(n, "__".join(ngram))
features.update({feature: 1})
return features
def unit_normalize(features):
norm = l2_norm(features)
return dict([(k, v / norm) for (k, v) in features.items()])
# extract the top N weighted features from the model learned by classifier
def get_top_features(classifier, N):
# get top features for each class
sorted_coefs = argsort(classifier.coef_[0])
return {
-1: sorted_coefs[:N].tolist(),
1: reversed(sorted_coefs[-N:].tolist())
}
class DocumentCollection:
def __init__(self, collection=[], label_key="class", text_key="text"):
self.collection = list(collection)
self.label_key = label_key
self.text_key = text_key
def __add__(self, other):
if self.label_key != other.label_key or self.text_key != other.text_key:
raise Exception("Mismatched keys!")
return DocumentCollection(self.collection + other.collection, self.label_key, self.text_key)
def __iadd__(self, other):
if self.label_key != other.label_key or self.text_key != other.text_key:
raise Exception("Mismatched keys!")
self.collection.extend(other.collection)
return self
def __getitem__(self, key):
return [doc[key] if key else None
for doc in self.collection]
def __len__(self):
return len(self.collection)
def __iter__(self):
for doc in self.collection:
yield (self._label(doc), self._text(doc))
raise StopIteration
def _label(self, doc):
return doc[self.label_key]
def _text(self, doc):
return doc[self.text_key]
def flatten(self):
return zip(*iter(self))
@staticmethod
def get_train_test(folds):
for (test_i, test) in enumerate(folds):
train_folds = [folds[i] for i in range(len(folds)) if i != test_i]
train = reduce(lambda a, b: a + b, train_folds, DocumentCollection())
yield (train, test)
if __name__ == '__main__':
main() | CV_ids, CV_hotels, CV_labels, CV_preds, CV_binary_preds = [], [], [], [], [] # CV output
for (i, (train, test)) in enumerate(DocumentCollection.get_train_test(folds)): | random_line_split |
ACL2011_v2.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import codecs
import logging
import os
import random
import re
import sys
from unixnlp.sys_utils import *
import subprocess
from subprocess import Popen
from collections import Counter
from os import path
from nltk import word_tokenize
from nltk.util import ngrams
from numpy import argsort
from scipy.sparse import coo_matrix
from sklearn import metrics
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from unixnlp.cat_ASCII import cat_ASCII
from unixnlp.similarity import l2_norm
from unixnlp.split_folds import split_folds
from unixnlp.wordmap import WordMap
DEFAULT_LIWC_HOME = path.join(path.dirname(__file__), "lib", "LIWC")
def LIWC(file, LIWC_home=DEFAULT_LIWC_HOME):
"""LIWC_sh = path.join(LIWC_home, "LIWC.sh")
cmd = " ".join([LIWC_sh, file])
p = Command(cmd, shell=True, universal_newlines=True)
(retcode, stdout, stderr) = p.run()"""
program = ['java', 'lib/LIWC/LIWC', '-in', 'test.txt', '-out', 'lib/LIWC/myout.txt', '-dic', 'LIWC2007_English080130.dic']
subprocess.call(program)
# extract features, unknown from stdout.
features = {}
unknown = {}
location = 0
for line in stdout.splitlines():
if line.startswith("Total number of words:"):
wc = int(line[line.find(":")+1:])
if wc == 0:
wc = 0.0000001
elif line.startswith("Categories:"):
location = 1
elif line.startswith("Unknown words:"):
location = 2
elif location == 1:
(feature, count, percent) = line.strip().replace(":", "").split()
features[feature] = (int(count), percent)
elif location == 2:
(feature, count) = line.strip().replace(":", "").split()
unknown[feature] = int(count)
# get tokens to set remaining features.
lines = []
with open(file) as h:
for line in h:
lines.append(line.split())
features["wc"] = (wc, "100%")
features["wps"] = (wc / len(lines), "{:.1%}".format(1 / len(lines)))
fdict = wc - sum(unknown.values())
features["dict"] = (fdict, "{:.1%}".format(fdict / wc))
fsixltr = sum([
sum([
1 for token in line
if len(token) > 6
]) for line in lines])
features["sixltr"] = (fsixltr, "{:.1%}".format(fsixltr / wc))
return (features, unknown)
def main():
parser = argparse.ArgumentParser(description="Replicates ACL 2011 results.")
parser.add_argument("--debug", action="store_true",
help="debug output")
parser.add_argument("--featuremap", metavar="FILE", default="featuremap",
help="location of feature map")
parser.add_argument("--nfolds", metavar="N", type=int, default=5,
help="number of folds to use for cross-validation")
parser.add_argument("--output", metavar="FILE", default="CV_output",
help="file prefix for saving CV output")
parser.add_argument("--top", metavar="N", type=int, default=0,
help="output top N features for each class")
parser.add_argument("data",
help="directory containing review data")
args = parser.parse_args()
# set logging level
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
|
# load featuremap
featuremap = WordMap(args.featuremap)
# load documents, ignoring the folds
if not path.isdir(args.data):
raise IOError("Data directory not found: {0}".format(args.data))
docs = {}
for (dirpath, dirnames, filenames) in os.walk(args.data):
for filename in filenames:
if filename.endswith(".txt"):
# example filename: t_hilton_1.txt, meaning truthful review of the Hilton hotel
(label, hotel, i) = filename.split("_")
# get review text
with codecs.open(path.join(dirpath, filename), 'r', 'utf8') as h:
text = h.read().strip()
# save review
docs.setdefault(hotel, []).append({
'_id': path.join(dirpath, filename),
'class': {'t': -1, 'd': 1}.get(label),
'hotel': hotel,
'text': text
})
# split docs into folds (stratified by hotel)
folds = []
hotels = docs.keys()
random.shuffle(hotels)
for (i, hotels_i) in enumerate(split_folds(hotels, args.nfolds)):
fold = DocumentCollection()
for hotel in hotels_i:
fold += DocumentCollection(docs[hotel])
folds.append(fold)
# split data into folds (no stratification)
#folds = map(DocumentCollection, split_folds(docs, args.nfolds))
# perform cross validation
CV_ids, CV_hotels, CV_labels, CV_preds, CV_binary_preds = [], [], [], [], [] # CV output
for (i, (train, test)) in enumerate(DocumentCollection.get_train_test(folds)):
# get training features and labels
logging.getLogger("fold_{0}".format(i)).debug("getting training features and labels")
train_features = get_features(train, featuremap, is_train=True)
(train_labels, _) = train.flatten()
# choose parameters using nested cross-validation on train folds and grid search
logging.getLogger("fold_{0}".format(i)).debug("tuning parameters")
grid = GridSearchCV(LinearSVC(loss='l1'),
{'C': [0.001, 0.002, 0.004, 0.006, 0.008,
0.01, 0.02, 0.04, 0.06, 0.08,
0.1, 0.2, 0.4, 0.6, 0.8,
1, 2, 4, 6, 8,
10, 20, 40, 60, 80,
100, 200, 400, 600, 800, 1000]}, cv=args.nfolds)
grid.fit(train_features, train_labels)
classifier = grid.best_estimator_
# train classifier using best parameters
logging.getLogger("fold_{0}".format(i)).debug("training w/ params ({0}) and labels ({1})".format(classifier, Counter(train_labels)))
classifier.fit(train_features, train_labels)
# get top N features
if args.top != 0:
logging.getLogger("fold_{0}".format(i)).debug("getting top {0} features per class".format(args.top))
for (label, topN) in get_top_features(classifier, args.top).items():
logging.getLogger("fold_{0}".format(i)).info("class {0}:".format(label))
for id in topN:
logging.getLogger("fold_{0}".format(i)).info(featuremap[id + 1])
# get test features and labels
logging.getLogger("fold_{0}".format(i)).debug("getting test features and labels")
test_features = get_features(test, featuremap, is_train=False)
(test_labels, _) = test.flatten()
# test
logging.getLogger("fold_{0}".format(i)).debug("testing (labels: {0})".format(Counter(test_labels)))
test_preds = list(map(lambda y: y[0], classifier.decision_function(test_features)))
# save CV output
CV_ids.extend(test['_id'])
CV_hotels.extend(test['hotel'])
CV_labels.extend(test_labels)
CV_preds.extend(test_preds)
CV_binary_preds.extend(map(lambda y: 1 if y > 0 else -1, test_preds))
# output various CV reports
logging.getLogger("CV").info(metrics.classification_report(CV_labels, CV_binary_preds))
logging.getLogger("CV").info("Per-class (P)recision, (R)ecall, (F)-score")
logging.getLogger("CV").info("\t-1\t1")
(precision, recall, fscore, support) = metrics.precision_recall_fscore_support(CV_labels, CV_binary_preds)
for (metric, results) in [("P", precision), ("R", recall), ("F", fscore)]:
logging.getLogger("CV").info("{0}\t{1}\t{2}".format(metric, results[0], results[1]))
logging.getLogger("CV").info("confusion matrix:")
logging.getLogger("CV").info("real\\pred\t-1\t1")
(c0, c1) = metrics.confusion_matrix(CV_labels, CV_binary_preds)
for (label, results) in [("-1", c0), ("1", c1)]:
logging.getLogger("CV").info("{0}\t{1}\t{2}".format(label, results[0], results[1]))
# save CV predictions
if args.output:
logging.getLogger("CV").debug("saving CV output to {0}".format(args.output))
with codecs.open(args.output, 'w', 'utf8') as h:
for (id, hotel, label, pred) in zip(CV_ids, CV_hotels, CV_labels, CV_preds):
print("{0}\t{1}\t{2}\t{3:f}".format(id, hotel, label, pred), file=h)
# safe featuremap for future use/reference
featuremap.write()
def get_features(dc, featuremap, is_train=True):
row, col, data = [], [], [] # for scikit-learn feature representation
for (d, (label, text)) in enumerate(dc):
# start with n-grams
features = get_ngrams(text, [1,2])
#add LIWC features
fopen = open("liwc.txt","w")
fopen.writelines(text)
fopen.close()
(liwc_features, _) = LIWC("liwc.txt")
new_features = get_liwcFeatures(liwc_features)
new_features = unit_normalize(new_features)
# unit normalization
features = unit_normalize(features)
features.update(new_features)
# convert features to scikit-learn format
for (feature, value) in features.items():
# map features to identifiers
id = None
if feature in featuremap or is_train: # ignore features not seen in training
id = featuremap[feature] - 1
# add feature value to scikit-learn representation
if id is not None:
row.append(d)
col.append(id)
data.append(value)
return coo_matrix( (data, (row, col) ), shape=[len(dc), len(featuremap)] ).tocsr()
def get_liwcFeatures(liwc_features):
features = {}
for feature in liwc_features:
features.update({"LIWC_"+feature: liwc_features[feature][0]})
return features
def get_ngrams(text, N):
text = cat_ASCII(text)
text = re.sub("\s+", " ", text)
text = text.lower()
features = Counter()
tokens = word_tokenize(text)
for n in N:
for ngram in ngrams(tokens, n):
feature = "{0}GRAMS_{1}".format(n, "__".join(ngram))
features.update({feature: 1})
return features
def unit_normalize(features):
norm = l2_norm(features)
return dict([(k, v / norm) for (k, v) in features.items()])
# extract the top N weighted features from the model learned by classifier
def get_top_features(classifier, N):
# get top features for each class
sorted_coefs = argsort(classifier.coef_[0])
return {
-1: sorted_coefs[:N].tolist(),
1: reversed(sorted_coefs[-N:].tolist())
}
class DocumentCollection:
def __init__(self, collection=[], label_key="class", text_key="text"):
self.collection = list(collection)
self.label_key = label_key
self.text_key = text_key
def __add__(self, other):
if self.label_key != other.label_key or self.text_key != other.text_key:
raise Exception("Mismatched keys!")
return DocumentCollection(self.collection + other.collection, self.label_key, self.text_key)
def __iadd__(self, other):
if self.label_key != other.label_key or self.text_key != other.text_key:
raise Exception("Mismatched keys!")
self.collection.extend(other.collection)
return self
def __getitem__(self, key):
return [doc[key] if key else None
for doc in self.collection]
def __len__(self):
return len(self.collection)
def __iter__(self):
for doc in self.collection:
yield (self._label(doc), self._text(doc))
raise StopIteration
def _label(self, doc):
return doc[self.label_key]
def _text(self, doc):
return doc[self.text_key]
def flatten(self):
return zip(*iter(self))
@staticmethod
def get_train_test(folds):
for (test_i, test) in enumerate(folds):
train_folds = [folds[i] for i in range(len(folds)) if i != test_i]
train = reduce(lambda a, b: a + b, train_folds, DocumentCollection())
yield (train, test)
if __name__ == '__main__':
main()
| logging.basicConfig(level=logging.INFO) | conditional_block |
Reber, Kotovsky (1997).py | #!/usr/bin/env python2
#-*- coding: utf-8 -*-
#psychopy version: 1.85.6
from __future__ import division
from psychopy import locale_setup, gui, visual, core, data, event, sound
from psychopy.constants import NOT_STARTED, STARTED, FINISHED
from tkinter import Tk
from random import choice, randint
from collections import OrderedDict
import numpy as np
import os
import sys
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
os.chdir(_thisDir)
with open(os.path.join('statistics about participant', 'stats.csv'), 'r') as f:
lines_data = f.readlines()
line_1 = lines_data[0].strip('\n').split(',')
line_2 = lines_data[1].strip('\n').split(',')
info_participant = OrderedDict({cond: int(qty) for cond,qty in zip(line_1, line_2)})
possible_conditions_1 = {cond: int(qty) for cond,qty in info_participant.items() if int(qty) <= 20}
condition_mapping_1 = (u'Control', u'One', u'Two', u'Three')
condition_mapping_2 = ('Control_0', 'Control_2', 'One_0', 'One_2', 'Two_0', 'Two_2', 'Three_0', 'Three_2')
with open(os.path.join('statistics about participant', 'stats_2.csv'), 'r') as f:
lines_data = f.readlines()
line_1 = lines_data[0].strip('\n').split(',')
line_2 = lines_data[1].strip('\n').split(',')
info_participant_2 = OrderedDict({cond: int(qty) for cond,qty in zip(line_1, line_2)})
possible_conditions_2 = {cond: int(qty) for cond,qty in info_participant_2.items() if int(qty) <= 10}
# Store info about the experiment session
expName = u'Implicit Learning and WM'
expInfo = {u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
condition_wm_1 = choice(possible_conditions_1.items())
expInfo[u'group'] = condition_mapping_1.index(condition_wm_1[0]) #group == WM_CONDITION
expInfo[u'Experiment part'] = u'1'
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = os.path.join(_thisDir, u'data\\{}_{}_{}'.format(expInfo['participant'], expName, expInfo['date']))
exp = data.ExperimentHandler(name=expName,
extraInfo=expInfo, runtimeInfo=None,
originPath=_thisDir,
savePickle=True, saveWideText=True,
dataFileName=filename)
#PREFERENCE
TIME_BETWEEN_SOUND = 2
def circle_factory(name, position):
return visual.Circle(
win=win, name=name,
edges=100, radius=100, pos=position,
lineWidth=3, lineColor=[-1,-1,-1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def box_factory(name, position, size):
return visual.Rect(
win=win, name=name,
width=size, height=size, pos=position,
lineWidth=3, lineColor=[-1,-1,-1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def box_open_factory(name, position, size):
return visual.Rect(
win=win, name=name,
width=size, height=3, pos=position,
lineWidth=1, lineColor=[1,1,1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def chosen_object(mouse, list_of_objects):
for object in list_of_objects:
if mouse.isPressedIn(object, buttons=[0]):
return object
return False
def board_state(circles, boxes):
state = []
for box, circle in zip(boxes, circles):
state.append(box.overlaps(circle))
state += [True]
return np.array(state)
def can_move(obj, condition):
if not isinstance(obj, visual.Circle):
return False
index = obj.name
if (condition[index] == True) and all(condition[index+1:5] == False):
return True
return False
lst = ['A', 'C', 'D', 'W', 'F']
if expInfo[u'group']in (u'1', u'2', u'3'):
number = int(expInfo[u'group'])
else:
number = 2
overall_instruction_text = u'''Здравствуйте!
В процессе эксперимента Вам будет необходимо решить задачу
(инструкция на следующем слайде) и выполнить второстепенное задание.
В качестве второстепенного задания Вам будет необходимо слушать буквы английского алфавита и запоминать их.
Когда прозвучит специальный звуковой сигнал, Вам будет необходимо нажать на клавиатуре букву,
которая была произнесена {} от звукового сигнала.
То есть в последовательности: A C D W F *звуковой сигнал*
необходимо нажать на клавиатуре букву "{}"
Если Вам всё понятно, то нажмите пробел.'''.format(number, lst[-int(number)])
problem_instruction = u'''Цель задания - вынуть все пять шаров из коробок.
Шар можно вынуть или положить обратно в коробку, нажав на него компьютерной мышью.
Шар можно вынуть или положить обратно в коробку только в том случае, если верхняя часть коробки открыта.
Например, прямо сейчас два шара справа можно вынуть, а три слева – нельзя.
В процессе вынимания и возвращения шаров в коробки верхушки коробок будут открываться и закрываться.
Суть задания в том, чтобы двигать шары определенным образом, открывая нужные коробки.
Так, Вы сможете вынуть все шары из коробок.'''
#Get screen resolution
root = Tk()
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
win = visual.Window(
size=[screen_width, screen_height], fullscr=True,
screen=0, color=[1,1,1], units = 'pix')
whole_experiment_clock = core.Clock()
mouse = event.Mouse(win=win)
#create all needed objects
STEP = 250
OBJECT_PLACE = xrange( -STEP*2, STEP*2+1, STEP )
OBJECTS_NAMES = range(1,6)
Y_POSITION = 0
HOW_FAR_TO_MOVE = 300
BOX_SIZE = 250
for (name, pos) in zip( OBJECTS_NAMES, OBJECT_PLACE):
exec u'box_{0} = box_factory( {0}, ({1},{2}), {3} )'.format(name, pos, Y_POSITION, BOX_SIZE)
exec u'circle_{0} = circle_factory( {0}, ({1},{2}) )'.format(name, pos, Y_POSITION)
exec u'box_op_{0} = box_open_factory( {0}, ({1},{2}), {3} )'.format(name, pos, Y_POSITION+BOX_SIZE/2, BOX_SIZE-BOX_SIZE*0.02)
BOXES = (box_1, box_2, box_3, box_4, box_5)
CIRCLES = (circle_1, circle_2, circle_3, circle_4, circle_5)
BOX_OPEN = (box_op_1, box_op_2, box_op_3, box_op_4, box_op_5)
BEEP_SOUND = u'stimuli\\zbeep.wav'
SOUNDS = tuple([u'stimuli\\{}.wav'.format(char) for char in 'QWERTYUIOPASDFGHJKLZXCVBNM'] + [BEEP_SOUND]*2)
instruction_text = visual.TextStim(win=win, pos=(0, -300), opacity=1, color='black', text=problem_instruction, height = 30, wrapWidth=2000)
if expInfo[u'group'] in ('1', '2', '3'):
secondary_task_text = u'Напишите звук, который был {} назад\nОтвет должен состоять из одной буквы\n\
Для ответа нажмите enter'.format(expInfo[u'group'])
else:
secondary_task_text = u'Напишите любой звук, главное не выбирайте один и тот же\nОтвет должен состоять из одной буквы\n\
Для ответа нажмите enter'
secondary_task_instruction = visual.TextStim(win=win, pos=(0, 300), opacity=1, color=u'black', text=secondary_task_text, height = 50, wrapWidth=2000)
secondary_task_window = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=u'', height = 200)
mouse.clicks = 0
mouse.time = []
sound_gen = sound.Sound(secs=TIME_BETWEEN_SOUND)
sound_to_play = choice(SOUNDS[:-2])
sound_gen.setSound(sound_to_play)
should_report = True
pressed_down = False #allowing only single mouse clicks
to_display_list = []
to_display_str = u''
sounds = [sound_to_play[-5]]
for experiment in range(2):
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 0
box.opacity = 0
circle.opacity = 0
t = whole_experiment_clock.getTime()
moves = 0
if experiment == 0:
overall_instruction = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=overall_instruction_text, height = 30, wrapWidth=1500)
while True:
if event.getKeys(['space']):
event.clearEvents(eventType='keyboard')
break
overall_instruction.draw()
win.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
elif experiment == 1:
expInfo[u'Experiment part'] = u'2'
condition_wm_2 = choice(possible_conditions_2.items())
type_cond = condition_mapping_2.index(condition_wm_2[0])
expInfo[u'group'] = 0 if type_cond % 2 == 0 else 2#group == WM_CONDITION
for circle in CIRCLES:
circle.pos -= (0, HOW_FAR_TO_MOVE)
second_task_instruction = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=u'Сейчас нужно будет решить задачу ещё раз\n\
Если готовы нажмите пробел', height = 30, wrapWidth=1500)
while True:
if event.getKeys(['space']):
event.clearEvents(eventType='keyboard')
break
second_task_instruction.draw()
win.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 1
box.opacity = 1
circle.opacity = 1
trial = core.Clock()
trial.reset()
time = 0
while True:
| secondary_task_instruction.draw()
if len(keyboard_presses) > 0:
was_pressed = keyboard_presses[0]
if was_pressed == 'escape':
sound_gen.stop()
core.quit()
if was_pressed == 'return' and len(to_display_str) == 1:
exp.addData('Move number', moves)
exp.addData('Board state', condition_to_move[:6])
exp.addData(u'Sounds between respons' , sounds)
exp.addData(u'Participant chousen', to_display_str)
exp.addData(u'Corectness of WM task', 1 if to_display_str == sounds[-int(expInfo[u'group'])] else 0)
exp.addData(u'Time', t)
del sounds[:]
to_display_str = u''
del to_display_list[:]
should_report = False
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
if can_move(circle, condition_to_move):
box_op.opacity = 0
box.opacity = 1
circle.opacity = 1
break
if was_pressed == 'backspace' and len(to_display_list) != 0:
del to_display_list[-1]
elif len(was_pressed) == 1:
to_display_list.append(was_pressed.upper())
to_display_str = ''.join(to_display_list)
del keyboard_presses[:]
secondary_task_window.text = to_display_str
secondary_task_window.draw()
win.flip()
if sound_gen.status == FINISHED and time+sound_gen.secs<t:
should_report = True
sound_gen.status = NOT_STARTED
sound_to_play = choice(SOUNDS) if len(sounds) > int(expInfo[u'group']) and sound_to_play != BEEP_SOUND else SOUNDS[randint(0, len(SOUNDS)-3)]
sound_gen.setSound(sound_to_play)
condition_to_move = board_state(CIRCLES, BOXES)
clicked = chosen_object(mouse, CIRCLES)
can = can_move(clicked, condition_to_move)
#problem is solved checker
if True not in condition_to_move[:5]:
exp.nextEntry()
event.clearEvents(eventType='keyboard')
break
for circle, box_op in zip(CIRCLES, BOX_OPEN):
if can_move(circle, condition_to_move):
box_op.opacity = 1
else:
box_op.opacity = 0
if mouse.getPressed()[0]:
if not pressed_down:
if can:
clicked.pos += (0,HOW_FAR_TO_MOVE) if clicked.pos[1] == 0 else (0, -HOW_FAR_TO_MOVE)
pressed_down = True
moves += 1
exp.addData('Move number', moves)
exp.addData('Board state', condition_to_move[:6])
exp.addData(u'Time', t)
exp.nextEntry()
else:
pressed_down = False
win.flip()
result = u"Спасибо за участие\nЭксперимент занял {:.0f} минут и {:.2f} секунд\nДля выхода нажмите любую клавишу"
final_words = visual.TextStim(win=win, pos=(0, 0), opacity=1, color='black', text=result, height = 50, wrapWidth=1650)
while True:
if event.getKeys():
break
t = whole_experiment_clock.getTime()
final_words.text = result.format(t//60, t%60)
final_words.draw()
win.flip()
print info_participant, info_participant_2
with open(os.path.join('statistics about participant', 'stats.csv'), 'w') as f:
info_participant[condition_wm_1[0]] += 1
for line in range(1,3):
if line == 1:
f.write(u'{},{},{},{}\n'.format(*info_participant.keys()))
else:
f.write(u'{},{},{},{}\n'.format(*info_participant.values()))
with open(os.path.join('statistics about participant', 'stats_2.csv'), 'w') as f:
info_participant_2[condition_wm_2[0]] += 1
for line in range(1,3):
if line == 1:
f.write(u'{},{},{},{},{},{}\n'.format(*info_participant_2.keys()))
else:
f.write(u'{},{},{},{},{},{}\n'.format(*info_participant_2.values()))
core.quit()
| if event.getKeys(keyList=["escape"]):
sound_gen.stop()
exp.nextEntry()
core.quit()
t = trial.getTime()
instruction_text.draw()
if sound_gen.status == NOT_STARTED:
time = trial.getTime()
if sound_to_play != BEEP_SOUND:
sound_itself = sound_to_play[-5]
sounds.append(sound_itself)
sound_gen.play()
if sound_to_play == BEEP_SOUND and sound_gen.status == FINISHED and should_report:
event.clearEvents(eventType='keyboard')
while True:
keyboard_presses = event.getKeys() #отчёт сюда
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 0
box.opacity = 0
circle.opacity = 0
| conditional_block |
Reber, Kotovsky (1997).py | #!/usr/bin/env python2
#-*- coding: utf-8 -*-
#psychopy version: 1.85.6
from __future__ import division
from psychopy import locale_setup, gui, visual, core, data, event, sound
from psychopy.constants import NOT_STARTED, STARTED, FINISHED
from tkinter import Tk
from random import choice, randint
from collections import OrderedDict
import numpy as np
import os
import sys
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
os.chdir(_thisDir)
with open(os.path.join('statistics about participant', 'stats.csv'), 'r') as f:
lines_data = f.readlines()
line_1 = lines_data[0].strip('\n').split(',')
line_2 = lines_data[1].strip('\n').split(',')
info_participant = OrderedDict({cond: int(qty) for cond,qty in zip(line_1, line_2)})
possible_conditions_1 = {cond: int(qty) for cond,qty in info_participant.items() if int(qty) <= 20}
condition_mapping_1 = (u'Control', u'One', u'Two', u'Three')
condition_mapping_2 = ('Control_0', 'Control_2', 'One_0', 'One_2', 'Two_0', 'Two_2', 'Three_0', 'Three_2')
with open(os.path.join('statistics about participant', 'stats_2.csv'), 'r') as f:
lines_data = f.readlines()
line_1 = lines_data[0].strip('\n').split(',')
line_2 = lines_data[1].strip('\n').split(',')
info_participant_2 = OrderedDict({cond: int(qty) for cond,qty in zip(line_1, line_2)})
possible_conditions_2 = {cond: int(qty) for cond,qty in info_participant_2.items() if int(qty) <= 10}
# Store info about the experiment session
expName = u'Implicit Learning and WM'
expInfo = {u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
condition_wm_1 = choice(possible_conditions_1.items())
expInfo[u'group'] = condition_mapping_1.index(condition_wm_1[0]) #group == WM_CONDITION
expInfo[u'Experiment part'] = u'1'
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = os.path.join(_thisDir, u'data\\{}_{}_{}'.format(expInfo['participant'], expName, expInfo['date']))
exp = data.ExperimentHandler(name=expName,
extraInfo=expInfo, runtimeInfo=None,
originPath=_thisDir,
savePickle=True, saveWideText=True,
dataFileName=filename)
#PREFERENCE
TIME_BETWEEN_SOUND = 2
def circle_factory(name, position):
return visual.Circle(
win=win, name=name,
edges=100, radius=100, pos=position,
lineWidth=3, lineColor=[-1,-1,-1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def box_factory(name, position, size):
return visual.Rect(
win=win, name=name,
width=size, height=size, pos=position,
lineWidth=3, lineColor=[-1,-1,-1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def | (name, position, size):
return visual.Rect(
win=win, name=name,
width=size, height=3, pos=position,
lineWidth=1, lineColor=[1,1,1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def chosen_object(mouse, list_of_objects):
for object in list_of_objects:
if mouse.isPressedIn(object, buttons=[0]):
return object
return False
def board_state(circles, boxes):
state = []
for box, circle in zip(boxes, circles):
state.append(box.overlaps(circle))
state += [True]
return np.array(state)
def can_move(obj, condition):
if not isinstance(obj, visual.Circle):
return False
index = obj.name
if (condition[index] == True) and all(condition[index+1:5] == False):
return True
return False
lst = ['A', 'C', 'D', 'W', 'F']
if expInfo[u'group']in (u'1', u'2', u'3'):
number = int(expInfo[u'group'])
else:
number = 2
overall_instruction_text = u'''Здравствуйте!
В процессе эксперимента Вам будет необходимо решить задачу
(инструкция на следующем слайде) и выполнить второстепенное задание.
В качестве второстепенного задания Вам будет необходимо слушать буквы английского алфавита и запоминать их.
Когда прозвучит специальный звуковой сигнал, Вам будет необходимо нажать на клавиатуре букву,
которая была произнесена {} от звукового сигнала.
То есть в последовательности: A C D W F *звуковой сигнал*
необходимо нажать на клавиатуре букву "{}"
Если Вам всё понятно, то нажмите пробел.'''.format(number, lst[-int(number)])
problem_instruction = u'''Цель задания - вынуть все пять шаров из коробок.
Шар можно вынуть или положить обратно в коробку, нажав на него компьютерной мышью.
Шар можно вынуть или положить обратно в коробку только в том случае, если верхняя часть коробки открыта.
Например, прямо сейчас два шара справа можно вынуть, а три слева – нельзя.
В процессе вынимания и возвращения шаров в коробки верхушки коробок будут открываться и закрываться.
Суть задания в том, чтобы двигать шары определенным образом, открывая нужные коробки.
Так, Вы сможете вынуть все шары из коробок.'''
#Get screen resolution
root = Tk()
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
win = visual.Window(
size=[screen_width, screen_height], fullscr=True,
screen=0, color=[1,1,1], units = 'pix')
whole_experiment_clock = core.Clock()
mouse = event.Mouse(win=win)
#create all needed objects
STEP = 250
OBJECT_PLACE = xrange( -STEP*2, STEP*2+1, STEP )
OBJECTS_NAMES = range(1,6)
Y_POSITION = 0
HOW_FAR_TO_MOVE = 300
BOX_SIZE = 250
for (name, pos) in zip( OBJECTS_NAMES, OBJECT_PLACE):
exec u'box_{0} = box_factory( {0}, ({1},{2}), {3} )'.format(name, pos, Y_POSITION, BOX_SIZE)
exec u'circle_{0} = circle_factory( {0}, ({1},{2}) )'.format(name, pos, Y_POSITION)
exec u'box_op_{0} = box_open_factory( {0}, ({1},{2}), {3} )'.format(name, pos, Y_POSITION+BOX_SIZE/2, BOX_SIZE-BOX_SIZE*0.02)
BOXES = (box_1, box_2, box_3, box_4, box_5)
CIRCLES = (circle_1, circle_2, circle_3, circle_4, circle_5)
BOX_OPEN = (box_op_1, box_op_2, box_op_3, box_op_4, box_op_5)
BEEP_SOUND = u'stimuli\\zbeep.wav'
SOUNDS = tuple([u'stimuli\\{}.wav'.format(char) for char in 'QWERTYUIOPASDFGHJKLZXCVBNM'] + [BEEP_SOUND]*2)
instruction_text = visual.TextStim(win=win, pos=(0, -300), opacity=1, color='black', text=problem_instruction, height = 30, wrapWidth=2000)
if expInfo[u'group'] in ('1', '2', '3'):
secondary_task_text = u'Напишите звук, который был {} назад\nОтвет должен состоять из одной буквы\n\
Для ответа нажмите enter'.format(expInfo[u'group'])
else:
secondary_task_text = u'Напишите любой звук, главное не выбирайте один и тот же\nОтвет должен состоять из одной буквы\n\
Для ответа нажмите enter'
secondary_task_instruction = visual.TextStim(win=win, pos=(0, 300), opacity=1, color=u'black', text=secondary_task_text, height = 50, wrapWidth=2000)
secondary_task_window = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=u'', height = 200)
mouse.clicks = 0
mouse.time = []
sound_gen = sound.Sound(secs=TIME_BETWEEN_SOUND)
sound_to_play = choice(SOUNDS[:-2])
sound_gen.setSound(sound_to_play)
should_report = True
pressed_down = False #allowing only single mouse clicks
to_display_list = []
to_display_str = u''
sounds = [sound_to_play[-5]]
for experiment in range(2):
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 0
box.opacity = 0
circle.opacity = 0
t = whole_experiment_clock.getTime()
moves = 0
if experiment == 0:
overall_instruction = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=overall_instruction_text, height = 30, wrapWidth=1500)
while True:
if event.getKeys(['space']):
event.clearEvents(eventType='keyboard')
break
overall_instruction.draw()
win.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
elif experiment == 1:
expInfo[u'Experiment part'] = u'2'
condition_wm_2 = choice(possible_conditions_2.items())
type_cond = condition_mapping_2.index(condition_wm_2[0])
expInfo[u'group'] = 0 if type_cond % 2 == 0 else 2#group == WM_CONDITION
for circle in CIRCLES:
circle.pos -= (0, HOW_FAR_TO_MOVE)
second_task_instruction = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=u'Сейчас нужно будет решить задачу ещё раз\n\
Если готовы нажмите пробел', height = 30, wrapWidth=1500)
while True:
if event.getKeys(['space']):
event.clearEvents(eventType='keyboard')
break
second_task_instruction.draw()
win.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 1
box.opacity = 1
circle.opacity = 1
trial = core.Clock()
trial.reset()
time = 0
while True:
if event.getKeys(keyList=["escape"]):
sound_gen.stop()
exp.nextEntry()
core.quit()
t = trial.getTime()
instruction_text.draw()
if sound_gen.status == NOT_STARTED:
time = trial.getTime()
if sound_to_play != BEEP_SOUND:
sound_itself = sound_to_play[-5]
sounds.append(sound_itself)
sound_gen.play()
if sound_to_play == BEEP_SOUND and sound_gen.status == FINISHED and should_report:
event.clearEvents(eventType='keyboard')
while True:
keyboard_presses = event.getKeys() #отчёт сюда
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 0
box.opacity = 0
circle.opacity = 0
secondary_task_instruction.draw()
if len(keyboard_presses) > 0:
was_pressed = keyboard_presses[0]
if was_pressed == 'escape':
sound_gen.stop()
core.quit()
if was_pressed == 'return' and len(to_display_str) == 1:
exp.addData('Move number', moves)
exp.addData('Board state', condition_to_move[:6])
exp.addData(u'Sounds between respons' , sounds)
exp.addData(u'Participant chousen', to_display_str)
exp.addData(u'Corectness of WM task', 1 if to_display_str == sounds[-int(expInfo[u'group'])] else 0)
exp.addData(u'Time', t)
del sounds[:]
to_display_str = u''
del to_display_list[:]
should_report = False
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
if can_move(circle, condition_to_move):
box_op.opacity = 0
box.opacity = 1
circle.opacity = 1
break
if was_pressed == 'backspace' and len(to_display_list) != 0:
del to_display_list[-1]
elif len(was_pressed) == 1:
to_display_list.append(was_pressed.upper())
to_display_str = ''.join(to_display_list)
del keyboard_presses[:]
secondary_task_window.text = to_display_str
secondary_task_window.draw()
win.flip()
if sound_gen.status == FINISHED and time+sound_gen.secs<t:
should_report = True
sound_gen.status = NOT_STARTED
sound_to_play = choice(SOUNDS) if len(sounds) > int(expInfo[u'group']) and sound_to_play != BEEP_SOUND else SOUNDS[randint(0, len(SOUNDS)-3)]
sound_gen.setSound(sound_to_play)
condition_to_move = board_state(CIRCLES, BOXES)
clicked = chosen_object(mouse, CIRCLES)
can = can_move(clicked, condition_to_move)
#problem is solved checker
if True not in condition_to_move[:5]:
exp.nextEntry()
event.clearEvents(eventType='keyboard')
break
for circle, box_op in zip(CIRCLES, BOX_OPEN):
if can_move(circle, condition_to_move):
box_op.opacity = 1
else:
box_op.opacity = 0
if mouse.getPressed()[0]:
if not pressed_down:
if can:
clicked.pos += (0,HOW_FAR_TO_MOVE) if clicked.pos[1] == 0 else (0, -HOW_FAR_TO_MOVE)
pressed_down = True
moves += 1
exp.addData('Move number', moves)
exp.addData('Board state', condition_to_move[:6])
exp.addData(u'Time', t)
exp.nextEntry()
else:
pressed_down = False
win.flip()
result = u"Спасибо за участие\nЭксперимент занял {:.0f} минут и {:.2f} секунд\nДля выхода нажмите любую клавишу"
final_words = visual.TextStim(win=win, pos=(0, 0), opacity=1, color='black', text=result, height = 50, wrapWidth=1650)
while True:
if event.getKeys():
break
t = whole_experiment_clock.getTime()
final_words.text = result.format(t//60, t%60)
final_words.draw()
win.flip()
print info_participant, info_participant_2
with open(os.path.join('statistics about participant', 'stats.csv'), 'w') as f:
info_participant[condition_wm_1[0]] += 1
for line in range(1,3):
if line == 1:
f.write(u'{},{},{},{}\n'.format(*info_participant.keys()))
else:
f.write(u'{},{},{},{}\n'.format(*info_participant.values()))
with open(os.path.join('statistics about participant', 'stats_2.csv'), 'w') as f:
info_participant_2[condition_wm_2[0]] += 1
for line in range(1,3):
if line == 1:
f.write(u'{},{},{},{},{},{}\n'.format(*info_participant_2.keys()))
else:
f.write(u'{},{},{},{},{},{}\n'.format(*info_participant_2.values()))
core.quit()
| box_open_factory | identifier_name |
Reber, Kotovsky (1997).py | #!/usr/bin/env python2
#-*- coding: utf-8 -*-
#psychopy version: 1.85.6
from __future__ import division
from psychopy import locale_setup, gui, visual, core, data, event, sound
from psychopy.constants import NOT_STARTED, STARTED, FINISHED
from tkinter import Tk
from random import choice, randint
from collections import OrderedDict
import numpy as np
import os
import sys
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
os.chdir(_thisDir)
with open(os.path.join('statistics about participant', 'stats.csv'), 'r') as f:
lines_data = f.readlines()
line_1 = lines_data[0].strip('\n').split(',')
line_2 = lines_data[1].strip('\n').split(',')
info_participant = OrderedDict({cond: int(qty) for cond,qty in zip(line_1, line_2)})
possible_conditions_1 = {cond: int(qty) for cond,qty in info_participant.items() if int(qty) <= 20}
condition_mapping_1 = (u'Control', u'One', u'Two', u'Three')
condition_mapping_2 = ('Control_0', 'Control_2', 'One_0', 'One_2', 'Two_0', 'Two_2', 'Three_0', 'Three_2')
with open(os.path.join('statistics about participant', 'stats_2.csv'), 'r') as f:
lines_data = f.readlines()
line_1 = lines_data[0].strip('\n').split(',')
line_2 = lines_data[1].strip('\n').split(',')
info_participant_2 = OrderedDict({cond: int(qty) for cond,qty in zip(line_1, line_2)})
possible_conditions_2 = {cond: int(qty) for cond,qty in info_participant_2.items() if int(qty) <= 10}
# Store info about the experiment session
expName = u'Implicit Learning and WM'
expInfo = {u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
condition_wm_1 = choice(possible_conditions_1.items())
expInfo[u'group'] = condition_mapping_1.index(condition_wm_1[0]) #group == WM_CONDITION
expInfo[u'Experiment part'] = u'1'
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = os.path.join(_thisDir, u'data\\{}_{}_{}'.format(expInfo['participant'], expName, expInfo['date']))
exp = data.ExperimentHandler(name=expName,
extraInfo=expInfo, runtimeInfo=None,
originPath=_thisDir,
savePickle=True, saveWideText=True,
dataFileName=filename)
#PREFERENCE
TIME_BETWEEN_SOUND = 2
def circle_factory(name, position):
return visual.Circle(
win=win, name=name,
edges=100, radius=100, pos=position,
lineWidth=3, lineColor=[-1,-1,-1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def box_factory(name, position, size):
return visual.Rect(
win=win, name=name,
width=size, height=size, pos=position,
lineWidth=3, lineColor=[-1,-1,-1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def box_open_factory(name, position, size):
return visual.Rect(
win=win, name=name,
width=size, height=3, pos=position,
lineWidth=1, lineColor=[1,1,1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def chosen_object(mouse, list_of_objects):
for object in list_of_objects:
if mouse.isPressedIn(object, buttons=[0]):
return object
return False
def board_state(circles, boxes):
|
def can_move(obj, condition):
if not isinstance(obj, visual.Circle):
return False
index = obj.name
if (condition[index] == True) and all(condition[index+1:5] == False):
return True
return False
lst = ['A', 'C', 'D', 'W', 'F']
if expInfo[u'group']in (u'1', u'2', u'3'):
number = int(expInfo[u'group'])
else:
number = 2
overall_instruction_text = u'''Здравствуйте!
В процессе эксперимента Вам будет необходимо решить задачу
(инструкция на следующем слайде) и выполнить второстепенное задание.
В качестве второстепенного задания Вам будет необходимо слушать буквы английского алфавита и запоминать их.
Когда прозвучит специальный звуковой сигнал, Вам будет необходимо нажать на клавиатуре букву,
которая была произнесена {} от звукового сигнала.
То есть в последовательности: A C D W F *звуковой сигнал*
необходимо нажать на клавиатуре букву "{}"
Если Вам всё понятно, то нажмите пробел.'''.format(number, lst[-int(number)])
problem_instruction = u'''Цель задания - вынуть все пять шаров из коробок.
Шар можно вынуть или положить обратно в коробку, нажав на него компьютерной мышью.
Шар можно вынуть или положить обратно в коробку только в том случае, если верхняя часть коробки открыта.
Например, прямо сейчас два шара справа можно вынуть, а три слева – нельзя.
В процессе вынимания и возвращения шаров в коробки верхушки коробок будут открываться и закрываться.
Суть задания в том, чтобы двигать шары определенным образом, открывая нужные коробки.
Так, Вы сможете вынуть все шары из коробок.'''
#Get screen resolution
root = Tk()
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
win = visual.Window(
size=[screen_width, screen_height], fullscr=True,
screen=0, color=[1,1,1], units = 'pix')
whole_experiment_clock = core.Clock()
mouse = event.Mouse(win=win)
#create all needed objects
STEP = 250
OBJECT_PLACE = xrange( -STEP*2, STEP*2+1, STEP )
OBJECTS_NAMES = range(1,6)
Y_POSITION = 0
HOW_FAR_TO_MOVE = 300
BOX_SIZE = 250
for (name, pos) in zip( OBJECTS_NAMES, OBJECT_PLACE):
exec u'box_{0} = box_factory( {0}, ({1},{2}), {3} )'.format(name, pos, Y_POSITION, BOX_SIZE)
exec u'circle_{0} = circle_factory( {0}, ({1},{2}) )'.format(name, pos, Y_POSITION)
exec u'box_op_{0} = box_open_factory( {0}, ({1},{2}), {3} )'.format(name, pos, Y_POSITION+BOX_SIZE/2, BOX_SIZE-BOX_SIZE*0.02)
BOXES = (box_1, box_2, box_3, box_4, box_5)
CIRCLES = (circle_1, circle_2, circle_3, circle_4, circle_5)
BOX_OPEN = (box_op_1, box_op_2, box_op_3, box_op_4, box_op_5)
BEEP_SOUND = u'stimuli\\zbeep.wav'
SOUNDS = tuple([u'stimuli\\{}.wav'.format(char) for char in 'QWERTYUIOPASDFGHJKLZXCVBNM'] + [BEEP_SOUND]*2)
instruction_text = visual.TextStim(win=win, pos=(0, -300), opacity=1, color='black', text=problem_instruction, height = 30, wrapWidth=2000)
if expInfo[u'group'] in ('1', '2', '3'):
secondary_task_text = u'Напишите звук, который был {} назад\nОтвет должен состоять из одной буквы\n\
Для ответа нажмите enter'.format(expInfo[u'group'])
else:
secondary_task_text = u'Напишите любой звук, главное не выбирайте один и тот же\nОтвет должен состоять из одной буквы\n\
Для ответа нажмите enter'
secondary_task_instruction = visual.TextStim(win=win, pos=(0, 300), opacity=1, color=u'black', text=secondary_task_text, height = 50, wrapWidth=2000)
secondary_task_window = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=u'', height = 200)
mouse.clicks = 0
mouse.time = []
sound_gen = sound.Sound(secs=TIME_BETWEEN_SOUND)
sound_to_play = choice(SOUNDS[:-2])
sound_gen.setSound(sound_to_play)
should_report = True
pressed_down = False #allowing only single mouse clicks
to_display_list = []
to_display_str = u''
sounds = [sound_to_play[-5]]
for experiment in range(2):
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 0
box.opacity = 0
circle.opacity = 0
t = whole_experiment_clock.getTime()
moves = 0
if experiment == 0:
overall_instruction = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=overall_instruction_text, height = 30, wrapWidth=1500)
while True:
if event.getKeys(['space']):
event.clearEvents(eventType='keyboard')
break
overall_instruction.draw()
win.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
elif experiment == 1:
expInfo[u'Experiment part'] = u'2'
condition_wm_2 = choice(possible_conditions_2.items())
type_cond = condition_mapping_2.index(condition_wm_2[0])
expInfo[u'group'] = 0 if type_cond % 2 == 0 else 2#group == WM_CONDITION
for circle in CIRCLES:
circle.pos -= (0, HOW_FAR_TO_MOVE)
second_task_instruction = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=u'Сейчас нужно будет решить задачу ещё раз\n\
Если готовы нажмите пробел', height = 30, wrapWidth=1500)
while True:
if event.getKeys(['space']):
event.clearEvents(eventType='keyboard')
break
second_task_instruction.draw()
win.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 1
box.opacity = 1
circle.opacity = 1
trial = core.Clock()
trial.reset()
time = 0
while True:
if event.getKeys(keyList=["escape"]):
sound_gen.stop()
exp.nextEntry()
core.quit()
t = trial.getTime()
instruction_text.draw()
if sound_gen.status == NOT_STARTED:
time = trial.getTime()
if sound_to_play != BEEP_SOUND:
sound_itself = sound_to_play[-5]
sounds.append(sound_itself)
sound_gen.play()
if sound_to_play == BEEP_SOUND and sound_gen.status == FINISHED and should_report:
event.clearEvents(eventType='keyboard')
while True:
keyboard_presses = event.getKeys() #отчёт сюда
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 0
box.opacity = 0
circle.opacity = 0
secondary_task_instruction.draw()
if len(keyboard_presses) > 0:
was_pressed = keyboard_presses[0]
if was_pressed == 'escape':
sound_gen.stop()
core.quit()
if was_pressed == 'return' and len(to_display_str) == 1:
exp.addData('Move number', moves)
exp.addData('Board state', condition_to_move[:6])
exp.addData(u'Sounds between respons' , sounds)
exp.addData(u'Participant chousen', to_display_str)
exp.addData(u'Corectness of WM task', 1 if to_display_str == sounds[-int(expInfo[u'group'])] else 0)
exp.addData(u'Time', t)
del sounds[:]
to_display_str = u''
del to_display_list[:]
should_report = False
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
if can_move(circle, condition_to_move):
box_op.opacity = 0
box.opacity = 1
circle.opacity = 1
break
if was_pressed == 'backspace' and len(to_display_list) != 0:
del to_display_list[-1]
elif len(was_pressed) == 1:
to_display_list.append(was_pressed.upper())
to_display_str = ''.join(to_display_list)
del keyboard_presses[:]
secondary_task_window.text = to_display_str
secondary_task_window.draw()
win.flip()
if sound_gen.status == FINISHED and time+sound_gen.secs<t:
should_report = True
sound_gen.status = NOT_STARTED
sound_to_play = choice(SOUNDS) if len(sounds) > int(expInfo[u'group']) and sound_to_play != BEEP_SOUND else SOUNDS[randint(0, len(SOUNDS)-3)]
sound_gen.setSound(sound_to_play)
condition_to_move = board_state(CIRCLES, BOXES)
clicked = chosen_object(mouse, CIRCLES)
can = can_move(clicked, condition_to_move)
#problem is solved checker
if True not in condition_to_move[:5]:
exp.nextEntry()
event.clearEvents(eventType='keyboard')
break
for circle, box_op in zip(CIRCLES, BOX_OPEN):
if can_move(circle, condition_to_move):
box_op.opacity = 1
else:
box_op.opacity = 0
if mouse.getPressed()[0]:
if not pressed_down:
if can:
clicked.pos += (0,HOW_FAR_TO_MOVE) if clicked.pos[1] == 0 else (0, -HOW_FAR_TO_MOVE)
pressed_down = True
moves += 1
exp.addData('Move number', moves)
exp.addData('Board state', condition_to_move[:6])
exp.addData(u'Time', t)
exp.nextEntry()
else:
pressed_down = False
win.flip()
result = u"Спасибо за участие\nЭксперимент занял {:.0f} минут и {:.2f} секунд\nДля выхода нажмите любую клавишу"
final_words = visual.TextStim(win=win, pos=(0, 0), opacity=1, color='black', text=result, height = 50, wrapWidth=1650)
while True:
if event.getKeys():
break
t = whole_experiment_clock.getTime()
final_words.text = result.format(t//60, t%60)
final_words.draw()
win.flip()
print info_participant, info_participant_2
with open(os.path.join('statistics about participant', 'stats.csv'), 'w') as f:
info_participant[condition_wm_1[0]] += 1
for line in range(1,3):
if line == 1:
f.write(u'{},{},{},{}\n'.format(*info_participant.keys()))
else:
f.write(u'{},{},{},{}\n'.format(*info_participant.values()))
with open(os.path.join('statistics about participant', 'stats_2.csv'), 'w') as f:
info_participant_2[condition_wm_2[0]] += 1
for line in range(1,3):
if line == 1:
f.write(u'{},{},{},{},{},{}\n'.format(*info_participant_2.keys()))
else:
f.write(u'{},{},{},{},{},{}\n'.format(*info_participant_2.values()))
core.quit()
| state = []
for box, circle in zip(boxes, circles):
state.append(box.overlaps(circle))
state += [True]
return np.array(state) | identifier_body |
Reber, Kotovsky (1997).py | #!/usr/bin/env python2
#-*- coding: utf-8 -*-
#psychopy version: 1.85.6
from __future__ import division
from psychopy import locale_setup, gui, visual, core, data, event, sound
from psychopy.constants import NOT_STARTED, STARTED, FINISHED
from tkinter import Tk
from random import choice, randint
from collections import OrderedDict
import numpy as np
import os
import sys
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
os.chdir(_thisDir)
with open(os.path.join('statistics about participant', 'stats.csv'), 'r') as f:
lines_data = f.readlines()
line_1 = lines_data[0].strip('\n').split(',')
line_2 = lines_data[1].strip('\n').split(',')
info_participant = OrderedDict({cond: int(qty) for cond,qty in zip(line_1, line_2)})
possible_conditions_1 = {cond: int(qty) for cond,qty in info_participant.items() if int(qty) <= 20}
condition_mapping_1 = (u'Control', u'One', u'Two', u'Three')
condition_mapping_2 = ('Control_0', 'Control_2', 'One_0', 'One_2', 'Two_0', 'Two_2', 'Three_0', 'Three_2')
with open(os.path.join('statistics about participant', 'stats_2.csv'), 'r') as f:
lines_data = f.readlines()
line_1 = lines_data[0].strip('\n').split(',')
line_2 = lines_data[1].strip('\n').split(',')
info_participant_2 = OrderedDict({cond: int(qty) for cond,qty in zip(line_1, line_2)})
possible_conditions_2 = {cond: int(qty) for cond,qty in info_participant_2.items() if int(qty) <= 10}
# Store info about the experiment session
expName = u'Implicit Learning and WM'
expInfo = {u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
condition_wm_1 = choice(possible_conditions_1.items())
expInfo[u'group'] = condition_mapping_1.index(condition_wm_1[0]) #group == WM_CONDITION
expInfo[u'Experiment part'] = u'1'
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = os.path.join(_thisDir, u'data\\{}_{}_{}'.format(expInfo['participant'], expName, expInfo['date']))
exp = data.ExperimentHandler(name=expName,
extraInfo=expInfo, runtimeInfo=None,
originPath=_thisDir,
savePickle=True, saveWideText=True,
dataFileName=filename)
#PREFERENCE
TIME_BETWEEN_SOUND = 2
def circle_factory(name, position):
return visual.Circle(
win=win, name=name,
edges=100, radius=100, pos=position,
lineWidth=3, lineColor=[-1,-1,-1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def box_factory(name, position, size):
return visual.Rect(
win=win, name=name,
width=size, height=size, pos=position,
lineWidth=3, lineColor=[-1,-1,-1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def box_open_factory(name, position, size):
return visual.Rect(
win=win, name=name,
width=size, height=3, pos=position,
lineWidth=1, lineColor=[1,1,1],
fillColor=[1,1,1], opacity=0, interpolate=True, autoDraw=True,
)
def chosen_object(mouse, list_of_objects):
for object in list_of_objects:
if mouse.isPressedIn(object, buttons=[0]):
return object
return False
def board_state(circles, boxes):
state = []
for box, circle in zip(boxes, circles):
state.append(box.overlaps(circle))
state += [True]
return np.array(state)
def can_move(obj, condition):
if not isinstance(obj, visual.Circle):
return False
index = obj.name
if (condition[index] == True) and all(condition[index+1:5] == False):
return True
return False
lst = ['A', 'C', 'D', 'W', 'F']
if expInfo[u'group']in (u'1', u'2', u'3'):
number = int(expInfo[u'group'])
else:
number = 2
overall_instruction_text = u'''Здравствуйте!
В процессе эксперимента Вам будет необходимо решить задачу
(инструкция на следующем слайде) и выполнить второстепенное задание.
В качестве второстепенного задания Вам будет необходимо слушать буквы английского алфавита и запоминать их.
Когда прозвучит специальный звуковой сигнал, Вам будет необходимо нажать на клавиатуре букву,
которая была произнесена {} от звукового сигнала.
То есть в последовательности: A C D W F *звуковой сигнал*
необходимо нажать на клавиатуре букву "{}"
Если Вам всё понятно, то нажмите пробел.'''.format(number, lst[-int(number)])
problem_instruction = u'''Цель задания - вынуть все пять шаров из коробок.
Шар можно вынуть или положить обратно в коробку, нажав на него компьютерной мышью.
Шар можно вынуть или положить обратно в коробку только в том случае, если верхняя часть коробки открыта.
Например, прямо сейчас два шара справа можно вынуть, а три слева – нельзя.
В процессе вынимания и возвращения шаров в коробки верхушки коробок будут открываться и закрываться.
Суть задания в том, чтобы двигать шары определенным образом, открывая нужные коробки.
Так, Вы сможете вынуть все шары из коробок.'''
#Get screen resolution
root = Tk()
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
win = visual.Window(
size=[screen_width, screen_height], fullscr=True,
screen=0, color=[1,1,1], units = 'pix')
whole_experiment_clock = core.Clock()
mouse = event.Mouse(win=win)
#create all needed objects
STEP = 250
OBJECT_PLACE = xrange( -STEP*2, STEP*2+1, STEP )
OBJECTS_NAMES = range(1,6)
Y_POSITION = 0
HOW_FAR_TO_MOVE = 300
BOX_SIZE = 250
for (name, pos) in zip( OBJECTS_NAMES, OBJECT_PLACE):
exec u'box_{0} = box_factory( {0}, ({1},{2}), {3} )'.format(name, pos, Y_POSITION, BOX_SIZE)
exec u'circle_{0} = circle_factory( {0}, ({1},{2}) )'.format(name, pos, Y_POSITION)
exec u'box_op_{0} = box_open_factory( {0}, ({1},{2}), {3} )'.format(name, pos, Y_POSITION+BOX_SIZE/2, BOX_SIZE-BOX_SIZE*0.02)
BOXES = (box_1, box_2, box_3, box_4, box_5)
CIRCLES = (circle_1, circle_2, circle_3, circle_4, circle_5)
BOX_OPEN = (box_op_1, box_op_2, box_op_3, box_op_4, box_op_5)
BEEP_SOUND = u'stimuli\\zbeep.wav'
SOUNDS = tuple([u'stimuli\\{}.wav'.format(char) for char in 'QWERTYUIOPASDFGHJKLZXCVBNM'] + [BEEP_SOUND]*2)
instruction_text = visual.TextStim(win=win, pos=(0, -300), opacity=1, color='black', text=problem_instruction, height = 30, wrapWidth=2000)
if expInfo[u'group'] in ('1', '2', '3'):
secondary_task_text = u'Напишите звук, который был {} назад\nОтвет должен состоять из одной буквы\n\
Для ответа нажмите enter'.format(expInfo[u'group'])
else:
secondary_task_text = u'Напишите любой звук, главное не выбирайте один и тот же\nОтвет должен состоять из одной буквы\n\
Для ответа нажмите enter'
secondary_task_instruction = visual.TextStim(win=win, pos=(0, 300), opacity=1, color=u'black', text=secondary_task_text, height = 50, wrapWidth=2000)
secondary_task_window = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=u'', height = 200)
mouse.clicks = 0
mouse.time = []
sound_gen = sound.Sound(secs=TIME_BETWEEN_SOUND)
sound_to_play = choice(SOUNDS[:-2])
sound_gen.setSound(sound_to_play)
should_report = True
pressed_down = False #allowing only single mouse clicks
to_display_list = []
to_display_str = u''
sounds = [sound_to_play[-5]]
for experiment in range(2):
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 0
box.opacity = 0
circle.opacity = 0
t = whole_experiment_clock.getTime()
moves = 0
if experiment == 0:
overall_instruction = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=overall_instruction_text, height = 30, wrapWidth=1500)
while True:
if event.getKeys(['space']):
event.clearEvents(eventType='keyboard')
break
overall_instruction.draw()
win.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
elif experiment == 1:
expInfo[u'Experiment part'] = u'2'
condition_wm_2 = choice(possible_conditions_2.items())
type_cond = condition_mapping_2.index(condition_wm_2[0])
expInfo[u'group'] = 0 if type_cond % 2 == 0 else 2#group == WM_CONDITION
for circle in CIRCLES:
circle.pos -= (0, HOW_FAR_TO_MOVE)
second_task_instruction = visual.TextStim(win=win, pos=(0, 0), opacity=1, color=u'black', text=u'Сейчас нужно будет решить задачу ещё раз\n\
Если готовы нажмите пробел', height = 30, wrapWidth=1500)
while True:
if event.getKeys(['space']):
event.clearEvents(eventType='keyboard')
break
second_task_instruction.draw()
win.flip()
if event.getKeys(keyList=["escape"]):
core.quit()
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 1
box.opacity = 1
circle.opacity = 1
trial = core.Clock()
trial.reset()
time = 0
while True:
if event.getKeys(keyList=["escape"]):
sound_gen.stop()
exp.nextEntry()
core.quit()
t = trial.getTime()
instruction_text.draw()
if sound_gen.status == NOT_STARTED:
time = trial.getTime()
if sound_to_play != BEEP_SOUND:
sound_itself = sound_to_play[-5]
sounds.append(sound_itself)
sound_gen.play()
if sound_to_play == BEEP_SOUND and sound_gen.status == FINISHED and should_report:
event.clearEvents(eventType='keyboard')
while True:
keyboard_presses = event.getKeys() #отчёт сюда
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
box_op.opacity = 0
box.opacity = 0
circle.opacity = 0
secondary_task_instruction.draw()
if len(keyboard_presses) > 0:
was_pressed = keyboard_presses[0]
if was_pressed == 'escape':
sound_gen.stop()
core.quit()
if was_pressed == 'return' and len(to_display_str) == 1:
exp.addData('Move number', moves)
exp.addData('Board state', condition_to_move[:6])
exp.addData(u'Sounds between respons' , sounds)
exp.addData(u'Participant chousen', to_display_str)
exp.addData(u'Corectness of WM task', 1 if to_display_str == sounds[-int(expInfo[u'group'])] else 0)
exp.addData(u'Time', t)
del sounds[:]
to_display_str = u''
del to_display_list[:]
should_report = False
for circle, box_op, box in zip(CIRCLES, BOX_OPEN, BOXES):
if can_move(circle, condition_to_move):
box_op.opacity = 0
box.opacity = 1
circle.opacity = 1
break
if was_pressed == 'backspace' and len(to_display_list) != 0:
del to_display_list[-1]
elif len(was_pressed) == 1:
to_display_list.append(was_pressed.upper())
to_display_str = ''.join(to_display_list)
del keyboard_presses[:]
secondary_task_window.text = to_display_str
secondary_task_window.draw()
win.flip()
if sound_gen.status == FINISHED and time+sound_gen.secs<t:
should_report = True
sound_gen.status = NOT_STARTED
sound_to_play = choice(SOUNDS) if len(sounds) > int(expInfo[u'group']) and sound_to_play != BEEP_SOUND else SOUNDS[randint(0, len(SOUNDS)-3)]
sound_gen.setSound(sound_to_play)
condition_to_move = board_state(CIRCLES, BOXES)
clicked = chosen_object(mouse, CIRCLES)
can = can_move(clicked, condition_to_move)
#problem is solved checker
if True not in condition_to_move[:5]:
exp.nextEntry()
event.clearEvents(eventType='keyboard')
break
for circle, box_op in zip(CIRCLES, BOX_OPEN):
if can_move(circle, condition_to_move):
box_op.opacity = 1 | else:
box_op.opacity = 0
if mouse.getPressed()[0]:
if not pressed_down:
if can:
clicked.pos += (0,HOW_FAR_TO_MOVE) if clicked.pos[1] == 0 else (0, -HOW_FAR_TO_MOVE)
pressed_down = True
moves += 1
exp.addData('Move number', moves)
exp.addData('Board state', condition_to_move[:6])
exp.addData(u'Time', t)
exp.nextEntry()
else:
pressed_down = False
win.flip()
result = u"Спасибо за участие\nЭксперимент занял {:.0f} минут и {:.2f} секунд\nДля выхода нажмите любую клавишу"
final_words = visual.TextStim(win=win, pos=(0, 0), opacity=1, color='black', text=result, height = 50, wrapWidth=1650)
while True:
if event.getKeys():
break
t = whole_experiment_clock.getTime()
final_words.text = result.format(t//60, t%60)
final_words.draw()
win.flip()
print info_participant, info_participant_2
with open(os.path.join('statistics about participant', 'stats.csv'), 'w') as f:
info_participant[condition_wm_1[0]] += 1
for line in range(1,3):
if line == 1:
f.write(u'{},{},{},{}\n'.format(*info_participant.keys()))
else:
f.write(u'{},{},{},{}\n'.format(*info_participant.values()))
with open(os.path.join('statistics about participant', 'stats_2.csv'), 'w') as f:
info_participant_2[condition_wm_2[0]] += 1
for line in range(1,3):
if line == 1:
f.write(u'{},{},{},{},{},{}\n'.format(*info_participant_2.keys()))
else:
f.write(u'{},{},{},{},{},{}\n'.format(*info_participant_2.values()))
core.quit() | random_line_split | |
renderer.go | // The package man outputs man pages from mmmark markdown.
package man
// Lots of code copied from https://github.com/cpuguy83/go-md2man, but adapated to mmark
// and made to support mmark features.
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
"github.com/gomarkdown/markdown/ast"
"github.com/gomarkdown/markdown/html"
"github.com/mmarkdown/mmark/v2/lang"
"github.com/mmarkdown/mmark/v2/mast"
)
// Flags control optional behavior of Markdown renderer.
type Flags int
// HTML renderer configuration options.
const (
FlagsNone Flags = 0
ManFragment Flags = 1 << iota // Don't generate a complete document
CommonFlags Flags = FlagsNone
)
// RendererOptions is a collection of supplementary parameters tweaking
// the behavior of various parts of Markdown renderer.
type RendererOptions struct {
Flags Flags // Flags allow customizing this renderer's behavior
Language lang.Lang // Output language for the document.
// if set, called at the start of RenderNode(). Allows replacing rendering of some nodes
RenderNodeHook html.RenderNodeFunc
// Comments is a list of comments the renderer should detect when
// parsing code blocks and detecting callouts.
Comments [][]byte
}
// Renderer implements Renderer interface for Markdown output.
type Renderer struct {
opts RendererOptions
Title *mast.Title
listLevel int
allListLevel int
}
// NewRenderer creates and configures an Renderer object, which satisfies the Renderer interface.
func NewRenderer(opts RendererOptions) *Renderer {
return &Renderer{opts: opts}
}
func (r *Renderer) hardBreak(w io.Writer, node *ast.Hardbreak) {
r.outs(w, "\n.br\n")
}
func (r *Renderer) matter(w io.Writer, node *ast.DocumentMatter, entering bool) {
// TODO: what should this output?
}
func (r *Renderer) title(w io.Writer, node *mast.Title, entering bool) {
if !entering {
return
}
if node.Date.IsZero() {
node.Date = time.Now().UTC()
}
// track back to first space and assume the rest is the section, don't parse it as a number
i := len(node.Title) - 1
for i > 0 && node.Title[i-1] != ' ' {
i--
}
section := 1
title := node.Title
switch {
case i > 0:
d, err := strconv.Atoi(node.Title[i:])
if err != nil {
log.Print("No section number found at end of title, defaulting to 1")
} else {
section = d
title = node.Title[:i-1]
}
}
if i == 0 {
log.Print("No section number found at end of title, defaulting to 1")
}
r.outs(w, fmt.Sprintf(".TH %q", strings.ToUpper(title)))
r.outs(w, fmt.Sprintf(" %d", section))
r.outs(w, fmt.Sprintf(" %q", node.Date.Format("January 2006")))
r.outs(w, fmt.Sprintf(" %q", node.Area))
r.outs(w, fmt.Sprintf(" %q", node.Workgroup))
r.outs(w, "\n")
}
func (r *Renderer) heading(w io.Writer, node *ast.Heading, entering bool) {
if entering {
switch node.Level {
case 1, 2:
r.outs(w, "\n.SH ")
default:
r.outs(w, "\n.SS ")
}
}
}
func (r *Renderer) citation(w io.Writer, node *ast.Citation, entering bool) {
r.outs(w, "[")
for i, dest := range node.Destination {
if i > 0 {
r.outs(w, ", ")
}
r.out(w, dest)
}
r.outs(w, "]")
}
func (r *Renderer) paragraph(w io.Writer, para *ast.Paragraph, entering bool) {
if entering {
// If in lists, suppress paragraphs. Unless we know the list contains
// block level elements, but then only apply this after the first paragraph.
parent := para.Parent
if parent != nil {
if _, ok := parent.(*ast.ListItem); ok {
// if we're the first para return, otherwise output a PP
c := parent.GetChildren()
i := 0
par := 0
for i = range c {
_, ok := c[i].(*ast.Paragraph)
if ok {
par++
}
if c[i] == para {
if par > 1 {
// No .PP because that messes up formatting.
r.outs(w, "\n\n")
}
}
}
return
}
}
r.outs(w, "\n.PP\n")
return
}
r.outs(w, "\n")
}
func (r *Renderer) list(w io.Writer, list *ast.List, entering bool) {
if list.IsFootnotesList {
return
}
// normal list
if entering {
r.allListLevel++
if list.ListFlags&ast.ListTypeOrdered == 0 && list.ListFlags&ast.ListTypeTerm == 0 && list.ListFlags&ast.ListTypeDefinition == 0 {
r.listLevel++
}
if r.allListLevel > 1 {
r.outs(w, "\n.RS\n")
} else {
r.outs(w, "\n")
}
return
}
if r.allListLevel > 1 {
r.outs(w, "\n.RE\n")
} else {
r.outs(w, "\n")
}
r.allListLevel--
if list.ListFlags&ast.ListTypeOrdered == 0 && list.ListFlags&ast.ListTypeTerm == 0 && list.ListFlags&ast.ListTypeDefinition == 0 {
r.listLevel--
}
}
func (r *Renderer) listItem(w io.Writer, listItem *ast.ListItem, entering bool) {
if entering {
// footnotes
if listItem.RefLink != nil {
// get number in the list
children := listItem.Parent.GetChildren()
for i := range children {
if listItem == children[i] {
r.outs(w, fmt.Sprintf("\n.IP [%d]\n", i+1))
}
}
return
}
x := listItem.ListFlags
switch {
case x&ast.ListTypeOrdered != 0:
children := listItem.GetParent().GetChildren()
i := 0
for i = 0; i < len(children); i++ {
if children[i] == listItem {
break
}
}
start := listItem.GetParent().(*ast.List).Start
r.outs(w, fmt.Sprintf(".IP %d\\. 4\n", start+i+1))
case x&ast.ListTypeTerm != 0:
r.outs(w, ".TP\n")
case x&ast.ListTypeDefinition != 0:
r.outs(w, "")
default:
if r.listLevel%2 == 0 {
r.outs(w, ".IP \\(en 4\n")
} else {
r.outs(w, ".IP \\(bu 4\n")
}
}
}
}
func (r *Renderer) codeBlock(w io.Writer, codeBlock *ast.CodeBlock, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n\n.nf\n")
escapeSpecialChars(r, w, codeBlock.Literal)
r.outs(w, "\n.fi\n.RE\n")
}
}
func (r *Renderer) table(w io.Writer, tab *ast.Table, entering bool) {
// The tbl renderer want to see the entire table's columns, rows first
if entering {
r.outs(w, "\n.RS\n.TS\nallbox;\n")
cells := rows(tab)
for r1 := 0; r1 < len(cells); r1++ {
align := ""
for c := 0; c < len(cells[r1]); c++ {
x := cells[r1][c]
switch x.Align {
case ast.TableAlignmentLeft:
align += "l "
case ast.TableAlignmentRight:
align += "r "
case ast.TableAlignmentCenter:
fallthrough
default:
align += "c "
}
if x.ColSpan > 0 {
align += strings.Repeat("s ", x.ColSpan-1)
}
}
r.outs(w, strings.TrimSpace(align)+"\n")
}
r.outs(w, ".\n")
return
}
r.outs(w, ".TE\n.RE\n\n")
}
func (r *Renderer) tableRow(w io.Writer, tableRow *ast.TableRow, entering bool) {
if !entering {
r.outs(w, "\n")
}
}
func (r *Renderer) tableCell(w io.Writer, tableCell *ast.TableCell, entering bool) {
if tableCell.IsHeader {
r.outOneOf(w, entering, "\\fB", "\\fP")
}
parent := tableCell.Parent
if tableCell == ast.GetFirstChild(parent) {
return
}
if entering {
r.outs(w, "\t")
return
}
}
func (r *Renderer) htmlSpan(w io.Writer, span *ast.HTMLSpan) {}
func (r *Renderer) crossReference(w io.Writer, cr *ast.CrossReference, entering bool) {
if !entering {
return
}
r.out(w, bytes.ToUpper(cr.Destination))
}
func (r *Renderer) index(w io.Writer, index *ast.Index, entering bool) {}
func (r *Renderer) link(w io.Writer, link *ast.Link, entering bool) {
if link.Footnote != nil {
if entering {
r.outs(w, fmt.Sprintf("\\u[%d]\\d", link.NoteID))
}
return
}
// !entering so the URL comes after the link text.
if !entering {
r.outs(w, "\n\\[la]")
r.out(w, link.Destination)
r.outs(w, "\\[ra]")
}
}
func (r *Renderer) image(w io.Writer, node *ast.Image, entering bool) {
// only works with `ascii-art` images
if !bytes.HasSuffix(node.Destination, []byte(".ascii-art")) {
// remove from tree, we can use RemoveFromTree, because that re-shuffles things and can
// make us output things twice.
node.SetChildren(nil)
node = nil
return
}
if !entering {
r.outs(w, "\n.fi\n.RE\n")
return
}
node.SetChildren(nil) // remove Title, if any, we can type set it.
r.outs(w, "\n.PP\n.RS\n\n.nf\n")
img, err := ioutil.ReadFile(string(node.Destination)) // markdown, doens't err, this can be an empty image, log maybe??
if err != nil {
img = []byte(err.Error())
}
escapeSpecialChars(r, w, img)
}
func (r *Renderer) mathBlock(w io.Writer, mathBlock *ast.MathBlock, entering bool) {
// may indent it?
}
func (r *Renderer) captionFigure(w io.Writer, figure *ast.CaptionFigure, entering bool) {
// check what we have here and throw away any non ascii-art figures
// CaptionFigure
// Paragraph
// Text
// Image 'url=array-vs-slice.svg'
// Text 'Array versus Slice svg'
// Text '\n'
// Image 'url=array-vs-slice.ascii-art'
// Text 'Array versus Slice ascii-art'
//
// The image with svg will be removed as child and then we continue to walk the AST.
for _, child := range figure.GetChildren() {
// for figures/images, these are wrapped below a paragraph.
// TODO: can there be more than 1 paragraph??
if p, ok := child.(*ast.Paragraph); ok {
for _, img := range p.GetChildren() {
x, ok := img.(*ast.Image)
if !ok {
continue
}
// if not ascii-art, remove entirely
if !bytes.HasSuffix(x.Destination, []byte(".ascii-art")) {
ast.RemoveFromTree(img) // this is save because we're not accessing any of the children just yet.
continue
}
img.SetChildren(nil) // remove alt text
}
}
}
}
func (r *Renderer) caption(w io.Writer, caption *ast.Caption, entering bool) {
what := ast.GetFirstChild(caption.Parent)
if !entering {
switch what.(type) {
case *ast.Table:
r.outs(w, "\n.RE\n")
case *ast.CodeBlock, *ast.Paragraph: // Paragraph is here because that wrap an image.
r.outs(w, "\n.RE\n")
case *ast.BlockQuote:
r.outs(w, "\n.RE\n")
}
return
}
// get parent, get first child for type
switch what.(type) {
case *ast.Table:
r.outs(w, "\n.RS\n")
case *ast.CodeBlock, *ast.Paragraph:
r.outs(w, "\n.RS\n")
case *ast.BlockQuote:
r.outs(w, "\n.RS\n")
r.outs(w, "\\(en ")
}
}
func (r *Renderer) blockQuote(w io.Writer, block *ast.BlockQuote, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n")
} else {
r.outs(w, "\n.RE\n")
}
}
func (r *Renderer) aside(w io.Writer, block *ast.Aside, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n")
} else {
r.outs(w, "\n.RE\n")
}
}
// RenderNode renders a markdown node to markdown.
func (r *Renderer) RenderNode(w io.Writer, node ast.Node, entering bool) ast.WalkStatus {
if r.opts.RenderNodeHook != nil {
status, didHandle := r.opts.RenderNodeHook(w, node, entering)
if didHandle {
return status
}
}
if attr := mast.AttributeFromNode(node); attr != nil && entering {
}
switch node := node.(type) {
case *ast.Document:
// do nothing
case *mast.Title:
r.title(w, node, entering)
r.Title = node // save for later.
case *mast.Authors:
r.authors(w, node, entering)
case *mast.Bibliography:
if entering {
r.outs(w, "\n.SH \"")
r.outs(w, strings.ToUpper(r.opts.Language.Bibliography()))
r.outs(w, "\"\n")
}
case *mast.BibliographyItem:
r.bibliographyItem(w, node, entering)
case *mast.DocumentIndex, *mast.IndexLetter, *mast.IndexItem, *mast.IndexSubItem, *mast.IndexLink:
case *mast.ReferenceBlock:
// ignore
case *ast.Footnotes:
r.footnotes(w, node, entering)
case *ast.Text:
r.text(w, node, entering)
case *ast.Softbreak:
// TODO
case *ast.Hardbreak:
r.hardBreak(w, node)
case *ast.NonBlockingSpace:
r.outs(w, "\\0")
case *ast.Callout:
r.callout(w, node, entering)
case *ast.Emph:
r.outOneOf(w, entering, "\\fI", "\\fP")
case *ast.Strong:
r.outOneOf(w, entering, "\\fB", "\\fP")
case *ast.Del:
r.outOneOf(w, entering, "~~", "~~")
case *ast.Citation:
r.citation(w, node, entering)
case *ast.DocumentMatter:
r.matter(w, node, entering)
case *ast.Heading:
r.heading(w, node, entering)
case *ast.HorizontalRule:
if entering {
r.outs(w, "\n.ti 0\n\\l'\\n(.l─'\n")
}
case *ast.Paragraph:
r.paragraph(w, node, entering)
case *ast.HTMLSpan:
r.out(w, node.Literal)
case *ast.HTMLBlock:
r.out(w, node.Literal)
case *ast.List:
r.list(w, node, entering)
case *ast.ListItem:
r.listItem(w, node, entering)
case *ast.CodeBlock:
r.codeBlock(w, node, entering)
case *ast.Caption:
r.caption(w, node, entering)
case *ast.CaptionFigure:
r.captionFigure(w, node, entering)
case *ast.Table:
r.table(w, node, entering)
case *ast.TableCell:
r.tableCell(w, node, entering)
case *ast.TableHeader:
case *ast.TableBody:
case *ast.TableFooter:
case *ast.TableRow:
r.tableRow(w, node, entering)
case *ast.BlockQuote:
r.blockQuote(w, node, entering)
case *ast.Aside:
r.aside(w, node, entering)
case *ast.CrossReference:
r.crossReference(w, node, entering)
case *ast.Index:
r.index(w, node, entering)
case *ast.Link:
r.link(w, node, entering)
case *ast.Math:
if entering {
r.out(w, node.Literal)
}
case *ast.Image:
r.image(w, node, entering)
case *ast.Code:
r.outs(w, "\\fB\\fC")
r.out(w, node.Literal)
r.outs(w, "\\fR")
case *ast.MathBlock:
r.mathBlock(w, node, entering)
case *ast.Subscript:
r.outOneOf(w, true, "\\d", "\\u")
if entering {
r.out(w, node.Literal)
}
r.outOneOf(w, false, "\\d", "\\u")
case *ast.Superscript:
r.outOneOf(w, true, "\\u", "\\d")
if entering {
r.out(w, node.Literal)
}
r.outOneOf(w, false, "\\u", "\\d")
default:
panic(fmt.Sprintf("Unknown node %T", node))
}
return ast.GoToNext
}
func (r *Renderer) callout(w io.Writer, node *ast.Callout, entering bool) {
if entering {
r.outs(w, "\\fB")
r.out(w, node.ID)
r.outs(w, "\\fP")
return
}
}
func (r *Renderer) text(w io.Writer, node *ast.Text, entering bool) {
if !entering {
return
}
text := node.Literal
parent := node.Parent
if parent != nil {
if _, ok := parent.(*ast.Heading); ok {
text = bytes.ToUpper(text)
text = append(text, byte('"'))
text = append([]byte{byte('"')}, text...)
}
}
r.out(w, text)
}
func (r *Renderer) fo | io.Writer, node ast.Node, entering bool) {
if !entering {
return
}
r.outs(w, "\n.SH \""+strings.ToUpper(r.opts.Language.Footnotes())+"\"\n")
}
func (r *Renderer) RenderHeader(w io.Writer, _ ast.Node) {
if r.opts.Flags&ManFragment != 0 {
return
}
r.outs(w, `.\" Generated by Mmark Markdown Processer - mmark.miek.nl`+"\n")
}
func (r *Renderer) RenderFooter(w io.Writer, node ast.Node) {}
func (r *Renderer) bibliographyItem(w io.Writer, bib *mast.BibliographyItem, entering bool) {
if !entering {
return
}
if bib.Reference == nil {
return
}
r.outs(w, ".TP\n")
r.outs(w, fmt.Sprintf("[%s]\n", bib.Anchor))
for _, author := range bib.Reference.Front.Authors {
writeNonEmptyString(w, author.Fullname)
if author.Organization != nil {
writeNonEmptyString(w, author.Organization.Value)
}
}
writeNonEmptyString(w, bib.Reference.Front.Title)
if bib.Reference.Target != "" {
r.outs(w, "\\[la]")
r.outs(w, bib.Reference.Target)
r.outs(w, "\\[ra]")
}
writeNonEmptyString(w, bib.Reference.Front.Date.Year)
r.outs(w, "\n")
}
func writeNonEmptyString(w io.Writer, s string) {
if s == "" {
return
}
io.WriteString(w, s)
io.WriteString(w, "\n")
}
| otnotes(w | identifier_name |
renderer.go | // The package man outputs man pages from mmmark markdown.
package man
// Lots of code copied from https://github.com/cpuguy83/go-md2man, but adapated to mmark
// and made to support mmark features.
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
"github.com/gomarkdown/markdown/ast"
"github.com/gomarkdown/markdown/html"
"github.com/mmarkdown/mmark/v2/lang"
"github.com/mmarkdown/mmark/v2/mast"
)
// Flags control optional behavior of Markdown renderer.
type Flags int
// HTML renderer configuration options.
const (
FlagsNone Flags = 0
ManFragment Flags = 1 << iota // Don't generate a complete document
CommonFlags Flags = FlagsNone
)
// RendererOptions is a collection of supplementary parameters tweaking
// the behavior of various parts of Markdown renderer.
type RendererOptions struct {
Flags Flags // Flags allow customizing this renderer's behavior
Language lang.Lang // Output language for the document.
// if set, called at the start of RenderNode(). Allows replacing rendering of some nodes
RenderNodeHook html.RenderNodeFunc
// Comments is a list of comments the renderer should detect when
// parsing code blocks and detecting callouts.
Comments [][]byte
}
// Renderer implements Renderer interface for Markdown output.
type Renderer struct {
opts RendererOptions
Title *mast.Title
listLevel int
allListLevel int
}
// NewRenderer creates and configures an Renderer object, which satisfies the Renderer interface.
func NewRenderer(opts RendererOptions) *Renderer {
return &Renderer{opts: opts}
}
func (r *Renderer) hardBreak(w io.Writer, node *ast.Hardbreak) {
r.outs(w, "\n.br\n")
}
func (r *Renderer) matter(w io.Writer, node *ast.DocumentMatter, entering bool) {
// TODO: what should this output?
}
func (r *Renderer) title(w io.Writer, node *mast.Title, entering bool) {
if !entering {
return
}
if node.Date.IsZero() {
node.Date = time.Now().UTC()
}
// track back to first space and assume the rest is the section, don't parse it as a number
i := len(node.Title) - 1
for i > 0 && node.Title[i-1] != ' ' {
i--
}
section := 1
title := node.Title
switch {
case i > 0:
d, err := strconv.Atoi(node.Title[i:])
if err != nil {
log.Print("No section number found at end of title, defaulting to 1")
} else {
section = d
title = node.Title[:i-1]
}
}
if i == 0 {
log.Print("No section number found at end of title, defaulting to 1")
}
r.outs(w, fmt.Sprintf(".TH %q", strings.ToUpper(title)))
r.outs(w, fmt.Sprintf(" %d", section))
r.outs(w, fmt.Sprintf(" %q", node.Date.Format("January 2006")))
r.outs(w, fmt.Sprintf(" %q", node.Area))
r.outs(w, fmt.Sprintf(" %q", node.Workgroup))
r.outs(w, "\n")
}
func (r *Renderer) heading(w io.Writer, node *ast.Heading, entering bool) {
if entering {
switch node.Level {
case 1, 2:
r.outs(w, "\n.SH ")
default:
r.outs(w, "\n.SS ")
}
}
}
func (r *Renderer) citation(w io.Writer, node *ast.Citation, entering bool) {
r.outs(w, "[")
for i, dest := range node.Destination {
if i > 0 {
r.outs(w, ", ")
}
r.out(w, dest)
}
r.outs(w, "]")
}
func (r *Renderer) paragraph(w io.Writer, para *ast.Paragraph, entering bool) {
if entering {
// If in lists, suppress paragraphs. Unless we know the list contains
// block level elements, but then only apply this after the first paragraph.
parent := para.Parent
if parent != nil {
if _, ok := parent.(*ast.ListItem); ok {
// if we're the first para return, otherwise output a PP
c := parent.GetChildren()
i := 0
par := 0
for i = range c {
_, ok := c[i].(*ast.Paragraph)
if ok {
par++
}
if c[i] == para {
if par > 1 {
// No .PP because that messes up formatting.
r.outs(w, "\n\n")
}
}
}
return
}
}
r.outs(w, "\n.PP\n")
return
}
r.outs(w, "\n")
}
func (r *Renderer) list(w io.Writer, list *ast.List, entering bool) {
if list.IsFootnotesList {
return
}
// normal list
if entering {
r.allListLevel++
if list.ListFlags&ast.ListTypeOrdered == 0 && list.ListFlags&ast.ListTypeTerm == 0 && list.ListFlags&ast.ListTypeDefinition == 0 {
r.listLevel++
}
if r.allListLevel > 1 {
r.outs(w, "\n.RS\n")
} else {
r.outs(w, "\n")
}
return
}
if r.allListLevel > 1 {
r.outs(w, "\n.RE\n")
} else {
r.outs(w, "\n")
}
r.allListLevel--
if list.ListFlags&ast.ListTypeOrdered == 0 && list.ListFlags&ast.ListTypeTerm == 0 && list.ListFlags&ast.ListTypeDefinition == 0 {
r.listLevel--
}
}
func (r *Renderer) listItem(w io.Writer, listItem *ast.ListItem, entering bool) {
if entering {
// footnotes
if listItem.RefLink != nil {
// get number in the list
children := listItem.Parent.GetChildren()
for i := range children {
if listItem == children[i] {
r.outs(w, fmt.Sprintf("\n.IP [%d]\n", i+1))
}
}
return
}
x := listItem.ListFlags
switch {
case x&ast.ListTypeOrdered != 0:
children := listItem.GetParent().GetChildren()
i := 0
for i = 0; i < len(children); i++ {
if children[i] == listItem {
break
}
}
start := listItem.GetParent().(*ast.List).Start
r.outs(w, fmt.Sprintf(".IP %d\\. 4\n", start+i+1))
case x&ast.ListTypeTerm != 0:
r.outs(w, ".TP\n")
case x&ast.ListTypeDefinition != 0:
r.outs(w, "")
default:
if r.listLevel%2 == 0 {
r.outs(w, ".IP \\(en 4\n")
} else {
r.outs(w, ".IP \\(bu 4\n")
}
}
}
}
func (r *Renderer) codeBlock(w io.Writer, codeBlock *ast.CodeBlock, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n\n.nf\n")
escapeSpecialChars(r, w, codeBlock.Literal)
r.outs(w, "\n.fi\n.RE\n")
}
} | if entering {
r.outs(w, "\n.RS\n.TS\nallbox;\n")
cells := rows(tab)
for r1 := 0; r1 < len(cells); r1++ {
align := ""
for c := 0; c < len(cells[r1]); c++ {
x := cells[r1][c]
switch x.Align {
case ast.TableAlignmentLeft:
align += "l "
case ast.TableAlignmentRight:
align += "r "
case ast.TableAlignmentCenter:
fallthrough
default:
align += "c "
}
if x.ColSpan > 0 {
align += strings.Repeat("s ", x.ColSpan-1)
}
}
r.outs(w, strings.TrimSpace(align)+"\n")
}
r.outs(w, ".\n")
return
}
r.outs(w, ".TE\n.RE\n\n")
}
func (r *Renderer) tableRow(w io.Writer, tableRow *ast.TableRow, entering bool) {
if !entering {
r.outs(w, "\n")
}
}
func (r *Renderer) tableCell(w io.Writer, tableCell *ast.TableCell, entering bool) {
if tableCell.IsHeader {
r.outOneOf(w, entering, "\\fB", "\\fP")
}
parent := tableCell.Parent
if tableCell == ast.GetFirstChild(parent) {
return
}
if entering {
r.outs(w, "\t")
return
}
}
func (r *Renderer) htmlSpan(w io.Writer, span *ast.HTMLSpan) {}
func (r *Renderer) crossReference(w io.Writer, cr *ast.CrossReference, entering bool) {
if !entering {
return
}
r.out(w, bytes.ToUpper(cr.Destination))
}
func (r *Renderer) index(w io.Writer, index *ast.Index, entering bool) {}
func (r *Renderer) link(w io.Writer, link *ast.Link, entering bool) {
if link.Footnote != nil {
if entering {
r.outs(w, fmt.Sprintf("\\u[%d]\\d", link.NoteID))
}
return
}
// !entering so the URL comes after the link text.
if !entering {
r.outs(w, "\n\\[la]")
r.out(w, link.Destination)
r.outs(w, "\\[ra]")
}
}
func (r *Renderer) image(w io.Writer, node *ast.Image, entering bool) {
// only works with `ascii-art` images
if !bytes.HasSuffix(node.Destination, []byte(".ascii-art")) {
// remove from tree, we can use RemoveFromTree, because that re-shuffles things and can
// make us output things twice.
node.SetChildren(nil)
node = nil
return
}
if !entering {
r.outs(w, "\n.fi\n.RE\n")
return
}
node.SetChildren(nil) // remove Title, if any, we can type set it.
r.outs(w, "\n.PP\n.RS\n\n.nf\n")
img, err := ioutil.ReadFile(string(node.Destination)) // markdown, doens't err, this can be an empty image, log maybe??
if err != nil {
img = []byte(err.Error())
}
escapeSpecialChars(r, w, img)
}
func (r *Renderer) mathBlock(w io.Writer, mathBlock *ast.MathBlock, entering bool) {
// may indent it?
}
func (r *Renderer) captionFigure(w io.Writer, figure *ast.CaptionFigure, entering bool) {
// check what we have here and throw away any non ascii-art figures
// CaptionFigure
// Paragraph
// Text
// Image 'url=array-vs-slice.svg'
// Text 'Array versus Slice svg'
// Text '\n'
// Image 'url=array-vs-slice.ascii-art'
// Text 'Array versus Slice ascii-art'
//
// The image with svg will be removed as child and then we continue to walk the AST.
for _, child := range figure.GetChildren() {
// for figures/images, these are wrapped below a paragraph.
// TODO: can there be more than 1 paragraph??
if p, ok := child.(*ast.Paragraph); ok {
for _, img := range p.GetChildren() {
x, ok := img.(*ast.Image)
if !ok {
continue
}
// if not ascii-art, remove entirely
if !bytes.HasSuffix(x.Destination, []byte(".ascii-art")) {
ast.RemoveFromTree(img) // this is save because we're not accessing any of the children just yet.
continue
}
img.SetChildren(nil) // remove alt text
}
}
}
}
func (r *Renderer) caption(w io.Writer, caption *ast.Caption, entering bool) {
what := ast.GetFirstChild(caption.Parent)
if !entering {
switch what.(type) {
case *ast.Table:
r.outs(w, "\n.RE\n")
case *ast.CodeBlock, *ast.Paragraph: // Paragraph is here because that wrap an image.
r.outs(w, "\n.RE\n")
case *ast.BlockQuote:
r.outs(w, "\n.RE\n")
}
return
}
// get parent, get first child for type
switch what.(type) {
case *ast.Table:
r.outs(w, "\n.RS\n")
case *ast.CodeBlock, *ast.Paragraph:
r.outs(w, "\n.RS\n")
case *ast.BlockQuote:
r.outs(w, "\n.RS\n")
r.outs(w, "\\(en ")
}
}
func (r *Renderer) blockQuote(w io.Writer, block *ast.BlockQuote, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n")
} else {
r.outs(w, "\n.RE\n")
}
}
func (r *Renderer) aside(w io.Writer, block *ast.Aside, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n")
} else {
r.outs(w, "\n.RE\n")
}
}
// RenderNode renders a markdown node to markdown.
func (r *Renderer) RenderNode(w io.Writer, node ast.Node, entering bool) ast.WalkStatus {
if r.opts.RenderNodeHook != nil {
status, didHandle := r.opts.RenderNodeHook(w, node, entering)
if didHandle {
return status
}
}
if attr := mast.AttributeFromNode(node); attr != nil && entering {
}
switch node := node.(type) {
case *ast.Document:
// do nothing
case *mast.Title:
r.title(w, node, entering)
r.Title = node // save for later.
case *mast.Authors:
r.authors(w, node, entering)
case *mast.Bibliography:
if entering {
r.outs(w, "\n.SH \"")
r.outs(w, strings.ToUpper(r.opts.Language.Bibliography()))
r.outs(w, "\"\n")
}
case *mast.BibliographyItem:
r.bibliographyItem(w, node, entering)
case *mast.DocumentIndex, *mast.IndexLetter, *mast.IndexItem, *mast.IndexSubItem, *mast.IndexLink:
case *mast.ReferenceBlock:
// ignore
case *ast.Footnotes:
r.footnotes(w, node, entering)
case *ast.Text:
r.text(w, node, entering)
case *ast.Softbreak:
// TODO
case *ast.Hardbreak:
r.hardBreak(w, node)
case *ast.NonBlockingSpace:
r.outs(w, "\\0")
case *ast.Callout:
r.callout(w, node, entering)
case *ast.Emph:
r.outOneOf(w, entering, "\\fI", "\\fP")
case *ast.Strong:
r.outOneOf(w, entering, "\\fB", "\\fP")
case *ast.Del:
r.outOneOf(w, entering, "~~", "~~")
case *ast.Citation:
r.citation(w, node, entering)
case *ast.DocumentMatter:
r.matter(w, node, entering)
case *ast.Heading:
r.heading(w, node, entering)
case *ast.HorizontalRule:
if entering {
r.outs(w, "\n.ti 0\n\\l'\\n(.l─'\n")
}
case *ast.Paragraph:
r.paragraph(w, node, entering)
case *ast.HTMLSpan:
r.out(w, node.Literal)
case *ast.HTMLBlock:
r.out(w, node.Literal)
case *ast.List:
r.list(w, node, entering)
case *ast.ListItem:
r.listItem(w, node, entering)
case *ast.CodeBlock:
r.codeBlock(w, node, entering)
case *ast.Caption:
r.caption(w, node, entering)
case *ast.CaptionFigure:
r.captionFigure(w, node, entering)
case *ast.Table:
r.table(w, node, entering)
case *ast.TableCell:
r.tableCell(w, node, entering)
case *ast.TableHeader:
case *ast.TableBody:
case *ast.TableFooter:
case *ast.TableRow:
r.tableRow(w, node, entering)
case *ast.BlockQuote:
r.blockQuote(w, node, entering)
case *ast.Aside:
r.aside(w, node, entering)
case *ast.CrossReference:
r.crossReference(w, node, entering)
case *ast.Index:
r.index(w, node, entering)
case *ast.Link:
r.link(w, node, entering)
case *ast.Math:
if entering {
r.out(w, node.Literal)
}
case *ast.Image:
r.image(w, node, entering)
case *ast.Code:
r.outs(w, "\\fB\\fC")
r.out(w, node.Literal)
r.outs(w, "\\fR")
case *ast.MathBlock:
r.mathBlock(w, node, entering)
case *ast.Subscript:
r.outOneOf(w, true, "\\d", "\\u")
if entering {
r.out(w, node.Literal)
}
r.outOneOf(w, false, "\\d", "\\u")
case *ast.Superscript:
r.outOneOf(w, true, "\\u", "\\d")
if entering {
r.out(w, node.Literal)
}
r.outOneOf(w, false, "\\u", "\\d")
default:
panic(fmt.Sprintf("Unknown node %T", node))
}
return ast.GoToNext
}
func (r *Renderer) callout(w io.Writer, node *ast.Callout, entering bool) {
if entering {
r.outs(w, "\\fB")
r.out(w, node.ID)
r.outs(w, "\\fP")
return
}
}
func (r *Renderer) text(w io.Writer, node *ast.Text, entering bool) {
if !entering {
return
}
text := node.Literal
parent := node.Parent
if parent != nil {
if _, ok := parent.(*ast.Heading); ok {
text = bytes.ToUpper(text)
text = append(text, byte('"'))
text = append([]byte{byte('"')}, text...)
}
}
r.out(w, text)
}
func (r *Renderer) footnotes(w io.Writer, node ast.Node, entering bool) {
if !entering {
return
}
r.outs(w, "\n.SH \""+strings.ToUpper(r.opts.Language.Footnotes())+"\"\n")
}
func (r *Renderer) RenderHeader(w io.Writer, _ ast.Node) {
if r.opts.Flags&ManFragment != 0 {
return
}
r.outs(w, `.\" Generated by Mmark Markdown Processer - mmark.miek.nl`+"\n")
}
func (r *Renderer) RenderFooter(w io.Writer, node ast.Node) {}
func (r *Renderer) bibliographyItem(w io.Writer, bib *mast.BibliographyItem, entering bool) {
if !entering {
return
}
if bib.Reference == nil {
return
}
r.outs(w, ".TP\n")
r.outs(w, fmt.Sprintf("[%s]\n", bib.Anchor))
for _, author := range bib.Reference.Front.Authors {
writeNonEmptyString(w, author.Fullname)
if author.Organization != nil {
writeNonEmptyString(w, author.Organization.Value)
}
}
writeNonEmptyString(w, bib.Reference.Front.Title)
if bib.Reference.Target != "" {
r.outs(w, "\\[la]")
r.outs(w, bib.Reference.Target)
r.outs(w, "\\[ra]")
}
writeNonEmptyString(w, bib.Reference.Front.Date.Year)
r.outs(w, "\n")
}
func writeNonEmptyString(w io.Writer, s string) {
if s == "" {
return
}
io.WriteString(w, s)
io.WriteString(w, "\n")
} |
func (r *Renderer) table(w io.Writer, tab *ast.Table, entering bool) {
// The tbl renderer want to see the entire table's columns, rows first | random_line_split |
renderer.go | // The package man outputs man pages from mmmark markdown.
package man
// Lots of code copied from https://github.com/cpuguy83/go-md2man, but adapated to mmark
// and made to support mmark features.
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
"github.com/gomarkdown/markdown/ast"
"github.com/gomarkdown/markdown/html"
"github.com/mmarkdown/mmark/v2/lang"
"github.com/mmarkdown/mmark/v2/mast"
)
// Flags control optional behavior of Markdown renderer.
type Flags int
// HTML renderer configuration options.
const (
FlagsNone Flags = 0
ManFragment Flags = 1 << iota // Don't generate a complete document
CommonFlags Flags = FlagsNone
)
// RendererOptions is a collection of supplementary parameters tweaking
// the behavior of various parts of Markdown renderer.
type RendererOptions struct {
Flags Flags // Flags allow customizing this renderer's behavior
Language lang.Lang // Output language for the document.
// if set, called at the start of RenderNode(). Allows replacing rendering of some nodes
RenderNodeHook html.RenderNodeFunc
// Comments is a list of comments the renderer should detect when
// parsing code blocks and detecting callouts.
Comments [][]byte
}
// Renderer implements Renderer interface for Markdown output.
type Renderer struct {
opts RendererOptions
Title *mast.Title
listLevel int
allListLevel int
}
// NewRenderer creates and configures an Renderer object, which satisfies the Renderer interface.
func NewRenderer(opts RendererOptions) *Renderer {
return &Renderer{opts: opts}
}
func (r *Renderer) hardBreak(w io.Writer, node *ast.Hardbreak) {
r.outs(w, "\n.br\n")
}
func (r *Renderer) matter(w io.Writer, node *ast.DocumentMatter, entering bool) {
// TODO: what should this output?
}
func (r *Renderer) title(w io.Writer, node *mast.Title, entering bool) {
if !entering {
return
}
if node.Date.IsZero() {
node.Date = time.Now().UTC()
}
// track back to first space and assume the rest is the section, don't parse it as a number
i := len(node.Title) - 1
for i > 0 && node.Title[i-1] != ' ' {
i--
}
section := 1
title := node.Title
switch {
case i > 0:
d, err := strconv.Atoi(node.Title[i:])
if err != nil {
log.Print("No section number found at end of title, defaulting to 1")
} else {
section = d
title = node.Title[:i-1]
}
}
if i == 0 {
log.Print("No section number found at end of title, defaulting to 1")
}
r.outs(w, fmt.Sprintf(".TH %q", strings.ToUpper(title)))
r.outs(w, fmt.Sprintf(" %d", section))
r.outs(w, fmt.Sprintf(" %q", node.Date.Format("January 2006")))
r.outs(w, fmt.Sprintf(" %q", node.Area))
r.outs(w, fmt.Sprintf(" %q", node.Workgroup))
r.outs(w, "\n")
}
func (r *Renderer) heading(w io.Writer, node *ast.Heading, entering bool) {
if entering {
switch node.Level {
case 1, 2:
r.outs(w, "\n.SH ")
default:
r.outs(w, "\n.SS ")
}
}
}
func (r *Renderer) citation(w io.Writer, node *ast.Citation, entering bool) {
r.outs(w, "[")
for i, dest := range node.Destination {
if i > 0 {
r.outs(w, ", ")
}
r.out(w, dest)
}
r.outs(w, "]")
}
func (r *Renderer) paragraph(w io.Writer, para *ast.Paragraph, entering bool) {
if entering {
// If in lists, suppress paragraphs. Unless we know the list contains
// block level elements, but then only apply this after the first paragraph.
parent := para.Parent
if parent != nil {
if _, ok := parent.(*ast.ListItem); ok {
// if we're the first para return, otherwise output a PP
c := parent.GetChildren()
i := 0
par := 0
for i = range c {
_, ok := c[i].(*ast.Paragraph)
if ok {
par++
}
if c[i] == para {
if par > 1 {
// No .PP because that messes up formatting.
r.outs(w, "\n\n")
}
}
}
return
}
}
r.outs(w, "\n.PP\n")
return
}
r.outs(w, "\n")
}
func (r *Renderer) list(w io.Writer, list *ast.List, entering bool) {
if list.IsFootnotesList {
return
}
// normal list
if entering {
r.allListLevel++
if list.ListFlags&ast.ListTypeOrdered == 0 && list.ListFlags&ast.ListTypeTerm == 0 && list.ListFlags&ast.ListTypeDefinition == 0 {
r.listLevel++
}
if r.allListLevel > 1 {
r.outs(w, "\n.RS\n")
} else {
r.outs(w, "\n")
}
return
}
if r.allListLevel > 1 {
r.outs(w, "\n.RE\n")
} else {
r.outs(w, "\n")
}
r.allListLevel--
if list.ListFlags&ast.ListTypeOrdered == 0 && list.ListFlags&ast.ListTypeTerm == 0 && list.ListFlags&ast.ListTypeDefinition == 0 {
r.listLevel--
}
}
func (r *Renderer) listItem(w io.Writer, listItem *ast.ListItem, entering bool) {
if entering {
// footnotes
if listItem.RefLink != nil {
// get number in the list
children := listItem.Parent.GetChildren()
for i := range children {
if listItem == children[i] {
r.outs(w, fmt.Sprintf("\n.IP [%d]\n", i+1))
}
}
return
}
x := listItem.ListFlags
switch {
case x&ast.ListTypeOrdered != 0:
children := listItem.GetParent().GetChildren()
i := 0
for i = 0; i < len(children); i++ {
if children[i] == listItem {
break
}
}
start := listItem.GetParent().(*ast.List).Start
r.outs(w, fmt.Sprintf(".IP %d\\. 4\n", start+i+1))
case x&ast.ListTypeTerm != 0:
r.outs(w, ".TP\n")
case x&ast.ListTypeDefinition != 0:
r.outs(w, "")
default:
if r.listLevel%2 == 0 {
r.outs(w, ".IP \\(en 4\n")
} else {
r.outs(w, ".IP \\(bu 4\n")
}
}
}
}
func (r *Renderer) codeBlock(w io.Writer, codeBlock *ast.CodeBlock, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n\n.nf\n")
escapeSpecialChars(r, w, codeBlock.Literal)
r.outs(w, "\n.fi\n.RE\n")
}
}
func (r *Renderer) table(w io.Writer, tab *ast.Table, entering bool) {
// The tbl renderer want to see the entire table's columns, rows first
if entering {
r.outs(w, "\n.RS\n.TS\nallbox;\n")
cells := rows(tab)
for r1 := 0; r1 < len(cells); r1++ {
align := ""
for c := 0; c < len(cells[r1]); c++ {
x := cells[r1][c]
switch x.Align {
case ast.TableAlignmentLeft:
align += "l "
case ast.TableAlignmentRight:
align += "r "
case ast.TableAlignmentCenter:
fallthrough
default:
align += "c "
}
if x.ColSpan > 0 {
align += strings.Repeat("s ", x.ColSpan-1)
}
}
r.outs(w, strings.TrimSpace(align)+"\n")
}
r.outs(w, ".\n")
return
}
r.outs(w, ".TE\n.RE\n\n")
}
func (r *Renderer) tableRow(w io.Writer, tableRow *ast.TableRow, entering bool) {
if !entering {
r.outs(w, "\n")
}
}
func (r *Renderer) tableCell(w io.Writer, tableCell *ast.TableCell, entering bool) {
if tableCell.IsHeader {
r.outOneOf(w, entering, "\\fB", "\\fP")
}
parent := tableCell.Parent
if tableCell == ast.GetFirstChild(parent) {
return
}
if entering {
r.outs(w, "\t")
return
}
}
func (r *Renderer) htmlSpan(w io.Writer, span *ast.HTMLSpan) {}
func (r *Renderer) crossReference(w io.Writer, cr *ast.CrossReference, entering bool) {
if !entering {
return
}
r.out(w, bytes.ToUpper(cr.Destination))
}
func (r *Renderer) index(w io.Writer, index *ast.Index, entering bool) {}
func (r *Renderer) link(w io.Writer, link *ast.Link, entering bool) {
if link.Footnote != nil {
if entering {
r.outs(w, fmt.Sprintf("\\u[%d]\\d", link.NoteID))
}
return
}
// !entering so the URL comes after the link text.
if !entering {
r.outs(w, "\n\\[la]")
r.out(w, link.Destination)
r.outs(w, "\\[ra]")
}
}
func (r *Renderer) image(w io.Writer, node *ast.Image, entering bool) {
// only works with `ascii-art` images
if !bytes.HasSuffix(node.Destination, []byte(".ascii-art")) {
// remove from tree, we can use RemoveFromTree, because that re-shuffles things and can
// make us output things twice.
node.SetChildren(nil)
node = nil
return
}
if !entering {
r.outs(w, "\n.fi\n.RE\n")
return
}
node.SetChildren(nil) // remove Title, if any, we can type set it.
r.outs(w, "\n.PP\n.RS\n\n.nf\n")
img, err := ioutil.ReadFile(string(node.Destination)) // markdown, doens't err, this can be an empty image, log maybe??
if err != nil {
img = []byte(err.Error())
}
escapeSpecialChars(r, w, img)
}
func (r *Renderer) mathBlock(w io.Writer, mathBlock *ast.MathBlock, entering bool) {
// may indent it?
}
func (r *Renderer) captionFigure(w io.Writer, figure *ast.CaptionFigure, entering bool) {
// check what we have here and throw away any non ascii-art figures
// CaptionFigure
// Paragraph
// Text
// Image 'url=array-vs-slice.svg'
// Text 'Array versus Slice svg'
// Text '\n'
// Image 'url=array-vs-slice.ascii-art'
// Text 'Array versus Slice ascii-art'
//
// The image with svg will be removed as child and then we continue to walk the AST.
for _, child := range figure.GetChildren() {
// for figures/images, these are wrapped below a paragraph.
// TODO: can there be more than 1 paragraph??
if p, ok := child.(*ast.Paragraph); ok {
for _, img := range p.GetChildren() {
x, ok := img.(*ast.Image)
if !ok {
continue
}
// if not ascii-art, remove entirely
if !bytes.HasSuffix(x.Destination, []byte(".ascii-art")) {
ast.RemoveFromTree(img) // this is save because we're not accessing any of the children just yet.
continue
}
img.SetChildren(nil) // remove alt text
}
}
}
}
func (r *Renderer) caption(w io.Writer, caption *ast.Caption, entering bool) {
what := ast.GetFirstChild(caption.Parent)
if !entering {
switch what.(type) {
case *ast.Table:
r.outs(w, "\n.RE\n")
case *ast.CodeBlock, *ast.Paragraph: // Paragraph is here because that wrap an image.
r.outs(w, "\n.RE\n")
case *ast.BlockQuote:
r.outs(w, "\n.RE\n")
}
return
}
// get parent, get first child for type
switch what.(type) {
case *ast.Table:
r.outs(w, "\n.RS\n")
case *ast.CodeBlock, *ast.Paragraph:
r.outs(w, "\n.RS\n")
case *ast.BlockQuote:
r.outs(w, "\n.RS\n")
r.outs(w, "\\(en ")
}
}
func (r *Renderer) blockQuote(w io.Writer, block *ast.BlockQuote, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n")
} else {
r.outs(w, "\n.RE\n")
}
}
func (r *Renderer) aside(w io.Writer, block *ast.Aside, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n")
} else {
r.outs(w, "\n.RE\n")
}
}
// RenderNode renders a markdown node to markdown.
func (r *Renderer) RenderNode(w io.Writer, node ast.Node, entering bool) ast.WalkStatus {
if r.opts.RenderNodeHook != nil {
status, didHandle := r.opts.RenderNodeHook(w, node, entering)
if didHandle {
return status
}
}
if attr := mast.AttributeFromNode(node); attr != nil && entering {
}
switch node := node.(type) {
case *ast.Document:
// do nothing
case *mast.Title:
r.title(w, node, entering)
r.Title = node // save for later.
case *mast.Authors:
r.authors(w, node, entering)
case *mast.Bibliography:
if entering {
r.outs(w, "\n.SH \"")
r.outs(w, strings.ToUpper(r.opts.Language.Bibliography()))
r.outs(w, "\"\n")
}
case *mast.BibliographyItem:
r.bibliographyItem(w, node, entering)
case *mast.DocumentIndex, *mast.IndexLetter, *mast.IndexItem, *mast.IndexSubItem, *mast.IndexLink:
case *mast.ReferenceBlock:
// ignore
case *ast.Footnotes:
r.footnotes(w, node, entering)
case *ast.Text:
r.text(w, node, entering)
case *ast.Softbreak:
// TODO
case *ast.Hardbreak:
r.hardBreak(w, node)
case *ast.NonBlockingSpace:
r.outs(w, "\\0")
case *ast.Callout:
r.callout(w, node, entering)
case *ast.Emph:
r.outOneOf(w, entering, "\\fI", "\\fP")
case *ast.Strong:
r.outOneOf(w, entering, "\\fB", "\\fP")
case *ast.Del:
r.outOneOf(w, entering, "~~", "~~")
case *ast.Citation:
r.citation(w, node, entering)
case *ast.DocumentMatter:
r.matter(w, node, entering)
case *ast.Heading:
r.heading(w, node, entering)
case *ast.HorizontalRule:
if entering {
r.outs(w, "\n.ti 0\n\\l'\\n(.l─'\n")
}
case *ast.Paragraph:
r.paragraph(w, node, entering)
case *ast.HTMLSpan:
r.out(w, node.Literal)
case *ast.HTMLBlock:
r.out(w, node.Literal)
case *ast.List:
r.list(w, node, entering)
case *ast.ListItem:
r.listItem(w, node, entering)
case *ast.CodeBlock:
r.codeBlock(w, node, entering)
case *ast.Caption:
r.caption(w, node, entering)
case *ast.CaptionFigure:
r.captionFigure(w, node, entering)
case *ast.Table:
r.table(w, node, entering)
case *ast.TableCell:
r.tableCell(w, node, entering)
case *ast.TableHeader:
case *ast.TableBody:
case *ast.TableFooter:
case *ast.TableRow:
r.tableRow(w, node, entering)
case *ast.BlockQuote:
r.blockQuote(w, node, entering)
case *ast.Aside:
r.aside(w, node, entering)
case *ast.CrossReference:
r.crossReference(w, node, entering)
case *ast.Index:
r.index(w, node, entering)
case *ast.Link:
r.link(w, node, entering)
case *ast.Math:
if entering {
r.out(w, node.Literal)
}
case *ast.Image:
r.image(w, node, entering)
case *ast.Code:
r.outs(w, "\\fB\\fC")
r.out(w, node.Literal)
r.outs(w, "\\fR")
case *ast.MathBlock:
r.mathBlock(w, node, entering)
case *ast.Subscript:
r.outOneOf(w, true, "\\d", "\\u")
if entering {
r.out(w, node.Literal)
}
r.outOneOf(w, false, "\\d", "\\u")
case *ast.Superscript:
r.outOneOf(w, true, "\\u", "\\d")
if entering {
r.out(w, node.Literal)
}
r.outOneOf(w, false, "\\u", "\\d")
default:
panic(fmt.Sprintf("Unknown node %T", node))
}
return ast.GoToNext
}
func (r *Renderer) callout(w io.Writer, node *ast.Callout, entering bool) {
if entering {
r.outs(w, "\\fB")
r.out(w, node.ID)
r.outs(w, "\\fP")
return
}
}
func (r *Renderer) text(w io.Writer, node *ast.Text, entering bool) {
if !entering {
return
}
text := node.Literal
parent := node.Parent
if parent != nil {
if _, ok := parent.(*ast.Heading); ok {
text = bytes.ToUpper(text)
text = append(text, byte('"'))
text = append([]byte{byte('"')}, text...)
}
}
r.out(w, text)
}
func (r *Renderer) footnotes(w io.Writer, node ast.Node, entering bool) {
if !entering {
return
}
r.outs(w, "\n.SH \""+strings.ToUpper(r.opts.Language.Footnotes())+"\"\n")
}
func (r *Renderer) RenderHeader(w io.Writer, _ ast.Node) {
if r.opts.Flags&ManFragment != 0 {
return
}
r.outs(w, `.\" Generated by Mmark Markdown Processer - mmark.miek.nl`+"\n")
}
func (r *Renderer) RenderFooter(w io.Writer, node ast.Node) {}
func (r *Renderer) bibliographyItem(w io.Writer, bib *mast.BibliographyItem, entering bool) {
if !entering {
return
}
if bib.Reference == nil {
return
}
r.outs(w, ".TP\n")
r.outs(w, fmt.Sprintf("[%s]\n", bib.Anchor))
for _, author := range bib.Reference.Front.Authors {
writeNonEmptyString(w, author.Fullname)
if author.Organization != nil {
writeNonEmptyString(w, author.Organization.Value)
}
}
writeNonEmptyString(w, bib.Reference.Front.Title)
if bib.Reference.Target != "" {
r.outs(w, "\\[la]")
r.outs(w, bib.Reference.Target)
r.outs(w, "\\[ra]")
}
writeNonEmptyString(w, bib.Reference.Front.Date.Year)
r.outs(w, "\n")
}
func writeNonEmptyString(w io.Writer, s string) {
| if s == "" {
return
}
io.WriteString(w, s)
io.WriteString(w, "\n")
}
| identifier_body | |
renderer.go | // The package man outputs man pages from mmmark markdown.
package man
// Lots of code copied from https://github.com/cpuguy83/go-md2man, but adapated to mmark
// and made to support mmark features.
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
"github.com/gomarkdown/markdown/ast"
"github.com/gomarkdown/markdown/html"
"github.com/mmarkdown/mmark/v2/lang"
"github.com/mmarkdown/mmark/v2/mast"
)
// Flags control optional behavior of Markdown renderer.
type Flags int
// HTML renderer configuration options.
const (
FlagsNone Flags = 0
ManFragment Flags = 1 << iota // Don't generate a complete document
CommonFlags Flags = FlagsNone
)
// RendererOptions is a collection of supplementary parameters tweaking
// the behavior of various parts of Markdown renderer.
type RendererOptions struct {
Flags Flags // Flags allow customizing this renderer's behavior
Language lang.Lang // Output language for the document.
// if set, called at the start of RenderNode(). Allows replacing rendering of some nodes
RenderNodeHook html.RenderNodeFunc
// Comments is a list of comments the renderer should detect when
// parsing code blocks and detecting callouts.
Comments [][]byte
}
// Renderer implements Renderer interface for Markdown output.
type Renderer struct {
opts RendererOptions
Title *mast.Title
listLevel int
allListLevel int
}
// NewRenderer creates and configures an Renderer object, which satisfies the Renderer interface.
func NewRenderer(opts RendererOptions) *Renderer {
return &Renderer{opts: opts}
}
func (r *Renderer) hardBreak(w io.Writer, node *ast.Hardbreak) {
r.outs(w, "\n.br\n")
}
func (r *Renderer) matter(w io.Writer, node *ast.DocumentMatter, entering bool) {
// TODO: what should this output?
}
func (r *Renderer) title(w io.Writer, node *mast.Title, entering bool) {
if !entering {
return
}
if node.Date.IsZero() {
node.Date = time.Now().UTC()
}
// track back to first space and assume the rest is the section, don't parse it as a number
i := len(node.Title) - 1
for i > 0 && node.Title[i-1] != ' ' {
i--
}
section := 1
title := node.Title
switch {
case i > 0:
d, err := strconv.Atoi(node.Title[i:])
if err != nil {
log.Print("No section number found at end of title, defaulting to 1")
} else {
section = d
title = node.Title[:i-1]
}
}
if i == 0 {
log.Print("No section number found at end of title, defaulting to 1")
}
r.outs(w, fmt.Sprintf(".TH %q", strings.ToUpper(title)))
r.outs(w, fmt.Sprintf(" %d", section))
r.outs(w, fmt.Sprintf(" %q", node.Date.Format("January 2006")))
r.outs(w, fmt.Sprintf(" %q", node.Area))
r.outs(w, fmt.Sprintf(" %q", node.Workgroup))
r.outs(w, "\n")
}
func (r *Renderer) heading(w io.Writer, node *ast.Heading, entering bool) {
if entering {
switch node.Level {
case 1, 2:
r.outs(w, "\n.SH ")
default:
r.outs(w, "\n.SS ")
}
}
}
func (r *Renderer) citation(w io.Writer, node *ast.Citation, entering bool) {
r.outs(w, "[")
for i, dest := range node.Destination {
if i > 0 {
r.outs(w, ", ")
}
r.out(w, dest)
}
r.outs(w, "]")
}
func (r *Renderer) paragraph(w io.Writer, para *ast.Paragraph, entering bool) {
if entering {
// If in lists, suppress paragraphs. Unless we know the list contains
// block level elements, but then only apply this after the first paragraph.
parent := para.Parent
if parent != nil {
if _, ok := parent.(*ast.ListItem); ok {
// if we're the first para return, otherwise output a PP
c := parent.GetChildren()
i := 0
par := 0
for i = range c |
return
}
}
r.outs(w, "\n.PP\n")
return
}
r.outs(w, "\n")
}
func (r *Renderer) list(w io.Writer, list *ast.List, entering bool) {
if list.IsFootnotesList {
return
}
// normal list
if entering {
r.allListLevel++
if list.ListFlags&ast.ListTypeOrdered == 0 && list.ListFlags&ast.ListTypeTerm == 0 && list.ListFlags&ast.ListTypeDefinition == 0 {
r.listLevel++
}
if r.allListLevel > 1 {
r.outs(w, "\n.RS\n")
} else {
r.outs(w, "\n")
}
return
}
if r.allListLevel > 1 {
r.outs(w, "\n.RE\n")
} else {
r.outs(w, "\n")
}
r.allListLevel--
if list.ListFlags&ast.ListTypeOrdered == 0 && list.ListFlags&ast.ListTypeTerm == 0 && list.ListFlags&ast.ListTypeDefinition == 0 {
r.listLevel--
}
}
func (r *Renderer) listItem(w io.Writer, listItem *ast.ListItem, entering bool) {
if entering {
// footnotes
if listItem.RefLink != nil {
// get number in the list
children := listItem.Parent.GetChildren()
for i := range children {
if listItem == children[i] {
r.outs(w, fmt.Sprintf("\n.IP [%d]\n", i+1))
}
}
return
}
x := listItem.ListFlags
switch {
case x&ast.ListTypeOrdered != 0:
children := listItem.GetParent().GetChildren()
i := 0
for i = 0; i < len(children); i++ {
if children[i] == listItem {
break
}
}
start := listItem.GetParent().(*ast.List).Start
r.outs(w, fmt.Sprintf(".IP %d\\. 4\n", start+i+1))
case x&ast.ListTypeTerm != 0:
r.outs(w, ".TP\n")
case x&ast.ListTypeDefinition != 0:
r.outs(w, "")
default:
if r.listLevel%2 == 0 {
r.outs(w, ".IP \\(en 4\n")
} else {
r.outs(w, ".IP \\(bu 4\n")
}
}
}
}
func (r *Renderer) codeBlock(w io.Writer, codeBlock *ast.CodeBlock, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n\n.nf\n")
escapeSpecialChars(r, w, codeBlock.Literal)
r.outs(w, "\n.fi\n.RE\n")
}
}
func (r *Renderer) table(w io.Writer, tab *ast.Table, entering bool) {
// The tbl renderer want to see the entire table's columns, rows first
if entering {
r.outs(w, "\n.RS\n.TS\nallbox;\n")
cells := rows(tab)
for r1 := 0; r1 < len(cells); r1++ {
align := ""
for c := 0; c < len(cells[r1]); c++ {
x := cells[r1][c]
switch x.Align {
case ast.TableAlignmentLeft:
align += "l "
case ast.TableAlignmentRight:
align += "r "
case ast.TableAlignmentCenter:
fallthrough
default:
align += "c "
}
if x.ColSpan > 0 {
align += strings.Repeat("s ", x.ColSpan-1)
}
}
r.outs(w, strings.TrimSpace(align)+"\n")
}
r.outs(w, ".\n")
return
}
r.outs(w, ".TE\n.RE\n\n")
}
func (r *Renderer) tableRow(w io.Writer, tableRow *ast.TableRow, entering bool) {
if !entering {
r.outs(w, "\n")
}
}
func (r *Renderer) tableCell(w io.Writer, tableCell *ast.TableCell, entering bool) {
if tableCell.IsHeader {
r.outOneOf(w, entering, "\\fB", "\\fP")
}
parent := tableCell.Parent
if tableCell == ast.GetFirstChild(parent) {
return
}
if entering {
r.outs(w, "\t")
return
}
}
func (r *Renderer) htmlSpan(w io.Writer, span *ast.HTMLSpan) {}
func (r *Renderer) crossReference(w io.Writer, cr *ast.CrossReference, entering bool) {
if !entering {
return
}
r.out(w, bytes.ToUpper(cr.Destination))
}
func (r *Renderer) index(w io.Writer, index *ast.Index, entering bool) {}
func (r *Renderer) link(w io.Writer, link *ast.Link, entering bool) {
if link.Footnote != nil {
if entering {
r.outs(w, fmt.Sprintf("\\u[%d]\\d", link.NoteID))
}
return
}
// !entering so the URL comes after the link text.
if !entering {
r.outs(w, "\n\\[la]")
r.out(w, link.Destination)
r.outs(w, "\\[ra]")
}
}
func (r *Renderer) image(w io.Writer, node *ast.Image, entering bool) {
// only works with `ascii-art` images
if !bytes.HasSuffix(node.Destination, []byte(".ascii-art")) {
// remove from tree, we can use RemoveFromTree, because that re-shuffles things and can
// make us output things twice.
node.SetChildren(nil)
node = nil
return
}
if !entering {
r.outs(w, "\n.fi\n.RE\n")
return
}
node.SetChildren(nil) // remove Title, if any, we can type set it.
r.outs(w, "\n.PP\n.RS\n\n.nf\n")
img, err := ioutil.ReadFile(string(node.Destination)) // markdown, doens't err, this can be an empty image, log maybe??
if err != nil {
img = []byte(err.Error())
}
escapeSpecialChars(r, w, img)
}
func (r *Renderer) mathBlock(w io.Writer, mathBlock *ast.MathBlock, entering bool) {
// may indent it?
}
func (r *Renderer) captionFigure(w io.Writer, figure *ast.CaptionFigure, entering bool) {
// check what we have here and throw away any non ascii-art figures
// CaptionFigure
// Paragraph
// Text
// Image 'url=array-vs-slice.svg'
// Text 'Array versus Slice svg'
// Text '\n'
// Image 'url=array-vs-slice.ascii-art'
// Text 'Array versus Slice ascii-art'
//
// The image with svg will be removed as child and then we continue to walk the AST.
for _, child := range figure.GetChildren() {
// for figures/images, these are wrapped below a paragraph.
// TODO: can there be more than 1 paragraph??
if p, ok := child.(*ast.Paragraph); ok {
for _, img := range p.GetChildren() {
x, ok := img.(*ast.Image)
if !ok {
continue
}
// if not ascii-art, remove entirely
if !bytes.HasSuffix(x.Destination, []byte(".ascii-art")) {
ast.RemoveFromTree(img) // this is save because we're not accessing any of the children just yet.
continue
}
img.SetChildren(nil) // remove alt text
}
}
}
}
func (r *Renderer) caption(w io.Writer, caption *ast.Caption, entering bool) {
what := ast.GetFirstChild(caption.Parent)
if !entering {
switch what.(type) {
case *ast.Table:
r.outs(w, "\n.RE\n")
case *ast.CodeBlock, *ast.Paragraph: // Paragraph is here because that wrap an image.
r.outs(w, "\n.RE\n")
case *ast.BlockQuote:
r.outs(w, "\n.RE\n")
}
return
}
// get parent, get first child for type
switch what.(type) {
case *ast.Table:
r.outs(w, "\n.RS\n")
case *ast.CodeBlock, *ast.Paragraph:
r.outs(w, "\n.RS\n")
case *ast.BlockQuote:
r.outs(w, "\n.RS\n")
r.outs(w, "\\(en ")
}
}
func (r *Renderer) blockQuote(w io.Writer, block *ast.BlockQuote, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n")
} else {
r.outs(w, "\n.RE\n")
}
}
func (r *Renderer) aside(w io.Writer, block *ast.Aside, entering bool) {
if entering {
r.outs(w, "\n.PP\n.RS\n")
} else {
r.outs(w, "\n.RE\n")
}
}
// RenderNode renders a markdown node to markdown.
func (r *Renderer) RenderNode(w io.Writer, node ast.Node, entering bool) ast.WalkStatus {
if r.opts.RenderNodeHook != nil {
status, didHandle := r.opts.RenderNodeHook(w, node, entering)
if didHandle {
return status
}
}
if attr := mast.AttributeFromNode(node); attr != nil && entering {
}
switch node := node.(type) {
case *ast.Document:
// do nothing
case *mast.Title:
r.title(w, node, entering)
r.Title = node // save for later.
case *mast.Authors:
r.authors(w, node, entering)
case *mast.Bibliography:
if entering {
r.outs(w, "\n.SH \"")
r.outs(w, strings.ToUpper(r.opts.Language.Bibliography()))
r.outs(w, "\"\n")
}
case *mast.BibliographyItem:
r.bibliographyItem(w, node, entering)
case *mast.DocumentIndex, *mast.IndexLetter, *mast.IndexItem, *mast.IndexSubItem, *mast.IndexLink:
case *mast.ReferenceBlock:
// ignore
case *ast.Footnotes:
r.footnotes(w, node, entering)
case *ast.Text:
r.text(w, node, entering)
case *ast.Softbreak:
// TODO
case *ast.Hardbreak:
r.hardBreak(w, node)
case *ast.NonBlockingSpace:
r.outs(w, "\\0")
case *ast.Callout:
r.callout(w, node, entering)
case *ast.Emph:
r.outOneOf(w, entering, "\\fI", "\\fP")
case *ast.Strong:
r.outOneOf(w, entering, "\\fB", "\\fP")
case *ast.Del:
r.outOneOf(w, entering, "~~", "~~")
case *ast.Citation:
r.citation(w, node, entering)
case *ast.DocumentMatter:
r.matter(w, node, entering)
case *ast.Heading:
r.heading(w, node, entering)
case *ast.HorizontalRule:
if entering {
r.outs(w, "\n.ti 0\n\\l'\\n(.l─'\n")
}
case *ast.Paragraph:
r.paragraph(w, node, entering)
case *ast.HTMLSpan:
r.out(w, node.Literal)
case *ast.HTMLBlock:
r.out(w, node.Literal)
case *ast.List:
r.list(w, node, entering)
case *ast.ListItem:
r.listItem(w, node, entering)
case *ast.CodeBlock:
r.codeBlock(w, node, entering)
case *ast.Caption:
r.caption(w, node, entering)
case *ast.CaptionFigure:
r.captionFigure(w, node, entering)
case *ast.Table:
r.table(w, node, entering)
case *ast.TableCell:
r.tableCell(w, node, entering)
case *ast.TableHeader:
case *ast.TableBody:
case *ast.TableFooter:
case *ast.TableRow:
r.tableRow(w, node, entering)
case *ast.BlockQuote:
r.blockQuote(w, node, entering)
case *ast.Aside:
r.aside(w, node, entering)
case *ast.CrossReference:
r.crossReference(w, node, entering)
case *ast.Index:
r.index(w, node, entering)
case *ast.Link:
r.link(w, node, entering)
case *ast.Math:
if entering {
r.out(w, node.Literal)
}
case *ast.Image:
r.image(w, node, entering)
case *ast.Code:
r.outs(w, "\\fB\\fC")
r.out(w, node.Literal)
r.outs(w, "\\fR")
case *ast.MathBlock:
r.mathBlock(w, node, entering)
case *ast.Subscript:
r.outOneOf(w, true, "\\d", "\\u")
if entering {
r.out(w, node.Literal)
}
r.outOneOf(w, false, "\\d", "\\u")
case *ast.Superscript:
r.outOneOf(w, true, "\\u", "\\d")
if entering {
r.out(w, node.Literal)
}
r.outOneOf(w, false, "\\u", "\\d")
default:
panic(fmt.Sprintf("Unknown node %T", node))
}
return ast.GoToNext
}
func (r *Renderer) callout(w io.Writer, node *ast.Callout, entering bool) {
if entering {
r.outs(w, "\\fB")
r.out(w, node.ID)
r.outs(w, "\\fP")
return
}
}
func (r *Renderer) text(w io.Writer, node *ast.Text, entering bool) {
if !entering {
return
}
text := node.Literal
parent := node.Parent
if parent != nil {
if _, ok := parent.(*ast.Heading); ok {
text = bytes.ToUpper(text)
text = append(text, byte('"'))
text = append([]byte{byte('"')}, text...)
}
}
r.out(w, text)
}
func (r *Renderer) footnotes(w io.Writer, node ast.Node, entering bool) {
if !entering {
return
}
r.outs(w, "\n.SH \""+strings.ToUpper(r.opts.Language.Footnotes())+"\"\n")
}
func (r *Renderer) RenderHeader(w io.Writer, _ ast.Node) {
if r.opts.Flags&ManFragment != 0 {
return
}
r.outs(w, `.\" Generated by Mmark Markdown Processer - mmark.miek.nl`+"\n")
}
func (r *Renderer) RenderFooter(w io.Writer, node ast.Node) {}
func (r *Renderer) bibliographyItem(w io.Writer, bib *mast.BibliographyItem, entering bool) {
if !entering {
return
}
if bib.Reference == nil {
return
}
r.outs(w, ".TP\n")
r.outs(w, fmt.Sprintf("[%s]\n", bib.Anchor))
for _, author := range bib.Reference.Front.Authors {
writeNonEmptyString(w, author.Fullname)
if author.Organization != nil {
writeNonEmptyString(w, author.Organization.Value)
}
}
writeNonEmptyString(w, bib.Reference.Front.Title)
if bib.Reference.Target != "" {
r.outs(w, "\\[la]")
r.outs(w, bib.Reference.Target)
r.outs(w, "\\[ra]")
}
writeNonEmptyString(w, bib.Reference.Front.Date.Year)
r.outs(w, "\n")
}
func writeNonEmptyString(w io.Writer, s string) {
if s == "" {
return
}
io.WriteString(w, s)
io.WriteString(w, "\n")
}
| {
_, ok := c[i].(*ast.Paragraph)
if ok {
par++
}
if c[i] == para {
if par > 1 {
// No .PP because that messes up formatting.
r.outs(w, "\n\n")
}
}
} | conditional_block |
main.rs | #[macro_use]
extern crate colored_print;
extern crate atty;
use colored_print::color::ConsoleColor;
use colored_print::color::ConsoleColor::*;
use std::env;
use std::ffi::OsStr;
use std::fmt;
use std::fmt::Display;
use std::fs;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
fn colorize() -> bool {
use atty;
use atty::Stream;
atty::is(Stream::Stdout)
}
fn modified_file_name(path: &Path, suffix: &str, ext: Option<&str>) -> String {
let name = path
.file_stem()
.expect("internal error: there are no files without name!")
.to_str()
.expect("internal error: file name cannot be represented in UTF-8.");
let ext = ext.map(|x| format!(".{}", x)).unwrap_or(format!(""));
format!("{}{}{}", name, suffix, ext)
}
enum CompilationResult {
Success {
ir_path: PathBuf,
llvm_ir: String,
cc_output: String,
},
Failure {
cc_output: String,
},
}
/// returns the result of compilation with clang (for reference)
fn reference_compile(src_path: &Path) -> io::Result<CompilationResult> {
let ir_path = src_path.with_file_name(modified_file_name(src_path, "_ref", Some("ll")));
// compile
let output = Command::new("clang")
.arg("-O0")
.arg("-S")
.arg("-emit-llvm")
.arg("-o")
.arg(ir_path.display().to_string())
.arg(src_path.display().to_string())
.output()?;
let cc_output = String::from_utf8_lossy(&output.stderr).into_owned();
if !ir_path.exists() {
return Ok(CompilationResult::Failure { cc_output });
}
let mut llvm_ir = String::new();
File::open(&ir_path)?.read_to_string(&mut llvm_ir)?;
Ok(CompilationResult::Success {
ir_path,
llvm_ir,
cc_output,
})
}
/// returns the llvm_ir of compilation with our current compiler
fn current_compile(src_path: &Path) -> io::Result<CompilationResult> {
let ir_path = src_path.with_file_name(modified_file_name(src_path, "_cur", Some("ll")));
// compile
let output = Command::new("cargo")
.arg("run")
.arg("--")
.arg(src_path.display().to_string())
.output()?;
let cc_output = String::from_utf8_lossy(&output.stderr).into_owned();
if output.stdout.is_empty() {
// compilation failed.
return Ok(CompilationResult::Failure { cc_output });
}
File::create(&ir_path)?.write_all(&output.stdout)?;
let llvm_ir = String::from_utf8_lossy(&output.stdout).into_owned();
Ok(CompilationResult::Success {
ir_path,
llvm_ir,
cc_output,
})
}
enum AssemblyResult {
Success {
asm_output: String,
exec_path: PathBuf,
},
Failure {
asm_output: String,
},
Unreached,
}
fn compile_llvm_ir(src_path: &Path) -> io::Result<AssemblyResult> {
let exec_path = if cfg!(windows) {
src_path.with_extension("exe")
} else {
let file_name = src_path
.file_stem()
.expect("internal error: no file has no basename");
src_path.with_file_name(file_name)
};
if !src_path.exists() {
panic!("internal error: compilation has succeeded but no LLVM IR?");
}
let output = Command::new("clang")
.arg("-o")
.arg(&exec_path)
.arg(&src_path)
.output()?;
let asm_output = String::from_utf8_lossy(&output.stderr).into_owned();
if !exec_path.exists() {
return Ok(AssemblyResult::Failure { asm_output });
}
Ok(AssemblyResult::Success {
asm_output,
exec_path,
})
}
enum ExecutionResult {
Success {
status: Option<i32>,
stdout: String,
stderr: String,
},
Unreached,
}
/// returns the execution of the binary placed in the specified path
fn execute(path: &Path) -> io::Result<ExecutionResult> {
if !path.exists() {
return Ok(ExecutionResult::Success {
status: None,
stdout: String::new(),
stderr: String::new(),
});
}
let mut child = Command::new(&path)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()?;
let status = child.wait()?;
let mut child_stdout = child
.stdout
.expect("internal error: failed to get child stdout.");
let mut child_stderr = child
.stderr
.expect("internel error: failed to get child stderr.");
let (mut stdout, mut stderr) = (String::new(), String::new());
child_stdout.read_to_string(&mut stdout)?;
child_stderr.read_to_string(&mut stderr)?;
let status = status.code();
Ok(ExecutionResult::Success {
status,
stdout,
stderr,
})
}
fn print_heading(color: ConsoleColor, heading: &str) |
fn print_output(retval: Option<i32>, output: &str) {
colored_print!{
colorize();
Reset, "{}", output;
}
if let Some(code) = retval {
colored_println!{
colorize();
Cyan, "return code";
Reset, ": {}", code;
}
}
}
fn print_stderr(stderr: impl Display) {
colored_print!{
colorize();
LightMagenta, "{}", stderr;
}
}
#[derive(Debug, Copy, Clone)]
enum Version {
Reference,
Current,
}
impl Version {
pub fn get_compiler_func(&self) -> fn(path: &Path) -> io::Result<CompilationResult> {
match *self {
Version::Reference => reference_compile,
Version::Current => current_compile,
}
}
}
impl fmt::Display for Version {
fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result {
match *self {
Version::Reference => write!(b, "Reference"),
Version::Current => write!(b, " Current "),
}
}
}
struct Results {
compilation: CompilationResult,
assembly: AssemblyResult,
execution: ExecutionResult,
}
fn do_for(version: Version, path: &Path) -> io::Result<Results> {
let (compilation, assembly, execution);
// explicitly denote borrowing region
{
compilation = (version.get_compiler_func())(&path)?;
let ir_path = match compilation {
failure @ CompilationResult::Failure { .. } => {
return Ok(Results {
compilation: failure,
assembly: AssemblyResult::Unreached,
execution: ExecutionResult::Unreached,
})
}
CompilationResult::Success { ref ir_path, .. } => ir_path.clone(),
};
assembly = compile_llvm_ir(&ir_path)?;
let exec_path = match assembly {
failure @ AssemblyResult::Failure { .. } => {
return Ok(Results {
compilation: compilation,
assembly: failure,
execution: ExecutionResult::Unreached,
})
}
AssemblyResult::Success { ref exec_path, .. } => exec_path.clone(),
AssemblyResult::Unreached => unreachable!(),
};
execution = execute(&exec_path)?;
}
Ok(Results {
compilation,
assembly,
execution,
})
}
fn judge(refr: &ExecutionResult, curr: &ExecutionResult) -> (bool, ConsoleColor, &'static str) {
const OK: (bool, ConsoleColor, &str) = (true, Green, "OK");
const NG: (bool, ConsoleColor, &str) = (false, Red, "NG");
use ExecutionResult::Success;
match (refr, curr) {
(
Success {
status: ref refr_status,
stdout: ref refr_stdout,
..
},
Success {
status: ref curr_status,
stdout: ref curr_stdout,
..
},
) => {
if (refr_status, refr_stdout) == (curr_status, curr_stdout) {
OK
} else {
NG
}
}
_ => NG,
}
}
fn print_for(version: Version, results: Results) {
print_heading(
LightGreen,
&format!("==================== {} ====================", version),
);
use {AssemblyResult as AR, CompilationResult as CR, ExecutionResult as ER};
print_heading(LightBlue, "> Compilation (C)");
match results.compilation {
CR::Success {
cc_output, llvm_ir, ..
} => {
print_stderr(&cc_output);
print_output(None, &llvm_ir);
}
CR::Failure { cc_output, .. } => {
print_stderr(&cc_output);
return;
}
}
print_heading(LightBlue, "> Compilation (LLVM IR)");
match results.assembly {
AR::Success { asm_output, .. } => {
print_stderr(&asm_output);
}
AR::Failure { asm_output, .. } => {
print_stderr(&asm_output);
return;
}
AR::Unreached => unreachable!(),
}
print_heading(LightBlue, "> Execution");
match results.execution {
ER::Success {
status,
stdout,
stderr,
} => {
print_stderr(&stderr);
print_output(status, &stdout);
}
ER::Unreached => unreachable!(),
}
}
fn main() -> io::Result<()> {
let verbose = env::args().any(|arg| arg == "--verbose" || arg == "-v");
let test_src_dir: PathBuf = ["test", "ok"].iter().collect();
walk_dir(
&test_src_dir,
|path| path.extension().and_then(OsStr::to_str) != Some("c"),
|path| {
if verbose {
colored_println! {
colorize();
LightGreen, "Removing ";
Reset, "{}", path.display();
}
}
fs::remove_file(&path)
},
)?;
let mut path_to_test: Vec<_> = env::args()
.skip(1)
.filter(|arg| !arg.starts_with("-"))
.map(|file_name| test_src_dir.join(file_name))
.collect();
if path_to_test.is_empty() {
path_to_test = walk_dir(
&test_src_dir,
|path| path.extension().and_then(OsStr::to_str) == Some("c"),
|path| Ok(path.to_path_buf()),
)?;
}
let mut any_fails = false;
for path in path_to_test {
colored_print!{
colorize();
LightGreen, " Testing ";
Reset, "file ";
Yellow, "{}", path.display();
Reset, " ... ";
}
if !path.exists() {
println!("not found");
continue;
}
let refr = do_for(Version::Reference, &path)?;
let curr = do_for(Version::Current, &path)?;
let (status, color, judge) = judge(&refr.execution, &curr.execution);
colored_println!{
colorize();
color, "{}", judge;
}
// print info when verbose mode or something fails
if verbose || !status {
print_for(Version::Reference, refr);
print_for(Version::Current, curr);
}
any_fails |= !status;
}
if !any_fails {
Ok(())
} else {
Err(io::Error::new(io::ErrorKind::Other, "some test fails."))
}
}
fn walk_dir<T>(
dir: &Path,
path_filter: impl Fn(&Path) -> bool + Copy,
cb: impl Fn(&Path) -> io::Result<T> + Copy,
) -> io::Result<Vec<T>> {
let mut result = Vec::new();
for entry in fs::read_dir(dir)? {
let entry = entry?;
let path = entry.path();
if !path_filter(&path) {
continue;
}
if path.is_dir() {
walk_dir(&path, path_filter, cb)?;
} else {
result.push(cb(&path)?);
}
}
Ok(result)
}
| {
colored_println!{
colorize();
color, "{} ", heading;
}
} | identifier_body |
main.rs | #[macro_use]
extern crate colored_print;
extern crate atty;
use colored_print::color::ConsoleColor;
use colored_print::color::ConsoleColor::*;
use std::env;
use std::ffi::OsStr;
use std::fmt;
use std::fmt::Display;
use std::fs;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
fn colorize() -> bool {
use atty;
use atty::Stream;
atty::is(Stream::Stdout)
}
fn modified_file_name(path: &Path, suffix: &str, ext: Option<&str>) -> String {
let name = path
.file_stem()
.expect("internal error: there are no files without name!")
.to_str()
.expect("internal error: file name cannot be represented in UTF-8.");
let ext = ext.map(|x| format!(".{}", x)).unwrap_or(format!(""));
format!("{}{}{}", name, suffix, ext)
}
enum CompilationResult {
Success {
ir_path: PathBuf,
llvm_ir: String,
cc_output: String,
},
Failure {
cc_output: String,
},
}
/// returns the result of compilation with clang (for reference)
fn reference_compile(src_path: &Path) -> io::Result<CompilationResult> {
let ir_path = src_path.with_file_name(modified_file_name(src_path, "_ref", Some("ll")));
// compile | .arg("-emit-llvm")
.arg("-o")
.arg(ir_path.display().to_string())
.arg(src_path.display().to_string())
.output()?;
let cc_output = String::from_utf8_lossy(&output.stderr).into_owned();
if !ir_path.exists() {
return Ok(CompilationResult::Failure { cc_output });
}
let mut llvm_ir = String::new();
File::open(&ir_path)?.read_to_string(&mut llvm_ir)?;
Ok(CompilationResult::Success {
ir_path,
llvm_ir,
cc_output,
})
}
/// returns the llvm_ir of compilation with our current compiler
fn current_compile(src_path: &Path) -> io::Result<CompilationResult> {
let ir_path = src_path.with_file_name(modified_file_name(src_path, "_cur", Some("ll")));
// compile
let output = Command::new("cargo")
.arg("run")
.arg("--")
.arg(src_path.display().to_string())
.output()?;
let cc_output = String::from_utf8_lossy(&output.stderr).into_owned();
if output.stdout.is_empty() {
// compilation failed.
return Ok(CompilationResult::Failure { cc_output });
}
File::create(&ir_path)?.write_all(&output.stdout)?;
let llvm_ir = String::from_utf8_lossy(&output.stdout).into_owned();
Ok(CompilationResult::Success {
ir_path,
llvm_ir,
cc_output,
})
}
enum AssemblyResult {
Success {
asm_output: String,
exec_path: PathBuf,
},
Failure {
asm_output: String,
},
Unreached,
}
fn compile_llvm_ir(src_path: &Path) -> io::Result<AssemblyResult> {
let exec_path = if cfg!(windows) {
src_path.with_extension("exe")
} else {
let file_name = src_path
.file_stem()
.expect("internal error: no file has no basename");
src_path.with_file_name(file_name)
};
if !src_path.exists() {
panic!("internal error: compilation has succeeded but no LLVM IR?");
}
let output = Command::new("clang")
.arg("-o")
.arg(&exec_path)
.arg(&src_path)
.output()?;
let asm_output = String::from_utf8_lossy(&output.stderr).into_owned();
if !exec_path.exists() {
return Ok(AssemblyResult::Failure { asm_output });
}
Ok(AssemblyResult::Success {
asm_output,
exec_path,
})
}
enum ExecutionResult {
Success {
status: Option<i32>,
stdout: String,
stderr: String,
},
Unreached,
}
/// returns the execution of the binary placed in the specified path
fn execute(path: &Path) -> io::Result<ExecutionResult> {
if !path.exists() {
return Ok(ExecutionResult::Success {
status: None,
stdout: String::new(),
stderr: String::new(),
});
}
let mut child = Command::new(&path)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()?;
let status = child.wait()?;
let mut child_stdout = child
.stdout
.expect("internal error: failed to get child stdout.");
let mut child_stderr = child
.stderr
.expect("internel error: failed to get child stderr.");
let (mut stdout, mut stderr) = (String::new(), String::new());
child_stdout.read_to_string(&mut stdout)?;
child_stderr.read_to_string(&mut stderr)?;
let status = status.code();
Ok(ExecutionResult::Success {
status,
stdout,
stderr,
})
}
fn print_heading(color: ConsoleColor, heading: &str) {
colored_println!{
colorize();
color, "{} ", heading;
}
}
fn print_output(retval: Option<i32>, output: &str) {
colored_print!{
colorize();
Reset, "{}", output;
}
if let Some(code) = retval {
colored_println!{
colorize();
Cyan, "return code";
Reset, ": {}", code;
}
}
}
fn print_stderr(stderr: impl Display) {
colored_print!{
colorize();
LightMagenta, "{}", stderr;
}
}
#[derive(Debug, Copy, Clone)]
enum Version {
Reference,
Current,
}
impl Version {
pub fn get_compiler_func(&self) -> fn(path: &Path) -> io::Result<CompilationResult> {
match *self {
Version::Reference => reference_compile,
Version::Current => current_compile,
}
}
}
impl fmt::Display for Version {
fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result {
match *self {
Version::Reference => write!(b, "Reference"),
Version::Current => write!(b, " Current "),
}
}
}
struct Results {
compilation: CompilationResult,
assembly: AssemblyResult,
execution: ExecutionResult,
}
fn do_for(version: Version, path: &Path) -> io::Result<Results> {
let (compilation, assembly, execution);
// explicitly denote borrowing region
{
compilation = (version.get_compiler_func())(&path)?;
let ir_path = match compilation {
failure @ CompilationResult::Failure { .. } => {
return Ok(Results {
compilation: failure,
assembly: AssemblyResult::Unreached,
execution: ExecutionResult::Unreached,
})
}
CompilationResult::Success { ref ir_path, .. } => ir_path.clone(),
};
assembly = compile_llvm_ir(&ir_path)?;
let exec_path = match assembly {
failure @ AssemblyResult::Failure { .. } => {
return Ok(Results {
compilation: compilation,
assembly: failure,
execution: ExecutionResult::Unreached,
})
}
AssemblyResult::Success { ref exec_path, .. } => exec_path.clone(),
AssemblyResult::Unreached => unreachable!(),
};
execution = execute(&exec_path)?;
}
Ok(Results {
compilation,
assembly,
execution,
})
}
fn judge(refr: &ExecutionResult, curr: &ExecutionResult) -> (bool, ConsoleColor, &'static str) {
const OK: (bool, ConsoleColor, &str) = (true, Green, "OK");
const NG: (bool, ConsoleColor, &str) = (false, Red, "NG");
use ExecutionResult::Success;
match (refr, curr) {
(
Success {
status: ref refr_status,
stdout: ref refr_stdout,
..
},
Success {
status: ref curr_status,
stdout: ref curr_stdout,
..
},
) => {
if (refr_status, refr_stdout) == (curr_status, curr_stdout) {
OK
} else {
NG
}
}
_ => NG,
}
}
fn print_for(version: Version, results: Results) {
print_heading(
LightGreen,
&format!("==================== {} ====================", version),
);
use {AssemblyResult as AR, CompilationResult as CR, ExecutionResult as ER};
print_heading(LightBlue, "> Compilation (C)");
match results.compilation {
CR::Success {
cc_output, llvm_ir, ..
} => {
print_stderr(&cc_output);
print_output(None, &llvm_ir);
}
CR::Failure { cc_output, .. } => {
print_stderr(&cc_output);
return;
}
}
print_heading(LightBlue, "> Compilation (LLVM IR)");
match results.assembly {
AR::Success { asm_output, .. } => {
print_stderr(&asm_output);
}
AR::Failure { asm_output, .. } => {
print_stderr(&asm_output);
return;
}
AR::Unreached => unreachable!(),
}
print_heading(LightBlue, "> Execution");
match results.execution {
ER::Success {
status,
stdout,
stderr,
} => {
print_stderr(&stderr);
print_output(status, &stdout);
}
ER::Unreached => unreachable!(),
}
}
fn main() -> io::Result<()> {
let verbose = env::args().any(|arg| arg == "--verbose" || arg == "-v");
let test_src_dir: PathBuf = ["test", "ok"].iter().collect();
walk_dir(
&test_src_dir,
|path| path.extension().and_then(OsStr::to_str) != Some("c"),
|path| {
if verbose {
colored_println! {
colorize();
LightGreen, "Removing ";
Reset, "{}", path.display();
}
}
fs::remove_file(&path)
},
)?;
let mut path_to_test: Vec<_> = env::args()
.skip(1)
.filter(|arg| !arg.starts_with("-"))
.map(|file_name| test_src_dir.join(file_name))
.collect();
if path_to_test.is_empty() {
path_to_test = walk_dir(
&test_src_dir,
|path| path.extension().and_then(OsStr::to_str) == Some("c"),
|path| Ok(path.to_path_buf()),
)?;
}
let mut any_fails = false;
for path in path_to_test {
colored_print!{
colorize();
LightGreen, " Testing ";
Reset, "file ";
Yellow, "{}", path.display();
Reset, " ... ";
}
if !path.exists() {
println!("not found");
continue;
}
let refr = do_for(Version::Reference, &path)?;
let curr = do_for(Version::Current, &path)?;
let (status, color, judge) = judge(&refr.execution, &curr.execution);
colored_println!{
colorize();
color, "{}", judge;
}
// print info when verbose mode or something fails
if verbose || !status {
print_for(Version::Reference, refr);
print_for(Version::Current, curr);
}
any_fails |= !status;
}
if !any_fails {
Ok(())
} else {
Err(io::Error::new(io::ErrorKind::Other, "some test fails."))
}
}
fn walk_dir<T>(
dir: &Path,
path_filter: impl Fn(&Path) -> bool + Copy,
cb: impl Fn(&Path) -> io::Result<T> + Copy,
) -> io::Result<Vec<T>> {
let mut result = Vec::new();
for entry in fs::read_dir(dir)? {
let entry = entry?;
let path = entry.path();
if !path_filter(&path) {
continue;
}
if path.is_dir() {
walk_dir(&path, path_filter, cb)?;
} else {
result.push(cb(&path)?);
}
}
Ok(result)
} | let output = Command::new("clang")
.arg("-O0")
.arg("-S") | random_line_split |
main.rs | #[macro_use]
extern crate colored_print;
extern crate atty;
use colored_print::color::ConsoleColor;
use colored_print::color::ConsoleColor::*;
use std::env;
use std::ffi::OsStr;
use std::fmt;
use std::fmt::Display;
use std::fs;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
fn colorize() -> bool {
use atty;
use atty::Stream;
atty::is(Stream::Stdout)
}
fn modified_file_name(path: &Path, suffix: &str, ext: Option<&str>) -> String {
let name = path
.file_stem()
.expect("internal error: there are no files without name!")
.to_str()
.expect("internal error: file name cannot be represented in UTF-8.");
let ext = ext.map(|x| format!(".{}", x)).unwrap_or(format!(""));
format!("{}{}{}", name, suffix, ext)
}
enum CompilationResult {
Success {
ir_path: PathBuf,
llvm_ir: String,
cc_output: String,
},
Failure {
cc_output: String,
},
}
/// returns the result of compilation with clang (for reference)
fn reference_compile(src_path: &Path) -> io::Result<CompilationResult> {
let ir_path = src_path.with_file_name(modified_file_name(src_path, "_ref", Some("ll")));
// compile
let output = Command::new("clang")
.arg("-O0")
.arg("-S")
.arg("-emit-llvm")
.arg("-o")
.arg(ir_path.display().to_string())
.arg(src_path.display().to_string())
.output()?;
let cc_output = String::from_utf8_lossy(&output.stderr).into_owned();
if !ir_path.exists() {
return Ok(CompilationResult::Failure { cc_output });
}
let mut llvm_ir = String::new();
File::open(&ir_path)?.read_to_string(&mut llvm_ir)?;
Ok(CompilationResult::Success {
ir_path,
llvm_ir,
cc_output,
})
}
/// returns the llvm_ir of compilation with our current compiler
fn current_compile(src_path: &Path) -> io::Result<CompilationResult> {
let ir_path = src_path.with_file_name(modified_file_name(src_path, "_cur", Some("ll")));
// compile
let output = Command::new("cargo")
.arg("run")
.arg("--")
.arg(src_path.display().to_string())
.output()?;
let cc_output = String::from_utf8_lossy(&output.stderr).into_owned();
if output.stdout.is_empty() {
// compilation failed.
return Ok(CompilationResult::Failure { cc_output });
}
File::create(&ir_path)?.write_all(&output.stdout)?;
let llvm_ir = String::from_utf8_lossy(&output.stdout).into_owned();
Ok(CompilationResult::Success {
ir_path,
llvm_ir,
cc_output,
})
}
enum AssemblyResult {
Success {
asm_output: String,
exec_path: PathBuf,
},
Failure {
asm_output: String,
},
Unreached,
}
fn compile_llvm_ir(src_path: &Path) -> io::Result<AssemblyResult> {
let exec_path = if cfg!(windows) {
src_path.with_extension("exe")
} else {
let file_name = src_path
.file_stem()
.expect("internal error: no file has no basename");
src_path.with_file_name(file_name)
};
if !src_path.exists() {
panic!("internal error: compilation has succeeded but no LLVM IR?");
}
let output = Command::new("clang")
.arg("-o")
.arg(&exec_path)
.arg(&src_path)
.output()?;
let asm_output = String::from_utf8_lossy(&output.stderr).into_owned();
if !exec_path.exists() {
return Ok(AssemblyResult::Failure { asm_output });
}
Ok(AssemblyResult::Success {
asm_output,
exec_path,
})
}
enum ExecutionResult {
Success {
status: Option<i32>,
stdout: String,
stderr: String,
},
Unreached,
}
/// returns the execution of the binary placed in the specified path
fn execute(path: &Path) -> io::Result<ExecutionResult> {
if !path.exists() {
return Ok(ExecutionResult::Success {
status: None,
stdout: String::new(),
stderr: String::new(),
});
}
let mut child = Command::new(&path)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()?;
let status = child.wait()?;
let mut child_stdout = child
.stdout
.expect("internal error: failed to get child stdout.");
let mut child_stderr = child
.stderr
.expect("internel error: failed to get child stderr.");
let (mut stdout, mut stderr) = (String::new(), String::new());
child_stdout.read_to_string(&mut stdout)?;
child_stderr.read_to_string(&mut stderr)?;
let status = status.code();
Ok(ExecutionResult::Success {
status,
stdout,
stderr,
})
}
fn print_heading(color: ConsoleColor, heading: &str) {
colored_println!{
colorize();
color, "{} ", heading;
}
}
fn print_output(retval: Option<i32>, output: &str) {
colored_print!{
colorize();
Reset, "{}", output;
}
if let Some(code) = retval {
colored_println!{
colorize();
Cyan, "return code";
Reset, ": {}", code;
}
}
}
fn print_stderr(stderr: impl Display) {
colored_print!{
colorize();
LightMagenta, "{}", stderr;
}
}
#[derive(Debug, Copy, Clone)]
enum Version {
Reference,
Current,
}
impl Version {
pub fn get_compiler_func(&self) -> fn(path: &Path) -> io::Result<CompilationResult> {
match *self {
Version::Reference => reference_compile,
Version::Current => current_compile,
}
}
}
impl fmt::Display for Version {
fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result {
match *self {
Version::Reference => write!(b, "Reference"),
Version::Current => write!(b, " Current "),
}
}
}
struct | {
compilation: CompilationResult,
assembly: AssemblyResult,
execution: ExecutionResult,
}
fn do_for(version: Version, path: &Path) -> io::Result<Results> {
let (compilation, assembly, execution);
// explicitly denote borrowing region
{
compilation = (version.get_compiler_func())(&path)?;
let ir_path = match compilation {
failure @ CompilationResult::Failure { .. } => {
return Ok(Results {
compilation: failure,
assembly: AssemblyResult::Unreached,
execution: ExecutionResult::Unreached,
})
}
CompilationResult::Success { ref ir_path, .. } => ir_path.clone(),
};
assembly = compile_llvm_ir(&ir_path)?;
let exec_path = match assembly {
failure @ AssemblyResult::Failure { .. } => {
return Ok(Results {
compilation: compilation,
assembly: failure,
execution: ExecutionResult::Unreached,
})
}
AssemblyResult::Success { ref exec_path, .. } => exec_path.clone(),
AssemblyResult::Unreached => unreachable!(),
};
execution = execute(&exec_path)?;
}
Ok(Results {
compilation,
assembly,
execution,
})
}
fn judge(refr: &ExecutionResult, curr: &ExecutionResult) -> (bool, ConsoleColor, &'static str) {
const OK: (bool, ConsoleColor, &str) = (true, Green, "OK");
const NG: (bool, ConsoleColor, &str) = (false, Red, "NG");
use ExecutionResult::Success;
match (refr, curr) {
(
Success {
status: ref refr_status,
stdout: ref refr_stdout,
..
},
Success {
status: ref curr_status,
stdout: ref curr_stdout,
..
},
) => {
if (refr_status, refr_stdout) == (curr_status, curr_stdout) {
OK
} else {
NG
}
}
_ => NG,
}
}
fn print_for(version: Version, results: Results) {
print_heading(
LightGreen,
&format!("==================== {} ====================", version),
);
use {AssemblyResult as AR, CompilationResult as CR, ExecutionResult as ER};
print_heading(LightBlue, "> Compilation (C)");
match results.compilation {
CR::Success {
cc_output, llvm_ir, ..
} => {
print_stderr(&cc_output);
print_output(None, &llvm_ir);
}
CR::Failure { cc_output, .. } => {
print_stderr(&cc_output);
return;
}
}
print_heading(LightBlue, "> Compilation (LLVM IR)");
match results.assembly {
AR::Success { asm_output, .. } => {
print_stderr(&asm_output);
}
AR::Failure { asm_output, .. } => {
print_stderr(&asm_output);
return;
}
AR::Unreached => unreachable!(),
}
print_heading(LightBlue, "> Execution");
match results.execution {
ER::Success {
status,
stdout,
stderr,
} => {
print_stderr(&stderr);
print_output(status, &stdout);
}
ER::Unreached => unreachable!(),
}
}
fn main() -> io::Result<()> {
let verbose = env::args().any(|arg| arg == "--verbose" || arg == "-v");
let test_src_dir: PathBuf = ["test", "ok"].iter().collect();
walk_dir(
&test_src_dir,
|path| path.extension().and_then(OsStr::to_str) != Some("c"),
|path| {
if verbose {
colored_println! {
colorize();
LightGreen, "Removing ";
Reset, "{}", path.display();
}
}
fs::remove_file(&path)
},
)?;
let mut path_to_test: Vec<_> = env::args()
.skip(1)
.filter(|arg| !arg.starts_with("-"))
.map(|file_name| test_src_dir.join(file_name))
.collect();
if path_to_test.is_empty() {
path_to_test = walk_dir(
&test_src_dir,
|path| path.extension().and_then(OsStr::to_str) == Some("c"),
|path| Ok(path.to_path_buf()),
)?;
}
let mut any_fails = false;
for path in path_to_test {
colored_print!{
colorize();
LightGreen, " Testing ";
Reset, "file ";
Yellow, "{}", path.display();
Reset, " ... ";
}
if !path.exists() {
println!("not found");
continue;
}
let refr = do_for(Version::Reference, &path)?;
let curr = do_for(Version::Current, &path)?;
let (status, color, judge) = judge(&refr.execution, &curr.execution);
colored_println!{
colorize();
color, "{}", judge;
}
// print info when verbose mode or something fails
if verbose || !status {
print_for(Version::Reference, refr);
print_for(Version::Current, curr);
}
any_fails |= !status;
}
if !any_fails {
Ok(())
} else {
Err(io::Error::new(io::ErrorKind::Other, "some test fails."))
}
}
fn walk_dir<T>(
dir: &Path,
path_filter: impl Fn(&Path) -> bool + Copy,
cb: impl Fn(&Path) -> io::Result<T> + Copy,
) -> io::Result<Vec<T>> {
let mut result = Vec::new();
for entry in fs::read_dir(dir)? {
let entry = entry?;
let path = entry.path();
if !path_filter(&path) {
continue;
}
if path.is_dir() {
walk_dir(&path, path_filter, cb)?;
} else {
result.push(cb(&path)?);
}
}
Ok(result)
}
| Results | identifier_name |
wasm3.go | package wasm3
/*
#cgo LDFLAGS: -lm
// #cgo CFLAGS: -Iinclude
// #cgo darwin LDFLAGS: -L${SRCDIR}/lib/darwin -lm3
// #cgo !android,linux LDFLAGS: -L${SRCDIR}/lib/linux -lm3 -lm
// #cgo android,arm LDFLAGS: -L${SRCDIR}/lib/android/armeabi-v7a -lm3 -lm
// #cgo android,arm64 LDFLAGS: -L${SRCDIR}/lib/android/arm64-v8a -lm3 -lm
// #cgo android,386 LDFLAGS: -L${SRCDIR}/lib/android/x86 -lm3 -lm
// #cgo android,amd64 LDFLAGS: -L${SRCDIR}/lib/android/x86_64 -lm3 -lm
#include "wasm3.h"
#include "m3_api_libc.h"
#include "m3_api_wasi.h"
#include "m3_env.h"
#include "go-wasm3.h"
#include <stdio.h>
typedef uint32_t __wasi_size_t;
// #include "extra/wasi_core.h"
IM3Function module_get_function(IM3Module i_module, int index);
IM3Function module_get_imported_function(IM3Module i_module, int index);
int call(IM3Function i_function, uint32_t i_argc, void* i_argv, void* o_result);
int get_allocated_memory_length(IM3Runtime i_runtime);
u8* get_allocated_memory(IM3Runtime i_runtime);
const void * mowrapper(IM3Runtime runtime, uint64_t * _sp, void * _mem);
int attachFunction(IM3Module i_module, char* moduleName, char* functionName, char* signature);
void* m3ApiOffsetToPtr(void* offset, void* _mem);
const char* function_get_import_module(IM3Function i_function);
const char* function_get_import_field(IM3Function i_function);
int findFunction(IM3Function * o_function, IM3Runtime i_runtime, const char * const i_moduleName, const char * const i_functionName);
void get_function(IM3Function * o_function, IM3Module i_module, int i);
u8 function_get_arg_type(IM3Function i_function, int index);
typedef struct wasi_iovec_t
{
__wasi_size_t buf;
__wasi_size_t buf_len;
} wasi_iovec_t;
*/
import "C"
import (
"errors"
"fmt"
"reflect"
"sync"
"unsafe"
)
const (
PageSize uint32 = 0x10000
)
// RuntimeT is an alias for IM3Runtime
type RuntimeT C.IM3Runtime
// EnvironmentT is an alias for IM3Environment
type EnvironmentT C.IM3Environment
// ModuleT is an alias for IM3Module
type ModuleT C.IM3Module
// FunctionT is an alias for IM3Function
type FunctionT C.IM3Function
// FuncTypeT is an alias for IM3FuncType
type FuncTypeT C.IM3FuncType
// ResultT is an alias for M3Result
type ResultT C.M3Result
// WasiIoVec is an alias for wasi_iovec_t
type WasiIoVec C.wasi_iovec_t
// CallbackFunction is the signature for callbacks
type CallbackFunction func(runtime RuntimeT, sp unsafe.Pointer, mem unsafe.Pointer) int
var slotsToCallbacks = make(map[int]CallbackFunction)
// GetBuf return the internal buffer index
func (w *WasiIoVec) GetBuf() uint32 {
return uint32(w.buf)
}
// GetBufLen return the buffer len
func (w *WasiIoVec) GetBufLen() int {
return int(w.buf_len)
}
//export dynamicFunctionWrapper
func dynamicFunctionWrapper(runtime RuntimeT, _sp unsafe.Pointer, _mem unsafe.Pointer, slot uint64) int {
lock.Lock()
fn := slotsToCallbacks[int(slot)]
lock.Unlock()
return fn(runtime, _sp, _mem)
}
var (
errParseModule = errors.New("Parse error")
errLoadModule = errors.New("Load error")
errFuncLookupFailed = errors.New("Function lookup failed")
)
// Config holds the runtime and environment configuration
type Config struct {
Environment *Environment
StackSize uint
// EnableWASI bool
EnableSpecTest bool
}
// Runtime wraps a WASM3 runtime
type Runtime struct {
ptr RuntimeT
cfg *Config
}
// Ptr returns a IM3Runtime pointer
func (r *Runtime) Ptr() C.IM3Runtime {
return (C.IM3Runtime)(r.ptr)
}
// Load wraps the parse and load module calls.
// This will be replaced by env.ParseModule and Runtime.LoadModule.
func (r *Runtime) Load(wasmBytes []byte) (*Module, error) {
result := C.m3Err_none
bytes := C.CBytes(wasmBytes)
length := len(wasmBytes)
var module C.IM3Module
result = C.m3_ParseModule(
r.cfg.Environment.Ptr(),
&module,
(*C.uchar)(bytes),
C.uint(length),
)
if result != nil {
return nil, errParseModule
}
if module.memoryImported {
module.memoryImported = false
}
result = C.m3_LoadModule(
r.Ptr(),
module,
)
if result != nil {
return nil, errLoadModule
}
result = C.m3_LinkSpecTest(r.Ptr().modules)
if result != nil {
return nil, errors.New("LinkSpecTest failed")
}
// if r.cfg.EnableWASI {
// C.m3_LinkWASI(r.Ptr().modules)
// }
m := NewModule((ModuleT)(module))
return m, nil
}
var lock = sync.Mutex{}
// AttachFunction binds a callable function to a module+func
func (r *Runtime) AttachFunction(moduleName string, functionName string, signature string, callback CallbackFunction) {
_moduleName := C.CString(moduleName)
defer C.free(unsafe.Pointer(_moduleName))
_functionName := C.CString(functionName)
defer C.free(unsafe.Pointer(_functionName))
_signature := C.CString(signature)
defer C.free(unsafe.Pointer(_signature))
lock.Lock()
slot := C.attachFunction(r.Ptr().modules, _moduleName, _functionName, _signature)
slotsToCallbacks[int(slot)] = callback
lock.Unlock()
}
// LoadModule wraps m3_LoadModule and returns a module object
func (r *Runtime) LoadModule(module *Module) (*Module, error) {
if module.Ptr().memoryImported {
module.Ptr().memoryImported = false
}
result := C.m3Err_none
result = C.m3_LoadModule(
r.Ptr(),
module.Ptr(),
)
if result != nil {
return nil, errLoadModule
}
if r.cfg.EnableSpecTest {
C.m3_LinkSpecTest(r.Ptr().modules)
}
// if r.cfg.EnableWASI {
// C.m3_LinkWASI(r.Ptr().modules)
// }
return module, nil
}
// FindFunction calls m3_FindFunction and returns a call function
func (r *Runtime) FindFunction(funcName string) (FunctionWrapper, error) {
result := C.m3Err_none
var f C.IM3Function
cFuncName := C.CString(funcName)
defer C.free(unsafe.Pointer(cFuncName))
result = C.m3_FindFunction(
&f,
r.Ptr(),
cFuncName,
)
if result != nil {
return nil, errFuncLookupFailed
}
fn := &Function{
ptr: (FunctionT)(f),
}
// var fnWrapper FunctionWrapper
// fnWrapper = fn.Call
return FunctionWrapper(fn.Call), nil
}
// FindFunction does thins
func (r *Runtime) FindFunctionByModule(moduleName string, funcName string) (FunctionWrapper, error) {
var f C.IM3Function
cModuleName := C.CString(moduleName)
defer C.free(unsafe.Pointer(cModuleName))
cFuncName := C.CString(funcName)
defer C.free(unsafe.Pointer(cFuncName))
result := C.findFunction(
&f,
r.Ptr(),
cModuleName,
cFuncName,
)
if result != 0 {
return nil, errFuncLookupFailed
}
fn := &Function{
ptr: (FunctionT)(f),
}
// var fnWrapper FunctionWrapper
// fnWrapper = fn.Call
return FunctionWrapper(fn.Call), nil
}
// Destroy free calls m3_FreeRuntime
func (r *Runtime) Destroy() {
C.m3_FreeRuntime(r.Ptr())
r.cfg.Environment.Destroy()
}
// Memory allows access to runtime Memory.
// Taken from Wasmer extension: https://github.com/wasmerio/go-ext-wasm
func (r *Runtime) Memory() []byte {
mem := C.get_allocated_memory(
r.Ptr(),
)
var data = (*uint8)(mem)
length := r.GetAllocatedMemoryLength()
var header reflect.SliceHeader
header = *(*reflect.SliceHeader)(unsafe.Pointer(&header))
header.Data = uintptr(unsafe.Pointer(data))
header.Len = int(length)
header.Cap = int(length)
return *(*[]byte)(unsafe.Pointer(&header))
}
// GetAllocatedMemoryLength returns the amount of allocated runtime memory
func (r *Runtime) GetAllocatedMemoryLength() int {
length := C.get_allocated_memory_length(r.Ptr())
return int(length)
}
func (r *Runtime) ResizeMemory(numPages int32) error {
err := C.ResizeMemory(r.Ptr(), C.u32(numPages))
if err != C.m3Err_none {
return errors.New(C.GoString(err))
}
return nil
}
// ParseModule is a helper that calls the same function in env.
func (r *Runtime) ParseModule(wasmBytes []byte) (*Module, error) {
return r.cfg.Environment.ParseModule(wasmBytes)
}
func (r *Runtime) PrintRuntimeInfo() {
C.m3_PrintRuntimeInfo(r.Ptr())
C.m3_PrintM3Info()
C.m3_PrintProfilerInfo()
}
// NewRuntime initializes a new runtime
// TODO: nativeStackInfo is passed as NULL
func NewRuntime(cfg *Config) *Runtime {
// env *Environment, stackSize uint
ptr := C.m3_NewRuntime(
cfg.Environment.Ptr(),
C.uint(cfg.StackSize),
nil,
)
return &Runtime{
ptr: (RuntimeT)(ptr),
cfg: cfg,
}
}
// Module wraps a WASM3 module.
type Module struct {
ptr ModuleT
numFunctions int
numImports int
}
// Ptr returns a pointer to IM3Module
func (m *Module) Ptr() C.IM3Module {
return (C.IM3Module)(m.ptr)
}
// GetFunction provides access to IM3Function->functions
func (m *Module) GetFunction(index uint) (*Function, error) {
if uint(m.NumFunctions()) <= index {
return nil, errFuncLookupFailed
}
ptr := C.module_get_function(m.Ptr(), C.int(index))
name := C.GoString(ptr.name)
return &Function{
ptr: (FunctionT)(ptr),
Name: name,
}, nil
}
func (f *Function) GetReturnType() uint8 {
return uint8(f.ptr.funcType.returnType)
}
func (f *Function) GetNumArgs() uint32 {
return uint32(f.ptr.funcType.numArgs)
}
func (f *Function) GetArgType(index int) uint8 {
return uint8(C.function_get_arg_type(f.ptr, C.int(index)))
}
func (f *Function) GetSignature() string {
// TODO this is completely wrong but should work for basic functions for the moment...
s := "i("
for i := uint32(0); i < f.GetNumArgs(); i++ {
s += "i"
}
s += ")"
return s
}
// GetFunctionByName is a helper to lookup functions by name
// TODO: could be optimized by caching function names and pointer on the Go side, right after the load call.
func (m *Module) GetFunctionByName(lookupName string) (*Function, error) {
var fn *Function
for i := 0; i < m.NumFunctions(); i++ {
ptr := C.module_get_function(m.Ptr(), C.int(i))
name := C.GoString(ptr.name)
if name != lookupName {
continue
}
fn = &Function{
ptr: (FunctionT)(ptr),
Name: name,
}
return fn, nil
}
return nil, errFuncLookupFailed
}
// NumFunctions provides access to numFunctions.
func (m *Module) NumFunctions() int {
// In case the number of functions hasn't been resolved yet, retrieve the int and keep it in the structure
if m.numFunctions == -1 {
m.numFunctions = int(m.Ptr().numFunctions)
}
return m.numFunctions
}
func (m *Module) FunctionNames() []string {
functions := make([]string, 0)
for i := 0; i < int(m.Ptr().numFunctions); i++ {
f := C.module_get_function(m.Ptr(), C.int(i))
functions = append(functions, C.GoString(f.name))
fmt.Printf("fun: '%v' module: %p\n", C.GoString(f.name), f.module)
}
return functions
}
// NumImports provides access to numImports
func (m *Module) NumImports() int {
if m.numImports == -1 {
m.numImports = int(m.Ptr().numImports)
}
return m.numImports
}
// TODO: Store the CStrings to later free them!
func (m *Module) LinkRawFunction(moduleName, functionName, signature string, fn unsafe.Pointer) error {
_moduleName := C.CString(moduleName)
// defer C.free(unsafe.Pointer(_moduleName))
_functionName := C.CString(functionName)
// defer C.free(unsafe.Pointer(_functionName))
_signature := C.CString(signature)
// defer C.free(unsafe.Pointer(_signature))
result := C.m3_LinkRawFunction(m.Ptr(), _moduleName, _functionName, _signature, (*[0]byte)(fn))
if result != nil {
return fmt.Errorf(C.GoString(result))
}
return nil
}
// GetModule retreive the function's module
func (f *Function) GetModule() *Module {
return NewModule(f.ptr.module)
}
func (f *Function) GetImportModule() *string {
if f.ptr == nil {
return nil
}
cs := C.function_get_import_module(f.ptr)
if cs == nil {
return nil
}
res := C.GoString(cs)
return &res
}
func (f *Function) GetImportField() *string {
if f.ptr == nil {
return nil
}
cs := C.function_get_import_field(f.ptr)
if cs == nil {
return nil
}
res := C.GoString(cs)
return &res
}
// Name gets the module's name
func (m *Module) Name() string {
return C.GoString(m.ptr.name)
}
// NewModule wraps a WASM3 moduke
func NewModule(ptr ModuleT) *Module {
return &Module{
ptr: ptr,
numFunctions: -1,
numImports: -1,
}
}
// Function is a function wrapper
type Function struct {
ptr FunctionT
// fnWrapper FunctionWrapper
Name string
}
// FunctionWrapper is used to wrap WASM3 call methods and make the calls more idiomatic
type FunctionWrapper func(args ...interface{}) (interface{}, error)
// Ptr returns a pointer to IM3Function
func (f *Function) Ptr() C.IM3Function |
// Call implements a better call function
func (f *Function) Call(args ...interface{}) (interface{}, error) {
length := len(args)
cArgs := make([]int64, length)
for i, v := range args {
p := unsafe.Pointer(&cArgs[i])
switch val := v.(type) {
case int:
*(*C.i32)(p) = C.i32(val)
case int32:
*(*C.i32)(p) = C.i32(val)
case int64:
*(*C.i64)(p) = C.i64(val)
case float32:
*(*C.f32)(p) = C.f32(val)
case float64:
*(*C.f64)(p) = C.f64(val)
default:
return 0, fmt.Errorf("invalid arg type %T", val)
}
}
var result [8]byte
var err C.int
if length == 0 {
err = C.call(f.Ptr(), 0, nil, unsafe.Pointer(&result[0]))
} else {
err = C.call(f.Ptr(), C.uint(length), unsafe.Pointer(&cArgs[0]), unsafe.Pointer(&result[0]))
}
if err == -1 {
return 0, errors.New(LastErrorString())
}
switch f.Ptr().funcType.returnType {
case C.c_m3Type_i32:
return *(*int32)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_i64:
return *(*int64)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_f32:
return *(*float32)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_f64:
return *(*float64)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_none:
return 0, nil
default:
return 0, errors.New("unexpected return type (go)")
}
}
// Environment wraps a WASM3 environment
type Environment struct {
ptr EnvironmentT
}
// ParseModule wraps m3_ParseModule
func (e *Environment) ParseModule(wasmBytes []byte) (*Module, error) {
result := C.m3Err_none
bytes := C.CBytes(wasmBytes)
length := len(wasmBytes)
var module C.IM3Module
result = C.m3_ParseModule(
e.Ptr(),
&module,
(*C.uchar)(bytes),
C.uint(length),
)
if result != nil {
return nil, errParseModule
}
return NewModule((ModuleT)(module)), nil
}
// Ptr returns a pointer to IM3Environment
func (e *Environment) Ptr() C.IM3Environment {
return (C.IM3Environment)(e.ptr)
}
// Destroy calls m3_FreeEnvironment
func (e *Environment) Destroy() {
C.m3_FreeEnvironment(e.Ptr())
}
// NewEnvironment initializes a new environment
func NewEnvironment() *Environment {
ptr := C.m3_NewEnvironment()
return &Environment{
ptr: (EnvironmentT)(ptr),
}
}
| {
return (C.IM3Function)(f.ptr)
} | identifier_body |
wasm3.go | package wasm3
/*
#cgo LDFLAGS: -lm
// #cgo CFLAGS: -Iinclude
// #cgo darwin LDFLAGS: -L${SRCDIR}/lib/darwin -lm3
// #cgo !android,linux LDFLAGS: -L${SRCDIR}/lib/linux -lm3 -lm
// #cgo android,arm LDFLAGS: -L${SRCDIR}/lib/android/armeabi-v7a -lm3 -lm
// #cgo android,arm64 LDFLAGS: -L${SRCDIR}/lib/android/arm64-v8a -lm3 -lm
// #cgo android,386 LDFLAGS: -L${SRCDIR}/lib/android/x86 -lm3 -lm
// #cgo android,amd64 LDFLAGS: -L${SRCDIR}/lib/android/x86_64 -lm3 -lm
#include "wasm3.h"
#include "m3_api_libc.h"
#include "m3_api_wasi.h"
#include "m3_env.h"
#include "go-wasm3.h"
#include <stdio.h>
typedef uint32_t __wasi_size_t;
// #include "extra/wasi_core.h"
IM3Function module_get_function(IM3Module i_module, int index);
IM3Function module_get_imported_function(IM3Module i_module, int index);
int call(IM3Function i_function, uint32_t i_argc, void* i_argv, void* o_result);
int get_allocated_memory_length(IM3Runtime i_runtime);
u8* get_allocated_memory(IM3Runtime i_runtime);
const void * mowrapper(IM3Runtime runtime, uint64_t * _sp, void * _mem);
int attachFunction(IM3Module i_module, char* moduleName, char* functionName, char* signature);
void* m3ApiOffsetToPtr(void* offset, void* _mem);
const char* function_get_import_module(IM3Function i_function);
const char* function_get_import_field(IM3Function i_function);
int findFunction(IM3Function * o_function, IM3Runtime i_runtime, const char * const i_moduleName, const char * const i_functionName);
void get_function(IM3Function * o_function, IM3Module i_module, int i);
u8 function_get_arg_type(IM3Function i_function, int index);
typedef struct wasi_iovec_t
{
__wasi_size_t buf;
__wasi_size_t buf_len;
} wasi_iovec_t;
*/
import "C"
import (
"errors"
"fmt"
"reflect"
"sync"
"unsafe"
)
const (
PageSize uint32 = 0x10000
)
// RuntimeT is an alias for IM3Runtime
type RuntimeT C.IM3Runtime
// EnvironmentT is an alias for IM3Environment
type EnvironmentT C.IM3Environment
// ModuleT is an alias for IM3Module
type ModuleT C.IM3Module
// FunctionT is an alias for IM3Function
type FunctionT C.IM3Function
// FuncTypeT is an alias for IM3FuncType
type FuncTypeT C.IM3FuncType
// ResultT is an alias for M3Result
type ResultT C.M3Result
// WasiIoVec is an alias for wasi_iovec_t
type WasiIoVec C.wasi_iovec_t |
var slotsToCallbacks = make(map[int]CallbackFunction)
// GetBuf return the internal buffer index
func (w *WasiIoVec) GetBuf() uint32 {
return uint32(w.buf)
}
// GetBufLen return the buffer len
func (w *WasiIoVec) GetBufLen() int {
return int(w.buf_len)
}
//export dynamicFunctionWrapper
func dynamicFunctionWrapper(runtime RuntimeT, _sp unsafe.Pointer, _mem unsafe.Pointer, slot uint64) int {
lock.Lock()
fn := slotsToCallbacks[int(slot)]
lock.Unlock()
return fn(runtime, _sp, _mem)
}
var (
errParseModule = errors.New("Parse error")
errLoadModule = errors.New("Load error")
errFuncLookupFailed = errors.New("Function lookup failed")
)
// Config holds the runtime and environment configuration
type Config struct {
Environment *Environment
StackSize uint
// EnableWASI bool
EnableSpecTest bool
}
// Runtime wraps a WASM3 runtime
type Runtime struct {
ptr RuntimeT
cfg *Config
}
// Ptr returns a IM3Runtime pointer
func (r *Runtime) Ptr() C.IM3Runtime {
return (C.IM3Runtime)(r.ptr)
}
// Load wraps the parse and load module calls.
// This will be replaced by env.ParseModule and Runtime.LoadModule.
func (r *Runtime) Load(wasmBytes []byte) (*Module, error) {
result := C.m3Err_none
bytes := C.CBytes(wasmBytes)
length := len(wasmBytes)
var module C.IM3Module
result = C.m3_ParseModule(
r.cfg.Environment.Ptr(),
&module,
(*C.uchar)(bytes),
C.uint(length),
)
if result != nil {
return nil, errParseModule
}
if module.memoryImported {
module.memoryImported = false
}
result = C.m3_LoadModule(
r.Ptr(),
module,
)
if result != nil {
return nil, errLoadModule
}
result = C.m3_LinkSpecTest(r.Ptr().modules)
if result != nil {
return nil, errors.New("LinkSpecTest failed")
}
// if r.cfg.EnableWASI {
// C.m3_LinkWASI(r.Ptr().modules)
// }
m := NewModule((ModuleT)(module))
return m, nil
}
var lock = sync.Mutex{}
// AttachFunction binds a callable function to a module+func
func (r *Runtime) AttachFunction(moduleName string, functionName string, signature string, callback CallbackFunction) {
_moduleName := C.CString(moduleName)
defer C.free(unsafe.Pointer(_moduleName))
_functionName := C.CString(functionName)
defer C.free(unsafe.Pointer(_functionName))
_signature := C.CString(signature)
defer C.free(unsafe.Pointer(_signature))
lock.Lock()
slot := C.attachFunction(r.Ptr().modules, _moduleName, _functionName, _signature)
slotsToCallbacks[int(slot)] = callback
lock.Unlock()
}
// LoadModule wraps m3_LoadModule and returns a module object
func (r *Runtime) LoadModule(module *Module) (*Module, error) {
if module.Ptr().memoryImported {
module.Ptr().memoryImported = false
}
result := C.m3Err_none
result = C.m3_LoadModule(
r.Ptr(),
module.Ptr(),
)
if result != nil {
return nil, errLoadModule
}
if r.cfg.EnableSpecTest {
C.m3_LinkSpecTest(r.Ptr().modules)
}
// if r.cfg.EnableWASI {
// C.m3_LinkWASI(r.Ptr().modules)
// }
return module, nil
}
// FindFunction calls m3_FindFunction and returns a call function
func (r *Runtime) FindFunction(funcName string) (FunctionWrapper, error) {
result := C.m3Err_none
var f C.IM3Function
cFuncName := C.CString(funcName)
defer C.free(unsafe.Pointer(cFuncName))
result = C.m3_FindFunction(
&f,
r.Ptr(),
cFuncName,
)
if result != nil {
return nil, errFuncLookupFailed
}
fn := &Function{
ptr: (FunctionT)(f),
}
// var fnWrapper FunctionWrapper
// fnWrapper = fn.Call
return FunctionWrapper(fn.Call), nil
}
// FindFunction does thins
func (r *Runtime) FindFunctionByModule(moduleName string, funcName string) (FunctionWrapper, error) {
var f C.IM3Function
cModuleName := C.CString(moduleName)
defer C.free(unsafe.Pointer(cModuleName))
cFuncName := C.CString(funcName)
defer C.free(unsafe.Pointer(cFuncName))
result := C.findFunction(
&f,
r.Ptr(),
cModuleName,
cFuncName,
)
if result != 0 {
return nil, errFuncLookupFailed
}
fn := &Function{
ptr: (FunctionT)(f),
}
// var fnWrapper FunctionWrapper
// fnWrapper = fn.Call
return FunctionWrapper(fn.Call), nil
}
// Destroy free calls m3_FreeRuntime
func (r *Runtime) Destroy() {
C.m3_FreeRuntime(r.Ptr())
r.cfg.Environment.Destroy()
}
// Memory allows access to runtime Memory.
// Taken from Wasmer extension: https://github.com/wasmerio/go-ext-wasm
func (r *Runtime) Memory() []byte {
mem := C.get_allocated_memory(
r.Ptr(),
)
var data = (*uint8)(mem)
length := r.GetAllocatedMemoryLength()
var header reflect.SliceHeader
header = *(*reflect.SliceHeader)(unsafe.Pointer(&header))
header.Data = uintptr(unsafe.Pointer(data))
header.Len = int(length)
header.Cap = int(length)
return *(*[]byte)(unsafe.Pointer(&header))
}
// GetAllocatedMemoryLength returns the amount of allocated runtime memory
func (r *Runtime) GetAllocatedMemoryLength() int {
length := C.get_allocated_memory_length(r.Ptr())
return int(length)
}
func (r *Runtime) ResizeMemory(numPages int32) error {
err := C.ResizeMemory(r.Ptr(), C.u32(numPages))
if err != C.m3Err_none {
return errors.New(C.GoString(err))
}
return nil
}
// ParseModule is a helper that calls the same function in env.
func (r *Runtime) ParseModule(wasmBytes []byte) (*Module, error) {
return r.cfg.Environment.ParseModule(wasmBytes)
}
func (r *Runtime) PrintRuntimeInfo() {
C.m3_PrintRuntimeInfo(r.Ptr())
C.m3_PrintM3Info()
C.m3_PrintProfilerInfo()
}
// NewRuntime initializes a new runtime
// TODO: nativeStackInfo is passed as NULL
func NewRuntime(cfg *Config) *Runtime {
// env *Environment, stackSize uint
ptr := C.m3_NewRuntime(
cfg.Environment.Ptr(),
C.uint(cfg.StackSize),
nil,
)
return &Runtime{
ptr: (RuntimeT)(ptr),
cfg: cfg,
}
}
// Module wraps a WASM3 module.
type Module struct {
ptr ModuleT
numFunctions int
numImports int
}
// Ptr returns a pointer to IM3Module
func (m *Module) Ptr() C.IM3Module {
return (C.IM3Module)(m.ptr)
}
// GetFunction provides access to IM3Function->functions
func (m *Module) GetFunction(index uint) (*Function, error) {
if uint(m.NumFunctions()) <= index {
return nil, errFuncLookupFailed
}
ptr := C.module_get_function(m.Ptr(), C.int(index))
name := C.GoString(ptr.name)
return &Function{
ptr: (FunctionT)(ptr),
Name: name,
}, nil
}
func (f *Function) GetReturnType() uint8 {
return uint8(f.ptr.funcType.returnType)
}
func (f *Function) GetNumArgs() uint32 {
return uint32(f.ptr.funcType.numArgs)
}
func (f *Function) GetArgType(index int) uint8 {
return uint8(C.function_get_arg_type(f.ptr, C.int(index)))
}
func (f *Function) GetSignature() string {
// TODO this is completely wrong but should work for basic functions for the moment...
s := "i("
for i := uint32(0); i < f.GetNumArgs(); i++ {
s += "i"
}
s += ")"
return s
}
// GetFunctionByName is a helper to lookup functions by name
// TODO: could be optimized by caching function names and pointer on the Go side, right after the load call.
func (m *Module) GetFunctionByName(lookupName string) (*Function, error) {
var fn *Function
for i := 0; i < m.NumFunctions(); i++ {
ptr := C.module_get_function(m.Ptr(), C.int(i))
name := C.GoString(ptr.name)
if name != lookupName {
continue
}
fn = &Function{
ptr: (FunctionT)(ptr),
Name: name,
}
return fn, nil
}
return nil, errFuncLookupFailed
}
// NumFunctions provides access to numFunctions.
func (m *Module) NumFunctions() int {
// In case the number of functions hasn't been resolved yet, retrieve the int and keep it in the structure
if m.numFunctions == -1 {
m.numFunctions = int(m.Ptr().numFunctions)
}
return m.numFunctions
}
func (m *Module) FunctionNames() []string {
functions := make([]string, 0)
for i := 0; i < int(m.Ptr().numFunctions); i++ {
f := C.module_get_function(m.Ptr(), C.int(i))
functions = append(functions, C.GoString(f.name))
fmt.Printf("fun: '%v' module: %p\n", C.GoString(f.name), f.module)
}
return functions
}
// NumImports provides access to numImports
func (m *Module) NumImports() int {
if m.numImports == -1 {
m.numImports = int(m.Ptr().numImports)
}
return m.numImports
}
// TODO: Store the CStrings to later free them!
func (m *Module) LinkRawFunction(moduleName, functionName, signature string, fn unsafe.Pointer) error {
_moduleName := C.CString(moduleName)
// defer C.free(unsafe.Pointer(_moduleName))
_functionName := C.CString(functionName)
// defer C.free(unsafe.Pointer(_functionName))
_signature := C.CString(signature)
// defer C.free(unsafe.Pointer(_signature))
result := C.m3_LinkRawFunction(m.Ptr(), _moduleName, _functionName, _signature, (*[0]byte)(fn))
if result != nil {
return fmt.Errorf(C.GoString(result))
}
return nil
}
// GetModule retreive the function's module
func (f *Function) GetModule() *Module {
return NewModule(f.ptr.module)
}
func (f *Function) GetImportModule() *string {
if f.ptr == nil {
return nil
}
cs := C.function_get_import_module(f.ptr)
if cs == nil {
return nil
}
res := C.GoString(cs)
return &res
}
func (f *Function) GetImportField() *string {
if f.ptr == nil {
return nil
}
cs := C.function_get_import_field(f.ptr)
if cs == nil {
return nil
}
res := C.GoString(cs)
return &res
}
// Name gets the module's name
func (m *Module) Name() string {
return C.GoString(m.ptr.name)
}
// NewModule wraps a WASM3 moduke
func NewModule(ptr ModuleT) *Module {
return &Module{
ptr: ptr,
numFunctions: -1,
numImports: -1,
}
}
// Function is a function wrapper
type Function struct {
ptr FunctionT
// fnWrapper FunctionWrapper
Name string
}
// FunctionWrapper is used to wrap WASM3 call methods and make the calls more idiomatic
type FunctionWrapper func(args ...interface{}) (interface{}, error)
// Ptr returns a pointer to IM3Function
func (f *Function) Ptr() C.IM3Function {
return (C.IM3Function)(f.ptr)
}
// Call implements a better call function
func (f *Function) Call(args ...interface{}) (interface{}, error) {
length := len(args)
cArgs := make([]int64, length)
for i, v := range args {
p := unsafe.Pointer(&cArgs[i])
switch val := v.(type) {
case int:
*(*C.i32)(p) = C.i32(val)
case int32:
*(*C.i32)(p) = C.i32(val)
case int64:
*(*C.i64)(p) = C.i64(val)
case float32:
*(*C.f32)(p) = C.f32(val)
case float64:
*(*C.f64)(p) = C.f64(val)
default:
return 0, fmt.Errorf("invalid arg type %T", val)
}
}
var result [8]byte
var err C.int
if length == 0 {
err = C.call(f.Ptr(), 0, nil, unsafe.Pointer(&result[0]))
} else {
err = C.call(f.Ptr(), C.uint(length), unsafe.Pointer(&cArgs[0]), unsafe.Pointer(&result[0]))
}
if err == -1 {
return 0, errors.New(LastErrorString())
}
switch f.Ptr().funcType.returnType {
case C.c_m3Type_i32:
return *(*int32)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_i64:
return *(*int64)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_f32:
return *(*float32)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_f64:
return *(*float64)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_none:
return 0, nil
default:
return 0, errors.New("unexpected return type (go)")
}
}
// Environment wraps a WASM3 environment
type Environment struct {
ptr EnvironmentT
}
// ParseModule wraps m3_ParseModule
func (e *Environment) ParseModule(wasmBytes []byte) (*Module, error) {
result := C.m3Err_none
bytes := C.CBytes(wasmBytes)
length := len(wasmBytes)
var module C.IM3Module
result = C.m3_ParseModule(
e.Ptr(),
&module,
(*C.uchar)(bytes),
C.uint(length),
)
if result != nil {
return nil, errParseModule
}
return NewModule((ModuleT)(module)), nil
}
// Ptr returns a pointer to IM3Environment
func (e *Environment) Ptr() C.IM3Environment {
return (C.IM3Environment)(e.ptr)
}
// Destroy calls m3_FreeEnvironment
func (e *Environment) Destroy() {
C.m3_FreeEnvironment(e.Ptr())
}
// NewEnvironment initializes a new environment
func NewEnvironment() *Environment {
ptr := C.m3_NewEnvironment()
return &Environment{
ptr: (EnvironmentT)(ptr),
}
} |
// CallbackFunction is the signature for callbacks
type CallbackFunction func(runtime RuntimeT, sp unsafe.Pointer, mem unsafe.Pointer) int | random_line_split |
wasm3.go | package wasm3
/*
#cgo LDFLAGS: -lm
// #cgo CFLAGS: -Iinclude
// #cgo darwin LDFLAGS: -L${SRCDIR}/lib/darwin -lm3
// #cgo !android,linux LDFLAGS: -L${SRCDIR}/lib/linux -lm3 -lm
// #cgo android,arm LDFLAGS: -L${SRCDIR}/lib/android/armeabi-v7a -lm3 -lm
// #cgo android,arm64 LDFLAGS: -L${SRCDIR}/lib/android/arm64-v8a -lm3 -lm
// #cgo android,386 LDFLAGS: -L${SRCDIR}/lib/android/x86 -lm3 -lm
// #cgo android,amd64 LDFLAGS: -L${SRCDIR}/lib/android/x86_64 -lm3 -lm
#include "wasm3.h"
#include "m3_api_libc.h"
#include "m3_api_wasi.h"
#include "m3_env.h"
#include "go-wasm3.h"
#include <stdio.h>
typedef uint32_t __wasi_size_t;
// #include "extra/wasi_core.h"
IM3Function module_get_function(IM3Module i_module, int index);
IM3Function module_get_imported_function(IM3Module i_module, int index);
int call(IM3Function i_function, uint32_t i_argc, void* i_argv, void* o_result);
int get_allocated_memory_length(IM3Runtime i_runtime);
u8* get_allocated_memory(IM3Runtime i_runtime);
const void * mowrapper(IM3Runtime runtime, uint64_t * _sp, void * _mem);
int attachFunction(IM3Module i_module, char* moduleName, char* functionName, char* signature);
void* m3ApiOffsetToPtr(void* offset, void* _mem);
const char* function_get_import_module(IM3Function i_function);
const char* function_get_import_field(IM3Function i_function);
int findFunction(IM3Function * o_function, IM3Runtime i_runtime, const char * const i_moduleName, const char * const i_functionName);
void get_function(IM3Function * o_function, IM3Module i_module, int i);
u8 function_get_arg_type(IM3Function i_function, int index);
typedef struct wasi_iovec_t
{
__wasi_size_t buf;
__wasi_size_t buf_len;
} wasi_iovec_t;
*/
import "C"
import (
"errors"
"fmt"
"reflect"
"sync"
"unsafe"
)
const (
PageSize uint32 = 0x10000
)
// RuntimeT is an alias for IM3Runtime
type RuntimeT C.IM3Runtime
// EnvironmentT is an alias for IM3Environment
type EnvironmentT C.IM3Environment
// ModuleT is an alias for IM3Module
type ModuleT C.IM3Module
// FunctionT is an alias for IM3Function
type FunctionT C.IM3Function
// FuncTypeT is an alias for IM3FuncType
type FuncTypeT C.IM3FuncType
// ResultT is an alias for M3Result
type ResultT C.M3Result
// WasiIoVec is an alias for wasi_iovec_t
type WasiIoVec C.wasi_iovec_t
// CallbackFunction is the signature for callbacks
type CallbackFunction func(runtime RuntimeT, sp unsafe.Pointer, mem unsafe.Pointer) int
var slotsToCallbacks = make(map[int]CallbackFunction)
// GetBuf return the internal buffer index
func (w *WasiIoVec) GetBuf() uint32 {
return uint32(w.buf)
}
// GetBufLen return the buffer len
func (w *WasiIoVec) GetBufLen() int {
return int(w.buf_len)
}
//export dynamicFunctionWrapper
func dynamicFunctionWrapper(runtime RuntimeT, _sp unsafe.Pointer, _mem unsafe.Pointer, slot uint64) int {
lock.Lock()
fn := slotsToCallbacks[int(slot)]
lock.Unlock()
return fn(runtime, _sp, _mem)
}
var (
errParseModule = errors.New("Parse error")
errLoadModule = errors.New("Load error")
errFuncLookupFailed = errors.New("Function lookup failed")
)
// Config holds the runtime and environment configuration
type Config struct {
Environment *Environment
StackSize uint
// EnableWASI bool
EnableSpecTest bool
}
// Runtime wraps a WASM3 runtime
type Runtime struct {
ptr RuntimeT
cfg *Config
}
// Ptr returns a IM3Runtime pointer
func (r *Runtime) Ptr() C.IM3Runtime {
return (C.IM3Runtime)(r.ptr)
}
// Load wraps the parse and load module calls.
// This will be replaced by env.ParseModule and Runtime.LoadModule.
func (r *Runtime) Load(wasmBytes []byte) (*Module, error) {
result := C.m3Err_none
bytes := C.CBytes(wasmBytes)
length := len(wasmBytes)
var module C.IM3Module
result = C.m3_ParseModule(
r.cfg.Environment.Ptr(),
&module,
(*C.uchar)(bytes),
C.uint(length),
)
if result != nil {
return nil, errParseModule
}
if module.memoryImported {
module.memoryImported = false
}
result = C.m3_LoadModule(
r.Ptr(),
module,
)
if result != nil {
return nil, errLoadModule
}
result = C.m3_LinkSpecTest(r.Ptr().modules)
if result != nil {
return nil, errors.New("LinkSpecTest failed")
}
// if r.cfg.EnableWASI {
// C.m3_LinkWASI(r.Ptr().modules)
// }
m := NewModule((ModuleT)(module))
return m, nil
}
var lock = sync.Mutex{}
// AttachFunction binds a callable function to a module+func
func (r *Runtime) AttachFunction(moduleName string, functionName string, signature string, callback CallbackFunction) {
_moduleName := C.CString(moduleName)
defer C.free(unsafe.Pointer(_moduleName))
_functionName := C.CString(functionName)
defer C.free(unsafe.Pointer(_functionName))
_signature := C.CString(signature)
defer C.free(unsafe.Pointer(_signature))
lock.Lock()
slot := C.attachFunction(r.Ptr().modules, _moduleName, _functionName, _signature)
slotsToCallbacks[int(slot)] = callback
lock.Unlock()
}
// LoadModule wraps m3_LoadModule and returns a module object
func (r *Runtime) LoadModule(module *Module) (*Module, error) {
if module.Ptr().memoryImported {
module.Ptr().memoryImported = false
}
result := C.m3Err_none
result = C.m3_LoadModule(
r.Ptr(),
module.Ptr(),
)
if result != nil |
if r.cfg.EnableSpecTest {
C.m3_LinkSpecTest(r.Ptr().modules)
}
// if r.cfg.EnableWASI {
// C.m3_LinkWASI(r.Ptr().modules)
// }
return module, nil
}
// FindFunction calls m3_FindFunction and returns a call function
func (r *Runtime) FindFunction(funcName string) (FunctionWrapper, error) {
result := C.m3Err_none
var f C.IM3Function
cFuncName := C.CString(funcName)
defer C.free(unsafe.Pointer(cFuncName))
result = C.m3_FindFunction(
&f,
r.Ptr(),
cFuncName,
)
if result != nil {
return nil, errFuncLookupFailed
}
fn := &Function{
ptr: (FunctionT)(f),
}
// var fnWrapper FunctionWrapper
// fnWrapper = fn.Call
return FunctionWrapper(fn.Call), nil
}
// FindFunction does thins
func (r *Runtime) FindFunctionByModule(moduleName string, funcName string) (FunctionWrapper, error) {
var f C.IM3Function
cModuleName := C.CString(moduleName)
defer C.free(unsafe.Pointer(cModuleName))
cFuncName := C.CString(funcName)
defer C.free(unsafe.Pointer(cFuncName))
result := C.findFunction(
&f,
r.Ptr(),
cModuleName,
cFuncName,
)
if result != 0 {
return nil, errFuncLookupFailed
}
fn := &Function{
ptr: (FunctionT)(f),
}
// var fnWrapper FunctionWrapper
// fnWrapper = fn.Call
return FunctionWrapper(fn.Call), nil
}
// Destroy free calls m3_FreeRuntime
func (r *Runtime) Destroy() {
C.m3_FreeRuntime(r.Ptr())
r.cfg.Environment.Destroy()
}
// Memory allows access to runtime Memory.
// Taken from Wasmer extension: https://github.com/wasmerio/go-ext-wasm
func (r *Runtime) Memory() []byte {
mem := C.get_allocated_memory(
r.Ptr(),
)
var data = (*uint8)(mem)
length := r.GetAllocatedMemoryLength()
var header reflect.SliceHeader
header = *(*reflect.SliceHeader)(unsafe.Pointer(&header))
header.Data = uintptr(unsafe.Pointer(data))
header.Len = int(length)
header.Cap = int(length)
return *(*[]byte)(unsafe.Pointer(&header))
}
// GetAllocatedMemoryLength returns the amount of allocated runtime memory
func (r *Runtime) GetAllocatedMemoryLength() int {
length := C.get_allocated_memory_length(r.Ptr())
return int(length)
}
func (r *Runtime) ResizeMemory(numPages int32) error {
err := C.ResizeMemory(r.Ptr(), C.u32(numPages))
if err != C.m3Err_none {
return errors.New(C.GoString(err))
}
return nil
}
// ParseModule is a helper that calls the same function in env.
func (r *Runtime) ParseModule(wasmBytes []byte) (*Module, error) {
return r.cfg.Environment.ParseModule(wasmBytes)
}
func (r *Runtime) PrintRuntimeInfo() {
C.m3_PrintRuntimeInfo(r.Ptr())
C.m3_PrintM3Info()
C.m3_PrintProfilerInfo()
}
// NewRuntime initializes a new runtime
// TODO: nativeStackInfo is passed as NULL
func NewRuntime(cfg *Config) *Runtime {
// env *Environment, stackSize uint
ptr := C.m3_NewRuntime(
cfg.Environment.Ptr(),
C.uint(cfg.StackSize),
nil,
)
return &Runtime{
ptr: (RuntimeT)(ptr),
cfg: cfg,
}
}
// Module wraps a WASM3 module.
type Module struct {
ptr ModuleT
numFunctions int
numImports int
}
// Ptr returns a pointer to IM3Module
func (m *Module) Ptr() C.IM3Module {
return (C.IM3Module)(m.ptr)
}
// GetFunction provides access to IM3Function->functions
func (m *Module) GetFunction(index uint) (*Function, error) {
if uint(m.NumFunctions()) <= index {
return nil, errFuncLookupFailed
}
ptr := C.module_get_function(m.Ptr(), C.int(index))
name := C.GoString(ptr.name)
return &Function{
ptr: (FunctionT)(ptr),
Name: name,
}, nil
}
func (f *Function) GetReturnType() uint8 {
return uint8(f.ptr.funcType.returnType)
}
func (f *Function) GetNumArgs() uint32 {
return uint32(f.ptr.funcType.numArgs)
}
func (f *Function) GetArgType(index int) uint8 {
return uint8(C.function_get_arg_type(f.ptr, C.int(index)))
}
func (f *Function) GetSignature() string {
// TODO this is completely wrong but should work for basic functions for the moment...
s := "i("
for i := uint32(0); i < f.GetNumArgs(); i++ {
s += "i"
}
s += ")"
return s
}
// GetFunctionByName is a helper to lookup functions by name
// TODO: could be optimized by caching function names and pointer on the Go side, right after the load call.
func (m *Module) GetFunctionByName(lookupName string) (*Function, error) {
var fn *Function
for i := 0; i < m.NumFunctions(); i++ {
ptr := C.module_get_function(m.Ptr(), C.int(i))
name := C.GoString(ptr.name)
if name != lookupName {
continue
}
fn = &Function{
ptr: (FunctionT)(ptr),
Name: name,
}
return fn, nil
}
return nil, errFuncLookupFailed
}
// NumFunctions provides access to numFunctions.
func (m *Module) NumFunctions() int {
// In case the number of functions hasn't been resolved yet, retrieve the int and keep it in the structure
if m.numFunctions == -1 {
m.numFunctions = int(m.Ptr().numFunctions)
}
return m.numFunctions
}
func (m *Module) FunctionNames() []string {
functions := make([]string, 0)
for i := 0; i < int(m.Ptr().numFunctions); i++ {
f := C.module_get_function(m.Ptr(), C.int(i))
functions = append(functions, C.GoString(f.name))
fmt.Printf("fun: '%v' module: %p\n", C.GoString(f.name), f.module)
}
return functions
}
// NumImports provides access to numImports
func (m *Module) NumImports() int {
if m.numImports == -1 {
m.numImports = int(m.Ptr().numImports)
}
return m.numImports
}
// TODO: Store the CStrings to later free them!
func (m *Module) LinkRawFunction(moduleName, functionName, signature string, fn unsafe.Pointer) error {
_moduleName := C.CString(moduleName)
// defer C.free(unsafe.Pointer(_moduleName))
_functionName := C.CString(functionName)
// defer C.free(unsafe.Pointer(_functionName))
_signature := C.CString(signature)
// defer C.free(unsafe.Pointer(_signature))
result := C.m3_LinkRawFunction(m.Ptr(), _moduleName, _functionName, _signature, (*[0]byte)(fn))
if result != nil {
return fmt.Errorf(C.GoString(result))
}
return nil
}
// GetModule retreive the function's module
func (f *Function) GetModule() *Module {
return NewModule(f.ptr.module)
}
func (f *Function) GetImportModule() *string {
if f.ptr == nil {
return nil
}
cs := C.function_get_import_module(f.ptr)
if cs == nil {
return nil
}
res := C.GoString(cs)
return &res
}
func (f *Function) GetImportField() *string {
if f.ptr == nil {
return nil
}
cs := C.function_get_import_field(f.ptr)
if cs == nil {
return nil
}
res := C.GoString(cs)
return &res
}
// Name gets the module's name
func (m *Module) Name() string {
return C.GoString(m.ptr.name)
}
// NewModule wraps a WASM3 moduke
func NewModule(ptr ModuleT) *Module {
return &Module{
ptr: ptr,
numFunctions: -1,
numImports: -1,
}
}
// Function is a function wrapper
type Function struct {
ptr FunctionT
// fnWrapper FunctionWrapper
Name string
}
// FunctionWrapper is used to wrap WASM3 call methods and make the calls more idiomatic
type FunctionWrapper func(args ...interface{}) (interface{}, error)
// Ptr returns a pointer to IM3Function
func (f *Function) Ptr() C.IM3Function {
return (C.IM3Function)(f.ptr)
}
// Call implements a better call function
func (f *Function) Call(args ...interface{}) (interface{}, error) {
length := len(args)
cArgs := make([]int64, length)
for i, v := range args {
p := unsafe.Pointer(&cArgs[i])
switch val := v.(type) {
case int:
*(*C.i32)(p) = C.i32(val)
case int32:
*(*C.i32)(p) = C.i32(val)
case int64:
*(*C.i64)(p) = C.i64(val)
case float32:
*(*C.f32)(p) = C.f32(val)
case float64:
*(*C.f64)(p) = C.f64(val)
default:
return 0, fmt.Errorf("invalid arg type %T", val)
}
}
var result [8]byte
var err C.int
if length == 0 {
err = C.call(f.Ptr(), 0, nil, unsafe.Pointer(&result[0]))
} else {
err = C.call(f.Ptr(), C.uint(length), unsafe.Pointer(&cArgs[0]), unsafe.Pointer(&result[0]))
}
if err == -1 {
return 0, errors.New(LastErrorString())
}
switch f.Ptr().funcType.returnType {
case C.c_m3Type_i32:
return *(*int32)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_i64:
return *(*int64)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_f32:
return *(*float32)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_f64:
return *(*float64)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_none:
return 0, nil
default:
return 0, errors.New("unexpected return type (go)")
}
}
// Environment wraps a WASM3 environment
type Environment struct {
ptr EnvironmentT
}
// ParseModule wraps m3_ParseModule
func (e *Environment) ParseModule(wasmBytes []byte) (*Module, error) {
result := C.m3Err_none
bytes := C.CBytes(wasmBytes)
length := len(wasmBytes)
var module C.IM3Module
result = C.m3_ParseModule(
e.Ptr(),
&module,
(*C.uchar)(bytes),
C.uint(length),
)
if result != nil {
return nil, errParseModule
}
return NewModule((ModuleT)(module)), nil
}
// Ptr returns a pointer to IM3Environment
func (e *Environment) Ptr() C.IM3Environment {
return (C.IM3Environment)(e.ptr)
}
// Destroy calls m3_FreeEnvironment
func (e *Environment) Destroy() {
C.m3_FreeEnvironment(e.Ptr())
}
// NewEnvironment initializes a new environment
func NewEnvironment() *Environment {
ptr := C.m3_NewEnvironment()
return &Environment{
ptr: (EnvironmentT)(ptr),
}
}
| {
return nil, errLoadModule
} | conditional_block |
wasm3.go | package wasm3
/*
#cgo LDFLAGS: -lm
// #cgo CFLAGS: -Iinclude
// #cgo darwin LDFLAGS: -L${SRCDIR}/lib/darwin -lm3
// #cgo !android,linux LDFLAGS: -L${SRCDIR}/lib/linux -lm3 -lm
// #cgo android,arm LDFLAGS: -L${SRCDIR}/lib/android/armeabi-v7a -lm3 -lm
// #cgo android,arm64 LDFLAGS: -L${SRCDIR}/lib/android/arm64-v8a -lm3 -lm
// #cgo android,386 LDFLAGS: -L${SRCDIR}/lib/android/x86 -lm3 -lm
// #cgo android,amd64 LDFLAGS: -L${SRCDIR}/lib/android/x86_64 -lm3 -lm
#include "wasm3.h"
#include "m3_api_libc.h"
#include "m3_api_wasi.h"
#include "m3_env.h"
#include "go-wasm3.h"
#include <stdio.h>
typedef uint32_t __wasi_size_t;
// #include "extra/wasi_core.h"
IM3Function module_get_function(IM3Module i_module, int index);
IM3Function module_get_imported_function(IM3Module i_module, int index);
int call(IM3Function i_function, uint32_t i_argc, void* i_argv, void* o_result);
int get_allocated_memory_length(IM3Runtime i_runtime);
u8* get_allocated_memory(IM3Runtime i_runtime);
const void * mowrapper(IM3Runtime runtime, uint64_t * _sp, void * _mem);
int attachFunction(IM3Module i_module, char* moduleName, char* functionName, char* signature);
void* m3ApiOffsetToPtr(void* offset, void* _mem);
const char* function_get_import_module(IM3Function i_function);
const char* function_get_import_field(IM3Function i_function);
int findFunction(IM3Function * o_function, IM3Runtime i_runtime, const char * const i_moduleName, const char * const i_functionName);
void get_function(IM3Function * o_function, IM3Module i_module, int i);
u8 function_get_arg_type(IM3Function i_function, int index);
typedef struct wasi_iovec_t
{
__wasi_size_t buf;
__wasi_size_t buf_len;
} wasi_iovec_t;
*/
import "C"
import (
"errors"
"fmt"
"reflect"
"sync"
"unsafe"
)
const (
PageSize uint32 = 0x10000
)
// RuntimeT is an alias for IM3Runtime
type RuntimeT C.IM3Runtime
// EnvironmentT is an alias for IM3Environment
type EnvironmentT C.IM3Environment
// ModuleT is an alias for IM3Module
type ModuleT C.IM3Module
// FunctionT is an alias for IM3Function
type FunctionT C.IM3Function
// FuncTypeT is an alias for IM3FuncType
type FuncTypeT C.IM3FuncType
// ResultT is an alias for M3Result
type ResultT C.M3Result
// WasiIoVec is an alias for wasi_iovec_t
type WasiIoVec C.wasi_iovec_t
// CallbackFunction is the signature for callbacks
type CallbackFunction func(runtime RuntimeT, sp unsafe.Pointer, mem unsafe.Pointer) int
var slotsToCallbacks = make(map[int]CallbackFunction)
// GetBuf return the internal buffer index
func (w *WasiIoVec) GetBuf() uint32 {
return uint32(w.buf)
}
// GetBufLen return the buffer len
func (w *WasiIoVec) GetBufLen() int {
return int(w.buf_len)
}
//export dynamicFunctionWrapper
func dynamicFunctionWrapper(runtime RuntimeT, _sp unsafe.Pointer, _mem unsafe.Pointer, slot uint64) int {
lock.Lock()
fn := slotsToCallbacks[int(slot)]
lock.Unlock()
return fn(runtime, _sp, _mem)
}
var (
errParseModule = errors.New("Parse error")
errLoadModule = errors.New("Load error")
errFuncLookupFailed = errors.New("Function lookup failed")
)
// Config holds the runtime and environment configuration
type Config struct {
Environment *Environment
StackSize uint
// EnableWASI bool
EnableSpecTest bool
}
// Runtime wraps a WASM3 runtime
type Runtime struct {
ptr RuntimeT
cfg *Config
}
// Ptr returns a IM3Runtime pointer
func (r *Runtime) Ptr() C.IM3Runtime {
return (C.IM3Runtime)(r.ptr)
}
// Load wraps the parse and load module calls.
// This will be replaced by env.ParseModule and Runtime.LoadModule.
func (r *Runtime) Load(wasmBytes []byte) (*Module, error) {
result := C.m3Err_none
bytes := C.CBytes(wasmBytes)
length := len(wasmBytes)
var module C.IM3Module
result = C.m3_ParseModule(
r.cfg.Environment.Ptr(),
&module,
(*C.uchar)(bytes),
C.uint(length),
)
if result != nil {
return nil, errParseModule
}
if module.memoryImported {
module.memoryImported = false
}
result = C.m3_LoadModule(
r.Ptr(),
module,
)
if result != nil {
return nil, errLoadModule
}
result = C.m3_LinkSpecTest(r.Ptr().modules)
if result != nil {
return nil, errors.New("LinkSpecTest failed")
}
// if r.cfg.EnableWASI {
// C.m3_LinkWASI(r.Ptr().modules)
// }
m := NewModule((ModuleT)(module))
return m, nil
}
var lock = sync.Mutex{}
// AttachFunction binds a callable function to a module+func
func (r *Runtime) AttachFunction(moduleName string, functionName string, signature string, callback CallbackFunction) {
_moduleName := C.CString(moduleName)
defer C.free(unsafe.Pointer(_moduleName))
_functionName := C.CString(functionName)
defer C.free(unsafe.Pointer(_functionName))
_signature := C.CString(signature)
defer C.free(unsafe.Pointer(_signature))
lock.Lock()
slot := C.attachFunction(r.Ptr().modules, _moduleName, _functionName, _signature)
slotsToCallbacks[int(slot)] = callback
lock.Unlock()
}
// LoadModule wraps m3_LoadModule and returns a module object
func (r *Runtime) LoadModule(module *Module) (*Module, error) {
if module.Ptr().memoryImported {
module.Ptr().memoryImported = false
}
result := C.m3Err_none
result = C.m3_LoadModule(
r.Ptr(),
module.Ptr(),
)
if result != nil {
return nil, errLoadModule
}
if r.cfg.EnableSpecTest {
C.m3_LinkSpecTest(r.Ptr().modules)
}
// if r.cfg.EnableWASI {
// C.m3_LinkWASI(r.Ptr().modules)
// }
return module, nil
}
// FindFunction calls m3_FindFunction and returns a call function
func (r *Runtime) FindFunction(funcName string) (FunctionWrapper, error) {
result := C.m3Err_none
var f C.IM3Function
cFuncName := C.CString(funcName)
defer C.free(unsafe.Pointer(cFuncName))
result = C.m3_FindFunction(
&f,
r.Ptr(),
cFuncName,
)
if result != nil {
return nil, errFuncLookupFailed
}
fn := &Function{
ptr: (FunctionT)(f),
}
// var fnWrapper FunctionWrapper
// fnWrapper = fn.Call
return FunctionWrapper(fn.Call), nil
}
// FindFunction does thins
func (r *Runtime) FindFunctionByModule(moduleName string, funcName string) (FunctionWrapper, error) {
var f C.IM3Function
cModuleName := C.CString(moduleName)
defer C.free(unsafe.Pointer(cModuleName))
cFuncName := C.CString(funcName)
defer C.free(unsafe.Pointer(cFuncName))
result := C.findFunction(
&f,
r.Ptr(),
cModuleName,
cFuncName,
)
if result != 0 {
return nil, errFuncLookupFailed
}
fn := &Function{
ptr: (FunctionT)(f),
}
// var fnWrapper FunctionWrapper
// fnWrapper = fn.Call
return FunctionWrapper(fn.Call), nil
}
// Destroy free calls m3_FreeRuntime
func (r *Runtime) Destroy() {
C.m3_FreeRuntime(r.Ptr())
r.cfg.Environment.Destroy()
}
// Memory allows access to runtime Memory.
// Taken from Wasmer extension: https://github.com/wasmerio/go-ext-wasm
func (r *Runtime) Memory() []byte {
mem := C.get_allocated_memory(
r.Ptr(),
)
var data = (*uint8)(mem)
length := r.GetAllocatedMemoryLength()
var header reflect.SliceHeader
header = *(*reflect.SliceHeader)(unsafe.Pointer(&header))
header.Data = uintptr(unsafe.Pointer(data))
header.Len = int(length)
header.Cap = int(length)
return *(*[]byte)(unsafe.Pointer(&header))
}
// GetAllocatedMemoryLength returns the amount of allocated runtime memory
func (r *Runtime) GetAllocatedMemoryLength() int {
length := C.get_allocated_memory_length(r.Ptr())
return int(length)
}
func (r *Runtime) ResizeMemory(numPages int32) error {
err := C.ResizeMemory(r.Ptr(), C.u32(numPages))
if err != C.m3Err_none {
return errors.New(C.GoString(err))
}
return nil
}
// ParseModule is a helper that calls the same function in env.
func (r *Runtime) ParseModule(wasmBytes []byte) (*Module, error) {
return r.cfg.Environment.ParseModule(wasmBytes)
}
func (r *Runtime) PrintRuntimeInfo() {
C.m3_PrintRuntimeInfo(r.Ptr())
C.m3_PrintM3Info()
C.m3_PrintProfilerInfo()
}
// NewRuntime initializes a new runtime
// TODO: nativeStackInfo is passed as NULL
func NewRuntime(cfg *Config) *Runtime {
// env *Environment, stackSize uint
ptr := C.m3_NewRuntime(
cfg.Environment.Ptr(),
C.uint(cfg.StackSize),
nil,
)
return &Runtime{
ptr: (RuntimeT)(ptr),
cfg: cfg,
}
}
// Module wraps a WASM3 module.
type Module struct {
ptr ModuleT
numFunctions int
numImports int
}
// Ptr returns a pointer to IM3Module
func (m *Module) Ptr() C.IM3Module {
return (C.IM3Module)(m.ptr)
}
// GetFunction provides access to IM3Function->functions
func (m *Module) GetFunction(index uint) (*Function, error) {
if uint(m.NumFunctions()) <= index {
return nil, errFuncLookupFailed
}
ptr := C.module_get_function(m.Ptr(), C.int(index))
name := C.GoString(ptr.name)
return &Function{
ptr: (FunctionT)(ptr),
Name: name,
}, nil
}
func (f *Function) GetReturnType() uint8 {
return uint8(f.ptr.funcType.returnType)
}
func (f *Function) GetNumArgs() uint32 {
return uint32(f.ptr.funcType.numArgs)
}
func (f *Function) GetArgType(index int) uint8 {
return uint8(C.function_get_arg_type(f.ptr, C.int(index)))
}
func (f *Function) GetSignature() string {
// TODO this is completely wrong but should work for basic functions for the moment...
s := "i("
for i := uint32(0); i < f.GetNumArgs(); i++ {
s += "i"
}
s += ")"
return s
}
// GetFunctionByName is a helper to lookup functions by name
// TODO: could be optimized by caching function names and pointer on the Go side, right after the load call.
func (m *Module) | (lookupName string) (*Function, error) {
var fn *Function
for i := 0; i < m.NumFunctions(); i++ {
ptr := C.module_get_function(m.Ptr(), C.int(i))
name := C.GoString(ptr.name)
if name != lookupName {
continue
}
fn = &Function{
ptr: (FunctionT)(ptr),
Name: name,
}
return fn, nil
}
return nil, errFuncLookupFailed
}
// NumFunctions provides access to numFunctions.
func (m *Module) NumFunctions() int {
// In case the number of functions hasn't been resolved yet, retrieve the int and keep it in the structure
if m.numFunctions == -1 {
m.numFunctions = int(m.Ptr().numFunctions)
}
return m.numFunctions
}
func (m *Module) FunctionNames() []string {
functions := make([]string, 0)
for i := 0; i < int(m.Ptr().numFunctions); i++ {
f := C.module_get_function(m.Ptr(), C.int(i))
functions = append(functions, C.GoString(f.name))
fmt.Printf("fun: '%v' module: %p\n", C.GoString(f.name), f.module)
}
return functions
}
// NumImports provides access to numImports
func (m *Module) NumImports() int {
if m.numImports == -1 {
m.numImports = int(m.Ptr().numImports)
}
return m.numImports
}
// TODO: Store the CStrings to later free them!
func (m *Module) LinkRawFunction(moduleName, functionName, signature string, fn unsafe.Pointer) error {
_moduleName := C.CString(moduleName)
// defer C.free(unsafe.Pointer(_moduleName))
_functionName := C.CString(functionName)
// defer C.free(unsafe.Pointer(_functionName))
_signature := C.CString(signature)
// defer C.free(unsafe.Pointer(_signature))
result := C.m3_LinkRawFunction(m.Ptr(), _moduleName, _functionName, _signature, (*[0]byte)(fn))
if result != nil {
return fmt.Errorf(C.GoString(result))
}
return nil
}
// GetModule retreive the function's module
func (f *Function) GetModule() *Module {
return NewModule(f.ptr.module)
}
func (f *Function) GetImportModule() *string {
if f.ptr == nil {
return nil
}
cs := C.function_get_import_module(f.ptr)
if cs == nil {
return nil
}
res := C.GoString(cs)
return &res
}
func (f *Function) GetImportField() *string {
if f.ptr == nil {
return nil
}
cs := C.function_get_import_field(f.ptr)
if cs == nil {
return nil
}
res := C.GoString(cs)
return &res
}
// Name gets the module's name
func (m *Module) Name() string {
return C.GoString(m.ptr.name)
}
// NewModule wraps a WASM3 moduke
func NewModule(ptr ModuleT) *Module {
return &Module{
ptr: ptr,
numFunctions: -1,
numImports: -1,
}
}
// Function is a function wrapper
type Function struct {
ptr FunctionT
// fnWrapper FunctionWrapper
Name string
}
// FunctionWrapper is used to wrap WASM3 call methods and make the calls more idiomatic
type FunctionWrapper func(args ...interface{}) (interface{}, error)
// Ptr returns a pointer to IM3Function
func (f *Function) Ptr() C.IM3Function {
return (C.IM3Function)(f.ptr)
}
// Call implements a better call function
func (f *Function) Call(args ...interface{}) (interface{}, error) {
length := len(args)
cArgs := make([]int64, length)
for i, v := range args {
p := unsafe.Pointer(&cArgs[i])
switch val := v.(type) {
case int:
*(*C.i32)(p) = C.i32(val)
case int32:
*(*C.i32)(p) = C.i32(val)
case int64:
*(*C.i64)(p) = C.i64(val)
case float32:
*(*C.f32)(p) = C.f32(val)
case float64:
*(*C.f64)(p) = C.f64(val)
default:
return 0, fmt.Errorf("invalid arg type %T", val)
}
}
var result [8]byte
var err C.int
if length == 0 {
err = C.call(f.Ptr(), 0, nil, unsafe.Pointer(&result[0]))
} else {
err = C.call(f.Ptr(), C.uint(length), unsafe.Pointer(&cArgs[0]), unsafe.Pointer(&result[0]))
}
if err == -1 {
return 0, errors.New(LastErrorString())
}
switch f.Ptr().funcType.returnType {
case C.c_m3Type_i32:
return *(*int32)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_i64:
return *(*int64)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_f32:
return *(*float32)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_f64:
return *(*float64)(unsafe.Pointer(&result[0])), nil
case C.c_m3Type_none:
return 0, nil
default:
return 0, errors.New("unexpected return type (go)")
}
}
// Environment wraps a WASM3 environment
type Environment struct {
ptr EnvironmentT
}
// ParseModule wraps m3_ParseModule
func (e *Environment) ParseModule(wasmBytes []byte) (*Module, error) {
result := C.m3Err_none
bytes := C.CBytes(wasmBytes)
length := len(wasmBytes)
var module C.IM3Module
result = C.m3_ParseModule(
e.Ptr(),
&module,
(*C.uchar)(bytes),
C.uint(length),
)
if result != nil {
return nil, errParseModule
}
return NewModule((ModuleT)(module)), nil
}
// Ptr returns a pointer to IM3Environment
func (e *Environment) Ptr() C.IM3Environment {
return (C.IM3Environment)(e.ptr)
}
// Destroy calls m3_FreeEnvironment
func (e *Environment) Destroy() {
C.m3_FreeEnvironment(e.Ptr())
}
// NewEnvironment initializes a new environment
func NewEnvironment() *Environment {
ptr := C.m3_NewEnvironment()
return &Environment{
ptr: (EnvironmentT)(ptr),
}
}
| GetFunctionByName | identifier_name |
preview_panel.js | // Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
'use strict';
/**
* PreviewPanel UI class.
* @param {HTMLElement} element DOM Element of preview panel.
* @param {PreviewPanel.VisibilityType} visibilityType Initial value of the
* visibility type.
* @param {MetadataCache} metadataCache Metadata cache.
* @param {VolumeManagerWrapper} volumeManager Volume manager.
* @constructor
* @extends {cr.EventTarget}
*/
var PreviewPanel = function(element,
visibilityType,
metadataCache,
volumeManager) {
/**
* The cached height of preview panel.
* @type {number}
* @private
*/
this.height_ = 0;
/**
* Visibility type of the preview panel.
* @type {PreviewPanel.VisiblityType}
* @private
*/
this.visibilityType_ = visibilityType;
/**
* Current entry to be displayed.
* @type {Entry}
* @private
*/
this.currentEntry_ = null;
/**
* Dom element of the preview panel.
* @type {HTMLElement}
* @private
*/
this.element_ = element;
/**
* @type {PreviewPanel.Thumbnails}
*/
this.thumbnails = new PreviewPanel.Thumbnails(
element.querySelector('.preview-thumbnails'),
metadataCache,
volumeManager);
/**
* @type {HTMLElement}
* @private
*/
this.summaryElement_ = element.querySelector('.preview-summary');
/**
* @type {PreviewPanel.CalculatingSizeLabel}
* @private
*/
this.calculatingSizeLabel_ = new PreviewPanel.CalculatingSizeLabel(
this.summaryElement_.querySelector('.calculating-size'));
/**
* @type {HTMLElement}
* @private
*/
this.previewText_ = element.querySelector('.preview-text');
/**
* FileSelection to be displayed.
* @type {FileSelection}
* @private
*/
this.selection_ = {entries: [], computeBytes: function() {}};
/**
* Sequence value that is incremented by every selection update and is used to
* check if the callback is up to date or not.
* @type {number}
* @private
*/
this.sequence_ = 0;
/**
* @type {VolumeManagerWrapper}
* @private
*/
this.volumeManager_ = volumeManager;
cr.EventTarget.call(this);
};
/**
* Name of PreviewPanels's event.
* @enum {string}
* @const
*/
PreviewPanel.Event = Object.freeze({
// Event to be triggered at the end of visibility change.
VISIBILITY_CHANGE: 'visibilityChange'
});
/**
* Visibility type of the preview panel.
*/
PreviewPanel.VisibilityType = Object.freeze({
// Preview panel always shows.
ALWAYS_VISIBLE: 'alwaysVisible',
// Preview panel shows when the selection property are set.
AUTO: 'auto',
// Preview panel does not show.
ALWAYS_HIDDEN: 'alwaysHidden'
});
/**
* @private
*/
PreviewPanel.Visibility_ = Object.freeze({
VISIBLE: 'visible',
HIDING: 'hiding',
HIDDEN: 'hidden'
});
PreviewPanel.prototype = {
__proto__: cr.EventTarget.prototype,
/**
* Setter for the current entry.
* @param {Entry} entry New entry.
*/
set currentEntry(entry) {
if (util.isSameEntry(this.currentEntry_, entry))
return;
this.currentEntry_ = entry;
this.updateVisibility_();
this.updatePreviewArea_();
},
/**
* Setter for the visibility type.
* @param {PreviewPanel.VisibilityType} visibilityType New value of visibility
* type.
*/
set visibilityType(visibilityType) {
this.visibilityType_ = visibilityType;
this.updateVisibility_();
// Also update the preview area contents, because the update is suppressed
// while the visibility is hiding or hidden.
this.updatePreviewArea_();
},
get visible() {
return this.element_.getAttribute('visibility') ==
PreviewPanel.Visibility_.VISIBLE;
},
/**
* Obtains the height of preview panel.
* @return {number} Height of preview panel.
*/
get height() {
this.height_ = this.height_ || this.element_.clientHeight;
return this.height_;
}
};
/**
* Initializes the element.
*/
PreviewPanel.prototype.initialize = function() {
this.element_.addEventListener('webkitTransitionEnd',
this.onTransitionEnd_.bind(this));
this.updateVisibility_();
// Also update the preview area contents, because the update is suppressed
// while the visibility is hiding or hidden.
this.updatePreviewArea_();
};
/**
* Apply the selection and update the view of the preview panel.
* @param {FileSelection} selection Selection to be applied.
*/
PreviewPanel.prototype.setSelection = function(selection) {
this.sequence_++;
this.selection_ = selection;
this.updateVisibility_();
this.updatePreviewArea_();
};
/**
* Update the visibility of the preview panel.
* @private
*/
PreviewPanel.prototype.updateVisibility_ = function() {
// Get the new visibility value.
var visibility = this.element_.getAttribute('visibility');
var newVisible = null;
switch (this.visibilityType_) {
case PreviewPanel.VisibilityType.ALWAYS_VISIBLE:
newVisible = true;
break;
case PreviewPanel.VisibilityType.AUTO:
newVisible = this.selection_.entries.length !== 0;
break;
case PreviewPanel.VisibilityType.ALWAYS_HIDDEN:
newVisible = false;
break;
default:
console.error('Invalid visibilityType.');
return;
}
// If the visibility has been already the new value, just return.
if ((visibility == PreviewPanel.Visibility_.VISIBLE && newVisible) ||
(visibility == PreviewPanel.Visibility_.HIDDEN && !newVisible))
return;
// Set the new visibility value.
if (newVisible) {
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.VISIBLE);
cr.dispatchSimpleEvent(this, PreviewPanel.Event.VISIBILITY_CHANGE);
} else |
};
/**
* Update the text in the preview panel.
* @private
*/
PreviewPanel.prototype.updatePreviewArea_ = function() {
// If the preview panel is hiding, does not update the current view.
if (!this.visible)
return;
var selection = this.selection_;
// If no item is selected, no information is displayed on the footer.
if (selection.totalCount === 0) {
this.thumbnails.hidden = true;
this.calculatingSizeLabel_.hidden = true;
this.previewText_.textContent = '';
return;
}
// If one item is selected, show thumbnail and entry name of the item.
if (selection.totalCount === 1) {
this.thumbnails.hidden = false;
this.thumbnails.selection = selection;
this.calculatingSizeLabel_.hidden = true;
this.previewText_.textContent = util.getEntryLabel(
this.volumeManager_, selection.entries[0]);
return;
}
// Update thumbnails.
this.thumbnails.hidden = false;
this.thumbnails.selection = selection;
// Obtains the preview text.
var text;
if (selection.directoryCount == 0)
text = strf('MANY_FILES_SELECTED', selection.fileCount);
else if (selection.fileCount == 0)
text = strf('MANY_DIRECTORIES_SELECTED', selection.directoryCount);
else
text = strf('MANY_ENTRIES_SELECTED', selection.totalCount);
// Obtains the size of files.
this.calculatingSizeLabel_.hidden = selection.bytesKnown;
if (selection.bytesKnown && selection.showBytes)
text += ', ' + util.bytesToString(selection.bytes);
// Set the preview text to the element.
this.previewText_.textContent = text;
// Request the byte calculation if needed.
if (!selection.bytesKnown) {
this.selection_.computeBytes(function(sequence) {
// Selection has been already updated.
if (this.sequence_ != sequence)
return;
this.updatePreviewArea_();
}.bind(this, this.sequence_));
}
};
/**
* Event handler to be called at the end of hiding transition.
* @param {Event} event The webkitTransitionEnd event.
* @private
*/
PreviewPanel.prototype.onTransitionEnd_ = function(event) {
if (event.target != this.element_ || event.propertyName != 'opacity')
return;
var visibility = this.element_.getAttribute('visibility');
if (visibility != PreviewPanel.Visibility_.HIDING)
return;
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.HIDDEN);
cr.dispatchSimpleEvent(this, PreviewPanel.Event.VISIBILITY_CHANGE);
};
/**
* Animating label that is shown during the bytes of selection entries is being
* calculated.
*
* This label shows dots and varying the number of dots every
* CalculatingSizeLabel.PERIOD milliseconds.
* @param {HTMLElement} element DOM element of the label.
* @constructor
*/
PreviewPanel.CalculatingSizeLabel = function(element) {
this.element_ = element;
this.count_ = 0;
this.intervalID_ = null;
Object.seal(this);
};
/**
* Time period in milliseconds.
* @const {number}
*/
PreviewPanel.CalculatingSizeLabel.PERIOD = 500;
PreviewPanel.CalculatingSizeLabel.prototype = {
/**
* Set visibility of the label.
* When it is displayed, the text is animated.
* @param {boolean} hidden Whether to hide the label or not.
*/
set hidden(hidden) {
this.element_.hidden = hidden;
if (!hidden) {
if (this.intervalID_ != null)
return;
this.count_ = 2;
this.intervalID_ =
setInterval(this.onStep_.bind(this),
PreviewPanel.CalculatingSizeLabel.PERIOD);
this.onStep_();
} else {
if (this.intervalID_ == null)
return;
clearInterval(this.intervalID_);
this.intervalID_ = null;
}
}
};
/**
* Increments the counter and updates the number of dots.
* @private
*/
PreviewPanel.CalculatingSizeLabel.prototype.onStep_ = function() {
var text = str('CALCULATING_SIZE');
for (var i = 0; i < ~~(this.count_ / 2) % 4; i++) {
text += '.';
}
this.element_.textContent = text;
this.count_++;
};
/**
* Thumbnails on the preview panel.
*
* @param {HTMLElement} element DOM Element of thumbnail container.
* @param {MetadataCache} metadataCache MetadataCache.
* @param {VolumeManagerWrapper} volumeManager Volume manager instance.
* @constructor
*/
PreviewPanel.Thumbnails = function(element, metadataCache, volumeManager) {
this.element_ = element;
this.metadataCache_ = metadataCache;
this.volumeManager_ = volumeManager;
this.sequence_ = 0;
Object.seal(this);
};
/**
* Maximum number of thumbnails.
* @const {number}
*/
PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT = 4;
/**
* Edge length of the thumbnail square.
* @const {number}
*/
PreviewPanel.Thumbnails.THUMBNAIL_SIZE = 35;
/**
* Longer edge length of zoomed thumbnail rectangle.
* @const {number}
*/
PreviewPanel.Thumbnails.ZOOMED_THUMBNAIL_SIZE = 200;
PreviewPanel.Thumbnails.prototype = {
/**
* Sets entries to be displayed in the view.
* @param {Array.<Entry>} value Entries.
*/
set selection(value) {
this.sequence_++;
this.loadThumbnails_(value);
},
/**
* Set visibility of the thumbnails.
* @param {boolean} value Whether to hide the thumbnails or not.
*/
set hidden(value) {
this.element_.hidden = value;
}
};
/**
* Loads thumbnail images.
* @param {FileSelection} selection Selection containing entries that are
* sources of images.
* @private
*/
PreviewPanel.Thumbnails.prototype.loadThumbnails_ = function(selection) {
var entries = selection.entries;
this.element_.classList.remove('has-zoom');
this.element_.innerText = '';
var clickHandler = selection.tasks &&
selection.tasks.executeDefault.bind(selection.tasks);
var length = Math.min(entries.length,
PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT);
for (var i = 0; i < length; i++) {
// Create a box.
var box = this.element_.ownerDocument.createElement('div');
box.style.zIndex = PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT + 1 - i;
// Load the image.
if (entries[i]) {
FileGrid.decorateThumbnailBox(box,
entries[i],
this.metadataCache_,
this.volumeManager_,
ThumbnailLoader.FillMode.FILL,
FileGrid.ThumbnailQuality.LOW,
i == 0 && length == 1 &&
this.setZoomedImage_.bind(this));
}
// Register the click handler.
if (clickHandler)
box.addEventListener('click', clickHandler);
// Append
this.element_.appendChild(box);
}
};
/**
* Create the zoomed version of image and set it to the DOM element to show the
* zoomed image.
*
* @param {Image} image Image to be source of the zoomed image.
* @param {transform} transform Transformation to be applied to the image.
* @private
*/
PreviewPanel.Thumbnails.prototype.setZoomedImage_ = function(image, transform) {
if (!image)
return;
var width = image.width || 0;
var height = image.height || 0;
if (width == 0 ||
height == 0 ||
(width < PreviewPanel.Thumbnails.THUMBNAIL_SIZE * 2 &&
height < PreviewPanel.Thumbnails.THUMBNAIL_SIZE * 2))
return;
var scale = Math.min(1,
PreviewPanel.Thumbnails.ZOOMED_THUMBNAIL_SIZE /
Math.max(width, height));
var imageWidth = ~~(width * scale);
var imageHeight = ~~(height * scale);
var zoomedImage = this.element_.ownerDocument.createElement('img');
if (scale < 0.3) {
// Scaling large images kills animation. Downscale it in advance.
// Canvas scales images with liner interpolation. Make a larger
// image (but small enough to not kill animation) and let IMAGE
// scale it smoothly.
var INTERMEDIATE_SCALE = 3;
var canvas = this.element_.ownerDocument.createElement('canvas');
canvas.width = imageWidth * INTERMEDIATE_SCALE;
canvas.height = imageHeight * INTERMEDIATE_SCALE;
var ctx = canvas.getContext('2d');
ctx.drawImage(image, 0, 0, canvas.width, canvas.height);
// Using bigger than default compression reduces image size by
// several times. Quality degradation compensated by greater resolution.
zoomedImage.src = canvas.toDataURL('image/jpeg', 0.6);
} else {
zoomedImage.src = image.src;
}
var boxWidth = Math.max(PreviewPanel.Thumbnails.THUMBNAIL_SIZE, imageWidth);
var boxHeight = Math.max(PreviewPanel.Thumbnails.THUMBNAIL_SIZE, imageHeight);
if (transform && transform.rotate90 % 2 == 1) {
var t = boxWidth;
boxWidth = boxHeight;
boxHeight = t;
}
util.applyTransform(zoomedImage, transform);
var zoomedBox = this.element_.ownerDocument.createElement('div');
zoomedBox.className = 'popup';
zoomedBox.style.width = boxWidth + 'px';
zoomedBox.style.height = boxHeight + 'px';
zoomedBox.appendChild(zoomedImage);
this.element_.appendChild(zoomedBox);
this.element_.classList.add('has-zoom');
return;
};
| {
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.HIDING);
} | conditional_block |
preview_panel.js | // Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
'use strict';
/**
* PreviewPanel UI class.
* @param {HTMLElement} element DOM Element of preview panel.
* @param {PreviewPanel.VisibilityType} visibilityType Initial value of the
* visibility type.
* @param {MetadataCache} metadataCache Metadata cache.
* @param {VolumeManagerWrapper} volumeManager Volume manager.
* @constructor
* @extends {cr.EventTarget}
*/
var PreviewPanel = function(element,
visibilityType,
metadataCache,
volumeManager) {
/**
* The cached height of preview panel.
* @type {number}
* @private
*/
this.height_ = 0;
/**
* Visibility type of the preview panel.
* @type {PreviewPanel.VisiblityType}
* @private
*/ | * Current entry to be displayed.
* @type {Entry}
* @private
*/
this.currentEntry_ = null;
/**
* Dom element of the preview panel.
* @type {HTMLElement}
* @private
*/
this.element_ = element;
/**
* @type {PreviewPanel.Thumbnails}
*/
this.thumbnails = new PreviewPanel.Thumbnails(
element.querySelector('.preview-thumbnails'),
metadataCache,
volumeManager);
/**
* @type {HTMLElement}
* @private
*/
this.summaryElement_ = element.querySelector('.preview-summary');
/**
* @type {PreviewPanel.CalculatingSizeLabel}
* @private
*/
this.calculatingSizeLabel_ = new PreviewPanel.CalculatingSizeLabel(
this.summaryElement_.querySelector('.calculating-size'));
/**
* @type {HTMLElement}
* @private
*/
this.previewText_ = element.querySelector('.preview-text');
/**
* FileSelection to be displayed.
* @type {FileSelection}
* @private
*/
this.selection_ = {entries: [], computeBytes: function() {}};
/**
* Sequence value that is incremented by every selection update and is used to
* check if the callback is up to date or not.
* @type {number}
* @private
*/
this.sequence_ = 0;
/**
* @type {VolumeManagerWrapper}
* @private
*/
this.volumeManager_ = volumeManager;
cr.EventTarget.call(this);
};
/**
* Name of PreviewPanels's event.
* @enum {string}
* @const
*/
PreviewPanel.Event = Object.freeze({
// Event to be triggered at the end of visibility change.
VISIBILITY_CHANGE: 'visibilityChange'
});
/**
* Visibility type of the preview panel.
*/
PreviewPanel.VisibilityType = Object.freeze({
// Preview panel always shows.
ALWAYS_VISIBLE: 'alwaysVisible',
// Preview panel shows when the selection property are set.
AUTO: 'auto',
// Preview panel does not show.
ALWAYS_HIDDEN: 'alwaysHidden'
});
/**
* @private
*/
PreviewPanel.Visibility_ = Object.freeze({
VISIBLE: 'visible',
HIDING: 'hiding',
HIDDEN: 'hidden'
});
PreviewPanel.prototype = {
__proto__: cr.EventTarget.prototype,
/**
* Setter for the current entry.
* @param {Entry} entry New entry.
*/
set currentEntry(entry) {
if (util.isSameEntry(this.currentEntry_, entry))
return;
this.currentEntry_ = entry;
this.updateVisibility_();
this.updatePreviewArea_();
},
/**
* Setter for the visibility type.
* @param {PreviewPanel.VisibilityType} visibilityType New value of visibility
* type.
*/
set visibilityType(visibilityType) {
this.visibilityType_ = visibilityType;
this.updateVisibility_();
// Also update the preview area contents, because the update is suppressed
// while the visibility is hiding or hidden.
this.updatePreviewArea_();
},
get visible() {
return this.element_.getAttribute('visibility') ==
PreviewPanel.Visibility_.VISIBLE;
},
/**
* Obtains the height of preview panel.
* @return {number} Height of preview panel.
*/
get height() {
this.height_ = this.height_ || this.element_.clientHeight;
return this.height_;
}
};
/**
* Initializes the element.
*/
PreviewPanel.prototype.initialize = function() {
this.element_.addEventListener('webkitTransitionEnd',
this.onTransitionEnd_.bind(this));
this.updateVisibility_();
// Also update the preview area contents, because the update is suppressed
// while the visibility is hiding or hidden.
this.updatePreviewArea_();
};
/**
* Apply the selection and update the view of the preview panel.
* @param {FileSelection} selection Selection to be applied.
*/
PreviewPanel.prototype.setSelection = function(selection) {
this.sequence_++;
this.selection_ = selection;
this.updateVisibility_();
this.updatePreviewArea_();
};
/**
* Update the visibility of the preview panel.
* @private
*/
PreviewPanel.prototype.updateVisibility_ = function() {
// Get the new visibility value.
var visibility = this.element_.getAttribute('visibility');
var newVisible = null;
switch (this.visibilityType_) {
case PreviewPanel.VisibilityType.ALWAYS_VISIBLE:
newVisible = true;
break;
case PreviewPanel.VisibilityType.AUTO:
newVisible = this.selection_.entries.length !== 0;
break;
case PreviewPanel.VisibilityType.ALWAYS_HIDDEN:
newVisible = false;
break;
default:
console.error('Invalid visibilityType.');
return;
}
// If the visibility has been already the new value, just return.
if ((visibility == PreviewPanel.Visibility_.VISIBLE && newVisible) ||
(visibility == PreviewPanel.Visibility_.HIDDEN && !newVisible))
return;
// Set the new visibility value.
if (newVisible) {
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.VISIBLE);
cr.dispatchSimpleEvent(this, PreviewPanel.Event.VISIBILITY_CHANGE);
} else {
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.HIDING);
}
};
/**
* Update the text in the preview panel.
* @private
*/
PreviewPanel.prototype.updatePreviewArea_ = function() {
// If the preview panel is hiding, does not update the current view.
if (!this.visible)
return;
var selection = this.selection_;
// If no item is selected, no information is displayed on the footer.
if (selection.totalCount === 0) {
this.thumbnails.hidden = true;
this.calculatingSizeLabel_.hidden = true;
this.previewText_.textContent = '';
return;
}
// If one item is selected, show thumbnail and entry name of the item.
if (selection.totalCount === 1) {
this.thumbnails.hidden = false;
this.thumbnails.selection = selection;
this.calculatingSizeLabel_.hidden = true;
this.previewText_.textContent = util.getEntryLabel(
this.volumeManager_, selection.entries[0]);
return;
}
// Update thumbnails.
this.thumbnails.hidden = false;
this.thumbnails.selection = selection;
// Obtains the preview text.
var text;
if (selection.directoryCount == 0)
text = strf('MANY_FILES_SELECTED', selection.fileCount);
else if (selection.fileCount == 0)
text = strf('MANY_DIRECTORIES_SELECTED', selection.directoryCount);
else
text = strf('MANY_ENTRIES_SELECTED', selection.totalCount);
// Obtains the size of files.
this.calculatingSizeLabel_.hidden = selection.bytesKnown;
if (selection.bytesKnown && selection.showBytes)
text += ', ' + util.bytesToString(selection.bytes);
// Set the preview text to the element.
this.previewText_.textContent = text;
// Request the byte calculation if needed.
if (!selection.bytesKnown) {
this.selection_.computeBytes(function(sequence) {
// Selection has been already updated.
if (this.sequence_ != sequence)
return;
this.updatePreviewArea_();
}.bind(this, this.sequence_));
}
};
/**
* Event handler to be called at the end of hiding transition.
* @param {Event} event The webkitTransitionEnd event.
* @private
*/
PreviewPanel.prototype.onTransitionEnd_ = function(event) {
if (event.target != this.element_ || event.propertyName != 'opacity')
return;
var visibility = this.element_.getAttribute('visibility');
if (visibility != PreviewPanel.Visibility_.HIDING)
return;
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.HIDDEN);
cr.dispatchSimpleEvent(this, PreviewPanel.Event.VISIBILITY_CHANGE);
};
/**
* Animating label that is shown during the bytes of selection entries is being
* calculated.
*
* This label shows dots and varying the number of dots every
* CalculatingSizeLabel.PERIOD milliseconds.
* @param {HTMLElement} element DOM element of the label.
* @constructor
*/
PreviewPanel.CalculatingSizeLabel = function(element) {
this.element_ = element;
this.count_ = 0;
this.intervalID_ = null;
Object.seal(this);
};
/**
* Time period in milliseconds.
* @const {number}
*/
PreviewPanel.CalculatingSizeLabel.PERIOD = 500;
PreviewPanel.CalculatingSizeLabel.prototype = {
/**
* Set visibility of the label.
* When it is displayed, the text is animated.
* @param {boolean} hidden Whether to hide the label or not.
*/
set hidden(hidden) {
this.element_.hidden = hidden;
if (!hidden) {
if (this.intervalID_ != null)
return;
this.count_ = 2;
this.intervalID_ =
setInterval(this.onStep_.bind(this),
PreviewPanel.CalculatingSizeLabel.PERIOD);
this.onStep_();
} else {
if (this.intervalID_ == null)
return;
clearInterval(this.intervalID_);
this.intervalID_ = null;
}
}
};
/**
* Increments the counter and updates the number of dots.
* @private
*/
PreviewPanel.CalculatingSizeLabel.prototype.onStep_ = function() {
var text = str('CALCULATING_SIZE');
for (var i = 0; i < ~~(this.count_ / 2) % 4; i++) {
text += '.';
}
this.element_.textContent = text;
this.count_++;
};
/**
* Thumbnails on the preview panel.
*
* @param {HTMLElement} element DOM Element of thumbnail container.
* @param {MetadataCache} metadataCache MetadataCache.
* @param {VolumeManagerWrapper} volumeManager Volume manager instance.
* @constructor
*/
PreviewPanel.Thumbnails = function(element, metadataCache, volumeManager) {
this.element_ = element;
this.metadataCache_ = metadataCache;
this.volumeManager_ = volumeManager;
this.sequence_ = 0;
Object.seal(this);
};
/**
* Maximum number of thumbnails.
* @const {number}
*/
PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT = 4;
/**
* Edge length of the thumbnail square.
* @const {number}
*/
PreviewPanel.Thumbnails.THUMBNAIL_SIZE = 35;
/**
* Longer edge length of zoomed thumbnail rectangle.
* @const {number}
*/
PreviewPanel.Thumbnails.ZOOMED_THUMBNAIL_SIZE = 200;
PreviewPanel.Thumbnails.prototype = {
/**
* Sets entries to be displayed in the view.
* @param {Array.<Entry>} value Entries.
*/
set selection(value) {
this.sequence_++;
this.loadThumbnails_(value);
},
/**
* Set visibility of the thumbnails.
* @param {boolean} value Whether to hide the thumbnails or not.
*/
set hidden(value) {
this.element_.hidden = value;
}
};
/**
* Loads thumbnail images.
* @param {FileSelection} selection Selection containing entries that are
* sources of images.
* @private
*/
PreviewPanel.Thumbnails.prototype.loadThumbnails_ = function(selection) {
var entries = selection.entries;
this.element_.classList.remove('has-zoom');
this.element_.innerText = '';
var clickHandler = selection.tasks &&
selection.tasks.executeDefault.bind(selection.tasks);
var length = Math.min(entries.length,
PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT);
for (var i = 0; i < length; i++) {
// Create a box.
var box = this.element_.ownerDocument.createElement('div');
box.style.zIndex = PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT + 1 - i;
// Load the image.
if (entries[i]) {
FileGrid.decorateThumbnailBox(box,
entries[i],
this.metadataCache_,
this.volumeManager_,
ThumbnailLoader.FillMode.FILL,
FileGrid.ThumbnailQuality.LOW,
i == 0 && length == 1 &&
this.setZoomedImage_.bind(this));
}
// Register the click handler.
if (clickHandler)
box.addEventListener('click', clickHandler);
// Append
this.element_.appendChild(box);
}
};
/**
* Create the zoomed version of image and set it to the DOM element to show the
* zoomed image.
*
* @param {Image} image Image to be source of the zoomed image.
* @param {transform} transform Transformation to be applied to the image.
* @private
*/
PreviewPanel.Thumbnails.prototype.setZoomedImage_ = function(image, transform) {
if (!image)
return;
var width = image.width || 0;
var height = image.height || 0;
if (width == 0 ||
height == 0 ||
(width < PreviewPanel.Thumbnails.THUMBNAIL_SIZE * 2 &&
height < PreviewPanel.Thumbnails.THUMBNAIL_SIZE * 2))
return;
var scale = Math.min(1,
PreviewPanel.Thumbnails.ZOOMED_THUMBNAIL_SIZE /
Math.max(width, height));
var imageWidth = ~~(width * scale);
var imageHeight = ~~(height * scale);
var zoomedImage = this.element_.ownerDocument.createElement('img');
if (scale < 0.3) {
// Scaling large images kills animation. Downscale it in advance.
// Canvas scales images with liner interpolation. Make a larger
// image (but small enough to not kill animation) and let IMAGE
// scale it smoothly.
var INTERMEDIATE_SCALE = 3;
var canvas = this.element_.ownerDocument.createElement('canvas');
canvas.width = imageWidth * INTERMEDIATE_SCALE;
canvas.height = imageHeight * INTERMEDIATE_SCALE;
var ctx = canvas.getContext('2d');
ctx.drawImage(image, 0, 0, canvas.width, canvas.height);
// Using bigger than default compression reduces image size by
// several times. Quality degradation compensated by greater resolution.
zoomedImage.src = canvas.toDataURL('image/jpeg', 0.6);
} else {
zoomedImage.src = image.src;
}
var boxWidth = Math.max(PreviewPanel.Thumbnails.THUMBNAIL_SIZE, imageWidth);
var boxHeight = Math.max(PreviewPanel.Thumbnails.THUMBNAIL_SIZE, imageHeight);
if (transform && transform.rotate90 % 2 == 1) {
var t = boxWidth;
boxWidth = boxHeight;
boxHeight = t;
}
util.applyTransform(zoomedImage, transform);
var zoomedBox = this.element_.ownerDocument.createElement('div');
zoomedBox.className = 'popup';
zoomedBox.style.width = boxWidth + 'px';
zoomedBox.style.height = boxHeight + 'px';
zoomedBox.appendChild(zoomedImage);
this.element_.appendChild(zoomedBox);
this.element_.classList.add('has-zoom');
return;
}; | this.visibilityType_ = visibilityType;
/** | random_line_split |
preview_panel.js | // Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
'use strict';
/**
* PreviewPanel UI class.
* @param {HTMLElement} element DOM Element of preview panel.
* @param {PreviewPanel.VisibilityType} visibilityType Initial value of the
* visibility type.
* @param {MetadataCache} metadataCache Metadata cache.
* @param {VolumeManagerWrapper} volumeManager Volume manager.
* @constructor
* @extends {cr.EventTarget}
*/
var PreviewPanel = function(element,
visibilityType,
metadataCache,
volumeManager) {
/**
* The cached height of preview panel.
* @type {number}
* @private
*/
this.height_ = 0;
/**
* Visibility type of the preview panel.
* @type {PreviewPanel.VisiblityType}
* @private
*/
this.visibilityType_ = visibilityType;
/**
* Current entry to be displayed.
* @type {Entry}
* @private
*/
this.currentEntry_ = null;
/**
* Dom element of the preview panel.
* @type {HTMLElement}
* @private
*/
this.element_ = element;
/**
* @type {PreviewPanel.Thumbnails}
*/
this.thumbnails = new PreviewPanel.Thumbnails(
element.querySelector('.preview-thumbnails'),
metadataCache,
volumeManager);
/**
* @type {HTMLElement}
* @private
*/
this.summaryElement_ = element.querySelector('.preview-summary');
/**
* @type {PreviewPanel.CalculatingSizeLabel}
* @private
*/
this.calculatingSizeLabel_ = new PreviewPanel.CalculatingSizeLabel(
this.summaryElement_.querySelector('.calculating-size'));
/**
* @type {HTMLElement}
* @private
*/
this.previewText_ = element.querySelector('.preview-text');
/**
* FileSelection to be displayed.
* @type {FileSelection}
* @private
*/
this.selection_ = {entries: [], computeBytes: function() {}};
/**
* Sequence value that is incremented by every selection update and is used to
* check if the callback is up to date or not.
* @type {number}
* @private
*/
this.sequence_ = 0;
/**
* @type {VolumeManagerWrapper}
* @private
*/
this.volumeManager_ = volumeManager;
cr.EventTarget.call(this);
};
/**
* Name of PreviewPanels's event.
* @enum {string}
* @const
*/
PreviewPanel.Event = Object.freeze({
// Event to be triggered at the end of visibility change.
VISIBILITY_CHANGE: 'visibilityChange'
});
/**
* Visibility type of the preview panel.
*/
PreviewPanel.VisibilityType = Object.freeze({
// Preview panel always shows.
ALWAYS_VISIBLE: 'alwaysVisible',
// Preview panel shows when the selection property are set.
AUTO: 'auto',
// Preview panel does not show.
ALWAYS_HIDDEN: 'alwaysHidden'
});
/**
* @private
*/
PreviewPanel.Visibility_ = Object.freeze({
VISIBLE: 'visible',
HIDING: 'hiding',
HIDDEN: 'hidden'
});
PreviewPanel.prototype = {
__proto__: cr.EventTarget.prototype,
/**
* Setter for the current entry.
* @param {Entry} entry New entry.
*/
set currentEntry(entry) {
if (util.isSameEntry(this.currentEntry_, entry))
return;
this.currentEntry_ = entry;
this.updateVisibility_();
this.updatePreviewArea_();
},
/**
* Setter for the visibility type.
* @param {PreviewPanel.VisibilityType} visibilityType New value of visibility
* type.
*/
set visibilityType(visibilityType) {
this.visibilityType_ = visibilityType;
this.updateVisibility_();
// Also update the preview area contents, because the update is suppressed
// while the visibility is hiding or hidden.
this.updatePreviewArea_();
},
get visible() {
return this.element_.getAttribute('visibility') ==
PreviewPanel.Visibility_.VISIBLE;
},
/**
* Obtains the height of preview panel.
* @return {number} Height of preview panel.
*/
get height() {
this.height_ = this.height_ || this.element_.clientHeight;
return this.height_;
}
};
/**
* Initializes the element.
*/
PreviewPanel.prototype.initialize = function() {
this.element_.addEventListener('webkitTransitionEnd',
this.onTransitionEnd_.bind(this));
this.updateVisibility_();
// Also update the preview area contents, because the update is suppressed
// while the visibility is hiding or hidden.
this.updatePreviewArea_();
};
/**
* Apply the selection and update the view of the preview panel.
* @param {FileSelection} selection Selection to be applied.
*/
PreviewPanel.prototype.setSelection = function(selection) {
this.sequence_++;
this.selection_ = selection;
this.updateVisibility_();
this.updatePreviewArea_();
};
/**
* Update the visibility of the preview panel.
* @private
*/
PreviewPanel.prototype.updateVisibility_ = function() {
// Get the new visibility value.
var visibility = this.element_.getAttribute('visibility');
var newVisible = null;
switch (this.visibilityType_) {
case PreviewPanel.VisibilityType.ALWAYS_VISIBLE:
newVisible = true;
break;
case PreviewPanel.VisibilityType.AUTO:
newVisible = this.selection_.entries.length !== 0;
break;
case PreviewPanel.VisibilityType.ALWAYS_HIDDEN:
newVisible = false;
break;
default:
console.error('Invalid visibilityType.');
return;
}
// If the visibility has been already the new value, just return.
if ((visibility == PreviewPanel.Visibility_.VISIBLE && newVisible) ||
(visibility == PreviewPanel.Visibility_.HIDDEN && !newVisible))
return;
// Set the new visibility value.
if (newVisible) {
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.VISIBLE);
cr.dispatchSimpleEvent(this, PreviewPanel.Event.VISIBILITY_CHANGE);
} else {
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.HIDING);
}
};
/**
* Update the text in the preview panel.
* @private
*/
PreviewPanel.prototype.updatePreviewArea_ = function() {
// If the preview panel is hiding, does not update the current view.
if (!this.visible)
return;
var selection = this.selection_;
// If no item is selected, no information is displayed on the footer.
if (selection.totalCount === 0) {
this.thumbnails.hidden = true;
this.calculatingSizeLabel_.hidden = true;
this.previewText_.textContent = '';
return;
}
// If one item is selected, show thumbnail and entry name of the item.
if (selection.totalCount === 1) {
this.thumbnails.hidden = false;
this.thumbnails.selection = selection;
this.calculatingSizeLabel_.hidden = true;
this.previewText_.textContent = util.getEntryLabel(
this.volumeManager_, selection.entries[0]);
return;
}
// Update thumbnails.
this.thumbnails.hidden = false;
this.thumbnails.selection = selection;
// Obtains the preview text.
var text;
if (selection.directoryCount == 0)
text = strf('MANY_FILES_SELECTED', selection.fileCount);
else if (selection.fileCount == 0)
text = strf('MANY_DIRECTORIES_SELECTED', selection.directoryCount);
else
text = strf('MANY_ENTRIES_SELECTED', selection.totalCount);
// Obtains the size of files.
this.calculatingSizeLabel_.hidden = selection.bytesKnown;
if (selection.bytesKnown && selection.showBytes)
text += ', ' + util.bytesToString(selection.bytes);
// Set the preview text to the element.
this.previewText_.textContent = text;
// Request the byte calculation if needed.
if (!selection.bytesKnown) {
this.selection_.computeBytes(function(sequence) {
// Selection has been already updated.
if (this.sequence_ != sequence)
return;
this.updatePreviewArea_();
}.bind(this, this.sequence_));
}
};
/**
* Event handler to be called at the end of hiding transition.
* @param {Event} event The webkitTransitionEnd event.
* @private
*/
PreviewPanel.prototype.onTransitionEnd_ = function(event) {
if (event.target != this.element_ || event.propertyName != 'opacity')
return;
var visibility = this.element_.getAttribute('visibility');
if (visibility != PreviewPanel.Visibility_.HIDING)
return;
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.HIDDEN);
cr.dispatchSimpleEvent(this, PreviewPanel.Event.VISIBILITY_CHANGE);
};
/**
* Animating label that is shown during the bytes of selection entries is being
* calculated.
*
* This label shows dots and varying the number of dots every
* CalculatingSizeLabel.PERIOD milliseconds.
* @param {HTMLElement} element DOM element of the label.
* @constructor
*/
PreviewPanel.CalculatingSizeLabel = function(element) {
this.element_ = element;
this.count_ = 0;
this.intervalID_ = null;
Object.seal(this);
};
/**
* Time period in milliseconds.
* @const {number}
*/
PreviewPanel.CalculatingSizeLabel.PERIOD = 500;
PreviewPanel.CalculatingSizeLabel.prototype = {
/**
* Set visibility of the label.
* When it is displayed, the text is animated.
* @param {boolean} hidden Whether to hide the label or not.
*/
set hidden(hidden) {
this.element_.hidden = hidden;
if (!hidden) {
if (this.intervalID_ != null)
return;
this.count_ = 2;
this.intervalID_ =
setInterval(this.onStep_.bind(this),
PreviewPanel.CalculatingSizeLabel.PERIOD);
this.onStep_();
} else {
if (this.intervalID_ == null)
return;
clearInterval(this.intervalID_);
this.intervalID_ = null;
}
}
};
/**
* Increments the counter and updates the number of dots.
* @private
*/
PreviewPanel.CalculatingSizeLabel.prototype.onStep_ = function() {
var text = str('CALCULATING_SIZE');
for (var i = 0; i < ~~(this.count_ / 2) % 4; i++) {
text += '.';
}
this.element_.textContent = text;
this.count_++;
};
/**
* Thumbnails on the preview panel.
*
* @param {HTMLElement} element DOM Element of thumbnail container.
* @param {MetadataCache} metadataCache MetadataCache.
* @param {VolumeManagerWrapper} volumeManager Volume manager instance.
* @constructor
*/
PreviewPanel.Thumbnails = function(element, metadataCache, volumeManager) {
this.element_ = element;
this.metadataCache_ = metadataCache;
this.volumeManager_ = volumeManager;
this.sequence_ = 0;
Object.seal(this);
};
/**
* Maximum number of thumbnails.
* @const {number}
*/
PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT = 4;
/**
* Edge length of the thumbnail square.
* @const {number}
*/
PreviewPanel.Thumbnails.THUMBNAIL_SIZE = 35;
/**
* Longer edge length of zoomed thumbnail rectangle.
* @const {number}
*/
PreviewPanel.Thumbnails.ZOOMED_THUMBNAIL_SIZE = 200;
PreviewPanel.Thumbnails.prototype = {
/**
* Sets entries to be displayed in the view.
* @param {Array.<Entry>} value Entries.
*/
set | (value) {
this.sequence_++;
this.loadThumbnails_(value);
},
/**
* Set visibility of the thumbnails.
* @param {boolean} value Whether to hide the thumbnails or not.
*/
set hidden(value) {
this.element_.hidden = value;
}
};
/**
* Loads thumbnail images.
* @param {FileSelection} selection Selection containing entries that are
* sources of images.
* @private
*/
PreviewPanel.Thumbnails.prototype.loadThumbnails_ = function(selection) {
var entries = selection.entries;
this.element_.classList.remove('has-zoom');
this.element_.innerText = '';
var clickHandler = selection.tasks &&
selection.tasks.executeDefault.bind(selection.tasks);
var length = Math.min(entries.length,
PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT);
for (var i = 0; i < length; i++) {
// Create a box.
var box = this.element_.ownerDocument.createElement('div');
box.style.zIndex = PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT + 1 - i;
// Load the image.
if (entries[i]) {
FileGrid.decorateThumbnailBox(box,
entries[i],
this.metadataCache_,
this.volumeManager_,
ThumbnailLoader.FillMode.FILL,
FileGrid.ThumbnailQuality.LOW,
i == 0 && length == 1 &&
this.setZoomedImage_.bind(this));
}
// Register the click handler.
if (clickHandler)
box.addEventListener('click', clickHandler);
// Append
this.element_.appendChild(box);
}
};
/**
* Create the zoomed version of image and set it to the DOM element to show the
* zoomed image.
*
* @param {Image} image Image to be source of the zoomed image.
* @param {transform} transform Transformation to be applied to the image.
* @private
*/
PreviewPanel.Thumbnails.prototype.setZoomedImage_ = function(image, transform) {
if (!image)
return;
var width = image.width || 0;
var height = image.height || 0;
if (width == 0 ||
height == 0 ||
(width < PreviewPanel.Thumbnails.THUMBNAIL_SIZE * 2 &&
height < PreviewPanel.Thumbnails.THUMBNAIL_SIZE * 2))
return;
var scale = Math.min(1,
PreviewPanel.Thumbnails.ZOOMED_THUMBNAIL_SIZE /
Math.max(width, height));
var imageWidth = ~~(width * scale);
var imageHeight = ~~(height * scale);
var zoomedImage = this.element_.ownerDocument.createElement('img');
if (scale < 0.3) {
// Scaling large images kills animation. Downscale it in advance.
// Canvas scales images with liner interpolation. Make a larger
// image (but small enough to not kill animation) and let IMAGE
// scale it smoothly.
var INTERMEDIATE_SCALE = 3;
var canvas = this.element_.ownerDocument.createElement('canvas');
canvas.width = imageWidth * INTERMEDIATE_SCALE;
canvas.height = imageHeight * INTERMEDIATE_SCALE;
var ctx = canvas.getContext('2d');
ctx.drawImage(image, 0, 0, canvas.width, canvas.height);
// Using bigger than default compression reduces image size by
// several times. Quality degradation compensated by greater resolution.
zoomedImage.src = canvas.toDataURL('image/jpeg', 0.6);
} else {
zoomedImage.src = image.src;
}
var boxWidth = Math.max(PreviewPanel.Thumbnails.THUMBNAIL_SIZE, imageWidth);
var boxHeight = Math.max(PreviewPanel.Thumbnails.THUMBNAIL_SIZE, imageHeight);
if (transform && transform.rotate90 % 2 == 1) {
var t = boxWidth;
boxWidth = boxHeight;
boxHeight = t;
}
util.applyTransform(zoomedImage, transform);
var zoomedBox = this.element_.ownerDocument.createElement('div');
zoomedBox.className = 'popup';
zoomedBox.style.width = boxWidth + 'px';
zoomedBox.style.height = boxHeight + 'px';
zoomedBox.appendChild(zoomedImage);
this.element_.appendChild(zoomedBox);
this.element_.classList.add('has-zoom');
return;
};
| selection | identifier_name |
preview_panel.js | // Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
'use strict';
/**
* PreviewPanel UI class.
* @param {HTMLElement} element DOM Element of preview panel.
* @param {PreviewPanel.VisibilityType} visibilityType Initial value of the
* visibility type.
* @param {MetadataCache} metadataCache Metadata cache.
* @param {VolumeManagerWrapper} volumeManager Volume manager.
* @constructor
* @extends {cr.EventTarget}
*/
var PreviewPanel = function(element,
visibilityType,
metadataCache,
volumeManager) {
/**
* The cached height of preview panel.
* @type {number}
* @private
*/
this.height_ = 0;
/**
* Visibility type of the preview panel.
* @type {PreviewPanel.VisiblityType}
* @private
*/
this.visibilityType_ = visibilityType;
/**
* Current entry to be displayed.
* @type {Entry}
* @private
*/
this.currentEntry_ = null;
/**
* Dom element of the preview panel.
* @type {HTMLElement}
* @private
*/
this.element_ = element;
/**
* @type {PreviewPanel.Thumbnails}
*/
this.thumbnails = new PreviewPanel.Thumbnails(
element.querySelector('.preview-thumbnails'),
metadataCache,
volumeManager);
/**
* @type {HTMLElement}
* @private
*/
this.summaryElement_ = element.querySelector('.preview-summary');
/**
* @type {PreviewPanel.CalculatingSizeLabel}
* @private
*/
this.calculatingSizeLabel_ = new PreviewPanel.CalculatingSizeLabel(
this.summaryElement_.querySelector('.calculating-size'));
/**
* @type {HTMLElement}
* @private
*/
this.previewText_ = element.querySelector('.preview-text');
/**
* FileSelection to be displayed.
* @type {FileSelection}
* @private
*/
this.selection_ = {entries: [], computeBytes: function() {}};
/**
* Sequence value that is incremented by every selection update and is used to
* check if the callback is up to date or not.
* @type {number}
* @private
*/
this.sequence_ = 0;
/**
* @type {VolumeManagerWrapper}
* @private
*/
this.volumeManager_ = volumeManager;
cr.EventTarget.call(this);
};
/**
* Name of PreviewPanels's event.
* @enum {string}
* @const
*/
PreviewPanel.Event = Object.freeze({
// Event to be triggered at the end of visibility change.
VISIBILITY_CHANGE: 'visibilityChange'
});
/**
* Visibility type of the preview panel.
*/
PreviewPanel.VisibilityType = Object.freeze({
// Preview panel always shows.
ALWAYS_VISIBLE: 'alwaysVisible',
// Preview panel shows when the selection property are set.
AUTO: 'auto',
// Preview panel does not show.
ALWAYS_HIDDEN: 'alwaysHidden'
});
/**
* @private
*/
PreviewPanel.Visibility_ = Object.freeze({
VISIBLE: 'visible',
HIDING: 'hiding',
HIDDEN: 'hidden'
});
PreviewPanel.prototype = {
__proto__: cr.EventTarget.prototype,
/**
* Setter for the current entry.
* @param {Entry} entry New entry.
*/
set currentEntry(entry) {
if (util.isSameEntry(this.currentEntry_, entry))
return;
this.currentEntry_ = entry;
this.updateVisibility_();
this.updatePreviewArea_();
},
/**
* Setter for the visibility type.
* @param {PreviewPanel.VisibilityType} visibilityType New value of visibility
* type.
*/
set visibilityType(visibilityType) {
this.visibilityType_ = visibilityType;
this.updateVisibility_();
// Also update the preview area contents, because the update is suppressed
// while the visibility is hiding or hidden.
this.updatePreviewArea_();
},
get visible() {
return this.element_.getAttribute('visibility') ==
PreviewPanel.Visibility_.VISIBLE;
},
/**
* Obtains the height of preview panel.
* @return {number} Height of preview panel.
*/
get height() |
};
/**
* Initializes the element.
*/
PreviewPanel.prototype.initialize = function() {
this.element_.addEventListener('webkitTransitionEnd',
this.onTransitionEnd_.bind(this));
this.updateVisibility_();
// Also update the preview area contents, because the update is suppressed
// while the visibility is hiding or hidden.
this.updatePreviewArea_();
};
/**
* Apply the selection and update the view of the preview panel.
* @param {FileSelection} selection Selection to be applied.
*/
PreviewPanel.prototype.setSelection = function(selection) {
this.sequence_++;
this.selection_ = selection;
this.updateVisibility_();
this.updatePreviewArea_();
};
/**
* Update the visibility of the preview panel.
* @private
*/
PreviewPanel.prototype.updateVisibility_ = function() {
// Get the new visibility value.
var visibility = this.element_.getAttribute('visibility');
var newVisible = null;
switch (this.visibilityType_) {
case PreviewPanel.VisibilityType.ALWAYS_VISIBLE:
newVisible = true;
break;
case PreviewPanel.VisibilityType.AUTO:
newVisible = this.selection_.entries.length !== 0;
break;
case PreviewPanel.VisibilityType.ALWAYS_HIDDEN:
newVisible = false;
break;
default:
console.error('Invalid visibilityType.');
return;
}
// If the visibility has been already the new value, just return.
if ((visibility == PreviewPanel.Visibility_.VISIBLE && newVisible) ||
(visibility == PreviewPanel.Visibility_.HIDDEN && !newVisible))
return;
// Set the new visibility value.
if (newVisible) {
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.VISIBLE);
cr.dispatchSimpleEvent(this, PreviewPanel.Event.VISIBILITY_CHANGE);
} else {
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.HIDING);
}
};
/**
* Update the text in the preview panel.
* @private
*/
PreviewPanel.prototype.updatePreviewArea_ = function() {
// If the preview panel is hiding, does not update the current view.
if (!this.visible)
return;
var selection = this.selection_;
// If no item is selected, no information is displayed on the footer.
if (selection.totalCount === 0) {
this.thumbnails.hidden = true;
this.calculatingSizeLabel_.hidden = true;
this.previewText_.textContent = '';
return;
}
// If one item is selected, show thumbnail and entry name of the item.
if (selection.totalCount === 1) {
this.thumbnails.hidden = false;
this.thumbnails.selection = selection;
this.calculatingSizeLabel_.hidden = true;
this.previewText_.textContent = util.getEntryLabel(
this.volumeManager_, selection.entries[0]);
return;
}
// Update thumbnails.
this.thumbnails.hidden = false;
this.thumbnails.selection = selection;
// Obtains the preview text.
var text;
if (selection.directoryCount == 0)
text = strf('MANY_FILES_SELECTED', selection.fileCount);
else if (selection.fileCount == 0)
text = strf('MANY_DIRECTORIES_SELECTED', selection.directoryCount);
else
text = strf('MANY_ENTRIES_SELECTED', selection.totalCount);
// Obtains the size of files.
this.calculatingSizeLabel_.hidden = selection.bytesKnown;
if (selection.bytesKnown && selection.showBytes)
text += ', ' + util.bytesToString(selection.bytes);
// Set the preview text to the element.
this.previewText_.textContent = text;
// Request the byte calculation if needed.
if (!selection.bytesKnown) {
this.selection_.computeBytes(function(sequence) {
// Selection has been already updated.
if (this.sequence_ != sequence)
return;
this.updatePreviewArea_();
}.bind(this, this.sequence_));
}
};
/**
* Event handler to be called at the end of hiding transition.
* @param {Event} event The webkitTransitionEnd event.
* @private
*/
PreviewPanel.prototype.onTransitionEnd_ = function(event) {
if (event.target != this.element_ || event.propertyName != 'opacity')
return;
var visibility = this.element_.getAttribute('visibility');
if (visibility != PreviewPanel.Visibility_.HIDING)
return;
this.element_.setAttribute('visibility', PreviewPanel.Visibility_.HIDDEN);
cr.dispatchSimpleEvent(this, PreviewPanel.Event.VISIBILITY_CHANGE);
};
/**
* Animating label that is shown during the bytes of selection entries is being
* calculated.
*
* This label shows dots and varying the number of dots every
* CalculatingSizeLabel.PERIOD milliseconds.
* @param {HTMLElement} element DOM element of the label.
* @constructor
*/
PreviewPanel.CalculatingSizeLabel = function(element) {
this.element_ = element;
this.count_ = 0;
this.intervalID_ = null;
Object.seal(this);
};
/**
* Time period in milliseconds.
* @const {number}
*/
PreviewPanel.CalculatingSizeLabel.PERIOD = 500;
PreviewPanel.CalculatingSizeLabel.prototype = {
/**
* Set visibility of the label.
* When it is displayed, the text is animated.
* @param {boolean} hidden Whether to hide the label or not.
*/
set hidden(hidden) {
this.element_.hidden = hidden;
if (!hidden) {
if (this.intervalID_ != null)
return;
this.count_ = 2;
this.intervalID_ =
setInterval(this.onStep_.bind(this),
PreviewPanel.CalculatingSizeLabel.PERIOD);
this.onStep_();
} else {
if (this.intervalID_ == null)
return;
clearInterval(this.intervalID_);
this.intervalID_ = null;
}
}
};
/**
* Increments the counter and updates the number of dots.
* @private
*/
PreviewPanel.CalculatingSizeLabel.prototype.onStep_ = function() {
var text = str('CALCULATING_SIZE');
for (var i = 0; i < ~~(this.count_ / 2) % 4; i++) {
text += '.';
}
this.element_.textContent = text;
this.count_++;
};
/**
* Thumbnails on the preview panel.
*
* @param {HTMLElement} element DOM Element of thumbnail container.
* @param {MetadataCache} metadataCache MetadataCache.
* @param {VolumeManagerWrapper} volumeManager Volume manager instance.
* @constructor
*/
PreviewPanel.Thumbnails = function(element, metadataCache, volumeManager) {
this.element_ = element;
this.metadataCache_ = metadataCache;
this.volumeManager_ = volumeManager;
this.sequence_ = 0;
Object.seal(this);
};
/**
* Maximum number of thumbnails.
* @const {number}
*/
PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT = 4;
/**
* Edge length of the thumbnail square.
* @const {number}
*/
PreviewPanel.Thumbnails.THUMBNAIL_SIZE = 35;
/**
* Longer edge length of zoomed thumbnail rectangle.
* @const {number}
*/
PreviewPanel.Thumbnails.ZOOMED_THUMBNAIL_SIZE = 200;
PreviewPanel.Thumbnails.prototype = {
/**
* Sets entries to be displayed in the view.
* @param {Array.<Entry>} value Entries.
*/
set selection(value) {
this.sequence_++;
this.loadThumbnails_(value);
},
/**
* Set visibility of the thumbnails.
* @param {boolean} value Whether to hide the thumbnails or not.
*/
set hidden(value) {
this.element_.hidden = value;
}
};
/**
* Loads thumbnail images.
* @param {FileSelection} selection Selection containing entries that are
* sources of images.
* @private
*/
PreviewPanel.Thumbnails.prototype.loadThumbnails_ = function(selection) {
var entries = selection.entries;
this.element_.classList.remove('has-zoom');
this.element_.innerText = '';
var clickHandler = selection.tasks &&
selection.tasks.executeDefault.bind(selection.tasks);
var length = Math.min(entries.length,
PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT);
for (var i = 0; i < length; i++) {
// Create a box.
var box = this.element_.ownerDocument.createElement('div');
box.style.zIndex = PreviewPanel.Thumbnails.MAX_THUMBNAIL_COUNT + 1 - i;
// Load the image.
if (entries[i]) {
FileGrid.decorateThumbnailBox(box,
entries[i],
this.metadataCache_,
this.volumeManager_,
ThumbnailLoader.FillMode.FILL,
FileGrid.ThumbnailQuality.LOW,
i == 0 && length == 1 &&
this.setZoomedImage_.bind(this));
}
// Register the click handler.
if (clickHandler)
box.addEventListener('click', clickHandler);
// Append
this.element_.appendChild(box);
}
};
/**
* Create the zoomed version of image and set it to the DOM element to show the
* zoomed image.
*
* @param {Image} image Image to be source of the zoomed image.
* @param {transform} transform Transformation to be applied to the image.
* @private
*/
PreviewPanel.Thumbnails.prototype.setZoomedImage_ = function(image, transform) {
if (!image)
return;
var width = image.width || 0;
var height = image.height || 0;
if (width == 0 ||
height == 0 ||
(width < PreviewPanel.Thumbnails.THUMBNAIL_SIZE * 2 &&
height < PreviewPanel.Thumbnails.THUMBNAIL_SIZE * 2))
return;
var scale = Math.min(1,
PreviewPanel.Thumbnails.ZOOMED_THUMBNAIL_SIZE /
Math.max(width, height));
var imageWidth = ~~(width * scale);
var imageHeight = ~~(height * scale);
var zoomedImage = this.element_.ownerDocument.createElement('img');
if (scale < 0.3) {
// Scaling large images kills animation. Downscale it in advance.
// Canvas scales images with liner interpolation. Make a larger
// image (but small enough to not kill animation) and let IMAGE
// scale it smoothly.
var INTERMEDIATE_SCALE = 3;
var canvas = this.element_.ownerDocument.createElement('canvas');
canvas.width = imageWidth * INTERMEDIATE_SCALE;
canvas.height = imageHeight * INTERMEDIATE_SCALE;
var ctx = canvas.getContext('2d');
ctx.drawImage(image, 0, 0, canvas.width, canvas.height);
// Using bigger than default compression reduces image size by
// several times. Quality degradation compensated by greater resolution.
zoomedImage.src = canvas.toDataURL('image/jpeg', 0.6);
} else {
zoomedImage.src = image.src;
}
var boxWidth = Math.max(PreviewPanel.Thumbnails.THUMBNAIL_SIZE, imageWidth);
var boxHeight = Math.max(PreviewPanel.Thumbnails.THUMBNAIL_SIZE, imageHeight);
if (transform && transform.rotate90 % 2 == 1) {
var t = boxWidth;
boxWidth = boxHeight;
boxHeight = t;
}
util.applyTransform(zoomedImage, transform);
var zoomedBox = this.element_.ownerDocument.createElement('div');
zoomedBox.className = 'popup';
zoomedBox.style.width = boxWidth + 'px';
zoomedBox.style.height = boxHeight + 'px';
zoomedBox.appendChild(zoomedImage);
this.element_.appendChild(zoomedBox);
this.element_.classList.add('has-zoom');
return;
};
| {
this.height_ = this.height_ || this.element_.clientHeight;
return this.height_;
} | identifier_body |
cylinder.rs | //! Construct cylinders that are curved sheets, not volumes.
use surface::{Sheet, LatticeType};
use coord::{Coord, Direction, Translate,
rotate_coords, rotate_planar_coords_to_alignment};
use describe::{unwrap_name, Describe};
use error::Result;
use iterator::{ResidueIter, ResidueIterOut};
use system::*;
use std::f64::consts::PI;
use std::fmt;
use std::fmt::{Display, Formatter};
impl_component![Cylinder];
impl_translate![Cylinder];
#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize)]
/// Cylinders can be capped in either or both ends.
pub enum CylinderCap {
Top,
Bottom,
Both,
}
impl Display for CylinderCap {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match *self {
CylinderCap::Top => write!(f, "Top"),
CylinderCap::Bottom => write!(f, "Bottom"),
CylinderCap::Both => write!(f, "Both"),
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
/// A 2D cylindrical surface.
pub struct Cylinder {
/// Name of cylinder in database.
pub name: Option<String>,
/// Optional residue placed at each coordinate. If not set the cylinder describes
/// a general collection of coordinates.
pub residue: Option<Residue>,
/// lattice type used to construct the cylinder surface structure.
pub lattice: LatticeType,
/// The axis along which the cylinder is aligned.
pub alignment: Direction,
/// Cylinders can be capped at its ends.
pub cap: Option<CylinderCap>,
#[serde(skip)]
/// Origin of the cylinder. Located in the center of the bottom.
pub origin: Coord,
#[serde(skip)]
/// Radius of cylinder.
pub radius: f64,
#[serde(skip)]
/// Height of cylinder.
pub height: f64,
#[serde(skip)]
/// List of coordinates belonging to the cylinder. Relative to the `origin.
pub coords: Vec<Coord>,
}
impl Cylinder {
/// Construct the cylinder coordinates and return the object.
///
/// # Errors
/// Returns an error if either the radius or height is non-positive.
pub fn construct(self) -> Result<Cylinder> {
// Bend a `Sheet` of the chosen lattice type into the cylinder.
let length = 2.0 * PI * self.radius;
let width = self.height;
let sheet = Sheet {
name: None,
residue: None,
lattice: self.lattice.clone(),
std_z: None,
origin: Coord::default(),
normal: Direction::Z,
length,
width,
coords: vec![],
}.construct()?;
let final_radius = sheet.length / (2.0 * PI);
let final_height = sheet.width;
// The cylinder will be created aligned to the Y axis
let mut coords: Vec<_> = sheet.coords
.iter()
.map(|coord| {
let (x0, y, _) = coord.to_tuple();
let angle = (x0 * 360.0 / sheet.length).to_radians();
let x = final_radius * angle.sin();
let z = -final_radius * angle.cos();
Coord::new(x, y, z)
})
.collect();
if let Some(cap) = self.cap {
// The cylinder is aligned along the y axis. Construct a cap from
// the same sheet and rotate it to match.
let mut bottom = sheet.to_circle(final_radius); //.rotate(Direction::X);
bottom.coords = rotate_planar_coords_to_alignment(&bottom.coords,
Direction::Z, Direction::Y);
// Get the top cap coordinates by shifting the bottom ones, not just the origin.
let top_coords: Vec<_> = bottom.coords
.iter()
.map(|&coord| coord + Coord::new(0.0, final_height, 0.0))
.collect();
match cap {
CylinderCap::Bottom => coords.extend_from_slice(&bottom.coords),
CylinderCap::Top => coords.extend_from_slice(&top_coords),
CylinderCap::Both => {
coords.extend_from_slice(&bottom.coords);
coords.extend_from_slice(&top_coords);
}
}
}
// Rotate the cylinder once along the x-axis to align them to the z-axis.
Ok(Cylinder {
alignment: Direction::Z,
radius: final_radius,
height: final_height,
coords: rotate_coords(&coords, Direction::X),
.. self
})
}
/// Calculate the box size.
fn calc_box_size(&self) -> Coord {
let diameter = 2.0 * self.radius;
match self.alignment {
Direction::X => Coord::new(self.height, diameter, diameter),
Direction::Y => Coord::new(diameter, self.height, diameter),
Direction::Z => Coord::new(diameter, diameter, self.height),
}
}
}
impl Describe for Cylinder {
fn describe(&self) -> String {
format!("{} (Cylinder surface of radius {:.2} and height {:.2} at {})",
unwrap_name(&self.name), self.radius, self.height, self.origin)
} | }
#[cfg(test)]
mod tests {
use super::*;
use surface::LatticeType::*;
fn setup_cylinder(radius: f64, height: f64, lattice: &LatticeType) -> Cylinder {
Cylinder {
name: None,
residue: None,
lattice: lattice.clone(),
alignment: Direction::Z,
cap: None,
origin: Coord::default(),
radius,
height,
coords: vec![],
}
}
#[test]
fn cylinder_is_bent_from_sheet_as_expected() {
let radius = 2.0;
let height = 5.0;
let density = 10.0;
let lattice = PoissonDisc { density };
let cylinder = setup_cylinder(radius, height, &lattice).construct().unwrap();
// We should have a rough surface density match
let expected = 2.0 * PI * radius * height * density;
assert!((expected - cylinder.coords.len() as f64).abs() / expected < 0.1);
// Not all coords should be at z = 0, ie. not still a sheet
let sum_z = cylinder.coords.iter().map(|&Coord { x: _, y: _, z }| z.abs()).sum::<f64>();
assert!(sum_z > 0.0);
// Currently the alignment should be along Z
assert_eq!(Direction::Z, cylinder.alignment);
// Rigorous test of coordinate structure
for coord in cylinder.coords {
let (r, h) = Coord::ORIGO.distance_cylindrical(coord, Direction::Z);
assert!(r <= cylinder.radius);
assert!(h >= 0.0 && h <= cylinder.height);
}
}
#[test]
fn cylinder_corrects_radius_and_height_to_match_lattice_spacing() {
let radius = 1.0; // should give circumference = 2 * PI
let height = 5.0;
let a = 1.0; // not a match to the circumference
let b = 1.1; // not a match to the height
let lattice = Triclinic { a, b, gamma: 90.0 };
let cylinder = setup_cylinder(radius, height, &lattice).construct().unwrap();
assert_ne!(radius, cylinder.radius);
assert_ne!(height, cylinder.height);
// The best match to the circumference 2 * PI is the multiple 6 * a.
assert_eq!(6.0 * a / (2.0 * PI), cylinder.radius);
assert_eq!(5.0 * b, cylinder.height);
}
#[test]
fn constructing_cylinder_with_negative_radius_or_height_returns_error() {
let lattice = PoissonDisc { density: 10.0 };
assert!(setup_cylinder(-1.0, 1.0, &lattice).construct().is_err());
assert!(setup_cylinder(1.0, -1.0, &lattice).construct().is_err());
assert!(setup_cylinder(1.0, 1.0, &lattice).construct().is_ok());
}
#[test]
fn add_caps_to_cylinder() {
let radius = 2.0;
let height = 5.0;
let lattice = Hexagonal { a: 0.1 };
let mut conf = setup_cylinder(radius, height, &lattice);
// Without caps
let cylinder = conf.clone().construct().unwrap();
let num_coords = cylinder.coords.len();
// With a bottom cap
conf.cap = Some(CylinderCap::Bottom);
let cylinder_cap = conf.clone().construct().unwrap();
// The first coordinates should be the original cylinder
let (original, bottom) = cylinder_cap.coords.split_at(num_coords);
assert_eq!(&original, &cylinder.coords.as_slice());
assert!(bottom.len() > 0);
// All the bottom coordinates should be at z = 0
for coord in bottom {
assert_eq!(coord.z, 0.0);
}
// A top cap
conf.cap = Some(CylinderCap::Top);
let cylinder_cap = conf.clone().construct().unwrap();
let (original, top) = cylinder_cap.coords.split_at(num_coords);
assert_eq!(&original, &cylinder.coords.as_slice());
assert_eq!(top.len(), bottom.len());
// All the top coordinates should be at the cylinder height
for coord in top {
assert_eq!(coord.z, cylinder.height);
}
// Both caps
conf.cap = Some(CylinderCap::Both);
let cylinder_cap = conf.clone().construct().unwrap();
let (original, bottom_and_top) = cylinder_cap.coords.split_at(num_coords);
assert_eq!(&original, &cylinder.coords.as_slice());
let (bottom_from_both, top_from_both) = bottom_and_top.split_at(bottom.len());
assert_eq!(bottom, bottom_from_both);
assert_eq!(top, top_from_both);
}
#[test]
fn calc_box_size_of_cylinder() {
let radius = 2.0;
let height = 5.0;
let lattice = Hexagonal { a: 0.1 };
// Check each direction
let mut cylinder = Cylinder {
alignment: Direction::X,
.. setup_cylinder(radius, height, &lattice)
};
let diameter = 2.0 * radius;
assert_eq!(Coord::new(height, diameter, diameter), cylinder.calc_box_size());
cylinder.alignment = Direction::Y;
assert_eq!(Coord::new(diameter, height, diameter), cylinder.calc_box_size());
cylinder.alignment = Direction::Z;
assert_eq!(Coord::new(diameter, diameter, height), cylinder.calc_box_size());
}
} |
fn describe_short(&self) -> String {
format!("{} (Cylinder)", unwrap_name(&self.name))
} | random_line_split |
cylinder.rs | //! Construct cylinders that are curved sheets, not volumes.
use surface::{Sheet, LatticeType};
use coord::{Coord, Direction, Translate,
rotate_coords, rotate_planar_coords_to_alignment};
use describe::{unwrap_name, Describe};
use error::Result;
use iterator::{ResidueIter, ResidueIterOut};
use system::*;
use std::f64::consts::PI;
use std::fmt;
use std::fmt::{Display, Formatter};
impl_component![Cylinder];
impl_translate![Cylinder];
#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize)]
/// Cylinders can be capped in either or both ends.
pub enum CylinderCap {
Top,
Bottom,
Both,
}
impl Display for CylinderCap {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match *self {
CylinderCap::Top => write!(f, "Top"),
CylinderCap::Bottom => write!(f, "Bottom"),
CylinderCap::Both => write!(f, "Both"),
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
/// A 2D cylindrical surface.
pub struct Cylinder {
/// Name of cylinder in database.
pub name: Option<String>,
/// Optional residue placed at each coordinate. If not set the cylinder describes
/// a general collection of coordinates.
pub residue: Option<Residue>,
/// lattice type used to construct the cylinder surface structure.
pub lattice: LatticeType,
/// The axis along which the cylinder is aligned.
pub alignment: Direction,
/// Cylinders can be capped at its ends.
pub cap: Option<CylinderCap>,
#[serde(skip)]
/// Origin of the cylinder. Located in the center of the bottom.
pub origin: Coord,
#[serde(skip)]
/// Radius of cylinder.
pub radius: f64,
#[serde(skip)]
/// Height of cylinder.
pub height: f64,
#[serde(skip)]
/// List of coordinates belonging to the cylinder. Relative to the `origin.
pub coords: Vec<Coord>,
}
impl Cylinder {
/// Construct the cylinder coordinates and return the object.
///
/// # Errors
/// Returns an error if either the radius or height is non-positive.
pub fn construct(self) -> Result<Cylinder> {
// Bend a `Sheet` of the chosen lattice type into the cylinder.
let length = 2.0 * PI * self.radius;
let width = self.height;
let sheet = Sheet {
name: None,
residue: None,
lattice: self.lattice.clone(),
std_z: None,
origin: Coord::default(),
normal: Direction::Z,
length,
width,
coords: vec![],
}.construct()?;
let final_radius = sheet.length / (2.0 * PI);
let final_height = sheet.width;
// The cylinder will be created aligned to the Y axis
let mut coords: Vec<_> = sheet.coords
.iter()
.map(|coord| {
let (x0, y, _) = coord.to_tuple();
let angle = (x0 * 360.0 / sheet.length).to_radians();
let x = final_radius * angle.sin();
let z = -final_radius * angle.cos();
Coord::new(x, y, z)
})
.collect();
if let Some(cap) = self.cap {
// The cylinder is aligned along the y axis. Construct a cap from
// the same sheet and rotate it to match.
let mut bottom = sheet.to_circle(final_radius); //.rotate(Direction::X);
bottom.coords = rotate_planar_coords_to_alignment(&bottom.coords,
Direction::Z, Direction::Y);
// Get the top cap coordinates by shifting the bottom ones, not just the origin.
let top_coords: Vec<_> = bottom.coords
.iter()
.map(|&coord| coord + Coord::new(0.0, final_height, 0.0))
.collect();
match cap {
CylinderCap::Bottom => coords.extend_from_slice(&bottom.coords),
CylinderCap::Top => coords.extend_from_slice(&top_coords),
CylinderCap::Both => {
coords.extend_from_slice(&bottom.coords);
coords.extend_from_slice(&top_coords);
}
}
}
// Rotate the cylinder once along the x-axis to align them to the z-axis.
Ok(Cylinder {
alignment: Direction::Z,
radius: final_radius,
height: final_height,
coords: rotate_coords(&coords, Direction::X),
.. self
})
}
/// Calculate the box size.
fn calc_box_size(&self) -> Coord {
let diameter = 2.0 * self.radius;
match self.alignment {
Direction::X => Coord::new(self.height, diameter, diameter),
Direction::Y => Coord::new(diameter, self.height, diameter),
Direction::Z => Coord::new(diameter, diameter, self.height),
}
}
}
impl Describe for Cylinder {
fn describe(&self) -> String {
format!("{} (Cylinder surface of radius {:.2} and height {:.2} at {})",
unwrap_name(&self.name), self.radius, self.height, self.origin)
}
fn describe_short(&self) -> String {
format!("{} (Cylinder)", unwrap_name(&self.name))
}
}
#[cfg(test)]
mod tests {
use super::*;
use surface::LatticeType::*;
fn setup_cylinder(radius: f64, height: f64, lattice: &LatticeType) -> Cylinder {
Cylinder {
name: None,
residue: None,
lattice: lattice.clone(),
alignment: Direction::Z,
cap: None,
origin: Coord::default(),
radius,
height,
coords: vec![],
}
}
#[test]
fn cylinder_is_bent_from_sheet_as_expected() {
let radius = 2.0;
let height = 5.0;
let density = 10.0;
let lattice = PoissonDisc { density };
let cylinder = setup_cylinder(radius, height, &lattice).construct().unwrap();
// We should have a rough surface density match
let expected = 2.0 * PI * radius * height * density;
assert!((expected - cylinder.coords.len() as f64).abs() / expected < 0.1);
// Not all coords should be at z = 0, ie. not still a sheet
let sum_z = cylinder.coords.iter().map(|&Coord { x: _, y: _, z }| z.abs()).sum::<f64>();
assert!(sum_z > 0.0);
// Currently the alignment should be along Z
assert_eq!(Direction::Z, cylinder.alignment);
// Rigorous test of coordinate structure
for coord in cylinder.coords {
let (r, h) = Coord::ORIGO.distance_cylindrical(coord, Direction::Z);
assert!(r <= cylinder.radius);
assert!(h >= 0.0 && h <= cylinder.height);
}
}
#[test]
fn cylinder_corrects_radius_and_height_to_match_lattice_spacing() {
let radius = 1.0; // should give circumference = 2 * PI
let height = 5.0;
let a = 1.0; // not a match to the circumference
let b = 1.1; // not a match to the height
let lattice = Triclinic { a, b, gamma: 90.0 };
let cylinder = setup_cylinder(radius, height, &lattice).construct().unwrap();
assert_ne!(radius, cylinder.radius);
assert_ne!(height, cylinder.height);
// The best match to the circumference 2 * PI is the multiple 6 * a.
assert_eq!(6.0 * a / (2.0 * PI), cylinder.radius);
assert_eq!(5.0 * b, cylinder.height);
}
#[test]
fn | () {
let lattice = PoissonDisc { density: 10.0 };
assert!(setup_cylinder(-1.0, 1.0, &lattice).construct().is_err());
assert!(setup_cylinder(1.0, -1.0, &lattice).construct().is_err());
assert!(setup_cylinder(1.0, 1.0, &lattice).construct().is_ok());
}
#[test]
fn add_caps_to_cylinder() {
let radius = 2.0;
let height = 5.0;
let lattice = Hexagonal { a: 0.1 };
let mut conf = setup_cylinder(radius, height, &lattice);
// Without caps
let cylinder = conf.clone().construct().unwrap();
let num_coords = cylinder.coords.len();
// With a bottom cap
conf.cap = Some(CylinderCap::Bottom);
let cylinder_cap = conf.clone().construct().unwrap();
// The first coordinates should be the original cylinder
let (original, bottom) = cylinder_cap.coords.split_at(num_coords);
assert_eq!(&original, &cylinder.coords.as_slice());
assert!(bottom.len() > 0);
// All the bottom coordinates should be at z = 0
for coord in bottom {
assert_eq!(coord.z, 0.0);
}
// A top cap
conf.cap = Some(CylinderCap::Top);
let cylinder_cap = conf.clone().construct().unwrap();
let (original, top) = cylinder_cap.coords.split_at(num_coords);
assert_eq!(&original, &cylinder.coords.as_slice());
assert_eq!(top.len(), bottom.len());
// All the top coordinates should be at the cylinder height
for coord in top {
assert_eq!(coord.z, cylinder.height);
}
// Both caps
conf.cap = Some(CylinderCap::Both);
let cylinder_cap = conf.clone().construct().unwrap();
let (original, bottom_and_top) = cylinder_cap.coords.split_at(num_coords);
assert_eq!(&original, &cylinder.coords.as_slice());
let (bottom_from_both, top_from_both) = bottom_and_top.split_at(bottom.len());
assert_eq!(bottom, bottom_from_both);
assert_eq!(top, top_from_both);
}
#[test]
fn calc_box_size_of_cylinder() {
let radius = 2.0;
let height = 5.0;
let lattice = Hexagonal { a: 0.1 };
// Check each direction
let mut cylinder = Cylinder {
alignment: Direction::X,
.. setup_cylinder(radius, height, &lattice)
};
let diameter = 2.0 * radius;
assert_eq!(Coord::new(height, diameter, diameter), cylinder.calc_box_size());
cylinder.alignment = Direction::Y;
assert_eq!(Coord::new(diameter, height, diameter), cylinder.calc_box_size());
cylinder.alignment = Direction::Z;
assert_eq!(Coord::new(diameter, diameter, height), cylinder.calc_box_size());
}
}
| constructing_cylinder_with_negative_radius_or_height_returns_error | identifier_name |
writeAllSln.py | # -*- python -*-
# $Id: writeAllSln.py,v 1.7 2013/04/02 23:59:31 jrb Exp $
#
# Delete old all.sln if there is one
# Read in *.sln for specified directory (defaults to "."), saving
# - some boilerplate which is in all sln files and needs to go in our output
# - dict. of project definitions, keyed by project file name (excluding .vcproj)
# - dict. of per-project stuff appearing in Global postSolution section
#
# Write out all.sln, using all of above
import os, os.path, sys, re
class allSln(object):
def __init__(self):
self.projectDict = {}
self.postSolutionDict = {}
self.header = []
self.global_presolution1 = []
self.global_presolution2 = []
self.global_postsolution_header = ""
# By default include all projects.
self.pkgname = ""
# Define pattern for first line of Project definition, picking out name (e.g. CalibSvcLib) and
# unique id, which appears to be of form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx where each x is
# a hex digit.
self.projPat = re.compile('Project\(".*\s=\s"(.+)"\,\s".*"\,\s"\{([0-9,A-F,-]*)\}"')
self.endProjPat = re.compile('^EndProject\s*$')
self.gProjPat = re.compile('\w*\{([0-9,A-F,-]*)\}.*')
# Args:
# fp is filepointer
# firstline is typically returned from previous call
# If base is True, we're reading corresponding sln from base release;
# in this case don't preserve dependencies and don't include at all
# if key is already in dict
def readProject(self, fp, firstline, base=False):
# find and save project name and project id in firstline
mobj = self.projPat.match(firstline)
if mobj == None:
print "cannot parse supposed Project definition starting ",
print firstline
exit(1)
# If not base save all lines through line with contents "EndProject"
# in projLines. Otherwise ignore lines listing dependencies
projLines = [firstline]
lenp = len(firstline)
try:
ln = fp.readline()
lenp += len(ln)
except:
print "readline inside readProject failed"
exit(1)
if not base:
while ln[:10] != "EndProject":
projLines.append(ln)
ln = fp.readline()
lenp += len(ln)
else:
#print "Processing project section for project", mobj.group(1)
if "ProjectSection(ProjectDependencies)" in ln:
projLines.append(ln)
lenp += len(ln)
elif "EndProject" in ln:
pass # no dependencies for this project
else: # give up! We can't parse it
print "Cannot parse sln file in base release"
exit(1)
if "EndProject" in ln:
pass
else:
while "EndProjectSection" not in ln: #skip
ln = fp.readline()
while ln[:10] != "EndProject":
projLines.append(ln)
ln = fp.readline()
lenp += len(ln)
projLines.append(ln)
# Make dictionary entry (a quintuple) keyed by project name if
# (we're including all projects ) OR (project name ends in "Lib") OR
# (project name ends in "InstallSrc") OR
# (pkgname is non-null and project name = pkgname or "test_" + package name
useIt = (self.pkgname == "")
projName = mobj.group(1)
#print "Parsing projName = ", projName, " useIt is ", useIt
if (not useIt) and ((projName[len(projName)-3:] == "Lib") or
(projName[len(projName)-10:] == "InstallSrc") or
(projName == self.pkgname) or
(projName == "test_" + self.pkgname)): useIt = True
# One more special case: for alluserAlg, include userApp
if((self.pkgname == 'userAlg') and (projName == 'userApp')): useIt = True
if projName not in self.projectDict:
#print "First-time dict entry for ", projName
if useIt: self.projectDict[projName] = [projName, mobj.group(2), projLines, lenp, base]
else:
# If base and entry already exists, don't use this one.
# Else see if new entry is shorter. If so, replace old with it
if not base:
if self.projectDict[projName][4] == True:
#if useIt: Unnecessary. Wouldn't get here if useIt were false
self.projectDict[projName] = [projName, mobj.group(2), projLines, lenp, False]
#print "replace base entry with non-base entry"
elif self.projectDict[projName][3] > lenp:
self.projectDict[projName] = [projName, mobj.group(2), projLines, lenp, False]
#print "replace entry projName ",projName ," with shorter one"
# read one more line and return it
#print "Done parsing project definition for ", mobj.group(1)
next = fp.readline()
return next
def readGlobal(self, fp, firstGlobal, base=False):
try:
next = fp.readline()
except:
print "read inside readGlobal failed"
sys.stdout.flush()
exit(1)
if next.rfind("EndGlobal") != -1:
print "found EndGlobal prematurely"
return 0
if next.rfind("preSolution") != -1:
sys.stdout.flush()
if len(self.global_presolution1) == 0:
self.global_presolution1.append(next)
try:
next = fp.readline()
self.global_presolution1.append(next)
next = fp.readline()
self.global_presolution1.append(next)
except:
print "failed to read all of preSolution1"
sys.stdout.flush()
exit(1)
else:
try:
fp.readline()
fp.readline()
except:
print "failed to read remainder of presolution1"
sys.stdout.flush()
exit(1)
try:
next = fp.readline()
except:
print "failed read past preSolution1"
sys.stdout.flush()
exit(1)
sys.stdout.flush()
if next.rfind("postSolution") != -1:
if self.global_postsolution_header == "":
self.global_postsolution_header = next
# read in a line. Either it signifies end of section or it's the start
# of a 2-line entry a particular project. Store the two lines in a dict,
# keyed by project id
next = fp.readline()
while next.rfind("EndGlobalSection") == -1:
plist = [next]
mobj = self.gProjPat.search(next)
if mobj == None:
print "Bad global postSolution section"
sys.stdout.flush()
return 1
plist.append(fp.readline())
if base: # check we have corresponding project
for k in self.projectDict:
if k[1] in mobj.group(1):
self.postSolutionDict[mobj.group(1)] = plist
break
else: # always add it
self.postSolutionDict[mobj.group(1)] = plist
next = fp.readline()
next = fp.readline()
sys.stdout.flush()
if next.rfind("preSolution") != -1:
if self.global_presolution2 == []:
self.global_presolution2.append(next)
self.global_presolution2.append(fp.readline())
self.global_presolution2.append(fp.readline())
else:
fp.readline()
fp.readline()
next = fp.readline()
if next[:9] == "EndGlobal":
return 0
else : return 1
# fp is file pointer to sln to be read. if base is True we're
# in supersede case and are reading corresponding base sln file
def readSln(self, fp, base=False):
h1 = fp.readline()
h2 = fp.readline()
if self.header == []: self.header = [h1, h2]
next = fp.readline()
while next[:8] == "Project(":
next = self.readProject(fp, next, base)
# Next should be Global
if next[:6] == "Global":
return self.readGlobal(fp, next, base)
elif next != '':
print "Unexpected section ", next, " in input solution file"
fp.close()
exit(1)
else:
return 0
def writeAllSln(self, path):
#print "Writing ", path
try:
fp = open(path, 'w')
except:
print "Cannot open ", path, " for write"
exit(1)
try:
fp.write(self.header[0])
fp.write(self.header[1])
# Write out all the projects
for k in self.projectDict:
###print "First line of project is ", str(self.projectDict[k][2][0])
for ln in self.projectDict[k][2]:
fp.write(ln)
fp.write("Global\n")
for ln in self.global_presolution1:
fp.write(ln)
fp.write(self.global_postsolution_header)
for ps in self.postSolutionDict:
fp.write(self.postSolutionDict[ps][0])
fp.write(self.postSolutionDict[ps][1])
fp.write("\tEndGlobalSection\n")
for ln in self.global_presolution2:
|
fp.write("EndGlobal\n")
except:
print "Failure writing to ", path
finally:
fp.close()
# main
# We assume we're in the same directory as the solution files
#
obj = allSln()
if len(sys.argv) > 1:
sdir = sys.argv[1]
else:
sdir = (".")
obj.sdir = sdir
basedir = sdir
if len(sys.argv) > 2: basedir = sys.argv[2]
obj.basedir = basedir
if len(sys.argv) > 3:
obj.pkgname = sys.argv[3]
else:
obj.pkgname = ""
outfile = "all" + obj.pkgname + ".sln"
# get rid of old all.sln if there is one
try:
os.remove(os.path.join(sdir, outfile))
except:
# Don't care if it fails; probably just means the file didn't exist
pass
studiofiles = os.listdir(sdir)
for fname in studiofiles:
if fname[len(fname)-4:] == ".sln":
if fname[0:2] != "all": # we use it
try:
#print "Found file ", fname
f = open(os.path.join(sdir, fname))
obj.readSln(f)
f.close()
except:
print "Unable to read file ", fname
if f != None: f.close()
if basedir != sdir:
# Read corresponding sln file in basedir
try:
f = open(os.path.join(basedir, outfile))
obj.readSln(f, True)
f.close()
except:
print "Unable to read base inst. file ", os.path.join(basedir, outfile)
obj.writeAllSln(os.path.join(sdir, outfile))
exit(0)
| fp.write(ln) | conditional_block |
writeAllSln.py | # -*- python -*-
# $Id: writeAllSln.py,v 1.7 2013/04/02 23:59:31 jrb Exp $
#
# Delete old all.sln if there is one
# Read in *.sln for specified directory (defaults to "."), saving
# - some boilerplate which is in all sln files and needs to go in our output
# - dict. of project definitions, keyed by project file name (excluding .vcproj)
# - dict. of per-project stuff appearing in Global postSolution section
#
# Write out all.sln, using all of above
import os, os.path, sys, re
class allSln(object):
def __init__(self):
self.projectDict = {}
self.postSolutionDict = {}
self.header = []
self.global_presolution1 = []
self.global_presolution2 = []
self.global_postsolution_header = ""
# By default include all projects.
self.pkgname = ""
# Define pattern for first line of Project definition, picking out name (e.g. CalibSvcLib) and
# unique id, which appears to be of form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx where each x is
# a hex digit.
self.projPat = re.compile('Project\(".*\s=\s"(.+)"\,\s".*"\,\s"\{([0-9,A-F,-]*)\}"')
self.endProjPat = re.compile('^EndProject\s*$')
self.gProjPat = re.compile('\w*\{([0-9,A-F,-]*)\}.*')
# Args:
# fp is filepointer
# firstline is typically returned from previous call
# If base is True, we're reading corresponding sln from base release;
# in this case don't preserve dependencies and don't include at all
# if key is already in dict
def readProject(self, fp, firstline, base=False):
# find and save project name and project id in firstline
mobj = self.projPat.match(firstline)
if mobj == None:
print "cannot parse supposed Project definition starting ",
print firstline
exit(1)
# If not base save all lines through line with contents "EndProject"
# in projLines. Otherwise ignore lines listing dependencies
projLines = [firstline]
lenp = len(firstline)
try:
ln = fp.readline()
lenp += len(ln)
except:
print "readline inside readProject failed"
exit(1)
if not base:
while ln[:10] != "EndProject":
projLines.append(ln)
ln = fp.readline()
lenp += len(ln)
else:
#print "Processing project section for project", mobj.group(1)
if "ProjectSection(ProjectDependencies)" in ln:
projLines.append(ln)
lenp += len(ln)
elif "EndProject" in ln:
pass # no dependencies for this project
else: # give up! We can't parse it
print "Cannot parse sln file in base release"
exit(1)
if "EndProject" in ln:
pass
else:
while "EndProjectSection" not in ln: #skip
ln = fp.readline()
while ln[:10] != "EndProject":
projLines.append(ln)
ln = fp.readline()
lenp += len(ln)
projLines.append(ln)
# Make dictionary entry (a quintuple) keyed by project name if
# (we're including all projects ) OR (project name ends in "Lib") OR
# (project name ends in "InstallSrc") OR
# (pkgname is non-null and project name = pkgname or "test_" + package name
useIt = (self.pkgname == "")
projName = mobj.group(1)
#print "Parsing projName = ", projName, " useIt is ", useIt
if (not useIt) and ((projName[len(projName)-3:] == "Lib") or
(projName[len(projName)-10:] == "InstallSrc") or
(projName == self.pkgname) or
(projName == "test_" + self.pkgname)): useIt = True
# One more special case: for alluserAlg, include userApp
if((self.pkgname == 'userAlg') and (projName == 'userApp')): useIt = True
if projName not in self.projectDict:
#print "First-time dict entry for ", projName
if useIt: self.projectDict[projName] = [projName, mobj.group(2), projLines, lenp, base]
else:
# If base and entry already exists, don't use this one.
# Else see if new entry is shorter. If so, replace old with it
if not base:
if self.projectDict[projName][4] == True:
#if useIt: Unnecessary. Wouldn't get here if useIt were false
self.projectDict[projName] = [projName, mobj.group(2), projLines, lenp, False]
#print "replace base entry with non-base entry"
elif self.projectDict[projName][3] > lenp:
self.projectDict[projName] = [projName, mobj.group(2), projLines, lenp, False]
#print "replace entry projName ",projName ," with shorter one"
# read one more line and return it
#print "Done parsing project definition for ", mobj.group(1)
next = fp.readline()
return next
def readGlobal(self, fp, firstGlobal, base=False):
try:
next = fp.readline()
except:
print "read inside readGlobal failed"
sys.stdout.flush()
exit(1)
if next.rfind("EndGlobal") != -1:
print "found EndGlobal prematurely"
return 0
if next.rfind("preSolution") != -1:
sys.stdout.flush()
if len(self.global_presolution1) == 0:
self.global_presolution1.append(next)
try:
next = fp.readline()
self.global_presolution1.append(next)
next = fp.readline()
self.global_presolution1.append(next)
except:
print "failed to read all of preSolution1"
sys.stdout.flush()
exit(1)
else:
try:
fp.readline()
fp.readline()
except:
print "failed to read remainder of presolution1"
sys.stdout.flush()
exit(1)
try:
next = fp.readline()
except:
print "failed read past preSolution1"
sys.stdout.flush()
exit(1)
sys.stdout.flush()
if next.rfind("postSolution") != -1:
if self.global_postsolution_header == "":
self.global_postsolution_header = next
# read in a line. Either it signifies end of section or it's the start
# of a 2-line entry a particular project. Store the two lines in a dict,
# keyed by project id
next = fp.readline()
while next.rfind("EndGlobalSection") == -1:
plist = [next]
mobj = self.gProjPat.search(next)
if mobj == None:
print "Bad global postSolution section"
sys.stdout.flush()
return 1
plist.append(fp.readline())
if base: # check we have corresponding project
for k in self.projectDict:
if k[1] in mobj.group(1):
self.postSolutionDict[mobj.group(1)] = plist
break
else: # always add it
self.postSolutionDict[mobj.group(1)] = plist
next = fp.readline()
next = fp.readline()
sys.stdout.flush()
if next.rfind("preSolution") != -1:
if self.global_presolution2 == []:
self.global_presolution2.append(next)
self.global_presolution2.append(fp.readline())
self.global_presolution2.append(fp.readline())
else:
fp.readline()
fp.readline()
next = fp.readline()
if next[:9] == "EndGlobal":
return 0
else : return 1
# fp is file pointer to sln to be read. if base is True we're
# in supersede case and are reading corresponding base sln file
def readSln(self, fp, base=False):
|
def writeAllSln(self, path):
#print "Writing ", path
try:
fp = open(path, 'w')
except:
print "Cannot open ", path, " for write"
exit(1)
try:
fp.write(self.header[0])
fp.write(self.header[1])
# Write out all the projects
for k in self.projectDict:
###print "First line of project is ", str(self.projectDict[k][2][0])
for ln in self.projectDict[k][2]:
fp.write(ln)
fp.write("Global\n")
for ln in self.global_presolution1:
fp.write(ln)
fp.write(self.global_postsolution_header)
for ps in self.postSolutionDict:
fp.write(self.postSolutionDict[ps][0])
fp.write(self.postSolutionDict[ps][1])
fp.write("\tEndGlobalSection\n")
for ln in self.global_presolution2:
fp.write(ln)
fp.write("EndGlobal\n")
except:
print "Failure writing to ", path
finally:
fp.close()
# main
# We assume we're in the same directory as the solution files
#
obj = allSln()
if len(sys.argv) > 1:
sdir = sys.argv[1]
else:
sdir = (".")
obj.sdir = sdir
basedir = sdir
if len(sys.argv) > 2: basedir = sys.argv[2]
obj.basedir = basedir
if len(sys.argv) > 3:
obj.pkgname = sys.argv[3]
else:
obj.pkgname = ""
outfile = "all" + obj.pkgname + ".sln"
# get rid of old all.sln if there is one
try:
os.remove(os.path.join(sdir, outfile))
except:
# Don't care if it fails; probably just means the file didn't exist
pass
studiofiles = os.listdir(sdir)
for fname in studiofiles:
if fname[len(fname)-4:] == ".sln":
if fname[0:2] != "all": # we use it
try:
#print "Found file ", fname
f = open(os.path.join(sdir, fname))
obj.readSln(f)
f.close()
except:
print "Unable to read file ", fname
if f != None: f.close()
if basedir != sdir:
# Read corresponding sln file in basedir
try:
f = open(os.path.join(basedir, outfile))
obj.readSln(f, True)
f.close()
except:
print "Unable to read base inst. file ", os.path.join(basedir, outfile)
obj.writeAllSln(os.path.join(sdir, outfile))
exit(0)
| h1 = fp.readline()
h2 = fp.readline()
if self.header == []: self.header = [h1, h2]
next = fp.readline()
while next[:8] == "Project(":
next = self.readProject(fp, next, base)
# Next should be Global
if next[:6] == "Global":
return self.readGlobal(fp, next, base)
elif next != '':
print "Unexpected section ", next, " in input solution file"
fp.close()
exit(1)
else:
return 0 | identifier_body |
writeAllSln.py | # -*- python -*-
# $Id: writeAllSln.py,v 1.7 2013/04/02 23:59:31 jrb Exp $
#
# Delete old all.sln if there is one
# Read in *.sln for specified directory (defaults to "."), saving
# - some boilerplate which is in all sln files and needs to go in our output
# - dict. of project definitions, keyed by project file name (excluding .vcproj)
# - dict. of per-project stuff appearing in Global postSolution section
#
# Write out all.sln, using all of above
import os, os.path, sys, re
class allSln(object):
def __init__(self):
self.projectDict = {}
self.postSolutionDict = {}
self.header = []
self.global_presolution1 = []
self.global_presolution2 = []
self.global_postsolution_header = ""
# By default include all projects.
self.pkgname = ""
# Define pattern for first line of Project definition, picking out name (e.g. CalibSvcLib) and
# unique id, which appears to be of form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx where each x is
# a hex digit.
self.projPat = re.compile('Project\(".*\s=\s"(.+)"\,\s".*"\,\s"\{([0-9,A-F,-]*)\}"')
self.endProjPat = re.compile('^EndProject\s*$')
self.gProjPat = re.compile('\w*\{([0-9,A-F,-]*)\}.*')
# Args:
# fp is filepointer
# firstline is typically returned from previous call
# If base is True, we're reading corresponding sln from base release;
# in this case don't preserve dependencies and don't include at all
# if key is already in dict
def readProject(self, fp, firstline, base=False):
# find and save project name and project id in firstline
mobj = self.projPat.match(firstline)
if mobj == None:
print "cannot parse supposed Project definition starting ",
print firstline
exit(1)
# If not base save all lines through line with contents "EndProject"
# in projLines. Otherwise ignore lines listing dependencies
projLines = [firstline]
lenp = len(firstline)
try:
ln = fp.readline()
lenp += len(ln)
except:
print "readline inside readProject failed"
exit(1)
if not base:
while ln[:10] != "EndProject":
projLines.append(ln)
ln = fp.readline()
lenp += len(ln)
else:
#print "Processing project section for project", mobj.group(1)
if "ProjectSection(ProjectDependencies)" in ln:
projLines.append(ln)
lenp += len(ln)
elif "EndProject" in ln:
pass # no dependencies for this project
else: # give up! We can't parse it
print "Cannot parse sln file in base release"
exit(1)
if "EndProject" in ln:
pass
else:
while "EndProjectSection" not in ln: #skip
ln = fp.readline()
while ln[:10] != "EndProject":
projLines.append(ln)
ln = fp.readline()
lenp += len(ln)
projLines.append(ln)
# Make dictionary entry (a quintuple) keyed by project name if
# (we're including all projects ) OR (project name ends in "Lib") OR
# (project name ends in "InstallSrc") OR
# (pkgname is non-null and project name = pkgname or "test_" + package name
useIt = (self.pkgname == "")
projName = mobj.group(1)
#print "Parsing projName = ", projName, " useIt is ", useIt
if (not useIt) and ((projName[len(projName)-3:] == "Lib") or
(projName[len(projName)-10:] == "InstallSrc") or
(projName == self.pkgname) or
(projName == "test_" + self.pkgname)): useIt = True
# One more special case: for alluserAlg, include userApp
if((self.pkgname == 'userAlg') and (projName == 'userApp')): useIt = True
if projName not in self.projectDict:
#print "First-time dict entry for ", projName
if useIt: self.projectDict[projName] = [projName, mobj.group(2), projLines, lenp, base]
else:
# If base and entry already exists, don't use this one.
# Else see if new entry is shorter. If so, replace old with it
if not base:
if self.projectDict[projName][4] == True:
#if useIt: Unnecessary. Wouldn't get here if useIt were false
self.projectDict[projName] = [projName, mobj.group(2), projLines, lenp, False]
#print "replace base entry with non-base entry"
elif self.projectDict[projName][3] > lenp:
self.projectDict[projName] = [projName, mobj.group(2), projLines, lenp, False]
#print "replace entry projName ",projName ," with shorter one"
# read one more line and return it
#print "Done parsing project definition for ", mobj.group(1)
next = fp.readline()
return next
def readGlobal(self, fp, firstGlobal, base=False):
try:
next = fp.readline()
except:
print "read inside readGlobal failed"
sys.stdout.flush()
exit(1)
if next.rfind("EndGlobal") != -1:
print "found EndGlobal prematurely"
return 0
if next.rfind("preSolution") != -1:
sys.stdout.flush()
if len(self.global_presolution1) == 0:
self.global_presolution1.append(next)
try:
next = fp.readline()
self.global_presolution1.append(next)
next = fp.readline()
self.global_presolution1.append(next)
except:
print "failed to read all of preSolution1"
sys.stdout.flush()
exit(1)
else:
try:
fp.readline()
fp.readline()
except:
print "failed to read remainder of presolution1"
sys.stdout.flush()
exit(1)
try:
next = fp.readline()
except:
print "failed read past preSolution1"
sys.stdout.flush()
exit(1)
sys.stdout.flush()
if next.rfind("postSolution") != -1:
if self.global_postsolution_header == "":
self.global_postsolution_header = next
# read in a line. Either it signifies end of section or it's the start
# of a 2-line entry a particular project. Store the two lines in a dict,
# keyed by project id
next = fp.readline()
while next.rfind("EndGlobalSection") == -1:
plist = [next]
mobj = self.gProjPat.search(next)
if mobj == None:
print "Bad global postSolution section"
sys.stdout.flush()
return 1
plist.append(fp.readline())
if base: # check we have corresponding project
for k in self.projectDict:
if k[1] in mobj.group(1):
self.postSolutionDict[mobj.group(1)] = plist
break
else: # always add it
self.postSolutionDict[mobj.group(1)] = plist
next = fp.readline()
next = fp.readline()
sys.stdout.flush()
if next.rfind("preSolution") != -1:
if self.global_presolution2 == []:
self.global_presolution2.append(next)
self.global_presolution2.append(fp.readline())
self.global_presolution2.append(fp.readline())
else:
fp.readline()
fp.readline()
next = fp.readline()
if next[:9] == "EndGlobal":
return 0
else : return 1
# fp is file pointer to sln to be read. if base is True we're
# in supersede case and are reading corresponding base sln file
def readSln(self, fp, base=False):
h1 = fp.readline()
h2 = fp.readline()
if self.header == []: self.header = [h1, h2]
next = fp.readline()
while next[:8] == "Project(":
next = self.readProject(fp, next, base)
# Next should be Global
if next[:6] == "Global":
return self.readGlobal(fp, next, base)
elif next != '':
print "Unexpected section ", next, " in input solution file"
fp.close()
exit(1)
else:
return 0
def | (self, path):
#print "Writing ", path
try:
fp = open(path, 'w')
except:
print "Cannot open ", path, " for write"
exit(1)
try:
fp.write(self.header[0])
fp.write(self.header[1])
# Write out all the projects
for k in self.projectDict:
###print "First line of project is ", str(self.projectDict[k][2][0])
for ln in self.projectDict[k][2]:
fp.write(ln)
fp.write("Global\n")
for ln in self.global_presolution1:
fp.write(ln)
fp.write(self.global_postsolution_header)
for ps in self.postSolutionDict:
fp.write(self.postSolutionDict[ps][0])
fp.write(self.postSolutionDict[ps][1])
fp.write("\tEndGlobalSection\n")
for ln in self.global_presolution2:
fp.write(ln)
fp.write("EndGlobal\n")
except:
print "Failure writing to ", path
finally:
fp.close()
# main
# We assume we're in the same directory as the solution files
#
obj = allSln()
if len(sys.argv) > 1:
sdir = sys.argv[1]
else:
sdir = (".")
obj.sdir = sdir
basedir = sdir
if len(sys.argv) > 2: basedir = sys.argv[2]
obj.basedir = basedir
if len(sys.argv) > 3:
obj.pkgname = sys.argv[3]
else:
obj.pkgname = ""
outfile = "all" + obj.pkgname + ".sln"
# get rid of old all.sln if there is one
try:
os.remove(os.path.join(sdir, outfile))
except:
# Don't care if it fails; probably just means the file didn't exist
pass
studiofiles = os.listdir(sdir)
for fname in studiofiles:
if fname[len(fname)-4:] == ".sln":
if fname[0:2] != "all": # we use it
try:
#print "Found file ", fname
f = open(os.path.join(sdir, fname))
obj.readSln(f)
f.close()
except:
print "Unable to read file ", fname
if f != None: f.close()
if basedir != sdir:
# Read corresponding sln file in basedir
try:
f = open(os.path.join(basedir, outfile))
obj.readSln(f, True)
f.close()
except:
print "Unable to read base inst. file ", os.path.join(basedir, outfile)
obj.writeAllSln(os.path.join(sdir, outfile))
exit(0)
| writeAllSln | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.